1 #include <linux/delay.h>
2 #include <linux/etherdevice.h>
3
4 #include "opt_global.h"
5 #include "bnxt.h"
6 #include "hsi_struct_def.h"
7 #include "bnxt_hwrm.h"
8 #include "bnxt_sriov.h"
9
10 static int
bnxt_set_vf_admin_mac(struct bnxt_softc * softc,struct bnxt_vf_info * vf,const uint8_t * mac)11 bnxt_set_vf_admin_mac(struct bnxt_softc *softc, struct bnxt_vf_info *vf,
12 const uint8_t *mac)
13 {
14 struct hwrm_func_cfg_input req = {0};
15 int rc;
16
17 if (!BNXT_PF(softc))
18 return (EOPNOTSUPP);
19
20 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
21
22 req.fid = htole16(vf->fw_fid);
23 req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
24 memcpy(req.dflt_mac_addr, mac, ETHER_ADDR_LEN);
25
26 BNXT_HWRM_LOCK(softc);
27 rc = _hwrm_send_message(softc, &req, sizeof(req));
28 BNXT_HWRM_UNLOCK(softc);
29
30 return (rc);
31 }
32
33 static bool
bnxt_vf_parse_schema(struct bnxt_softc * softc,struct bnxt_vf_info * vf,const nvlist_t * params)34 bnxt_vf_parse_schema(struct bnxt_softc *softc, struct bnxt_vf_info *vf,
35 const nvlist_t *params)
36 {
37 const void *mac;
38 size_t maclen;
39
40 memset(vf->mac_addr, 0, ETHER_ADDR_LEN);
41 memset(vf->vf_mac_addr, 0, ETHER_ADDR_LEN);
42
43 if (params == NULL)
44 return (false);
45
46 if (nvlist_exists(params, "mac-anti-spoof"))
47 vf->spoofchk = nvlist_get_bool(params, "mac-anti-spoof");
48 if (nvlist_exists(params, "trust"))
49 vf->trusted = nvlist_get_bool(params, "trust");
50
51 if (!nvlist_exists(params, "mac-addr"))
52 return (false);
53
54 mac = nvlist_get_binary(params, "mac-addr", &maclen);
55
56 if (maclen != ETHER_ADDR_LEN)
57 return (false);
58
59 if (!is_valid_ether_addr(mac))
60 return (false);
61
62 memcpy(vf->mac_addr, mac, ETHER_ADDR_LEN);
63 return (true);
64 }
65
66 /* Add a Virtual Functions */
67 int
bnxt_iov_vf_add(if_ctx_t ctx,uint16_t vfnum,const nvlist_t * params)68 bnxt_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
69 {
70 struct bnxt_softc *softc = iflib_get_softc(ctx);
71 struct bnxt_vf_info *vf = &softc->pf.vf[vfnum];
72 int rc;
73
74 vf->fw_fid = softc->pf.first_vf_id + vfnum;
75 vf->vfnum = vfnum;
76
77 /*
78 * If the schema provided a valid admin MAC, program it into firmware.
79 */
80 if (bnxt_vf_parse_schema(softc, vf, params)) {
81 rc = bnxt_set_vf_admin_mac(softc, vf, vf->mac_addr);
82 if (rc)
83 device_printf(softc->dev,
84 "vf%u: PF-assigned MAC programming failed (rc=%d), falling back to firmware/default MAC\n",
85 vfnum, rc);
86 }
87
88 (void)bnxt_set_vf_trust(softc, vfnum, vf->trusted);
89 (void)bnxt_set_vf_spoofchk(softc, vfnum, vf->spoofchk);
90
91 return 0;
92 }
93
94 /* Free driver-side VF resources (called after hwrm_vf_resc_free) */
bnxt_free_vf_resources(struct bnxt_softc * softc)95 void bnxt_free_vf_resources(struct bnxt_softc *softc)
96 {
97 int i;
98 size_t page_size = 1UL << softc->pf.vf_hwrm_cmd_req_page_shift;
99
100 softc->pf.active_vfs = 0;
101
102 if (softc->pf.vf) {
103 kfree(softc->pf.vf);
104 softc->pf.vf = NULL;
105 }
106 if (softc->pf.vf_event_bmap) {
107 kfree(softc->pf.vf_event_bmap);
108 softc->pf.vf_event_bmap = NULL;
109 }
110 for (i = 0; i < softc->pf.hwrm_cmd_req_pages; i++) {
111 if (softc->pf.hwrm_cmd_req_addr[i]) {
112 dma_free_coherent(&softc->pdev->dev, page_size,
113 softc->pf.hwrm_cmd_req_addr[i],
114 softc->pf.hwrm_cmd_req_dma_addr[i]);
115 softc->pf.hwrm_cmd_req_addr[i] = NULL;
116 }
117 }
118 }
119
120 /* Free firmware-side VF resources */
121 int
bnxt_hwrm_func_vf_resource_free(struct bnxt_softc * softc,int num_vfs)122 bnxt_hwrm_func_vf_resource_free(struct bnxt_softc *softc, int num_vfs)
123 {
124 int i, rc;
125 int first_vf_id, last_vf_id;
126 struct hwrm_func_vf_resc_free_input req;
127
128 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_RESC_FREE);
129
130 first_vf_id = softc->pf.first_vf_id;
131 last_vf_id = first_vf_id + num_vfs - 1;
132
133 BNXT_HWRM_LOCK(softc);
134 for (i = first_vf_id; i <= last_vf_id; i++) {
135 req.vf_id = cpu_to_le16(i);
136 rc = _hwrm_send_message(softc, &req, sizeof(req));
137 if (rc)
138 break;
139 }
140 BNXT_HWRM_UNLOCK(softc);
141
142 return rc;
143 }
144
145 /* Free all VF resources */
bnxt_iov_uninit(if_ctx_t ctx)146 void bnxt_iov_uninit(if_ctx_t ctx)
147 {
148 int rc;
149 struct bnxt_softc *softc = iflib_get_softc(ctx);
150 int num_vfs = softc->pf.num_vfs;
151
152 if (!num_vfs)
153 return;
154
155 BNXT_SRIOV_LOCK(softc);
156 softc->pf.num_vfs = 0;
157 BNXT_SRIOV_UNLOCK(softc);
158
159 rc = bnxt_hwrm_func_vf_resource_free(softc, num_vfs);
160 if (rc)
161 device_printf(softc->dev, "VF resource free HWRM failed: %d\n", rc);
162
163 bnxt_destroy_trusted_vf_sysctls(softc);
164 bnxt_free_vf_resources(softc);
165 BNXT_SRIOV_LOCK_DESTROY(softc);
166 }
167
/*
 * Divide the hardware resources left over after the PF's own allocation
 * evenly across num_vfs, storing the per-VF share (little-endian) into
 * both the min and max request fields.  Returns -EINVAL for a
 * non-positive VF count.
 */
static inline int
bnxt_set_vf_resc_field(uint16_t *min_field, uint16_t *max_field,
    uint16_t hw_max, uint16_t pf_alloc, int num_vfs)
{
	uint16_t share;

	if (num_vfs <= 0)
		return -EINVAL;

	share = (hw_max > pf_alloc) ? (hw_max - pf_alloc) / num_vfs : 0;
	*max_field = cpu_to_le16(share);
	*min_field = *max_field;

	return 0;
}
184
bnxt_set_vf_params(struct bnxt_softc * softc,int vf_id)185 static int bnxt_set_vf_params(struct bnxt_softc *softc, int vf_id)
186 {
187 struct hwrm_func_cfg_input req = {0};
188 struct bnxt_vf_info *vf;
189 int rc;
190
191 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
192
193 vf = &softc->pf.vf[vf_id];
194 req.fid = cpu_to_le16(vf->fw_fid);
195
196
197 if (is_valid_ether_addr(vf->mac_addr)) {
198 req.enables |= cpu_to_le32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
199 memcpy(req.dflt_mac_addr, vf->mac_addr, ETHER_ADDR_LEN);
200 }
201
202 if (vf->vlan) {
203 req.enables |= cpu_to_le32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
204 req.dflt_vlan = cpu_to_le16(vf->vlan);
205 }
206
207 if (vf->flags & BNXT_VF_TRUST)
208 req.flags = cpu_to_le32(HWRM_FUNC_CFG_INPUT_FLAGS_TRUSTED_VF_ENABLE);
209
210 BNXT_HWRM_LOCK(softc);
211 rc = _hwrm_send_message(softc, &req, sizeof(req));
212 BNXT_HWRM_UNLOCK(softc);
213 if (rc)
214 device_printf(softc->dev, "hwrm_func_cfg failed (error:%d)\n", rc);
215
216 return rc;
217 }
218
bnxt_approve_mac(struct bnxt_softc * sc)219 int bnxt_approve_mac(struct bnxt_softc *sc)
220 {
221
222 struct hwrm_func_vf_cfg_input req = (struct hwrm_func_vf_cfg_input){0};
223 struct bnxt_vf_info *vf = &sc->vf;
224 u8 *mac = vf->mac_addr;
225 int rc = 0;
226
227 if (!BNXT_VF(sc))
228 return EOPNOTSUPP;
229
230 bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_FUNC_VF_CFG);
231 req.enables = htole32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
232 memcpy(req.dflt_mac_addr, mac, ETHER_ADDR_LEN);
233
234 BNXT_HWRM_LOCK(sc);
235 rc = _hwrm_send_message(sc, &req, sizeof(req));
236 BNXT_HWRM_UNLOCK(sc);
237
238 if (rc) {
239 device_printf(sc->dev,
240 "VF MAC %02x:%02x:%02x:%02x:%02x:%02x not approved by PF (rc=%d)\n",
241 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], rc);
242 return EADDRNOTAVAIL;
243 }
244 return rc;
245 }
246
/*
 * VF only: query our own function (fid 0xffff) via HWRM_FUNC_QCAPS and
 * adopt the MAC address firmware reports.
 *
 * NOTE(review): 'inform_pf' is set only when the firmware-reported MAC
 * differs from the cached one AND the new value is not a valid unicast
 * address; bnxt_approve_mac() is then called with that (invalid) cached
 * address.  Presumably this nudges the PF when no admin MAC has been
 * assigned — confirm against the PF-side HWRM_FUNC_VF_CFG handling.
 */
void
bnxt_update_vf_mac(struct bnxt_softc *sc)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	/* Response lands in the shared HWRM response buffer; its contents
	 * are only stable while the HWRM lock is held. */
	struct hwrm_func_qcaps_output *resp =
	    (void *)sc->hwrm_cmd_resp.idi_vaddr;
	bool inform_pf = false;

	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_FUNC_QCAPS);
	req.fid = htole16(0xffff);	/* 0xffff == "this function" */

	BNXT_HWRM_LOCK(sc);
	rc = _hwrm_send_message(sc, &req, sizeof(req));
	if (rc)
		goto update_vf_mac_exit;

	/* Cache the firmware-reported MAC when it changed. */
	if (!ether_addr_equal(resp->mac_address, sc->vf.mac_addr)) {
		memcpy(sc->vf.mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		if (!is_valid_ether_addr(sc->vf.mac_addr))
			inform_pf = true;
	}

	/* A valid MAC is propagated to the interface and function state. */
	if (is_valid_ether_addr(sc->vf.mac_addr)) {
		iflib_set_mac(sc->ctx, sc->vf.mac_addr);
		memcpy(sc->func.mac_addr, sc->vf.mac_addr, ETHER_ADDR_LEN);
	}

update_vf_mac_exit:
	BNXT_HWRM_UNLOCK(sc);
	/* Must be done outside the HWRM lock: it issues its own command. */
	if (inform_pf)
		bnxt_approve_mac(sc);
}
280
281 static int
bnxt_hwrm_fwd_err_resp(struct bnxt_softc * softc,struct bnxt_vf_info * vf,u32 msg_size)282 bnxt_hwrm_fwd_err_resp(struct bnxt_softc *softc, struct bnxt_vf_info *vf,
283 u32 msg_size)
284 {
285 struct hwrm_reject_fwd_resp_input req;
286
287 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_REJECT_FWD_RESP);
288
289 if (msg_size > sizeof(req.encap_request))
290 msg_size = sizeof(req.encap_request);
291
292 req.target_id = cpu_to_le16(vf->fw_fid);
293 req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
294 memcpy(&req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
295
296 BNXT_HWRM_LOCK(softc);
297 int rc = _hwrm_send_message(softc, &req, sizeof(req));
298 BNXT_HWRM_UNLOCK(softc);
299 if (rc)
300 device_printf(softc->dev, "hwrm_fwd_err_resp failed (error=%d)\n", rc);
301
302 return rc;
303 }
304
305 static int
bnxt_hwrm_exec_fwd_resp(struct bnxt_softc * softc,struct bnxt_vf_info * vf,u32 msg_size)306 bnxt_hwrm_exec_fwd_resp(struct bnxt_softc *softc, struct bnxt_vf_info *vf,
307 u32 msg_size)
308 {
309 struct hwrm_exec_fwd_resp_input req;
310
311 if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
312 return bnxt_hwrm_fwd_err_resp(softc, vf, msg_size);
313
314 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_EXEC_FWD_RESP);
315
316 req.target_id = cpu_to_le16(vf->fw_fid);
317 req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
318 memcpy(&req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
319
320 BNXT_HWRM_LOCK(softc);
321 int rc = _hwrm_send_message(softc, &req, sizeof(req));
322 BNXT_HWRM_UNLOCK(softc);
323 if (rc)
324 device_printf(softc->dev, "hwrm_exec_fw_resp failed (error=%d)\n", rc);
325
326 return rc;
327 }
328
329 static int
bnxt_hwrm_func_qcfg_flags(struct bnxt_softc * softc,struct bnxt_vf_info * vf)330 bnxt_hwrm_func_qcfg_flags(struct bnxt_softc *softc, struct bnxt_vf_info *vf)
331 {
332 struct hwrm_func_qcfg_input req;
333 struct hwrm_func_qcfg_output *resp =
334 (void *)softc->hwrm_cmd_resp.idi_vaddr;
335
336 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
337
338 req.fid = cpu_to_le16(BNXT_PF(softc) ? vf->fw_fid : 0xffff);
339
340 BNXT_HWRM_LOCK(softc);
341 int rc = _hwrm_send_message(softc, &req, sizeof(req));
342 BNXT_HWRM_UNLOCK(softc);
343 if (!rc)
344 vf->func_qcfg_flags = cpu_to_le16(resp->flags);
345
346 return rc;
347 }
348
349 bool
bnxt_is_trusted_vf(struct bnxt_softc * softc,struct bnxt_vf_info * vf)350 bnxt_is_trusted_vf(struct bnxt_softc *softc, struct bnxt_vf_info *vf)
351 {
352 bnxt_hwrm_func_qcfg_flags(softc, vf);
353 return !!(vf->func_qcfg_flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF);
354 }
355
bnxt_promisc_ok(struct bnxt_softc * softc)356 bool bnxt_promisc_ok(struct bnxt_softc *softc)
357 {
358 if (BNXT_VF(softc) && !bnxt_is_trusted_vf(softc, &softc->vf))
359 return false;
360 return true;
361 }
362
363 static int
bnxt_hwrm_set_trusted_vf(struct bnxt_softc * softc,struct bnxt_vf_info * vf)364 bnxt_hwrm_set_trusted_vf(struct bnxt_softc *softc, struct bnxt_vf_info *vf)
365 {
366 struct hwrm_func_cfg_input req = {0};
367 int rc;
368
369 if (!(softc->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
370 return (EOPNOTSUPP);
371
372 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);
373
374 req.fid = htole16(vf->fw_fid);
375
376 if (vf->flags & BNXT_VF_TRUST)
377 req.flags = cpu_to_le32(HWRM_FUNC_CFG_INPUT_FLAGS_TRUSTED_VF_ENABLE);
378 else
379 req.flags = cpu_to_le32(HWRM_FUNC_CFG_INPUT_FLAGS_TRUSTED_VF_DISABLE);
380
381 BNXT_HWRM_LOCK(softc);
382 rc = _hwrm_send_message(softc, &req, sizeof(req));
383 BNXT_HWRM_UNLOCK(softc);
384 if (rc)
385 device_printf(softc->dev, "bnxt_hwrm_set_trusted_vf failed. rc:%d\n", rc);
386
387 return rc;
388 }
389
390 int
bnxt_set_vf_trust(struct bnxt_softc * softc,int vf_id,bool trusted)391 bnxt_set_vf_trust(struct bnxt_softc *softc, int vf_id, bool trusted)
392 {
393 int rc;
394 struct bnxt_vf_info *vf = NULL;
395
396 BNXT_SRIOV_LOCK(softc);
397 if (softc->pf.num_vfs == 0 || vf_id >= softc->pf.num_vfs) {
398 BNXT_SRIOV_UNLOCK(softc);
399 return (ENOENT);
400 }
401 vf = &softc->pf.vf[vf_id];
402
403 if (trusted)
404 vf->flags |= BNXT_VF_TRUST;
405 else
406 vf->flags &= ~BNXT_VF_TRUST;
407
408 BNXT_SRIOV_UNLOCK(softc);
409
410 rc = bnxt_hwrm_set_trusted_vf(softc, vf);
411 if (rc == 0) {
412 BNXT_SRIOV_LOCK(softc);
413 if (softc->pf.num_vfs != 0 && vf_id < softc->pf.num_vfs) {
414 vf = &softc->pf.vf[vf_id];
415 if (trusted)
416 vf->flags |= BNXT_VF_TRUST;
417 else
418 vf->flags &= ~BNXT_VF_TRUST;
419 }
420 BNXT_SRIOV_UNLOCK(softc);
421 }
422 return rc;
423 }
424
425 static int
bnxt_vf_configure_mac(struct bnxt_softc * softc,struct bnxt_vf_info * vf)426 bnxt_vf_configure_mac(struct bnxt_softc *softc, struct bnxt_vf_info *vf)
427 {
428 u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
429 struct hwrm_func_vf_cfg_input *req =
430 (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
431
432 /* Allow VF to set a valid MAC address, if trust is set to on or
433 * if the PF assigned MAC address is zero
434 */
435 if (req->enables &
436 cpu_to_le32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR)) {
437 bool trust = bnxt_is_trusted_vf(softc, vf);
438
439 if (is_valid_ether_addr(req->dflt_mac_addr) &&
440 (trust || !is_valid_ether_addr(vf->mac_addr) ||
441 ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
442 ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
443 return bnxt_hwrm_exec_fwd_resp(softc, vf, msg_size);
444 }
445 return bnxt_hwrm_fwd_err_resp(softc, vf, msg_size);
446 }
447 return bnxt_hwrm_exec_fwd_resp(softc, vf, msg_size);
448 }
449
bnxt_vf_validate_set_mac(struct bnxt_softc * softc,struct bnxt_vf_info * vf)450 static int bnxt_vf_validate_set_mac(struct bnxt_softc *softc, struct bnxt_vf_info *vf)
451 {
452 u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
453 struct hwrm_cfa_l2_filter_alloc_input *req =
454 (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
455 bool mac_ok = false;
456
457 if (!is_valid_ether_addr((const u8 *)req->l2_addr))
458 return bnxt_hwrm_fwd_err_resp(softc, vf, msg_size);
459
460 /* Allow VF to set a valid MAC address, if trust is set to on.
461 * Or VF MAC address must first match MAC address in PF's context.
462 * Otherwise, it must match the VF MAC address if firmware spec >=
463 * 1.2.2
464 */
465 if (bnxt_is_trusted_vf(softc, vf)) {
466 mac_ok = true;
467 } else if (is_valid_ether_addr(vf->mac_addr)) {
468 if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
469 mac_ok = true;
470 } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
471 if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
472 mac_ok = true;
473 } else {
474 mac_ok = true;
475 }
476 if (mac_ok)
477 return bnxt_hwrm_exec_fwd_resp(softc, vf, msg_size);
478
479 return bnxt_hwrm_fwd_err_resp(softc, vf, msg_size);
480 }
481
bnxt_vf_req_validate_snd(struct bnxt_softc * softc,struct bnxt_vf_info * vf)482 static int bnxt_vf_req_validate_snd(struct bnxt_softc *softc, struct bnxt_vf_info *vf)
483 {
484 int rc = 0;
485 struct input *encap_req = vf->hwrm_cmd_req_addr;
486 u32 req_type = le16_to_cpu(encap_req->req_type);
487
488 switch (req_type) {
489 case HWRM_FUNC_VF_CFG:
490 rc = bnxt_vf_configure_mac(softc, vf);
491 break;
492 case HWRM_CFA_L2_FILTER_ALLOC:
493 rc = bnxt_vf_validate_set_mac(softc, vf);
494 break;
495 case HWRM_FUNC_CFG:
496 rc = bnxt_hwrm_exec_fwd_resp(
497 softc, vf, sizeof(struct hwrm_func_cfg_input));
498 break;
499 case HWRM_PORT_PHY_QCFG:
500 /* ckp todo: Disable set VF link command now, enable it later
501 * Auto neg works as of now.
502 * rc = bnxt_vf_set_link(softc, vf);
503 */
504 break;
505 default:
506 rc = bnxt_hwrm_fwd_err_resp(softc, vf, softc->hwrm_max_req_len);
507 break;
508 }
509 return rc;
510 }
511
bnxt_hwrm_exec_fwd_req(struct bnxt_softc * softc)512 void bnxt_hwrm_exec_fwd_req(struct bnxt_softc *softc)
513 {
514 u32 i = 0, active_vfs = softc->pf.active_vfs, vf_id;
515
516 /* Scan through VF's and process commands */
517 while (1) {
518 vf_id = find_next_bit(softc->pf.vf_event_bmap, active_vfs, i);
519 if (vf_id >= active_vfs)
520 break;
521
522 clear_bit(vf_id, softc->pf.vf_event_bmap);
523 bnxt_vf_req_validate_snd(softc, &softc->pf.vf[vf_id]);
524 i = vf_id + 1;
525 }
526 }
527
/*
 * Destroy VF sysctls when VFs are removed / the PF detaches.  Freeing
 * the context removes every OID registered on it — the per-VF "trusted"
 * and "spoofchk" handlers both use softc->pf.sysctl_ctx.
 */
void
bnxt_destroy_trusted_vf_sysctls(struct bnxt_softc *softc)
{
	sysctl_ctx_free(&softc->pf.sysctl_ctx);
}
534
535 /* Handler for: dev.bnxt.<unit>.vf<N>.trusted (0/1) */
536 static int
bnxt_sysctl_vf_trusted(SYSCTL_HANDLER_ARGS)537 bnxt_sysctl_vf_trusted(SYSCTL_HANDLER_ARGS)
538 {
539 struct bnxt_softc *softc = (struct bnxt_softc *)arg1;
540 int vf_id = (int)arg2;
541 int val, rc;
542
543 BNXT_SRIOV_LOCK(softc);
544 if (softc->pf.num_vfs == 0 || vf_id < 0 || vf_id >= softc->pf.num_vfs) {
545 BNXT_SRIOV_UNLOCK(softc);
546 return (ENOENT);
547 }
548 val = (softc->pf.vf[vf_id].flags & BNXT_VF_TRUST) ? 1 : 0;
549 BNXT_SRIOV_UNLOCK(softc);
550
551 rc = sysctl_handle_int(oidp, &val, 0, req);
552 if (rc)
553 return rc;
554
555 /* If no new value supplied, it was a READ */
556 if (req->newptr == NULL)
557 return 0;
558
559 /* WRITE path: 'val' now holds the user's 0/1 */
560 rc = bnxt_set_vf_trust(softc, vf_id, (val != 0));
561
562 return rc;
563 }
564
565 /*
566 * Create per-VF sysctls:
567 * dev.bnxt.<unit>.vf0.trusted
568 * dev.bnxt.<unit>.vf1.trusted
569 * ..
570 */
571 int
bnxt_create_trusted_vf_sysctls(struct bnxt_softc * softc,uint16_t num_vfs)572 bnxt_create_trusted_vf_sysctls(struct bnxt_softc *softc, uint16_t num_vfs)
573 {
574 struct sysctl_oid_list *root_list;
575 struct sysctl_oid *vf_node;
576 char node_name[16];
577
578 /* use the device's sysctl tree as root: dev.bnxt.<unit>. */
579 sysctl_ctx_init(&softc->pf.sysctl_ctx);
580 root_list = SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev));
581
582 for (int i = 0; i < num_vfs; i++) {
583 snprintf(node_name, sizeof(node_name), "vf%d", i);
584
585 /* dev.bnxt.<unit>.vfN */
586 vf_node = SYSCTL_ADD_NODE(&softc->pf.sysctl_ctx,
587 root_list, OID_AUTO,
588 node_name, CTLFLAG_RW, 0, "VF node");
589
590 /* dev.bnxt.<unit>.vfN.trusted */
591 SYSCTL_ADD_PROC(&softc->pf.sysctl_ctx,
592 SYSCTL_CHILDREN(vf_node),
593 OID_AUTO, "trusted",
594 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
595 softc, i, bnxt_sysctl_vf_trusted, "I",
596 "0=untrusted (default), 1=trusted");
597 }
598 return 0;
599 }
600
601 static int
bnxt_hwrm_set_vf_spoofchk(struct bnxt_softc * sc,struct bnxt_vf_info * vf,bool enable)602 bnxt_hwrm_set_vf_spoofchk(struct bnxt_softc *sc, struct bnxt_vf_info *vf,
603 bool enable)
604 {
605 struct hwrm_func_cfg_input req = {0};
606 int rc = 0;
607
608 bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_FUNC_CFG);
609
610 req.fid = htole16(vf->fw_fid);
611 req.flags = htole32(enable ?
612 HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE :
613 HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE);
614
615 BNXT_HWRM_LOCK(sc);
616 rc = _hwrm_send_message(sc, &req, sizeof(req));
617 BNXT_HWRM_UNLOCK(sc);
618 if (rc)
619 device_printf(sc->dev, "bnxt_hwrm_set_vf_spoofchk failed. rc:%d\n", rc);
620
621 return rc;
622 }
623
624 int
bnxt_set_vf_spoofchk(struct bnxt_softc * sc,int vf_id,bool enable)625 bnxt_set_vf_spoofchk(struct bnxt_softc *sc, int vf_id, bool enable)
626 {
627 struct bnxt_vf_info *vf;
628 int rc;
629
630 BNXT_SRIOV_LOCK(sc);
631 if (sc->pf.num_vfs == 0 || vf_id >= sc->pf.num_vfs) {
632 BNXT_SRIOV_UNLOCK(sc);
633 return (ENOENT);
634 }
635 vf = &sc->pf.vf[vf_id];
636 BNXT_SRIOV_UNLOCK(sc);
637
638 rc = bnxt_hwrm_set_vf_spoofchk(sc, vf, enable);
639 if (rc == 0) {
640 BNXT_SRIOV_LOCK(sc);
641 if (sc->pf.num_vfs != 0 && vf_id < sc->pf.num_vfs) {
642 vf = &sc->pf.vf[vf_id];
643 if (enable)
644 vf->flags |= BNXT_VF_SPOOFCHK;
645 else
646 vf->flags &= ~BNXT_VF_SPOOFCHK;
647 }
648 BNXT_SRIOV_UNLOCK(sc);
649 }
650 return rc;
651 }
652
653 static int
bnxt_sysctl_vf_spoofchk(SYSCTL_HANDLER_ARGS)654 bnxt_sysctl_vf_spoofchk(SYSCTL_HANDLER_ARGS)
655 {
656 struct bnxt_softc *sc = (struct bnxt_softc *)arg1;
657 int vf_id = (int)arg2;
658 int val, rc;
659
660 BNXT_SRIOV_LOCK(sc);
661 if (sc->pf.num_vfs == 0 || vf_id >= sc->pf.num_vfs) {
662 BNXT_SRIOV_UNLOCK(sc);
663 return (ENOENT);
664 }
665 val = (sc->pf.vf[vf_id].flags & BNXT_VF_SPOOFCHK) ? 1 : 0;
666 BNXT_SRIOV_UNLOCK(sc);
667
668 rc = sysctl_handle_int(oidp, &val, 0, req);
669 if (rc || req->newptr == NULL)
670 return rc;
671
672 return bnxt_set_vf_spoofchk(sc, vf_id, val != 0);
673 }
674
675 /*
676 * Create per-VF spoofchk:
677 * dev.bnxt.<unit>.vf0.spoofchk
678 * dev.bnxt.<unit>.vf1.spoofchk
679 * ..
680 */
681 int
bnxt_create_spoofchk_vf_sysctls(struct bnxt_softc * softc,uint16_t num_vfs)682 bnxt_create_spoofchk_vf_sysctls(struct bnxt_softc *softc, uint16_t num_vfs)
683 {
684 struct sysctl_oid_list *root_list;
685 struct sysctl_oid *vf_node;
686 char node_name[16];
687
688 /* Reuse the same ctx & root tree as trusted vf */
689 root_list = SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev));
690
691 for (int i = 0; i < num_vfs; i++) {
692 snprintf(node_name, sizeof(node_name), "vf%d", i);
693
694 vf_node = SYSCTL_ADD_NODE(&softc->pf.sysctl_ctx,
695 root_list, OID_AUTO,
696 node_name, CTLFLAG_RW, 0, "VF node");
697
698 SYSCTL_ADD_PROC(&softc->pf.sysctl_ctx,
699 SYSCTL_CHILDREN(vf_node),
700 OID_AUTO, "spoofchk",
701 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
702 softc, i, bnxt_sysctl_vf_spoofchk, "I",
703 "0=spoofchk off, 1=spoofchk on");
704 }
705 return 0;
706 }
707
/*
 * Configure firmware-side resources for num_vfs VFs via
 * HWRM_FUNC_VF_RESOURCE_CFG.
 *
 * The per-VF pool is whatever each hardware maximum leaves after the
 * PF's own allocation, divided evenly across the VFs (see
 * bnxt_set_vf_resc_field()).  When 'reset' is set, each VF's persistent
 * MAC/VLAN/trust settings are re-applied first.  Returns 0 or the first
 * error; pf->active_vfs counts the VFs configured so far, so a partial
 * failure leaves the earlier VFs active.
 */
static int
bnxt_hwrm_func_vf_resc_cfg(struct bnxt_softc *softc, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_pf_info *pf = &softc->pf;
	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
	struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
	int i, rc;
	uint16_t msix_val = 0;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_RESOURCE_CFG);
	/* Map each (min,max) request-field pair to the hardware maximum
	 * and PF allocation it is derived from. */
	struct bnxt_resc_map resc_table[] = {
		{ &req.min_tx_rings, &req.max_tx_rings, hw_resc->max_tx_rings, fn_qcfg->alloc_tx_rings },
		{ &req.min_rx_rings, &req.max_rx_rings, hw_resc->max_rx_rings, fn_qcfg->alloc_rx_rings },
		{ &req.min_cmpl_rings, &req.max_cmpl_rings, hw_resc->max_cp_rings, fn_qcfg->alloc_completion_rings },
		{ &req.min_stat_ctx, &req.max_stat_ctx, hw_resc->max_stat_ctxs, fn_qcfg->alloc_stat_ctx },
		{ &req.min_vnics, &req.max_vnics, hw_resc->max_vnics, fn_qcfg->alloc_vnics },
		{ &req.min_hw_ring_grps, &req.max_hw_ring_grps, hw_resc->max_hw_ring_grps, fn_qcfg->alloc_hw_ring_grps },
		{ &req.min_rsscos_ctx, &req.max_rsscos_ctx, hw_resc->max_rsscos_ctxs, fn_qcfg->alloc_rss_ctx },
		{ &req.min_l2_ctxs, &req.max_l2_ctxs, hw_resc->max_l2_ctxs, fn_qcfg->alloc_l2_ctx },
	};

	/* Fill in the per-VF min/max shares; fails (-EINVAL) for
	 * num_vfs <= 0. */
	for (i = 0; i < sizeof(resc_table) / sizeof(resc_table[0]); i++) {
		rc = bnxt_set_vf_resc_field(resc_table[i].min_field,
		    resc_table[i].max_field,
		    resc_table[i].hw_max,
		    resc_table[i].pf_alloc,
		    num_vfs);
		if (rc)
			return rc;
	}

	/* Spare MSI-X vectors are likewise split evenly per VF. */
	if (hw_resc->max_irqs > fn_qcfg->alloc_msix && num_vfs > 0)
		msix_val = (hw_resc->max_irqs - fn_qcfg->alloc_msix) / num_vfs;

	req.max_msix = cpu_to_le16(msix_val);

	for (i = 0; i < num_vfs; i++) {
		struct bnxt_vf_info *vf = &pf->vf[i];

		vf->fw_fid = pf->first_vf_id + i;
		/* On reset, re-apply the VF's persistent settings before
		 * reconfiguring its resources. */
		if (reset) {
			rc = bnxt_set_vf_params(softc, i);
			if (rc)
				break;
		}

		req.vf_id = cpu_to_le16(vf->fw_fid);

		BNXT_HWRM_LOCK(softc);
		rc = _hwrm_send_message(softc, &req, sizeof(req));
		BNXT_HWRM_UNLOCK(softc);

		if (rc) {
			device_printf(softc->dev, "HWRM_FUNC_VF_RESOURCE_CFG req dump:\n");
			break;
		}

		/* Count only successfully configured VFs. */
		pf->active_vfs = i + 1;

		/* Cache the per-VF minimums (request fields are LE). */
		vf->min_tx_rings = le16_to_cpu(req.min_tx_rings);
		vf->min_rx_rings = le16_to_cpu(req.min_rx_rings);
		vf->min_cp_rings = le16_to_cpu(req.min_cmpl_rings);
		vf->min_stat_ctxs = le16_to_cpu(req.min_stat_ctx);
		vf->min_ring_grps = le16_to_cpu(req.min_hw_ring_grps);
		vf->min_vnics = le16_to_cpu(req.min_vnics);
	}

	/* Keep a copy of the request for later re-configuration. */
	if (pf->active_vfs)
		memcpy(&softc->vf_resc_cfg_input, &req,
		    sizeof(struct hwrm_func_vf_resource_cfg_input));

	return rc;
}
782
783 static int
bnxt_hwrm_func_buf_rgtr(struct bnxt_softc * softc)784 bnxt_hwrm_func_buf_rgtr(struct bnxt_softc *softc)
785 {
786 int rc;
787 struct hwrm_func_buf_rgtr_input req;
788
789 bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BUF_RGTR);
790
791 req.req_buf_num_pages = cpu_to_le16(softc->pf.hwrm_cmd_req_pages);
792 req.req_buf_page_size = cpu_to_le16(softc->pf.vf_hwrm_cmd_req_page_shift);
793 req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
794 req.req_buf_page_addr0 = cpu_to_le64(softc->pf.hwrm_cmd_req_dma_addr[0]);
795 req.req_buf_page_addr1 = cpu_to_le64(softc->pf.hwrm_cmd_req_dma_addr[1]);
796 req.req_buf_page_addr2 = cpu_to_le64(softc->pf.hwrm_cmd_req_dma_addr[2]);
797 req.req_buf_page_addr3 = cpu_to_le64(softc->pf.hwrm_cmd_req_dma_addr[3]);
798
799 BNXT_HWRM_LOCK(softc);
800 rc = _hwrm_send_message(softc, &req, sizeof(req));
801 BNXT_HWRM_UNLOCK(softc);
802
803 return rc;
804 }
805
806 static void
bnxt_set_vf_attr(struct bnxt_softc * softc,int num_vfs)807 bnxt_set_vf_attr(struct bnxt_softc *softc, int num_vfs)
808 {
809 int i;
810 struct bnxt_vf_info *vf;
811
812 for (i = 0; i < num_vfs; i++) {
813 vf = &softc->pf.vf[i];
814 memset(vf, 0, sizeof(*vf));
815 }
816 }
817
818 static int
bnxt_alloc_vf_resources(struct bnxt_softc * softc,int num_vfs)819 bnxt_alloc_vf_resources(struct bnxt_softc *softc, int num_vfs)
820 {
821 struct pci_dev *pdev = softc->pdev;
822 u32 nr_pages, size, i, j, k = 0;
823 u32 page_size, reqs_per_page;
824 void *p;
825
826 p = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
827 if (!p)
828 return ENOMEM;
829
830 rcu_assign_pointer(softc->pf.vf, p);
831 bnxt_set_vf_attr(softc, num_vfs);
832
833 size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
834 page_size = BNXT_PAGE_SIZE;
835 softc->pf.vf_hwrm_cmd_req_page_shift = BNXT_PAGE_SHIFT;
836 while (size > page_size * BNXT_MAX_VF_CMD_FWD_PAGES) {
837 page_size *= 2;
838 softc->pf.vf_hwrm_cmd_req_page_shift++;
839 }
840 nr_pages = DIV_ROUND_UP(size, page_size);
841 reqs_per_page = page_size / BNXT_HWRM_REQ_MAX_SIZE;
842
843 for (i = 0; i < nr_pages; i++) {
844 softc->pf.hwrm_cmd_req_addr[i] =
845 dma_alloc_coherent(&pdev->dev, page_size,
846 &softc->pf.hwrm_cmd_req_dma_addr[i],
847 GFP_ATOMIC);
848
849 if (!softc->pf.hwrm_cmd_req_addr[i])
850 return ENOMEM;
851
852 for (j = 0; j < reqs_per_page && k < num_vfs; j++) {
853 struct bnxt_vf_info *vf = &softc->pf.vf[k];
854
855 vf->hwrm_cmd_req_addr = (char *)softc->pf.hwrm_cmd_req_addr[i] +
856 j * BNXT_HWRM_REQ_MAX_SIZE;
857 vf->hwrm_cmd_req_dma_addr =
858 softc->pf.hwrm_cmd_req_dma_addr[i] + j *
859 BNXT_HWRM_REQ_MAX_SIZE;
860 k++;
861 }
862 }
863
864 softc->pf.vf_event_bmap = kzalloc(ALIGN(DIV_ROUND_UP(num_vfs, 8),
865 sizeof(long)), GFP_ATOMIC);
866 if (!softc->pf.vf_event_bmap)
867 return ENOMEM;
868
869 softc->pf.hwrm_cmd_req_pages = nr_pages;
870
871 return 0;
872 }
873
bnxt_cfg_hw_sriov(struct bnxt_softc * softc,uint16_t * num_vfs,bool reset)874 int bnxt_cfg_hw_sriov(struct bnxt_softc *softc, uint16_t *num_vfs, bool reset)
875 {
876 int rc;
877
878 rc = bnxt_hwrm_func_buf_rgtr(softc);
879 if (rc) {
880 device_printf(softc->dev, "hwrm func buf rgtr failed (error=%d)\n", rc);
881 return (EIO);
882 }
883
884 rc = bnxt_hwrm_func_vf_resc_cfg(softc, *num_vfs, reset);
885 if (rc) {
886 device_printf(softc->dev, "hwrm func VF resc config failed (error=%d)\n", rc);
887 return (EIO);
888 }
889
890 return (0);
891 }
892
893 int
bnxt_iov_init(if_ctx_t ctx,uint16_t num_vfs,const nvlist_t * params)894 bnxt_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
895 {
896 int rc;
897 if_t ifp = iflib_get_ifp(ctx);
898 struct bnxt_softc *softc = iflib_get_softc(ctx);
899 bool admin_up = !!(if_getflags(ifp) & IFF_UP);
900 bool running = !!(if_getdrvflags(ifp) & IFF_DRV_RUNNING);
901
902 if (!admin_up || !running) {
903 device_printf(softc->dev, "PF is down, rejecting VF creation\n");
904 return ENETDOWN;
905 }
906
907 if (num_vfs > BNXT_MAX_VFS) {
908 device_printf(softc->dev, "Requested %u VFs exceeds maximum supported (%u)\n",
909 num_vfs, BNXT_MAX_VFS);
910 return ERANGE;
911 }
912
913 /*
914 * Initialize SR-IOV lock before creating any SR-IOV state, so sysctl/VF
915 * paths can safely synchronize and error paths can always destroy it.
916 */
917 BNXT_SRIOV_LOCK_INIT(softc, device_get_nameunit(softc->dev));
918
919 rc = bnxt_alloc_vf_resources(softc, num_vfs);
920 if (rc) {
921 device_printf(softc->dev, "VF resource alloc failed (error=%d)\n", rc);
922 goto fail_lock;
923 }
924
925 rc = bnxt_cfg_hw_sriov(softc, &num_vfs, false);
926 if (rc)
927 goto fail_free_vf_resc;
928
929 rc = bnxt_create_trusted_vf_sysctls(softc, num_vfs);
930 if (rc) {
931 device_printf(softc->dev, "trusted VF sysctl creation failed (error=%d)\n", rc);
932 goto fail_free_hwrm_vf_resc;
933 }
934
935 rc = bnxt_create_spoofchk_vf_sysctls(softc, num_vfs);
936 if (rc) {
937 device_printf(softc->dev, "spoof check VF sysctl creation failed (error=%d)\n", rc);
938 goto fail_free_hwrm_vf_resc;
939 }
940
941 BNXT_SRIOV_LOCK(softc);
942 softc->pf.num_vfs = num_vfs;
943 BNXT_SRIOV_UNLOCK(softc);
944
945 return 0;
946
947 fail_free_hwrm_vf_resc:
948 bnxt_hwrm_func_vf_resource_free(softc, num_vfs);
949 fail_free_vf_resc:
950 bnxt_free_vf_resources(softc);
951 fail_lock:
952 BNXT_SRIOV_LOCK_DESTROY(softc);
953 return rc;
954 }
955
bnxt_sriov_attach(struct bnxt_softc * softc)956 void bnxt_sriov_attach(struct bnxt_softc *softc)
957 {
958 int rc;
959 device_t dev = softc->dev;
960 nvlist_t *pf_schema, *vf_schema;
961
962 pf_schema = pci_iov_schema_alloc_node();
963 vf_schema = pci_iov_schema_alloc_node();
964
965 /* Optionally add VF-specific attributes to the VF schema */
966 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
967 pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", IOV_SCHEMA_HASDEFAULT, FALSE);
968 pci_iov_schema_add_bool(vf_schema, "trust", IOV_SCHEMA_HASDEFAULT, FALSE);
969
970 /* Attach SR-IOV schemas to the device */
971 rc = pci_iov_attach(dev, pf_schema, vf_schema);
972 if (rc)
973 device_printf(dev, "Failed to initialize SR-IOV (error=%d)\n", rc);
974 }
975
bnxt_reenable_sriov(struct bnxt_softc * bp)976 void bnxt_reenable_sriov(struct bnxt_softc *bp)
977 {
978 if (BNXT_PF(bp)) {
979 struct bnxt_pf_info *pf = &bp->pf;
980 uint16_t n = pf->active_vfs;
981
982 if (n)
983 bnxt_cfg_hw_sriov(bp, &n, true);
984 }
985 }
986