1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include <linux/net/intel/libie/rx.h>
5
6 #include "iavf.h"
7 #include "iavf_ptp.h"
8 #include "iavf_prototype.h"
9
10 /**
11 * iavf_send_pf_msg
12 * @adapter: adapter structure
13 * @op: virtual channel opcode
14 * @msg: pointer to message buffer
15 * @len: message length
16 *
17 * Send message to PF and print status if failure.
18 **/
iavf_send_pf_msg(struct iavf_adapter * adapter,enum virtchnl_ops op,u8 * msg,u16 len)19 static int iavf_send_pf_msg(struct iavf_adapter *adapter,
20 enum virtchnl_ops op, u8 *msg, u16 len)
21 {
22 struct iavf_hw *hw = &adapter->hw;
23 enum iavf_status status;
24
25 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
26 return 0; /* nothing to see here, move along */
27
28 status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
29 if (status)
30 dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
31 op, iavf_stat_str(hw, status),
32 iavf_aq_str(hw, hw->aq.asq_last_status));
33 return iavf_status_to_errno(status);
34 }
35
36 /**
37 * iavf_send_api_ver
38 * @adapter: adapter structure
39 *
40 * Send API version admin queue message to the PF. The reply is not checked
41 * in this function. Returns 0 if the message was successfully
42 * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
43 **/
iavf_send_api_ver(struct iavf_adapter * adapter)44 int iavf_send_api_ver(struct iavf_adapter *adapter)
45 {
46 struct virtchnl_version_info vvi;
47
48 vvi.major = VIRTCHNL_VERSION_MAJOR;
49 vvi.minor = VIRTCHNL_VERSION_MINOR;
50
51 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
52 sizeof(vvi));
53 }
54
55 /**
56 * iavf_poll_virtchnl_msg
57 * @hw: HW configuration structure
58 * @event: event to populate on success
59 * @op_to_poll: requested virtchnl op to poll for
60 *
61 * Initialize poll for virtchnl msg matching the requested_op. Returns 0
62 * if a message of the correct opcode is in the queue or an error code
63 * if no message matching the op code is waiting and other failures.
64 */
65 static int
iavf_poll_virtchnl_msg(struct iavf_hw * hw,struct iavf_arq_event_info * event,enum virtchnl_ops op_to_poll)66 iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
67 enum virtchnl_ops op_to_poll)
68 {
69 enum virtchnl_ops received_op;
70 enum iavf_status status;
71 u32 v_retval;
72
73 while (1) {
74 /* When the AQ is empty, iavf_clean_arq_element will return
75 * nonzero and this loop will terminate.
76 */
77 status = iavf_clean_arq_element(hw, event, NULL);
78 if (status != IAVF_SUCCESS)
79 return iavf_status_to_errno(status);
80 received_op =
81 (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
82 if (op_to_poll == received_op)
83 break;
84 }
85
86 v_retval = le32_to_cpu(event->desc.cookie_low);
87 return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
88 }
89
90 /**
91 * iavf_verify_api_ver
92 * @adapter: adapter structure
93 *
94 * Compare API versions with the PF. Must be called after admin queue is
95 * initialized. Returns 0 if API versions match, -EIO if they do not,
96 * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
97 * from the firmware are propagated.
98 **/
iavf_verify_api_ver(struct iavf_adapter * adapter)99 int iavf_verify_api_ver(struct iavf_adapter *adapter)
100 {
101 struct iavf_arq_event_info event;
102 int err;
103
104 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
105 event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
106 if (!event.msg_buf)
107 return -ENOMEM;
108
109 err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
110 if (!err) {
111 struct virtchnl_version_info *pf_vvi =
112 (struct virtchnl_version_info *)event.msg_buf;
113 adapter->pf_version = *pf_vvi;
114
115 if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
116 (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
117 pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
118 err = -EIO;
119 }
120
121 kfree(event.msg_buf);
122
123 return err;
124 }
125
126 /**
127 * iavf_send_vf_config_msg
128 * @adapter: adapter structure
129 *
130 * Send VF configuration request admin queue message to the PF. The reply
131 * is not checked in this function. Returns 0 if the message was
132 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
133 **/
iavf_send_vf_config_msg(struct iavf_adapter * adapter)134 int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
135 {
136 u32 caps;
137
138 caps = VIRTCHNL_VF_OFFLOAD_L2 |
139 VIRTCHNL_VF_OFFLOAD_RSS_PF |
140 VIRTCHNL_VF_OFFLOAD_RSS_AQ |
141 VIRTCHNL_VF_OFFLOAD_RSS_REG |
142 VIRTCHNL_VF_OFFLOAD_VLAN |
143 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
144 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
145 VIRTCHNL_VF_OFFLOAD_ENCAP |
146 VIRTCHNL_VF_OFFLOAD_TC_U32 |
147 VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
148 VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
149 VIRTCHNL_VF_OFFLOAD_CRC |
150 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
151 VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
152 VIRTCHNL_VF_CAP_PTP |
153 VIRTCHNL_VF_OFFLOAD_ADQ |
154 VIRTCHNL_VF_OFFLOAD_USO |
155 VIRTCHNL_VF_OFFLOAD_FDIR_PF |
156 VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
157 VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
158 VIRTCHNL_VF_OFFLOAD_QOS;
159
160 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
161 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
162 if (PF_IS_V11(adapter))
163 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
164 (u8 *)&caps, sizeof(caps));
165 else
166 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
167 NULL, 0);
168 }
169
iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter * adapter)170 int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
171 {
172 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
173
174 if (!VLAN_V2_ALLOWED(adapter))
175 return -EOPNOTSUPP;
176
177 adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
178
179 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
180 NULL, 0);
181 }
182
iavf_send_vf_supported_rxdids_msg(struct iavf_adapter * adapter)183 int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
184 {
185 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;
186
187 if (!IAVF_RXDID_ALLOWED(adapter))
188 return -EOPNOTSUPP;
189
190 adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
191
192 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
193 NULL, 0);
194 }
195
196 /**
197 * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
198 * @adapter: private adapter structure
199 *
200 * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
201 * capabilities available to this device. This includes the following
202 * potential access:
203 *
204 * * READ_PHC - access to read the PTP hardware clock time
205 * * RX_TSTAMP - access to request Rx timestamps on all received packets
206 *
207 * The PF will reply with the same opcode a filled out copy of the
208 * virtchnl_ptp_caps structure which defines the specifics of which features
209 * are accessible to this device.
210 *
211 * Return: 0 if success, error code otherwise.
212 */
iavf_send_vf_ptp_caps_msg(struct iavf_adapter * adapter)213 int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
214 {
215 struct virtchnl_ptp_caps hw_caps = {
216 .caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
217 VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
218 };
219
220 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
221
222 if (!IAVF_PTP_ALLOWED(adapter))
223 return -EOPNOTSUPP;
224
225 adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
226
227 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
228 (u8 *)&hw_caps, sizeof(hw_caps));
229 }
230
231 /**
232 * iavf_validate_num_queues
233 * @adapter: adapter structure
234 *
235 * Validate that the number of queues the PF has sent in
236 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
237 **/
iavf_validate_num_queues(struct iavf_adapter * adapter)238 static void iavf_validate_num_queues(struct iavf_adapter *adapter)
239 {
240 if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
241 struct virtchnl_vsi_resource *vsi_res;
242 int i;
243
244 dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
245 adapter->vf_res->num_queue_pairs,
246 IAVF_MAX_REQ_QUEUES);
247 dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
248 IAVF_MAX_REQ_QUEUES);
249 adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
250 for (i = 0; i < adapter->vf_res->num_vsis; i++) {
251 vsi_res = &adapter->vf_res->vsi_res[i];
252 vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
253 }
254 }
255 }
256
257 /**
258 * iavf_get_vf_config
259 * @adapter: private adapter structure
260 *
261 * Get VF configuration from PF and populate hw structure. Must be called after
262 * admin queue is initialized. Busy waits until response is received from PF,
263 * with maximum timeout. Response from PF is returned in the buffer for further
264 * processing by the caller.
265 **/
iavf_get_vf_config(struct iavf_adapter * adapter)266 int iavf_get_vf_config(struct iavf_adapter *adapter)
267 {
268 struct iavf_hw *hw = &adapter->hw;
269 struct iavf_arq_event_info event;
270 u16 len;
271 int err;
272
273 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
274 event.buf_len = len;
275 event.msg_buf = kzalloc(len, GFP_KERNEL);
276 if (!event.msg_buf)
277 return -ENOMEM;
278
279 err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
280 memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
281
282 /* some PFs send more queues than we should have so validate that
283 * we aren't getting too many queues
284 */
285 if (!err)
286 iavf_validate_num_queues(adapter);
287 iavf_vf_parse_hw_config(hw, adapter->vf_res);
288
289 kfree(event.msg_buf);
290
291 return err;
292 }
293
iavf_get_vf_vlan_v2_caps(struct iavf_adapter * adapter)294 int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
295 {
296 struct iavf_arq_event_info event;
297 int err;
298 u16 len;
299
300 len = sizeof(struct virtchnl_vlan_caps);
301 event.buf_len = len;
302 event.msg_buf = kzalloc(len, GFP_KERNEL);
303 if (!event.msg_buf)
304 return -ENOMEM;
305
306 err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
307 VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
308 if (!err)
309 memcpy(&adapter->vlan_v2_caps, event.msg_buf,
310 min(event.msg_len, len));
311
312 kfree(event.msg_buf);
313
314 return err;
315 }
316
iavf_get_vf_supported_rxdids(struct iavf_adapter * adapter)317 int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
318 {
319 struct iavf_arq_event_info event;
320 u64 rxdids;
321 int err;
322
323 event.msg_buf = (u8 *)&rxdids;
324 event.buf_len = sizeof(rxdids);
325
326 err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
327 VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
328 if (!err)
329 adapter->supp_rxdids = rxdids;
330
331 return err;
332 }
333
iavf_get_vf_ptp_caps(struct iavf_adapter * adapter)334 int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
335 {
336 struct virtchnl_ptp_caps caps = {};
337 struct iavf_arq_event_info event;
338 int err;
339
340 event.msg_buf = (u8 *)∩︀
341 event.buf_len = sizeof(caps);
342
343 err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
344 VIRTCHNL_OP_1588_PTP_GET_CAPS);
345 if (!err)
346 adapter->ptp.hw_caps = caps;
347
348 return err;
349 }
350
351 /**
352 * iavf_configure_queues
353 * @adapter: adapter structure
354 *
355 * Request that the PF set up our (previously allocated) queues.
356 **/
iavf_configure_queues(struct iavf_adapter * adapter)357 void iavf_configure_queues(struct iavf_adapter *adapter)
358 {
359 struct virtchnl_vsi_queue_config_info *vqci;
360 int pairs = adapter->num_active_queues;
361 struct virtchnl_queue_pair_info *vqpi;
362 u32 i, max_frame;
363 u8 rx_flags = 0;
364 size_t len;
365
366 max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
367 max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);
368
369 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
370 /* bail because we already have a command pending */
371 dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
372 adapter->current_op);
373 return;
374 }
375 adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
376 len = virtchnl_struct_size(vqci, qpair, pairs);
377 vqci = kzalloc(len, GFP_KERNEL);
378 if (!vqci)
379 return;
380
381 if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
382 rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;
383
384 vqci->vsi_id = adapter->vsi_res->vsi_id;
385 vqci->num_queue_pairs = pairs;
386 vqpi = vqci->qpair;
387 /* Size check is not needed here - HW max is 16 queue pairs, and we
388 * can fit info for 31 of them into the AQ buffer before it overflows.
389 */
390 for (i = 0; i < pairs; i++) {
391 vqpi->txq.vsi_id = vqci->vsi_id;
392 vqpi->txq.queue_id = i;
393 vqpi->txq.ring_len = adapter->tx_rings[i].count;
394 vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
395 vqpi->rxq.vsi_id = vqci->vsi_id;
396 vqpi->rxq.queue_id = i;
397 vqpi->rxq.ring_len = adapter->rx_rings[i].count;
398 vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
399 vqpi->rxq.max_pkt_size = max_frame;
400 vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
401 if (IAVF_RXDID_ALLOWED(adapter))
402 vqpi->rxq.rxdid = adapter->rxdid;
403 if (CRC_OFFLOAD_ALLOWED(adapter))
404 vqpi->rxq.crc_disable = !!(adapter->netdev->features &
405 NETIF_F_RXFCS);
406 vqpi->rxq.flags = rx_flags;
407 vqpi++;
408 }
409
410 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
411 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
412 (u8 *)vqci, len);
413 kfree(vqci);
414 }
415
416 /**
417 * iavf_enable_queues
418 * @adapter: adapter structure
419 *
420 * Request that the PF enable all of our queues.
421 **/
iavf_enable_queues(struct iavf_adapter * adapter)422 void iavf_enable_queues(struct iavf_adapter *adapter)
423 {
424 struct virtchnl_queue_select vqs;
425
426 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
427 /* bail because we already have a command pending */
428 dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
429 adapter->current_op);
430 return;
431 }
432 adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
433 vqs.vsi_id = adapter->vsi_res->vsi_id;
434 vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
435 vqs.rx_queues = vqs.tx_queues;
436 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
437 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
438 (u8 *)&vqs, sizeof(vqs));
439 }
440
441 /**
442 * iavf_disable_queues
443 * @adapter: adapter structure
444 *
445 * Request that the PF disable all of our queues.
446 **/
iavf_disable_queues(struct iavf_adapter * adapter)447 void iavf_disable_queues(struct iavf_adapter *adapter)
448 {
449 struct virtchnl_queue_select vqs;
450
451 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
452 /* bail because we already have a command pending */
453 dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
454 adapter->current_op);
455 return;
456 }
457 adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
458 vqs.vsi_id = adapter->vsi_res->vsi_id;
459 vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
460 vqs.rx_queues = vqs.tx_queues;
461 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
462 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
463 (u8 *)&vqs, sizeof(vqs));
464 }
465
466 /**
467 * iavf_map_queues
468 * @adapter: adapter structure
469 *
470 * Request that the PF map queues to interrupt vectors. Misc causes, including
471 * admin queue, are always mapped to vector 0.
472 **/
iavf_map_queues(struct iavf_adapter * adapter)473 void iavf_map_queues(struct iavf_adapter *adapter)
474 {
475 struct virtchnl_irq_map_info *vimi;
476 struct virtchnl_vector_map *vecmap;
477 struct iavf_q_vector *q_vector;
478 int v_idx, q_vectors;
479 size_t len;
480
481 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
482 /* bail because we already have a command pending */
483 dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
484 adapter->current_op);
485 return;
486 }
487 adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
488
489 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
490
491 len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
492 vimi = kzalloc(len, GFP_KERNEL);
493 if (!vimi)
494 return;
495
496 vimi->num_vectors = adapter->num_msix_vectors;
497 /* Queue vectors first */
498 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
499 q_vector = &adapter->q_vectors[v_idx];
500 vecmap = &vimi->vecmap[v_idx];
501
502 vecmap->vsi_id = adapter->vsi_res->vsi_id;
503 vecmap->vector_id = v_idx + NONQ_VECS;
504 vecmap->txq_map = q_vector->ring_mask;
505 vecmap->rxq_map = q_vector->ring_mask;
506 vecmap->rxitr_idx = IAVF_RX_ITR;
507 vecmap->txitr_idx = IAVF_TX_ITR;
508 }
509 /* Misc vector last - this is only for AdminQ messages */
510 vecmap = &vimi->vecmap[v_idx];
511 vecmap->vsi_id = adapter->vsi_res->vsi_id;
512 vecmap->vector_id = 0;
513 vecmap->txq_map = 0;
514 vecmap->rxq_map = 0;
515
516 adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
517 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
518 (u8 *)vimi, len);
519 kfree(vimi);
520 }
521
522 /**
523 * iavf_set_mac_addr_type - Set the correct request type from the filter type
524 * @virtchnl_ether_addr: pointer to requested list element
525 * @filter: pointer to requested filter
526 **/
527 static void
iavf_set_mac_addr_type(struct virtchnl_ether_addr * virtchnl_ether_addr,const struct iavf_mac_filter * filter)528 iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
529 const struct iavf_mac_filter *filter)
530 {
531 virtchnl_ether_addr->type = filter->is_primary ?
532 VIRTCHNL_ETHER_ADDR_PRIMARY :
533 VIRTCHNL_ETHER_ADDR_EXTRA;
534 }
535
536 /**
537 * iavf_add_ether_addrs
538 * @adapter: adapter structure
539 *
540 * Request that the PF add one or more addresses to our filters.
541 **/
iavf_add_ether_addrs(struct iavf_adapter * adapter)542 void iavf_add_ether_addrs(struct iavf_adapter *adapter)
543 {
544 struct virtchnl_ether_addr_list *veal;
545 struct iavf_mac_filter *f;
546 int i = 0, count = 0;
547 bool more = false;
548 size_t len;
549
550 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
551 /* bail because we already have a command pending */
552 dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
553 adapter->current_op);
554 return;
555 }
556
557 spin_lock_bh(&adapter->mac_vlan_list_lock);
558
559 list_for_each_entry(f, &adapter->mac_filter_list, list) {
560 if (f->add)
561 count++;
562 }
563 if (!count) {
564 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
565 spin_unlock_bh(&adapter->mac_vlan_list_lock);
566 return;
567 }
568 adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
569
570 len = virtchnl_struct_size(veal, list, count);
571 if (len > IAVF_MAX_AQ_BUF_SIZE) {
572 dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
573 while (len > IAVF_MAX_AQ_BUF_SIZE)
574 len = virtchnl_struct_size(veal, list, --count);
575 more = true;
576 }
577
578 veal = kzalloc(len, GFP_ATOMIC);
579 if (!veal) {
580 spin_unlock_bh(&adapter->mac_vlan_list_lock);
581 return;
582 }
583
584 veal->vsi_id = adapter->vsi_res->vsi_id;
585 veal->num_elements = count;
586 list_for_each_entry(f, &adapter->mac_filter_list, list) {
587 if (f->add) {
588 ether_addr_copy(veal->list[i].addr, f->macaddr);
589 iavf_set_mac_addr_type(&veal->list[i], f);
590 i++;
591 f->add = false;
592 if (i == count)
593 break;
594 }
595 }
596 if (!more)
597 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
598
599 spin_unlock_bh(&adapter->mac_vlan_list_lock);
600
601 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
602 kfree(veal);
603 }
604
605 /**
606 * iavf_del_ether_addrs
607 * @adapter: adapter structure
608 *
609 * Request that the PF remove one or more addresses from our filters.
610 **/
iavf_del_ether_addrs(struct iavf_adapter * adapter)611 void iavf_del_ether_addrs(struct iavf_adapter *adapter)
612 {
613 struct virtchnl_ether_addr_list *veal;
614 struct iavf_mac_filter *f, *ftmp;
615 int i = 0, count = 0;
616 bool more = false;
617 size_t len;
618
619 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
620 /* bail because we already have a command pending */
621 dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
622 adapter->current_op);
623 return;
624 }
625
626 spin_lock_bh(&adapter->mac_vlan_list_lock);
627
628 list_for_each_entry(f, &adapter->mac_filter_list, list) {
629 if (f->remove)
630 count++;
631 }
632 if (!count) {
633 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
634 spin_unlock_bh(&adapter->mac_vlan_list_lock);
635 return;
636 }
637 adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
638
639 len = virtchnl_struct_size(veal, list, count);
640 if (len > IAVF_MAX_AQ_BUF_SIZE) {
641 dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
642 while (len > IAVF_MAX_AQ_BUF_SIZE)
643 len = virtchnl_struct_size(veal, list, --count);
644 more = true;
645 }
646 veal = kzalloc(len, GFP_ATOMIC);
647 if (!veal) {
648 spin_unlock_bh(&adapter->mac_vlan_list_lock);
649 return;
650 }
651
652 veal->vsi_id = adapter->vsi_res->vsi_id;
653 veal->num_elements = count;
654 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
655 if (f->remove) {
656 ether_addr_copy(veal->list[i].addr, f->macaddr);
657 iavf_set_mac_addr_type(&veal->list[i], f);
658 i++;
659 list_del(&f->list);
660 kfree(f);
661 if (i == count)
662 break;
663 }
664 }
665 if (!more)
666 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
667
668 spin_unlock_bh(&adapter->mac_vlan_list_lock);
669
670 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
671 kfree(veal);
672 }
673
674 /**
675 * iavf_mac_add_ok
676 * @adapter: adapter structure
677 *
678 * Submit list of filters based on PF response.
679 **/
iavf_mac_add_ok(struct iavf_adapter * adapter)680 static void iavf_mac_add_ok(struct iavf_adapter *adapter)
681 {
682 struct iavf_mac_filter *f, *ftmp;
683
684 spin_lock_bh(&adapter->mac_vlan_list_lock);
685 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
686 f->is_new_mac = false;
687 if (!f->add && !f->add_handled)
688 f->add_handled = true;
689 }
690 spin_unlock_bh(&adapter->mac_vlan_list_lock);
691 }
692
693 /**
694 * iavf_mac_add_reject
695 * @adapter: adapter structure
696 *
697 * Remove filters from list based on PF response.
698 **/
iavf_mac_add_reject(struct iavf_adapter * adapter)699 static void iavf_mac_add_reject(struct iavf_adapter *adapter)
700 {
701 struct net_device *netdev = adapter->netdev;
702 struct iavf_mac_filter *f, *ftmp;
703
704 spin_lock_bh(&adapter->mac_vlan_list_lock);
705 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
706 if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
707 f->remove = false;
708
709 if (!f->add && !f->add_handled)
710 f->add_handled = true;
711
712 if (f->is_new_mac) {
713 list_del(&f->list);
714 kfree(f);
715 }
716 }
717 spin_unlock_bh(&adapter->mac_vlan_list_lock);
718 }
719
720 /**
721 * iavf_vlan_add_reject
722 * @adapter: adapter structure
723 *
724 * Remove VLAN filters from list based on PF response.
725 **/
iavf_vlan_add_reject(struct iavf_adapter * adapter)726 static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
727 {
728 struct iavf_vlan_filter *f, *ftmp;
729
730 spin_lock_bh(&adapter->mac_vlan_list_lock);
731 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
732 if (f->state == IAVF_VLAN_IS_NEW) {
733 list_del(&f->list);
734 kfree(f);
735 adapter->num_vlan_filters--;
736 }
737 }
738 spin_unlock_bh(&adapter->mac_vlan_list_lock);
739 }
740
741 /**
742 * iavf_add_vlans
743 * @adapter: adapter structure
744 *
745 * Request that the PF add one or more VLAN filters to our VSI.
746 **/
iavf_add_vlans(struct iavf_adapter * adapter)747 void iavf_add_vlans(struct iavf_adapter *adapter)
748 {
749 int len, i = 0, count = 0;
750 struct iavf_vlan_filter *f;
751 bool more = false;
752
753 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
754 /* bail because we already have a command pending */
755 dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
756 adapter->current_op);
757 return;
758 }
759
760 spin_lock_bh(&adapter->mac_vlan_list_lock);
761
762 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
763 if (f->state == IAVF_VLAN_ADD)
764 count++;
765 }
766 if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
767 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
768 spin_unlock_bh(&adapter->mac_vlan_list_lock);
769 return;
770 }
771
772 if (VLAN_ALLOWED(adapter)) {
773 struct virtchnl_vlan_filter_list *vvfl;
774
775 adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
776
777 len = virtchnl_struct_size(vvfl, vlan_id, count);
778 if (len > IAVF_MAX_AQ_BUF_SIZE) {
779 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
780 while (len > IAVF_MAX_AQ_BUF_SIZE)
781 len = virtchnl_struct_size(vvfl, vlan_id,
782 --count);
783 more = true;
784 }
785 vvfl = kzalloc(len, GFP_ATOMIC);
786 if (!vvfl) {
787 spin_unlock_bh(&adapter->mac_vlan_list_lock);
788 return;
789 }
790
791 vvfl->vsi_id = adapter->vsi_res->vsi_id;
792 vvfl->num_elements = count;
793 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
794 if (f->state == IAVF_VLAN_ADD) {
795 vvfl->vlan_id[i] = f->vlan.vid;
796 i++;
797 f->state = IAVF_VLAN_IS_NEW;
798 if (i == count)
799 break;
800 }
801 }
802 if (!more)
803 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
804
805 spin_unlock_bh(&adapter->mac_vlan_list_lock);
806
807 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
808 kfree(vvfl);
809 } else {
810 u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
811 u16 current_vlans = iavf_get_num_vlans_added(adapter);
812 struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
813
814 adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
815
816 if ((count + current_vlans) > max_vlans &&
817 current_vlans < max_vlans) {
818 count = max_vlans - iavf_get_num_vlans_added(adapter);
819 more = true;
820 }
821
822 len = virtchnl_struct_size(vvfl_v2, filters, count);
823 if (len > IAVF_MAX_AQ_BUF_SIZE) {
824 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
825 while (len > IAVF_MAX_AQ_BUF_SIZE)
826 len = virtchnl_struct_size(vvfl_v2, filters,
827 --count);
828 more = true;
829 }
830
831 vvfl_v2 = kzalloc(len, GFP_ATOMIC);
832 if (!vvfl_v2) {
833 spin_unlock_bh(&adapter->mac_vlan_list_lock);
834 return;
835 }
836
837 vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
838 vvfl_v2->num_elements = count;
839 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
840 if (f->state == IAVF_VLAN_ADD) {
841 struct virtchnl_vlan_supported_caps *filtering_support =
842 &adapter->vlan_v2_caps.filtering.filtering_support;
843 struct virtchnl_vlan *vlan;
844
845 if (i == count)
846 break;
847
848 /* give priority over outer if it's enabled */
849 if (filtering_support->outer)
850 vlan = &vvfl_v2->filters[i].outer;
851 else
852 vlan = &vvfl_v2->filters[i].inner;
853
854 vlan->tci = f->vlan.vid;
855 vlan->tpid = f->vlan.tpid;
856
857 i++;
858 f->state = IAVF_VLAN_IS_NEW;
859 }
860 }
861
862 if (!more)
863 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
864
865 spin_unlock_bh(&adapter->mac_vlan_list_lock);
866
867 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
868 (u8 *)vvfl_v2, len);
869 kfree(vvfl_v2);
870 }
871 }
872
873 /**
874 * iavf_del_vlans
875 * @adapter: adapter structure
876 *
877 * Request that the PF remove one or more VLAN filters from our VSI.
878 **/
iavf_del_vlans(struct iavf_adapter * adapter)879 void iavf_del_vlans(struct iavf_adapter *adapter)
880 {
881 struct iavf_vlan_filter *f, *ftmp;
882 int len, i = 0, count = 0;
883 bool more = false;
884
885 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
886 /* bail because we already have a command pending */
887 dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
888 adapter->current_op);
889 return;
890 }
891
892 spin_lock_bh(&adapter->mac_vlan_list_lock);
893
894 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
895 /* since VLAN capabilities are not allowed, we dont want to send
896 * a VLAN delete request because it will most likely fail and
897 * create unnecessary errors/noise, so just free the VLAN
898 * filters marked for removal to enable bailing out before
899 * sending a virtchnl message
900 */
901 if (f->state == IAVF_VLAN_REMOVE &&
902 !VLAN_FILTERING_ALLOWED(adapter)) {
903 list_del(&f->list);
904 kfree(f);
905 adapter->num_vlan_filters--;
906 } else if (f->state == IAVF_VLAN_DISABLE &&
907 !VLAN_FILTERING_ALLOWED(adapter)) {
908 f->state = IAVF_VLAN_INACTIVE;
909 } else if (f->state == IAVF_VLAN_REMOVE ||
910 f->state == IAVF_VLAN_DISABLE) {
911 count++;
912 }
913 }
914 if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
915 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
916 spin_unlock_bh(&adapter->mac_vlan_list_lock);
917 return;
918 }
919
920 if (VLAN_ALLOWED(adapter)) {
921 struct virtchnl_vlan_filter_list *vvfl;
922
923 adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
924
925 len = virtchnl_struct_size(vvfl, vlan_id, count);
926 if (len > IAVF_MAX_AQ_BUF_SIZE) {
927 dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
928 while (len > IAVF_MAX_AQ_BUF_SIZE)
929 len = virtchnl_struct_size(vvfl, vlan_id,
930 --count);
931 more = true;
932 }
933 vvfl = kzalloc(len, GFP_ATOMIC);
934 if (!vvfl) {
935 spin_unlock_bh(&adapter->mac_vlan_list_lock);
936 return;
937 }
938
939 vvfl->vsi_id = adapter->vsi_res->vsi_id;
940 vvfl->num_elements = count;
941 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
942 if (f->state == IAVF_VLAN_DISABLE) {
943 vvfl->vlan_id[i] = f->vlan.vid;
944 f->state = IAVF_VLAN_INACTIVE;
945 i++;
946 if (i == count)
947 break;
948 } else if (f->state == IAVF_VLAN_REMOVE) {
949 vvfl->vlan_id[i] = f->vlan.vid;
950 list_del(&f->list);
951 kfree(f);
952 adapter->num_vlan_filters--;
953 i++;
954 if (i == count)
955 break;
956 }
957 }
958
959 if (!more)
960 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
961
962 spin_unlock_bh(&adapter->mac_vlan_list_lock);
963
964 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
965 kfree(vvfl);
966 } else {
967 struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
968
969 adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
970
971 len = virtchnl_struct_size(vvfl_v2, filters, count);
972 if (len > IAVF_MAX_AQ_BUF_SIZE) {
973 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
974 while (len > IAVF_MAX_AQ_BUF_SIZE)
975 len = virtchnl_struct_size(vvfl_v2, filters,
976 --count);
977 more = true;
978 }
979
980 vvfl_v2 = kzalloc(len, GFP_ATOMIC);
981 if (!vvfl_v2) {
982 spin_unlock_bh(&adapter->mac_vlan_list_lock);
983 return;
984 }
985
986 vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
987 vvfl_v2->num_elements = count;
988 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
989 if (f->state == IAVF_VLAN_DISABLE ||
990 f->state == IAVF_VLAN_REMOVE) {
991 struct virtchnl_vlan_supported_caps *filtering_support =
992 &adapter->vlan_v2_caps.filtering.filtering_support;
993 struct virtchnl_vlan *vlan;
994
995 /* give priority over outer if it's enabled */
996 if (filtering_support->outer)
997 vlan = &vvfl_v2->filters[i].outer;
998 else
999 vlan = &vvfl_v2->filters[i].inner;
1000
1001 vlan->tci = f->vlan.vid;
1002 vlan->tpid = f->vlan.tpid;
1003
1004 if (f->state == IAVF_VLAN_DISABLE) {
1005 f->state = IAVF_VLAN_INACTIVE;
1006 } else {
1007 list_del(&f->list);
1008 kfree(f);
1009 adapter->num_vlan_filters--;
1010 }
1011 i++;
1012 if (i == count)
1013 break;
1014 }
1015 }
1016
1017 if (!more)
1018 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1019
1020 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1021
1022 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
1023 (u8 *)vvfl_v2, len);
1024 kfree(vvfl_v2);
1025 }
1026 }
1027
/**
 * iavf_set_promiscuous
 * @adapter: adapter structure
 *
 * Request that the PF enable promiscuous mode for our VSI. The requested
 * state is derived from netdev->flags (IFF_PROMISC / IFF_ALLMULTI) and
 * mirrored into current_netdev_promisc_flags so that duplicate AQ
 * requests can be suppressed.
 **/
void iavf_set_promiscuous(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct virtchnl_promisc_info vpi;
	unsigned int flags;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	/* prevent changes to promiscuous flags */
	spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);

	/* sanity check to prevent duplicate AQ calls */
	if (!iavf_promiscuous_mode_changed(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
		dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
		/* allow changes to promiscuous flags */
		spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
		return;
	}

	/* there are 2 bits, but only 3 states */
	if (!(netdev->flags & IFF_PROMISC) &&
	    netdev->flags & IFF_ALLMULTI) {
		/* State 1 - only multicast promiscuous mode enabled
		 * - !IFF_PROMISC && IFF_ALLMULTI
		 */
		flags = FLAG_VF_MULTICAST_PROMISC;
		adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
		adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
	} else if (!(netdev->flags & IFF_PROMISC) &&
		   !(netdev->flags & IFF_ALLMULTI)) {
		/* State 2 - unicast/multicast promiscuous mode disabled
		 * - !IFF_PROMISC && !IFF_ALLMULTI
		 */
		flags = 0;
		adapter->current_netdev_promisc_flags &=
			~(IFF_PROMISC | IFF_ALLMULTI);
		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
	} else {
		/* State 3 - unicast/multicast promiscuous mode enabled
		 * - IFF_PROMISC && IFF_ALLMULTI
		 * - IFF_PROMISC && !IFF_ALLMULTI
		 */
		flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
		adapter->current_netdev_promisc_flags |= IFF_PROMISC;
		if (netdev->flags & IFF_ALLMULTI)
			adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
		else
			adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;

		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;

	/* allow changes to promiscuous flags */
	spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);

	/* send outside the spinlock; only the local flags copy is needed */
	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			 (u8 *)&vpi, sizeof(vpi));
}
1104
1105 /**
1106 * iavf_request_stats
1107 * @adapter: adapter structure
1108 *
1109 * Request VSI statistics from PF.
1110 **/
iavf_request_stats(struct iavf_adapter * adapter)1111 void iavf_request_stats(struct iavf_adapter *adapter)
1112 {
1113 struct virtchnl_queue_select vqs;
1114
1115 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1116 /* no error message, this isn't crucial */
1117 return;
1118 }
1119
1120 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
1121 adapter->current_op = VIRTCHNL_OP_GET_STATS;
1122 vqs.vsi_id = adapter->vsi_res->vsi_id;
1123 /* queue maps are ignored for this message - only the vsi is used */
1124 if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
1125 sizeof(vqs)))
1126 /* if the request failed, don't lock out others */
1127 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1128 }
1129
1130 /**
1131 * iavf_get_hena
1132 * @adapter: adapter structure
1133 *
1134 * Request hash enable capabilities from PF
1135 **/
iavf_get_hena(struct iavf_adapter * adapter)1136 void iavf_get_hena(struct iavf_adapter *adapter)
1137 {
1138 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1139 /* bail because we already have a command pending */
1140 dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
1141 adapter->current_op);
1142 return;
1143 }
1144 adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
1145 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
1146 iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
1147 }
1148
1149 /**
1150 * iavf_set_hena
1151 * @adapter: adapter structure
1152 *
1153 * Request the PF to set our RSS hash capabilities
1154 **/
iavf_set_hena(struct iavf_adapter * adapter)1155 void iavf_set_hena(struct iavf_adapter *adapter)
1156 {
1157 struct virtchnl_rss_hena vrh;
1158
1159 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1160 /* bail because we already have a command pending */
1161 dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
1162 adapter->current_op);
1163 return;
1164 }
1165 vrh.hena = adapter->hena;
1166 adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
1167 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
1168 iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
1169 sizeof(vrh));
1170 }
1171
1172 /**
1173 * iavf_set_rss_key
1174 * @adapter: adapter structure
1175 *
1176 * Request the PF to set our RSS hash key
1177 **/
iavf_set_rss_key(struct iavf_adapter * adapter)1178 void iavf_set_rss_key(struct iavf_adapter *adapter)
1179 {
1180 struct virtchnl_rss_key *vrk;
1181 int len;
1182
1183 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1184 /* bail because we already have a command pending */
1185 dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
1186 adapter->current_op);
1187 return;
1188 }
1189 len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
1190 vrk = kzalloc(len, GFP_KERNEL);
1191 if (!vrk)
1192 return;
1193 vrk->vsi_id = adapter->vsi.id;
1194 vrk->key_len = adapter->rss_key_size;
1195 memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
1196
1197 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
1198 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
1199 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
1200 kfree(vrk);
1201 }
1202
1203 /**
1204 * iavf_set_rss_lut
1205 * @adapter: adapter structure
1206 *
1207 * Request the PF to set our RSS lookup table
1208 **/
iavf_set_rss_lut(struct iavf_adapter * adapter)1209 void iavf_set_rss_lut(struct iavf_adapter *adapter)
1210 {
1211 struct virtchnl_rss_lut *vrl;
1212 int len;
1213
1214 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1215 /* bail because we already have a command pending */
1216 dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
1217 adapter->current_op);
1218 return;
1219 }
1220 len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
1221 vrl = kzalloc(len, GFP_KERNEL);
1222 if (!vrl)
1223 return;
1224 vrl->vsi_id = adapter->vsi.id;
1225 vrl->lut_entries = adapter->rss_lut_size;
1226 memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
1227 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
1228 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
1229 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
1230 kfree(vrl);
1231 }
1232
1233 /**
1234 * iavf_set_rss_hfunc
1235 * @adapter: adapter structure
1236 *
1237 * Request the PF to set our RSS Hash function
1238 **/
iavf_set_rss_hfunc(struct iavf_adapter * adapter)1239 void iavf_set_rss_hfunc(struct iavf_adapter *adapter)
1240 {
1241 struct virtchnl_rss_hfunc *vrh;
1242 int len = sizeof(*vrh);
1243
1244 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1245 /* bail because we already have a command pending */
1246 dev_err(&adapter->pdev->dev, "Cannot set RSS Hash function, command %d pending\n",
1247 adapter->current_op);
1248 return;
1249 }
1250 vrh = kzalloc(len, GFP_KERNEL);
1251 if (!vrh)
1252 return;
1253 vrh->vsi_id = adapter->vsi.id;
1254 vrh->rss_algorithm = adapter->hfunc;
1255 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_HFUNC;
1256 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_HFUNC;
1257 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_HFUNC, (u8 *)vrh, len);
1258 kfree(vrh);
1259 }
1260
1261 /**
1262 * iavf_enable_vlan_stripping
1263 * @adapter: adapter structure
1264 *
1265 * Request VLAN header stripping to be enabled
1266 **/
iavf_enable_vlan_stripping(struct iavf_adapter * adapter)1267 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
1268 {
1269 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1270 /* bail because we already have a command pending */
1271 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
1272 adapter->current_op);
1273 return;
1274 }
1275 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1276 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
1277 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
1278 }
1279
1280 /**
1281 * iavf_disable_vlan_stripping
1282 * @adapter: adapter structure
1283 *
1284 * Request VLAN header stripping to be disabled
1285 **/
iavf_disable_vlan_stripping(struct iavf_adapter * adapter)1286 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
1287 {
1288 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1289 /* bail because we already have a command pending */
1290 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
1291 adapter->current_op);
1292 return;
1293 }
1294 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1295 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
1296 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
1297 }
1298
1299 /**
1300 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
1301 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1302 */
iavf_tpid_to_vc_ethertype(u16 tpid)1303 static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
1304 {
1305 switch (tpid) {
1306 case ETH_P_8021Q:
1307 return VIRTCHNL_VLAN_ETHERTYPE_8100;
1308 case ETH_P_8021AD:
1309 return VIRTCHNL_VLAN_ETHERTYPE_88A8;
1310 }
1311
1312 return 0;
1313 }
1314
/**
 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
 * @adapter: adapter structure
 * @msg: message structure used for updating offloads over virtchnl to update
 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
 * @offload_op: opcode used to determine which support structure to check
 *
 * Return: 0 when an ethertype setting was written into @msg, -EINVAL when
 * the opcode is unrecognized or the TPID is not toggleable for this offload.
 */
static int
iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
			      struct virtchnl_vlan_setting *msg, u16 tpid,
			      enum virtchnl_ops offload_op)
{
	struct virtchnl_vlan_supported_caps *offload_support;
	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);

	/* reference the correct offload support structure */
	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		offload_support =
			&adapter->vlan_v2_caps.offloads.stripping_support;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		offload_support =
			&adapter->vlan_v2_caps.offloads.insertion_support;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
			offload_op);
		return -EINVAL;
	}

	/* make sure ethertype is supported; the ethertype must both match
	 * the capability bit and be toggleable (VIRTCHNL_VLAN_TOGGLE)
	 */
	if (offload_support->outer & vc_ethertype &&
	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
		msg->outer_ethertype_setting = vc_ethertype;
	} else if (offload_support->inner & vc_ethertype &&
		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
		/* outer takes priority; inner is only used as fallback */
		msg->inner_ethertype_setting = vc_ethertype;
	} else {
		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
			offload_op, tpid);
		return -EINVAL;
	}

	return 0;
}
1363
1364 /**
1365 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
1366 * @adapter: adapter structure
1367 * @tpid: VLAN TPID
1368 * @offload_op: opcode used to determine which AQ required bit to clear
1369 */
1370 static void
iavf_clear_offload_v2_aq_required(struct iavf_adapter * adapter,u16 tpid,enum virtchnl_ops offload_op)1371 iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
1372 enum virtchnl_ops offload_op)
1373 {
1374 switch (offload_op) {
1375 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1376 if (tpid == ETH_P_8021Q)
1377 adapter->aq_required &=
1378 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
1379 else if (tpid == ETH_P_8021AD)
1380 adapter->aq_required &=
1381 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
1382 break;
1383 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1384 if (tpid == ETH_P_8021Q)
1385 adapter->aq_required &=
1386 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
1387 else if (tpid == ETH_P_8021AD)
1388 adapter->aq_required &=
1389 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
1390 break;
1391 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1392 if (tpid == ETH_P_8021Q)
1393 adapter->aq_required &=
1394 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
1395 else if (tpid == ETH_P_8021AD)
1396 adapter->aq_required &=
1397 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
1398 break;
1399 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1400 if (tpid == ETH_P_8021Q)
1401 adapter->aq_required &=
1402 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
1403 else if (tpid == ETH_P_8021AD)
1404 adapter->aq_required &=
1405 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
1406 break;
1407 default:
1408 dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
1409 offload_op);
1410 }
1411 }
1412
1413 /**
1414 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
1415 * @adapter: adapter structure
1416 * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
1417 * @offload_op: offload_op used to make the request over virtchnl
1418 */
1419 static void
iavf_send_vlan_offload_v2(struct iavf_adapter * adapter,u16 tpid,enum virtchnl_ops offload_op)1420 iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
1421 enum virtchnl_ops offload_op)
1422 {
1423 struct virtchnl_vlan_setting *msg;
1424 int len = sizeof(*msg);
1425
1426 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1427 /* bail because we already have a command pending */
1428 dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
1429 offload_op, adapter->current_op);
1430 return;
1431 }
1432
1433 adapter->current_op = offload_op;
1434
1435 msg = kzalloc(len, GFP_KERNEL);
1436 if (!msg)
1437 return;
1438
1439 msg->vport_id = adapter->vsi_res->vsi_id;
1440
1441 /* always clear to prevent unsupported and endless requests */
1442 iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
1443
1444 /* only send valid offload requests */
1445 if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
1446 iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
1447 else
1448 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1449
1450 kfree(msg);
1451 }
1452
/**
 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to enable VLAN stripping
 *
 * Thin wrapper issuing VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 for @tpid.
 */
void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
}
1463
/**
 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to disable VLAN stripping
 *
 * Thin wrapper issuing VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 for @tpid.
 */
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
}
1474
/**
 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to enable VLAN insertion
 *
 * Thin wrapper issuing VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 for @tpid.
 */
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
}
1485
/**
 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to disable VLAN insertion
 *
 * Thin wrapper issuing VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 for @tpid.
 */
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
1496
1497 #if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
/**
 * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
 * @adapter: adapter private structure
 *
 * De-queue one PTP command request and send the command message to the PF.
 * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
 * A command that cannot be sent (channel busy or send failure) stays on
 * the queue; since the AQ flag is not cleared it is retried on a later
 * cycle.
 */
void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
{
	struct iavf_ptp_aq_cmd *cmd;
	int err;

	if (!adapter->ptp.clock) {
		/* This shouldn't be possible to hit, since no messages should
		 * be queued if PTP is not initialized.
		 */
		pci_err(adapter->pdev, "PTP is not initialized\n");
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
		return;
	}

	/* aq_cmd_lock protects the PTP command list */
	mutex_lock(&adapter->ptp.aq_cmd_lock);
	cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
				       struct iavf_ptp_aq_cmd, list);
	if (!cmd) {
		/* no further PTP messages to send */
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
		goto out_unlock;
	}

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		pci_err(adapter->pdev,
			"Cannot send PTP command %d, command %d pending\n",
			cmd->v_opcode, adapter->current_op);
		goto out_unlock;
	}

	err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
	if (!err) {
		/* Command was sent without errors, so we can remove it from
		 * the list and discard it.
		 */
		list_del(&cmd->list);
		kfree(cmd);
	} else {
		/* We failed to send the command, try again next cycle */
		pci_err(adapter->pdev, "Failed to send PTP command %d\n",
			cmd->v_opcode);
	}

	if (list_empty(&adapter->ptp.aq_cmds))
		/* no further PTP messages to send */
		adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;

out_unlock:
	mutex_unlock(&adapter->ptp.aq_cmd_lock);
}
1556 #endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
1557
1558 /**
1559 * iavf_print_link_message - print link up or down
1560 * @adapter: adapter structure
1561 *
1562 * Log a message telling the world of our wonderous link status
1563 */
iavf_print_link_message(struct iavf_adapter * adapter)1564 static void iavf_print_link_message(struct iavf_adapter *adapter)
1565 {
1566 struct net_device *netdev = adapter->netdev;
1567 int link_speed_mbps;
1568 char *speed;
1569
1570 if (!adapter->link_up) {
1571 netdev_info(netdev, "NIC Link is Down\n");
1572 return;
1573 }
1574
1575 if (ADV_LINK_SUPPORT(adapter)) {
1576 link_speed_mbps = adapter->link_speed_mbps;
1577 goto print_link_msg;
1578 }
1579
1580 switch (adapter->link_speed) {
1581 case VIRTCHNL_LINK_SPEED_40GB:
1582 link_speed_mbps = SPEED_40000;
1583 break;
1584 case VIRTCHNL_LINK_SPEED_25GB:
1585 link_speed_mbps = SPEED_25000;
1586 break;
1587 case VIRTCHNL_LINK_SPEED_20GB:
1588 link_speed_mbps = SPEED_20000;
1589 break;
1590 case VIRTCHNL_LINK_SPEED_10GB:
1591 link_speed_mbps = SPEED_10000;
1592 break;
1593 case VIRTCHNL_LINK_SPEED_5GB:
1594 link_speed_mbps = SPEED_5000;
1595 break;
1596 case VIRTCHNL_LINK_SPEED_2_5GB:
1597 link_speed_mbps = SPEED_2500;
1598 break;
1599 case VIRTCHNL_LINK_SPEED_1GB:
1600 link_speed_mbps = SPEED_1000;
1601 break;
1602 case VIRTCHNL_LINK_SPEED_100MB:
1603 link_speed_mbps = SPEED_100;
1604 break;
1605 default:
1606 link_speed_mbps = SPEED_UNKNOWN;
1607 break;
1608 }
1609
1610 print_link_msg:
1611 if (link_speed_mbps > SPEED_1000) {
1612 if (link_speed_mbps == SPEED_2500) {
1613 speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps");
1614 } else {
1615 /* convert to Gbps inline */
1616 speed = kasprintf(GFP_KERNEL, "%d Gbps",
1617 link_speed_mbps / 1000);
1618 }
1619 } else if (link_speed_mbps == SPEED_UNKNOWN) {
1620 speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps");
1621 } else {
1622 speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps);
1623 }
1624
1625 netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
1626 kfree(speed);
1627 }
1628
1629 /**
1630 * iavf_get_vpe_link_status
1631 * @adapter: adapter structure
1632 * @vpe: virtchnl_pf_event structure
1633 *
1634 * Helper function for determining the link status
1635 **/
1636 static bool
iavf_get_vpe_link_status(struct iavf_adapter * adapter,struct virtchnl_pf_event * vpe)1637 iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1638 struct virtchnl_pf_event *vpe)
1639 {
1640 if (ADV_LINK_SUPPORT(adapter))
1641 return vpe->event_data.link_event_adv.link_status;
1642 else
1643 return vpe->event_data.link_event.link_status;
1644 }
1645
1646 /**
1647 * iavf_set_adapter_link_speed_from_vpe
1648 * @adapter: adapter structure for which we are setting the link speed
1649 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
1650 *
1651 * Helper function for setting iavf_adapter link speed
1652 **/
1653 static void
iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter * adapter,struct virtchnl_pf_event * vpe)1654 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1655 struct virtchnl_pf_event *vpe)
1656 {
1657 if (ADV_LINK_SUPPORT(adapter))
1658 adapter->link_speed_mbps =
1659 vpe->event_data.link_event_adv.link_speed;
1660 else
1661 adapter->link_speed = vpe->event_data.link_event.link_speed;
1662 }
1663
1664 /**
1665 * iavf_get_qos_caps - get qos caps support
1666 * @adapter: iavf adapter struct instance
1667 *
1668 * This function requests PF for Supported QoS Caps.
1669 */
iavf_get_qos_caps(struct iavf_adapter * adapter)1670 void iavf_get_qos_caps(struct iavf_adapter *adapter)
1671 {
1672 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1673 /* bail because we already have a command pending */
1674 dev_err(&adapter->pdev->dev,
1675 "Cannot get qos caps, command %d pending\n",
1676 adapter->current_op);
1677 return;
1678 }
1679
1680 adapter->current_op = VIRTCHNL_OP_GET_QOS_CAPS;
1681 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_QOS_CAPS;
1682 iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_QOS_CAPS, NULL, 0);
1683 }
1684
1685 /**
1686 * iavf_set_quanta_size - set quanta size of queue chunk
1687 * @adapter: iavf adapter struct instance
1688 * @quanta_size: quanta size in bytes
1689 * @queue_index: starting index of queue chunk
1690 * @num_queues: number of queues in the queue chunk
1691 *
1692 * This function requests PF to set quanta size of queue chunk
1693 * starting at queue_index.
1694 */
1695 static void
iavf_set_quanta_size(struct iavf_adapter * adapter,u16 quanta_size,u16 queue_index,u16 num_queues)1696 iavf_set_quanta_size(struct iavf_adapter *adapter, u16 quanta_size,
1697 u16 queue_index, u16 num_queues)
1698 {
1699 struct virtchnl_quanta_cfg quanta_cfg;
1700
1701 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1702 /* bail because we already have a command pending */
1703 dev_err(&adapter->pdev->dev,
1704 "Cannot set queue quanta size, command %d pending\n",
1705 adapter->current_op);
1706 return;
1707 }
1708
1709 adapter->current_op = VIRTCHNL_OP_CONFIG_QUANTA;
1710 quanta_cfg.quanta_size = quanta_size;
1711 quanta_cfg.queue_select.type = VIRTCHNL_QUEUE_TYPE_TX;
1712 quanta_cfg.queue_select.start_queue_id = queue_index;
1713 quanta_cfg.queue_select.num_queues = num_queues;
1714 adapter->aq_required &= ~IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
1715 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUANTA,
1716 (u8 *)&quanta_cfg, sizeof(quanta_cfg));
1717 }
1718
1719 /**
1720 * iavf_cfg_queues_quanta_size - configure quanta size of queues
1721 * @adapter: adapter structure
1722 *
1723 * Request that the PF configure quanta size of allocated queues.
1724 **/
iavf_cfg_queues_quanta_size(struct iavf_adapter * adapter)1725 void iavf_cfg_queues_quanta_size(struct iavf_adapter *adapter)
1726 {
1727 int quanta_size = IAVF_DEFAULT_QUANTA_SIZE;
1728
1729 /* Set Queue Quanta Size to default */
1730 iavf_set_quanta_size(adapter, quanta_size, 0,
1731 adapter->num_active_queues);
1732 }
1733
1734 /**
1735 * iavf_cfg_queues_bw - configure bandwidth of allocated queues
1736 * @adapter: iavf adapter structure instance
1737 *
1738 * This function requests PF to configure queue bandwidth of allocated queues
1739 */
iavf_cfg_queues_bw(struct iavf_adapter * adapter)1740 void iavf_cfg_queues_bw(struct iavf_adapter *adapter)
1741 {
1742 struct virtchnl_queues_bw_cfg *qs_bw_cfg;
1743 struct net_shaper *q_shaper;
1744 int qs_to_update = 0;
1745 int i, inx = 0;
1746 size_t len;
1747
1748 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1749 /* bail because we already have a command pending */
1750 dev_err(&adapter->pdev->dev,
1751 "Cannot set tc queue bw, command %d pending\n",
1752 adapter->current_op);
1753 return;
1754 }
1755
1756 for (i = 0; i < adapter->num_active_queues; i++) {
1757 if (adapter->tx_rings[i].q_shaper_update)
1758 qs_to_update++;
1759 }
1760 len = struct_size(qs_bw_cfg, cfg, qs_to_update);
1761 qs_bw_cfg = kzalloc(len, GFP_KERNEL);
1762 if (!qs_bw_cfg)
1763 return;
1764
1765 qs_bw_cfg->vsi_id = adapter->vsi.id;
1766 qs_bw_cfg->num_queues = qs_to_update;
1767
1768 for (i = 0; i < adapter->num_active_queues; i++) {
1769 struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1770
1771 q_shaper = &tx_ring->q_shaper;
1772 if (tx_ring->q_shaper_update) {
1773 qs_bw_cfg->cfg[inx].queue_id = i;
1774 qs_bw_cfg->cfg[inx].shaper.peak = q_shaper->bw_max;
1775 qs_bw_cfg->cfg[inx].shaper.committed = q_shaper->bw_min;
1776 qs_bw_cfg->cfg[inx].tc = 0;
1777 inx++;
1778 }
1779 }
1780
1781 adapter->current_op = VIRTCHNL_OP_CONFIG_QUEUE_BW;
1782 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW;
1783 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_QUEUE_BW,
1784 (u8 *)qs_bw_cfg, len);
1785 kfree(qs_bw_cfg);
1786 }
1787
1788 /**
1789 * iavf_enable_channels
1790 * @adapter: adapter structure
1791 *
1792 * Request that the PF enable channels as specified by
1793 * the user via tc tool.
1794 **/
iavf_enable_channels(struct iavf_adapter * adapter)1795 void iavf_enable_channels(struct iavf_adapter *adapter)
1796 {
1797 struct virtchnl_tc_info *vti = NULL;
1798 size_t len;
1799 int i;
1800
1801 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1802 /* bail because we already have a command pending */
1803 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1804 adapter->current_op);
1805 return;
1806 }
1807
1808 len = virtchnl_struct_size(vti, list, adapter->num_tc);
1809 vti = kzalloc(len, GFP_KERNEL);
1810 if (!vti)
1811 return;
1812 vti->num_tc = adapter->num_tc;
1813 for (i = 0; i < vti->num_tc; i++) {
1814 vti->list[i].count = adapter->ch_config.ch_info[i].count;
1815 vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1816 vti->list[i].pad = 0;
1817 vti->list[i].max_tx_rate =
1818 adapter->ch_config.ch_info[i].max_tx_rate;
1819 }
1820
1821 adapter->ch_config.state = __IAVF_TC_RUNNING;
1822 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1823 adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1824 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1825 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
1826 kfree(vti);
1827 }
1828
1829 /**
1830 * iavf_disable_channels
1831 * @adapter: adapter structure
1832 *
1833 * Request that the PF disable channels that are configured
1834 **/
iavf_disable_channels(struct iavf_adapter * adapter)1835 void iavf_disable_channels(struct iavf_adapter *adapter)
1836 {
1837 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1838 /* bail because we already have a command pending */
1839 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1840 adapter->current_op);
1841 return;
1842 }
1843
1844 adapter->ch_config.state = __IAVF_TC_INVALID;
1845 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1846 adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1847 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1848 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1849 }
1850
/**
 * iavf_print_cloud_filter
 * @adapter: adapter structure
 * @f: cloud filter to print
 *
 * Print the cloud filter. Only TCP v4/v6 flow types are handled; any
 * other flow type is silently ignored.
 **/
static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
				    struct virtchnl_filter *f)
{
	switch (f->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip[0],
			 &f->data.tcp_spec.src_ip[0],
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip,
			 &f->data.tcp_spec.src_ip,
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	}
}
1884
1885 /**
1886 * iavf_add_cloud_filter
1887 * @adapter: adapter structure
1888 *
1889 * Request that the PF add cloud filters as specified
1890 * by the user via tc tool.
1891 **/
iavf_add_cloud_filter(struct iavf_adapter * adapter)1892 void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1893 {
1894 struct iavf_cloud_filter *cf;
1895 struct virtchnl_filter *f;
1896 int len = 0, count = 0;
1897
1898 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1899 /* bail because we already have a command pending */
1900 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1901 adapter->current_op);
1902 return;
1903 }
1904 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1905 if (cf->add) {
1906 count++;
1907 break;
1908 }
1909 }
1910 if (!count) {
1911 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1912 return;
1913 }
1914 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1915
1916 len = sizeof(struct virtchnl_filter);
1917 f = kzalloc(len, GFP_KERNEL);
1918 if (!f)
1919 return;
1920
1921 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1922 if (cf->add) {
1923 memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1924 cf->add = false;
1925 cf->state = __IAVF_CF_ADD_PENDING;
1926 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
1927 (u8 *)f, len);
1928 }
1929 }
1930 kfree(f);
1931 }
1932
1933 /**
1934 * iavf_del_cloud_filter
1935 * @adapter: adapter structure
1936 *
1937 * Request that the PF delete cloud filters as specified
1938 * by the user via tc tool.
1939 **/
iavf_del_cloud_filter(struct iavf_adapter * adapter)1940 void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1941 {
1942 struct iavf_cloud_filter *cf, *cftmp;
1943 struct virtchnl_filter *f;
1944 int len = 0, count = 0;
1945
1946 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1947 /* bail because we already have a command pending */
1948 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1949 adapter->current_op);
1950 return;
1951 }
1952 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1953 if (cf->del) {
1954 count++;
1955 break;
1956 }
1957 }
1958 if (!count) {
1959 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1960 return;
1961 }
1962 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1963
1964 len = sizeof(struct virtchnl_filter);
1965 f = kzalloc(len, GFP_KERNEL);
1966 if (!f)
1967 return;
1968
1969 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1970 if (cf->del) {
1971 memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1972 cf->del = false;
1973 cf->state = __IAVF_CF_DEL_PENDING;
1974 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
1975 (u8 *)f, len);
1976 }
1977 }
1978 kfree(f);
1979 }
1980
1981 /**
1982 * iavf_add_fdir_filter
1983 * @adapter: the VF adapter structure
1984 *
1985 * Request that the PF add Flow Director filters as specified
1986 * by the user via ethtool.
1987 **/
iavf_add_fdir_filter(struct iavf_adapter * adapter)1988 void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1989 {
1990 struct iavf_fdir_fltr *fdir;
1991 struct virtchnl_fdir_add *f;
1992 bool process_fltr = false;
1993 int len;
1994
1995 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1996 /* bail because we already have a command pending */
1997 dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1998 adapter->current_op);
1999 return;
2000 }
2001
2002 len = sizeof(struct virtchnl_fdir_add);
2003 f = kzalloc(len, GFP_KERNEL);
2004 if (!f)
2005 return;
2006
2007 spin_lock_bh(&adapter->fdir_fltr_lock);
2008 list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
2009 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
2010 process_fltr = true;
2011 fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
2012 memcpy(f, &fdir->vc_add_msg, len);
2013 break;
2014 }
2015 }
2016 spin_unlock_bh(&adapter->fdir_fltr_lock);
2017
2018 if (!process_fltr) {
2019 /* prevent iavf_add_fdir_filter() from being called when there
2020 * are no filters to add
2021 */
2022 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
2023 kfree(f);
2024 return;
2025 }
2026 adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
2027 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
2028 kfree(f);
2029 }
2030
2031 /**
2032 * iavf_del_fdir_filter
2033 * @adapter: the VF adapter structure
2034 *
2035 * Request that the PF delete Flow Director filters as specified
2036 * by the user via ethtool.
2037 **/
iavf_del_fdir_filter(struct iavf_adapter * adapter)2038 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
2039 {
2040 struct virtchnl_fdir_del f = {};
2041 struct iavf_fdir_fltr *fdir;
2042 bool process_fltr = false;
2043 int len;
2044
2045 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2046 /* bail because we already have a command pending */
2047 dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
2048 adapter->current_op);
2049 return;
2050 }
2051
2052 len = sizeof(struct virtchnl_fdir_del);
2053
2054 spin_lock_bh(&adapter->fdir_fltr_lock);
2055 list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
2056 if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
2057 process_fltr = true;
2058 f.vsi_id = fdir->vc_add_msg.vsi_id;
2059 f.flow_id = fdir->flow_id;
2060 fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
2061 break;
2062 } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) {
2063 process_fltr = true;
2064 f.vsi_id = fdir->vc_add_msg.vsi_id;
2065 f.flow_id = fdir->flow_id;
2066 fdir->state = IAVF_FDIR_FLTR_DIS_PENDING;
2067 break;
2068 }
2069 }
2070 spin_unlock_bh(&adapter->fdir_fltr_lock);
2071
2072 if (!process_fltr) {
2073 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
2074 return;
2075 }
2076
2077 adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
2078 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
2079 }
2080
2081 /**
2082 * iavf_add_adv_rss_cfg
2083 * @adapter: the VF adapter structure
2084 *
2085 * Request that the PF add RSS configuration as specified
2086 * by the user via ethtool.
2087 **/
iavf_add_adv_rss_cfg(struct iavf_adapter * adapter)2088 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
2089 {
2090 struct virtchnl_rss_cfg *rss_cfg;
2091 struct iavf_adv_rss *rss;
2092 bool process_rss = false;
2093 int len;
2094
2095 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2096 /* bail because we already have a command pending */
2097 dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
2098 adapter->current_op);
2099 return;
2100 }
2101
2102 len = sizeof(struct virtchnl_rss_cfg);
2103 rss_cfg = kzalloc(len, GFP_KERNEL);
2104 if (!rss_cfg)
2105 return;
2106
2107 spin_lock_bh(&adapter->adv_rss_lock);
2108 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2109 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
2110 process_rss = true;
2111 rss->state = IAVF_ADV_RSS_ADD_PENDING;
2112 memcpy(rss_cfg, &rss->cfg_msg, len);
2113 iavf_print_adv_rss_cfg(adapter, rss,
2114 "Input set change for",
2115 "is pending");
2116 break;
2117 }
2118 }
2119 spin_unlock_bh(&adapter->adv_rss_lock);
2120
2121 if (process_rss) {
2122 adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
2123 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
2124 (u8 *)rss_cfg, len);
2125 } else {
2126 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
2127 }
2128
2129 kfree(rss_cfg);
2130 }
2131
2132 /**
2133 * iavf_del_adv_rss_cfg
2134 * @adapter: the VF adapter structure
2135 *
2136 * Request that the PF delete RSS configuration as specified
2137 * by the user via ethtool.
2138 **/
iavf_del_adv_rss_cfg(struct iavf_adapter * adapter)2139 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
2140 {
2141 struct virtchnl_rss_cfg *rss_cfg;
2142 struct iavf_adv_rss *rss;
2143 bool process_rss = false;
2144 int len;
2145
2146 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
2147 /* bail because we already have a command pending */
2148 dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
2149 adapter->current_op);
2150 return;
2151 }
2152
2153 len = sizeof(struct virtchnl_rss_cfg);
2154 rss_cfg = kzalloc(len, GFP_KERNEL);
2155 if (!rss_cfg)
2156 return;
2157
2158 spin_lock_bh(&adapter->adv_rss_lock);
2159 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2160 if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
2161 process_rss = true;
2162 rss->state = IAVF_ADV_RSS_DEL_PENDING;
2163 memcpy(rss_cfg, &rss->cfg_msg, len);
2164 break;
2165 }
2166 }
2167 spin_unlock_bh(&adapter->adv_rss_lock);
2168
2169 if (process_rss) {
2170 adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
2171 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
2172 (u8 *)rss_cfg, len);
2173 } else {
2174 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
2175 }
2176
2177 kfree(rss_cfg);
2178 }
2179
2180 /**
2181 * iavf_request_reset
2182 * @adapter: adapter structure
2183 *
2184 * Request that the PF reset this VF. No response is expected.
2185 **/
iavf_request_reset(struct iavf_adapter * adapter)2186 int iavf_request_reset(struct iavf_adapter *adapter)
2187 {
2188 int err;
2189 /* Don't check CURRENT_OP - this is always higher priority */
2190 err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
2191 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2192 return err;
2193 }
2194
2195 /**
2196 * iavf_netdev_features_vlan_strip_set - update vlan strip status
2197 * @netdev: ptr to netdev being adjusted
2198 * @enable: enable or disable vlan strip
2199 *
2200 * Helper function to change vlan strip status in netdev->features.
2201 */
iavf_netdev_features_vlan_strip_set(struct net_device * netdev,const bool enable)2202 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
2203 const bool enable)
2204 {
2205 if (enable)
2206 netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2207 else
2208 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2209 }
2210
2211 /**
2212 * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset
2213 * @adapter: private adapter structure
2214 *
2215 * Called after a reset to re-add all FDIR filters and delete some of them
2216 * if they were pending to be deleted.
2217 */
iavf_activate_fdir_filters(struct iavf_adapter * adapter)2218 static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
2219 {
2220 struct iavf_fdir_fltr *f, *ftmp;
2221 bool add_filters = false;
2222
2223 spin_lock_bh(&adapter->fdir_fltr_lock);
2224 list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) {
2225 if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST ||
2226 f->state == IAVF_FDIR_FLTR_ADD_PENDING ||
2227 f->state == IAVF_FDIR_FLTR_ACTIVE) {
2228 /* All filters and requests have been removed in PF,
2229 * restore them
2230 */
2231 f->state = IAVF_FDIR_FLTR_ADD_REQUEST;
2232 add_filters = true;
2233 } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST ||
2234 f->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2235 /* Link down state, leave filters as inactive */
2236 f->state = IAVF_FDIR_FLTR_INACTIVE;
2237 } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST ||
2238 f->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2239 /* Delete filters that were pending to be deleted, the
2240 * list on PF is already cleared after a reset
2241 */
2242 list_del(&f->list);
2243 iavf_dec_fdir_active_fltr(adapter, f);
2244 kfree(f);
2245 }
2246 }
2247 spin_unlock_bh(&adapter->fdir_fltr_lock);
2248
2249 if (add_filters)
2250 adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
2251 }
2252
2253 /**
2254 * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
2255 * @adapter: private adapter structure
2256 * @data: the message from the PF
2257 * @len: length of the message from the PF
2258 *
2259 * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
2260 * is sent by the PF in response to the same op as a request from the VF.
2261 * Extract the 64bit nanoseconds time from the message and store it in
2262 * cached_phc_time. Then, notify any thread that is waiting for the update via
2263 * the wait queue.
2264 */
iavf_virtchnl_ptp_get_time(struct iavf_adapter * adapter,void * data,u16 len)2265 static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
2266 void *data, u16 len)
2267 {
2268 struct virtchnl_phc_time *msg = data;
2269
2270 if (len != sizeof(*msg)) {
2271 dev_err_once(&adapter->pdev->dev,
2272 "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
2273 len, sizeof(*msg));
2274 return;
2275 }
2276
2277 adapter->ptp.cached_phc_time = msg->time;
2278 adapter->ptp.cached_phc_updated = jiffies;
2279 adapter->ptp.phc_time_ready = true;
2280
2281 wake_up(&adapter->ptp.phc_time_waitqueue);
2282 }
2283
2284 /**
2285 * iavf_virtchnl_completion
2286 * @adapter: adapter structure
2287 * @v_opcode: opcode sent by PF
2288 * @v_retval: retval sent by PF
2289 * @msg: message sent by PF
2290 * @msglen: message length
2291 *
2292 * Asynchronous completion function for admin queue messages. Rather than busy
2293 * wait, we fire off our requests and assume that no errors will be returned.
2294 * This function handles the reply messages.
2295 **/
iavf_virtchnl_completion(struct iavf_adapter * adapter,enum virtchnl_ops v_opcode,enum iavf_status v_retval,u8 * msg,u16 msglen)2296 void iavf_virtchnl_completion(struct iavf_adapter *adapter,
2297 enum virtchnl_ops v_opcode,
2298 enum iavf_status v_retval, u8 *msg, u16 msglen)
2299 {
2300 struct net_device *netdev = adapter->netdev;
2301
2302 if (v_opcode == VIRTCHNL_OP_EVENT) {
2303 struct virtchnl_pf_event *vpe =
2304 (struct virtchnl_pf_event *)msg;
2305 bool link_up = iavf_get_vpe_link_status(adapter, vpe);
2306
2307 switch (vpe->event) {
2308 case VIRTCHNL_EVENT_LINK_CHANGE:
2309 iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
2310
2311 /* we've already got the right link status, bail */
2312 if (adapter->link_up == link_up)
2313 break;
2314
2315 if (link_up) {
2316 /* If we get link up message and start queues
2317 * before our queues are configured it will
2318 * trigger a TX hang. In that case, just ignore
2319 * the link status message,we'll get another one
2320 * after we enable queues and actually prepared
2321 * to send traffic.
2322 */
2323 if (adapter->state != __IAVF_RUNNING)
2324 break;
2325
2326 /* For ADq enabled VF, we reconfigure VSIs and
2327 * re-allocate queues. Hence wait till all
2328 * queues are enabled.
2329 */
2330 if (adapter->flags &
2331 IAVF_FLAG_QUEUES_DISABLED)
2332 break;
2333 }
2334
2335 adapter->link_up = link_up;
2336 if (link_up) {
2337 netif_tx_start_all_queues(netdev);
2338 netif_carrier_on(netdev);
2339 } else {
2340 netif_tx_stop_all_queues(netdev);
2341 netif_carrier_off(netdev);
2342 }
2343 iavf_print_link_message(adapter);
2344 break;
2345 case VIRTCHNL_EVENT_RESET_IMPENDING:
2346 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
2347 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
2348 dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
2349 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
2350 }
2351 break;
2352 default:
2353 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
2354 vpe->event);
2355 break;
2356 }
2357 return;
2358 }
2359 if (v_retval) {
2360 switch (v_opcode) {
2361 case VIRTCHNL_OP_ADD_VLAN:
2362 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2363 iavf_stat_str(&adapter->hw, v_retval));
2364 break;
2365 case VIRTCHNL_OP_ADD_ETH_ADDR:
2366 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
2367 iavf_stat_str(&adapter->hw, v_retval));
2368 iavf_mac_add_reject(adapter);
2369 /* restore administratively set MAC address */
2370 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2371 wake_up(&adapter->vc_waitqueue);
2372 break;
2373 case VIRTCHNL_OP_DEL_VLAN:
2374 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
2375 iavf_stat_str(&adapter->hw, v_retval));
2376 break;
2377 case VIRTCHNL_OP_DEL_ETH_ADDR:
2378 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
2379 iavf_stat_str(&adapter->hw, v_retval));
2380 break;
2381 case VIRTCHNL_OP_ENABLE_CHANNELS:
2382 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
2383 iavf_stat_str(&adapter->hw, v_retval));
2384 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2385 adapter->ch_config.state = __IAVF_TC_INVALID;
2386 netdev_reset_tc(netdev);
2387 netif_tx_start_all_queues(netdev);
2388 break;
2389 case VIRTCHNL_OP_DISABLE_CHANNELS:
2390 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
2391 iavf_stat_str(&adapter->hw, v_retval));
2392 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2393 adapter->ch_config.state = __IAVF_TC_RUNNING;
2394 netif_tx_start_all_queues(netdev);
2395 break;
2396 case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2397 struct iavf_cloud_filter *cf, *cftmp;
2398
2399 list_for_each_entry_safe(cf, cftmp,
2400 &adapter->cloud_filter_list,
2401 list) {
2402 if (cf->state == __IAVF_CF_ADD_PENDING) {
2403 cf->state = __IAVF_CF_INVALID;
2404 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
2405 iavf_stat_str(&adapter->hw,
2406 v_retval));
2407 iavf_print_cloud_filter(adapter,
2408 &cf->f);
2409 list_del(&cf->list);
2410 kfree(cf);
2411 adapter->num_cloud_filters--;
2412 }
2413 }
2414 }
2415 break;
2416 case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2417 struct iavf_cloud_filter *cf;
2418
2419 list_for_each_entry(cf, &adapter->cloud_filter_list,
2420 list) {
2421 if (cf->state == __IAVF_CF_DEL_PENDING) {
2422 cf->state = __IAVF_CF_ACTIVE;
2423 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
2424 iavf_stat_str(&adapter->hw,
2425 v_retval));
2426 iavf_print_cloud_filter(adapter,
2427 &cf->f);
2428 }
2429 }
2430 }
2431 break;
2432 case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2433 struct iavf_fdir_fltr *fdir, *fdir_tmp;
2434
2435 spin_lock_bh(&adapter->fdir_fltr_lock);
2436 list_for_each_entry_safe(fdir, fdir_tmp,
2437 &adapter->fdir_list_head,
2438 list) {
2439 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2440 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
2441 iavf_stat_str(&adapter->hw,
2442 v_retval));
2443 iavf_print_fdir_fltr(adapter, fdir);
2444 if (msglen)
2445 dev_err(&adapter->pdev->dev,
2446 "%s\n", msg);
2447 list_del(&fdir->list);
2448 iavf_dec_fdir_active_fltr(adapter, fdir);
2449 kfree(fdir);
2450 }
2451 }
2452 spin_unlock_bh(&adapter->fdir_fltr_lock);
2453 }
2454 break;
2455 case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2456 struct iavf_fdir_fltr *fdir;
2457
2458 spin_lock_bh(&adapter->fdir_fltr_lock);
2459 list_for_each_entry(fdir, &adapter->fdir_list_head,
2460 list) {
2461 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING ||
2462 fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2463 fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2464 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
2465 iavf_stat_str(&adapter->hw,
2466 v_retval));
2467 iavf_print_fdir_fltr(adapter, fdir);
2468 }
2469 }
2470 spin_unlock_bh(&adapter->fdir_fltr_lock);
2471 }
2472 break;
2473 case VIRTCHNL_OP_ADD_RSS_CFG: {
2474 struct iavf_adv_rss *rss, *rss_tmp;
2475
2476 spin_lock_bh(&adapter->adv_rss_lock);
2477 list_for_each_entry_safe(rss, rss_tmp,
2478 &adapter->adv_rss_list_head,
2479 list) {
2480 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2481 iavf_print_adv_rss_cfg(adapter, rss,
2482 "Failed to change the input set for",
2483 NULL);
2484 list_del(&rss->list);
2485 kfree(rss);
2486 }
2487 }
2488 spin_unlock_bh(&adapter->adv_rss_lock);
2489 }
2490 break;
2491 case VIRTCHNL_OP_DEL_RSS_CFG: {
2492 struct iavf_adv_rss *rss;
2493
2494 spin_lock_bh(&adapter->adv_rss_lock);
2495 list_for_each_entry(rss, &adapter->adv_rss_list_head,
2496 list) {
2497 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2498 rss->state = IAVF_ADV_RSS_ACTIVE;
2499 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
2500 iavf_stat_str(&adapter->hw,
2501 v_retval));
2502 }
2503 }
2504 spin_unlock_bh(&adapter->adv_rss_lock);
2505 }
2506 break;
2507 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2508 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2509 /* Vlan stripping could not be enabled by ethtool.
2510 * Disable it in netdev->features.
2511 */
2512 iavf_netdev_features_vlan_strip_set(netdev, false);
2513 break;
2514 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2515 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2516 /* Vlan stripping could not be disabled by ethtool.
2517 * Enable it in netdev->features.
2518 */
2519 iavf_netdev_features_vlan_strip_set(netdev, true);
2520 break;
2521 case VIRTCHNL_OP_ADD_VLAN_V2:
2522 iavf_vlan_add_reject(adapter);
2523 dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2524 iavf_stat_str(&adapter->hw, v_retval));
2525 break;
2526 case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
2527 dev_warn(&adapter->pdev->dev, "Failed to configure hash function, error %s\n",
2528 iavf_stat_str(&adapter->hw, v_retval));
2529
2530 if (adapter->hfunc ==
2531 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
2532 adapter->hfunc =
2533 VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
2534 else
2535 adapter->hfunc =
2536 VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
2537
2538 break;
2539 case VIRTCHNL_OP_GET_QOS_CAPS:
2540 dev_warn(&adapter->pdev->dev, "Failed to Get Qos CAPs, error %s\n",
2541 iavf_stat_str(&adapter->hw, v_retval));
2542 break;
2543 case VIRTCHNL_OP_CONFIG_QUANTA:
2544 dev_warn(&adapter->pdev->dev, "Failed to Config Quanta, error %s\n",
2545 iavf_stat_str(&adapter->hw, v_retval));
2546 break;
2547 case VIRTCHNL_OP_CONFIG_QUEUE_BW:
2548 dev_warn(&adapter->pdev->dev, "Failed to Config Queue BW, error %s\n",
2549 iavf_stat_str(&adapter->hw, v_retval));
2550 break;
2551 default:
2552 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
2553 v_retval, iavf_stat_str(&adapter->hw, v_retval),
2554 v_opcode);
2555 }
2556 }
2557 switch (v_opcode) {
2558 case VIRTCHNL_OP_ADD_ETH_ADDR:
2559 if (!v_retval)
2560 iavf_mac_add_ok(adapter);
2561 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
2562 if (!ether_addr_equal(netdev->dev_addr,
2563 adapter->hw.mac.addr)) {
2564 netif_addr_lock_bh(netdev);
2565 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2566 netif_addr_unlock_bh(netdev);
2567 }
2568 wake_up(&adapter->vc_waitqueue);
2569 break;
2570 case VIRTCHNL_OP_GET_STATS: {
2571 struct iavf_eth_stats *stats =
2572 (struct iavf_eth_stats *)msg;
2573 netdev->stats.rx_packets = stats->rx_unicast +
2574 stats->rx_multicast +
2575 stats->rx_broadcast;
2576 netdev->stats.tx_packets = stats->tx_unicast +
2577 stats->tx_multicast +
2578 stats->tx_broadcast;
2579 netdev->stats.rx_bytes = stats->rx_bytes;
2580 netdev->stats.tx_bytes = stats->tx_bytes;
2581 netdev->stats.tx_errors = stats->tx_errors;
2582 netdev->stats.rx_dropped = stats->rx_discards;
2583 netdev->stats.tx_dropped = stats->tx_discards;
2584 adapter->current_stats = *stats;
2585 }
2586 break;
2587 case VIRTCHNL_OP_GET_VF_RESOURCES: {
2588 u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
2589
2590 memcpy(adapter->vf_res, msg, min(msglen, len));
2591 iavf_validate_num_queues(adapter);
2592 iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
2593 if (is_zero_ether_addr(adapter->hw.mac.addr)) {
2594 /* restore current mac address */
2595 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2596 } else {
2597 netif_addr_lock_bh(netdev);
2598 /* refresh current mac address if changed */
2599 ether_addr_copy(netdev->perm_addr,
2600 adapter->hw.mac.addr);
2601 netif_addr_unlock_bh(netdev);
2602 }
2603 spin_lock_bh(&adapter->mac_vlan_list_lock);
2604 iavf_add_filter(adapter, adapter->hw.mac.addr);
2605
2606 if (VLAN_ALLOWED(adapter)) {
2607 if (!list_empty(&adapter->vlan_filter_list)) {
2608 struct iavf_vlan_filter *vlf;
2609
2610 /* re-add all VLAN filters over virtchnl */
2611 list_for_each_entry(vlf,
2612 &adapter->vlan_filter_list,
2613 list)
2614 vlf->state = IAVF_VLAN_ADD;
2615
2616 adapter->aq_required |=
2617 IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2618 }
2619 }
2620
2621 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2622
2623 iavf_activate_fdir_filters(adapter);
2624
2625 iavf_parse_vf_resource_msg(adapter);
2626
2627 /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
2628 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
2629 * configuration
2630 */
2631 if (VLAN_V2_ALLOWED(adapter))
2632 break;
2633 /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
2634 * wasn't successfully negotiated with the PF
2635 */
2636 }
2637 fallthrough;
2638 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
2639 struct iavf_mac_filter *f;
2640 bool was_mac_changed;
2641 u64 aq_required = 0;
2642
2643 if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
2644 memcpy(&adapter->vlan_v2_caps, msg,
2645 min_t(u16, msglen,
2646 sizeof(adapter->vlan_v2_caps)));
2647
2648 iavf_process_config(adapter);
2649 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
2650 iavf_schedule_finish_config(adapter);
2651
2652 iavf_set_queue_vlan_tag_loc(adapter);
2653
2654 was_mac_changed = !ether_addr_equal(netdev->dev_addr,
2655 adapter->hw.mac.addr);
2656
2657 spin_lock_bh(&adapter->mac_vlan_list_lock);
2658
2659 /* re-add all MAC filters */
2660 list_for_each_entry(f, &adapter->mac_filter_list, list) {
2661 if (was_mac_changed &&
2662 ether_addr_equal(netdev->dev_addr, f->macaddr))
2663 ether_addr_copy(f->macaddr,
2664 adapter->hw.mac.addr);
2665
2666 f->is_new_mac = true;
2667 f->add = true;
2668 f->add_handled = false;
2669 f->remove = false;
2670 }
2671
2672 /* re-add all VLAN filters */
2673 if (VLAN_FILTERING_ALLOWED(adapter)) {
2674 struct iavf_vlan_filter *vlf;
2675
2676 if (!list_empty(&adapter->vlan_filter_list)) {
2677 list_for_each_entry(vlf,
2678 &adapter->vlan_filter_list,
2679 list)
2680 vlf->state = IAVF_VLAN_ADD;
2681
2682 aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2683 }
2684 }
2685
2686 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2687
2688 netif_addr_lock_bh(netdev);
2689 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2690 netif_addr_unlock_bh(netdev);
2691
2692 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
2693 aq_required;
2694 }
2695 break;
2696 case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
2697 if (msglen != sizeof(u64))
2698 return;
2699
2700 adapter->supp_rxdids = *(u64 *)msg;
2701
2702 break;
2703 case VIRTCHNL_OP_1588_PTP_GET_CAPS:
2704 if (msglen != sizeof(adapter->ptp.hw_caps))
2705 return;
2706
2707 adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
2708
2709 /* process any state change needed due to new capabilities */
2710 iavf_ptp_process_caps(adapter);
2711 break;
2712 case VIRTCHNL_OP_1588_PTP_GET_TIME:
2713 iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
2714 break;
2715 case VIRTCHNL_OP_ENABLE_QUEUES:
2716 /* enable transmits */
2717 iavf_irq_enable(adapter, true);
2718 wake_up(&adapter->reset_waitqueue);
2719 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
2720 break;
2721 case VIRTCHNL_OP_DISABLE_QUEUES:
2722 iavf_free_all_tx_resources(adapter);
2723 iavf_free_all_rx_resources(adapter);
2724 if (adapter->state == __IAVF_DOWN_PENDING) {
2725 iavf_change_state(adapter, __IAVF_DOWN);
2726 wake_up(&adapter->down_waitqueue);
2727 }
2728 break;
2729 case VIRTCHNL_OP_VERSION:
2730 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2731 /* Don't display an error if we get these out of sequence.
2732 * If the firmware needed to get kicked, we'll get these and
2733 * it's no problem.
2734 */
2735 if (v_opcode != adapter->current_op)
2736 return;
2737 break;
2738 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
2739 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
2740
2741 if (msglen == sizeof(*vrh))
2742 adapter->hena = vrh->hena;
2743 else
2744 dev_warn(&adapter->pdev->dev,
2745 "Invalid message %d from PF\n", v_opcode);
2746 }
2747 break;
2748 case VIRTCHNL_OP_REQUEST_QUEUES: {
2749 struct virtchnl_vf_res_request *vfres =
2750 (struct virtchnl_vf_res_request *)msg;
2751
2752 if (vfres->num_queue_pairs != adapter->num_req_queues) {
2753 dev_info(&adapter->pdev->dev,
2754 "Requested %d queues, PF can support %d\n",
2755 adapter->num_req_queues,
2756 vfres->num_queue_pairs);
2757 adapter->num_req_queues = 0;
2758 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2759 }
2760 }
2761 break;
2762 case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2763 struct iavf_cloud_filter *cf;
2764
2765 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2766 if (cf->state == __IAVF_CF_ADD_PENDING)
2767 cf->state = __IAVF_CF_ACTIVE;
2768 }
2769 }
2770 break;
2771 case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2772 struct iavf_cloud_filter *cf, *cftmp;
2773
2774 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2775 list) {
2776 if (cf->state == __IAVF_CF_DEL_PENDING) {
2777 cf->state = __IAVF_CF_INVALID;
2778 list_del(&cf->list);
2779 kfree(cf);
2780 adapter->num_cloud_filters--;
2781 }
2782 }
2783 }
2784 break;
2785 case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2786 struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
2787 struct iavf_fdir_fltr *fdir, *fdir_tmp;
2788
2789 spin_lock_bh(&adapter->fdir_fltr_lock);
2790 list_for_each_entry_safe(fdir, fdir_tmp,
2791 &adapter->fdir_list_head,
2792 list) {
2793 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2794 if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
2795 if (!iavf_is_raw_fdir(fdir))
2796 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
2797 fdir->loc);
2798 else
2799 dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is added\n",
2800 TC_U32_USERHTID(fdir->cls_u32_handle));
2801 fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2802 fdir->flow_id = add_fltr->flow_id;
2803 } else {
2804 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
2805 add_fltr->status);
2806 iavf_print_fdir_fltr(adapter, fdir);
2807 list_del(&fdir->list);
2808 iavf_dec_fdir_active_fltr(adapter, fdir);
2809 kfree(fdir);
2810 }
2811 }
2812 }
2813 spin_unlock_bh(&adapter->fdir_fltr_lock);
2814 }
2815 break;
2816 case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2817 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
2818 struct iavf_fdir_fltr *fdir, *fdir_tmp;
2819
2820 spin_lock_bh(&adapter->fdir_fltr_lock);
2821 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
2822 list) {
2823 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2824 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
2825 del_fltr->status ==
2826 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
2827 if (!iavf_is_raw_fdir(fdir))
2828 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
2829 fdir->loc);
2830 else
2831 dev_info(&adapter->pdev->dev, "Flow Director filter (raw) for TC handle %x is deleted\n",
2832 TC_U32_USERHTID(fdir->cls_u32_handle));
2833 list_del(&fdir->list);
2834 iavf_dec_fdir_active_fltr(adapter, fdir);
2835 kfree(fdir);
2836 } else {
2837 fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2838 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
2839 del_fltr->status);
2840 iavf_print_fdir_fltr(adapter, fdir);
2841 }
2842 } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) {
2843 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS ||
2844 del_fltr->status ==
2845 VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
2846 fdir->state = IAVF_FDIR_FLTR_INACTIVE;
2847 } else {
2848 fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2849 dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n",
2850 del_fltr->status);
2851 iavf_print_fdir_fltr(adapter, fdir);
2852 }
2853 }
2854 }
2855 spin_unlock_bh(&adapter->fdir_fltr_lock);
2856 }
2857 break;
2858 case VIRTCHNL_OP_ADD_RSS_CFG: {
2859 struct iavf_adv_rss *rss;
2860
2861 spin_lock_bh(&adapter->adv_rss_lock);
2862 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2863 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2864 iavf_print_adv_rss_cfg(adapter, rss,
2865 "Input set change for",
2866 "successful");
2867 rss->state = IAVF_ADV_RSS_ACTIVE;
2868 }
2869 }
2870 spin_unlock_bh(&adapter->adv_rss_lock);
2871 }
2872 break;
2873 case VIRTCHNL_OP_DEL_RSS_CFG: {
2874 struct iavf_adv_rss *rss, *rss_tmp;
2875
2876 spin_lock_bh(&adapter->adv_rss_lock);
2877 list_for_each_entry_safe(rss, rss_tmp,
2878 &adapter->adv_rss_list_head, list) {
2879 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2880 list_del(&rss->list);
2881 kfree(rss);
2882 }
2883 }
2884 spin_unlock_bh(&adapter->adv_rss_lock);
2885 }
2886 break;
2887 case VIRTCHNL_OP_ADD_VLAN_V2: {
2888 struct iavf_vlan_filter *f;
2889
2890 spin_lock_bh(&adapter->mac_vlan_list_lock);
2891 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
2892 if (f->state == IAVF_VLAN_IS_NEW)
2893 f->state = IAVF_VLAN_ACTIVE;
2894 }
2895 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2896 }
2897 break;
2898 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2899 /* PF enabled vlan strip on this VF.
2900 * Update netdev->features if needed to be in sync with ethtool.
2901 */
2902 if (!v_retval)
2903 iavf_netdev_features_vlan_strip_set(netdev, true);
2904 break;
2905 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2906 /* PF disabled vlan strip on this VF.
2907 * Update netdev->features if needed to be in sync with ethtool.
2908 */
2909 if (!v_retval)
2910 iavf_netdev_features_vlan_strip_set(netdev, false);
2911 break;
2912 case VIRTCHNL_OP_GET_QOS_CAPS: {
2913 u16 len = struct_size(adapter->qos_caps, cap,
2914 IAVF_MAX_QOS_TC_NUM);
2915
2916 memcpy(adapter->qos_caps, msg, min(msglen, len));
2917
2918 adapter->aq_required |= IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE;
2919 }
2920 break;
2921 case VIRTCHNL_OP_CONFIG_QUANTA:
2922 break;
2923 case VIRTCHNL_OP_CONFIG_QUEUE_BW: {
2924 int i;
2925 /* shaper configuration is successful for all queues */
2926 for (i = 0; i < adapter->num_active_queues; i++)
2927 adapter->tx_rings[i].q_shaper_update = false;
2928 }
2929 break;
2930 default:
2931 if (adapter->current_op && (v_opcode != adapter->current_op))
2932 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
2933 adapter->current_op, v_opcode);
2934 break;
2935 } /* switch v_opcode */
2936 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2937 }
2938