1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2005-2018 Solarflare Communications Inc.
5 * Copyright 2019-2020 Xilinx Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation, incorporated herein by reference.
10 */
11
12 #include "mcdi_filters.h"
13 #include "mcdi.h"
14 #include "nic.h"
15 #include "rx_common.h"
16
17 /* The maximum size of a shared RSS context */
18 /* TODO: this should really be from the mcdi protocol export */
19 #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
20
21 #define EFX_EF10_FILTER_ID_INVALID 0xffff
22
23 /* An arbitrary search limit for the software hash table */
24 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
25
26 static struct efx_filter_spec *
27 efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table,
28 unsigned int filter_idx)
29 {
30 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
31 ~EFX_EF10_FILTER_FLAGS);
32 }
33
34 static unsigned int
35 efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table,
36 unsigned int filter_idx)
37 {
38 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
39 }
40
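/*
 * Externally visible filter IDs encode the match priority returned by
 * efx_mcdi_filter_pri() in their upper bits and the table row index in
 * their low bits; the helpers below pack and unpack those two fields.
 */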
41 static u32 efx_mcdi_filter_get_unsafe_id(u32 filter_id)
42 {
43 WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
44 return filter_id & (EFX_MCDI_FILTER_TBL_ROWS - 1);
45 }
46
47 static unsigned int efx_mcdi_filter_get_unsafe_pri(u32 filter_id)
48 {
49 return filter_id / (EFX_MCDI_FILTER_TBL_ROWS * 2);
50 }
51
52 static u32 efx_mcdi_filter_make_filter_id(unsigned int pri, u16 idx)
53 {
54 return pri * EFX_MCDI_FILTER_TBL_ROWS * 2 + idx;
55 }
56
57 /*
58 * Decide whether a filter should be exclusive or else should allow
59 * delivery to additional recipients. Currently we decide that
60 * filters for specific local unicast MAC and IP addresses are
61 * exclusive.
62 */
63 static bool efx_mcdi_filter_is_exclusive(const struct efx_filter_spec *spec)
64 {
65 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
66 !is_multicast_ether_addr(spec->loc_mac))
67 return true;
68
69 if ((spec->match_flags &
70 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
71 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
72 if (spec->ether_type == htons(ETH_P_IP) &&
73 !ipv4_is_multicast(spec->loc_host[0]))
74 return true;
75 if (spec->ether_type == htons(ETH_P_IPV6) &&
76 ((const u8 *)spec->loc_host)[0] != 0xff)
77 return true;
78 }
79
80 return false;
81 }
82
83 static void
84 efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table,
85 unsigned int filter_idx,
86 const struct efx_filter_spec *spec,
87 unsigned int flags)
88 {
89 table->entry[filter_idx].spec = (unsigned long)spec | flags;
90 }
91
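/*
 * Translate the match fields of a filter spec into an MCDI FILTER_OP
 * request, choosing INSERT or SUBSCRIBE according to whether the filter
 * is exclusive.  Encapsulated (VXLAN/GENEVE/NVGRE) filters are handled
 * first since they are always unknown-UC or unknown-MC mismatch filters.
 */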
92 static void
93 efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx,
94 const struct efx_filter_spec *spec,
95 efx_dword_t *inbuf)
96 {
97 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
98 u32 match_fields = 0, uc_match, mc_match;
99
100 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
101 efx_mcdi_filter_is_exclusive(spec) ?
102 MC_CMD_FILTER_OP_IN_OP_INSERT :
103 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
104
105 /*
106 * Convert match flags and values. Unlike almost
107 * everything else in MCDI, these fields are in
108 * network byte order.
109 */
110 #define COPY_VALUE(value, mcdi_field) \
111 do { \
112 match_fields |= \
113 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
114 mcdi_field ## _LBN; \
115 BUILD_BUG_ON( \
116 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
117 sizeof(value)); \
118 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
119 &value, sizeof(value)); \
120 } while (0)
121 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
122 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
123 COPY_VALUE(spec->gen_field, mcdi_field); \
124 }
125 /*
126 * Handle encap filters first. They will always be mismatch
127 * (unknown UC or MC) filters
128 */
129 if (encap_type) {
130 /*
131 * ether_type and outer_ip_proto need to be variables
132 * because COPY_VALUE wants to memcpy them
133 */
134 __be16 ether_type =
135 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
136 ETH_P_IPV6 : ETH_P_IP);
137 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
138 u8 outer_ip_proto;
139
140 switch (encap_type & EFX_ENCAP_TYPES_MASK) {
141 case EFX_ENCAP_TYPE_VXLAN:
142 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
143 fallthrough;
144 case EFX_ENCAP_TYPE_GENEVE:
145 COPY_VALUE(ether_type, ETHER_TYPE);
146 outer_ip_proto = IPPROTO_UDP;
147 COPY_VALUE(outer_ip_proto, IP_PROTO);
148 /*
149 * We always need to set the type field, even
150 * though we're not matching on the TNI.
151 */
152 MCDI_POPULATE_DWORD_1(inbuf,
153 FILTER_OP_EXT_IN_VNI_OR_VSID,
154 FILTER_OP_EXT_IN_VNI_TYPE,
155 vni_type);
156 break;
157 case EFX_ENCAP_TYPE_NVGRE:
158 COPY_VALUE(ether_type, ETHER_TYPE);
159 outer_ip_proto = IPPROTO_GRE;
160 COPY_VALUE(outer_ip_proto, IP_PROTO);
161 break;
162 default:
163 WARN_ON(1);
164 }
165
166 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
167 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
168 } else {
169 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
170 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
171 }
172
173 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
174 match_fields |=
175 is_multicast_ether_addr(spec->loc_mac) ?
176 1 << mc_match :
177 1 << uc_match;
178 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
179 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
180 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
181 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
182 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
183 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
184 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
185 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
186 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
187 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
188 #undef COPY_FIELD
189 #undef COPY_VALUE
190 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
191 match_fields);
192 }
193
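/*
 * Build a complete MC_CMD_FILTER_OP request for a spec.  When replacing
 * an existing filter its hardware handle is reused with OP_REPLACE;
 * otherwise the match fields are filled in from scratch.  RSS is quietly
 * disabled (with a WARN) if no valid RSS context was supplied.
 */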
194 static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
195 const struct efx_filter_spec *spec,
196 efx_dword_t *inbuf, u64 handle,
197 struct efx_rss_context_priv *ctx,
198 bool replacing)
199 {
200 u32 flags = spec->flags;
201
202 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
203
204 /* If RSS filter, caller better have given us an RSS context */
205 if (flags & EFX_FILTER_FLAG_RX_RSS) {
206 /*
207 * We don't have the ability to return an error, so we'll just
208 * log a warning and disable RSS for the filter.
209 */
210 if (WARN_ON_ONCE(!ctx))
211 flags &= ~EFX_FILTER_FLAG_RX_RSS;
212 else if (WARN_ON_ONCE(ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID))
213 flags &= ~EFX_FILTER_FLAG_RX_RSS;
214 }
215
216 if (replacing) {
217 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
218 MC_CMD_FILTER_OP_IN_OP_REPLACE);
219 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
220 } else {
221 efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf);
222 }
223
224 if (flags & EFX_FILTER_FLAG_VPORT_ID)
225 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, spec->vport_id);
226 else
227 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id);
228 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
229 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
230 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
231 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
232 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
233 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
234 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
235 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
236 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
237 0 : spec->dmaq_id);
238 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
239 (flags & EFX_FILTER_FLAG_RX_RSS) ?
240 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
241 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
242 if (flags & EFX_FILTER_FLAG_RX_RSS)
243 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id);
244 }
245
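/*
 * Push a filter to the MC.  On success the hardware filter handle is
 * returned through *handle; -ENOSPC from the MC is remapped to -EBUSY to
 * match the legacy efx_farch_filter_insert() behaviour.
 */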
246 static int efx_mcdi_filter_push(struct efx_nic *efx,
247 const struct efx_filter_spec *spec, u64 *handle,
248 struct efx_rss_context_priv *ctx, bool replacing)
249 {
250 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
251 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
252 size_t outlen;
253 int rc;
254
255 efx_mcdi_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing);
256 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
257 outbuf, sizeof(outbuf), &outlen);
258 if (rc && spec->priority != EFX_FILTER_PRI_HINT)
259 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf),
260 outbuf, outlen, rc);
261 if (rc == 0)
262 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
263 if (rc == -ENOSPC)
264 rc = -EBUSY; /* to match efx_farch_filter_insert() */
265 return rc;
266 }
267
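/*
 * Convert a spec's match flags into the MCDI match-field bitmask, using
 * the inner-frame (IFRM_) field positions for encapsulated filters.
 * Warns once if any flag cannot be mapped.
 */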
268 static u32 efx_mcdi_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
269 {
270 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
271 unsigned int match_flags = spec->match_flags;
272 unsigned int uc_match, mc_match;
273 u32 mcdi_flags = 0;
274
275 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
276 unsigned int old_match_flags = match_flags; \
277 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
278 if (match_flags != old_match_flags) \
279 mcdi_flags |= \
280 (1 << ((encap) ? \
281 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
282 mcdi_field ## _LBN : \
283 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
284 mcdi_field ## _LBN)); \
285 }
286 /* inner or outer based on encap type */
287 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
288 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
289 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
290 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
291 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
292 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
293 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
294 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
295 /* always outer */
296 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
297 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
298 #undef MAP_FILTER_TO_MCDI_FLAG
299
300 /* special handling for encap type, and mismatch */
301 if (encap_type) {
302 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
303 mcdi_flags |=
304 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
305 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
306
307 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
308 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
309 } else {
310 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
311 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
312 }
313
314 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
315 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
316 mcdi_flags |=
317 is_multicast_ether_addr(spec->loc_mac) ?
318 1 << mc_match :
319 1 << uc_match;
320 }
321
322 /* Did we map them all? */
323 WARN_ON_ONCE(match_flags);
324
325 return mcdi_flags;
326 }
327
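/*
 * Map a spec's match type onto its index ("match priority") in the table
 * of RX match types supported by the firmware, or return -EPROTONOSUPPORT
 * if the firmware does not support this combination.
 */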
328 static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table,
329 const struct efx_filter_spec *spec)
330 {
331 u32 mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
332 unsigned int match_pri;
333
334 for (match_pri = 0;
335 match_pri < table->rx_match_count;
336 match_pri++)
337 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
338 return match_pri;
339
340 return -EPROTONOSUPPORT;
341 }
342
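/*
 * Insert or replace a filter.  The software table is probed by open
 * addressing from the spec's hash, up to EFX_EF10_FILTER_SEARCH_LIMIT
 * slots.  An exclusive filter replaces a single matching entry, while
 * multicast-recipient filters may coexist; any lower-priority multicast
 * duplicates found during the search are unsubscribed once the new
 * filter has been pushed successfully.  Returns a filter ID on success.
 */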
343 static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
344 struct efx_filter_spec *spec,
345 bool replace_equal)
346 {
347 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
348 struct efx_rss_context_priv *ctx = NULL;
349 struct efx_mcdi_filter_table *table;
350 struct efx_filter_spec *saved_spec;
351 unsigned int match_pri, hash;
352 unsigned int priv_flags;
353 bool rss_locked = false;
354 bool replacing = false;
355 unsigned int depth, i;
356 int ins_index = -1;
357 DEFINE_WAIT(wait);
358 bool is_mc_recip;
359 s32 rc;
360
361 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
362 table = efx->filter_state;
363 down_write(&table->lock);
364
365 /* For now, only support RX filters */
366 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
367 EFX_FILTER_FLAG_RX) {
368 rc = -EINVAL;
369 goto out_unlock;
370 }
371
372 rc = efx_mcdi_filter_pri(table, spec);
373 if (rc < 0)
374 goto out_unlock;
375 match_pri = rc;
376
377 hash = efx_filter_spec_hash(spec);
378 is_mc_recip = efx_filter_is_mc_recipient(spec);
379 if (is_mc_recip)
380 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
381
382 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
383 mutex_lock(&efx->net_dev->ethtool->rss_lock);
384 rss_locked = true;
385 if (spec->rss_context)
386 ctx = efx_find_rss_context_entry(efx, spec->rss_context);
387 else
388 ctx = &efx->rss_context.priv;
389 if (!ctx) {
390 rc = -ENOENT;
391 goto out_unlock;
392 }
393 if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
394 rc = -EOPNOTSUPP;
395 goto out_unlock;
396 }
397 }
398
399 /* Find any existing filters with the same match tuple or
400 * else a free slot to insert at.
401 */
402 for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
403 i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1);
404 saved_spec = efx_mcdi_filter_entry_spec(table, i);
405
406 if (!saved_spec) {
407 if (ins_index < 0)
408 ins_index = i;
409 } else if (efx_filter_spec_equal(spec, saved_spec)) {
410 if (spec->priority < saved_spec->priority &&
411 spec->priority != EFX_FILTER_PRI_AUTO) {
412 rc = -EPERM;
413 goto out_unlock;
414 }
415 if (!is_mc_recip) {
416 /* This is the only one */
417 if (spec->priority ==
418 saved_spec->priority &&
419 !replace_equal) {
420 rc = -EEXIST;
421 goto out_unlock;
422 }
423 ins_index = i;
424 break;
425 } else if (spec->priority >
426 saved_spec->priority ||
427 (spec->priority ==
428 saved_spec->priority &&
429 replace_equal)) {
430 if (ins_index < 0)
431 ins_index = i;
432 else
433 __set_bit(depth, mc_rem_map);
434 }
435 }
436 }
437
438 /* Once we reach the maximum search depth, use the first suitable
439 * slot, or return -EBUSY if there was none
440 */
441 if (ins_index < 0) {
442 rc = -EBUSY;
443 goto out_unlock;
444 }
445
446 /* Create a software table entry if necessary. */
447 saved_spec = efx_mcdi_filter_entry_spec(table, ins_index);
448 if (saved_spec) {
449 if (spec->priority == EFX_FILTER_PRI_AUTO &&
450 saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
451 /* Just make sure it won't be removed */
452 if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
453 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
454 table->entry[ins_index].spec &=
455 ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
456 rc = ins_index;
457 goto out_unlock;
458 }
459 replacing = true;
460 priv_flags = efx_mcdi_filter_entry_flags(table, ins_index);
461 } else {
462 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
463 if (!saved_spec) {
464 rc = -ENOMEM;
465 goto out_unlock;
466 }
467 *saved_spec = *spec;
468 priv_flags = 0;
469 }
470 efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
471
472 /* Actually insert the filter on the HW */
473 rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle,
474 ctx, replacing);
475
476 if (rc == -EINVAL && efx->must_realloc_vis)
477 /* The MC rebooted under us, causing it to reject our filter
478 * insertion as pointing to an invalid VI (spec->dmaq_id).
479 */
480 rc = -EAGAIN;
481
482 /* Finalise the software table entry */
483 if (rc == 0) {
484 if (replacing) {
485 /* Update the fields that may differ */
486 if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
487 saved_spec->flags |=
488 EFX_FILTER_FLAG_RX_OVER_AUTO;
489 saved_spec->priority = spec->priority;
490 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
491 saved_spec->flags |= spec->flags;
492 saved_spec->rss_context = spec->rss_context;
493 saved_spec->dmaq_id = spec->dmaq_id;
494 saved_spec->vport_id = spec->vport_id;
495 }
496 } else if (!replacing) {
497 kfree(saved_spec);
498 saved_spec = NULL;
499 } else {
500 /* We failed to replace, so the old filter is still present.
501 * Roll back the software table to reflect this. In fact the
502 * efx_mcdi_filter_set_entry() call below will do the right
503 * thing, so nothing extra is needed here.
504 */
505 }
506 efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags);
507
508 /* Remove and finalise entries for lower-priority multicast
509 * recipients
510 */
511 if (is_mc_recip) {
512 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
513 unsigned int depth, i;
514
515 memset(inbuf, 0, sizeof(inbuf));
516
517 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
518 if (!test_bit(depth, mc_rem_map))
519 continue;
520
521 i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1);
522 saved_spec = efx_mcdi_filter_entry_spec(table, i);
523 priv_flags = efx_mcdi_filter_entry_flags(table, i);
524
525 if (rc == 0) {
526 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
527 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
528 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
529 table->entry[i].handle);
530 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
531 inbuf, sizeof(inbuf),
532 NULL, 0, NULL);
533 }
534
535 if (rc == 0) {
536 kfree(saved_spec);
537 saved_spec = NULL;
538 priv_flags = 0;
539 }
540 efx_mcdi_filter_set_entry(table, i, saved_spec,
541 priv_flags);
542 }
543 }
544
545 /* If successful, return the inserted filter ID */
546 if (rc == 0)
547 rc = efx_mcdi_filter_make_filter_id(match_pri, ins_index);
548
549 out_unlock:
550 if (rss_locked)
551 mutex_unlock(&efx->net_dev->ethtool->rss_lock);
552 up_write(&table->lock);
553 return rc;
554 }
555
556 s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
557 bool replace_equal)
558 {
559 s32 ret;
560
561 down_read(&efx->filter_sem);
562 ret = efx_mcdi_filter_insert_locked(efx, spec, replace_equal);
563 up_read(&efx->filter_sem);
564
565 return ret;
566 }
567
568 /*
569 * Remove a filter.
570 * If !by_index, remove by ID
571 * If by_index, remove by index
572 * Filter ID may come from userland and must be range-checked.
573 * Caller must hold efx->filter_sem for read, and efx->filter_state->lock
574 * for write.
575 */
576 static int efx_mcdi_filter_remove_internal(struct efx_nic *efx,
577 unsigned int priority_mask,
578 u32 filter_id, bool by_index)
579 {
580 unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id);
581 struct efx_mcdi_filter_table *table = efx->filter_state;
582 MCDI_DECLARE_BUF(inbuf,
583 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
584 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
585 struct efx_filter_spec *spec;
586 DEFINE_WAIT(wait);
587 int rc;
588
589 spec = efx_mcdi_filter_entry_spec(table, filter_idx);
590 if (!spec ||
591 (!by_index &&
592 efx_mcdi_filter_pri(table, spec) !=
593 efx_mcdi_filter_get_unsafe_pri(filter_id)))
594 return -ENOENT;
595
596 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
597 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
598 /* Just remove flags */
599 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
600 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
601 return 0;
602 }
603
604 if (!(priority_mask & (1U << spec->priority)))
605 return -ENOENT;
606
607 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
608 /* Reset to an automatic filter */
609
610 struct efx_filter_spec new_spec = *spec;
611
612 new_spec.priority = EFX_FILTER_PRI_AUTO;
613 new_spec.flags = (EFX_FILTER_FLAG_RX |
614 (efx_rss_active(&efx->rss_context.priv) ?
615 EFX_FILTER_FLAG_RX_RSS : 0));
616 new_spec.dmaq_id = 0;
617 new_spec.rss_context = 0;
618 rc = efx_mcdi_filter_push(efx, &new_spec,
619 &table->entry[filter_idx].handle,
620 &efx->rss_context.priv,
621 true);
622
623 if (rc == 0)
624 *spec = new_spec;
625 } else {
626 /* Really remove the filter */
627
628 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
629 efx_mcdi_filter_is_exclusive(spec) ?
630 MC_CMD_FILTER_OP_IN_OP_REMOVE :
631 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
632 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
633 table->entry[filter_idx].handle);
634 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
635 inbuf, sizeof(inbuf), NULL, 0, NULL);
636
637 if ((rc == 0) || (rc == -ENOENT)) {
638 /* Filter removed OK or didn't actually exist */
639 kfree(spec);
640 efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
641 } else {
642 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
643 MC_CMD_FILTER_OP_EXT_IN_LEN,
644 NULL, 0, rc);
645 }
646 }
647
648 return rc;
649 }
650
651 /* Remove filters that weren't renewed. */
652 static void efx_mcdi_filter_remove_old(struct efx_nic *efx)
653 {
654 struct efx_mcdi_filter_table *table = efx->filter_state;
655 int remove_failed = 0;
656 int remove_noent = 0;
657 int rc;
658 int i;
659
660 down_write(&table->lock);
661 for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
662 if (READ_ONCE(table->entry[i].spec) &
663 EFX_EF10_FILTER_FLAG_AUTO_OLD) {
664 rc = efx_mcdi_filter_remove_internal(efx,
665 1U << EFX_FILTER_PRI_AUTO, i, true);
666 if (rc == -ENOENT)
667 remove_noent++;
668 else if (rc)
669 remove_failed++;
670 }
671 }
672 up_write(&table->lock);
673
674 if (remove_failed)
675 netif_info(efx, drv, efx->net_dev,
676 "%s: failed to remove %d filters\n",
677 __func__, remove_failed);
678 if (remove_noent)
679 netif_info(efx, drv, efx->net_dev,
680 "%s: failed to remove %d non-existent filters\n",
681 __func__, remove_noent);
682 }
683
684 int efx_mcdi_filter_remove_safe(struct efx_nic *efx,
685 enum efx_filter_priority priority,
686 u32 filter_id)
687 {
688 struct efx_mcdi_filter_table *table;
689 int rc;
690
691 down_read(&efx->filter_sem);
692 table = efx->filter_state;
693 down_write(&table->lock);
694 rc = efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id,
695 false);
696 up_write(&table->lock);
697 up_read(&efx->filter_sem);
698 return rc;
699 }
700
701 /* Caller must hold efx->filter_sem for read */
702 static void efx_mcdi_filter_remove_unsafe(struct efx_nic *efx,
703 enum efx_filter_priority priority,
704 u32 filter_id)
705 {
706 struct efx_mcdi_filter_table *table = efx->filter_state;
707
708 if (filter_id == EFX_EF10_FILTER_ID_INVALID)
709 return;
710
711 down_write(&table->lock);
712 efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id,
713 true);
714 up_write(&table->lock);
715 }
716
717 int efx_mcdi_filter_get_safe(struct efx_nic *efx,
718 enum efx_filter_priority priority,
719 u32 filter_id, struct efx_filter_spec *spec)
720 {
721 unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id);
722 const struct efx_filter_spec *saved_spec;
723 struct efx_mcdi_filter_table *table;
724 int rc;
725
726 down_read(&efx->filter_sem);
727 table = efx->filter_state;
728 down_read(&table->lock);
729 saved_spec = efx_mcdi_filter_entry_spec(table, filter_idx);
730 if (saved_spec && saved_spec->priority == priority &&
731 efx_mcdi_filter_pri(table, saved_spec) ==
732 efx_mcdi_filter_get_unsafe_pri(filter_id)) {
733 *spec = *saved_spec;
734 rc = 0;
735 } else {
736 rc = -ENOENT;
737 }
738 up_read(&table->lock);
739 up_read(&efx->filter_sem);
740 return rc;
741 }
742
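/*
 * Insert (or renew) one MAC filter per address in the device's unicast
 * or multicast list for this VLAN, recording the resulting IDs in
 * vlan->uc or vlan->mc.  With rollback set, any failure removes the
 * filters already inserted and returns the error so the caller can fall
 * back to a promiscuous default filter.
 */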
743 static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx,
744 struct efx_mcdi_filter_vlan *vlan,
745 bool multicast, bool rollback)
746 {
747 struct efx_mcdi_filter_table *table = efx->filter_state;
748 struct efx_mcdi_dev_addr *addr_list;
749 enum efx_filter_flags filter_flags;
750 struct efx_filter_spec spec;
751 u8 baddr[ETH_ALEN];
752 unsigned int i, j;
753 int addr_count;
754 u16 *ids;
755 int rc;
756
757 if (multicast) {
758 addr_list = table->dev_mc_list;
759 addr_count = table->dev_mc_count;
760 ids = vlan->mc;
761 } else {
762 addr_list = table->dev_uc_list;
763 addr_count = table->dev_uc_count;
764 ids = vlan->uc;
765 }
766
767 filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0;
768
769 /* Insert/renew filters */
770 for (i = 0; i < addr_count; i++) {
771 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
772 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
773 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
774 rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
775 if (rc < 0) {
776 if (rollback) {
777 netif_info(efx, drv, efx->net_dev,
778 "efx_mcdi_filter_insert failed rc=%d\n",
779 rc);
780 /* Fall back to promiscuous */
781 for (j = 0; j < i; j++) {
782 efx_mcdi_filter_remove_unsafe(
783 efx, EFX_FILTER_PRI_AUTO,
784 ids[j]);
785 ids[j] = EFX_EF10_FILTER_ID_INVALID;
786 }
787 return rc;
788 } else {
789 /* keep invalid ID, and carry on */
790 }
791 } else {
792 ids[i] = efx_mcdi_filter_get_unsafe_id(rc);
793 }
794 }
795
796 if (multicast && rollback) {
797 /* Also need an Ethernet broadcast filter */
798 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
799 EFX_EF10_FILTER_ID_INVALID);
800 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
801 eth_broadcast_addr(baddr);
802 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
803 rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
804 if (rc < 0) {
805 netif_warn(efx, drv, efx->net_dev,
806 "Broadcast filter insert failed rc=%d\n", rc);
807 /* Fall back to promiscuous */
808 for (j = 0; j < i; j++) {
809 efx_mcdi_filter_remove_unsafe(
810 efx, EFX_FILTER_PRI_AUTO,
811 ids[j]);
812 ids[j] = EFX_EF10_FILTER_ID_INVALID;
813 }
814 return rc;
815 } else {
816 vlan->default_filters[EFX_EF10_BCAST] =
817 efx_mcdi_filter_get_unsafe_id(rc);
818 }
819 }
820
821 return 0;
822 }
823
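/*
 * Insert a default (unknown unicast or multicast destination) filter,
 * optionally for a specific encapsulation type, and record its ID in the
 * VLAN's default_filters[] slot chosen by the encap-type maps below.
 * For multicast without firmware filter chaining, an Ethernet broadcast
 * filter is inserted as well.
 */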
824 static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
825 struct efx_mcdi_filter_vlan *vlan,
826 enum efx_encap_type encap_type,
827 bool multicast, bool rollback)
828 {
829 struct efx_mcdi_filter_table *table = efx->filter_state;
830 enum efx_filter_flags filter_flags;
831 struct efx_filter_spec spec;
832 u8 baddr[ETH_ALEN];
833 int rc;
834 u16 *id;
835
836 filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0;
837
838 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
839
840 if (multicast)
841 efx_filter_set_mc_def(&spec);
842 else
843 efx_filter_set_uc_def(&spec);
844
845 if (encap_type) {
846 if (efx_has_cap(efx, VXLAN_NVGRE))
847 efx_filter_set_encap_type(&spec, encap_type);
848 else
849 /*
850 * don't insert encap filters on non-supporting
851 * platforms. ID will be left as INVALID.
852 */
853 return 0;
854 }
855
856 if (vlan->vid != EFX_FILTER_VID_UNSPEC)
857 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
858
859 rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
860 if (rc < 0) {
861 const char *um = multicast ? "Multicast" : "Unicast";
862 const char *encap_name = "";
863 const char *encap_ipv = "";
864
865 if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
866 EFX_ENCAP_TYPE_VXLAN)
867 encap_name = "VXLAN ";
868 else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
869 EFX_ENCAP_TYPE_NVGRE)
870 encap_name = "NVGRE ";
871 else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
872 EFX_ENCAP_TYPE_GENEVE)
873 encap_name = "GENEVE ";
874 if (encap_type & EFX_ENCAP_FLAG_IPV6)
875 encap_ipv = "IPv6 ";
876 else if (encap_type)
877 encap_ipv = "IPv4 ";
878
879 /*
880 * unprivileged functions can't insert mismatch filters
881 * for encapsulated or unicast traffic, so downgrade
882 * those warnings to debug.
883 */
884 netif_cond_dbg(efx, drv, efx->net_dev,
885 rc == -EPERM && (encap_type || !multicast), warn,
886 "%s%s%s mismatch filter insert failed rc=%d\n",
887 encap_name, encap_ipv, um, rc);
888 } else if (multicast) {
889 /* mapping from encap types to default filter IDs (multicast) */
890 static enum efx_mcdi_filter_default_filters map[] = {
891 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
892 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
893 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
894 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
895 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
896 EFX_EF10_VXLAN6_MCDEF,
897 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
898 EFX_EF10_NVGRE6_MCDEF,
899 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
900 EFX_EF10_GENEVE6_MCDEF,
901 };
902
903 /* quick bounds check (BCAST result impossible) */
904 BUILD_BUG_ON(EFX_EF10_BCAST != 0);
905 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
906 WARN_ON(1);
907 return -EINVAL;
908 }
909 /* then follow map */
910 id = &vlan->default_filters[map[encap_type]];
911
912 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
913 *id = efx_mcdi_filter_get_unsafe_id(rc);
914 if (!table->mc_chaining && !encap_type) {
915 /* Also need an Ethernet broadcast filter */
916 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
917 filter_flags, 0);
918 eth_broadcast_addr(baddr);
919 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
920 rc = efx_mcdi_filter_insert_locked(efx, &spec, true);
921 if (rc < 0) {
922 netif_warn(efx, drv, efx->net_dev,
923 "Broadcast filter insert failed rc=%d\n",
924 rc);
925 if (rollback) {
926 /* Roll back the mc_def filter */
927 efx_mcdi_filter_remove_unsafe(
928 efx, EFX_FILTER_PRI_AUTO,
929 *id);
930 *id = EFX_EF10_FILTER_ID_INVALID;
931 return rc;
932 }
933 } else {
934 EFX_WARN_ON_PARANOID(
935 vlan->default_filters[EFX_EF10_BCAST] !=
936 EFX_EF10_FILTER_ID_INVALID);
937 vlan->default_filters[EFX_EF10_BCAST] =
938 efx_mcdi_filter_get_unsafe_id(rc);
939 }
940 }
941 rc = 0;
942 } else {
943 /* mapping from encap types to default filter IDs (unicast) */
944 static enum efx_mcdi_filter_default_filters map[] = {
945 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
946 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
947 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
948 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
949 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
950 EFX_EF10_VXLAN6_UCDEF,
951 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
952 EFX_EF10_NVGRE6_UCDEF,
953 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
954 EFX_EF10_GENEVE6_UCDEF,
955 };
956
957 /* quick bounds check (BCAST result impossible) */
958 BUILD_BUG_ON(EFX_EF10_BCAST != 0);
959 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
960 WARN_ON(1);
961 return -EINVAL;
962 }
963 /* then follow map */
964 id = &vlan->default_filters[map[encap_type]];
965 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
966 *id = rc;
967 rc = 0;
968 }
969 return rc;
970 }
971
972 /*
973 * Caller must hold efx->filter_sem for read if race against
974 * efx_mcdi_filter_table_remove() is possible
975 */
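/*
 * Bring the hardware filters for one VLAN into line with the current RX
 * mode: insert or renew the unicast and multicast address-list filters
 * and the various default (mismatch) filters, falling back between the
 * two schemes when insertions fail, as described in the comments below.
 */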
976 static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
977 struct efx_mcdi_filter_vlan *vlan)
978 {
979 struct efx_mcdi_filter_table *table = efx->filter_state;
980
981 /*
982 * Do not install unspecified VID if VLAN filtering is enabled.
983 * Do not install all specified VIDs if VLAN filtering is disabled.
984 */
985 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
986 return;
987
988 /* Insert/renew unicast filters */
989 if (table->uc_promisc) {
990 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
991 false, false);
992 efx_mcdi_filter_insert_addr_list(efx, vlan, false, false);
993 } else {
994 /*
995 * If any of the filters failed to insert, fall back to
996 * promiscuous mode - add in the uc_def filter. But keep
997 * our individual unicast filters.
998 */
999 if (efx_mcdi_filter_insert_addr_list(efx, vlan, false, false))
1000 efx_mcdi_filter_insert_def(efx, vlan,
1001 EFX_ENCAP_TYPE_NONE,
1002 false, false);
1003 }
1004 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
1005 false, false);
1006 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
1007 EFX_ENCAP_FLAG_IPV6,
1008 false, false);
1009 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
1010 false, false);
1011 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
1012 EFX_ENCAP_FLAG_IPV6,
1013 false, false);
1014 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
1015 false, false);
1016 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
1017 EFX_ENCAP_FLAG_IPV6,
1018 false, false);
1019
1020 /*
1021 * Insert/renew multicast filters
1022 *
1023 * If changing promiscuous state with cascaded multicast filters, remove
1024 * old filters first, so that packets are dropped rather than duplicated
1025 */
1026 if (table->mc_chaining && table->mc_promisc_last != table->mc_promisc)
1027 efx_mcdi_filter_remove_old(efx);
1028 if (table->mc_promisc) {
1029 if (table->mc_chaining) {
1030 /*
1031 * If we failed to insert promiscuous filters, rollback
1032 * and fall back to individual multicast filters
1033 */
1034 if (efx_mcdi_filter_insert_def(efx, vlan,
1035 EFX_ENCAP_TYPE_NONE,
1036 true, true)) {
1037 /* Changing promisc state, so remove old filters */
1038 efx_mcdi_filter_remove_old(efx);
1039 efx_mcdi_filter_insert_addr_list(efx, vlan,
1040 true, false);
1041 }
1042 } else {
1043 /*
1044 * If we failed to insert promiscuous filters, don't
1045 * rollback. Regardless, also insert the mc_list,
1046 * unless it's incomplete due to overflow
1047 */
1048 efx_mcdi_filter_insert_def(efx, vlan,
1049 EFX_ENCAP_TYPE_NONE,
1050 true, false);
1051 if (!table->mc_overflow)
1052 efx_mcdi_filter_insert_addr_list(efx, vlan,
1053 true, false);
1054 }
1055 } else {
1056 /*
1057 * If any filters failed to insert, rollback and fall back to
1058 * promiscuous mode - mc_def filter and maybe broadcast. If
1059 * that fails, roll back again and insert as many of our
1060 * individual multicast filters as we can.
1061 */
1062 if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
1063 /* Changing promisc state, so remove old filters */
1064 if (table->mc_chaining)
1065 efx_mcdi_filter_remove_old(efx);
1066 if (efx_mcdi_filter_insert_def(efx, vlan,
1067 EFX_ENCAP_TYPE_NONE,
1068 true, true))
1069 efx_mcdi_filter_insert_addr_list(efx, vlan,
1070 true, false);
1071 }
1072 }
1073 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
1074 true, false);
1075 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
1076 EFX_ENCAP_FLAG_IPV6,
1077 true, false);
1078 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
1079 true, false);
1080 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
1081 EFX_ENCAP_FLAG_IPV6,
1082 true, false);
1083 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
1084 true, false);
1085 efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
1086 EFX_ENCAP_FLAG_IPV6,
1087 true, false);
1088 }
1089
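/*
 * Remove all RX filters at or below the given priority, except automatic
 * (EFX_FILTER_PRI_AUTO) filters.
 */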
1090 int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
1091 enum efx_filter_priority priority)
1092 {
1093 struct efx_mcdi_filter_table *table;
1094 unsigned int priority_mask;
1095 unsigned int i;
1096 int rc;
1097
1098 priority_mask = (((1U << (priority + 1)) - 1) &
1099 ~(1U << EFX_FILTER_PRI_AUTO));
1100
1101 down_read(&efx->filter_sem);
1102 table = efx->filter_state;
1103 down_write(&table->lock);
1104 for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
1105 rc = efx_mcdi_filter_remove_internal(efx, priority_mask,
1106 i, true);
1107 if (rc && rc != -ENOENT)
1108 break;
1109 rc = 0;
1110 }
1111
1112 up_write(&table->lock);
1113 up_read(&efx->filter_sem);
1114 return rc;
1115 }
1116
1117 u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
1118 enum efx_filter_priority priority)
1119 {
1120 struct efx_mcdi_filter_table *table;
1121 unsigned int filter_idx;
1122 s32 count = 0;
1123
1124 down_read(&efx->filter_sem);
1125 table = efx->filter_state;
1126 down_read(&table->lock);
1127 for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
1128 if (table->entry[filter_idx].spec &&
1129 efx_mcdi_filter_entry_spec(table, filter_idx)->priority ==
1130 priority)
1131 ++count;
1132 }
1133 up_read(&table->lock);
1134 up_read(&efx->filter_sem);
1135 return count;
1136 }
1137
1138 u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx)
1139 {
1140 struct efx_mcdi_filter_table *table = efx->filter_state;
1141
1142 return table->rx_match_count * EFX_MCDI_FILTER_TBL_ROWS * 2;
1143 }
1144
1145 s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
1146 enum efx_filter_priority priority,
1147 u32 *buf, u32 size)
1148 {
1149 struct efx_mcdi_filter_table *table;
1150 struct efx_filter_spec *spec;
1151 unsigned int filter_idx;
1152 s32 count = 0;
1153
1154 down_read(&efx->filter_sem);
1155 table = efx->filter_state;
1156 down_read(&table->lock);
1157
1158 for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
1159 spec = efx_mcdi_filter_entry_spec(table, filter_idx);
1160 if (spec && spec->priority == priority) {
1161 if (count == size) {
1162 count = -EMSGSIZE;
1163 break;
1164 }
1165 buf[count++] =
1166 efx_mcdi_filter_make_filter_id(
1167 efx_mcdi_filter_pri(table, spec),
1168 filter_idx);
1169 }
1170 }
1171 up_read(&table->lock);
1172 up_read(&efx->filter_sem);
1173 return count;
1174 }
1175
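/*
 * Inverse of efx_mcdi_filter_mcdi_flags_from_spec(): convert an MCDI
 * match-field bitmask reported by the firmware back into driver match
 * flags, or -EINVAL if it contains bits the driver does not understand.
 */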
1176 static int efx_mcdi_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
1177 {
1178 int match_flags = 0;
1179
1180 #define MAP_FLAG(gen_flag, mcdi_field) do { \
1181 u32 old_mcdi_flags = mcdi_flags; \
1182 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
1183 mcdi_field ## _LBN); \
1184 if (mcdi_flags != old_mcdi_flags) \
1185 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
1186 } while (0)
1187
1188 if (encap) {
1189 /* encap filters must specify encap type */
1190 match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
1191 /* and imply ethertype and ip proto */
1192 mcdi_flags &=
1193 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
1194 mcdi_flags &=
1195 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
1196 /* VLAN tags refer to the outer packet */
1197 MAP_FLAG(INNER_VID, INNER_VLAN);
1198 MAP_FLAG(OUTER_VID, OUTER_VLAN);
1199 /* everything else refers to the inner packet */
1200 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
1201 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
1202 MAP_FLAG(REM_HOST, IFRM_SRC_IP);
1203 MAP_FLAG(LOC_HOST, IFRM_DST_IP);
1204 MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
1205 MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
1206 MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
1207 MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
1208 MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
1209 MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
1210 } else {
1211 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
1212 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
1213 MAP_FLAG(REM_HOST, SRC_IP);
1214 MAP_FLAG(LOC_HOST, DST_IP);
1215 MAP_FLAG(REM_MAC, SRC_MAC);
1216 MAP_FLAG(REM_PORT, SRC_PORT);
1217 MAP_FLAG(LOC_MAC, DST_MAC);
1218 MAP_FLAG(LOC_PORT, DST_PORT);
1219 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
1220 MAP_FLAG(INNER_VID, INNER_VLAN);
1221 MAP_FLAG(OUTER_VID, OUTER_VLAN);
1222 MAP_FLAG(IP_PROTO, IP_PROTO);
1223 }
1224 #undef MAP_FLAG
1225
1226 /* Did we map them all? */
1227 if (mcdi_flags)
1228 return -EINVAL;
1229
1230 return match_flags;
1231 }
1232
1233 bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
1234 bool encap,
1235 enum efx_filter_match_flags match_flags)
1236 {
1237 unsigned int match_pri;
1238 int mf;
1239
1240 for (match_pri = 0;
1241 match_pri < table->rx_match_count;
1242 match_pri++) {
1243 mf = efx_mcdi_filter_match_flags_from_mcdi(encap,
1244 table->rx_match_mcdi_flags[match_pri]);
1245 if (mf == match_flags)
1246 return true;
1247 }
1248
1249 return false;
1250 }
1251
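/*
 * Ask the MC (MC_CMD_GET_PARSER_DISP_INFO) which RX match combinations
 * it supports and record those the driver can express in
 * table->rx_match_mcdi_flags[], in the firmware's priority order.
 */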
1252 static int
1253 efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
1254 struct efx_mcdi_filter_table *table,
1255 bool encap)
1256 {
1257 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
1258 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
1259 unsigned int pd_match_pri, pd_match_count;
1260 size_t outlen;
1261 int rc;
1262
1263 /* Find out which RX filter types are supported, and their priorities */
1264 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
1265 encap ?
1266 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
1267 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
1268 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
1269 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
1270 &outlen);
1271 if (rc)
1272 return rc;
1273
1274 pd_match_count = MCDI_VAR_ARRAY_LEN(
1275 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
1276
1277 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
1278 u32 mcdi_flags =
1279 MCDI_ARRAY_DWORD(
1280 outbuf,
1281 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
1282 pd_match_pri);
1283 rc = efx_mcdi_filter_match_flags_from_mcdi(encap, mcdi_flags);
1284 if (rc < 0) {
1285 netif_dbg(efx, probe, efx->net_dev,
1286 "%s: fw flags %#x pri %u not supported in driver\n",
1287 __func__, mcdi_flags, pd_match_pri);
1288 } else {
1289 netif_dbg(efx, probe, efx->net_dev,
1290 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
1291 __func__, mcdi_flags, pd_match_pri,
1292 rc, table->rx_match_count);
1293 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
1294 table->rx_match_count++;
1295 }
1296 }
1297
1298 return 0;
1299 }
1300
1301 int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining)
1302 {
1303 struct net_device *net_dev = efx->net_dev;
1304 struct efx_mcdi_filter_table *table;
1305 int rc;
1306
1307 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
1308 return -EINVAL;
1309
1310 if (efx->filter_state) /* already probed */
1311 return 0;
1312
1313 table = kzalloc(sizeof(*table), GFP_KERNEL);
1314 if (!table)
1315 return -ENOMEM;
1316
1317 table->mc_chaining = multicast_chaining;
1318 table->rx_match_count = 0;
1319 rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
1320 if (rc)
1321 goto fail;
1322 if (efx_has_cap(efx, VXLAN_NVGRE))
1323 rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
1324 if (rc)
1325 goto fail;
1326 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1327 !(efx_mcdi_filter_match_supported(table, false,
1328 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
1329 efx_mcdi_filter_match_supported(table, false,
1330 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
1331 netif_info(efx, probe, net_dev,
1332 "VLAN filters are not supported in this firmware variant\n");
1333 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1334 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1335 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1336 }
1337
1338 table->entry = vzalloc(array_size(EFX_MCDI_FILTER_TBL_ROWS,
1339 sizeof(*table->entry)));
1340 if (!table->entry) {
1341 rc = -ENOMEM;
1342 goto fail;
1343 }
1344
1345 table->mc_promisc_last = false;
1346 table->vlan_filter =
1347 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
1348 INIT_LIST_HEAD(&table->vlan_list);
1349 init_rwsem(&table->lock);
1350
1351 efx->filter_state = table;
1352
1353 return 0;
1354 fail:
1355 kfree(table);
1356 return rc;
1357 }
1358
1359 void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx)
1360 {
1361 struct efx_mcdi_filter_table *table = efx->filter_state;
1362
1363 if (table) {
1364 table->must_restore_filters = true;
1365 table->must_restore_rss_contexts = true;
1366 }
1367 }
1368
1369 /*
1370 * Caller must hold efx->filter_sem for read if race against
1371 * efx_mcdi_filter_table_remove() is possible
1372 */
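/*
 * Re-push every filter in the software table to the MC, typically after
 * an MC reboot.  Filters whose match type or RSS context no longer
 * exists are dropped from the software table instead of being restored.
 */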
1373 void efx_mcdi_filter_table_restore(struct efx_nic *efx)
1374 {
1375 struct efx_mcdi_filter_table *table = efx->filter_state;
1376 unsigned int invalid_filters = 0, failed = 0;
1377 struct efx_mcdi_filter_vlan *vlan;
1378 struct efx_rss_context_priv *ctx;
1379 struct efx_filter_spec *spec;
1380 unsigned int filter_idx;
1381 u32 mcdi_flags;
1382 int match_pri;
1383 int rc, i;
1384
1385 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
1386
1387 if (!table || !table->must_restore_filters)
1388 return;
1389
1390 down_write(&table->lock);
1391 mutex_lock(&efx->net_dev->ethtool->rss_lock);
1392
1393 for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
1394 spec = efx_mcdi_filter_entry_spec(table, filter_idx);
1395 if (!spec)
1396 continue;
1397
1398 mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
1399 match_pri = 0;
1400 while (match_pri < table->rx_match_count &&
1401 table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
1402 ++match_pri;
1403 if (match_pri >= table->rx_match_count) {
1404 invalid_filters++;
1405 goto not_restored;
1406 }
1407 if (spec->rss_context)
1408 ctx = efx_find_rss_context_entry(efx, spec->rss_context);
1409 else
1410 ctx = &efx->rss_context.priv;
1411 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
1412 if (!ctx) {
1413 netif_warn(efx, drv, efx->net_dev,
1414 "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
1415 spec->rss_context);
1416 invalid_filters++;
1417 goto not_restored;
1418 }
1419 if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
1420 netif_warn(efx, drv, efx->net_dev,
1421 "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
1422 spec->rss_context);
1423 invalid_filters++;
1424 goto not_restored;
1425 }
1426 }
1427
1428 rc = efx_mcdi_filter_push(efx, spec,
1429 &table->entry[filter_idx].handle,
1430 ctx, false);
1431 if (rc)
1432 failed++;
1433
1434 if (rc) {
1435 not_restored:
1436 list_for_each_entry(vlan, &table->vlan_list, list)
1437 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
1438 if (vlan->default_filters[i] == filter_idx)
1439 vlan->default_filters[i] =
1440 EFX_EF10_FILTER_ID_INVALID;
1441
1442 kfree(spec);
1443 efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
1444 }
1445 }
1446
1447 mutex_unlock(&efx->net_dev->ethtool->rss_lock);
1448 up_write(&table->lock);
1449
1450 /*
1451 * This can happen validly if the MC's capabilities have changed, so
1452 * is not an error.
1453 */
1454 if (invalid_filters)
1455 netif_dbg(efx, drv, efx->net_dev,
1456 "Did not restore %u filters that are now unsupported.\n",
1457 invalid_filters);
1458
1459 if (failed)
1460 netif_err(efx, hw, efx->net_dev,
1461 "unable to restore %u filters\n", failed);
1462 else
1463 table->must_restore_filters = false;
1464 }
1465
1466 void efx_mcdi_filter_table_down(struct efx_nic *efx)
1467 {
1468 struct efx_mcdi_filter_table *table = efx->filter_state;
1469 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
1470 struct efx_filter_spec *spec;
1471 unsigned int filter_idx;
1472 int rc;
1473
1474 if (!table)
1475 return;
1476
1477 efx_mcdi_filter_cleanup_vlans(efx);
1478
1479 for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
1480 spec = efx_mcdi_filter_entry_spec(table, filter_idx);
1481 if (!spec)
1482 continue;
1483
1484 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1485 efx_mcdi_filter_is_exclusive(spec) ?
1486 MC_CMD_FILTER_OP_IN_OP_REMOVE :
1487 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
1488 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
1489 table->entry[filter_idx].handle);
1490 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
1491 sizeof(inbuf), NULL, 0, NULL);
1492 if (rc)
1493 netif_info(efx, drv, efx->net_dev,
1494 "%s: filter %04x remove failed\n",
1495 __func__, filter_idx);
1496 kfree(spec);
1497 }
1498 }
1499
1500 void efx_mcdi_filter_table_remove(struct efx_nic *efx)
1501 {
1502 struct efx_mcdi_filter_table *table = efx->filter_state;
1503
1504 efx_mcdi_filter_table_down(efx);
1505
1506 efx->filter_state = NULL;
1507 /*
1508 * If we were called without locking, then it's not safe to free
1509 * the table as others might be using it. So we just WARN, leak
1510 * the memory, and potentially get an inconsistent filter table
1511 * state.
1512 * This should never actually happen.
1513 */
1514 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
1515 return;
1516
1517 if (!table)
1518 return;
1519
1520 vfree(table->entry);
1521 kfree(table);
1522 }
1523
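/*
 * Mark a single filter ID as AUTO_OLD, so that a later
 * efx_mcdi_filter_remove_old() removes it unless it is renewed first,
 * and invalidate the caller's copy of the ID.
 */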
1524 static void efx_mcdi_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
1525 {
1526 struct efx_mcdi_filter_table *table = efx->filter_state;
1527 unsigned int filter_idx;
1528
1529 efx_rwsem_assert_write_locked(&table->lock);
1530
1531 if (*id != EFX_EF10_FILTER_ID_INVALID) {
1532 filter_idx = efx_mcdi_filter_get_unsafe_id(*id);
1533 if (!table->entry[filter_idx].spec)
1534 netif_dbg(efx, drv, efx->net_dev,
1535 "marked null spec old %04x:%04x\n", *id,
1536 filter_idx);
1537 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
1538 *id = EFX_EF10_FILTER_ID_INVALID;
1539 }
1540 }
1541
1542 /* Mark old per-VLAN filters that may need to be removed */
1543 static void _efx_mcdi_filter_vlan_mark_old(struct efx_nic *efx,
1544 struct efx_mcdi_filter_vlan *vlan)
1545 {
1546 struct efx_mcdi_filter_table *table = efx->filter_state;
1547 unsigned int i;
1548
1549 for (i = 0; i < table->dev_uc_count; i++)
1550 efx_mcdi_filter_mark_one_old(efx, &vlan->uc[i]);
1551 for (i = 0; i < table->dev_mc_count; i++)
1552 efx_mcdi_filter_mark_one_old(efx, &vlan->mc[i]);
1553 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
1554 efx_mcdi_filter_mark_one_old(efx, &vlan->default_filters[i]);
1555 }
1556
1557 /*
1558 * Mark old filters that may need to be removed.
1559 * Caller must hold efx->filter_sem for read if race against
1560 * efx_mcdi_filter_table_remove() is possible
1561 */
1562 static void efx_mcdi_filter_mark_old(struct efx_nic *efx)
1563 {
1564 struct efx_mcdi_filter_table *table = efx->filter_state;
1565 struct efx_mcdi_filter_vlan *vlan;
1566
1567 down_write(&table->lock);
1568 list_for_each_entry(vlan, &table->vlan_list, list)
1569 _efx_mcdi_filter_vlan_mark_old(efx, vlan);
1570 up_write(&table->lock);
1571 }
1572
1573 int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid)
1574 {
1575 struct efx_mcdi_filter_table *table = efx->filter_state;
1576 struct efx_mcdi_filter_vlan *vlan;
1577 unsigned int i;
1578
1579 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
1580 return -EINVAL;
1581
1582 vlan = efx_mcdi_filter_find_vlan(efx, vid);
1583 if (WARN_ON(vlan)) {
1584 netif_err(efx, drv, efx->net_dev,
1585 "VLAN %u already added\n", vid);
1586 return -EALREADY;
1587 }
1588
1589 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1590 if (!vlan)
1591 return -ENOMEM;
1592
1593 vlan->vid = vid;
1594
1595 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
1596 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
1597 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
1598 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
1599 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
1600 vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
1601
1602 list_add_tail(&vlan->list, &table->vlan_list);
1603
1604 if (efx_dev_registered(efx))
1605 efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);
1606
1607 return 0;
1608 }
1609
1610 static void efx_mcdi_filter_del_vlan_internal(struct efx_nic *efx,
1611 struct efx_mcdi_filter_vlan *vlan)
1612 {
1613 unsigned int i;
1614
1615 /* See comment in efx_mcdi_filter_table_remove() */
1616 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
1617 return;
1618
1619 list_del(&vlan->list);
1620
1621 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
1622 efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
1623 vlan->uc[i]);
1624 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
1625 efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
1626 vlan->mc[i]);
1627 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
1628 if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
1629 efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
1630 vlan->default_filters[i]);
1631
1632 kfree(vlan);
1633 }
1634
1635 void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid)
1636 {
1637 struct efx_mcdi_filter_vlan *vlan;
1638
1639 /* See comment in efx_mcdi_filter_table_remove() */
1640 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
1641 return;
1642
1643 vlan = efx_mcdi_filter_find_vlan(efx, vid);
1644 if (!vlan) {
1645 netif_err(efx, drv, efx->net_dev,
1646 "VLAN %u not found in filter state\n", vid);
1647 return;
1648 }
1649
1650 efx_mcdi_filter_del_vlan_internal(efx, vlan);
1651 }
1652
1653 struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx,
1654 u16 vid)
1655 {
1656 struct efx_mcdi_filter_table *table = efx->filter_state;
1657 struct efx_mcdi_filter_vlan *vlan;
1658
1659 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
1660
1661 list_for_each_entry(vlan, &table->vlan_list, list) {
1662 if (vlan->vid == vid)
1663 return vlan;
1664 }
1665
1666 return NULL;
1667 }
1668
1669 void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx)
1670 {
1671 struct efx_mcdi_filter_table *table = efx->filter_state;
1672 struct efx_mcdi_filter_vlan *vlan, *next_vlan;
1673
1674 /* See comment in efx_mcdi_filter_table_remove() */
1675 if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
1676 return;
1677
1678 if (!table)
1679 return;
1680
1681 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
1682 efx_mcdi_filter_del_vlan_internal(efx, vlan);
1683 }
1684
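/*
 * Copy the net_device's unicast address list (plus the primary station
 * address) into the filter table, switching to unicast-promiscuous mode
 * if the list overflows EFX_EF10_FILTER_DEV_UC_MAX entries.
 */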
1685 static void efx_mcdi_filter_uc_addr_list(struct efx_nic *efx)
1686 {
1687 struct efx_mcdi_filter_table *table = efx->filter_state;
1688 struct net_device *net_dev = efx->net_dev;
1689 struct netdev_hw_addr *uc;
1690 unsigned int i;
1691
1692 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
1693 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
1694 i = 1;
1695 netdev_for_each_uc_addr(uc, net_dev) {
1696 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
1697 table->uc_promisc = true;
1698 break;
1699 }
1700 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
1701 i++;
1702 }
1703
1704 table->dev_uc_count = i;
1705 }
1706
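/* Copy the netdev multicast address list into the filter table state.
 * If the list does not fit in EFX_EF10_FILTER_DEV_MC_MAX entries, record
 * the overflow and fall back to multicast promiscuous mode.
 */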
1707 static void efx_mcdi_filter_mc_addr_list(struct efx_nic *efx)
1708 {
1709 struct efx_mcdi_filter_table *table = efx->filter_state;
1710 struct net_device *net_dev = efx->net_dev;
1711 struct netdev_hw_addr *mc;
1712 unsigned int i;
1713
1714 table->mc_overflow = false;
1715 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
1716
1717 i = 0;
1718 netdev_for_each_mc_addr(mc, net_dev) {
1719 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
1720 table->mc_promisc = true;
1721 table->mc_overflow = true;
1722 break;
1723 }
1724 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
1725 i++;
1726 }
1727
1728 table->dev_mc_count = i;
1729 }
1730
1731 /*
1732 * Caller must hold efx->filter_sem for read if race against
1733 * efx_mcdi_filter_table_remove() is possible
1734 */
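/* Bring the hardware RX filters into line with the current netdev RX
 * mode: refresh the unicast and multicast address filters on every
 * configured VLAN and remove any old filters that are no longer wanted.
 */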
1735 void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx)
1736 {
1737 struct efx_mcdi_filter_table *table = efx->filter_state;
1738 struct net_device *net_dev = efx->net_dev;
1739 struct efx_mcdi_filter_vlan *vlan;
1740 bool vlan_filter;
1741
1742 if (!efx_dev_registered(efx))
1743 return;
1744
1745 if (!table)
1746 return;
1747
1748 efx_mcdi_filter_mark_old(efx);
1749
1750 /*
1751 * Copy/convert the address lists; add the primary station
1752 * address and broadcast address
1753 */
1754 netif_addr_lock_bh(net_dev);
1755 efx_mcdi_filter_uc_addr_list(efx);
1756 efx_mcdi_filter_mc_addr_list(efx);
1757 netif_addr_unlock_bh(net_dev);
1758
1759 /*
1760 * If VLAN filtering changes, all old filters are finally removed.
1761 * Do it in advance to avoid conflicts for unicast untagged and
1762 * VLAN 0 tagged filters.
1763 */
1764 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
1765 if (table->vlan_filter != vlan_filter) {
1766 table->vlan_filter = vlan_filter;
1767 efx_mcdi_filter_remove_old(efx);
1768 }
1769
1770 list_for_each_entry(vlan, &table->vlan_list, list)
1771 efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);
1772
1773 efx_mcdi_filter_remove_old(efx);
1774 table->mc_promisc_last = table->mc_promisc;
1775 }
1776
1777 #ifdef CONFIG_RFS_ACCEL
1778
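/* Check whether the ARFS filter at @filter_idx can be expired.  Returns
 * true if the filter was removed (or no longer needs to be tracked),
 * false if the flow is still active and the filter has been kept.
 */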
1779 bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
1780 unsigned int filter_idx)
1781 {
1782 struct efx_filter_spec *spec, saved_spec;
1783 struct efx_mcdi_filter_table *table;
1784 struct efx_arfs_rule *rule = NULL;
1785 bool ret = true, force = false;
1786 u16 arfs_id;
1787
1788 down_read(&efx->filter_sem);
1789 table = efx->filter_state;
1790 down_write(&table->lock);
1791 spec = efx_mcdi_filter_entry_spec(table, filter_idx);
1792
1793 if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
1794 goto out_unlock;
1795
1796 spin_lock_bh(&efx->rps_hash_lock);
1797 if (!efx->rps_hash_table) {
1798 /* In the absence of the table, we always return 0 to ARFS. */
1799 arfs_id = 0;
1800 } else {
1801 rule = efx_rps_hash_find(efx, spec);
1802 if (!rule)
1803 /* ARFS table doesn't know of this filter, so remove it */
1804 goto expire;
1805 arfs_id = rule->arfs_id;
1806 ret = efx_rps_check_rule(rule, filter_idx, &force);
1807 if (force)
1808 goto expire;
1809 if (!ret) {
1810 spin_unlock_bh(&efx->rps_hash_lock);
1811 goto out_unlock;
1812 }
1813 }
1814 if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
1815 ret = false;
1816 else if (rule)
1817 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
1818 expire:
1819 saved_spec = *spec; /* remove operation will kfree spec */
1820 spin_unlock_bh(&efx->rps_hash_lock);
1821 /*
1822 * At this point (since we dropped the lock), another thread might queue
1823 * up a fresh insertion request (but the actual insertion will be held
1824 * up by our possession of the filter table lock). In that case, it
1825 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
1826 * the rule is not removed by efx_rps_hash_del() below.
1827 */
1828 if (ret)
1829 ret = efx_mcdi_filter_remove_internal(efx, 1U << spec->priority,
1830 filter_idx, true) == 0;
1831 /*
1832 * While we can't safely dereference rule (we dropped the lock), we can
1833 * still test it for NULL.
1834 */
1835 if (ret && rule) {
1836 /* Expiring, so remove entry from ARFS table */
1837 spin_lock_bh(&efx->rps_hash_lock);
1838 efx_rps_hash_del(efx, &saved_spec);
1839 spin_unlock_bh(&efx->rps_hash_lock);
1840 }
1841 out_unlock:
1842 up_write(&table->lock);
1843 up_read(&efx->filter_sem);
1844 return ret;
1845 }
1846
1847 #endif /* CONFIG_RFS_ACCEL */
1848
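/* RSS context flag layout: the per-traffic-class RSS_MODE fields select
 * which packet fields are hashed.  The default used below hashes source
 * and destination addresses plus ports (4-tuple) for TCP, and addresses
 * only (2-tuple) for UDP and other IP traffic, matching the firmware
 * default for a newly-allocated context.
 */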
1849 #define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
1850 1 << RSS_MODE_HASH_DST_ADDR_LBN)
1851 #define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\
1852 1 << RSS_MODE_HASH_DST_PORT_LBN)
1853 #define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
1854 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
1855 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
1856 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
1857 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
1858 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
1859 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
1860 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
1861 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
1862 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
1863
1864 static int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
1865 u32 *flags)
1866 {
1867 /*
1868 * Firmware had a bug (sfc bug 61952) where it would not actually
1869 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
1870 * This meant that it would always contain whatever was previously
1871 * in the MCDI buffer. Fortunately, all firmware versions with
1872 * this bug have the same default flags value for a newly-allocated
1873 * RSS context, and the only time we want to get the flags is just
1874 * after allocating. Moreover, the response has a 32-bit hole
1875 * where the context ID would be in the request, so we can use an
1876 * overlength buffer in the request and pre-fill the flags field
1877 * with what we believe the default to be. Thus if the firmware
1878 * has the bug, it will leave our pre-filled value in the flags
1879 * field of the response, and we will get the right answer.
1880 *
1881 * However, this does mean that this function should NOT be used if
1882 * the RSS context flags might not be their defaults - it is ONLY
1883 * reliably correct for a newly-allocated RSS context.
1884 */
1885 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
1886 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
1887 size_t outlen;
1888 int rc;
1889
1890 /* Check we have a hole for the context ID */
1891 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
1892 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
1893 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
1894 RSS_CONTEXT_FLAGS_DEFAULT);
1895 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
1896 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
1897 if (rc == 0) {
1898 if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
1899 rc = -EIO;
1900 else
1901 *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
1902 }
1903 return rc;
1904 }
1905
1906 /*
1907 * Attempt to enable 4-tuple UDP hashing on the specified RSS context.
1908 * If we fail, we just leave the RSS context at its default hash settings,
1909 * which is safe but may slightly reduce performance.
1910 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
1911 * just need to set the UDP ports flags (for both IP versions).
1912 */
1913 static void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
1914 struct efx_rss_context_priv *ctx)
1915 {
1916 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
1917 u32 flags;
1918
1919 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);
1920
1921 if (efx_mcdi_get_rss_context_flags(efx, ctx->context_id, &flags) != 0)
1922 return;
1923 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
1924 ctx->context_id);
1925 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
1926 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
1927 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
1928 if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
1929 NULL, 0, NULL))
1930 /* Succeeded, so UDP 4-tuple is now enabled */
1931 ctx->rx_hash_udp_4tuple = true;
1932 }
1933
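/* Allocate an RSS context.  Exclusive contexts use the full rss_spread;
 * shared contexts are limited to a power-of-two size of at most
 * EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE, and a spread of 1 needs no
 * context at all.
 */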
1934 static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
1935 struct efx_rss_context_priv *ctx,
1936 unsigned *context_size)
1937 {
1938 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
1939 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
1940 size_t outlen;
1941 int rc;
1942 u32 alloc_type = exclusive ?
1943 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
1944 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
1945 unsigned rss_spread = exclusive ?
1946 efx->rss_spread :
1947 min(rounddown_pow_of_two(efx->rss_spread),
1948 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
1949
1950 if (!exclusive && rss_spread == 1) {
1951 ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
1952 if (context_size)
1953 *context_size = 1;
1954 return 0;
1955 }
1956
1957 if (efx_has_cap(efx, RX_RSS_LIMITED))
1958 return -EOPNOTSUPP;
1959
1960 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
1961 efx->vport_id);
1962 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
1963 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
1964
1965 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
1966 outbuf, sizeof(outbuf), &outlen);
1967 if (rc != 0)
1968 return rc;
1969
1970 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
1971 return -EIO;
1972
1973 ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
1974
1975 if (context_size)
1976 *context_size = rss_spread;
1977
1978 if (efx_has_cap(efx, ADDITIONAL_RSS_MODES))
1979 efx_mcdi_set_rss_context_flags(efx, ctx);
1980
1981 return 0;
1982 }
1983
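/* Tell the MC to free a previously allocated RSS context. */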
1984 static int efx_mcdi_filter_free_rss_context(struct efx_nic *efx, u32 context)
1985 {
1986 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
1987
1988 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
1989 context);
1990 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
1991 NULL, 0, NULL);
1992 }
1993
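/* Program the indirection table and Toeplitz hash key of an existing RSS
 * context.
 */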
1994 static int efx_mcdi_filter_populate_rss_table(struct efx_nic *efx, u32 context,
1995 const u32 *rx_indir_table, const u8 *key)
1996 {
1997 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1998 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1999 int i, rc;
2000
2001 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
2002 context);
2003 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
2004 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
2005
2006 /* This iterates over the length of efx->rss_context.rx_indir_table, but
2007 * copies bytes from rx_indir_table. That's because the latter is a
2008 * pointer rather than an array, but should have the same length.
2009 * The efx->rss_context.rx_hash_key loop below is similar.
2010 */
2011 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
2012 MCDI_PTR(tablebuf,
2013 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
2014 (u8) rx_indir_table[i];
2015
2016 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
2017 sizeof(tablebuf), NULL, 0, NULL);
2018 if (rc != 0)
2019 return rc;
2020
2021 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
2022 context);
2023 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
2024 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2025 for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
2026 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
2027
2028 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
2029 sizeof(keybuf), NULL, 0, NULL);
2030 }
2031
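/* Free the default RSS context, if any, and mark it invalid. */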
2032 void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
2033 {
2034 int rc;
2035
2036 if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
2037 rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id);
2038 WARN_ON(rc != 0);
2039 }
2040 efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
2041 }
2042
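/* Use a shared RSS context for the default configuration.  A shared
 * context cannot take a custom indirection table, so the software copy is
 * reset to the default layout.
 */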
2043 static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
2044 unsigned *context_size)
2045 {
2046 struct efx_mcdi_filter_table *table = efx->filter_state;
2047 int rc = efx_mcdi_filter_alloc_rss_context(efx, false,
2048 &efx->rss_context.priv,
2049 context_size);
2050
2051 if (rc != 0)
2052 return rc;
2053
2054 table->rx_rss_context_exclusive = false;
2055 efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
2056 return 0;
2057 }
2058
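/* Allocate an exclusive RSS context if we do not already own one, program
 * it with the given indirection table and key, and update the software
 * copies.  On failure the previous context (if any) is kept.
 */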
2059 static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
2060 const u32 *rx_indir_table,
2061 const u8 *key)
2062 {
2063 u32 old_rx_rss_context = efx->rss_context.priv.context_id;
2064 struct efx_mcdi_filter_table *table = efx->filter_state;
2065 int rc;
2066
2067 if (efx->rss_context.priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
2068 !table->rx_rss_context_exclusive) {
2069 rc = efx_mcdi_filter_alloc_rss_context(efx, true,
2070 &efx->rss_context.priv,
2071 NULL);
2072 if (rc == -EOPNOTSUPP)
2073 return rc;
2074 else if (rc != 0)
2075 goto fail1;
2076 }
2077
2078 rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.priv.context_id,
2079 rx_indir_table, key);
2080 if (rc != 0)
2081 goto fail2;
2082
2083 if (efx->rss_context.priv.context_id != old_rx_rss_context &&
2084 old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
2085 WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
2086 table->rx_rss_context_exclusive = true;
2087 if (rx_indir_table != efx->rss_context.rx_indir_table)
2088 memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
2089 sizeof(efx->rss_context.rx_indir_table));
2090 if (key != efx->rss_context.rx_hash_key)
2091 memcpy(efx->rss_context.rx_hash_key, key,
2092 efx->type->rx_hash_key_size);
2093
2094 return 0;
2095
2096 fail2:
2097 if (old_rx_rss_context != efx->rss_context.priv.context_id) {
2098 WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id) != 0);
2099 efx->rss_context.priv.context_id = old_rx_rss_context;
2100 }
2101 fail1:
2102 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2103 return rc;
2104 }
2105
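/* Apply (or, if @delete, remove) the hardware state for an additional
 * RSS context.  Caller must hold the ethtool rss_lock.
 */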
2106 int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
2107 struct efx_rss_context_priv *ctx,
2108 const u32 *rx_indir_table,
2109 const u8 *key, bool delete)
2110 {
2111 int rc;
2112
2113 WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
2114
2115 if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
2116 if (delete)
2117 /* already wasn't in HW, nothing to do */
2118 return 0;
2119 rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
2120 if (rc)
2121 return rc;
2122 }
2123
2124 if (delete) /* Delete this context */
2125 return efx_mcdi_filter_free_rss_context(efx, ctx->context_id);
2126
2127 return efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
2128 rx_indir_table, key);
2129 }
2130
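/* Read an RSS context's indirection table and hash key back from the MC.
 * Caller must hold the ethtool rss_lock.
 */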
2131 int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
2132 struct efx_rss_context *ctx)
2133 {
2134 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
2135 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
2136 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
2137 size_t outlen;
2138 int rc, i;
2139
2140 WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
2141
2142 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
2143 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
2144
2145 if (ctx->priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
2146 return -ENOENT;
2147
2148 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
2149 ctx->priv.context_id);
2150 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
2151 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
2152 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
2153 tablebuf, sizeof(tablebuf), &outlen);
2154 if (rc != 0)
2155 return rc;
2156
2157 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
2158 return -EIO;
2159
2160 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
2161 ctx->rx_indir_table[i] = MCDI_PTR(tablebuf,
2162 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
2163
2164 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
2165 ctx->priv.context_id);
2166 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
2167 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
2168 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
2169 keybuf, sizeof(keybuf), &outlen);
2170 if (rc != 0)
2171 return rc;
2172
2173 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
2174 return -EIO;
2175
2176 for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i)
2177 ctx->rx_hash_key[i] = MCDI_PTR(
2178 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
2179
2180 return 0;
2181 }
2182
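/* Read back the default RSS context, taking the ethtool rss_lock. */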
2183 int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
2184 {
2185 int rc;
2186
2187 mutex_lock(&efx->net_dev->ethtool->rss_lock);
2188 rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
2189 mutex_unlock(&efx->net_dev->ethtool->rss_lock);
2190 return rc;
2191 }
2192
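/* Re-allocate and re-program all additional RSS contexts after the
 * hardware state has been lost (e.g. across an MC reboot).  Failures are
 * only logged; the corresponding RSS filters may then fail to be applied.
 */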
2193 void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
2194 {
2195 struct efx_mcdi_filter_table *table = efx->filter_state;
2196 struct ethtool_rxfh_context *ctx;
2197 unsigned long context;
2198 int rc;
2199
2200 WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
2201
2202 if (!table->must_restore_rss_contexts)
2203 return;
2204
2205 xa_for_each(&efx->net_dev->ethtool->rss_ctx, context, ctx) {
2206 struct efx_rss_context_priv *priv;
2207 u32 *indir;
2208 u8 *key;
2209
2210 priv = ethtool_rxfh_context_priv(ctx);
2211 /* previous NIC RSS context is gone */
2212 priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
2213 /* so try to allocate a new one */
2214 indir = ethtool_rxfh_context_indir(ctx);
2215 key = ethtool_rxfh_context_key(ctx);
2216 rc = efx_mcdi_rx_push_rss_context_config(efx, priv, indir, key,
2217 false);
2218 if (rc)
2219 netif_warn(efx, probe, efx->net_dev,
2220 "failed to restore RSS context %lu, rc=%d"
2221 "; RSS filters may fail to be applied\n",
2222 context, rc);
2223 }
2224 table->must_restore_rss_contexts = false;
2225 }
2226
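/* Push the default RSS configuration for a PF.  Prefer an exclusive RSS
 * context; if the firmware cannot allocate one (-ENOBUFS) and the request
 * did not come from the user, fall back to a shared context and warn if
 * the requested spread or indirection table could not be honoured.
 */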
2227 int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
2228 const u32 *rx_indir_table,
2229 const u8 *key)
2230 {
2231 int rc;
2232
2233 if (efx->rss_spread == 1)
2234 return 0;
2235
2236 if (!key)
2237 key = efx->rss_context.rx_hash_key;
2238
2239 rc = efx_mcdi_filter_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
2240
2241 if (rc == -ENOBUFS && !user) {
2242 unsigned context_size;
2243 bool mismatch = false;
2244 size_t i;
2245
2246 for (i = 0;
2247 i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch;
2248 i++)
2249 mismatch = rx_indir_table[i] !=
2250 ethtool_rxfh_indir_default(i, efx->rss_spread);
2251
2252 rc = efx_mcdi_filter_rx_push_shared_rss_config(efx, &context_size);
2253 if (rc == 0) {
2254 if (context_size != efx->rss_spread)
2255 netif_warn(efx, probe, efx->net_dev,
2256 "Could not allocate an exclusive RSS"
2257 " context; allocated a shared one of"
2258 " different size."
2259 " Wanted %u, got %u.\n",
2260 efx->rss_spread, context_size);
2261 else if (mismatch)
2262 netif_warn(efx, probe, efx->net_dev,
2263 "Could not allocate an exclusive RSS"
2264 " context; allocated a shared one but"
2265 " could not apply custom"
2266 " indirection.\n");
2267 else
2268 netif_info(efx, probe, efx->net_dev,
2269 "Could not allocate an exclusive RSS"
2270 " context; allocated a shared one.\n");
2271 }
2272 }
2273 return rc;
2274 }
2275
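/* VFs can only use a shared RSS context, so user-provided RSS
 * configuration is rejected.
 */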
2276 int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
2277 const u32 *rx_indir_table
2278 __attribute__ ((unused)),
2279 const u8 *key
2280 __attribute__ ((unused)))
2281 {
2282 if (user)
2283 return -EOPNOTSUPP;
2284 if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
2285 return 0;
2286 return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
2287 }
2288
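/* Change the RSS spread and, if a filter table exists, rebuild and push
 * the default indirection table for the new spread.
 */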
2289 int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
2290 unsigned int rss_spread)
2291 {
2292 int rc = 0;
2293
2294 if (efx->rss_spread == rss_spread)
2295 return 0;
2296
2297 efx->rss_spread = rss_spread;
2298 if (!efx->filter_state)
2299 return 0;
2300
2301 efx_mcdi_rx_free_indir_table(efx);
2302 if (rss_spread > 1) {
2303 efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
2304 rc = efx->type->rx_push_rss_config(efx, false,
2305 efx->rss_context.rx_indir_table, NULL);
2306 }
2307 return rc;
2308 }
2309