// SPDX-License-Identifier: GPL-2.0
/* Copyright 2020, NXP Semiconductors
 */
#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"

#define SJA1105_VL_FRAME_MEMORY			100
#define SJA1105_SIZE_VL_STATUS			8

/* The switch flow classification core implements TTEthernet, which 'thinks' in
 * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
 * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
 * somewhat closer to a pre-standard implementation of IEEE 802.1Qci
 * (Per-Stream Filtering and Policing), which is what the driver is going to be
 * implementing.
 *
 *                                 VL Lookup
 *        Key = {DMAC && VLANID   +---------+  Key = { (DMAC[47:16] & VLMASK ==
 *               && VLAN PCP      |         |                         VLMARKER)
 *               && INGRESS PORT} +---------+                      (both fixed)
 *            (exact match,            |             && DMAC[15:0] == VLID
 *         all specified in rule)      |                    (specified in rule)
 *                                     v             && INGRESS PORT }
 *                               ------------
 *                    0 (PSFP)  /            \  1 (ARINC664)
 *                 +-----------/  VLLUPFORMAT \----------+
 *                 |           \    (fixed)   /          |
 *                 |            \            /           |
 *  0 (forwarding) v             ------------            |
 *           ------------                                |
 *          /            \  1 (QoS classification)       |
 *     +---/  ISCRITICAL  \-----------+                  |
 *     |   \  (per rule)  /           |                  |
 *     |    \            /   VLID taken from      VLID taken from
 *     v     ------------     index of rule       contents of rule
 *  select                     that matched         that matched
 * DESTPORTS                          |                  |
 *  |                                 +---------+--------+
 *  |                                           |
 *  |                                           v
 *  |                                     VL Forwarding
 *  |                                   (indexed by VLID)
 *  |                                      +---------+
 *  |                       +--------------|         |
 *  |                       |  select TYPE +---------+
 *  |                       v
 *  |   0 (rate      ------------    1 (time
 *  |  constrained) /            \   triggered)
 *  |       +------/     TYPE     \------------+
 *  |       |      \  (per VLID)  /            |
 *  |       v       \            /             v
 *  |  VL Policing   ------------         VL Policing
 *  | (indexed by VLID)                (indexed by VLID)
 *  |  +---------+                        +---------+
 *  |  | TYPE=0  |                        | TYPE=1  |
 *  |  +---------+                        +---------+
 *  |  select SHARINDX                 select SHARINDX to
 *  |  to rate-limit                 re-enter VL Forwarding
 *  |  groups of VL's               with new VLID for egress
 *  |  to same quota                           |
 *  |       |                                  |
 *  |  select MAXLEN -> exceed => drop    select MAXLEN -> exceed => drop
 *  |       |                                  |
 *  |       v                                  v
 *  |  VL Forwarding                      VL Forwarding
 *  | (indexed by SHARINDX)             (indexed by SHARINDX)
 *  |  +---------+                        +---------+
 *  |  | TYPE=0  |                        | TYPE=1  |
 *  |  +---------+                        +---------+
 *  |  select PRIORITY,                 select PRIORITY,
 *  | PARTITION, DESTPORTS            PARTITION, DESTPORTS
 *  |       |                                  |
 *  |       v                                  v
 *  |  VL Policing                        VL Policing
 *  | (indexed by SHARINDX)           (indexed by SHARINDX)
 *  |  +---------+                        +---------+
 *  |  | TYPE=0  |                        | TYPE=1  |
 *  |  +---------+                        +---------+
 *  |       |                                  |
 *  |       v                                  |
 *  |  select BAG, -> exceed => drop           |
 *  |    JITTER                                v
 *  |       |             ----------------------------------------------
 *  |       |            /    Reception Window is open for this VL      \
 *  |       |           /    (the Schedule Table executes an entry i     \
 *  |       |          /   M <= i < N, for which these conditions hold):  \ no
 *  |       |    +----/                                                    \-+
 *  |       |    |yes \       WINST[M] == 1 && WINSTINDEX[M] == VLID       / |
 *  |       |    |     \     WINEND[N] == 1 && WINSTINDEX[N] == VLID      /  |
 *  |       |    |      \                                                /   |
 *  |       |    |       \ (the VL window has opened and not yet closed)/    |
 *  |       |    |        ----------------------------------------------     |
 *  |       |    v                                                           v
 *  |       |  dispatch to DESTPORTS when the Schedule Table               drop
 *  |       |  executes an entry i with TXEN == 1 && VLINDEX == i
 *  v       v
 * dispatch immediately to DESTPORTS
 *
 * The per-port classification key is always composed of {DMAC, VID, PCP} and
 * is non-maskable. This 'looks like' the NULL stream identification function
 * from IEEE 802.1CB clause 6, except for the extra VLAN PCP. When the switch
 * ports operate as VLAN-unaware, we do allow the user to not specify the VLAN
 * ID and PCP, in which case the port-based defaults will be used.
 *
 * In TTEthernet, routing is something that needs to be done manually for each
 * Virtual Link. So the flow action must always include one of:
 * a. 'redirect', 'trap' or 'drop': select the egress port list
 * Additionally, the following actions may be applied to a Virtual Link,
 * turning it into 'critical' traffic:
 * b. 'police': turn it into a rate-constrained VL, with bandwidth limitation
 *    given by the maximum frame length, bandwidth allocation gap (BAG) and
 *    maximum jitter.
 * c. 'gate': turn it into a time-triggered VL, which can only be received
 *    and forwarded according to a given schedule.
 */
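
/* Illustrative sketch only (not part of the driver): what the exact,
 * non-maskable {DMAC, VID, PCP} match described above amounts to in software
 * terms, using the sja1105_key layout that the rest of this file operates on.
 * The frame fields are passed in explicitly because the real lookup is
 * performed by the switch, not by the kernel.
 */
static inline bool sja1105_example_key_matches(const struct sja1105_key *key,
					       u64 dmac, u16 vid, u16 pcp)
{
	switch (key->type) {
	case SJA1105_KEY_VLAN_AWARE_VL:
		/* VLAN-aware ports must match on all three fields */
		return key->vl.dmac == dmac && key->vl.vid == vid &&
		       key->vl.pcp == pcp;
	case SJA1105_KEY_VLAN_UNAWARE_VL:
		/* VLAN-unaware ports fall back to a DMAC-only key */
		return key->vl.dmac == dmac;
	default:
		return false;
	}
}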

static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
				 struct sja1105_vl_lookup_entry *b)
{
	if (a->macaddr < b->macaddr)
		return true;
	if (a->macaddr > b->macaddr)
		return false;
	if (a->vlanid < b->vlanid)
		return true;
	if (a->vlanid > b->vlanid)
		return false;
	if (a->port < b->port)
		return true;
	if (a->port > b->port)
		return false;
	if (a->vlanprior < b->vlanprior)
		return true;
	if (a->vlanprior > b->vlanprior)
		return false;
	/* Keys are equal */
	return false;
}

static int sja1105_init_virtual_links(struct sja1105_private *priv,
				      struct netlink_ext_ack *extack)
{
	struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
	struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
	struct sja1105_vl_policing_entry *vl_policing;
	struct sja1105_vl_forwarding_entry *vl_fwd;
	struct sja1105_vl_lookup_entry *vl_lookup;
	bool have_critical_virtual_links = false;
	struct sja1105_table *table;
	struct sja1105_rule *rule;
	int num_virtual_links = 0;
	int max_sharindx = 0;
	int i, j, k;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
	l2_fwd_params = table->entries;
	l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY;

	/* Figure out the dimensioning of the problem */
	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		if (rule->type != SJA1105_RULE_VL)
			continue;
		/* Each VL lookup entry matches on a single ingress port */
		num_virtual_links += hweight_long(rule->port_mask);

		if (rule->vl.type != SJA1105_VL_NONCRITICAL)
			have_critical_virtual_links = true;
		if (max_sharindx < rule->vl.sharindx)
			max_sharindx = rule->vl.sharindx;
	}

	if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
		NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
		return -ENOSPC;
	}

	if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
		NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
		return -ENOSPC;
	}

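	/* The VL Policing and VL Forwarding tables are indexed both by the
	 * position of the matched rule in the VL Lookup table and by the
	 * user-provided SHARINDX, so size them for the larger of the two
	 * (the "+ 1" turns the largest index into an entry count).
	 */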
	max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;

	/* Discard previous VL Lookup Table */
	table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous VL Policing Table */
	table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous VL Forwarding Table */
	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Discard previous VL Forwarding Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Nothing to do */
	if (!num_virtual_links)
		return 0;

	/* Pre-allocate space in the static config tables */

	/* VL Lookup Table */
	table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
	table->entries = kcalloc(num_virtual_links,
				 table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_virtual_links;
	vl_lookup = table->entries;

	k = 0;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		unsigned long port;

		if (rule->type != SJA1105_RULE_VL)
			continue;

		for_each_set_bit(port, &rule->port_mask, SJA1105_NUM_PORTS) {
			vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
			vl_lookup[k].port = port;
			vl_lookup[k].macaddr = rule->key.vl.dmac;
			if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
				vl_lookup[k].vlanid = rule->key.vl.vid;
				vl_lookup[k].vlanprior = rule->key.vl.pcp;
			} else {
				u16 vid = dsa_8021q_rx_vid(priv->ds, port);

				vl_lookup[k].vlanid = vid;
				vl_lookup[k].vlanprior = 0;
			}
			/* For critical VLs, the DESTPORTS mask is taken from
			 * the VL Forwarding Table, so no point in putting it
			 * in the VL Lookup Table
			 */
			if (rule->vl.type == SJA1105_VL_NONCRITICAL)
				vl_lookup[k].destports = rule->vl.destports;
			else
				vl_lookup[k].iscritical = true;
			vl_lookup[k].flow_cookie = rule->cookie;
			k++;
		}
	}

	/* UM10944.pdf chapter 4.2.3 VL Lookup table:
	 * "the entries in the VL Lookup table must be sorted in ascending
	 * order (i.e. the smallest value must be loaded first) according to
	 * the following sort order: MACADDR, VLANID, PORT, VLANPRIOR."
	 */
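	/* For example, the keys {MACADDR=2, VLANID=5, PORT=1, VLANPRIOR=0},
	 * {1, 7, 3, 0} and {2, 4, 0, 7} must be loaded in the order
	 * {1, 7, 3, 0}, {2, 4, 0, 7}, {2, 5, 1, 0}: MACADDR is the most
	 * significant field and VLANID only breaks ties between equal MAC
	 * addresses. sja1105_vl_key_lower() implements exactly this
	 * ordering, and the exchange sort below is acceptable because the
	 * table is small and this only runs when the rule set changes.
	 */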
	for (i = 0; i < num_virtual_links; i++) {
		struct sja1105_vl_lookup_entry *a = &vl_lookup[i];

		for (j = i + 1; j < num_virtual_links; j++) {
			struct sja1105_vl_lookup_entry *b = &vl_lookup[j];

			if (sja1105_vl_key_lower(b, a)) {
				struct sja1105_vl_lookup_entry tmp = *a;

				*a = *b;
				*b = tmp;
			}
		}
	}

	if (!have_critical_virtual_links)
		return 0;

	/* VL Policing Table */
	table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
	table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = max_sharindx;
	vl_policing = table->entries;

	/* VL Forwarding Table */
	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
	table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = max_sharindx;
	vl_fwd = table->entries;

	/* VL Forwarding Parameters Table */
	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = 1;
	vl_fwd_params = table->entries;

	/* Reserve some frame buffer memory for the critical-traffic virtual
	 * links. This is mandatory: critical frames are queued in a memory
	 * partition of their own (the "partition" field in the VL Forwarding
	 * table). At the moment, hardcode the value at 100 blocks of 128
	 * bytes of memory each. This leaves 829 blocks remaining for
	 * best-effort traffic. TODO: figure out a more flexible way to
	 * perform the frame buffer partitioning.
	 */
	l2_fwd_params->part_spc[0] = SJA1105_MAX_FRAME_MEMORY -
				     SJA1105_VL_FRAME_MEMORY;
	vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
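	/* The best-effort partition (part_spc[0] of the L2 Forwarding
	 * Parameters table) and the critical-traffic partition (partspc[0]
	 * of the VL Forwarding Parameters table) are carved out of the same
	 * SJA1105_MAX_FRAME_MEMORY = 929 blocks of packet buffer: 100 blocks
	 * (100 * 128 = 12800 bytes) go to critical traffic and the remaining
	 * 829 blocks stay with best-effort traffic.
	 */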

	for (i = 0; i < num_virtual_links; i++) {
		unsigned long cookie = vl_lookup[i].flow_cookie;
		struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);

		if (rule->vl.type == SJA1105_VL_NONCRITICAL)
			continue;
		if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
			int sharindx = rule->vl.sharindx;

			vl_policing[i].type = 1;
			vl_policing[i].sharindx = sharindx;
			vl_policing[i].maxlen = rule->vl.maxlen;
			vl_policing[sharindx].type = 1;

			vl_fwd[i].type = 1;
			vl_fwd[sharindx].type = 1;
			vl_fwd[sharindx].priority = rule->vl.ipv;
			vl_fwd[sharindx].partition = 0;
			vl_fwd[sharindx].destports = rule->vl.destports;
		}
	}

	return 0;
}

int sja1105_vl_redirect(struct sja1105_private *priv, int port,
			struct netlink_ext_ack *extack, unsigned long cookie,
			struct sja1105_key *key, unsigned long destports,
			bool append)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	int rc;

	/* A VLAN-aware port must match on the full {DMAC, VID, PCP} key,
	 * while a VLAN-unaware port can only match on the DMAC.
	 */
	if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port))) {
		if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can only redirect based on {DMAC, VID, PCP}");
			return -EOPNOTSUPP;
		}
	} else {
		if (key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can only redirect based on DMAC");
			return -EOPNOTSUPP;
		}
	}

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_VL;
		rule->key = *key;
		list_add(&rule->list, &priv->flow_block.rules);
	}

	rule->port_mask |= BIT(port);
	if (append)
		rule->vl.destports |= destports;
	else
		rule->vl.destports = destports;

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc) {
		rule->port_mask &= ~BIT(port);
		if (!rule->port_mask) {
			list_del(&rule->list);
			kfree(rule);
		}
	}

	return rc;
}
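
/* A rule of the kind handled above is typically installed from user space
 * with tc-flower, roughly as follows (illustrative only; see tc-flower(8)
 * and tc-mirred(8) for the exact syntax):
 *
 *   tc qdisc add dev swp2 clsact
 *   tc filter add dev swp2 ingress flower skip_sw \
 *           dst_mac 42:be:24:9b:76:20 \
 *           action mirred egress redirect dev swp3
 *
 * The flower match supplies the {DMAC, VID, PCP} key, and the redirect
 * (or trap/drop) action determines the DESTPORTS mask programmed above.
 */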

int sja1105_vl_delete(struct sja1105_private *priv, int port,
		      struct sja1105_rule *rule, struct netlink_ext_ack *extack)
{
	int rc;

	rule->port_mask &= ~BIT(port);
	if (!rule->port_mask) {
		list_del(&rule->list);
		kfree(rule->vl.entries);
		kfree(rule);
	}

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc)
		return rc;

	return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}

/* Insert into the global gate list, sorted by gate action time. */
static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
				     struct sja1105_rule *rule,
				     u8 gate_state, s64 entry_time,
				     struct netlink_ext_ack *extack)
{
	struct sja1105_gate_entry *e;
	int rc;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->rule = rule;
	e->gate_state = gate_state;
	e->interval = entry_time;

	if (list_empty(&gating_cfg->entries)) {
		list_add(&e->list, &gating_cfg->entries);
	} else {
		struct sja1105_gate_entry *p;

		list_for_each_entry(p, &gating_cfg->entries, list) {
			if (p->interval == e->interval) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Gate conflict");
				rc = -EBUSY;
				goto err;
			}

			if (e->interval < p->interval)
				break;
		}
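		/* Insert e right before the first entry with a larger time,
		 * or at the tail if the loop ran to completion (in that case
		 * p->list is the list head and p->list.prev is the current
		 * last element).
		 */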
		list_add(&e->list, p->list.prev);
	}

	gating_cfg->num_entries++;

	return 0;
err:
	kfree(e);
	return rc;
}

/* The gate entries contain absolute times in their e->interval field. Convert
 * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
 */
static void
sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
				    u64 cycle_time)
{
	struct sja1105_gate_entry *last_e;
	struct sja1105_gate_entry *e;
	struct list_head *prev;

	list_for_each_entry(e, &gating_cfg->entries, list) {
		struct sja1105_gate_entry *p;

		prev = e->list.prev;

		if (prev == &gating_cfg->entries)
			continue;

		p = list_entry(prev, struct sja1105_gate_entry, list);
		p->interval = e->interval - p->interval;
	}
	/* The last (or only) entry's interval always wraps around to the end
	 * of the cycle. Do this unconditionally, otherwise a schedule with a
	 * single element would be left with an interval of zero.
	 */
	last_e = list_last_entry(&gating_cfg->entries,
				 struct sja1105_gate_entry, list);
	last_e->interval = cycle_time - last_e->interval;
}
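
/* A minimal array-based sketch of the same conversion, for illustration only
 * (the driver works on the linked list above). Given entry start times
 * 0, 5, 10, 15 and a cycle time of 20, it produces the intervals 5, 5, 5, 5.
 */
static inline void sja1105_example_times_to_intervals(u64 *times, int n,
						      u64 cycle_time)
{
	int i;

	if (!n)
		return;

	for (i = 0; i < n - 1; i++)
		times[i] = times[i + 1] - times[i];
	/* The last interval wraps around to the end of the cycle */
	times[n - 1] = cycle_time - times[n - 1];
}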

static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
{
	struct sja1105_gate_entry *e, *n;

	list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
		list_del(&e->list);
		kfree(e);
	}
}

static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
					      struct netlink_ext_ack *extack)
{
	struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
	struct sja1105_rule *rule;
	s64 max_cycle_time = 0;
	s64 its_base_time = 0;
	int i, rc = 0;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		if (rule->type != SJA1105_RULE_VL)
			continue;
		if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
			continue;

		if (max_cycle_time < rule->vl.cycle_time) {
			max_cycle_time = rule->vl.cycle_time;
			its_base_time = rule->vl.base_time;
		}
	}

	if (!max_cycle_time)
		return 0;

	dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
		max_cycle_time, its_base_time);

	sja1105_free_gating_config(gating_cfg);

	gating_cfg->base_time = its_base_time;
	gating_cfg->cycle_time = max_cycle_time;
	gating_cfg->num_entries = 0;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		s64 time;
		s64 rbt;

		if (rule->type != SJA1105_RULE_VL)
			continue;
		if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
			continue;

		/* Calculate the difference between this gating schedule's
		 * base time, and the base time of the gating schedule with the
		 * longest cycle time. We call it the relative base time (rbt).
		 */
		rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
				       its_base_time);
		rbt -= its_base_time;

		time = rbt;

		for (i = 0; i < rule->vl.num_entries; i++) {
			u8 gate_state = rule->vl.entries[i].gate_state;
			s64 entry_time = time;

			while (entry_time < max_cycle_time) {
				rc = sja1105_insert_gate_entry(gating_cfg, rule,
							       gate_state,
							       entry_time,
							       extack);
				if (rc)
					goto err;

				entry_time += rule->vl.cycle_time;
			}
			time += rule->vl.entries[i].interval;
		}
	}

	sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);

	return 0;
err:
	sja1105_free_gating_config(gating_cfg);
	return rc;
}
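
/* Worked example of the merge done above, for illustration: assume rule A has
 * cycle_time = 200us and base_time = 0 (the longest cycle, so it defines the
 * master cycle), and rule B has cycle_time = 100us and base_time = 30us.
 * B's relative base time is 30us, so B's entries are replicated at offsets
 * 30us and 130us within the 200us master cycle, while A's entries appear
 * once, starting at offset 0.
 *
 * The sketch below models the semantics assumed of future_base_time() (the
 * real helper lives elsewhere in the driver): the first instant greater than
 * or equal to @now which is congruent to @base_time modulo @cycle_time.
 */
static inline s64 sja1105_example_future_base_time(s64 base_time,
						   s64 cycle_time, s64 now)
{
	s64 n;

	if (base_time >= now)
		return base_time;

	n = div_s64(now - base_time, cycle_time);
	base_time += n * cycle_time;
	if (base_time < now)
		base_time += cycle_time;

	return base_time;
}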

int sja1105_vl_gate(struct sja1105_private *priv, int port,
		    struct netlink_ext_ack *extack, unsigned long cookie,
		    struct sja1105_key *key, u32 index, s32 prio,
		    u64 base_time, u64 cycle_time, u64 cycle_time_ext,
		    u32 num_entries, struct action_gate_entry *entries)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	int ipv = -1;
	int i, rc;
	s32 rem;

	if (cycle_time_ext) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cycle time extension not supported");
		return -EOPNOTSUPP;
	}

	div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
	if (rem) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Base time must be multiple of 200 ns");
		return -ERANGE;
	}

	div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
	if (rem) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cycle time must be multiple of 200 ns");
		return -ERANGE;
	}

	if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port))) {
		if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can only gate based on {DMAC, VID, PCP}");
			return -EOPNOTSUPP;
		}
	} else {
		if (key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can only gate based on DMAC");
			return -EOPNOTSUPP;
		}
	}

	if (!rule) {
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		list_add(&rule->list, &priv->flow_block.rules);
		rule->cookie = cookie;
		rule->type = SJA1105_RULE_VL;
		rule->key = *key;
		rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
		rule->vl.sharindx = index;
		rule->vl.base_time = base_time;
		rule->vl.cycle_time = cycle_time;
		rule->vl.num_entries = num_entries;
		rule->vl.entries = kcalloc(num_entries,
					   sizeof(struct action_gate_entry),
					   GFP_KERNEL);
		if (!rule->vl.entries) {
			rc = -ENOMEM;
			goto out;
		}

		for (i = 0; i < num_entries; i++) {
			div_s64_rem(entries[i].interval,
				    sja1105_delta_to_ns(1), &rem);
			if (rem) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Interval must be multiple of 200 ns");
				rc = -ERANGE;
				goto out;
			}

			if (!entries[i].interval) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Interval cannot be zero");
				rc = -ERANGE;
				goto out;
			}

			if (ns_to_sja1105_delta(entries[i].interval) >
			    SJA1105_TAS_MAX_DELTA) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Maximum interval is 52 ms");
				rc = -ERANGE;
				goto out;
			}

			if (entries[i].maxoctets != -1) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Cannot offload IntervalOctetMax");
				rc = -EOPNOTSUPP;
				goto out;
			}

			if (ipv == -1) {
				ipv = entries[i].ipv;
			} else if (ipv != entries[i].ipv) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Only support a single IPV per VL");
				rc = -EOPNOTSUPP;
				goto out;
			}

			rule->vl.entries[i] = entries[i];
		}

		if (ipv == -1) {
			if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
				ipv = key->vl.pcp;
			else
				ipv = 0;
		}

		/* TODO: support per-flow MTU */
		rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
		rule->vl.ipv = ipv;
	}

	rule->port_mask |= BIT(port);

	rc = sja1105_compose_gating_subschedule(priv, extack);
	if (rc)
		goto out;

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc)
		goto out;

	if (sja1105_gating_check_conflicts(priv, -1, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
		rc = -ERANGE;
		goto out;
	}

out:
	if (rc) {
		rule->port_mask &= ~BIT(port);
		if (!rule->port_mask) {
			list_del(&rule->list);
			kfree(rule->vl.entries);
			kfree(rule);
		}
	}

	return rc;
}
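
/* For reference, a time-triggered VL of the kind handled above is typically
 * installed roughly as follows (illustrative only; see tc-flower(8) and
 * tc-gate(8) for the exact syntax):
 *
 *   tc qdisc add dev swp2 clsact
 *   tc filter add dev swp2 ingress flower skip_sw \
 *           dst_mac 42:be:24:9b:76:20 \
 *           action gate index 2 base-time 0 \
 *           sched-entry open  600000 -1 -1 \
 *           sched-entry close 400000 -1 -1
 *
 * Both intervals are multiples of 200 ns and smaller than 52 ms, the
 * per-entry max-octets field is left at -1 (IntervalOctetMax cannot be
 * offloaded), and a single IPV is used for the whole VL, matching the
 * checks performed above.
 */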

static int sja1105_find_vlid(struct sja1105_private *priv, int port,
			     struct sja1105_key *key)
{
	struct sja1105_vl_lookup_entry *vl_lookup;
	struct sja1105_table *table;
	int i;

	if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
		    key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
		return -1;

	table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
	vl_lookup = table->entries;

	for (i = 0; i < table->entry_count; i++) {
		if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
			if (vl_lookup[i].port == port &&
			    vl_lookup[i].macaddr == key->vl.dmac &&
			    vl_lookup[i].vlanid == key->vl.vid &&
			    vl_lookup[i].vlanprior == key->vl.pcp)
				return i;
		} else {
			if (vl_lookup[i].port == port &&
			    vl_lookup[i].macaddr == key->vl.dmac)
				return i;
		}
	}

	return -1;
}

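/* The VL status area holds one 8-byte status word per VLID. The SPI address
 * space is addressed in 32-bit words, which is presumably why the offset
 * below is 2 * vlid. Per the unpacking below, bits 15:0 hold UNRELEASED,
 * bits 31:16 TIMINGERR and bits 47:32 LENGTHERR; their sum is reported to
 * tc as the packet count of this gate rule.
 */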
int sja1105_vl_stats(struct sja1105_private *priv, int port,
		     struct sja1105_rule *rule, struct flow_stats *stats,
		     struct netlink_ext_ack *extack)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
	u64 unreleased;
	u64 timingerr;
	u64 lengtherr;
	int vlid, rc;
	u64 pkts;

	if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
		return 0;

	vlid = sja1105_find_vlid(priv, port, &rule->key);
	if (vlid < 0)
		return 0;

	rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
			      SJA1105_SIZE_VL_STATUS);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
		return rc;
	}

	sja1105_unpack(buf, &timingerr,  31, 16, SJA1105_SIZE_VL_STATUS);
	sja1105_unpack(buf, &unreleased, 15,  0, SJA1105_SIZE_VL_STATUS);
	sja1105_unpack(buf, &lengtherr,  47, 32, SJA1105_SIZE_VL_STATUS);

	pkts = timingerr + unreleased + lengtherr;

	flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts,
			  jiffies - rule->vl.stats.lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);

	rule->vl.stats.pkts = pkts;
	rule->vl.stats.lastused = jiffies;

	return 0;
}
795