// SPDX-License-Identifier: GPL-2.0+
/*
 *  IPv6 IOAM implementation
 *
 *  Author:
 *  Justin Iurman <justin.iurman@uliege.be>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/ioam6.h>
#include <linux/ioam6_genl.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>

#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/ioam6.h>
#include <net/sch_generic.h>

static void ioam6_ns_release(struct ioam6_namespace *ns)
{
	kfree_rcu(ns, rcu);
}

static void ioam6_sc_release(struct ioam6_schema *sc)
{
	kfree_rcu(sc, rcu);
}

static void ioam6_free_ns(void *ptr, void *arg)
{
	struct ioam6_namespace *ns = (struct ioam6_namespace *)ptr;

	if (ns)
		ioam6_ns_release(ns);
}

static void ioam6_free_sc(void *ptr, void *arg)
{
	struct ioam6_schema *sc = (struct ioam6_schema *)ptr;

	if (sc)
		ioam6_sc_release(sc);
}

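/* rhashtable compare callbacks: return 0 when the object's ID matches the
 * lookup key (namespaces are keyed by a __be16 ID, schemas by a u32 ID).
 */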
static int ioam6_ns_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct ioam6_namespace *ns = obj;

	return (ns->id != *(__be16 *)arg->key);
}

static int ioam6_sc_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct ioam6_schema *sc = obj;

	return (sc->id != *(u32 *)arg->key);
}

static const struct rhashtable_params rht_ns_params = {
	.key_len		= sizeof(__be16),
	.key_offset		= offsetof(struct ioam6_namespace, id),
	.head_offset		= offsetof(struct ioam6_namespace, head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= ioam6_ns_cmpfn,
};

static const struct rhashtable_params rht_sc_params = {
	.key_len		= sizeof(u32),
	.key_offset		= offsetof(struct ioam6_schema, id),
	.head_offset		= offsetof(struct ioam6_schema, head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= ioam6_sc_cmpfn,
};

static struct genl_family ioam6_genl_family;

static const struct nla_policy ioam6_genl_policy_addns[] = {
	[IOAM6_ATTR_NS_ID]	= { .type = NLA_U16 },
	[IOAM6_ATTR_NS_DATA]	= { .type = NLA_U32 },
	[IOAM6_ATTR_NS_DATA_WIDE] = { .type = NLA_U64 },
};

static const struct nla_policy ioam6_genl_policy_delns[] = {
	[IOAM6_ATTR_NS_ID]	= { .type = NLA_U16 },
};

static const struct nla_policy ioam6_genl_policy_addsc[] = {
	[IOAM6_ATTR_SC_ID]	= { .type = NLA_U32 },
	[IOAM6_ATTR_SC_DATA]	= { .type = NLA_BINARY,
				    .len = IOAM6_MAX_SCHEMA_DATA_LEN },
};

static const struct nla_policy ioam6_genl_policy_delsc[] = {
	[IOAM6_ATTR_SC_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy ioam6_genl_policy_ns_sc[] = {
	[IOAM6_ATTR_NS_ID]	= { .type = NLA_U16 },
	[IOAM6_ATTR_SC_ID]	= { .type = NLA_U32 },
	[IOAM6_ATTR_SC_NONE]	= { .type = NLA_FLAG },
};

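/* IOAM6_CMD_ADD_NAMESPACE: insert a new IOAM namespace, keyed by its ID
 * (stored in network byte order). The optional 32-bit and 64-bit namespace
 * data default to the "unavailable" values when not provided.
 */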
static int ioam6_genl_addns(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	u64 data64;
	u32 data32;
	__be16 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID])
		return -EINVAL;

	id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
	if (ns) {
		err = -EEXIST;
		goto out_unlock;
	}

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns) {
		err = -ENOMEM;
		goto out_unlock;
	}

	ns->id = id;

	if (!info->attrs[IOAM6_ATTR_NS_DATA])
		data32 = IOAM6_U32_UNAVAILABLE;
	else
		data32 = nla_get_u32(info->attrs[IOAM6_ATTR_NS_DATA]);

	if (!info->attrs[IOAM6_ATTR_NS_DATA_WIDE])
		data64 = IOAM6_U64_UNAVAILABLE;
	else
		data64 = nla_get_u64(info->attrs[IOAM6_ATTR_NS_DATA_WIDE]);

	ns->data = cpu_to_be32(data32);
	ns->data_wide = cpu_to_be64(data64);

	err = rhashtable_lookup_insert_fast(&nsdata->namespaces, &ns->head,
					    rht_ns_params);
	if (err)
		kfree(ns);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

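/* IOAM6_CMD_DEL_NAMESPACE: remove a namespace and detach it from its schema,
 * if any. The object itself is freed after a grace period via kfree_rcu(),
 * see ioam6_ns_release().
 */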
static int ioam6_genl_delns(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	struct ioam6_schema *sc;
	__be16 id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID])
		return -EINVAL;

	id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
	if (!ns) {
		err = -ENOENT;
		goto out_unlock;
	}

	sc = rcu_dereference_protected(ns->schema,
				       lockdep_is_held(&nsdata->lock));

	err = rhashtable_remove_fast(&nsdata->namespaces, &ns->head,
				     rht_ns_params);
	if (err)
		goto out_unlock;

	if (sc)
		rcu_assign_pointer(sc->ns, NULL);

	ioam6_ns_release(ns);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

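/* Fill one netlink message for a single namespace: its ID, its optional data
 * values and, if a schema is currently attached, the schema ID.
 */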
static int __ioam6_genl_dumpns_element(struct ioam6_namespace *ns,
				       u32 portid,
				       u32 seq,
				       u32 flags,
				       struct sk_buff *skb,
				       u8 cmd)
{
	struct ioam6_schema *sc;
	u64 data64;
	u32 data32;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	data32 = be32_to_cpu(ns->data);
	data64 = be64_to_cpu(ns->data_wide);

	if (nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id)) ||
	    (data32 != IOAM6_U32_UNAVAILABLE &&
	     nla_put_u32(skb, IOAM6_ATTR_NS_DATA, data32)) ||
	    (data64 != IOAM6_U64_UNAVAILABLE &&
	     nla_put_u64_64bit(skb, IOAM6_ATTR_NS_DATA_WIDE,
			       data64, IOAM6_ATTR_PAD)))
		goto nla_put_failure;

	rcu_read_lock();

	sc = rcu_dereference(ns->schema);
	if (sc && nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id)) {
		rcu_read_unlock();
		goto nla_put_failure;
	}

	rcu_read_unlock();

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

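/* Namespace dump: the start/done callbacks allocate and release the
 * rhashtable iterator stored in cb->args[0]; the dumpit callback walks the
 * table and emits one message per namespace, retrying on -EAGAIN (which
 * signals a concurrent table resize).
 */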
static int ioam6_genl_dumpns_start(struct netlink_callback *cb)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[0] = (long)iter;
	}

	rhashtable_walk_enter(&nsdata->namespaces, iter);

	return 0;
}

static int ioam6_genl_dumpns_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);

	return 0;
}

static int ioam6_genl_dumpns(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *iter;
	struct ioam6_namespace *ns;
	int err;

	iter = (struct rhashtable_iter *)cb->args[0];
	rhashtable_walk_start(iter);

	for (;;) {
		ns = rhashtable_walk_next(iter);

		if (IS_ERR(ns)) {
			if (PTR_ERR(ns) == -EAGAIN)
				continue;
			err = PTR_ERR(ns);
			goto done;
		} else if (!ns) {
			break;
		}

		err = __ioam6_genl_dumpns_element(ns,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NLM_F_MULTI,
						  skb,
						  IOAM6_CMD_DUMP_NAMESPACES);
		if (err)
			goto done;
	}

	err = skb->len;

done:
	rhashtable_walk_stop(iter);
	return err;
}

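/* IOAM6_CMD_ADD_SCHEMA: insert a new schema. The opaque data is padded to a
 * multiple of 4 octets and the precomputed header encodes the length (in
 * 4-octet units, upper 8 bits) together with the 24-bit schema ID, as used
 * for the Opaque State Snapshot field.
 */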
static int ioam6_genl_addsc(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	int len, len_aligned, err;
	struct ioam6_schema *sc;
	u32 id;

	if (!info->attrs[IOAM6_ATTR_SC_ID] || !info->attrs[IOAM6_ATTR_SC_DATA])
		return -EINVAL;

	id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
	if (sc) {
		err = -EEXIST;
		goto out_unlock;
	}

	len = nla_len(info->attrs[IOAM6_ATTR_SC_DATA]);
	len_aligned = ALIGN(len, 4);

	sc = kzalloc(sizeof(*sc) + len_aligned, GFP_KERNEL);
	if (!sc) {
		err = -ENOMEM;
		goto out_unlock;
	}

	sc->id = id;
	sc->len = len_aligned;
	sc->hdr = cpu_to_be32(sc->id | ((u8)(sc->len / 4) << 24));
	nla_memcpy(sc->data, info->attrs[IOAM6_ATTR_SC_DATA], len);

	err = rhashtable_lookup_insert_fast(&nsdata->schemas, &sc->head,
					    rht_sc_params);
	if (err)
		goto free_sc;

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
free_sc:
	kfree(sc);
	goto out_unlock;
}

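/* IOAM6_CMD_DEL_SCHEMA: remove a schema and detach it from its namespace,
 * if any, before releasing it via kfree_rcu().
 */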
static int ioam6_genl_delsc(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_pernet_data *nsdata;
	struct ioam6_namespace *ns;
	struct ioam6_schema *sc;
	int err;
	u32 id;

	if (!info->attrs[IOAM6_ATTR_SC_ID])
		return -EINVAL;

	id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	sc = rhashtable_lookup_fast(&nsdata->schemas, &id, rht_sc_params);
	if (!sc) {
		err = -ENOENT;
		goto out_unlock;
	}

	ns = rcu_dereference_protected(sc->ns, lockdep_is_held(&nsdata->lock));

	err = rhashtable_remove_fast(&nsdata->schemas, &sc->head,
				     rht_sc_params);
	if (err)
		goto out_unlock;

	if (ns)
		rcu_assign_pointer(ns->schema, NULL);

	ioam6_sc_release(sc);

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

static int __ioam6_genl_dumpsc_element(struct ioam6_schema *sc,
				       u32 portid, u32 seq, u32 flags,
				       struct sk_buff *skb, u8 cmd)
{
	struct ioam6_namespace *ns;
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &ioam6_genl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (nla_put_u32(skb, IOAM6_ATTR_SC_ID, sc->id) ||
	    nla_put(skb, IOAM6_ATTR_SC_DATA, sc->len, sc->data))
		goto nla_put_failure;

	rcu_read_lock();

	ns = rcu_dereference(sc->ns);
	if (ns && nla_put_u16(skb, IOAM6_ATTR_NS_ID, be16_to_cpu(ns->id))) {
		rcu_read_unlock();
		goto nla_put_failure;
	}

	rcu_read_unlock();

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int ioam6_genl_dumpsc_start(struct netlink_callback *cb)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(sock_net(cb->skb->sk));
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	if (!iter) {
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;

		cb->args[0] = (long)iter;
	}

	rhashtable_walk_enter(&nsdata->schemas, iter);

	return 0;
}

static int ioam6_genl_dumpsc_done(struct netlink_callback *cb)
{
	struct rhashtable_iter *iter = (struct rhashtable_iter *)cb->args[0];

	rhashtable_walk_exit(iter);
	kfree(iter);

	return 0;
}

static int ioam6_genl_dumpsc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rhashtable_iter *iter;
	struct ioam6_schema *sc;
	int err;

	iter = (struct rhashtable_iter *)cb->args[0];
	rhashtable_walk_start(iter);

	for (;;) {
		sc = rhashtable_walk_next(iter);

		if (IS_ERR(sc)) {
			if (PTR_ERR(sc) == -EAGAIN)
				continue;
			err = PTR_ERR(sc);
			goto done;
		} else if (!sc) {
			break;
		}

		err = __ioam6_genl_dumpsc_element(sc,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq,
						  NLM_F_MULTI,
						  skb,
						  IOAM6_CMD_DUMP_SCHEMAS);
		if (err)
			goto done;
	}

	err = skb->len;

done:
	rhashtable_walk_stop(iter);
	return err;
}

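/* IOAM6_CMD_NS_SET_SCHEMA: attach a schema to a namespace (or detach it when
 * IOAM6_ATTR_SC_NONE is set). Any previous namespace<->schema binding on
 * either side is cleared first, keeping the mapping strictly 1:1.
 */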
static int ioam6_genl_ns_set_schema(struct sk_buff *skb, struct genl_info *info)
{
	struct ioam6_namespace *ns, *ns_ref;
	struct ioam6_schema *sc, *sc_ref;
	struct ioam6_pernet_data *nsdata;
	__be16 ns_id;
	u32 sc_id;
	int err;

	if (!info->attrs[IOAM6_ATTR_NS_ID] ||
	    (!info->attrs[IOAM6_ATTR_SC_ID] &&
	     !info->attrs[IOAM6_ATTR_SC_NONE]))
		return -EINVAL;

	ns_id = cpu_to_be16(nla_get_u16(info->attrs[IOAM6_ATTR_NS_ID]));
	nsdata = ioam6_pernet(genl_info_net(info));

	mutex_lock(&nsdata->lock);

	ns = rhashtable_lookup_fast(&nsdata->namespaces, &ns_id, rht_ns_params);
	if (!ns) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (info->attrs[IOAM6_ATTR_SC_NONE]) {
		sc = NULL;
	} else {
		sc_id = nla_get_u32(info->attrs[IOAM6_ATTR_SC_ID]);
		sc = rhashtable_lookup_fast(&nsdata->schemas, &sc_id,
					    rht_sc_params);
		if (!sc) {
			err = -ENOENT;
			goto out_unlock;
		}
	}

	sc_ref = rcu_dereference_protected(ns->schema,
					   lockdep_is_held(&nsdata->lock));
	if (sc_ref)
		rcu_assign_pointer(sc_ref->ns, NULL);
	rcu_assign_pointer(ns->schema, sc);

	if (sc) {
		ns_ref = rcu_dereference_protected(sc->ns,
						   lockdep_is_held(&nsdata->lock));
		if (ns_ref)
			rcu_assign_pointer(ns_ref->schema, NULL);
		rcu_assign_pointer(sc->ns, ns);
	}

	err = 0;

out_unlock:
	mutex_unlock(&nsdata->lock);
	return err;
}

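/* Generic netlink operations, all restricted to GENL_ADMIN_PERM. They are
 * typically driven from userspace by iproute2's "ip ioam" subcommand, e.g.
 * (illustrative only, exact syntax depends on the iproute2 version):
 *
 *   ip ioam namespace add 123 data 0xdeadbeef
 *   ip ioam schema add 777 "some opaque schema data"
 *   ip ioam namespace set 123 schema 777
 */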
static const struct genl_ops ioam6_genl_ops[] = {
	{
		.cmd	= IOAM6_CMD_ADD_NAMESPACE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_addns,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_addns,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_addns) - 1,
	},
	{
		.cmd	= IOAM6_CMD_DEL_NAMESPACE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_delns,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_delns,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_delns) - 1,
	},
	{
		.cmd	= IOAM6_CMD_DUMP_NAMESPACES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.start	= ioam6_genl_dumpns_start,
		.dumpit	= ioam6_genl_dumpns,
		.done	= ioam6_genl_dumpns_done,
		.flags	= GENL_ADMIN_PERM,
	},
	{
		.cmd	= IOAM6_CMD_ADD_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_addsc,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_addsc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_addsc) - 1,
	},
	{
		.cmd	= IOAM6_CMD_DEL_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_delsc,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_delsc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_delsc) - 1,
	},
	{
		.cmd	= IOAM6_CMD_DUMP_SCHEMAS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.start	= ioam6_genl_dumpsc_start,
		.dumpit	= ioam6_genl_dumpsc,
		.done	= ioam6_genl_dumpsc_done,
		.flags	= GENL_ADMIN_PERM,
	},
	{
		.cmd	= IOAM6_CMD_NS_SET_SCHEMA,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= ioam6_genl_ns_set_schema,
		.flags	= GENL_ADMIN_PERM,
		.policy	= ioam6_genl_policy_ns_sc,
		.maxattr = ARRAY_SIZE(ioam6_genl_policy_ns_sc) - 1,
	},
};

#define IOAM6_GENL_EV_GRP_OFFSET 0

static const struct genl_multicast_group ioam6_mcgrps[] = {
	[IOAM6_GENL_EV_GRP_OFFSET] = { .name = IOAM6_GENL_EV_GRP_NAME,
				       .flags = GENL_MCAST_CAP_NET_ADMIN },
};

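/* Put the fields of a received trace (namespace ID, NodeLen, trace type and
 * the already-filled portion of the node data) into an event notification.
 */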
static int ioam6_event_put_trace(struct sk_buff *skb,
				 struct ioam6_trace_hdr *trace,
				 unsigned int len)
{
	if (nla_put_u16(skb, IOAM6_EVENT_ATTR_TRACE_NAMESPACE,
			be16_to_cpu(trace->namespace_id)) ||
	    nla_put_u8(skb, IOAM6_EVENT_ATTR_TRACE_NODELEN, trace->nodelen) ||
	    nla_put_u32(skb, IOAM6_EVENT_ATTR_TRACE_TYPE,
			be32_to_cpu(trace->type_be32)) ||
	    nla_put(skb, IOAM6_EVENT_ATTR_TRACE_DATA,
		    len - sizeof(struct ioam6_trace_hdr) - trace->remlen * 4,
		    trace->data + trace->remlen * 4))
		return 1;

	return 0;
}

void ioam6_event(enum ioam6_event_type type, struct net *net, gfp_t gfp,
		 void *opt, unsigned int opt_len)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	if (!genl_has_listeners(&ioam6_genl_family, net,
				IOAM6_GENL_EV_GRP_OFFSET))
		return;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!skb)
		return;

	nlh = genlmsg_put(skb, 0, 0, &ioam6_genl_family, 0, type);
	if (!nlh)
		goto nla_put_failure;

	switch (type) {
	case IOAM6_EVENT_UNSPEC:
		WARN_ON_ONCE(1);
		break;
	case IOAM6_EVENT_TRACE:
		if (ioam6_event_put_trace(skb, (struct ioam6_trace_hdr *)opt,
					  opt_len))
			goto nla_put_failure;
		break;
	}

	genlmsg_end(skb, nlh);
	genlmsg_multicast_netns(&ioam6_genl_family, net, skb, 0,
				IOAM6_GENL_EV_GRP_OFFSET, gfp);
	return;

nla_put_failure:
	nlmsg_free(skb);
}

static struct genl_family ioam6_genl_family __ro_after_init = {
	.name		= IOAM6_GENL_NAME,
	.version	= IOAM6_GENL_VERSION,
	.netnsok	= true,
	.parallel_ops	= true,
	.ops		= ioam6_genl_ops,
	.n_ops		= ARRAY_SIZE(ioam6_genl_ops),
	.resv_start_op	= IOAM6_CMD_NS_SET_SCHEMA + 1,
	.mcgrps		= ioam6_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(ioam6_mcgrps),
	.module		= THIS_MODULE,
};

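/* Lookup an IOAM namespace by its ID (network byte order) in the per-netns
 * rhashtable; used by the data path when processing a trace option.
 */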
struct ioam6_namespace *ioam6_namespace(struct net *net, __be16 id)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	return rhashtable_lookup_fast(&nsdata->namespaces, &id, rht_ns_params);
}

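/* Write this node's data at its slot in the pre-allocated trace space. Nodes
 * fill the option from the end towards the beginning, so the slot starts at
 * remlen * 4 minus this node's NodeLen (and schema length, if any) from the
 * start of the data area. Fields not supported here are filled with the
 * "unavailable" pattern.
 */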
static void __ioam6_fill_trace_data(struct sk_buff *skb,
				    struct ioam6_namespace *ns,
				    struct ioam6_trace_hdr *trace,
				    struct ioam6_schema *sc,
				    u8 sclen, bool is_input)
{
	struct timespec64 ts;
	ktime_t tstamp;
	u64 raw64;
	u32 raw32;
	u16 raw16;
	u8 *data;
	u8 byte;

	data = trace->data + trace->remlen * 4 - trace->nodelen * 4 - sclen * 4;

	/* hop_lim and node_id */
	if (trace->type.bit0) {
		byte = ipv6_hdr(skb)->hop_limit;
		if (is_input)
			byte--;

		raw32 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id;

		*(__be32 *)data = cpu_to_be32((byte << 24) | raw32);
		data += sizeof(__be32);
	}

	/* ingress_if_id and egress_if_id */
	if (trace->type.bit1) {
		if (!skb->dev)
			raw16 = IOAM6_U16_UNAVAILABLE;
		else
			raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id);

		*(__be16 *)data = cpu_to_be16(raw16);
		data += sizeof(__be16);

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
			raw16 = IOAM6_U16_UNAVAILABLE;
		else
			raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id);

		*(__be16 *)data = cpu_to_be16(raw16);
		data += sizeof(__be16);
	}

	/* timestamp seconds */
	if (trace->type.bit2) {
		if (!skb->dev) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			tstamp = skb_tstamp_cond(skb, true);
			ts = ktime_to_timespec64(tstamp);

			*(__be32 *)data = cpu_to_be32((u32)ts.tv_sec);
		}
		data += sizeof(__be32);
	}

	/* timestamp subseconds */
	if (trace->type.bit3) {
		if (!skb->dev) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			if (!trace->type.bit2) {
				tstamp = skb_tstamp_cond(skb, true);
				ts = ktime_to_timespec64(tstamp);
			}

			*(__be32 *)data = cpu_to_be32((u32)(ts.tv_nsec / NSEC_PER_USEC));
		}
		data += sizeof(__be32);
	}

	/* transit delay */
	if (trace->type.bit4) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* namespace data */
	if (trace->type.bit5) {
		*(__be32 *)data = ns->data;
		data += sizeof(__be32);
	}

	/* queue depth */
	if (trace->type.bit6) {
		struct netdev_queue *queue;
		struct Qdisc *qdisc;
		__u32 qlen, backlog;

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		} else {
			queue = skb_get_tx_queue(skb_dst(skb)->dev, skb);
			qdisc = rcu_dereference(queue->qdisc);
			qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);

			*(__be32 *)data = cpu_to_be32(backlog);
		}
		data += sizeof(__be32);
	}

	/* checksum complement */
	if (trace->type.bit7) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* hop_lim and node_id (wide) */
	if (trace->type.bit8) {
		byte = ipv6_hdr(skb)->hop_limit;
		if (is_input)
			byte--;

		raw64 = dev_net(skb_dst(skb)->dev)->ipv6.sysctl.ioam6_id_wide;

		*(__be64 *)data = cpu_to_be64(((u64)byte << 56) | raw64);
		data += sizeof(__be64);
	}

	/* ingress_if_id and egress_if_id (wide) */
	if (trace->type.bit9) {
		if (!skb->dev)
			raw32 = IOAM6_U32_UNAVAILABLE;
		else
			raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide);

		*(__be32 *)data = cpu_to_be32(raw32);
		data += sizeof(__be32);

		if (skb_dst(skb)->dev->flags & IFF_LOOPBACK)
			raw32 = IOAM6_U32_UNAVAILABLE;
		else
			raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide);

		*(__be32 *)data = cpu_to_be32(raw32);
		data += sizeof(__be32);
	}

	/* namespace data (wide) */
	if (trace->type.bit10) {
		*(__be64 *)data = ns->data_wide;
		data += sizeof(__be64);
	}

	/* buffer occupancy */
	if (trace->type.bit11) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit12 undefined: filled with empty value */
	if (trace->type.bit12) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit13 undefined: filled with empty value */
	if (trace->type.bit13) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit14 undefined: filled with empty value */
	if (trace->type.bit14) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit15 undefined: filled with empty value */
	if (trace->type.bit15) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit16 undefined: filled with empty value */
	if (trace->type.bit16) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit17 undefined: filled with empty value */
	if (trace->type.bit17) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit18 undefined: filled with empty value */
	if (trace->type.bit18) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit19 undefined: filled with empty value */
	if (trace->type.bit19) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit20 undefined: filled with empty value */
	if (trace->type.bit20) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* bit21 undefined: filled with empty value */
	if (trace->type.bit21) {
		*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
		data += sizeof(__be32);
	}

	/* opaque state snapshot */
	if (trace->type.bit22) {
		if (!sc) {
			*(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE >> 8);
		} else {
			*(__be32 *)data = sc->hdr;
			data += sizeof(__be32);

			memcpy(data, sc->data, sc->len);
		}
	}
}

/* called with rcu_read_lock() */
void ioam6_fill_trace_data(struct sk_buff *skb,
			   struct ioam6_namespace *ns,
			   struct ioam6_trace_hdr *trace,
			   bool is_input)
{
	struct ioam6_schema *sc;
	u8 sclen = 0;

	/* Skip if Overflow flag is set
	 */
	if (trace->overflow)
		return;

	/* NodeLen does not include Opaque State Snapshot length. We need to
	 * take it into account if the corresponding bit is set (bit 22) and
	 * if the current IOAM namespace has an active schema attached to it
	 */
	sc = rcu_dereference(ns->schema);
	if (trace->type.bit22) {
		sclen = sizeof_field(struct ioam6_schema, hdr) / 4;

		if (sc)
			sclen += sc->len / 4;
	}

	/* If there is no space remaining, we set the Overflow flag and we
	 * skip without filling the trace
	 */
	if (!trace->remlen || trace->remlen < trace->nodelen + sclen) {
		trace->overflow = 1;
		return;
	}

	__ioam6_fill_trace_data(skb, ns, trace, sc, sclen, is_input);
	trace->remlen -= trace->nodelen + sclen;
}

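/* Per-network-namespace setup/teardown of the IOAM state: one mutex-protected
 * pair of rhashtables, for IOAM namespaces and for schemas.
 */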
static int __net_init ioam6_net_init(struct net *net)
{
	struct ioam6_pernet_data *nsdata;
	int err = -ENOMEM;

	nsdata = kzalloc(sizeof(*nsdata), GFP_KERNEL);
	if (!nsdata)
		goto out;

	mutex_init(&nsdata->lock);
	net->ipv6.ioam6_data = nsdata;

	err = rhashtable_init(&nsdata->namespaces, &rht_ns_params);
	if (err)
		goto free_nsdata;

	err = rhashtable_init(&nsdata->schemas, &rht_sc_params);
	if (err)
		goto free_rht_ns;

out:
	return err;
free_rht_ns:
	rhashtable_destroy(&nsdata->namespaces);
free_nsdata:
	kfree(nsdata);
	net->ipv6.ioam6_data = NULL;
	goto out;
}

static void __net_exit ioam6_net_exit(struct net *net)
{
	struct ioam6_pernet_data *nsdata = ioam6_pernet(net);

	rhashtable_free_and_destroy(&nsdata->namespaces, ioam6_free_ns, NULL);
	rhashtable_free_and_destroy(&nsdata->schemas, ioam6_free_sc, NULL);

	kfree(nsdata);
}

static struct pernet_operations ioam6_net_ops = {
	.init = ioam6_net_init,
	.exit = ioam6_net_exit,
};

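/* Module init/exit: register the pernet subsystem, the generic netlink
 * family and, when CONFIG_IPV6_IOAM6_LWTUNNEL is enabled, the IOAM6
 * lightweight tunnel used for trace insertion on output.
 */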
int __init ioam6_init(void)
{
	int err = register_pernet_subsys(&ioam6_net_ops);
	if (err)
		goto out;

	err = genl_register_family(&ioam6_genl_family);
	if (err)
		goto out_unregister_pernet_subsys;

#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
	err = ioam6_iptunnel_init();
	if (err)
		goto out_unregister_genl;
#endif

	pr_info("In-situ OAM (IOAM) with IPv6\n");

out:
	return err;
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
out_unregister_genl:
	genl_unregister_family(&ioam6_genl_family);
#endif
out_unregister_pernet_subsys:
	unregister_pernet_subsys(&ioam6_net_ops);
	goto out;
}

void ioam6_exit(void)
{
#ifdef CONFIG_IPV6_IOAM6_LWTUNNEL
	ioam6_iptunnel_exit();
#endif
	genl_unregister_family(&ioam6_genl_family);
	unregister_pernet_subsys(&ioam6_net_ops);
}