// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>

struct nft_cmp_expr {
	struct nft_data		data;
	u8			sreg;
	u8			len;
	enum nft_cmp_ops	op:8;
};

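/*
 * Evaluate the comparison: memcmp() the first priv->len bytes of the
 * source register against the constant. Values are kept in network byte
 * order, so the lexicographic memcmp() result also gives the numeric
 * ordering needed by LT/LTE/GT/GTE. The LT and GT cases first rule out
 * equality, then fall through to the LTE/GTE check. On mismatch the
 * verdict is set to NFT_BREAK, which stops evaluating the current rule.
 */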
void nft_cmp_eval(const struct nft_expr *expr,
		  struct nft_regs *regs,
		  const struct nft_pktinfo *pkt)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);
	int d;

	d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
	switch (priv->op) {
	case NFT_CMP_EQ:
		if (d != 0)
			goto mismatch;
		break;
	case NFT_CMP_NEQ:
		if (d == 0)
			goto mismatch;
		break;
	case NFT_CMP_LT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_LTE:
		if (d > 0)
			goto mismatch;
		break;
	case NFT_CMP_GT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_GTE:
		if (d < 0)
			goto mismatch;
		break;
	}
	return;

mismatch:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
	[NFTA_CMP_SREG]		= { .type = NLA_U32 },
	[NFTA_CMP_OP]		= { .type = NLA_U32 },
	[NFTA_CMP_DATA]		= { .type = NLA_NESTED },
};

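/*
 * Parse NFTA_CMP_DATA into priv->data, validate that the source
 * register can supply desc.len bytes, then record the operator and
 * comparison length.
 */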
static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
			const struct nlattr * const tb[])
{
	struct nft_cmp_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(priv->data),
	};
	int err;

	err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	priv->op  = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	priv->len = desc.len;
	return 0;
}

static int nft_cmp_dump(struct sk_buff *skb,
			const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

union nft_cmp_offload_data {
	u16	val16;
	u32	val32;
	u64	val64;
};

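/*
 * Byte-swap a 2, 4 or 8 byte big-endian constant to host byte order,
 * for source registers flagged NFT_OFFLOAD_F_NETWORK2HOST below.
 */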
static void nft_payload_n2h(union nft_cmp_offload_data *data,
			    const u8 *val, u32 len)
{
	switch (len) {
	case 2:
		data->val16 = ntohs(*((__be16 *)val));
		break;
	case 4:
		data->val32 = ntohl(*((__be32 *)val));
		break;
	case 8:
		data->val64 = be64_to_cpu(*((__be64 *)val));
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

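/*
 * Populate the flow rule match for this comparison. Only NFT_CMP_EQ
 * within the bounds of the source register can be offloaded: the
 * constant becomes the match key and the register's mask becomes the
 * match mask, at the offset registered by the source expression.
 */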
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
			     struct nft_flow_rule *flow,
			     const struct nft_cmp_expr *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
	union nft_cmp_offload_data _data, _datamask;
	u8 *mask = (u8 *)&flow->match.mask;
	u8 *key = (u8 *)&flow->match.key;
	u8 *data, *datamask;

	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
		return -EOPNOTSUPP;

	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
		data = (u8 *)&_data;
		datamask = (u8 *)&_datamask;
	} else {
		data = (u8 *)&priv->data;
		datamask = (u8 *)&reg->mask;
	}

	memcpy(key + reg->offset, data, reg->len);
	memcpy(mask + reg->offset, datamask, reg->len);

	flow->match.dissector.used_keys |= BIT_ULL(reg->key);
	flow->match.dissector.offset[reg->key] = reg->base_offset;

	if (reg->key == FLOW_DISSECTOR_KEY_META &&
	    reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
	    nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	nft_offload_update_dependency(ctx, &priv->data, reg->len);

	return 0;
}

static int nft_cmp_offload(struct nft_offload_ctx *ctx,
			   struct nft_flow_rule *flow,
			   const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	return __nft_cmp_offload(ctx, flow, priv);
}

static const struct nft_expr_ops nft_cmp_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
	.eval		= nft_cmp_eval,
	.init		= nft_cmp_init,
	.dump		= nft_cmp_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp_offload,
};

/* Calculate the mask for the nft_cmp_fast expression. On big endian the
 * mask needs to include the *upper* bytes when interpreting that data as
 * something smaller than the full u32, therefore a cpu_to_le32 is done.
 */
static u32 nft_cmp_fast_mask(unsigned int len)
{
	__le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
					  data) * BITS_PER_BYTE - len));

	return (__force u32)mask;
}

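/*
 * Fast variant for EQ/NEQ on values of up to 32 bits: the constant is
 * pre-masked and stored in a single u32, and priv->len is kept in bits.
 */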
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
			     const struct nft_expr *expr,
			     const struct nlattr * const tb[])
{
	struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data data;
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(data),
	};
	int err;

	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	desc.len *= BITS_PER_BYTE;

	priv->mask = nft_cmp_fast_mask(desc.len);
	priv->data = data.data[0] & priv->mask;
	priv->len  = desc.len;
	priv->inv  = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
	return 0;
}

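/*
 * Offload by rebuilding the equivalent generic nft_cmp_expr and
 * delegating to __nft_cmp_offload().
 */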
static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
				struct nft_flow_rule *flow,
				const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= {
			.data	= {
				[0] = priv->data,
			},
		},
		.sreg	= priv->sreg,
		.len	= priv->len / BITS_PER_BYTE,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp_fast_dump(struct sk_buff *skb,
			     const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
	struct nft_data data;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	data.data[0] = priv->data;
	if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
			  NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp_fast_init,
	.dump		= nft_cmp_fast_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp_fast_offload,
};

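/*
 * Build the 16 byte mask for nft_cmp16_fast: whole 32-bit words become
 * all-ones, a trailing partial word gets a partial mask from
 * nft_cmp_mask() (same byte order trick as nft_cmp_fast_mask() above),
 * and any remaining words are cleared.
 */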
static u32 nft_cmp_mask(u32 bitlen)
{
	return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
}

static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
{
	int len = bitlen / BITS_PER_BYTE;
	int i, words = len / sizeof(u32);

	for (i = 0; i < words; i++) {
		data->data[i] = 0xffffffff;
		bitlen -= sizeof(u32) * BITS_PER_BYTE;
	}

	if (len % sizeof(u32))
		data->data[i++] = nft_cmp_mask(bitlen);

	for (; i < 4; i++)
		data->data[i] = 0;
}

static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(priv->data),
	};
	int err;

	err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
	priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
	priv->len = desc.len;

	return 0;
}

static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_expr *expr)
{
	const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= priv->data,
		.sreg	= priv->sreg,
		.len	= priv->len,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp16_fast_dump(struct sk_buff *skb,
			       const struct nft_expr *expr, bool reset)
{
	const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp16_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp16_fast_init,
	.dump		= nft_cmp16_fast_dump,
	.reduce		= NFT_REDUCE_READONLY,
	.offload	= nft_cmp16_fast_offload,
};

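/*
 * Select the implementation: nft_cmp_fast_ops handles EQ/NEQ on up to
 * four bytes, nft_cmp16_fast_ops handles EQ/NEQ on up to 16 bytes from
 * a 128-bit register or an even-numbered 32-bit register, and the
 * generic nft_cmp_ops covers everything else.
 */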
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
	struct nft_data data;
	struct nft_data_desc desc = {
		.type	= NFT_DATA_VALUE,
		.size	= sizeof(data),
	};
	enum nft_cmp_ops op;
	u8 sreg;
	int err;

	if (tb[NFTA_CMP_SREG] == NULL ||
	    tb[NFTA_CMP_OP] == NULL ||
	    tb[NFTA_CMP_DATA] == NULL)
		return ERR_PTR(-EINVAL);

	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	switch (op) {
	case NFT_CMP_EQ:
	case NFT_CMP_NEQ:
	case NFT_CMP_LT:
	case NFT_CMP_LTE:
	case NFT_CMP_GT:
	case NFT_CMP_GTE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
	if (err < 0)
		return ERR_PTR(err);

	sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));

	if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
		if (desc.len <= sizeof(u32))
			return &nft_cmp_fast_ops;
		else if (desc.len <= sizeof(data) &&
			 ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
			  (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
			return &nft_cmp16_fast_ops;
	}
	return &nft_cmp_ops;
}

struct nft_expr_type nft_cmp_type __read_mostly = {
	.name		= "cmp",
	.select_ops	= nft_cmp_select_ops,
	.policy		= nft_cmp_policy,
	.maxattr	= NFTA_CMP_MAX,
	.owner		= THIS_MODULE,
};