// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>

struct nft_byteorder {
	u8			sreg;
	u8			dreg;
	enum nft_byteorder_ops	op:8;
	u8			len;
	u8			size;
};

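/*
 * Convert the data held in the source register(s) between host and
 * network byte order, working in chunks of priv->size (2, 4 or 8)
 * bytes, and store the result in the destination register(s).
 * Not static: the nf_tables core can call this built-in expression
 * directly.
 */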
void nft_byteorder_eval(const struct nft_expr *expr,
			struct nft_regs *regs,
			const struct nft_pktinfo *pkt)
{
	const struct nft_byteorder *priv = nft_expr_priv(expr);
	u32 *src = &regs->data[priv->sreg];
	u32 *dst = &regs->data[priv->dreg];
	u16 *s16, *d16;
	unsigned int i;

	s16 = (void *)src;
	d16 = (void *)dst;

	switch (priv->size) {
	case 8: {
		u64 *dst64 = (void *)dst;
		u64 src64;

		switch (priv->op) {
		case NFT_BYTEORDER_NTOH:
			for (i = 0; i < priv->len / 8; i++) {
				src64 = nft_reg_load64(&src[i]);
				nft_reg_store64(&dst64[i],
						be64_to_cpu((__force __be64)src64));
			}
			break;
		case NFT_BYTEORDER_HTON:
			for (i = 0; i < priv->len / 8; i++) {
				src64 = (__force __u64)
					cpu_to_be64(nft_reg_load64(&src[i]));
				nft_reg_store64(&dst64[i], src64);
			}
			break;
		}
		break;
	}
	case 4:
		switch (priv->op) {
		case NFT_BYTEORDER_NTOH:
			for (i = 0; i < priv->len / 4; i++)
				dst[i] = ntohl((__force __be32)src[i]);
			break;
		case NFT_BYTEORDER_HTON:
			for (i = 0; i < priv->len / 4; i++)
				dst[i] = (__force __u32)htonl(src[i]);
			break;
		}
		break;
	case 2:
		switch (priv->op) {
		case NFT_BYTEORDER_NTOH:
			for (i = 0; i < priv->len / 2; i++)
				d16[i] = ntohs((__force __be16)s16[i]);
			break;
		case NFT_BYTEORDER_HTON:
			for (i = 0; i < priv->len / 2; i++)
				d16[i] = (__force __u16)htons(s16[i]);
			break;
		}
		break;
	}
}

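/* Netlink attribute policy: op, len and size are big-endian u32s capped at 255. */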
static const struct nla_policy nft_byteorder_policy[NFTA_BYTEORDER_MAX + 1] = {
	[NFTA_BYTEORDER_SREG]	= { .type = NLA_U32 },
	[NFTA_BYTEORDER_DREG]	= { .type = NLA_U32 },
	[NFTA_BYTEORDER_OP]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_BYTEORDER_LEN]	= NLA_POLICY_MAX(NLA_BE32, 255),
	[NFTA_BYTEORDER_SIZE]	= NLA_POLICY_MAX(NLA_BE32, 255),
};

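/*
 * Validate the configuration from userspace: the operation must be
 * NTOH or HTON, the chunk size must be 2, 4 or 8 bytes, and both the
 * source and destination registers must cover priv->len bytes.
 */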
static int nft_byteorder_init(const struct nft_ctx *ctx,
			      const struct nft_expr *expr,
			      const struct nlattr * const tb[])
{
	struct nft_byteorder *priv = nft_expr_priv(expr);
	u32 size, len;
	int err;

	if (tb[NFTA_BYTEORDER_SREG] == NULL ||
	    tb[NFTA_BYTEORDER_DREG] == NULL ||
	    tb[NFTA_BYTEORDER_LEN] == NULL ||
	    tb[NFTA_BYTEORDER_SIZE] == NULL ||
	    tb[NFTA_BYTEORDER_OP] == NULL)
		return -EINVAL;

	priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));
	switch (priv->op) {
	case NFT_BYTEORDER_NTOH:
	case NFT_BYTEORDER_HTON:
		break;
	default:
		return -EINVAL;
	}

	err = nft_parse_u32_check(tb[NFTA_BYTEORDER_SIZE], U8_MAX, &size);
	if (err < 0)
		return err;

	priv->size = size;

	switch (priv->size) {
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	err = nft_parse_u32_check(tb[NFTA_BYTEORDER_LEN], U8_MAX, &len);
	if (err < 0)
		return err;

	priv->len = len;

	err = nft_parse_register_load(ctx, tb[NFTA_BYTEORDER_SREG], &priv->sreg,
				      priv->len);
	if (err < 0)
		return err;

	return nft_parse_register_store(ctx, tb[NFTA_BYTEORDER_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

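/* Mirror the parsed configuration back to userspace. */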
static int nft_byteorder_dump(struct sk_buff *skb,
			      const struct nft_expr *expr, bool reset)
{
	const struct nft_byteorder *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_BYTEORDER_SREG, priv->sreg))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_BYTEORDER_DREG, priv->dreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_BYTEORDER_OP, htonl(priv->op)))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_BYTEORDER_LEN, htonl(priv->len)))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_BYTEORDER_SIZE, htonl(priv->size)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

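/*
 * Register tracking: after a byte-order conversion the destination
 * register no longer matches any tracked source expression, so cancel
 * tracking for it; returning false means this expression cannot be
 * elided.
 */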
static bool nft_byteorder_reduce(struct nft_regs_track *track,
				 const struct nft_expr *expr)
{
	struct nft_byteorder *priv = nft_expr_priv(expr);

	nft_reg_track_cancel(track, priv->dreg, priv->len);

	return false;
}

static const struct nft_expr_ops nft_byteorder_ops = {
	.type		= &nft_byteorder_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_byteorder)),
	.eval		= nft_byteorder_eval,
	.init		= nft_byteorder_init,
	.dump		= nft_byteorder_dump,
	.reduce		= nft_byteorder_reduce,
};

struct nft_expr_type nft_byteorder_type __read_mostly = {
	.name		= "byteorder",
	.ops		= &nft_byteorder_ops,
	.policy		= nft_byteorder_policy,
	.maxattr	= NFTA_BYTEORDER_MAX,
	.owner		= THIS_MODULE,
};