// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
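
/* A minimal sketch of the load-then-latch pattern implemented above,
 * assuming the ordering used by ixgbe_ipsec_set_rx_sa() below: first
 * load the input registers, then have ixgbe_ipsec_set_rx_item() latch
 * them into the selected table slot.
 *
 *	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, spi);	   // load input regs
 *	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
 *	IXGBE_WRITE_FLUSH(hw);
 *	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);  // latch to table
 */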

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}
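
/* Note on the addr[] layout: IPv6 addresses fill all four words, while
 * the IPv4 path in ixgbe_ipsec_add_sa() below copies the address into
 * word 3 and leaves words 0-2 zeroed, so this one helper serves both
 * address families.
 */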

/**
 * ixgbe_ipsec_clear_hw_tables - scrub the tables that don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - the IP table is shorter than the SA tables,
	 * so split the loops at its limit
	 */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}

/**
 * ixgbe_ipsec_stop_data - stop the ipsec Tx and Rx data paths
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	IXGBE_WRITE_FLUSH(hw);

	/* If the Tx FIFO still has data but there is no link, the Tx sec
	 * block can't drain.  Set MAC loopback before the block clear so
	 * the FIFO can empty.
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!t_rdy && !r_rdy && limit--);
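
	/* at 20-some iterations of 10 ms each, the loop above gives the
	 * paths roughly 200 ms to drain before we give up waiting
	 */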

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

/**
 * ixgbe_ipsec_stop_engine - disable the ipsec offload engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine - configure and enable the ipsec offload engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);

		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		      (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				       sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();
	return ret;
}
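
/* The hash walked above is keyed on the SPI alone (see the matching
 * hash_add_rcu(..., rsa.xs->id.spi) in ixgbe_ipsec_add_sa()), so the
 * extra daddr and proto compares are what disambiguate any two SAs
 * that happen to share an SPI value.
 */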

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * A key_len of 160 bits accounts for the 16 byte key plus the
	 * 4 byte salt.
	 */
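	/* Illustrative layout of the 160 bit case handled below, assuming
	 * the rfc4106(gcm(aes)) blob checked above:
	 *
	 *	key_data[0..15]  - 128 bit AES-GCM key -> mykey
	 *	key_data[16..19] - 32 bit salt         -> ((u32 *)key_data)[4]
	 */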
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}
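
/* For reference, a hypothetical SA that would pass the checks above
 * could be created from userspace with something like this iproute2
 * invocation (addresses, SPI, and key bytes are made up):
 *
 *	ip xfrm state add src 10.0.0.2 dst 10.0.0.1 \
 *		proto esp spi 0x1234 mode transport \
 *		aead 'rfc4106(gcm(aes))' 0x<16 key bytes><4 salt bytes> 128 \
 *		offload dev eth0 dir in
 *
 * where the 16+4 byte key blob gives the 160 bit alg_key_len case and
 * the trailing 128 is the ICV length matching IXGBE_IPSEC_AUTH_BITS.
 */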

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}
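
/* The offload_handle stored by ixgbe_ipsec_add_sa() is the one piece of
 * state handed back to the xfrm stack: the table slot sa_idx plus the
 * table's base index.  ixgbe_ipsec_del_sa() and ixgbe_ipsec_tx() below
 * recover the slot by subtracting that same base.
 */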

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}
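
/* A concrete example of the ihl test above: ihl counts the header in
 * 32 bit words, so a bare 20 byte IPv4 header reads ihl == 5; any
 * larger value means IP options are present and the packet stays on
 * the software path.
 */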

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
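		/* Worked example with made-up padding: authlen 16 and a
		 * padlen byte of 6 gives trailer_len = 16 + 2 + 6 = 24,
		 * rather than the static 21 noted above.
		 */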
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume no VLAN header is in the way, because the
	 * hardware won't recognize the IPsec packet behind one, and
	 * the vlan device doesn't currently support xfrm offload anyway.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}
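
/* With CRYPTO_DONE and CRYPTO_SUCCESS set above, the xfrm input path
 * treats this packet as already authenticated and decrypted by the
 * hardware and skips the software crypto step for the matched state.
 */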

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec;
	size_t size;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

#define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBE_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}
943