xref: /linux/drivers/net/ethernet/intel/ice/ice_flex_pipe.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice_common.h"
5 #include "ice_flex_pipe.h"
6 #include "ice_flow.h"
7 #include "ice.h"
8 
9 static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
10 	/* SWITCH */
11 	{
12 		ICE_SID_XLT0_SW,
13 		ICE_SID_XLT_KEY_BUILDER_SW,
14 		ICE_SID_XLT1_SW,
15 		ICE_SID_XLT2_SW,
16 		ICE_SID_PROFID_TCAM_SW,
17 		ICE_SID_PROFID_REDIR_SW,
18 		ICE_SID_FLD_VEC_SW,
19 		ICE_SID_CDID_KEY_BUILDER_SW,
20 		ICE_SID_CDID_REDIR_SW
21 	},
22 
23 	/* ACL */
24 	{
25 		ICE_SID_XLT0_ACL,
26 		ICE_SID_XLT_KEY_BUILDER_ACL,
27 		ICE_SID_XLT1_ACL,
28 		ICE_SID_XLT2_ACL,
29 		ICE_SID_PROFID_TCAM_ACL,
30 		ICE_SID_PROFID_REDIR_ACL,
31 		ICE_SID_FLD_VEC_ACL,
32 		ICE_SID_CDID_KEY_BUILDER_ACL,
33 		ICE_SID_CDID_REDIR_ACL
34 	},
35 
36 	/* FD */
37 	{
38 		ICE_SID_XLT0_FD,
39 		ICE_SID_XLT_KEY_BUILDER_FD,
40 		ICE_SID_XLT1_FD,
41 		ICE_SID_XLT2_FD,
42 		ICE_SID_PROFID_TCAM_FD,
43 		ICE_SID_PROFID_REDIR_FD,
44 		ICE_SID_FLD_VEC_FD,
45 		ICE_SID_CDID_KEY_BUILDER_FD,
46 		ICE_SID_CDID_REDIR_FD
47 	},
48 
49 	/* RSS */
50 	{
51 		ICE_SID_XLT0_RSS,
52 		ICE_SID_XLT_KEY_BUILDER_RSS,
53 		ICE_SID_XLT1_RSS,
54 		ICE_SID_XLT2_RSS,
55 		ICE_SID_PROFID_TCAM_RSS,
56 		ICE_SID_PROFID_REDIR_RSS,
57 		ICE_SID_FLD_VEC_RSS,
58 		ICE_SID_CDID_KEY_BUILDER_RSS,
59 		ICE_SID_CDID_REDIR_RSS
60 	},
61 
62 	/* PE */
63 	{
64 		ICE_SID_XLT0_PE,
65 		ICE_SID_XLT_KEY_BUILDER_PE,
66 		ICE_SID_XLT1_PE,
67 		ICE_SID_XLT2_PE,
68 		ICE_SID_PROFID_TCAM_PE,
69 		ICE_SID_PROFID_REDIR_PE,
70 		ICE_SID_FLD_VEC_PE,
71 		ICE_SID_CDID_KEY_BUILDER_PE,
72 		ICE_SID_CDID_REDIR_PE
73 	}
74 };
75 
76 /**
77  * ice_sect_id - returns section ID
78  * @blk: block type
79  * @sect: section type
80  *
81  * This helper function returns the proper section ID given a block type and a
82  * section type.
83  */
84 static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
85 {
86 	return ice_sect_lkup[blk][sect];
87 }
88 
89 /**
90  * ice_hw_ptype_ena - check if the PTYPE is enabled or not
91  * @hw: pointer to the HW structure
92  * @ptype: the hardware PTYPE
93  */
94 bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
95 {
96 	return ptype < ICE_FLOW_PTYPE_MAX &&
97 	       test_bit(ptype, hw->hw_ptype);
98 }
99 
100 /* Key creation */
101 
102 #define ICE_DC_KEY	0x1	/* don't care */
103 #define ICE_DC_KEYINV	0x1
104 #define ICE_NM_KEY	0x0	/* never match */
105 #define ICE_NM_KEYINV	0x0
106 #define ICE_0_KEY	0x1	/* match 0 */
107 #define ICE_0_KEYINV	0x0
108 #define ICE_1_KEY	0x0	/* match 1 */
109 #define ICE_1_KEYINV	0x1
110 
111 /**
112  * ice_gen_key_word - generate 16 bits of a key/mask word
113  * @val: the value
114  * @valid: valid bits mask (change only the valid bits)
115  * @dont_care: don't care mask
116  * @nvr_mtch: never match mask
117  * @key: pointer to where the resulting key portion will be stored
118  * @key_inv: pointer to where the resulting key invert portion will be stored
119  *
120  * This function generates 16 bits from an 8-bit value, an 8-bit don't care
121  * mask and an 8-bit never match mask. The 16 bits of output are divided into
122  * 8 bits of key and 8 bits of key invert.
123  *
124  *     '0' =    b01, always match a 0 bit
125  *     '1' =    b10, always match a 1 bit
126  *     '?' =    b11, don't care bit (always matches)
127  *     '~' =    b00, never match bit
128  *
129  * Input:
130  *          val:         b0  1  0  1  0  1
131  *          dont_care:   b0  0  1  1  0  0
132  *          never_mtch:  b0  0  0  0  1  1
133  *          ------------------------------
134  * Result:  key:        b01 10 11 11 00 00
135  */
136 static int
137 ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
138 		 u8 *key_inv)
139 {
140 	u8 in_key = *key, in_key_inv = *key_inv;
141 	u8 i;
142 
143 	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
144 	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
145 		return -EIO;
146 
147 	*key = 0;
148 	*key_inv = 0;
149 
150 	/* encode the 8 bits into 8-bit key and 8-bit key invert */
151 	for (i = 0; i < 8; i++) {
152 		*key >>= 1;
153 		*key_inv >>= 1;
154 
155 		if (!(valid & 0x1)) { /* change only valid bits */
156 			*key |= (in_key & 0x1) << 7;
157 			*key_inv |= (in_key_inv & 0x1) << 7;
158 		} else if (dont_care & 0x1) { /* don't care bit */
159 			*key |= ICE_DC_KEY << 7;
160 			*key_inv |= ICE_DC_KEYINV << 7;
161 		} else if (nvr_mtch & 0x1) { /* never match bit */
162 			*key |= ICE_NM_KEY << 7;
163 			*key_inv |= ICE_NM_KEYINV << 7;
164 		} else if (val & 0x01) { /* exact 1 match */
165 			*key |= ICE_1_KEY << 7;
166 			*key_inv |= ICE_1_KEYINV << 7;
167 		} else { /* exact 0 match */
168 			*key |= ICE_0_KEY << 7;
169 			*key_inv |= ICE_0_KEYINV << 7;
170 		}
171 
172 		dont_care >>= 1;
173 		nvr_mtch >>= 1;
174 		valid >>= 1;
175 		val >>= 1;
176 		in_key >>= 1;
177 		in_key_inv >>= 1;
178 	}
179 
180 	return 0;
181 }
182 
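/* Illustrative sketch (not part of this file): the two-bit encoding above can
 * be exercised on the host. This standalone demo mirrors the per-bit selection
 * in ice_gen_key_word() for one byte (the 'valid' mask handling is omitted for
 * brevity); all demo_ names are made up.
 */
#include <stdint.h>
#include <stdio.h>

static void demo_gen_key_byte(uint8_t val, uint8_t dc, uint8_t nm,
			      uint8_t *key, uint8_t *key_inv)
{
	*key = 0;
	*key_inv = 0;

	for (int i = 0; i < 8; i++) {
		uint8_t k, kinv;

		if (dc & 1) {			/* '?' don't care  -> b11 */
			k = 1; kinv = 1;
		} else if (nm & 1) {		/* '~' never match -> b00 */
			k = 0; kinv = 0;
		} else if (val & 1) {		/* '1' exact match -> b10 */
			k = 0; kinv = 1;
		} else {			/* '0' exact match -> b01 */
			k = 1; kinv = 0;
		}

		*key = (*key >> 1) | (uint8_t)(k << 7);
		*key_inv = (*key_inv >> 1) | (uint8_t)(kinv << 7);
		val >>= 1; dc >>= 1; nm >>= 1;
	}
}

int main(void)
{
	uint8_t key, key_inv;

	/* one reading of the example in the comment above (leftmost printed
	 * bit taken as MSB): val b010101, dont_care b001100, never b000011
	 */
	demo_gen_key_byte(0x15, 0x0C, 0x03, &key, &key_inv);
	printf("key=0x%02x key_inv=0x%02x\n", key, key_inv);
	return 0;
}
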
183 /**
184  * ice_bits_max_set - determine if the number of bits set is within a maximum
185  * @mask: pointer to the byte array which is the mask
186  * @size: the number of bytes in the mask
187  * @max: the max number of set bits
188  *
189  * This function determines whether at most 'max' bits are set in an
190  * array. Returns true if the number of bits set is <= max, false
191  * otherwise.
192  */
193 static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
194 {
195 	u16 count = 0;
196 	u16 i;
197 
198 	/* check each byte */
199 	for (i = 0; i < size; i++) {
200 		/* if 0, go to next byte */
201 		if (!mask[i])
202 			continue;
203 
204 		/* We know there is at least one set bit in this byte because of
205 		 * the above check; if we already have found 'max' number of
206 		 * bits set, then we can return failure now.
207 		 */
208 		if (count == max)
209 			return false;
210 
211 		/* count the bits in this byte, checking threshold */
212 		count += hweight8(mask[i]);
213 		if (count > max)
214 			return false;
215 	}
216 
217 	return true;
218 }
219 
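/* Illustrative sketch (not part of this file): the same "at most max bits
 * set" check written on the host with __builtin_popcount() (GCC/Clang) in
 * place of hweight8(), to make the early-exit behavior concrete.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool demo_bits_max_set(const uint8_t *mask, size_t size,
			      unsigned int max)
{
	unsigned int count = 0;

	for (size_t i = 0; i < size; i++) {
		count += (unsigned int)__builtin_popcount(mask[i]);
		if (count > max)	/* bail out once the budget is blown */
			return false;
	}

	return true;
}
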
220 /**
221  * ice_set_key - generate a variable-sized key in multiples of 16 bits
222  * @key: pointer to where the key will be stored
223  * @size: the size of the complete key in bytes (must be even)
224  * @val: array of 8-bit values that makes up the value portion of the key
225  * @upd: array of 8-bit masks that determine what key portion to update
226  * @dc: array of 8-bit masks that make up the don't care mask
227  * @nm: array of 8-bit masks that make up the never match mask
228  * @off: the offset of the first byte in the key to update
229  * @len: the number of bytes in the key update
230  *
231  * This function generates a key from a value, a don't care mask and a never
232  * match mask.
233  * upd, dc, and nm are optional parameters, and can be NULL:
234  *	upd == NULL --> upd mask is all 1's (update all bits)
235  *	dc == NULL --> dc mask is all 0's (no don't care bits)
236  *	nm == NULL --> nm mask is all 0's (no never match bits)
237  */
238 static int
239 ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
240 	    u16 len)
241 {
242 	u16 half_size;
243 	u16 i;
244 
245 	/* size must be a multiple of 2 bytes. */
246 	if (size % 2)
247 		return -EIO;
248 
249 	half_size = size / 2;
250 	if (off + len > half_size)
251 		return -EIO;
252 
253 	/* Make sure at most one bit is set in the never match mask. Having more
254 	 * than one never match mask bit set will cause HW to consume excessive
255 	 * power; this is a power management efficiency check.
256 	 */
257 #define ICE_NVR_MTCH_BITS_MAX	1
258 	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
259 		return -EIO;
260 
261 	for (i = 0; i < len; i++)
262 		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
263 				     dc ? dc[i] : 0, nm ? nm[i] : 0,
264 				     key + off + i, key + half_size + off + i))
265 			return -EIO;
266 
267 	return 0;
268 }
269 
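/* Illustrative sketch (not part of this file): ice_set_key() splits the
 * output buffer in half -- key bytes land at key[off..], invert bytes at
 * key[half_size + off..]. A host-side model of that layout with made-up
 * sizes and byte values:
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t key[16];		/* even size: 8 key + 8 invert bytes */
	const unsigned int half = sizeof(key) / 2;
	const unsigned int off = 4, len = 2;

	memset(key, 0, sizeof(key));
	assert(off + len <= half);	/* mirrors the -EIO bounds check */

	for (unsigned int i = 0; i < len; i++) {
		key[off + i] = 0xEC;		/* key portion (example value)    */
		key[half + off + i] = 0x1C;	/* invert portion (example value) */
	}

	return 0;
}
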
270 /**
271  * ice_acquire_change_lock
272  * @hw: pointer to the HW structure
273  * @access: access type (read or write)
274  *
275  * This function will request ownership of the change lock.
276  */
277 int
278 ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
279 {
280 	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
281 			       ICE_CHANGE_LOCK_TIMEOUT);
282 }
283 
284 /**
285  * ice_release_change_lock
286  * @hw: pointer to the HW structure
287  *
288  * This function will release the change lock using the proper Admin Command.
289  */
290 void ice_release_change_lock(struct ice_hw *hw)
291 {
292 	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
293 }
294 
295 /**
296  * ice_get_open_tunnel_port - retrieve an open tunnel port
297  * @hw: pointer to the HW structure
298  * @port: returns open port
299  * @type: type of tunnel, can be TNL_LAST if it doesn't matter
300  */
301 bool
302 ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
303 			 enum ice_tunnel_type type)
304 {
305 	bool res = false;
306 	u16 i;
307 
308 	mutex_lock(&hw->tnl_lock);
309 
310 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
311 		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
312 		    (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
313 			*port = hw->tnl.tbl[i].port;
314 			res = true;
315 			break;
316 		}
317 
318 	mutex_unlock(&hw->tnl_lock);
319 
320 	return res;
321 }
322 
323 /**
324  * ice_upd_dvm_boost_entry
325  * @hw: pointer to the HW structure
326  * @entry: pointer to double vlan boost entry info
327  */
328 static int
329 ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
330 {
331 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
332 	int status = -ENOSPC;
333 	struct ice_buf_build *bld;
334 	u8 val, dc, nm;
335 
336 	bld = ice_pkg_buf_alloc(hw);
337 	if (!bld)
338 		return -ENOMEM;
339 
340 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
341 	if (ice_pkg_buf_reserve_section(bld, 2))
342 		goto ice_upd_dvm_boost_entry_err;
343 
344 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
345 					    struct_size(sect_rx, tcam, 1));
346 	if (!sect_rx)
347 		goto ice_upd_dvm_boost_entry_err;
348 	sect_rx->count = cpu_to_le16(1);
349 
350 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
351 					    struct_size(sect_tx, tcam, 1));
352 	if (!sect_tx)
353 		goto ice_upd_dvm_boost_entry_err;
354 	sect_tx->count = cpu_to_le16(1);
355 
356 	/* copy original boost entry to update package buffer */
357 	memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam));
358 
359 	/* re-write the don't care and never match bits accordingly */
360 	if (entry->enable) {
361 		/* all bits are don't care */
362 		val = 0x00;
363 		dc = 0xFF;
364 		nm = 0x00;
365 	} else {
366 		/* disable, one never match bit, the rest are don't care */
367 		val = 0x00;
368 		dc = 0xF7;
369 		nm = 0x08;
370 	}
371 
372 	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
373 		    &val, NULL, &dc, &nm, 0, sizeof(u8));
374 
375 	/* exact copy of entry to Tx section entry */
376 	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
377 
378 	status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
379 
380 ice_upd_dvm_boost_entry_err:
381 	ice_pkg_buf_free(hw, bld);
382 
383 	return status;
384 }
385 
386 /**
387  * ice_set_dvm_boost_entries
388  * @hw: pointer to the HW structure
389  *
390  * Enable double vlan by updating the appropriate boost tcam entries.
391  */
392 int ice_set_dvm_boost_entries(struct ice_hw *hw)
393 {
394 	u16 i;
395 
396 	for (i = 0; i < hw->dvm_upd.count; i++) {
397 		int status;
398 
399 		status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
400 		if (status)
401 			return status;
402 	}
403 
404 	return 0;
405 }
406 
407 /**
408  * ice_tunnel_idx_to_entry - convert linear index to the sparse one
409  * @hw: pointer to the HW structure
410  * @type: type of tunnel
411  * @idx: linear index
412  *
413  * The stack assumes we have 2 linear tables with indexes [0, count_valid),
414  * but really the port table may be sparse, and types are mixed, so convert
415  * the stack index into the device index.
416  */
417 static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
418 				   u16 idx)
419 {
420 	u16 i;
421 
422 	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
423 		if (hw->tnl.tbl[i].valid &&
424 		    hw->tnl.tbl[i].type == type &&
425 		    idx-- == 0)
426 			return i;
427 
428 	WARN_ON_ONCE(1);
429 	return 0;
430 }
431 
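/* Illustrative sketch (not part of this file): converting a per-type linear
 * index into a sparse table slot, as done above, using a toy table. All
 * demo_ names are made up.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_tnl {
	bool valid;
	int type;
};

static int demo_idx_to_entry(const struct demo_tnl *tbl, int count,
			     int type, int idx)
{
	for (int i = 0; i < count; i++)
		if (tbl[i].valid && tbl[i].type == type && idx-- == 0)
			return i;	/* the idx'th valid entry of this type */

	return -1;
}

int main(void)
{
	/* slots 0 and 3 hold type 0; slot 1 is invalid; slot 2 holds type 1 */
	const struct demo_tnl tbl[] = {
		{ true, 0 }, { false, 0 }, { true, 1 }, { true, 0 },
	};

	/* linear index 1 of type 0 maps to sparse slot 3 */
	printf("%d\n", demo_idx_to_entry(tbl, 4, 0, 1));
	return 0;
}
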
432 /**
433  * ice_create_tunnel
434  * @hw: pointer to the HW structure
435  * @index: device table entry
436  * @type: type of tunnel
437  * @port: port of tunnel to create
438  *
439  * Create a tunnel by updating the parse graph in the parser. We do that by
440  * creating a package buffer with the tunnel info and issuing an update package
441  * command.
442  */
443 static int
444 ice_create_tunnel(struct ice_hw *hw, u16 index,
445 		  enum ice_tunnel_type type, u16 port)
446 {
447 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
448 	struct ice_buf_build *bld;
449 	int status = -ENOSPC;
450 
451 	mutex_lock(&hw->tnl_lock);
452 
453 	bld = ice_pkg_buf_alloc(hw);
454 	if (!bld) {
455 		status = -ENOMEM;
456 		goto ice_create_tunnel_end;
457 	}
458 
459 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
460 	if (ice_pkg_buf_reserve_section(bld, 2))
461 		goto ice_create_tunnel_err;
462 
463 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
464 					    struct_size(sect_rx, tcam, 1));
465 	if (!sect_rx)
466 		goto ice_create_tunnel_err;
467 	sect_rx->count = cpu_to_le16(1);
468 
469 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
470 					    struct_size(sect_tx, tcam, 1));
471 	if (!sect_tx)
472 		goto ice_create_tunnel_err;
473 	sect_tx->count = cpu_to_le16(1);
474 
475 	/* copy original boost entry to update package buffer */
476 	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
477 	       sizeof(*sect_rx->tcam));
478 
479 	/* over-write the never-match dest port key bits with the encoded port
480 	 * bits
481 	 */
482 	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
483 		    (u8 *)&port, NULL, NULL, NULL,
484 		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
485 		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
486 
487 	/* exact copy of entry to Tx section entry */
488 	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
489 
490 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
491 	if (!status)
492 		hw->tnl.tbl[index].port = port;
493 
494 ice_create_tunnel_err:
495 	ice_pkg_buf_free(hw, bld);
496 
497 ice_create_tunnel_end:
498 	mutex_unlock(&hw->tnl_lock);
499 
500 	return status;
501 }
502 
503 /**
504  * ice_destroy_tunnel
505  * @hw: pointer to the HW structure
506  * @index: device table entry
507  * @type: type of tunnel
508  * @port: port of tunnel to destroy (ignored if the all parameter is true)
509  *
510  * Destroys a tunnel or all tunnels by creating an update package buffer
511  * targeting the specific updates requested and then performing an update
512  * package.
513  */
514 static int
515 ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
516 		   u16 port)
517 {
518 	struct ice_boost_tcam_section *sect_rx, *sect_tx;
519 	struct ice_buf_build *bld;
520 	int status = -ENOSPC;
521 
522 	mutex_lock(&hw->tnl_lock);
523 
524 	if (WARN_ON(!hw->tnl.tbl[index].valid ||
525 		    hw->tnl.tbl[index].type != type ||
526 		    hw->tnl.tbl[index].port != port)) {
527 		status = -EIO;
528 		goto ice_destroy_tunnel_end;
529 	}
530 
531 	bld = ice_pkg_buf_alloc(hw);
532 	if (!bld) {
533 		status = -ENOMEM;
534 		goto ice_destroy_tunnel_end;
535 	}
536 
537 	/* allocate 2 sections, one for Rx parser, one for Tx parser */
538 	if (ice_pkg_buf_reserve_section(bld, 2))
539 		goto ice_destroy_tunnel_err;
540 
541 	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
542 					    struct_size(sect_rx, tcam, 1));
543 	if (!sect_rx)
544 		goto ice_destroy_tunnel_err;
545 	sect_rx->count = cpu_to_le16(1);
546 
547 	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
548 					    struct_size(sect_tx, tcam, 1));
549 	if (!sect_tx)
550 		goto ice_destroy_tunnel_err;
551 	sect_tx->count = cpu_to_le16(1);
552 
553 	/* copy original boost entry to update package buffer, one copy to Rx
554 	 * section, another copy to the Tx section
555 	 */
556 	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
557 	       sizeof(*sect_rx->tcam));
558 	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
559 	       sizeof(*sect_tx->tcam));
560 
561 	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
562 	if (!status)
563 		hw->tnl.tbl[index].port = 0;
564 
565 ice_destroy_tunnel_err:
566 	ice_pkg_buf_free(hw, bld);
567 
568 ice_destroy_tunnel_end:
569 	mutex_unlock(&hw->tnl_lock);
570 
571 	return status;
572 }
573 
574 int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
575 			    unsigned int idx, struct udp_tunnel_info *ti)
576 {
577 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
578 	enum ice_tunnel_type tnl_type;
579 	int status;
580 	u16 index;
581 
582 	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
583 	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
584 
585 	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
586 	if (status) {
587 		netdev_err(netdev, "Error adding UDP tunnel - %d\n",
588 			   status);
589 		return -EIO;
590 	}
591 
592 	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
593 	return 0;
594 }
595 
596 int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
597 			      unsigned int idx, struct udp_tunnel_info *ti)
598 {
599 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
600 	enum ice_tunnel_type tnl_type;
601 	int status;
602 
603 	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
604 
605 	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
606 				    ntohs(ti->port));
607 	if (status) {
608 		netdev_err(netdev, "Error removing UDP tunnel - %d\n",
609 			   status);
610 		return -EIO;
611 	}
612 
613 	return 0;
614 }
615 
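/* Context sketch (not part of this file): these two callbacks are wired into
 * the udp_tunnel_nic infrastructure from ice_main.c; the registration looks
 * roughly like the following (simplified; table sizes here are illustrative,
 * see ice_main.c for the real definition):
 *
 *	static const struct udp_tunnel_nic_info ice_udp_tunnels = {
 *		.set_port	= ice_udp_tunnel_set_port,
 *		.unset_port	= ice_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
 *		.tables		= {
 *			{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN  },
 *			{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE },
 *		},
 *	};
 *
 * The core then invokes set_port/unset_port as VXLAN/GENEVE sockets come and
 * go, handing back the device index stored via udp_tunnel_nic_set_port_priv()
 * in ti->hw_priv.
 */
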
616 /**
617  * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
618  * @hw: pointer to the hardware structure
619  * @blk: hardware block
620  * @prof: profile ID
621  * @fv_idx: field vector word index
622  * @prot: variable to receive the protocol ID
623  * @off: variable to receive the protocol offset
624  */
625 int
626 ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
627 		  u8 *prot, u16 *off)
628 {
629 	struct ice_fv_word *fv_ext;
630 
631 	if (prof >= hw->blk[blk].es.count)
632 		return -EINVAL;
633 
634 	if (fv_idx >= hw->blk[blk].es.fvw)
635 		return -EINVAL;
636 
637 	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
638 
639 	*prot = fv_ext[fv_idx].prot_id;
640 	*off = fv_ext[fv_idx].off;
641 
642 	return 0;
643 }
644 
645 /* PTG Management */
646 
647 /**
648  * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
649  * @hw: pointer to the hardware structure
650  * @blk: HW block
651  * @ptype: the ptype to search for
652  * @ptg: pointer to variable that receives the PTG
653  *
654  * This function will search the PTGs for a particular ptype, returning the
655  * PTG ID that contains it through the PTG parameter, with the value of
656  * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
657  */
658 static int
659 ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
660 {
661 	if (ptype >= ICE_XLT1_CNT || !ptg)
662 		return -EINVAL;
663 
664 	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
665 	return 0;
666 }
667 
668 /**
669  * ice_ptg_alloc_val - Allocates a new packet type group ID by value
670  * @hw: pointer to the hardware structure
671  * @blk: HW block
672  * @ptg: the PTG to allocate
673  *
674  * This function allocates a given packet type group ID specified by the PTG
675  * parameter.
676  */
677 static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
678 {
679 	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
680 }
681 
682 /**
683  * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
684  * @hw: pointer to the hardware structure
685  * @blk: HW block
686  * @ptype: the ptype to remove
687  * @ptg: the PTG to remove the ptype from
688  *
689  * This function will remove the ptype from the specific PTG, and move it to
690  * the default PTG (ICE_DEFAULT_PTG).
691  */
692 static int
693 ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
694 {
695 	struct ice_ptg_ptype **ch;
696 	struct ice_ptg_ptype *p;
697 
698 	if (ptype > ICE_XLT1_CNT - 1)
699 		return -EINVAL;
700 
701 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
702 		return -ENOENT;
703 
704 	/* Should not happen if .in_use is set, bad config */
705 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
706 		return -EIO;
707 
708 	/* find the ptype within this PTG, and bypass the link over it */
709 	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
710 	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
711 	while (p) {
712 		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
713 			*ch = p->next_ptype;
714 			break;
715 		}
716 
717 		ch = &p->next_ptype;
718 		p = p->next_ptype;
719 	}
720 
721 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
722 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
723 
724 	return 0;
725 }
726 
727 /**
728  * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
729  * @hw: pointer to the hardware structure
730  * @blk: HW block
731  * @ptype: the ptype to add or move
732  * @ptg: the PTG to add or move the ptype to
733  *
734  * This function will either add or move a ptype to a particular PTG depending
735  * on whether the ptype is already part of another group. Note that using a
736  * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
737  * default PTG.
738  */
739 static int
740 ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
741 {
742 	u8 original_ptg;
743 	int status;
744 
745 	if (ptype > ICE_XLT1_CNT - 1)
746 		return -EINVAL;
747 
748 	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
749 		return -ENOENT;
750 
751 	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
752 	if (status)
753 		return status;
754 
755 	/* Is ptype already in the correct PTG? */
756 	if (original_ptg == ptg)
757 		return 0;
758 
759 	/* Remove from original PTG and move back to the default PTG */
760 	if (original_ptg != ICE_DEFAULT_PTG)
761 		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
762 
763 	/* Moving to default PTG? Then we're done with this request */
764 	if (ptg == ICE_DEFAULT_PTG)
765 		return 0;
766 
767 	/* Add ptype to PTG at beginning of list */
768 	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
769 		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
770 	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
771 		&hw->blk[blk].xlt1.ptypes[ptype];
772 
773 	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
774 	hw->blk[blk].xlt1.t[ptype] = ptg;
775 
776 	return 0;
777 }
778 
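/* Illustrative sketch (not part of this file): the PTG membership list used
 * above is a singly linked list with head insertion; a generic host-side
 * equivalent of the add-to-front step (demo_ names are made up):
 */
struct demo_node {
	struct demo_node *next;
};

static void demo_push_front(struct demo_node **head, struct demo_node *n)
{
	n->next = *head;	/* new node points at the old first entry */
	*head = n;		/* head now points at the new node */
}
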
779 /* Block / table size info */
780 struct ice_blk_size_details {
781 	u16 xlt1;			/* # XLT1 entries */
782 	u16 xlt2;			/* # XLT2 entries */
783 	u16 prof_tcam;			/* # profile ID TCAM entries */
784 	u16 prof_id;			/* # profile IDs */
785 	u8 prof_cdid_bits;		/* # CDID one-hot bits used in key */
786 	u16 prof_redir;			/* # profile redirection entries */
787 	u16 es;				/* # extraction sequence entries */
788 	u16 fvw;			/* # field vector words */
789 	u8 overwrite;			/* overwrite existing entries allowed */
790 	u8 reverse;			/* reverse FV order */
791 };
792 
793 static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
794 	/**
795 	 * Table Definitions
796 	 * XLT1 - Number of entries in XLT1 table
797 	 * XLT2 - Number of entries in XLT2 table
798 	 * TCAM - Number of entries Profile ID TCAM table
799 	 * CDID - Control Domain ID of the hardware block
800 	 * PRED - Number of entries in the Profile Redirection Table
801 	 * FV   - Number of entries in the Field Vector
802 	 * FVW  - Width (in WORDs) of the Field Vector
803 	 * OVR  - Overwrite existing table entries
804 	 * REV  - Reverse FV
805 	 */
806 	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,   FV, FVW */
807 	/*          Overwrite   , Reverse FV */
808 	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0,  256, 256,  48,
809 		    false, false },
810 	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  32,
811 		    false, false },
812 	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
813 		    false, true  },
814 	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0,  128, 128,  24,
815 		    true,  true  },
816 	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,   32,  32,  24,
817 		    false, false },
818 };
819 
820 enum ice_sid_all {
821 	ICE_SID_XLT1_OFF = 0,
822 	ICE_SID_XLT2_OFF,
823 	ICE_SID_PR_OFF,
824 	ICE_SID_PR_REDIR_OFF,
825 	ICE_SID_ES_OFF,
826 	ICE_SID_OFF_COUNT,
827 };
828 
829 /* Characteristic handling */
830 
831 /**
832  * ice_match_prop_lst - determine if properties of two lists match
833  * @list1: first properties list
834  * @list2: second properties list
835  *
836  * Count, cookies, and order must all match for the lists to be considered equivalent.
837  */
838 static bool
839 ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
840 {
841 	struct ice_vsig_prof *tmp1;
842 	struct ice_vsig_prof *tmp2;
843 	u16 chk_count = 0;
844 	u16 count = 0;
845 
846 	/* compare counts */
847 	list_for_each_entry(tmp1, list1, list)
848 		count++;
849 	list_for_each_entry(tmp2, list2, list)
850 		chk_count++;
851 	if (!count || count != chk_count)
852 		return false;
853 
854 	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
855 	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);
856 
857 	/* profile cookies must compare, and in the exact same order to take
858 	 * into account priority
859 	 */
860 	while (count--) {
861 		if (tmp2->profile_cookie != tmp1->profile_cookie)
862 			return false;
863 
864 		tmp1 = list_next_entry(tmp1, list);
865 		tmp2 = list_next_entry(tmp2, list);
866 	}
867 
868 	return true;
869 }
870 
871 /* VSIG Management */
872 
873 /**
874  * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
875  * @hw: pointer to the hardware structure
876  * @blk: HW block
877  * @vsi: VSI of interest
878  * @vsig: pointer to receive the VSI group
879  *
880  * This function will look up the VSI entry in the XLT2 list and return
881  * the VSI group it is associated with.
882  */
883 static int
884 ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
885 {
886 	if (!vsig || vsi >= ICE_MAX_VSI)
887 		return -EINVAL;
888 
889 	/* As long as there's a default or valid VSIG associated with the input
890 	 * VSI, the function returns success. Any handling of the VSIG will be
891 	 * done by the following add, update or remove functions.
892 	 */
893 	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
894 
895 	return 0;
896 }
897 
898 /**
899  * ice_vsig_alloc_val - allocate a new VSIG by value
900  * @hw: pointer to the hardware structure
901  * @blk: HW block
902  * @vsig: the VSIG to allocate
903  *
904  * This function will allocate a given VSIG specified by the VSIG parameter.
905  */
906 static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
907 {
908 	u16 idx = vsig & ICE_VSIG_IDX_M;
909 
910 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
911 		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
912 		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
913 	}
914 
915 	return ICE_VSIG_VALUE(idx, hw->pf_id);
916 }
917 
918 /**
919  * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
920  * @hw: pointer to the hardware structure
921  * @blk: HW block
922  *
923  * This function will iterate through the VSIG list and mark the first
924  * unused entry for the new VSIG entry as used and return that value.
925  */
926 static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
927 {
928 	u16 i;
929 
930 	for (i = 1; i < ICE_MAX_VSIGS; i++)
931 		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
932 			return ice_vsig_alloc_val(hw, blk, i);
933 
934 	return ICE_DEFAULT_VSIG;
935 }
936 
937 /**
938  * ice_find_dup_props_vsig - find VSI group with a specified set of properties
939  * @hw: pointer to the hardware structure
940  * @blk: HW block
941  * @chs: characteristic list
942  * @vsig: returns the VSIG with the matching profiles, if found
943  *
944  * Each VSIG is associated with a characteristic set; i.e. all VSIs under
945  * a group have the same characteristic set. To check whether a VSIG with
946  * the same characteristics as the input set already exists, this
947  * function will iterate through the XLT2 list and return the VSIG that has a
948  * matching configuration. In order to make sure that priorities are accounted
949  * for, the list must match exactly, including the order in which the
950  * characteristics are listed.
951  */
952 static int
953 ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
954 			struct list_head *chs, u16 *vsig)
955 {
956 	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
957 	u16 i;
958 
959 	for (i = 0; i < xlt2->count; i++)
960 		if (xlt2->vsig_tbl[i].in_use &&
961 		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
962 			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
963 			return 0;
964 		}
965 
966 	return -ENOENT;
967 }
968 
969 /**
970  * ice_vsig_free - free VSI group
971  * @hw: pointer to the hardware structure
972  * @blk: HW block
973  * @vsig: VSIG to remove
974  *
975  * The function will remove all VSIs associated with the input VSIG and move
976  * them to the DEFAULT_VSIG and mark the VSIG available.
977  */
978 static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
979 {
980 	struct ice_vsig_prof *dtmp, *del;
981 	struct ice_vsig_vsi *vsi_cur;
982 	u16 idx;
983 
984 	idx = vsig & ICE_VSIG_IDX_M;
985 	if (idx >= ICE_MAX_VSIGS)
986 		return -EINVAL;
987 
988 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
989 		return -ENOENT;
990 
991 	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
992 
993 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
994 	/* If the VSIG has at least 1 VSI then iterate through the
995 	 * list and remove the VSIs before deleting the group.
996 	 */
997 	if (vsi_cur) {
998 		/* remove all vsis associated with this VSIG XLT2 entry */
999 		do {
1000 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
1001 
1002 			vsi_cur->vsig = ICE_DEFAULT_VSIG;
1003 			vsi_cur->changed = 1;
1004 			vsi_cur->next_vsi = NULL;
1005 			vsi_cur = tmp;
1006 		} while (vsi_cur);
1007 
1008 		/* NULL terminate head of VSI list */
1009 		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
1010 	}
1011 
1012 	/* free characteristic list */
1013 	list_for_each_entry_safe(del, dtmp,
1014 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
1015 				 list) {
1016 		list_del(&del->list);
1017 		devm_kfree(ice_hw_to_dev(hw), del);
1018 	}
1019 
1020 	/* if VSIG characteristic list was cleared for reset
1021 	 * re-initialize the list head
1022 	 */
1023 	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
1024 
1025 	return 0;
1026 }
1027 
1028 /**
1029  * ice_vsig_remove_vsi - remove VSI from VSIG
1030  * @hw: pointer to the hardware structure
1031  * @blk: HW block
1032  * @vsi: VSI to remove
1033  * @vsig: VSI group to remove from
1034  *
1035  * The function will remove the input VSI from its VSI group and move it
1036  * to the DEFAULT_VSIG.
1037  */
1038 static int
1039 ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1040 {
1041 	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
1042 	u16 idx;
1043 
1044 	idx = vsig & ICE_VSIG_IDX_M;
1045 
1046 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1047 		return -EINVAL;
1048 
1049 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
1050 		return -ENOENT;
1051 
1052 	/* entry already in default VSIG, don't have to remove */
1053 	if (idx == ICE_DEFAULT_VSIG)
1054 		return 0;
1055 
1056 	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1057 	if (!(*vsi_head))
1058 		return -EIO;
1059 
1060 	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
1061 	vsi_cur = (*vsi_head);
1062 
1063 	/* iterate the VSI list, skip over the entry to be removed */
1064 	while (vsi_cur) {
1065 		if (vsi_tgt == vsi_cur) {
1066 			(*vsi_head) = vsi_cur->next_vsi;
1067 			break;
1068 		}
1069 		vsi_head = &vsi_cur->next_vsi;
1070 		vsi_cur = vsi_cur->next_vsi;
1071 	}
1072 
1073 	/* verify if VSI was removed from group list */
1074 	if (!vsi_cur)
1075 		return -ENOENT;
1076 
1077 	vsi_cur->vsig = ICE_DEFAULT_VSIG;
1078 	vsi_cur->changed = 1;
1079 	vsi_cur->next_vsi = NULL;
1080 
1081 	return 0;
1082 }
1083 
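/* Illustrative sketch (not part of this file): ice_vsig_remove_vsi() and
 * ice_ptg_remove_ptype() both unlink by walking a pointer to the previous
 * node's next pointer; a generic host-side version (demo_ names are made up):
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_item {
	struct demo_item *next;
};

static bool demo_unlink(struct demo_item **head, struct demo_item *victim)
{
	for (struct demo_item **pp = head; *pp; pp = &(*pp)->next)
		if (*pp == victim) {
			*pp = victim->next;	/* bypass the link over it */
			victim->next = NULL;
			return true;
		}

	return false;	/* victim was not on this list */
}
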
1084 /**
1085  * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
1086  * @hw: pointer to the hardware structure
1087  * @blk: HW block
1088  * @vsi: VSI to move
1089  * @vsig: destination VSI group
1090  *
1091  * This function will move or add the input VSI to the target VSIG.
1092  * The function will find the original VSIG the VSI belongs to and
1093  * move the entry to the DEFAULT_VSIG, update the original VSIG and
1094  * then move entry to the new VSIG.
1095  */
1096 static int
1097 ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
1098 {
1099 	struct ice_vsig_vsi *tmp;
1100 	u16 orig_vsig, idx;
1101 	int status;
1102 
1103 	idx = vsig & ICE_VSIG_IDX_M;
1104 
1105 	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
1106 		return -EINVAL;
1107 
1108 	/* if the VSIG is not in use and is not the default VSIG, then it
1109 	 * doesn't exist.
1110 	 */
1111 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
1112 	    vsig != ICE_DEFAULT_VSIG)
1113 		return -ENOENT;
1114 
1115 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
1116 	if (status)
1117 		return status;
1118 
1119 	/* no update required if vsigs match */
1120 	if (orig_vsig == vsig)
1121 		return 0;
1122 
1123 	if (orig_vsig != ICE_DEFAULT_VSIG) {
1124 		/* remove entry from orig_vsig and add to default VSIG */
1125 		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
1126 		if (status)
1127 			return status;
1128 	}
1129 
1130 	if (idx == ICE_DEFAULT_VSIG)
1131 		return 0;
1132 
1133 	/* Create VSI entry and add VSIG and prop_mask values */
1134 	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
1135 	hw->blk[blk].xlt2.vsis[vsi].changed = 1;
1136 
1137 	/* Add new entry to the head of the VSIG list */
1138 	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
1139 	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
1140 		&hw->blk[blk].xlt2.vsis[vsi];
1141 	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
1142 	hw->blk[blk].xlt2.t[vsi] = vsig;
1143 
1144 	return 0;
1145 }
1146 
1147 /**
1148  * ice_prof_has_mask_idx - determine if profile index masking is identical
1149  * @hw: pointer to the hardware structure
1150  * @blk: HW block
1151  * @prof: profile to check
1152  * @idx: profile index to check
1153  * @mask: mask to match
1154  */
1155 static bool
1156 ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
1157 		      u16 mask)
1158 {
1159 	bool expect_no_mask = false;
1160 	bool found = false;
1161 	bool match = false;
1162 	u16 i;
1163 
1164 	/* If mask is 0x0000 or 0xffff, then there is no masking */
1165 	if (mask == 0 || mask == 0xffff)
1166 		expect_no_mask = true;
1167 
1168 	/* Scan the enabled masks on this profile, for the specified idx */
1169 	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
1170 	     hw->blk[blk].masks.count; i++)
1171 		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
1172 			if (hw->blk[blk].masks.masks[i].in_use &&
1173 			    hw->blk[blk].masks.masks[i].idx == idx) {
1174 				found = true;
1175 				if (hw->blk[blk].masks.masks[i].mask == mask)
1176 					match = true;
1177 				break;
1178 			}
1179 
1180 	if (expect_no_mask) {
1181 		if (found)
1182 			return false;
1183 	} else {
1184 		if (!match)
1185 			return false;
1186 	}
1187 
1188 	return true;
1189 }
1190 
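/* Illustrative sketch (not part of this file): the acceptance rule above
 * reduced to a pure function -- a requested mask of 0x0000 or 0xffff means
 * "no HW masking expected", so finding an installed mask for that word is a
 * mismatch; otherwise an exact mask match is required. demo_ names are made up.
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_mask_idx_ok(bool found, bool match, uint16_t want_mask)
{
	if (want_mask == 0 || want_mask == 0xffff)
		return !found;	/* no mask entry may exist for this word */

	return match;		/* an entry must exist with the exact mask */
}
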
1191 /**
1192  * ice_prof_has_mask - determine if profile masking is identical
1193  * @hw: pointer to the hardware structure
1194  * @blk: HW block
1195  * @prof: profile to check
1196  * @masks: masks to match
1197  */
1198 static bool
1199 ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
1200 {
1201 	u16 i;
1202 
1203 	/* es->mask_ena[prof] will have the mask */
1204 	for (i = 0; i < hw->blk[blk].es.fvw; i++)
1205 		if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
1206 			return false;
1207 
1208 	return true;
1209 }
1210 
1211 /**
1212  * ice_find_prof_id_with_mask - find profile ID for a given field vector
1213  * @hw: pointer to the hardware structure
1214  * @blk: HW block
1215  * @fv: field vector to search for
1216  * @masks: masks for FV
1217  * @symm: symmetric setting for RSS flows
1218  * @prof_id: receives the profile ID
1219  */
1220 static int
1221 ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
1222 			   struct ice_fv_word *fv, u16 *masks, bool symm,
1223 			   u8 *prof_id)
1224 {
1225 	struct ice_es *es = &hw->blk[blk].es;
1226 	u8 i;
1227 
1228 	/* For FD, we don't want to re-use an existing profile with the same
1229 	 * field vector and mask, as doing so would cause rule interference.
1230 	 */
1231 	if (blk == ICE_BLK_FD)
1232 		return -ENOENT;
1233 
1234 	for (i = 0; i < (u8)es->count; i++) {
1235 		u16 off = i * es->fvw;
1236 
1237 		if (blk == ICE_BLK_RSS && es->symm[i] != symm)
1238 			continue;
1239 
1240 		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
1241 			continue;
1242 
1243 		/* check if masks settings are the same for this profile */
1244 		if (masks && !ice_prof_has_mask(hw, blk, i, masks))
1245 			continue;
1246 
1247 		*prof_id = i;
1248 		return 0;
1249 	}
1250 
1251 	return -ENOENT;
1252 }
1253 
1254 /**
1255  * ice_prof_id_rsrc_type - get profile ID resource type for a block type
1256  * @blk: the block type
1257  * @rsrc_type: pointer to variable to receive the resource type
1258  */
1259 static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1260 {
1261 	switch (blk) {
1262 	case ICE_BLK_FD:
1263 		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
1264 		break;
1265 	case ICE_BLK_RSS:
1266 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
1267 		break;
1268 	default:
1269 		return false;
1270 	}
1271 	return true;
1272 }
1273 
1274 /**
1275  * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
1276  * @blk: the block type
1277  * @rsrc_type: pointer to variable to receive the resource type
1278  */
1279 static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
1280 {
1281 	switch (blk) {
1282 	case ICE_BLK_FD:
1283 		*rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
1284 		break;
1285 	case ICE_BLK_RSS:
1286 		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
1287 		break;
1288 	default:
1289 		return false;
1290 	}
1291 	return true;
1292 }
1293 
1294 /**
1295  * ice_alloc_tcam_ent - allocate hardware TCAM entry
1296  * @hw: pointer to the HW struct
1297  * @blk: the block to allocate the TCAM for
1298  * @btm: true to allocate from bottom of table, false to allocate from top
1299  * @tcam_idx: pointer to variable to receive the TCAM entry
1300  *
1301  * This function allocates a new entry in a Profile ID TCAM for a specific
1302  * block.
1303  */
1304 static int
1305 ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
1306 		   u16 *tcam_idx)
1307 {
1308 	u16 res_type;
1309 
1310 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1311 		return -EINVAL;
1312 
1313 	return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
1314 }
1315 
1316 /**
1317  * ice_free_tcam_ent - free hardware TCAM entry
1318  * @hw: pointer to the HW struct
1319  * @blk: the block from which to free the TCAM entry
1320  * @tcam_idx: the TCAM entry to free
1321  *
1322  * This function frees an entry in a Profile ID TCAM for a specific block.
1323  */
1324 static int
1325 ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
1326 {
1327 	u16 res_type;
1328 
1329 	if (!ice_tcam_ent_rsrc_type(blk, &res_type))
1330 		return -EINVAL;
1331 
1332 	return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
1333 }
1334 
1335 /**
1336  * ice_alloc_prof_id - allocate profile ID
1337  * @hw: pointer to the HW struct
1338  * @blk: the block to allocate the profile ID for
1339  * @prof_id: pointer to variable to receive the profile ID
1340  *
1341  * This function allocates a new profile ID, which also corresponds to a Field
1342  * Vector (Extraction Sequence) entry.
1343  */
1344 static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
1345 {
1346 	u16 res_type;
1347 	u16 get_prof;
1348 	int status;
1349 
1350 	if (!ice_prof_id_rsrc_type(blk, &res_type))
1351 		return -EINVAL;
1352 
1353 	status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
1354 	if (!status)
1355 		*prof_id = (u8)get_prof;
1356 
1357 	return status;
1358 }
1359 
1360 /**
1361  * ice_free_prof_id - free profile ID
1362  * @hw: pointer to the HW struct
1363  * @blk: the block from which to free the profile ID
1364  * @prof_id: the profile ID to free
1365  *
1366  * This function frees a profile ID, which also corresponds to a Field Vector.
1367  */
1368 static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1369 {
1370 	u16 tmp_prof_id = (u16)prof_id;
1371 	u16 res_type;
1372 
1373 	if (!ice_prof_id_rsrc_type(blk, &res_type))
1374 		return -EINVAL;
1375 
1376 	return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
1377 }
1378 
1379 /**
1380  * ice_prof_inc_ref - increment reference count for profile
1381  * @hw: pointer to the HW struct
1382  * @blk: the block from which to free the profile ID
1383  * @prof_id: the profile ID for which to increment the reference count
1384  */
1385 static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1386 {
1387 	if (prof_id > hw->blk[blk].es.count)
1388 		return -EINVAL;
1389 
1390 	hw->blk[blk].es.ref_count[prof_id]++;
1391 
1392 	return 0;
1393 }
1394 
1395 /**
1396  * ice_write_prof_mask_reg - write profile mask register
1397  * @hw: pointer to the HW struct
1398  * @blk: hardware block
1399  * @mask_idx: mask index
1400  * @idx: index of the FV which will use the mask
1401  * @mask: the 16-bit mask
1402  */
1403 static void
1404 ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
1405 			u16 idx, u16 mask)
1406 {
1407 	u32 offset;
1408 	u32 val;
1409 
1410 	switch (blk) {
1411 	case ICE_BLK_RSS:
1412 		offset = GLQF_HMASK(mask_idx);
1413 		val = FIELD_PREP(GLQF_HMASK_MSK_INDEX_M, idx);
1414 		val |= FIELD_PREP(GLQF_HMASK_MASK_M, mask);
1415 		break;
1416 	case ICE_BLK_FD:
1417 		offset = GLQF_FDMASK(mask_idx);
1418 		val = FIELD_PREP(GLQF_FDMASK_MSK_INDEX_M, idx);
1419 		val |= FIELD_PREP(GLQF_FDMASK_MASK_M, mask);
1420 		break;
1421 	default:
1422 		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
1423 			  blk);
1424 		return;
1425 	}
1426 
1427 	wr32(hw, offset, val);
1428 	ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
1429 		  blk, idx, offset, val);
1430 }
1431 
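/* Illustrative sketch (not part of this file): FIELD_PREP() from
 * <linux/bitfield.h> shifts a value into the bit position of a field mask.
 * A host-side equivalent of the register composition above; the DEMO_*
 * masks are made up and do not reflect the real GLQF_HMASK layout.
 */
#include <stdint.h>

#define DEMO_MSK_INDEX_M	0x0000001Fu
#define DEMO_MASK_M		0xFFFF0000u

#define DEMO_FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

static uint32_t demo_compose_mask_reg(uint16_t idx, uint16_t mask)
{
	return DEMO_FIELD_PREP(DEMO_MSK_INDEX_M, idx) |
	       DEMO_FIELD_PREP(DEMO_MASK_M, mask);
}
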
1432 /**
1433  * ice_write_prof_mask_enable_res - write profile mask enable register
1434  * @hw: pointer to the HW struct
1435  * @blk: hardware block
1436  * @prof_id: profile ID
1437  * @enable_mask: enable mask
1438  */
1439 static void
1440 ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
1441 			       u16 prof_id, u32 enable_mask)
1442 {
1443 	u32 offset;
1444 
1445 	switch (blk) {
1446 	case ICE_BLK_RSS:
1447 		offset = GLQF_HMASK_SEL(prof_id);
1448 		break;
1449 	case ICE_BLK_FD:
1450 		offset = GLQF_FDMASK_SEL(prof_id);
1451 		break;
1452 	default:
1453 		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
1454 			  blk);
1455 		return;
1456 	}
1457 
1458 	wr32(hw, offset, enable_mask);
1459 	ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
1460 		  blk, prof_id, offset, enable_mask);
1461 }
1462 
1463 /**
1464  * ice_init_prof_masks - initialize profile masks
1465  * @hw: pointer to the HW struct
1466  * @blk: hardware block
1467  */
1468 static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
1469 {
1470 	u16 per_pf;
1471 	u16 i;
1472 
1473 	mutex_init(&hw->blk[blk].masks.lock);
1474 
1475 	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
1476 
1477 	hw->blk[blk].masks.count = per_pf;
1478 	hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
1479 
1480 	memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
1481 
1482 	for (i = hw->blk[blk].masks.first;
1483 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
1484 		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
1485 }
1486 
1487 /**
1488  * ice_init_all_prof_masks - initialize all prof masks
1489  * @hw: pointer to the HW struct
1490  */
1491 static void ice_init_all_prof_masks(struct ice_hw *hw)
1492 {
1493 	ice_init_prof_masks(hw, ICE_BLK_RSS);
1494 	ice_init_prof_masks(hw, ICE_BLK_FD);
1495 }
1496 
1497 /**
1498  * ice_alloc_prof_mask - allocate profile mask
1499  * @hw: pointer to the HW struct
1500  * @blk: hardware block
1501  * @idx: index of FV which will use the mask
1502  * @mask: the 16-bit mask
1503  * @mask_idx: variable to receive the mask index
1504  */
1505 static int
1506 ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
1507 		    u16 *mask_idx)
1508 {
1509 	bool found_unused = false, found_copy = false;
1510 	u16 unused_idx = 0, copy_idx = 0;
1511 	int status = -ENOSPC;
1512 	u16 i;
1513 
1514 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1515 		return -EINVAL;
1516 
1517 	mutex_lock(&hw->blk[blk].masks.lock);
1518 
1519 	for (i = hw->blk[blk].masks.first;
1520 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
1521 		if (hw->blk[blk].masks.masks[i].in_use) {
1522 			/* if mask is in use and it exactly duplicates the
1523 			 * desired mask and index, then it can be reused
1524 			 */
1525 			if (hw->blk[blk].masks.masks[i].mask == mask &&
1526 			    hw->blk[blk].masks.masks[i].idx == idx) {
1527 				found_copy = true;
1528 				copy_idx = i;
1529 				break;
1530 			}
1531 		} else {
1532 			/* save off unused index, but keep searching in case
1533 			 * there is an exact match later on
1534 			 */
1535 			if (!found_unused) {
1536 				found_unused = true;
1537 				unused_idx = i;
1538 			}
1539 		}
1540 
1541 	if (found_copy)
1542 		i = copy_idx;
1543 	else if (found_unused)
1544 		i = unused_idx;
1545 	else
1546 		goto err_ice_alloc_prof_mask;
1547 
1548 	/* update mask for a new entry */
1549 	if (found_unused) {
1550 		hw->blk[blk].masks.masks[i].in_use = true;
1551 		hw->blk[blk].masks.masks[i].mask = mask;
1552 		hw->blk[blk].masks.masks[i].idx = idx;
1553 		hw->blk[blk].masks.masks[i].ref = 0;
1554 		ice_write_prof_mask_reg(hw, blk, i, idx, mask);
1555 	}
1556 
1557 	hw->blk[blk].masks.masks[i].ref++;
1558 	*mask_idx = i;
1559 	status = 0;
1560 
1561 err_ice_alloc_prof_mask:
1562 	mutex_unlock(&hw->blk[blk].masks.lock);
1563 
1564 	return status;
1565 }
1566 
1567 /**
1568  * ice_free_prof_mask - free profile mask
1569  * @hw: pointer to the HW struct
1570  * @blk: hardware block
1571  * @mask_idx: index of mask
1572  */
1573 static int
1574 ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
1575 {
1576 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1577 		return -EINVAL;
1578 
1579 	if (!(mask_idx >= hw->blk[blk].masks.first &&
1580 	      mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
1581 		return -ENOENT;
1582 
1583 	mutex_lock(&hw->blk[blk].masks.lock);
1584 
1585 	if (!hw->blk[blk].masks.masks[mask_idx].in_use)
1586 		goto exit_ice_free_prof_mask;
1587 
1588 	if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
1589 		hw->blk[blk].masks.masks[mask_idx].ref--;
1590 		goto exit_ice_free_prof_mask;
1591 	}
1592 
1593 	/* remove mask */
1594 	hw->blk[blk].masks.masks[mask_idx].in_use = false;
1595 	hw->blk[blk].masks.masks[mask_idx].mask = 0;
1596 	hw->blk[blk].masks.masks[mask_idx].idx = 0;
1597 
1598 	/* update mask as unused entry */
1599 	ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
1600 		  mask_idx);
1601 	ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
1602 
1603 exit_ice_free_prof_mask:
1604 	mutex_unlock(&hw->blk[blk].masks.lock);
1605 
1606 	return 0;
1607 }
1608 
1609 /**
1610  * ice_free_prof_masks - free all profile masks for a profile
1611  * @hw: pointer to the HW struct
1612  * @blk: hardware block
1613  * @prof_id: profile ID
1614  */
1615 static int
1616 ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
1617 {
1618 	u32 mask_bm;
1619 	u16 i;
1620 
1621 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1622 		return -EINVAL;
1623 
1624 	mask_bm = hw->blk[blk].es.mask_ena[prof_id];
1625 	for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
1626 		if (mask_bm & BIT(i))
1627 			ice_free_prof_mask(hw, blk, i);
1628 
1629 	return 0;
1630 }
1631 
1632 /**
1633  * ice_shutdown_prof_masks - clear profile masks and release the mask lock
1634  * @hw: pointer to the HW struct
1635  * @blk: hardware block
1636  *
1637  * This should be called before unloading the driver
1638  */
1639 static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
1640 {
1641 	u16 i;
1642 
1643 	mutex_lock(&hw->blk[blk].masks.lock);
1644 
1645 	for (i = hw->blk[blk].masks.first;
1646 	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
1647 		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
1648 
1649 		hw->blk[blk].masks.masks[i].in_use = false;
1650 		hw->blk[blk].masks.masks[i].idx = 0;
1651 		hw->blk[blk].masks.masks[i].mask = 0;
1652 	}
1653 
1654 	mutex_unlock(&hw->blk[blk].masks.lock);
1655 	mutex_destroy(&hw->blk[blk].masks.lock);
1656 }
1657 
1658 /**
1659  * ice_shutdown_all_prof_masks - clear all profile masks and release locks
1660  * @hw: pointer to the HW struct
1661  *
1662  * This should be called before unloading the driver
1663  */
1664 static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
1665 {
1666 	ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
1667 	ice_shutdown_prof_masks(hw, ICE_BLK_FD);
1668 }
1669 
1670 /**
1671  * ice_update_prof_masking - set registers according to masking
1672  * @hw: pointer to the HW struct
1673  * @blk: hardware block
1674  * @prof_id: profile ID
1675  * @masks: masks
1676  */
1677 static int
1678 ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
1679 			u16 *masks)
1680 {
1681 	bool err = false;
1682 	u32 ena_mask = 0;
1683 	u16 idx;
1684 	u16 i;
1685 
1686 	/* Only support FD and RSS masking, otherwise nothing to be done */
1687 	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
1688 		return 0;
1689 
1690 	for (i = 0; i < hw->blk[blk].es.fvw; i++)
1691 		if (masks[i] && masks[i] != 0xFFFF) {
1692 			if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
1693 				ena_mask |= BIT(idx);
1694 			} else {
1695 				/* not enough bitmaps */
1696 				err = true;
1697 				break;
1698 			}
1699 		}
1700 
1701 	if (err) {
1702 		/* free any bitmaps we have allocated */
1703 		for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
1704 			if (ena_mask & BIT(i))
1705 				ice_free_prof_mask(hw, blk, i);
1706 
1707 		return -EIO;
1708 	}
1709 
1710 	/* enable the masks for this profile */
1711 	ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
1712 
1713 	/* store enabled masks with profile so that they can be freed later */
1714 	hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
1715 
1716 	return 0;
1717 }
1718 
1719 /**
1720  * ice_write_es - write an extraction sequence and symmetric setting to hardware
1721  * @hw: pointer to the HW struct
1722  * @blk: the block in which to write the extraction sequence
1723  * @prof_id: the profile ID to write
1724  * @fv: pointer to the extraction sequence to write - NULL to clear extraction
1725  * @symm: symmetric setting for RSS profiles
1726  */
1727 static void
1728 ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
1729 	     struct ice_fv_word *fv, bool symm)
1730 {
1731 	u16 off;
1732 
1733 	off = prof_id * hw->blk[blk].es.fvw;
1734 	if (!fv) {
1735 		memset(&hw->blk[blk].es.t[off], 0,
1736 		       hw->blk[blk].es.fvw * sizeof(*fv));
1737 		hw->blk[blk].es.written[prof_id] = false;
1738 	} else {
1739 		memcpy(&hw->blk[blk].es.t[off], fv,
1740 		       hw->blk[blk].es.fvw * sizeof(*fv));
1741 	}
1742 
1743 	if (blk == ICE_BLK_RSS)
1744 		hw->blk[blk].es.symm[prof_id] = symm;
1745 }
1746 
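/* Illustrative sketch (not part of this file): the extraction sequence
 * table es.t is a flat array holding fvw field-vector words per profile,
 * so profile prof_id starts at prof_id * fvw -- the same indexing used in
 * ice_find_prot_off() above. A host-side model (demo_ names are made up):
 */
#include <stddef.h>
#include <stdint.h>

struct demo_fv_word {
	uint8_t prot_id;
	uint16_t off;
};

static struct demo_fv_word *
demo_prof_fv(struct demo_fv_word *t, size_t fvw, size_t prof_id)
{
	return t + prof_id * fvw;	/* first FV word of this profile */
}
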
1747 /**
1748  * ice_prof_dec_ref - decrement reference count for profile
1749  * @hw: pointer to the HW struct
1750  * @blk: the block from which to free the profile ID
1751  * @prof_id: the profile ID for which to decrement the reference count
1752  */
1753 static int
1754 ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
1755 {
1756 	if (prof_id >= hw->blk[blk].es.count)
1757 		return -EINVAL;
1758 
1759 	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
1760 		if (!--hw->blk[blk].es.ref_count[prof_id]) {
1761 			ice_write_es(hw, blk, prof_id, NULL, false);
1762 			ice_free_prof_masks(hw, blk, prof_id);
1763 			return ice_free_prof_id(hw, blk, prof_id);
1764 		}
1765 	}
1766 
1767 	return 0;
1768 }
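
/* Illustrative sketch (not part of the driver): profile IDs are reference
 * counted. ice_add_prof() takes a reference via ice_prof_inc_ref(), and a
 * later ice_rem_prof() drops it here; only when the count hits zero are the
 * extraction sequence, the per-profile masks and the ID itself released:
 *
 *	ice_prof_inc_ref(hw, blk, prof_id);	// profile gains a user
 *	...
 *	ice_prof_dec_ref(hw, blk, prof_id);	// last user: ES cleared, ID freed
 */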
1769 
1770 /* Block / table section IDs */
1771 static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
1772 	/* SWITCH */
1773 	{	ICE_SID_XLT1_SW,
1774 		ICE_SID_XLT2_SW,
1775 		ICE_SID_PROFID_TCAM_SW,
1776 		ICE_SID_PROFID_REDIR_SW,
1777 		ICE_SID_FLD_VEC_SW
1778 	},
1779 
1780 	/* ACL */
1781 	{	ICE_SID_XLT1_ACL,
1782 		ICE_SID_XLT2_ACL,
1783 		ICE_SID_PROFID_TCAM_ACL,
1784 		ICE_SID_PROFID_REDIR_ACL,
1785 		ICE_SID_FLD_VEC_ACL
1786 	},
1787 
1788 	/* FD */
1789 	{	ICE_SID_XLT1_FD,
1790 		ICE_SID_XLT2_FD,
1791 		ICE_SID_PROFID_TCAM_FD,
1792 		ICE_SID_PROFID_REDIR_FD,
1793 		ICE_SID_FLD_VEC_FD
1794 	},
1795 
1796 	/* RSS */
1797 	{	ICE_SID_XLT1_RSS,
1798 		ICE_SID_XLT2_RSS,
1799 		ICE_SID_PROFID_TCAM_RSS,
1800 		ICE_SID_PROFID_REDIR_RSS,
1801 		ICE_SID_FLD_VEC_RSS
1802 	},
1803 
1804 	/* PE */
1805 	{	ICE_SID_XLT1_PE,
1806 		ICE_SID_XLT2_PE,
1807 		ICE_SID_PROFID_TCAM_PE,
1808 		ICE_SID_PROFID_REDIR_PE,
1809 		ICE_SID_FLD_VEC_PE
1810 	}
1811 };
1812 
1813 /**
1814  * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
1815  * @hw: pointer to the hardware structure
1816  * @blk: the HW block to initialize
1817  */
1818 static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
1819 {
1820 	u16 pt;
1821 
1822 	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
1823 		u8 ptg;
1824 
1825 		ptg = hw->blk[blk].xlt1.t[pt];
1826 		if (ptg != ICE_DEFAULT_PTG) {
1827 			ice_ptg_alloc_val(hw, blk, ptg);
1828 			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
1829 		}
1830 	}
1831 }
1832 
1833 /**
1834  * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
1835  * @hw: pointer to the hardware structure
1836  * @blk: the HW block to initialize
1837  */
1838 static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
1839 {
1840 	u16 vsi;
1841 
1842 	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
1843 		u16 vsig;
1844 
1845 		vsig = hw->blk[blk].xlt2.t[vsi];
1846 		if (vsig) {
1847 			ice_vsig_alloc_val(hw, blk, vsig);
1848 			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
1849 			/* no changes at this time, since this has been
1850 			 * initialized from the original package
1851 			 */
1852 			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
1853 		}
1854 	}
1855 }
1856 
1857 /**
1858  * ice_init_sw_db - init software database from HW tables
1859  * @hw: pointer to the hardware structure
1860  */
1861 static void ice_init_sw_db(struct ice_hw *hw)
1862 {
1863 	u16 i;
1864 
1865 	for (i = 0; i < ICE_BLK_COUNT; i++) {
1866 		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
1867 		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
1868 	}
1869 }
1870 
1871 /**
1872  * ice_fill_tbl - Reads content of a single table type into database
1873  * @hw: pointer to the hardware structure
1874  * @block_id: Block ID of the table to copy
1875  * @sid: Section ID of the table to copy
1876  *
1877  * Will attempt to read the entire content of a given table of a single block
1878  * into the driver database. We assume that the buffer will always
1879  * be as large as or larger than the data contained in the package. If
1880  * this condition is not met, there is most likely an error in the package
1881  * contents.
1882  */
1883 static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
1884 {
1885 	u32 dst_len, sect_len, offset = 0;
1886 	struct ice_prof_redir_section *pr;
1887 	struct ice_prof_id_section *pid;
1888 	struct ice_xlt1_section *xlt1;
1889 	struct ice_xlt2_section *xlt2;
1890 	struct ice_sw_fv_section *es;
1891 	struct ice_pkg_enum state;
1892 	u8 *src, *dst;
1893 	void *sect;
1894 
1895 	/* If the HW segment pointer is NULL, the first iteration of
1896 	 * ice_pkg_enum_section() will fail. In that case the HW tables are
1897 	 * left unfilled and we return without error.
1898 	 */
1899 	if (!hw->seg) {
1900 		ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
1901 		return;
1902 	}
1903 
1904 	memset(&state, 0, sizeof(state));
1905 
1906 	sect = ice_pkg_enum_section(hw->seg, &state, sid);
1907 
1908 	while (sect) {
1909 		switch (sid) {
1910 		case ICE_SID_XLT1_SW:
1911 		case ICE_SID_XLT1_FD:
1912 		case ICE_SID_XLT1_RSS:
1913 		case ICE_SID_XLT1_ACL:
1914 		case ICE_SID_XLT1_PE:
1915 			xlt1 = sect;
1916 			src = xlt1->value;
1917 			sect_len = le16_to_cpu(xlt1->count) *
1918 				sizeof(*hw->blk[block_id].xlt1.t);
1919 			dst = hw->blk[block_id].xlt1.t;
1920 			dst_len = hw->blk[block_id].xlt1.count *
1921 				sizeof(*hw->blk[block_id].xlt1.t);
1922 			break;
1923 		case ICE_SID_XLT2_SW:
1924 		case ICE_SID_XLT2_FD:
1925 		case ICE_SID_XLT2_RSS:
1926 		case ICE_SID_XLT2_ACL:
1927 		case ICE_SID_XLT2_PE:
1928 			xlt2 = sect;
1929 			src = (__force u8 *)xlt2->value;
1930 			sect_len = le16_to_cpu(xlt2->count) *
1931 				sizeof(*hw->blk[block_id].xlt2.t);
1932 			dst = (u8 *)hw->blk[block_id].xlt2.t;
1933 			dst_len = hw->blk[block_id].xlt2.count *
1934 				sizeof(*hw->blk[block_id].xlt2.t);
1935 			break;
1936 		case ICE_SID_PROFID_TCAM_SW:
1937 		case ICE_SID_PROFID_TCAM_FD:
1938 		case ICE_SID_PROFID_TCAM_RSS:
1939 		case ICE_SID_PROFID_TCAM_ACL:
1940 		case ICE_SID_PROFID_TCAM_PE:
1941 			pid = sect;
1942 			src = (u8 *)pid->entry;
1943 			sect_len = le16_to_cpu(pid->count) *
1944 				sizeof(*hw->blk[block_id].prof.t);
1945 			dst = (u8 *)hw->blk[block_id].prof.t;
1946 			dst_len = hw->blk[block_id].prof.count *
1947 				sizeof(*hw->blk[block_id].prof.t);
1948 			break;
1949 		case ICE_SID_PROFID_REDIR_SW:
1950 		case ICE_SID_PROFID_REDIR_FD:
1951 		case ICE_SID_PROFID_REDIR_RSS:
1952 		case ICE_SID_PROFID_REDIR_ACL:
1953 		case ICE_SID_PROFID_REDIR_PE:
1954 			pr = sect;
1955 			src = pr->redir_value;
1956 			sect_len = le16_to_cpu(pr->count) *
1957 				sizeof(*hw->blk[block_id].prof_redir.t);
1958 			dst = hw->blk[block_id].prof_redir.t;
1959 			dst_len = hw->blk[block_id].prof_redir.count *
1960 				sizeof(*hw->blk[block_id].prof_redir.t);
1961 			break;
1962 		case ICE_SID_FLD_VEC_SW:
1963 		case ICE_SID_FLD_VEC_FD:
1964 		case ICE_SID_FLD_VEC_RSS:
1965 		case ICE_SID_FLD_VEC_ACL:
1966 		case ICE_SID_FLD_VEC_PE:
1967 			es = sect;
1968 			src = (u8 *)es->fv;
1969 			sect_len = (u32)(le16_to_cpu(es->count) *
1970 					 hw->blk[block_id].es.fvw) *
1971 				sizeof(*hw->blk[block_id].es.t);
1972 			dst = (u8 *)hw->blk[block_id].es.t;
1973 			dst_len = (u32)(hw->blk[block_id].es.count *
1974 					hw->blk[block_id].es.fvw) *
1975 				sizeof(*hw->blk[block_id].es.t);
1976 			break;
1977 		default:
1978 			return;
1979 		}
1980 
1981 		/* if the section offset exceeds destination length, terminate
1982 		 * table fill.
1983 		 */
1984 		if (offset > dst_len)
1985 			return;
1986 
1987 		/* If the sum of the section size and offset exceeds the destination
1988 		 * size, we are out of bounds of the HW table size for that PF.
1989 		 * Clamp the section length to fill only the remaining table space
1990 		 * of that PF.
1991 		 */
1992 		if ((offset + sect_len) > dst_len)
1993 			sect_len = dst_len - offset;
1994 
1995 		memcpy(dst + offset, src, sect_len);
1996 		offset += sect_len;
1997 		sect = ice_pkg_enum_section(NULL, &state, sid);
1998 	}
1999 }
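
/* Worked example (illustrative, made-up sizes): suppose a PF's XLT1 shadow
 * table holds 1024 one-byte entries (dst_len = 1024) and the package carries
 * two XLT1 sections of 600 entries each. The first copy runs at offset 0 for
 * 600 bytes; the second starts at offset 600, and since 600 + 600 > 1024 its
 * length is clamped to 1024 - 600 = 424 bytes, filling the rest of the table
 * without overrunning it.
 */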
2000 
2001 /**
2002  * ice_fill_blk_tbls - Read package context for tables
2003  * @hw: pointer to the hardware structure
2004  *
2005  * Reads the current package contents and populates the driver
2006  * database with the data iteratively for all advanced feature
2007  * blocks. It is assumed that the HW tables have been allocated.
2008  */
2009 void ice_fill_blk_tbls(struct ice_hw *hw)
2010 {
2011 	u8 i;
2012 
2013 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2014 		enum ice_block blk_id = (enum ice_block)i;
2015 
2016 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
2017 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
2018 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
2019 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
2020 		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
2021 	}
2022 
2023 	ice_init_sw_db(hw);
2024 }
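
/* Usage sketch (illustrative): the shadow tables must be allocated first and
 * the package must already be downloaded so that hw->seg is valid, roughly:
 *
 *	status = ice_init_hw_tbls(hw);	// allocate shadow copies
 *	if (!status)
 *		ice_fill_blk_tbls(hw);	// copy package contents into them
 */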
2025 
2026 /**
2027  * ice_free_prof_map - free profile map
2028  * @hw: pointer to the hardware structure
2029  * @blk_idx: HW block index
2030  */
2031 static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
2032 {
2033 	struct ice_es *es = &hw->blk[blk_idx].es;
2034 	struct ice_prof_map *del, *tmp;
2035 
2036 	mutex_lock(&es->prof_map_lock);
2037 	list_for_each_entry_safe(del, tmp, &es->prof_map, list) {
2038 		list_del(&del->list);
2039 		devm_kfree(ice_hw_to_dev(hw), del);
2040 	}
2041 	INIT_LIST_HEAD(&es->prof_map);
2042 	mutex_unlock(&es->prof_map_lock);
2043 }
2044 
2045 /**
2046  * ice_free_flow_profs - free flow profile entries
2047  * @hw: pointer to the hardware structure
2048  * @blk_idx: HW block index
2049  */
2050 static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
2051 {
2052 	struct ice_flow_prof *p, *tmp;
2053 
2054 	mutex_lock(&hw->fl_profs_locks[blk_idx]);
2055 	list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) {
2056 		struct ice_flow_entry *e, *t;
2057 
2058 		list_for_each_entry_safe(e, t, &p->entries, l_entry)
2059 			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
2060 					   ICE_FLOW_ENTRY_HNDL(e));
2061 
2062 		list_del(&p->l_entry);
2063 
2064 		mutex_destroy(&p->entries_lock);
2065 		devm_kfree(ice_hw_to_dev(hw), p);
2066 	}
2067 	mutex_unlock(&hw->fl_profs_locks[blk_idx]);
2068 
2069 	/* If the driver is in reset and the tables are being cleared,
2070 	 * re-initialize the flow profile list heads.
2071 	 */
2072 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2073 }
2074 
2075 /**
2076  * ice_free_vsig_tbl - free complete VSIG table entries
2077  * @hw: pointer to the hardware structure
2078  * @blk: the HW block on which to free the VSIG table entries
2079  */
2080 static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
2081 {
2082 	u16 i;
2083 
2084 	if (!hw->blk[blk].xlt2.vsig_tbl)
2085 		return;
2086 
2087 	for (i = 1; i < ICE_MAX_VSIGS; i++)
2088 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
2089 			ice_vsig_free(hw, blk, i);
2090 }
2091 
2092 /**
2093  * ice_free_hw_tbls - free hardware table memory
2094  * @hw: pointer to the hardware structure
2095  */
2096 void ice_free_hw_tbls(struct ice_hw *hw)
2097 {
2098 	struct ice_rss_cfg *r, *rt;
2099 	u8 i;
2100 
2101 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2102 		if (hw->blk[i].is_list_init) {
2103 			struct ice_es *es = &hw->blk[i].es;
2104 
2105 			ice_free_prof_map(hw, i);
2106 			mutex_destroy(&es->prof_map_lock);
2107 
2108 			ice_free_flow_profs(hw, i);
2109 			mutex_destroy(&hw->fl_profs_locks[i]);
2110 
2111 			hw->blk[i].is_list_init = false;
2112 		}
2113 		ice_free_vsig_tbl(hw, (enum ice_block)i);
2114 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
2115 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
2116 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
2117 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
2118 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
2119 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
2120 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
2121 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
2122 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
2123 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
2124 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.symm);
2125 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
2126 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
2127 		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_id.id);
2128 	}
2129 
2130 	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
2131 		list_del(&r->l_entry);
2132 		devm_kfree(ice_hw_to_dev(hw), r);
2133 	}
2134 	mutex_destroy(&hw->rss_locks);
2135 	ice_shutdown_all_prof_masks(hw);
2136 	memset(hw->blk, 0, sizeof(hw->blk));
2137 }
2138 
2139 /**
2140  * ice_init_flow_profs - init flow profile locks and list heads
2141  * @hw: pointer to the hardware structure
2142  * @blk_idx: HW block index
2143  */
2144 static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
2145 {
2146 	mutex_init(&hw->fl_profs_locks[blk_idx]);
2147 	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2148 }
2149 
2150 /**
2151  * ice_clear_hw_tbls - clear HW tables and flow profiles
2152  * @hw: pointer to the hardware structure
2153  */
2154 void ice_clear_hw_tbls(struct ice_hw *hw)
2155 {
2156 	u8 i;
2157 
2158 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2159 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2160 		struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
2161 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
2162 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2163 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2164 		struct ice_es *es = &hw->blk[i].es;
2165 
2166 		if (hw->blk[i].is_list_init) {
2167 			ice_free_prof_map(hw, i);
2168 			ice_free_flow_profs(hw, i);
2169 		}
2170 
2171 		ice_free_vsig_tbl(hw, (enum ice_block)i);
2172 
2173 		memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
2174 		memset(xlt1->ptg_tbl, 0,
2175 		       ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
2176 		memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
2177 
2178 		memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
2179 		memset(xlt2->vsig_tbl, 0,
2180 		       xlt2->count * sizeof(*xlt2->vsig_tbl));
2181 		memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
2182 
2183 		memset(prof->t, 0, prof->count * sizeof(*prof->t));
2184 		memset(prof_redir->t, 0,
2185 		       prof_redir->count * sizeof(*prof_redir->t));
2186 
2187 		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
2188 		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
2189 		memset(es->symm, 0, es->count * sizeof(*es->symm));
2190 		memset(es->written, 0, es->count * sizeof(*es->written));
2191 		memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
2192 
2193 		memset(prof_id->id, 0, prof_id->count * sizeof(*prof_id->id));
2194 	}
2195 }
2196 
2197 /**
2198  * ice_init_hw_tbls - init hardware table memory
2199  * @hw: pointer to the hardware structure
2200  */
2201 int ice_init_hw_tbls(struct ice_hw *hw)
2202 {
2203 	u8 i;
2204 
2205 	mutex_init(&hw->rss_locks);
2206 	INIT_LIST_HEAD(&hw->rss_list_head);
2207 	ice_init_all_prof_masks(hw);
2208 	for (i = 0; i < ICE_BLK_COUNT; i++) {
2209 		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
2210 		struct ice_prof_id *prof_id = &hw->blk[i].prof_id;
2211 		struct ice_prof_tcam *prof = &hw->blk[i].prof;
2212 		struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
2213 		struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
2214 		struct ice_es *es = &hw->blk[i].es;
2215 		u16 j;
2216 
2217 		if (hw->blk[i].is_list_init)
2218 			continue;
2219 
2220 		ice_init_flow_profs(hw, i);
2221 		mutex_init(&es->prof_map_lock);
2222 		INIT_LIST_HEAD(&es->prof_map);
2223 		hw->blk[i].is_list_init = true;
2224 
2225 		hw->blk[i].overwrite = blk_sizes[i].overwrite;
2226 		es->reverse = blk_sizes[i].reverse;
2227 
2228 		xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
2229 		xlt1->count = blk_sizes[i].xlt1;
2230 
2231 		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
2232 					    sizeof(*xlt1->ptypes), GFP_KERNEL);
2233 
2234 		if (!xlt1->ptypes)
2235 			goto err;
2236 
2237 		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
2238 					     sizeof(*xlt1->ptg_tbl),
2239 					     GFP_KERNEL);
2240 
2241 		if (!xlt1->ptg_tbl)
2242 			goto err;
2243 
2244 		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
2245 				       sizeof(*xlt1->t), GFP_KERNEL);
2246 		if (!xlt1->t)
2247 			goto err;
2248 
2249 		xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
2250 		xlt2->count = blk_sizes[i].xlt2;
2251 
2252 		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
2253 					  sizeof(*xlt2->vsis), GFP_KERNEL);
2254 
2255 		if (!xlt2->vsis)
2256 			goto err;
2257 
2258 		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
2259 					      sizeof(*xlt2->vsig_tbl),
2260 					      GFP_KERNEL);
2261 		if (!xlt2->vsig_tbl)
2262 			goto err;
2263 
2264 		for (j = 0; j < xlt2->count; j++)
2265 			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
2266 
2267 		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
2268 				       sizeof(*xlt2->t), GFP_KERNEL);
2269 		if (!xlt2->t)
2270 			goto err;
2271 
2272 		prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
2273 		prof->count = blk_sizes[i].prof_tcam;
2274 		prof->max_prof_id = blk_sizes[i].prof_id;
2275 		prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
2276 		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
2277 				       sizeof(*prof->t), GFP_KERNEL);
2278 
2279 		if (!prof->t)
2280 			goto err;
2281 
2282 		prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
2283 		prof_redir->count = blk_sizes[i].prof_redir;
2284 		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
2285 					     prof_redir->count,
2286 					     sizeof(*prof_redir->t),
2287 					     GFP_KERNEL);
2288 
2289 		if (!prof_redir->t)
2290 			goto err;
2291 
2292 		es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
2293 		es->count = blk_sizes[i].es;
2294 		es->fvw = blk_sizes[i].fvw;
2295 		es->t = devm_kcalloc(ice_hw_to_dev(hw),
2296 				     (u32)(es->count * es->fvw),
2297 				     sizeof(*es->t), GFP_KERNEL);
2298 		if (!es->t)
2299 			goto err;
2300 
2301 		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
2302 					     sizeof(*es->ref_count),
2303 					     GFP_KERNEL);
2304 		if (!es->ref_count)
2305 			goto err;
2306 
2307 		es->symm = devm_kcalloc(ice_hw_to_dev(hw), es->count,
2308 					sizeof(*es->symm), GFP_KERNEL);
2309 		if (!es->symm)
2310 			goto err;
2311 
2312 		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
2313 					   sizeof(*es->written), GFP_KERNEL);
2314 		if (!es->written)
2315 			goto err;
2316 
2317 		es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
2318 					    sizeof(*es->mask_ena), GFP_KERNEL);
2319 		if (!es->mask_ena)
2320 			goto err;
2321 
2322 		prof_id->count = blk_sizes[i].prof_id;
2323 		prof_id->id = devm_kcalloc(ice_hw_to_dev(hw), prof_id->count,
2324 					   sizeof(*prof_id->id), GFP_KERNEL);
2325 		if (!prof_id->id)
2326 			goto err;
2327 	}
2328 	return 0;
2329 
2330 err:
2331 	ice_free_hw_tbls(hw);
2332 	return -ENOMEM;
2333 }
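
/* Illustrative note on the unwind above (not a driver requirement): every
 * allocation failure jumps to the common err label, and ice_free_hw_tbls()
 * is safe on a partially initialized structure because devm_kfree() accepts
 * NULL pointers. A caller therefore needs no cleanup of its own:
 *
 *	status = ice_init_hw_tbls(hw);
 *	if (status)
 *		return status;	// tables were already torn down internally
 */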
2334 
2335 /**
2336  * ice_prof_gen_key - generate profile ID key
2337  * @hw: pointer to the HW struct
2338  * @blk: the block in which to write profile ID to
2339  * @ptg: packet type group (PTG) portion of key
2340  * @vsig: VSIG portion of key
2341  * @cdid: CDID portion of key
2342  * @flags: flag portion of key
2343  * @vl_msk: valid mask
2344  * @dc_msk: don't care mask
2345  * @nm_msk: never match mask
2346  * @key: output of profile ID key
2347  */
2348 static int
2349 ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
2350 		 u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
2351 		 u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
2352 		 u8 key[ICE_TCAM_KEY_SZ])
2353 {
2354 	struct ice_prof_id_key inkey;
2355 
2356 	inkey.xlt1 = ptg;
2357 	inkey.xlt2_cdid = cpu_to_le16(vsig);
2358 	inkey.flags = cpu_to_le16(flags);
2359 
2360 	switch (hw->blk[blk].prof.cdid_bits) {
2361 	case 0:
2362 		break;
2363 	case 2:
2364 #define ICE_CD_2_M 0xC000U
2365 #define ICE_CD_2_S 14
2366 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M);
2367 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S);
2368 		break;
2369 	case 4:
2370 #define ICE_CD_4_M 0xF000U
2371 #define ICE_CD_4_S 12
2372 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M);
2373 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S);
2374 		break;
2375 	case 8:
2376 #define ICE_CD_8_M 0xFF00U
2377 #define ICE_CD_8_S 8	/* an 8-bit CDID occupies bits 15:8 (see ICE_CD_8_M) */
2378 		inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M);
2379 		inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S);
2380 		break;
2381 	default:
2382 		ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
2383 		break;
2384 	}
2385 
2386 	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
2387 			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
2388 }
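
/* Worked example (illustrative): with cdid_bits == 2 the CDID select
 * occupies the top two bits of the 16-bit XLT2/CDID field (ICE_CD_2_M is
 * 0xC000). For cdid == 1, BIT(1) << ICE_CD_2_S == 0x8000, so the VSIG keeps
 * bits 13:0 and the CDID lands in bits 15:14 before ice_set_key() folds the
 * key and masks together.
 */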
2389 
2390 /**
2391  * ice_tcam_write_entry - write TCAM entry
2392  * @hw: pointer to the HW struct
2393  * @blk: the block in which to write profile ID to
2394  * @idx: the entry index to write to
2395  * @prof_id: profile ID
2396  * @ptg: packet type group (PTG) portion of key
2397  * @vsig: VSIG portion of key
2398  * @cdid: CDID portion of key
2399  * @flags: flag portion of key
2400  * @vl_msk: valid mask
2401  * @dc_msk: don't care mask
2402  * @nm_msk: never match mask
2403  */
2404 static int
2405 ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
2406 		     u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
2407 		     u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
2408 		     u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
2409 		     u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
2410 {
2411 	struct ice_prof_tcam_entry;
2412 	int status;
2413 
2414 	status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
2415 				  dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
2416 	if (!status) {
2417 		hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx);
2418 		hw->blk[blk].prof.t[idx].prof_id = prof_id;
2419 	}
2420 
2421 	return status;
2422 }
2423 
2424 /**
2425  * ice_vsig_get_ref - return the number of VSIs that belong to a VSIG
2426  * @hw: pointer to the hardware structure
2427  * @blk: HW block
2428  * @vsig: VSIG to query
2429  * @refs: pointer to variable to receive the reference count
2430  */
2431 static int
2432 ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
2433 {
2434 	u16 idx = vsig & ICE_VSIG_IDX_M;
2435 	struct ice_vsig_vsi *ptr;
2436 
2437 	*refs = 0;
2438 
2439 	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
2440 		return -ENOENT;
2441 
2442 	ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
2443 	while (ptr) {
2444 		(*refs)++;
2445 		ptr = ptr->next_vsi;
2446 	}
2447 
2448 	return 0;
2449 }
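
/* Usage sketch (illustrative, hypothetical caller): a caller that only needs
 * to know whether a VSIG still has members could use this as:
 *
 *	u16 refs;
 *
 *	if (!ice_vsig_get_ref(hw, blk, vsig, &refs) && !refs)
 *		;	// VSIG exists but is empty - candidate for removal
 */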
2450 
2451 /**
2452  * ice_has_prof_vsig - check to see if VSIG has a specific profile
2453  * @hw: pointer to the hardware structure
2454  * @blk: HW block
2455  * @vsig: VSIG to check against
2456  * @hdl: profile handle
2457  */
2458 static bool
2459 ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
2460 {
2461 	u16 idx = vsig & ICE_VSIG_IDX_M;
2462 	struct ice_vsig_prof *ent;
2463 
2464 	list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
2465 			    list)
2466 		if (ent->profile_cookie == hdl)
2467 			return true;
2468 
2469 	ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
2470 		  vsig);
2471 	return false;
2472 }
2473 
2474 /**
2475  * ice_prof_bld_es - build profile ID extraction sequence changes
2476  * @hw: pointer to the HW struct
2477  * @blk: hardware block
2478  * @bld: the update package buffer build to add to
2479  * @chgs: the list of changes to make in hardware
2480  */
2481 static int
2482 ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
2483 		struct ice_buf_build *bld, struct list_head *chgs)
2484 {
2485 	u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
2486 	struct ice_chs_chg *tmp;
2487 
2488 	list_for_each_entry(tmp, chgs, list_entry)
2489 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
2490 			u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
2491 			struct ice_pkg_es *p;
2492 			u32 id;
2493 
2494 			id = ice_sect_id(blk, ICE_VEC_TBL);
2495 			p = ice_pkg_buf_alloc_section(bld, id,
2496 						      struct_size(p, es, 1) +
2497 						      vec_size -
2498 						      sizeof(p->es[0]));
2499 
2500 			if (!p)
2501 				return -ENOSPC;
2502 
2503 			p->count = cpu_to_le16(1);
2504 			p->offset = cpu_to_le16(tmp->prof_id);
2505 
2506 			memcpy(p->es, &hw->blk[blk].es.t[off], vec_size);
2507 		}
2508 
2509 	return 0;
2510 }
2511 
2512 /**
2513  * ice_prof_bld_tcam - build profile ID TCAM changes
2514  * @hw: pointer to the HW struct
2515  * @blk: hardware block
2516  * @bld: the update package buffer build to add to
2517  * @chgs: the list of changes to make in hardware
2518  */
2519 static int
2520 ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
2521 		  struct ice_buf_build *bld, struct list_head *chgs)
2522 {
2523 	struct ice_chs_chg *tmp;
2524 
2525 	list_for_each_entry(tmp, chgs, list_entry)
2526 		if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
2527 			struct ice_prof_id_section *p;
2528 			u32 id;
2529 
2530 			id = ice_sect_id(blk, ICE_PROF_TCAM);
2531 			p = ice_pkg_buf_alloc_section(bld, id,
2532 						      struct_size(p, entry, 1));
2533 
2534 			if (!p)
2535 				return -ENOSPC;
2536 
2537 			p->count = cpu_to_le16(1);
2538 			p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
2539 			p->entry[0].prof_id = tmp->prof_id;
2540 
2541 			memcpy(p->entry[0].key,
2542 			       &hw->blk[blk].prof.t[tmp->tcam_idx].key,
2543 			       sizeof(hw->blk[blk].prof.t->key));
2544 		}
2545 
2546 	return 0;
2547 }
2548 
2549 /**
2550  * ice_prof_bld_xlt1 - build XLT1 changes
2551  * @blk: hardware block
2552  * @bld: the update package buffer build to add to
2553  * @chgs: the list of changes to make in hardware
2554  */
2555 static int
2556 ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
2557 		  struct list_head *chgs)
2558 {
2559 	struct ice_chs_chg *tmp;
2560 
2561 	list_for_each_entry(tmp, chgs, list_entry)
2562 		if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
2563 			struct ice_xlt1_section *p;
2564 			u32 id;
2565 
2566 			id = ice_sect_id(blk, ICE_XLT1);
2567 			p = ice_pkg_buf_alloc_section(bld, id,
2568 						      struct_size(p, value, 1));
2569 
2570 			if (!p)
2571 				return -ENOSPC;
2572 
2573 			p->count = cpu_to_le16(1);
2574 			p->offset = cpu_to_le16(tmp->ptype);
2575 			p->value[0] = tmp->ptg;
2576 		}
2577 
2578 	return 0;
2579 }
2580 
2581 /**
2582  * ice_prof_bld_xlt2 - build XLT2 changes
2583  * @blk: hardware block
2584  * @bld: the update package buffer build to add to
2585  * @chgs: the list of changes to make in hardware
2586  */
2587 static int
2588 ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
2589 		  struct list_head *chgs)
2590 {
2591 	struct ice_chs_chg *tmp;
2592 
2593 	list_for_each_entry(tmp, chgs, list_entry) {
2594 		struct ice_xlt2_section *p;
2595 		u32 id;
2596 
2597 		switch (tmp->type) {
2598 		case ICE_VSIG_ADD:
2599 		case ICE_VSI_MOVE:
2600 		case ICE_VSIG_REM:
2601 			id = ice_sect_id(blk, ICE_XLT2);
2602 			p = ice_pkg_buf_alloc_section(bld, id,
2603 						      struct_size(p, value, 1));
2604 
2605 			if (!p)
2606 				return -ENOSPC;
2607 
2608 			p->count = cpu_to_le16(1);
2609 			p->offset = cpu_to_le16(tmp->vsi);
2610 			p->value[0] = cpu_to_le16(tmp->vsig);
2611 			break;
2612 		default:
2613 			break;
2614 		}
2615 	}
2616 
2617 	return 0;
2618 }
2619 
2620 /**
2621  * ice_upd_prof_hw - update hardware using the change list
2622  * @hw: pointer to the HW struct
2623  * @blk: hardware block
2624  * @chgs: the list of changes to make in hardware
2625  */
2626 static int
2627 ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
2628 		struct list_head *chgs)
2629 {
2630 	struct ice_buf_build *b;
2631 	struct ice_chs_chg *tmp;
2632 	u16 pkg_sects;
2633 	u16 xlt1 = 0;
2634 	u16 xlt2 = 0;
2635 	u16 tcam = 0;
2636 	u16 es = 0;
2637 	int status;
2638 	u16 sects;
2639 
2640 	/* count number of sections we need */
2641 	list_for_each_entry(tmp, chgs, list_entry) {
2642 		switch (tmp->type) {
2643 		case ICE_PTG_ES_ADD:
2644 			if (tmp->add_ptg)
2645 				xlt1++;
2646 			if (tmp->add_prof)
2647 				es++;
2648 			break;
2649 		case ICE_TCAM_ADD:
2650 			tcam++;
2651 			break;
2652 		case ICE_VSIG_ADD:
2653 		case ICE_VSI_MOVE:
2654 		case ICE_VSIG_REM:
2655 			xlt2++;
2656 			break;
2657 		default:
2658 			break;
2659 		}
2660 	}
2661 	sects = xlt1 + xlt2 + tcam + es;
2662 
2663 	if (!sects)
2664 		return 0;
2665 
2666 	/* Build update package buffer */
2667 	b = ice_pkg_buf_alloc(hw);
2668 	if (!b)
2669 		return -ENOMEM;
2670 
2671 	status = ice_pkg_buf_reserve_section(b, sects);
2672 	if (status)
2673 		goto error_tmp;
2674 
2675 	/* Preserve order of table update: ES, TCAM, PTG, VSIG */
2676 	if (es) {
2677 		status = ice_prof_bld_es(hw, blk, b, chgs);
2678 		if (status)
2679 			goto error_tmp;
2680 	}
2681 
2682 	if (tcam) {
2683 		status = ice_prof_bld_tcam(hw, blk, b, chgs);
2684 		if (status)
2685 			goto error_tmp;
2686 	}
2687 
2688 	if (xlt1) {
2689 		status = ice_prof_bld_xlt1(blk, b, chgs);
2690 		if (status)
2691 			goto error_tmp;
2692 	}
2693 
2694 	if (xlt2) {
2695 		status = ice_prof_bld_xlt2(blk, b, chgs);
2696 		if (status)
2697 			goto error_tmp;
2698 	}
2699 
2700 	/* After building the package buffer, check that the section count in
2701 	 * the buffer is non-zero and matches the number of sections detected
2702 	 * for the package update.
2703 	 */
2704 	pkg_sects = ice_pkg_buf_get_active_sections(b);
2705 	if (!pkg_sects || pkg_sects != sects) {
2706 		status = -EINVAL;
2707 		goto error_tmp;
2708 	}
2709 
2710 	/* update package */
2711 	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
2712 	if (status == -EIO)
2713 		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
2714 
2715 error_tmp:
2716 	ice_pkg_buf_free(hw, b);
2717 	return status;
2718 }
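
/* Usage sketch (illustrative): callers accumulate ice_chs_chg records on a
 * local list and commit them in one update package. The ES -> TCAM -> XLT1
 * -> XLT2 build order above matters: extraction sequences and TCAM entries
 * must be in place before the XLT tables begin steering lookups at them.
 *
 *	LIST_HEAD(chg);
 *
 *	status = ice_move_vsi(hw, blk, vsi, vsig, &chg);  // queue XLT2 change
 *	if (!status)
 *		status = ice_upd_prof_hw(hw, blk, &chg);  // commit to HW
 */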
2719 
2720 /**
2721  * ice_update_fd_mask - set Flow Director Field Vector mask for a profile
2722  * @hw: pointer to the HW struct
2723  * @prof_id: profile ID
2724  * @mask_sel: mask select
2725  *
2726  * This function enables any of the masks selected by the mask select
2727  * parameter for the specified profile.
2728  */
2729 static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel)
2730 {
2731 	wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel);
2732 
2733 	ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n", prof_id,
2734 		  GLQF_FDMASK_SEL(prof_id), mask_sel);
2735 }
2736 
2737 struct ice_fd_src_dst_pair {
2738 	u8 prot_id;
2739 	u8 count;
2740 	u16 off;
2741 };
2742 
2743 static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
2744 	/* These are defined in pairs */
2745 	{ ICE_PROT_IPV4_OF_OR_S, 2, 12 },
2746 	{ ICE_PROT_IPV4_OF_OR_S, 2, 16 },
2747 
2748 	{ ICE_PROT_IPV4_IL, 2, 12 },
2749 	{ ICE_PROT_IPV4_IL, 2, 16 },
2750 
2751 	{ ICE_PROT_IPV6_OF_OR_S, 8, 8 },
2752 	{ ICE_PROT_IPV6_OF_OR_S, 8, 24 },
2753 
2754 	{ ICE_PROT_IPV6_IL, 8, 8 },
2755 	{ ICE_PROT_IPV6_IL, 8, 24 },
2756 
2757 	{ ICE_PROT_TCP_IL, 1, 0 },
2758 	{ ICE_PROT_TCP_IL, 1, 2 },
2759 
2760 	{ ICE_PROT_UDP_OF, 1, 0 },
2761 	{ ICE_PROT_UDP_OF, 1, 2 },
2762 
2763 	{ ICE_PROT_UDP_IL_OR_S, 1, 0 },
2764 	{ ICE_PROT_UDP_IL_OR_S, 1, 2 },
2765 
2766 	{ ICE_PROT_SCTP_IL, 1, 0 },
2767 	{ ICE_PROT_SCTP_IL, 1, 2 }
2768 };
2769 
2770 #define ICE_FD_SRC_DST_PAIR_COUNT	ARRAY_SIZE(ice_fd_pairs)
2771 
2772 /**
2773  * ice_update_fd_swap - set register appropriately for a FD FV extraction
2774  * @hw: pointer to the HW struct
2775  * @prof_id: profile ID
2776  * @es: extraction sequence (length of array is determined by the block)
2777  */
2778 static int
2779 ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
2780 {
2781 	DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
2782 	u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 };
2783 #define ICE_FD_FV_NOT_FOUND (-2)
2784 	s8 first_free = ICE_FD_FV_NOT_FOUND;
2785 	u8 used[ICE_MAX_FV_WORDS] = { 0 };
2786 	s8 orig_free, si;
2787 	u32 mask_sel = 0;
2788 	u8 i, j, k;
2789 
2790 	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
2791 
2792 	/* This code assumes that the Flow Director field vectors are assigned
2793 	 * from the end of the FV indexes working towards the zero index, that
2794 	 * only complete fields will be included and will be consecutive, and
2795 	 * that there are no gaps between valid indexes.
2796 	 */
2797 
2798 	/* Determine swap fields present */
2799 	for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) {
2800 		/* Find the first free entry, assuming right to left population.
2801 		 * This is where we can start adding additional pairs if needed.
2802 		 */
2803 		if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id !=
2804 		    ICE_PROT_INVALID)
2805 			first_free = i - 1;
2806 
2807 		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
2808 			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
2809 			    es[i].off == ice_fd_pairs[j].off) {
2810 				__set_bit(j, pair_list);
2811 				pair_start[j] = i;
2812 			}
2813 	}
2814 
2815 	orig_free = first_free;
2816 
2817 	/* determine missing swap fields that need to be added */
2818 	for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) {
2819 		u8 bit1 = test_bit(i + 1, pair_list);
2820 		u8 bit0 = test_bit(i, pair_list);
2821 
2822 		if (bit0 ^ bit1) {
2823 			u8 index;
2824 
2825 			/* add the appropriate 'paired' entry */
2826 			if (!bit0)
2827 				index = i;
2828 			else
2829 				index = i + 1;
2830 
2831 			/* check for room */
2832 			if (first_free + 1 < (s8)ice_fd_pairs[index].count)
2833 				return -ENOSPC;
2834 
2835 			/* place in extraction sequence */
2836 			for (k = 0; k < ice_fd_pairs[index].count; k++) {
2837 				es[first_free - k].prot_id =
2838 					ice_fd_pairs[index].prot_id;
2839 				es[first_free - k].off =
2840 					ice_fd_pairs[index].off + (k * 2);
2841 
2842 				if (k > first_free)
2843 					return -EIO;
2844 
2845 				/* keep track of non-relevant fields */
2846 				mask_sel |= BIT(first_free - k);
2847 			}
2848 
2849 			pair_start[index] = first_free;
2850 			first_free -= ice_fd_pairs[index].count;
2851 		}
2852 	}
2853 
2854 	/* fill in the swap array */
2855 	si = hw->blk[ICE_BLK_FD].es.fvw - 1;
2856 	while (si >= 0) {
2857 		u8 indexes_used = 1;
2858 
2859 		/* assume flat at this index */
2860 #define ICE_SWAP_VALID	0x80
2861 		used[si] = si | ICE_SWAP_VALID;
2862 
2863 		if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) {
2864 			si -= indexes_used;
2865 			continue;
2866 		}
2867 
2868 		/* check for a swap location */
2869 		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
2870 			if (es[si].prot_id == ice_fd_pairs[j].prot_id &&
2871 			    es[si].off == ice_fd_pairs[j].off) {
2872 				u8 idx;
2873 
2874 				/* determine the appropriate matching field */
2875 				idx = j + ((j % 2) ? -1 : 1);
2876 
2877 				indexes_used = ice_fd_pairs[idx].count;
2878 				for (k = 0; k < indexes_used; k++) {
2879 					used[si - k] = (pair_start[idx] - k) |
2880 						ICE_SWAP_VALID;
2881 				}
2882 
2883 				break;
2884 			}
2885 
2886 		si -= indexes_used;
2887 	}
2888 
2889 	/* for each set of 4 swap and 4 inset indexes, write the appropriate
2890 	 * register
2891 	 */
2892 	for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) {
2893 		u32 raw_swap = 0;
2894 		u32 raw_in = 0;
2895 
2896 		for (k = 0; k < 4; k++) {
2897 			u8 idx;
2898 
2899 			idx = (j * 4) + k;
2900 			if (used[idx] && !(mask_sel & BIT(idx))) {
2901 				raw_swap |= used[idx] << (k * BITS_PER_BYTE);
2902 #define ICE_INSET_DFLT 0x9f
2903 				raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE);
2904 			}
2905 		}
2906 
2907 		/* write the appropriate swap register set */
2908 		wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap);
2909 
2910 		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n",
2911 			  prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap);
2912 
2913 		/* write the appropriate inset register set */
2914 		wr32(hw, GLQF_FDINSET(prof_id, j), raw_in);
2915 
2916 		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n",
2917 			  prof_id, j, GLQF_FDINSET(prof_id, j), raw_in);
2918 	}
2919 
2920 	/* initially clear the mask select for this profile */
2921 	ice_update_fd_mask(hw, prof_id, 0);
2922 
2923 	return 0;
2924 }
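
/* Worked example (illustrative): each GLQF_FDSWAP register covers four FV
 * words, one byte per word. For j == 0 with used[0..3] = { 0x80, 0x81, 0x82,
 * 0x83 } (each index "flat", i.e. ICE_SWAP_VALID | index), raw_swap packs to
 * 0x83828180 and raw_in to 0x9f9f9f9f, the default inset selection.
 */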
2925 
2926 /* The entries here need to match the order of enum ice_ptype_attrib */
2927 static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
2928 	{ ICE_GTP_PDU_EH,	ICE_GTP_PDU_FLAG_MASK },
2929 	{ ICE_GTP_SESSION,	ICE_GTP_FLAGS_MASK },
2930 	{ ICE_GTP_DOWNLINK,	ICE_GTP_FLAGS_MASK },
2931 	{ ICE_GTP_UPLINK,	ICE_GTP_FLAGS_MASK },
2932 };
2933 
2934 /**
2935  * ice_get_ptype_attrib_info - get PTYPE attribute information
2936  * @type: attribute type
2937  * @info: pointer to the variable that receives the attribute information
2938  */
2939 static void
2940 ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
2941 			  struct ice_ptype_attrib_info *info)
2942 {
2943 	*info = ice_ptype_attributes[type];
2944 }
2945 
2946 /**
2947  * ice_add_prof_attrib - add any PTG with attributes to profile
2948  * @prof: pointer to the profile to which PTG entries will be added
2949  * @ptg: PTG to be added
2950  * @ptype: PTYPE that needs to be looked up
2951  * @attr: array of attributes that will be considered
2952  * @attr_cnt: number of elements in the attribute array
2953  */
2954 static int
2955 ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
2956 		    const struct ice_ptype_attributes *attr, u16 attr_cnt)
2957 {
2958 	bool found = false;
2959 	u16 i;
2960 
2961 	for (i = 0; i < attr_cnt; i++)
2962 		if (attr[i].ptype == ptype) {
2963 			found = true;
2964 
2965 			prof->ptg[prof->ptg_cnt] = ptg;
2966 			ice_get_ptype_attrib_info(attr[i].attrib,
2967 						  &prof->attr[prof->ptg_cnt]);
2968 
2969 			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
2970 				return -ENOSPC;
2971 		}
2972 
2973 	if (!found)
2974 		return -ENOENT;
2975 
2976 	return 0;
2977 }
2978 
2979 /**
2980  * ice_disable_fd_swap - set register appropriately to disable FD SWAP
2981  * @hw: pointer to the HW struct
2982  * @prof_id: profile ID
2983  */
2984 static void
2985 ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
2986 {
2987 	u16 swap_val, fvw_num;
2988 	unsigned int i;
2989 
2990 	swap_val = ICE_SWAP_VALID;
2991 	fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE;
2992 
2993 	/* Since the SWAP flag in the programming descriptor doesn't work,
2994 	 * disable the SWAP option by programming the SWAP and INSET
2995 	 * register sets accordingly.
2996 	 */
2997 	for (i = 0; i < fvw_num; i++) {
2998 		u32 raw_swap, raw_in;
2999 		unsigned int j;
3000 
3001 		raw_swap = 0;
3002 		raw_in = 0;
3003 
3004 		for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) {
3005 			raw_swap |= (swap_val++) << (j * BITS_PER_BYTE);
3006 			raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE);
3007 		}
3008 
3009 		/* write the FDIR swap register set */
3010 		wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap);
3011 
3012 		ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n",
3013 			  prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap);
3014 
3015 		/* write the FDIR inset register set */
3016 		wr32(hw, GLQF_FDINSET(prof_id, i), raw_in);
3017 
3018 		ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n",
3019 			  prof_id, i, GLQF_FDINSET(prof_id, i), raw_in);
3020 	}
3021 }
3022 
3023 /**
3024  * ice_add_prof - add profile
3025  * @hw: pointer to the HW struct
3026  * @blk: hardware block
3027  * @id: profile tracking ID
3028  * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
3029  * @attr: array of attributes
3030  * @attr_cnt: number of elements in attr array
3031  * @es: extraction sequence (length of array is determined by the block)
3032  * @masks: mask for extraction sequence
3033  * @symm: symmetric setting for RSS profiles
3034  * @fd_swap: enable/disable FDIR paired src/dst fields swap option
3035  *
3036  * This function registers a profile, which matches a set of PTYPES with a
3037  * particular extraction sequence. While the hardware profile is allocated,
3038  * it will not be written until the first call to ice_add_flow that specifies
3039  * the ID value used here.
3040  */
3041 int
3042 ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
3043 	     unsigned long *ptypes, const struct ice_ptype_attributes *attr,
3044 	     u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
3045 	     bool fd_swap)
3046 {
3047 	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
3048 	struct ice_prof_map *prof;
3049 	int status;
3050 	u8 prof_id;
3051 	u16 ptype;
3052 
3053 	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
3054 
3055 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3056 
3057 	/* search for existing profile */
3058 	status = ice_find_prof_id_with_mask(hw, blk, es, masks, symm, &prof_id);
3059 	if (status) {
3060 		/* allocate profile ID */
3061 		status = ice_alloc_prof_id(hw, blk, &prof_id);
3062 		if (status)
3063 			goto err_ice_add_prof;
3064 		if (blk == ICE_BLK_FD && fd_swap) {
3065 			/* For Flow Director block, the extraction sequence may
3066 			 * need to be altered in the case where there are paired
3067 			 * fields that have no match. This is necessary because
3068 			 * for Flow Director, src and dest fields need to be paired
3069 			 * for filter programming and these values are swapped
3070 			 * during Tx.
3071 			 */
3072 			status = ice_update_fd_swap(hw, prof_id, es);
3073 			if (status)
3074 				goto err_ice_add_prof;
3075 		} else if (blk == ICE_BLK_FD) {
3076 			ice_disable_fd_swap(hw, prof_id);
3077 		}
3078 		status = ice_update_prof_masking(hw, blk, prof_id, masks);
3079 		if (status)
3080 			goto err_ice_add_prof;
3081 
3082 		/* and write new es */
3083 		ice_write_es(hw, blk, prof_id, es, symm);
3084 	}
3085 
3086 	ice_prof_inc_ref(hw, blk, prof_id);
3087 
3088 	/* add profile info */
3089 	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
3090 	if (!prof) {
3091 		status = -ENOMEM;
3092 		goto err_ice_add_prof;
3093 	}
3094 
3095 	prof->profile_cookie = id;
3096 	prof->prof_id = prof_id;
3097 	prof->ptg_cnt = 0;
3098 	prof->context = 0;
3099 
3100 	/* build list of ptgs */
3101 	for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
3102 		u8 ptg;
3103 
3104 		/* The package should place all ptypes in a non-zero
3105 		 * PTG, so the following call should never fail.
3106 		 */
3107 		if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
3108 			continue;
3109 
3110 		/* If PTG is already added, skip and continue */
3111 		if (test_bit(ptg, ptgs_used))
3112 			continue;
3113 
3114 		set_bit(ptg, ptgs_used);
3115 		/* Check to see if there are any attributes for this ptype, and
3116 		 * add them if found.
3117 		 */
3118 		status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt);
3119 		if (status == -ENOSPC)
3120 			break;
3121 		if (status) {
3122 			/* This is simply a ptype/PTG with no attributes */
3123 			prof->ptg[prof->ptg_cnt] = ptg;
3124 			prof->attr[prof->ptg_cnt].flags = 0;
3125 			prof->attr[prof->ptg_cnt].mask = 0;
3126 
3127 			if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
3128 				break;
3129 		}
3130 	}
3131 
3132 	list_add(&prof->list, &hw->blk[blk].es.prof_map);
3133 	status = 0;
3134 
3135 err_ice_add_prof:
3136 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3137 	return status;
3138 }
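
/* Usage sketch (illustrative, placeholder values): a flow module registers a
 * profile against a ptype bitmap and later tears it down by the same
 * tracking ID; prof_cookie, ptype_x, es and masks are hypothetical here:
 *
 *	DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX) = { 0 };
 *
 *	set_bit(ptype_x, ptypes);
 *	status = ice_add_prof(hw, ICE_BLK_RSS, prof_cookie, ptypes,
 *			      NULL, 0, es, masks, true, false);
 *	...
 *	status = ice_rem_prof(hw, ICE_BLK_RSS, prof_cookie);
 */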
3139 
3140 /**
3141  * ice_search_prof_id - Search for a profile tracking ID
3142  * @hw: pointer to the HW struct
3143  * @blk: hardware block
3144  * @id: profile tracking ID
3145  *
3146  * This will search for a profile tracking ID which was previously added.
3147  * The profile map lock should be held before calling this function.
3148  */
3149 struct ice_prof_map *
3150 ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
3151 {
3152 	struct ice_prof_map *entry = NULL;
3153 	struct ice_prof_map *map;
3154 
3155 	list_for_each_entry(map, &hw->blk[blk].es.prof_map, list)
3156 		if (map->profile_cookie == id) {
3157 			entry = map;
3158 			break;
3159 		}
3160 
3161 	return entry;
3162 }
3163 
3164 /**
3165  * ice_vsig_prof_id_count - count profiles in a VSIG
3166  * @hw: pointer to the HW struct
3167  * @blk: hardware block
3168  * @vsig: VSIG to remove the profile from
3169  */
3170 static u16
ice_vsig_prof_id_count(struct ice_hw * hw,enum ice_block blk,u16 vsig)3171 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
3172 {
3173 	u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
3174 	struct ice_vsig_prof *p;
3175 
3176 	list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3177 			    list)
3178 		count++;
3179 
3180 	return count;
3181 }
3182 
3183 /**
3184  * ice_rel_tcam_idx - release a TCAM index
3185  * @hw: pointer to the HW struct
3186  * @blk: hardware block
3187  * @idx: the index to release
3188  */
3189 static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
3190 {
3191 	/* Masks to invoke a never match entry */
3192 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3193 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
3194 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
3195 	int status;
3196 
3197 	/* write the TCAM entry */
3198 	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
3199 				      dc_msk, nm_msk);
3200 	if (status)
3201 		return status;
3202 
3203 	/* release the TCAM entry */
3204 	status = ice_free_tcam_ent(hw, blk, idx);
3205 
3206 	return status;
3207 }
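
/* Illustrative note: the masks above deliberately build a "never match"
 * entry rather than an all-zero one. Per the key encoding at the top of this
 * file, a set nm_msk bit encodes '~' (key 0 / key-invert 0), which can never
 * compare equal, so with nm_msk = { 0x01, ... } the low key bit disables the
 * TCAM row before the index is handed back to the allocator:
 *
 *	nm bit set:  key = 0, key_inv = 0   ('~', never matches)
 *	dc bit set:  key = 1, key_inv = 1   ('?', always matches)
 */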
3208 
3209 /**
3210  * ice_rem_prof_id - remove one profile from a VSIG
3211  * @hw: pointer to the HW struct
3212  * @blk: hardware block
3213  * @prof: pointer to profile structure to remove
3214  */
3215 static int
3216 ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
3217 		struct ice_vsig_prof *prof)
3218 {
3219 	int status;
3220 	u16 i;
3221 
3222 	for (i = 0; i < prof->tcam_count; i++)
3223 		if (prof->tcam[i].in_use) {
3224 			prof->tcam[i].in_use = false;
3225 			status = ice_rel_tcam_idx(hw, blk,
3226 						  prof->tcam[i].tcam_idx);
3227 			if (status)
3228 				return -EIO;
3229 		}
3230 
3231 	return 0;
3232 }
3233 
3234 /**
3235  * ice_rem_vsig - remove VSIG
3236  * @hw: pointer to the HW struct
3237  * @blk: hardware block
3238  * @vsig: the VSIG to remove
3239  * @chg: the change list
3240  */
3241 static int
3242 ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3243 	     struct list_head *chg)
3244 {
3245 	u16 idx = vsig & ICE_VSIG_IDX_M;
3246 	struct ice_vsig_vsi *vsi_cur;
3247 	struct ice_vsig_prof *d, *t;
3248 
3249 	/* remove TCAM entries */
3250 	list_for_each_entry_safe(d, t,
3251 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3252 				 list) {
3253 		int status;
3254 
3255 		status = ice_rem_prof_id(hw, blk, d);
3256 		if (status)
3257 			return status;
3258 
3259 		list_del(&d->list);
3260 		devm_kfree(ice_hw_to_dev(hw), d);
3261 	}
3262 
3263 	/* Move all VSIs associated with this VSIG to the default VSIG */
3264 	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
3265 	/* If the VSIG has at least 1 VSI then iterate through the list
3266 	 * and remove the VSIs before deleting the group.
3267 	 */
3268 	if (vsi_cur)
3269 		do {
3270 			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
3271 			struct ice_chs_chg *p;
3272 
3273 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
3274 					 GFP_KERNEL);
3275 			if (!p)
3276 				return -ENOMEM;
3277 
3278 			p->type = ICE_VSIG_REM;
3279 			p->orig_vsig = vsig;
3280 			p->vsig = ICE_DEFAULT_VSIG;
3281 			p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
3282 
3283 			list_add(&p->list_entry, chg);
3284 
3285 			vsi_cur = tmp;
3286 		} while (vsi_cur);
3287 
3288 	return ice_vsig_free(hw, blk, vsig);
3289 }
3290 
3291 /**
3292  * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
3293  * @hw: pointer to the HW struct
3294  * @blk: hardware block
3295  * @vsig: VSIG to remove the profile from
3296  * @hdl: profile handle indicating which profile to remove
3297  * @chg: list to receive a record of changes
3298  */
3299 static int
3300 ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3301 		     struct list_head *chg)
3302 {
3303 	u16 idx = vsig & ICE_VSIG_IDX_M;
3304 	struct ice_vsig_prof *p, *t;
3305 
3306 	list_for_each_entry_safe(p, t,
3307 				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3308 				 list)
3309 		if (p->profile_cookie == hdl) {
3310 			int status;
3311 
3312 			if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
3313 				/* this is the last profile, remove the VSIG */
3314 				return ice_rem_vsig(hw, blk, vsig, chg);
3315 
3316 			status = ice_rem_prof_id(hw, blk, p);
3317 			if (!status) {
3318 				list_del(&p->list);
3319 				devm_kfree(ice_hw_to_dev(hw), p);
3320 			}
3321 			return status;
3322 		}
3323 
3324 	return -ENOENT;
3325 }
3326 
3327 /**
3328  * ice_rem_flow_all - remove all flows with a particular profile
3329  * @hw: pointer to the HW struct
3330  * @blk: hardware block
3331  * @id: profile tracking ID
3332  */
3333 static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
3334 {
3335 	struct ice_chs_chg *del, *tmp;
3336 	struct list_head chg;
3337 	int status;
3338 	u16 i;
3339 
3340 	INIT_LIST_HEAD(&chg);
3341 
3342 	for (i = 1; i < ICE_MAX_VSIGS; i++)
3343 		if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
3344 			if (ice_has_prof_vsig(hw, blk, i, id)) {
3345 				status = ice_rem_prof_id_vsig(hw, blk, i, id,
3346 							      &chg);
3347 				if (status)
3348 					goto err_ice_rem_flow_all;
3349 			}
3350 		}
3351 
3352 	status = ice_upd_prof_hw(hw, blk, &chg);
3353 
3354 err_ice_rem_flow_all:
3355 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
3356 		list_del(&del->list_entry);
3357 		devm_kfree(ice_hw_to_dev(hw), del);
3358 	}
3359 
3360 	return status;
3361 }
3362 
3363 /**
3364  * ice_rem_prof - remove profile
3365  * @hw: pointer to the HW struct
3366  * @blk: hardware block
3367  * @id: profile tracking ID
3368  *
3369  * This will remove the profile specified by the ID parameter, which was
3370  * previously created through ice_add_prof. If any existing entries
3371  * are associated with this profile, they will be removed as well.
3372  */
3373 int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
3374 {
3375 	struct ice_prof_map *pmap;
3376 	int status;
3377 
3378 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3379 
3380 	pmap = ice_search_prof_id(hw, blk, id);
3381 	if (!pmap) {
3382 		status = -ENOENT;
3383 		goto err_ice_rem_prof;
3384 	}
3385 
3386 	/* remove all flows with this profile */
3387 	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
3388 	if (status)
3389 		goto err_ice_rem_prof;
3390 
3391 	/* dereference profile, and possibly remove */
3392 	ice_prof_dec_ref(hw, blk, pmap->prof_id);
3393 
3394 	list_del(&pmap->list);
3395 	devm_kfree(ice_hw_to_dev(hw), pmap);
3396 
3397 err_ice_rem_prof:
3398 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3399 	return status;
3400 }
3401 
3402 /**
3403  * ice_get_prof - get profile
3404  * @hw: pointer to the HW struct
3405  * @blk: hardware block
3406  * @hdl: profile handle
3407  * @chg: change list
3408  */
3409 static int
3410 ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
3411 	     struct list_head *chg)
3412 {
3413 	struct ice_prof_map *map;
3414 	struct ice_chs_chg *p;
3415 	int status = 0;
3416 	u16 i;
3417 
3418 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3419 	/* Get the details on the profile specified by the handle ID */
3420 	map = ice_search_prof_id(hw, blk, hdl);
3421 	if (!map) {
3422 		status = -ENOENT;
3423 		goto err_ice_get_prof;
3424 	}
3425 
3426 	for (i = 0; i < map->ptg_cnt; i++)
3427 		if (!hw->blk[blk].es.written[map->prof_id]) {
3428 			/* add ES to change list */
3429 			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
3430 					 GFP_KERNEL);
3431 			if (!p) {
3432 				status = -ENOMEM;
3433 				goto err_ice_get_prof;
3434 			}
3435 
3436 			p->type = ICE_PTG_ES_ADD;
3437 			p->ptype = 0;
3438 			p->ptg = map->ptg[i];
3439 			p->add_ptg = 0;
3440 
3441 			p->add_prof = 1;
3442 			p->prof_id = map->prof_id;
3443 
3444 			hw->blk[blk].es.written[map->prof_id] = true;
3445 
3446 			list_add(&p->list_entry, chg);
3447 		}
3448 
3449 err_ice_get_prof:
3450 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3451 	/* let caller clean up the change list */
3452 	return status;
3453 }
3454 
3455 /**
3456  * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
3457  * @hw: pointer to the HW struct
3458  * @blk: hardware block
3459  * @vsig: VSIG from which to copy the list
3460  * @lst: output list
3461  *
3462  * This routine makes a copy of the list of profiles in the specified VSIG.
3463  */
3464 static int
3465 ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3466 		   struct list_head *lst)
3467 {
3468 	struct ice_vsig_prof *ent1, *ent2;
3469 	u16 idx = vsig & ICE_VSIG_IDX_M;
3470 
3471 	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3472 			    list) {
3473 		struct ice_vsig_prof *p;
3474 
3475 		/* copy to the input list */
3476 		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
3477 				 GFP_KERNEL);
3478 		if (!p)
3479 			goto err_ice_get_profs_vsig;
3480 
3481 		list_add_tail(&p->list, lst);
3482 	}
3483 
3484 	return 0;
3485 
3486 err_ice_get_profs_vsig:
3487 	list_for_each_entry_safe(ent1, ent2, lst, list) {
3488 		list_del(&ent1->list);
3489 		devm_kfree(ice_hw_to_dev(hw), ent1);
3490 	}
3491 
3492 	return -ENOMEM;
3493 }
3494 
3495 /**
3496  * ice_add_prof_to_lst - add profile entry to a list
3497  * @hw: pointer to the HW struct
3498  * @blk: hardware block
3499  * @lst: the list to be added to
3500  * @hdl: profile handle of entry to add
3501  */
3502 static int
3503 ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
3504 		    struct list_head *lst, u64 hdl)
3505 {
3506 	struct ice_prof_map *map;
3507 	struct ice_vsig_prof *p;
3508 	int status = 0;
3509 	u16 i;
3510 
3511 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3512 	map = ice_search_prof_id(hw, blk, hdl);
3513 	if (!map) {
3514 		status = -ENOENT;
3515 		goto err_ice_add_prof_to_lst;
3516 	}
3517 
3518 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3519 	if (!p) {
3520 		status = -ENOMEM;
3521 		goto err_ice_add_prof_to_lst;
3522 	}
3523 
3524 	p->profile_cookie = map->profile_cookie;
3525 	p->prof_id = map->prof_id;
3526 	p->tcam_count = map->ptg_cnt;
3527 
3528 	for (i = 0; i < map->ptg_cnt; i++) {
3529 		p->tcam[i].prof_id = map->prof_id;
3530 		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
3531 		p->tcam[i].ptg = map->ptg[i];
3532 	}
3533 
3534 	list_add(&p->list, lst);
3535 
3536 err_ice_add_prof_to_lst:
3537 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3538 	return status;
3539 }
3540 
3541 /**
3542  * ice_move_vsi - move VSI to another VSIG
3543  * @hw: pointer to the HW struct
3544  * @blk: hardware block
3545  * @vsi: the VSI to move
3546  * @vsig: the VSIG to move the VSI to
3547  * @chg: the change list
3548  */
3549 static int
3550 ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
3551 	     struct list_head *chg)
3552 {
3553 	struct ice_chs_chg *p;
3554 	u16 orig_vsig;
3555 	int status;
3556 
3557 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3558 	if (!p)
3559 		return -ENOMEM;
3560 
3561 	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
3562 	if (!status)
3563 		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
3564 
3565 	if (status) {
3566 		devm_kfree(ice_hw_to_dev(hw), p);
3567 		return status;
3568 	}
3569 
3570 	p->type = ICE_VSI_MOVE;
3571 	p->vsi = vsi;
3572 	p->orig_vsig = orig_vsig;
3573 	p->vsig = vsig;
3574 
3575 	list_add(&p->list_entry, chg);
3576 
3577 	return 0;
3578 }
3579 
3580 /**
3581  * ice_set_tcam_flags - set TCAM flag don't care mask
3582  * @mask: mask for flags
3583  * @dc_mask: pointer to the don't care mask
3584  */
3585 static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
3586 {
3587 	u16 inverted_mask = ~mask;
3588 
3589 	/* flags are lowest u16 */
3590 	put_unaligned_le16(inverted_mask, dc_mask);
3591 }
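
/* Worked example (illustrative, not part of the driver): for an attribute
 * mask of 0x000F, the inverted mask is 0xFFF0. put_unaligned_le16() stores
 * it least-significant byte first, so dc_mask[0] = 0xF0 and dc_mask[1] = 0xFF:
 * the four flag bits covered by the mask must match exactly, while every
 * other flag bit in the low u16 of the key is treated as don't care.
 */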
3592 
3593 /**
3594  * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
3595  * @hw: pointer to the HW struct
3596  * @idx: the index of the TCAM entry to remove
3597  * @chg: the list of change structures to search
3598  */
3599 static void
3600 ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
3601 {
3602 	struct ice_chs_chg *pos, *tmp;
3603 
3604 	list_for_each_entry_safe(tmp, pos, chg, list_entry)
3605 		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
3606 			list_del(&tmp->list_entry);
3607 			devm_kfree(ice_hw_to_dev(hw), tmp);
3608 		}
3609 }
3610 
3611 /**
3612  * ice_prof_tcam_ena_dis - add enable or disable TCAM change
3613  * @hw: pointer to the HW struct
3614  * @blk: hardware block
3615  * @enable: true to enable, false to disable
3616  * @vsig: the VSIG of the TCAM entry
3617  * @tcam: pointer to the TCAM info structure of the TCAM to enable/disable
3618  * @chg: the change list
3619  *
3620  * This function appends an enable or disable TCAM entry to the change list
3621  */
3622 static int
3623 ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
3624 		      u16 vsig, struct ice_tcam_inf *tcam,
3625 		      struct list_head *chg)
3626 {
3627 	struct ice_chs_chg *p;
3628 	int status;
3629 
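	/* default TCAM key masks: vl_msk marks all key bytes valid, dc_msk
	 * initially treats the two flag bytes as don't care (refined below by
	 * ice_set_tcam_flags()), and nm_msk requests no never-match bits
	 */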
3630 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3631 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
3632 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
3633 
3634 	/* if disabling, free the TCAM */
3635 	if (!enable) {
3636 		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
3637 
3638 		/* if we have already created a change for this TCAM entry, then
3639 		 * we need to remove that entry, in order to prevent writing to
3640 		 * a TCAM entry that we will no longer own.
3641 		 */
3642 		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
3643 		tcam->tcam_idx = 0;
3644 		tcam->in_use = 0;
3645 		return status;
3646 	}
3647 
3648 	/* for re-enabling, reallocate a TCAM */
3649 	/* for entries with empty attribute masks, allocate entry from
3650 	 * the bottom of the TCAM table; otherwise, allocate from the
3651 	 * top of the table in order to give it higher priority
3652 	 */
3653 	status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
3654 				    &tcam->tcam_idx);
3655 	if (status)
3656 		return status;
3657 
3658 	/* add TCAM to change list */
3659 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3660 	if (!p)
3661 		return -ENOMEM;
3662 
3663 	/* set don't care masks for TCAM flags */
3664 	ice_set_tcam_flags(tcam->attr.mask, dc_msk);
3665 
3666 	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
3667 				      tcam->ptg, vsig, 0, tcam->attr.flags,
3668 				      vl_msk, dc_msk, nm_msk);
3669 	if (status)
3670 		goto err_ice_prof_tcam_ena_dis;
3671 
3672 	tcam->in_use = 1;
3673 
3674 	p->type = ICE_TCAM_ADD;
3675 	p->add_tcam_idx = true;
3676 	p->prof_id = tcam->prof_id;
3677 	p->ptg = tcam->ptg;
3678 	p->vsig = 0;
3679 	p->tcam_idx = tcam->tcam_idx;
3680 
3681 	/* log change */
3682 	list_add(&p->list_entry, chg);
3683 
3684 	return 0;
3685 
3686 err_ice_prof_tcam_ena_dis:
3687 	devm_kfree(ice_hw_to_dev(hw), p);
3688 	return status;
3689 }
3690 
3691 /**
3692  * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
3693  * @ptg_attr: pointer to the PTG and attribute pair to check
3694  * @ptgs_used: bitmap that denotes which PTGs are in use
3695  * @attr_used: array of PTG and attributes pairs already used
3696  * @attr_cnt: count of entries in the attr_used array
3697  *
3698  * Return: true if the PTG and attribute pair is in use, false otherwise.
3699  */
3700 static bool
3701 ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, unsigned long *ptgs_used,
3702 		    struct ice_tcam_inf *attr_used[], u16 attr_cnt)
3703 {
3704 	u16 i;
3705 
3706 	if (!test_bit(ptg_attr->ptg, ptgs_used))
3707 		return false;
3708 
3709 	/* the PTG is used, so now look for correct attributes */
3710 	for (i = 0; i < attr_cnt; i++)
3711 		if (attr_used[i]->ptg == ptg_attr->ptg &&
3712 		    attr_used[i]->attr.flags == ptg_attr->attr.flags &&
3713 		    attr_used[i]->attr.mask == ptg_attr->attr.mask)
3714 			return true;
3715 
3716 	return false;
3717 }
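
/* Note: two profiles may share a PTG while differing in attribute flags or
 * mask; in that case this helper returns false, so each distinct
 * (PTG, attribute) pair keeps its own enabled TCAM entry.
 */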
3718 
3719 /**
3720  * ice_adj_prof_priorities - adjust profile based on priorities
3721  * @hw: pointer to the HW struct
3722  * @blk: hardware block
3723  * @vsig: the VSIG for which to adjust profile priorities
3724  * @chg: the change list
3725  */
3726 static int
3727 ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
3728 			struct list_head *chg)
3729 {
3730 	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
3731 	struct ice_tcam_inf **attr_used;
3732 	struct ice_vsig_prof *t;
3733 	u16 attr_used_cnt = 0;
3734 	int status = 0;
3735 	u16 idx;
3736 
3737 	attr_used = kcalloc(ICE_MAX_PTG_ATTRS, sizeof(*attr_used), GFP_KERNEL);
3738 	if (!attr_used)
3739 		return -ENOMEM;
3740 
3741 	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
3742 	idx = vsig & ICE_VSIG_IDX_M;
3743 
3744 	/* Priority is based on the order in which the profiles are added. The
3745 	 * newest added profile has highest priority and the oldest added
3746 	 * profile has the lowest priority. Since the profile property list for
3747 	 * a VSIG is sorted from newest to oldest, this code traverses the list
3748 	 * in order and enables the first of each PTG that it finds (that is not
3749 	 * already enabled); it also disables any duplicate PTGs that it finds
3750 	 * in the older profiles (that are currently enabled).
3751 	 */
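	/* Illustrative example (not from the driver): if the list holds
	 * profiles C (newest), B, A and both C and B map PTG 5 with identical
	 * attributes, C's TCAM entry for PTG 5 is enabled (if it is not
	 * already); when B is reached, PTG 5 is already marked used, so B's
	 * enabled entry is disabled as a lower-priority duplicate.
	 */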
3752 
3753 	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
3754 			    list) {
3755 		u16 i;
3756 
3757 		for (i = 0; i < t->tcam_count; i++) {
3758 			bool used;
3759 
3760 			/* Scan the priorities from newest to oldest.
3761 			 * Make sure that the newest profiles take priority.
3762 			 */
3763 			used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
3764 						   attr_used, attr_used_cnt);
3765 
3766 			if (used && t->tcam[i].in_use) {
3767 				/* need to mark this PTG as never match, as it
3768 				 * was already in use and is therefore a
3769 				 * duplicate (and lower priority)
3770 				 */
3771 				status = ice_prof_tcam_ena_dis(hw, blk, false,
3772 							       vsig,
3773 							       &t->tcam[i],
3774 							       chg);
3775 				if (status)
3776 					goto free_attr_used;
3777 			} else if (!used && !t->tcam[i].in_use) {
3778 				/* need to enable this PTG, as it is not in use
3779 				 * and not enabled (highest priority)
3780 				 */
3781 				status = ice_prof_tcam_ena_dis(hw, blk, true,
3782 							       vsig,
3783 							       &t->tcam[i],
3784 							       chg);
3785 				if (status)
3786 					goto free_attr_used;
3787 			}
3788 
3789 			/* keep track of used PTGs */
3790 			set_bit(t->tcam[i].ptg, ptgs_used);
3791 			if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
3792 				attr_used[attr_used_cnt++] = &t->tcam[i];
3793 			else
3794 				ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
3795 		}
3796 	}
3797 
3798 free_attr_used:
3799 	kfree(attr_used);
3800 	return status;
3801 }
3802 
3803 /**
3804  * ice_add_prof_id_vsig - add profile to VSIG
3805  * @hw: pointer to the HW struct
3806  * @blk: hardware block
3807  * @vsig: the VSIG to which this profile is to be added
3808  * @hdl: the profile handle indicating the profile to add
3809  * @rev: true to add entries to the end of the list
3810  * @chg: the change list
3811  */
3812 static int
3813 ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
3814 		     bool rev, struct list_head *chg)
3815 {
3816 	/* Masks that ignore flags */
3817 	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3818 	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
3819 	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
3820 	struct ice_prof_map *map;
3821 	struct ice_vsig_prof *t;
3822 	struct ice_chs_chg *p;
3823 	u16 vsig_idx, i;
3824 	int status = 0;
3825 
3826 	/* Error out if this VSIG already has this profile */
3827 	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
3828 		return -EEXIST;
3829 
3830 	/* new VSIG profile structure */
3831 	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
3832 	if (!t)
3833 		return -ENOMEM;
3834 
3835 	mutex_lock(&hw->blk[blk].es.prof_map_lock);
3836 	/* Get the details on the profile specified by the handle ID */
3837 	map = ice_search_prof_id(hw, blk, hdl);
3838 	if (!map) {
3839 		status = -ENOENT;
3840 		goto err_ice_add_prof_id_vsig;
3841 	}
3842 
3843 	t->profile_cookie = map->profile_cookie;
3844 	t->prof_id = map->prof_id;
3845 	t->tcam_count = map->ptg_cnt;
3846 
3847 	/* create TCAM entries */
3848 	for (i = 0; i < map->ptg_cnt; i++) {
3849 		u16 tcam_idx;
3850 
3851 		/* add TCAM to change list */
3852 		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3853 		if (!p) {
3854 			status = -ENOMEM;
3855 			goto err_ice_add_prof_id_vsig;
3856 		}
3857 
3858 		/* allocate the TCAM entry index */
3859 		/* for entries with empty attribute masks, allocate entry from
3860 		 * the bottom of the TCAM table; otherwise, allocate from the
3861 		 * top of the table in order to give it higher priority
3862 		 */
3863 		status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
3864 					    &tcam_idx);
3865 		if (status) {
3866 			devm_kfree(ice_hw_to_dev(hw), p);
3867 			goto err_ice_add_prof_id_vsig;
3868 		}
3869 
3870 		t->tcam[i].ptg = map->ptg[i];
3871 		t->tcam[i].prof_id = map->prof_id;
3872 		t->tcam[i].tcam_idx = tcam_idx;
3873 		t->tcam[i].attr = map->attr[i];
3874 		t->tcam[i].in_use = true;
3875 
3876 		p->type = ICE_TCAM_ADD;
3877 		p->add_tcam_idx = true;
3878 		p->prof_id = t->tcam[i].prof_id;
3879 		p->ptg = t->tcam[i].ptg;
3880 		p->vsig = vsig;
3881 		p->tcam_idx = t->tcam[i].tcam_idx;
3882 
3883 		/* set don't care masks for TCAM flags */
3884 		ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
3885 
3886 		/* write the TCAM entry */
3887 		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
3888 					      t->tcam[i].prof_id,
3889 					      t->tcam[i].ptg, vsig, 0,
3890 					      t->tcam[i].attr.flags, vl_msk,
3891 					      dc_msk, nm_msk);
3892 		if (status) {
3893 			devm_kfree(ice_hw_to_dev(hw), p);
3894 			goto err_ice_add_prof_id_vsig;
3895 		}
3896 
3897 		/* log change */
3898 		list_add(&p->list_entry, chg);
3899 	}
3900 
3901 	/* add profile to VSIG */
3902 	vsig_idx = vsig & ICE_VSIG_IDX_M;
3903 	if (rev)
3904 		list_add_tail(&t->list,
3905 			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3906 	else
3907 		list_add(&t->list,
3908 			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
3909 
3910 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3911 	return status;
3912 
3913 err_ice_add_prof_id_vsig:
3914 	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3915 	/* let caller clean up the change list */
3916 	devm_kfree(ice_hw_to_dev(hw), t);
3917 	return status;
3918 }
3919 
3920 /**
3921  * ice_create_prof_id_vsig - add a new VSIG with a single profile
3922  * @hw: pointer to the HW struct
3923  * @blk: hardware block
3924  * @vsi: the initial VSI that will be in VSIG
3925  * @hdl: the profile handle of the profile that will be added to the VSIG
3926  * @chg: the change list
3927  */
3928 static int
3929 ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
3930 			struct list_head *chg)
3931 {
3932 	struct ice_chs_chg *p;
3933 	u16 new_vsig;
3934 	int status;
3935 
3936 	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
3937 	if (!p)
3938 		return -ENOMEM;
3939 
3940 	new_vsig = ice_vsig_alloc(hw, blk);
3941 	if (!new_vsig) {
3942 		status = -EIO;
3943 		goto err_ice_create_prof_id_vsig;
3944 	}
3945 
3946 	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
3947 	if (status)
3948 		goto err_ice_create_prof_id_vsig;
3949 
3950 	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
3951 	if (status)
3952 		goto err_ice_create_prof_id_vsig;
3953 
3954 	p->type = ICE_VSIG_ADD;
3955 	p->vsi = vsi;
3956 	p->orig_vsig = ICE_DEFAULT_VSIG;
3957 	p->vsig = new_vsig;
3958 
3959 	list_add(&p->list_entry, chg);
3960 
3961 	return 0;
3962 
3963 err_ice_create_prof_id_vsig:
3964 	/* let caller clean up the change list */
3965 	devm_kfree(ice_hw_to_dev(hw), p);
3966 	return status;
3967 }
3968 
3969 /**
3970  * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
3971  * @hw: pointer to the HW struct
3972  * @blk: hardware block
3973  * @vsi: the initial VSI that will be in VSIG
3974  * @lst: the list of profiles that will be added to the VSIG
3975  * @new_vsig: return of new VSIG
3976  * @chg: the change list
3977  */
3978 static int
3979 ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
3980 			 struct list_head *lst, u16 *new_vsig,
3981 			 struct list_head *chg)
3982 {
3983 	struct ice_vsig_prof *t;
3984 	int status;
3985 	u16 vsig;
3986 
3987 	vsig = ice_vsig_alloc(hw, blk);
3988 	if (!vsig)
3989 		return -EIO;
3990 
3991 	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
3992 	if (status)
3993 		return status;
3994 
3995 	list_for_each_entry(t, lst, list) {
3996 		/* Reverse the insertion order (rev == true) since we are
		 * copying the list: the copy is ordered newest-first, so
		 * appending at the tail preserves the original priority order.
		 */
3997 		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
3998 					      true, chg);
3999 		if (status)
4000 			return status;
4001 	}
4002 
4003 	*new_vsig = vsig;
4004 
4005 	return 0;
4006 }
4007 
4008 /**
4009  * ice_find_prof_vsig - find a VSIG with a specific profile handle
4010  * @hw: pointer to the HW struct
4011  * @blk: hardware block
4012  * @hdl: the profile handle of the profile to search for
4013  * @vsig: returns the VSIG with the matching profile
4014  */
4015 static bool
4016 ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
4017 {
4018 	struct ice_vsig_prof *t;
4019 	struct list_head lst;
4020 	int status;
4021 
4022 	INIT_LIST_HEAD(&lst);
4023 
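	/* Build a temporary single-entry profile list so that
	 * ice_find_dup_props_vsig() can be reused to look for a VSIG whose
	 * characteristic list is exactly this one profile.
	 */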
4024 	t = kzalloc(sizeof(*t), GFP_KERNEL);
4025 	if (!t)
4026 		return false;
4027 
4028 	t->profile_cookie = hdl;
4029 	list_add(&t->list, &lst);
4030 
4031 	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
4032 
4033 	list_del(&t->list);
4034 	kfree(t);
4035 
4036 	return !status;
4037 }
4038 
4039 /**
4040  * ice_add_prof_id_flow - add profile flow
4041  * @hw: pointer to the HW struct
4042  * @blk: hardware block
4043  * @vsi: the VSI to enable with the profile specified by ID
4044  * @hdl: profile handle
4045  *
4046  * Calling this function will update the hardware tables to enable the
4047  * profile indicated by the @hdl parameter for the VSI specified by @vsi.
4048  * Once this call succeeds, the flow will be enabled.
4049  */
4050 int
4051 ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4052 {
4053 	struct ice_vsig_prof *tmp1, *del1;
4054 	struct ice_chs_chg *tmp, *del;
4055 	struct list_head union_lst;
4056 	struct list_head chg;
4057 	int status;
4058 	u16 vsig;
4059 
4060 	INIT_LIST_HEAD(&union_lst);
4061 	INIT_LIST_HEAD(&chg);
4062 
4063 	/* Get profile */
4064 	status = ice_get_prof(hw, blk, hdl, &chg);
4065 	if (status)
4066 		return status;
4067 
4068 	/* determine if VSI is already part of a VSIG */
4069 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4070 	if (!status && vsig) {
4071 		bool only_vsi;
4072 		u16 or_vsig;
4073 		u16 ref;
4074 
4075 		/* found in VSIG */
4076 		or_vsig = vsig;
4077 
4078 		/* make sure that there is no overlap/conflict between the new
4079 		 * characteristics and the existing ones; we don't support that
4080 		 * scenario
4081 		 */
4082 		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
4083 			status = -EEXIST;
4084 			goto err_ice_add_prof_id_flow;
4085 		}
4086 
4087 		/* last VSI in the VSIG? */
4088 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4089 		if (status)
4090 			goto err_ice_add_prof_id_flow;
4091 		only_vsi = (ref == 1);
4092 
4093 		/* create a union of the current profiles and the one being
4094 		 * added
4095 		 */
4096 		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
4097 		if (status)
4098 			goto err_ice_add_prof_id_flow;
4099 
4100 		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
4101 		if (status)
4102 			goto err_ice_add_prof_id_flow;
4103 
4104 		/* search for an existing VSIG with an exact characteristic match */
4105 		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
4106 		if (!status) {
4107 			/* move VSI to the VSIG that matches */
4108 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4109 			if (status)
4110 				goto err_ice_add_prof_id_flow;
4111 
4112 			/* VSI has been moved out of or_vsig. If the or_vsig had
4113 			 * only that VSI it is now empty and can be removed.
4114 			 */
4115 			if (only_vsi) {
4116 				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
4117 				if (status)
4118 					goto err_ice_add_prof_id_flow;
4119 			}
4120 		} else if (only_vsi) {
4121 			/* If the original VSIG only contains one VSI, then it
4122 			 * will be the requesting VSI. In this case the VSI is
4123 			 * not sharing entries and we can simply add the new
4124 			 * profile to the VSIG.
4125 			 */
4126 			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
4127 						      &chg);
4128 			if (status)
4129 				goto err_ice_add_prof_id_flow;
4130 
4131 			/* Adjust priorities */
4132 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4133 			if (status)
4134 				goto err_ice_add_prof_id_flow;
4135 		} else {
4136 			/* No match, so we need a new VSIG */
4137 			status = ice_create_vsig_from_lst(hw, blk, vsi,
4138 							  &union_lst, &vsig,
4139 							  &chg);
4140 			if (status)
4141 				goto err_ice_add_prof_id_flow;
4142 
4143 			/* Adjust priorities */
4144 			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
4145 			if (status)
4146 				goto err_ice_add_prof_id_flow;
4147 		}
4148 	} else {
4149 		/* need to find or add a VSIG */
4150 		/* search for an existing VSIG with an exact characteristic match */
4151 		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
4152 			/* found an exact match */
4153 			/* add or move VSI to the VSIG that matches */
4154 			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4155 			if (status)
4156 				goto err_ice_add_prof_id_flow;
4157 		} else {
4158 			/* we did not find an exact match, so we need to add a new VSIG */
4160 			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
4161 							 &chg);
4162 			if (status)
4163 				goto err_ice_add_prof_id_flow;
4164 		}
4165 	}
4166 
4167 	/* update hardware */
4168 	if (!status)
4169 		status = ice_upd_prof_hw(hw, blk, &chg);
4170 
4171 err_ice_add_prof_id_flow:
4172 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4173 		list_del(&del->list_entry);
4174 		devm_kfree(ice_hw_to_dev(hw), del);
4175 	}
4176 
4177 	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
4178 		list_del(&del1->list);
4179 		devm_kfree(ice_hw_to_dev(hw), del1);
4180 	}
4181 
4182 	return status;
4183 }
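
/* Usage sketch (illustrative only, not taken from a real caller): callers
 * such as the flow-profile code pair ice_add_prof_id_flow() with
 * ice_rem_prof_id_flow(); @hw, @vsi_num and @prof_handle are assumed valid:
 *
 *	int err = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, prof_handle);
 *
 *	if (err)
 *		return err;
 *	...
 *	err = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, prof_handle);
 */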
4184 
4185 /**
4186  * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI
4187  * @hw: pointer to the HW struct
4188  * @blk: HW block
4189  * @dest_vsi: dest VSI
4190  * @fdir_vsi: fdir programming VSI
4191  * @hdl: profile handle
4192  *
4193  * Update the hardware tables to enable the FDIR profile indicated by @hdl for
4194  * the VSI specified by @dest_vsi. On success, the flow will be enabled.
4195  *
4196  * Return: 0 on success or negative errno on failure.
4197  */
4198 int
4199 ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
4200 			 u16 dest_vsi, u16 fdir_vsi, u64 hdl)
4201 {
4202 	u16 vsi_num;
4203 	int status;
4204 
4205 	vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
4206 	status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
4207 	if (status) {
4208 		ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n",
4209 			  status);
4210 		return status;
4211 	}
4212 
4213 	if (blk != ICE_BLK_FD)
4214 		return 0;
4215 
4216 	vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
4217 	status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
4218 	if (status) {
4219 		ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n",
4220 			  status);
4221 		goto err;
4222 	}
4223 
4224 	return 0;
4225 
4226 err:
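	/* unwind: the association for the main VSI succeeded, so remove it
	 * again to leave the hardware tables unchanged on failure
	 */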
4227 	vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
4228 	ice_rem_prof_id_flow(hw, blk, vsi_num, hdl);
4229 
4230 	return status;
4231 }
4232 
4233 /**
4234  * ice_rem_prof_from_list - remove a profile from list
4235  * @hw: pointer to the HW struct
4236  * @lst: list to remove the profile from
4237  * @hdl: the profile handle indicating the profile to remove
4238  */
4239 static int
4240 ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
4241 {
4242 	struct ice_vsig_prof *ent, *tmp;
4243 
4244 	list_for_each_entry_safe(ent, tmp, lst, list)
4245 		if (ent->profile_cookie == hdl) {
4246 			list_del(&ent->list);
4247 			devm_kfree(ice_hw_to_dev(hw), ent);
4248 			return 0;
4249 		}
4250 
4251 	return -ENOENT;
4252 }
4253 
4254 /**
4255  * ice_rem_prof_id_flow - remove flow
4256  * @hw: pointer to the HW struct
4257  * @blk: hardware block
4258  * @vsi: the VSI from which to remove the profile specified by ID
4259  * @hdl: profile tracking handle
4260  *
4261  * Calling this function will update the hardware tables to remove the
4262  * profile indicated by the @hdl parameter from the VSI specified by @vsi.
4263  * Once this call succeeds, the flow will be disabled.
4264  */
4265 int
4266 ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
4267 {
4268 	struct ice_vsig_prof *tmp1, *del1;
4269 	struct ice_chs_chg *tmp, *del;
4270 	struct list_head chg, copy;
4271 	int status;
4272 	u16 vsig;
4273 
4274 	INIT_LIST_HEAD(&copy);
4275 	INIT_LIST_HEAD(&chg);
4276 
4277 	/* determine if VSI is already part of a VSIG */
4278 	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
4279 	if (!status && vsig) {
4280 		bool last_profile;
4281 		bool only_vsi;
4282 		u16 ref;
4283 
4284 		/* found in VSIG */
4285 		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
4286 		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
4287 		if (status)
4288 			goto err_ice_rem_prof_id_flow;
4289 		only_vsi = (ref == 1);
4290 
4291 		if (only_vsi) {
4292 			/* If the original VSIG only contains one reference,
4293 			 * which will be the requesting VSI, then the VSI is not
4294 			 * sharing entries and we can simply remove the specific
4295 			 * characteristics from the VSIG.
4296 			 */
4297 
4298 			if (last_profile) {
4299 				/* If there are no profiles left for this VSIG,
4300 				 * then simply remove the VSIG.
4301 				 */
4302 				status = ice_rem_vsig(hw, blk, vsig, &chg);
4303 				if (status)
4304 					goto err_ice_rem_prof_id_flow;
4305 			} else {
4306 				status = ice_rem_prof_id_vsig(hw, blk, vsig,
4307 							      hdl, &chg);
4308 				if (status)
4309 					goto err_ice_rem_prof_id_flow;
4310 
4311 				/* Adjust priorities */
4312 				status = ice_adj_prof_priorities(hw, blk, vsig,
4313 								 &chg);
4314 				if (status)
4315 					goto err_ice_rem_prof_id_flow;
4316 			}
4317 
4318 		} else {
4319 			/* Make a copy of the VSIG's list of Profiles */
4320 			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
4321 			if (status)
4322 				goto err_ice_rem_prof_id_flow;
4323 
4324 			/* Remove specified profile entry from the list */
4325 			status = ice_rem_prof_from_list(hw, &copy, hdl);
4326 			if (status)
4327 				goto err_ice_rem_prof_id_flow;
4328 
4329 			if (list_empty(&copy)) {
4330 				status = ice_move_vsi(hw, blk, vsi,
4331 						      ICE_DEFAULT_VSIG, &chg);
4332 				if (status)
4333 					goto err_ice_rem_prof_id_flow;
4334 
4335 			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
4336 							    &vsig)) {
4337 				/* ice_find_dup_props_vsig() found a VSIG with
4338 				 * a matching profile list; move the VSI to
4339 				 * that VSIG
4340 				 */
4344 				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
4345 				if (status)
4346 					goto err_ice_rem_prof_id_flow;
4347 			} else {
4348 				/* since no existing VSIG supports this
4349 				 * characteristic pattern, we need to create a
4350 				 * new VSIG and TCAM entries
4351 				 */
4352 				status = ice_create_vsig_from_lst(hw, blk, vsi,
4353 								  &copy, &vsig,
4354 								  &chg);
4355 				if (status)
4356 					goto err_ice_rem_prof_id_flow;
4357 
4358 				/* Adjust priorities */
4359 				status = ice_adj_prof_priorities(hw, blk, vsig,
4360 								 &chg);
4361 				if (status)
4362 					goto err_ice_rem_prof_id_flow;
4363 			}
4364 		}
4365 	} else {
4366 		status = -ENOENT;
4367 	}
4368 
4369 	/* update hardware tables */
4370 	if (!status)
4371 		status = ice_upd_prof_hw(hw, blk, &chg);
4372 
4373 err_ice_rem_prof_id_flow:
4374 	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
4375 		list_del(&del->list_entry);
4376 		devm_kfree(ice_hw_to_dev(hw), del);
4377 	}
4378 
4379 	list_for_each_entry_safe(del1, tmp1, &copy, list) {
4380 		list_del(&del1->list);
4381 		devm_kfree(ice_hw_to_dev(hw), del1);
4382 	}
4383 
4384 	return status;
4385 }
4386