xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c (revision 0d2ab5f922e75d10162e7199826e14df9cfae5cc)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/ethtool.h>
6 #include <net/ipv6.h>
7 
8 #include "fbnic.h"
9 #include "fbnic_fw.h"
10 #include "fbnic_netdev.h"
11 #include "fbnic_rpc.h"
12 
13 void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
14 {
15 	unsigned int num_rx = fbn->num_rx_queues;
16 	unsigned int i;
17 
18 	if (netif_is_rxfh_configured(fbn->netdev))
19 		return;
20 
21 	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
22 		fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
23 }
24 
25 void fbnic_rss_key_fill(u32 *buffer)
26 {
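	/* Generate the random RSS key only once so that it stays stable
	 * across calls, and trim the final dword to the valid key width.
	 */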
27 	static u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
28 
29 	net_get_random_once(rss_key, sizeof(rss_key));
30 	rss_key[FBNIC_RPC_RSS_KEY_LAST_IDX] &= FBNIC_RPC_RSS_KEY_LAST_MASK;
31 
32 	memcpy(buffer, rss_key, sizeof(rss_key));
33 }
34 
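/* Default hash field sets: L4 hashes on IP addresses plus L4 ports,
 * L3 on IP addresses only, and L2 on the destination MAC alone.
 */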
35 #define RX_HASH_OPT_L4 \
36 	(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
37 #define RX_HASH_OPT_L3 \
38 	(RXH_IP_SRC | RXH_IP_DST)
39 #define RX_HASH_OPT_L2 RXH_L2DA
40 
41 void fbnic_rss_init_en_mask(struct fbnic_net *fbn)
42 {
43 	fbn->rss_flow_hash[FBNIC_TCP4_HASH_OPT] = RX_HASH_OPT_L4;
44 	fbn->rss_flow_hash[FBNIC_TCP6_HASH_OPT] = RX_HASH_OPT_L4;
45 
46 	fbn->rss_flow_hash[FBNIC_UDP4_HASH_OPT] = RX_HASH_OPT_L3;
47 	fbn->rss_flow_hash[FBNIC_UDP6_HASH_OPT] = RX_HASH_OPT_L3;
48 	fbn->rss_flow_hash[FBNIC_IPV4_HASH_OPT] = RX_HASH_OPT_L3;
49 	fbn->rss_flow_hash[FBNIC_IPV6_HASH_OPT] = RX_HASH_OPT_L3;
50 
51 	fbn->rss_flow_hash[FBNIC_ETHER_HASH_OPT] = RX_HASH_OPT_L2;
52 }
53 
54 void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
55 {
56 	/* Disable RPC by clearing enable bit and configuration */
57 	if (!fbnic_bmc_present(fbd))
58 		wr32(fbd, FBNIC_RPC_RMI_CONFIG,
59 		     FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20));
60 }
61 
62 #define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val)		\
63 	FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem,	\
64 		   FIELD_GET(RXH_##_fh, _val))
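/* Translate the ethtool RXH_* bits for a flow type into the per-field
 * RSS enable bits programmed into the TBL1 action table entry.
 */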
65 u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
66 {
67 	u32 flow_hash = fbn->rss_flow_hash[flow_type];
68 	u32 rss_en_mask = 0;
69 
70 	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L2DA, L2_DA, flow_hash);
71 	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_SRC, IP_SRC, flow_hash);
72 	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
73 	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
74 	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);
75 	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, OV6_FL_LBL, flow_hash);
76 	rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, IV6_FL_LBL, flow_hash);
77 
78 	return rss_en_mask;
79 }
80 
81 void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn)
82 {
83 	unsigned int i;
84 
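	/* Copy both software indirection tables and the RSS key into
	 * their hardware registers.
	 */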
85 	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
86 		wr32(fbd, FBNIC_RPC_RSS_TBL(0, i), fbn->indir_tbl[0][i]);
87 		wr32(fbd, FBNIC_RPC_RSS_TBL(1, i), fbn->indir_tbl[1][i]);
88 	}
89 
90 	for (i = 0; i < FBNIC_RPC_RSS_KEY_DWORD_LEN; i++)
91 		wr32(fbd, FBNIC_RPC_RSS_KEY(i), fbn->rss_key[i]);
92 
93 	/* Default action is to drop w/ no destination */
94 	wr32(fbd, FBNIC_RPC_ACT_TBL0_DEFAULT, FBNIC_RPC_ACT_TBL0_DROP);
95 	wrfl(fbd);
96 
97 	wr32(fbd, FBNIC_RPC_ACT_TBL1_DEFAULT, 0);
98 
99 	/* If it isn't already enabled, set the RMI Config value to enable RPC */
100 	wr32(fbd, FBNIC_RPC_RMI_CONFIG,
101 	     FIELD_PREP(FBNIC_RPC_RMI_CONFIG_MTU, FBNIC_MAX_JUMBO_FRAME_SIZE) |
102 	     FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20) |
103 	     FBNIC_RPC_RMI_CONFIG_ENABLE);
104 }
105 
106 void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
107 				    bool enable_host)
108 {
109 	struct fbnic_act_tcam *act_tcam;
110 	struct fbnic_mac_addr *mac_addr;
111 	int j;
112 
113 	/* We need to add the all multicast filter at the end of the
114 	 * multicast address list. This way if there are any that are
115 	 * shared between the host and the BMC they can be directed to
116 	 * both. Otherwise the remainder just get sent directly to the
117 	 * BMC.
118 	 */
119 	mac_addr = &fbd->mac_addr[fbd->mac_addr_boundary - 1];
120 	if (fbnic_bmc_present(fbd) && fbd->fw_cap.all_multi) {
121 		if (mac_addr->state != FBNIC_TCAM_S_VALID) {
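			/* Start from a zero value and all-ones mask, then
			 * flip the multicast bit in both so this entry
			 * matches any address with the multicast bit set.
			 */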
122 			eth_zero_addr(mac_addr->value.addr8);
123 			eth_broadcast_addr(mac_addr->mask.addr8);
124 			mac_addr->value.addr8[0] ^= 1;
125 			mac_addr->mask.addr8[0] ^= 1;
126 			set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
127 			mac_addr->state = FBNIC_TCAM_S_ADD;
128 		}
129 		if (enable_host)
130 			set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
131 				mac_addr->act_tcam);
132 		else
133 			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
134 				  mac_addr->act_tcam);
135 	} else {
136 		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BMC);
137 		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
138 	}
139 
140 	/* We have to handle multicast specially as the BMC may
141 	 * already have an all-multi rule in place. In that case
142 	 * adding a rule of our own won't do any good, so instead
143 	 * we modify the ALL MULTI rule below when the BMC already
144 	 * has one.
145 	 */
146 	act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET];
147 
148 	/* If we are not enabling the rule, just delete it. We will fall
149 	 * back to the RSS rules that support the multicast addresses.
150 	 */
151 	if (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi || enable_host) {
152 		if (act_tcam->state == FBNIC_TCAM_S_VALID)
153 			act_tcam->state = FBNIC_TCAM_S_DELETE;
154 		return;
155 	}
156 
157 	/* Rewrite TCAM rule 23 to handle BMC all-multi traffic */
158 	act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
159 				    FBNIC_RPC_ACT_TBL0_DEST_BMC);
160 	act_tcam->mask.tcam[0] = 0xffff;
161 
162 	/* MACDA 0 - 3 are reserved for the BMC MAC addresses */
163 	act_tcam->value.tcam[1] =
164 			FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
165 				   fbd->mac_addr_boundary - 1) |
166 			FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
167 	act_tcam->mask.tcam[1] = 0xffff &
168 			 ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX &
169 			 ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
170 
171 	for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
172 		act_tcam->mask.tcam[j] = 0xffff;
173 
174 	act_tcam->state = FBNIC_TCAM_S_UPDATE;
175 }
176 
177 void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
178 {
179 	int i = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX;
180 	struct fbnic_act_tcam *act_tcam;
181 	struct fbnic_mac_addr *mac_addr;
182 	int j;
183 
184 	/* Check if BMC is present */
185 	if (!fbnic_bmc_present(fbd))
186 		return;
187 
188 	/* Fetch BMC MAC addresses from firmware capabilities */
189 	for (j = 0; j < 4; j++) {
190 		u8 *bmc_mac = fbd->fw_cap.bmc_mac_addr[j];
191 
192 		/* Skip unused (all-zero) BMC MAC address slots */
193 		if (is_zero_ether_addr(bmc_mac))
194 			continue;
195 
196 		if (is_multicast_ether_addr(bmc_mac))
197 			mac_addr = __fbnic_mc_sync(fbd, bmc_mac);
198 		else
199 			mac_addr = &fbd->mac_addr[i++];
200 
201 		if (!mac_addr) {
202 			netdev_err(fbd->netdev,
203 				   "No slot for BMC MAC address[%d]\n", j);
204 			continue;
205 		}
206 
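		/* Record the address with a zeroed mask, i.e. as an
		 * exact-match entry.
		 */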
207 		ether_addr_copy(mac_addr->value.addr8, bmc_mac);
208 		eth_zero_addr(mac_addr->mask.addr8);
209 
210 		set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
211 		mac_addr->state = FBNIC_TCAM_S_ADD;
212 	}
213 
214 	/* Make sure the broadcast address is also present; record and tag it */
215 	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
216 	eth_broadcast_addr(mac_addr->value.addr8);
217 	set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
218 	mac_addr->state = FBNIC_TCAM_S_ADD;
219 
220 	/* Rewrite TCAM rule 0, if it isn't already present, to relocate BMC rules */
221 	act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_OFFSET];
222 	act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
223 				    FBNIC_RPC_ACT_TBL0_DEST_BMC);
224 	act_tcam->mask.tcam[0] = 0xffff;
225 
226 	/* MACDA 0 - 3 are reserved for the BMC MAC addresses. To
227 	 * account for that we have to mask out the lower 2 bits
228 	 * of the MACDA index by performing an &= with 0x1c.
229 	 */
230 	act_tcam->value.tcam[1] = FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
231 	act_tcam->mask.tcam[1] = 0xffff &
232 			~FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, 0x1c) &
233 			~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
234 
235 	for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
236 		act_tcam->mask.tcam[j] = 0xffff;
237 
238 	act_tcam->state = FBNIC_TCAM_S_UPDATE;
239 }
240 
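/* Handle deferred BMC work: reprogram the BMC TCAM rules when a reinit
 * has been flagged, and send the MACDA table to the firmware when a
 * sync is pending.
 */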
241 void fbnic_bmc_rpc_check(struct fbnic_dev *fbd)
242 {
243 	int err;
244 
245 	if (fbd->fw_cap.need_bmc_tcam_reinit) {
246 		fbnic_bmc_rpc_init(fbd);
247 		__fbnic_set_rx_mode(fbd);
248 		fbd->fw_cap.need_bmc_tcam_reinit = false;
249 	}
250 
251 	if (fbd->fw_cap.need_bmc_macda_sync) {
252 		err = fbnic_fw_xmit_rpc_macda_sync(fbd);
253 		if (err)
254 			dev_warn(fbd->dev,
255 				 "Writing MACDA table to FW failed, err: %d\n", err);
256 		fbd->fw_cap.need_bmc_macda_sync = false;
257 	}
258 }
259 
260 #define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6)		\
261 	(((_l4) ? FBNIC_RPC_TCAM_ACT1_L4_VALID : 0) |	\
262 	 ((_udp) ? FBNIC_RPC_TCAM_ACT1_L4_IS_UDP : 0) |	\
263 	 ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) |	\
264 	 ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0))
265 
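/* Build a bitmask of flow types whose packets should be timestamped:
 * optionally all flow types, just the UDP types, and/or Ethernet.
 */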
266 #define FBNIC_TSTAMP_MASK(_all, _udp, _ether)			\
267 	(((_all) ? ((1u << FBNIC_NUM_HASH_OPT) - 1) : 0) |	\
268 	 ((_udp) ? (1u << FBNIC_UDP6_HASH_OPT) |		\
269 		   (1u << FBNIC_UDP4_HASH_OPT) : 0) |		\
270 	 ((_ether) ? (1u << FBNIC_ETHER_HASH_OPT) : 0))
271 
272 void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
273 {
274 	static const u32 act1_value[FBNIC_NUM_HASH_OPT] = {
275 		FBNIC_ACT1_INIT(1, 1, 1, 1),	/* UDP6 */
276 		FBNIC_ACT1_INIT(1, 1, 1, 0),	/* UDP4 */
277 		FBNIC_ACT1_INIT(1, 0, 1, 1),	/* TCP6 */
278 		FBNIC_ACT1_INIT(1, 0, 1, 0),	/* TCP4 */
279 		FBNIC_ACT1_INIT(0, 0, 1, 1),	/* IP6 */
280 		FBNIC_ACT1_INIT(0, 0, 1, 0),	/* IP4 */
281 		0				/* Ether */
282 	};
283 	u32 tstamp_mask = 0;
284 	unsigned int i;
285 
286 	/* To support scenarios where a BMC is present we must write the
287 	 * rules twice, once for the unicast cases, and once again for
288 	 * the broadcast/multicast cases as we have to support 2 destinations.
289 	 */
290 	BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES);
291 	BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT);
292 
293 	/* Set timestamp mask with 1b per flow type */
294 	if (fbn->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
295 		switch (fbn->hwtstamp_config.rx_filter) {
296 		case HWTSTAMP_FILTER_ALL:
297 			tstamp_mask = FBNIC_TSTAMP_MASK(1, 1, 1);
298 			break;
299 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
300 			tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 1);
301 			break;
302 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
303 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
304 			tstamp_mask = FBNIC_TSTAMP_MASK(0, 1, 0);
305 			break;
306 		case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
307 			tstamp_mask = FBNIC_TSTAMP_MASK(0, 0, 1);
308 			break;
309 		default:
310 			netdev_warn(fbn->netdev, "Unsupported hwtstamp_rx_filter\n");
311 			break;
312 		}
313 	}
314 
315 	/* Program RSS hash enable mask for host in action TCAM/table. */
316 	for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST;
317 	     i < FBNIC_RSS_EN_NUM_ENTRIES; i++) {
318 		unsigned int idx = i + FBNIC_RPC_ACT_TBL_RSS_OFFSET;
319 		struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
320 		u32 flow_hash, dest, rss_en_mask;
321 		int flow_type, j;
322 		u16 value = 0;
323 
324 		flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
325 		flow_hash = fbn->rss_flow_hash[flow_type];
326 
327 		/* Set DEST_HOST based on absence of RXH_DISCARD */
328 		dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
329 				  !(RXH_DISCARD & flow_hash) ?
330 				  FBNIC_RPC_ACT_TBL0_DEST_HOST : 0);
331 
332 		if (i >= FBNIC_RSS_EN_NUM_UNICAST && fbnic_bmc_present(fbd))
333 			dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
334 					   FBNIC_RPC_ACT_TBL0_DEST_BMC);
335 
336 		if (!dest)
337 			dest = FBNIC_RPC_ACT_TBL0_DROP;
338 		else if (tstamp_mask & (1u << flow_type))
339 			dest |= FBNIC_RPC_ACT_TBL0_TS_ENA;
340 
341 		if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
342 			dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
343 					   FBNIC_RCD_HDR_AL_DMA_HINT_L4);
344 
345 		rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type);
346 
347 		act_tcam->dest = dest;
348 		act_tcam->rss_en_mask = rss_en_mask;
349 		act_tcam->state = FBNIC_TCAM_S_UPDATE;
350 
351 		act_tcam->mask.tcam[0] = 0xffff;
352 
353 		/* We reserve the upper 8 MACDA TCAM entries for host
354 		 * unicast. So we set the value to 24 and mask the
355 		 * lower bits so that the lower entries can be used as
356 		 * multicast or BMC addresses.
357 		 */
358 		if (i < FBNIC_RSS_EN_NUM_UNICAST)
359 			value = FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
360 					   fbd->mac_addr_boundary);
361 		value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
362 
363 		flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
364 		value |= act1_value[flow_type];
365 
366 		act_tcam->value.tcam[1] = value;
367 		act_tcam->mask.tcam[1] = ~value;
368 
369 		for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
370 			act_tcam->mask.tcam[j] = 0xffff;
371 
372 		act_tcam->state = FBNIC_TCAM_S_UPDATE;
373 	}
374 }
375 
376 struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
377 				       const unsigned char *addr)
378 {
379 	struct fbnic_mac_addr *avail_addr = NULL;
380 	unsigned int i;
381 
382 	/* Scan from middle of list to bottom, filling bottom up.
383 	 * Skip the first entry which is reserved for dev_addr and
384 	 * leave the last entry to use for promiscuous filtering.
385 	 */
386 	for (i = fbd->mac_addr_boundary - 1;
387 	     i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX; i++) {
388 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
389 
390 		if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
391 			avail_addr = mac_addr;
392 		} else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
393 			avail_addr = mac_addr;
394 			break;
395 		}
396 	}
397 
398 	if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
399 		ether_addr_copy(avail_addr->value.addr8, addr);
400 		eth_zero_addr(avail_addr->mask.addr8);
401 		avail_addr->state = FBNIC_TCAM_S_ADD;
402 	}
403 
404 	return avail_addr;
405 }
406 
407 struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
408 				       const unsigned char *addr)
409 {
410 	struct fbnic_mac_addr *avail_addr = NULL;
411 	unsigned int i;
412 
413 	/* Scan from middle of list to top, filling top down.
414 	 * Skip over the address reserved for the BMC MAC and
415 	 * exclude index 0 as that belongs to the broadcast address
416 	 */
417 	for (i = fbd->mac_addr_boundary;
418 	     --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;) {
419 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
420 
421 		if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
422 			avail_addr = mac_addr;
423 		} else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
424 			avail_addr = mac_addr;
425 			break;
426 		}
427 	}
428 
429 	/* Scan the BMC addresses to see if it may have already
430 	 * reserved the address.
431 	 */
432 	while (--i) {
433 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
434 
435 		if (!is_zero_ether_addr(mac_addr->mask.addr8))
436 			continue;
437 
438 		/* Only move on if we find a match */
439 		if (!ether_addr_equal(mac_addr->value.addr8, addr))
440 			continue;
441 
442 		/* We need to pull this address to the shared area */
443 		if (avail_addr) {
444 			memcpy(avail_addr, mac_addr, sizeof(*mac_addr));
445 			mac_addr->state = FBNIC_TCAM_S_DELETE;
446 			avail_addr->state = FBNIC_TCAM_S_ADD;
447 		}
448 
449 		break;
450 	}
451 
452 	if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
453 		ether_addr_copy(avail_addr->value.addr8, addr);
454 		eth_zero_addr(avail_addr->mask.addr8);
455 		avail_addr->state = FBNIC_TCAM_S_ADD;
456 	}
457 
458 	return avail_addr;
459 }
460 
461 int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
462 {
463 	if (!test_and_clear_bit(tcam_idx, mac_addr->act_tcam))
464 		return -ENOENT;
465 
466 	if (bitmap_empty(mac_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
467 		mac_addr->state = FBNIC_TCAM_S_DELETE;
468 
469 	return 0;
470 }
471 
472 void fbnic_promisc_sync(struct fbnic_dev *fbd,
473 			bool uc_promisc, bool mc_promisc)
474 {
475 	struct fbnic_mac_addr *mac_addr;
476 
477 	/* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
478 	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
479 	if (uc_promisc) {
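		/* A zero address with an all-ones mask matches every
		 * destination MAC, i.e. true promiscuous reception.
		 */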
480 		if (!is_zero_ether_addr(mac_addr->value.addr8) ||
481 		    mac_addr->state != FBNIC_TCAM_S_VALID) {
482 			eth_zero_addr(mac_addr->value.addr8);
483 			eth_broadcast_addr(mac_addr->mask.addr8);
484 			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
485 				  mac_addr->act_tcam);
486 			set_bit(FBNIC_MAC_ADDR_T_PROMISC,
487 				mac_addr->act_tcam);
488 			mac_addr->state = FBNIC_TCAM_S_ADD;
489 		}
490 	} else if (mc_promisc &&
491 		   (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
492 		/* We have to handle multicast specially as the BMC may
493 		 * already have an all-multi rule in place. In that case
494 		 * adding a rule of our own won't do any good, so we only
495 		 * take over the ALL MULTI entry here when the BMC does
496 		 * not already have such a rule.
497 		 */
498 		if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
499 		    mac_addr->state != FBNIC_TCAM_S_VALID) {
500 			eth_zero_addr(mac_addr->value.addr8);
501 			eth_broadcast_addr(mac_addr->mask.addr8);
502 			mac_addr->value.addr8[0] ^= 1;
503 			mac_addr->mask.addr8[0] ^= 1;
504 			set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
505 				mac_addr->act_tcam);
506 			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
507 				  mac_addr->act_tcam);
508 			mac_addr->state = FBNIC_TCAM_S_ADD;
509 		}
510 	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
511 		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
512 		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_PROMISC);
513 	}
514 }
515 
516 void fbnic_sift_macda(struct fbnic_dev *fbd)
517 {
518 	int dest, src;
519 
520 	/* Move BMC only addresses back into BMC region */
521 	for (dest = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX,
522 	     src = FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX;
523 	     ++dest < FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX &&
524 	     src < fbd->mac_addr_boundary;) {
525 		struct fbnic_mac_addr *dest_addr = &fbd->mac_addr[dest];
526 
527 		if (dest_addr->state != FBNIC_TCAM_S_DISABLED)
528 			continue;
529 
530 		while (src < fbd->mac_addr_boundary) {
531 			struct fbnic_mac_addr *src_addr = &fbd->mac_addr[src++];
532 
533 			/* Verify BMC bit is set */
534 			if (!test_bit(FBNIC_MAC_ADDR_T_BMC, src_addr->act_tcam))
535 				continue;
536 
537 			/* Verify filter isn't already disabled */
538 			if (src_addr->state == FBNIC_TCAM_S_DISABLED ||
539 			    src_addr->state == FBNIC_TCAM_S_DELETE)
540 				continue;
541 
542 			/* Verify only BMC bit is set */
543 			if (bitmap_weight(src_addr->act_tcam,
544 					  FBNIC_RPC_TCAM_ACT_NUM_ENTRIES) != 1)
545 				continue;
546 
547 			/* Verify we are not moving wildcard address */
548 			if (!is_zero_ether_addr(src_addr->mask.addr8))
549 				continue;
550 
551 			memcpy(dest_addr, src_addr, sizeof(*src_addr));
552 			src_addr->state = FBNIC_TCAM_S_DELETE;
553 			dest_addr->state = FBNIC_TCAM_S_ADD;
554 		}
555 	}
556 }
557 
558 static void fbnic_clear_macda_entry(struct fbnic_dev *fbd, unsigned int idx)
559 {
560 	int i;
561 
562 	/* Invalidate entry and clear addr state info */
563 	for (i = 0; i <= FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
564 		wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), 0);
565 }
566 
567 static void fbnic_clear_macda(struct fbnic_dev *fbd)
568 {
569 	int idx;
570 
571 	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
572 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
573 
574 		if (mac_addr->state == FBNIC_TCAM_S_DISABLED)
575 			continue;
576 
577 		if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
578 			if (fbnic_bmc_present(fbd))
579 				continue;
580 			dev_warn_once(fbd->dev,
581 				      "Found BMC MAC address w/ BMC not present\n");
582 		}
583 
584 		fbnic_clear_macda_entry(fbd, idx);
585 
586 		/* If rule was already destined for deletion just wipe it now */
587 		if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
588 			memset(mac_addr, 0, sizeof(*mac_addr));
589 			continue;
590 		}
591 
592 		/* Change state to update so that we will rewrite
593 		 * this tcam the next time fbnic_write_macda is called.
594 		 */
595 		mac_addr->state = FBNIC_TCAM_S_UPDATE;
596 	}
597 }
598 
599 static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
600 				    struct fbnic_mac_addr *mac_addr)
601 {
602 	__be16 *mask, *value;
603 	int i;
604 
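	/* Walk the MAC address a halfword at a time starting from the
	 * end so the last 16 bits land in TCAM word 0.
	 */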
605 	mask = &mac_addr->mask.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
606 	value = &mac_addr->value.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
607 
608 	for (i = 0; i < FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
609 		wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i),
610 		     FIELD_PREP(FBNIC_RPC_TCAM_MACDA_MASK, ntohs(*mask--)) |
611 		     FIELD_PREP(FBNIC_RPC_TCAM_MACDA_VALUE, ntohs(*value--)));
612 
613 	wrfl(fbd);
614 
615 	wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), FBNIC_RPC_TCAM_VALIDATE);
616 }
617 
618 void fbnic_write_macda(struct fbnic_dev *fbd)
619 {
620 	int idx, updates = 0;
621 
622 	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
623 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
624 
625 		/* Check if update flag is set else skip. */
626 		if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
627 			continue;
628 
629 		/* Record update count */
630 		updates++;
631 
632 		/* Clear by writing 0s. */
633 		if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
634 			/* Invalidate entry and clear addr state info */
635 			fbnic_clear_macda_entry(fbd, idx);
636 			memset(mac_addr, 0, sizeof(*mac_addr));
637 
638 			continue;
639 		}
640 
641 		fbnic_write_macda_entry(fbd, idx, mac_addr);
642 
643 		mac_addr->state = FBNIC_TCAM_S_VALID;
644 	}
645 
646 	/* If reinitializing the BMC TCAM we are doing an initial update */
647 	if (fbd->fw_cap.need_bmc_tcam_reinit)
648 		updates++;
649 
650 	/* If needed notify firmware of changes to MACDA TCAM */
651 	if (updates != 0 && fbnic_bmc_present(fbd))
652 		fbd->fw_cap.need_bmc_macda_sync = true;
653 }
654 
655 static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
656 {
657 	int i;
658 
659 	/* Invalidate entry and clear addr state info */
660 	for (i = 0; i <= FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
661 		wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0);
662 }
663 
664 static void fbnic_clear_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx)
665 {
666 	int i;
667 
668 	/* Invalidate entry and clear addr state info */
669 	for (i = 0; i <= FBNIC_TCE_TCAM_WORD_LEN; i++)
670 		wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i), 0);
671 }
672 
673 static void fbnic_write_tce_tcam_dest(struct fbnic_dev *fbd, unsigned int idx,
674 				      struct fbnic_mac_addr *mac_addr)
675 {
676 	u32 dest = FBNIC_TCE_TCAM_DEST_BMC;
677 	u32 idx2dest_map;
678 
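	/* Each TCE TCAM entry owns a 4-bit nibble of the IDX2DEST map
	 * register; update only the nibble for this index.
	 */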
679 	if (is_multicast_ether_addr(mac_addr->value.addr8))
680 		dest |= FBNIC_TCE_TCAM_DEST_MAC;
681 
682 	idx2dest_map = rd32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP);
683 	idx2dest_map &= ~(FBNIC_TCE_TCAM_IDX2DEST_MAP_DEST_ID_0 << (4 * idx));
684 	idx2dest_map |= dest << (4 * idx);
685 
686 	wr32(fbd, FBNIC_TCE_TCAM_IDX2DEST_MAP, idx2dest_map);
687 }
688 
689 static void fbnic_write_tce_tcam_entry(struct fbnic_dev *fbd, unsigned int idx,
690 				       struct fbnic_mac_addr *mac_addr)
691 {
692 	__be16 *mask, *value;
693 	int i;
694 
695 	mask = &mac_addr->mask.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
696 	value = &mac_addr->value.addr16[FBNIC_TCE_TCAM_WORD_LEN - 1];
697 
698 	for (i = 0; i < FBNIC_TCE_TCAM_WORD_LEN; i++)
699 		wr32(fbd, FBNIC_TCE_RAM_TCAM(idx, i),
700 		     FIELD_PREP(FBNIC_TCE_RAM_TCAM_MASK, ntohs(*mask--)) |
701 		     FIELD_PREP(FBNIC_TCE_RAM_TCAM_VALUE, ntohs(*value--)));
702 
703 	wrfl(fbd);
704 
705 	wr32(fbd, FBNIC_TCE_RAM_TCAM3(idx), FBNIC_TCE_RAM_TCAM3_MCQ_MASK |
706 				       FBNIC_TCE_RAM_TCAM3_DEST_MASK |
707 				       FBNIC_TCE_RAM_TCAM3_VALIDATE);
708 }
709 
710 static void __fbnic_write_tce_tcam_rev(struct fbnic_dev *fbd)
711 {
712 	int tcam_idx = FBNIC_TCE_TCAM_NUM_ENTRIES;
713 	int mac_idx;
714 
715 	for (mac_idx = ARRAY_SIZE(fbd->mac_addr); mac_idx--;) {
716 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];
717 
718 		/* Verify BMC bit is set */
719 		if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
720 			continue;
721 
722 		if (!tcam_idx) {
723 			dev_err(fbd->dev, "TCE TCAM overflow\n");
724 			return;
725 		}
726 
727 		tcam_idx--;
728 		fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
729 		fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
730 	}
731 
732 	while (tcam_idx)
733 		fbnic_clear_tce_tcam_entry(fbd, --tcam_idx);
734 
735 	fbd->tce_tcam_last = tcam_idx;
736 }
737 
738 static void __fbnic_write_tce_tcam(struct fbnic_dev *fbd)
739 {
740 	int tcam_idx = 0;
741 	int mac_idx;
742 
743 	for (mac_idx = 0; mac_idx < ARRAY_SIZE(fbd->mac_addr); mac_idx++) {
744 		struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[mac_idx];
745 
746 		/* Verify BMC bit is set */
747 		if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
748 			continue;
749 
750 		if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES) {
751 			dev_err(fbd->dev, "TCE TCAM overflow\n");
752 			return;
753 		}
754 
755 		fbnic_write_tce_tcam_dest(fbd, tcam_idx, mac_addr);
756 		fbnic_write_tce_tcam_entry(fbd, tcam_idx, mac_addr);
757 		tcam_idx++;
758 	}
759 
760 	while (tcam_idx < FBNIC_TCE_TCAM_NUM_ENTRIES)
761 		fbnic_clear_tce_tcam_entry(fbd, tcam_idx++);
762 
763 	fbd->tce_tcam_last = tcam_idx;
764 }
765 
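/* The TCE TCAM is rewritten in alternating directions; fbd->tce_tcam_last
 * records which direction was used last so the next rewrite starts from
 * the opposite end.
 */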
766 void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
767 {
768 	if (fbd->tce_tcam_last)
769 		__fbnic_write_tce_tcam_rev(fbd);
770 	else
771 		__fbnic_write_tce_tcam(fbd);
772 }
773 
774 struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
775 				       struct fbnic_ip_addr *ip_addr,
776 				       const struct in_addr *addr,
777 				       const struct in_addr *mask)
778 {
779 	struct fbnic_ip_addr *avail_addr = NULL;
780 	unsigned int i;
781 
782 	/* Scan from top of list to bottom, filling bottom up. */
783 	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
784 		struct in6_addr *m = &ip_addr->mask;
785 
786 		if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
787 			avail_addr = ip_addr;
788 			continue;
789 		}
790 
791 		if (ip_addr->version != 4)
792 			continue;
793 
794 		/* Drop avail_addr if mask is a subset of our current mask.
795 		 * This prevents us from inserting a longer prefix behind a
796 		 * shorter one.
797 		 *
798 		 * The mask is stored as an inverted value, so as an example:
799 		 * m	ffff ffff ffff ffff ffff ffff ffff 0000 0000
800 		 * mask 0000 0000 0000 0000 0000 0000 0000 ffff ffff
801 		 *
802 		 * "m" and "mask" represent a typical IPv4 mask stored in
803 		 * the TCAM and those provided by the stack. The code below
804 		 * should return a non-zero result if there is a 0 stored
805 		 * anywhere in "m" where "mask" has a 0.
806 		 */
807 		if (~m->s6_addr32[3] & ~mask->s_addr) {
808 			avail_addr = NULL;
809 			continue;
810 		}
811 
812 		/* Check to see if the mask actually contains fewer bits than
813 		 * our new mask "m". The XOR below should only result in 0 if
814 		 * "m" is masking a bit that we are looking for in our new
815 		 * "mask"; we eliminated the 0^0 case with the check above.
816 		 *
817 		 * If it contains fewer bits we need to stop here, otherwise
818 		 * we might be adding an unreachable rule.
819 		 */
820 		if (~(m->s6_addr32[3] ^ mask->s_addr))
821 			break;
822 
823 		if (ip_addr->value.s6_addr32[3] == addr->s_addr) {
824 			avail_addr = ip_addr;
825 			break;
826 		}
827 	}
828 
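	/* Store the IPv4 address in the low 32 bits of the IPv6-sized
	 * field with the mask inverted; the upper 96 bits of the mask
	 * are left all-ones (don't care).
	 */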
829 	if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
830 		ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr);
831 		ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0),
832 			      htonl(~0), ~mask->s_addr);
833 		avail_addr->version = 4;
834 
835 		avail_addr->state = FBNIC_TCAM_S_ADD;
836 	}
837 
838 	return avail_addr;
839 }
840 
841 struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
842 				       struct fbnic_ip_addr *ip_addr,
843 				       const struct in6_addr *addr,
844 				       const struct in6_addr *mask)
845 {
846 	struct fbnic_ip_addr *avail_addr = NULL;
847 	unsigned int i;
848 
849 	ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1];
850 
851 	/* Scan from bottom of list to top, filling top down. */
852 	for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) {
853 		struct in6_addr *m = &ip_addr->mask;
854 
855 		if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
856 			avail_addr = ip_addr;
857 			continue;
858 		}
859 
860 		if (ip_addr->version != 6)
861 			continue;
862 
863 		/* Drop avail_addr if mask is a superset of our current mask.
864 		 * This prevents us from inserting a longer prefix behind a
865 		 * shorter one.
866 		 *
867 		 * The mask is stored as an inverted value, so as an example:
868 		 * m	0000 0000 0000 0000 0000 0000 0000 0000 0000
869 		 * mask ffff ffff ffff ffff ffff ffff ffff ffff ffff
870 		 *
871 		 * "m" and "mask" represent a typical IPv6 mask stored in
872 		 * the TCAM and those provided by the stack. The code below
873 		 * should return a non-zero result which will cause us
874 		 * to drop the avail_addr value that might be cached
875 		 * to prevent us from dropping a v6 address behind it.
876 		 */
877 		if ((m->s6_addr32[0] & mask->s6_addr32[0]) |
878 		    (m->s6_addr32[1] & mask->s6_addr32[1]) |
879 		    (m->s6_addr32[2] & mask->s6_addr32[2]) |
880 		    (m->s6_addr32[3] & mask->s6_addr32[3])) {
881 			avail_addr = NULL;
882 			continue;
883 		}
884 
885 		/* The previous test eliminated any overlap between the
886 		 * two values so now we need to check for gaps.
887 		 *
888 		 * If the mask is equal to our current mask then it should
889 		 * result in m ^ mask = ffff ffff; if, however, the value
890 		 * stored in m is bigger then we should see a 0 appear
891 		 * somewhere in the mask.
892 		 */
893 		if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) |
894 		    ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) |
895 		    ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) |
896 		    ~(m->s6_addr32[3] ^ mask->s6_addr32[3]))
897 			break;
898 
899 		if (ipv6_addr_cmp(&ip_addr->value, addr))
900 			continue;
901 
902 		avail_addr = ip_addr;
903 		break;
904 	}
905 
906 	if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
907 		memcpy(&avail_addr->value, addr, sizeof(*addr));
908 		ipv6_addr_set(&avail_addr->mask,
909 			      ~mask->s6_addr32[0], ~mask->s6_addr32[1],
910 			      ~mask->s6_addr32[2], ~mask->s6_addr32[3]);
911 		avail_addr->version = 6;
912 
913 		avail_addr->state = FBNIC_TCAM_S_ADD;
914 	}
915 
916 	return avail_addr;
917 }
918 
919 int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx)
920 {
921 	if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam))
922 		return -ENOENT;
923 
924 	if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
925 		ip_addr->state = FBNIC_TCAM_S_DELETE;
926 
927 	return 0;
928 }
929 
930 static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx)
931 {
932 	int i;
933 
934 	/* Invalidate entry and clear addr state info */
935 	for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
936 		wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0);
937 }
938 
939 static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx)
940 {
941 	int i;
942 
943 	/* Invalidate entry and clear addr state info */
944 	for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
945 		wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0);
946 }
947 
948 static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd,
949 					   unsigned int idx)
950 {
951 	int i;
952 
953 	/* Invalidate entry and clear addr state info */
954 	for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
955 		wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0);
956 }
957 
958 static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd,
959 					   unsigned int idx)
960 {
961 	int i;
962 
963 	/* Invalidate entry and clear addr state info */
964 	for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
965 		wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0);
966 }
967 
968 static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx,
969 				     struct fbnic_ip_addr *ip_addr)
970 {
971 	__be16 *mask, *value;
972 	int i;
973 
974 	mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
975 	value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
976 
977 	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
978 		wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
979 		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
980 		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
981 	wrfl(fbd);
982 
983 	/* Bit 129 is used to flag for v4/v6 */
984 	wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
985 	     (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
986 }
987 
988 static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx,
989 				     struct fbnic_ip_addr *ip_addr)
990 {
991 	__be16 *mask, *value;
992 	int i;
993 
994 	mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
995 	value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
996 
997 	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
998 		wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
999 		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
1000 		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
1001 	wrfl(fbd);
1002 
1003 	/* Bit 129 is used to flag for v4/v6 */
1004 	wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
1005 	     (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
1006 }
1007 
1008 static void fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd,
1009 					   unsigned int idx,
1010 					   struct fbnic_ip_addr *ip_addr)
1011 {
1012 	__be16 *mask, *value;
1013 	int i;
1014 
1015 	mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
1016 	value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
1017 
1018 	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
1019 		wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i),
1020 		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
1021 		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
1022 	wrfl(fbd);
1023 
1024 	wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE);
1025 }
1026 
1027 static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd,
1028 					   unsigned int idx,
1029 					   struct fbnic_ip_addr *ip_addr)
1030 {
1031 	__be16 *mask, *value;
1032 	int i;
1033 
1034 	mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
1035 	value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
1036 
1037 	for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
1038 		wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i),
1039 		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
1040 		     FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
1041 	wrfl(fbd);
1042 
1043 	wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE);
1044 }
1045 
1046 void fbnic_write_ip_addr(struct fbnic_dev *fbd)
1047 {
1048 	int idx;
1049 
1050 	for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) {
1051 		struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx];
1052 
1053 		/* Check if update flag is set else skip. */
1054 		if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
1055 			continue;
1056 
1057 		/* Clear by writing 0s. */
1058 		if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
1059 			/* Invalidate entry and clear addr state info */
1060 			fbnic_clear_ip_src_entry(fbd, idx);
1061 			memset(ip_addr, 0, sizeof(*ip_addr));
1062 
1063 			continue;
1064 		}
1065 
1066 		fbnic_write_ip_src_entry(fbd, idx, ip_addr);
1067 
1068 		ip_addr->state = FBNIC_TCAM_S_VALID;
1069 	}
1070 
1071 	/* Repeat process for other IP TCAMs */
1072 	for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) {
1073 		struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx];
1074 
1075 		if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
1076 			continue;
1077 
1078 		if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
1079 			fbnic_clear_ip_dst_entry(fbd, idx);
1080 			memset(ip_addr, 0, sizeof(*ip_addr));
1081 
1082 			continue;
1083 		}
1084 
1085 		fbnic_write_ip_dst_entry(fbd, idx, ip_addr);
1086 
1087 		ip_addr->state = FBNIC_TCAM_S_VALID;
1088 	}
1089 
1090 	for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) {
1091 		struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx];
1092 
1093 		if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
1094 			continue;
1095 
1096 		if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
1097 			fbnic_clear_ip_outer_src_entry(fbd, idx);
1098 			memset(ip_addr, 0, sizeof(*ip_addr));
1099 
1100 			continue;
1101 		}
1102 
1103 		fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr);
1104 
1105 		ip_addr->state = FBNIC_TCAM_S_VALID;
1106 	}
1107 
1108 	for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) {
1109 		struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx];
1110 
1111 		if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
1112 			continue;
1113 
1114 		if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
1115 			fbnic_clear_ip_outer_dst_entry(fbd, idx);
1116 			memset(ip_addr, 0, sizeof(*ip_addr));
1117 
1118 			continue;
1119 		}
1120 
1121 		fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr);
1122 
1123 		ip_addr->state = FBNIC_TCAM_S_VALID;
1124 	}
1125 }
1126 
1127 void fbnic_clear_rules(struct fbnic_dev *fbd)
1128 {
1129 	u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
1130 			      FBNIC_RPC_ACT_TBL0_DEST_BMC);
1131 	int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
1132 	struct fbnic_act_tcam *act_tcam;
1133 
1134 	/* Clear MAC rules */
1135 	fbnic_clear_macda(fbd);
1136 
1137 	/* If BMC is present we need to preserve the last rule which
1138 	 * will be used to route traffic to the BMC if it is received.
1139 	 *
1140 	 * At this point it should be the only MAC address in the MACDA
1141 	 * so any unicast or multicast traffic received should be routed
1142 	 * to it. So leave the last rule in place.
1143 	 *
1144 	 * It will be rewritten to add the host again when we bring
1145 	 * the interface back up.
1146 	 */
1147 	if (fbnic_bmc_present(fbd)) {
1148 		act_tcam = &fbd->act_tcam[i];
1149 
1150 		if (act_tcam->state == FBNIC_TCAM_S_VALID &&
1151 		    (act_tcam->dest & dest)) {
1152 			wr32(fbd, FBNIC_RPC_ACT_TBL0(i), dest);
1153 			wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);
1154 
1155 			act_tcam->state = FBNIC_TCAM_S_UPDATE;
1156 
1157 			i--;
1158 		}
1159 	}
1160 
1161 	/* Work from the bottom up deleting all other rules from hardware */
1162 	do {
1163 		act_tcam = &fbd->act_tcam[i];
1164 
1165 		if (act_tcam->state != FBNIC_TCAM_S_VALID)
1166 			continue;
1167 
1168 		fbnic_clear_act_tcam(fbd, i);
1169 		act_tcam->state = FBNIC_TCAM_S_UPDATE;
1170 	} while (i--);
1171 }
1172 
1173 static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
1174 {
1175 	fbnic_clear_act_tcam(fbd, idx);
1176 	memset(&fbd->act_tcam[idx], 0, sizeof(struct fbnic_act_tcam));
1177 }
1178 
1179 static void fbnic_update_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
1180 {
1181 	struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
1182 	int i;
1183 
1184 	/* Update entry by writing the destination and RSS mask */
1185 	wr32(fbd, FBNIC_RPC_ACT_TBL0(idx), act_tcam->dest);
1186 	wr32(fbd, FBNIC_RPC_ACT_TBL1(idx), act_tcam->rss_en_mask);
1187 
1188 	/* Write new TCAM rule to hardware */
1189 	for (i = 0; i < FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
1190 		wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i),
1191 		     FIELD_PREP(FBNIC_RPC_TCAM_ACT_MASK,
1192 				act_tcam->mask.tcam[i]) |
1193 		     FIELD_PREP(FBNIC_RPC_TCAM_ACT_VALUE,
1194 				act_tcam->value.tcam[i]));
1195 
1196 	wrfl(fbd);
1197 
1198 	wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), FBNIC_RPC_TCAM_VALIDATE);
1199 	act_tcam->state = FBNIC_TCAM_S_VALID;
1200 }
1201 
1202 void fbnic_write_rules(struct fbnic_dev *fbd)
1203 {
1204 	int i;
1205 
1206 	/* Flush any pending action table rules */
1207 	for (i = 0; i < FBNIC_RPC_ACT_TBL_NUM_ENTRIES; i++) {
1208 		struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
1209 
1210 		/* Check if update flag is set else skip. */
1211 		if (!(act_tcam->state & FBNIC_TCAM_S_UPDATE))
1212 			continue;
1213 
1214 		if (act_tcam->state == FBNIC_TCAM_S_DELETE)
1215 			fbnic_delete_act_tcam(fbd, i);
1216 		else
1217 			fbnic_update_act_tcam(fbd, i);
1218 	}
1219 }
1220