xref: /linux/drivers/net/ethernet/cisco/enic/enic_res.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
4  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/errno.h>
9 #include <linux/types.h>
10 #include <linux/pci.h>
11 #include <linux/netdevice.h>
12 
13 #include "wq_enet_desc.h"
14 #include "rq_enet_desc.h"
15 #include "cq_enet_desc.h"
16 #include "vnic_resource.h"
17 #include "vnic_enet.h"
18 #include "vnic_dev.h"
19 #include "vnic_wq.h"
20 #include "vnic_rq.h"
21 #include "vnic_cq.h"
22 #include "vnic_intr.h"
23 #include "vnic_stats.h"
24 #include "vnic_nic.h"
25 #include "vnic_rss.h"
26 #include "enic_res.h"
27 #include "enic.h"
28 
/* Read the vNIC's configuration from the device into enic->config:
 * first the MAC address, then each struct vnic_enet_config field via
 * individual devcmd reads, and finally clamp/normalize the values to
 * driver-supported ranges.
 *
 * Returns 0 on success or the (negative) devcmd error, which is also
 * logged.
 */
int enic_get_vnic_config(struct enic *enic)
{
	struct vnic_enet_config *c = &enic->config;
	int err;

	err = vnic_dev_get_mac_addr(enic->vdev, enic->mac_addr);
	if (err) {
		dev_err(enic_get_dev(enic),
			"Error getting MAC addr, %d\n", err);
		return err;
	}

	/* Fetch one field of struct vnic_enet_config from the device.
	 * NOTE: on failure this macro logs and executes "return err"
	 * from the enclosing function.
	 */
#define GET_CONFIG(m) \
	do { \
		err = vnic_dev_spec(enic->vdev, \
			offsetof(struct vnic_enet_config, m), \
			sizeof(c->m), &c->m); \
		if (err) { \
			dev_err(enic_get_dev(enic), \
				"Error getting %s, %d\n", #m, err); \
			return err; \
		} \
	} while (0)

	GET_CONFIG(flags);
	GET_CONFIG(wq_desc_count);
	GET_CONFIG(rq_desc_count);
	GET_CONFIG(mtu);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(intr_mode);
	GET_CONFIG(intr_timer_usec);
	GET_CONFIG(loop_tag);
	GET_CONFIG(num_arfs);
	GET_CONFIG(max_rq_ring);
	GET_CONFIG(max_wq_ring);
	GET_CONFIG(max_cq_ring);

	/* Devices that report 0 for the ring maxima get the driver
	 * defaults instead.
	 */
	if (!c->max_wq_ring)
		c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
	if (!c->max_rq_ring)
		c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
	if (!c->max_cq_ring)
		c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;

	/* Clamp descriptor counts to [ENIC_MIN_*_DESCS, max_*_ring] */
	c->wq_desc_count =
		min_t(u32, c->max_wq_ring,
		      max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
	c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */

	c->rq_desc_count =
		min_t(u32, c->max_rq_ring,
		      max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
	c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */

	/* Default to the classic Ethernet MTU when unset, then clamp */
	if (c->mtu == 0)
		c->mtu = 1500;
	c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));

	/* Coalescing timer can't exceed what the device supports */
	c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));

	dev_info(enic_get_dev(enic),
		 "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
		 enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
		 c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);

	dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
		"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
		"loopback tag 0x%04x\n",
		ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
		ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
		ENIC_SETTING(enic, TSO) ? "yes" : "no",
		ENIC_SETTING(enic, LRO) ? "yes" : "no",
		ENIC_SETTING(enic, RSS) ? "yes" : "no",
		c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
		c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
		c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
		"unknown",
		c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
		c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
		"unknown",
		c->intr_timer_usec,
		c->loop_tag);

	return 0;
}
115 
/* Ask the device to add VLAN id @vlanid to its filter. Returns 0 on
 * success or the devcmd error (which is also logged).
 */
int enic_add_vlan(struct enic *enic, u16 vlanid)
{
	int wait = 1000;
	u64 a0 = vlanid;
	u64 a1 = 0;
	int err;

	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
	if (err)
		dev_err(enic_get_dev(enic), "Can't add vlan id, %d\n", err);

	return err;
}
128 
/* Ask the device to remove VLAN id @vlanid from its filter. Returns 0
 * on success or the devcmd error (which is also logged).
 */
int enic_del_vlan(struct enic *enic, u16 vlanid)
{
	int wait = 1000;
	u64 a0 = vlanid;
	u64 a1 = 0;
	int err;

	err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
	if (err)
		dev_err(enic_get_dev(enic), "Can't delete vlan id, %d\n", err);

	return err;
}
141 
/* Pack the RSS/TSO/VLAN-strip settings into a nic_cfg word and push it
 * to the device. UDP RSS hashing requires the checked variant of the
 * devcmd (CMD_NIC_CFG_CHK). Returns the devcmd result.
 */
int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
	u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
	u8 ig_vlan_strip_en)
{
	enum vnic_devcmd_cmd cmd;
	int wait = 1000;
	u64 a0, a1 = 0;
	u32 cfg;

	vnic_set_nic_cfg(&cfg, rss_default_cpu, rss_hash_type, rss_hash_bits,
			 rss_base_cpu, rss_enable, tso_ipid_split_en,
			 ig_vlan_strip_en);

	a0 = cfg;

	/* UDP hash types are only valid if the firmware verifies them */
	cmd = (rss_hash_type & (NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 |
				NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)) ?
		CMD_NIC_CFG_CHK : CMD_NIC_CFG;

	return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
}
164 
/* Hand the device the DMA address/length of the RSS hash key buffer.
 * Returns the devcmd result.
 */
int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
{
	int wait = 1000;
	u64 a1 = len;
	u64 a0 = (u64)key_pa;

	return vnic_dev_cmd(enic->vdev, CMD_RSS_KEY, &a0, &a1, wait);
}
172 
/* Hand the device the DMA address/length of the RSS indirection table.
 * Returns the devcmd result.
 */
int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len)
{
	int wait = 1000;
	u64 a1 = len;
	u64 a0 = (u64)cpu_pa;

	return vnic_dev_cmd(enic->vdev, CMD_RSS_CPU, &a0, &a1, wait);
}
180 
enic_free_vnic_resources(struct enic * enic)181 void enic_free_vnic_resources(struct enic *enic)
182 {
183 	unsigned int i;
184 
185 	for (i = 0; i < enic->wq_count; i++)
186 		vnic_wq_free(&enic->wq[i].vwq);
187 	for (i = 0; i < enic->rq_count; i++)
188 		vnic_rq_free(&enic->rq[i].vrq);
189 	for (i = 0; i < enic->cq_count; i++)
190 		vnic_cq_free(&enic->cq[i]);
191 	for (i = 0; i < enic->intr_count; i++)
192 		vnic_intr_free(&enic->intr[i]);
193 }
194 
enic_get_res_counts(struct enic * enic)195 void enic_get_res_counts(struct enic *enic)
196 {
197 	enic->wq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
198 	enic->rq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
199 	enic->cq_avail = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
200 	enic->intr_avail = vnic_dev_get_res_count(enic->vdev,
201 						  RES_TYPE_INTR_CTRL);
202 
203 	enic->wq_count = enic->wq_avail;
204 	enic->rq_count = enic->rq_avail;
205 	enic->cq_count = enic->cq_avail;
206 	enic->intr_count = enic->intr_avail;
207 
208 	dev_info(enic_get_dev(enic),
209 		"vNIC resources avail: wq %d rq %d cq %d intr %d\n",
210 		enic->wq_avail, enic->rq_avail,
211 		enic->cq_avail, enic->intr_avail);
212 }
213 
/* Program the already-allocated RQ/WQ/CQ/INTR resources with their
 * runtime parameters: CQ-to-queue mapping, error-interrupt routing and
 * interrupt coalescing, all derived from the active interrupt mode.
 */
void enic_init_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int interrupt_offset;
	unsigned int error_interrupt_enable;
	unsigned int error_interrupt_offset;
	unsigned int cq_index;
	unsigned int i;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	/* Init RQ/WQ resources.
	 *
	 * RQ[0 - n-1] point to CQ[0 - n-1]
	 * WQ[0 - m-1] point to CQ[n - n+m-1]
	 *
	 * Error interrupt is not enabled for MSI.
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_INTX:
		error_interrupt_enable = 1;
		error_interrupt_offset = ENIC_LEGACY_ERR_INTR;
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		error_interrupt_enable = 1;
		error_interrupt_offset = enic_msix_err_intr(enic);
		break;
	default:
		/* MSI (and anything else): no error interrupt */
		error_interrupt_enable = 0;
		error_interrupt_offset = 0;
		break;
	}

	for (i = 0; i < enic->rq_count; i++) {
		cq_index = i;
		vnic_rq_init(&enic->rq[i].vrq,
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	for (i = 0; i < enic->wq_count; i++) {
		/* WQ completion queues follow directly after the RQ ones */
		cq_index = enic->rq_count + i;
		vnic_wq_init(&enic->wq[i].vwq,
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	/* Init CQ resources
	 *
	 * All CQs point to INTR[0] for INTx, MSI
	 * CQ[i] point to INTR[ENIC_MSIX_IO_INTR_BASE + i] for MSI-X
	 */

	for (i = 0; i < enic->cq_count; i++) {

		switch (intr_mode) {
		case VNIC_DEV_INTR_MODE_MSIX:
			interrupt_offset = ENIC_MSIX_IO_INTR_BASE + i;
			break;
		default:
			interrupt_offset = 0;
			break;
		}

		vnic_cq_init(&enic->cq[i],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			1 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			interrupt_offset,
			0 /* cq_message_addr */);
	}

	/* Init INTR resources
	 *
	 * mask_on_assertion is not used for INTx due to the level-
	 * triggered nature of INTx
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_MSIX:
		mask_on_assertion = 1;
		break;
	default:
		mask_on_assertion = 0;
		break;
	}

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_init(&enic->intr[i],
			enic->config.intr_timer_usec,
			enic->config.intr_timer_type,
			mask_on_assertion);
	}
}
318 
/* Allocate ring memory for every WQ, RQ, CQ and interrupt resource and
 * hook the legacy interrupt PBA when running in INTx mode.
 *
 * On any failure, everything allocated so far is freed via
 * enic_free_vnic_resources() and the error is returned.
 */
int enic_alloc_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	int rq_cq_desc_size;
	unsigned int i;
	int err;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	dev_info(enic_get_dev(enic), "vNIC resources used:  "
		"wq %d rq %d cq %d intr %d intr mode %s\n",
		enic->wq_count, enic->rq_count,
		enic->cq_count, enic->intr_count,
		intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
		"unknown");

	/* RQ completion entry size was negotiated earlier (enic_ext_cq) */
	switch (enic->ext_cq) {
	case ENIC_RQ_CQ_ENTRY_SIZE_16:
		rq_cq_desc_size = 16;
		break;
	case ENIC_RQ_CQ_ENTRY_SIZE_32:
		rq_cq_desc_size = 32;
		break;
	case ENIC_RQ_CQ_ENTRY_SIZE_64:
		rq_cq_desc_size = 64;
		break;
	default:
		dev_err(enic_get_dev(enic),
			"Unable to determine rq cq desc size: %d",
			enic->ext_cq);
		err = -ENODEV;
		goto err_out;
	}

	/* Allocate queue resources
	 */

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_alloc(enic->vdev, &enic->wq[i].vwq, i,
			enic->config.wq_desc_count,
			sizeof(struct wq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_alloc(enic->vdev, &enic->rq[i].vrq, i,
			enic->config.rq_desc_count,
			sizeof(struct rq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ[0..rq_count-1] serve RQs, the rest serve WQs; size each
	 * to match its queue's descriptor count.
	 */
	for (i = 0; i < enic->cq_count; i++) {
		if (i < enic->rq_count)
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
					enic->config.rq_desc_count,
					rq_cq_desc_size);
		else
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.wq_desc_count,
				sizeof(struct cq_enet_wq_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err)
			goto err_out_cleanup;
	}

	/* Hook remaining resource
	 */

	/* The legacy pending-bit array is only required for INTx */
	enic->legacy_pba = vnic_dev_get_res(enic->vdev,
		RES_TYPE_INTR_PBA_LEGACY, 0);
	if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		dev_err(enic_get_dev(enic),
			"Failed to hook legacy pba resource\n");
		err = -ENODEV;
		goto err_out_cleanup;
	}

	return 0;

err_out_cleanup:
	enic_free_vnic_resources(enic);
err_out:
	return err;
}
412 
413 /*
414  * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
415  * that command
416  */
enic_ext_cq(struct enic * enic)417 void enic_ext_cq(struct enic *enic)
418 {
419 	u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
420 	int wait = 1000;
421 	int ret;
422 
423 	spin_lock_bh(&enic->devcmd_lock);
424 	ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
425 	if (ret || a0) {
426 		dev_info(&enic->pdev->dev,
427 			 "CMD_CQ_ENTRY_SIZE_SET not supported.");
428 		enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
429 		goto out;
430 	}
431 	a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
432 	enic->ext_cq = fls(a1) - 1;
433 	a0 = VNIC_RQ_ALL;
434 	a1 = enic->ext_cq;
435 	ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
436 	if (ret) {
437 		dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
438 		enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
439 	}
440 out:
441 	spin_unlock_bh(&enic->devcmd_lock);
442 	dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
443 		 16 << enic->ext_cq);
444 }
445