/*-
 * Copyright (c) 2016 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"

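/*
 * Local version of msleep(): busy-wait with DELAY() during early boot
 * (while "cold" is set and the scheduler isn't running yet), otherwise
 * sleep via pause(9) for the requested number of milliseconds.
 */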
#undef msleep
#define msleep(x) do { \
	if (cold) \
		DELAY((x) * 1000); \
	else \
		pause("t4hw", (x) * hz / 1000); \
} while (0)

/*
 * Wait for the device to become ready (signified by our "who am I" register
 * returning a value other than all 1's or all e's).  Return an error if it
 * doesn't become ready ...
 */
int t4vf_wait_dev_ready(struct adapter *adapter)
{
	const u32 whoami = VF_PL_REG(A_PL_VF_WHOAMI);
	const u32 notready1 = 0xffffffff;
	const u32 notready2 = 0xeeeeeeee;
	u32 val;

	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
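	/*
	 * Not ready yet; give the device another 500ms to come out of
	 * reset and check one more time before giving up.
	 */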
	msleep(500);
	val = t4_read_reg(adapter, whoami);
	if (val != notready1 && val != notready2)
		return 0;
	else
		return -EIO;
}

/**
 * t4vf_fw_reset - issue a reset to FW
 * @adapter: the adapter
 *
 * Issues a reset command to FW.  For a Physical Function this would
 * result in the Firmware resetting all of its state.  For a Virtual
 * Function this just resets the state associated with the VF.
 */
int t4vf_fw_reset(struct adapter *adapter)
{
	struct fw_reset_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
	    F_FW_CMD_WRITE);
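	/*
	 * The mailbox interface expects the command length in retval_len16
	 * to be expressed in 16-byte units.
	 */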
	cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd)));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
 * @adapter: the adapter
 *
 * Retrieves various core SGE parameters in the form of hardware SGE
 * register values.  The caller is responsible for decoding these as
 * needed.  The SGE parameters are stored in @adapter->params.sge.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sp = &adapter->params.sge;
	u32 params[7], vals[7];
	u32 whoami;
	unsigned int pf, s_hps;
	int i, v;

	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
	    V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
	    V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
	params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
	    V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
	params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
	    V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
	params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
	    V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
	params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
	    V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
	params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
	    V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v != FW_SUCCESS)
		return v;

	sp->sge_control = vals[0];
	sp->counter_val[0] = G_THRESHOLD_0(vals[6]);
	sp->counter_val[1] = G_THRESHOLD_1(vals[6]);
	sp->counter_val[2] = G_THRESHOLD_2(vals[6]);
	sp->counter_val[3] = G_THRESHOLD_3(vals[6]);
	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(vals[2]));
	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(vals[2]));
	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(vals[3]));
	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(vals[3]));
	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(vals[4]));
	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(vals[4]));

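	/*
	 * Derive the free-list starvation thresholds from the egress
	 * congestion thresholds in SGE_CONM_CTRL.  T5 and later report a
	 * separate threshold for packed buffer mode, tracked here as
	 * fl_starve_threshold2.
	 */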
	sp->fl_starve_threshold = G_EGRTHRESHOLD(vals[5]) * 2 + 1;
	if (is_t4(adapter))
		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
	else if (is_t5(adapter))
		sp->fl_starve_threshold2 =
		    G_EGRTHRESHOLDPACKING(vals[5]) * 2 + 1;
	else
		sp->fl_starve_threshold2 =
		    G_T6_EGRTHRESHOLDPACKING(vals[5]) * 2 + 1;

	/*
	 * We need the Queues/Page and Host Page Size for our VF.
	 * This is based on the PF from which we're instantiated.
	 */
	whoami = t4_read_reg(adapter, VF_PL_REG(A_PL_VF_WHOAMI));
	if (chip_id(adapter) <= CHELSIO_T5)
		pf = G_SOURCEPF(whoami);
	else
		pf = G_T6_SOURCEPF(whoami);

	s_hps = (S_HOSTPAGESIZEPF0 +
	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * pf);
	sp->page_shift = ((vals[1] >> s_hps) & M_HOSTPAGESIZEPF0) + 10;
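	/*
	 * Note: the per-PF HOSTPAGESIZE field holds log2(page size) - 10;
	 * e.g. a field value of 2 yields page_shift = 12 (4KB host pages).
	 */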

	for (i = 0; i < SGE_FLBUF_SIZES; i++) {
		params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		    V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0 + (4 * i)));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS)
			return v;

		sp->sge_fl_buffer_size[i] = vals[0];
	}

	/*
	 * T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and the Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
	 * SGE_CONTROL2 in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
	 */
	sp->spg_len = sp->sge_control & F_EGRSTATUSPAGESIZE ? 128 : 64;
	sp->fl_pktshift = G_PKTSHIFT(sp->sge_control);
	if (chip_id(adapter) <= CHELSIO_T5) {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(sp->sge_control) +
		    X_INGPADBOUNDARY_SHIFT);
	} else {
		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(sp->sge_control) +
		    X_T6_INGPADBOUNDARY_SHIFT);
	}
	if (is_t4(adapter))
		sp->pack_boundary = sp->pad_boundary;
	else {
		params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		    V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			CH_ERR(adapter, "Unable to get SGE Control2; "
			    "probably old firmware.\n");
			return v;
		}
		if (G_INGPACKBOUNDARY(vals[0]) == 0)
			sp->pack_boundary = 16;
		else
			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(vals[0]) +
			    5);
	}
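	/*
	 * Note: on T5 and later an INGPACKBOUNDARY field value of 0 is a
	 * special encoding for a 16-byte packing boundary; all other values
	 * select a boundary of 2^(value + 5) bytes (32 bytes and up).
	 */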

	/*
	 * For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow these registers to be
	 * read.
	 */
	if (!is_t4(adapter)) {
		unsigned int s_qpp;

		params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		    V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
		params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		    V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			CH_WARN(adapter, "Unable to get VF SGE Queues/Page; "
			    "probably old firmware.\n");
			return v;
		}

		s_qpp = (S_QUEUESPERPAGEPF0 +
		    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * pf);
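		/*
		 * eq_s_qpp and iq_s_qpp are log2 of the number of egress and
		 * ingress queues mapped to each BAR2 page for this VF; they
		 * feed into the BAR2 doorbell/GTS offset calculations.
		 */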
		sp->eq_s_qpp = ((vals[0] >> s_qpp) & M_QUEUESPERPAGEPF0);
		sp->iq_s_qpp = ((vals[1] >> s_qpp) & M_QUEUESPERPAGEPF0);
	}

	return 0;
}

/**
 * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
 * @adapter: the adapter
 *
 * Retrieves global RSS mode and parameters with which we have to live
 * and stores them in the @adapter's RSS parameters.
 */
int t4vf_get_rss_glb_config(struct adapter *adapter)
{
	struct rss_params *rss = &adapter->params.rss;
	struct fw_rss_glb_config_cmd cmd, rpl;
	int v;

	/*
	 * Execute an RSS Global Configuration read command to retrieve
	 * our RSS configuration.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
	    F_FW_CMD_REQUEST |
	    F_FW_CMD_READ);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v != FW_SUCCESS)
		return v;

	/*
	 * Translate the big-endian RSS Global Configuration into our
	 * cpu-endian format based on the RSS mode.  We also do first level
	 * filtering at this point to weed out modes which don't support
	 * VF Drivers ...
	 */
	rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE(
	    be32_to_cpu(rpl.u.manual.mode_pkd));
	switch (rss->mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
		u32 word = be32_to_cpu(
		    rpl.u.basicvirtual.synmapen_to_hashtoeplitz);

		rss->u.basicvirtual.synmapen =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
		rss->u.basicvirtual.syn4tupenipv6 =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
		rss->u.basicvirtual.syn2tupenipv6 =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
		rss->u.basicvirtual.syn4tupenipv4 =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
		rss->u.basicvirtual.syn2tupenipv4 =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);

		rss->u.basicvirtual.ofdmapen =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);

		rss->u.basicvirtual.tnlmapen =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
		rss->u.basicvirtual.tnlalllookup =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);

		rss->u.basicvirtual.hashtoeplitz =
		    ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);

		/* we need at least Tunnel Map Enable to be set */
		if (!rss->u.basicvirtual.tnlmapen)
			return -EINVAL;
		break;
	}

	default:
		/* all unknown/unsupported RSS modes result in an error */
		return -EINVAL;
	}

	return 0;
}

/**
 * t4vf_get_vfres - retrieve VF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for a virtual
 * function.  The results are stored in @adapter->params.vfres.
 */
int t4vf_get_vfres(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/*
	 * Execute PFVF Read command to get VF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
	    F_FW_CMD_REQUEST |
	    F_FW_CMD_READ);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (v != FW_SUCCESS)
		return v;

	/*
	 * Extract VF resource limits and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
	vfres->niq = G_FW_PFVF_CMD_NIQ(word);

	word = be32_to_cpu(rpl.type_to_neq);
	vfres->neq = G_FW_PFVF_CMD_NEQ(word);
	vfres->pmask = G_FW_PFVF_CMD_PMASK(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	vfres->tc = G_FW_PFVF_CMD_TC(word);
	vfres->nvi = G_FW_PFVF_CMD_NVI(word);
	vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
	vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
	vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);

	return 0;
}

/**
 * t4vf_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Wait for the device to become ready, determine the chip type from the
 * PCI device ID, and set up the default adapter parameters needed before
 * we can talk to the firmware.
 */
int t4vf_prep_adapter(struct adapter *adapter)
{
	int err;

	/*
	 * Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;

	adapter->params.chipid = pci_get_device(adapter->dev) >> 12;
	if (adapter->params.chipid >= 0xa) {
		adapter->params.chipid -= (0xa - 0x4);
		adapter->params.fpga = 1;
	}
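	/*
	 * The chip id is carried in the top nibble of the PCI device ID;
	 * IDs of 0xa and above identify FPGA configurations and are mapped
	 * back onto the corresponding ASIC chip id above.
	 */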

	/*
	 * Default port and clock for debugging in case we can't reach
	 * firmware.
	 */
	adapter->params.nports = 1;
	adapter->params.vfres.pmask = 1;
	adapter->params.vpd.cclk = 50000;

	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
	if (adapter->chip_params == NULL)
		return -EINVAL;

	return 0;
}

/*
 * t4vf_get_vf_mac - Get the MAC address to be set to the VI of this VF.
 * @adapter: The adapter
 * @port: The port associated with the VF
 * @naddr: the number of ACL MAC addresses returned in addr
 * @addr: Placeholder for the MAC addresses
 *
 * Find the MAC address to be set to the VF's VI.  The requested MAC
 * address is provided by the host OS via a callback in the PF driver.
 */
int t4vf_get_vf_mac(struct adapter *adapter, unsigned int port,
    unsigned int *naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_ACL_MAC_CMD) |
	    F_FW_CMD_REQUEST |
	    F_FW_CMD_READ);
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	if (cmd.nmac < *naddr)
		*naddr = cmd.nmac;

	switch (port) {
	case 3:
		memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0));
		break;
	}

	return ret;
}

/*
 * t4vf_get_vf_vlan - Get the VLAN ID to be set to the VI of this VF.
 * @adapter: The adapter
 *
 * Find the VLAN ID to be set to the VF's VI.  The requested VLAN ID is
 * provided by the host OS via a callback in the PF driver.
 */
int t4vf_get_vf_vlan(struct adapter *adapter)
{
	struct fw_acl_vlan_cmd cmd = {0};
	int vlan = 0;
	int ret = 0;

	cmd.op_to_vfn = htonl(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);

	/* Note: Do not enable the ACL */
	cmd.en_to_len16 = htonl((unsigned int)FW_LEN16(cmd));

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);

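	/* Only the first entry of the returned ACL VLAN list is used. */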
	if (!ret)
		vlan = be16_to_cpu(cmd.vlanid[0]);

	return vlan;
}