/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "cn23xx_pf_device.h"
#include "octeon_main.h"
#include "octeon_mailbox.h"

#define RESET_NOTDONE 0
#define RESET_DONE 1

/* Change the value of SLI Packet Input Jabber Register to allow
 * VXLAN TSO packets which can be 64424 bytes, exceeding the
 * MAX_GSO_SIZE we supplied to the kernel
 */
#define CN23XX_INPUT_JABBER 64600

static int cn23xx_pf_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
		oct->octeon_id);

	octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);

	/* Initiate chip-wide soft reset */
	lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
	lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);

	/* Wait for 100ms as Octeon resets */
	mdelay(100);

	if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
		dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
			oct->octeon_id);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
		oct->octeon_id);

	/* Restore the reset value */
	octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}

static void cn23xx_enable_error_reporting(struct octeon_device *oct)
{
	u32 regval;
	u32 uncorrectable_err_mask, corrtable_err_status;

	pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
	if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
		uncorrectable_err_mask = 0;
		corrtable_err_status = 0;
		pci_read_config_dword(oct->pci_dev,
				      CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
				      &uncorrectable_err_mask);
		pci_read_config_dword(oct->pci_dev,
				      CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
				      &corrtable_err_status);
		dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
			"\tdev_ctl_status_reg = 0x%08x\n"
			"\tuncorrectable_error_mask_reg = 0x%08x\n"
			"\tcorrectable_error_status_reg = 0x%08x\n",
			regval, uncorrectable_err_mask,
			corrtable_err_status);
	}

	regval |= 0xf; /* Enable Link error reporting */

	dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
		oct->octeon_id);
	pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
}

static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
{
	/* Bits 29:24 of RST_BOOT[PNR_MUL] holds the ref.clock MULTIPLIER
	 * for SLI.
	 */

	/* TBD: get the info in Hand-shake */
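	/* PNR_MUL is in units of 50 MHz, so the value returned below is the
	 * coprocessor/SLI reference clock in MHz (e.g. a hypothetical
	 * PNR_MUL of 30 would correspond to 1500 MHz).
	 */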
	return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
}

u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);

	oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps give the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;
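	/* Net effect: oq ticks ~= coprocessor clock (MHz) * time_intr_in_us /
	 * 1024, with integer truncation at each step; e.g. a hypothetical
	 * 800 MHz clock and a 100 us interval give about 78 ticks.
	 */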

	return oqticks_per_us;
}

static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
{
	u16 mac_no = oct->pcie_port;
	u16 pf_num = oct->pf_num;
	u64 reg_val;
	u64 temp;

	/* Programming SRN and TRS for each MAC(0..3) */
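	/* RINFO field layout used below: SRN in bits <6:0>, TRS in <23:16>,
	 * RPVF in <39:32>, NVFS in <55:48> (a summary of the per-field
	 * comments that follow).
	 */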

	dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
		__func__, mac_no);
	/* By default, map all 64 IOQs to a single MAC */

	reg_val =
		octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));

	if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
		/* setting SRN <6:0> */
		reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
	} else {
		/* setting SRN <6:0> */
		reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
	}

	/* setting TRS <23:16> */
	reg_val = reg_val |
		  (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
	/* setting RPVF <39:32> */
	temp = oct->sriov_info.rings_per_vf & 0xff;
	reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS);

	/* setting NVFS <55:48> */
	temp = oct->sriov_info.max_vfs & 0xff;
	reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);

	/* Write these settings to MAC register */
	octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
			   reg_val);

	dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
		mac_no, pf_num, (u64)octeon_read_csr64
		(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
}

static int cn23xx_reset_io_queues(struct octeon_device *oct)
{
	int ret_val = 0;
	u64 d64;
	u32 q_no, srn, ern;
	u32 loop = 1000;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	/* As per HRM reg description, s/w can't write 0 to ENB. */
	/* We need to set the RST bit, to turn the queue off. */

	/* Reset the enable bit for all the 64 IQs. */
	for (q_no = srn; q_no < ern; q_no++) {
		/* set RST bit to 1. This bit applies to both IQ and OQ */
		d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
	}

	/* Wait until the RST bit is clear or the RST and quiet bits are set */
	for (q_no = srn; q_no < ern; q_no++) {
		u64 reg_val = octeon_read_csr64(oct,
					CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
		while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
		       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
		       loop--) {
			WRITE_ONCE(reg_val, octeon_read_csr64(
					oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
		}
		if (!loop) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
				q_no);
			return -1;
		}
		WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
			~CN23XX_PKT_INPUT_CTL_RST);
		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   READ_ONCE(reg_val));

		WRITE_ONCE(reg_val, octeon_read_csr64(
				oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
		if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
			dev_err(&oct->pci_dev->dev,
				"clearing the reset failed for qno: %u\n",
				q_no);
			ret_val = -1;
		}
	}

	return ret_val;
}

static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
{
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
	struct octeon_instr_queue *iq;
	u64 intr_threshold, reg_val;
	u32 q_no, ern, srn;
	u64 pf_num;
	u64 vf_num;

	pf_num = oct->pf_num;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	if (cn23xx_reset_io_queues(oct))
		return -1;

	/* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
	 * for all queues. Only PF can set these bits.
	 * bits 29:30 indicate the MAC num.
	 * bits 32:47 indicate the PVF num.
	 */
	for (q_no = 0; q_no < ern; q_no++) {
		reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;

		/* For VF assigned queues. */
		if (q_no < oct->sriov_info.pf_srn) {
			vf_num = q_no / oct->sriov_info.rings_per_vf;
			vf_num += 1; /* VF1, VF2,........ */
		} else {
			vf_num = 0;
		}
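		/* e.g. (hypothetical): with rings_per_vf == 2, queues 0-1 map
		 * to VF1, queues 2-3 to VF2, and so on; PF-owned queues
		 * (q_no >= pf_srn) keep vf_num 0.
		 */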

		reg_val |= vf_num << CN23XX_PKT_INPUT_CTL_VF_NUM_POS;
		reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;

		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   reg_val);
	}

	/* Select ES, RO, NS, RDSIZE,DPTR Format#0 for
	 * pf queues
	 */
	for (q_no = srn; q_no < ern; q_no++) {
		void __iomem *inst_cnt_reg;

		iq = oct->instr_queue[q_no];
		if (iq)
			inst_cnt_reg = iq->inst_cnt_reg;
		else
			inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
				       CN23XX_SLI_IQ_INSTR_COUNT64(q_no);

		reg_val =
			octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

		reg_val |= CN23XX_PKT_INPUT_CTL_MASK;

		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   reg_val);

		/* Set WMARK level to trigger PI_INT */
		/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
		intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
				 CN23XX_PKT_IN_DONE_WMARK_MASK;

		writeq((readq(inst_cnt_reg) &
			~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
			  CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
		       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
		       inst_cnt_reg);
	}
	return 0;
}

static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
{
	u32 reg_val;
	u32 q_no, ern, srn;
	u64 time_threshold;

	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->sriov_info.num_pf_rings;

	if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
		octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
	} else {
		/** Set Output queue watermark to 0 to disable backpressure */
		octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
	}

	for (q_no = srn; q_no < ern; q_no++) {
		reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));

		/* clear IPTR */
		reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;

		/* set DPTR */
		reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;

		/* reset BMODE */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue ScatterList
		 * reset ROR_P, NSR_P
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#ifdef __LITTLE_ENDIAN_BITFIELD
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#else
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
		/* No Relaxed Ordering, No Snoop, 64-bit Byte swap
		 * for Output Queue Data
		 * reset ROR, NSR
		 */
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
		reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
		/* set the ES bit */
		reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

		/* Write all the selected settings */
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);

		/* These interrupts are enabled in the
		 * oct->fn_list.enable_interrupt() routine, which is called
		 * after IOQ init.
		 * Set up the interrupt packet and time thresholds
		 * for all the OQs here.
		 */
		time_threshold = cn23xx_pf_get_oq_ticks(
			oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));

		octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
				   (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
				    (time_threshold << 32)));
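		/* PKT_INT_LEVELS packs the time threshold (in OQ ticks) into
		 * the upper 32 bits and the packet-count threshold into the
		 * lower 32 bits.
		 */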
	}

	/** Setting the water mark level for pko back pressure **/
	writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);

	/* Disable setting OQs in reset when the ring has no doorbells;
	 * enabling this will cause head-of-line blocking.
	 */
	/* Do it only for pass 1.1 and pass 1.2 */
	if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
	    (oct->rev_id == OCTEON_CN23XX_REV_1_1))
		writeq(readq((u8 *)oct->mmio[0].hw_addr +
			     CN23XX_SLI_GBL_CONTROL) | 0x2,
		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);

	/** Enable channel-level backpressure **/
	if (oct->pf_num)
		writeq(0xffffffffffffffffULL,
		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
	else
		writeq(0xffffffffffffffffULL,
		       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
}

static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
	cn23xx_enable_error_reporting(oct);

	/* Program the MAC(0..3)_RINFO before setting up input/output regs */
	cn23xx_setup_global_mac_regs(oct);

	if (cn23xx_pf_setup_global_input_regs(oct))
		return -1;

	cn23xx_pf_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid a host hang
	 * when it reads an invalid register
	 */
	octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
			   CN23XX_SLI_WINDOW_CTL_DEFAULT);

	/* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */
	octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
	return 0;
}

static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
	u64 pkt_in_done;

	iq_no += oct->sriov_info.pf_srn;

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg =
		(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg =
		(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter (used in flush_iq
	 * calculation)
	 */
	pkt_in_done = readq(iq->inst_cnt_reg);

	if (oct->msix_on) {
		/* Set CINT_ENB to enable IQ interrupt */
		writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
		       iq->inst_cnt_reg);
	} else {
		/* Clear the count by writing back what we read, but don't
		 * enable interrupts
		 */
		writeq(pkt_in_done, iq->inst_cnt_reg);
	}

	iq->reset_instr_cnt = 0;
}

static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 reg_val;
	struct octeon_droq *droq = oct->droq[oq_no];
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
	u64 time_threshold;
	u64 cnt_threshold;

	oq_no += oct->sriov_info.pf_srn;

	octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 droq->buffer_size);

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
		(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
		(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);

	if (!oct->msix_on) {
		/* Enable this output queue to generate Packet Timer Interrupt
		 */
		reg_val =
			octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
		reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
				 reg_val);

		/* Enable this output queue to generate Packet Count Interrupt
		 */
		reg_val =
			octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
		reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
				 reg_val);
	} else {
		time_threshold = cn23xx_pf_get_oq_ticks(
			oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
		cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);

		octeon_write_csr64(
			oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
			((time_threshold << 32 | cnt_threshold)));
	}
}

static void cn23xx_pf_mbox_thread(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
	struct octeon_device *oct = mbox->oct_dev;
	u64 mbox_int_val, val64;
	u32 q_no, i;

	if (oct->rev_id < OCTEON_CN23XX_REV_1_1) {
		/* read and clear by writing 1 */
		mbox_int_val = readq(mbox->mbox_int_reg);
		writeq(mbox_int_val, mbox->mbox_int_reg);

		for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
			q_no = i * oct->sriov_info.rings_per_vf;

			val64 = readq(oct->mbox[q_no]->mbox_write_reg);

			if (val64 && (val64 != OCTEON_PFVFACK)) {
				if (octeon_mbox_read(oct->mbox[q_no]))
					octeon_mbox_process_message(
						oct->mbox[q_no]);
			}
		}

		schedule_delayed_work(&wk->work, msecs_to_jiffies(10));
	} else {
		octeon_mbox_process_message(mbox);
	}
}

static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
{
	struct octeon_mbox *mbox = NULL;
	u16 mac_no = oct->pcie_port;
	u16 pf_num = oct->pf_num;
	u32 q_no, i;

	if (!oct->sriov_info.max_vfs)
		return 0;

	for (i = 0; i < oct->sriov_info.max_vfs; i++) {
		q_no = i * oct->sriov_info.rings_per_vf;

		mbox = vzalloc(sizeof(*mbox));
		if (!mbox)
			goto free_mbox;

		spin_lock_init(&mbox->lock);

		mbox->oct_dev = oct;

		mbox->q_no = q_no;

		mbox->state = OCTEON_MBOX_STATE_IDLE;

		/* PF mbox interrupt reg */
		mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr +
				     CN23XX_SLI_MAC_PF_MBOX_INT(mac_no, pf_num);

		/* PF writes into SIG0 reg */
		mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr +
				       CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 0);

		/* PF reads from SIG1 reg */
		mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
				      CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);

		/* Mail Box Thread creation */
		INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
				  cn23xx_pf_mbox_thread);
		mbox->mbox_poll_wk.ctxptr = (void *)mbox;

		oct->mbox[q_no] = mbox;

		writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
	}

	if (oct->rev_id < OCTEON_CN23XX_REV_1_1)
		schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
				      msecs_to_jiffies(0));

	return 0;

free_mbox:
	while (i) {
		i--;
		vfree(oct->mbox[i]);
	}

	return 1;
}

static int cn23xx_free_pf_mbox(struct octeon_device *oct)
{
	u32 q_no, i;

	if (!oct->sriov_info.max_vfs)
		return 0;

	for (i = 0; i < oct->sriov_info.max_vfs; i++) {
		q_no = i * oct->sriov_info.rings_per_vf;
		cancel_delayed_work_sync(
			&oct->mbox[q_no]->mbox_poll_wk.work);
		vfree(oct->mbox[q_no]);
	}

	return 0;
}

static int cn23xx_enable_io_queues(struct octeon_device *oct)
{
	u64 reg_val;
	u32 srn, ern, q_no;
	u32 loop = 1000;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	for (q_no = srn; q_no < ern; q_no++) {
		/* Set the corresponding IQ IS_64B bit */
		if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
			reg_val = octeon_read_csr64(
				oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
			octeon_write_csr64(
				oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}

		/* Set the corresponding IQ ENB bit */
		if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
			/* IOQs are in reset by default in PEM2 mode,
			 * clearing reset bit
			 */
			reg_val = octeon_read_csr64(
				oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

			if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
				while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
				       !(reg_val &
					 CN23XX_PKT_INPUT_CTL_QUIET) &&
				       --loop) {
					reg_val = octeon_read_csr64(
						oct,
						CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				}
				if (!loop) {
					dev_err(&oct->pci_dev->dev,
						"clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
						q_no);
					return -1;
				}
				reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
				octeon_write_csr64(
					oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
					reg_val);

				reg_val = octeon_read_csr64(
					oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
				if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
					dev_err(&oct->pci_dev->dev,
						"clearing the reset failed for qno: %u\n",
						q_no);
					return -1;
				}
			}
			reg_val = octeon_read_csr64(
				oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
			reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
			octeon_write_csr64(
				oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
		}
	}
	for (q_no = srn; q_no < ern; q_no++) {
		u32 reg_val;
		/* Set the corresponding OQ ENB bit */
		if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
			reg_val = octeon_read_csr(
				oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
			reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
			octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
					 reg_val);
		}
	}
	return 0;
}

static void cn23xx_disable_io_queues(struct octeon_device *oct)
{
	int q_no, loop;
	u64 d64;
	u32 d32;
	u32 srn, ern;

	srn = oct->sriov_info.pf_srn;
	ern = srn + oct->num_iqs;

	/*** Disable Input Queues. ***/
	for (q_no = srn; q_no < ern; q_no++) {
		loop = HZ;

		/* Start the Reset for a particular ring */
		WRITE_ONCE(d64, octeon_read_csr64(
			oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
		WRITE_ONCE(d64, READ_ONCE(d64) &
			(~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
		WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
		octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
				   READ_ONCE(d64));

		/* Wait until hardware indicates that the particular IQ
		 * is out of reset.
		 */
		WRITE_ONCE(d64, octeon_read_csr64(
			oct, CN23XX_SLI_PKT_IOQ_RING_RST));
		while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
			WRITE_ONCE(d64, octeon_read_csr64(
				oct, CN23XX_SLI_PKT_IOQ_RING_RST));
			schedule_timeout_uninterruptible(1);
		}

		/* Reset the doorbell register for this Input Queue. */
		octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
		while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
		       loop--) {
			schedule_timeout_uninterruptible(1);
		}
	}

	/*** Disable Output Queues. ***/
	for (q_no = srn; q_no < ern; q_no++) {
		loop = HZ;

		/* Wait until hardware indicates that the particular IQ
		 * is out of reset. Given that SLI_PKT_RING_RST is
		 * common for both IQs and OQs
		 */
		WRITE_ONCE(d64, octeon_read_csr64(
			oct, CN23XX_SLI_PKT_IOQ_RING_RST));
		while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
			WRITE_ONCE(d64, octeon_read_csr64(
				oct, CN23XX_SLI_PKT_IOQ_RING_RST));
			schedule_timeout_uninterruptible(1);
		}

		/* Reset the doorbell register for this Output Queue. */
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
				 0xFFFFFFFF);
		while (octeon_read_csr64(oct,
					 CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
		       loop--) {
			schedule_timeout_uninterruptible(1);
		}

		/* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
		WRITE_ONCE(d32, octeon_read_csr(
			oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
		octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
				 READ_ONCE(d32));
	}
}

static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	u64 pkts_sent;
	u64 ret = 0;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

	dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);

	if (!droq) {
		dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
			oct->pf_num, ioq_vector->ioq_num);
		return 0;
	}

	pkts_sent = readq(droq->pkts_sent_reg);

	/* If our device has interrupted, then proceed. Also check
	 * for all f's, in case the interrupt was triggered on an error
	 * and the PCI read failed.
	 */
	if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
		return ret;

	/* Write count reg in sli_pkt_cnts to clear these int. */
	if ((pkts_sent & CN23XX_INTR_PO_INT) ||
	    (pkts_sent & CN23XX_INTR_PI_INT)) {
		if (pkts_sent & CN23XX_INTR_PO_INT)
			ret |= MSIX_PO_INT;
	}

	if (pkts_sent & CN23XX_INTR_PI_INT)
		/* We will clear the count when we update the read_index. */
		ret |= MSIX_PI_INT;

	/* We never need to handle the MSI-X mbox intr for the PF; it arrives
	 * on the last MSI-X vector.
	 */
	return ret;
}

static void cn23xx_handle_pf_mbox_intr(struct octeon_device *oct)
{
	struct delayed_work *work;
	u64 mbox_int_val;
	u32 i, q_no;

	mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);

	for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
		q_no = i * oct->sriov_info.rings_per_vf;

		if (mbox_int_val & BIT_ULL(q_no)) {
			writeq(BIT_ULL(q_no),
			       oct->mbox[0]->mbox_int_reg);
			if (octeon_mbox_read(oct->mbox[q_no])) {
				work = &oct->mbox[q_no]->mbox_poll_wk.work;
				schedule_delayed_work(work,
						      msecs_to_jiffies(0));
			}
		}
	}
}

static irqreturn_t cn23xx_interrupt_handler(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
	u64 intr64;

	dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
	intr64 = readq(cn23xx->intr_sum_reg64);

	oct->int_status = 0;

	if (intr64 & CN23XX_INTR_ERR)
		dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
			oct->octeon_id, CVM_CAST64(intr64));

	/* When VFs write into the MBOX_SIG2 reg, this intr is set in the PF */
	if (intr64 & CN23XX_INTR_VF_MBOX)
		cn23xx_handle_pf_mbox_intr(oct);

	if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
		if (intr64 & CN23XX_INTR_PKT_DATA)
			oct->int_status |= OCT_DEV_INTR_PKT_DATA;
	}

	if (intr64 & (CN23XX_INTR_DMA0_FORCE))
		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
	if (intr64 & (CN23XX_INTR_DMA1_FORCE))
		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	writeq(intr64, cn23xx->intr_sum_reg64);

	return IRQ_HANDLED;
}

static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
				  u32 idx, int valid)
{
	u64 bar1;
	u64 reg_adr;

	if (!valid) {
		reg_adr = lio_pci_readq(
			oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
		WRITE_ONCE(bar1, reg_adr);
		lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
			       CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
		reg_adr = lio_pci_readq(
			oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
		WRITE_ONCE(bar1, reg_adr);
		return;
	}

	/* The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
	 * bits <41:22> of the Core Addr
	 */
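	/* Illustrative (hypothetical) example: a core_addr of 0x10000000 has
	 * (core_addr >> 22) == 0x40, so the register is programmed with
	 * ((0x40 << 4) | PCI_BAR1_MASK) by the write below.
	 */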
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
		       CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));

	WRITE_ONCE(bar1, lio_pci_readq(
		oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
}

static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
{
	lio_pci_writeq(oct, mask,
		       CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}

static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
	return (u32)lio_pci_readq(
		oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}

/* Always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 new_idx;
	u32 last_done;
	u32 pkt_in_done = readl(iq->inst_cnt_reg);

	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	/* Modulo of the new index with the IQ size will give us
	 * the new index. The iq->reset_instr_cnt is always zero for
	 * cn23xx, so no extra adjustments are needed.
	 */
	new_idx = (iq->octeon_read_index +
		   (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
		  iq->max_count;

	return new_idx;
}

static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
	u64 intr_val = 0;

	/* Divide the single write to multiple writes based on the flag. */
	/* Enable Interrupts */
	if (intr_flag == OCTEON_ALL_INTR) {
		writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
	} else if (intr_flag & OCTEON_OUTPUT_INTR) {
		intr_val = readq(cn23xx->intr_enb_reg64);
		intr_val |= CN23XX_INTR_PKT_DATA;
		writeq(intr_val, cn23xx->intr_enb_reg64);
	} else if ((intr_flag & OCTEON_MBOX_INTR) &&
		   (oct->sriov_info.max_vfs > 0)) {
		if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
			intr_val = readq(cn23xx->intr_enb_reg64);
			intr_val |= CN23XX_INTR_VF_MBOX;
			writeq(intr_val, cn23xx->intr_enb_reg64);
		}
	}
}

static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
	u64 intr_val = 0;

	/* Disable Interrupts */
	if (intr_flag == OCTEON_ALL_INTR) {
		writeq(0, cn23xx->intr_enb_reg64);
	} else if (intr_flag & OCTEON_OUTPUT_INTR) {
		intr_val = readq(cn23xx->intr_enb_reg64);
		intr_val &= ~CN23XX_INTR_PKT_DATA;
		writeq(intr_val, cn23xx->intr_enb_reg64);
	} else if ((intr_flag & OCTEON_MBOX_INTR) &&
		   (oct->sriov_info.max_vfs > 0)) {
		if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
			intr_val = readq(cn23xx->intr_enb_reg64);
			intr_val &= ~CN23XX_INTR_VF_MBOX;
			writeq(intr_val, cn23xx->intr_enb_reg64);
		}
	}
}

static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
{
	oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;

	dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
		oct->pcie_port);
}

static int cn23xx_get_pf_num(struct octeon_device *oct)
{
	u32 fdl_bit = 0;
	u64 pkt0_in_ctl, d64;
	int pfnum, mac, trs, ret;

	ret = 0;

	/* Read Function Dependency Link reg to get the function number */
	if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
				  &fdl_bit) == 0) {
		oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
			       CN23XX_PCIE_SRIOV_FDL_MASK);
	} else {
		ret = -EINVAL;

		/* Under some virtual environments, extended PCI regs are
		 * inaccessible, in which case the above read will have failed.
		 * In this case, read the PF number from the
		 * SLI_PKT0_INPUT_CONTROL reg (written by f/w)
		 */
		pkt0_in_ctl =
			octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0));
		pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
			CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
		mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;

		/* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
		d64 = octeon_read_csr64(oct,
					CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
		trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
		if (trs == 1) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: error reading PCI cfg space pfnum, re-read %u\n",
				pfnum);
			oct->pf_num = pfnum;
			ret = 0;
		} else {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n");
		}
	}

	return ret;
}

static void cn23xx_setup_reg_address(struct octeon_device *oct)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

	oct->reg_list.pci_win_wr_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
	oct->reg_list.pci_win_wr_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
	oct->reg_list.pci_win_wr_addr =
		(u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);

	oct->reg_list.pci_win_rd_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
	oct->reg_list.pci_win_rd_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
	oct->reg_list.pci_win_rd_addr =
		(u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);

	oct->reg_list.pci_win_wr_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
	oct->reg_list.pci_win_wr_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
	oct->reg_list.pci_win_wr_data =
		(u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);

	oct->reg_list.pci_win_rd_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
	oct->reg_list.pci_win_rd_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
	oct->reg_list.pci_win_rd_data =
		(u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);

	cn23xx_get_pcie_qlmport(oct);

	cn23xx->intr_mask64 = CN23XX_INTR_MASK;
	if (!oct->msix_on)
		cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
	if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
		cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;

	cn23xx->intr_sum_reg64 =
		bar0_pciaddr +
		CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
	cn23xx->intr_enb_reg64 =
		bar0_pciaddr +
		CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
}

int cn23xx_sriov_config(struct octeon_device *oct)
{
	struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
	u32 max_rings, total_rings, max_vfs, rings_per_vf;
	u32 pf_srn, num_pf_rings;
	u32 max_possible_vfs;

	cn23xx->conf =
		(struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
	switch (oct->rev_id) {
	case OCTEON_CN23XX_REV_1_0:
		max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
		max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_0;
		break;
	case OCTEON_CN23XX_REV_1_1:
		max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
		max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_1;
		break;
	default:
		max_rings = CN23XX_MAX_RINGS_PER_PF;
		max_possible_vfs = CN23XX_MAX_VFS_PER_PF;
		break;
	}

	if (oct->sriov_info.num_pf_rings)
		num_pf_rings = oct->sriov_info.num_pf_rings;
	else
		num_pf_rings = num_present_cpus();

#ifdef CONFIG_PCI_IOV
	max_vfs = min_t(u32,
			(max_rings - num_pf_rings), max_possible_vfs);
	rings_per_vf = 1;
#else
	max_vfs = 0;
	rings_per_vf = 0;
#endif

	total_rings = num_pf_rings + max_vfs;

	/* the first ring of the pf */
	pf_srn = total_rings - num_pf_rings;
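	/* With rings_per_vf == 1 the VF rings occupy 0..max_vfs-1 and the PF
	 * rings start right after them; e.g. (hypothetically) 8 PF rings and
	 * 56 VFs give total_rings = 64 and pf_srn = 56.
	 */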

	oct->sriov_info.trs = total_rings;
	oct->sriov_info.max_vfs = max_vfs;
	oct->sriov_info.rings_per_vf = rings_per_vf;
	oct->sriov_info.pf_srn = pf_srn;
	oct->sriov_info.num_pf_rings = num_pf_rings;
	dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n",
		   oct->sriov_info.trs, oct->sriov_info.max_vfs,
		   oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn,
		   oct->sriov_info.num_pf_rings);

	oct->sriov_info.sriov_enabled = 0;

	return 0;
}

int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
{
	u32 data32;
	u64 BAR0, BAR1;
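	/* BAR0 and BAR1 are 64-bit BARs, so each spans two consecutive config
	 * space dwords: mask off the low flag bits of the first dword and OR
	 * in the second dword shifted up by 32.
	 */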

	pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32);
	BAR0 = (u64)(data32 & ~0xf);
	pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32);
	BAR0 |= ((u64)data32 << 32);
	pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32);
	BAR1 = (u64)(data32 & ~0xf);
	pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32);
	BAR1 |= ((u64)data32 << 32);

	if (!BAR0 || !BAR1) {
		if (!BAR0)
			dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n");
		if (!BAR1)
			dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n");
		return 1;
	}

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	if (cn23xx_get_pf_num(oct) != 0)
		return 1;

	if (cn23xx_sriov_config(oct)) {
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);

	oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
	oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox;
	oct->fn_list.free_mbox = cn23xx_free_pf_mbox;

	oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
	oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;

	oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
	oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
	oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;

	oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;

	oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
	oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;

	oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
	oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;

	cn23xx_setup_reg_address(oct);

	oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);

	return 0;
}
EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);

int validate_cn23xx_pf_config_info(struct octeon_device *oct,
				   struct octeon_config *conf23xx)
{
	if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_IQ_MAX_Q(conf23xx),
			CN23XX_MAX_INPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_OQ_MAX_Q(conf23xx),
			CN23XX_MAX_OUTPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
	    CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
			__func__);
		return 1;
	}

	if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
			__func__);
		return 1;
	}

	if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
			__func__);
		return 1;
	}

	return 0;
}

int cn23xx_fw_loaded(struct octeon_device *oct)
{
	u64 val;

	/* If there's more than one active PF on this NIC, then that
	 * implies that the NIC firmware is loaded and running. This check
	 * prevents a rare false negative that might occur if we only relied
	 * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
	 * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
	 * though the firmware was already loaded but still booting and has yet
	 * to set SCR2_BIT_FW_LOADED.
	 */
	if (atomic_read(oct->adapter_refcount) > 1)
		return 1;

	val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
	return (val >> SCR2_BIT_FW_LOADED) & 1ULL;
}
EXPORT_SYMBOL_GPL(cn23xx_fw_loaded);

void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
					u8 *mac)
{
	if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) {
		struct octeon_mbox_cmd mbox_cmd;

		mbox_cmd.msg.u64 = 0;
		mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
		mbox_cmd.msg.s.resp_needed = 0;
		mbox_cmd.msg.s.cmd = OCTEON_PF_CHANGED_VF_MACADDR;
		mbox_cmd.msg.s.len = 1;
		mbox_cmd.recv_len = 0;
		mbox_cmd.recv_status = 0;
		mbox_cmd.fn = NULL;
		mbox_cmd.fn_arg = NULL;
		ether_addr_copy(mbox_cmd.msg.s.params, mac);
		mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
		octeon_mbox_write(oct, &mbox_cmd);
	}
}
EXPORT_SYMBOL_GPL(cn23xx_tell_vf_its_macaddr_changed);

static void
cn23xx_get_vf_stats_callback(struct octeon_device *oct,
			     struct octeon_mbox_cmd *cmd, void *arg)
{
	struct oct_vf_stats_ctx *ctx = arg;

	memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats));
	atomic_set(&ctx->status, 1);
}

int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
			struct oct_vf_stats *stats)
{
	u32 timeout = HZ; // 1sec
	struct octeon_mbox_cmd mbox_cmd;
	struct oct_vf_stats_ctx ctx;
	u32 count = 0, ret;

	if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx)))
		return -1;

	if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data))
		return -1;

	mbox_cmd.msg.u64 = 0;
	mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
	mbox_cmd.msg.s.resp_needed = 1;
	mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS;
	mbox_cmd.msg.s.len = 1;
	mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
	mbox_cmd.recv_len = 0;
	mbox_cmd.recv_status = 0;
	mbox_cmd.fn = cn23xx_get_vf_stats_callback;
	ctx.stats = stats;
	atomic_set(&ctx.status, 0);
	mbox_cmd.fn_arg = (void *)&ctx;
	memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data));
	octeon_mbox_write(oct, &mbox_cmd);

	do {
		schedule_timeout_uninterruptible(1);
	} while ((atomic_read(&ctx.status) == 0) && (count++ < timeout));

	ret = atomic_read(&ctx.status);
	if (ret == 0) {
		octeon_mbox_cancel(oct, 0);
		dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timedout\n",
			vfidx);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cn23xx_get_vf_stats);