// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "octep_config.h"
#include "octep_main.h"
#include "octep_regs_cn9k_pf.h"

#define CTRL_MBOX_MAX_PF	128
#define CTRL_MBOX_SZ		((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
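/* Each PF's slice of the 4 MB control mailbox region:
 * 0x400000 / 128 = 32 KB per PF.
 */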

/* Names of Hardware non-queue generic interrupts */
static char *cn93_non_ioq_msix_names[] = {
	"epf_ire_rint",
	"epf_ore_rint",
	"epf_vfire_rint0",
	"epf_vfire_rint1",
	"epf_vfore_rint0",
	"epf_vfore_rint1",
	"epf_mbox_rint0",
	"epf_mbox_rint1",
	"epf_oei_rint",
	"epf_dma_rint",
	"epf_dma_vf_rint0",
	"epf_dma_vf_rint1",
	"epf_pp_vf_rint0",
	"epf_pp_vf_rint1",
	"epf_misc_rint",
	"epf_rsvd",
};
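/* One name per non-IOQ MSI-X vector, consumed via
 * conf->msix_cfg.non_ioq_msix_names below; the order presumably matches
 * the hardware's fixed non-IOQ vector layout.
 */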

/* Dump useful hardware CSRs for debug purposes */
static void cn93_dump_regs(struct octep_device *oct, int qno)
{
	struct device *dev = &oct->pdev->dev;

	dev_info(dev, "IQ-%d register dump\n", qno);
	dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_INSTR_DBELL(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
	dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_CONTROL(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
	dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_ENABLE(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
	dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_INSTR_BADDR(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
	dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_INSTR_RSIZE(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno)));
	dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_CNTS(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno)));
	dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_INT_LEVELS(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno)));
	dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_PKT_CNT(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno)));
	dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_IN_BYTE_CNT(qno),
		 octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno)));

	dev_info(dev, "OQ-%d register dump\n", qno);
	dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_SLIST_DBELL(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno)));
	dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_CONTROL(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno)));
	dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_ENABLE(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno)));
	dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_SLIST_BADDR(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno)));
	dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno)));
	dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_CNTS(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno)));
	dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_INT_LEVELS(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno)));
	dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_PKT_CNT(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno)));
	dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_OUT_BYTE_CNT(qno),
		 octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno)));
	dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
		 qno, CN93_SDP_R_ERR_TYPE(qno),
		 octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno)));
}

/* Reset Hardware Tx queue */
static int cn93_reset_iq(struct octep_device *oct, int q_no)
{
	struct octep_config *conf = oct->conf;
	u64 val = 0ULL;

	dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);

	/* Get absolute queue number */
	q_no += conf->pf_ring_cfg.srn;

	/* Disable the Tx/Instruction Ring */
	octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);

	/* clear the Instruction Ring packet/byte counts and doorbell CSRs */
	octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);
	octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);
	octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);
	octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);
	octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);
	octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val);

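	/* Writing all-ones to the doorbell CSR flushes any pending doorbell
	 * count (the register is presumably clear-on-write-ones).
	 */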
	val = 0xFFFFFFFF;
	octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val);

	return 0;
}

/* Reset Hardware Rx queue */
static void cn93_reset_oq(struct octep_device *oct, int q_no)
{
	u64 val = 0ULL;

	q_no += CFG_GET_PORTS_PF_SRN(oct->conf);

	/* Disable Output (Rx) Ring */
	octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val);

	/* Clear count CSRs */
	val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no));
	octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val);

	octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
	octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
}

/* Reset all hardware Tx/Rx queues */
static void octep_reset_io_queues_cn93_pf(struct octep_device *oct)
{
	struct pci_dev *pdev = oct->pdev;
	int q;

	dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n");

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		cn93_reset_iq(oct, q);
		cn93_reset_oq(oct, q);
	}
}

/* Initialize windowed addresses to access some hardware registers */
static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;

	oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64);
	oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64);
	oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64);
	oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64);
}
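/* A windowed access (see OCTEP_PCI_WIN_WRITE in the soft reset below)
 * presumably works in two steps: the target 64-bit register address is
 * written to pci_win_wr_addr, then the data to pci_win_wr_data; reads
 * mirror this via the rd_addr/rd_data pair. This lets the PF reach
 * registers that are not directly mapped through BAR0.
 */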

/* Configure Hardware mapping: inform hardware which rings belong to PF. */
static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct)
{
	struct octep_config *conf = oct->conf;
	struct pci_dev *pdev = oct->pdev;
	u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
	int q;

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
		u64 regval = 0;

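		/* Select the endpoint function that owns this ring; the
		 * value 8 written for a non-zero PCIe port is assumed here
		 * to encode the EPF of the second MAC in SDP_FUNC_SEL.
		 */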
		if (oct->pcie_port)
			regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS;

		octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval);

		regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q));
		dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
			CN93_SDP_EPVF_RING(pf_srn + q), regval);
	}
}

/* Initialize configuration limits and initial active config for 93xx PF. */
static void octep_init_config_cn93_pf(struct octep_device *oct)
{
	struct octep_config *conf = oct->conf;
	struct pci_dev *pdev = oct->pdev;
	u8 link = 0;
	u64 val;
	int pos;

	/* Read ring configuration:
	 * PF ring count, number of VFs and rings per VF supported
	 */
	val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
	conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val);
	conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
	conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val);
	conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
	conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);

	val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
	conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
	conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
	conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
	dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
		 conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
		 conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);

	conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
	conf->iq.instr_type = OCTEP_64BYTE_INSTR;
	conf->iq.pkind = 0;
	conf->iq.db_min = OCTEP_DB_MIN;
	conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;

	conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
	conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
	conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
	conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
	conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;

	conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR;
	conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
	conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names;

	pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_byte(oct->pdev,
				     pos + PCI_SRIOV_FUNC_LINK,
				     &link);
		link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
	}
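	/* The control mailbox lives in BAR4-mapped memory; each PF gets a
	 * CTRL_MBOX_SZ slice, indexed by the function number derived above
	 * from the SR-IOV Function Dependency Link byte.
	 */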
	conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
					   CN93_PEM_BAR4_INDEX_OFFSET +
					   (link * CTRL_MBOX_SZ);

	conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL;
	conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT;
}

/* Setup registers for a hardware Tx Queue */
static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
{
	struct octep_iq *iq = oct->iq[iq_no];
	u32 reset_instr_cnt;
	u64 reg_val;

	iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
	reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));

	/* Wait for IDLE to be set to 1 */
	if (!(reg_val & CN93_R_IN_CTL_IDLE)) {
		do {
			reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
		} while (!(reg_val & CN93_R_IN_CTL_IDLE));
	}

	reg_val |= CN93_R_IN_CTL_RDSIZE;
	reg_val |= CN93_R_IN_CTL_IS_64B;
	reg_val |= CN93_R_IN_CTL_ESR;
	octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val);

	/* Write the start of the input queue's ring and its size */
	octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no),
			  iq->desc_ring_dma);
	octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no),
			  iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr +
			   CN93_SDP_R_IN_INSTR_DBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr +
			   CN93_SDP_R_IN_CNTS(iq_no);
	iq->intr_lvl_reg = oct->mmio[0].hw_addr +
			   CN93_SDP_R_IN_INT_LEVELS(iq_no);

	/* Store the current instruction counter (used in flush_iq calculation) */
	reset_instr_cnt = readl(iq->inst_cnt_reg);
	writel(reset_instr_cnt, iq->inst_cnt_reg);
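	/* Writing the just-read value back is assumed to zero the hardware
	 * count (SDP count CSRs subtract the written value), so flush_iq
	 * starts from a clean baseline.
	 */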

	/* INTR_THRESHOLD is set to max (0xFFFFFFFF) to disable the INTR */
	reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
	octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
}

/* Setup registers for a hardware Rx Queue */
static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
{
	u64 reg_val;
	u64 oq_ctl = 0ULL;
	u32 time_threshold = 0;
	struct octep_oq *oq = oct->oq[oq_no];

	oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
	reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));

	/* Wait for IDLE to be set to 1 */
	if (!(reg_val & CN93_R_OUT_CTL_IDLE)) {
		do {
			reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
		} while (!(reg_val & CN93_R_OUT_CTL_IDLE));
	}

	reg_val &= ~(CN93_R_OUT_CTL_IMODE);
	reg_val &= ~(CN93_R_OUT_CTL_ROR_P);
	reg_val &= ~(CN93_R_OUT_CTL_NSR_P);
	reg_val &= ~(CN93_R_OUT_CTL_ROR_I);
	reg_val &= ~(CN93_R_OUT_CTL_NSR_I);
	reg_val &= ~(CN93_R_OUT_CTL_ES_I);
	reg_val &= ~(CN93_R_OUT_CTL_ROR_D);
	reg_val &= ~(CN93_R_OUT_CTL_NSR_D);
	reg_val &= ~(CN93_R_OUT_CTL_ES_D);
	reg_val |= (CN93_R_OUT_CTL_ES_P);
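	/* The cleared bits presumably disable info-pointer mode (IMODE) and
	 * the relaxed-ordering (ROR_*), no-snoop (NSR_*) and endian-swap
	 * (ES_*) attributes for pointer/info/data accesses; ES_P keeps
	 * endian swap enabled for buffer-pointer fetches only.
	 */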

	octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val);
	octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no),
			  oq->desc_ring_dma);
	octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no),
			  oq->max_count);

	oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
	oq_ctl &= ~0x7fffffULL;	/* clear the ISIZE and BSIZE (22-0) */
	oq_ctl |= (oq->buffer_size & 0xffff);	/* populate the BSIZE (15-0) */
	octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl);

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no);
	oq->pkts_credit_reg = oct->mmio[0].hw_addr +
			      CN93_SDP_R_OUT_SLIST_DBELL(oq_no);

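	/* R[i]_OUT_INT_LEVELS packs the time threshold into bits 63:32 and
	 * the packet-count threshold into bits 31:0.
	 */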
	time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
	reg_val = ((u64)time_threshold << 32) |
		  CFG_GET_OQ_INTR_PKT(oct->conf);
	octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
}

/* Setup registers for a PF mailbox */
static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
{
	struct octep_mbox *mbox = oct->mbox[q_no];

	mbox->q_no = q_no;

	/* PF mbox interrupt reg */
	mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);

	/* PF to VF DATA reg. PF writes into this reg */
	mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);

	/* VF to PF DATA reg. PF reads from this reg */
	mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
}

/* Poll OEI events like heartbeat */
static void octep_poll_oei_cn93_pf(struct octep_device *oct)
{
	u64 reg;

	reg = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
	if (reg) {
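		/* The RINT CSR is presumably write-one-to-clear: writing the
		 * value back acknowledges exactly the events just observed.
		 */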
		octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg);
		if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
			queue_work(octep_wq, &oct->ctrl_mbox_task);
		else if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
			atomic_set(&oct->hb_miss_cnt, 0);
	}
}

/* OEI interrupt handler */
static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;

	octep_poll_oei_cn93_pf(oct);
	return IRQ_HANDLED;
}

/* Process non-IOQ interrupts required to keep the PF interface running.
 * OEI_RINT is needed for the control mailbox.
 */
static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
{
	octep_poll_oei_cn93_pf(oct);
}

/* Interrupt handler for input ring error interrupts. */
static irqreturn_t octep_ire_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;
	int i = 0;

	/* Check for IRERR INTR */
	reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT);
	if (reg_val) {
		dev_info(&pdev->dev,
			 "Received IRERR_RINT intr: 0x%llx\n", reg_val);
		octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val);

		for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
			reg_val = octep_read_csr64(oct,
						   CN93_SDP_R_ERR_TYPE(i));
			if (reg_val) {
				dev_info(&pdev->dev,
					 "Received err type on IQ-%d: 0x%llx\n",
					 i, reg_val);
				octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
						  reg_val);
			}
		}
	}
	return IRQ_HANDLED;
}

/* Interrupt handler for output ring error interrupts. */
static irqreturn_t octep_ore_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;
	int i = 0;

	/* Check for ORERR INTR */
	reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
	if (reg_val) {
		dev_info(&pdev->dev,
			 "Received ORERR_RINT intr: 0x%llx\n", reg_val);
		octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val);
		for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
			reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i));
			if (reg_val) {
				dev_info(&pdev->dev,
					 "Received err type on OQ-%d: 0x%llx\n",
					 i, reg_val);
				octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
						  reg_val);
			}
		}
	}
	return IRQ_HANDLED;
}

/* Interrupt handler for VF input ring error interrupts. */
static irqreturn_t octep_vfire_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;

	/* Check for VFIRE INTR */
	reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
	if (reg_val) {
		dev_info(&pdev->dev,
			 "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
		octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
	}
	return IRQ_HANDLED;
}

/* Interrupt handler for VF output ring error interrupts. */
static irqreturn_t octep_vfore_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;

	/* Check for VFORE INTR */
	reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
	if (reg_val) {
		dev_info(&pdev->dev,
			 "Received VFORE_RINT intr: 0x%llx\n", reg_val);
		octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
	}
	return IRQ_HANDLED;
}

/* Interrupt handler for DPI DMA related interrupts. */
static irqreturn_t octep_dma_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	u64 reg_val = 0;

	/* Check for DMA INTR */
	reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
	if (reg_val)
		octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
	return IRQ_HANDLED;
}

/* Interrupt handler for DPI DMA transaction error interrupts for VFs */
static irqreturn_t octep_dma_vf_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;

	/* Check for DMA VF INTR */
	reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
	if (reg_val) {
		dev_info(&pdev->dev,
			 "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
		octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
	}
	return IRQ_HANDLED;
}

/* Interrupt handler for PP transaction error interrupts for VFs */
static irqreturn_t octep_pp_vf_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;

	/* Check for PPVF INTR */
	reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
	if (reg_val) {
		dev_info(&pdev->dev,
			 "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
		octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
	}
	return IRQ_HANDLED;
}

/* Interrupt handler for MAC related interrupts. */
static irqreturn_t octep_misc_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;

	/* Check for MISC INTR */
	reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
	if (reg_val) {
		dev_info(&pdev->dev,
			 "Received MISC_RINT intr: 0x%llx\n", reg_val);
		octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
	}
	return IRQ_HANDLED;
}

/* Interrupt handler for all reserved interrupts. */
static irqreturn_t octep_rsvd_intr_handler_cn93_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;

	dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
	return IRQ_HANDLED;
}

/* Tx/Rx queue interrupt handler */
static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
{
	struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
	struct octep_oq *oq = vector->oq;

	napi_schedule_irqoff(oq->napi);
	return IRQ_HANDLED;
}

/* soft reset of 93xx */
static int octep_soft_reset_cn93_pf(struct octep_device *oct)
{
	dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n");

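	/* Enable all byte lanes (mask 0xFF) for the 64-bit windowed writes
	 * that follow; this behavior is assumed from the register name.
	 */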
	octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);

	/* Set core domain reset bit */
	OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
	/* Wait for 100ms as Octeon resets. */
	mdelay(100);
	/* clear core domain reset bit */
	OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1);

	return 0;
}

/* Re-initialize Octeon hardware registers */
static void octep_reinit_regs_cn93_pf(struct octep_device *oct)
{
	u32 i;

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		oct->hw_ops.setup_iq_regs(oct, i);

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		oct->hw_ops.setup_oq_regs(oct, i);

	oct->hw_ops.enable_interrupts(oct);
	oct->hw_ops.enable_io_queues(oct);

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
}

/* Enable all interrupts */
static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
{
	u64 intr_mask = 0ULL;
	int srn, num_rings, i;

	srn = CFG_GET_PORTS_PF_SRN(oct->conf);
	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);

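	/* Build a mask with one bit per ring owned by this PF:
	 * rings srn .. srn + num_rings - 1.
	 */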
	for (i = 0; i < num_rings; i++)
		intr_mask |= (0x1ULL << (srn + i));

	octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
	octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
	octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);

	octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL);
	octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL);

	octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
	octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);

	octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
	octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
}

/* Disable all interrupts */
static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
{
	u64 intr_mask = 0ULL;
	int srn, num_rings, i;

	srn = CFG_GET_PORTS_PF_SRN(oct->conf);
	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);

	for (i = 0; i < num_rings; i++)
		intr_mask |= (0x1ULL << (srn + i));

	octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
	octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
	octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);

	octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL);
	octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL);

	octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
	octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);

	octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
	octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
}

/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq)
{
	u32 pkt_in_done = readl(iq->inst_cnt_reg);
	u32 last_done, new_idx;

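	/* u32 arithmetic keeps the delta correct even when the hardware
	 * counter wraps around 2^32.
	 */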
	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	new_idx = (iq->octep_read_index + last_done) % iq->max_count;

	return new_idx;
}

/* Enable a hardware Tx Queue */
static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no)
{
	u64 loop = HZ;
	u64 reg_val;

	iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);

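	/* Writing all-ones to the doorbell is presumably a special "clear"
	 * value; poll (up to roughly HZ one-jiffy sleeps) until hardware
	 * reports the doorbell count has drained to zero.
	 */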
	octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);

	while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) &&
	       loop--) {
		schedule_timeout_interruptible(1);
	}

	reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no));
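	/* Bit 62 of R[i]_IN_INT_LEVELS is assumed to be the per-ring
	 * interrupt-enable bit (the same bit is set for OQs below).
	 */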
	reg_val |= (0x1ULL << 62);
	octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);

	reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
	reg_val |= 0x1ULL;
	octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
}

/* Enable a hardware Rx Queue */
static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no)
{
	u64 reg_val = 0ULL;

	oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);

	reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no));
	reg_val |= (0x1ULL << 62);
	octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);

	octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);

	reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
	reg_val |= 0x1ULL;
	octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
}

/* Enable all hardware Tx/Rx Queues assigned to PF */
static void octep_enable_io_queues_cn93_pf(struct octep_device *oct)
{
	u8 q;

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		octep_enable_iq_cn93_pf(oct, q);
		octep_enable_oq_cn93_pf(oct, q);
	}
}

/* Disable a hardware Tx Queue assigned to PF */
static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no)
{
	u64 reg_val = 0ULL;

	iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);

	reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
	reg_val &= ~0x1ULL;
	octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
}

/* Disable a hardware Rx Queue assigned to PF */
static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no)
{
	u64 reg_val = 0ULL;

	oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
	reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
	reg_val &= ~0x1ULL;
	octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
}

/* Disable all hardware Tx/Rx Queues assigned to PF */
static void octep_disable_io_queues_cn93_pf(struct octep_device *oct)
{
	int q = 0;

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		octep_disable_iq_cn93_pf(oct, q);
		octep_disable_oq_cn93_pf(oct, q);
	}
}

/* Dump hardware registers (including Tx/Rx queues) for debugging. */
static void octep_dump_registers_cn93_pf(struct octep_device *oct)
{
	u8 srn, num_rings, q;

	srn = CFG_GET_PORTS_PF_SRN(oct->conf);
	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);

	for (q = srn; q < srn + num_rings; q++)
		cn93_dump_regs(oct, q);
}

/**
 * octep_device_setup_cn93_pf() - Setup Octeon device.
 *
 * @oct: Octeon device private data structure.
 *
 * - initialize hardware operations.
 * - get target side pcie port number for the device.
 * - setup window access to hardware registers.
 * - set initial configuration and max limits.
 * - setup hardware mapping of rings to the PF device.
 */
void octep_device_setup_cn93_pf(struct octep_device *oct)
{
	oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf;
	oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
	oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;

	oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf;
	oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf;
	oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf;
	oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cn93_pf;
	oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cn93_pf;
	oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cn93_pf;
	oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cn93_pf;
	oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cn93_pf;
	oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf;
	oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf;
	oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
	oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
	oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;

	oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
	oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf;
	oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cn93_pf;

	oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf;

	oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf;
	oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf;
	oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf;

	oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf;
	oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf;
	oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf;
	oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf;

	oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf;

	octep_setup_pci_window_regs_cn93_pf(oct);

	oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff;
	dev_info(&oct->pdev->dev,
		 "Octeon device using PCIE Port %d\n", oct->pcie_port);

	octep_init_config_cn93_pf(oct);
	octep_configure_ring_mapping_cn93_pf(oct);
}