// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "octep_vf_config.h"
#include "octep_vf_main.h"
#include "octep_vf_regs_cn9k.h"

/* Dump useful hardware IQ/OQ CSRs for debug purpose */
static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
{
	struct device *dev = &oct->pdev->dev;

	dev_info(dev, "IQ-%d register dump\n", qno);
	dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno)));
	dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_CONTROL(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno)));
	dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_ENABLE(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno)));
	dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno)));
	dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno)));
	dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_CNTS(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno)));
	dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno)));
	dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_PKT_CNT(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno)));
	dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno)));

	dev_info(dev, "OQ-%d register dump\n", qno);
	dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno)));
	dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_CONTROL(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno)));
	dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_ENABLE(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno)));
	dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno)));
	dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
	dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_CNTS(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno)));
	dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno)));
	dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno)));
	dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
		 qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno),
		 octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno)));
}

/* Reset Hardware Tx queue */
static void cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no)
{
	u64 val = ULL(0);

	dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);

	/* Disable the Tx/Instruction Ring */
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val);

	/* clear the Instruction Ring packet/byte counts and doorbell CSRs */
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);

	val = GENMASK_ULL(31, 0);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val);

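	/*
	 * Clear the residual instruction count: the low 32 bits of
	 * IN_CNTS are assumed write-1-to-clear, so write back the
	 * value just read.
	 */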
	val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no));
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no),
			     val & GENMASK_ULL(31, 0));
}

/* Reset Hardware Rx queue */
static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no)
{
	u64 val = ULL(0);

	/* Disable Output (Rx) Ring */
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val);

	/* Clear count CSRs */
	val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no));
	octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val);

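	/*
	 * Writing all-ones is assumed to clear the 36-bit packet count
	 * and the 32-bit descriptor doorbell count.
	 */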
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
}

/* Reset all hardware Tx/Rx queues */
static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct)
{
	struct pci_dev *pdev = oct->pdev;
	int q;

	dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n");

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		cn93_vf_reset_iq(oct, q);
		cn93_vf_reset_oq(oct, q);
	}
}

/* Initialize configuration limits and initial active config */
static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct)
{
	struct octep_vf_config *conf = oct->conf;
	u64 reg_val;

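	/*
	 * The RPVF (rings per VF) field of R[0]_IN_CONTROL is assumed
	 * to hold the number of ring pairs the PF assigned to this VF.
	 */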
	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0));
	conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) &
				      CN93_VF_R_IN_CTL_RPVF_MASK;
	conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;

	conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
	conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
	conf->iq.db_min = OCTEP_VF_DB_MIN;
	conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;

	conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
	conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
	conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
	conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
	conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;

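	/* One MSI-X vector is provisioned per active Tx/Rx ring pair */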
	conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
}

/* Setup registers for a hardware Tx Queue */
static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
{
	struct octep_vf_iq *iq = oct->iq[iq_no];
	u32 reset_instr_cnt;
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));

	/* wait for the IDLE bit to be set to 1 */
	if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) {
		do {
			reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
		} while (!(reg_val & CN93_VF_R_IN_CTL_IDLE));
	}
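	/*
	 * RDSIZE and IS_64B select the 64-byte instruction format; ESR
	 * is assumed to set the endian-swap attribute for responses.
	 */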
	reg_val |= CN93_VF_R_IN_CTL_RDSIZE;
	reg_val |= CN93_VF_R_IN_CTL_IS_64B;
	reg_val |= CN93_VF_R_IN_CTL_ESR;
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val);

	/* Write the start of the input queue's ring and its size */
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr for this queue */
	iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no);
	iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no);

	/* Store the current instruction counter (used in flush_iq calculation) */
	reset_instr_cnt = readl(iq->inst_cnt_reg);
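	/* The write-back is assumed write-1-to-clear, zeroing the counter */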
	writel(reset_instr_cnt, iq->inst_cnt_reg);

	/* An INTR_THRESHOLD of max (0xFFFFFFFF) disables the interrupt */
	reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
}

/* Setup registers for a hardware Rx Queue */
static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
{
	struct octep_vf_oq *oq = oct->oq[oq_no];
	u32 time_threshold = 0;
	u64 oq_ctl = ULL(0);
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));

	/* wait for the IDLE bit to be set to 1 */
	if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) {
		do {
			reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
		} while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE));
	}

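	/*
	 * Clear the relaxed-ordering (ROR), no-snoop (NSR) and
	 * endian-swap (ES) PCIe attribute bits; the _P/_I/_D suffixes
	 * are assumed to denote packet, info-pointer and data
	 * transactions. Only endian-swap for packet writes is enabled.
	 */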
	reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE);
	reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P);
	reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P);
	reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I);
	reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I);
	reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I);
	reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D);
	reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D);
	reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D);
	reg_val |= (CN93_VF_R_OUT_CTL_ES_P);

	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);

	oq_ctl = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
	oq_ctl &= ~GENMASK_ULL(22, 0);	/* clear the ISIZE and BSIZE (22-0) */
	oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));	/* populate the BSIZE (15-0) */
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no);
	oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no);

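	/*
	 * OUT_INT_LEVELS packs the time threshold into the upper 32
	 * bits and the packet-count threshold into the lower 32 bits.
	 */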
	time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
	reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
}

/* Setup registers for a VF mailbox */
static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no)
{
	struct octep_vf_mbox *mbox = oct->mbox;

	/* PF to VF DATA reg. VF reads from this reg */
	mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no);

	/* VF mbox interrupt reg */
	mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no);

	/* VF to PF DATA reg. VF writes into this reg */
	mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
}

/* Mailbox Interrupt handler */
static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct)
{
	if (oct->mbox)
		schedule_work(&oct->mbox->wk.work);
	else
		dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
}

/* Tx/Rx queue interrupt handler */
static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data)
{
	struct octep_vf_ioq_vector *vector = data;
	struct octep_vf_device *oct;
	struct octep_vf_oq *oq;
	u64 reg_val;

	oct = vector->octep_vf_dev;
	oq = vector->oq;
	/* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
	if (oq->q_no == 0) {
		reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0));
		if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
			cn93_handle_vf_mbox_intr(oct);
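			/* Write the status back; the bit is assumed write-1-to-clear */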
			octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
		}
	}
	napi_schedule_irqoff(oq->napi);
	return IRQ_HANDLED;
}

/* Re-initialize Octeon hardware registers */
static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct)
{
	u32 i;

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		oct->hw_ops.setup_iq_regs(oct, i);

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		oct->hw_ops.setup_oq_regs(oct, i);

	oct->hw_ops.enable_interrupts(oct);
	oct->hw_ops.enable_io_queues(oct);

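	/*
	 * Credit each Rx ring with its full descriptor count so the
	 * hardware can start filling receive buffers.
	 */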
	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
}

/* Enable all interrupts */
static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct)
{
	int num_rings, q;
	u64 reg_val;

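	/*
	 * Bit 62 of the IN/OUT INT_LEVELS CSRs is assumed to be the
	 * per-ring interrupt enable bit.
	 */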
	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
	for (q = 0; q < num_rings; q++) {
		reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
		reg_val |= BIT_ULL_MASK(62);
		octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);

		reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
		reg_val |= BIT_ULL_MASK(62);
		octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
	}
	/* Enable PF to VF mbox interrupt by setting 2nd bit */
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0),
			     CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
}

/* Disable all interrupts */
static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct)
{
	int num_rings, q;
	u64 reg_val;

	/* Disable PF to VF mbox interrupt by clearing 2nd bit */
	if (oct->mbox)
		octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);

	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
	for (q = 0; q < num_rings; q++) {
		reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
		reg_val &= ~BIT_ULL_MASK(62);
		octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);

		reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
		reg_val &= ~BIT_ULL_MASK(62);
		octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
	}
}

/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq)
{
	u32 pkt_in_done = readl(iq->inst_cnt_reg);
	u32 last_done, new_idx;

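	/*
	 * Unsigned 32-bit subtraction gives the number of instructions
	 * completed since the last read, correct even across counter
	 * wrap-around.
	 */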
	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;

	return new_idx;
}

/* Enable a hardware Tx Queue */
static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no)
{
	u64 loop = HZ;
	u64 reg_val;

	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));

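	/*
	 * Wait for the doorbell count to drain to zero, sleeping one
	 * jiffy per poll for at most HZ iterations (about one second).
	 */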
	while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
	       loop--) {
		schedule_timeout_interruptible(1);
	}

	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no));
	reg_val |= BIT_ULL_MASK(62);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);

	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
	reg_val |= ULL(1);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
}

/* Enable a hardware Rx Queue */
static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no)
{
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no));
	reg_val |= BIT_ULL_MASK(62);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);

	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));

	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
	reg_val |= ULL(1);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
}

/* Enable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct)
{
	u8 q;

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		octep_vf_enable_iq_cn93(oct, q);
		octep_vf_enable_oq_cn93(oct, q);
	}
}

/* Disable a hardware Tx Queue assigned to VF */
static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no)
{
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
	reg_val &= ~ULL(1);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
}

/* Disable a hardware Rx Queue assigned to VF */
static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no)
{
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
	reg_val &= ~ULL(1);
	octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
}

/* Disable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct)
{
	int q;

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		octep_vf_disable_iq_cn93(oct, q);
		octep_vf_disable_oq_cn93(oct, q);
	}
}

/* Dump hardware registers (including Tx/Rx queues) for debugging. */
static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct)
{
	u8 num_rings, q;

	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
	for (q = 0; q < num_rings; q++)
		cn93_vf_dump_q_regs(oct, q);
}

/**
 * octep_vf_device_setup_cn93() - Setup Octeon device.
 *
 * @oct: Octeon device private data structure.
 *
 * - initialize hardware operations.
 * - get target side pcie port number for the device.
 * - set initial configuration and max limits.
 */
void octep_vf_device_setup_cn93(struct octep_vf_device *oct)
{
	oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93;
	oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93;
	oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93;

	oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93;
	oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93;

	oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93;
	oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93;

	oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93;

	oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93;
	oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93;
	oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93;

	oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93;
	oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93;
	oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93;
	oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93;

	oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93;
	octep_vf_init_config_cn93_vf(oct);
}