// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */
7
8 #include "otx2_common.h"
9 #include "otx2_reg.h"
10 #include "otx2_struct.h"
11 #include "cn10k.h"
12
/* CN20K-specific mailbox interrupt handlers. cn20k_init() installs this
 * ops table on pfvf->hw_ops so common code dispatches to the CN20K
 * register layout (RVU_MBOX_PF_*) instead of the CN10K/OTx2 one.
 */
static struct dev_hw_ops cn20k_hw_ops = {
	.pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
	.vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
	.pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
};
18
/* Select the CN20K hardware ops for this PF/VF instance. Must run before
 * any of the mbox interrupt handlers referenced by cn20k_hw_ops can fire.
 */
void cn20k_init(struct otx2_nic *pfvf)
{
	pfvf->hw_ops = &cn20k_hw_ops;
}
EXPORT_SYMBOL(cn20k_init);
/* CN20K mbox AF => PFx irq handler */
irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = pf_irq;
	struct mbox *mw = &pf->mbox;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 pf_trig_val;

	/* Only the two low bits of RVU_PF_INT matter here:
	 * bit 0 - UP message from AF, bit 1 - reply to a PF->AF request.
	 */
	pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL;

	/* Clear the IRQ */
	otx2_write64(pf, RVU_PF_INT, pf_trig_val);

	if (pf_trig_val & BIT_ULL(0)) {
		/* AF-initiated (UP) message: sync the bounce buffer and,
		 * if anything actually arrived, defer to the UP worker.
		 */
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(pf->mbox_wq, &mw->mbox_up_wrk);

		trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
					 BIT_ULL(0));
	}

	if (pf_trig_val & BIT_ULL(1)) {
		/* Reply (DOWN) to an earlier PF->AF request: hand off to
		 * the regular mbox worker.
		 */
		mbox = &mw->mbox;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(pf->mbox_wq, &mw->mbox_wrk);
		trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
					 BIT_ULL(1));
	}

	return IRQ_HANDLED;
}
66
/* CN20K mbox PF0 => VFx irq handler: services both DOWN replies and UP
 * notifications arriving from the PF on the VF's mailbox channel.
 */
irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 vf_trig_val;

	/* bit 0 - UP message from PF, bit 1 - reply to a VF->PF request */
	vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL;
	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, vf_trig_val);

	/* Read latest mbox data */
	smp_rmb();

	if (vf_trig_val & BIT_ULL(1)) {
		/* Check for PF => VF response messages */
		mbox = &vf->mbox.mbox;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);

		trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF",
					 BIT_ULL(1));
	}

	if (vf_trig_val & BIT_ULL(0)) {
		/* Check for PF => VF notification messages */
		mbox = &vf->mbox.mbox_up;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);

		trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF",
					 BIT_ULL(0));
	}

	return IRQ_HANDLED;
}
112
/* Enable PF <=> VF mailbox interrupts for the first 'numvfs' VFs.
 * VFs 0-63 are covered by the INTX(0) registers, VFs 64+ by INTX(1);
 * the VFPF and VFPF1 register banks are programmed identically
 * (presumably one bank per mbox direction/channel - confirm vs HW spec).
 */
void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	/* Clear PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);

	/* Enable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs));
	if (numvfs > 64) {
		/* Second interrupt register pair covers VFs 64 and above */
		numvfs -= 64;
		otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
		otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
	}
}
132
/* Mask and acknowledge all PF <=> VF mailbox interrupts, then release
 * the four mbox IRQ vectors registered by cn20k_register_pfvf_mbox_intr().
 * NOTE(review): the free_irq() loop always walks all four vectors while
 * registration also requests all four regardless of numvfs, so the two
 * stay symmetric - but this relies on registration having fully succeeded.
 */
void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	int vector, intr_vec, vec = 0;

	/* Disable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull);

	/* Ack any pending interrupt for VFs 0-63 */
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);

	if (numvfs > 64) {
		/* ...and for VFs 64 and above */
		otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
		otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
	}

	for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
			RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
		vector = pci_irq_vector(pf->pdev, intr_vec);
		free_irq(vector, pf->hw.pfvf_irq_devid[vec]);
	}
}
157
cn20k_pfvf_mbox_intr_handler(int irq,void * pf_irq)158 irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq)
159 {
160 struct pf_irq_data *irq_data = pf_irq;
161 struct otx2_nic *pf = irq_data->pf;
162 struct mbox *mbox;
163 u64 intr;
164
165 /* Sync with mbox memory region */
166 rmb();
167
168 /* Clear interrupts */
169 intr = otx2_read64(pf, irq_data->intr_status);
170 otx2_write64(pf, irq_data->intr_status, intr);
171 mbox = pf->mbox_pfvf;
172
173 if (intr)
174 trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr);
175
176 irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start,
177 irq_data->mdevs, intr);
178
179 return IRQ_HANDLED;
180 }
181
cn20k_register_pfvf_mbox_intr(struct otx2_nic * pf,int numvfs)182 int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
183 {
184 struct otx2_hw *hw = &pf->hw;
185 struct pf_irq_data *irq_data;
186 int intr_vec, ret, vec = 0;
187 char *irq_name;
188
189 /* irq data for 4 PF intr vectors */
190 irq_data = devm_kcalloc(pf->dev, 4,
191 sizeof(struct pf_irq_data), GFP_KERNEL);
192 if (!irq_data)
193 return -ENOMEM;
194
195 for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
196 RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
197 switch (intr_vec) {
198 case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
199 irq_data[vec].intr_status =
200 RVU_MBOX_PF_VFPF_INTX(0);
201 irq_data[vec].start = 0;
202 irq_data[vec].mdevs = 64;
203 break;
204 case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
205 irq_data[vec].intr_status =
206 RVU_MBOX_PF_VFPF_INTX(1);
207 irq_data[vec].start = 64;
208 irq_data[vec].mdevs = 96;
209 break;
210 case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
211 irq_data[vec].intr_status =
212 RVU_MBOX_PF_VFPF1_INTX(0);
213 irq_data[vec].start = 0;
214 irq_data[vec].mdevs = 64;
215 break;
216 case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
217 irq_data[vec].intr_status =
218 RVU_MBOX_PF_VFPF1_INTX(1);
219 irq_data[vec].start = 64;
220 irq_data[vec].mdevs = 96;
221 break;
222 }
223 irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work;
224 irq_data[vec].vec_num = intr_vec;
225 irq_data[vec].pf = pf;
226
227 /* Register mailbox interrupt handler */
228 irq_name = &hw->irq_name[intr_vec * NAME_SIZE];
229 if (pf->pcifunc)
230 snprintf(irq_name, NAME_SIZE,
231 "RVUPF%d_VF%d Mbox%d", rvu_get_pf(pf->pdev,
232 pf->pcifunc), vec / 2, vec % 2);
233 else
234 snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d",
235 vec / 2, vec % 2);
236
237 hw->pfvf_irq_devid[vec] = &irq_data[vec];
238 ret = request_irq(pci_irq_vector(pf->pdev, intr_vec),
239 pf->hw_ops->pfvf_mbox_intr_handler, 0,
240 irq_name,
241 &irq_data[vec]);
242 if (ret) {
243 dev_err(pf->dev,
244 "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
245 return ret;
246 }
247 }
248
249 cn20k_enable_pfvf_mbox_intr(pf, numvfs);
250
251 return 0;
252 }
253