// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "rvu_cptpf"
#define OTX2_CPT_DRV_STRING  "Marvell RVU CPT Physical Function Driver"

#define CPT_UC_RID_CN9K_B0   1
#define CPT_UC_RID_CN10K_A   4
#define CPT_UC_RID_CN10K_B   5

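/*
 * Clear any stale VF-PF mailbox interrupts, then enable the interrupt
 * bits for the configured VFs. Enable register 0 covers VFs 0-63 and
 * register 1 covers VFs 64-127.
 */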
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/* Enable VF interrupts for VFs from 0 to 63 */
	ena_bits = ((num_vfs - 1) % 64);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				 GENMASK_ULL(ena_bits, 0));
	}
}

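/*
 * Mask and acknowledge the VF-PF mailbox interrupts and release the
 * corresponding IRQ vectors (the second one is only requested when
 * more than 64 VFs are enabled).
 */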
static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

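/*
 * Acknowledge and enable the VF FLR (function level reset) and ME
 * (master enable) interrupts for all configured VFs.
 */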
static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	/* Clear FLR interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 INTR_MASK(num_vfs));

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
	/* Clear ME interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
			 INTR_MASK(num_vfs));
	/* Enable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					  int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	/* Disable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(vector, cptpf);
}

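/*
 * Deferred FLR handler: forward a VF_FLR message to the AF so it can
 * detach the VF's resources, then clear the VF's transaction-pending
 * bit and re-enable its FLR interrupt once the AF has responded.
 */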
static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

	vf = flr_work - pf->flr_work;

	mutex_lock(&pf->lock);
	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req) {
		mutex_unlock(&pf->lock);
		return;
	}

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);
	if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* Clear transaction pending register */
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}
	mutex_unlock(&pf->lock);
}

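/*
 * VF FLR interrupt handler: queue deferred FLR work for each VF that
 * signalled a reset, then acknowledge and mask the interrupt until the
 * work handler re-enables it.
 */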
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

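/*
 * VF ME interrupt handler: clear the transaction-pending bit for each
 * VF that signalled master-enable and acknowledge the interrupt.
 */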
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	int reg, vf, num_reg = 1;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

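/*
 * Request the VF-PF mailbox, VF FLR and VF ME IRQs (a second set when
 * more than 64 VFs are enabled) and unmask the interrupts. On failure,
 * unwind any IRQs already requested.
 */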
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	/* Register VF ME interrupt handler */
	ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFME0 irq\n");
		goto free_flr0_irq;
	}

	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for PFVF mbox1 irq\n");
			goto free_me0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
		/* Register VF ME interrupt handler */
		ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFME1 irq\n");
			goto free_flr1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

	return 0;

free_flr1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);
free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_me0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

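/* Allocate the ordered FLR workqueue and one work item per VF. */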
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

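/*
 * Set up the VF-PF mailbox: allocate its handler workqueue, map the
 * mailbox region advertised by the AF (RVU_PF_VF_MBOX_ADDR on CN10K,
 * RVU_PF_VF_BAR4_ADDR otherwise) and initialize per-VF mailbox state.
 */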
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq =
		alloc_ordered_workqueue("cpt_vfpf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
	else
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wqe;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wqe:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

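/*
 * Register and unmask the AF-PF mailbox IRQ, then send a READY message
 * to verify that the AF is responding; if it is not, defer the probe.
 */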
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev,
			 "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

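/*
 * Map the mailbox BAR and initialize both AF-PF mailbox directions:
 * PF->AF requests and AF->PF up-notifications, each with its own work
 * handler.
 */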
static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t offset;
	int err;

	cptpf->afpf_mbox_wq =
		alloc_ordered_workqueue("cpt_afpf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
	if (!cptpf->afpf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		err = -ENOMEM;
		goto error;
	}

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
	if (err)
		goto mbox_cleanup;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
	mutex_init(&cptpf->lock);

	return 0;

mbox_cleanup:
	otx2_mbox_destroy(&cptpf->afpf_mbox);
error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
	otx2_mbox_destroy(&cptpf->afpf_mbox_up);
}

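/*
 * sso_pf_func_ovrd sysfs attribute. Writes take effect only on parts
 * whose PCI revision matches CPT_UC_RID_CN9K_B0; on other parts the
 * value is silently ignored.
 */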
static ssize_t sso_pf_func_ovrd_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
}

static ssize_t sso_pf_func_ovrd_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	u8 sso_pf_func_ovrd;

	if (cptpf->pdev->revision != CPT_UC_RID_CN9K_B0)
		return count;

	if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
		return -EINVAL;

	cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;

	return count;
}

static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

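/*
 * kvf_limits sysfs attribute: caps the number of CPT LFs available to
 * the kernel crypto VF; valid values are 1..num_online_cpus(). Usage
 * sketch (the device address is illustrative):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:02:00.0/kvf_limits
 */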
static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;
	int ret;

	ret = kstrtoint(buf, 0, &lfs_num);
	if (ret)
		return ret;
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static DEVICE_ATTR_RW(sso_pf_func_ovrd);

static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	&dev_attr_sso_pf_func_ovrd.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check whether the AF has set up the revision for the RVUM block;
	 * if not, driver probe should be deferred until the AF driver is up.
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

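/*
 * Select the microcode revision ID used for firmware selection: the
 * PCI revision on OcteonTX2 parts, otherwise a CN10K variant derived
 * from CPT_AF_CTL and the SG version feature.
 */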
static void cptpf_get_rid(struct pci_dev *pdev, struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	u64 reg_val = 0x0;

	if (is_dev_otx2(pdev)) {
		eng_grps->rid = pdev->revision;
		return;
	}
	otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
			     BLKADDR_CPT0);
	if ((cpt_feature_sgv2(pdev) && (reg_val & BIT_ULL(18))) ||
	    is_dev_cn10ka_ax(pdev))
		eng_grps->rid = CPT_UC_RID_CN10K_A;
	else if (cpt_feature_sgv2(pdev))
		eng_grps->rid = CPT_UC_RID_CN10K_B;
}

static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
	u64 cfg;

	cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
	if (cfg & BIT_ULL(11))
		cptpf->has_cpt1 = true;
}

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret = 0;

	/* Check if the 'implemented' bit is set for block BLKADDR_CPT1 */
	cptpf_check_block_implemented(cptpf);

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u,
				   BLKADDR_CPT0);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	ret = otx2_cpt_disable_all_cores(cptpf);

	return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

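/*
 * SR-IOV enable path: bring up the VF-PF mailbox, the FLR workqueue
 * and the VF interrupts, load the engine groups, then create the VFs.
 */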
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	cptpf_get_rid(pdev, cptpf);
	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

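/*
 * sriov_configure callback: a positive num_vfs enables that many VFs,
 * zero disables SR-IOV. Usage sketch via the standard sysfs interface
 * (the device address is illustrative):
 *
 *   echo 2 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs
 */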
static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);
	else
		return cptpf_sriov_disable(pdev);
}

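/*
 * PF probe: map the configuration BAR, allocate all MSI-X vectors,
 * bring up the AF-PF mailbox, then initialize the device, engine
 * groups, sysfs attributes and devlink interface.
 */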
static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptpf_dev *cptpf;
	int err, num_vec;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map PF's configuration registers */
	err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	num_vec = pci_msix_vec_count(cptpf->pdev);
	if (num_vec <= 0) {
		err = -EINVAL;
		goto clear_drvdata;
	}

	err = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n", num_vec);
		goto clear_drvdata;
	}
	otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
	cptpf->kvf_limits = 1;

	err = cn10k_cptpf_lmtst_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;

	err = otx2_cpt_register_dl(cptpf);
	if (err)
		goto sysfs_grp_del;

	return 0;

sysfs_grp_del:
	sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	otx2_cpt_unregister_dl(cptpf);

	/* Cleanup Inline CPT LF's if attached */
	if (cptpf->lfs.lfs_num)
		otx2_inline_cptlf_cleanup(&cptpf->lfs);

	if (cptpf->cpt1_lfs.lfs_num)
		otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);

	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }	/* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);