// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "rvu_cptpf"
#define OTX2_CPT_DRV_STRING  "Marvell RVU CPT Physical Function Driver"

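/*
 * Enable the VF->PF mailbox interrupts for the first num_vfs VFs.
 * Pending bits are cleared first; VFs 0-63 are served by interrupt
 * register 0 and VFs 64-127 by register 1.
 */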
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/*
	 * Enable VF interrupts for VFs from 0 to 63. Note: the previous
	 * "(num_vfs - 1) % 64" computation under-enabled register 0 for
	 * 64 < num_vfs < 128; all 64 bits must be set in that case.
	 */
	ena_bits = (num_vfs > 64) ? 63 : num_vfs - 1;
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				 GENMASK_ULL(ena_bits, 0));
	}
}

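/*
 * Counterpart of cptpf_enable_vfpf_mbox_intr(): mask the VF->PF mailbox
 * interrupts, clear anything still pending and free the IRQ vectors.
 */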
static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

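/*
 * Enable the per-VF FLR (function level reset) and ME (master enable)
 * interrupts, clearing any stale pending bits first.
 */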
static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	/* Clear FLR interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 INTR_MASK(num_vfs));

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
	/* Clear ME interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
			 INTR_MASK(num_vfs));
	/* Enable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

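/* Mask the per-VF FLR and ME interrupts and free their IRQ vectors. */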
static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					  int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	/* Disable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(vector, cptpf);
}

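/*
 * Deferred FLR handler: tell the AF which VF was reset by sending a
 * MBOX_MSG_VF_FLR message, then clear the VF's transaction-pending bit
 * and re-enable its FLR interrupt (masked in cptpf_vf_flr_intr()).
 */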
static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

	vf = flr_work - pf->flr_work;

	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req)
		return;

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}
	/* Clear transaction pending register */
	otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

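/*
 * FLR interrupt handler: for every VF with a pending FLR, queue the
 * deferred work above and keep that VF's interrupt masked until the
 * work has notified the AF.
 */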
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

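/*
 * ME interrupt handler: a VF toggled its master-enable bit; clear the
 * VF's transaction-pending bit and acknowledge the interrupt.
 */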
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	int reg, vf, num_reg = 1;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

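/*
 * Request the MSI-X vectors for the VF->PF mailbox, VF FLR and VF ME
 * interrupts, then unmask them. The *1 vectors, which serve VFs 64-127,
 * are requested only when more than 64 VFs are enabled.
 */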
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	/* Register VF ME interrupt handler */
	ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for VFME0 irq\n");
		goto free_flr0_irq;
	}

	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for PFVF mbox1 irq\n");
			goto free_me0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
		/* Register VF ME interrupt handler */
		ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
				  cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for VFME1 irq\n");
			goto free_flr1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

	return 0;

free_flr1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);
free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_me0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

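/*
 * Allocate the FLR workqueue and one work item per VF. The workqueue is
 * ordered, so at most one FLR notification is in flight to the AF at a
 * time.
 */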
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

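/*
 * Set up the VF->PF mailbox: a dedicated workqueue, a mapping of the
 * shared mailbox memory (whose base address is read from a PF register)
 * and one mailbox work item per VF.
 */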
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
	else
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wq;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wq;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wq;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wq:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

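/*
 * Request the AF->PF mailbox vector, unmask the interrupt and confirm
 * that the AF is responding with a READY message; if it is not, the
 * probe is deferred.
 */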
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev, "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t offset;
	int err;

	cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
	if (!cptpf->afpf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		err = -ENOMEM;
		goto error;
	}

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	return 0;

error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
}

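/*
 * kvf_limits is a read-write attribute exposed at
 * /sys/bus/pci/devices/<BDF>/kvf_limits. It caps the number of CPT LFs
 * handed to the kernel crypto VFs and must lie between 1 and
 * num_online_cpus().
 */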
static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;
	int ret;

	ret = kstrtoint(buf, 0, &lfs_num);
	if (ret)
		return ret;
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check if AF has setup revision for RVUM block, otherwise
	 * driver probe should be deferred until AF driver comes up
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

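/*
 * Reset a single CPT block: set CPT_AF_BLK_RST through the AF mailbox,
 * then poll bit 63 of the same register until the hardware clears it
 * (bounded at roughly 100-200 ms by the 10 ms-20 ms sleeps below).
 */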
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	int timeout = 10, ret;
	u64 reg = 0;

	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_BLK_RST, 0x1, blkaddr);
	if (ret)
		return ret;

	do {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_BLK_RST, &reg, blkaddr);
		if (ret)
			return ret;

		if (!((reg >> 63) & 0x1))
			break;

		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;
	} while (1);

	return ret;
}

static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
	int ret = 0;

	if (cptpf->has_cpt1) {
		ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_device_reset(cptpf, BLKADDR_CPT0);
}

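/* Set has_cpt1 if the hardware implements a second CPT block. */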
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
	u64 cfg;

	cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
	if (cfg & BIT_ULL(11))
		cptpf->has_cpt1 = true;
}

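/*
 * One-time CPT PF initialization: discover CPT1, reset the block(s),
 * read out the SE/IE/AE engine counts and disable all engine cores.
 */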
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret = 0;

	/* Check if the 'implemented' bit is set for block BLKADDR_CPT1 */
	cptpf_check_block_implemented(cptpf);
	/* Reset the CPT PF device */
	ret = cptpf_device_reset(cptpf);
	if (ret)
		return ret;

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u,
				   BLKADDR_CPT0);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	ret = otx2_cpt_disable_all_cores(cptpf);

	return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

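/*
 * Bring up SR-IOV: the VF->PF mailbox, the FLR workqueue and the VF
 * interrupts must all be in place before pci_enable_sriov() lets the
 * VFs probe.
 */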
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);

	return cptpf_sriov_disable(pdev);
}

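/*
 * PF probe: map the configuration BAR, bring up the AF->PF mailbox,
 * reset and initialize the CPT block(s) and register the sysfs and
 * devlink interfaces.
 */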
static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptpf_dev *cptpf;
	int err;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map PF's configuration registers */
	err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n",
			RVU_PF_INT_VEC_CNT);
		goto clear_drvdata;
	}
	otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

	err = cn10k_cptpf_lmtst_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;

	err = otx2_cpt_register_dl(cptpf);
	if (err)
		goto sysfs_grp_del;

	return 0;

sysfs_grp_del:
	sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	otx2_cpt_unregister_dl(cptpf);
	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }  /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);