xref: /linux/drivers/vdpa/solidrun/snet_main.c (revision 3616bf377a5a8ef4f124dfde5f3522c4da335561)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * SolidRun DPU driver for control plane
4  *
5  * Copyright (C) 2022-2023 SolidRun
6  *
7  * Author: Alvaro Karsz <alvaro.karsz@solid-run.com>
8  *
9  */
10 #include <linux/iopoll.h>
11 
12 #include "snet_vdpa.h"
13 
14 /* SNET DPU device ID */
15 #define SNET_DEVICE_ID          0x1000
16 /* SNET signature */
17 #define SNET_SIGNATURE          0xD0D06363
18 /* Max. config version that we can work with */
19 #define SNET_CFG_VERSION        0x2
20 /* Queue align */
21 #define SNET_QUEUE_ALIGNMENT    PAGE_SIZE
22 /* Kick value to notify that new data is available */
23 #define SNET_KICK_VAL           0x1
24 #define SNET_CONFIG_OFF         0x0
25 /* How long we are willing to wait for a SNET device */
26 #define SNET_DETECT_TIMEOUT	5000000
27 /* How long should we wait for the DPU to read our config */
28 #define SNET_READ_CFG_TIMEOUT	3000000
29 /* Size of configs written to the DPU */
30 #define SNET_GENERAL_CFG_LEN	36
31 #define SNET_GENERAL_CFG_VQ_LEN	40
32 
/* Convert a generic vdpa device to the snet device that embeds it */
static struct snet *vdpa_to_snet(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct snet, vdpa);
}
37 
38 static irqreturn_t snet_cfg_irq_hndlr(int irq, void *data)
39 {
40 	struct snet *snet = data;
41 	/* Call callback if any */
42 	if (snet->cb.callback)
43 		return snet->cb.callback(snet->cb.private);
44 
45 	return IRQ_HANDLED;
46 }
47 
48 static irqreturn_t snet_vq_irq_hndlr(int irq, void *data)
49 {
50 	struct snet_vq *vq = data;
51 	/* Call callback if any */
52 	if (vq->cb.callback)
53 		return vq->cb.callback(vq->cb.private);
54 
55 	return IRQ_HANDLED;
56 }
57 
/* Free the config IRQ and every VQ IRQ requested for this device.
 * An irq value of -1 marks "not requested", so the function is safe
 * to call even if only some (or none) of the IRQs were requested.
 */
static void snet_free_irqs(struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev;
	u32 i;

	/* Which device allocated the IRQs? If SNET_CFG_FLAG_IRQ_PF is set,
	 * the physical function owns the MSI-X vectors.
	 */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pdev = snet->pdev->physfn;
	else
		pdev = snet->pdev;

	/* Free config's IRQ */
	if (snet->cfg_irq != -1) {
		devm_free_irq(&pdev->dev, snet->cfg_irq, snet);
		snet->cfg_irq = -1;
	}
	/* Free VQ IRQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (snet->vqs[i] && snet->vqs[i]->irq != -1) {
			devm_free_irq(&pdev->dev, snet->vqs[i]->irq, snet->vqs[i]);
			snet->vqs[i]->irq = -1;
		}
	}

	/* IRQ vectors are freed when the pci remove callback is called */
}
85 
/* Store the guest-provided ring addresses for VQ @idx.
 * They are pushed to the DPU later, in snet_write_conf().
 */
static int snet_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
			       u64 driver_area, u64 device_area)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* save received parameters in vqueue struct */
	snet->vqs[idx]->desc_area = desc_area;
	snet->vqs[idx]->driver_area = driver_area;
	snet->vqs[idx]->device_area = device_area;

	return 0;
}
97 
/* Store the ring size for VQ @idx; written to the DPU in snet_write_conf() */
static void snet_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* save num in vqueue */
	snet->vqs[idx]->num = num;
}
104 
105 static void snet_kick_vq(struct vdpa_device *vdev, u16 idx)
106 {
107 	struct snet *snet = vdpa_to_snet(vdev);
108 	/* not ready - ignore */
109 	if (!snet->vqs[idx]->ready)
110 		return;
111 
112 	iowrite32(SNET_KICK_VAL, snet->vqs[idx]->kick_ptr);
113 }
114 
115 static void snet_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
116 {
117 	struct snet *snet = vdpa_to_snet(vdev);
118 
119 	snet->vqs[idx]->cb.callback = cb->callback;
120 	snet->vqs[idx]->cb.private = cb->private;
121 }
122 
/* Mark VQ @idx ready (or not); snet_kick_vq() ignores non-ready queues */
static void snet_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->ready = ready;
}
129 
/* Report whether VQ @idx was marked ready */
static bool snet_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->ready;
}
136 
137 static bool snet_vq_state_is_initial(struct snet *snet, const struct vdpa_vq_state *state)
138 {
139 	if (SNET_HAS_FEATURE(snet, VIRTIO_F_RING_PACKED)) {
140 		const struct vdpa_vq_state_packed *p = &state->packed;
141 
142 		if (p->last_avail_counter == 1 && p->last_used_counter == 1 &&
143 		    p->last_avail_idx == 0 && p->last_used_idx == 0)
144 			return true;
145 	} else {
146 		const struct vdpa_vq_state_split *s = &state->split;
147 
148 		if (s->avail_index == 0)
149 			return true;
150 	}
151 
152 	return false;
153 }
154 
155 static int snet_set_vq_state(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state)
156 {
157 	struct snet *snet = vdpa_to_snet(vdev);
158 
159 	/* We can set any state for config version 2+ */
160 	if (SNET_CFG_VER(snet, 2)) {
161 		memcpy(&snet->vqs[idx]->vq_state, state, sizeof(*state));
162 		return 0;
163 	}
164 
165 	/* Older config - we can't set the VQ state.
166 	 * Return 0 only if this is the initial state we use in the DPU.
167 	 */
168 	if (snet_vq_state_is_initial(snet, state))
169 		return 0;
170 
171 	return -EOPNOTSUPP;
172 }
173 
/* Read the current state of VQ @idx from the DPU */
static int snet_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_read_vq_state(snet, idx, state);
}
180 
/* Return the IRQ number of VQ @idx (-1 if no IRQ was requested) */
static int snet_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->irq;
}
187 
/* Required VQ alignment; the device does not depend on @vdev here */
static u32 snet_get_vq_align(struct vdpa_device *vdev)
{
	return (u32)SNET_QUEUE_ALIGNMENT;
}
192 
193 static int snet_reset_dev(struct snet *snet)
194 {
195 	struct pci_dev *pdev = snet->pdev;
196 	int ret = 0;
197 	u32 i;
198 
199 	/* If status is 0, nothing to do */
200 	if (!snet->status)
201 		return 0;
202 
203 	/* If DPU started, destroy it */
204 	if (snet->status & VIRTIO_CONFIG_S_DRIVER_OK)
205 		ret = snet_destroy_dev(snet);
206 
207 	/* Clear VQs */
208 	for (i = 0; i < snet->cfg->vq_num; i++) {
209 		if (!snet->vqs[i])
210 			continue;
211 		snet->vqs[i]->cb.callback = NULL;
212 		snet->vqs[i]->cb.private = NULL;
213 		snet->vqs[i]->desc_area = 0;
214 		snet->vqs[i]->device_area = 0;
215 		snet->vqs[i]->driver_area = 0;
216 		snet->vqs[i]->ready = false;
217 	}
218 
219 	/* Clear config callback */
220 	snet->cb.callback = NULL;
221 	snet->cb.private = NULL;
222 	/* Free IRQs */
223 	snet_free_irqs(snet);
224 	/* Reset status */
225 	snet->status = 0;
226 	snet->dpu_ready = false;
227 
228 	if (ret)
229 		SNET_WARN(pdev, "Incomplete reset to SNET[%u] device, err: %d\n", snet->sid, ret);
230 	else
231 		SNET_DBG(pdev, "Reset SNET[%u] device\n", snet->sid);
232 
233 	return 0;
234 }
235 
/* vdpa reset callback - thin wrapper around snet_reset_dev() */
static int snet_reset(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_reset_dev(snet);
}
242 
/* Size of this device's virtio config space, taken from the DPU config */
static size_t snet_get_config_size(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (size_t)snet->cfg->cfg_size;
}
249 
/* Feature bits offered by the device, taken from the DPU config */
static u64 snet_get_features(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->cfg->features;
}
256 
/* Negotiate features: keep only the driver bits the device also offers */
static int snet_set_drv_features(struct vdpa_device *vdev, u64 features)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->negotiated_features = snet->cfg->features & features;
	return 0;
}
264 
/* Return the previously negotiated feature bits */
static u64 snet_get_drv_features(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->negotiated_features;
}
271 
/* VQ size from the DPU config; also used as get_vq_num_min (fixed size) */
static u16 snet_get_vq_num_max(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (u16)snet->cfg->vq_size;
}
278 
279 static void snet_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
280 {
281 	struct snet *snet = vdpa_to_snet(vdev);
282 
283 	snet->cb.callback = cb->callback;
284 	snet->cb.private = cb->private;
285 }
286 
/* VirtIO device type (net, blk, ...) of this VF, from the DPU config */
static u32 snet_get_device_id(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->cfg->virtio_id;
}
293 
/* SolidRun's PCI vendor ID */
static u32 snet_get_vendor_id(struct vdpa_device *vdev)
{
	return (u32)PCI_VENDOR_ID_SOLIDRUN;
}
298 
/* Return the cached virtio status byte */
static u8 snet_get_status(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->status;
}
305 
306 static int snet_write_conf(struct snet *snet)
307 {
308 	u32 off, i, tmp;
309 	int ret;
310 
311 	/* No need to write the config twice */
312 	if (snet->dpu_ready)
313 		return true;
314 
315 	/* Snet data :
316 	 *
317 	 * General data: SNET_GENERAL_CFG_LEN bytes long
318 	 *  0             0x4       0x8        0xC               0x10      0x14        0x1C     0x24
319 	 *  | MAGIC NUMBER | CFG VER | SNET SID | NUMBER OF QUEUES | IRQ IDX | FEATURES |  RSVD  |
320 	 *
321 	 * For every VQ: SNET_GENERAL_CFG_VQ_LEN bytes long
322 	 * 0                          0x4        0x8
323 	 * |  VQ SID  AND  QUEUE SIZE | IRQ Index |
324 	 * |             DESC AREA                |
325 	 * |            DEVICE AREA               |
326 	 * |            DRIVER AREA               |
327 	 * |    VQ STATE (CFG 2+)     |   RSVD    |
328 	 *
329 	 * Magic number should be written last, this is the DPU indication that the data is ready
330 	 */
331 
332 	/* Init offset */
333 	off = snet->psnet->cfg.host_cfg_off;
334 
335 	/* Ignore magic number for now */
336 	off += 4;
337 	snet_write32(snet, off, snet->psnet->negotiated_cfg_ver);
338 	off += 4;
339 	snet_write32(snet, off, snet->sid);
340 	off += 4;
341 	snet_write32(snet, off, snet->cfg->vq_num);
342 	off += 4;
343 	snet_write32(snet, off, snet->cfg_irq_idx);
344 	off += 4;
345 	snet_write64(snet, off, snet->negotiated_features);
346 	off += 8;
347 	/* Ignore reserved */
348 	off += 8;
349 	/* Write VQs */
350 	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
351 		tmp = (i << 16) | (snet->vqs[i]->num & 0xFFFF);
352 		snet_write32(snet, off, tmp);
353 		off += 4;
354 		snet_write32(snet, off, snet->vqs[i]->irq_idx);
355 		off += 4;
356 		snet_write64(snet, off, snet->vqs[i]->desc_area);
357 		off += 8;
358 		snet_write64(snet, off, snet->vqs[i]->device_area);
359 		off += 8;
360 		snet_write64(snet, off, snet->vqs[i]->driver_area);
361 		off += 8;
362 		/* Write VQ state if config version is 2+ */
363 		if (SNET_CFG_VER(snet, 2))
364 			snet_write32(snet, off, *(u32 *)&snet->vqs[i]->vq_state);
365 		off += 4;
366 
367 		/* Ignore reserved */
368 		off += 4;
369 	}
370 
371 	/* Write magic number - data is ready */
372 	snet_write32(snet, snet->psnet->cfg.host_cfg_off, SNET_SIGNATURE);
373 
374 	/* The DPU will ACK the config by clearing the signature */
375 	ret = readx_poll_timeout(ioread32, snet->bar + snet->psnet->cfg.host_cfg_off,
376 				 tmp, !tmp, 10, SNET_READ_CFG_TIMEOUT);
377 	if (ret) {
378 		SNET_ERR(snet->pdev, "Timeout waiting for the DPU to read the config\n");
379 		return false;
380 	}
381 
382 	/* set DPU flag */
383 	snet->dpu_ready = true;
384 
385 	return true;
386 }
387 
/* Request the config IRQ and one IRQ per VQ from @pdev (PF or VF,
 * whichever owns the MSI-X vectors).
 *
 * NOTE(review): on mid-loop failure, IRQs already requested here are
 * not freed before returning; snet_free_irqs() skips entries still at
 * -1 and the devm framework releases the rest on detach - confirm this
 * is the intended error handling.
 */
static int snet_request_irqs(struct pci_dev *pdev, struct snet *snet)
{
	int ret, i, irq;

	/* Request config IRQ */
	irq = pci_irq_vector(pdev, snet->cfg_irq_idx);
	ret = devm_request_irq(&pdev->dev, irq, snet_cfg_irq_hndlr, 0,
			       snet->cfg_irq_name, snet);
	if (ret) {
		SNET_ERR(pdev, "Failed to request IRQ\n");
		return ret;
	}
	snet->cfg_irq = irq;

	/* Request IRQ for every VQ */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		irq = pci_irq_vector(pdev, snet->vqs[i]->irq_idx);
		ret = devm_request_irq(&pdev->dev, irq, snet_vq_irq_hndlr, 0,
				       snet->vqs[i]->irq_name, snet->vqs[i]);
		if (ret) {
			SNET_ERR(pdev, "Failed to request IRQ\n");
			return ret;
		}
		snet->vqs[i]->irq = irq;
	}
	return 0;
}
415 
/* vdpa set_status callback.
 * On the transition into VIRTIO_CONFIG_S_DRIVER_OK, request the IRQs
 * and push the whole config to the DPU, which creates the device.
 * On any failure, VIRTIO_CONFIG_S_FAILED is set instead of the new
 * status.
 */
static void snet_set_status(struct vdpa_device *vdev, u8 status)
{
	struct snet *snet = vdpa_to_snet(vdev);
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev = snet->pdev;
	int ret;
	bool pf_irqs;

	/* Nothing changed - nothing to do */
	if (status == snet->status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(snet->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		/* Request IRQs from the device that owns the MSI-X vectors */
		pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);
		ret = snet_request_irqs(pf_irqs ? pdev->physfn : pdev, snet);
		if (ret)
			goto set_err;

		/* Write config to the DPU */
		if (snet_write_conf(snet)) {
			SNET_INFO(pdev, "Create SNET[%u] device\n", snet->sid);
		} else {
			snet_free_irqs(snet);
			goto set_err;
		}
	}

	/* Save the new status */
	snet->status = status;
	return;

set_err:
	snet->status |= VIRTIO_CONFIG_S_FAILED;
}
451 
452 static void snet_get_config(struct vdpa_device *vdev, unsigned int offset,
453 			    void *buf, unsigned int len)
454 {
455 	struct snet *snet = vdpa_to_snet(vdev);
456 	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
457 	u8 *buf_ptr = buf;
458 	u32 i;
459 
460 	/* check for offset error */
461 	if (offset + len > snet->cfg->cfg_size)
462 		return;
463 
464 	/* Write into buffer */
465 	for (i = 0; i < len; i++)
466 		*buf_ptr++ = ioread8(cfg_ptr + i);
467 }
468 
469 static void snet_set_config(struct vdpa_device *vdev, unsigned int offset,
470 			    const void *buf, unsigned int len)
471 {
472 	struct snet *snet = vdpa_to_snet(vdev);
473 	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
474 	const u8 *buf_ptr = buf;
475 	u32 i;
476 
477 	/* check for offset error */
478 	if (offset + len > snet->cfg->cfg_size)
479 		return;
480 
481 	/* Write into PCI BAR */
482 	for (i = 0; i < len; i++)
483 		iowrite8(*buf_ptr++, cfg_ptr + i);
484 }
485 
/* vdpa suspend callback - ask the DPU to suspend this device */
static int snet_suspend(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);
	int ret;

	ret = snet_suspend_dev(snet);
	if (ret)
		SNET_ERR(snet->pdev, "SNET[%u] suspend failed, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(snet->pdev, "Suspend SNET[%u] device\n", snet->sid);

	return ret;
}
499 
/* vdpa callbacks exposed to the vdpa core */
static const struct vdpa_config_ops snet_config_ops = {
	.set_vq_address         = snet_set_vq_address,
	.set_vq_num             = snet_set_vq_num,
	.kick_vq                = snet_kick_vq,
	.set_vq_cb              = snet_set_vq_cb,
	.set_vq_ready           = snet_set_vq_ready,
	.get_vq_ready           = snet_get_vq_ready,
	.set_vq_state           = snet_set_vq_state,
	.get_vq_state           = snet_get_vq_state,
	.get_vq_irq		= snet_get_vq_irq,
	.get_vq_align           = snet_get_vq_align,
	.reset                  = snet_reset,
	.get_config_size        = snet_get_config_size,
	.get_device_features    = snet_get_features,
	.set_driver_features    = snet_set_drv_features,
	.get_driver_features    = snet_get_drv_features,
	/* The VQ size is fixed, so min and max share one handler */
	.get_vq_num_min         = snet_get_vq_num_max,
	.get_vq_num_max         = snet_get_vq_num_max,
	.set_config_cb          = snet_set_config_cb,
	.get_device_id          = snet_get_device_id,
	.get_vendor_id          = snet_get_vendor_id,
	.get_status             = snet_get_status,
	.set_status             = snet_set_status,
	.get_config             = snet_get_config,
	.set_config             = snet_set_config,
	.suspend		= snet_suspend,
};
527 
528 static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
529 {
530 	char name[50];
531 	int ret, i, mask = 0;
532 	/* We don't know which BAR will be used to communicate..
533 	 * We will map every bar with len > 0.
534 	 *
535 	 * Later, we will discover the BAR and unmap all other BARs.
536 	 */
537 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
538 		if (pci_resource_len(pdev, i))
539 			mask |= (1 << i);
540 	}
541 
542 	/* No BAR can be used.. */
543 	if (!mask) {
544 		SNET_ERR(pdev, "Failed to find a PCI BAR\n");
545 		return -ENODEV;
546 	}
547 
548 	snprintf(name, sizeof(name), "psnet[%s]-bars", pci_name(pdev));
549 	ret = pcim_iomap_regions(pdev, mask, name);
550 	if (ret) {
551 		SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
552 		return ret;
553 	}
554 
555 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
556 		if (mask & (1 << i))
557 			psnet->bars[i] = pcim_iomap_table(pdev)[i];
558 	}
559 
560 	return 0;
561 }
562 
563 static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
564 {
565 	char name[50];
566 	int ret;
567 
568 	snprintf(name, sizeof(name), "snet[%s]-bar", pci_name(pdev));
569 	/* Request and map BAR */
570 	ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
571 	if (ret) {
572 		SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n");
573 		return ret;
574 	}
575 
576 	snet->bar = pcim_iomap_table(pdev)[snet->psnet->cfg.vf_bar];
577 
578 	return 0;
579 }
580 
581 static void snet_free_cfg(struct snet_cfg *cfg)
582 {
583 	u32 i;
584 
585 	if (!cfg->devs)
586 		return;
587 
588 	/* Free devices */
589 	for (i = 0; i < cfg->devices_num; i++) {
590 		if (!cfg->devs[i])
591 			break;
592 
593 		kfree(cfg->devs[i]);
594 	}
595 	/* Free pointers to devices */
596 	kfree(cfg->devs);
597 }
598 
599 /* Detect which BAR is used for communication with the device. */
600 static int psnet_detect_bar(struct psnet *psnet, u32 off)
601 {
602 	unsigned long exit_time;
603 	int i;
604 
605 	exit_time = jiffies + usecs_to_jiffies(SNET_DETECT_TIMEOUT);
606 
607 	/* SNET DPU will write SNET's signature when the config is ready. */
608 	while (time_before(jiffies, exit_time)) {
609 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
610 			/* Is this BAR mapped? */
611 			if (!psnet->bars[i])
612 				continue;
613 
614 			if (ioread32(psnet->bars[i] + off) == SNET_SIGNATURE)
615 				return i;
616 		}
617 		usleep_range(1000, 10000);
618 	}
619 
620 	return -ENODEV;
621 }
622 
623 static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
624 {
625 	int i, mask = 0;
626 
627 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
628 		if (psnet->bars[i] && i != psnet->barno)
629 			mask |= (1 << i);
630 	}
631 
632 	if (mask)
633 		pcim_iounmap_regions(pdev, mask);
634 }
635 
/* Read SNET config from PCI BAR.
 * Detects the communication BAR, negotiates the config version, then
 * parses the global config followed by one config per device.
 * On failure everything allocated here is freed before returning.
 */
static int psnet_read_cfg(struct pci_dev *pdev, struct psnet *psnet)
{
	struct snet_cfg *cfg = &psnet->cfg;
	u32 i, off;
	int barno;

	/* Move to where the config starts */
	off = SNET_CONFIG_OFF;

	/* Find BAR used for communication */
	barno = psnet_detect_bar(psnet, off);
	if (barno < 0) {
		SNET_ERR(pdev, "SNET config is not ready.\n");
		return barno;
	}

	/* Save used BAR number and unmap all other BARs */
	psnet->barno = barno;
	SNET_DBG(pdev, "Using BAR number %d\n", barno);

	psnet_unmap_unused_bars(pdev, psnet);

	/* load config from BAR */
	cfg->key = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_size = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_ver = psnet_read32(psnet, off);
	off += 4;
	/* The negotiated config version is the lower one between this driver's config
	 * and the DPU's.
	 */
	psnet->negotiated_cfg_ver = min_t(u32, cfg->cfg_ver, SNET_CFG_VERSION);
	SNET_DBG(pdev, "SNET config version %u\n", psnet->negotiated_cfg_ver);

	cfg->vf_num = psnet_read32(psnet, off);
	off += 4;
	cfg->vf_bar = psnet_read32(psnet, off);
	off += 4;
	cfg->host_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->max_size_host_cfg = psnet_read32(psnet, off);
	off += 4;
	cfg->virtio_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->kick_off = psnet_read32(psnet, off);
	off += 4;
	cfg->hwmon_off = psnet_read32(psnet, off);
	off += 4;
	cfg->ctrl_off = psnet_read32(psnet, off);
	off += 4;
	cfg->flags = psnet_read32(psnet, off);
	off += 4;
	/* Ignore Reserved */
	off += sizeof(cfg->rsvd);

	cfg->devices_num = psnet_read32(psnet, off);
	off += 4;
	/* Allocate memory to hold pointer to the devices */
	cfg->devs = kcalloc(cfg->devices_num, sizeof(void *), GFP_KERNEL);
	if (!cfg->devs)
		return -ENOMEM;

	/* Load device configuration from BAR */
	for (i = 0; i < cfg->devices_num; i++) {
		cfg->devs[i] = kzalloc(sizeof(*cfg->devs[i]), GFP_KERNEL);
		if (!cfg->devs[i]) {
			snet_free_cfg(cfg);
			return -ENOMEM;
		}
		/* Read device config */
		cfg->devs[i]->virtio_id = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_num = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_size = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vfid = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->features = psnet_read64(psnet, off);
		off += 8;
		/* Ignore Reserved */
		off += sizeof(cfg->devs[i]->rsvd);

		cfg->devs[i]->cfg_size = psnet_read32(psnet, off);
		off += 4;

		/* Is the config written to the DPU going to be too big? */
		if (SNET_GENERAL_CFG_LEN + SNET_GENERAL_CFG_VQ_LEN * cfg->devs[i]->vq_num >
		    cfg->max_size_host_cfg) {
			SNET_ERR(pdev, "Failed to read SNET config, the config is too big..\n");
			snet_free_cfg(cfg);
			return -EINVAL;
		}
	}
	return 0;
}
734 
735 static int psnet_alloc_irq_vector(struct pci_dev *pdev, struct psnet *psnet)
736 {
737 	int ret = 0;
738 	u32 i, irq_num = 0;
739 
740 	/* Let's count how many IRQs we need, 1 for every VQ + 1 for config change */
741 	for (i = 0; i < psnet->cfg.devices_num; i++)
742 		irq_num += psnet->cfg.devs[i]->vq_num + 1;
743 
744 	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
745 	if (ret != irq_num) {
746 		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
747 		return ret;
748 	}
749 	SNET_DBG(pdev, "Allocated %u IRQ vectors from physical function\n", irq_num);
750 
751 	return 0;
752 }
753 
754 static int snet_alloc_irq_vector(struct pci_dev *pdev, struct snet_dev_cfg *snet_cfg)
755 {
756 	int ret = 0;
757 	u32 irq_num;
758 
759 	/* We want 1 IRQ for every VQ + 1 for config change events */
760 	irq_num = snet_cfg->vq_num + 1;
761 
762 	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
763 	if (ret <= 0) {
764 		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
765 		return ret;
766 	}
767 
768 	return 0;
769 }
770 
771 static void snet_free_vqs(struct snet *snet)
772 {
773 	u32 i;
774 
775 	if (!snet->vqs)
776 		return;
777 
778 	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
779 		if (!snet->vqs[i])
780 			break;
781 
782 		kfree(snet->vqs[i]);
783 	}
784 	kfree(snet->vqs);
785 }
786 
787 static int snet_build_vqs(struct snet *snet)
788 {
789 	u32 i;
790 	/* Allocate the VQ pointers array */
791 	snet->vqs = kcalloc(snet->cfg->vq_num, sizeof(void *), GFP_KERNEL);
792 	if (!snet->vqs)
793 		return -ENOMEM;
794 
795 	/* Allocate the VQs */
796 	for (i = 0; i < snet->cfg->vq_num; i++) {
797 		snet->vqs[i] = kzalloc(sizeof(*snet->vqs[i]), GFP_KERNEL);
798 		if (!snet->vqs[i]) {
799 			snet_free_vqs(snet);
800 			return -ENOMEM;
801 		}
802 		/* Reset IRQ num */
803 		snet->vqs[i]->irq = -1;
804 		/* VQ serial ID */
805 		snet->vqs[i]->sid = i;
806 		/* Kick address - every VQ gets 4B */
807 		snet->vqs[i]->kick_ptr = snet->bar + snet->psnet->cfg.kick_off +
808 					 snet->vqs[i]->sid * 4;
809 		/* Clear kick address for this VQ */
810 		iowrite32(0, snet->vqs[i]->kick_ptr);
811 	}
812 	return 0;
813 }
814 
815 static int psnet_get_next_irq_num(struct psnet *psnet)
816 {
817 	int irq;
818 
819 	spin_lock(&psnet->lock);
820 	irq = psnet->next_irq++;
821 	spin_unlock(&psnet->lock);
822 
823 	return irq;
824 }
825 
/* Reserve stable IRQ indexes for the config IRQ and every VQ, and build
 * the IRQ names.  The indexes never change after this, even though the
 * IRQs themselves may be requested and freed multiple times.
 */
static void snet_reserve_irq_idx(struct pci_dev *pdev, struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	int  i;

	/* one IRQ for every VQ, and one for config changes */
	snet->cfg_irq_idx = psnet_get_next_irq_num(psnet);
	snprintf(snet->cfg_irq_name, SNET_NAME_SIZE, "snet[%s]-cfg[%d]",
		 pci_name(pdev), snet->cfg_irq_idx);

	for (i = 0; i < snet->cfg->vq_num; i++) {
		/* Get next free IRQ ID */
		snet->vqs[i]->irq_idx = psnet_get_next_irq_num(psnet);
		/* Write IRQ name */
		snprintf(snet->vqs[i]->irq_name, SNET_NAME_SIZE, "snet[%s]-vq[%d]",
			 pci_name(pdev), snet->vqs[i]->irq_idx);
	}
}
844 
845 /* Find a device config based on virtual function id */
846 static struct snet_dev_cfg *snet_find_dev_cfg(struct snet_cfg *cfg, u32 vfid)
847 {
848 	u32 i;
849 
850 	for (i = 0; i < cfg->devices_num; i++) {
851 		if (cfg->devs[i]->vfid == vfid)
852 			return cfg->devs[i];
853 	}
854 	/* Oppss.. no config found.. */
855 	return NULL;
856 }
857 
/* Probe function for a physical PCI function.
 * Maps the BARs, reads the DPU's config, optionally allocates the
 * shared MSI-X vectors, enables SR-IOV so the VFs appear, and creates
 * the HW monitor when the DPU requests it.
 */
static int snet_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct psnet *psnet;
	int ret = 0;
	bool pf_irqs = false;

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI device\n");
		return ret;
	}

	/* Allocate a PCI physical function device */
	psnet = kzalloc(sizeof(*psnet), GFP_KERNEL);
	if (!psnet)
		return -ENOMEM;

	/* Init PSNET spinlock */
	spin_lock_init(&psnet->lock);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, psnet);

	/* Open SNET MAIN BAR */
	ret = psnet_open_pf_bar(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* Try to read SNET's config from PCI BAR */
	ret = psnet_read_cfg(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* If SNET_CFG_FLAG_IRQ_PF flag is set, we should use
	 * PF MSI-X vectors
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	if (pf_irqs) {
		ret = psnet_alloc_irq_vector(pdev, psnet);
		if (ret)
			goto free_cfg;
	}

	SNET_DBG(pdev, "Enable %u virtual functions\n", psnet->cfg.vf_num);
	ret = pci_enable_sriov(pdev, psnet->cfg.vf_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable SR-IOV\n");
		goto free_irq;
	}

	/* Create HW monitor device */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_HWMON)) {
#if IS_ENABLED(CONFIG_HWMON)
		psnet_create_hwmon(pdev);
#else
		SNET_WARN(pdev, "Can't start HWMON, CONFIG_HWMON is not enabled\n");
#endif
	}

	return 0;

free_irq:
	if (pf_irqs)
		pci_free_irq_vectors(pdev);
free_cfg:
	snet_free_cfg(&psnet->cfg);
free_psnet:
	kfree(psnet);
	return ret;
}
930 
/* Probe function for a virtual PCI function.
 * Looks up this VF's config in the PF's DPU config, allocates MSI-X
 * vectors when the VF owns them, allocates and fills the snet/vdpa
 * device, maps the VF BAR, builds the VQs and registers with the vdpa
 * core.
 */
static int snet_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct pci_dev *pdev_pf = pdev->physfn;
	struct psnet *psnet = pci_get_drvdata(pdev_pf);
	struct snet_dev_cfg *dev_cfg;
	struct snet *snet;
	u32 vfid;
	int ret;
	bool pf_irqs = false;

	/* Get virtual function id.
	 * (the DPU counts the VFs from 1)
	 */
	ret = pci_iov_vf_id(pdev);
	if (ret < 0) {
		SNET_ERR(pdev, "Failed to find a VF id\n");
		return ret;
	}
	vfid = ret + 1;

	/* Find the snet_dev_cfg based on vfid */
	dev_cfg = snet_find_dev_cfg(&psnet->cfg, vfid);
	if (!dev_cfg) {
		SNET_WARN(pdev, "Failed to find a VF config..\n");
		return -ENODEV;
	}

	/* Which PCI device should allocate the IRQs?
	 * If the SNET_CFG_FLAG_IRQ_PF flag set, the PF device allocates the IRQs
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI VF device\n");
		return ret;
	}

	/* Request for MSI-X IRQs */
	if (!pf_irqs) {
		ret = snet_alloc_irq_vector(pdev, dev_cfg);
		if (ret)
			return ret;
	}

	/* Allocate vdpa device */
	snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops, 1, 1, NULL,
				 false);
	if (!snet) {
		SNET_ERR(pdev, "Failed to allocate a vdpa device\n");
		ret = -ENOMEM;
		goto free_irqs;
	}

	/* Init control mutex and spinlock */
	mutex_init(&snet->ctrl_lock);
	spin_lock_init(&snet->ctrl_spinlock);

	/* Save pci device pointer */
	snet->pdev = pdev;
	snet->psnet = psnet;
	snet->cfg = dev_cfg;
	snet->dpu_ready = false;
	snet->sid = vfid;
	/* Reset IRQ value */
	snet->cfg_irq = -1;

	ret = snet_open_vf_bar(pdev, snet);
	if (ret)
		goto put_device;

	/* Create a VirtIO config pointer */
	snet->cfg->virtio_cfg = snet->bar + snet->psnet->cfg.virtio_cfg_off;

	/* Clear control registers */
	snet_ctrl_clear(snet);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, snet);

	ret = snet_build_vqs(snet);
	if (ret)
		goto put_device;

	/* Reserve IRQ indexes,
	 * The IRQs may be requested and freed multiple times,
	 * but the indexes won't change.
	 */
	snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet);

	/* Set DMA device */
	snet->vdpa.dma_dev = &pdev->dev;

	/* Register VDPA device */
	ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to register vdpa device\n");
		goto free_vqs;
	}

	return 0;

free_vqs:
	snet_free_vqs(snet);
put_device:
	put_device(&snet->vdpa.dev);
free_irqs:
	if (!pf_irqs)
		pci_free_irq_vectors(pdev);
	return ret;
}
1043 
1044 static int snet_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1045 {
1046 	if (pdev->is_virtfn)
1047 		return snet_vdpa_probe_vf(pdev);
1048 	else
1049 		return snet_vdpa_probe_pf(pdev);
1050 }
1051 
/* Remove a physical function: disable SR-IOV, free the shared IRQ
 * vectors (if the PF owned them) and release the DPU config.
 */
static void snet_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct psnet *psnet = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);
	/* If IRQs are allocated from the PF, we should free the IRQs */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);

	snet_free_cfg(&psnet->cfg);
	kfree(psnet);
}
1064 
/* Remove a virtual function: unregister from the vdpa core, free the
 * VQs and release the VF's own IRQ vectors when it owned them.
 */
static void snet_vdpa_remove_vf(struct pci_dev *pdev)
{
	struct snet *snet = pci_get_drvdata(pdev);
	struct psnet *psnet = snet->psnet;

	vdpa_unregister_device(&snet->vdpa);
	snet_free_vqs(snet);
	/* If IRQs are allocated from the VF, we should free the IRQs */
	if (!PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);
}
1076 
1077 static void snet_vdpa_remove(struct pci_dev *pdev)
1078 {
1079 	if (pdev->is_virtfn)
1080 		snet_vdpa_remove_vf(pdev);
1081 	else
1082 		snet_vdpa_remove_pf(pdev);
1083 }
1084 
1085 static struct pci_device_id snet_driver_pci_ids[] = {
1086 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID,
1087 			 PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID) },
1088 	{ 0 },
1089 };
1090 
1091 MODULE_DEVICE_TABLE(pci, snet_driver_pci_ids);
1092 
/* PCI driver glue; probe/remove dispatch on PF vs VF internally */
static struct pci_driver snet_vdpa_driver = {
	.name		= "snet-vdpa-driver",
	.id_table	= snet_driver_pci_ids,
	.probe		= snet_vdpa_probe,
	.remove		= snet_vdpa_remove,
};
1099 
1100 module_pci_driver(snet_vdpa_driver);
1101 
1102 MODULE_AUTHOR("Alvaro Karsz <alvaro.karsz@solid-run.com>");
1103 MODULE_DESCRIPTION("SolidRun vDPA driver");
1104 MODULE_LICENSE("GPL v2");
1105