xref: /linux/drivers/vdpa/octeon_ep/octep_vdpa_hw.c (revision deee7487f5d495d0d9e5ab40d866d69ad524c46a)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Marvell. */

#include <linux/iopoll.h>
#include <linux/build_bug.h>

#include "octep_vdpa.h"

enum octep_mbox_ids {
	OCTEP_MBOX_MSG_SET_VQ_STATE = 1,
	OCTEP_MBOX_MSG_GET_VQ_STATE,
};

#define OCTEP_HW_TIMEOUT       10000000

#define MBOX_OFFSET            64
#define MBOX_RSP_MASK          0x00000001
#define MBOX_RC_MASK           0x0000FFFE

#define MBOX_RSP_TO_ERR(val)   (-(((val) & MBOX_RC_MASK) >> 2))
#define MBOX_AVAIL(val)        (((val) & MBOX_RSP_MASK))
#define MBOX_RSP(val)          ((val) & (MBOX_RC_MASK | MBOX_RSP_MASK))

#define DEV_RST_ACK_BIT        7
#define FEATURE_SEL_ACK_BIT    15
#define QUEUE_SEL_ACK_BIT      15

struct octep_mbox_hdr {
	u8 ver;
	u8 rsvd1;
	u16 id;
	u16 rsvd2;
#define MBOX_REQ_SIG (0xdead)
#define MBOX_RSP_SIG (0xbeef)
	u16 sig;
};

struct octep_mbox_sts {
	u16 rsp:1;
	u16 rc:15;
	u16 rsvd;
};

struct octep_mbox {
	struct octep_mbox_hdr hdr;
	struct octep_mbox_sts sts;
	u64 rsvd;
	u32 data[];
};

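/* Mailbox registers are located MBOX_OFFSET bytes past the start of the
 * device-specific config area mapped at oct_hw->dev_cfg.
 */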
static inline struct octep_mbox __iomem *octep_get_mbox(struct octep_hw *oct_hw)
{
	return (struct octep_mbox __iomem *)(oct_hw->dev_cfg + MBOX_OFFSET);
}

static inline int octep_wait_for_mbox_avail(struct octep_mbox __iomem *mbox)
{
	u32 val;

	return readx_poll_timeout(ioread32, &mbox->sts, val, MBOX_AVAIL(val), 10,
				  OCTEP_HW_TIMEOUT);
}

static inline int octep_wait_for_mbox_rsp(struct octep_mbox __iomem *mbox)
{
	u32 val;

	return readx_poll_timeout(ioread32, &mbox->sts, val, MBOX_RSP(val), 10,
				  OCTEP_HW_TIMEOUT);
}

static inline void octep_write_hdr(struct octep_mbox __iomem *mbox, u16 id, u16 sig)
{
	iowrite16(id, &mbox->hdr.id);
	iowrite16(sig, &mbox->hdr.sig);
}

static inline u32 octep_read_sig(struct octep_mbox __iomem *mbox)
{
	return ioread16(&mbox->hdr.sig);
}

static inline void octep_write_sts(struct octep_mbox __iomem *mbox, u32 sts)
{
	iowrite32(sts, &mbox->sts);
}

static inline u32 octep_read_sts(struct octep_mbox __iomem *mbox)
{
	return ioread32(&mbox->sts);
}

static inline u32 octep_read32_word(struct octep_mbox __iomem *mbox, u16 word_idx)
{
	return ioread32(&mbox->data[word_idx]);
}

static inline void octep_write32_word(struct octep_mbox __iomem *mbox, u16 word_idx, u32 word)
{
	return iowrite32(word, &mbox->data[word_idx]);
}

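/* Send a mailbox request to the device firmware and wait for its response.
 *
 * buf_size must be a multiple of 4. For write requests the caller's buffer
 * is copied into the mailbox data words; the queue id always goes into data
 * word 0. Clearing the status word and writing MBOX_REQ_SIG into the header
 * hands the request to the firmware. A response must carry MBOX_RSP_SIG in
 * the header; a non-zero return code in the status word is translated into
 * a negative error value. For read requests the data words are copied back
 * into the caller's buffer on success.
 */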
static int octep_process_mbox(struct octep_hw *oct_hw, u16 id, u16 qid, void *buffer,
			      u32 buf_size, bool write)
{
	struct octep_mbox __iomem *mbox = octep_get_mbox(oct_hw);
	struct pci_dev *pdev = oct_hw->pdev;
	u32 *p = (u32 *)buffer;
	u16 data_wds;
	int ret, i;
	u32 val;

	if (!IS_ALIGNED(buf_size, 4))
		return -EINVAL;

	/* Make sure mbox space is available */
	ret = octep_wait_for_mbox_avail(mbox);
	if (ret) {
		dev_warn(&pdev->dev, "Timeout waiting for previous mbox data to be consumed\n");
		return ret;
	}
	data_wds = buf_size / 4;

	if (write) {
		for (i = 1; i <= data_wds; i++) {
			octep_write32_word(mbox, i, *p);
			p++;
		}
	}
	octep_write32_word(mbox, 0, (u32)qid);
	octep_write_sts(mbox, 0);

	octep_write_hdr(mbox, id, MBOX_REQ_SIG);

	ret = octep_wait_for_mbox_rsp(mbox);
	if (ret) {
		dev_warn(&pdev->dev, "Timeout waiting for mbox : %d response\n", id);
		return ret;
	}

	val = octep_read_sig(mbox);
	if ((val & 0xFFFF) != MBOX_RSP_SIG) {
		dev_warn(&pdev->dev, "Invalid Signature from mbox : %d response\n", id);
		return -EINVAL;
	}

	val = octep_read_sts(mbox);
	if (val & MBOX_RC_MASK) {
		ret = MBOX_RSP_TO_ERR(val);
		dev_warn(&pdev->dev, "Error while processing mbox : %d, err %d\n", id, ret);
		return ret;
	}

	if (!write)
		for (i = 1; i <= data_wds; i++)
			*p++ = octep_read32_word(mbox, i);

	return 0;
}

static void octep_mbox_init(struct octep_mbox __iomem *mbox)
{
	iowrite32(1, &mbox->sts);
}

int octep_verify_features(u64 features)
{
	/* Minimum features to expect */
	if (!(features & BIT_ULL(VIRTIO_F_VERSION_1)))
		return -EOPNOTSUPP;

	if (!(features & BIT_ULL(VIRTIO_F_NOTIFICATION_DATA)))
		return -EOPNOTSUPP;

	if (!(features & BIT_ULL(VIRTIO_F_RING_PACKED)))
		return -EOPNOTSUPP;

	return 0;
}

u8 octep_hw_get_status(struct octep_hw *oct_hw)
{
	return ioread8(&oct_hw->common_cfg->device_status);
}

void octep_hw_set_status(struct octep_hw *oct_hw, u8 status)
{
	iowrite8(status, &oct_hw->common_cfg->device_status);
}

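/* Reset the device: write status 0 with the reset-acknowledge bit set and
 * poll until the device reports a status of 0.
 */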
void octep_hw_reset(struct octep_hw *oct_hw)
{
	u8 val;

	octep_hw_set_status(oct_hw, 0 | BIT(DEV_RST_ACK_BIT));
	if (readx_poll_timeout(ioread8, &oct_hw->common_cfg->device_status, val, !val, 10,
			       OCTEP_HW_TIMEOUT)) {
		dev_warn(&oct_hw->pdev->dev, "Octeon device reset timeout\n");
		return;
	}
}

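/* Write a feature select register and wait for the device to acknowledge it:
 * the value is written with FEATURE_SEL_ACK_BIT set and the register is
 * polled until it reads back as the plain select value.
 */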
static int feature_sel_write_with_timeout(struct octep_hw *oct_hw, u32 select, void __iomem *addr)
{
	u32 val;

	iowrite32(select | BIT(FEATURE_SEL_ACK_BIT), addr);

	if (readx_poll_timeout(ioread32, addr, val, val == select, 10, OCTEP_HW_TIMEOUT)) {
		dev_warn(&oct_hw->pdev->dev, "Feature select%d write timeout\n", select);
		return -1;
	}
	return 0;
}

u64 octep_hw_get_dev_features(struct octep_hw *oct_hw)
{
	u32 features_lo, features_hi;

	if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->device_feature_select))
		return 0;

	features_lo = ioread32(&oct_hw->common_cfg->device_feature);

	if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->device_feature_select))
		return 0;

	features_hi = ioread32(&oct_hw->common_cfg->device_feature);

	return ((u64)features_hi << 32) | features_lo;
}

u64 octep_hw_get_drv_features(struct octep_hw *oct_hw)
{
	u32 features_lo, features_hi;

	if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->guest_feature_select))
		return 0;

	features_lo = ioread32(&oct_hw->common_cfg->guest_feature);

	if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->guest_feature_select))
		return 0;

	features_hi = ioread32(&oct_hw->common_cfg->guest_feature);

	return ((u64)features_hi << 32) | features_lo;
}

void octep_hw_set_drv_features(struct octep_hw *oct_hw, u64 features)
{
	if (feature_sel_write_with_timeout(oct_hw, 0, &oct_hw->common_cfg->guest_feature_select))
		return;

	iowrite32(features & (BIT_ULL(32) - 1), &oct_hw->common_cfg->guest_feature);

	if (feature_sel_write_with_timeout(oct_hw, 1, &oct_hw->common_cfg->guest_feature_select))
		return;

	iowrite32(features >> 32, &oct_hw->common_cfg->guest_feature);
}

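/* Select a queue in the common config area. The queue id is written with
 * QUEUE_SEL_ACK_BIT set and queue_select is polled until the device echoes
 * back the queue id with the ack bit cleared.
 */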
void octep_write_queue_select(struct octep_hw *oct_hw, u16 queue_id)
{
	u16 val;

	iowrite16(queue_id | BIT(QUEUE_SEL_ACK_BIT), &oct_hw->common_cfg->queue_select);

	if (readx_poll_timeout(ioread16, &oct_hw->common_cfg->queue_select, val, val == queue_id,
			       10, OCTEP_HW_TIMEOUT)) {
		dev_warn(&oct_hw->pdev->dev, "Queue select write timeout\n");
		return;
	}
}

void octep_notify_queue(struct octep_hw *oct_hw, u16 qid)
{
	iowrite16(qid, oct_hw->vqs[qid].notify_addr);
}

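/* Read from the device-specific config area, retrying the whole read if the
 * config generation counter changes while the read is in progress.
 */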
void octep_read_dev_config(struct octep_hw *oct_hw, u64 offset, void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	if (WARN_ON(offset + length > oct_hw->config_size))
		return;

	do {
		old_gen = ioread8(&oct_hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = ioread8(oct_hw->dev_cfg + offset + i);

		new_gen = ioread8(&oct_hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

int octep_set_vq_address(struct octep_hw *oct_hw, u16 qid, u64 desc_area, u64 driver_area,
			 u64 device_area)
{
	struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

	octep_write_queue_select(oct_hw, qid);
	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);

	return 0;
}

int octep_get_vq_state(struct octep_hw *oct_hw, u16 qid, struct vdpa_vq_state *state)
{
	return octep_process_mbox(oct_hw, OCTEP_MBOX_MSG_GET_VQ_STATE, qid, state,
				  sizeof(*state), 0);
}

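/* octep_process_mbox() takes a non-const buffer, so hand it a local copy of
 * the caller's state.
 */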
int octep_set_vq_state(struct octep_hw *oct_hw, u16 qid, const struct vdpa_vq_state *state)
{
	struct vdpa_vq_state q_state;

	memcpy(&q_state, state, sizeof(struct vdpa_vq_state));
	return octep_process_mbox(oct_hw, OCTEP_MBOX_MSG_SET_VQ_STATE, qid, &q_state,
				  sizeof(*state), 1);
}

void octep_set_vq_num(struct octep_hw *oct_hw, u16 qid, u32 num)
{
	struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

	octep_write_queue_select(oct_hw, qid);
	iowrite16(num, &cfg->queue_size);
}

void octep_set_vq_ready(struct octep_hw *oct_hw, u16 qid, bool ready)
{
	struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

	octep_write_queue_select(oct_hw, qid);
	iowrite16(ready, &cfg->queue_enable);
}

bool octep_get_vq_ready(struct octep_hw *oct_hw, u16 qid)
{
	struct virtio_pci_common_cfg __iomem *cfg = oct_hw->common_cfg;

	octep_write_queue_select(oct_hw, qid);
	return ioread16(&cfg->queue_enable);
}

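/* The size of queue 0 is reported; all queues are assumed to have the same
 * size.
 */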
u16 octep_get_vq_size(struct octep_hw *oct_hw)
{
	octep_write_queue_select(oct_hw, 0);
	return ioread16(&oct_hw->common_cfg->queue_size);
}

static u32 octep_get_config_size(struct octep_hw *oct_hw)
{
	switch (oct_hw->dev_id) {
	case VIRTIO_ID_NET:
		return sizeof(struct virtio_net_config);
	case VIRTIO_ID_CRYPTO:
		return sizeof(struct virtio_crypto_config);
	default:
		return 0;
	}
}

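/* Translate a virtio PCI capability into a mapped address, checking that it
 * points into OCTEP_HW_CAPS_BAR and that offset + length neither wraps
 * around nor runs past the end of the BAR.
 */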
static void __iomem *octep_get_cap_addr(struct octep_hw *oct_hw, struct virtio_pci_cap *cap)
{
	struct device *dev = &oct_hw->pdev->dev;
	u32 length = le32_to_cpu(cap->length);
	u32 offset = le32_to_cpu(cap->offset);
	u8  bar    = cap->bar;
	u32 len;

	if (bar != OCTEP_HW_CAPS_BAR) {
		dev_err(dev, "Invalid bar: %u\n", bar);
		return NULL;
	}
	if (offset + length < offset) {
		dev_err(dev, "offset(%u) + length(%u) overflows\n",
			offset, length);
		return NULL;
	}
	len = pci_resource_len(oct_hw->pdev, bar);
	if (offset + length > len) {
		dev_err(dev, "invalid cap: overflows bar space: %u > %u\n",
			offset + length, len);
		return NULL;
	}
	return oct_hw->base[bar] + offset;
}

/* In the Octeon DPU device, the virtio config space is completely
 * emulated by the device's firmware. So the standard PCI config
 * read APIs can't be used for reading the virtio capability.
 */
static void octep_pci_caps_read(struct octep_hw *oct_hw, void *buf, size_t len, off_t offset)
{
	u8 __iomem *bar = oct_hw->base[OCTEP_HW_CAPS_BAR];
	u8 *p = buf;
	size_t i;

	for (i = 0; i < len; i++)
		*p++ = ioread8(bar + offset + i);
}

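/* Check the two firmware-ready signature words at the start of the caps BAR
 * before trusting the emulated config space.
 */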
static int octep_pci_signature_verify(struct octep_hw *oct_hw)
{
	u32 signature[2];

	octep_pci_caps_read(oct_hw, &signature, sizeof(signature), 0);

	if (signature[0] != OCTEP_FW_READY_SIGNATURE0)
		return -1;

	if (signature[1] != OCTEP_FW_READY_SIGNATURE1)
		return -1;

	return 0;
}

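/* Process a vendor-specific capability entry; currently only the virtio
 * device ID is passed this way.
 */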
static void octep_vndr_data_process(struct octep_hw *oct_hw,
				    struct octep_pci_vndr_data *vndr_data)
{
	BUILD_BUG_ON(sizeof(struct octep_pci_vndr_data) % 4 != 0);

	switch (vndr_data->id) {
	case OCTEP_PCI_VNDR_CFG_TYPE_VIRTIO_ID:
		oct_hw->dev_id = (u8)vndr_data->data;
		break;
	default:
		dev_err(&oct_hw->pdev->dev, "Invalid vendor data id %u\n",
			vndr_data->id);
		break;
	}
}

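/* Walk the virtio capability list emulated by the firmware: map the common,
 * notify, device and ISR config areas, read the device features, cache the
 * per-queue notification addresses and initialize the mailbox.
 */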
int octep_hw_caps_read(struct octep_hw *oct_hw, struct pci_dev *pdev)
{
	struct octep_pci_vndr_data vndr_data;
	struct octep_mbox __iomem *mbox;
	struct device *dev = &pdev->dev;
	struct virtio_pci_cap cap;
	u16 notify_off;
	int i, ret;
	u8 pos;

	oct_hw->pdev = pdev;
	ret = octep_pci_signature_verify(oct_hw);
	if (ret) {
		dev_err(dev, "Octeon Virtio FW is not initialized\n");
		return -EIO;
	}

	octep_pci_caps_read(oct_hw, &pos, 1, PCI_CAPABILITY_LIST);

	while (pos) {
		octep_pci_caps_read(oct_hw, &cap, 2, pos);

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			dev_err(dev, "Found invalid capability vndr id: %d\n", cap.cap_vndr);
			break;
		}

		octep_pci_caps_read(oct_hw, &cap, sizeof(cap), pos);

		dev_info(dev, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u\n",
			 pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			oct_hw->common_cfg = octep_get_cap_addr(oct_hw, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			octep_pci_caps_read(oct_hw, &oct_hw->notify_off_multiplier,
					    4, pos + sizeof(cap));

			oct_hw->notify_base = octep_get_cap_addr(oct_hw, &cap);
			oct_hw->notify_bar = cap.bar;
			oct_hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
						 le32_to_cpu(cap.offset);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			oct_hw->dev_cfg = octep_get_cap_addr(oct_hw, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			oct_hw->isr = octep_get_cap_addr(oct_hw, &cap);
			break;
		case VIRTIO_PCI_CAP_VENDOR_CFG:
			octep_pci_caps_read(oct_hw, &vndr_data, sizeof(vndr_data), pos);
			if (vndr_data.hdr.vendor_id != PCI_VENDOR_ID_CAVIUM) {
				dev_err(dev, "Invalid vendor data\n");
				return -EINVAL;
			}

			octep_vndr_data_process(oct_hw, &vndr_data);
			break;
		}

		pos = cap.cap_next;
	}
	if (!oct_hw->common_cfg || !oct_hw->notify_base ||
	    !oct_hw->dev_cfg    || !oct_hw->isr) {
		dev_err(dev, "Incomplete PCI capabilities");
		return -EIO;
	}
	dev_info(dev, "common cfg mapped at: %p\n", oct_hw->common_cfg);
	dev_info(dev, "device cfg mapped at: %p\n", oct_hw->dev_cfg);
	dev_info(dev, "isr cfg mapped at: %p\n", oct_hw->isr);
	dev_info(dev, "notify base: %p, notify off multiplier: %u\n",
		 oct_hw->notify_base, oct_hw->notify_off_multiplier);

	oct_hw->config_size = octep_get_config_size(oct_hw);
	oct_hw->features = octep_hw_get_dev_features(oct_hw);

	ret = octep_verify_features(oct_hw->features);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't read features from the device FW\n");
		return ret;
	}
	oct_hw->nr_vring = vp_ioread16(&oct_hw->common_cfg->num_queues);

	oct_hw->vqs = devm_kcalloc(&pdev->dev, oct_hw->nr_vring, sizeof(*oct_hw->vqs), GFP_KERNEL);
	if (!oct_hw->vqs)
		return -ENOMEM;

	dev_info(&pdev->dev, "Device features : %llx\n", oct_hw->features);
	dev_info(&pdev->dev, "Maximum queues : %u\n", oct_hw->nr_vring);

	for (i = 0; i < oct_hw->nr_vring; i++) {
		octep_write_queue_select(oct_hw, i);
		notify_off = vp_ioread16(&oct_hw->common_cfg->queue_notify_off);
		oct_hw->vqs[i].notify_addr = oct_hw->notify_base +
			notify_off * oct_hw->notify_off_multiplier;
		oct_hw->vqs[i].cb_notify_addr = (u32 __iomem *)oct_hw->vqs[i].notify_addr + 1;
		oct_hw->vqs[i].notify_pa = oct_hw->notify_base_pa +
			notify_off * oct_hw->notify_off_multiplier;
	}
	mbox = octep_get_mbox(oct_hw);
	octep_mbox_init(mbox);
	dev_info(dev, "mbox mapped at: %p\n", mbox);

	return 0;
}