// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"

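/*
 * An admin command carries at most four scatterlist entries: the command
 * header, optional command-specific data, the status word, and an optional
 * result buffer (see vp_modern_admin_cmd_exec() below).
 */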
#define VIRTIO_AVQ_SGS_MAX	4

static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_features(&vp_dev->mdev);
}

static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	*num = 0;
	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return 0;

	*num = vp_modern_avq_num(&vp_dev->mdev);
	if (!(*num))
		return -EINVAL;
	*index = vp_modern_avq_index(&vp_dev->mdev);
	return 0;
}

static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return false;

	return index == vp_dev->admin_vq.vq_index;
}

void vp_modern_avq_done(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
	struct virtio_admin_cmd *cmd;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&admin_vq->lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((cmd = virtqueue_get_buf(vq, &len)))
			complete(&cmd->completion);
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&admin_vq->lock, flags);
}

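/*
 * Submit a fully built scatterlist chain to the admin vq, kick the device,
 * and sleep until vp_modern_avq_done() completes the command.  A full ring
 * (-ENOSPC) is retried until a slot frees up; any opcode other than
 * LIST_QUERY/LIST_USE must be present in the negotiated supported_cmds
 * bitmap.
 */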
static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
				    u16 opcode,
				    struct scatterlist **sgs,
				    unsigned int out_num,
				    unsigned int in_num,
				    struct virtio_admin_cmd *cmd)
{
	struct virtqueue *vq;
	unsigned long flags;
	int ret;

	vq = admin_vq->info->vq;
	if (!vq)
		return -EIO;

	if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
	    opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
	    !((1ULL << opcode) & admin_vq->supported_cmds))
		return -EOPNOTSUPP;

	init_completion(&cmd->completion);

again:
	if (virtqueue_is_broken(vq))
		return -EIO;

	spin_lock_irqsave(&admin_vq->lock, flags);
	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			spin_unlock_irqrestore(&admin_vq->lock, flags);
			cpu_relax();
			goto again;
		}
		goto unlock_err;
	}
	if (!virtqueue_kick(vq))
		goto unlock_err;
	spin_unlock_irqrestore(&admin_vq->lock, flags);

	wait_for_completion(&cmd->completion);

	return cmd->ret;

unlock_err:
	spin_unlock_irqrestore(&admin_vq->lock, flags);
	return -EIO;
}

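/*
 * Build the descriptor chain for an admin command and run it to completion.
 * The chain layout (the reason VIRTIO_AVQ_SGS_MAX is 4) is:
 *
 *	out: command header (struct virtio_admin_cmd_hdr)
 *	out: optional command-specific data (cmd->data_sg)
 *	in:  command status (struct virtio_admin_cmd_status)
 *	in:  optional command-specific result (cmd->result_sg)
 *
 * A minimal caller sketch, mirroring virtio_pci_admin_cmd_list_init()
 * below (illustrative only, not a complete user):
 *
 *	struct virtio_admin_cmd cmd = {};
 *	struct scatterlist result_sg;
 *
 *	sg_init_one(&result_sg, data, sizeof(*data));
 *	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
 *	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
 *	cmd.result_sg = &result_sg;
 *	ret = vp_modern_admin_cmd_exec(vdev, &cmd);
 */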
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
			     struct virtio_admin_cmd *cmd)
{
	struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_admin_cmd_status *va_status;
	unsigned int out_num = 0, in_num = 0;
	struct virtio_admin_cmd_hdr *va_hdr;
	u16 status;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return -EOPNOTSUPP;

	va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
	if (!va_status)
		return -ENOMEM;

	va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
	if (!va_hdr) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	va_hdr->opcode = cmd->opcode;
	va_hdr->group_type = cmd->group_type;
	va_hdr->group_member_id = cmd->group_member_id;

	/* Add header */
	sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
	sgs[out_num] = &hdr;
	out_num++;

	if (cmd->data_sg) {
		sgs[out_num] = cmd->data_sg;
		out_num++;
	}

	/* Add return status */
	sg_init_one(&stat, va_status, sizeof(*va_status));
	sgs[out_num + in_num] = &stat;
	in_num++;

	if (cmd->result_sg) {
		sgs[out_num + in_num] = cmd->result_sg;
		in_num++;
	}

	ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
				       le16_to_cpu(cmd->opcode),
				       sgs, out_num, in_num, cmd);
	if (ret) {
		dev_err(&vdev->dev,
			"Failed to execute command on admin vq: %d\n", ret);
		goto err_cmd_exec;
	}

	status = le16_to_cpu(va_status->status);
	if (status != VIRTIO_ADMIN_STATUS_OK) {
		dev_err(&vdev->dev,
			"admin command error: status(%#x) qualifier(%#x)\n",
			status, le16_to_cpu(va_status->status_qualifier));
		ret = -status;
	}

err_cmd_exec:
	kfree(va_hdr);
err_alloc:
	kfree(va_status);
	return ret;
}

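/*
 * Negotiate the admin command set: LIST_QUERY fetches the device's command
 * bitmap, which is masked with what this driver understands
 * (VIRTIO_ADMIN_CMD_BITMAP) and handed back via LIST_USE.  Failure here is
 * not fatal; supported_cmds simply stays zero and later commands are
 * rejected with -EOPNOTSUPP.
 */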
static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
	struct virtio_admin_cmd cmd = {};
	struct scatterlist result_sg;
	struct scatterlist data_sg;
	__le64 *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	sg_init_one(&result_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.result_sg = &result_sg;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	*data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
	sg_init_one(&data_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
	cmd.data_sg = &data_sg;
	cmd.result_sg = NULL;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
end:
	kfree(data);
}

static void vp_modern_avq_activate(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	virtio_pci_admin_cmd_list_init(vdev);
}

static void vp_modern_avq_cleanup(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_admin_cmd *cmd;
	struct virtqueue *vq;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq;
	if (!vq)
		return;

	while ((cmd = virtqueue_detach_unused_buf(vq))) {
		cmd->ret = -EIO;
		complete(&cmd->completion);
	}
}

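/*
 * Accept the transport-level (PCI) features this driver implements.  Ring
 * features were already filtered by vring_transport_features(); anything not
 * explicitly re-set here stays cleared even if the device offered it.
 * VIRTIO_F_SR_IOV is additionally gated on the SR-IOV extended capability
 * actually being present on the PCI device.
 */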
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
	    pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);

	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);

	if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
		__virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
}

static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
					      u32 offset, const char *fname)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!__virtio_test_bit(vdev, fbit))
		return 0;

	if (likely(vp_dev->mdev.common_len >= offset))
		return 0;

	dev_err(&vdev->dev,
		"virtio: common cfg size(%zu) does not match the feature %s\n",
		vp_dev->mdev.common_len, fname);

	return -EINVAL;
}

#define vp_check_common_size_one_feature(vdev, fbit, field) \
	__vp_check_common_size_one_feature(vdev, fbit, \
		offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)

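/*
 * Each feature checked below needs the device's common config window to be
 * large enough to contain the field that the feature uses.
 * offsetofend(TYPE, MEMBER) expands to offsetof(TYPE, MEMBER) +
 * sizeof(MEMBER), i.e. the smallest common_len that still covers MEMBER;
 * VIRTIO_F_RING_RESET, for example, demands a common cfg reaching through
 * queue_reset.
 */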
static int vp_check_common_size(struct virtio_device *vdev)
{
	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA, queue_notify_data))
		return -EINVAL;

	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
		return -EINVAL;

	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
		return -EINVAL;

	return 0;
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features = vdev->features;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_pci a chance to accept features. */
	vp_transport_features(vdev, features);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	if (vp_check_common_size(vdev))
		return -EINVAL;

	vp_modern_set_features(&vp_dev->mdev, vdev->features);

	return 0;
}

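/*
 * The config accessors below transfer multi-byte fields in little-endian
 * byte order, matching the virtio 1.0 on-the-wire layout, and always use an
 * access of the field's own width.  64-bit fields are split into two 32-bit
 * accesses, low word first.
 */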
/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		b = ioread8(device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_generation(&vp_dev->mdev);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_status(&vp_dev->mdev);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_modern_set_status(&vp_dev->mdev, status);
	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_modern_avq_activate(vdev);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	/* 0 status means a reset. */
	vp_modern_set_status(mdev, 0);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_modern_get_status(mdev))
		msleep(1);

	vp_modern_avq_cleanup(vdev);

	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

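/*
 * Program a virtqueue into the device: queue size, the three ring addresses
 * and, when MSI-X is in use, the queue's interrupt vector.  The device may
 * refuse the vector by reading back VIRTIO_MSI_NO_VECTOR.
 */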
static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	unsigned long index;

	index = vq->index;

	/* activate the queue */
	vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
	vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
				virtqueue_get_avail_addr(vq),
				virtqueue_get_used_addr(vq));

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR)
			return -EBUSY;
	}

	return 0;
}

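/*
 * Per-queue reset (VIRTIO_F_RING_RESET) lets a driver quiesce and
 * re-parameterize one virtqueue without resetting the whole device: trigger
 * the reset, unhook the vq from the interrupt path, and wait out any handler
 * still running.  The mirror operation below re-activates and re-enables the
 * queue.
 */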
static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
		return -ENOENT;

	vp_modern_set_queue_reset(mdev, vq->index);

	info = vp_dev->vqs[vq->index];

	/* delete vq from irq handler */
	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	INIT_LIST_HEAD(&info->node);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_break(vq);
#endif

	/* For the case where vq has an exclusive irq, call synchronize_irq() to
	 * wait for completion.
	 *
	 * note: We can't use disable_irq() since it conflicts with the affinity
	 * managed IRQ that is used by some drivers.
	 */
	if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));

	vq->reset = true;

	return 0;
}

static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags, index;
	int err;

	if (!vq->reset)
		return -EBUSY;

	index = vq->index;
	info = vp_dev->vqs[index];

	if (vp_modern_get_queue_reset(mdev, index))
		return -EBUSY;

	if (vp_modern_get_queue_enable(mdev, index))
		return -EBUSY;

	err = vp_active_vq(vq, info->msix_vector);
	if (err)
		return err;

	if (vq->callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_unbreak(vq);
#endif

	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
	vq->reset = false;

	return 0;
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	return vp_modern_config_vector(&vp_dev->mdev, vector);
}

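/*
 * With VIRTIO_F_NOTIFICATION_DATA the driver writes extra state into the
 * notification: the 32-bit payload assembled by vring_notification_data()
 * carries the vq index plus the ring position (next available index, with a
 * wrap bit for packed rings) instead of the bare queue index written by
 * vp_notify().
 */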
static bool vp_notify_with_data(struct virtqueue *vq)
{
	u32 data = vring_notification_data(vq);

	iowrite32(data, (void __iomem *)vq->priv);

	return true;
}

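/*
 * Allocate one virtqueue and program it into the device.  The queue index
 * must exist (unless it is the admin vq, which lives past the device's
 * regular queue indices), must report a non-zero size, and must not already
 * be enabled.  vq->priv caches the mapped notification address so the notify
 * callback needs no further lookups.
 */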
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned int index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	bool (*notify)(struct virtqueue *vq);
	struct virtqueue *vq;
	bool is_avq;
	u16 num;
	int err;

	if (__virtio_test_bit(&vp_dev->vdev, VIRTIO_F_NOTIFICATION_DATA))
		notify = vp_notify_with_data;
	else
		notify = vp_notify;

	is_avq = vp_is_avq(&vp_dev->vdev, index);
	if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
		return ERR_PTR(-EINVAL);

	num = vp_modern_get_queue_size(mdev, index);
	/* Check if queue is either not available or already active. */
	if (!num || vp_modern_get_queue_enable(mdev, index))
		return ERR_PTR(-ENOENT);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	vq->num_max = num;

	err = vp_active_vq(vq, msix_vec);
	if (err)
		goto err;

	vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
	if (!vq->priv) {
		err = -ENOMEM;
		goto err;
	}

	return vq;

err:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			      struct virtqueue *vqs[],
			      struct virtqueue_info vqs_info[],
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, vqs_info, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list)
		vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	if (vp_dev->msix_enabled)
		vp_modern_queue_vector(mdev, vq->index,
				       VIRTIO_MSI_NO_VECTOR);

	if (!mdev->notify_base)
		pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}

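/*
 * Walk the vendor-specific capability list looking for a
 * VIRTIO_PCI_CAP_SHARED_MEMORY_CFG entry with the requested id.  Shared
 * memory capabilities use the 64-bit layout (struct virtio_pci_cap64), so
 * offset and length are each assembled from a low and a high 32-bit half.
 * Returns the config space position of the capability, or 0 if not found.
 */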
static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
				   u8 *bar, u64 *offset, u64 *len)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, cap_len, id, res_bar;
		u32 tmp32;
		u64 res_offset, res_length;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type), &type);
		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cap_len), &cap_len);
		if (cap_len != sizeof(struct virtio_pci_cap64)) {
			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
				" %d size: %d\n", __func__, pos, cap_len);
			continue;
		}

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 id), &id);
		if (id != required_id)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar), &res_bar);
		if (res_bar >= PCI_STD_NUM_BARS)
			continue;

		/* Type and ID match, and the BAR value isn't reserved.
		 * Looks good.
		 */

		/* Read the lower 32bit of length and offset */
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  offset), &tmp32);
		res_offset = tmp32;
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  length), &tmp32);
		res_length = tmp32;

		/* and now the top half */
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     offset_hi), &tmp32);
		res_offset |= ((u64)tmp32) << 32;
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     length_hi), &tmp32);
		res_length |= ((u64)tmp32) << 32;

		*bar = res_bar;
		*offset = res_offset;
		*len = res_length;

		return pos;
	}
	return 0;
}

static bool vp_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	u8 bar;
	u64 offset, len;
	phys_addr_t phys_addr;
	size_t bar_len;

	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
		return false;

	phys_addr = pci_resource_start(pci_dev, bar);
	bar_len = pci_resource_len(pci_dev, bar);

	if ((offset + len) < offset) {
		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
			__func__);
		return false;
	}

	if (offset + len > bar_len) {
		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
			__func__);
		return false;
	}

	region->len = len;
	region->addr = (u64) phys_addr + offset;

	return true;
}

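/*
 * Two ops tables: virtio_pci_config_nodev_ops is used when the device
 * exposes no device-specific config window (mdev->device is NULL at probe
 * time), so .get/.set stay NULL; otherwise the full virtio_pci_config_ops is
 * installed.
 */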
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get = NULL,
	.set = NULL,
	.generation = vp_generation,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_modern_find_vqs,
	.del_vqs = vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region = vp_get_shm_region,
	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get = vp_get,
	.set = vp_set,
	.generation = vp_generation,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_modern_find_vqs,
	.del_vqs = vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region = vp_get_shm_region,
	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err;

	mdev->pci_dev = pci_dev;

	err = vp_modern_probe(mdev);
	if (err)
		return err;

	if (mdev->device)
		vp_dev->vdev.config = &virtio_pci_config_ops;
	else
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;
	vp_dev->avq_index = vp_avq_index;
	vp_dev->isr = mdev->isr;
	vp_dev->vdev.id = mdev->id;

	spin_lock_init(&vp_dev->admin_vq.lock);
	return 0;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	vp_modern_remove(mdev);
}