// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU-based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"

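/*
 * An admin command occupies at most four scatterlist entries: the command
 * header and optional command-specific data going to the device, plus the
 * status word and optional command-specific result coming back.
 */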
#define VIRTIO_AVQ_SGS_MAX	4

static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_features(&vp_dev->mdev);
}

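/* Report the admin virtqueue index and count, if the device offers one. */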
static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	*num = 0;
	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return 0;

	*num = vp_modern_avq_num(&vp_dev->mdev);
	if (!(*num))
		return -EINVAL;
	*index = vp_modern_avq_index(&vp_dev->mdev);
	return 0;
}

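/*
 * Admin virtqueue completion handler: reap finished commands and wake their
 * waiters.  The disable/get_buf/enable loop rechecks the ring after
 * re-enabling callbacks so a completion racing with the re-enable is not
 * missed.
 */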
void vp_modern_avq_done(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
	struct virtio_admin_cmd *cmd;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&admin_vq->lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((cmd = virtqueue_get_buf(vq, &len)))
			complete(&cmd->completion);
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&admin_vq->lock, flags);
}

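/*
 * Submit one admin command and block until the device completes it.  If the
 * ring is temporarily full (-ENOSPC), drop the lock and retry; completions
 * are signalled from vp_modern_avq_done().
 */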
static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
				    u16 opcode,
				    struct scatterlist **sgs,
				    unsigned int out_num,
				    unsigned int in_num,
				    struct virtio_admin_cmd *cmd)
{
	struct virtqueue *vq;
	unsigned long flags;
	int ret;

	vq = admin_vq->info->vq;
	if (!vq)
		return -EIO;

	if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
	    opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
	    !((1ULL << opcode) & admin_vq->supported_cmds))
		return -EOPNOTSUPP;

	init_completion(&cmd->completion);

again:
	if (virtqueue_is_broken(vq))
		return -EIO;

	spin_lock_irqsave(&admin_vq->lock, flags);
	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			spin_unlock_irqrestore(&admin_vq->lock, flags);
			cpu_relax();
			goto again;
		}
		goto unlock_err;
	}
	if (!virtqueue_kick(vq))
		goto unlock_err;
	spin_unlock_irqrestore(&admin_vq->lock, flags);

	wait_for_completion(&cmd->completion);

	return cmd->ret;

unlock_err:
	spin_unlock_irqrestore(&admin_vq->lock, flags);
	return -EIO;
}

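/*
 * Build the scatterlist for an admin command and run it.  Device-readable
 * entries (header, optional data) come first, followed by device-writable
 * entries (status, optional result), matching the VIRTIO_AVQ_SGS_MAX bound.
 */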
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
			     struct virtio_admin_cmd *cmd)
{
	struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_admin_cmd_status *va_status;
	unsigned int out_num = 0, in_num = 0;
	struct virtio_admin_cmd_hdr *va_hdr;
	u16 status;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return -EOPNOTSUPP;

	va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
	if (!va_status)
		return -ENOMEM;

	va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
	if (!va_hdr) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	va_hdr->opcode = cmd->opcode;
	va_hdr->group_type = cmd->group_type;
	va_hdr->group_member_id = cmd->group_member_id;

	/* Add header */
	sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
	sgs[out_num] = &hdr;
	out_num++;

	if (cmd->data_sg) {
		sgs[out_num] = cmd->data_sg;
		out_num++;
	}

	/* Add return status */
	sg_init_one(&stat, va_status, sizeof(*va_status));
	sgs[out_num + in_num] = &stat;
	in_num++;

	if (cmd->result_sg) {
		sgs[out_num + in_num] = cmd->result_sg;
		in_num++;
	}

	ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
				       le16_to_cpu(cmd->opcode),
				       sgs, out_num, in_num, cmd);
	if (ret) {
		dev_err(&vdev->dev,
			"Failed to execute command on admin vq: %d\n", ret);
		goto err_cmd_exec;
	}

	status = le16_to_cpu(va_status->status);
	if (status != VIRTIO_ADMIN_STATUS_OK) {
		dev_err(&vdev->dev,
			"admin command error: status(%#x) qualifier(%#x)\n",
			status, le16_to_cpu(va_status->status_qualifier));
		ret = -status;
	}

err_cmd_exec:
	kfree(va_hdr);
err_alloc:
	kfree(va_status);
	return ret;
}

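/*
 * Negotiate the set of supported admin commands: query the device's command
 * list, mask it with the commands this driver understands, and tell the
 * device which commands will be used.
 */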
static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
	struct virtio_admin_cmd cmd = {};
	struct scatterlist result_sg;
	struct scatterlist data_sg;
	__le64 *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	sg_init_one(&result_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.result_sg = &result_sg;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	*data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
	sg_init_one(&data_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
	cmd.data_sg = &data_sg;
	cmd.result_sg = NULL;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
end:
	kfree(data);
}

static void vp_modern_avq_activate(struct virtio_device *vdev)
{
	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	virtio_pci_admin_cmd_list_init(vdev);
}

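/*
 * On device reset, fail any admin commands still sitting in the ring and
 * wake their waiters.
 */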
static void vp_modern_avq_cleanup(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_admin_cmd *cmd;
	struct virtqueue *vq;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	vq = vp_dev->admin_vq.info->vq;
	if (!vq)
		return;

	while ((cmd = virtqueue_detach_unused_buf(vq))) {
		cmd->ret = -EIO;
		complete(&cmd->completion);
	}
}

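/* Accept the transport-level features that this PCI transport implements. */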
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);

	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);

	if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
		__virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
}

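/*
 * A feature is only usable if the device's common config structure is large
 * enough to contain the fields that the feature relies on.
 */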
static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
					      u32 offset, const char *fname)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!__virtio_test_bit(vdev, fbit))
		return 0;

	if (likely(vp_dev->mdev.common_len >= offset))
		return 0;

	dev_err(&vdev->dev,
		"virtio: common cfg size(%zu) does not match the feature %s\n",
		vp_dev->mdev.common_len, fname);

	return -EINVAL;
}

#define vp_check_common_size_one_feature(vdev, fbit, field) \
	__vp_check_common_size_one_feature(vdev, fbit, \
		offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)

static int vp_check_common_size(struct virtio_device *vdev)
{
	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA, queue_notify_data))
		return -EINVAL;

	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
		return -EINVAL;

	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
		return -EINVAL;

	return 0;
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features = vdev->features;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_pci a chance to accept features. */
	vp_transport_features(vdev, features);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	if (vp_check_common_size(vdev))
		return -EINVAL;

	vp_modern_set_features(&vp_dev->mdev, vdev->features);

	return 0;
}

/* virtio config->get() implementation */
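/*
 * Device config fields are little-endian; accesses are done in 1, 2 or 4
 * byte chunks, with 64-bit fields read as two 32-bit halves (low, then
 * high).
 */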
static void vp_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		b = ioread8(device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_generation(&vp_dev->mdev);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_status(&vp_dev->mdev);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_modern_set_status(&vp_dev->mdev, status);
	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_modern_avq_activate(vdev);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	/* 0 status means a reset. */
	vp_modern_set_status(mdev, 0);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_modern_get_status(mdev))
		msleep(1);

	vp_modern_avq_cleanup(vdev);

	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

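/*
 * Program the queue size, ring addresses and (optionally) the MSI-X vector
 * for a virtqueue.  The queue is not enabled here.
 */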
static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	unsigned long index;

	index = vq->index;

	/* activate the queue */
	vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
	vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
				virtqueue_get_avail_addr(vq),
				virtqueue_get_used_addr(vq));

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR)
			return -EBUSY;
	}

	return 0;
}

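/*
 * Reset a single virtqueue (VIRTIO_F_RING_RESET) and detach it from the
 * interrupt path so it can safely be re-sized or re-enabled later.
 */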
static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
		return -ENOENT;

	vp_modern_set_queue_reset(mdev, vq->index);

	info = vp_dev->vqs[vq->index];

	/* delete vq from irq handler */
	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	INIT_LIST_HEAD(&info->node);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_break(vq);
#endif

	/* For the case where vq has an exclusive irq, call synchronize_irq() to
	 * wait for completion.
	 *
	 * note: We can't use disable_irq() since it conflicts with the affinity
	 * managed IRQ that is used by some drivers.
	 */
	if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));

	vq->reset = true;

	return 0;
}

static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags, index;
	int err;

	if (!vq->reset)
		return -EBUSY;

	index = vq->index;
	info = vp_dev->vqs[index];

	if (vp_modern_get_queue_reset(mdev, index))
		return -EBUSY;

	if (vp_modern_get_queue_enable(mdev, index))
		return -EBUSY;

	err = vp_active_vq(vq, info->msix_vector);
	if (err)
		return err;

	if (vq->callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_unbreak(vq);
#endif

	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
	vq->reset = false;

	return 0;
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	return vp_modern_config_vector(&vp_dev->mdev, vector);
}

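/*
 * With VIRTIO_F_NOTIFICATION_DATA, the notification write carries ring
 * state (as provided by vring_notification_data()) rather than only the
 * queue index.
 */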
static bool vp_notify_with_data(struct virtqueue *vq)
{
	u32 data = vring_notification_data(vq);

	iowrite32(data, (void __iomem *)vq->priv);

	return true;
}

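/*
 * Create a virtqueue: allocate the vring, program its size, addresses and
 * MSI-X vector, and map its notification area.  The queue is enabled later
 * in vp_modern_find_vqs().
 */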
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned int index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	bool (*notify)(struct virtqueue *vq);
	struct virtqueue *vq;
	bool is_avq;
	u16 num;
	int err;

	if (__virtio_test_bit(&vp_dev->vdev, VIRTIO_F_NOTIFICATION_DATA))
		notify = vp_notify_with_data;
	else
		notify = vp_notify;

	is_avq = vp_is_avq(&vp_dev->vdev, index);
	if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
		return ERR_PTR(-EINVAL);

	num = vp_modern_get_queue_size(mdev, index);
	/* Check if queue is either not available or already active. */
	if (!num || vp_modern_get_queue_enable(mdev, index))
		return ERR_PTR(-ENOENT);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	vq->num_max = num;

	err = vp_active_vq(vq, msix_vec);
	if (err)
		goto err;

	vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
	if (!vq->priv) {
		err = -ENOMEM;
		goto err;
	}

	return vq;

err:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			      struct virtqueue *vqs[],
			      struct virtqueue_info vqs_info[],
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, vqs_info, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list)
		vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	if (vp_dev->msix_enabled)
		vp_modern_queue_vector(mdev, vq->index,
				       VIRTIO_MSI_NO_VECTOR);

	if (!mdev->notify_base)
		pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}

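/*
 * Walk the vendor-specific PCI capabilities looking for a shared memory
 * capability with the requested id, and return its BAR plus the 64-bit
 * offset and length assembled from the low and high halves.
 */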
static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
				   u8 *bar, u64 *offset, u64 *len)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, cap_len, id, res_bar;
		u32 tmp32;
		u64 res_offset, res_length;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type), &type);
		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cap_len), &cap_len);
		if (cap_len != sizeof(struct virtio_pci_cap64)) {
			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
				" %d size: %d\n", __func__, pos, cap_len);
			continue;
		}

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 id), &id);
		if (id != required_id)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar), &res_bar);
		if (res_bar >= PCI_STD_NUM_BARS)
			continue;

		/* Type and ID match, and the BAR value isn't reserved.
		 * Looks good.
		 */

		/* Read the lower 32bit of length and offset */
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  offset), &tmp32);
		res_offset = tmp32;
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  length), &tmp32);
		res_length = tmp32;

		/* and now the top half */
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     offset_hi), &tmp32);
		res_offset |= ((u64)tmp32) << 32;
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     length_hi), &tmp32);
		res_length |= ((u64)tmp32) << 32;

		*bar = res_bar;
		*offset = res_offset;
		*len = res_length;

		return pos;
	}
	return 0;
}

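/*
 * Translate a shared memory capability into a physical address range,
 * rejecting regions that overflow or fall outside their BAR.
 */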
static bool vp_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	u8 bar;
	u64 offset, len;
	phys_addr_t phys_addr;
	size_t bar_len;

	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
		return false;

	phys_addr = pci_resource_start(pci_dev, bar);
	bar_len = pci_resource_len(pci_dev, bar);

	if ((offset + len) < offset) {
		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
			__func__);
		return false;
	}

	if (offset + len > bar_len) {
		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
			__func__);
		return false;
	}

	region->len = len;
	region->addr = (u64) phys_addr + offset;

	return true;
}

static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region  = vp_get_shm_region,
	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region  = vp_get_shm_region,
	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
};

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err;

	mdev->pci_dev = pci_dev;

	err = vp_modern_probe(mdev);
	if (err)
		return err;

	if (mdev->device)
		vp_dev->vdev.config = &virtio_pci_config_ops;
	else
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;
	vp_dev->avq_index = vp_avq_index;
	vp_dev->isr = mdev->isr;
	vp_dev->vdev.id = mdev->id;

	spin_lock_init(&vp_dev->admin_vq.lock);
	return 0;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	vp_modern_remove(mdev);
}