// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

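/* Return true if @index is the admin virtqueue of a device that offers
 * VIRTIO_F_ADMIN_VQ. */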
bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return false;

	return index == vp_dev->admin_vq.vq_index;
}

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Notify all slow path virtqueues on an interrupt. */
static void vp_vring_slow_path_interrupt(int irq,
					 struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
		vring_interrupt(irq, info->vq);
	spin_unlock_irqrestore(&vp_dev->lock, flags);
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	vp_vring_slow_path_interrupt(irq, vp_dev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

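/* Allocate @nvectors MSI-X vectors and register the config-change handler on
 * the first one.  With per_vq_vectors the remaining vectors are left for the
 * queues (and spread across CPUs via @desc); otherwise one additional vector
 * is requested and shared by all virtqueues. */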
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned int flags = PCI_IRQ_MSIX;
	unsigned int i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_objs(*vp_dev->msix_names, nvectors);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kzalloc_objs(*vp_dev->msix_affinity_masks, nvectors);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (!per_vq_vectors)
		desc = NULL;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

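/* Slow path virtqueues (currently only the admin queue) share the config
 * change vector instead of getting a vector of their own. */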
static bool vp_is_slow_path_vector(u16 msix_vec)
{
	return msix_vec == VP_MSIX_CONFIG_VECTOR;
}

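/* Create one virtqueue through the transport-specific setup_vq() hook and
 * track it on the fast or slow interrupt list, depending on which MSI-X
 * vector it was given.  Queues without a callback are kept off both lists. */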
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec,
				     struct virtio_pci_vq_info **p_info)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc_obj(*info);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		if (!vp_is_slow_path_vector(msix_vec))
			list_add(&info->node, &vp_dev->virtqueues);
		else
			list_add(&info->node, &vp_dev->slow_virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	*p_info = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

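/* Undo vp_setup_vq(): take the queue off the interrupt list (unless it is
 * already detached because of a virtqueue reset), destroy it through the
 * transport hook and free its bookkeeping info. */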
static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	unsigned long flags;

	/*
	 * If the vq is still in the reset state (e.g. because re-enabling it
	 * after a reset failed), info->node has already been removed from the
	 * interrupt list; don't touch it again.  This also keeps a reset vq
	 * from receiving unexpected irqs.
	 */
	if (!vq->reset) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_del(&info->node);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	}

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info;
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		info = vp_is_avq(vdev, vq->index) ? vp_dev->admin_vq.info :
						    vp_dev->vqs[vq->index];

		if (vp_dev->per_vq_vectors) {
			int v = info->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR &&
			    !vp_is_slow_path_vector(v)) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_update_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq, info);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

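/* How MSI-X vectors are distributed over the virtqueues:
 * EACH        - one vector per vq, plus the config vector,
 * SHARED_SLOW - slow path vqs piggy-back on the config vector, every other
 *               vq still gets its own vector,
 * SHARED      - one vector for config, one shared by all vqs.
 */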
enum vp_vq_vector_policy {
	VP_VQ_VECTOR_POLICY_EACH,
	VP_VQ_VECTOR_POLICY_SHARED_SLOW,
	VP_VQ_VECTOR_POLICY_SHARED,
};

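/* Set up a single vq and, when the chosen policy gives it a dedicated MSI-X
 * vector, request a per-vq irq for it.  Shared and slow path vectors were
 * already wired up in vp_request_msix_vectors(). */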
static struct virtqueue *
vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
		    vq_callback_t *callback, const char *name, bool ctx,
		    bool slow_path, int *allocated_vectors,
		    enum vp_vq_vector_policy vector_policy,
		    struct virtio_pci_vq_info **p_info)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	u16 msix_vec;
	int err;

	if (!callback)
		msix_vec = VIRTIO_MSI_NO_VECTOR;
	else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
		 (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
		  !slow_path))
		msix_vec = (*allocated_vectors)++;
	else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
		 slow_path)
		msix_vec = VP_MSIX_CONFIG_VECTOR;
	else
		msix_vec = VP_MSIX_VQ_VECTOR;
	vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec,
			 p_info);
	if (IS_ERR(vq))
		return vq;

	if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
	    msix_vec == VIRTIO_MSI_NO_VECTOR ||
	    vp_is_slow_path_vector(msix_vec))
		return vq;

	/* allocate per-vq irq if available and necessary */
	snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
		 "%s-%s", dev_name(&vp_dev->vdev.dev), name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
			  vring_interrupt, 0,
			  vp_dev->msix_names[msix_vec], vq);
	if (err) {
		vp_del_vq(vq, *p_info);
		return ERR_PTR(err);
	}

	return vq;
}

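/* find_vqs() back end for the MSI-X case: work out how many vectors the
 * chosen policy needs, allocate them, then create every requested vq and,
 * when the device has one, the admin vq.  Failures unwind via vp_del_vqs(). */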
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[],
			    struct virtqueue_info vqs_info[],
			    enum vp_vq_vector_policy vector_policy,
			    struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
	struct virtqueue_info *vqi;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;
	struct virtqueue *vq;
	bool per_vq_vectors;
	u16 avq_num = 0;

	vp_dev->vqs = kzalloc_objs(*vp_dev->vqs, nvqs);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (vp_dev->avq_index) {
		err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
		if (err)
			goto error_find;
	}

	per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i) {
			vqi = &vqs_info[i];
			if (vqi->name && vqi->callback)
				++nvectors;
		}
		if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
			++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		vqi = &vqs_info[i];
		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
					     vqi->name, vqi->ctx, false,
					     &allocated_vectors, vector_policy,
					     &vp_dev->vqs[i]);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}
	}

	if (!avq_num)
		return 0;
	sprintf(avq->name, "avq.%u", avq->vq_index);
	vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
				 avq->name, false, true, &allocated_vectors,
				 vector_policy, &vp_dev->admin_vq.info);
	if (IS_ERR(vq)) {
		err = PTR_ERR(vq);
		goto error_find;
	}

	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

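/* find_vqs() back end when MSI-X is not available: a single shared INTx
 * interrupt (vp_interrupt) serves configuration changes and every vq. */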
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[],
			    struct virtqueue_info vqs_info[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
	int i, err, queue_idx = 0;
	struct virtqueue *vq;
	u16 avq_num = 0;

	vp_dev->vqs = kzalloc_objs(*vp_dev->vqs, nvqs);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (vp_dev->avq_index) {
		err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
		if (err)
			goto out_del_vqs;
	}

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
				     vqi->name, vqi->ctx,
				     VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	if (!avq_num)
		return 0;
	sprintf(avq->name, "avq.%u", avq->vq_index);
	vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
			 false, VIRTIO_MSI_NO_VECTOR,
			 &vp_dev->admin_vq.info);
	if (IS_ERR(vq)) {
		err = PTR_ERR(vq);
		goto out_del_vqs;
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_EACH, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one shared vector for config and
	 * slow path queues, one vector per queue for the rest.
	 */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_SHARED, desc);
	if (!err)
		return 0;
	/* Is there an interrupt? If not give up. */
	if (!(to_vp_device(vdev)->pci_dev->irq))
		return err;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, vqs_info);
}

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_update_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_and_hint(irq, mask);
		}
	}
	return 0;
}

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
	    vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

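/* True if the device keeps its state across the D3hot->D0 transition, i.e.
 * the PM capability advertises No_Soft_Reset.  In that case system suspend
 * can skip the full virtio freeze/restore cycle. */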
static bool vp_supports_pm_no_reset(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 pmcsr;

	if (!pci_dev->pm_cap)
		return false;

	pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		dev_err(dev, "Unable to query pmcsr");
		return false;
	}

	return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
}

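/* System suspend/resume only go through the heavyweight freeze/restore path
 * when the power transition would reset the device; hibernation (freeze,
 * thaw, poweroff, restore) always uses the full path. */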
static int virtio_pci_suspend(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
}

static int virtio_pci_resume(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	.suspend = virtio_pci_suspend,
	.resume = virtio_pci_resume,
	.freeze = virtio_pci_freeze,
	.thaw = virtio_pci_restore,
	.poweroff = virtio_pci_freeze,
	.restore = virtio_pci_restore,
};
#endif


/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

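/* Bind to a virtio PCI device: try the modern (virtio 1.x) transport first
 * and fall back to the legacy one (or the other way round when force_legacy
 * is set), then register the virtio device with the core. */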
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc_obj(struct virtio_pci_device);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

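/* sysfs sriov_numvfs handler: VFs can only be toggled once a driver has
 * negotiated VIRTIO_F_SR_IOV and set DRIVER_OK, and never while a VF is
 * still assigned to a guest. */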
static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}

static void virtio_pci_reset_prepare(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret = 0;

	ret = virtio_device_reset_prepare(&vp_dev->vdev);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			dev_warn(&pci_dev->dev, "Reset prepare failure: %d",
				 ret);
		return;
	}

	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}

static void virtio_pci_reset_done(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	if (pci_is_enabled(pci_dev))
		return;

	ret = pci_enable_device(pci_dev);
	if (!ret) {
		pci_set_master(pci_dev);
		ret = virtio_device_reset_done(&vp_dev->vdev);
	}

	if (ret && ret != -EOPNOTSUPP)
		dev_warn(&pci_dev->dev, "Reset done failure: %d", ret);
}

static const struct pci_error_handlers virtio_pci_err_handler = {
	.reset_prepare = virtio_pci_reset_prepare,
	.reset_done = virtio_pci_reset_done,
};

static struct pci_driver virtio_pci_driver = {
	.name = "virtio-pci",
	.id_table = virtio_pci_id_table,
	.probe = virtio_pci_probe,
	.remove = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm = &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
	.err_handler = &virtio_pci_err_handler,
};

struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
{
	struct virtio_pci_device *pf_vp_dev;

	pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
	if (IS_ERR(pf_vp_dev))
		return NULL;

	return &pf_vp_dev->vdev;
}

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");