1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Virtio PCI driver - modern (virtio 1.0) device support
4 *
5 * This module allows virtio devices to be used over a virtual PCI device.
6 * This can be used with VMMs such as QEMU/KVM or Xen.
7 *
8 * Copyright IBM Corp. 2007
9 * Copyright Red Hat, Inc. 2014
10 *
11 * Authors:
12 * Anthony Liguori <aliguori@us.ibm.com>
13 * Rusty Russell <rusty@rustcorp.com.au>
14 * Michael S. Tsirkin <mst@redhat.com>
15 */
16
17 #include <linux/delay.h>
18 #include <linux/virtio_pci_admin.h>
19 #define VIRTIO_PCI_NO_LEGACY
20 #define VIRTIO_RING_NO_LEGACY
21 #include "virtio_pci_common.h"
22
23 #define VIRTIO_AVQ_SGS_MAX 4
24
25 static u64 vp_get_features(struct virtio_device *vdev)
26 {
27 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
28
29 return vp_modern_get_features(&vp_dev->mdev);
30 }
31
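/*
 * Report the admin virtqueue index and the number of admin queues.
 * *num is left at 0 if VIRTIO_F_ADMIN_VQ was not negotiated.
 */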
32 static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
33 {
34 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
35
36 *num = 0;
37 if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
38 return 0;
39
40 *num = vp_modern_avq_num(&vp_dev->mdev);
41 if (!(*num))
42 return -EINVAL;
43 *index = vp_modern_avq_index(&vp_dev->mdev);
44 return 0;
45 }
46
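/*
 * Admin virtqueue completion handler: reap every used buffer and wake
 * the submitter waiting in virtqueue_exec_admin_cmd().
 */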
47 void vp_modern_avq_done(struct virtqueue *vq)
48 {
49 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
50 struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
51 unsigned int status_size = sizeof(struct virtio_admin_cmd_status);
52 struct virtio_admin_cmd *cmd;
53 unsigned long flags;
54 unsigned int len;
55
56 spin_lock_irqsave(&admin_vq->lock, flags);
57 do {
58 virtqueue_disable_cb(vq);
59 while ((cmd = virtqueue_get_buf(vq, &len))) {
60 /* If the number of bytes written by the device is less
61 * than the size of struct virtio_admin_cmd_status, the
62 * remaining status bytes will remain zero-initialized,
63 * since the buffer was zeroed during allocation.
64 * In this case, set the size of command_specific_result
65 * to 0.
66 */
67 if (len < status_size)
68 cmd->result_sg_size = 0;
69 else
70 cmd->result_sg_size = len - status_size;
71 complete(&cmd->completion);
72 }
73 } while (!virtqueue_enable_cb(vq));
74 spin_unlock_irqrestore(&admin_vq->lock, flags);
75 }
76
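/*
 * Queue an admin command on the admin virtqueue and wait for the device
 * to complete it. LIST_QUERY/LIST_USE are always allowed; other opcodes
 * must be present in the supported_cmds bitmap advertised by the device.
 */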
77 static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
78 u16 opcode,
79 struct scatterlist **sgs,
80 unsigned int out_num,
81 unsigned int in_num,
82 struct virtio_admin_cmd *cmd)
83 {
84 struct virtqueue *vq;
85 unsigned long flags;
86 int ret;
87
88 vq = admin_vq->info->vq;
89 if (!vq)
90 return -EIO;
91
92 if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
93 opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
94 !((1ULL << opcode) & admin_vq->supported_cmds))
95 return -EOPNOTSUPP;
96
97 init_completion(&cmd->completion);
98
99 again:
100 if (virtqueue_is_broken(vq))
101 return -EIO;
102
103 spin_lock_irqsave(&admin_vq->lock, flags);
104 ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
105 if (ret < 0) {
106 if (ret == -ENOSPC) {
107 spin_unlock_irqrestore(&admin_vq->lock, flags);
108 cpu_relax();
109 goto again;
110 }
111 goto unlock_err;
112 }
113 if (!virtqueue_kick(vq))
114 goto unlock_err;
115 spin_unlock_irqrestore(&admin_vq->lock, flags);
116
117 wait_for_completion(&cmd->completion);
118
119 return cmd->ret;
120
121 unlock_err:
122 spin_unlock_irqrestore(&admin_vq->lock, flags);
123 return -EIO;
124 }
125
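/*
 * Build the scatterlist for an admin command (header, optional data,
 * status, optional result) and execute it on the admin virtqueue.
 * A non-OK device status is logged and returned negated.
 */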
126 int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
127 struct virtio_admin_cmd *cmd)
128 {
129 struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
130 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
131 struct virtio_admin_cmd_status *va_status;
132 unsigned int out_num = 0, in_num = 0;
133 struct virtio_admin_cmd_hdr *va_hdr;
134 u16 status;
135 int ret;
136
137 if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
138 return -EOPNOTSUPP;
139
140 va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
141 if (!va_status)
142 return -ENOMEM;
143
144 va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
145 if (!va_hdr) {
146 ret = -ENOMEM;
147 goto err_alloc;
148 }
149
150 va_hdr->opcode = cmd->opcode;
151 va_hdr->group_type = cmd->group_type;
152 va_hdr->group_member_id = cmd->group_member_id;
153
154 /* Add header */
155 sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
156 sgs[out_num] = &hdr;
157 out_num++;
158
159 if (cmd->data_sg) {
160 sgs[out_num] = cmd->data_sg;
161 out_num++;
162 }
163
164 /* Add return status */
165 sg_init_one(&stat, va_status, sizeof(*va_status));
166 sgs[out_num + in_num] = &stat;
167 in_num++;
168
169 if (cmd->result_sg) {
170 sgs[out_num + in_num] = cmd->result_sg;
171 in_num++;
172 }
173
174 ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
175 le16_to_cpu(cmd->opcode),
176 sgs, out_num, in_num, cmd);
177 if (ret) {
178 dev_err(&vdev->dev,
179 "Failed to execute command on admin vq: %d\n.", ret);
180 goto err_cmd_exec;
181 }
182
183 status = le16_to_cpu(va_status->status);
184 if (status != VIRTIO_ADMIN_STATUS_OK) {
185 dev_err(&vdev->dev,
186 "admin command error: status(%#x) qualifier(%#x)\n",
187 status, le16_to_cpu(va_status->status_qualifier));
188 ret = -status;
189 }
190
191 err_cmd_exec:
192 kfree(va_hdr);
193 err_alloc:
194 kfree(va_status);
195 return ret;
196 }
197
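/*
 * Negotiate the admin command set: query which commands the device
 * supports (LIST_QUERY), tell it which ones will be used (LIST_USE) and
 * cache the result in admin_vq.supported_cmds.
 */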
198 static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
199 {
200 struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
201 struct virtio_admin_cmd cmd = {};
202 struct scatterlist result_sg;
203 struct scatterlist data_sg;
204 __le64 *data;
205 int ret;
206
207 data = kzalloc(sizeof(*data), GFP_KERNEL);
208 if (!data)
209 return;
210
211 sg_init_one(&result_sg, data, sizeof(*data));
212 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
213 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
214 cmd.result_sg = &result_sg;
215
216 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
217 if (ret)
218 goto end;
219
220 *data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
221 sg_init_one(&data_sg, data, sizeof(*data));
222 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
223 cmd.data_sg = &data_sg;
224 cmd.result_sg = NULL;
225
226 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
227 if (ret)
228 goto end;
229
230 vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
231 end:
232 kfree(data);
233 }
234
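/*
 * Query the VIRTIO_DEV_PARTS_CAP capability, clamp the GET/SET resource
 * object limits to a common value, report it back via DRIVER_CAP_SET and
 * initialize the IDA used to hand out dev parts object ids.
 */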
235 static void
236 virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
237 {
238 struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
239 struct virtio_admin_cmd_cap_get_data *get_data;
240 struct virtio_admin_cmd_cap_set_data *set_data;
241 struct virtio_dev_parts_cap *result;
242 struct virtio_admin_cmd cmd = {};
243 struct scatterlist result_sg;
244 struct scatterlist data_sg;
245 u8 resource_objects_limit;
246 u16 set_data_size;
247 int ret;
248
249 get_data = kzalloc(sizeof(*get_data), GFP_KERNEL);
250 if (!get_data)
251 return;
252
253 result = kzalloc(sizeof(*result), GFP_KERNEL);
254 if (!result)
255 goto end;
256
257 get_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
258 sg_init_one(&data_sg, get_data, sizeof(*get_data));
259 sg_init_one(&result_sg, result, sizeof(*result));
260 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
261 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
262 cmd.data_sg = &data_sg;
263 cmd.result_sg = &result_sg;
264 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
265 if (ret)
266 goto err_get;
267
268 set_data_size = sizeof(*set_data) + sizeof(*result);
269 set_data = kzalloc(set_data_size, GFP_KERNEL);
270 if (!set_data)
271 goto err_get;
272
273 set_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
274
275 /* Set the limit to the minimum value between the GET and SET values
276 * supported by the device. Since the obj_id for VIRTIO_DEV_PARTS_CAP
277 * is a globally unique value per PF, there is no possibility of
278 * overlap between GET and SET operations.
279 */
280 resource_objects_limit = min(result->get_parts_resource_objects_limit,
281 result->set_parts_resource_objects_limit);
282 result->get_parts_resource_objects_limit = resource_objects_limit;
283 result->set_parts_resource_objects_limit = resource_objects_limit;
284 memcpy(set_data->cap_specific_data, result, sizeof(*result));
285 sg_init_one(&data_sg, set_data, set_data_size);
286 cmd.data_sg = &data_sg;
287 cmd.result_sg = NULL;
288 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET);
289 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
290 if (ret)
291 goto err_set;
292
293 /* Initialize the IDA used to manage the dev parts objects */
294 ida_init(&vp_dev->admin_vq.dev_parts_ida);
295 vp_dev->admin_vq.max_dev_parts_objects = resource_objects_limit;
296
297 err_set:
298 kfree(set_data);
299 err_get:
300 kfree(result);
301 end:
302 kfree(get_data);
303 }
304
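/*
 * Query the device capability id list and, if the device parts
 * capability is supported, enable the dev parts objects.
 */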
305 static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
306 {
307 struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
308 struct virtio_admin_cmd_query_cap_id_result *data;
309 struct virtio_admin_cmd cmd = {};
310 struct scatterlist result_sg;
311 int ret;
312
313 data = kzalloc(sizeof(*data), GFP_KERNEL);
314 if (!data)
315 return;
316
317 sg_init_one(&result_sg, data, sizeof(*data));
318 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
319 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
320 cmd.result_sg = &result_sg;
321
322 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
323 if (ret)
324 goto end;
325
326 /* Max number of caps fits into a single u64 */
327 BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64));
328
329 vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]);
330
331 if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP)))
332 goto end;
333
334 virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev);
335 end:
336 kfree(data);
337 }
338
339 static void vp_modern_avq_activate(struct virtio_device *vdev)
340 {
341 if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
342 return;
343
344 virtio_pci_admin_cmd_list_init(vdev);
345 virtio_pci_admin_cmd_cap_init(vdev);
346 }
347
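/*
 * Fail any admin commands still in flight at reset time so that their
 * submitters are not left waiting forever.
 */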
348 static void vp_modern_avq_cleanup(struct virtio_device *vdev)
349 {
350 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
351 struct virtio_admin_cmd *cmd;
352 struct virtqueue *vq;
353
354 if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
355 return;
356
357 vq = vp_dev->admin_vq.info->vq;
358 if (!vq)
359 return;
360
361 while ((cmd = virtqueue_detach_unused_buf(vq))) {
362 cmd->ret = -EIO;
363 complete(&cmd->completion);
364 }
365 }
366
367 static void vp_transport_features(struct virtio_device *vdev, u64 features)
368 {
369 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
370 struct pci_dev *pci_dev = vp_dev->pci_dev;
371
372 if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
373 pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
374 __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
375
376 if (features & BIT_ULL(VIRTIO_F_RING_RESET))
377 __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
378
379 if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
380 __virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
381 }
382
383 static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
384 u32 offset, const char *fname)
385 {
386 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
387
388 if (!__virtio_test_bit(vdev, fbit))
389 return 0;
390
391 if (likely(vp_dev->mdev.common_len >= offset))
392 return 0;
393
394 dev_err(&vdev->dev,
395 "virtio: common cfg size(%zu) does not match the feature %s\n",
396 vp_dev->mdev.common_len, fname);
397
398 return -EINVAL;
399 }
400
401 #define vp_check_common_size_one_feature(vdev, fbit, field) \
402 __vp_check_common_size_one_feature(vdev, fbit, \
403 offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)
404
405 static int vp_check_common_size(struct virtio_device *vdev)
406 {
407 if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA, queue_notify_data))
408 return -EINVAL;
409
410 if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
411 return -EINVAL;
412
413 if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
414 return -EINVAL;
415
416 return 0;
417 }
418
419 /* virtio config->finalize_features() implementation */
420 static int vp_finalize_features(struct virtio_device *vdev)
421 {
422 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
423 u64 features = vdev->features;
424
425 /* Give virtio_ring a chance to accept features. */
426 vring_transport_features(vdev);
427
428 /* Give virtio_pci a chance to accept features. */
429 vp_transport_features(vdev, features);
430
431 if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
432 dev_err(&vdev->dev, "virtio: device uses modern interface "
433 "but does not have VIRTIO_F_VERSION_1\n");
434 return -EINVAL;
435 }
436
437 if (vp_check_common_size(vdev))
438 return -EINVAL;
439
440 vp_modern_set_features(&vp_dev->mdev, vdev->features);
441
442 return 0;
443 }
444
445 /* virtio config->get() implementation */
446 static void vp_get(struct virtio_device *vdev, unsigned int offset,
447 void *buf, unsigned int len)
448 {
449 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
450 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
451 void __iomem *device = mdev->device;
452 u8 b;
453 __le16 w;
454 __le32 l;
455
456 BUG_ON(offset + len > mdev->device_len);
457
458 switch (len) {
459 case 1:
460 b = ioread8(device + offset);
461 memcpy(buf, &b, sizeof b);
462 break;
463 case 2:
464 w = cpu_to_le16(ioread16(device + offset));
465 memcpy(buf, &w, sizeof w);
466 break;
467 case 4:
468 l = cpu_to_le32(ioread32(device + offset));
469 memcpy(buf, &l, sizeof l);
470 break;
471 case 8:
472 l = cpu_to_le32(ioread32(device + offset));
473 memcpy(buf, &l, sizeof l);
474 l = cpu_to_le32(ioread32(device + offset + sizeof l));
475 memcpy(buf + sizeof l, &l, sizeof l);
476 break;
477 default:
478 BUG();
479 }
480 }
481
482 /* the config->set() implementation. it's symmetric to the config->get()
483 * implementation */
484 static void vp_set(struct virtio_device *vdev, unsigned int offset,
485 const void *buf, unsigned int len)
486 {
487 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
488 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
489 void __iomem *device = mdev->device;
490 u8 b;
491 __le16 w;
492 __le32 l;
493
494 BUG_ON(offset + len > mdev->device_len);
495
496 switch (len) {
497 case 1:
498 memcpy(&b, buf, sizeof b);
499 iowrite8(b, device + offset);
500 break;
501 case 2:
502 memcpy(&w, buf, sizeof w);
503 iowrite16(le16_to_cpu(w), device + offset);
504 break;
505 case 4:
506 memcpy(&l, buf, sizeof l);
507 iowrite32(le32_to_cpu(l), device + offset);
508 break;
509 case 8:
510 memcpy(&l, buf, sizeof l);
511 iowrite32(le32_to_cpu(l), device + offset);
512 memcpy(&l, buf + sizeof l, sizeof l);
513 iowrite32(le32_to_cpu(l), device + offset + sizeof l);
514 break;
515 default:
516 BUG();
517 }
518 }
519
520 static u32 vp_generation(struct virtio_device *vdev)
521 {
522 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
523
524 return vp_modern_generation(&vp_dev->mdev);
525 }
526
527 /* config->{get,set}_status() implementations */
528 static u8 vp_get_status(struct virtio_device *vdev)
529 {
530 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
531
532 return vp_modern_get_status(&vp_dev->mdev);
533 }
534
535 static void vp_set_status(struct virtio_device *vdev, u8 status)
536 {
537 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
538
539 /* We should never be setting status to 0. */
540 BUG_ON(status == 0);
541 vp_modern_set_status(&vp_dev->mdev, status);
542 if (status & VIRTIO_CONFIG_S_DRIVER_OK)
543 vp_modern_avq_activate(vdev);
544 }
545
546 static void vp_reset(struct virtio_device *vdev)
547 {
548 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
549 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
550
551 /* 0 status means a reset. */
552 vp_modern_set_status(mdev, 0);
553 /* After writing 0 to device_status, the driver MUST wait for a read of
554 * device_status to return 0 before reinitializing the device.
555 * This will flush out the status write, and flush in device writes,
556 * including MSI-X interrupts, if any.
557 */
558 while (vp_modern_get_status(mdev))
559 msleep(1);
560
561 vp_modern_avq_cleanup(vdev);
562
563 /* Flush pending VQ/configuration callbacks. */
564 vp_synchronize_vectors(vdev);
565 }
566
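/* Program the queue size, ring addresses and MSI-X vector of a vq. */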
567 static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
568 {
569 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
570 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
571 unsigned long index;
572
573 index = vq->index;
574
575 /* activate the queue */
576 vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
577 vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
578 virtqueue_get_avail_addr(vq),
579 virtqueue_get_used_addr(vq));
580
581 if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
582 msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
583 if (msix_vec == VIRTIO_MSI_NO_VECTOR)
584 return -EBUSY;
585 }
586
587 return 0;
588 }
589
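/*
 * Reset a single virtqueue (VIRTIO_F_RING_RESET): request the queue
 * reset, unlink the vq from the interrupt handling list and make sure
 * no callback for it is still running.
 */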
590 static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
591 {
592 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
593 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
594 struct virtio_pci_vq_info *info;
595 unsigned long flags;
596
597 if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
598 return -ENOENT;
599
600 vp_modern_set_queue_reset(mdev, vq->index);
601
602 info = vp_dev->vqs[vq->index];
603
604 /* delete vq from irq handler */
605 spin_lock_irqsave(&vp_dev->lock, flags);
606 list_del(&info->node);
607 spin_unlock_irqrestore(&vp_dev->lock, flags);
608
609 INIT_LIST_HEAD(&info->node);
610
611 #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
612 __virtqueue_break(vq);
613 #endif
614
615 /* For the case where vq has an exclusive irq, call synchronize_irq() to
616 * wait for completion.
617 *
618 * note: We can't use disable_irq() since it conflicts with the affinity
619 * managed IRQ that is used by some drivers.
620 */
621 if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
622 synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));
623
624 vq->reset = true;
625
626 return 0;
627 }
628
629 static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
630 {
631 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
632 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
633 struct virtio_pci_vq_info *info;
634 unsigned long flags, index;
635 int err;
636
637 if (!vq->reset)
638 return -EBUSY;
639
640 index = vq->index;
641 info = vp_dev->vqs[index];
642
643 if (vp_modern_get_queue_reset(mdev, index))
644 return -EBUSY;
645
646 if (vp_modern_get_queue_enable(mdev, index))
647 return -EBUSY;
648
649 err = vp_active_vq(vq, info->msix_vector);
650 if (err)
651 return err;
652
653 if (vq->callback) {
654 spin_lock_irqsave(&vp_dev->lock, flags);
655 list_add(&info->node, &vp_dev->virtqueues);
656 spin_unlock_irqrestore(&vp_dev->lock, flags);
657 } else {
658 INIT_LIST_HEAD(&info->node);
659 }
660
661 #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
662 __virtqueue_unbreak(vq);
663 #endif
664
665 vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
666 vq->reset = false;
667
668 return 0;
669 }
670
671 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
672 {
673 return vp_modern_config_vector(&vp_dev->mdev, vector);
674 }
675
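/* Notify variant used when VIRTIO_F_NOTIFICATION_DATA is negotiated. */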
676 static bool vp_notify_with_data(struct virtqueue *vq)
677 {
678 u32 data = vring_notification_data(vq);
679
680 iowrite32(data, (void __iomem *)vq->priv);
681
682 return true;
683 }
684
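/*
 * Create one virtqueue: allocate the ring, program its size, addresses
 * and vector into the device and map its notification area. The queue
 * is only enabled later, in vp_modern_find_vqs().
 */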
685 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
686 struct virtio_pci_vq_info *info,
687 unsigned int index,
688 void (*callback)(struct virtqueue *vq),
689 const char *name,
690 bool ctx,
691 u16 msix_vec)
692 {
693
694 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
695 bool (*notify)(struct virtqueue *vq);
696 struct virtqueue *vq;
697 bool is_avq;
698 u16 num;
699 int err;
700
701 if (__virtio_test_bit(&vp_dev->vdev, VIRTIO_F_NOTIFICATION_DATA))
702 notify = vp_notify_with_data;
703 else
704 notify = vp_notify;
705
706 is_avq = vp_is_avq(&vp_dev->vdev, index);
707 if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
708 return ERR_PTR(-EINVAL);
709
710 num = vp_modern_get_queue_size(mdev, index);
711 /* Check if queue is either not available or already active. */
712 if (!num || vp_modern_get_queue_enable(mdev, index))
713 return ERR_PTR(-ENOENT);
714
715 info->msix_vector = msix_vec;
716
717 /* create the vring */
718 vq = vring_create_virtqueue(index, num,
719 SMP_CACHE_BYTES, &vp_dev->vdev,
720 true, true, ctx,
721 notify, callback, name);
722 if (!vq)
723 return ERR_PTR(-ENOMEM);
724
725 vq->num_max = num;
726
727 err = vp_active_vq(vq, msix_vec);
728 if (err)
729 goto err;
730
731 vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
732 if (!vq->priv) {
733 err = -ENOMEM;
734 goto err;
735 }
736
737 return vq;
738
739 err:
740 vring_del_virtqueue(vq);
741 return ERR_PTR(err);
742 }
743
744 static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
745 struct virtqueue *vqs[],
746 struct virtqueue_info vqs_info[],
747 struct irq_affinity *desc)
748 {
749 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
750 struct virtqueue *vq;
751 int rc = vp_find_vqs(vdev, nvqs, vqs, vqs_info, desc);
752
753 if (rc)
754 return rc;
755
756 /* Select and activate all queues. Has to be done last: once we do
757 * this, there's no way to go back except reset.
758 */
759 list_for_each_entry(vq, &vdev->vqs, list)
760 vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);
761
762 return 0;
763 }
764
765 static void del_vq(struct virtio_pci_vq_info *info)
766 {
767 struct virtqueue *vq = info->vq;
768 struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
769 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
770
771 if (vp_dev->msix_enabled)
772 vp_modern_queue_vector(mdev, vq->index,
773 VIRTIO_MSI_NO_VECTOR);
774
775 if (!mdev->notify_base)
776 pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);
777
778 vring_del_virtqueue(vq);
779 }
780
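/*
 * Walk the vendor-specific PCI capabilities looking for a shared memory
 * region with the given id. Returns the capability offset on success,
 * or 0 if none was found.
 */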
781 static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
782 u8 *bar, u64 *offset, u64 *len)
783 {
784 int pos;
785
786 for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
787 pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
788 u8 type, cap_len, id, res_bar;
789 u32 tmp32;
790 u64 res_offset, res_length;
791
792 pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
793 cfg_type), &type);
794 if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
795 continue;
796
797 pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
798 cap_len), &cap_len);
799 if (cap_len != sizeof(struct virtio_pci_cap64)) {
800 dev_err(&dev->dev, "%s: shm cap with bad size offset:"
801 " %d size: %d\n", __func__, pos, cap_len);
802 continue;
803 }
804
805 pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
806 id), &id);
807 if (id != required_id)
808 continue;
809
810 pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
811 bar), &res_bar);
812 if (res_bar >= PCI_STD_NUM_BARS)
813 continue;
814
815 /* Type and ID match, and the BAR value isn't reserved.
816 * Looks good.
817 */
818
819 /* Read the lower 32bit of length and offset */
820 pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
821 offset), &tmp32);
822 res_offset = tmp32;
823 pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
824 length), &tmp32);
825 res_length = tmp32;
826
827 /* and now the top half */
828 pci_read_config_dword(dev,
829 pos + offsetof(struct virtio_pci_cap64,
830 offset_hi), &tmp32);
831 res_offset |= ((u64)tmp32) << 32;
832 pci_read_config_dword(dev,
833 pos + offsetof(struct virtio_pci_cap64,
834 length_hi), &tmp32);
835 res_length |= ((u64)tmp32) << 32;
836
837 *bar = res_bar;
838 *offset = res_offset;
839 *len = res_length;
840
841 return pos;
842 }
843 return 0;
844 }
845
846 static bool vp_get_shm_region(struct virtio_device *vdev,
847 struct virtio_shm_region *region, u8 id)
848 {
849 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
850 struct pci_dev *pci_dev = vp_dev->pci_dev;
851 u8 bar;
852 u64 offset, len;
853 phys_addr_t phys_addr;
854 size_t bar_len;
855
856 if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
857 return false;
858
859 phys_addr = pci_resource_start(pci_dev, bar);
860 bar_len = pci_resource_len(pci_dev, bar);
861
862 if ((offset + len) < offset) {
863 dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
864 __func__);
865 return false;
866 }
867
868 if (offset + len > bar_len) {
869 dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
870 __func__);
871 return false;
872 }
873
874 region->len = len;
875 region->addr = (u64) phys_addr + offset;
876
877 return true;
878 }
879
880 /*
881 * virtio_pci_admin_has_dev_parts - Checks whether the device parts
882 * functionality is supported
883 * @pdev: VF pci_dev
884 *
885 * Returns true if the device parts functionality is supported, otherwise false.
886 */
887 bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev)
888 {
889 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
890 struct virtio_pci_device *vp_dev;
891
892 if (!virtio_dev)
893 return false;
894
895 if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
896 return false;
897
898 vp_dev = to_vp_device(virtio_dev);
899
900 if (!((vp_dev->admin_vq.supported_cmds & VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) ==
901 VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP))
902 return false;
903
904 return vp_dev->admin_vq.max_dev_parts_objects;
905 }
906 EXPORT_SYMBOL_GPL(virtio_pci_admin_has_dev_parts);
907
908 /*
909 * virtio_pci_admin_mode_set - Sets the mode of a member device
910 * @pdev: VF pci_dev
911 * @flags: device mode's flags
912 *
913 * Note: caller must serialize access for the given device.
914 * Returns 0 on success, or negative on failure.
915 */
916 int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 flags)
917 {
918 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
919 struct virtio_admin_cmd_dev_mode_set_data *data;
920 struct virtio_admin_cmd cmd = {};
921 struct scatterlist data_sg;
922 int vf_id;
923 int ret;
924
925 if (!virtio_dev)
926 return -ENODEV;
927
928 vf_id = pci_iov_vf_id(pdev);
929 if (vf_id < 0)
930 return vf_id;
931
932 data = kzalloc(sizeof(*data), GFP_KERNEL);
933 if (!data)
934 return -ENOMEM;
935
936 data->flags = flags;
937 sg_init_one(&data_sg, data, sizeof(*data));
938 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_MODE_SET);
939 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
940 cmd.group_member_id = cpu_to_le64(vf_id + 1);
941 cmd.data_sg = &data_sg;
942 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
943
944 kfree(data);
945 return ret;
946 }
947 EXPORT_SYMBOL_GPL(virtio_pci_admin_mode_set);
948
949 /*
950 * virtio_pci_admin_obj_create - Creates an object for a given type and operation,
951 * bounded by the maximum number of such objects the device supports.
952 * @pdev: VF pci_dev
953 * @obj_type: Object type
954 * @operation_type: Operation type
955 * @obj_id: Output unique object id
956 *
957 * Note: caller must serialize access for the given device.
958 * Returns 0 on success, or negative on failure.
959 */
960 int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type,
961 u32 *obj_id)
962 {
963 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
964 u16 data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data);
965 struct virtio_admin_cmd_resource_obj_create_data *obj_create_data;
966 struct virtio_resource_obj_dev_parts obj_dev_parts = {};
967 struct virtio_pci_admin_vq *avq;
968 struct virtio_admin_cmd cmd = {};
969 struct scatterlist data_sg;
970 void *data;
971 int id = -1;
972 int vf_id;
973 int ret;
974
975 if (!virtio_dev)
976 return -ENODEV;
977
978 vf_id = pci_iov_vf_id(pdev);
979 if (vf_id < 0)
980 return vf_id;
981
982 if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
983 return -EOPNOTSUPP;
984
985 if (operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET &&
986 operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET)
987 return -EINVAL;
988
989 avq = &to_vp_device(virtio_dev)->admin_vq;
990 if (!avq->max_dev_parts_objects)
991 return -EOPNOTSUPP;
992
993 id = ida_alloc_range(&avq->dev_parts_ida, 0,
994 avq->max_dev_parts_objects - 1, GFP_KERNEL);
995 if (id < 0)
996 return id;
997
998 *obj_id = id;
999 data_size += sizeof(obj_dev_parts);
1000 data = kzalloc(data_size, GFP_KERNEL);
1001 if (!data) {
1002 ret = -ENOMEM;
1003 goto end;
1004 }
1005
1006 obj_create_data = data;
1007 obj_create_data->hdr.type = cpu_to_le16(obj_type);
1008 obj_create_data->hdr.id = cpu_to_le32(*obj_id);
1009 obj_dev_parts.type = operation_type;
1010 memcpy(obj_create_data->resource_obj_specific_data, &obj_dev_parts,
1011 sizeof(obj_dev_parts));
1012 sg_init_one(&data_sg, data, data_size);
1013 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE);
1014 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
1015 cmd.group_member_id = cpu_to_le64(vf_id + 1);
1016 cmd.data_sg = &data_sg;
1017 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
1018
1019 kfree(data);
1020 end:
1021 if (ret)
1022 ida_free(&avq->dev_parts_ida, id);
1023
1024 return ret;
1025 }
1026 EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_create);
1027
1028 /*
1029 * virtio_pci_admin_obj_destroy - Destroys an object of a given type and id
1030 * @pdev: VF pci_dev
1031 * @obj_type: Object type
1032 * @id: Object id
1033 *
1034 * Note: caller must serialize access for the given device.
1035 * Returns 0 on success, or negative on failure.
1036 */
1037 int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id)
1038 {
1039 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
1040 struct virtio_admin_cmd_resource_obj_cmd_hdr *data;
1041 struct virtio_pci_device *vp_dev;
1042 struct virtio_admin_cmd cmd = {};
1043 struct scatterlist data_sg;
1044 int vf_id;
1045 int ret;
1046
1047 if (!virtio_dev)
1048 return -ENODEV;
1049
1050 vf_id = pci_iov_vf_id(pdev);
1051 if (vf_id < 0)
1052 return vf_id;
1053
1054 if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
1055 return -EINVAL;
1056
1057 data = kzalloc(sizeof(*data), GFP_KERNEL);
1058 if (!data)
1059 return -ENOMEM;
1060
1061 data->type = cpu_to_le16(obj_type);
1062 data->id = cpu_to_le32(id);
1063 sg_init_one(&data_sg, data, sizeof(*data));
1064 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY);
1065 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
1066 cmd.group_member_id = cpu_to_le64(vf_id + 1);
1067 cmd.data_sg = &data_sg;
1068 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
1069 if (!ret) {
1070 vp_dev = to_vp_device(virtio_dev);
1071 ida_free(&vp_dev->admin_vq.dev_parts_ida, id);
1072 }
1073
1074 kfree(data);
1075 return ret;
1076 }
1077 EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_destroy);
1078
1079 /*
1080 * virtio_pci_admin_dev_parts_metadata_get - Gets the metadata of the device parts
1081 * identified by the below attributes.
1082 * @pdev: VF pci_dev
1083 * @obj_type: Object type
1084 * @id: Object id
1085 * @metadata_type: Metadata type
1086 * @out: Upon success holds the output for 'metadata type size'
1087 *
1088 * Note: caller must serialize access for the given device.
1089 * Returns 0 on success, or negative on failure.
1090 */
1091 int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type,
1092 u32 id, u8 metadata_type, u32 *out)
1093 {
1094 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
1095 struct virtio_admin_cmd_dev_parts_metadata_result *result;
1096 struct virtio_admin_cmd_dev_parts_metadata_data *data;
1097 struct scatterlist data_sg, result_sg;
1098 struct virtio_admin_cmd cmd = {};
1099 int vf_id;
1100 int ret;
1101
1102 if (!virtio_dev)
1103 return -ENODEV;
1104
1105 if (metadata_type != VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE)
1106 return -EOPNOTSUPP;
1107
1108 vf_id = pci_iov_vf_id(pdev);
1109 if (vf_id < 0)
1110 return vf_id;
1111
1112 data = kzalloc(sizeof(*data), GFP_KERNEL);
1113 if (!data)
1114 return -ENOMEM;
1115
1116 result = kzalloc(sizeof(*result), GFP_KERNEL);
1117 if (!result) {
1118 ret = -ENOMEM;
1119 goto end;
1120 }
1121
1122 data->hdr.type = cpu_to_le16(obj_type);
1123 data->hdr.id = cpu_to_le32(id);
1124 data->type = metadata_type;
1125 sg_init_one(&data_sg, data, sizeof(*data));
1126 sg_init_one(&result_sg, result, sizeof(*result));
1127 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET);
1128 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
1129 cmd.group_member_id = cpu_to_le64(vf_id + 1);
1130 cmd.data_sg = &data_sg;
1131 cmd.result_sg = &result_sg;
1132 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
1133 if (!ret)
1134 *out = le32_to_cpu(result->parts_size.size);
1135
1136 kfree(result);
1137 end:
1138 kfree(data);
1139 return ret;
1140 }
1141 EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_metadata_get);
1142
1143 /*
1144 * virtio_pci_admin_dev_parts_get - Gets the device parts identified by the below attributes.
1145 * @pdev: VF pci_dev
1146 * @obj_type: Object type
1147 * @id: Object id
1148 * @get_type: Get type
1149 * @res_sg: Upon success holds the output result data
1150 * @res_size: Upon success holds the output result size
1151 *
1152 * Note: caller must serialize access for the given device.
1153 * Returns 0 on success, or negative on failure.
1154 */
1155 int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id,
1156 u8 get_type, struct scatterlist *res_sg,
1157 u32 *res_size)
1158 {
1159 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
1160 struct virtio_admin_cmd_dev_parts_get_data *data;
1161 struct scatterlist data_sg;
1162 struct virtio_admin_cmd cmd = {};
1163 int vf_id;
1164 int ret;
1165
1166 if (!virtio_dev)
1167 return -ENODEV;
1168
1169 if (get_type != VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL)
1170 return -EOPNOTSUPP;
1171
1172 vf_id = pci_iov_vf_id(pdev);
1173 if (vf_id < 0)
1174 return vf_id;
1175
1176 data = kzalloc(sizeof(*data), GFP_KERNEL);
1177 if (!data)
1178 return -ENOMEM;
1179
1180 data->hdr.type = cpu_to_le16(obj_type);
1181 data->hdr.id = cpu_to_le32(id);
1182 data->type = get_type;
1183 sg_init_one(&data_sg, data, sizeof(*data));
1184 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_GET);
1185 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
1186 cmd.group_member_id = cpu_to_le64(vf_id + 1);
1187 cmd.data_sg = &data_sg;
1188 cmd.result_sg = res_sg;
1189 ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
1190 if (!ret)
1191 *res_size = cmd.result_sg_size;
1192
1193 kfree(data);
1194 return ret;
1195 }
1196 EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_get);
1197
1198 /*
1199 * virtio_pci_admin_dev_parts_set - Sets the device parts identified by the below attributes.
1200 * @pdev: VF pci_dev
1201 * @data_sg: The device parts data, its layout follows struct virtio_admin_cmd_dev_parts_set_data
1202 *
1203 * Note: caller must serialize access for the given device.
1204 * Returns 0 on success, or negative on failure.
1205 */
1206 int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg)
1207 {
1208 struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
1209 struct virtio_admin_cmd cmd = {};
1210 int vf_id;
1211
1212 if (!virtio_dev)
1213 return -ENODEV;
1214
1215 vf_id = pci_iov_vf_id(pdev);
1216 if (vf_id < 0)
1217 return vf_id;
1218
1219 cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_SET);
1220 cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
1221 cmd.group_member_id = cpu_to_le64(vf_id + 1);
1222 cmd.data_sg = data_sg;
1223 return vp_modern_admin_cmd_exec(virtio_dev, &cmd);
1224 }
1225 EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_set);
1226
1227 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
1228 .get = NULL,
1229 .set = NULL,
1230 .generation = vp_generation,
1231 .get_status = vp_get_status,
1232 .set_status = vp_set_status,
1233 .reset = vp_reset,
1234 .find_vqs = vp_modern_find_vqs,
1235 .del_vqs = vp_del_vqs,
1236 .synchronize_cbs = vp_synchronize_vectors,
1237 .get_features = vp_get_features,
1238 .finalize_features = vp_finalize_features,
1239 .bus_name = vp_bus_name,
1240 .set_vq_affinity = vp_set_vq_affinity,
1241 .get_vq_affinity = vp_get_vq_affinity,
1242 .get_shm_region = vp_get_shm_region,
1243 .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
1244 .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
1245 };
1246
1247 static const struct virtio_config_ops virtio_pci_config_ops = {
1248 .get = vp_get,
1249 .set = vp_set,
1250 .generation = vp_generation,
1251 .get_status = vp_get_status,
1252 .set_status = vp_set_status,
1253 .reset = vp_reset,
1254 .find_vqs = vp_modern_find_vqs,
1255 .del_vqs = vp_del_vqs,
1256 .synchronize_cbs = vp_synchronize_vectors,
1257 .get_features = vp_get_features,
1258 .finalize_features = vp_finalize_features,
1259 .bus_name = vp_bus_name,
1260 .set_vq_affinity = vp_set_vq_affinity,
1261 .get_vq_affinity = vp_get_vq_affinity,
1262 .get_shm_region = vp_get_shm_region,
1263 .disable_vq_and_reset = vp_modern_disable_vq_and_reset,
1264 .enable_vq_after_reset = vp_modern_enable_vq_after_reset,
1265 };
1266
1267 /* the PCI probing function */
1268 int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
1269 {
1270 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
1271 struct pci_dev *pci_dev = vp_dev->pci_dev;
1272 int err;
1273
1274 mdev->pci_dev = pci_dev;
1275
1276 err = vp_modern_probe(mdev);
1277 if (err)
1278 return err;
1279
1280 if (mdev->device)
1281 vp_dev->vdev.config = &virtio_pci_config_ops;
1282 else
1283 vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
1284
1285 vp_dev->config_vector = vp_config_vector;
1286 vp_dev->setup_vq = setup_vq;
1287 vp_dev->del_vq = del_vq;
1288 vp_dev->avq_index = vp_avq_index;
1289 vp_dev->isr = mdev->isr;
1290 vp_dev->vdev.id = mdev->id;
1291
1292 spin_lock_init(&vp_dev->admin_vq.lock);
1293 return 0;
1294 }
1295
1296 void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
1297 {
1298 struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
1299
1300 vp_modern_remove(mdev);
1301 }
1302