1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ccw based virtio transport
4 *
5 * Copyright IBM Corp. 2012, 2014
6 *
7 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
8 */
9
10 #include <linux/kernel_stat.h>
11 #include <linux/hex.h>
12 #include <linux/init.h>
13 #include <linux/memblock.h>
14 #include <linux/err.h>
15 #include <linux/virtio.h>
16 #include <linux/virtio_config.h>
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/virtio_ring.h>
20 #include <linux/pfn.h>
21 #include <linux/async.h>
22 #include <linux/wait.h>
23 #include <linux/list.h>
24 #include <linux/bitops.h>
25 #include <linux/moduleparam.h>
26 #include <linux/io.h>
27 #include <linux/kvm_para.h>
28 #include <linux/notifier.h>
29 #include <asm/diag.h>
30 #include <asm/setup.h>
31 #include <asm/irq.h>
32 #include <asm/cio.h>
33 #include <asm/ccwdev.h>
34 #include <asm/virtio-ccw.h>
35 #include <asm/isc.h>
36 #include <asm/airq.h>
37 #include <asm/tpi.h>
38
39 /*
40 * virtio related functions
41 */
42
/* Per-queue configuration block, payload of CCW_CMD_READ_VQ_CONF. */
struct vq_config_block {
	__u16 index;	/* queue index to query */
	__u16 num;	/* number of queue entries, filled in by the host */
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */
50
/*
 * Per-device DMA area: everything in here is accessed by the host
 * directly, so it is allocated via ccw_device_dma_zalloc() and
 * addressed through vcdev->dma_area_addr.
 */
struct vcdev_dma_area {
	unsigned long indicators;	/* classic per-queue interrupt indicator bits */
	unsigned long indicators2;	/* indicator bits for config changes */
	struct vq_config_block config_block;	/* scratch for READ_VQ_CONF */
	__u8 status;	/* device status byte mirrored to/from the host */
};
57
/* State of one virtio device proxied over a ccw device. */
struct virtio_ccw_device {
	struct virtio_device vdev;	/* generic virtio device */
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];	/* cached copy of the config space */
	struct ccw_device *cdev;	/* underlying channel device */
	/* we make cdev->dev.dma_parms point to this */
	struct device_dma_parameters dma_parms;
	__u32 curr_io;		/* VIRTIO_CCW_DOING_* flags of I/O in flight */
	int err;		/* first async error of the current I/O burst */
	unsigned int revision; /* Transport revision */
	wait_queue_head_t wait_q;	/* waited on in ccw_io_helper() */
	spinlock_t lock;	/* protects config cache and virtqueue list */
	rwlock_t irq_lock;	/* synchronizes with classic interrupt delivery */
	struct mutex io_lock; /* Serializes I/O requests */
	struct list_head virtqueues;	/* list of virtio_ccw_vq_info */
	bool is_thinint;	/* adapter (thin) interrupts in use? */
	bool going_away;
	bool device_lost;
	unsigned int config_ready;	/* number of valid bytes in config[] */
	void *airq_info;	/* struct airq_info providing our indicators */
	struct vcdev_dma_area *dma_area;	/* see struct vcdev_dma_area */
	dma32_t dma_area_addr;	/* device-visible address of dma_area */
};
80
indicators(struct virtio_ccw_device * vcdev)81 static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
82 {
83 return &vcdev->dma_area->indicators;
84 }
85
indicators2(struct virtio_ccw_device * vcdev)86 static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
87 {
88 return &vcdev->dma_area->indicators2;
89 }
90
91 /* Spec stipulates a 64 bit address */
indicators_dma(struct virtio_ccw_device * vcdev)92 static inline dma64_t indicators_dma(struct virtio_ccw_device *vcdev)
93 {
94 u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr);
95
96 return dma64_add(u64_to_dma64(dma_area_addr),
97 offsetof(struct vcdev_dma_area, indicators));
98 }
99
100 /* Spec stipulates a 64 bit address */
indicators2_dma(struct virtio_ccw_device * vcdev)101 static inline dma64_t indicators2_dma(struct virtio_ccw_device *vcdev)
102 {
103 u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr);
104
105 return dma64_add(u64_to_dma64(dma_area_addr),
106 offsetof(struct vcdev_dma_area, indicators2));
107 }
108
config_block_dma(struct virtio_ccw_device * vcdev)109 static inline dma32_t config_block_dma(struct virtio_ccw_device *vcdev)
110 {
111 return dma32_add(vcdev->dma_area_addr,
112 offsetof(struct vcdev_dma_area, config_block));
113 }
114
status_dma(struct virtio_ccw_device * vcdev)115 static inline dma32_t status_dma(struct virtio_ccw_device *vcdev)
116 {
117 return dma32_add(vcdev->dma_area_addr,
118 offsetof(struct vcdev_dma_area, status));
119 }
120
/* Payload of CCW_CMD_SET_VQ for transport revision 0 (legacy layout). */
struct vq_info_block_legacy {
	dma64_t queue;	/* guest address of the ring */
	__u32 align;	/* ring alignment */
	__u16 index;	/* queue index */
	__u16 num;	/* number of ring entries */
} __packed;
127
/* Payload of CCW_CMD_SET_VQ for transport revision 1 and up. */
struct vq_info_block {
	dma64_t desc;	/* descriptor area */
	__u32 res0;	/* reserved */
	__u16 index;	/* queue index */
	__u16 num;	/* number of ring entries */
	dma64_t avail;	/* driver (avail) area */
	dma64_t used;	/* device (used) area */
} __packed;
136
/* Payload of CCW_CMD_{READ,WRITE}_FEAT: one 32-bit window of feature bits. */
struct virtio_feature_desc {
	__le32 features;	/* feature bits of the selected window */
	__u8 index;		/* window index (0 = low half, 1 = high half) */
} __packed;
141
/* Payload of CCW_CMD_SET_IND_ADAPTER: adapter interrupt registration. */
struct virtio_thinint_area {
	dma64_t summary_indicator;	/* address of the summary indicator byte */
	dma64_t indicator;		/* address of the indicator bit vector */
	u64 bit_nr;	/* first bit in the vector used by this device */
	u8 isc;		/* interruption subclass to use */
} __packed;
148
/* Payload of CCW_CMD_SET_VIRTIO_REV: transport revision negotiation. */
struct virtio_rev_info {
	__u16 revision;	/* requested transport revision */
	__u16 length;	/* length of the trailing data */
	__u8 data[];	/* revision-specific data, if any */
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 2
157
/* Per-virtqueue bookkeeping kept by this transport. */
struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	dma32_t info_block_addr;	/* device-visible address of info_block */
	int num;	/* ring size actually in use */
	union {
		struct vq_info_block s;		/* revision 1+ layout */
		struct vq_info_block_legacy l;	/* revision 0 layout */
	} *info_block;
	int bit_nr;	/* NOTE(review): not set in this chunk; presumably the airq bit */
	struct list_head node;	/* entry in vcdev->virtqueues */
	long cookie;	/* cookie returned by the notify hypercall */
};
170
#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

/* One indicator bit vector covers a full cache line worth of bits. */
#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

/* Use adapter interrupts by default; cleared when the host lacks support. */
static int virtio_ccw_use_airq = 1;
177
/* One adapter interrupt vector, shared by multiple queues/devices. */
struct airq_info {
	rwlock_t lock;		/* protects the indicator vector contents */
	u8 summary_indicator_idx;	/* index into summary_indicators */
	struct airq_struct airq;	/* adapter interrupt registration */
	struct airq_iv *aiv;	/* indicator bits plus per-bit vq pointers */
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
static DEFINE_MUTEX(airq_areas_lock);	/* serializes airq_areas[] population */

/* Array of summary indicator bytes, indexed by summary_indicator_idx. */
static u8 *summary_indicators;
188
get_summary_indicator(struct airq_info * info)189 static inline u8 *get_summary_indicator(struct airq_info *info)
190 {
191 return summary_indicators + info->summary_indicator_idx;
192 }
193
get_summary_indicator_dma(struct airq_info * info)194 static inline dma64_t get_summary_indicator_dma(struct airq_info *info)
195 {
196 return virt_to_dma64(get_summary_indicator(info));
197 }
198
/* Channel command codes understood by the virtio-ccw host. */
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

/*
 * Flags carried in the upper halfword of the interruption parameter,
 * marking which channel program is in flight (see ccw_io_helper()).
 */
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
227
to_vc_device(struct virtio_device * vdev)228 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
229 {
230 return container_of(vdev, struct virtio_ccw_device, vdev);
231 }
232
/*
 * Remove @vq's slot from @info's indicator vector: free its bit and
 * clear the stored virtqueue back-pointer. No-op if the queue is not
 * found in this vector.
 */
static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		/* Find the bit whose back-pointer matches this queue. */
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}
247
/*
 * Adapter interrupt handler: deliver vring interrupts for all queues
 * whose indicator bit is set in this area's bit vector.
 */
static void virtio_airq_handler(struct airq_struct *airq,
				struct tpi_info *tpi_info)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	/*
	 * Clear the summary indicator, then rescan: bits set after the
	 * first scan but before the clear would otherwise be lost.
	 */
	*(get_summary_indicator(info)) = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}
274
new_airq_info(int index)275 static struct airq_info *new_airq_info(int index)
276 {
277 struct airq_info *info;
278 int rc;
279
280 info = kzalloc_obj(*info);
281 if (!info)
282 return NULL;
283 rwlock_init(&info->lock);
284 info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
285 | AIRQ_IV_CACHELINE, NULL);
286 if (!info->aiv) {
287 kfree(info);
288 return NULL;
289 }
290 info->airq.handler = virtio_airq_handler;
291 info->summary_indicator_idx = index;
292 info->airq.lsi_ptr = get_summary_indicator(info);
293 info->airq.isc = VIRTIO_AIRQ_ISC;
294 rc = register_adapter_interrupt(&info->airq);
295 if (rc) {
296 airq_iv_release(info->aiv);
297 kfree(info);
298 return NULL;
299 }
300 return info;
301 }
302
/*
 * Find (or create) an adapter interrupt area with enough free bits for
 * all non-NULL queues in @vqs, allocate those bits and store the queue
 * back-pointers.
 *
 * On success, *first receives the first allocated bit number and
 * *airq_info the area used; the indicator vector's base address is
 * returned. Returns NULL if no area could accommodate the queues.
 */
static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					 u64 *first, void **airq_info)
{
	int i, j, queue_idx, highest_queue_idx = -1;
	struct airq_info *info;
	unsigned long *indicator_addr = NULL;
	unsigned long bit, flags;

	/* Array entries without an actual queue pointer must be ignored. */
	for (i = 0; i < nvqs; i++) {
		if (vqs[i])
			highest_queue_idx++;
	}

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		/* Create areas lazily, under the creation mutex. */
		mutex_lock(&airq_areas_lock);
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info(i);
		info = airq_areas[i];
		mutex_unlock(&airq_areas_lock);
		if (!info)
			return NULL;
		write_lock_irqsave(&info->lock, flags);
		/* Need one contiguous run of bits, one per real queue. */
		bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = info->aiv->vector;
		for (j = 0, queue_idx = 0; j < nvqs; j++) {
			if (!vqs[j])
				continue;
			airq_iv_set_ptr(info->aiv, bit + queue_idx++,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}
345
virtio_ccw_drop_indicators(struct virtio_ccw_device * vcdev)346 static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
347 {
348 struct virtio_ccw_vq_info *info;
349
350 if (!vcdev->airq_info)
351 return;
352 list_for_each_entry(info, &vcdev->virtqueues, node)
353 drop_airq_indicator(info->vq, vcdev->airq_info);
354 }
355
/*
 * Check whether the channel program marked by @flag is still in flight.
 * An error recorded on the device terminates the wait as well.
 */
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret = 0;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (!vcdev->err)
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}
369
/*
 * Start a channel program on @vcdev and wait for its completion.
 *
 * @ccw: the channel command word to run (DMA-able memory)
 * @intparm: interruption parameter; the upper halfword carries the
 *	     VIRTIO_CCW_DOING_* flag identifying this operation
 *
 * Returns 0 on success, the ccw_device_start() error, or the
 * asynchronous error recorded in vcdev->err.
 */
static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	mutex_lock(&vcdev->io_lock);
	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			/* First outstanding I/O clears any stale error. */
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);	/* retry while the subchannel is busy */
	/* The completion path (not in this chunk) clears curr_io and wakes us. */
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	ret = ret ? ret : vcdev->err;
	mutex_unlock(&vcdev->io_lock);
	return ret;
}
394
/*
 * Deregister this device's interrupt indicators with the host: either
 * the adapter interrupt registration (thinint) or the classic indicator
 * address, depending on the mode in use. Best effort - allocation
 * failures simply skip the deregistration.
 */
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
				      struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *airq_info = vcdev->airq_info;
	dma64_t *indicatorp = NULL;

	if (vcdev->is_thinint) {
		thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
						     sizeof(*thinint_area),
						     &ccw->cda);
		if (!thinint_area)
			return;
		thinint_area->summary_indicator =
			get_summary_indicator_dma(airq_info);
		thinint_area->isc = VIRTIO_AIRQ_ISC;
		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
		ccw->count = sizeof(*thinint_area);
	} else {
		/* payload is the address of the indicators */
		indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
						   sizeof(*indicatorp),
						   &ccw->cda);
		if (!indicatorp)
			return;
		/* A zero address tells the host to stop using indicators. */
		*indicatorp = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->count = sizeof(*indicatorp);
	}
	/* Deregister indicators from host. */
	*indicators(vcdev) = 0;
	ccw->flags = 0;
	ret = ccw_io_helper(vcdev, ccw,
			    vcdev->is_thinint ?
			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
			    VIRTIO_CCW_DOING_SET_IND);
	if (ret && (ret != -ENODEV))
		dev_info(&vcdev->cdev->dev,
			 "Failed to deregister indicators (%d)\n", ret);
	else if (vcdev->is_thinint)
		virtio_ccw_drop_indicators(vcdev);
	/* ccw_device_dma_free() tolerates NULL, so both calls are safe. */
	ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(*indicatorp));
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
440
/*
 * Kick the host about new buffers on @vq via the KVM notify hypercall.
 * The returned cookie is stored and passed back on the next kick.
 * Returns false if the hypercall reported an error.
 */
static inline bool virtio_ccw_do_kvm_notify(struct virtqueue *vq, u32 data)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	/* The schid is passed to the host as a raw unsigned int. */
	BUILD_BUG_ON(sizeof(struct subchannel_id) != sizeof(unsigned int));
	info->cookie = kvm_hypercall3(KVM_S390_VIRTIO_CCW_NOTIFY,
				      *((unsigned int *)&schid),
				      data, info->cookie);
	if (info->cookie < 0)
		return false;
	return true;
}
457
virtio_ccw_kvm_notify(struct virtqueue * vq)458 static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
459 {
460 return virtio_ccw_do_kvm_notify(vq, vq->index);
461 }
462
virtio_ccw_kvm_notify_with_data(struct virtqueue * vq)463 static bool virtio_ccw_kvm_notify_with_data(struct virtqueue *vq)
464 {
465 return virtio_ccw_do_kvm_notify(vq, vring_notification_data(vq));
466 }
467
virtio_ccw_read_vq_conf(struct virtio_ccw_device * vcdev,struct ccw1 * ccw,int index)468 static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
469 struct ccw1 *ccw, int index)
470 {
471 int ret;
472
473 vcdev->dma_area->config_block.index = index;
474 ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
475 ccw->flags = 0;
476 ccw->count = sizeof(struct vq_config_block);
477 ccw->cda = config_block_dma(vcdev);
478 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
479 if (ret)
480 return ret;
481 return vcdev->dma_area->config_block.num ?: -ENOENT;
482 }
483
/*
 * Tear down one virtqueue: unlink it from the device list, tell the
 * host to release it (SET_VQ with zeroed addresses), and free the ring
 * and bookkeeping.
 */
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	if (vcdev->revision == 0) {
		/* Legacy layout: zeroed queue address releases the queue. */
		info->info_block->l.queue = 0;
		info->info_block->l.align = 0;
		info->info_block->l.index = index;
		info->info_block->l.num = 0;
		ccw->count = sizeof(info->info_block->l);
	} else {
		/* Revision 1+ layout: zero all three ring area addresses. */
		info->info_block->s.desc = 0;
		info->info_block->s.index = index;
		info->info_block->s.num = 0;
		info->info_block->s.avail = 0;
		info->info_block->s.used = 0;
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = info->info_block_addr;
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
			 ret, index);

	vring_del_virtqueue(vq);
	ccw_device_dma_free(vcdev->cdev, info->info_block,
			    sizeof(*info->info_block));
	kfree(info);
}
530
virtio_ccw_del_vqs(struct virtio_device * vdev)531 static void virtio_ccw_del_vqs(struct virtio_device *vdev)
532 {
533 struct virtqueue *vq, *n;
534 struct ccw1 *ccw;
535 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
536
537 ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
538 if (!ccw)
539 return;
540
541 virtio_ccw_drop_indicator(vcdev, ccw);
542
543 list_for_each_entry_safe(vq, n, &vdev->vqs, list)
544 virtio_ccw_del_vq(vq, ccw);
545
546 ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
547 }
548
/*
 * Create virtqueue @i for @vdev and register it with the host via
 * CCW_CMD_SET_VQ. Returns the new virtqueue or an ERR_PTR() on failure.
 */
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name, bool ctx,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	bool (*notify)(struct virtqueue *vq);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	u64 queue;
	unsigned long flags;
	bool may_reduce;

	/* Pick the notify variant matching the negotiated features. */
	if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA))
		notify = virtio_ccw_kvm_notify_with_data;
	else
		notify = virtio_ccw_kvm_notify;

	/* Allocate queue. */
	info = kzalloc_obj(struct virtio_ccw_vq_info);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
						 sizeof(*info->info_block),
						 &info->info_block_addr);
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	/* Ask the host how large this queue may be. */
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	if (info->num < 0) {
		err = info->num;
		goto out_err;
	}
	/* Only revision 1+ may shrink the ring if memory is tight. */
	may_reduce = vcdev->revision > 0;
	vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
				    vdev, true, may_reduce, ctx,
				    notify, callback, name);

	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}

	vq->num_max = info->num;

	/* it may have been reduced */
	info->num = virtqueue_get_vring_size(vq);

	/* Register it with the host. */
	queue = virtqueue_get_desc_addr(vq);
	if (vcdev->revision == 0) {
		/* Legacy layout: one address plus alignment. */
		info->info_block->l.queue = u64_to_dma64(queue);
		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
		info->info_block->l.index = i;
		info->info_block->l.num = info->num;
		ccw->count = sizeof(info->info_block->l);
	} else {
		/* Revision 1+ layout: desc/avail/used addresses. */
		info->info_block->s.desc = u64_to_dma64(queue);
		info->info_block->s.index = i;
		info->info_block->s.num = info->num;
		info->info_block->s.avail = u64_to_dma64(virtqueue_get_avail_addr(vq));
		info->info_block->s.used = u64_to_dma64(virtqueue_get_used_addr(vq));
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = info->info_block_addr;
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		ccw_device_dma_free(vcdev->cdev, info->info_block,
				    sizeof(*info->info_block));
	}
	kfree(info);
	return ERR_PTR(err);
}
650
/*
 * Register adapter (thin) interrupt indicators for all queues of
 * @vcdev with the host via CCW_CMD_SET_IND_ADAPTER.
 * On -EOPNOTSUPP, adapter interrupts are disabled globally for
 * subsequent devices.
 */
static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	unsigned long *indicator_addr;
	struct airq_info *info;

	thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
					     sizeof(*thinint_area),
					     &ccw->cda);
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	indicator_addr = get_airq_indicator(vqs, nvqs,
					    &thinint_area->bit_nr,
					    &vcdev->airq_info);
	if (!indicator_addr) {
		ret = -ENOSPC;
		goto out;
	}
	thinint_area->indicator = virt_to_dma64(indicator_addr);
	info = vcdev->airq_info;
	thinint_area->summary_indicator = get_summary_indicator_dma(info);
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		/* Give back the indicator bits we just claimed. */
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
	return ret;
}
700
/*
 * virtio config op: set up all requested virtqueues and register the
 * interrupt indicators (adapter interrupts if available, classic
 * indicators otherwise) plus the config-change indicators.
 * On failure, all queues created so far are torn down again.
 */
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       struct virtqueue_info vqs_info[],
			       struct irq_affinity *desc)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	dma64_t *indicatorp = NULL;
	int ret, i, queue_idx = 0;
	struct ccw1 *ccw;
	dma32_t indicatorp_dma = 0;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
	if (!ccw)
		return -ENOMEM;

	/* Create each requested queue; unnamed slots stay NULL. */
	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, vqi->callback,
					     vqi->name, vqi->ctx, ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/*
	 * We need a data area under 2G to communicate. Our payload is
	 * the address of the indicators.
	 */
	indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
					   sizeof(*indicatorp),
					   &indicatorp_dma);
	if (!indicatorp)
		goto out;
	*indicatorp = indicators_dma(vcdev);
	if (vcdev->is_thinint) {
		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
		if (ret)
			/* no error, just fall back to legacy interrupts */
			vcdev->is_thinint = false;
	}
	ccw->cda = indicatorp_dma;
	if (!vcdev->is_thinint) {
		/* Register queue indicators with host. */
		*indicators(vcdev) = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->flags = 0;
		ccw->count = sizeof(*indicatorp);
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
		if (ret)
			goto out;
	}
	/* Register indicators2 with host for config changes */
	*indicatorp = indicators2_dma(vcdev);
	*indicators2(vcdev) = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(*indicatorp);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(*indicatorp));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return 0;
out:
	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(*indicatorp));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	virtio_ccw_del_vqs(vdev);
	return ret;
}
783
virtio_ccw_reset(struct virtio_device * vdev)784 static void virtio_ccw_reset(struct virtio_device *vdev)
785 {
786 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
787 struct ccw1 *ccw;
788
789 ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
790 if (!ccw)
791 return;
792
793 /* Zero status bits. */
794 vcdev->dma_area->status = 0;
795
796 /* Send a reset ccw on device. */
797 ccw->cmd_code = CCW_CMD_VDEV_RESET;
798 ccw->flags = 0;
799 ccw->count = 0;
800 ccw->cda = 0;
801 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
802 ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
803 }
804
/*
 * virtio config op: read the device's feature bits. Revision 0 only
 * exposes the low 32 bits; later revisions also supply the high half
 * via feature window 1. Returns 0 on any failure (no features).
 */
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret;
	u64 rc;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
	if (!ccw)
		return 0;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
					 &ccw->cda);
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

	/* Legacy devices only have 32 feature bits. */
	if (vcdev->revision == 0)
		goto out_free;

	/* Read second half of the feature bits from the host. */
	features->index = 1;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret == 0)
		rc |= (u64)le32_to_cpu(features->features) << 32;

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return rc;
}
853
/* Give the ccw transport a chance to mask out features it cannot support. */
static void ccw_transport_features(struct virtio_device *vdev)
{
	/*
	 * Currently nothing to do here.
	 */
}
860
/*
 * virtio config op: write the negotiated feature bits back to the host,
 * one 32-bit window at a time (revision 0 devices only get window 0).
 * Returns 0 on success or a negative errno.
 */
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	struct ccw1 *ccw;
	int ret;

	/* A revision 1+ device must have negotiated VERSION_1. */
	if (vcdev->revision >= 1 &&
	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses revision 1 "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
	if (!ccw)
		return -ENOMEM;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
					 &ccw->cda);
	if (!features) {
		ret = -ENOMEM;
		goto out_free;
	}
	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_ccw a chance to accept features. */
	ccw_transport_features(vdev);

	features->index = 0;
	features->features = cpu_to_le32((u32)vdev->features);
	/* Write the first half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	if (ret)
		goto out_free;

	/* Legacy devices only have 32 feature bits. */
	if (vcdev->revision == 0)
		goto out_free;

	features->index = 1;
	features->features = cpu_to_le32(vdev->features >> 32);
	/* Write the second half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return ret;
}
918
/*
 * virtio config op: read @len bytes at @offset from the device config
 * space into @buf (may be NULL to only refresh the cache). The whole
 * prefix [0, offset+len) is read and cached in vcdev->config.
 */
static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE,
					    &ccw->cda);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	/* Update the cache and remember how much of it is valid. */
	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(vcdev->config, config_area, offset + len);
	if (vcdev->config_ready < offset + len)
		vcdev->config_ready = offset + len;
	spin_unlock_irqrestore(&vcdev->lock, flags);
	if (buf)
		memcpy(buf, config_area + offset, len);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
958
/*
 * virtio config op: write @len bytes at @offset of the device config
 * space. The cached config is refreshed first so that bytes before
 * @offset are not clobbered by the full-prefix write.
 */
static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE,
					    &ccw->cda);
	if (!config_area)
		goto out_free;

	/* Make sure we don't overwrite fields. */
	if (vcdev->config_ready < offset)
		virtio_ccw_get_config(vdev, 0, NULL, offset);
	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	spin_unlock_irqrestore(&vcdev->lock, flags);
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
995
/*
 * virtio config op: read the device status byte. Revisions below 2
 * have no READ_STATUS command, so the locally cached value is returned
 * instead.
 */
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;

	if (vcdev->revision < 2)
		return vcdev->dma_area->status;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
	if (!ccw)
		return old_status;

	ccw->cmd_code = CCW_CMD_READ_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(vcdev->dma_area->status);
	ccw->cda = status_dma(vcdev);
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
	/*
	 * If the channel program failed (should only happen if the device
	 * was hotunplugged, and then we clean up via the machine check
	 * handler anyway), vcdev->dma_area->status was not overwritten and we just
	 * return the old status, which is fine.
	 */
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return vcdev->dma_area->status;
}
1024
/*
 * virtio config op: write the device status byte to the host.
 * On failure the cached status is rolled back to its previous value.
 */
static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
	if (!ccw)
		return;

	/* Write the status to the host. */
	vcdev->dma_area->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	/* We use ssch for setting the status which is a serializing
	 * instruction that guarantees the memory writes have
	 * completed before ssch.
	 */
	ccw->cda = status_dma(vcdev);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	/* Write failed? We assume status is unchanged. */
	if (ret)
		vcdev->dma_area->status = old_status;
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
1052
virtio_ccw_bus_name(struct virtio_device * vdev)1053 static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
1054 {
1055 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
1056
1057 return dev_name(&vcdev->cdev->dev);
1058 }
1059
virtio_ccw_synchronize_cbs(struct virtio_device * vdev)1060 static void virtio_ccw_synchronize_cbs(struct virtio_device *vdev)
1061 {
1062 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
1063 struct airq_info *info = vcdev->airq_info;
1064
1065 if (info) {
1066 /*
1067 * This device uses adapter interrupts: synchronize with
1068 * vring_interrupt() called by virtio_airq_handler()
1069 * via the indicator area lock.
1070 */
1071 write_lock_irq(&info->lock);
1072 write_unlock_irq(&info->lock);
1073 } else {
1074 /* This device uses classic interrupts: synchronize
1075 * with vring_interrupt() called by
1076 * virtio_ccw_int_handler() via the per-device
1077 * irq_lock
1078 */
1079 write_lock_irq(&vcdev->irq_lock);
1080 write_unlock_irq(&vcdev->irq_lock);
1081 }
1082 }
1083
/* virtio transport operations implemented by the ccw transport. */
static const struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
	.bus_name = virtio_ccw_bus_name,
	.synchronize_cbs = virtio_ccw_synchronize_cbs,
};
1097
1098
1099 /*
1100 * ccw bus driver related functions
1101 */
1102
virtio_ccw_release_dev(struct device * _d)1103 static void virtio_ccw_release_dev(struct device *_d)
1104 {
1105 struct virtio_device *dev = dev_to_virtio(_d);
1106 struct virtio_ccw_device *vcdev = to_vc_device(dev);
1107
1108 ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
1109 sizeof(*vcdev->dma_area));
1110 kfree(vcdev);
1111 }
1112
irb_is_error(struct irb * irb)1113 static int irb_is_error(struct irb *irb)
1114 {
1115 if (scsw_cstat(&irb->scsw) != 0)
1116 return 1;
1117 if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
1118 return 1;
1119 if (scsw_cc(&irb->scsw) != 0)
1120 return 1;
1121 return 0;
1122 }
1123
virtio_ccw_vq_by_ind(struct virtio_ccw_device * vcdev,int index)1124 static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
1125 int index)
1126 {
1127 struct virtio_ccw_vq_info *info;
1128 unsigned long flags;
1129 struct virtqueue *vq;
1130
1131 vq = NULL;
1132 spin_lock_irqsave(&vcdev->lock, flags);
1133 list_for_each_entry(info, &vcdev->virtqueues, node) {
1134 if (info->vq->index == index) {
1135 vq = info->vq;
1136 break;
1137 }
1138 }
1139 spin_unlock_irqrestore(&vcdev->lock, flags);
1140 return vq;
1141 }
1142
static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
				      __u32 activity)
{
	/* Only act if this activity is actually outstanding. */
	if (!(vcdev->curr_io & activity))
		return;

	switch (activity) {
	case VIRTIO_CCW_DOING_READ_FEAT:
	case VIRTIO_CCW_DOING_WRITE_FEAT:
	case VIRTIO_CCW_DOING_READ_CONFIG:
	case VIRTIO_CCW_DOING_WRITE_CONFIG:
	case VIRTIO_CCW_DOING_WRITE_STATUS:
	case VIRTIO_CCW_DOING_READ_STATUS:
	case VIRTIO_CCW_DOING_SET_VQ:
	case VIRTIO_CCW_DOING_SET_IND:
	case VIRTIO_CCW_DOING_SET_CONF_IND:
	case VIRTIO_CCW_DOING_RESET:
	case VIRTIO_CCW_DOING_READ_VQ_CONF:
	case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
	case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
		/* Mark the request complete and wake any waiter. */
		vcdev->curr_io &= ~activity;
		wake_up(&vcdev->wait_q);
		break;
	default:
		/* don't know what to do... */
		dev_warn(&vcdev->cdev->dev,
			 "Suspicious activity '%08x'\n", activity);
		WARN_ON(1);
		break;
	}
}
1173
/*
 * Interrupt handler for a virtio ccw device.  Distinguishes I/O
 * completion interrupts for outstanding channel commands (identified via
 * intparm) from host->guest queue notifications delivered through the
 * classic indicator bits, and dispatches them accordingly.
 */
static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	/* Nothing to do if the transport device is already gone. */
	if (!vcdev)
		return;
	if (IS_ERR(irb)) {
		vcdev->err = PTR_ERR(irb);
		virtio_ccw_check_activity(vcdev, activity);
		/* Don't poke around indicators, something's wrong. */
		return;
	}
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	/* Wake up any waiter for the completed channel command. */
	virtio_ccw_check_activity(vcdev, activity);
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * Paired with virtio_ccw_synchronize_cbs() and interrupts are
	 * disabled here.
	 */
	read_lock(&vcdev->irq_lock);
#endif
	/* Kick every virtqueue whose indicator bit is set. */
	for_each_set_bit(i, indicators(vcdev),
			 sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, indicators(vcdev));
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	read_unlock(&vcdev->irq_lock);
#endif
	/* Bit 0 of the second indicator area signals a config change. */
	if (test_bit(0, indicators2(vcdev))) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, indicators2(vcdev));
	}
}
1230
/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
/* Number of longs needed for one bit per possible device number. */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		(8*sizeof(long)))
/* Per-subchannel-set bitmap of devices exempted from auto-online. */
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

/* Raw module parameter string; parsed once by no_auto_parse(). */
static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
1243
virtio_ccw_check_autoonline(struct ccw_device * cdev)1244 static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
1245 {
1246 struct ccw_dev_id id;
1247
1248 ccw_device_get_id(cdev, &id);
1249 if (test_bit(id.devno, devs_no_auto[id.ssid]))
1250 return 0;
1251 return 1;
1252 }
1253
static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int rc;

	/* Best effort: just log if onlining the device fails. */
	rc = ccw_device_set_online(cdev);
	if (rc)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", rc);
}
1263
virtio_ccw_probe(struct ccw_device * cdev)1264 static int virtio_ccw_probe(struct ccw_device *cdev)
1265 {
1266 cdev->handler = virtio_ccw_int_handler;
1267
1268 if (virtio_ccw_check_autoonline(cdev))
1269 async_schedule(virtio_ccw_auto_online, cdev);
1270 return 0;
1271 }
1272
virtio_grab_drvdata(struct ccw_device * cdev)1273 static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
1274 {
1275 unsigned long flags;
1276 struct virtio_ccw_device *vcdev;
1277
1278 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1279 vcdev = dev_get_drvdata(&cdev->dev);
1280 if (!vcdev || vcdev->going_away) {
1281 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1282 return NULL;
1283 }
1284 vcdev->going_away = true;
1285 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1286 return vcdev;
1287 }
1288
virtio_ccw_remove(struct ccw_device * cdev)1289 static void virtio_ccw_remove(struct ccw_device *cdev)
1290 {
1291 unsigned long flags;
1292 struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
1293
1294 if (vcdev && cdev->online) {
1295 if (vcdev->device_lost)
1296 virtio_break_device(&vcdev->vdev);
1297 unregister_virtio_device(&vcdev->vdev);
1298 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1299 dev_set_drvdata(&cdev->dev, NULL);
1300 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1301 }
1302 cdev->handler = NULL;
1303 }
1304
virtio_ccw_offline(struct ccw_device * cdev)1305 static int virtio_ccw_offline(struct ccw_device *cdev)
1306 {
1307 unsigned long flags;
1308 struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);
1309
1310 if (!vcdev)
1311 return 0;
1312 if (vcdev->device_lost)
1313 virtio_break_device(&vcdev->vdev);
1314 unregister_virtio_device(&vcdev->vdev);
1315 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1316 dev_set_drvdata(&cdev->dev, NULL);
1317 cdev->dev.dma_parms = NULL;
1318 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1319 return 0;
1320 }
1321
virtio_ccw_set_transport_rev(struct virtio_ccw_device * vcdev)1322 static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
1323 {
1324 struct virtio_rev_info *rev;
1325 struct ccw1 *ccw;
1326 int ret;
1327
1328 ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
1329 if (!ccw)
1330 return -ENOMEM;
1331 rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev), &ccw->cda);
1332 if (!rev) {
1333 ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
1334 return -ENOMEM;
1335 }
1336
1337 /* Set transport revision */
1338 ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
1339 ccw->flags = 0;
1340 ccw->count = sizeof(*rev);
1341
1342 vcdev->revision = VIRTIO_CCW_REV_MAX;
1343 do {
1344 rev->revision = vcdev->revision;
1345 /* none of our supported revisions carry payload */
1346 rev->length = 0;
1347 ret = ccw_io_helper(vcdev, ccw,
1348 VIRTIO_CCW_DOING_SET_VIRTIO_REV);
1349 if (ret == -EOPNOTSUPP) {
1350 if (vcdev->revision == 0)
1351 /*
1352 * The host device does not support setting
1353 * the revision: let's operate it in legacy
1354 * mode.
1355 */
1356 ret = 0;
1357 else
1358 vcdev->revision--;
1359 }
1360 } while (ret == -EOPNOTSUPP);
1361
1362 ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
1363 ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
1364 return ret;
1365 }
1366
virtio_ccw_online(struct ccw_device * cdev)1367 static int virtio_ccw_online(struct ccw_device *cdev)
1368 {
1369 int ret;
1370 struct virtio_ccw_device *vcdev;
1371 unsigned long flags;
1372
1373 vcdev = kzalloc_obj(*vcdev);
1374 if (!vcdev) {
1375 dev_warn(&cdev->dev, "Could not get memory for virtio\n");
1376 ret = -ENOMEM;
1377 goto out_free;
1378 }
1379 vcdev->vdev.dev.parent = &cdev->dev;
1380 vcdev->cdev = cdev;
1381 cdev->dev.dma_parms = &vcdev->dma_parms;
1382 vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
1383 sizeof(*vcdev->dma_area),
1384 &vcdev->dma_area_addr);
1385 if (!vcdev->dma_area) {
1386 ret = -ENOMEM;
1387 goto out_free;
1388 }
1389
1390 vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
1391
1392 vcdev->vdev.dev.release = virtio_ccw_release_dev;
1393 vcdev->vdev.config = &virtio_ccw_config_ops;
1394 init_waitqueue_head(&vcdev->wait_q);
1395 INIT_LIST_HEAD(&vcdev->virtqueues);
1396 spin_lock_init(&vcdev->lock);
1397 rwlock_init(&vcdev->irq_lock);
1398 mutex_init(&vcdev->io_lock);
1399
1400 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1401 dev_set_drvdata(&cdev->dev, vcdev);
1402 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1403 vcdev->vdev.id.vendor = cdev->id.cu_type;
1404 vcdev->vdev.id.device = cdev->id.cu_model;
1405
1406 ret = virtio_ccw_set_transport_rev(vcdev);
1407 if (ret)
1408 goto out_free;
1409
1410 ret = register_virtio_device(&vcdev->vdev);
1411 if (ret) {
1412 dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
1413 ret);
1414 goto out_put;
1415 }
1416 return 0;
1417 out_put:
1418 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1419 dev_set_drvdata(&cdev->dev, NULL);
1420 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1421 put_device(&vcdev->vdev.dev);
1422 return ret;
1423 out_free:
1424 if (vcdev) {
1425 ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
1426 sizeof(*vcdev->dma_area));
1427 }
1428 kfree(vcdev);
1429 return ret;
1430 }
1431
virtio_ccw_cio_notify(struct ccw_device * cdev,int event)1432 static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
1433 {
1434 int rc;
1435 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
1436
1437 /*
1438 * Make sure vcdev is set
1439 * i.e. set_offline/remove callback not already running
1440 */
1441 if (!vcdev)
1442 return NOTIFY_DONE;
1443
1444 switch (event) {
1445 case CIO_GONE:
1446 vcdev->device_lost = true;
1447 rc = NOTIFY_DONE;
1448 break;
1449 case CIO_OPER:
1450 rc = NOTIFY_OK;
1451 break;
1452 default:
1453 rc = NOTIFY_DONE;
1454 break;
1455 }
1456 return rc;
1457 }
1458
/* Control unit type 0x3832 identifies a virtio-ccw proxy device. */
static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};
1463
/* ccw bus driver glue for the virtio-ccw transport. */
static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
};
1477
pure_hex(char ** cp,unsigned int * val,int min_digit,int max_digit,int max_val)1478 static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
1479 int max_digit, int max_val)
1480 {
1481 int diff;
1482
1483 diff = 0;
1484 *val = 0;
1485
1486 while (diff <= max_digit) {
1487 int value = hex_to_bin(**cp);
1488
1489 if (value < 0)
1490 break;
1491 *val = *val * 16 + value;
1492 (*cp)++;
1493 diff++;
1494 }
1495
1496 if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
1497 return 1;
1498
1499 return 0;
1500 }
1501
parse_busid(char * str,unsigned int * cssid,unsigned int * ssid,unsigned int * devno)1502 static int __init parse_busid(char *str, unsigned int *cssid,
1503 unsigned int *ssid, unsigned int *devno)
1504 {
1505 char *str_work;
1506 int rc, ret;
1507
1508 rc = 1;
1509
1510 if (*str == '\0')
1511 goto out;
1512
1513 str_work = str;
1514 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
1515 if (ret || (str_work[0] != '.'))
1516 goto out;
1517 str_work++;
1518 ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
1519 if (ret || (str_work[0] != '.'))
1520 goto out;
1521 str_work++;
1522 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
1523 if (ret || (str_work[0] != '\0'))
1524 goto out;
1525
1526 rc = 0;
1527 out:
1528 return rc;
1529 }
1530
/*
 * Parse the no_auto= module parameter: a comma separated list of bus ids
 * ("<cssid>.<ssid>.<devno>") or ranges ("<from>-<to>") whose devices
 * must not be auto-onlined.  Matching bits are set in devs_no_auto[].
 */
static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			/* Range form: the end must not precede the start. */
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			/* Single bus id: treat as a one-element range. */
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		/* Mark every device in the range, wrapping into the next ssid. */
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}
1567
virtio_ccw_init(void)1568 static int __init virtio_ccw_init(void)
1569 {
1570 int rc;
1571
1572 /* parse no_auto string before we do anything further */
1573 no_auto_parse();
1574
1575 summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
1576 if (!summary_indicators)
1577 return -ENOMEM;
1578 rc = ccw_driver_register(&virtio_ccw_driver);
1579 if (rc)
1580 cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
1581 return rc;
1582 }
1583 device_initcall(virtio_ccw_init);
1584