1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2024 Loongson Technology Corporation Limited
4 */
5
6 #include <asm/kvm_eiointc.h>
7 #include <asm/kvm_pch_pic.h>
8 #include <asm/kvm_vcpu.h>
9 #include <linux/count_zeros.h>
10
11 /* update the isr according to irq level and route irq to eiointc */
pch_pic_update_irq(struct loongarch_pch_pic * s,int irq,int level)12 static void pch_pic_update_irq(struct loongarch_pch_pic *s, int irq, int level)
13 {
14 u64 mask = BIT(irq);
15
16 /*
17 * set isr and route irq to eiointc and
18 * the route table is in htmsi_vector[]
19 */
20 if (level) {
21 if (mask & s->irr & ~s->mask) {
22 s->isr |= mask;
23 irq = s->htmsi_vector[irq];
24 eiointc_set_irq(s->kvm->arch.eiointc, irq, level);
25 }
26 } else {
27 if (mask & s->isr & ~s->irr) {
28 s->isr &= ~mask;
29 irq = s->htmsi_vector[irq];
30 eiointc_set_irq(s->kvm->arch.eiointc, irq, level);
31 }
32 }
33 }
34
35 /* update batch irqs, the irq_mask is a bitmap of irqs */
pch_pic_update_batch_irqs(struct loongarch_pch_pic * s,u64 irq_mask,int level)36 static void pch_pic_update_batch_irqs(struct loongarch_pch_pic *s, u64 irq_mask, int level)
37 {
38 int irq, bits;
39
40 /* find each irq by irqs bitmap and update each irq */
41 bits = sizeof(irq_mask) * 8;
42 irq = find_first_bit((void *)&irq_mask, bits);
43 while (irq < bits) {
44 pch_pic_update_irq(s, irq, level);
45 bitmap_clear((void *)&irq_mask, irq, 1);
46 irq = find_first_bit((void *)&irq_mask, bits);
47 }
48 }
49
50 /* called when a irq is triggered in pch pic */
pch_pic_set_irq(struct loongarch_pch_pic * s,int irq,int level)51 void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level)
52 {
53 u64 mask = BIT(irq);
54
55 spin_lock(&s->lock);
56 if (level)
57 s->irr |= mask; /* set irr */
58 else {
59 /*
60 * In edge triggered mode, 0 does not mean to clear irq
61 * The irr register variable is cleared when cpu writes to the
62 * PCH_PIC_CLEAR_START address area
63 */
64 if (s->edge & mask) {
65 spin_unlock(&s->lock);
66 return;
67 }
68 s->irr &= ~mask;
69 }
70 pch_pic_update_irq(s, irq, level);
71 spin_unlock(&s->lock);
72 }
73
/* msi irq handler: MSI irqs bypass the pch pic and go straight to eiointc */
void pch_msi_set_irq(struct kvm *kvm, int irq, int level)
{
	/* irq is passed through to the eiointc unchanged */
	eiointc_set_irq(kvm->arch.eiointc, irq, level);
}
79
80 /*
81 * pch pic register is 64-bit, but it is accessed by 32-bit,
82 * so we use high to get whether low or high 32 bits we want
83 * to read.
84 */
pch_pic_read_reg(u64 * s,int high)85 static u32 pch_pic_read_reg(u64 *s, int high)
86 {
87 u64 val = *s;
88
89 /* read the high 32 bits when high is 1 */
90 return high ? (u32)(val >> 32) : (u32)val;
91 }
92
93 /*
94 * pch pic register is 64-bit, but it is accessed by 32-bit,
95 * so we use high to get whether low or high 32 bits we want
96 * to write.
97 */
pch_pic_write_reg(u64 * s,int high,u32 v)98 static u32 pch_pic_write_reg(u64 *s, int high, u32 v)
99 {
100 u64 val = *s, data = v;
101
102 if (high) {
103 /*
104 * Clear val high 32 bits
105 * Write the high 32 bits when the high is 1
106 */
107 *s = (val << 32 >> 32) | (data << 32);
108 val >>= 32;
109 } else
110 /*
111 * Clear val low 32 bits
112 * Write the low 32 bits when the high is 0
113 */
114 *s = (val >> 32 << 32) | v;
115
116 return (u32)val;
117 }
118
/*
 * Handle a guest mmio read of a pch pic register.
 *
 * @s:    software state of the emulated pch pic
 * @addr: absolute guest physical address of the access
 * @len:  access width in bytes
 * @val:  destination buffer supplied by the mmio dispatcher
 *
 * Returns 0 on success, -EINVAL for an offset outside the register map.
 */
static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val)
{
	int offset, index, ret = 0;
	u32 data = 0;
	u64 int_id = 0;

	/* register offsets are relative to the pic's mmio base */
	offset = addr - s->pch_pic_base;

	spin_lock(&s->lock);
	switch (offset) {
	case PCH_PIC_INT_ID_START ... PCH_PIC_INT_ID_END:
		/* int id version */
		int_id |= (u64)PCH_PIC_INT_ID_VER << 32;
		/* irq number */
		int_id |= (u64)31 << (32 + 16);
		/* int id value */
		int_id |= PCH_PIC_INT_ID_VAL;
		/*
		 * NOTE(review): stores 8 bytes regardless of len — assumes the
		 * guest reads INT_ID with a 64-bit access; confirm callers.
		 */
		*(u64 *)val = int_id;
		break;
	case PCH_PIC_MASK_START ... PCH_PIC_MASK_END:
		offset -= PCH_PIC_MASK_START;
		/* index selects low (0) or high (1) 32-bit half */
		index = offset >> 2;
		/* read mask reg */
		data = pch_pic_read_reg(&s->mask, index);
		*(u32 *)val = data;
		break;
	case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END:
		offset -= PCH_PIC_HTMSI_EN_START;
		index = offset >> 2;
		/* read htmsi enable reg */
		data = pch_pic_read_reg(&s->htmsi_en, index);
		*(u32 *)val = data;
		break;
	case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END:
		offset -= PCH_PIC_EDGE_START;
		index = offset >> 2;
		/* read edge enable reg */
		data = pch_pic_read_reg(&s->edge, index);
		*(u32 *)val = data;
		break;
	case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END:
	case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END:
		/* we only use default mode: fixed interrupt distribution mode */
		*(u32 *)val = 0;
		break;
	case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
		/* only route to int0: eiointc (one byte per route entry) */
		*(u8 *)val = 1;
		break;
	case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
		offset -= PCH_PIC_HTMSI_VEC_START;
		/* read htmsi vector (one byte per irq) */
		data = s->htmsi_vector[offset];
		*(u8 *)val = data;
		break;
	case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END:
		/* we only use default value 0: high level triggered */
		*(u32 *)val = 0;
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock(&s->lock);

	return ret;
}
185
/*
 * KVM mmio-bus read callback: validate the device and alignment, account
 * the exit, then hand off to the register decoder.
 */
static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, void *val)
{
	struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;

	if (!s) {
		kvm_err("%s: pch pic irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* accesses must be naturally aligned for their width */
	if (addr & (len - 1)) {
		kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	/* statistics of pch pic reading */
	vcpu->stat.pch_pic_read_exits++;

	return loongarch_pch_pic_read(s, addr, len, val);
}
209
/*
 * Handle a guest mmio write to a pch pic register.
 *
 * @s:    software state of the emulated pch pic
 * @addr: absolute guest physical address of the access
 * @len:  access width in bytes
 * @val:  source buffer with the guest-written value
 *
 * Returns 0 on success, -EINVAL for an offset outside the register map.
 *
 * Fixes versus the previous revision:
 *  - mask handling cast the 32-bit diff to u64 before shifting; shifting a
 *    32-bit value by 32 (index == 1) is undefined behavior and dropped all
 *    irq updates for the high word.
 *  - the CLEAR path now shifts the per-word bitmap by (32 * index) so the
 *    batch update sees absolute irq numbers (32..63 for the high word)
 *    instead of always acting on irqs 0..31.
 */
static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr,
				int len, const void *val)
{
	int ret;
	u32 old, data, offset, index;
	u64 irq;

	ret = 0;
	data = *(u32 *)val;
	offset = addr - s->pch_pic_base;

	spin_lock(&s->lock);
	switch (offset) {
	case PCH_PIC_MASK_START ... PCH_PIC_MASK_END:
		offset -= PCH_PIC_MASK_START;
		/* get whether high or low 32 bits we want to write */
		index = offset >> 2;
		old = pch_pic_write_reg(&s->mask, index, data);
		/* enable irq when mask value change to 0 */
		irq = (u64)(old & ~data) << (32 * index);
		pch_pic_update_batch_irqs(s, irq, 1);
		/* disable irq when mask value change to 1 */
		irq = (u64)(~old & data) << (32 * index);
		pch_pic_update_batch_irqs(s, irq, 0);
		break;
	case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END:
		offset -= PCH_PIC_HTMSI_EN_START;
		index = offset >> 2;
		pch_pic_write_reg(&s->htmsi_en, index, data);
		break;
	case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END:
		offset -= PCH_PIC_EDGE_START;
		index = offset >> 2;
		/* 1: edge triggered, 0: level triggered */
		pch_pic_write_reg(&s->edge, index, data);
		break;
	case PCH_PIC_CLEAR_START ... PCH_PIC_CLEAR_END:
		offset -= PCH_PIC_CLEAR_START;
		index = offset >> 2;
		/* write 1 to clear edge irq */
		old = pch_pic_read_reg(&s->irr, index);
		/*
		 * get the irq bitmap which is edge triggered and
		 * already set and to be cleared
		 */
		irq = old & pch_pic_read_reg(&s->edge, index) & data;
		/* write irr to the new state where irqs have been cleared */
		pch_pic_write_reg(&s->irr, index, old & ~irq);
		/* update cleared irqs, using absolute irq numbers */
		pch_pic_update_batch_irqs(s, irq << (32 * index), 0);
		break;
	case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END:
		offset -= PCH_PIC_AUTO_CTRL0_START;
		index = offset >> 2;
		/* we only use default mode: fixed interrupt distribution mode */
		pch_pic_write_reg(&s->auto_ctrl0, index, 0);
		break;
	case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END:
		offset -= PCH_PIC_AUTO_CTRL1_START;
		index = offset >> 2;
		/* we only use default mode: fixed interrupt distribution mode */
		pch_pic_write_reg(&s->auto_ctrl1, index, 0);
		break;
	case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
		offset -= PCH_PIC_ROUTE_ENTRY_START;
		/* only route to int0: eiointc */
		s->route_entry[offset] = 1;
		break;
	case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
		/* route table to eiointc */
		offset -= PCH_PIC_HTMSI_VEC_START;
		s->htmsi_vector[offset] = (u8)data;
		break;
	case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END:
		offset -= PCH_PIC_POLARITY_START;
		index = offset >> 2;
		/* we only use default value 0: high level triggered */
		pch_pic_write_reg(&s->polarity, index, 0);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock(&s->lock);

	return ret;
}
297
/*
 * KVM mmio-bus write callback: validate the device and alignment, account
 * the exit, then hand off to the register decoder.
 */
static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
			struct kvm_io_device *dev,
			gpa_t addr, int len, const void *val)
{
	struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic;

	if (!s) {
		kvm_err("%s: pch pic irqchip not valid!\n", __func__);
		return -EINVAL;
	}

	/* accesses must be naturally aligned for their width */
	if (addr & (len - 1)) {
		kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
		return -EINVAL;
	}

	/* statistics of pch pic writing */
	vcpu->stat.pch_pic_write_exits++;

	return loongarch_pch_pic_write(s, addr, len, val);
}
321
/* mmio callbacks registered on the KVM MMIO bus for the pch pic region */
static const struct kvm_io_device_ops kvm_pch_pic_ops = {
	.read	= kvm_pch_pic_read,
	.write	= kvm_pch_pic_write,
};
326
/*
 * Set the pic's guest-physical base address and register its mmio region
 * on the KVM MMIO bus. Returns 0 on success, -EFAULT on registration
 * failure.
 */
static int kvm_pch_pic_init(struct kvm_device *dev, u64 addr)
{
	int ret;
	struct kvm *kvm = dev->kvm;
	struct loongarch_pch_pic *s = dev->kvm->arch.pch_pic;

	s->pch_pic_base = addr;
	/* init device by pch pic writing and reading ops */
	kvm_iodevice_init(&s->device, &kvm_pch_pic_ops);

	/* register pch pic device under slots_lock */
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, PCH_PIC_SIZE, &s->device);
	mutex_unlock(&kvm->slots_lock);

	return (ret < 0) ? -EFAULT : 0;
}
345
/*
 * Used by user space to get or set pch pic registers.
 *
 * attr->attr selects the register (same offsets as the guest mmio map),
 * attr->addr is the user-space buffer. Most registers transfer 8 bytes;
 * route entries and htmsi vectors transfer a single byte. The user copy
 * happens outside s->lock; only the register memcpy runs under the lock.
 */
static int kvm_pch_pic_regs_access(struct kvm_device *dev,
					struct kvm_device_attr *attr,
					bool is_write)
{
	char buf[8];
	int addr, offset, len = 8, ret = 0;
	void __user *data;
	void *p = NULL;
	struct loongarch_pch_pic *s;

	s = dev->kvm->arch.pch_pic;
	/* NOTE(review): attr->attr is truncated to int here — fine for the
	 * register offsets used below, but verify for future additions */
	addr = attr->attr;
	data = (void __user *)attr->addr;

	/* get pointer to pch pic register by addr */
	switch (addr) {
	case PCH_PIC_MASK_START:
		p = &s->mask;
		break;
	case PCH_PIC_HTMSI_EN_START:
		p = &s->htmsi_en;
		break;
	case PCH_PIC_EDGE_START:
		p = &s->edge;
		break;
	case PCH_PIC_AUTO_CTRL0_START:
		p = &s->auto_ctrl0;
		break;
	case PCH_PIC_AUTO_CTRL1_START:
		p = &s->auto_ctrl1;
		break;
	case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END:
		/* one byte per route entry */
		offset = addr - PCH_PIC_ROUTE_ENTRY_START;
		p = &s->route_entry[offset];
		len = 1;
		break;
	case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END:
		/* one byte per htmsi vector */
		offset = addr - PCH_PIC_HTMSI_VEC_START;
		p = &s->htmsi_vector[offset];
		len = 1;
		break;
	case PCH_PIC_INT_IRR_START:
		p = &s->irr;
		break;
	case PCH_PIC_INT_ISR_START:
		p = &s->isr;
		break;
	case PCH_PIC_POLARITY_START:
		p = &s->polarity;
		break;
	default:
		return -EINVAL;
	}

	/* copy from user before taking the spinlock (may fault/sleep) */
	if (is_write) {
		if (copy_from_user(buf, data, len))
			return -EFAULT;
	}

	spin_lock(&s->lock);
	if (is_write)
		memcpy(p, buf, len);
	else
		memcpy(buf, p, len);
	spin_unlock(&s->lock);

	/* copy to user after dropping the spinlock */
	if (!is_write) {
		if (copy_to_user(data, buf, len))
			return -EFAULT;
	}

	return ret;
}
420
kvm_pch_pic_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)421 static int kvm_pch_pic_get_attr(struct kvm_device *dev,
422 struct kvm_device_attr *attr)
423 {
424 switch (attr->group) {
425 case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS:
426 return kvm_pch_pic_regs_access(dev, attr, false);
427 default:
428 return -EINVAL;
429 }
430 }
431
kvm_pch_pic_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)432 static int kvm_pch_pic_set_attr(struct kvm_device *dev,
433 struct kvm_device_attr *attr)
434 {
435 u64 addr;
436 void __user *uaddr = (void __user *)(long)attr->addr;
437
438 switch (attr->group) {
439 case KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL:
440 switch (attr->attr) {
441 case KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT:
442 if (copy_from_user(&addr, uaddr, sizeof(addr)))
443 return -EFAULT;
444
445 if (!dev->kvm->arch.pch_pic) {
446 kvm_err("%s: please create pch_pic irqchip first!\n", __func__);
447 return -ENODEV;
448 }
449
450 return kvm_pch_pic_init(dev, addr);
451 default:
452 kvm_err("%s: unknown group (%d) attr (%lld)\n", __func__, attr->group,
453 attr->attr);
454 return -EINVAL;
455 }
456 case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS:
457 return kvm_pch_pic_regs_access(dev, attr, true);
458 default:
459 return -EINVAL;
460 }
461 }
462
kvm_setup_default_irq_routing(struct kvm * kvm)463 static int kvm_setup_default_irq_routing(struct kvm *kvm)
464 {
465 int i, ret;
466 u32 nr = KVM_IRQCHIP_NUM_PINS;
467 struct kvm_irq_routing_entry *entries;
468
469 entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
470 if (!entries)
471 return -ENOMEM;
472
473 for (i = 0; i < nr; i++) {
474 entries[i].gsi = i;
475 entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
476 entries[i].u.irqchip.irqchip = 0;
477 entries[i].u.irqchip.pin = i;
478 }
479 ret = kvm_set_irq_routing(kvm, entries, nr, 0);
480 kfree(entries);
481
482 return ret;
483 }
484
/*
 * Create the pch pic device: set up default irq routing, allocate and
 * initialize the software state, and publish it in kvm->arch.
 *
 * Returns 0 on success, -EINVAL if already created, -ENOMEM on allocation
 * failure, or the routing-setup error (previously this was masked as
 * -ENOMEM, hiding the real cause from user space).
 */
static int kvm_pch_pic_create(struct kvm_device *dev, u32 type)
{
	int ret;
	struct kvm *kvm = dev->kvm;
	struct loongarch_pch_pic *s;

	/* pch pic should not have been created already */
	if (kvm->arch.pch_pic)
		return -EINVAL;

	ret = kvm_setup_default_irq_routing(kvm);
	if (ret)
		return ret;

	s = kzalloc(sizeof(struct loongarch_pch_pic), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;
	kvm->arch.pch_pic = s;

	return 0;
}
509
kvm_pch_pic_destroy(struct kvm_device * dev)510 static void kvm_pch_pic_destroy(struct kvm_device *dev)
511 {
512 struct kvm *kvm;
513 struct loongarch_pch_pic *s;
514
515 if (!dev || !dev->kvm || !dev->kvm->arch.pch_pic)
516 return;
517
518 kvm = dev->kvm;
519 s = kvm->arch.pch_pic;
520 /* unregister pch pic device and free it's memory */
521 kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &s->device);
522 kfree(s);
523 }
524
/* device-type ops registered with the generic KVM device framework */
static struct kvm_device_ops kvm_pch_pic_dev_ops = {
	.name = "kvm-loongarch-pch-pic",
	.create = kvm_pch_pic_create,
	.destroy = kvm_pch_pic_destroy,
	.set_attr = kvm_pch_pic_set_attr,
	.get_attr = kvm_pch_pic_get_attr,
};
532
/* register the pch pic device type so user space can create it */
int kvm_loongarch_register_pch_pic_device(void)
{
	return kvm_register_device_ops(&kvm_pch_pic_dev_ops, KVM_DEV_TYPE_LOONGARCH_PCHPIC);
}
537