xref: /linux/samples/vfio-mdev/mtty.c (revision c995498636c704641c9e809c31b59445b48f7adc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Mediated virtual PCI serial host device driver
4  *
5  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
6  *     Author: Neo Jia <cjia@nvidia.com>
7  *             Kirti Wankhede <kwankhede@nvidia.com>
8  *
9  * Sample driver that creates mdev device that simulates serial port over PCI
10  * card.
11  */
12 
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/fs.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/cdev.h>
20 #include <linux/sched.h>
21 #include <linux/wait.h>
22 #include <linux/vfio.h>
23 #include <linux/iommu.h>
24 #include <linux/sysfs.h>
25 #include <linux/ctype.h>
26 #include <linux/file.h>
27 #include <linux/mdev.h>
28 #include <linux/pci.h>
29 #include <linux/serial.h>
30 #include <uapi/linux/serial_reg.h>
31 #include <linux/eventfd.h>
32 #include <linux/anon_inodes.h>
33 
34 /*
35  * #defines
36  */
37 
38 #define VERSION_STRING  "0.1"
39 #define DRIVER_AUTHOR   "NVIDIA Corporation"
40 
41 #define MTTY_CLASS_NAME "mtty"
42 
43 #define MTTY_NAME       "mtty"
44 
45 #define MTTY_STRING_LEN		16
46 
47 #define MTTY_CONFIG_SPACE_SIZE  0xff
48 #define MTTY_IO_BAR_SIZE        0x8
49 #define MTTY_MMIO_BAR_SIZE      0x100000
50 
/*
 * Store a 16/32-bit value into the virtual config space at @addr.
 * PCI config space is little-endian; this sample assumes an LE host.
 * Arguments are fully parenthesized so the macros stay expression-safe
 * (e.g. when @addr is a non-trivial expression like &buf[off]).
 */
#define STORE_LE16(addr, val)   (*(u16 *)(addr) = (val))
#define STORE_LE32(addr, val)   (*(u32 *)(addr) = (val))
53 
54 #define MAX_FIFO_SIZE   16
55 
56 #define CIRCULAR_BUF_INC_IDX(idx)    (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
57 
58 #define MTTY_VFIO_PCI_OFFSET_SHIFT   40
59 
60 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
61 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
62 				((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
63 #define MTTY_VFIO_PCI_OFFSET_MASK    \
64 				(((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
65 #define MAX_MTTYS	24
66 
67 /*
68  * Global Structures
69  */
70 
71 static const struct class mtty_class = {
72 	.name	= MTTY_CLASS_NAME
73 };
74 
75 static struct mtty_dev {
76 	dev_t		vd_devt;
77 	struct cdev	vd_cdev;
78 	struct idr	vd_idr;
79 	struct device	dev;
80 	struct mdev_parent parent;
81 } mtty_dev;
82 
83 struct mdev_region_info {
84 	u64 start;
85 	u64 phys_start;
86 	u32 size;
87 	u64 vfio_offset;
88 };
89 
90 #if defined(DEBUG_REGS)
91 static const char *wr_reg[] = {
92 	"TX",
93 	"IER",
94 	"FCR",
95 	"LCR",
96 	"MCR",
97 	"LSR",
98 	"MSR",
99 	"SCR"
100 };
101 
102 static const char *rd_reg[] = {
103 	"RX",
104 	"IER",
105 	"IIR",
106 	"LCR",
107 	"MCR",
108 	"LSR",
109 	"MSR",
110 	"SCR"
111 };
112 #endif
113 
114 /* loop back buffer */
115 struct rxtx {
116 	u8 fifo[MAX_FIFO_SIZE];
117 	u8 head, tail;
118 	u8 count;
119 };
120 
121 struct serial_port {
122 	u8 uart_reg[8];         /* 8 registers */
123 	struct rxtx rxtx;       /* loop back buffer */
124 	bool dlab;
125 	bool overrun;
126 	u16 divisor;
127 	u8 fcr;                 /* FIFO control register */
128 	u8 max_fifo_size;
129 	u8 intr_trigger_level;  /* interrupt trigger level */
130 };
131 
132 struct mtty_data {
133 	u64 magic;
134 #define MTTY_MAGIC 0x7e9d09898c3e2c4e /* Nothing clever, just random */
135 	u32 major_ver;
136 #define MTTY_MAJOR_VER 1
137 	u32 minor_ver;
138 #define MTTY_MINOR_VER 0
139 	u32 nr_ports;
140 	u32 flags;
141 	struct serial_port ports[2];
142 };
143 
144 struct mdev_state;
145 
146 struct mtty_migration_file {
147 	struct file *filp;
148 	struct mutex lock;
149 	struct mdev_state *mdev_state;
150 	struct mtty_data data;
151 	ssize_t filled_size;
152 	u8 disabled:1;
153 };
154 
155 /* State of each mdev device */
/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;		/* embedded vfio device */
	struct eventfd_ctx *intx_evtfd;		/* signalled to inject INTx */
	struct eventfd_ctx *msi_evtfd;		/* signalled to inject MSI */
	int irq_index;				/* active VFIO_PCI_*_IRQ_INDEX (selects INTx vs MSI) */
	u8 *vconfig;				/* virtual PCI config space buffer */
	struct mutex ops_lock;			/* serializes mdev_access() and interrupt injection */
	struct mdev_device *mdev;
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];	/* size masks reported by the BAR sizing protocol */
	struct list_head next;
	struct serial_port s[2];		/* emulated 16550 state, one entry per port */
	struct mutex rxtx_lock;			/* protects the loopback FIFOs inside s[] */
	struct vfio_device_info dev_info;
	int nr_ports;				/* number of emulated serial ports (1 or 2) */
	enum vfio_device_mig_state state;	/* current migration state */
	struct mutex state_mutex;		/* protects state; drop via mtty_state_mutex_unlock() */
	struct mutex reset_mutex;		/* guards deferred_reset */
	struct mtty_migration_file *saving_migf;	/* active save-side migration file, if any */
	struct mtty_migration_file *resuming_migf;	/* active resume-side migration file, if any */
	u8 deferred_reset:1;		/* reset requested while state_mutex was held */
	u8 intx_mask:1;			/* INTx masked; set after each INTx signal */
};
179 
180 static struct mtty_type {
181 	struct mdev_type type;
182 	int nr_ports;
183 } mtty_types[2] = {
184 	{ .nr_ports = 1, .type.sysfs_name = "1",
185 	  .type.pretty_name = "Single port serial" },
186 	{ .nr_ports = 2, .type.sysfs_name = "2",
187 	  .type.pretty_name = "Dual port serial" },
188 };
189 
190 static struct mdev_type *mtty_mdev_types[] = {
191 	&mtty_types[0].type,
192 	&mtty_types[1].type,
193 };
194 
195 static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS);
196 
197 static const struct file_operations vd_fops = {
198 	.owner          = THIS_MODULE,
199 };
200 
201 static const struct vfio_device_ops mtty_dev_ops;
202 
203 /* Helper functions */
204 
205 static void dump_buffer(u8 *buf, uint32_t count)
206 {
207 #if defined(DEBUG)
208 	int i;
209 
210 	pr_info("Buffer:\n");
211 	for (i = 0; i < count; i++) {
212 		pr_info("%2x ", *(buf + i));
213 		if ((i + 1) % 16 == 0)
214 			pr_info("\n");
215 	}
216 #endif
217 }
218 
/* True when the device is currently configured for INTx signalling. */
static bool is_intx(struct mdev_state *mdev_state)
{
	return mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX;
}
223 
/* True when the device is currently configured for MSI signalling. */
static bool is_msi(struct mdev_state *mdev_state)
{
	return mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX;
}
228 
/* True when no interrupt mechanism is configured (neither INTx nor MSI). */
static bool is_noirq(struct mdev_state *mdev_state)
{
	return !is_intx(mdev_state) && !is_msi(mdev_state);
}
233 
/*
 * Inject an interrupt into the guest using whichever mechanism is
 * currently configured.
 *
 * MSI: signal the MSI eventfd if one is registered.
 * INTx: signal the INTx eventfd only if the line is not masked, then
 * set intx_mask so the level-style interrupt is not re-signalled until
 * the guest unmasks it (presumably cleared by the IRQ ioctl handling
 * elsewhere in this driver — not visible in this chunk).
 * In no-IRQ mode the event is silently dropped.
 */
static void mtty_trigger_interrupt(struct mdev_state *mdev_state)
{
	lockdep_assert_held(&mdev_state->ops_lock);

	if (is_msi(mdev_state)) {
		if (mdev_state->msi_evtfd)
			eventfd_signal(mdev_state->msi_evtfd);
	} else if (is_intx(mdev_state)) {
		if (mdev_state->intx_evtfd && !mdev_state->intx_mask) {
			eventfd_signal(mdev_state->intx_evtfd);
			mdev_state->intx_mask = true;
		}
	}
}
248 
/*
 * Populate the virtual PCI configuration space for one mdev instance.
 *
 * The device presents itself as a 16550-compatible PCI serial card
 * with one 8-byte IO BAR per emulated port (BAR0 always, BAR1 only
 * for the dual-port type).  Values are stored little-endian, as PCI
 * config space is defined LE.
 */
static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID: vendor 0x4348, device 0x3253 ("HCS2" as ASCII bytes) */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] =  0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] =  0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] =  0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] =  0x07;

	/* base address registers */
	/* BAR0: IO space (bit0 = 1 marks an IO BAR) */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	/* sizing mask: ~size + 1 == -size, i.e. an 8-byte aligned IO BAR */
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space for the second port */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID: mirrors the vendor/device ID above */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] =  0x00;   /* Cap Ptr */
	mdev_state->vconfig[0x3d] =  0x01;   /* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] =  0x23;
	mdev_state->vconfig[0x43] =  0x80;
	mdev_state->vconfig[0x44] =  0x23;
	mdev_state->vconfig[0x48] =  0x23;
	mdev_state->vconfig[0x4c] =  0x23;

	/* 0x60-0x6e spell "PCI Serial/UART" in ASCII */
	mdev_state->vconfig[0x60] =  0x50;
	mdev_state->vconfig[0x61] =  0x43;
	mdev_state->vconfig[0x62] =  0x49;
	mdev_state->vconfig[0x63] =  0x20;
	mdev_state->vconfig[0x64] =  0x53;
	mdev_state->vconfig[0x65] =  0x65;
	mdev_state->vconfig[0x66] =  0x72;
	mdev_state->vconfig[0x67] =  0x69;
	mdev_state->vconfig[0x68] =  0x61;
	mdev_state->vconfig[0x69] =  0x6c;
	mdev_state->vconfig[0x6a] =  0x2f;
	mdev_state->vconfig[0x6b] =  0x55;
	mdev_state->vconfig[0x6c] =  0x41;
	mdev_state->vconfig[0x6d] =  0x52;
	mdev_state->vconfig[0x6e] =  0x54;
}
312 
/*
 * Emulate a guest write to PCI config space.
 * @offset: config-space offset being written
 * @buf:    data written by the guest (little-endian)
 * @count:  number of bytes (used only for the unhandled-write log)
 *
 * Only the interrupt-line register and the BAR registers are writable.
 * BAR writes implement the standard PCI sizing protocol: the guest
 * writes all-ones and reads back the size mask.
 */
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 u8 *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c:  /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10:  /* BAR0 */
	case 0x14:  /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		/* single-port devices have no BAR1: reads back as zero */
		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		/* sizing probe: all-ones write returns the size mask */
		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		/* preserve the read-only low bits (IO-space flag etc.) */
		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18:  /* BAR2 */
	case 0x1c:  /* BAR3 */
	case 0x20:  /* BAR4 */
		/* unimplemented BARs: always read back as zero */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
366 
/*
 * Emulate a guest write of one byte to a UART register in BAR @index.
 *
 * TX data is looped back into the port's RX FIFO, raising the
 * interrupts a real 16550 would: RDI when the FIFO reaches the trigger
 * level, RLSI on overrun, THRI when the transmitter is empty, and a
 * modem-status interrupt on MCR changes.  When DLAB is set, writes to
 * TX/IER program the divisor latch instead.
 */
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
				u16 offset, u8 *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			   (mdev_state->s[index].rxtx.count ==
				    mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}
		} else {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
								UART_IER_RLSI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			/* THRI enabled with an empty FIFO fires immediately */
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}

			mutex_unlock(&mdev_state->rxtx_lock);
		}

		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Set trigger level to 1 otherwise or  implement timer with
		 * timeout of 4 characters and on expiring that timer set
		 * Recevice data timeout in IIR register
		 *
		 * NOTE(review): this unconditionally overrides the value
		 * chosen in the switch above, so the effective trigger
		 * level is always 1 — intentional per the comment, since
		 * the character-timeout timer is not implemented.
		 */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}

		break;

	case UART_LCR:
		/* DLAB opens the divisor latch; clearing it resumes normal regs */
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing: LSR/MSR are read-only status registers */
		break;

	case UART_SCR:
		/* scratch register: plain read/write storage */
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
534 
/*
 * Emulate a guest read of one byte from a UART register in BAR @index;
 * the result is returned through @buf.
 *
 * Reading RX pops the loopback FIFO and raises THRI once it drains.
 * When DLAB is set, RX/IER return the divisor latch bytes instead.
 * IIR is synthesized from IER and FIFO state in 16550 priority order.
 */
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, u8 *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf  = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
						mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
		/*
		 *  Trigger interrupt if tx buffer empty interrupt is
		 *  enabled and fifo is empty
		 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							 UART_IER_THRI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		/* only the low 4 interrupt-enable bits are implemented */
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: Fifo trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count >=
		      mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priotiry 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priotiry 4: Modem status: CTS, DSR, RI or DCD  */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				 (UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and tramsitter empty */
		if (mdev_state->s[index].rxtx.head ==
				 mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		/* DSR/DCD always asserted on this virtual link */
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and FIFO have space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
						 UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}
672 
/*
 * Refresh region_info[].start for each implemented BAR from the
 * guest-programmed values in the virtual config space.
 *
 * 64-bit memory BARs consume two consecutive config dwords (the second
 * supplies the high 32 bits); 32-bit, 1M and unknown memory types are
 * all treated as 32-bit BARs with a zero high half.
 */
static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		/* skip BARs this device type does not implement */
		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
709 
/*
 * Common dispatcher for read/write access to the device.
 *
 * @pos encodes the VFIO region index in its high bits
 * (MTTY_VFIO_PCI_OFFSET_SHIFT) and the offset within that region in
 * the low bits.  Config-space accesses go straight to vconfig /
 * handle_pci_cfg_write(); BAR accesses are routed to the UART
 * emulation.  Returns @count on success or a negative value on an
 * unsupported region.  Serialized by ops_lock.
 */
static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
			   loff_t pos, bool is_write)
{
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!buf)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			 __func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}

		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		/* lazily pick up BAR bases the guest may have reprogrammed */
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}
779 
780 static size_t mtty_data_size(struct mdev_state *mdev_state)
781 {
782 	return offsetof(struct mtty_data, ports) +
783 		(mdev_state->nr_ports * sizeof(struct serial_port));
784 }
785 
/*
 * Mark a migration file dead: subsequent read/write/ioctl on it fail
 * with -ENODEV, and its fill level and file position are reset.
 * Serialized against concurrent file operations by migf->lock.
 */
static void mtty_disable_file(struct mtty_migration_file *migf)
{
	mutex_lock(&migf->lock);
	migf->disabled = true;
	migf->filled_size = 0;
	migf->filp->f_pos = 0;
	mutex_unlock(&migf->lock);
}
794 
795 static void mtty_disable_files(struct mdev_state *mdev_state)
796 {
797 	if (mdev_state->saving_migf) {
798 		mtty_disable_file(mdev_state->saving_migf);
799 		fput(mdev_state->saving_migf->filp);
800 		mdev_state->saving_migf = NULL;
801 	}
802 
803 	if (mdev_state->resuming_migf) {
804 		mtty_disable_file(mdev_state->resuming_migf);
805 		fput(mdev_state->resuming_migf->filp);
806 		mdev_state->resuming_migf = NULL;
807 	}
808 }
809 
/*
 * Drop state_mutex, applying any device reset that was deferred while
 * it was held.
 *
 * The reset path apparently cannot take state_mutex itself (the reset
 * handler is not visible in this chunk — it records the request via
 * deferred_reset under reset_mutex instead).  Here, under reset_mutex,
 * we consume that flag: force the migration state back to RUNNING and
 * close any open migration files, then loop in case another reset
 * raced in while we were processing.  Only when no reset is pending
 * are both mutexes released.
 */
static void mtty_state_mutex_unlock(struct mdev_state *mdev_state)
{
again:
	mutex_lock(&mdev_state->reset_mutex);
	if (mdev_state->deferred_reset) {
		mdev_state->deferred_reset = false;
		mutex_unlock(&mdev_state->reset_mutex);
		mdev_state->state = VFIO_DEVICE_STATE_RUNNING;
		mtty_disable_files(mdev_state);
		goto again;
	}
	mutex_unlock(&mdev_state->state_mutex);
	mutex_unlock(&mdev_state->reset_mutex);
}
824 
/*
 * release() for both save and resume migration files, invoked when the
 * last reference to the struct file is dropped.  Disables the migf and
 * frees it; the driver's own reference (saving_migf/resuming_migf) is
 * put separately in mtty_disable_files().
 */
static int mtty_release_migf(struct inode *inode, struct file *filp)
{
	struct mtty_migration_file *migf = filp->private_data;

	mtty_disable_file(migf);
	mutex_destroy(&migf->lock);
	kfree(migf);

	return 0;
}
835 
/*
 * VFIO_MIG_GET_PRECOPY_INFO handler for the save-side migration file.
 *
 * Reports how much already-serialized data remains to be read at the
 * current file position (initial_bytes).  mtty has no dirty tracking,
 * so dirty_bytes is always 0.  Only valid while the device is in one
 * of the PRE_COPY states; fails with -ENODEV once the file has been
 * disabled and -EINVAL if f_pos ran past the filled region.
 */
static long mtty_precopy_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct mtty_migration_file *migf = filp->private_data;
	struct mdev_state *mdev_state = migf->mdev_state;
	loff_t *pos = &filp->f_pos;
	struct vfio_precopy_info info = {};
	int ret;

	ret = vfio_check_precopy_ioctl(&mdev_state->vdev, cmd, arg, &info);
	if (ret)
		return ret;

	mutex_lock(&mdev_state->state_mutex);
	if (mdev_state->state != VFIO_DEVICE_STATE_PRE_COPY &&
	    mdev_state->state != VFIO_DEVICE_STATE_PRE_COPY_P2P) {
		ret = -EINVAL;
		goto unlock;
	}

	mutex_lock(&migf->lock);

	if (migf->disabled) {
		mutex_unlock(&migf->lock);
		ret = -ENODEV;
		goto unlock;
	}

	if (*pos > migf->filled_size) {
		mutex_unlock(&migf->lock);
		ret = -EINVAL;
		goto unlock;
	}

	info.dirty_bytes = 0;
	info.initial_bytes = migf->filled_size - *pos;
	mutex_unlock(&migf->lock);

	ret = copy_to_user((void __user *)arg, &info,
		offsetofend(struct vfio_precopy_info, dirty_bytes)) ? -EFAULT : 0;
unlock:
	/* also processes any reset deferred while state_mutex was held */
	mtty_state_mutex_unlock(mdev_state);
	return ret;
}
880 
881 static ssize_t mtty_save_read(struct file *filp, char __user *buf,
882 			      size_t len, loff_t *pos)
883 {
884 	struct mtty_migration_file *migf = filp->private_data;
885 	ssize_t ret = 0;
886 
887 	if (pos)
888 		return -ESPIPE;
889 
890 	pos = &filp->f_pos;
891 
892 	mutex_lock(&migf->lock);
893 
894 	dev_dbg(migf->mdev_state->vdev.dev, "%s ask %zu\n", __func__, len);
895 
896 	if (migf->disabled) {
897 		ret = -ENODEV;
898 		goto out_unlock;
899 	}
900 
901 	if (*pos > migf->filled_size) {
902 		ret = -EINVAL;
903 		goto out_unlock;
904 	}
905 
906 	len = min_t(size_t, migf->filled_size - *pos, len);
907 	if (len) {
908 		if (copy_to_user(buf, (void *)&migf->data + *pos, len)) {
909 			ret = -EFAULT;
910 			goto out_unlock;
911 		}
912 		*pos += len;
913 		ret = len;
914 	}
915 out_unlock:
916 	dev_dbg(migf->mdev_state->vdev.dev, "%s read %zu\n", __func__, ret);
917 	mutex_unlock(&migf->lock);
918 	return ret;
919 }
920 
/* file_operations for the save-side migration file: a read-only stream
 * plus the precopy-info ioctl.  Released via mtty_release_migf().
 */
static const struct file_operations mtty_save_fops = {
	.owner = THIS_MODULE,
	.read = mtty_save_read,
	.unlocked_ioctl = mtty_precopy_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = mtty_release_migf,
};
928 
929 static void mtty_save_state(struct mdev_state *mdev_state)
930 {
931 	struct mtty_migration_file *migf = mdev_state->saving_migf;
932 	int i;
933 
934 	mutex_lock(&migf->lock);
935 	for (i = 0; i < mdev_state->nr_ports; i++) {
936 		memcpy(&migf->data.ports[i],
937 			&mdev_state->s[i], sizeof(struct serial_port));
938 		migf->filled_size += sizeof(struct serial_port);
939 	}
940 	dev_dbg(mdev_state->vdev.dev,
941 		"%s filled to %zu\n", __func__, migf->filled_size);
942 	mutex_unlock(&migf->lock);
943 }
944 
945 static int mtty_load_state(struct mdev_state *mdev_state)
946 {
947 	struct mtty_migration_file *migf = mdev_state->resuming_migf;
948 	int i;
949 
950 	mutex_lock(&migf->lock);
951 	/* magic and version already tested by resume write fn */
952 	if (migf->filled_size < mtty_data_size(mdev_state)) {
953 		dev_dbg(mdev_state->vdev.dev, "%s expected %zu, got %zu\n",
954 			__func__, mtty_data_size(mdev_state),
955 			migf->filled_size);
956 		mutex_unlock(&migf->lock);
957 		return -EINVAL;
958 	}
959 
960 	for (i = 0; i < mdev_state->nr_ports; i++)
961 		memcpy(&mdev_state->s[i],
962 		       &migf->data.ports[i], sizeof(struct serial_port));
963 
964 	mutex_unlock(&migf->lock);
965 	return 0;
966 }
967 
/*
 * Create (or reuse) the save-side migration file for transition to
 * @state.
 *
 * If a migf already exists (PRE_COPY started earlier), no new file is
 * created and NULL is returned; on entry to STOP_COPY the per-port
 * state is still appended to the existing file first.  Otherwise a new
 * anon-inode stream file is allocated, the fixed header (magic,
 * version, nr_ports) is filled in, and it is published as
 * mdev_state->saving_migf.  Returns the new migf, NULL when the
 * existing file was reused, or an ERR_PTR on failure.
 */
static struct mtty_migration_file *
mtty_save_device_data(struct mdev_state *mdev_state,
		      enum vfio_device_mig_state state)
{
	struct mtty_migration_file *migf = mdev_state->saving_migf;
	struct mtty_migration_file *ret = NULL;

	if (migf) {
		if (state == VFIO_DEVICE_STATE_STOP_COPY)
			goto fill_data;
		return ret;
	}

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("mtty_mig", &mtty_save_fops,
					migf, O_RDONLY);
	if (IS_ERR(migf->filp)) {
		int rc = PTR_ERR(migf->filp);

		kfree(migf);
		return ERR_PTR(rc);
	}

	/* stream semantics: no seeking on the migration file */
	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->mdev_state = mdev_state;

	migf->data.magic = MTTY_MAGIC;
	migf->data.major_ver = MTTY_MAJOR_VER;
	migf->data.minor_ver = MTTY_MINOR_VER;
	migf->data.nr_ports = mdev_state->nr_ports;

	migf->filled_size = offsetof(struct mtty_data, ports);

	dev_dbg(mdev_state->vdev.dev, "%s filled header to %zu\n",
		__func__, migf->filled_size);

	ret = mdev_state->saving_migf = migf;

fill_data:
	if (state == VFIO_DEVICE_STATE_STOP_COPY)
		mtty_save_state(mdev_state);

	return ret;
}
1016 
/*
 * write() handler for the resume-side migration file.
 *
 * Buffers the incoming stream into migf->data at the file's own
 * position (pwrite is rejected with -ESPIPE).  Writes that would
 * exceed the maximum stream size fail with -ENOMEM.  As soon as the
 * write that completes the fixed header arrives, the header is
 * validated (magic, flags, version, port count) and a mismatch fails
 * the write with -EFAULT; the actual state load happens later in
 * mtty_load_state() on the RESUMING -> STOP transition.
 */
static ssize_t mtty_resume_write(struct file *filp, const char __user *buf,
				 size_t len, loff_t *pos)
{
	struct mtty_migration_file *migf = filp->private_data;
	struct mdev_state *mdev_state = migf->mdev_state;
	loff_t requested_length;
	ssize_t ret = 0;

	if (pos)
		return -ESPIPE;

	pos = &filp->f_pos;

	/* overflow-safe end-of-write computation */
	if (*pos < 0 ||
	    check_add_overflow((loff_t)len, *pos, &requested_length))
		return -EINVAL;

	if (requested_length > mtty_data_size(mdev_state))
		return -ENOMEM;

	mutex_lock(&migf->lock);

	if (migf->disabled) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (copy_from_user((void *)&migf->data + *pos, buf, len)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	*pos += len;
	ret = len;

	dev_dbg(migf->mdev_state->vdev.dev, "%s received %zu, total %zu\n",
		__func__, len, migf->filled_size + len);

	/* this write just completed the fixed header: validate it */
	if (migf->filled_size < offsetof(struct mtty_data, ports) &&
	    migf->filled_size + len >= offsetof(struct mtty_data, ports)) {
		if (migf->data.magic != MTTY_MAGIC || migf->data.flags ||
		    migf->data.major_ver != MTTY_MAJOR_VER ||
		    migf->data.minor_ver != MTTY_MINOR_VER ||
		    migf->data.nr_ports != mdev_state->nr_ports) {
			dev_dbg(migf->mdev_state->vdev.dev,
				"%s failed validation\n", __func__);
			ret = -EFAULT;
		} else {
			dev_dbg(migf->mdev_state->vdev.dev,
				"%s header validated\n", __func__);
		}
	}

	migf->filled_size += len;

out_unlock:
	mutex_unlock(&migf->lock);
	return ret;
}
1076 
/* file_operations for the resume-side migration file: write-only stream,
 * released via mtty_release_migf().
 */
static const struct file_operations mtty_resume_fops = {
	.owner = THIS_MODULE,
	.write = mtty_resume_write,
	.release = mtty_release_migf,
};
1082 
/*
 * Create the resume-side migration file: a write-only anon-inode
 * stream the migration driver feeds the saved state into.  Publishes
 * it as mdev_state->resuming_migf and returns it, or an ERR_PTR on
 * allocation failure.
 */
static struct mtty_migration_file *
mtty_resume_device_data(struct mdev_state *mdev_state)
{
	struct mtty_migration_file *migf;
	int ret;

	migf = kzalloc(sizeof(*migf), GFP_KERNEL_ACCOUNT);
	if (!migf)
		return ERR_PTR(-ENOMEM);

	migf->filp = anon_inode_getfile("mtty_mig", &mtty_resume_fops,
					migf, O_WRONLY);
	if (IS_ERR(migf->filp)) {
		ret = PTR_ERR(migf->filp);
		kfree(migf);
		return ERR_PTR(ret);
	}

	/* stream semantics: no seeking on the migration file */
	stream_open(migf->filp->f_inode, migf->filp);
	mutex_init(&migf->lock);
	migf->mdev_state = mdev_state;

	mdev_state->resuming_migf = migf;

	return migf;
}
1109 
/*
 * mtty_step_state() - execute one single-arc migration state transition.
 * @mdev_state: device being transitioned
 * @new: target state, exactly one arc away from mdev_state->state
 *
 * Called from mtty_set_state() with state_mutex held; the caller updates
 * mdev_state->state after a successful step.
 *
 * Return: a migration data file (with a reference) for arcs that produce
 * or consume device state, NULL for arcs with no data stream, or an
 * ERR_PTR() on failure.
 */
static struct file *mtty_step_state(struct mdev_state *mdev_state,
				     enum vfio_device_mig_state new)
{
	enum vfio_device_mig_state cur = mdev_state->state;

	dev_dbg(mdev_state->vdev.dev, "%s: %d -> %d\n", __func__, cur, new);

	/*
	 * The following state transitions are no-op considering
	 * mtty does not do DMA nor require any explicit start/stop.
	 *
	 *         RUNNING -> RUNNING_P2P
	 *         RUNNING_P2P -> RUNNING
	 *         RUNNING_P2P -> STOP
	 *         PRE_COPY -> PRE_COPY_P2P
	 *         PRE_COPY_P2P -> PRE_COPY
	 *         STOP -> RUNNING_P2P
	 */
	if ((cur == VFIO_DEVICE_STATE_RUNNING &&
	     new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
	    (cur == VFIO_DEVICE_STATE_RUNNING_P2P &&
	     (new == VFIO_DEVICE_STATE_RUNNING ||
	      new == VFIO_DEVICE_STATE_STOP)) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY &&
	     new == VFIO_DEVICE_STATE_PRE_COPY_P2P) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
	     new == VFIO_DEVICE_STATE_PRE_COPY) ||
	    (cur == VFIO_DEVICE_STATE_STOP &&
	     new == VFIO_DEVICE_STATE_RUNNING_P2P))
		return NULL;

	/*
	 * The following state transitions simply close migration files,
	 * with the exception of RESUMING -> STOP, which needs to load
	 * the state first.
	 *
	 *         RESUMING -> STOP
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY_P2P -> RUNNING_P2P
	 *         STOP_COPY -> STOP
	 */
	if (cur == VFIO_DEVICE_STATE_RESUMING &&
	    new == VFIO_DEVICE_STATE_STOP) {
		int ret;

		/* Apply the buffered resume data before tearing it down. */
		ret = mtty_load_state(mdev_state);
		if (ret)
			return ERR_PTR(ret);
		mtty_disable_files(mdev_state);
		return NULL;
	}

	if ((cur == VFIO_DEVICE_STATE_PRE_COPY &&
	     new == VFIO_DEVICE_STATE_RUNNING) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
	     new == VFIO_DEVICE_STATE_RUNNING_P2P) ||
	    (cur == VFIO_DEVICE_STATE_STOP_COPY &&
	     new == VFIO_DEVICE_STATE_STOP)) {
		mtty_disable_files(mdev_state);
		return NULL;
	}

	/*
	 * The following state transitions return migration files.
	 *
	 *         RUNNING -> PRE_COPY
	 *         RUNNING_P2P -> PRE_COPY_P2P
	 *         STOP -> STOP_COPY
	 *         STOP -> RESUMING
	 *         PRE_COPY_P2P -> STOP_COPY
	 */
	if ((cur == VFIO_DEVICE_STATE_RUNNING &&
	     new == VFIO_DEVICE_STATE_PRE_COPY) ||
	    (cur == VFIO_DEVICE_STATE_RUNNING_P2P &&
	     new == VFIO_DEVICE_STATE_PRE_COPY_P2P) ||
	    (cur == VFIO_DEVICE_STATE_STOP &&
	     new == VFIO_DEVICE_STATE_STOP_COPY) ||
	    (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P &&
	     new == VFIO_DEVICE_STATE_STOP_COPY)) {
		struct mtty_migration_file *migf;

		migf = mtty_save_device_data(mdev_state, new);
		if (IS_ERR(migf))
			return ERR_CAST(migf);

		/*
		 * NOTE(review): mtty_save_device_data() apparently may also
		 * return NULL (no file to hand out) — confirm against its
		 * definition earlier in the file.
		 */
		if (migf) {
			/* Extra reference for the fd handed to userspace. */
			get_file(migf->filp);

			return migf->filp;
		}
		return NULL;
	}

	if (cur == VFIO_DEVICE_STATE_STOP &&
	    new == VFIO_DEVICE_STATE_RESUMING) {
		struct mtty_migration_file *migf;

		migf = mtty_resume_device_data(mdev_state);
		if (IS_ERR(migf))
			return ERR_CAST(migf);

		get_file(migf->filp);

		return migf->filp;
	}

	/* vfio_mig_get_next_state() does not use arcs other than the above */
	WARN_ON(true);
	return ERR_PTR(-EINVAL);
}
1220 
/*
 * mtty_set_state() - vfio_migration_ops.migration_set_state callback.
 *
 * Walks from the current state to @new_state one arc at a time, using
 * vfio_mig_get_next_state() to pick each intermediate state and
 * mtty_step_state() to execute it.  Only the final arc may legitimately
 * return a migration file; a file on an intermediate arc is a bug and is
 * dropped with -EINVAL.
 *
 * Return: NULL, a migration file for the caller to install as an fd, or
 * an ERR_PTR() on failure.
 */
static struct file *mtty_set_state(struct vfio_device *vdev,
				   enum vfio_device_mig_state new_state)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct file *ret = NULL;

	dev_dbg(vdev->dev, "%s -> %d\n", __func__, new_state);

	mutex_lock(&mdev_state->state_mutex);
	while (mdev_state->state != new_state) {
		enum vfio_device_mig_state next_state;
		int rc = vfio_mig_get_next_state(vdev, mdev_state->state,
						 new_state, &next_state);
		if (rc) {
			ret = ERR_PTR(rc);
			break;
		}

		ret = mtty_step_state(mdev_state, next_state);
		if (IS_ERR(ret))
			break;

		mdev_state->state = next_state;

		/* A migration file is only valid on the final arc. */
		if (WARN_ON(ret && new_state != next_state)) {
			fput(ret);
			ret = ERR_PTR(-EINVAL);
			break;
		}
	}
	/* Unlock via helper so a deferred reset can be applied, if any. */
	mtty_state_mutex_unlock(mdev_state);
	return ret;
}
1255 
1256 static int mtty_get_state(struct vfio_device *vdev,
1257 			  enum vfio_device_mig_state *current_state)
1258 {
1259 	struct mdev_state *mdev_state =
1260 		container_of(vdev, struct mdev_state, vdev);
1261 
1262 	mutex_lock(&mdev_state->state_mutex);
1263 	*current_state = mdev_state->state;
1264 	mtty_state_mutex_unlock(mdev_state);
1265 	return 0;
1266 }
1267 
1268 static int mtty_get_data_size(struct vfio_device *vdev,
1269 			      unsigned long *stop_copy_length)
1270 {
1271 	struct mdev_state *mdev_state =
1272 		container_of(vdev, struct mdev_state, vdev);
1273 
1274 	*stop_copy_length = mtty_data_size(mdev_state);
1275 	return 0;
1276 }
1277 
/* Migration callbacks wired into the vfio core (vdev->mig_ops). */
static const struct vfio_migration_ops mtty_migration_ops = {
	.migration_set_state = mtty_set_state,
	.migration_get_state = mtty_get_state,
	.migration_get_data_size = mtty_get_data_size,
};
1283 
/*
 * Dirty-page logging start: nothing to arm — mtty performs no DMA, so
 * it never dirties guest memory.  Accept the request and do nothing.
 */
static int mtty_log_start(struct vfio_device *vdev,
			  struct rb_root_cached *ranges,
			  u32 nnodes, u64 *page_size)
{
	return 0;
}
1290 
/* Dirty-page logging stop: no-op counterpart of mtty_log_start(). */
static int mtty_log_stop(struct vfio_device *vdev)
{
	return 0;
}
1295 
/*
 * Report dirty pages: with no DMA there are never any, so leave the
 * bitmap untouched and report success.
 */
static int mtty_log_read_and_clear(struct vfio_device *vdev,
				   unsigned long iova, unsigned long length,
				   struct iova_bitmap *dirty)
{
	return 0;
}
1302 
/* Dirty-tracking callbacks wired into the vfio core (vdev->log_ops). */
static const struct vfio_log_ops mtty_log_ops = {
	.log_start = mtty_log_start,
	.log_stop = mtty_log_stop,
	.log_read_and_clear = mtty_log_read_and_clear,
};
1308 
/*
 * mtty_init_dev() - vfio_device_ops.init: one-time setup for a new mdev.
 *
 * Reserves the type's port count from the global pool, allocates the
 * virtual PCI config space, and registers the migration/dirty-logging
 * callbacks.  Ports reserved here are returned in mtty_release_dev().
 *
 * Return: 0 on success, -ENOSPC if too few ports remain, -ENOMEM on
 * allocation failure.
 */
static int mtty_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	struct mtty_type *type =
		container_of(mdev->type, struct mtty_type, type);
	int avail_ports = atomic_read(&mdev_avail_ports);
	int ret;

	/*
	 * Lock-free reservation: retry the cmpxchg until we atomically
	 * deduct nr_ports, or bail out if the pool drops too low.
	 */
	do {
		if (avail_ports < type->nr_ports)
			return -ENOSPC;
	} while (!atomic_try_cmpxchg(&mdev_avail_ports,
				     &avail_ports,
				     avail_ports - type->nr_ports));

	mdev_state->nr_ports = type->nr_ports;
	mdev_state->irq_index = -1;		/* no IRQ configured yet */
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);

	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig) {
		ret = -ENOMEM;
		goto err_nr_ports;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mtty_create_config_space(mdev_state);

	mutex_init(&mdev_state->state_mutex);
	mutex_init(&mdev_state->reset_mutex);
	vdev->migration_flags = VFIO_MIGRATION_STOP_COPY |
				VFIO_MIGRATION_P2P |
				VFIO_MIGRATION_PRE_COPY;
	vdev->mig_ops = &mtty_migration_ops;
	vdev->log_ops = &mtty_log_ops;
	mdev_state->state = VFIO_DEVICE_STATE_RUNNING;

	return 0;

err_nr_ports:
	/* Give the reserved ports back on failure. */
	atomic_add(type->nr_ports, &mdev_avail_ports);
	return ret;
}
1357 
1358 static int mtty_probe(struct mdev_device *mdev)
1359 {
1360 	struct mdev_state *mdev_state;
1361 	int ret;
1362 
1363 	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
1364 				       &mtty_dev_ops);
1365 	if (IS_ERR(mdev_state))
1366 		return PTR_ERR(mdev_state);
1367 
1368 	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
1369 	if (ret)
1370 		goto err_put_vdev;
1371 	dev_set_drvdata(&mdev->dev, mdev_state);
1372 	return 0;
1373 
1374 err_put_vdev:
1375 	vfio_put_device(&mdev_state->vdev);
1376 	return ret;
1377 }
1378 
/*
 * vfio_device_ops.release: undo mtty_init_dev() — destroy the mutexes,
 * return the reserved ports to the global pool, and free the virtual
 * config space.
 */
static void mtty_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	mutex_destroy(&mdev_state->reset_mutex);
	mutex_destroy(&mdev_state->state_mutex);
	atomic_add(mdev_state->nr_ports, &mdev_avail_ports);
	kfree(mdev_state->vconfig);
}
1389 
/*
 * mdev_driver.remove: unregister the vfio device and drop the final
 * reference (which eventually calls mtty_release_dev()).
 */
static void mtty_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}
1397 
/*
 * mtty_reset() - handle VFIO_DEVICE_RESET with the deferred-reset pattern.
 *
 * A reset may race with a migration-state operation that holds
 * state_mutex.  Set deferred_reset under reset_mutex, then try to take
 * state_mutex: if it is contended, just leave the flag set — the current
 * holder will observe and apply it when it releases the lock via
 * mtty_state_mutex_unlock().  If we got the lock, applying the flag is
 * delegated to mtty_state_mutex_unlock() as well (its body is defined
 * earlier in the file).
 */
static int mtty_reset(struct mdev_state *mdev_state)
{
	pr_info("%s: called\n", __func__);

	mutex_lock(&mdev_state->reset_mutex);
	mdev_state->deferred_reset = true;
	if (!mutex_trylock(&mdev_state->state_mutex)) {
		/* Someone else holds state_mutex; they will do the reset. */
		mutex_unlock(&mdev_state->reset_mutex);
		return 0;
	}
	mutex_unlock(&mdev_state->reset_mutex);
	mtty_state_mutex_unlock(mdev_state);

	return 0;
}
1413 
1414 static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
1415 			 size_t count, loff_t *ppos)
1416 {
1417 	struct mdev_state *mdev_state =
1418 		container_of(vdev, struct mdev_state, vdev);
1419 	unsigned int done = 0;
1420 	int ret;
1421 
1422 	while (count) {
1423 		size_t filled;
1424 
1425 		if (count >= 4 && !(*ppos % 4)) {
1426 			u32 val;
1427 
1428 			ret =  mdev_access(mdev_state, (u8 *)&val, sizeof(val),
1429 					   *ppos, false);
1430 			if (ret <= 0)
1431 				goto read_err;
1432 
1433 			if (copy_to_user(buf, &val, sizeof(val)))
1434 				goto read_err;
1435 
1436 			filled = 4;
1437 		} else if (count >= 2 && !(*ppos % 2)) {
1438 			u16 val;
1439 
1440 			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
1441 					  *ppos, false);
1442 			if (ret <= 0)
1443 				goto read_err;
1444 
1445 			if (copy_to_user(buf, &val, sizeof(val)))
1446 				goto read_err;
1447 
1448 			filled = 2;
1449 		} else {
1450 			u8 val;
1451 
1452 			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
1453 					  *ppos, false);
1454 			if (ret <= 0)
1455 				goto read_err;
1456 
1457 			if (copy_to_user(buf, &val, sizeof(val)))
1458 				goto read_err;
1459 
1460 			filled = 1;
1461 		}
1462 
1463 		count -= filled;
1464 		done += filled;
1465 		*ppos += filled;
1466 		buf += filled;
1467 	}
1468 
1469 	return done;
1470 
1471 read_err:
1472 	return -EFAULT;
1473 }
1474 
1475 static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
1476 		   size_t count, loff_t *ppos)
1477 {
1478 	struct mdev_state *mdev_state =
1479 		container_of(vdev, struct mdev_state, vdev);
1480 	unsigned int done = 0;
1481 	int ret;
1482 
1483 	while (count) {
1484 		size_t filled;
1485 
1486 		if (count >= 4 && !(*ppos % 4)) {
1487 			u32 val;
1488 
1489 			if (copy_from_user(&val, buf, sizeof(val)))
1490 				goto write_err;
1491 
1492 			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
1493 					  *ppos, true);
1494 			if (ret <= 0)
1495 				goto write_err;
1496 
1497 			filled = 4;
1498 		} else if (count >= 2 && !(*ppos % 2)) {
1499 			u16 val;
1500 
1501 			if (copy_from_user(&val, buf, sizeof(val)))
1502 				goto write_err;
1503 
1504 			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
1505 					  *ppos, true);
1506 			if (ret <= 0)
1507 				goto write_err;
1508 
1509 			filled = 2;
1510 		} else {
1511 			u8 val;
1512 
1513 			if (copy_from_user(&val, buf, sizeof(val)))
1514 				goto write_err;
1515 
1516 			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
1517 					  *ppos, true);
1518 			if (ret <= 0)
1519 				goto write_err;
1520 
1521 			filled = 1;
1522 		}
1523 		count -= filled;
1524 		done += filled;
1525 		*ppos += filled;
1526 		buf += filled;
1527 	}
1528 
1529 	return done;
1530 write_err:
1531 	return -EFAULT;
1532 }
1533 
1534 static void mtty_disable_intx(struct mdev_state *mdev_state)
1535 {
1536 	if (mdev_state->intx_evtfd) {
1537 		eventfd_ctx_put(mdev_state->intx_evtfd);
1538 		mdev_state->intx_evtfd = NULL;
1539 		mdev_state->intx_mask = false;
1540 		mdev_state->irq_index = -1;
1541 	}
1542 }
1543 
1544 static void mtty_disable_msi(struct mdev_state *mdev_state)
1545 {
1546 	if (mdev_state->msi_evtfd) {
1547 		eventfd_ctx_put(mdev_state->msi_evtfd);
1548 		mdev_state->msi_evtfd = NULL;
1549 		mdev_state->irq_index = -1;
1550 	}
1551 }
1552 
/*
 * mtty_set_irqs() - core of VFIO_DEVICE_SET_IRQS.
 * @mdev_state: the device
 * @flags: VFIO_IRQ_SET_* action and data-type flags
 * @index: IRQ index (INTX/MSI/MSIX/ERR/REQ)
 * @start, @count: IRQ subrange (mtty supports only start 0, count 1)
 * @data: payload matching the DATA_* flag (bool array, eventfd, or none)
 *
 * Supports INTx mask/unmask/trigger and MSI trigger; an eventfd may be
 * installed for either to signal the interrupt to userspace.  MSI-X,
 * ERR and REQ indexes are unsupported.  Runs under ops_lock.
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENOTTY for
 * unsupported actions.
 */
static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			if (!is_intx(mdev_state) || start != 0 || count != 1) {
				ret = -EINVAL;
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				mdev_state->intx_mask = true;
			} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
				uint8_t mask = *(uint8_t *)data;

				if (mask)
					mdev_state->intx_mask = true;
			} else if (flags &  VFIO_IRQ_SET_DATA_EVENTFD) {
				ret = -ENOTTY; /* No support for mask fd */
			}
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			if (!is_intx(mdev_state) || start != 0 || count != 1) {
				ret = -EINVAL;
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				mdev_state->intx_mask = false;
			} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
				uint8_t mask = *(uint8_t *)data;

				if (mask)
					mdev_state->intx_mask = false;
			} else if (flags &  VFIO_IRQ_SET_DATA_EVENTFD) {
				ret = -ENOTTY; /* No support for unmask fd */
			}
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			/* count == 0 with DATA_NONE means: tear down INTx. */
			if (is_intx(mdev_state) && !count &&
			    (flags & VFIO_IRQ_SET_DATA_NONE)) {
				mtty_disable_intx(mdev_state);
				break;
			}

			if (!(is_intx(mdev_state) || is_noirq(mdev_state)) ||
			    start != 0 || count != 1) {
				ret = -EINVAL;
				break;
			}

			/* Install (or replace) the INTx signalling eventfd. */
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				mtty_disable_intx(mdev_state);

				if (fd < 0)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->intx_evtfd = evt;
				mdev_state->irq_index = index;
				break;
			}

			if (!is_intx(mdev_state)) {
				ret = -EINVAL;
				break;
			}

			/* Software-triggered interrupt (NONE or BOOL data). */
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				mtty_trigger_interrupt(mdev_state);
			} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
				uint8_t trigger = *(uint8_t *)data;

				if (trigger)
					mtty_trigger_interrupt(mdev_state);
			}
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* MSI cannot be masked per-vector here. */
			ret = -ENOTTY;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			/* count == 0 with DATA_NONE means: tear down MSI. */
			if (is_msi(mdev_state) && !count &&
			    (flags & VFIO_IRQ_SET_DATA_NONE)) {
				mtty_disable_msi(mdev_state);
				break;
			}

			if (!(is_msi(mdev_state) || is_noirq(mdev_state)) ||
			    start != 0 || count != 1) {
				ret = -EINVAL;
				break;
			}

			/* Install (or replace) the MSI signalling eventfd. */
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				mtty_disable_msi(mdev_state);

				if (fd < 0)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_index = index;
				break;
			}

			if (!is_msi(mdev_state)) {
				ret = -EINVAL;
				break;
			}

			/* Software-triggered interrupt (NONE or BOOL data). */
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				mtty_trigger_interrupt(mdev_state);
			} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
				uint8_t trigger = *(uint8_t *)data;

				if (trigger)
					mtty_trigger_interrupt(mdev_state);
			}
			break;
		}
		break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		dev_dbg(mdev_state->vdev.dev, "%s: MSIX_IRQ\n", __func__);
		ret = -ENOTTY;
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		dev_dbg(mdev_state->vdev.dev, "%s: ERR_IRQ\n", __func__);
		ret = -ENOTTY;
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		dev_dbg(mdev_state->vdev.dev, "%s: REQ_IRQ\n", __func__);
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}
1716 
/*
 * mtty_ioctl_get_region_info() - fill in VFIO_DEVICE_GET_REGION_INFO.
 *
 * Reports the size and fixed file offset of the requested region:
 * emulated config space, BAR0 (serial port 1), and BAR1 (serial port 2,
 * only when the device was created with two ports).  All other regions
 * report size 0.  Also caches size/offset in mdev_state->region_info
 * for later access dispatch.
 *
 * Return: 0, or -EINVAL for an out-of-range region index.
 */
static int mtty_ioctl_get_region_info(struct vfio_device *vdev,
				      struct vfio_region_info *region_info,
				      struct vfio_info_cap *caps)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int size = 0;
	u32 bar_index;

	bar_index = region_info->index;
	if (bar_index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	switch (bar_index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		size = MTTY_CONFIG_SPACE_SIZE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		size = MTTY_IO_BAR_SIZE;
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
		/* BAR1 backs the second serial port, if present. */
		if (mdev_state->nr_ports == 2)
			size = MTTY_IO_BAR_SIZE;
		break;
	default:
		size = 0;
		break;
	}

	mdev_state->region_info[bar_index].size = size;
	mdev_state->region_info[bar_index].vfio_offset =
		MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);

	region_info->size = size;
	region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
	region_info->flags = VFIO_REGION_INFO_FLAG_READ |
		VFIO_REGION_INFO_FLAG_WRITE;
	mutex_unlock(&mdev_state->ops_lock);
	return 0;
}
1759 
1760 static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
1761 {
1762 	if (irq_info->index != VFIO_PCI_INTX_IRQ_INDEX &&
1763 	    irq_info->index != VFIO_PCI_MSI_IRQ_INDEX)
1764 		return -EINVAL;
1765 
1766 	irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
1767 	irq_info->count = 1;
1768 
1769 	if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
1770 		irq_info->flags |= VFIO_IRQ_INFO_MASKABLE |
1771 				   VFIO_IRQ_INFO_AUTOMASKED;
1772 	else
1773 		irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
1774 
1775 	return 0;
1776 }
1777 
/*
 * Fill in VFIO_DEVICE_GET_INFO: mtty presents itself as an emulated PCI
 * device with the standard vfio-pci region and IRQ index layout.
 */
static int mtty_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;

	return 0;
}
1786 
/*
 * mtty_ioctl() - vfio_device_ops.ioctl dispatcher.
 *
 * Implements VFIO_DEVICE_GET_INFO, VFIO_DEVICE_GET_IRQ_INFO,
 * VFIO_DEVICE_SET_IRQS and VFIO_DEVICE_RESET; anything else returns
 * -ENOTTY.  Each GET-style command copies in the user header up to
 * @minsz, validates argsz, and copies back only @minsz bytes.
 */
static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
			unsigned long arg)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_device_info(&info);
		if (ret)
			return ret;

		/* Cache num_irqs etc. for validating later IRQ ioctls. */
		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mtty_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL, *ptr = NULL;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		/* Core helper sanity-checks hdr and computes payload size. */
		ret = vfio_set_irqs_validate_and_prepare(&hdr,
						mdev_state->dev_info.num_irqs,
						VFIO_PCI_NUM_IRQS,
						&data_size);
		if (ret)
			return ret;

		if (data_size) {
			ptr = data = memdup_user((void __user *)(arg + minsz),
						 data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
				    hdr.count, data);

		kfree(ptr);
		return ret;
	}
	case VFIO_DEVICE_RESET:
		return mtty_reset(mdev_state);
	}
	return -ENOTTY;
}
1878 
1879 static ssize_t
1880 sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1881 		     char *buf)
1882 {
1883 	return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
1884 }
1885 
/* Expose the attribute above as <device>/vendor/sample_mdev_dev. */
static DEVICE_ATTR_RO(sample_mdev_dev);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

/* Grouped under a "vendor" subdirectory in sysfs. */
static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

/* NULL-terminated list handed to the mdev driver core (dev_groups). */
static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};
1902 
1903 static unsigned int mtty_get_available(struct mdev_type *mtype)
1904 {
1905 	struct mtty_type *type = container_of(mtype, struct mtty_type, type);
1906 
1907 	return atomic_read(&mdev_avail_ports) / type->nr_ports;
1908 }
1909 
/*
 * vfio_device_ops.close_device: release per-open resources — migration
 * data files and any INTx/MSI eventfds.
 */
static void mtty_close(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
				container_of(vdev, struct mdev_state, vdev);

	mtty_disable_files(mdev_state);
	mtty_disable_intx(mdev_state);
	mtty_disable_msi(mdev_state);
}
1919 
/*
 * vfio device callbacks; IOMMU binding uses the emulated-IOMMU helpers
 * since mtty has no physical DMA.
 */
static const struct vfio_device_ops mtty_dev_ops = {
	.name = "vfio-mtty",
	.init = mtty_init_dev,
	.release = mtty_release_dev,
	.read = mtty_read,
	.write = mtty_write,
	.ioctl = mtty_ioctl,
	.get_region_info_caps = mtty_ioctl_get_region_info,
	.bind_iommufd	= vfio_iommufd_emulated_bind,
	.unbind_iommufd	= vfio_iommufd_emulated_unbind,
	.attach_ioas	= vfio_iommufd_emulated_attach_ioas,
	.detach_ioas	= vfio_iommufd_emulated_detach_ioas,
	.close_device	= mtty_close,
};
1934 
/* The mdev driver: binds created mtty mdevs to the vfio ops above. */
static struct mdev_driver mtty_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.driver = {
		.name = "mtty",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mtty_probe,
	.remove	= mtty_remove,
	.get_available = mtty_get_available,
};
1947 
/* Release callback for the parent device; state is static, so only log. */
static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}
1952 
1953 static int __init mtty_dev_init(void)
1954 {
1955 	int ret = 0;
1956 
1957 	pr_info("mtty_dev: %s\n", __func__);
1958 
1959 	memset(&mtty_dev, 0, sizeof(mtty_dev));
1960 
1961 	idr_init(&mtty_dev.vd_idr);
1962 
1963 	ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
1964 				  MTTY_NAME);
1965 
1966 	if (ret < 0) {
1967 		pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
1968 		return ret;
1969 	}
1970 
1971 	cdev_init(&mtty_dev.vd_cdev, &vd_fops);
1972 	cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);
1973 
1974 	pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
1975 
1976 	ret = mdev_register_driver(&mtty_driver);
1977 	if (ret)
1978 		goto err_cdev;
1979 
1980 	ret = class_register(&mtty_class);
1981 
1982 	if (ret) {
1983 		pr_err("Error: failed to register mtty_dev class\n");
1984 		goto err_driver;
1985 	}
1986 
1987 	mtty_dev.dev.class = &mtty_class;
1988 	mtty_dev.dev.release = mtty_device_release;
1989 	dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
1990 
1991 	ret = device_register(&mtty_dev.dev);
1992 	if (ret)
1993 		goto err_put;
1994 
1995 	ret = mdev_register_parent(&mtty_dev.parent, &mtty_dev.dev,
1996 				   &mtty_driver, mtty_mdev_types,
1997 				   ARRAY_SIZE(mtty_mdev_types));
1998 	if (ret)
1999 		goto err_device;
2000 	return 0;
2001 
2002 err_device:
2003 	device_del(&mtty_dev.dev);
2004 err_put:
2005 	put_device(&mtty_dev.dev);
2006 	class_unregister(&mtty_class);
2007 err_driver:
2008 	mdev_unregister_driver(&mtty_driver);
2009 err_cdev:
2010 	cdev_del(&mtty_dev.vd_cdev);
2011 	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
2012 	return ret;
2013 }
2014 
/* Module exit: tear down everything mtty_dev_init() created, in reverse. */
static void __exit mtty_dev_exit(void)
{
	mtty_dev.dev.bus = NULL;
	mdev_unregister_parent(&mtty_dev.parent);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	mdev_unregister_driver(&mtty_driver);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
	class_unregister(&mtty_class);
	pr_info("mtty_dev: Unloaded!\n");
}
2028 
2029 module_init(mtty_dev_init)
2030 module_exit(mtty_dev_exit)
2031 
2032 MODULE_LICENSE("GPL v2");
2033 MODULE_DESCRIPTION("Test driver that simulate serial port over PCI");
2034 MODULE_VERSION(VERSION_STRING);
2035 MODULE_AUTHOR(DRIVER_AUTHOR);
2036