xref: /linux/arch/x86/include/asm/vm86.h (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_VM86_H
3 #define _ASM_X86_VM86_H
4 
5 #include <asm/ptrace.h>
6 #include <uapi/asm/vm86.h>
7 
8 /*
9  * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
10  * mode - the main change is that the old segment descriptors aren't
11  * useful any more and are forced to be zero by the kernel (and the
12  * hardware when a trap occurs), and the real segment descriptors are
13  * at the end of the structure. Look at ptrace.h to see the "normal"
 * setup. For the user space layout see 'struct vm86_regs' in <uapi/asm/vm86.h>.
15  */
16 
struct kernel_vm86_regs {
/*
 * normal regs, with special meaning for the segment descriptors..
 */
	struct pt_regs pt;
/*
 * these are specific to v86 mode:
 */
	/*
	 * Real-mode segment registers saved at the end of the frame (see
	 * the file comment above). Each 16-bit selector is paired with a
	 * __?sh member that pads it to 32 bits; the high halves are
	 * presumably unused padding — their names are reserved-style.
	 */
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};
30 
/* Per-task VM86 state kept by the kernel across VM86 entries/exits. */
struct vm86 {
	/* Userspace state area (__user: must be accessed via copy helpers). */
	struct vm86plus_struct __user *user_vm86;
	/* Saved 32-bit (protected-mode) register snapshot. */
	struct pt_regs regs32;
	/* NOTE(review): names suggest virtual-EFLAGS emulation state and the
	 * pre-VM86 sp0 — confirm against the vm86 implementation. */
	unsigned long veflags;
	unsigned long veflags_mask;
	unsigned long saved_sp0;

	unsigned long flags;
	unsigned long cpu_type;
	/* Interrupt revectoring bitmaps; types come from <uapi/asm/vm86.h>. */
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
};
44 
45 #ifdef CONFIG_VM86
46 
/*
 * VM86 entry points; definitions live outside this header
 * (presumably in the arch/x86 vm86 code — confirm against the build).
 */
void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
void save_v86_state(struct kernel_vm86_regs *, int);

/* Forward declaration: only task_struct pointers are used in this header. */
struct task_struct;
52 
/*
 * Free the task's VM86 state and clear the pointer so a stale
 * reference cannot be reused. kfree(NULL) is a no-op, so the
 * explicit NULL check the old version carried is unnecessary.
 */
#define free_vm86(t) do {				\
	struct thread_struct *__t = (t);		\
	kfree(__t->vm86);				\
	__t->vm86 = NULL;				\
} while (0)
60 
/*
 * Support for VM86 programs to request interrupts for
 * real mode hardware drivers:
 */
#define FIRST_VM86_IRQ		 3
#define LAST_VM86_IRQ		15

/* Nonzero when @irq is outside the requestable VM86 IRQ range. */
static inline int invalid_vm86_irq(int irq)
{
	return !(irq >= FIRST_VM86_IRQ && irq <= LAST_VM86_IRQ);
}
72 
/* NOTE(review): presumably drops the task's VM86 IRQ reservations — confirm. */
void release_vm86_irqs(struct task_struct *);
74 
75 #else
76 
/*
 * Stubs for kernels built without CONFIG_VM86: the calls expand to
 * nothing. Note the arguments are NOT evaluated, so callers must not
 * rely on side effects in the argument expressions.
 */
#define handle_vm86_fault(a, b)
#define release_vm86_irqs(a)
79 
/* With CONFIG_VM86 disabled no VM86 traps can occur; always report 0. */
static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) { return 0; }
84 
/* With CONFIG_VM86 disabled there is no VM86 state to save — do nothing. */
static inline void save_v86_state(struct kernel_vm86_regs *a, int b)
{
}
86 
/*
 * No VM86 state exists without CONFIG_VM86: evaluate the argument once
 * (silences "unused variable" warnings) and do nothing. Spacing fixed
 * to "while (0)" for consistency with the CONFIG_VM86 variant and
 * kernel coding style.
 */
#define free_vm86(task) do { (void)(task); } while (0)
88 
89 #endif /* CONFIG_VM86 */
90 
91 #endif /* _ASM_X86_VM86_H */
92