xref: /linux/arch/x86/include/asm/vm86.h (revision 90c6085a248f8f964588617f51329688bcc9f2bc)
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H

#include <asm/ptrace.h>
#include <uapi/asm/vm86.h>

/*
 * This is the kernel stack layout after a "SAVE_ALL" performed on entry
 * from vm86 mode.  The main change is that the old segment registers are
 * no longer useful: they are forced to zero by the kernel (and by the
 * hardware when a trap occurs), and the real (vm86) segment values are
 * stored at the end of the structure.  See ptrace.h for the "normal"
 * setup, and 'struct vm86_regs' in <uapi/asm/vm86.h> for the user-space
 * layout.
 */

struct kernel_vm86_regs {
/*
 * normal regs, with special meaning for the segment registers:
 */
	struct pt_regs pt;
/*
 * these are specific to v86 mode; the __esh/__dsh/__fsh/__gsh members
 * only pad each saved segment value out to a full 32-bit stack slot:
 */
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};
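
/*
 * Purely illustrative helper (hypothetical name, not part of the kernel
 * API): because the vm86 frame simply extends struct pt_regs, code that
 * knows it interrupted vm86 mode can recover the extra segment slots
 * from an ordinary pt_regs pointer, e.g.:
 */
static inline unsigned short example_vm86_user_ds(struct pt_regs *regs)
{
	/* only valid when 'regs' was built on entry from vm86 mode */
	return ((struct kernel_vm86_regs *)regs)->ds;
}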

struct vm86 {
	/* user-space vm86plus_struct this task handed to the vm86 syscall */
	struct vm86plus_struct __user *vm86_info;
	/* 32-bit (protected mode) register state to resume on exit from vm86 */
	struct pt_regs *regs32;
	/* virtualized EFLAGS while the task runs in vm86 mode */
	unsigned long v86flags;
	/* EFLAGS bits the emulated CPU type exposes to the task (from cpu_type) */
	unsigned long v86mask;
	/* the thread's sp0 as it was before entering vm86 mode */
	unsigned long saved_sp0;

	/* the fields below are copied in from the user's vm86plus_struct */
	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
};
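
/*
 * A rough sketch (not the actual kernel code) of how this per-thread state
 * comes to exist: the vm86 syscall allocates it lazily on first use and
 * hangs it off the thread, along the lines of
 *
 *	struct vm86 *vm86 = current->thread.vm86;
 *
 *	if (!vm86) {
 *		vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL);
 *		if (!vm86)
 *			return -ENOMEM;
 *		current->thread.vm86 = vm86;
 *	}
 *
 * free_vm86() further down is the matching teardown.
 */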

#ifdef CONFIG_VM86

/* Emulate the instruction that caused a #GP while running in vm86 mode. */
void handle_vm86_fault(struct kernel_vm86_regs *, long);
/* Let vm86 handle a trap; 0 = consumed, non-zero = caller handles it. */
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
/* Leave vm86 mode: copy state back to user space, return the 32-bit regs. */
struct pt_regs *save_v86_state(struct kernel_vm86_regs *);

struct task_struct;
/* Release any vm86 virtual interrupt lines still held by the task. */
void release_vm86_irqs(struct task_struct *);

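/*
 * Rough usage sketch (hypothetical caller; the real ones live in
 * arch/x86/kernel/traps.c): a trap handler gives vm86 first crack at an
 * event raised in v8086 mode, and only falls back to normal handling
 * when handle_vm86_trap() returns non-zero:
 *
 *	if (v8086_mode(regs)) {
 *		if (!handle_vm86_trap((struct kernel_vm86_regs *)regs,
 *				      error_code, trapnr))
 *			return;
 *	}
 */
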
/* Free the task's vm86 state, if it was ever allocated. */
#define free_vm86(t) do {				\
	struct thread_struct *__t = (t);		\
	if (__t->vm86 != NULL) {			\
		kfree(__t->vm86);			\
		__t->vm86 = NULL;			\
	}						\
} while (0)
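
/*
 * Illustrative usage (hypothetical snippet): a thread teardown path
 * releases the state allocated above with something like
 *
 *	free_vm86(&tsk->thread);
 *
 * where 'tsk' is the exiting task; in the kernel this happens on the
 * thread exit path.
 */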

#else

/* Stubs so that common code can call these without #ifdef CONFIG_VM86. */
#define handle_vm86_fault(a, b)
#define release_vm86_irqs(a)

static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
{
	return 0;
}

#define free_vm86(t) do { } while (0)

#endif /* CONFIG_VM86 */

#endif /* _ASM_X86_VM86_H */