vm86_32.c: 1342635638cba9b7c8eac776da5e54390d14d313 → decd275e62d5eef4b947fab89652fa6afdadf2f2 (lines removed in the newer version are marked '-', lines added are marked '+')
1/*
2 * Copyright (C) 1994 Linus Torvalds
3 *
4 * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
5 * stack - Manfred Spraul <manfred@colorfullife.com>
6 *
7 * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle
8 * them correctly. Now the emulation will be in a

--- 66 unchanged lines hidden ---

75#define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0])
76#define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1])
77#define IP(regs) (*(unsigned short *)&((regs)->pt.ip))
78#define SP(regs) (*(unsigned short *)&((regs)->pt.sp))
79
80/*
81 * virtual flags (16 and 32-bit versions)
82 */
- 83#define VFLAGS (*(unsigned short *)&(current->thread.vm86->v86flags))
- 84#define VEFLAGS (current->thread.vm86->v86flags)
+ 83#define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags))
+ 84#define VEFLAGS (current->thread.vm86->veflags)
85
86#define set_flags(X, new, mask) \
87((X) = ((X) & ~(mask)) | ((new) & (mask)))
88
89#define SAFE_MASK (0xDD5)
90#define RETURN_MASK (0xDFF)
91
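The set_flags() macro above is a plain masked merge: bits selected by mask are taken from new, every other bit of X is left alone. A minimal standalone sketch of that behaviour (the values are made up purely for illustration, this is not kernel code):

#include <assert.h>

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

int main(void)
{
	unsigned long x = 0x00FF;	/* arbitrary starting value */

	/* take bits 0x0F00 from 0xABCD, keep the rest of x unchanged */
	set_flags(x, 0xABCD, 0x0F00);
	assert(x == 0x0BFF);		/* 0x00FF with 0x0B00 merged in */
	return 0;
}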
92void save_v86_state(struct kernel_vm86_regs *regs, int retval)

--- 10 unchanged lines hidden ---

103 * to access user space.
104 */
105 local_irq_enable();
106
107 if (!vm86 || !vm86->user_vm86) {
108 pr_alert("no user_vm86: BAD\n");
109 do_exit(SIGSEGV);
110 }
-111 set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
+111 set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
112 user = vm86->user_vm86;
113
114 if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
115 sizeof(struct vm86plus_struct) :
116 sizeof(struct vm86_struct))) {
117 pr_alert("could not access userspace vm86 info\n");
118 do_exit(SIGSEGV);
119 }

--- 183 unchanged lines hidden ---

303 vm86regs.pt.flags &= SAFE_MASK;
304 vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
305 vm86regs.pt.flags |= X86_VM_MASK;
306
307 vm86regs.pt.orig_ax = regs->orig_ax;
308
309 switch (vm86->cpu_type) {
310 case CPU_286:
-311 vm86->v86mask = 0;
+311 vm86->veflags_mask = 0;
 312 break;
 313 case CPU_386:
-314 vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+314 vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
 315 break;
 316 case CPU_486:
-317 vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+317 vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
 318 break;
 319 default:
-320 vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+320 vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
321 break;
322 }
323
324/*
325 * Save old state
326 */
327 vm86->saved_sp0 = tsk->thread.sp0;
328 lazy_save_gs(vm86->regs32.gs);

--- 43 unchanged lines hidden ---
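As a quick sanity check of what the per-CPU masks in the switch above amount to numerically (under the standard EFLAGS bit layout: IOPL = bits 12-13, NT = bit 14, AC = bit 18, ID = bit 21). The constants below simply mirror the X86_EFLAGS_* values; this is only an illustration, not kernel code:

#include <stdio.h>

/* standard x86 EFLAGS bit positions (same values as X86_EFLAGS_*) */
#define EFLAGS_IOPL	0x003000UL	/* I/O privilege level (2 bits) */
#define EFLAGS_NT	0x004000UL	/* nested task */
#define EFLAGS_AC	0x040000UL	/* alignment check */
#define EFLAGS_ID	0x200000UL	/* CPUID detection flag */

int main(void)
{
	printf("CPU_386 mask: %#lx\n", EFLAGS_NT | EFLAGS_IOPL);		/* 0x7000 */
	printf("CPU_486 mask: %#lx\n", EFLAGS_AC | EFLAGS_NT | EFLAGS_IOPL);	/* 0x47000 */
	printf("default mask: %#lx\n",
	       EFLAGS_ID | EFLAGS_AC | EFLAGS_NT | EFLAGS_IOPL);		/* 0x247000 */
	return 0;
}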

372 * interrupts enabled.
373 * ( I was testing my own changes, but the only bug I
374 * could find was in a function I had not changed. )
375 * [KD]
376 */
377
378static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
379{
-380 set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
+380 set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
381 set_flags(regs->pt.flags, flags, SAFE_MASK);
382 if (flags & X86_EFLAGS_IF)
383 set_IF(regs);
384 else
385 clear_IF(regs);
386}
387
388static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
389{
-390 set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
+390 set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
391 set_flags(regs->pt.flags, flags, SAFE_MASK);
392 if (flags & X86_EFLAGS_IF)
393 set_IF(regs);
394 else
395 clear_IF(regs);
396}
397
398static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
399{
400 unsigned long flags = regs->pt.flags & RETURN_MASK;
401
402 if (VEFLAGS & X86_EFLAGS_VIF)
403 flags |= X86_EFLAGS_IF;
404 flags |= X86_EFLAGS_IOPL;
-405 return flags | (VEFLAGS & current->thread.vm86->v86mask);
+405 return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
406}
407
408static inline int is_revectored(int nr, struct revectored_struct *bitmap)
409{
410 __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
411 :"=r" (nr)
412 :"m" (*bitmap), "r" (nr));
413 return nr;

--- 421 unchanged lines hidden ---
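The btl/sbbl pair in is_revectored() tests bit nr of the interrupt-revector bitmap and then broadcasts the resulting carry flag into the whole register, so the function yields -1 when the bit is set and 0 when it is clear. A rough C equivalent, assuming the bitmap is addressed as plain 32-bit words (the helper name and parameter type here are illustrative, not from the kernel):

/* returns -1 if bit 'nr' of the bitmap is set, 0 otherwise;
 * 'bitmap' is a plain word array standing in for struct revectored_struct
 */
static inline int is_revectored_c(int nr, const unsigned int *bitmap)
{
	return (bitmap[nr >> 5] & (1U << (nr & 31))) ? -1 : 0;
}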