/* SPDX-License-Identifier: GPL-2.0-only */
/* ----------------------------------------------------------------------- *
 *
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   Copyright 2007 rPath, Inc. - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * The actual transition into protected mode
 */

#include <asm/boot.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <linux/linkage.h>

	.text
	.code16

/*
 * void protected_mode_jump(u32 entrypoint, u32 bootparams);
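 *
 * The boot code is built with -mregparm=3, so 'entrypoint' arrives in
 * %eax and 'bootparams' in %edx.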
 */
SYM_FUNC_START_NOALIGN(protected_mode_jump)
	movl	%edx, %esi		# Pointer to boot_params table

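	# Patch the 32-bit offset stored at label 2 below: adding the
	# real-mode segment base (%cs << 4) turns the link-time offset of
	# .Lin_pm32 into the linear address that the flat __BOOT_CS segment
	# expects.  The short jump afterwards flushes the prefetch queue on
	# 386/486 so the patched bytes are re-fetched.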
	xorl	%ebx, %ebx
	movw	%cs, %bx
	shll	$4, %ebx
	addl	%ebx, 2f
	jmp	1f			# Short jump to serialize on 386/486
1:

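	# Stash the flat data segment and boot TSS selectors now; .Lin_pm32
	# uses them immediately after the switch.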
	movw	$__BOOT_DS, %cx
	movw	$__BOOT_TSS, %di

	movl	%cr0, %edx
	orb	$X86_CR0_PE, %dl	# Protected mode
	movl	%edx, %cr0
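	# CR0.PE is set, but the CPU keeps executing 16-bit code through the
	# stale real-mode %cs until the far jump below reloads it.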

	# Transition to 32-bit mode
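	# Hand-assembled so that label 2 can name the offset field patched at
	# run time above; the 0x66 prefix turns this into a far jump with a
	# 32-bit offset even from 16-bit code.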
	.byte	0x66, 0xea		# ljmpl opcode
2:	.long	.Lin_pm32		# offset
	.word	__BOOT_CS		# segment
SYM_FUNC_END(protected_mode_jump)

	.code32
	.section ".text32","ax"
SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32)
	# Set up data segments for flat 32-bit mode
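	# (%ecx still holds __BOOT_DS; a 32-bit mov to a segment register
	# only uses the low 16 bits)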
	movl	%ecx, %ds
	movl	%ecx, %es
	movl	%ecx, %fs
	movl	%ecx, %gs
	movl	%ecx, %ss
	# The 32-bit code sets up its own stack, but this way we do have
	# a valid stack if some debugging hack wants to use it.
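	# (%ebx still holds the real-mode %cs base computed earlier)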
	addl	%ebx, %esp

	# Set up TR to make Intel VT happy
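	# (VMX guest-state checks want a usable TR, so load the boot TSS
	# selector even though the TSS is not otherwise used here)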
	ltr	%di

	# Clear registers to allow for future extensions to the
	# 32-bit boot protocol
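	# (%eax and %esi are deliberately left alone: the entrypoint and the
	# boot_params pointer)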
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%ebx, %ebx
	xorl	%ebp, %ebp
	xorl	%edi, %edi

	# Set up LDTR to make Intel VT happy
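	# (%ecx is zero at this point, so this loads a null LDT selector)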
	lldt	%cx

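	# %eax still holds the entrypoint argument and %esi the boot_params
	# pointer that the 32-bit boot protocol expects.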
	jmpl	*%eax			# Jump to the 32-bit entrypoint
SYM_FUNC_END(.Lin_pm32)