.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
#
# ACPI S3 (suspend-to-RAM) wakeup trampoline. AT&T/GAS syntax, run through
# the C preprocessor. The BIOS resumes us in 16-bit real mode at the ACPI
# waking vector; this code walks the machine back up through 32-bit
# protected mode into 64-bit long mode and finally jumps to the saved
# kernel resume point.
#
# wakeup_code runs in real mode, and at unknown address (determined at run-time).
# Therefore it must only use relative jumps/calls.
#
# Do we need to deal with A20? It is okay: ACPI spec says A20 must be enabled.
#
# If physical address of wakeup_code is 0x12345, BIOS should call us with
# cs = 0x1234, eip = 0x05
#

# Sound the PC speaker as a progress indicator for headless debugging:
# port 97 (0x61) gates the speaker, ports 67/66 (0x43/0x42) program PIT
# channel 2 ($-74 = 0xb6 selects ch2, lobyte/hibyte, square wave).
# The interleaved writes to port 0x80 are conventional short I/O delays.
#define BEEP \
	inb	$97, %al;	\
	outb	%al, $0x80;	\
	movb	$3, %al;	\
	outb	%al, $97;	\
	outb	%al, $0x80;	\
	movb	$-74, %al;	\
	outb	%al, $67;	\
	outb	%al, $0x80;	\
	movb	$-119, %al;	\
	outb	%al, $66;	\
	outb	%al, $0x80;	\
	movb	$15, %al;	\
	outb	%al, $66;


ALIGN
	.align	16
ENTRY(wakeup_start)
wakeup_code:
	wakeup_code_start = .
	.code16

# Running in *copy* of this code, somewhere in low 1MB.
# (acpi_copy_wakeup_routine below placed it there and patched the data
# words real_magic/video_mode/realmode_flags inside the copy.)

	movb	$0xa1, %al	;  outb %al, $0x80	# POST code 0xa1: entered wakeup
	cli
	cld
	# setup data segment
	movw	%cs, %ax
	movw	%ax, %ds		# Make ds:0 point to wakeup_start
	movw	%ax, %ss

	# Data segment must be set up before we can see whether to beep.
	testl	$4, realmode_flags - wakeup_code	# flag bit 2: beep on wakeup
	jz	1f
	BEEP
1:

	# Private stack is needed for ASUS board
	mov	$(wakeup_stack - wakeup_code), %sp

	pushl	$0			# Kill any dangerous flags
	popfl

	# Sanity check: the copy step wrote 0x12345678 here; anything else
	# means we are running on garbage and must not continue.
	movl	real_magic - wakeup_code, %eax
	cmpl	$0x12345678, %eax
	jne	bogus_real_magic

	call	verify_cpu		# Verify the cpu supports long mode
	testl	%eax, %eax		# nonzero return => no long mode
	jnz	no_longmode

	testl	$1, realmode_flags - wakeup_code	# flag bit 0: reinit video BIOS
	jz	1f
	lcall	$0xc000,$3		# call video BIOS POST entry point
	movw	%cs, %ax
	movw	%ax, %ds		# Bios might have played with that
	movw	%ax, %ss
1:

	testl	$2, realmode_flags - wakeup_code	# flag bit 1: restore video mode
	jz	1f
	mov	video_mode - wakeup_code, %ax
	call	mode_set
1:

	# Progress letter 'L' into VGA text memory (segment 0xb800),
	# attribute 0x0e = yellow on black.
	movw	$0xb800, %ax
	movw	%ax,%fs
	movw	$0x0e00 + 'L', %fs:(0x10)

	movb	$0xa2, %al	;  outb %al, $0x80	# POST code 0xa2

	mov	%ds, %ax		# Find 32bit wakeup_code addr
	movzx	%ax, %esi		# (Convert %ds:gdt to a linear ptr)
	shll	$4, %esi		# %esi = real-mode segment * 16 = linear base
	# Fix up the vectors: turn the code-relative offsets stored below
	# into linear addresses valid after we leave real mode.
	addl	%esi, wakeup_32_vector - wakeup_code
	addl	%esi, wakeup_long64_vector - wakeup_code
	addl	%esi, gdt_48a + 2 - wakeup_code	# Fixup the gdt pointer

	lidtl	%ds:idt_48a - wakeup_code	# empty IDT: any interrupt now = triple fault
	lgdtl	%ds:gdt_48a - wakeup_code	# load gdt with whatever is
						# appropriate

	movl	$1, %eax		# protected mode (PE) bit
	lmsw	%ax			# This is it!
	jmp	1f			# flush prefetch queue after mode switch
1:

	# Far jump through the fixed-up vector loads __KERNEL32_CS and
	# lands in the 32-bit code below.
	ljmpl	*(wakeup_32_vector - wakeup_code)

	.balign	4
wakeup_32_vector:
	.long	wakeup_32 - wakeup_code		# patched to linear address above
	.word	__KERNEL32_CS, 0

	.code32
wakeup_32:
# Running in this code, but at low address; paging is not yet turned on.
	movb	$0xa5, %al	;  outb %al, $0x80	# POST code 0xa5: in 32-bit mode

	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds

	movw	$0x0e00 + 'i', %ds:(0xb8012)	# progress letter 'i'
	movb	$0xa8, %al	;  outb %al, $0x80;

	/*
	 * Prepare for entering 64bits mode.
	 * Required architectural order: CR4.PAE, then CR3, then EFER.LME,
	 * then CR0.PG, then a branch — see the invariants listed below.
	 */

	/* Enable PAE (CR4 bit 5) */
	xorl	%eax, %eax
	btsl	$5, %eax
	movl	%eax, %cr4

	/* Setup early boot stage 4 level pagetables.
	 * %esi still holds the linear base of this relocated code, so the
	 * addressing stays position-independent. */
	leal	(wakeup_level4_pgt - wakeup_code)(%esi), %eax
	movl	%eax, %cr3

	/* Check if nx is implemented (CPUID 0x80000001, EDX bit 20) */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Enable Long Mode */
	xorl	%eax, %eax
	btsl	$_EFER_LME, %eax

	/* No Execute supported? */
	btl	$20,%edi
	jnc	1f
	btsl	$_EFER_NX, %eax

	/* Make changes effective */
1:	movl	$MSR_EFER, %ecx
	xorl	%edx, %edx		# wrmsr takes EDX:EAX; high half is zero
	wrmsr

	xorl	%eax, %eax
	btsl	$31, %eax		/* Enable paging and in turn activate Long Mode */
	btsl	$0, %eax		/* Enable protected mode */

	/* Make changes effective */
	movl	%eax, %cr0

	/* At this point:
		CR4.PAE must be 1
		CS.L must be 0
		CR3 must point to PML4
		Next instruction must be a branch
		This must be on identity-mapped page
	*/
	/*
	 * At this point we're in long mode but in 32bit compatibility mode
	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 */

	/* Finally jump in 64bit mode */
	ljmp	*(wakeup_long64_vector - wakeup_code)(%esi)

	.balign	4
wakeup_long64_vector:
	.long	wakeup_long64 - wakeup_code	# patched to linear address in real mode
	.word	__KERNEL_CS, 0

.code64

	/* Hooray, we are in Long 64-bit mode (but still running in
	 * low memory)
	 */
wakeup_long64:
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr

	movw	$0x0e00 + 'n', %ds:(0xb8014)	# progress letter 'n'
	movb	$0xa9, %al	;  outb %al, $0x80	# POST code 0xa9

	# Verify the 64-bit magic written by acpi_copy_wakeup_routine;
	# mismatch means memory was not preserved across suspend.
	movq	saved_magic, %rax
	movq	$0x123456789abcdef0, %rdx
	cmpq	%rdx, %rax
	jne	bogus_64_magic

	movw	$0x0e00 + 'u', %ds:(0xb8016)	# progress letter 'u'

	nop
	nop
	# Reload all data segment registers with the kernel selector and
	# restore the stack/registers saved by do_suspend_lowlevel.
	movw	$__KERNEL_DS, %ax
	movw	%ax, %ss
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movq	saved_rsp, %rsp

	movw	$0x0e00 + 'x', %ds:(0xb8018)	# progress letter 'x'
	movq	saved_rbx, %rbx
	movq	saved_rdi, %rdi
	movq	saved_rsi, %rsi
	movq	saved_rbp, %rbp

	movw	$0x0e00 + '!', %ds:(0xb801a)	# progress letter '!'
	movq	saved_rip, %rax		# saved_rip = .L97 in do_suspend_lowlevel
	jmp	*%rax

.code32

	.align	64
gdta:
	/* It's good to keep gdt in sync with one in trampoline.S */
	.word	0, 0, 0, 0		# dummy null descriptor
	/* ??? Why I need the accessed bit set in order for this to work? */
	.quad	0x00cf9b000000ffff	# __KERNEL32_CS  (32-bit code, flat 4G)
	.quad	0x00af9b000000ffff	# __KERNEL_CS   (64-bit code, L=1)
	.quad	0x00cf93000000ffff	# __KERNEL_DS   (data, flat 4G)

idt_48a:
	.word	0			# idt limit = 0
	.word	0, 0			# idt base = 0L

gdt_48a:
	.word	0x800			# gdt limit=2048,
					# 256 GDT entries
	.long	gdta - wakeup_code	# gdt base (relocated in later)

# Per-copy parameter words, patched into the low-memory copy by
# acpi_copy_wakeup_routine before suspend:
real_magic:	.quad 0			# must read back 0x12345678
video_mode:	.quad 0			# mode passed to mode_set
realmode_flags:	.quad 0			# bit0: video BIOS POST, bit1: set mode, bit2: beep

# Failure traps: emit a distinctive POST code and spin forever so the
# value stays latched on a port-0x80 debug card.
.code16
bogus_real_magic:
	movb	$0xba,%al	;  outb %al,$0x80
	jmp	bogus_real_magic

.code64
bogus_64_magic:
	movb	$0xb3,%al	;  outb %al,$0x80
	jmp	bogus_64_magic

.code16
no_longmode:
	movb	$0xbc,%al	;  outb %al,$0x80
	jmp	no_longmode

#include "../verify_cpu_64.S"

/* This code uses an extended set of video mode numbers. These include:
 * Aliases for standard modes
 *	NORMAL_VGA (-1)
 *	EXTENDED_VGA (-2)
 *	ASK_VGA (-3)
 * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
 * of compatibility when extending the table. These are between 0x00 and 0xff.
 */
#define VIDEO_FIRST_MENU 0x0000

/* Standard BIOS video modes (BIOS number + 0x0100) */
#define VIDEO_FIRST_BIOS 0x0100

/* VESA BIOS video modes (VESA number + 0x0200) */
#define VIDEO_FIRST_VESA 0x0200

/* Video7 special modes (BIOS number + 0x0900) */
#define VIDEO_FIRST_V7 0x0900

# Setting of user mode (AX=mode ID) => CF=success

# For now, we only handle VESA modes (0x0200..0x03ff). To handle other
# modes, we should probably compile in the video code from the boot
# directory.
.code16
mode_set:
	movw	%ax, %bx
	subb	$VIDEO_FIRST_VESA>>8, %bh	# map VESA range down to 0x00xx/0x01xx
	cmpb	$2, %bh
	jb	check_vesa			# only 0x0200..0x03ff handled

setbad:
	clc				# CF=0: mode set failed / unsupported
	ret

check_vesa:
	orw	$0x4000, %bx		# Use linear frame buffer
	movw	$0x4f02, %ax		# VESA BIOS mode set call
	int	$0x10
	cmpw	$0x004f, %ax		# AL=4f if implemented
	jnz	setbad			# AH=0 if OK

	stc				# CF=1: success
	ret

wakeup_stack_begin:	# Stack grows down

.org	0xff0
wakeup_stack:		# Just below end of page

.org	0x1000
# Temporary PML4 used only for the long-mode switch above: entry 0
# identity-maps low memory (where this copy runs); entry 511 maps the
# kernel text range.
ENTRY(wakeup_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE

ENTRY(wakeup_end)

##
# acpi_copy_wakeup_routine
#
# Copy the above routine to low memory.
#
# Parameters:
# %rdi:	place to copy wakeup routine to
#
# Returned address is location of code in low memory (past data and stack)
#
# NOTE(review): as visible here this routine only patches the parameter
# words inside the copy at %rdi and sets saved_magic — the actual memcpy
# of wakeup_start..wakeup_end is presumably done by the caller or is
# outside this view; confirm against the C-side caller.
	.code64
ENTRY(acpi_copy_wakeup_routine)
	pushq	%rax
	pushq	%rdx

	# Patch the copy's parameter words (offsets relative to wakeup_start,
	# rebased onto the copy at %rdi).
	movl	saved_video_mode, %edx
	movl	%edx, video_mode - wakeup_start (,%rdi)
	movl	acpi_realmode_flags, %edx
	movl	%edx, realmode_flags - wakeup_start (,%rdi)
	movq	$0x12345678, real_magic - wakeup_start (,%rdi)
	movq	$0x123456789abcdef0, %rdx
	movq	%rdx, saved_magic

	# Immediately read the magic back as a cheap self-test.
	movq	saved_magic, %rax
	movq	$0x123456789abcdef0, %rdx
	cmpq	%rdx, %rax
	jne	bogus_64_magic

	# restore the regs we used
	popq	%rdx
	popq	%rax
ENTRY(do_suspend_lowlevel_s4bios)
	ret				# S4bios stub: nothing to do, shared ret

	.align	2
	.p2align 4,,15
.globl do_suspend_lowlevel
	.type	do_suspend_lowlevel,@function
# Save full CPU context, enter ACPI S3; execution resumes at .L97 via the
# wakeup trampoline above (saved_rip), which then restores everything.
do_suspend_lowlevel:
.LFB5:
	subq	$8, %rsp		# realign stack (entry rsp%16==8 per SysV)
	xorl	%eax, %eax
	call	save_processor_state

	# Spill every GPR plus flags into the saved_context_* slots.
	movq	%rsp, saved_context_esp(%rip)
	movq	%rax, saved_context_eax(%rip)
	movq	%rbx, saved_context_ebx(%rip)
	movq	%rcx, saved_context_ecx(%rip)
	movq	%rdx, saved_context_edx(%rip)
	movq	%rbp, saved_context_ebp(%rip)
	movq	%rsi, saved_context_esi(%rip)
	movq	%rdi, saved_context_edi(%rip)
	movq	%r8, saved_context_r08(%rip)
	movq	%r9, saved_context_r09(%rip)
	movq	%r10, saved_context_r10(%rip)
	movq	%r11, saved_context_r11(%rip)
	movq	%r12, saved_context_r12(%rip)
	movq	%r13, saved_context_r13(%rip)
	movq	%r14, saved_context_r14(%rip)
	movq	%r15, saved_context_r15(%rip)
	pushfq	;  popq	saved_context_eflags(%rip)

	movq	$.L97, saved_rip(%rip)	# resume point used by wakeup_long64

	movq	%rsp,saved_rsp
	movq	%rbp,saved_rbp
	movq	%rbx,saved_rbx
	movq	%rdi,saved_rdi
	movq	%rsi,saved_rsi

	addq	$8, %rsp
	movl	$3, %edi		# sleep state S3
	xorl	%eax, %eax
	jmp	acpi_enter_sleep_state	# tail call; does not return on success
.L97:
	.p2align 4,,7
.L99:
	.align	4
	# We re-enter here from wakeup_long64 after resume.
	movl	$24, %eax		# 24 = 0x18 = __KERNEL_DS selector (see gdta)
	movw	%ax, %ds
	# Restore control registers from struct saved_context.
	# NOTE(review): the hard-coded byte offsets 58/50/42/34 are assumed
	# to be the cr4/cr3/cr2/cr0 fields — confirm against the struct
	# saved_context layout; named offsets would be safer.
	movq	saved_context+58(%rip), %rax
	movq	%rax, %cr4
	movq	saved_context+50(%rip), %rax
	movq	%rax, %cr3
	movq	saved_context+42(%rip), %rax
	movq	%rax, %cr2
	movq	saved_context+34(%rip), %rax
	movq	%rax, %cr0
	pushq	saved_context_eflags(%rip) ;  popfq
	# Restore every GPR; rsp first so the remaining loads are consistent.
	movq	saved_context_esp(%rip), %rsp
	movq	saved_context_ebp(%rip), %rbp
	movq	saved_context_eax(%rip), %rax
	movq	saved_context_ebx(%rip), %rbx
	movq	saved_context_ecx(%rip), %rcx
	movq	saved_context_edx(%rip), %rdx
	movq	saved_context_esi(%rip), %rsi
	movq	saved_context_edi(%rip), %rdi
	movq	saved_context_r08(%rip), %r8
	movq	saved_context_r09(%rip), %r9
	movq	saved_context_r10(%rip), %r10
	movq	saved_context_r11(%rip), %r11
	movq	saved_context_r12(%rip), %r12
	movq	saved_context_r13(%rip), %r13
	movq	saved_context_r14(%rip), %r14
	movq	saved_context_r15(%rip), %r15

	xorl	%eax, %eax		# return 0
	addq	$8, %rsp		# undo the prologue's alignment adjustment
	jmp	restore_processor_state	# tail call; its ret returns to our caller
.LFE5:
.Lfe5:
	.size	do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel

.data
ALIGN
# Registers stashed by do_suspend_lowlevel and reloaded by wakeup_long64.
ENTRY(saved_rbp)	.quad	0
ENTRY(saved_rsi)	.quad	0
ENTRY(saved_rdi)	.quad	0
ENTRY(saved_rbx)	.quad	0

ENTRY(saved_rip)	.quad	0	# where wakeup_long64 jumps (.L97)
ENTRY(saved_rsp)	.quad	0

ENTRY(saved_magic)	.quad	0	# 0x123456789abcdef0 when memory survived suspend