author	Simon Glass <sjg@chromium.org>	2014-10-10 08:21:55 -0600
committer	Simon Glass <sjg@chromium.org>	2014-10-28 20:43:47 -0600
commit	200182a748afd430da97b707dc5dff53c3147b5f (patch)
tree	6552542960e3ed576811f85ba8431d1566542c32 /arch/x86/cpu
parent	92cc94a1fe8e7a3ec78e993ea1ff1dd0bbaa5c36 (diff)
x86: Add support for starting 64-bit kernel
Add code to jump to a 64-bit Linux kernel. We need to set up a flat page table structure, a new GDT and then go through a few hoops in the right order.

Signed-off-by: Simon Glass <sjg@chromium.org>
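For context, a minimal sketch of how a boot path might invoke the new hook. The wrapper below is hypothetical; only cpu_jump_to_64bit() and cpu_call64() are added by this patch, and the prototype is assumed to live in asm/cpu.h:

	/* Hypothetical caller - a sketch, not part of this patch */
	#include <common.h>
	#include <asm/cpu.h>

	static int boot_kernel_64bit(ulong setup_base, ulong entry)
	{
		int ret;

		/*
		 * Builds identity-mapped page tables for the first 4GiB,
		 * switches to long mode and jumps to 'entry'; it only
		 * returns on error.
		 */
		ret = cpu_jump_to_64bit(setup_base, entry);
		if (ret)
			printf("Jump to 64-bit kernel failed: %d\n", ret);

		return ret;
	}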
Diffstat (limited to 'arch/x86/cpu')
-rw-r--r--  arch/x86/cpu/Makefile    2
-rw-r--r--  arch/x86/cpu/call64.S   93
-rw-r--r--  arch/x86/cpu/cpu.c      45
3 files changed, 139 insertions, 1 deletion
diff --git a/arch/x86/cpu/Makefile b/arch/x86/cpu/Makefile
index e7bb3e33d5..9d38ef73a7 100644
--- a/arch/x86/cpu/Makefile
+++ b/arch/x86/cpu/Makefile
@@ -10,4 +10,4 @@
extra-y = start.o
obj-$(CONFIG_X86_RESET_VECTOR) += resetvec.o start16.o
-obj-y += interrupts.o cpu.o
+obj-y += interrupts.o cpu.o call64.o
diff --git a/arch/x86/cpu/call64.S b/arch/x86/cpu/call64.S
new file mode 100644
index 0000000000..74dd5a89dc
--- /dev/null
+++ b/arch/x86/cpu/call64.S
@@ -0,0 +1,93 @@
+/*
+ * (C) Copyright 2014 Google, Inc
+ * Copyright (C) 1991, 1992, 1993 Linus Torvalds
+ *
+ * Parts of this copied from Linux arch/x86/boot/compressed/head_64.S
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <asm/global_data.h>
+#include <asm/msr-index.h>
+#include <asm/processor-flags.h>
+
+.code32
+.globl cpu_call64
+cpu_call64:
+ /*
+ * cpu_call64(ulong pgtable, ulong setup_base, ulong target)
+ *
+ * eax - pgtable
+ * edx - setup_base
+ * ecx - target
+ */
+ cli
+ push %ecx /* arg2 = target */
+ push %edx /* arg1 = setup_base */
+ mov %eax, %ebx
+
+ /* Load new GDT with the 64bit segments using 32bit descriptor */
+ leal gdt, %eax
+ movl %eax, gdt+2
+ lgdt gdt
+
+ /* Enable PAE mode */
+ movl $(X86_CR4_PAE), %eax
+ movl %eax, %cr4
+
+ /* Enable the boot page tables */
+ leal (%ebx), %eax
+ movl %eax, %cr3
+
+ /* Enable Long mode in EFER (Extended Feature Enable Register) */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
+ /* After gdt is loaded */
+ xorl %eax, %eax
+ lldt %ax
+ movl $0x20, %eax
+ ltr %ax
+
+ /*
+ * Setup for the jump to 64bit mode
+ *
+ * When the jump is performed we will be in long mode but
+ * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
+ * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
+ * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
+ * We place all of the values on our mini stack so lret can be
+ * used to perform that far jump. See the gdt below.
+ */
+ pop %esi /* setup_base */
+
+ pushl $0x10
+ leal lret_target, %eax
+ pushl %eax
+
+ /* Enter paged protected Mode, activating Long Mode */
+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
+ movl %eax, %cr0
+
+ /* Jump from 32bit compatibility mode into 64bit mode. */
+ lret
+
+code64:
+lret_target:
+ pop %eax /* target */
+ mov %eax, %eax /* Clear bits 63:32 */
+ jmp *%eax /* Jump to the 64-bit target */
+
+ .data
+gdt:
+ .word gdt_end - gdt
+ .long gdt
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
+ .quad 0x00cf92000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+gdt_end:
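For reference, a sketch decoding the two main descriptors above using the standard x86 segment-descriptor layout (this annotation is not part of the patch):

	/*
	 * 0x00af9a000000ffff (__KERNEL_CS):
	 *   base 0, limit 0xfffff, access 0x9a = present | ring 0 | code | readable,
	 *   flags 0xa = G=1 (4KiB granularity), D=0, L=1 -> 64-bit code segment
	 *
	 * 0x00cf92000000ffff (__KERNEL_DS):
	 *   base 0, limit 0xfffff, access 0x92 = present | ring 0 | data | writable,
	 *   flags 0xc = G=1, D/B=1, L=0 -> flat 32-bit data segment
	 */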
diff --git a/arch/x86/cpu/cpu.c b/arch/x86/cpu/cpu.c
index 8c1eacc0c0..2e252532d6 100644
--- a/arch/x86/cpu/cpu.c
+++ b/arch/x86/cpu/cpu.c
@@ -18,7 +18,10 @@
#include <common.h>
#include <command.h>
+#include <errno.h>
+#include <malloc.h>
#include <asm/control_regs.h>
+#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/interrupt.h>
@@ -339,3 +342,45 @@ int print_cpuinfo(void)
return 0;
}
+
+#define PAGETABLE_SIZE (6 * 4096)
+
+/**
+ * build_pagetable() - build a flat 4GiB page table structure for 64-bit mode
+ *
+ * @pgtable: Pointer to a 24KiB block of memory
+ */
+static void build_pagetable(uint32_t *pgtable)
+{
+ uint i;
+
+ memset(pgtable, '\0', PAGETABLE_SIZE);
+
+ /* Level 4 needs a single entry */
+ pgtable[0] = (uint32_t)&pgtable[1024] + 7;
+
+ /* Level 3 has one 64-bit entry for each GiB of memory */
+ for (i = 0; i < 4; i++) {
+ pgtable[1024 + i * 2] = (uint32_t)&pgtable[2048] +
+ 0x1000 * i + 7;
+ }
+
+ /* Level 2 has 2048 64-bit entries, each representing 2MiB */
+ for (i = 0; i < 2048; i++)
+ pgtable[2048 + i * 2] = 0x183 + (i << 21UL);
+}
+
+int cpu_jump_to_64bit(ulong setup_base, ulong target)
+{
+ uint32_t *pgtable;
+
+ pgtable = memalign(4096, PAGETABLE_SIZE);
+ if (!pgtable)
+ return -ENOMEM;
+
+ build_pagetable(pgtable);
+ cpu_call64((ulong)pgtable, setup_base, target);
+ free(pgtable);
+
+ return -EFAULT;
+}
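For clarity, a sketch of the layout that build_pagetable() creates in the 24KiB block (offsets in bytes from pgtable; derived from the code above, not part of the patch):

	/*
	 *  0x0000  PML4   entry 0 -> PDPT at +0x1000            (flags 0x7 = present | RW | user)
	 *  0x1000  PDPT   entries 0..3 -> PDs at +0x2000..+0x5000 (flags 0x7)
	 *  0x2000  PD 0   512 x 2MiB pages covering 0-1GiB      (flags 0x183 = present | RW | PS | global)
	 *  0x3000  PD 1   512 x 2MiB pages covering 1-2GiB
	 *  0x4000  PD 2   512 x 2MiB pages covering 2-3GiB
	 *  0x5000  PD 3   512 x 2MiB pages covering 3-4GiB
	 *
	 * cpu_call64() loads this block's address into %cr3 before enabling
	 * paging, giving an identity map of the first 4GiB with 2MiB pages.
	 */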