author    Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 12:17:53 -0700
committer Yunhong Jiang <yunhong.jiang@intel.com>  2015-08-04 15:44:42 -0700
commit    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree      1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/lguest
parent    98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt, and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way and it's not good. We should apply
another opnfv project repo in future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/lguest')
-rw-r--r--  kernel/drivers/lguest/Kconfig                 |   13
-rw-r--r--  kernel/drivers/lguest/Makefile                |   26
-rw-r--r--  kernel/drivers/lguest/README                  |   47
-rw-r--r--  kernel/drivers/lguest/core.c                  |  369
-rw-r--r--  kernel/drivers/lguest/hypercalls.c            |  308
-rw-r--r--  kernel/drivers/lguest/interrupts_and_traps.c  |  702
-rw-r--r--  kernel/drivers/lguest/lg.h                    |  258
-rw-r--r--  kernel/drivers/lguest/lguest_user.c           |  445
-rw-r--r--  kernel/drivers/lguest/page_tables.c           | 1239
-rw-r--r--  kernel/drivers/lguest/segments.c              |  228
-rw-r--r--  kernel/drivers/lguest/x86/core.c              |  737
-rw-r--r--  kernel/drivers/lguest/x86/switcher_32.S       |  388
12 files changed, 4760 insertions(+), 0 deletions(-)
diff --git a/kernel/drivers/lguest/Kconfig b/kernel/drivers/lguest/Kconfig
new file mode 100644
index 000000000..169172d2b
--- /dev/null
+++ b/kernel/drivers/lguest/Kconfig
@@ -0,0 +1,13 @@
+config LGUEST
+ tristate "Linux hypervisor example code"
+ depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
+ select HVC_DRIVER
+ ---help---
+ This is a very simple module which allows you to run
+ multiple instances of the same Linux kernel, using the
+ "lguest" command found in the tools/lguest directory.
+
+ Note that "lguest" is pronounced to rhyme with "fell quest",
+ not "rustyvisor". See tools/lguest/lguest.txt.
+
+ If unsure, say N. If curious, say M. If masochistic, say Y.
diff --git a/kernel/drivers/lguest/Makefile b/kernel/drivers/lguest/Makefile
new file mode 100644
index 000000000..16f52ee73
--- /dev/null
+++ b/kernel/drivers/lguest/Makefile
@@ -0,0 +1,26 @@
+# Host requires the other files, which can be a module.
+obj-$(CONFIG_LGUEST) += lg.o
+lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \
+ segments.o lguest_user.o
+
+lg-$(CONFIG_X86_32) += x86/switcher_32.o x86/core.o
+
+Preparation Preparation!: PREFIX=P
+Guest: PREFIX=G
+Drivers: PREFIX=D
+Launcher: PREFIX=L
+Host: PREFIX=H
+Switcher: PREFIX=S
+Mastery: PREFIX=M
+Beer:
+ @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}"
+Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery:
+ @sh ../../tools/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
+Puppy:
+ @clear
+ @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n"
+ @sleep 2; clear; printf "\n\n Sit!\n\n"; sleep 1; clear
+ @printf " __ \n ()'\`; \n /\\|\` \n / | \n(/_)_|_ \n"
+ @sleep 2; clear; printf "\n\n Stand!\n\n"; sleep 1; clear
+ @printf " __ \n ()'\`; \n /\\|\` \n /._.= \n /| / \n(_\_)_ \n"
+ @sleep 2; clear; printf "\n\n Good puppy!\n\n"; sleep 1; clear
diff --git a/kernel/drivers/lguest/README b/kernel/drivers/lguest/README
new file mode 100644
index 000000000..b7db39a64
--- /dev/null
+++ b/kernel/drivers/lguest/README
@@ -0,0 +1,47 @@
+Welcome, friend reader, to lguest.
+
+Lguest is an adventure, with you, the reader, as Hero. I can't think of many
+5000-line projects which offer both such capability and glimpses of future
+potential; it is an exciting time to be delving into the source!
+
+But be warned; this is an arduous journey of several hours or more! And as we
+know, all true Heroes are driven by a Noble Goal. Thus I offer a Beer (or
+equivalent) to anyone I meet who has completed this documentation.
+
+So get comfortable and keep your wits about you (both quick and humorous).
+Along your way to the Noble Goal, you will also gain masterly insight into
+lguest, and hypervisors and x86 virtualization in general.
+
+Our Quest is in seven parts: (best read with C highlighting turned on)
+
+I) Preparation
+ - In which our potential hero is flown quickly over the landscape for a
+ taste of its scope. Suitable for the armchair coders and other such
+ persons of faint constitution.
+
+II) Guest
+ - Where we encounter the first tantalising wisps of code, and come to
+ understand the details of the life of a Guest kernel.
+
+III) Drivers
+ - Whereby the Guest finds its voice and becomes useful, and our
+ understanding of the Guest is completed.
+
+IV) Launcher
+ - Where we trace back to the creation of the Guest, and thus begin our
+ understanding of the Host.
+
+V) Host
+ - Where we master the Host code, through a long and tortuous journey.
+ Indeed, it is here that our hero is tested in the Bit of Despair.
+
+VI) Switcher
+ - Where our understanding of the intertwined nature of Guests and Hosts
+ is completed.
+
+VII) Mastery
+ - Where our fully fledged hero grapples with the Great Question:
+ "What next?"
+
+make Preparation!
+Rusty Russell.
diff --git a/kernel/drivers/lguest/core.c b/kernel/drivers/lguest/core.c
new file mode 100644
index 000000000..312ffd3d0
--- /dev/null
+++ b/kernel/drivers/lguest/core.c
@@ -0,0 +1,369 @@
+/*P:400
+ * This contains run_guest() which actually calls into the Host<->Guest
+ * Switcher and analyzes the return, such as determining if the Guest wants the
+ * Host to do something. This file also contains useful helper routines.
+:*/
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <linux/stddef.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/cpu.h>
+#include <linux/freezer.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <asm/paravirt.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/poll.h>
+#include <asm/asm-offsets.h>
+#include "lg.h"
+
+unsigned long switcher_addr;
+struct page **lg_switcher_pages;
+static struct vm_struct *switcher_vma;
+
+/* This One Big lock protects all inter-guest data structures. */
+DEFINE_MUTEX(lguest_lock);
+
+/*H:010
+ * We need to set up the Switcher at a high virtual address. Remember the
+ * Switcher is a few hundred bytes of assembler code which actually changes the
+ * CPU to run the Guest, and then changes back to the Host when a trap or
+ * interrupt happens.
+ *
+ * The Switcher code must be at the same virtual address in the Guest as the
+ * Host since it will be running as the switchover occurs.
+ *
+ * Trying to map memory at a particular address is an unusual thing to do, so
+ * it's not a simple one-liner.
+ */
+static __init int map_switcher(void)
+{
+ int i, err;
+
+ /*
+ * Map the Switcher in to high memory.
+ *
+ * It turns out that if we choose the address 0xFFC00000 (4MB under the
+ * top virtual address), it makes setting up the page tables really
+ * easy.
+ */
+
+ /* We assume Switcher text fits into a single page. */
+ if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
+ printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
+ end_switcher_text - start_switcher_text);
+ return -EINVAL;
+ }
+
+ /*
+ * We allocate an array of struct page pointers. map_vm_area() wants
+ * this, rather than just an array of pages.
+ */
+ lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
+ * TOTAL_SWITCHER_PAGES,
+ GFP_KERNEL);
+ if (!lg_switcher_pages) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Now we actually allocate the pages. The Guest will see these pages,
+ * so we make sure they're zeroed.
+ */
+ for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
+ lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!lg_switcher_pages[i]) {
+ err = -ENOMEM;
+ goto free_some_pages;
+ }
+ }
+
+ /*
+ * We place the Switcher underneath the fixmap area, which is the
+ * highest virtual address we can get. This is important, since we
+ * tell the Guest it can't access this memory, so we want its ceiling
+ * as high as possible.
+ */
+ switcher_addr = FIXADDR_START - (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE;
+
+ /*
+ * Now we reserve the "virtual memory area" we want. We might
+ * not get it in theory, but in practice it's worked so far.
+ * The end address needs +1 because __get_vm_area allocates an
+ * extra guard page, so we need space for that.
+ */
+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
+ VM_ALLOC, switcher_addr, switcher_addr
+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
+ if (!switcher_vma) {
+ err = -ENOMEM;
+ printk("lguest: could not map switcher pages high\n");
+ goto free_pages;
+ }
+
+ /*
+ * This code actually sets up the pages we've allocated to appear at
+ * switcher_addr. map_vm_area() takes the vma we allocated above, the
+ * kind of pages we're mapping (kernel pages), and a pointer to our
+ * array of struct pages.
+ */
+ err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);
+ if (err) {
+ printk("lguest: map_vm_area failed: %i\n", err);
+ goto free_vma;
+ }
+
+ /*
+ * Now the Switcher is mapped at the right address, we can't fail!
+ * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
+ */
+ memcpy(switcher_vma->addr, start_switcher_text,
+ end_switcher_text - start_switcher_text);
+
+ printk(KERN_INFO "lguest: mapped switcher at %p\n",
+ switcher_vma->addr);
+ /* And we succeeded... */
+ return 0;
+
+free_vma:
+ vunmap(switcher_vma->addr);
+free_pages:
+ i = TOTAL_SWITCHER_PAGES;
+free_some_pages:
+ for (--i; i >= 0; i--)
+ __free_pages(lg_switcher_pages[i], 0);
+ kfree(lg_switcher_pages);
+out:
+ return err;
+}
+/*:*/
+
+/* Cleaning up the mapping when the module is unloaded is almost... too easy. */
+static void unmap_switcher(void)
+{
+ unsigned int i;
+
+ /* vunmap() undoes *both* map_vm_area() and __get_vm_area(). */
+ vunmap(switcher_vma->addr);
+ /* Now we just need to free the pages we copied the switcher into */
+ for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
+ __free_pages(lg_switcher_pages[i], 0);
+ kfree(lg_switcher_pages);
+}
+
+/*H:032
+ * Dealing With Guest Memory.
+ *
+ * Before we go too much further into the Host, we need to grok the routines
+ * we use to deal with Guest memory.
+ *
+ * When the Guest gives us (what it thinks is) a physical address, we can use
+ * the normal copy_from_user() & copy_to_user() on the corresponding place in
+ * the memory region allocated by the Launcher.
+ *
+ * But we can't trust the Guest: it might be trying to access the Launcher
+ * code. We have to check that the range is below the pfn_limit the Launcher
+ * gave us. We have to make sure that addr + len doesn't give us a false
+ * positive by overflowing, too.
+ */
+bool lguest_address_ok(const struct lguest *lg,
+ unsigned long addr, unsigned long len)
+{
+ return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
+}
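+
+/*
+ * (Illustrative numbers only: with a 64MB Guest, pfn_limit * PAGE_SIZE
+ * is 0x4000000.  A malicious addr of 0xFFFFF000 with len 0x2000 wraps
+ * addr+len around to 0x1000, which sneaks under the limit; it's the
+ * second (addr+len >= addr) test which catches the wrap.)
+ */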
+
+/*
+ * This routine copies memory from the Guest. Here we can see how useful the
+ * kill_guest() routine we met in the Launcher can be: we return a random
+ * value (all zeroes) instead of needing to return an error.
+ */
+void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes)
+{
+ if (!lguest_address_ok(cpu->lg, addr, bytes)
+ || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) {
+ /* copy_from_user should do this, but as we rely on it... */
+ memset(b, 0, bytes);
+ kill_guest(cpu, "bad read address %#lx len %u", addr, bytes);
+ }
+}
+
+/* This is the write (copy into Guest) version. */
+void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
+ unsigned bytes)
+{
+ if (!lguest_address_ok(cpu->lg, addr, bytes)
+ || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0)
+ kill_guest(cpu, "bad write address %#lx len %u", addr, bytes);
+}
+/*:*/
+
+/*H:030
+ * Let's jump straight to the main loop which runs the Guest.
+ * Remember, this is called by the Launcher reading /dev/lguest, and we keep
+ * going around and around until something interesting happens.
+ */
+int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
+{
+ /* If the launcher asked for a register with LHREQ_GETREG */
+ if (cpu->reg_read) {
+ if (put_user(*cpu->reg_read, user))
+ return -EFAULT;
+ cpu->reg_read = NULL;
+ return sizeof(*cpu->reg_read);
+ }
+
+ /* We stop running once the Guest is dead. */
+ while (!cpu->lg->dead) {
+ unsigned int irq;
+ bool more;
+
+ /* First we run any hypercalls the Guest wants done. */
+ if (cpu->hcall)
+ do_hypercalls(cpu);
+
+ /* Do we have to tell the Launcher about a trap? */
+ if (cpu->pending.trap) {
+ if (copy_to_user(user, &cpu->pending,
+ sizeof(cpu->pending)))
+ return -EFAULT;
+ return sizeof(cpu->pending);
+ }
+
+ /*
+ * All long-lived kernel loops need to check with this horrible
+ * thing called the freezer. If the Host is trying to suspend,
+ * it stops us.
+ */
+ try_to_freeze();
+
+ /* Check for signals */
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ /*
+ * Check if there are any interrupts which can be delivered now:
+ * if so, this sets up the handler to be executed when we next
+ * run the Guest.
+ */
+ irq = interrupt_pending(cpu, &more);
+ if (irq < LGUEST_IRQS)
+ try_deliver_interrupt(cpu, irq, more);
+
+ /*
+ * Just make absolutely sure the Guest is still alive. One of
+ * those hypercalls could have been fatal, for example.
+ */
+ if (cpu->lg->dead)
+ break;
+
+ /*
+ * If the Guest asked to be stopped, we sleep. The Guest's
+ * clock timer will wake us.
+ */
+ if (cpu->halted) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ /*
+ * Just before we sleep, make sure no interrupt snuck in
+ * which we should be delivering.
+ */
+ if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
+ set_current_state(TASK_RUNNING);
+ else
+ schedule();
+ continue;
+ }
+
+ /*
+ * OK, now we're ready to jump into the Guest. First we put up
+ * the "Do Not Disturb" sign:
+ */
+ local_irq_disable();
+
+ /* Actually run the Guest until something happens. */
+ lguest_arch_run_guest(cpu);
+
+ /* Now we're ready to be interrupted or moved to other CPUs */
+ local_irq_enable();
+
+ /* Now we deal with whatever happened to the Guest. */
+ lguest_arch_handle_trap(cpu);
+ }
+
+ /* Special case: Guest is 'dead' but wants a reboot. */
+ if (cpu->lg->dead == ERR_PTR(-ERESTART))
+ return -ERESTART;
+
+ /* The Guest is dead => "No such file or directory" */
+ return -ENOENT;
+}
+
+/*H:000
+ * Welcome to the Host!
+ *
+ * By this point your brain has been tickled by the Guest code and numbed by
+ * the Launcher code; prepare for it to be stretched by the Host code. This is
+ * the heart. Let's begin at the initialization routine for the Host's lg
+ * module.
+ */
+static int __init init(void)
+{
+ int err;
+
+ /* Lguest can't run under Xen, VMI or itself. It does Tricky Stuff. */
+ if (get_kernel_rpl() != 0) {
+ printk("lguest is afraid of being a guest\n");
+ return -EPERM;
+ }
+
+ /* First we put the Switcher up in very high virtual memory. */
+ err = map_switcher();
+ if (err)
+ goto out;
+
+ /* We might need to reserve an interrupt vector. */
+ err = init_interrupts();
+ if (err)
+ goto unmap;
+
+ /* /dev/lguest needs to be registered. */
+ err = lguest_device_init();
+ if (err)
+ goto free_interrupts;
+
+ /* Finally we do some architecture-specific setup. */
+ lguest_arch_host_init();
+
+ /* All good! */
+ return 0;
+
+free_interrupts:
+ free_interrupts();
+unmap:
+ unmap_switcher();
+out:
+ return err;
+}
+
+/* Cleaning up is just the same code, backwards. With a little French. */
+static void __exit fini(void)
+{
+ lguest_device_remove();
+ free_interrupts();
+ unmap_switcher();
+
+ lguest_arch_host_fini();
+}
+/*:*/
+
+/*
+ * The Host side of lguest can be a module. This is a nice way for people to
+ * play with it.
+ */
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
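A quick aside before we read on: what does the other end of run_guest() look
like? The real Launcher loop lives in tools/lguest/lguest.c and uses pread()
with the cpu id as the offset; the sketch below is only a simplified,
hypothetical reconstruction (launcher_loop is a made-up name) of how read()'s
results line up with run_guest()'s return values.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical sketch: drive one Guest cpu via its /dev/lguest fd. */
static int launcher_loop(int lguest_fd)
{
	char notify[256];		/* stand-in for struct lguest_pending */

	for (;;) {
		ssize_t r = read(lguest_fd, notify, sizeof(notify));

		if (r > 0)
			continue;	/* a trap/notify for us to service */
		if (errno == ERESTART)
			return 1;	/* Guest wants a reboot */
		if (errno == ENOENT)
			return 0;	/* Guest is dead */
		if (errno == EINTR)
			continue;	/* a signal: just retry */
		perror("read /dev/lguest");
		return -1;
	}
}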
diff --git a/kernel/drivers/lguest/hypercalls.c b/kernel/drivers/lguest/hypercalls.c
new file mode 100644
index 000000000..19a322807
--- /dev/null
+++ b/kernel/drivers/lguest/hypercalls.c
@@ -0,0 +1,308 @@
+/*P:500
+ * Just as userspace programs request kernel operations through a system
+ * call, the Guest requests Host operations through a "hypercall". You might
+ * notice this nomenclature doesn't really follow any logic, but the name has
+ * been around for long enough that we're stuck with it. As you'd expect, this
+ * code is basically one big switch statement.
+:*/
+
+/* Copyright (C) 2006 Rusty Russell IBM Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/mm.h>
+#include <linux/ktime.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include "lg.h"
+
+/*H:120
+ * This is the core hypercall routine: where the Guest gets what it wants.
+ * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both.
+ */
+static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
+{
+ switch (args->arg0) {
+ case LHCALL_FLUSH_ASYNC:
+ /*
+ * This call does nothing, except by breaking out of the Guest
+ * it makes us process all the asynchronous hypercalls.
+ */
+ break;
+ case LHCALL_SEND_INTERRUPTS:
+ /*
+ * This call does nothing too, but by breaking out of the Guest
+ * it makes us process any pending interrupts.
+ */
+ break;
+ case LHCALL_LGUEST_INIT:
+ /*
+ * You can't get here unless you're already initialized. Don't
+ * do that.
+ */
+ kill_guest(cpu, "already have lguest_data");
+ break;
+ case LHCALL_SHUTDOWN: {
+ char msg[128];
+ /*
+ * Shutdown is such a trivial hypercall that we do it in five
+ * lines right here.
+ *
+ * If the lgread fails, it will call kill_guest() itself; the
+ * kill_guest() with the message will be ignored.
+ */
+ __lgread(cpu, msg, args->arg1, sizeof(msg));
+ msg[sizeof(msg)-1] = '\0';
+ kill_guest(cpu, "CRASH: %s", msg);
+ if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
+ cpu->lg->dead = ERR_PTR(-ERESTART);
+ break;
+ }
+ case LHCALL_FLUSH_TLB:
+ /* FLUSH_TLB comes in two flavors, depending on the argument: */
+ if (args->arg1)
+ guest_pagetable_clear_all(cpu);
+ else
+ guest_pagetable_flush_user(cpu);
+ break;
+
+ /*
+ * All these calls simply pass the arguments through to the right
+ * routines.
+ */
+ case LHCALL_NEW_PGTABLE:
+ guest_new_pagetable(cpu, args->arg1);
+ break;
+ case LHCALL_SET_STACK:
+ guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
+ break;
+ case LHCALL_SET_PTE:
+#ifdef CONFIG_X86_PAE
+ guest_set_pte(cpu, args->arg1, args->arg2,
+ __pte(args->arg3 | (u64)args->arg4 << 32));
+#else
+ guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
+#endif
+ break;
+ case LHCALL_SET_PGD:
+ guest_set_pgd(cpu->lg, args->arg1, args->arg2);
+ break;
+#ifdef CONFIG_X86_PAE
+ case LHCALL_SET_PMD:
+ guest_set_pmd(cpu->lg, args->arg1, args->arg2);
+ break;
+#endif
+ case LHCALL_SET_CLOCKEVENT:
+ guest_set_clockevent(cpu, args->arg1);
+ break;
+ case LHCALL_TS:
+ /* This sets the TS flag, as we saw used in run_guest(). */
+ cpu->ts = args->arg1;
+ break;
+ case LHCALL_HALT:
+ /* Similarly, this sets the halted flag for run_guest(). */
+ cpu->halted = 1;
+ break;
+ default:
+ /* It should be an architecture-specific hypercall. */
+ if (lguest_arch_do_hcall(cpu, args))
+ kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
+ }
+}
+
+/*H:124
+ * Asynchronous hypercalls are easy: we just look in the array in the
+ * Guest's "struct lguest_data" to see if any new ones are marked "ready".
+ *
+ * We are careful to do these in order: obviously we respect the order the
+ * Guest put them in the ring, but we also promise the Guest that they will
+ * happen before any normal hypercall (which is why we check this before
+ * checking for a normal hcall).
+ */
+static void do_async_hcalls(struct lg_cpu *cpu)
+{
+ unsigned int i;
+ u8 st[LHCALL_RING_SIZE];
+
+ /* For simplicity, we copy the entire call status array in at once. */
+ if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st)))
+ return;
+
+ /* We process "struct lguest_data"'s hcalls[] ring once. */
+ for (i = 0; i < ARRAY_SIZE(st); i++) {
+ struct hcall_args args;
+ /*
+ * We remember where we were up to from last time. This makes
+ * sure that the hypercalls are done in the order the Guest
+ * places them in the ring.
+ */
+ unsigned int n = cpu->next_hcall;
+
+ /* 0xFF means there's no call here (yet). */
+ if (st[n] == 0xFF)
+ break;
+
+ /*
+ * OK, we have a hypercall. Increment the "next_hcall" cursor,
+ * and wrap back to 0 if we reach the end.
+ */
+ if (++cpu->next_hcall == LHCALL_RING_SIZE)
+ cpu->next_hcall = 0;
+
+ /*
+ * Copy the hypercall arguments into a local copy of the
+ * hcall_args struct.
+ */
+ if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
+ sizeof(struct hcall_args))) {
+ kill_guest(cpu, "Fetching async hypercalls");
+ break;
+ }
+
+ /* Do the hypercall, same as a normal one. */
+ do_hcall(cpu, &args);
+
+ /* Mark the hypercall done. */
+ if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) {
+ kill_guest(cpu, "Writing result for async hypercall");
+ break;
+ }
+
+ /*
+ * Stop doing hypercalls if they want to notify the Launcher:
+ * it needs to service this first.
+ */
+ if (cpu->pending.trap)
+ break;
+ }
+}
+
+/*
+ * Last of all, we look at what happens first of all. The very first time the
+ * Guest makes a hypercall, we end up here to set things up:
+ */
+static void initialize(struct lg_cpu *cpu)
+{
+ /*
+ * You can't do anything until you're initialized. The Guest knows the
+ * rules, so we're unforgiving here.
+ */
+ if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
+ kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
+ return;
+ }
+
+ if (lguest_arch_init_hypercalls(cpu))
+ kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
+
+ /*
+ * The Guest tells us where we're not to deliver interrupts by putting
+ * the instruction address into "struct lguest_data".
+ */
+ if (get_user(cpu->lg->noirq_iret, &cpu->lg->lguest_data->noirq_iret))
+ kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
+
+ /*
+ * We write the current time into the Guest's data page once so it can
+ * set its clock.
+ */
+ write_timestamp(cpu);
+
+ /* page_tables.c will also do some setup. */
+ page_table_guest_data_init(cpu);
+
+ /*
+ * This is the one case where the above accesses might have been the
+ * first write to a Guest page. This may have caused a copy-on-write
+ * fault, but the old page might be (read-only) in the Guest
+ * pagetable.
+ */
+ guest_pagetable_clear_all(cpu);
+}
+/*:*/
+
+/*M:013
+ * If a Guest reads from a page (so creates a mapping) that it has never
+ * written to, and then the Launcher writes to it (ie. the output of a virtual
+ * device), the Guest will still see the old page. In practice, this never
+ * happens: why would the Guest read a page which it has never written to? But
+ * a similar scenario might one day bite us, so it's worth mentioning.
+ *
+ * Note that if we used a shared anonymous mapping in the Launcher instead of
+ * mapping /dev/zero private, we wouldn't worry about copy-on-write. And we
+ * need that to switch the Launcher to processes (away from threads) anyway.
+:*/
+
+/*H:100
+ * Hypercalls
+ *
+ * Remember from the Guest, hypercalls come in two flavors: normal and
+ * asynchronous. This file handles both types.
+ */
+void do_hypercalls(struct lg_cpu *cpu)
+{
+ /* Not initialized yet? This hypercall must do it. */
+ if (unlikely(!cpu->lg->lguest_data)) {
+ /* Set up the "struct lguest_data" */
+ initialize(cpu);
+ /* Hcall is done. */
+ cpu->hcall = NULL;
+ return;
+ }
+
+ /*
+ * The Guest has initialized.
+ *
+ * Look in the hypercall ring for the async hypercalls:
+ */
+ do_async_hcalls(cpu);
+
+ /*
+ * If we stopped reading the hypercall ring because the Guest did a
+ * NOTIFY to the Launcher, we want to return now. Otherwise we do
+ * the hypercall.
+ */
+ if (!cpu->pending.trap) {
+ do_hcall(cpu, cpu->hcall);
+ /*
+ * Tricky point: we reset the hcall pointer to mark the
+ * hypercall as "done". We use the hcall pointer rather than
+ * the trap number to indicate a hypercall is pending.
+ * Normally it doesn't matter: the Guest will run again and
+ * update the trap number before we come back here.
+ *
+ * However, if we are signalled or the Guest sends I/O to the
+ * Launcher, the run_guest() loop will exit without running the
+ * Guest. When it comes back it would try to re-run the
+ * hypercall. Finding that bug sucked.
+ */
+ cpu->hcall = NULL;
+ }
+}
+
+/*
+ * This routine supplies the Guest with time: it's used for wallclock time at
+ * initial boot and as a rough time source if the TSC isn't available.
+ */
+void write_timestamp(struct lg_cpu *cpu)
+{
+ struct timespec now;
+ ktime_get_real_ts(&now);
+ if (copy_to_user(&cpu->lg->lguest_data->time,
+ &now, sizeof(struct timespec)))
+ kill_guest(cpu, "Writing timestamp");
+}
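Aside: the Guest half of this asynchronous ring lives in
arch/x86/lguest/boot.c. The toy user-space model below illustrates just the
protocol: 0xFF means "slot free", 0 means "ready", and each side keeps its own
cursor. RING_SIZE, guest_enqueue(), host_drain() and the call numbers are all
made up for illustration.

#include <stdio.h>
#include <string.h>

#define RING_SIZE 64			/* stands in for LHCALL_RING_SIZE */

struct hcall_args { unsigned long arg0, arg1; };

static unsigned char status[RING_SIZE];	/* 0xFF = slot free, 0 = ready */
static struct hcall_args ring[RING_SIZE];
static unsigned int guest_next, host_next;

/* Guest side: claim the next slot, fill it, then mark it ready. */
static int guest_enqueue(unsigned long call, unsigned long arg1)
{
	if (status[guest_next] != 0xFF)
		return -1;	/* ring full: fall back to a synchronous hcall */
	ring[guest_next].arg0 = call;
	ring[guest_next].arg1 = arg1;
	/* (the kernel puts a wmb() here: args must land before "ready") */
	status[guest_next] = 0;
	guest_next = (guest_next + 1) % RING_SIZE;
	return 0;
}

/* Host side: the same cursor walk as do_async_hcalls() above. */
static void host_drain(void)
{
	while (status[host_next] != 0xFF) {
		printf("hcall %lu(%lu)\n",
		       ring[host_next].arg0, ring[host_next].arg1);
		status[host_next] = 0xFF;	/* done: slot is free again */
		host_next = (host_next + 1) % RING_SIZE;
	}
}

int main(void)
{
	memset(status, 0xFF, sizeof(status));
	guest_enqueue(1, 0x1000);	/* made-up call numbers */
	guest_enqueue(2, 0);
	host_drain();
	return 0;
}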
diff --git a/kernel/drivers/lguest/interrupts_and_traps.c b/kernel/drivers/lguest/interrupts_and_traps.c
new file mode 100644
index 000000000..5e7559be2
--- /dev/null
+++ b/kernel/drivers/lguest/interrupts_and_traps.c
@@ -0,0 +1,702 @@
+/*P:800
+ * Interrupts (traps) are complicated enough to earn their own file.
+ * There are three classes of interrupts:
+ *
+ * 1) Real hardware interrupts which occur while we're running the Guest,
+ * 2) Interrupts for virtual devices attached to the Guest, and
+ * 3) Traps and faults from the Guest.
+ *
+ * Real hardware interrupts must be delivered to the Host, not the Guest.
+ * Virtual interrupts must be delivered to the Guest, but we make them look
+ * just like real hardware would deliver them. Traps from the Guest can be set
+ * up to go directly back into the Guest, but sometimes the Host wants to see
+ * them first, so we also have a way of "reflecting" them into the Guest as if
+ * they had been delivered to it directly.
+:*/
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include "lg.h"
+
+/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */
+static unsigned int syscall_vector = SYSCALL_VECTOR;
+module_param(syscall_vector, uint, 0444);
+
+/* The address of the interrupt handler is split into two bits: */
+static unsigned long idt_address(u32 lo, u32 hi)
+{
+ return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
+}
+
+/*
+ * The "type" of the interrupt handler is a 4 bit field: we only support a
+ * couple of types.
+ */
+static int idt_type(u32 lo, u32 hi)
+{
+ return (hi >> 8) & 0xF;
+}
+
+/* An IDT entry can't be used unless the "present" bit is set. */
+static bool idt_present(u32 lo, u32 hi)
+{
+ return (hi & 0x8000);
+}
+
+/*
+ * We need a helper to "push" a value onto the Guest's stack, since that's a
+ * big part of what delivering an interrupt does.
+ */
+static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
+{
+ /* The stack grows downwards: move the pointer down, then write the value. */
+ *gstack -= 4;
+ lgwrite(cpu, *gstack, u32, val);
+}
+
+/*H:210
+ * The push_guest_interrupt_stack() routine saves Guest state on the stack for
+ * an interrupt or trap. The mechanics of delivering traps and interrupts to
+ * the Guest are the same, except some traps have an "error code" which gets
+ * pushed onto the stack as well: the caller tells us if this is one.
+ *
+ * We set up the stack just like the CPU does for a real interrupt, so it's
+ * identical for the Guest (and the standard "iret" instruction will undo
+ * it).
+ */
+static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
+{
+ unsigned long gstack, origstack;
+ u32 eflags, ss, irq_enable;
+ unsigned long virtstack;
+
+ /*
+ * There are two cases for interrupts: one where the Guest is already
+ * in the kernel, and a more complex one where the Guest is in
+ * userspace. We check the privilege level to find out.
+ */
+ if ((cpu->regs->ss&0x3) != GUEST_PL) {
+ /*
+ * The Guest told us their kernel stack with the SET_STACK
+ * hypercall: both the virtual address and the segment.
+ */
+ virtstack = cpu->esp1;
+ ss = cpu->ss1;
+
+ origstack = gstack = guest_pa(cpu, virtstack);
+ /*
+ * We push the old stack segment and pointer onto the new
+ * stack: when the Guest does an "iret" back from the interrupt
+ * handler the CPU will notice they're dropping privilege
+ * levels and expect these here.
+ */
+ push_guest_stack(cpu, &gstack, cpu->regs->ss);
+ push_guest_stack(cpu, &gstack, cpu->regs->esp);
+ } else {
+ /* We're staying on the same Guest (kernel) stack. */
+ virtstack = cpu->regs->esp;
+ ss = cpu->regs->ss;
+
+ origstack = gstack = guest_pa(cpu, virtstack);
+ }
+
+ /*
+ * Remember that we never let the Guest actually disable interrupts, so
+ * the "Interrupt Flag" bit is always set. We copy that bit from the
+ * Guest's "irq_enabled" field into the eflags word: we saw the Guest
+ * copy it back in "lguest_iret".
+ */
+ eflags = cpu->regs->eflags;
+ if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
+ && !(irq_enable & X86_EFLAGS_IF))
+ eflags &= ~X86_EFLAGS_IF;
+
+ /*
+ * An interrupt is expected to push three things on the stack: the old
+ * "eflags" word, the old code segment, and the old instruction
+ * pointer.
+ */
+ push_guest_stack(cpu, &gstack, eflags);
+ push_guest_stack(cpu, &gstack, cpu->regs->cs);
+ push_guest_stack(cpu, &gstack, cpu->regs->eip);
+
+ /* For those traps which supply an error code, we push that, too. */
+ if (has_err)
+ push_guest_stack(cpu, &gstack, cpu->regs->errcode);
+
+ /* Adjust the stack pointer and stack segment. */
+ cpu->regs->ss = ss;
+ cpu->regs->esp = virtstack + (gstack - origstack);
+}
+
+/*
+ * This actually makes the Guest start executing the given interrupt/trap
+ * handler.
+ *
+ * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
+ * interrupt or trap. It's split into two parts for traditional reasons: gcc
+ * on i386 used to be frightened by 64 bit numbers.
+ */
+static void guest_run_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi)
+{
+ /* If we're already in the kernel, we don't change stacks. */
+ if ((cpu->regs->ss&0x3) != GUEST_PL)
+ cpu->regs->ss = cpu->esp1;
+
+ /*
+ * Set the code segment and the address to execute.
+ */
+ cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
+ cpu->regs->eip = idt_address(lo, hi);
+
+ /*
+ * Trapping always clears these flags:
+ * TF: Trap flag
+ * VM: Virtual 8086 mode
+ * RF: Resume
+ * NT: Nested task.
+ */
+ cpu->regs->eflags &=
+ ~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
+
+ /*
+ * There are two kinds of interrupt handlers: 0xE is an "interrupt
+ * gate" which expects interrupts to be disabled on entry.
+ */
+ if (idt_type(lo, hi) == 0xE)
+ if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
+ kill_guest(cpu, "Disabling interrupts");
+}
+
+/* This restores the eflags word which was pushed on the stack by a trap */
+static void restore_eflags(struct lg_cpu *cpu)
+{
+ /* This is the physical address of the stack. */
+ unsigned long stack_pa = guest_pa(cpu, cpu->regs->esp);
+
+ /*
+ * Stack looks like this:
+ * Address Contents
+ * esp EIP
+ * esp + 4 CS
+ * esp + 8 EFLAGS
+ */
+ cpu->regs->eflags = lgread(cpu, stack_pa + 8, u32);
+ cpu->regs->eflags &=
+ ~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
+}
+
+/*H:205
+ * Virtual Interrupts.
+ *
+ * interrupt_pending() returns the first pending interrupt which isn't blocked
+ * by the Guest. It is called before every entry to the Guest, and just before
+ * we go to sleep when the Guest has halted itself.
+ */
+unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
+{
+ unsigned int irq;
+ DECLARE_BITMAP(blk, LGUEST_IRQS);
+
+ /* If the Guest hasn't even initialized yet, we can do nothing. */
+ if (!cpu->lg->lguest_data)
+ return LGUEST_IRQS;
+
+ /*
+ * Take our "irqs_pending" array and remove any interrupts the Guest
+ * wants blocked: the result ends up in "blk".
+ */
+ if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
+ sizeof(blk)))
+ return LGUEST_IRQS;
+ bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);
+
+ /* Find the first interrupt. */
+ irq = find_first_bit(blk, LGUEST_IRQS);
+ *more = find_next_bit(blk, LGUEST_IRQS, irq+1);
+
+ return irq;
+}
+
+/*
+ * This actually diverts the Guest to running an interrupt handler, once an
+ * interrupt has been identified by interrupt_pending().
+ */
+void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
+{
+ struct desc_struct *idt;
+
+ BUG_ON(irq >= LGUEST_IRQS);
+
+ /* If they're halted, interrupts restart them. */
+ if (cpu->halted) {
+ /* Re-enable interrupts. */
+ if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
+ kill_guest(cpu, "Re-enabling interrupts");
+ cpu->halted = 0;
+ } else {
+ /* Otherwise we check if they have interrupts disabled. */
+ u32 irq_enabled;
+ if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
+ irq_enabled = 0;
+ if (!irq_enabled) {
+ /* Make sure they know an IRQ is pending. */
+ put_user(X86_EFLAGS_IF,
+ &cpu->lg->lguest_data->irq_pending);
+ return;
+ }
+ }
+
+ /*
+ * Look at the IDT entry the Guest gave us for this interrupt. The
+ * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
+ * over them.
+ */
+ idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
+ /* If they don't have a handler (yet?), we just ignore it */
+ if (idt_present(idt->a, idt->b)) {
+ /* OK, mark it no longer pending and deliver it. */
+ clear_bit(irq, cpu->irqs_pending);
+
+ /*
+ * They may be about to iret, where they asked us never to
+ * deliver interrupts. In this case, we can emulate that iret
+ * then immediately deliver the interrupt. This is basically
+ * a noop: the iret would pop the interrupt frame and restore
+ * eflags, and then we'd set it up again. So just restore the
+ * eflags word and jump straight to the handler in this case.
+ *
+ * Denys Vlasenko points out that this isn't quite right: if
+ * the iret was returning to userspace, then that interrupt
+ * would reset the stack pointer (which the Guest told us
+ * about via LHCALL_SET_STACK). But unless the Guest is being
+ * *really* weird, that will be the same as the current stack
+ * anyway.
+ */
+ if (cpu->regs->eip == cpu->lg->noirq_iret) {
+ restore_eflags(cpu);
+ } else {
+ /*
+ * push_guest_interrupt_stack() takes a flag to say whether
+ * this interrupt pushes an error code onto the stack
+ * as well: virtual interrupts never do.
+ */
+ push_guest_interrupt_stack(cpu, false);
+ }
+ /* Actually make Guest cpu jump to handler. */
+ guest_run_interrupt(cpu, idt->a, idt->b);
+ }
+
+ /*
+ * Every time we deliver an interrupt, we update the timestamp in the
+ * Guest's lguest_data struct. It would be better for the Guest if we
+ * did this more often, but it can actually be quite slow: doing it
+ * here is a compromise which means at least it gets updated every
+ * timer interrupt.
+ */
+ write_timestamp(cpu);
+
+ /*
+ * If there are no other interrupts we want to deliver, clear
+ * the pending flag.
+ */
+ if (!more)
+ put_user(0, &cpu->lg->lguest_data->irq_pending);
+}
+
+/* And this is the routine when we want to set an interrupt for the Guest. */
+void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
+{
+ /*
+ * Next time the Guest runs, the core code will see if it can deliver
+ * this interrupt.
+ */
+ set_bit(irq, cpu->irqs_pending);
+
+ /*
+ * Make sure it sees it; it might be asleep (eg. halted), or running
+ * the Guest right now, in which case kick_process() will knock it out.
+ */
+ if (!wake_up_process(cpu->tsk))
+ kick_process(cpu->tsk);
+}
+/*:*/
+
+/*
+ * Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent
+ * me a patch, so we support that too. It'd be a big step for lguest if half
+ * the Plan 9 user base were to start using it.
+ *
+ * Actually now I think of it, it's possible that Ron *is* half the Plan 9
+ * userbase. Oh well.
+ */
+static bool could_be_syscall(unsigned int num)
+{
+ /* Normal Linux SYSCALL_VECTOR or reserved vector? */
+ return num == SYSCALL_VECTOR || num == syscall_vector;
+}
+
+/* The syscall vector it wants must be unused by the Host. */
+bool check_syscall_vector(struct lguest *lg)
+{
+ u32 vector;
+
+ if (get_user(vector, &lg->lguest_data->syscall_vec))
+ return false;
+
+ return could_be_syscall(vector);
+}
+
+int init_interrupts(void)
+{
+ /* If they want some strange system call vector, reserve it now */
+ if (syscall_vector != SYSCALL_VECTOR) {
+ if (test_bit(syscall_vector, used_vectors) ||
+ vector_used_by_percpu_irq(syscall_vector)) {
+ printk(KERN_ERR "lg: couldn't reserve syscall %u\n",
+ syscall_vector);
+ return -EBUSY;
+ }
+ set_bit(syscall_vector, used_vectors);
+ }
+
+ return 0;
+}
+
+void free_interrupts(void)
+{
+ if (syscall_vector != SYSCALL_VECTOR)
+ clear_bit(syscall_vector, used_vectors);
+}
+
+/*H:220
+ * Now we've got the routines to deliver interrupts, delivering traps like
+ * page fault is easy. The only trick is that Intel decided that some traps
+ * should have error codes:
+ */
+static bool has_err(unsigned int trap)
+{
+ return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
+}
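+
+/*
+ * Those are vectors 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17
+ * (#AC): exactly the ones the CPU defines as pushing an error code.
+ */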
+
+/* deliver_trap() returns true if it could deliver the trap. */
+bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
+{
+ /*
+ * Trap numbers are always 8 bit, but we set an impossible trap number
+ * for traps inside the Switcher, so check that here.
+ */
+ if (num >= ARRAY_SIZE(cpu->arch.idt))
+ return false;
+
+ /*
+ * Early on the Guest hasn't set the IDT entries (or maybe it put a
+ * bogus one in): if we fail here, the Guest will be killed.
+ */
+ if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
+ return false;
+ push_guest_interrupt_stack(cpu, has_err(num));
+ guest_run_interrupt(cpu, cpu->arch.idt[num].a,
+ cpu->arch.idt[num].b);
+ return true;
+}
+
+/*H:250
+ * Here's the hard part: returning to the Host every time a trap happens
+ * and then calling deliver_trap() and re-entering the Guest is slow.
+ * Particularly because Guest userspace system calls are traps (usually trap
+ * 128).
+ *
+ * So we'd like to set up the IDT to tell the CPU to deliver traps directly
+ * into the Guest. This is possible, but the complexities cause the size of
+ * this file to double! However, 150 lines of code is worth writing for taking
+ * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all
+ * the other hypervisors would beat it up at lunchtime.
+ *
+ * This routine indicates if a particular trap number could be delivered
+ * directly.
+ */
+static bool direct_trap(unsigned int num)
+{
+ /*
+ * Hardware interrupts don't go to the Guest at all (except system
+ * call).
+ */
+ if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num))
+ return false;
+
+ /*
+ * The Host needs to see page faults (for shadow paging and to save the
+ * fault address), general protection faults (in/out emulation) and
+ * device not available (TS handling) and of course, the hypercall trap.
+ */
+ return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY;
+}
+/*:*/
+
+/*M:005
+ * The Guest has the ability to turn its interrupt gates into trap gates,
+ * if it is careful. The Host will let trap gates go directly to the
+ * Guest, but the Guest needs the interrupts atomically disabled for an
+ * interrupt gate. The Host could provide a mechanism to register more
+ * "no-interrupt" regions, and the Guest could point the trap gate at
+ * instructions within that region, where it can safely disable interrupts.
+ */
+
+/*M:006
+ * The Guests do not use the sysenter (fast system call) instruction,
+ * because it's hardcoded to enter privilege level 0 and so can't go direct.
+ * It's about twice as fast as the older "int 0x80" system call, so it might
+ * still be worthwhile to handle it in the Switcher and lcall down to the
+ * Guest. The sysenter semantics are hairy tho: search for that keyword in
+ * entry.S
+:*/
+
+/*H:260
+ * When we make traps go directly into the Guest, we need to make sure
+ * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the
+ * CPU trying to deliver the trap will fault while trying to push the interrupt
+ * words on the stack: this is called a double fault, and it forces us to kill
+ * the Guest.
+ *
+ * Which is deeply unfair, because (literally!) it wasn't the Guest's fault.
+ */
+void pin_stack_pages(struct lg_cpu *cpu)
+{
+ unsigned int i;
+
+ /*
+ * Depending on the CONFIG_4KSTACKS option, the Guest can have one or
+ * two pages of stack space.
+ */
+ for (i = 0; i < cpu->lg->stack_pages; i++)
+ /*
+ * The stack grows *downwards*, so the address we're given is
+ * one past the top of the stack: the start of the page above
+ * it. Subtract one to get back onto the highest stack page,
+ * and keep subtracting to get to the rest of the stack pages.
+ */
+ pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
+}
+
+/*
+ * Direct traps also mean that we need to know whenever the Guest wants to use
+ * a different kernel stack, so we can change the guest TSS to use that
+ * stack. The TSS entries expect a virtual address, so unlike most addresses
+ * the Guest gives us, the "esp" (stack pointer) value here is virtual, not
+ * physical.
+ *
+ * In Linux each process has its own kernel stack, so this happens a lot: we
+ * change stacks on each context switch.
+ */
+void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
+{
+ /*
+ * You're not allowed a stack segment with privilege level 0: bad Guest!
+ */
+ if ((seg & 0x3) != GUEST_PL)
+ kill_guest(cpu, "bad stack segment %i", seg);
+ /* We only expect one or two stack pages. */
+ if (pages > 2)
+ kill_guest(cpu, "bad stack pages %u", pages);
+ /* Save where the stack is, and how many pages */
+ cpu->ss1 = seg;
+ cpu->esp1 = esp;
+ cpu->lg->stack_pages = pages;
+ /* Make sure the new stack pages are mapped */
+ pin_stack_pages(cpu);
+}
+
+/*
+ * All this reference to mapping stacks leads us neatly into the other complex
+ * part of the Host: page table handling.
+ */
+
+/*H:235
+ * This is the routine which actually checks the Guest's IDT entry and
+ * transfers it into the entry in "struct lguest":
+ */
+static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
+ unsigned int num, u32 lo, u32 hi)
+{
+ u8 type = idt_type(lo, hi);
+
+ /* We zero-out a not-present entry */
+ if (!idt_present(lo, hi)) {
+ trap->a = trap->b = 0;
+ return;
+ }
+
+ /* We only support interrupt and trap gates. */
+ if (type != 0xE && type != 0xF)
+ kill_guest(cpu, "bad IDT type %i", type);
+
+ /*
+ * We only copy the handler address, present bit, privilege level and
+ * type. The privilege level controls where the trap can be triggered
+ * manually with an "int" instruction. This is usually GUEST_PL,
+ * except for system calls which userspace can use.
+ */
+ trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
+ trap->b = (hi&0xFFFFEF00);
+}
+
+/*H:230
+ * While we're here, dealing with delivering traps and interrupts to the
+ * Guest, we might as well complete the picture: how the Guest tells us where
+ * it wants them to go. This would be simple, except making traps fast
+ * requires some tricks.
+ *
+ * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
+ * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here.
+ */
+void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
+{
+ /*
+ * Guest never handles: NMI, doublefault, spurious interrupt or
+ * hypercall. We ignore when it tries to set them.
+ */
+ if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
+ return;
+
+ /*
+ * Mark the IDT as changed: next time the Guest runs we'll know we have
+ * to copy this again.
+ */
+ cpu->changed |= CHANGED_IDT;
+
+ /* Check that the Guest doesn't try to step outside the bounds. */
+ if (num >= ARRAY_SIZE(cpu->arch.idt))
+ kill_guest(cpu, "Setting idt entry %u", num);
+ else
+ set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
+}
+
+/*
+ * The default entry for each interrupt points into the Switcher routines which
+ * simply return to the Host. The run_guest() loop will then call
+ * deliver_trap() to bounce it back into the Guest.
+ */
+static void default_idt_entry(struct desc_struct *idt,
+ int trap,
+ const unsigned long handler,
+ const struct desc_struct *base)
+{
+ /* A present interrupt gate. */
+ u32 flags = 0x8e00;
+
+ /*
+ * Set the privilege level on the entry for the hypercall: this allows
+ * the Guest to use the "int" instruction to trigger it.
+ */
+ if (trap == LGUEST_TRAP_ENTRY)
+ flags |= (GUEST_PL << 13);
+ else if (base)
+ /*
+ * Copy privilege level from what Guest asked for. This allows
+ * debug (int 3) traps from Guest userspace, for example.
+ */
+ flags |= (base->b & 0x6000);
+
+ /* Now pack it into the IDT entry in its weird format. */
+ idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
+ idt->b = (handler&0xFFFF0000) | flags;
+}
+
+/* When the Guest first starts, we put default entries into the IDT. */
+void setup_default_idt_entries(struct lguest_ro_state *state,
+ const unsigned long *def)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++)
+ default_idt_entry(&state->guest_idt[i], i, def[i], NULL);
+}
+
+/*H:240
+ * We don't use the IDT entries in the "struct lguest" directly, instead
+ * we copy them into the IDT which we've set up for Guests on this CPU, just
+ * before we run the Guest. This routine does that copy.
+ */
+void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
+ const unsigned long *def)
+{
+ unsigned int i;
+
+ /*
+ * We can simply copy the direct traps, otherwise we use the default
+ * ones in the Switcher: they will return to the Host.
+ */
+ for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
+ const struct desc_struct *gidt = &cpu->arch.idt[i];
+
+ /* If no Guest can ever override this trap, leave it alone. */
+ if (!direct_trap(i))
+ continue;
+
+ /*
+ * Only trap gates (type 15) can go direct to the Guest.
+ * Interrupt gates (type 14) disable interrupts as they are
+ * entered, which we never let the Guest do. Not present
+ * entries (type 0x0) also can't go direct, of course.
+ *
+ * If it can't go direct, we still need to copy the priv. level:
+ * they might want to give userspace access to a software
+ * interrupt.
+ */
+ if (idt_type(gidt->a, gidt->b) == 0xF)
+ idt[i] = *gidt;
+ else
+ default_idt_entry(&idt[i], i, def[i], gidt);
+ }
+}
+
+/*H:200
+ * The Guest Clock.
+ *
+ * There are two sources of virtual interrupts. We saw one in lguest_user.c:
+ * the Launcher sending interrupts for virtual devices. The other is the Guest
+ * timer interrupt.
+ *
+ * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long until
+ * the next timer interrupt (in nanoseconds). We use the high-resolution timer
+ * infrastructure to set a callback at that time.
+ *
+ * 0 means "turn off the clock".
+ */
+void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
+{
+ ktime_t expires;
+
+ if (unlikely(delta == 0)) {
+ /* Clock event device is shutting down. */
+ hrtimer_cancel(&cpu->hrt);
+ return;
+ }
+
+ /*
+ * We use wallclock time here, so the Guest might not be running for
+ * all the time between now and the timer interrupt it asked for. This
+ * is almost always the right thing to do.
+ */
+ expires = ktime_add_ns(ktime_get_real(), delta);
+ hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS);
+}
+
+/* This is the function called when the Guest's timer expires. */
+static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
+{
+ struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);
+
+ /* Remember the first interrupt is the timer interrupt. */
+ set_interrupt(cpu, 0);
+ return HRTIMER_NORESTART;
+}
+
+/* This sets up the timer for this Guest. */
+void init_clockdev(struct lg_cpu *cpu)
+{
+ hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ cpu->hrt.function = clockdev_fn;
+}
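Aside: the IDT bit-fiddling at the top of this file is easy to replay
stand-alone. The toy program below decodes a hand-built and entirely made-up
IDT entry using the same helpers, re-typed for user space.

#include <stdio.h>
#include <stdint.h>

static unsigned long idt_address(uint32_t lo, uint32_t hi)
{
	return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}
static int idt_type(uint32_t lo, uint32_t hi)    { return (hi >> 8) & 0xF; }
static int idt_present(uint32_t lo, uint32_t hi) { return !!(hi & 0x8000); }

int main(void)
{
	/* A made-up present trap gate (type 0xF), DPL 1, handler 0xc01234ab */
	uint32_t lo = (0x0060 << 16) | 0x34ab;		/* selector | addr low */
	uint32_t hi = 0xc0120000 | 0x8F00 | (1 << 13);	/* addr high | P/type/DPL */

	printf("handler=%#lx type=%#x present=%d\n",
	       idt_address(lo, hi), idt_type(lo, hi), idt_present(lo, hi));
	return 0;
}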
diff --git a/kernel/drivers/lguest/lg.h b/kernel/drivers/lguest/lg.h
new file mode 100644
index 000000000..ac8ad0461
--- /dev/null
+++ b/kernel/drivers/lguest/lg.h
@@ -0,0 +1,258 @@
+#ifndef _LGUEST_H
+#define _LGUEST_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/stringify.h>
+#include <linux/lguest.h>
+#include <linux/lguest_launcher.h>
+#include <linux/wait.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <asm/lguest.h>
+
+struct pgdir {
+ unsigned long gpgdir;
+ bool switcher_mapped;
+ int last_host_cpu;
+ pgd_t *pgdir;
+};
+
+/* We have two pages shared with guests, per cpu. */
+struct lguest_pages {
+ /* This is the stack page mapped rw in guest */
+ char spare[PAGE_SIZE - sizeof(struct lguest_regs)];
+ struct lguest_regs regs;
+
+ /* This is the host state & guest descriptor page, ro in guest */
+ struct lguest_ro_state state;
+} __attribute__((aligned(PAGE_SIZE)));
+
+#define CHANGED_IDT 1
+#define CHANGED_GDT 2
+#define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */
+#define CHANGED_ALL 3
+
+struct lg_cpu {
+ unsigned int id;
+ struct lguest *lg;
+ struct task_struct *tsk;
+ struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */
+
+ u32 cr2;
+ int ts;
+ u32 esp1;
+ u16 ss1;
+
+ /* Bitmap of what has changed: see CHANGED_* above. */
+ int changed;
+
+ /* Pending operation. */
+ struct lguest_pending pending;
+
+ unsigned long *reg_read; /* register from LHREQ_GETREG */
+
+ /* These sit at the end of a page mapped over lguest_pages in the guest. */
+ unsigned long regs_page;
+ struct lguest_regs *regs;
+
+ struct lguest_pages *last_pages;
+
+ /* Initialization mode: linear map everything. */
+ bool linear_pages;
+ int cpu_pgd; /* Which pgd this cpu is currently using */
+
+ /* If a hypercall was asked for, this points to the arguments. */
+ struct hcall_args *hcall;
+ u32 next_hcall;
+
+ /* Virtual clock device */
+ struct hrtimer hrt;
+
+ /* Did the Guest tell us to halt? */
+ int halted;
+
+ /* Pending virtual interrupts */
+ DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);
+
+ struct lg_cpu_arch arch;
+};
+
+/* The private info the thread maintains about the guest. */
+struct lguest {
+ struct lguest_data __user *lguest_data;
+ struct lg_cpu cpus[NR_CPUS];
+ unsigned int nr_cpus;
+
+ /* Valid guest memory pages must be < this. */
+ u32 pfn_limit;
+
+ /* Device memory is >= pfn_limit and < device_limit. */
+ u32 device_limit;
+
+ /*
+ * This provides the offset to the base of guest-physical memory in the
+ * Launcher.
+ */
+ void __user *mem_base;
+ unsigned long kernel_address;
+
+ struct pgdir pgdirs[4];
+
+ unsigned long noirq_iret;
+
+ unsigned int stack_pages;
+ u32 tsc_khz;
+
+ /* Dead? */
+ const char *dead;
+};
+
+extern struct mutex lguest_lock;
+
+/* core.c: */
+bool lguest_address_ok(const struct lguest *lg,
+ unsigned long addr, unsigned long len);
+void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
+void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
+extern struct page **lg_switcher_pages;
+
+/*H:035
+ * Using memory-copy operations like that is usually inconvenient, so we
+ * have the following helper macros which read and write a specific type (often
+ * an unsigned long).
+ *
+ * This reads into a variable of the given type then returns that.
+ */
+#define lgread(cpu, addr, type) \
+ ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
+
+/* This checks that the variable is of the given type, then writes it out. */
+#define lgwrite(cpu, addr, type, val) \
+ do { \
+ typecheck(type, val); \
+ __lgwrite((cpu), (addr), &(val), sizeof(val)); \
+ } while(0)
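+
+/*
+ * For example, push_guest_stack() in interrupts_and_traps.c writes each
+ * word with lgwrite(cpu, *gstack, u32, val), and restore_eflags() reads
+ * one back with lgread(cpu, stack_pa + 8, u32).
+ */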
+/* (end of memory access helper routines) :*/
+
+int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
+
+/*
+ * Helper macros to obtain the first 12 or the last 20 bits, this is only the
+ * first step in the migration to the kernel types. pte_pfn is already defined
+ * in the kernel.
+ */
+#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
+#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
+#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK)
+#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT)
+
+/* interrupts_and_traps.c: */
+unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
+void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
+void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
+bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
+void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
+ u32 low, u32 hi);
+void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages);
+void pin_stack_pages(struct lg_cpu *cpu);
+void setup_default_idt_entries(struct lguest_ro_state *state,
+ const unsigned long *def);
+void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
+ const unsigned long *def);
+void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
+bool send_notify_to_eventfd(struct lg_cpu *cpu);
+void init_clockdev(struct lg_cpu *cpu);
+bool check_syscall_vector(struct lguest *lg);
+int init_interrupts(void);
+void free_interrupts(void);
+
+/* segments.c: */
+void setup_default_gdt_entries(struct lguest_ro_state *state);
+void setup_guest_gdt(struct lg_cpu *cpu);
+void load_guest_gdt_entry(struct lg_cpu *cpu, unsigned int i,
+ u32 low, u32 hi);
+void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array);
+void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt);
+void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
+
+/* page_tables.c: */
+int init_guest_pagetable(struct lguest *lg);
+void free_guest_pagetable(struct lguest *lg);
+void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
+void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i);
+#ifdef CONFIG_X86_PAE
+void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
+#endif
+void guest_pagetable_clear_all(struct lg_cpu *cpu);
+void guest_pagetable_flush_user(struct lg_cpu *cpu);
+void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
+ unsigned long vaddr, pte_t val);
+void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
+bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode,
+ unsigned long *iomem);
+void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
+bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr);
+unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
+void page_table_guest_data_init(struct lg_cpu *cpu);
+
+/* <arch>/core.c: */
+void lguest_arch_host_init(void);
+void lguest_arch_host_fini(void);
+void lguest_arch_run_guest(struct lg_cpu *cpu);
+void lguest_arch_handle_trap(struct lg_cpu *cpu);
+int lguest_arch_init_hypercalls(struct lg_cpu *cpu);
+int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args);
+void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start);
+unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any);
+
+/* <arch>/switcher.S: */
+extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
+
+/* lguest_user.c: */
+int lguest_device_init(void);
+void lguest_device_remove(void);
+
+/* hypercalls.c: */
+void do_hypercalls(struct lg_cpu *cpu);
+void write_timestamp(struct lg_cpu *cpu);
+
+/*L:035
+ * Let's step aside for the moment, to study one important routine that's used
+ * widely in the Host code.
+ *
+ * There are many cases where the Guest can do something invalid, like pass crap
+ * to a hypercall. Since only the Guest kernel can make hypercalls, it's quite
+ * acceptable to simply terminate the Guest and give the Launcher a nicely
+ * formatted reason. It's also simpler for the Guest itself, which doesn't
+ * need to check most hypercalls for "success"; if you're still running, it
+ * succeeded.
+ *
+ * Once this is called, the Guest will never run again, so most Host code can
+ * call this then continue as if nothing had happened. This means many
+ * functions don't have to explicitly return an error code, which keeps the
+ * code simple.
+ *
+ * It also means that this can be called more than once: only the first one is
+ * remembered. The only trick is that we still need to kill the Guest even if
+ * we can't allocate memory to store the reason. Linux has a neat way of
+ * packing error codes into invalid pointers, so we use that here.
+ *
+ * Like any macro which uses an "if", it is safely wrapped in a run-once "do {
+ * } while(0)".
+ */
+#define kill_guest(cpu, fmt...) \
+do { \
+ if (!(cpu)->lg->dead) { \
+ (cpu)->lg->dead = kasprintf(GFP_ATOMIC, fmt); \
+ if (!(cpu)->lg->dead) \
+ (cpu)->lg->dead = ERR_PTR(-ENOMEM); \
+ } \
+} while(0)
+/* (End of aside) :*/
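+
+/*
+ * An illustrative sketch of the error-pointer trick the aside above relies
+ * on (not part of the driver; the helper name is hypothetical).  ERR_PTR()
+ * packs a small negative errno into an invalid pointer, and IS_ERR() /
+ * PTR_ERR() from <linux/err.h> recover it, exactly as read() in
+ * lguest_user.c does:
+ */
+#if 0
+static int example_dead_reason(const struct lguest *lg)
+{
+	if (!lg->dead)
+		return 0;		  /* Guest still running. */
+	if (IS_ERR(lg->dead))
+		return PTR_ERR(lg->dead); /* e.g. -ENOMEM from kill_guest() */
+	return -EIO;			  /* Otherwise it's a message string. */
+}
+#endif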
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LGUEST_H */
diff --git a/kernel/drivers/lguest/lguest_user.c b/kernel/drivers/lguest/lguest_user.c
new file mode 100644
index 000000000..30c60687d
--- /dev/null
+++ b/kernel/drivers/lguest/lguest_user.c
@@ -0,0 +1,445 @@
+/*P:200 This contains all the /dev/lguest code, whereby the userspace
+ * launcher controls and communicates with the Guest. For example,
+ * the first write will tell us the Guest's memory layout and entry
+ * point. A read will run the Guest until something happens, such as
+ * a signal or the Guest accessing a device.
+:*/
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "lg.h"
+
+/*L:052
+ * The Launcher can get the registers, and also set some of them.
+ */
+static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input)
+{
+ unsigned long which;
+
+ /* We re-use the ptrace structure to specify which register to read. */
+ if (get_user(which, input) != 0)
+ return -EFAULT;
+
+ /*
+ * We set up the cpu register pointer, and their next read will
+ * actually get the value (instead of running the guest).
+ *
+ * The last argument 'true' says we can access any register.
+ */
+ cpu->reg_read = lguest_arch_regptr(cpu, which, true);
+ if (!cpu->reg_read)
+ return -ENOENT;
+
+ /* And because this is a write() call, we return the length used. */
+ return sizeof(unsigned long) * 2;
+}
+
+static int setreg(struct lg_cpu *cpu, const unsigned long __user *input)
+{
+ unsigned long which, value, *reg;
+
+	/* We re-use the ptrace structure to specify which register to set. */
+ if (get_user(which, input) != 0)
+ return -EFAULT;
+ input++;
+ if (get_user(value, input) != 0)
+ return -EFAULT;
+
+ /* The last argument 'false' means we can't access all registers. */
+ reg = lguest_arch_regptr(cpu, which, false);
+ if (!reg)
+ return -ENOENT;
+
+ *reg = value;
+
+ /* And because this is a write() call, we return the length used. */
+ return sizeof(unsigned long) * 3;
+}
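+
+/*
+ * The userspace side, sketched (this is Launcher code, not part of this
+ * file; it assumes the usual <unistd.h>, <err.h>, <stddef.h>, <sys/user.h>
+ * and <linux/lguest_launcher.h>, plus a hypothetical "lguest_fd").  Getting
+ * a register is two steps: write LHREQ_GETREG plus a ptrace-style offset,
+ * then read() the value back instead of running the Guest:
+ */
+#if 0
+	unsigned long req[2] = {
+		LHREQ_GETREG, offsetof(struct user_regs_struct, eip)
+	};
+	unsigned long value;
+
+	if (write(lguest_fd, req, sizeof(req)) != sizeof(req))
+		err(1, "LHREQ_GETREG");
+	if (read(lguest_fd, &value, sizeof(value)) != sizeof(value))
+		err(1, "reading register value");
+#endif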
+
+/*L:050
+ * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
+ * number to /dev/lguest.
+ */
+static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
+{
+ unsigned long irq;
+
+ if (get_user(irq, input) != 0)
+ return -EFAULT;
+ if (irq >= LGUEST_IRQS)
+ return -EINVAL;
+
+ /*
+ * Next time the Guest runs, the core code will see if it can deliver
+ * this interrupt.
+ */
+ set_interrupt(cpu, irq);
+ return 0;
+}
+
+/*L:053
+ * Deliver a trap: this is used by the Launcher if it can't emulate
+ * an instruction.
+ */
+static int trap(struct lg_cpu *cpu, const unsigned long __user *input)
+{
+ unsigned long trapnum;
+
+ if (get_user(trapnum, input) != 0)
+ return -EFAULT;
+
+ if (!deliver_trap(cpu, trapnum))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*L:040
+ * Once our Guest is initialized, the Launcher makes it run by reading
+ * from /dev/lguest.
+ */
+static ssize_t read(struct file *file, char __user *user,
+		    size_t size, loff_t *o)
+{
+ struct lguest *lg = file->private_data;
+ struct lg_cpu *cpu;
+ unsigned int cpu_id = *o;
+
+ /* You must write LHREQ_INITIALIZE first! */
+ if (!lg)
+ return -EINVAL;
+
+ /* Watch out for arbitrary vcpu indexes! */
+ if (cpu_id >= lg->nr_cpus)
+ return -EINVAL;
+
+ cpu = &lg->cpus[cpu_id];
+
+ /* If you're not the task which owns the Guest, go away. */
+ if (current != cpu->tsk)
+ return -EPERM;
+
+ /* If the Guest is already dead, we indicate why */
+ if (lg->dead) {
+ size_t len;
+
+ /* lg->dead either contains an error code, or a string. */
+ if (IS_ERR(lg->dead))
+ return PTR_ERR(lg->dead);
+
+ /* We can only return as much as the buffer they read with. */
+ len = min(size, strlen(lg->dead)+1);
+ if (copy_to_user(user, lg->dead, len) != 0)
+ return -EFAULT;
+ return len;
+ }
+
+ /*
+ * If we returned from read() last time because the Guest sent I/O,
+ * clear the flag.
+ */
+ if (cpu->pending.trap)
+ cpu->pending.trap = 0;
+
+ /* Run the Guest until something interesting happens. */
+ return run_guest(cpu, (unsigned long __user *)user);
+}
+
+/*L:025
+ * This actually initializes a CPU. For the moment, a Guest is only
+ * uniprocessor, so "id" is always 0.
+ */
+static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
+{
+ /* We have a limited number of CPUs in the lguest struct. */
+ if (id >= ARRAY_SIZE(cpu->lg->cpus))
+ return -EINVAL;
+
+ /* Set up this CPU's id, and pointer back to the lguest struct. */
+ cpu->id = id;
+ cpu->lg = container_of(cpu, struct lguest, cpus[id]);
+ cpu->lg->nr_cpus++;
+
+ /* Each CPU has a timer it can set. */
+ init_clockdev(cpu);
+
+ /*
+ * We need a complete page for the Guest registers: they are accessible
+ * to the Guest and we can only grant it access to whole pages.
+ */
+ cpu->regs_page = get_zeroed_page(GFP_KERNEL);
+ if (!cpu->regs_page)
+ return -ENOMEM;
+
+ /* We actually put the registers at the end of the page. */
+ cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);
+
+ /*
+ * Now we initialize the Guest's registers, handing it the start
+ * address.
+ */
+ lguest_arch_setup_regs(cpu, start_ip);
+
+ /*
+ * We keep a pointer to the Launcher task (ie. current task) for when
+ * other Guests want to wake this one (eg. console input).
+ */
+ cpu->tsk = current;
+
+ /*
+ * We need to keep a pointer to the Launcher's memory map, because if
+ * the Launcher dies we need to clean it up. If we don't keep a
+ * reference, it is destroyed before close() is called.
+ */
+ cpu->mm = get_task_mm(cpu->tsk);
+
+ /*
+ * We remember which CPU's pages this Guest used last, for optimization
+ * when the same Guest runs on the same CPU twice.
+ */
+ cpu->last_pages = NULL;
+
+ /* No error == success. */
+ return 0;
+}
+
+/*L:020
+ * The initialization write supplies 4 pointer sized (32 or 64 bit) values (in
+ * addition to the LHREQ_INITIALIZE value). These are:
+ *
+ * base: The start of the Guest-physical memory inside the Launcher memory.
+ *
+ * pfnlimit: The highest (Guest-physical) page number the Guest should be
+ * allowed to access. The Guest memory lives inside the Launcher, so it sets
+ * this to ensure the Guest can only reach its own memory.
+ *
+ * start: The first instruction to execute ("eip" in x86-speak).
+ *
+ * device_limit: The first (Guest-physical) page number beyond the device
+ * (I/O) region. Faults on pages at or above pfnlimit but below device_limit
+ * are handed to the Launcher to emulate (see gpte_in_iomem() in
+ * page_tables.c).
+ */
+static int initialize(struct file *file, const unsigned long __user *input)
+{
+ /* "struct lguest" contains all we (the Host) know about a Guest. */
+ struct lguest *lg;
+ int err;
+ unsigned long args[4];
+
+ /*
+ * We grab the Big Lguest lock, which protects against multiple
+ * simultaneous initializations.
+ */
+ mutex_lock(&lguest_lock);
+ /* You can't initialize twice! Close the device and start again... */
+ if (file->private_data) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ if (copy_from_user(args, input, sizeof(args)) != 0) {
+ err = -EFAULT;
+ goto unlock;
+ }
+
+ lg = kzalloc(sizeof(*lg), GFP_KERNEL);
+ if (!lg) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ /* Populate the easy fields of our "struct lguest" */
+ lg->mem_base = (void __user *)args[0];
+ lg->pfn_limit = args[1];
+ lg->device_limit = args[3];
+
+ /* This is the first cpu (cpu 0) and it will start booting at args[2] */
+ err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
+ if (err)
+ goto free_lg;
+
+ /*
+ * Initialize the Guest's shadow page tables. This allocates
+ * memory, so can fail.
+ */
+ err = init_guest_pagetable(lg);
+ if (err)
+ goto free_regs;
+
+ /* We keep our "struct lguest" in the file's private_data. */
+ file->private_data = lg;
+
+ mutex_unlock(&lguest_lock);
+
+ /* And because this is a write() call, we return the length used. */
+ return sizeof(args);
+
+free_regs:
+ /* FIXME: This should be in free_vcpu */
+ free_page(lg->cpus[0].regs_page);
+free_lg:
+ kfree(lg);
+unlock:
+ mutex_unlock(&lguest_lock);
+ return err;
+}
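+
+/*
+ * The Launcher side of this, sketched (userspace, illustrative only; the
+ * variable names are made up): five unsigned longs matching the layout
+ * documented above.
+ */
+#if 0
+	unsigned long args[] = {
+		LHREQ_INITIALIZE,
+		(unsigned long)guest_base,	/* base */
+		mem_size / getpagesize(),	/* pfnlimit */
+		entry_point,			/* start */
+		device_pfn,			/* device_limit */
+	};
+
+	if (write(lguest_fd, args, sizeof(args)) < 0)
+		err(1, "LHREQ_INITIALIZE");
+#endif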
+
+/*L:010
+ * The first operation the Launcher does must be a write. All writes
+ * start with an unsigned long number: for the first write this must be
+ * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
+ * writes of other values to send interrupts or set up receipt of notifications.
+ *
+ * Note that we overload the "offset" in the /dev/lguest file to indicate what
+ * CPU number we're dealing with. Currently this is always 0 since we only
+ * support uniprocessor Guests, but you can see the beginnings of SMP support
+ * here.
+ */
+static ssize_t write(struct file *file, const char __user *in,
+ size_t size, loff_t *off)
+{
+ /*
+ * Once the Guest is initialized, we hold the "struct lguest" in the
+ * file private data.
+ */
+ struct lguest *lg = file->private_data;
+ const unsigned long __user *input = (const unsigned long __user *)in;
+ unsigned long req;
+ struct lg_cpu *uninitialized_var(cpu);
+ unsigned int cpu_id = *off;
+
+ /* The first value tells us what this request is. */
+ if (get_user(req, input) != 0)
+ return -EFAULT;
+ input++;
+
+ /* If you haven't initialized, you must do that first. */
+ if (req != LHREQ_INITIALIZE) {
+ if (!lg || (cpu_id >= lg->nr_cpus))
+ return -EINVAL;
+ cpu = &lg->cpus[cpu_id];
+
+ /* Once the Guest is dead, you can only read() why it died. */
+ if (lg->dead)
+ return -ENOENT;
+ }
+
+ switch (req) {
+ case LHREQ_INITIALIZE:
+ return initialize(file, input);
+ case LHREQ_IRQ:
+ return user_send_irq(cpu, input);
+ case LHREQ_GETREG:
+ return getreg_setup(cpu, input);
+ case LHREQ_SETREG:
+ return setreg(cpu, input);
+ case LHREQ_TRAP:
+ return trap(cpu, input);
+ default:
+ return -EINVAL;
+ }
+}
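+
+/*
+ * Since the offset doubles as the vcpu number, a Launcher would drive vcpu
+ * N with pwrite()/pread() at offset N (userspace sketch with a hypothetical
+ * fd; today N is always 0).  Injecting an interrupt, for example:
+ */
+#if 0
+	unsigned long buf[2] = { LHREQ_IRQ, irq };
+
+	if (pwrite(lguest_fd, buf, sizeof(buf), cpu_id) < 0)
+		err(1, "LHREQ_IRQ %lu", irq);
+#endif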
+
+static int open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+
+ return 0;
+}
+
+/*L:060
+ * The final piece of interface code is the close() routine. It reverses
+ * everything done in initialize(). This is usually called because the
+ * Launcher exited.
+ *
+ * Note that the close routine returns 0 or a negative error number: it can't
+ * really fail, but it can whine. I blame Sun for this wart, and K&R C for
+ * letting them do it.
+:*/
+static int close(struct inode *inode, struct file *file)
+{
+ struct lguest *lg = file->private_data;
+ unsigned int i;
+
+ /* If we never successfully initialized, there's nothing to clean up */
+ if (!lg)
+ return 0;
+
+ /*
+ * We need the big lock, to protect from inter-guest I/O and other
+ * Launchers initializing guests.
+ */
+ mutex_lock(&lguest_lock);
+
+ /* Free up the shadow page tables for the Guest. */
+ free_guest_pagetable(lg);
+
+ for (i = 0; i < lg->nr_cpus; i++) {
+ /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
+ hrtimer_cancel(&lg->cpus[i].hrt);
+ /* We can free up the register page we allocated. */
+ free_page(lg->cpus[i].regs_page);
+ /*
+ * Now all the memory cleanups are done, it's safe to release
+ * the Launcher's memory management structure.
+ */
+ mmput(lg->cpus[i].mm);
+ }
+
+ /*
+ * If lg->dead doesn't contain an error code it will be NULL or a
+ * kmalloc()ed string, either of which is ok to hand to kfree().
+ */
+ if (!IS_ERR(lg->dead))
+ kfree(lg->dead);
+ /* Free the memory allocated to the lguest_struct */
+ kfree(lg);
+ /* Release lock and exit. */
+ mutex_unlock(&lguest_lock);
+
+ return 0;
+}
+
+/*L:000
+ * Welcome to our journey through the Launcher!
+ *
+ * The Launcher is the Host userspace program which sets up, runs and services
+ * the Guest. In fact, many comments in the Drivers which refer to "the Host"
+ * doing things are inaccurate: the Launcher does all the device handling for
+ * the Guest, but the Guest can't know that.
+ *
+ * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
+ * shall see more of that later.
+ *
+ * We begin our understanding with the Host kernel interface which the Launcher
+ * uses: reading and writing a character device called /dev/lguest. All the
+ * work happens in the read(), write() and close() routines:
+ */
+static const struct file_operations lguest_fops = {
+ .owner = THIS_MODULE,
+ .open = open,
+ .release = close,
+ .write = write,
+ .read = read,
+ .llseek = default_llseek,
+};
+/*:*/
+
+/*
+ * This is a textbook example of a "misc" character device. Populate a "struct
+ * miscdevice" and register it with misc_register().
+ */
+static struct miscdevice lguest_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "lguest",
+ .fops = &lguest_fops,
+};
+
+int __init lguest_device_init(void)
+{
+ return misc_register(&lguest_dev);
+}
+
+void __exit lguest_device_remove(void)
+{
+ misc_deregister(&lguest_dev);
+}
diff --git a/kernel/drivers/lguest/page_tables.c b/kernel/drivers/lguest/page_tables.c
new file mode 100644
index 000000000..e3abebc91
--- /dev/null
+++ b/kernel/drivers/lguest/page_tables.c
@@ -0,0 +1,1239 @@
+/*P:700
+ * The pagetable code, on the other hand, still shows the scars of
+ * previous encounters. It's functional, and as neat as it can be in the
+ * circumstances, but be wary, for these things are subtle and break easily.
+ * The Guest provides a virtual to physical mapping, but we can neither trust
+ * it nor use it: we verify and convert it here then point the CPU to the
+ * converted Guest pages when running the Guest.
+:*/
+
+/* Copyright (C) Rusty Russell IBM Corporation 2013.
+ * GPL v2 and any later version */
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/percpu.h>
+#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
+#include "lg.h"
+
+/*M:008
+ * We hold a reference to pages, which prevents them from being swapped.
+ * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
+ * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
+ * could probably consider launching Guests as non-root.
+:*/
+
+/*H:300
+ * The Page Table Code
+ *
+ * We use two-level page tables for the Guest, or three-level with PAE. If
+ * you're not entirely comfortable with virtual addresses, physical addresses
+ * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
+ * Table Handling" (with diagrams!).
+ *
+ * The Guest keeps page tables, but we maintain the actual ones here: these are
+ * called "shadow" page tables. Which is a very Guest-centric name: these are
+ * the real page tables the CPU uses, although we keep them up to date to
+ * reflect the Guest's. (See what I mean about weird naming? Since when do
+ * shadows reflect anything?)
+ *
+ * Anyway, this is the most complicated part of the Host code. There are seven
+ * parts to this:
+ * (i) Looking up a page table entry when the Guest faults,
+ * (ii) Making sure the Guest stack is mapped,
+ * (iii) Setting up a page table entry when the Guest tells us one has changed,
+ * (iv) Switching page tables,
+ * (v) Flushing (throwing away) page tables,
+ * (vi) Mapping the Switcher when the Guest is about to run,
+ * (vii) Setting up the page tables initially.
+:*/
+
+/*
+ * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB)
+ * or 512 PTE entries with PAE (2MB).
+ */
+#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
+
+/*
+ * CHECK_GPGD_MASK is the set of flags we allow in a Guest PGD entry: with
+ * PAE the top-level entries may only have Present set; without PAE they
+ * look like ordinary page table entries (_PAGE_TABLE).
+ */
+#ifdef CONFIG_X86_PAE
+#define CHECK_GPGD_MASK _PAGE_PRESENT
+#else
+#define CHECK_GPGD_MASK _PAGE_TABLE
+#endif
+
+/*H:320
+ * The page table code is curly enough to need helper functions to keep it
+ * clear and clean. The kernel itself provides many of them; that's one
+ * advantage of insisting that the Guest and Host use the same CONFIG_X86_PAE
+ * setting.
+ *
+ * There are two functions which return pointers to the shadow (aka "real")
+ * page tables.
+ *
+ * spgd_addr() takes the virtual address and returns a pointer to the top-level
+ * page directory entry (PGD) for that address. Since we keep track of several
+ * page tables, the "i" argument tells us which one we're interested in (it's
+ * usually the current one).
+ */
+static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
+{
+ unsigned int index = pgd_index(vaddr);
+
+	/* Return a pointer to the index'th pgd entry for the i'th page table. */
+ return &cpu->lg->pgdirs[i].pgdir[index];
+}
+
+#ifdef CONFIG_X86_PAE
+/*
+ * This routine then takes the PGD entry given above, which contains the
+ * address of the PMD page. It then returns a pointer to the PMD entry for the
+ * given address.
+ */
+static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
+{
+ unsigned int index = pmd_index(vaddr);
+ pmd_t *page;
+
+ /* You should never call this if the PGD entry wasn't valid */
+ BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
+ page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+
+ return &page[index];
+}
+#endif
+
+/*
+ * This routine then takes the page directory entry returned above, which
+ * contains the address of the page table entry (PTE) page. It then returns a
+ * pointer to the PTE entry for the given address.
+ */
+static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
+{
+#ifdef CONFIG_X86_PAE
+ pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
+ pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
+
+ /* You should never call this if the PMD entry wasn't valid */
+ BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
+#else
+ pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
+ /* You should never call this if the PGD entry wasn't valid */
+ BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
+#endif
+
+ return &page[pte_index(vaddr)];
+}
+
+/*
+ * These functions are just like the above, except they access the Guest
+ * page tables. Hence they return a Guest address.
+ */
+static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
+{
+ unsigned int index = vaddr >> (PGDIR_SHIFT);
+ return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
+}
+
+#ifdef CONFIG_X86_PAE
+/* Follow the PGD to the PMD. */
+static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
+{
+ unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
+ BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
+ return gpage + pmd_index(vaddr) * sizeof(pmd_t);
+}
+
+/* Follow the PMD to the PTE. */
+static unsigned long gpte_addr(struct lg_cpu *cpu,
+ pmd_t gpmd, unsigned long vaddr)
+{
+ unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
+
+ BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
+ return gpage + pte_index(vaddr) * sizeof(pte_t);
+}
+#else
+/* Follow the PGD to the PTE (no mid-level for !PAE). */
+static unsigned long gpte_addr(struct lg_cpu *cpu,
+ pgd_t gpgd, unsigned long vaddr)
+{
+ unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
+
+ BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
+ return gpage + pte_index(vaddr) * sizeof(pte_t);
+}
+#endif
+/*:*/
+
+/*M:007
+ * get_pfn is slow: we could probably try to grab batches of pages here as
+ * an optimization (ie. pre-faulting).
+:*/
+
+/*H:350
+ * This routine takes a page number given by the Guest and converts it to
+ * an actual, physical page number. It can fail for several reasons: the
+ * virtual address might not be mapped by the Launcher, the write flag is set
+ * and the page is read-only, or the write flag was set and the page was
+ * shared so had to be copied, but we ran out of memory.
+ *
+ * This holds a reference to the page, so release_pte() is careful to put that
+ * back.
+ */
+static unsigned long get_pfn(unsigned long virtpfn, int write)
+{
+ struct page *page;
+
+ /* gup me one page at this address please! */
+ if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
+ return page_to_pfn(page);
+
+ /* This value indicates failure. */
+ return -1UL;
+}
+
+/*H:340
+ * Converting a Guest page table entry to a shadow (ie. real) page table
+ * entry can be a little tricky. The flags are (almost) the same, but the
+ * Guest PTE contains a virtual page number: the CPU needs the real page
+ * number.
+ */
+static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
+{
+ unsigned long pfn, base, flags;
+
+ /*
+ * The Guest sets the global flag, because it thinks that it is using
+ * PGE. We only told it to use PGE so it would tell us whether it was
+ * flushing a kernel mapping or a userspace mapping. We don't actually
+ * use the global bit, so throw it away.
+ */
+ flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
+
+ /* The Guest's pages are offset inside the Launcher. */
+ base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
+
+ /*
+ * We need a temporary "unsigned long" variable to hold the answer from
+ * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
+ * fit in spte.pfn. get_pfn() finds the real physical number of the
+ * page, given the virtual number.
+ */
+ pfn = get_pfn(base + pte_pfn(gpte), write);
+ if (pfn == -1UL) {
+ kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
+ /*
+ * When we destroy the Guest, we'll go through the shadow page
+ * tables and release_pte() them. Make sure we don't think
+ * this one is valid!
+ */
+ flags = 0;
+ }
+ /* Now we assemble our shadow PTE from the page number and flags. */
+ return pfn_pte(pfn, __pgprot(flags));
+}
+
+/*H:460 And to complete the chain, release_pte() looks like this: */
+static void release_pte(pte_t pte)
+{
+ /*
+ * Remember that get_user_pages_fast() took a reference to the page, in
+ * get_pfn()? We have to put it back now.
+ */
+ if (pte_flags(pte) & _PAGE_PRESENT)
+ put_page(pte_page(pte));
+}
+/*:*/
+
+static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
+{
+ /* We don't handle large pages. */
+ if (pte_flags(gpte) & _PAGE_PSE)
+ return false;
+
+ return (pte_pfn(gpte) >= cpu->lg->pfn_limit
+ && pte_pfn(gpte) < cpu->lg->device_limit);
+}
+
+static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
+{
+ if ((pte_flags(gpte) & _PAGE_PSE) ||
+ pte_pfn(gpte) >= cpu->lg->pfn_limit) {
+ kill_guest(cpu, "bad page table entry");
+ return false;
+ }
+ return true;
+}
+
+static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
+{
+ if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
+ (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
+ kill_guest(cpu, "bad page directory entry");
+ return false;
+ }
+ return true;
+}
+
+#ifdef CONFIG_X86_PAE
+static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
+{
+ if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
+ (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
+ kill_guest(cpu, "bad page middle directory entry");
+ return false;
+ }
+ return true;
+}
+#endif
+
+/*H:331
+ * This is the core routine to walk the shadow page tables and find the page
+ * table entry for a specific address.
+ *
+ * If allocate is set, then we allocate any missing levels, setting the flags
+ * on the new page directory and mid-level directories using the arguments
+ * (which are copied from the Guest's page table entries).
+ */
+static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
+ int pgd_flags, int pmd_flags)
+{
+ pgd_t *spgd;
+ /* Mid level for PAE. */
+#ifdef CONFIG_X86_PAE
+ pmd_t *spmd;
+#endif
+
+ /* Get top level entry. */
+ spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
+ if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
+ /* No shadow entry: allocate a new shadow PTE page. */
+ unsigned long ptepage;
+
+ /* If they didn't want us to allocate anything, stop. */
+ if (!allocate)
+ return NULL;
+
+ ptepage = get_zeroed_page(GFP_KERNEL);
+ /*
+ * This is not really the Guest's fault, but killing it is
+ * simple for this corner case.
+ */
+ if (!ptepage) {
+ kill_guest(cpu, "out of memory allocating pte page");
+ return NULL;
+ }
+ /*
+ * And we copy the flags to the shadow PGD entry. The page
+ * number in the shadow PGD is the page we just allocated.
+ */
+ set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
+ }
+
+ /*
+ * Intel's Physical Address Extension actually uses three levels of
+ * page tables, so we need to look in the mid-level.
+ */
+#ifdef CONFIG_X86_PAE
+ /* Now look at the mid-level shadow entry. */
+ spmd = spmd_addr(cpu, *spgd, vaddr);
+
+ if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
+ /* No shadow entry: allocate a new shadow PTE page. */
+ unsigned long ptepage;
+
+ /* If they didn't want us to allocate anything, stop. */
+ if (!allocate)
+ return NULL;
+
+ ptepage = get_zeroed_page(GFP_KERNEL);
+
+ /*
+ * This is not really the Guest's fault, but killing it is
+ * simple for this corner case.
+ */
+ if (!ptepage) {
+ kill_guest(cpu, "out of memory allocating pmd page");
+ return NULL;
+ }
+
+ /*
+ * And we copy the flags to the shadow PMD entry. The page
+ * number in the shadow PMD is the page we just allocated.
+ */
+ set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
+ }
+#endif
+
+ /* Get the pointer to the shadow PTE entry we're going to set. */
+ return spte_addr(cpu, *spgd, vaddr);
+}
+
+/*H:330
+ * (i) Looking up a page table entry when the Guest faults.
+ *
+ * We saw this call in run_guest(): when we see a page fault in the Guest, we
+ * come here. That's because we only set up the shadow page tables lazily as
+ * they're needed, so we get page faults all the time and quietly fix them up
+ * and return to the Guest without it knowing.
+ *
+ * If we fixed up the fault (ie. we mapped the address), this routine returns
+ * true. Otherwise, it was a real fault and we need to tell the Guest.
+ *
+ * There's a corner case: they're trying to access memory between
+ * pfn_limit and device_limit, which is I/O memory. In this case, we
+ * return false and set @iomem to the physical address, so the Launcher
+ * can handle the instruction manually.
+ */
+bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
+ unsigned long *iomem)
+{
+ unsigned long gpte_ptr;
+ pte_t gpte;
+ pte_t *spte;
+ pmd_t gpmd;
+ pgd_t gpgd;
+
+ *iomem = 0;
+
+ /* We never demand page the Switcher, so trying is a mistake. */
+ if (vaddr >= switcher_addr)
+ return false;
+
+ /* First step: get the top-level Guest page table entry. */
+ if (unlikely(cpu->linear_pages)) {
+ /* Faking up a linear mapping. */
+ gpgd = __pgd(CHECK_GPGD_MASK);
+ } else {
+ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+ /* Toplevel not present? We can't map it in. */
+ if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+ return false;
+
+ /*
+ * This kills the Guest if it has weird flags or tries to
+ * refer to a "physical" address outside the bounds.
+ */
+ if (!check_gpgd(cpu, gpgd))
+ return false;
+ }
+
+ /* This "mid-level" entry is only used for non-linear, PAE mode. */
+ gpmd = __pmd(_PAGE_TABLE);
+
+#ifdef CONFIG_X86_PAE
+ if (likely(!cpu->linear_pages)) {
+ gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
+ /* Middle level not present? We can't map it in. */
+ if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+ return false;
+
+ /*
+ * This kills the Guest if it has weird flags or tries to
+ * refer to a "physical" address outside the bounds.
+ */
+ if (!check_gpmd(cpu, gpmd))
+ return false;
+ }
+
+ /*
+ * OK, now we look at the lower level in the Guest page table: keep its
+ * address, because we might update it later.
+ */
+ gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
+#else
+ /*
+ * OK, now we look at the lower level in the Guest page table: keep its
+ * address, because we might update it later.
+ */
+ gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
+#endif
+
+ if (unlikely(cpu->linear_pages)) {
+ /* Linear? Make up a PTE which points to same page. */
+ gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
+ } else {
+ /* Read the actual PTE value. */
+ gpte = lgread(cpu, gpte_ptr, pte_t);
+ }
+
+ /* If this page isn't in the Guest page tables, we can't page it in. */
+ if (!(pte_flags(gpte) & _PAGE_PRESENT))
+ return false;
+
+ /*
+ * Check they're not trying to write to a page the Guest wants
+	 * read-only (errcode & 2 means it was a write access).
+ */
+ if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
+ return false;
+
+	/* User access to a kernel-only page? (errcode & 4 means user access.) */
+ if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
+ return false;
+
+ /* If they're accessing io memory, we expect a fault. */
+ if (gpte_in_iomem(cpu, gpte)) {
+ *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
+ return false;
+ }
+
+ /*
+ * Check that the Guest PTE flags are OK, and the page number is below
+ * the pfn_limit (ie. not mapping the Launcher binary).
+ */
+ if (!check_gpte(cpu, gpte))
+ return false;
+
+ /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
+ gpte = pte_mkyoung(gpte);
+ if (errcode & 2)
+ gpte = pte_mkdirty(gpte);
+
+ /* Get the pointer to the shadow PTE entry we're going to set. */
+ spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
+ if (!spte)
+ return false;
+
+ /*
+ * If there was a valid shadow PTE entry here before, we release it.
+ * This can happen with a write to a previously read-only entry.
+ */
+ release_pte(*spte);
+
+ /*
+ * If this is a write, we insist that the Guest page is writable (the
+ * final arg to gpte_to_spte()).
+ */
+ if (pte_dirty(gpte))
+ *spte = gpte_to_spte(cpu, gpte, 1);
+ else
+ /*
+ * If this is a read, don't set the "writable" bit in the page
+ * table entry, even if the Guest says it's writable. That way
+ * we will come back here when a write does actually occur, so
+ * we can update the Guest's _PAGE_DIRTY flag.
+ */
+ set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
+
+ /*
+ * Finally, we write the Guest PTE entry back: we've set the
+ * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
+ */
+ if (likely(!cpu->linear_pages))
+ lgwrite(cpu, gpte_ptr, pte_t, gpte);
+
+ /*
+ * The fault is fixed, the page table is populated, the mapping
+ * manipulated, the result returned and the code complete. A small
+ * delay and a trace of alliteration are the only indications the Guest
+ * has that a page fault occurred at all.
+ */
+ return true;
+}
+
+/*H:360
+ * (ii) Making sure the Guest stack is mapped.
+ *
+ * Remember that direct traps into the Guest need a mapped Guest kernel stack.
+ * pin_stack_pages() calls us here: we could simply call demand_page(), but as
+ * we've seen that logic is quite long, and usually the stack pages are already
+ * mapped, so it's overkill.
+ *
+ * This is a quick version which answers the question: is this virtual address
+ * mapped by the shadow page tables, and is it writable?
+ */
+static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
+{
+ pte_t *spte;
+ unsigned long flags;
+
+ /* You can't put your stack in the Switcher! */
+ if (vaddr >= switcher_addr)
+ return false;
+
+ /* If there's no shadow PTE, it's not writable. */
+ spte = find_spte(cpu, vaddr, false, 0, 0);
+ if (!spte)
+ return false;
+
+ /*
+ * Check the flags on the pte entry itself: it must be present and
+ * writable.
+ */
+ flags = pte_flags(*spte);
+ return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
+}
+
+/*
+ * So, when pin_stack_pages() asks us to pin a page, we check if it's already
+ * in the page tables, and if not, we call demand_page() with error code 2
+ * (meaning "write").
+ */
+void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
+{
+ unsigned long iomem;
+
+ if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
+ kill_guest(cpu, "bad stack page %#lx", vaddr);
+}
+/*:*/
+
+#ifdef CONFIG_X86_PAE
+static void release_pmd(pmd_t *spmd)
+{
+ /* If the entry's not present, there's nothing to release. */
+ if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+ unsigned int i;
+ pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
+ /* For each entry in the page, we might need to release it. */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ release_pte(ptepage[i]);
+ /* Now we can free the page of PTEs */
+ free_page((long)ptepage);
+ /* And zero out the PMD entry so we never release it twice. */
+ set_pmd(spmd, __pmd(0));
+ }
+}
+
+static void release_pgd(pgd_t *spgd)
+{
+ /* If the entry's not present, there's nothing to release. */
+ if (pgd_flags(*spgd) & _PAGE_PRESENT) {
+ unsigned int i;
+ pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ release_pmd(&pmdpage[i]);
+
+ /* Now we can free the page of PMDs */
+ free_page((long)pmdpage);
+ /* And zero out the PGD entry so we never release it twice. */
+ set_pgd(spgd, __pgd(0));
+ }
+}
+
+#else /* !CONFIG_X86_PAE */
+/*H:450
+ * If we chase down the release_pgd() code, the non-PAE version looks like
+ * this. The PAE version is almost identical, but instead of calling
+ * release_pte it calls release_pmd(), which looks much like this.
+ */
+static void release_pgd(pgd_t *spgd)
+{
+ /* If the entry's not present, there's nothing to release. */
+ if (pgd_flags(*spgd) & _PAGE_PRESENT) {
+ unsigned int i;
+ /*
+ * Converting the pfn to find the actual PTE page is easy: turn
+ * the page number into a physical address, then convert to a
+ * virtual address (easy for kernel pages like this one).
+ */
+ pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
+ /* For each entry in the page, we might need to release it. */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ release_pte(ptepage[i]);
+ /* Now we can free the page of PTEs */
+ free_page((long)ptepage);
+ /* And zero out the PGD entry so we never release it twice. */
+ *spgd = __pgd(0);
+ }
+}
+#endif
+
+/*H:445
+ * We saw flush_user_mappings() twice: once from guest_pagetable_flush_user()
+ * below (reached via the LHCALL_FLUSH_TLB hypercall) and once in new_pgdir()
+ * when we re-used a top-level pgdir page. It simply releases every PTE page
+ * from 0 up to the Guest's kernel address.
+ */
+static void flush_user_mappings(struct lguest *lg, int idx)
+{
+ unsigned int i;
+ /* Release every pgd entry up to the kernel's address. */
+ for (i = 0; i < pgd_index(lg->kernel_address); i++)
+ release_pgd(lg->pgdirs[idx].pgdir + i);
+}
+
+/*H:440
+ * (v) Flushing (throwing away) page tables,
+ *
+ * The Guest has a hypercall to throw away the page tables: it's used when a
+ * large number of mappings have been changed.
+ */
+void guest_pagetable_flush_user(struct lg_cpu *cpu)
+{
+ /* Drop the userspace part of the current page table. */
+ flush_user_mappings(cpu->lg, cpu->cpu_pgd);
+}
+/*:*/
+
+/* We walk down the guest page tables to get a guest-physical address */
+bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
+{
+ pgd_t gpgd;
+ pte_t gpte;
+#ifdef CONFIG_X86_PAE
+ pmd_t gpmd;
+#endif
+
+ /* Still not set up? Just map 1:1. */
+ if (unlikely(cpu->linear_pages)) {
+ *paddr = vaddr;
+ return true;
+ }
+
+ /* First step: get the top-level Guest page table entry. */
+ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+ /* Toplevel not present? We can't map it in. */
+ if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+ goto fail;
+
+#ifdef CONFIG_X86_PAE
+ gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
+ if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+ goto fail;
+ gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
+#else
+ gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
+#endif
+ if (!(pte_flags(gpte) & _PAGE_PRESENT))
+ goto fail;
+
+ *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
+ return true;
+
+fail:
+ *paddr = -1UL;
+ return false;
+}
+
+/*
+ * This is the version we normally use: kills the Guest if it uses a
+ * bad address
+ */
+unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
+{
+ unsigned long paddr;
+
+ if (!__guest_pa(cpu, vaddr, &paddr))
+ kill_guest(cpu, "Bad address %#lx", vaddr);
+ return paddr;
+}
+
+/*
+ * We keep several page tables. This is a simple routine to find the page
+ * table (if any) corresponding to this top-level address the Guest has given
+ * us.
+ */
+static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
+{
+ unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+ if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
+ break;
+ return i;
+}
+
+/*H:435
+ * And this is us, creating the new page directory. If we really do
+ * allocate a new one (and so the kernel parts are not there), we set
+ * blank_pgdir.
+ */
+static unsigned int new_pgdir(struct lg_cpu *cpu,
+ unsigned long gpgdir,
+ int *blank_pgdir)
+{
+ unsigned int next;
+
+ /*
+ * We pick one entry at random to throw out. Choosing the Least
+ * Recently Used might be better, but this is easy.
+ */
+ next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
+ /* If it's never been allocated at all before, try now. */
+ if (!cpu->lg->pgdirs[next].pgdir) {
+ cpu->lg->pgdirs[next].pgdir =
+ (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ /* If the allocation fails, just keep using the one we have */
+ if (!cpu->lg->pgdirs[next].pgdir)
+ next = cpu->cpu_pgd;
+ else {
+ /*
+ * This is a blank page, so there are no kernel
+ * mappings: caller must map the stack!
+ */
+ *blank_pgdir = 1;
+ }
+ }
+ /* Record which Guest toplevel this shadows. */
+ cpu->lg->pgdirs[next].gpgdir = gpgdir;
+ /* Release all the non-kernel mappings. */
+ flush_user_mappings(cpu->lg, next);
+
+ /* This hasn't run on any CPU at all. */
+ cpu->lg->pgdirs[next].last_host_cpu = -1;
+
+ return next;
+}
+
+/*H:501
+ * We do need the Switcher code mapped at all times, so we allocate that
+ * part of the Guest page table here. We map the Switcher code immediately,
+ * but defer mapping of the Guest register page and the IDT/GDT etc. page
+ * until just before we run the Guest in map_switcher_in_guest().
+ *
+ * We *could* do this setup in map_switcher_in_guest(), but at that point
+ * we have interrupts disabled, and allocating pages like that is fraught: we
+ * can't sleep if we need to free up some memory.
+ */
+static bool allocate_switcher_mapping(struct lg_cpu *cpu)
+{
+ int i;
+
+ for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
+ pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
+ CHECK_GPGD_MASK, _PAGE_TABLE);
+ if (!pte)
+ return false;
+
+ /*
+ * Map the switcher page if not already there. It might
+ * already be there because we call allocate_switcher_mapping()
+ * in guest_set_pgd() just in case it did discard our Switcher
+ * mapping, but it probably didn't.
+ */
+ if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
+ /* Get a reference to the Switcher page. */
+ get_page(lg_switcher_pages[0]);
+			/* Create a read-only, executable, kernel-style PTE */
+ set_pte(pte,
+ mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
+ }
+ }
+ cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
+ return true;
+}
+
+/*H:470
+ * Finally, a routine which throws away everything: all PGD entries in all
+ * the shadow page tables, including the Guest's kernel mappings. This is used
+ * when we destroy the Guest.
+ */
+static void release_all_pagetables(struct lguest *lg)
+{
+ unsigned int i, j;
+
+ /* Every shadow pagetable this Guest has */
+ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
+ if (!lg->pgdirs[i].pgdir)
+ continue;
+
+ /* Every PGD entry. */
+ for (j = 0; j < PTRS_PER_PGD; j++)
+ release_pgd(lg->pgdirs[i].pgdir + j);
+ lg->pgdirs[i].switcher_mapped = false;
+ lg->pgdirs[i].last_host_cpu = -1;
+ }
+}
+
+/*
+ * We also throw away everything when a Guest tells us it's changed a kernel
+ * mapping. Since kernel mappings are in every page table, it's easiest to
+ * throw them all away. This traps the Guest in amber for a while as
+ * everything faults back in, but it's rare.
+ */
+void guest_pagetable_clear_all(struct lg_cpu *cpu)
+{
+ release_all_pagetables(cpu->lg);
+ /* We need the Guest kernel stack mapped again. */
+ pin_stack_pages(cpu);
+ /* And we need Switcher allocated. */
+ if (!allocate_switcher_mapping(cpu))
+ kill_guest(cpu, "Cannot populate switcher mapping");
+}
+
+/*H:430
+ * (iv) Switching page tables
+ *
+ * Now we've seen all the page table setting and manipulation, let's see
+ * what happens when the Guest changes page tables (ie. changes the top-level
+ * pgdir). This occurs on almost every context switch.
+ */
+void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
+{
+ int newpgdir, repin = 0;
+
+ /*
+ * The very first time they call this, we're actually running without
+ * any page tables; we've been making it up. Throw them away now.
+ */
+ if (unlikely(cpu->linear_pages)) {
+ release_all_pagetables(cpu->lg);
+ cpu->linear_pages = false;
+ /* Force allocation of a new pgdir. */
+ newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
+ } else {
+ /* Look to see if we have this one already. */
+ newpgdir = find_pgdir(cpu->lg, pgtable);
+ }
+
+ /*
+ * If not, we allocate or mug an existing one: if it's a fresh one,
+ * repin gets set to 1.
+ */
+ if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
+ newpgdir = new_pgdir(cpu, pgtable, &repin);
+ /* Change the current pgd index to the new one. */
+ cpu->cpu_pgd = newpgdir;
+ /*
+ * If it was completely blank, we map in the Guest kernel stack and
+ * the Switcher.
+ */
+ if (repin)
+ pin_stack_pages(cpu);
+
+ if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
+ if (!allocate_switcher_mapping(cpu))
+ kill_guest(cpu, "Cannot populate switcher mapping");
+ }
+}
+/*:*/
+
+/*M:009
+ * Since we throw away all mappings when a kernel mapping changes, our
+ * performance sucks for guests using highmem. In fact, a guest with
+ * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
+ * usually slower than a Guest with less memory.
+ *
+ * This, of course, cannot be fixed. It would take some kind of... well, I
+ * don't know, but the term "puissant code-fu" comes to mind.
+:*/
+
+/*H:420
+ * This is the routine which actually sets the page table entry for the
+ * "idx"'th shadow page table.
+ *
+ * Normally, we can just throw out the old entry and replace it with 0: if they
+ * use it demand_page() will put the new entry in. We need to do this anyway:
+ * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
+ * is read from, and _PAGE_DIRTY when it's written to.
+ *
+ * But Avi Kivity pointed out that most Operating Systems (Linux included) set
+ * these bits on PTEs immediately anyway. This is done to save the CPU from
+ * having to update them, but it helps us the same way: if they set
+ * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
+ * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
+ */
+static void __guest_set_pte(struct lg_cpu *cpu, int idx,
+ unsigned long vaddr, pte_t gpte)
+{
+ /* Look up the matching shadow page directory entry. */
+ pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
+#ifdef CONFIG_X86_PAE
+ pmd_t *spmd;
+#endif
+
+ /* If the top level isn't present, there's no entry to update. */
+ if (pgd_flags(*spgd) & _PAGE_PRESENT) {
+#ifdef CONFIG_X86_PAE
+ spmd = spmd_addr(cpu, *spgd, vaddr);
+ if (pmd_flags(*spmd) & _PAGE_PRESENT) {
+#endif
+ /* Otherwise, start by releasing the existing entry. */
+ pte_t *spte = spte_addr(cpu, *spgd, vaddr);
+ release_pte(*spte);
+
+ /*
+ * If they're setting this entry as dirty or accessed,
+ * we might as well put that entry they've given us in
+ * now. This shaves 10% off a copy-on-write
+ * micro-benchmark.
+ */
+ if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
+ && !gpte_in_iomem(cpu, gpte)) {
+ if (!check_gpte(cpu, gpte))
+ return;
+ set_pte(spte,
+ gpte_to_spte(cpu, gpte,
+ pte_flags(gpte) & _PAGE_DIRTY));
+ } else {
+ /*
+ * Otherwise kill it and we can demand_page()
+ * it in later.
+ */
+ set_pte(spte, __pte(0));
+ }
+#ifdef CONFIG_X86_PAE
+ }
+#endif
+ }
+}
+
+/*H:410
+ * Updating a PTE entry is a little trickier.
+ *
+ * We keep track of several different page tables (the Guest uses one for each
+ * process, so it makes sense to cache at least a few). Each of these have
+ * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
+ * all processes. So when the page table above that address changes, we update
+ * all the page tables, not just the current one. This is rare.
+ *
+ * The benefit is that when we have to track a new page table, we can keep all
+ * the kernel mappings. This speeds up context switch immensely.
+ */
+void guest_set_pte(struct lg_cpu *cpu,
+ unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
+{
+ /* We don't let you remap the Switcher; we need it to get back! */
+ if (vaddr >= switcher_addr) {
+ kill_guest(cpu, "attempt to set pte into Switcher pages");
+ return;
+ }
+
+ /*
+ * Kernel mappings must be changed on all top levels. Slow, but doesn't
+ * happen often.
+ */
+ if (vaddr >= cpu->lg->kernel_address) {
+ unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
+ if (cpu->lg->pgdirs[i].pgdir)
+ __guest_set_pte(cpu, i, vaddr, gpte);
+ } else {
+ /* Is this page table one we have a shadow for? */
+ int pgdir = find_pgdir(cpu->lg, gpgdir);
+ if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
+ /* If so, do the update. */
+ __guest_set_pte(cpu, pgdir, vaddr, gpte);
+ }
+}
+
+/*H:400
+ * (iii) Setting up a page table entry when the Guest tells us one has changed.
+ *
+ * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
+ * with the other side of page tables while we're here: what happens when the
+ * Guest asks for a page table to be updated?
+ *
+ * We already saw that demand_page() will fill in the shadow page tables when
+ * needed, so we can simply remove shadow page table entries whenever the Guest
+ * tells us they've changed. When the Guest tries to use the new entry it will
+ * fault and demand_page() will fix it up.
+ *
+ * So with that in mind here's our code to update a (top-level) PGD entry:
+ */
+void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
+{
+ int pgdir;
+
+	if (idx >= PTRS_PER_PGD) {
+ kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
+ idx, PTRS_PER_PGD);
+ return;
+ }
+
+ /* If they're talking about a page table we have a shadow for... */
+ pgdir = find_pgdir(lg, gpgdir);
+ if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
+ /* ... throw it away. */
+ release_pgd(lg->pgdirs[pgdir].pgdir + idx);
+ /* That might have been the Switcher mapping, remap it. */
+ if (!allocate_switcher_mapping(&lg->cpus[0])) {
+ kill_guest(&lg->cpus[0],
+ "Cannot populate switcher mapping");
+ }
+ lg->pgdirs[pgdir].last_host_cpu = -1;
+ }
+}
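+
+/*
+ * For reference, the Guest side that triggers this lives in
+ * arch/x86/lguest/boot.c; the hypercall looks roughly like this sketch
+ * (argument details may differ):
+ *
+ *	lazy_hcall2(LHCALL_SET_PGD, __pa(pgdir) & PAGE_MASK, idx);
+ */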
+
+#ifdef CONFIG_X86_PAE
+/* For setting a mid-level, we just throw everything away. It's easy. */
+void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
+{
+ guest_pagetable_clear_all(&lg->cpus[0]);
+}
+#endif
+
+/*H:500
+ * (vii) Setting up the page tables initially.
+ *
+ * When a Guest is first created, we initialize a shadow page table which
+ * we will populate on future faults. The Guest doesn't have any actual
+ * pagetables yet, so we set linear_pages to tell demand_page() to fake it
+ * for the moment.
+ *
+ * We do need the Switcher to be mapped at all times, so we allocate that
+ * part of the Guest page table here.
+ */
+int init_guest_pagetable(struct lguest *lg)
+{
+ struct lg_cpu *cpu = &lg->cpus[0];
+ int allocated = 0;
+
+ /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
+ cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
+ if (!allocated)
+ return -ENOMEM;
+
+	/* We start with a linear mapping until the Guest sets real page tables. */
+ cpu->linear_pages = true;
+
+ /* Allocate the page tables for the Switcher. */
+ if (!allocate_switcher_mapping(cpu)) {
+ release_all_pagetables(lg);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
+void page_table_guest_data_init(struct lg_cpu *cpu)
+{
+ /*
+ * We tell the Guest that it can't use the virtual addresses
+ * used by the Switcher. This trick is equivalent to 4GB -
+ * switcher_addr.
+ */
+ u32 top = ~switcher_addr + 1;
+
+ /* We get the kernel address: above this is all kernel memory. */
+ if (get_user(cpu->lg->kernel_address,
+ &cpu->lg->lguest_data->kernel_address)
+ /*
+ * We tell the Guest that it can't use the top virtual
+ * addresses (used by the Switcher).
+ */
+ || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
+ kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
+ return;
+ }
+
+ /*
+ * In flush_user_mappings() we loop from 0 to
+ * "pgd_index(lg->kernel_address)". This assumes it won't hit the
+ * Switcher mappings, so check that now.
+ */
+ if (cpu->lg->kernel_address >= switcher_addr)
+ kill_guest(cpu, "bad kernel address %#lx",
+ cpu->lg->kernel_address);
+}
+
+/* When a Guest dies, our cleanup is fairly simple. */
+void free_guest_pagetable(struct lguest *lg)
+{
+ unsigned int i;
+
+ /* Throw away all page table pages. */
+ release_all_pagetables(lg);
+ /* Now free the top levels: free_page() can handle 0 just fine. */
+ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
+ free_page((long)lg->pgdirs[i].pgdir);
+}
+
+/*H:481
+ * This clears the Switcher mappings for cpu #i.
+ */
+static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
+{
+ unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
+ pte_t *pte;
+
+ /* Clear the mappings for both pages. */
+ pte = find_spte(cpu, base, false, 0, 0);
+ release_pte(*pte);
+ set_pte(pte, __pte(0));
+
+ pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
+ release_pte(*pte);
+ set_pte(pte, __pte(0));
+}
+
+/*H:480
+ * (vi) Mapping the Switcher when the Guest is about to run.
+ *
+ * The Switcher and the two pages for this CPU need to be visible in the Guest
+ * (and not the pages for other CPUs).
+ *
+ * The pages for the pagetables have all been allocated before: we just need
+ * to make sure the actual PTEs are up-to-date for the CPU we're about to run
+ * on.
+ */
+void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
+{
+ unsigned long base;
+ struct page *percpu_switcher_page, *regs_page;
+ pte_t *pte;
+ struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
+
+ /* Switcher page should always be mapped by now! */
+ BUG_ON(!pgdir->switcher_mapped);
+
+ /*
+ * Remember that we have two pages for each Host CPU, so we can run a
+ * Guest on each CPU without them interfering. We need to make sure
+ * those pages are mapped correctly in the Guest, but since we usually
+ * run on the same CPU, we cache that, and only update the mappings
+ * when we move.
+ */
+ if (pgdir->last_host_cpu == raw_smp_processor_id())
+ return;
+
+ /* -1 means unknown so we remove everything. */
+ if (pgdir->last_host_cpu == -1) {
+ unsigned int i;
+ for_each_possible_cpu(i)
+ remove_switcher_percpu_map(cpu, i);
+ } else {
+ /* We know exactly what CPU mapping to remove. */
+ remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
+ }
+
+ /*
+ * When we're running the Guest, we want the Guest's "regs" page to
+ * appear where the first Switcher page for this CPU is. This is an
+ * optimization: when the Switcher saves the Guest registers, it saves
+ * them into the first page of this CPU's "struct lguest_pages": if we
+ * make sure the Guest's register page is already mapped there, we
+ * don't have to copy them out again.
+ */
+ /* Find the shadow PTE for this regs page. */
+ base = switcher_addr + PAGE_SIZE
+ + raw_smp_processor_id() * sizeof(struct lguest_pages);
+ pte = find_spte(cpu, base, false, 0, 0);
+ regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
+ get_page(regs_page);
+ set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
+
+ /*
+ * We map the second page of the struct lguest_pages read-only in
+ * the Guest: the IDT, GDT and other things it's not supposed to
+ * change.
+ */
+ pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
+ percpu_switcher_page
+ = lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
+ get_page(percpu_switcher_page);
+ set_pte(pte, mk_pte(percpu_switcher_page,
+ __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
+
+ pgdir->last_host_cpu = raw_smp_processor_id();
+}
+
+/*H:490
+ * We've made it through the page table code. Perhaps our tired brains are
+ * still processing the details, or perhaps we're simply glad it's over.
+ *
+ * If nothing else, note that all this complexity in juggling shadow page tables
+ * in sync with the Guest's page tables is for one reason: for most Guests this
+ * page table dance determines how bad performance will be. This is why Xen
+ * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
+ * have implemented shadow page table support directly into hardware.
+ *
+ * There is just one file remaining in the Host.
+ */
diff --git a/kernel/drivers/lguest/segments.c b/kernel/drivers/lguest/segments.c
new file mode 100644
index 000000000..c4fb424df
--- /dev/null
+++ b/kernel/drivers/lguest/segments.c
@@ -0,0 +1,228 @@
+/*P:600
+ * The x86 architecture has segments, which involve a table of descriptors
+ * which can be used to do funky things with virtual address interpretation.
+ * We originally used segments so the Guest couldn't alter the
+ * Guest<->Host Switcher, and then we had to trim Guest segments, and restore
+ * them for userspace per-thread segments, but trim again on userspace->kernel
+ * transitions... This nightmarish creation was contained within this file,
+ * where we knew not to tread without heavy armament and a change of underwear.
+ *
+ * In these modern times, the segment handling code consists of simple sanity
+ * checks, and the worst you'll experience reading this code is butterfly-rash
+ * from frolicking through its parklike serenity.
+:*/
+#include "lg.h"
+
+/*H:600
+ * Segments & The Global Descriptor Table
+ *
+ * (That title sounds like a bad Nerdcore group. Not to suggest that there are
+ * any good Nerdcore groups, but in high school a friend of mine had a band
+ * called Joe Fish and the Chips, so there are definitely worse band names).
+ *
+ * To refresh: the GDT is a table of 8-byte values describing segments. Once
+ * set up, these segments can be loaded into one of the 6 "segment registers".
+ *
+ * GDT entries are passed around as "struct desc_struct"s, which like IDT
+ * entries are split into two 32-bit members, "a" and "b". One day, someone
+ * will clean that up, and be declared a Hero. (No pressure, I'm just saying).
+ *
+ * Anyway, the GDT entry contains a base (the start address of the segment), a
+ * limit (the size of the segment - 1), and some flags. Sounds simple, and it
+ * would be, except those zany Intel engineers decided that it was too boring
+ * to put the base at one end, the limit at the other, and the flags in
+ * between. They decided to shotgun the bits at random throughout the 8 bytes,
+ * like so:
+ *
+ *   0               16                     40       48  52  56     63
+ * [ limit part 1 ][              base part 1 ][ flags ][li][fl][base ]
+ *                                                      mit ags part 2
+ *                                                      part 2
+ *
+ * As a result, this file contains a certain amount of magic numeracy. Let's
+ * begin.
+ */
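+
+/*
+ * To make that diagram concrete, here's how the scattered base and limit
+ * could be reassembled from a "struct desc_struct" (an illustrative sketch;
+ * the kernel already provides get_desc_base() and get_desc_limit()):
+ */
+#if 0
+static unsigned long example_desc_base(const struct desc_struct *d)
+{
+	return d->base0 | ((unsigned long)d->base1 << 16)
+			| ((unsigned long)d->base2 << 24);
+}
+
+static unsigned long example_desc_limit(const struct desc_struct *d)
+{
+	/* 16 low bits in limit0, 4 high bits up in the flags area. */
+	return d->limit0 | ((unsigned long)d->limit << 16);
+}
+#endif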
+
+/*
+ * There are several entries we don't let the Guest set. The TSS entry is the
+ * "Task State Segment" which controls all kinds of delicate things. The
+ * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
+ * double fault TSS entry is reserved because the Guest can't be trusted to
+ * deal with double faults.
+ */
+static bool ignored_gdt(unsigned int num)
+{
+ return (num == GDT_ENTRY_TSS
+ || num == GDT_ENTRY_LGUEST_CS
+ || num == GDT_ENTRY_LGUEST_DS
+ || num == GDT_ENTRY_DOUBLEFAULT_TSS);
+}
+
+/*H:630
+ * Once the Guest gave us new GDT entries, we fix them up a little. We
+ * don't care if they're invalid: the worst that can happen is a General
+ * Protection Fault in the Switcher when it restores a Guest segment register
+ * which tries to use that entry. Then we kill the Guest for causing such a
+ * mess: the message will be "unhandled trap 256".
+ */
+static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++) {
+ /*
+		 * We never copy these ones to the real GDT, so we don't care what
+ * they say
+ */
+ if (ignored_gdt(i))
+ continue;
+
+ /*
+ * Segment descriptors contain a privilege level: the Guest is
+ * sometimes careless and leaves this as 0, even though it's
+ * running at privilege level 1. If so, we fix it here.
+ */
+ if (cpu->arch.gdt[i].dpl == 0)
+ cpu->arch.gdt[i].dpl |= GUEST_PL;
+
+ /*
+ * Each descriptor has an "accessed" bit. If we don't set it
+ * now, the CPU will try to set it when the Guest first loads
+ * that entry into a segment register. But the GDT isn't
+ * writable by the Guest, so bad things can happen.
+ */
+ cpu->arch.gdt[i].type |= 0x1;
+ }
+}
+
+/*H:610
+ * Like the IDT, we never simply use the GDT the Guest gives us. We keep
+ * a GDT for each CPU, and copy across the Guest's entries each time we want to
+ * run the Guest on that CPU.
+ *
+ * This routine is called at boot or modprobe time for each CPU to set up the
+ * constant GDT entries: the ones which are the same no matter what Guest we're
+ * running.
+ */
+void setup_default_gdt_entries(struct lguest_ro_state *state)
+{
+ struct desc_struct *gdt = state->guest_gdt;
+ unsigned long tss = (unsigned long)&state->guest_tss;
+
+ /* The Switcher segments are full 0-4G segments, privilege level 0 */
+ gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+ gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+
+ /*
+ * The TSS segment refers to the TSS entry for this particular CPU.
+ */
+ gdt[GDT_ENTRY_TSS].a = 0;
+ gdt[GDT_ENTRY_TSS].b = 0;
+
+ gdt[GDT_ENTRY_TSS].limit0 = 0x67;
+ gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF;
+ gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF;
+ gdt[GDT_ENTRY_TSS].base2 = tss >> 24;
+ gdt[GDT_ENTRY_TSS].type = 0x9; /* 32-bit TSS (available) */
+ gdt[GDT_ENTRY_TSS].p = 0x1; /* Entry is present */
+ gdt[GDT_ENTRY_TSS].dpl = 0x0; /* Privilege level 0 */
+ gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */
+}
+
+/*
+ * This routine sets up the initial Guest GDT for booting. All entries start
+ * as 0 (unusable).
+ */
+void setup_guest_gdt(struct lg_cpu *cpu)
+{
+ /*
+ * Start with full 0-4G segments...except the Guest is allowed to use
+ * them, so set the privilege level appropriately in the flags.
+ */
+ cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
+ cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
+ cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
+ cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
+}
+
+/*H:650
+ * An optimization of copy_gdt(), for just the three "thread-local storage"
+ * entries.
+ */
+void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
+{
+ unsigned int i;
+
+ for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
+ gdt[i] = cpu->arch.gdt[i];
+}
+
+/*H:640
+ * When the Guest is run on a different CPU, or the GDT entries have changed,
+ * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's
+ * GDT.
+ */
+void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
+{
+ unsigned int i;
+
+ /*
+ * The default entries from setup_default_gdt_entries() are not
+ * replaced. See ignored_gdt() above.
+ */
+ for (i = 0; i < GDT_ENTRIES; i++)
+ if (!ignored_gdt(i))
+ gdt[i] = cpu->arch.gdt[i];
+}
+
+/*H:620
+ * This is where the Guest asks us to load a new GDT entry
+ * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in.
+ */
+void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
+{
+ /*
+ * We assume the Guest has the same number of GDT entries as the
+ * Host, otherwise we'd have to dynamically allocate the Guest GDT.
+ */
+ if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
+ kill_guest(cpu, "too many gdt entries %i", num);
+ return;
+ }
+
+ /* Set it up, then fix it. */
+ cpu->arch.gdt[num].a = lo;
+ cpu->arch.gdt[num].b = hi;
+ fixup_gdt_table(cpu, num, num+1);
+ /*
+ * Mark that the GDT changed so the core knows it has to copy it again,
+ * even if the Guest is run on the same CPU.
+ */
+ cpu->changed |= CHANGED_GDT;
+}
+
+/*
+ * This is the fast-track version for just changing the three TLS entries.
+ * Remember that this happens on every context switch, so it's worth
+ * optimizing. But wouldn't it be neater to have a single hypercall to cover
+ * both cases?
+ */
+void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
+{
+ struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];
+
+ __lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
+ fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
+ /* Note that just the TLS entries have changed. */
+ cpu->changed |= CHANGED_GDT_TLS;
+}
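+
+/*
+ * (For context, the Guest side of this is its TLS-loading paravirt hook
+ * issuing LHCALL_LOAD_TLS with the physical address of its TLS array --
+ * a sketch of the idea, not the verbatim Guest code:
+ *
+ *	hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), 0, 0, 0);
+ *
+ * which is why "gtls" is a Guest physical address for __lgread() above.)
+ */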
+
+/*H:660
+ * With this, we have finished the Host.
+ *
+ * Five of the seven parts of our task are complete. You have made it through
+ * the Bit of Despair (I think that's somewhere in the page table code,
+ * myself).
+ *
+ * Next, we examine "make Switcher". It's short, but intense.
+ */
diff --git a/kernel/drivers/lguest/x86/core.c b/kernel/drivers/lguest/x86/core.c
new file mode 100644
index 000000000..30f2aef69
--- /dev/null
+++ b/kernel/drivers/lguest/x86/core.c
@@ -0,0 +1,737 @@
+/*
+ * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
+ * Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/*P:450
+ * This file contains the x86-specific lguest code. It used to be all
+ * mixed in with drivers/lguest/core.c but several foolhardy code slashers
+ * wrestled most of the dependencies out to here in preparation for porting
+ * lguest to other architectures (see what I mean by foolhardy?).
+ *
+ * This also contains a couple of non-obvious setup and teardown pieces which
+ * were implemented after days of debugging pain.
+:*/
+#include <linux/kernel.h>
+#include <linux/start_kernel.h>
+#include <linux/string.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/lguest.h>
+#include <linux/lguest_launcher.h>
+#include <asm/paravirt.h>
+#include <asm/param.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/desc.h>
+#include <asm/setup.h>
+#include <asm/lguest.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
+#include <asm/tlbflush.h>
+#include "../lg.h"
+
+static int cpu_had_pge;
+
+static struct {
+ unsigned long offset;
+ unsigned short segment;
+} lguest_entry;
+
+/* Offset from where switcher_32.S was compiled to where we've copied it */
+static unsigned long switcher_offset(void)
+{
+ return switcher_addr - (unsigned long)start_switcher_text;
+}
+
+/* This cpu's struct lguest_pages (after the Switcher text page) */
+static struct lguest_pages *lguest_pages(unsigned int cpu)
+{
+ return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
+}
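+
+/*
+ * So the high-mapped Switcher region is laid out roughly like this (one
+ * text page, then two pages of "struct lguest_pages" for each CPU):
+ *
+ *	switcher_addr:             [ Switcher text page ]
+ *	switcher_addr + PAGE_SIZE: [ CPU 0: regs page | ro state page ]
+ *	                           [ CPU 1: regs page | ro state page ]
+ *	                           ...
+ */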
+
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
+
+/*S:010
+ * We approach the Switcher.
+ *
+ * Remember that each CPU has two pages which are visible to the Guest when it
+ * runs on that CPU. This has to contain the state for that Guest: we copy the
+ * state in just before we run the Guest.
+ *
+ * Each Guest has "changed" flags which indicate what has changed in the Guest
+ * since it last ran. We saw this set in interrupts_and_traps.c and
+ * segments.c.
+ */
+static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
+{
+ /*
+ * Copying all this data can be quite expensive. We usually run the
+ * same Guest we ran last time (and that Guest hasn't run anywhere else
+ * meanwhile). If that's not the case, we pretend everything in the
+ * Guest has changed.
+ */
+ if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+ __this_cpu_write(lg_last_cpu, cpu);
+ cpu->last_pages = pages;
+ cpu->changed = CHANGED_ALL;
+ }
+
+ /* These copies are pretty cheap, so we do them unconditionally. */
+
+ /* Save the current Host top-level page directory. */
+ pages->state.host_cr3 = __pa(current->mm->pgd);
+ /*
+ * Set up the Guest's page tables to see this CPU's pages (and no
+ * other CPU's pages).
+ */
+ map_switcher_in_guest(cpu, pages);
+ /*
+ * Set up the two "TSS" members which tell the CPU what stack to use
+ * for traps which go directly into the Guest (ie. traps at privilege
+ * level 1).
+ */
+ pages->state.guest_tss.sp1 = cpu->esp1;
+ pages->state.guest_tss.ss1 = cpu->ss1;
+
+ /* Copy direct-to-Guest trap entries. */
+ if (cpu->changed & CHANGED_IDT)
+ copy_traps(cpu, pages->state.guest_idt, default_idt_entries);
+
+ /* Copy all GDT entries which the Guest can change. */
+ if (cpu->changed & CHANGED_GDT)
+ copy_gdt(cpu, pages->state.guest_gdt);
+ /* If only the TLS entries have changed, copy them. */
+ else if (cpu->changed & CHANGED_GDT_TLS)
+ copy_gdt_tls(cpu, pages->state.guest_gdt);
+
+ /* Mark the Guest as unchanged for next time. */
+ cpu->changed = 0;
+}
+
+/* Finally: the code to actually call into the Switcher to run the Guest. */
+static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
+{
+ /* This is a dummy value we need for GCC's sake. */
+ unsigned int clobber;
+
+ /*
+ * Copy the guest-specific information into this CPU's "struct
+ * lguest_pages".
+ */
+ copy_in_guest_info(cpu, pages);
+
+ /*
+ * Set the trap number to 256 (impossible value). If we fault while
+ * switching to the Guest (bad segment registers or bug), this will
+ * cause us to abort the Guest.
+ */
+ cpu->regs->trapnum = 256;
+
+ /*
+ * Now: we push the "eflags" register on the stack, then do an "lcall".
+ * This is how we change from using the kernel code segment to using
+ * the dedicated lguest code segment, as well as jumping into the
+ * Switcher.
+ *
+ * The lcall also pushes the old code segment (KERNEL_CS) onto the
+ * stack, then the address of this call. This stack layout happens to
+ * exactly match the stack layout created by an interrupt...
+ */
+ asm volatile("pushf; lcall *%4"
+ /*
+ * This is how we tell GCC that %eax ("a") and %ebx ("b")
+ * are changed by this routine. The "=" means output.
+ */
+ : "=a"(clobber), "=b"(clobber)
+ /*
+ * %eax contains the pages pointer. ("0" refers to the
+ * 0-th argument above, ie "a"). %ebx contains the
+ * physical address of the Guest's top-level page
+ * directory.
+ */
+ : "0"(pages),
+ "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)),
+ "m"(lguest_entry)
+ /*
+ * We tell gcc that all these registers could change,
+ * which means we don't have to save and restore them in
+ * the Switcher.
+ */
+ : "memory", "%edx", "%ecx", "%edi", "%esi");
+}
+/*:*/
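+
+/*
+ * For the record, after that "pushf; lcall" the Host stack holds, from the
+ * top down, exactly what an interrupt would have left there:
+ *
+ *	[ eip    ]  <- pushed by lcall (the return address)
+ *	[ cs     ]  <- pushed by lcall (KERNEL_CS)
+ *	[ eflags ]  <- our pushf
+ *
+ * which is why the Switcher can bring us home with a simple "iret".
+ */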
+
+unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any)
+{
+ switch (reg_off) {
+ case offsetof(struct pt_regs, bx):
+ return &cpu->regs->ebx;
+ case offsetof(struct pt_regs, cx):
+ return &cpu->regs->ecx;
+ case offsetof(struct pt_regs, dx):
+ return &cpu->regs->edx;
+ case offsetof(struct pt_regs, si):
+ return &cpu->regs->esi;
+ case offsetof(struct pt_regs, di):
+ return &cpu->regs->edi;
+ case offsetof(struct pt_regs, bp):
+ return &cpu->regs->ebp;
+ case offsetof(struct pt_regs, ax):
+ return &cpu->regs->eax;
+ case offsetof(struct pt_regs, ip):
+ return &cpu->regs->eip;
+ case offsetof(struct pt_regs, sp):
+ return &cpu->regs->esp;
+ }
+
+ /* Launcher can read these, but we don't allow any setting. */
+ if (any) {
+ switch (reg_off) {
+ case offsetof(struct pt_regs, ds):
+ return &cpu->regs->ds;
+ case offsetof(struct pt_regs, es):
+ return &cpu->regs->es;
+ case offsetof(struct pt_regs, fs):
+ return &cpu->regs->fs;
+ case offsetof(struct pt_regs, gs):
+ return &cpu->regs->gs;
+ case offsetof(struct pt_regs, cs):
+ return &cpu->regs->cs;
+ case offsetof(struct pt_regs, flags):
+ return &cpu->regs->eflags;
+ case offsetof(struct pt_regs, ss):
+ return &cpu->regs->ss;
+ }
+ }
+
+ return NULL;
+}
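+
+/*
+ * (This is the helper behind the Launcher's register access: the Launcher
+ * hands us an offset into the userspace-visible "struct pt_regs", and we
+ * translate it into a pointer into our own "struct lguest_regs". "any" is
+ * only true for reads, which is how the segment registers and eflags above
+ * stay read-only from the Launcher's point of view.)
+ */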
+
+/*M:002
+ * There are hooks in the scheduler which we can register to tell when we
+ * get kicked off the CPU (preempt_notifier_register()). This would allow us
+ * to lazily disable SYSENTER which would regain some performance, and should
+ * also simplify copy_in_guest_info(). Note that we'd still need to restore
+ * things when we exit to Launcher userspace, but that's fairly easy.
+ *
+ * We could also try using these hooks for PGE, but that might be too expensive.
+ *
+ * The hooks were designed for KVM, but we can also put them to good use.
+:*/
+
+/*H:040
+ * This is the i386-specific code to setup and run the Guest. Interrupts
+ * are disabled: we own the CPU.
+ */
+void lguest_arch_run_guest(struct lg_cpu *cpu)
+{
+ /*
+ * Remember the awfully-named TS bit? If the Guest has asked to set it
+ * we set it now, so we can trap and pass that trap to the Guest if it
+ * uses the FPU.
+ */
+ if (cpu->ts && user_has_fpu())
+ stts();
+
+ /*
+ * SYSENTER is an optimized way of doing system calls. We can't allow
+ * it because it always jumps to privilege level 0. A normal Guest
+ * won't try it because we don't advertise it in CPUID, but a malicious
+ * Guest (or malicious Guest userspace program) could, so we tell the
+ * CPU to disable it before running the Guest.
+ */
+ if (boot_cpu_has(X86_FEATURE_SEP))
+ wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
+
+ /*
+ * Now we actually run the Guest. It will return when something
+ * interesting happens, and we can examine its registers to see what it
+ * was doing.
+ */
+ run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));
+
+ /*
+ * Note that the "regs" structure contains two extra entries which are
+ * not really registers: a trap number which says what interrupt or
+ * trap made the switcher code come back, and an error code which some
+ * traps set.
+ */
+
+ /* Restore SYSENTER if it's supposed to be on. */
+ if (boot_cpu_has(X86_FEATURE_SEP))
+ wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+
+ /* Clear the host TS bit if it was set above. */
+ if (cpu->ts && user_has_fpu())
+ clts();
+
+ /*
+ * If the Guest page faulted, then the cr2 register will tell us the
+ * bad virtual address. We have to grab this now, because once we
+ * re-enable interrupts an interrupt could fault and thus overwrite
+ * cr2, or we could even move off to a different CPU.
+ */
+ if (cpu->regs->trapnum == 14)
+ cpu->arch.last_pagefault = read_cr2();
+ /*
+ * Similarly, if we took a trap because the Guest used the FPU,
+ * we have to restore the FPU it expects to see.
+ * math_state_restore() may sleep and we may even move off to
+ * a different CPU. So all the critical stuff should be done
+ * before this.
+ */
+ else if (cpu->regs->trapnum == 7 && !user_has_fpu())
+ math_state_restore();
+}
+
+/*H:130
+ * Now we've examined the hypercall code; our Guest can make requests.
+ * Our Guest is usually so well behaved; it never tries to do things it isn't
+ * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual
+ * infrastructure isn't quite complete, because it doesn't contain replacements
+ * for the Intel I/O instructions. As a result, the Guest sometimes fumbles
+ * across one during the boot process as it probes for various things which are
+ * usually attached to a PC.
+ *
+ * When the Guest uses one of these instructions, we get a trap (General
+ * Protection Fault) and come here. We queue this to be sent out to the
+ * Launcher to handle.
+ */
+
+/*
+ * The eip contains the *virtual* address of the Guest's instruction:
+ * we copy the instruction here so the Launcher doesn't have to walk
+ * the page tables to decode it. We handle the case (eg. in a kernel
+ * module) where the instruction straddles two pages, and the pages are
+ * virtually but not physically contiguous.
+ *
+ * The longest possible x86 instruction is 15 bytes, but we don't handle
+ * anything that strange.
+ */
+static void copy_from_guest(struct lg_cpu *cpu,
+ void *dst, unsigned long vaddr, size_t len)
+{
+ size_t to_page_end = PAGE_SIZE - (vaddr % PAGE_SIZE);
+ unsigned long paddr;
+
+ BUG_ON(len > PAGE_SIZE);
+
+ /* If it goes over a page, copy in two parts. */
+ if (len > to_page_end) {
+ /* But make sure the next page is mapped! */
+ if (__guest_pa(cpu, vaddr + to_page_end, &paddr))
+ copy_from_guest(cpu, dst + to_page_end,
+ vaddr + to_page_end,
+ len - to_page_end);
+ else
+ /* Otherwise fill with zeroes. */
+ memset(dst + to_page_end, 0, len - to_page_end);
+ len = to_page_end;
+ }
+
+ /* This will kill the guest if it isn't mapped, but that
+ * shouldn't happen. */
+ __lgread(cpu, dst, guest_pa(cpu, vaddr), len);
+}
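+
+/*
+ * A worked example (addresses hypothetical): with 4096-byte pages, reading
+ * len == 15 bytes from vaddr == 0xc0000ffc gives to_page_end == 4. So we
+ * recurse to fetch the trailing 11 bytes from 0xc0001000 (or zero them if
+ * that page isn't mapped), then read the first 4 bytes from the original
+ * page.
+ */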
+
+static void setup_emulate_insn(struct lg_cpu *cpu)
+{
+ cpu->pending.trap = 13;
+ copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
+ sizeof(cpu->pending.insn));
+}
+
+static void setup_iomem_insn(struct lg_cpu *cpu, unsigned long iomem_addr)
+{
+ cpu->pending.trap = 14;
+ cpu->pending.addr = iomem_addr;
+ copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
+ sizeof(cpu->pending.insn));
+}
+
+/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
+void lguest_arch_handle_trap(struct lg_cpu *cpu)
+{
+ unsigned long iomem_addr;
+
+ switch (cpu->regs->trapnum) {
+ case 13: /* We've intercepted a General Protection Fault. */
+ /* Hand to Launcher to emulate those pesky IN and OUT insns */
+ if (cpu->regs->errcode == 0) {
+ setup_emulate_insn(cpu);
+ return;
+ }
+ break;
+ case 14: /* We've intercepted a Page Fault. */
+ /*
+ * The Guest accessed a virtual address that wasn't mapped.
+ * This happens a lot: we don't actually set up most of the page
+ * tables for the Guest at all when we start; as it runs it asks
+ * for more and more, and we set them up as required. In this
+ * case, we don't even tell the Guest that the fault happened.
+ *
+ * The errcode tells whether this was a read or a write, and
+ * whether kernel or userspace code.
+ */
+ if (demand_page(cpu, cpu->arch.last_pagefault,
+ cpu->regs->errcode, &iomem_addr))
+ return;
+
+ /* Was this an access to memory mapped IO? */
+ if (iomem_addr) {
+ /* Tell Launcher, let it handle it. */
+ setup_iomem_insn(cpu, iomem_addr);
+ return;
+ }
+
+ /*
+ * OK, it's really not there (or not OK): the Guest needs to
+ * know. We write out the cr2 value so it knows where the
+ * fault occurred.
+ *
+ * Note that if the Guest were really messed up, this could
+ * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
+ * lg->lguest_data could be NULL
+ */
+ if (cpu->lg->lguest_data &&
+ put_user(cpu->arch.last_pagefault,
+ &cpu->lg->lguest_data->cr2))
+ kill_guest(cpu, "Writing cr2");
+ break;
+ case 7: /* We've intercepted a Device Not Available fault. */
+ /*
+ * If the Guest doesn't want to know, we already restored the
+ * Floating Point Unit, so we just continue without telling it.
+ */
+ if (!cpu->ts)
+ return;
+ break;
+ case 32 ... 255:
+ /*
+ * These values mean a real interrupt occurred, in which case
+ * the Host handler has already been run. We just do a
+ * friendly check if another process should now be run, then
+ * return to run the Guest again.
+ */
+ cond_resched();
+ return;
+ case LGUEST_TRAP_ENTRY:
+ /*
+ * Our 'struct hcall_args' maps directly over our regs: we set
+ * up the pointer now to indicate a hypercall is pending.
+ */
+ cpu->hcall = (struct hcall_args *)cpu->regs;
+ return;
+ }
+
+ /* We didn't handle the trap, so it needs to go to the Guest. */
+ if (!deliver_trap(cpu, cpu->regs->trapnum))
+ /*
+ * If the Guest doesn't have a handler (either it hasn't
+ * registered any yet, or it's one of the faults we don't let
+ * it handle), it dies with this cryptic error message.
+ */
+ kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
+ cpu->regs->trapnum, cpu->regs->eip,
+ cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
+ : cpu->regs->errcode);
+}
+
+/*
+ * Now we can look at each of the routines this calls, in increasing order of
+ * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
+ * deliver_trap() and demand_page(). After all those, we'll be ready to
+ * examine the Switcher, and our philosophical understanding of the Host/Guest
+ * duality will be complete.
+:*/
+static void adjust_pge(void *on)
+{
+ if (on)
+ cr4_set_bits(X86_CR4_PGE);
+ else
+ cr4_clear_bits(X86_CR4_PGE);
+}
+
+/*H:020
+ * Now the Switcher is mapped and everything else is ready, we need to do
+ * some more i386-specific initialization.
+ */
+void __init lguest_arch_host_init(void)
+{
+ int i;
+
+ /*
+ * Most of the x86/switcher_32.S doesn't care that it's been moved; on
+ * Intel, jumps are relative, and it doesn't access any references to
+ * external code or data.
+ *
+ * The only exception is the interrupt handlers in switcher_32.S: their
+ * addresses are placed in a table (default_idt_entries), so we need to
+ * update the table with the new addresses. switcher_offset() is a
+ * convenience function which returns the distance between the
+ * compiled-in switcher code and the high-mapped copy we just made.
+ */
+ for (i = 0; i < IDT_ENTRIES; i++)
+ default_idt_entries[i] += switcher_offset();
+
+ /*
+ * Set up the Switcher's per-cpu areas.
+ *
+ * Each CPU gets two pages of its own within the high-mapped region
+ * (aka. "struct lguest_pages"). Much of this can be initialized now,
+ * but some depends on what Guest we are running (which is set up in
+ * copy_in_guest_info()).
+ */
+ for_each_possible_cpu(i) {
+ /* lguest_pages() returns this CPU's two pages. */
+ struct lguest_pages *pages = lguest_pages(i);
+ /* This is a convenience pointer to make the code neater. */
+ struct lguest_ro_state *state = &pages->state;
+
+ /*
+ * The Global Descriptor Table: the Host has a different one
+ * for each CPU. We keep a descriptor for the GDT which says
+ * where it is and how big it is (the stored value is actually the
+ * offset of the last byte, not the size, hence the "-1").
+ */
+ state->host_gdt_desc.size = GDT_SIZE-1;
+ state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);
+
+ /*
+ * All CPUs on the Host use the same Interrupt Descriptor
+ * Table, so we just use store_idt(), which gets this CPU's IDT
+ * descriptor.
+ */
+ store_idt(&state->host_idt_desc);
+
+ /*
+ * The descriptors for the Guest's GDT and IDT can be filled
+ * out now, too. We copy the GDT & IDT into ->guest_gdt and
+ * ->guest_idt before actually running the Guest.
+ */
+ state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
+ state->guest_idt_desc.address = (long)&state->guest_idt;
+ state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
+ state->guest_gdt_desc.address = (long)&state->guest_gdt;
+
+ /*
+ * We know where we want the stack to be when the Guest enters
+ * the Switcher: in pages->regs. The x86 stack grows downwards, so
+ * we start it at the end of that structure.
+ */
+ state->guest_tss.sp0 = (long)(&pages->regs + 1);
+ /*
+ * And this is the GDT entry to use for the stack: we keep a
+ * couple of special LGUEST entries.
+ */
+ state->guest_tss.ss0 = LGUEST_DS;
+
+ /*
+ * x86 can have a fine-grained bitmap which indicates what I/O
+ * ports the process can use. We set it to the end of our
+ * structure, meaning "none".
+ */
+ state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
+
+ /*
+ * Some GDT entries are the same across all Guests, so we can
+ * set them up now.
+ */
+ setup_default_gdt_entries(state);
+ /* Most IDT entries are the same for all Guests, too.*/
+ setup_default_idt_entries(state, default_idt_entries);
+
+ /*
+ * The Host needs to be able to use the LGUEST segments on this
+ * CPU, too, so put them in the Host GDT.
+ */
+ get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
+ get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
+ }
+
+ /*
+ * In the Switcher, we want the %cs segment register to use the
+ * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
+ * it will be undisturbed when we switch. To change %cs and jump we
+ * need this structure to feed to Intel's "lcall" instruction.
+ */
+ lguest_entry.offset = (long)switch_to_guest + switcher_offset();
+ lguest_entry.segment = LGUEST_CS;
+
+ /*
+ * Finally, we need to turn off "Page Global Enable". PGE is an
+ * optimization where page table entries are specially marked to show
+ * they never change. The Host kernel marks all the kernel pages this
+ * way because the kernel is always present, even when userspace is running.
+ *
+ * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
+ * switch to the Guest kernel. If you don't disable this on all CPUs,
+ * you'll get really weird bugs that you'll chase for two days.
+ *
+ * I used to turn PGE off every time we switched to the Guest and back
+ * on when we return, but that slowed the Switcher down noticeably.
+ */
+
+ /*
+ * We don't need the complexity of CPUs coming and going while we're
+ * doing this.
+ */
+ get_online_cpus();
+ if (cpu_has_pge) { /* We have a broader idea of "global". */
+ /* Remember that this was originally set (for cleanup). */
+ cpu_had_pge = 1;
+ /*
+ * adjust_pge is a helper function which sets or unsets the PGE
+ * bit on its CPU, depending on the argument (0 == unset).
+ */
+ on_each_cpu(adjust_pge, (void *)0, 1);
+ /* Turn off the feature in the global feature set. */
+ clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
+ }
+ put_online_cpus();
+}
+/*:*/
+
+void __exit lguest_arch_host_fini(void)
+{
+ /* If we had PGE before we started, turn it back on now. */
+ get_online_cpus();
+ if (cpu_had_pge) {
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
+ /* adjust_pge's argument "1" means set PGE. */
+ on_each_cpu(adjust_pge, (void *)1, 1);
+ }
+ put_online_cpus();
+}
+
+
+/*H:122 The i386-specific hypercalls simply farm out to the right functions. */
+int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
+{
+ switch (args->arg0) {
+ case LHCALL_LOAD_GDT_ENTRY:
+ load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3);
+ break;
+ case LHCALL_LOAD_IDT_ENTRY:
+ load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3);
+ break;
+ case LHCALL_LOAD_TLS:
+ guest_load_tls(cpu, args->arg1);
+ break;
+ default:
+ /* Bad Guest. Bad! */
+ return -EIO;
+ }
+ return 0;
+}
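+
+/*
+ * (How did those arguments get here? On i386 the Guest traps to
+ * LGUEST_TRAP_ENTRY with the call number and arguments in registers;
+ * a rough sketch of the Guest-side stub, not the verbatim code:
+ *
+ *	asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+ *		     : "=a"(call)
+ *		     : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3)
+ *		     : "memory");
+ *
+ * Since "struct hcall_args" maps directly over the saved registers, as
+ * lguest_arch_handle_trap() noted, args->arg0 is simply the Guest's %eax.)
+ */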
+
+/*H:126 i386-specific hypercall initialization: */
+int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
+{
+ u32 tsc_speed;
+
+ /*
+ * The pointer to the Guest's "struct lguest_data" is the only argument.
+ * We check that address now.
+ */
+ if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1,
+ sizeof(*cpu->lg->lguest_data)))
+ return -EFAULT;
+
+ /*
+ * Having checked it, we simply set lg->lguest_data to point straight
+ * into the Launcher's memory at the right place and then use
+ * copy_to_user/from_user from now on, instead of lgread/write. I put
+ * this in to show that I'm not immune to writing stupid
+ * optimizations.
+ */
+ cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1;
+
+ /*
+ * We insist that the Time Stamp Counter exists and doesn't change with
+ * cpu frequency. Some devious chip manufacturers decided that TSC
+ * changes could be handled in software. I decided that time going
+ * backwards might be good for benchmarks, but it's bad for users.
+ *
+ * We also insist that the TSC be stable: the kernel detects unreliable
+ * TSCs for its own purposes, and we use that here.
+ */
+ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
+ tsc_speed = tsc_khz;
+ else
+ tsc_speed = 0;
+ if (put_user(tsc_speed, &cpu->lg->lguest_data->tsc_khz))
+ return -EFAULT;
+
+ /* The interrupt code might not like the system call vector. */
+ if (!check_syscall_vector(cpu->lg))
+ kill_guest(cpu, "bad syscall vector");
+
+ return 0;
+}
+/*:*/
+
+/*L:030
+ * Most of the Guest's registers are left alone: we used get_zeroed_page() to
+ * allocate the structure, so they will be 0.
+ */
+void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
+{
+ struct lguest_regs *regs = cpu->regs;
+
+ /*
+ * There are four "segment" registers which the Guest needs to boot:
+ * The "code segment" register (cs) refers to the kernel code segment
+ * __KERNEL_CS, and the "data", "extra" and "stack" segment registers
+ * refer to the kernel data segment __KERNEL_DS.
+ *
+ * The privilege level is packed into the lower bits. The Guest runs
+ * at privilege level 1 (GUEST_PL).
+ */
+ regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
+ regs->cs = __KERNEL_CS|GUEST_PL;
+
+ /*
+ * The "eflags" register contains miscellaneous flags. Bit 1 (0x002)
+ * is supposed to always be "1". Bit 9 (0x200) controls whether
+ * interrupts are enabled. We always leave interrupts enabled while
+ * running the Guest.
+ */
+ regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
+
+ /*
+ * The "Extended Instruction Pointer" register says where the Guest is
+ * running.
+ */
+ regs->eip = start;
+
+ /*
+ * %esi points to our boot information, at physical address 0, so don't
+ * touch it.
+ */
+
+ /* There are a couple of GDT entries the Guest expects at boot. */
+ setup_guest_gdt(cpu);
+}
diff --git a/kernel/drivers/lguest/x86/switcher_32.S b/kernel/drivers/lguest/x86/switcher_32.S
new file mode 100644
index 000000000..40634b0db
--- /dev/null
+++ b/kernel/drivers/lguest/x86/switcher_32.S
@@ -0,0 +1,388 @@
+/*P:900
+ * This is the Switcher: code which sits at 0xFFC00000 (or 0xFFE00000) astride
+ * both the Host and Guest to do the low-level Guest<->Host switch. It is as
+ * simple as it can be made, but it's naturally very specific to x86.
+ *
+ * You have now completed Preparation. If this has whetted your appetite; if you
+ * are feeling invigorated and refreshed then the next, more challenging stage
+ * can be found in "make Guest".
+ :*/
+
+/*M:012
+ * Lguest is meant to be simple: my rule of thumb is that 1% more LOC must
+ * gain at least 1% more performance. Since neither LOC nor performance can be
+ * measured beforehand, it generally means implementing a feature then deciding
+ * if it's worth it. And once it's implemented, who can say no?
+ *
+ * This is why I haven't implemented this idea myself. I want to, but I
+ * haven't. You could, though.
+ *
+ * The main place where lguest performance sucks is Guest page faulting. When
+ * a Guest userspace process hits an unmapped page we switch back to the Host,
+ * walk the page tables, find it's not mapped, switch back to the Guest page
+ * fault handler, which calls a hypercall to set the page table entry, then
+ * finally returns to userspace. That's two round-trips.
+ *
+ * If we had a small walker in the Switcher, we could quickly check the Guest
+ * page table and if the page isn't mapped, immediately reflect the fault back
+ * into the Guest. This means the Switcher would have to know the top of the
+ * Guest page table and the page fault handler address.
+ *
+ * For simplicity, the Guest should only handle the case where the privilege
+ * level of the fault is 3 and probably only not present or write faults. It
+ * should also detect recursive faults, and hand the original fault to the
+ * Host (which is actually really easy).
+ *
+ * Two questions remain. Would the performance gain outweigh the complexity?
+ * And who would write the verse documenting it?
+:*/
+
+/*M:011
+ * Lguest64 handles NMI. This gave me NMI envy (until I looked at their
+ * code). It's worth doing though, since it would let us use oprofile in the
+ * Host when a Guest is running.
+:*/
+
+/*S:100
+ * Welcome to the Switcher itself!
+ *
+ * This file contains the low-level code which changes the CPU to run the Guest
+ * code, and returns to the Host when something happens. Understand this, and
+ * you understand the heart of our journey.
+ *
+ * Because this is in assembler rather than C, our tale switches from prose to
+ * verse. First I tried limericks:
+ *
+ * There once was an eax reg,
+ * To which our pointer was fed,
+ * It needed an add,
+ * Which asm-offsets.h had
+ * But this limerick is hurting my head.
+ *
+ * Next I tried haikus, but fitting the required reference to the seasons in
+ * every stanza was quickly becoming tiresome:
+ *
+ * The %eax reg
+ * Holds "struct lguest_pages" now:
+ * Cherry blossoms fall.
+ *
+ * Then I started with Heroic Verse, but the rhyming requirement leeched away
+ * the content density and led to some uniquely awful oblique rhymes:
+ *
+ * These constants are coming from struct offsets
+ * For use within the asm switcher text.
+ *
+ * Finally, I settled for something between heroic hexameter, and normal prose
+ * with inappropriate linebreaks. Anyway, it ain't no Shakespeare.
+ */
+
+// Not all kernel headers work from assembler
+// But these ones are needed: the ENTRY() define
+// And constants extracted from struct offsets
+// To avoid magic numbers and breakage:
+// Should they change the compiler can't save us
+// Down here in the depths of assembler code.
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/segment.h>
+#include <asm/lguest.h>
+
+// We mark the start of the code to copy
+// It's placed in .text tho it's never run here
+// You'll see the trick macro at the end
+// Which interleaves data and text to effect.
+.text
+ENTRY(start_switcher_text)
+
+// When we reach switch_to_guest we have just left
+// The safe and comforting shores of C code
+// %eax has the "struct lguest_pages" to use
+// Where we save state and still see it from the Guest
+// And %ebx holds the Guest shadow pagetable:
+// Once set we have truly left Host behind.
+ENTRY(switch_to_guest)
+ // We told gcc all its regs could fade,
+ // Clobbered by our journey into the Guest
+ // We could have saved them, if we tried
+ // But time is our master and cycles count.
+
+ // Segment registers must be saved for the Host
+ // We push them on the Host stack for later
+ pushl %es
+ pushl %ds
+ pushl %gs
+ pushl %fs
+ // But the compiler is fickle, and heeds
+ // No warning of %ebp clobbers
+ // When frame pointers are used. That register
+ // Must be saved and restored or chaos strikes.
+ pushl %ebp
+ // The Host's stack is done, now save it away
+ // In our "struct lguest_pages" at offset
+ // Distilled into asm-offsets.h
+ movl %esp, LGUEST_PAGES_host_sp(%eax)
+
+ // All saved and there's now five steps before us:
+ // Stack, GDT, IDT, TSS
+ // Then last of all the page tables are flipped.
+
+ // Yet beware that our stack pointer must be
+ // Always valid lest an NMI hits
+ // %edx does the duty here as we juggle
+ // %eax is lguest_pages: our stack lies within.
+ movl %eax, %edx
+ addl $LGUEST_PAGES_regs, %edx
+ movl %edx, %esp
+
+ // The Guest's GDT we so carefully
+ // Placed in the "struct lguest_pages" before
+ lgdt LGUEST_PAGES_guest_gdt_desc(%eax)
+
+ // The Guest's IDT we did partially
+ // Copy to "struct lguest_pages" as well.
+ lidt LGUEST_PAGES_guest_idt_desc(%eax)
+
+ // The TSS entry which controls traps
+ // Must be loaded up with "ltr" now:
+ // The GDT entry that TSS uses
+ // Changes type when we load it: damn Intel!
+ // For after we switch over our page tables
+ // That entry will be read-only: we'd crash.
+ movl $(GDT_ENTRY_TSS*8), %edx
+ ltr %dx
+
+ // Look back now, before we take this last step!
+ // The Host's TSS entry was also marked used;
+ // Let's clear it again for our return.
+ // The GDT descriptor of the Host
+ // Points to the table after two "size" bytes
+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
+ // Clear "used" from type field (byte 5, bit 2)
+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
+
+ // Once our page table's switched, the Guest is live!
+ // The Host fades as we run this final step.
+ // Our "struct lguest_pages" is now read-only.
+ movl %ebx, %cr3
+
+ // The page table change did one tricky thing:
+ // The Guest's register page has been mapped
+ // Writable under our %esp (stack) --
+ // We can simply pop off all Guest regs.
+ popl %eax
+ popl %ebx
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %gs
+ popl %fs
+ popl %ds
+ popl %es
+
+ // Near the base of the stack lurk two strange fields
+ // Which we fill as we exit the Guest
+ // These are the trap number and its error
+ // We can simply step past them on our way.
+ addl $8, %esp
+
+ // The last five stack slots hold return address
+ // And everything needed to switch privilege
+ // From Switcher's level 0 to Guest's 1,
+ // And the stack where the Guest had last left it.
+ // Interrupts are turned back on: we are Guest.
+ iret
+
+// We tread two paths to switch back to the Host
+// Yet both must save Guest state and restore Host
+// So we put the routine in a macro.
+#define SWITCH_TO_HOST \
+ /* We save the Guest state: all registers first \
+ * Laid out just as "struct lguest_regs" defines */ \
+ pushl %es; \
+ pushl %ds; \
+ pushl %fs; \
+ pushl %gs; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ pushl %eax; \
+ /* Our stack and our code are using segments \
+ * Set in the TSS and IDT \
+ * Yet if we were to touch data we'd use \
+ * Whatever data segment the Guest had. \
+ * Load the lguest ds segment for now. */ \
+ movl $(LGUEST_DS), %eax; \
+ movl %eax, %ds; \
+ /* So where are we? Which CPU, which struct? \
+ * The stack is our clue: our TSS starts \
+ * It at the end of "struct lguest_pages". \
+ * Or we may have stumbled while restoring \
+ * Our Guest segment regs while in switch_to_guest, \
+ * The fault pushed atop that part-unwound stack. \
+ * If we round the stack down to the page start \
+ * We're at the start of "struct lguest_pages". */ \
+ movl %esp, %eax; \
+ andl $(~(1 << PAGE_SHIFT - 1)), %eax; \
+ /* Save our trap number: the switch will obscure it \
+ * (In the Host the Guest regs are not mapped here) \
+ * %ebx holds it safe for deliver_to_host */ \
+ movl LGUEST_PAGES_regs_trapnum(%eax), %ebx; \
+ /* The Host GDT, IDT and stack! \
+ * All these lie safely hidden from the Guest: \
+ * We must return to the Host page tables \
+ * (Hence that was saved in struct lguest_pages) */ \
+ movl LGUEST_PAGES_host_cr3(%eax), %edx; \
+ movl %edx, %cr3; \
+ /* As before, when we looked back at the Host \
+ * As we left and marked TSS unused \
+ * So must we now for the Guest left behind. */ \
+ andb $0xFD, (LGUEST_PAGES_guest_gdt+GDT_ENTRY_TSS*8+5)(%eax); \
+ /* Switch to Host's GDT, IDT. */ \
+ lgdt LGUEST_PAGES_host_gdt_desc(%eax); \
+ lidt LGUEST_PAGES_host_idt_desc(%eax); \
+ /* Restore the Host's stack where its saved regs lie */ \
+ movl LGUEST_PAGES_host_sp(%eax), %esp; \
+ /* Last the TSS: our Host is returned */ \
+ movl $(GDT_ENTRY_TSS*8), %edx; \
+ ltr %dx; \
+ /* Restore now the regs saved right at the first. */ \
+ popl %ebp; \
+ popl %fs; \
+ popl %gs; \
+ popl %ds; \
+ popl %es
+
+// The first path is trod when the Guest has trapped:
+// (Which trap it was has been pushed on the stack).
+// We need only switch back, and the Host will decode
+// Why we came home, and what needs to be done.
+return_to_host:
+ SWITCH_TO_HOST
+ iret
+
+// We are led to the second path like so:
+// An interrupt, with some cause external
+// Has jerked us rudely from the Guest's code
+// Again we must return home to the Host
+deliver_to_host:
+ SWITCH_TO_HOST
+ // But now we must go home via that place
+ // Where that interrupt was supposed to go
+ // Had we not been ensconced, running the Guest.
+ // Here we see the trickiness of run_guest_once():
+ // The Host stack is formed like an interrupt
+ // With EIP, CS and EFLAGS layered.
+ // Interrupt handlers end with "iret"
+ // And that will take us home at long long last.
+
+ // But first we must find the handler to call!
+ // The IDT descriptor for the Host
+ // Has two bytes for size, and four for address:
+ // %edx will hold it for us for now.
+ movl (LGUEST_PAGES_host_idt_desc+2)(%eax), %edx
+ // We now know the table address we need,
+ // And saved the trap's number inside %ebx.
+ // Yet the pointer to the handler is smeared
+ // Across the bits of the table entry.
+ // What oracle can tell us how to extract
+ // From such a convoluted encoding?
+ // I consulted gcc, and it gave
+ // These instructions, which I gladly credit:
+ leal (%edx,%ebx,8), %eax
+ movzwl (%eax),%edx
+ movl 4(%eax), %eax
+ xorw %ax, %ax
+ orl %eax, %edx
+ // Now the address of the handler's in %edx
+ // We call it now: its "iret" drops us home.
+ jmp *%edx
+
+// Every interrupt can come to us here
+// But we must truly tell each apart.
+// They number two hundred and fifty six
+// And each must land in a different spot,
+// Push its number on stack, and join the stream.
+
+// And worse, a mere six of the traps stand apart
+// And push on their stack an addition:
+// An error number, thirty two bits long
+// So we punish the other two fifty
+// And make them push a zero so they match.
+
+// Yet two fifty six entries is long
+// And all will look most the same as the last
+// So we create a macro which can make
+// As many entries as we need to fill.
+
+// Note the change to .data then .text:
+// We plant the address of each entry
+// Into a (data) table for the Host
+// To know where each Guest interrupt should go.
+.macro IRQ_STUB N TARGET
+ .data; .long 1f; .text; 1:
+ // Trap eight, ten through fourteen and seventeen
+ // Supply an error number. Else zero.
+ .if (\N <> 8) && (\N < 10 || \N > 14) && (\N <> 17)
+ pushl $0
+ .endif
+ pushl $\N
+ jmp \TARGET
+ ALIGN
+.endm
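+
+// (A worked expansion, for the curious:
+// "IRQ_STUB 13 return_to_host" emits only
+//	.data; .long 1f; .text; 1:
+//	pushl $13
+//	jmp return_to_host
+// Since thirteen pushes its own error code,
+// While trap three, say, gains a "pushl $0" first.)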
+
+// This macro creates numerous entries
+// Using GAS macros which out-power C's.
+.macro IRQ_STUBS FIRST LAST TARGET
+ irq=\FIRST
+ .rept \LAST-\FIRST+1
+ IRQ_STUB irq \TARGET
+ irq=irq+1
+ .endr
+.endm
+
+// Here's the marker for our pointer table
+// Laid in the data section just before
+// Each macro places the address of code
+// Forming an array: each one points to text
+// Which handles interrupt in its turn.
+.data
+.global default_idt_entries
+default_idt_entries:
+.text
+ // The first two traps go straight back to the Host
+ IRQ_STUBS 0 1 return_to_host
+ // We'll say nothing, yet, about NMI
+ IRQ_STUB 2 handle_nmi
+ // Other traps also return to the Host
+ IRQ_STUBS 3 31 return_to_host
+ // All interrupts go via their handlers
+ IRQ_STUBS 32 127 deliver_to_host
+ // 'Cept system calls coming from userspace
+ // Are to go to the Guest, never the Host.
+ IRQ_STUB 128 return_to_host
+ IRQ_STUBS 129 255 deliver_to_host
+
+// The NMI, what a fabulous beast
+// Which swoops in and stops us no matter that
+// We're suspended between heaven and hell,
+// (Or more likely between the Host and Guest)
+// When in it comes! We are dazed and confused
+// So we do the simplest thing which one can.
+// Though we've pushed the trap number and zero
+// We discard them, return, and hope we live.
+handle_nmi:
+ addl $8, %esp
+ iret
+
+// We are done; all that's left is Mastery
+// And "make Mastery" is a journey long
+// Designed to make your fingers itch to code.
+
+// Here ends the text, the file and poem.
+ENTRY(end_switcher_text)