Diffstat (limited to 'kernel/arch/x86/include/asm')
-rw-r--r--  kernel/arch/x86/include/asm/Kbuild  12
-rw-r--r--  kernel/arch/x86/include/asm/a.out-core.h  65
-rw-r--r--  kernel/arch/x86/include/asm/acenv.h  45
-rw-r--r--  kernel/arch/x86/include/asm/acpi.h  150
-rw-r--r--  kernel/arch/x86/include/asm/agp.h  31
-rw-r--r--  kernel/arch/x86/include/asm/alternative-asm.h  82
-rw-r--r--  kernel/arch/x86/include/asm/alternative.h  268
-rw-r--r--  kernel/arch/x86/include/asm/amd_nb.h  110
-rw-r--r--  kernel/arch/x86/include/asm/apb_timer.h  49
-rw-r--r--  kernel/arch/x86/include/asm/apic.h  661
-rw-r--r--  kernel/arch/x86/include/asm/apic_flat_64.h  7
-rw-r--r--  kernel/arch/x86/include/asm/apicdef.h  445
-rw-r--r--  kernel/arch/x86/include/asm/apm.h  73
-rw-r--r--  kernel/arch/x86/include/asm/arch_hweight.h  61
-rw-r--r--  kernel/arch/x86/include/asm/archrandom.h  136
-rw-r--r--  kernel/arch/x86/include/asm/asm-offsets.h  1
-rw-r--r--  kernel/arch/x86/include/asm/asm.h  83
-rw-r--r--  kernel/arch/x86/include/asm/atomic.h  238
-rw-r--r--  kernel/arch/x86/include/asm/atomic64_32.h  316
-rw-r--r--  kernel/arch/x86/include/asm/atomic64_64.h  223
-rw-r--r--  kernel/arch/x86/include/asm/barrier.h  105
-rw-r--r--  kernel/arch/x86/include/asm/bios_ebda.h  60
-rw-r--r--  kernel/arch/x86/include/asm/bitops.h  509
-rw-r--r--  kernel/arch/x86/include/asm/boot.h  40
-rw-r--r--  kernel/arch/x86/include/asm/bootparam_utils.h  54
-rw-r--r--  kernel/arch/x86/include/asm/bug.h  37
-rw-r--r--  kernel/arch/x86/include/asm/bugs.h  12
-rw-r--r--  kernel/arch/x86/include/asm/cache.h  23
-rw-r--r--  kernel/arch/x86/include/asm/cacheflush.h  107
-rw-r--r--  kernel/arch/x86/include/asm/calgary.h  70
-rw-r--r--  kernel/arch/x86/include/asm/calling.h  247
-rw-r--r--  kernel/arch/x86/include/asm/ce4100.h  6
-rw-r--r--  kernel/arch/x86/include/asm/checksum.h  5
-rw-r--r--  kernel/arch/x86/include/asm/checksum_32.h  202
-rw-r--r--  kernel/arch/x86/include/asm/checksum_64.h  198
-rw-r--r--  kernel/arch/x86/include/asm/clocksource.h  15
-rw-r--r--  kernel/arch/x86/include/asm/cmdline.h  6
-rw-r--r--  kernel/arch/x86/include/asm/cmpxchg.h  233
-rw-r--r--  kernel/arch/x86/include/asm/cmpxchg_32.h  114
-rw-r--r--  kernel/arch/x86/include/asm/cmpxchg_64.h  23
-rw-r--r--  kernel/arch/x86/include/asm/compat.h  324
-rw-r--r--  kernel/arch/x86/include/asm/context_tracking.h  10
-rw-r--r--  kernel/arch/x86/include/asm/cpu.h  39
-rw-r--r--  kernel/arch/x86/include/asm/cpu_device_id.h  13
-rw-r--r--  kernel/arch/x86/include/asm/cpufeature.h  603
-rw-r--r--  kernel/arch/x86/include/asm/cpumask.h  14
-rw-r--r--  kernel/arch/x86/include/asm/crash.h  9
-rw-r--r--  kernel/arch/x86/include/asm/crypto/aes.h  11
-rw-r--r--  kernel/arch/x86/include/asm/crypto/camellia.h  101
-rw-r--r--  kernel/arch/x86/include/asm/crypto/glue_helper.h  145
-rw-r--r--  kernel/arch/x86/include/asm/crypto/serpent-avx.h  48
-rw-r--r--  kernel/arch/x86/include/asm/crypto/serpent-sse2.h  63
-rw-r--r--  kernel/arch/x86/include/asm/crypto/twofish.h  46
-rw-r--r--  kernel/arch/x86/include/asm/current.h  21
-rw-r--r--  kernel/arch/x86/include/asm/debugreg.h  123
-rw-r--r--  kernel/arch/x86/include/asm/delay.h  8
-rw-r--r--  kernel/arch/x86/include/asm/desc.h  519
-rw-r--r--  kernel/arch/x86/include/asm/desc_defs.h  101
-rw-r--r--  kernel/arch/x86/include/asm/device.h  16
-rw-r--r--  kernel/arch/x86/include/asm/disabled-features.h  45
-rw-r--r--  kernel/arch/x86/include/asm/div64.h  66
-rw-r--r--  kernel/arch/x86/include/asm/dma-mapping.h  176
-rw-r--r--  kernel/arch/x86/include/asm/dma.h  317
-rw-r--r--  kernel/arch/x86/include/asm/dmi.h  21
-rw-r--r--  kernel/arch/x86/include/asm/dwarf2.h  170
-rw-r--r--  kernel/arch/x86/include/asm/e820.h  69
-rw-r--r--  kernel/arch/x86/include/asm/edac.h  18
-rw-r--r--  kernel/arch/x86/include/asm/efi.h  197
-rw-r--r--  kernel/arch/x86/include/asm/elf.h  371
-rw-r--r--  kernel/arch/x86/include/asm/emergency-restart.h  6
-rw-r--r--  kernel/arch/x86/include/asm/entry_arch.h  53
-rw-r--r--  kernel/arch/x86/include/asm/espfix.h  16
-rw-r--r--  kernel/arch/x86/include/asm/exec.h  1
-rw-r--r--  kernel/arch/x86/include/asm/fb.h  21
-rw-r--r--  kernel/arch/x86/include/asm/fixmap.h  170
-rw-r--r--  kernel/arch/x86/include/asm/floppy.h  281
-rw-r--r--  kernel/arch/x86/include/asm/fpu-internal.h  626
-rw-r--r--  kernel/arch/x86/include/asm/frame.h  26
-rw-r--r--  kernel/arch/x86/include/asm/ftrace.h  68
-rw-r--r--  kernel/arch/x86/include/asm/futex.h  117
-rw-r--r--  kernel/arch/x86/include/asm/gart.h  113
-rw-r--r--  kernel/arch/x86/include/asm/genapic.h  1
-rw-r--r--  kernel/arch/x86/include/asm/geode.h  36
-rw-r--r--  kernel/arch/x86/include/asm/gpio.h  4
-rw-r--r--  kernel/arch/x86/include/asm/hardirq.h  63
-rw-r--r--  kernel/arch/x86/include/asm/highmem.h  80
-rw-r--r--  kernel/arch/x86/include/asm/hpet.h  118
-rw-r--r--  kernel/arch/x86/include/asm/hugetlb.h  99
-rw-r--r--  kernel/arch/x86/include/asm/hw_breakpoint.h  76
-rw-r--r--  kernel/arch/x86/include/asm/hw_irq.h  197
-rw-r--r--  kernel/arch/x86/include/asm/hypertransport.h  45
-rw-r--r--  kernel/arch/x86/include/asm/hypervisor.h  64
-rw-r--r--  kernel/arch/x86/include/asm/i387.h  108
-rw-r--r--  kernel/arch/x86/include/asm/i8259.h  75
-rw-r--r--  kernel/arch/x86/include/asm/ia32.h  81
-rw-r--r--  kernel/arch/x86/include/asm/ia32_unistd.h  11
-rw-r--r--  kernel/arch/x86/include/asm/idle.h  22
-rw-r--r--  kernel/arch/x86/include/asm/imr.h  60
-rw-r--r--  kernel/arch/x86/include/asm/inat.h  221
-rw-r--r--  kernel/arch/x86/include/asm/inat_types.h  29
-rw-r--r--  kernel/arch/x86/include/asm/init.h  14
-rw-r--r--  kernel/arch/x86/include/asm/insn.h  201
-rw-r--r--  kernel/arch/x86/include/asm/inst.h  310
-rw-r--r--  kernel/arch/x86/include/asm/intel-mid.h  151
-rw-r--r--  kernel/arch/x86/include/asm/intel_mid_vrtc.h  9
-rw-r--r--  kernel/arch/x86/include/asm/intel_scu_ipc.h  73
-rw-r--r--  kernel/arch/x86/include/asm/io.h  347
-rw-r--r--  kernel/arch/x86/include/asm/io_apic.h  289
-rw-r--r--  kernel/arch/x86/include/asm/iomap.h  41
-rw-r--r--  kernel/arch/x86/include/asm/iommu.h  12
-rw-r--r--  kernel/arch/x86/include/asm/iommu_table.h  101
-rw-r--r--  kernel/arch/x86/include/asm/iosf_mbi.h  145
-rw-r--r--  kernel/arch/x86/include/asm/ipi.h  162
-rw-r--r--  kernel/arch/x86/include/asm/irq.h  50
-rw-r--r--  kernel/arch/x86/include/asm/irq_regs.h  31
-rw-r--r--  kernel/arch/x86/include/asm/irq_remapping.h  105
-rw-r--r--  kernel/arch/x86/include/asm/irq_vectors.h  172
-rw-r--r--  kernel/arch/x86/include/asm/irq_work.h  11
-rw-r--r--  kernel/arch/x86/include/asm/irqflags.h  198
-rw-r--r--  kernel/arch/x86/include/asm/ist.h  23
-rw-r--r--  kernel/arch/x86/include/asm/jump_label.h  46
-rw-r--r--  kernel/arch/x86/include/asm/kasan.h  31
-rw-r--r--  kernel/arch/x86/include/asm/kbdleds.h  17
-rw-r--r--  kernel/arch/x86/include/asm/kdebug.h  39
-rw-r--r--  kernel/arch/x86/include/asm/kexec-bzimage64.h  6
-rw-r--r--  kernel/arch/x86/include/asm/kexec.h  216
-rw-r--r--  kernel/arch/x86/include/asm/kgdb.h  89
-rw-r--r--  kernel/arch/x86/include/asm/kmap_types.h  12
-rw-r--r--  kernel/arch/x86/include/asm/kmemcheck.h  42
-rw-r--r--  kernel/arch/x86/include/asm/kprobes.h  120
-rw-r--r--  kernel/arch/x86/include/asm/kvm_emulate.h  428
-rw-r--r--  kernel/arch/x86/include/asm/kvm_guest.h  6
-rw-r--r--  kernel/arch/x86/include/asm/kvm_host.h  1185
-rw-r--r--  kernel/arch/x86/include/asm/kvm_para.h  137
-rw-r--r--  kernel/arch/x86/include/asm/lguest.h  89
-rw-r--r--  kernel/arch/x86/include/asm/lguest_hcall.h  75
-rw-r--r--  kernel/arch/x86/include/asm/linkage.h  61
-rw-r--r--  kernel/arch/x86/include/asm/livepatch.h  46
-rw-r--r--  kernel/arch/x86/include/asm/local.h  161
-rw-r--r--  kernel/arch/x86/include/asm/local64.h  1
-rw-r--r--  kernel/arch/x86/include/asm/mach_timer.h  48
-rw-r--r--  kernel/arch/x86/include/asm/mach_traps.h  43
-rw-r--r--  kernel/arch/x86/include/asm/math_emu.h  18
-rw-r--r--  kernel/arch/x86/include/asm/mc146818rtc.h  103
-rw-r--r--  kernel/arch/x86/include/asm/mce.h  258
-rw-r--r--  kernel/arch/x86/include/asm/microcode.h  165
-rw-r--r--  kernel/arch/x86/include/asm/microcode_amd.h  79
-rw-r--r--  kernel/arch/x86/include/asm/microcode_intel.h  92
-rw-r--r--  kernel/arch/x86/include/asm/misc.h  6
-rw-r--r--  kernel/arch/x86/include/asm/mmconfig.h  12
-rw-r--r--  kernel/arch/x86/include/asm/mmu.h  34
-rw-r--r--  kernel/arch/x86/include/asm/mmu_context.h  175
-rw-r--r--  kernel/arch/x86/include/asm/mmx.h  14
-rw-r--r--  kernel/arch/x86/include/asm/mmzone.h  5
-rw-r--r--  kernel/arch/x86/include/asm/mmzone_32.h  55
-rw-r--r--  kernel/arch/x86/include/asm/mmzone_64.h  17
-rw-r--r--  kernel/arch/x86/include/asm/module.h  64
-rw-r--r--  kernel/arch/x86/include/asm/mpspec.h  153
-rw-r--r--  kernel/arch/x86/include/asm/mpspec_def.h  173
-rw-r--r--  kernel/arch/x86/include/asm/mpx.h  103
-rw-r--r--  kernel/arch/x86/include/asm/mshyperv.h  23
-rw-r--r--  kernel/arch/x86/include/asm/msidef.h  56
-rw-r--r--  kernel/arch/x86/include/asm/msr.h  291
-rw-r--r--  kernel/arch/x86/include/asm/mtrr.h  130
-rw-r--r--  kernel/arch/x86/include/asm/mutex.h  5
-rw-r--r--  kernel/arch/x86/include/asm/mutex_32.h  110
-rw-r--r--  kernel/arch/x86/include/asm/mutex_64.h  127
-rw-r--r--  kernel/arch/x86/include/asm/mwait.h  67
-rw-r--r--  kernel/arch/x86/include/asm/nmi.h  66
-rw-r--r--  kernel/arch/x86/include/asm/nops.h  146
-rw-r--r--  kernel/arch/x86/include/asm/numa.h  82
-rw-r--r--  kernel/arch/x86/include/asm/numa_32.h  12
-rw-r--r--  kernel/arch/x86/include/asm/numachip/numachip.h  19
-rw-r--r--  kernel/arch/x86/include/asm/numachip/numachip_csr.h  167
-rw-r--r--  kernel/arch/x86/include/asm/olpc.h  132
-rw-r--r--  kernel/arch/x86/include/asm/olpc_ofw.h  37
-rw-r--r--  kernel/arch/x86/include/asm/page.h  76
-rw-r--r--  kernel/arch/x86/include/asm/page_32.h  48
-rw-r--r--  kernel/arch/x86/include/asm/page_32_types.h  58
-rw-r--r--  kernel/arch/x86/include/asm/page_64.h  46
-rw-r--r--  kernel/arch/x86/include/asm/page_64_types.h  61
-rw-r--r--  kernel/arch/x86/include/asm/page_types.h  70
-rw-r--r--  kernel/arch/x86/include/asm/paravirt.h  995
-rw-r--r--  kernel/arch/x86/include/asm/paravirt_types.h  708
-rw-r--r--  kernel/arch/x86/include/asm/parport.h  10
-rw-r--r--  kernel/arch/x86/include/asm/pat.h  28
-rw-r--r--  kernel/arch/x86/include/asm/pci-direct.h  21
-rw-r--r--  kernel/arch/x86/include/asm/pci-functions.h  19
-rw-r--r--  kernel/arch/x86/include/asm/pci.h  158
-rw-r--r--  kernel/arch/x86/include/asm/pci_64.h  27
-rw-r--r--  kernel/arch/x86/include/asm/pci_x86.h  210
-rw-r--r--  kernel/arch/x86/include/asm/percpu.h  620
-rw-r--r--  kernel/arch/x86/include/asm/perf_event.h  290
-rw-r--r--  kernel/arch/x86/include/asm/perf_event_p4.h  876
-rw-r--r--  kernel/arch/x86/include/asm/pgalloc.h  148
-rw-r--r--  kernel/arch/x86/include/asm/pgtable-2level.h  80
-rw-r--r--  kernel/arch/x86/include/asm/pgtable-2level_types.h  36
-rw-r--r--  kernel/arch/x86/include/asm/pgtable-3level.h  187
-rw-r--r--  kernel/arch/x86/include/asm/pgtable-3level_types.h  46
-rw-r--r--  kernel/arch/x86/include/asm/pgtable.h  881
-rw-r--r--  kernel/arch/x86/include/asm/pgtable_32.h  78
-rw-r--r--  kernel/arch/x86/include/asm/pgtable_32_types.h  55
-rw-r--r--  kernel/arch/x86/include/asm/pgtable_64.h  183
-rw-r--r--  kernel/arch/x86/include/asm/pgtable_64_types.h  70
-rw-r--r--  kernel/arch/x86/include/asm/pgtable_types.h  423
-rw-r--r--  kernel/arch/x86/include/asm/platform_sst_audio.h  140
-rw-r--r--  kernel/arch/x86/include/asm/pm-trace.h  23
-rw-r--r--  kernel/arch/x86/include/asm/pmc_atom.h  129
-rw-r--r--  kernel/arch/x86/include/asm/posix_types.h  5
-rw-r--r--  kernel/arch/x86/include/asm/preempt.h  125
-rw-r--r--  kernel/arch/x86/include/asm/probe_roms.h  8
-rw-r--r--  kernel/arch/x86/include/asm/processor-cyrix.h  38
-rw-r--r--  kernel/arch/x86/include/asm/processor-flags.h  11
-rw-r--r--  kernel/arch/x86/include/asm/processor.h  977
-rw-r--r--  kernel/arch/x86/include/asm/prom.h  41
-rw-r--r--  kernel/arch/x86/include/asm/proto.h  22
-rw-r--r--  kernel/arch/x86/include/asm/ptrace.h  257
-rw-r--r--  kernel/arch/x86/include/asm/pvclock-abi.h  45
-rw-r--r--  kernel/arch/x86/include/asm/pvclock.h  107
-rw-r--r--  kernel/arch/x86/include/asm/qrwlock.h  17
-rw-r--r--  kernel/arch/x86/include/asm/realmode.h  64
-rw-r--r--  kernel/arch/x86/include/asm/reboot.h  29
-rw-r--r--  kernel/arch/x86/include/asm/reboot_fixups.h  6
-rw-r--r--  kernel/arch/x86/include/asm/required-features.h  96
-rw-r--r--  kernel/arch/x86/include/asm/rio.h  63
-rw-r--r--  kernel/arch/x86/include/asm/rmwcc.h  41
-rw-r--r--  kernel/arch/x86/include/asm/rtc.h  1
-rw-r--r--  kernel/arch/x86/include/asm/rwsem.h  225
-rw-r--r--  kernel/arch/x86/include/asm/seccomp.h  20
-rw-r--r--  kernel/arch/x86/include/asm/sections.h  14
-rw-r--r--  kernel/arch/x86/include/asm/segment.h  300
-rw-r--r--  kernel/arch/x86/include/asm/serial.h  29
-rw-r--r--  kernel/arch/x86/include/asm/setup.h  131
-rw-r--r--  kernel/arch/x86/include/asm/setup_arch.h  3
-rw-r--r--  kernel/arch/x86/include/asm/shmparam.h  6
-rw-r--r--  kernel/arch/x86/include/asm/sigcontext.h  79
-rw-r--r--  kernel/arch/x86/include/asm/sigframe.h  83
-rw-r--r--  kernel/arch/x86/include/asm/sighandling.h  20
-rw-r--r--  kernel/arch/x86/include/asm/signal.h  117
-rw-r--r--  kernel/arch/x86/include/asm/simd.h  11
-rw-r--r--  kernel/arch/x86/include/asm/smap.h  79
-rw-r--r--  kernel/arch/x86/include/asm/smp.h  235
-rw-r--r--  kernel/arch/x86/include/asm/sparsemem.h  34
-rw-r--r--  kernel/arch/x86/include/asm/special_insns.h  231
-rw-r--r--  kernel/arch/x86/include/asm/spinlock.h  223
-rw-r--r--  kernel/arch/x86/include/asm/spinlock_types.h  39
-rw-r--r--  kernel/arch/x86/include/asm/sta2x11.h  12
-rw-r--r--  kernel/arch/x86/include/asm/stackprotector.h  129
-rw-r--r--  kernel/arch/x86/include/asm/stacktrace.h  118
-rw-r--r--  kernel/arch/x86/include/asm/string.h  5
-rw-r--r--  kernel/arch/x86/include/asm/string_32.h  342
-rw-r--r--  kernel/arch/x86/include/asm/string_64.h  84
-rw-r--r--  kernel/arch/x86/include/asm/suspend.h  5
-rw-r--r--  kernel/arch/x86/include/asm/suspend_32.h  27
-rw-r--r--  kernel/arch/x86/include/asm/suspend_64.h  48
-rw-r--r--  kernel/arch/x86/include/asm/svm.h  279
-rw-r--r--  kernel/arch/x86/include/asm/swiotlb.h  39
-rw-r--r--  kernel/arch/x86/include/asm/switch_to.h  133
-rw-r--r--  kernel/arch/x86/include/asm/sync_bitops.h  130
-rw-r--r--  kernel/arch/x86/include/asm/sys_ia32.h  55
-rw-r--r--  kernel/arch/x86/include/asm/syscall.h  243
-rw-r--r--  kernel/arch/x86/include/asm/syscalls.h  54
-rw-r--r--  kernel/arch/x86/include/asm/sysfb.h  98
-rw-r--r--  kernel/arch/x86/include/asm/tce.h  48
-rw-r--r--  kernel/arch/x86/include/asm/thread_info.h  303
-rw-r--r--  kernel/arch/x86/include/asm/time.h  12
-rw-r--r--  kernel/arch/x86/include/asm/timer.h  37
-rw-r--r--  kernel/arch/x86/include/asm/timex.h  12
-rw-r--r--  kernel/arch/x86/include/asm/tlb.h  18
-rw-r--r--  kernel/arch/x86/include/asm/tlbflush.h  269
-rw-r--r--  kernel/arch/x86/include/asm/topology.h  138
-rw-r--r--  kernel/arch/x86/include/asm/trace/exceptions.h  52
-rw-r--r--  kernel/arch/x86/include/asm/trace/irq_vectors.h  115
-rw-r--r--  kernel/arch/x86/include/asm/trace_clock.h  20
-rw-r--r--  kernel/arch/x86/include/asm/traps.h  144
-rw-r--r--  kernel/arch/x86/include/asm/tsc.h  71
-rw-r--r--  kernel/arch/x86/include/asm/uaccess.h  749
-rw-r--r--  kernel/arch/x86/include/asm/uaccess_32.h  189
-rw-r--r--  kernel/arch/x86/include/asm/uaccess_64.h  236
-rw-r--r--  kernel/arch/x86/include/asm/unaligned.h  14
-rw-r--r--  kernel/arch/x86/include/asm/unistd.h  55
-rw-r--r--  kernel/arch/x86/include/asm/uprobes.h  67
-rw-r--r--  kernel/arch/x86/include/asm/user.h  63
-rw-r--r--  kernel/arch/x86/include/asm/user32.h  70
-rw-r--r--  kernel/arch/x86/include/asm/user_32.h  131
-rw-r--r--  kernel/arch/x86/include/asm/user_64.h  137
-rw-r--r--  kernel/arch/x86/include/asm/uv/bios.h  114
-rw-r--r--  kernel/arch/x86/include/asm/uv/uv.h  35
-rw-r--r--  kernel/arch/x86/include/asm/uv/uv_bau.h  791
-rw-r--r--  kernel/arch/x86/include/asm/uv/uv_hub.h  680
-rw-r--r--  kernel/arch/x86/include/asm/uv/uv_irq.h  38
-rw-r--r--  kernel/arch/x86/include/asm/uv/uv_mmrs.h  2928
-rw-r--r--  kernel/arch/x86/include/asm/vdso.h  54
-rw-r--r--  kernel/arch/x86/include/asm/vga.h  20
-rw-r--r--  kernel/arch/x86/include/asm/vgtod.h  94
-rw-r--r--  kernel/arch/x86/include/asm/virtext.h  132
-rw-r--r--  kernel/arch/x86/include/asm/vm86.h  82
-rw-r--r--  kernel/arch/x86/include/asm/vmx.h  494
-rw-r--r--  kernel/arch/x86/include/asm/vsyscall.h  23
-rw-r--r--  kernel/arch/x86/include/asm/vvar.h  51
-rw-r--r--  kernel/arch/x86/include/asm/word-at-a-time.h  105
-rw-r--r--  kernel/arch/x86/include/asm/x2apic.h  49
-rw-r--r--  kernel/arch/x86/include/asm/x86_init.h  216
-rw-r--r--  kernel/arch/x86/include/asm/xcr.h  49
-rw-r--r--  kernel/arch/x86/include/asm/xen/cpuid.h  91
-rw-r--r--  kernel/arch/x86/include/asm/xen/events.h  23
-rw-r--r--  kernel/arch/x86/include/asm/xen/hypercall.h  603
-rw-r--r--  kernel/arch/x86/include/asm/xen/hypervisor.h  60
-rw-r--r--  kernel/arch/x86/include/asm/xen/interface.h  189
-rw-r--r--  kernel/arch/x86/include/asm/xen/interface_32.h  102
-rw-r--r--  kernel/arch/x86/include/asm/xen/interface_64.h  148
-rw-r--r--  kernel/arch/x86/include/asm/xen/page-coherent.h  38
-rw-r--r--  kernel/arch/x86/include/asm/xen/page.h  277
-rw-r--r--  kernel/arch/x86/include/asm/xen/pci.h  82
-rw-r--r--  kernel/arch/x86/include/asm/xen/swiotlb-xen.h  16
-rw-r--r--  kernel/arch/x86/include/asm/xen/trace_types.h  18
-rw-r--r--  kernel/arch/x86/include/asm/xor.h  499
-rw-r--r--  kernel/arch/x86/include/asm/xor_32.h  567
-rw-r--r--  kernel/arch/x86/include/asm/xor_64.h  27
-rw-r--r--  kernel/arch/x86/include/asm/xor_avx.h  184
-rw-r--r--  kernel/arch/x86/include/asm/xsave.h  257
321 files changed, 44532 insertions, 0 deletions
diff --git a/kernel/arch/x86/include/asm/Kbuild b/kernel/arch/x86/include/asm/Kbuild
new file mode 100644
index 000000000..d55a210a4
--- /dev/null
+++ b/kernel/arch/x86/include/asm/Kbuild
@@ -0,0 +1,12 @@
+
+
+genhdr-y += unistd_32.h
+genhdr-y += unistd_64.h
+genhdr-y += unistd_x32.h
+
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += dma-contiguous.h
+generic-y += early_ioremap.h
+generic-y += mcs_spinlock.h
+generic-y += scatterlist.h
diff --git a/kernel/arch/x86/include/asm/a.out-core.h b/kernel/arch/x86/include/asm/a.out-core.h
new file mode 100644
index 000000000..7a15588e4
--- /dev/null
+++ b/kernel/arch/x86/include/asm/a.out-core.h
@@ -0,0 +1,65 @@
+/* a.out coredump register dumper
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_X86_A_OUT_CORE_H
+#define _ASM_X86_A_OUT_CORE_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_X86_32
+
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <asm/debugreg.h>
+
+/*
+ * fill in the user structure for an a.out core dump
+ */
+static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
+{
+/* changed the size calculations - should hopefully work better. lbt */
+ dump->magic = CMAGIC;
+ dump->start_code = 0;
+ dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1)))
+ >> PAGE_SHIFT;
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_ssize = 0;
+ aout_dump_debugregs(dump);
+
+ if (dump->start_stack < TASK_SIZE)
+ dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
+ >> PAGE_SHIFT;
+
+ dump->regs.bx = regs->bx;
+ dump->regs.cx = regs->cx;
+ dump->regs.dx = regs->dx;
+ dump->regs.si = regs->si;
+ dump->regs.di = regs->di;
+ dump->regs.bp = regs->bp;
+ dump->regs.ax = regs->ax;
+ dump->regs.ds = (u16)regs->ds;
+ dump->regs.es = (u16)regs->es;
+ dump->regs.fs = (u16)regs->fs;
+ dump->regs.gs = get_user_gs(regs);
+ dump->regs.orig_ax = regs->orig_ax;
+ dump->regs.ip = regs->ip;
+ dump->regs.cs = (u16)regs->cs;
+ dump->regs.flags = regs->flags;
+ dump->regs.sp = regs->sp;
+ dump->regs.ss = (u16)regs->ss;
+
+ dump->u_fpvalid = dump_fpu(regs, &dump->i387);
+}
+
+#endif /* CONFIG_X86_32 */
+#endif /* __KERNEL__ */
+#endif /* _ASM_X86_A_OUT_CORE_H */
diff --git a/kernel/arch/x86/include/asm/acenv.h b/kernel/arch/x86/include/asm/acenv.h
new file mode 100644
index 000000000..1b010a859
--- /dev/null
+++ b/kernel/arch/x86/include/asm/acenv.h
@@ -0,0 +1,45 @@
+/*
+ * X86 specific ACPICA environments and implementation
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Author: Lv Zheng <lv.zheng@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_X86_ACENV_H
+#define _ASM_X86_ACENV_H
+
+#include <asm/special_insns.h>
+
+/* Asm macros */
+
+#define ACPI_FLUSH_CPU_CACHE() wbinvd()
+
+int __acpi_acquire_global_lock(unsigned int *lock);
+int __acpi_release_global_lock(unsigned int *lock);
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
+ ((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
+ ((Acq) = __acpi_release_global_lock(&facs->global_lock))
+
+/*
+ * Math helper asm macros
+ */
+#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
+ asm("divl %2;" \
+ : "=a"(q32), "=d"(r32) \
+ : "r"(d32), \
+ "0"(n_lo), "1"(n_hi))
+
+#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
+ asm("shrl $1,%2 ;" \
+ "rcrl $1,%3;" \
+ : "=r"(n_hi), "=r"(n_lo) \
+ : "0"(n_hi), "1"(n_lo))
+
+#endif /* _ASM_X86_ACENV_H */
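The ACPI_DIV_64_BY_32() and ACPI_SHIFT_RIGHT_64() macros above give ACPICA 64-bit divide and shift primitives that work on 32-bit x86 without relying on compiler runtime helpers. As a rough standalone sketch (not part of the patch; the wrapper name and prototype are made up for illustration), the division macro boils down to a single divl that divides the 64-bit value edx:eax by a 32-bit divisor:

static inline void div_64_by_32(unsigned int n_hi, unsigned int n_lo,
                                unsigned int d32,
                                unsigned int *q32, unsigned int *r32)
{
        unsigned int q, r;

        /*
         * divl divides edx:eax by the operand: quotient -> eax, remainder -> edx.
         * The "0"/"1" constraints preload n_lo/n_hi into eax/edx.  The quotient
         * must fit in 32 bits (n_hi < d32), otherwise the CPU raises #DE.
         */
        asm("divl %2"
            : "=a" (q), "=d" (r)
            : "r" (d32), "0" (n_lo), "1" (n_hi));

        *q32 = q;
        *r32 = r;
}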
diff --git a/kernel/arch/x86/include/asm/acpi.h b/kernel/arch/x86/include/asm/acpi.h
new file mode 100644
index 000000000..3a45668f6
--- /dev/null
+++ b/kernel/arch/x86/include/asm/acpi.h
@@ -0,0 +1,150 @@
+#ifndef _ASM_X86_ACPI_H
+#define _ASM_X86_ACPI_H
+
+/*
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <acpi/pdc_intel.h>
+
+#include <asm/numa.h>
+#include <asm/fixmap.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+#include <asm/mpspec.h>
+#include <asm/realmode.h>
+
+#ifdef CONFIG_ACPI
+extern int acpi_lapic;
+extern int acpi_ioapic;
+extern int acpi_noirq;
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+extern int acpi_skip_timer_override;
+extern int acpi_use_timer_override;
+extern int acpi_fix_pin2_polarity;
+extern int acpi_disable_cmcff;
+
+extern u8 acpi_sci_flags;
+extern int acpi_sci_override_gsi;
+void acpi_pic_sci_set_trigger(unsigned int, u16);
+
+extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
+ int trigger, int polarity);
+extern void (*__acpi_unregister_gsi)(u32 gsi);
+
+static inline void disable_acpi(void)
+{
+ acpi_disabled = 1;
+ acpi_pci_disabled = 1;
+ acpi_noirq = 1;
+}
+
+extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
+
+static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
+static inline void acpi_disable_pci(void)
+{
+ acpi_pci_disabled = 1;
+ acpi_noirq_set();
+}
+
+/* Low-level suspend routine. */
+extern int (*acpi_suspend_lowlevel)(void);
+
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
+
+/*
+ * Check if the CPU can handle C2 and deeper
+ */
+static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
+{
+ /*
+ * Early models (<=5) of AMD Opterons are not supposed to go into
+ * C2 state.
+ *
+ * Steppings 0x0A and later are good
+ */
+ if (boot_cpu_data.x86 == 0x0F &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86_model <= 0x05 &&
+ boot_cpu_data.x86_mask < 0x0A)
+ return 1;
+ else if (amd_e400_c1e_detected)
+ return 1;
+ else
+ return max_cstate;
+}
+
+static inline bool arch_has_acpi_pdc(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ return (c->x86_vendor == X86_VENDOR_INTEL ||
+ c->x86_vendor == X86_VENDOR_CENTAUR);
+}
+
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ buf[2] |= ACPI_PDC_C_CAPABILITY_SMP;
+
+ if (cpu_has(c, X86_FEATURE_EST))
+ buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
+
+ if (cpu_has(c, X86_FEATURE_ACPI))
+ buf[2] |= ACPI_PDC_T_FFH;
+
+ /*
+ * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
+ */
+ if (!cpu_has(c, X86_FEATURE_MWAIT))
+ buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
+}
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return !!acpi_lapic;
+}
+
+#else /* !CONFIG_ACPI */
+
+#define acpi_lapic 0
+#define acpi_ioapic 0
+#define acpi_disable_cmcff 0
+static inline void acpi_noirq_set(void) { }
+static inline void acpi_disable_pci(void) { }
+static inline void disable_acpi(void) { }
+
+#endif /* !CONFIG_ACPI */
+
+#define ARCH_HAS_POWER_INIT 1
+
+#ifdef CONFIG_ACPI_NUMA
+extern int acpi_numa;
+extern int x86_acpi_numa_init(void);
+#endif /* CONFIG_ACPI_NUMA */
+
+#define acpi_unlazy_tlb(x) leave_mm(x)
+
+#endif /* _ASM_X86_ACPI_H */
diff --git a/kernel/arch/x86/include/asm/agp.h b/kernel/arch/x86/include/asm/agp.h
new file mode 100644
index 000000000..eec2a70d4
--- /dev/null
+++ b/kernel/arch/x86/include/asm/agp.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_X86_AGP_H
+#define _ASM_X86_AGP_H
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Functions to keep the agpgart mappings coherent with the MMU. The
+ * GART gives the CPU a physical alias of pages in memory. The alias
+ * region is mapped uncacheable. Make sure there are no conflicting
+ * mappings with different cachability attributes for the same
+ * page. This avoids data corruption on some CPUs.
+ */
+
+#define map_page_into_agp(page) set_pages_uc(page, 1)
+#define unmap_page_from_agp(page) set_pages_wb(page, 1)
+
+/*
+ * Could use CLFLUSH here if the cpu supports it. But then it would
+ * need to be called for each cacheline of the whole page so it may
+ * not be worth it. Would need a page for it.
+ */
+#define flush_agp_cache() wbinvd()
+
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
+#endif /* _ASM_X86_AGP_H */
diff --git a/kernel/arch/x86/include/asm/alternative-asm.h b/kernel/arch/x86/include/asm/alternative-asm.h
new file mode 100644
index 000000000..bdf02eeee
--- /dev/null
+++ b/kernel/arch/x86/include/asm/alternative-asm.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_X86_ALTERNATIVE_ASM_H
+#define _ASM_X86_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm.h>
+
+#ifdef CONFIG_SMP
+ .macro LOCK_PREFIX
+672: lock
+ .pushsection .smp_locks,"a"
+ .balign 4
+ .long 672b - .
+ .popsection
+ .endm
+#else
+ .macro LOCK_PREFIX
+ .endm
+#endif
+
+.macro altinstruction_entry orig alt feature orig_len alt_len pad_len
+ .long \orig - .
+ .long \alt - .
+ .word \feature
+ .byte \orig_len
+ .byte \alt_len
+ .byte \pad_len
+.endm
+
+.macro ALTERNATIVE oldinstr, newinstr, feature
+140:
+ \oldinstr
+141:
+ .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
+142:
+
+ .pushsection .altinstructions,"a"
+ altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
+ .popsection
+
+ .pushsection .altinstr_replacement,"ax"
+143:
+ \newinstr
+144:
+ .popsection
+.endm
+
+#define old_len 141b-140b
+#define new_len1 144f-143f
+#define new_len2 145f-144f
+
+/*
+ * max without conditionals. Idea adapted from:
+ * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ */
+#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
+
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+140:
+ \oldinstr
+141:
+ .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
+ (alt_max_short(new_len1, new_len2) - (old_len)),0x90
+142:
+
+ .pushsection .altinstructions,"a"
+ altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b
+ altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
+ .popsection
+
+ .pushsection .altinstr_replacement,"ax"
+143:
+ \newinstr1
+144:
+ \newinstr2
+145:
+ .popsection
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
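The alt_max_short() expression above is the classic branchless max from the bithacks page cited in the comment; the double negation is needed because a true comparison evaluates to -1 in gas rather than 1 as in C. A minimal C sketch of the same trick (illustrative only, not part of the patch), where a single negation is enough to build the all-ones mask:

#include <assert.h>

/* If a < b the mask is all ones, so a ^ (a ^ b) == b; otherwise the mask is 0
 * and the result stays a. */
static unsigned int branchless_max(unsigned int a, unsigned int b)
{
        unsigned int mask = -(unsigned int)(a < b);

        return a ^ ((a ^ b) & mask);
}

int main(void)
{
        assert(branchless_max(3, 7) == 7);
        assert(branchless_max(7, 3) == 7);
        assert(branchless_max(5, 5) == 5);
        return 0;
}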
diff --git a/kernel/arch/x86/include/asm/alternative.h b/kernel/arch/x86/include/asm/alternative.h
new file mode 100644
index 000000000..ba32af062
--- /dev/null
+++ b/kernel/arch/x86/include/asm/alternative.h
@@ -0,0 +1,268 @@
+#ifndef _ASM_X86_ALTERNATIVE_H
+#define _ASM_X86_ALTERNATIVE_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+#include <asm/asm.h>
+#include <asm/ptrace.h>
+
+/*
+ * Alternative inline assembly for SMP.
+ *
+ * The LOCK_PREFIX macro defined here replaces the LOCK and
+ * LOCK_PREFIX macros used everywhere in the source tree.
+ *
+ * SMP alternatives use the same data structures as the other
+ * alternatives and the X86_FEATURE_UP flag to indicate the case of a
+ * UP system running a SMP kernel. The existing apply_alternatives()
+ * works fine for patching a SMP kernel for UP.
+ *
+ * The SMP alternative tables can be kept after boot and contain both
+ * UP and SMP versions of the instructions to allow switching back to
+ * SMP at runtime, when hotplugging in a new CPU, which is especially
+ * useful in virtualized environments.
+ *
+ * The very common lock prefix is handled as special case in a
+ * separate table which is a pure address list without replacement ptr
+ * and size information. That keeps the table sizes small.
+ */
+
+#ifdef CONFIG_SMP
+#define LOCK_PREFIX_HERE \
+ ".pushsection .smp_locks,\"a\"\n" \
+ ".balign 4\n" \
+ ".long 671f - .\n" /* offset */ \
+ ".popsection\n" \
+ "671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
+
+#else /* ! CONFIG_SMP */
+#define LOCK_PREFIX_HERE ""
+#define LOCK_PREFIX ""
+#endif
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 cpuid; /* cpuid bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+ u8 padlen; /* length of build-time padding */
+} __packed;
+
+extern void alternative_instructions(void);
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+struct module;
+
+#ifdef CONFIG_SMP
+extern void alternatives_smp_module_add(struct module *mod, char *name,
+ void *locks, void *locks_end,
+ void *text, void *text_end);
+extern void alternatives_smp_module_del(struct module *mod);
+extern void alternatives_enable_smp(void);
+extern int alternatives_text_reserved(void *start, void *end);
+extern bool skip_smp_alternatives;
+#else
+static inline void alternatives_smp_module_add(struct module *mod, char *name,
+ void *locks, void *locks_end,
+ void *text, void *text_end) {}
+static inline void alternatives_smp_module_del(struct module *mod) {}
+static inline void alternatives_enable_smp(void) {}
+static inline int alternatives_text_reserved(void *start, void *end)
+{
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+#define b_replacement(num) "664"#num
+#define e_replacement(num) "665"#num
+
+#define alt_end_marker "663"
+#define alt_slen "662b-661b"
+#define alt_pad_len alt_end_marker"b-662b"
+#define alt_total_slen alt_end_marker"b-661b"
+#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
+
+#define __OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \
+ "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n"
+
+#define OLDINSTR(oldinstr, num) \
+ __OLDINSTR(oldinstr, num) \
+ alt_end_marker ":\n"
+
+/*
+ * max without conditionals. Idea adapted from:
+ * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas works with s32s.
+ */
+#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
+
+/*
+ * Pad the second replacement alternative with additional NOPs if it is
+ * additionally longer than the first replacement alternative.
+ */
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \
+ "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \
+ alt_end_marker ":\n"
+
+#define ALTINSTR_ENTRY(feature, num) \
+ " .long 661b - .\n" /* label */ \
+ " .long " b_replacement(num)"f - .\n" /* new instruction */ \
+ " .word " __stringify(feature) "\n" /* feature bit */ \
+ " .byte " alt_total_slen "\n" /* source len */ \
+ " .byte " alt_rlen(num) "\n" /* replacement len */ \
+ " .byte " alt_pad_len "\n" /* pad len */
+
+#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
+ b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t"
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, newinstr, feature) \
+ OLDINSTR(oldinstr, 1) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature, 1) \
+ ".popsection\n" \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
+ ".popsection"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+ OLDINSTR_2(oldinstr, 1, 2) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
+ ".popsection\n" \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
+ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
+ ".popsection"
+
+/*
+ * This must be included *after* the definition of ALTERNATIVE due to
+ * <asm/arch_hweight.h>
+ */
+#include <asm/cpufeature.h>
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr must be at least as long as newinstr;
+ * it can be padded with nops as needed.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, newinstr, feature) \
+ asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
+
+#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")
+
+/*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like (%1) ... "r")
+ * If you use variable sized constraints like "m" or "g" in the
+ * replacement make sure to pad to the worst case length.
+ * Leaving an unused argument 0 to keep API compatibility.
+ */
+#define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
+ : : "i" (0), ## input)
+
+/*
+ * This is similar to alternative_input. But it has two features and
+ * respective instructions.
+ *
+ * If CPU has feature2, newinstr2 is used.
+ * Otherwise, if CPU has feature1, newinstr1 is used.
+ * Otherwise, oldinstr is used.
+ */
+#define alternative_input_2(oldinstr, newinstr1, feature1, newinstr2, \
+ feature2, input...) \
+ asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, \
+ newinstr2, feature2) \
+ : : "i" (0), ## input)
+
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, newinstr, feature, output, input...) \
+ asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
+ : output : "i" (0), ## input)
+
+/* Like alternative_io, but for replacing a direct call with another one. */
+#define alternative_call(oldfunc, newfunc, feature, output, input...) \
+ asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
+ : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+
+/*
+ * Like alternative_call, but there are two features and respective functions.
+ * If CPU has feature2, function2 is used.
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
+ output, input...) \
+ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+ "call %P[new2]", feature2) \
+ : output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+ [new2] "i" (newfunc2), ## input)
+
+/*
+ * use this macro(s) if you need more than one output parameter
+ * in alternative_io
+ */
+#define ASM_OUTPUT2(a...) a
+
+/*
+ * use this macro if you need clobbers but no inputs in
+ * alternative_{input,io,call}()
+ */
+#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+
+struct paravirt_patch_site;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch_site *start,
+ struct paravirt_patch_site *end);
+#else
+static inline void apply_paravirt(struct paravirt_patch_site *start,
+ struct paravirt_patch_site *end)
+{}
+#define __parainstructions NULL
+#define __parainstructions_end NULL
+#endif
+
+extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+
+/*
+ * Clear and restore the kernel write-protection flag on the local CPU.
+ * Allows the kernel to edit read-only pages.
+ * Side-effect: any interrupt handler running between save and restore will have
+ * the ability to write to read-only pages.
+ *
+ * Warning:
+ * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
+ * no thread can be preempted in the instructions being modified (no iret to an
+ * invalid instruction possible) or if the instructions are changed from a
+ * consistent state to another consistent state atomically.
+ * On the local CPU you need to be protected against NMI or MCE handlers seeing an
+ * inconsistent instruction while you patch.
+ */
+extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern int poke_int3_handler(struct pt_regs *regs);
+extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+
+#endif /* _ASM_X86_ALTERNATIVE_H */
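To see how these macros are consumed, consider a hedged sketch (not part of this file): a caller passes two instruction variants plus a CPU feature bit, and apply_alternatives() patches in the replacement at boot only on CPUs that report the feature. The 32-bit memory barrier in <asm/barrier.h>, also added by this series, is built this way; a minimal equivalent looks like:

/*
 * Sketch only: on CPUs with SSE2 the locked add is patched into a plain
 * mfence; on older CPUs the lock-prefixed add to the top of the stack
 * serves as the memory barrier.
 */
static inline void my_mb(void)
{
        alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
}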
diff --git a/kernel/arch/x86/include/asm/amd_nb.h b/kernel/arch/x86/include/asm/amd_nb.h
new file mode 100644
index 000000000..aaac3b2fb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/amd_nb.h
@@ -0,0 +1,110 @@
+#ifndef _ASM_X86_AMD_NB_H
+#define _ASM_X86_AMD_NB_H
+
+#include <linux/ioport.h>
+#include <linux/pci.h>
+
+struct amd_nb_bus_dev_range {
+ u8 bus;
+ u8 dev_base;
+ u8 dev_limit;
+};
+
+extern const struct pci_device_id amd_nb_misc_ids[];
+extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
+
+extern bool early_is_amd_nb(u32 value);
+extern struct resource *amd_get_mmconfig_range(struct resource *res);
+extern int amd_cache_northbridges(void);
+extern void amd_flush_garts(void);
+extern int amd_numa_init(void);
+extern int amd_get_subcaches(int);
+extern int amd_set_subcaches(int, unsigned long);
+
+struct amd_l3_cache {
+ unsigned indices;
+ u8 subcaches[4];
+};
+
+struct threshold_block {
+ unsigned int block;
+ unsigned int bank;
+ unsigned int cpu;
+ u32 address;
+ u16 interrupt_enable;
+ bool interrupt_capable;
+ u16 threshold_limit;
+ struct kobject kobj;
+ struct list_head miscj;
+};
+
+struct threshold_bank {
+ struct kobject *kobj;
+ struct threshold_block *blocks;
+
+ /* initialized to the number of CPUs on the node sharing this bank */
+ atomic_t cpus;
+};
+
+struct amd_northbridge {
+ struct pci_dev *misc;
+ struct pci_dev *link;
+ struct amd_l3_cache l3_cache;
+ struct threshold_bank *bank4;
+};
+
+struct amd_northbridge_info {
+ u16 num;
+ u64 flags;
+ struct amd_northbridge *nb;
+};
+extern struct amd_northbridge_info amd_northbridges;
+
+#define AMD_NB_GART BIT(0)
+#define AMD_NB_L3_INDEX_DISABLE BIT(1)
+#define AMD_NB_L3_PARTITIONING BIT(2)
+
+#ifdef CONFIG_AMD_NB
+
+static inline u16 amd_nb_num(void)
+{
+ return amd_northbridges.num;
+}
+
+static inline bool amd_nb_has_feature(unsigned feature)
+{
+ return ((amd_northbridges.flags & feature) == feature);
+}
+
+static inline struct amd_northbridge *node_to_amd_nb(int node)
+{
+ return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
+}
+
+static inline u16 amd_get_node_id(struct pci_dev *pdev)
+{
+ struct pci_dev *misc;
+ int i;
+
+ for (i = 0; i != amd_nb_num(); i++) {
+ misc = node_to_amd_nb(i)->misc;
+
+ if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
+ PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
+ return i;
+ }
+
+ WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
+ return 0;
+}
+
+#else
+
+#define amd_nb_num(x) 0
+#define amd_nb_has_feature(x) false
+#define node_to_amd_nb(x) NULL
+
+#endif
+
+
+#endif /* _ASM_X86_AMD_NB_H */
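A short, hypothetical consumer of this interface (not part of the patch), assuming amd_cache_northbridges() has already populated amd_northbridges during boot:

static void dump_amd_northbridges(void)
{
        int i;

        for (i = 0; i < amd_nb_num(); i++) {
                struct amd_northbridge *nb = node_to_amd_nb(i);

                /* ->misc is the PCI function used for most NB accesses */
                if (nb && nb->misc)
                        pr_info("AMD NB %d: misc device %s\n",
                                i, pci_name(nb->misc));
        }

        if (amd_nb_has_feature(AMD_NB_GART))
                pr_info("GART support detected\n");
}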
diff --git a/kernel/arch/x86/include/asm/apb_timer.h b/kernel/arch/x86/include/asm/apb_timer.h
new file mode 100644
index 000000000..0acbac299
--- /dev/null
+++ b/kernel/arch/x86/include/asm/apb_timer.h
@@ -0,0 +1,49 @@
+/*
+ * apb_timer.h: Driver for Langwell APB timer based on Synopsis DesignWare
+ *
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * Note:
+ */
+
+#ifndef ASM_X86_APBT_H
+#define ASM_X86_APBT_H
+#include <linux/sfi.h>
+
+#ifdef CONFIG_APB_TIMER
+
+/* default memory mapped register base */
+#define LNW_SCU_ADDR 0xFF100000
+#define LNW_EXT_TIMER_OFFSET 0x1B800
+#define APBT_DEFAULT_BASE (LNW_SCU_ADDR+LNW_EXT_TIMER_OFFSET)
+#define LNW_EXT_TIMER_PGOFFSET 0x800
+
+/* APBT clock speed range from PCLK to fabric base, 25-100MHz */
+#define APBT_MAX_FREQ 50000000
+#define APBT_MIN_FREQ 1000000
+#define APBT_MMAP_SIZE 1024
+
+#define APBT_DEV_USED 1
+
+extern void apbt_time_init(void);
+extern unsigned long apbt_quick_calibrate(void);
+extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
+extern void apbt_setup_secondary_clock(void);
+
+extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint);
+extern void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr);
+extern int sfi_mtimer_num;
+
+#else /* CONFIG_APB_TIMER */
+
+static inline unsigned long apbt_quick_calibrate(void) {return 0; }
+static inline void apbt_time_init(void) { }
+
+#endif
+#endif /* ASM_X86_APBT_H */
diff --git a/kernel/arch/x86/include/asm/apic.h b/kernel/arch/x86/include/asm/apic.h
new file mode 100644
index 000000000..976b86a32
--- /dev/null
+++ b/kernel/arch/x86/include/asm/apic.h
@@ -0,0 +1,661 @@
+#ifndef _ASM_X86_APIC_H
+#define _ASM_X86_APIC_H
+
+#include <linux/cpumask.h>
+#include <linux/pm.h>
+
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/apicdef.h>
+#include <linux/atomic.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#include <asm/msr.h>
+#include <asm/idle.h>
+
+#define ARCH_APICTIMER_STOPS_ON_C3 1
+
+/*
+ * Debugging macros
+ */
+#define APIC_QUIET 0
+#define APIC_VERBOSE 1
+#define APIC_DEBUG 2
+
+/*
+ * Define the default level of output to be very little
+ * This can be turned up by using apic=verbose for more
+ * information and apic=debug for _lots_ of information.
+ * apic_verbosity is defined in apic.c
+ */
+#define apic_printk(v, s, a...) do { \
+ if ((v) <= apic_verbosity) \
+ printk(s, ##a); \
+ } while (0)
+
+
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
+extern void generic_apic_probe(void);
+#else
+static inline void generic_apic_probe(void)
+{
+}
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+extern unsigned int apic_verbosity;
+extern int local_apic_timer_c2_ok;
+
+extern int disable_apic;
+extern unsigned int lapic_timer_frequency;
+
+#ifdef CONFIG_SMP
+extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
+
+static inline void default_inquire_remote_apic(int apicid)
+{
+ if (apic_verbosity >= APIC_DEBUG)
+ __inquire_remote_apic(apicid);
+}
+
+/*
+ * With the 82489DX we can't rely on the APIC feature bit
+ * retrieved via cpuid, but we still have to deal with
+ * such an APIC chip, so we assume that the SMP configuration
+ * is found in the MP table (the 64-bit case mostly uses ACPI,
+ * which sets the SMP presence flag as well, so this helper is
+ * safe to use there too).
+ */
+static inline bool apic_from_smp_config(void)
+{
+ return smp_found_config && !disable_apic;
+}
+
+/*
+ * Basic functions accessing APICs.
+ */
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
+
+extern int setup_profiling_timer(unsigned int);
+
+static inline void native_apic_mem_write(u32 reg, u32 v)
+{
+ volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
+
+ alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
+ ASM_OUTPUT2("=r" (v), "=m" (*addr)),
+ ASM_OUTPUT2("0" (v), "m" (*addr)));
+}
+
+static inline u32 native_apic_mem_read(u32 reg)
+{
+ return *((volatile u32 *)(APIC_BASE + reg));
+}
+
+extern void native_apic_wait_icr_idle(void);
+extern u32 native_safe_apic_wait_icr_idle(void);
+extern void native_apic_icr_write(u32 low, u32 id);
+extern u64 native_apic_icr_read(void);
+
+static inline bool apic_is_x2apic_enabled(void)
+{
+ u64 msr;
+
+ if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
+ return false;
+ return msr & X2APIC_ENABLE;
+}
+
+#ifdef CONFIG_X86_X2APIC
+/*
+ * Make previous memory operations globally visible before
+ * sending the IPI through x2apic wrmsr. We need a serializing instruction or
+ * mfence for this.
+ */
+static inline void x2apic_wrmsr_fence(void)
+{
+ asm volatile("mfence" : : : "memory");
+}
+
+static inline void native_apic_msr_write(u32 reg, u32 v)
+{
+ if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
+ reg == APIC_LVR)
+ return;
+
+ wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
+}
+
+static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
+{
+ wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+}
+
+static inline u32 native_apic_msr_read(u32 reg)
+{
+ u64 msr;
+
+ if (reg == APIC_DFR)
+ return -1;
+
+ rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+ return (u32)msr;
+}
+
+static inline void native_x2apic_wait_icr_idle(void)
+{
+ /* no need to wait for icr idle in x2apic */
+ return;
+}
+
+static inline u32 native_safe_x2apic_wait_icr_idle(void)
+{
+ /* no need to wait for icr idle in x2apic */
+ return 0;
+}
+
+static inline void native_x2apic_icr_write(u32 low, u32 id)
+{
+ wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+}
+
+static inline u64 native_x2apic_icr_read(void)
+{
+ unsigned long val;
+
+ rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+ return val;
+}
+
+extern int x2apic_mode;
+extern int x2apic_phys;
+extern void __init check_x2apic(void);
+extern void x2apic_setup(void);
+static inline int x2apic_enabled(void)
+{
+ return cpu_has_x2apic && apic_is_x2apic_enabled();
+}
+
+#define x2apic_supported() (cpu_has_x2apic)
+#else
+static inline void check_x2apic(void) { }
+static inline void x2apic_setup(void) { }
+static inline int x2apic_enabled(void) { return 0; }
+
+#define x2apic_mode (0)
+#define x2apic_supported() (0)
+#endif
+
+extern void enable_IR_x2apic(void);
+
+extern int get_physical_broadcast(void);
+
+extern int lapic_get_maxlvt(void);
+extern void clear_local_APIC(void);
+extern void disconnect_bsp_APIC(int virt_wire_setup);
+extern void disable_local_APIC(void);
+extern void lapic_shutdown(void);
+extern void sync_Arb_IDs(void);
+extern void init_bsp_APIC(void);
+extern void setup_local_APIC(void);
+extern void init_apic_mappings(void);
+void register_lapic_address(unsigned long address);
+extern void setup_boot_APIC_clock(void);
+extern void setup_secondary_APIC_clock(void);
+extern int APIC_init_uniprocessor(void);
+
+#ifdef CONFIG_X86_64
+static inline int apic_force_enable(unsigned long addr)
+{
+ return -1;
+}
+#else
+extern int apic_force_enable(unsigned long addr);
+#endif
+
+extern int apic_bsp_setup(bool upmode);
+extern void apic_ap_setup(void);
+
+/*
+ * On 32bit this is mach-xxx local
+ */
+#ifdef CONFIG_X86_64
+extern int apic_is_clustered_box(void);
+#else
+static inline int apic_is_clustered_box(void)
+{
+ return 0;
+}
+#endif
+
+extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+
+#else /* !CONFIG_X86_LOCAL_APIC */
+static inline void lapic_shutdown(void) { }
+#define local_apic_timer_c2_ok 1
+static inline void init_apic_mappings(void) { }
+static inline void disable_local_APIC(void) { }
+# define setup_boot_APIC_clock x86_init_noop
+# define setup_secondary_APIC_clock x86_init_noop
+#endif /* !CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_X86_64
+#define SET_APIC_ID(x) (apic->set_apic_id(x))
+#else
+
+#endif
+
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch data struct.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+struct apic {
+ char *name;
+
+ int (*probe)(void);
+ int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+ int (*apic_id_valid)(int apicid);
+ int (*apic_id_registered)(void);
+
+ u32 irq_delivery_mode;
+ u32 irq_dest_mode;
+
+ const struct cpumask *(*target_cpus)(void);
+
+ int disable_esr;
+
+ int dest_logical;
+ unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
+
+ void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask);
+ void (*init_apic_ldr)(void);
+
+ void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
+
+ void (*setup_apic_routing)(void);
+ int (*cpu_present_to_apicid)(int mps_cpu);
+ void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
+ int (*check_phys_apicid_present)(int phys_apicid);
+ int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+
+ unsigned int (*get_apic_id)(unsigned long x);
+ unsigned long (*set_apic_id)(unsigned int id);
+ unsigned long apic_id_mask;
+
+ int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid);
+
+ /* ipi */
+ void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+ void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+ int vector);
+ void (*send_IPI_allbutself)(int vector);
+ void (*send_IPI_all)(int vector);
+ void (*send_IPI_self)(int vector);
+
+ /* wakeup_secondary_cpu */
+ int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
+
+ bool wait_for_init_deassert;
+ void (*inquire_remote_apic)(int apicid);
+
+ /* apic ops */
+ u32 (*read)(u32 reg);
+ void (*write)(u32 reg, u32 v);
+ /*
+ * ->eoi_write() has the same signature as ->write().
+ *
+ * Drivers can support both ->eoi_write() and ->write() by passing the same
+ * callback value. Kernel can override ->eoi_write() and fall back
+ * on write for EOI.
+ */
+ void (*eoi_write)(u32 reg, u32 v);
+ u64 (*icr_read)(void);
+ void (*icr_write)(u32 low, u32 high);
+ void (*wait_icr_idle)(void);
+ u32 (*safe_wait_icr_idle)(void);
+
+#ifdef CONFIG_X86_32
+ /*
+ * Called very early during boot from get_smp_config(). It should
+ * return the logical apicid. x86_[bios]_cpu_to_apicid is
+ * initialized before this function is called.
+ *
+ * If logical apicid can't be determined that early, the function
+ * may return BAD_APICID. Logical apicid will be configured after
+ * init_apic_ldr() while bringing up CPUs. Note that NUMA affinity
+ * won't be applied properly during early boot in this case.
+ */
+ int (*x86_32_early_logical_apicid)(int cpu);
+#endif
+};
+
+/*
+ * Pointer to the local APIC driver in use on this system (there's
+ * always just one such driver in use - the kernel decides via an
+ * early probing process which one it picks - and then sticks to it):
+ */
+extern struct apic *apic;
+
+/*
+ * APIC drivers are probed based on how they are listed in the .apicdrivers
+ * section. So the order is important and enforced by the ordering
+ * of different apic driver files in the Makefile.
+ *
+ * For files that provide two apic drivers, we use apic_drivers()
+ * to enforce the order within them.
+ */
+#define apic_driver(sym) \
+ static const struct apic *__apicdrivers_##sym __used \
+ __aligned(sizeof(struct apic *)) \
+ __section(.apicdrivers) = { &sym }
+
+#define apic_drivers(sym1, sym2) \
+ static struct apic *__apicdrivers_##sym1##sym2[2] __used \
+ __aligned(sizeof(struct apic *)) \
+ __section(.apicdrivers) = { &sym1, &sym2 }
+
+extern struct apic *__apicdrivers[], *__apicdrivers_end[];
+
+/*
+ * APIC functionality to boot other CPUs - only used on SMP:
+ */
+#ifdef CONFIG_SMP
+extern atomic_t init_deasserted;
+extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+static inline u32 apic_read(u32 reg)
+{
+ return apic->read(reg);
+}
+
+static inline void apic_write(u32 reg, u32 val)
+{
+ apic->write(reg, val);
+}
+
+static inline void apic_eoi(void)
+{
+ apic->eoi_write(APIC_EOI, APIC_EOI_ACK);
+}
+
+static inline u64 apic_icr_read(void)
+{
+ return apic->icr_read();
+}
+
+static inline void apic_icr_write(u32 low, u32 high)
+{
+ apic->icr_write(low, high);
+}
+
+static inline void apic_wait_icr_idle(void)
+{
+ apic->wait_icr_idle();
+}
+
+static inline u32 safe_apic_wait_icr_idle(void)
+{
+ return apic->safe_wait_icr_idle();
+}
+
+extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));
+
+#else /* CONFIG_X86_LOCAL_APIC */
+
+static inline u32 apic_read(u32 reg) { return 0; }
+static inline void apic_write(u32 reg, u32 val) { }
+static inline void apic_eoi(void) { }
+static inline u64 apic_icr_read(void) { return 0; }
+static inline void apic_icr_write(u32 low, u32 high) { }
+static inline void apic_wait_icr_idle(void) { }
+static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+static inline void ack_APIC_irq(void)
+{
+ /*
+ * ack_APIC_irq() actually gets compiled as a single instruction
+ * ... yummie.
+ */
+ apic_eoi();
+}
+
+static inline unsigned default_get_apic_id(unsigned long x)
+{
+ unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+ if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID))
+ return (x >> 24) & 0xFF;
+ else
+ return (x >> 24) & 0x0F;
+}
+
+/*
+ * Warm reset vector position:
+ */
+#define TRAMPOLINE_PHYS_LOW 0x467
+#define TRAMPOLINE_PHYS_HIGH 0x469
+
+#ifdef CONFIG_X86_64
+extern void apic_send_IPI_self(int vector);
+
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int phys_apicid);
+#endif
+
+extern void generic_bigsmp_probe(void);
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+static inline const struct cpumask *default_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+ return cpu_online_mask;
+#else
+ return cpumask_of(0);
+#endif
+}
+
+static inline const struct cpumask *online_target_cpus(void)
+{
+ return cpu_online_mask;
+}
+
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
+
+
+static inline unsigned int read_apic_id(void)
+{
+ unsigned int reg;
+
+ reg = apic_read(APIC_ID);
+
+ return apic->get_apic_id(reg);
+}
+
+static inline int default_apic_id_valid(int apicid)
+{
+ return (apicid < 255);
+}
+
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void default_setup_apic_routing(void);
+
+extern struct apic apic_noop;
+
+#ifdef CONFIG_X86_32
+
+static inline int noop_x86_32_early_logical_apicid(int cpu)
+{
+ return BAD_APICID;
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116). So here it goes...
+ */
+extern void default_init_apic_ldr(void);
+
+static inline int default_apic_id_registered(void)
+{
+ return physid_isset(read_apic_id(), phys_cpu_present_map);
+}
+
+static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+ return cpuid_apic >> index_msb;
+}
+
+#endif
+
+static inline int
+flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid)
+{
+ unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
+ cpumask_bits(andmask)[0] &
+ cpumask_bits(cpu_online_mask)[0] &
+ APIC_ALL_CPUS;
+
+ if (likely(cpu_mask)) {
+ *apicid = (unsigned int)cpu_mask;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+extern int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid);
+
+static inline void
+flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
+{
+ /* Careful. Some cpus do not strictly honor the set of cpus
+ * specified in the interrupt destination when using lowest
+ * priority interrupt delivery mode.
+ *
+ * In particular there was a hyperthreading cpu observed to
+ * deliver interrupts to the wrong hyperthread when only one
+ * hyperthread was specified in the interrupt destination.
+ */
+ cpumask_clear(retmask);
+ cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+}
+
+static inline void
+default_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
+{
+ cpumask_copy(retmask, cpumask_of(cpu));
+}
+
+static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
+{
+ return physid_isset(apicid, *map);
+}
+
+static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
+{
+ *retmap = *phys_map;
+}
+
+static inline int __default_cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
+ return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+ else
+ return BAD_APICID;
+}
+
+static inline int
+__default_check_phys_apicid_present(int phys_apicid)
+{
+ return physid_isset(phys_apicid, phys_cpu_present_map);
+}
+
+#ifdef CONFIG_X86_32
+static inline int default_cpu_present_to_apicid(int mps_cpu)
+{
+ return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+static inline int
+default_check_phys_apicid_present(int phys_apicid)
+{
+ return __default_check_phys_apicid_present(phys_apicid);
+}
+#else
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int phys_apicid);
+#endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+extern void irq_enter(void);
+extern void irq_exit(void);
+
+static inline void entering_irq(void)
+{
+ irq_enter();
+ exit_idle();
+}
+
+static inline void entering_ack_irq(void)
+{
+ ack_APIC_irq();
+ entering_irq();
+}
+
+static inline void exiting_irq(void)
+{
+ irq_exit();
+}
+
+static inline void exiting_ack_irq(void)
+{
+ irq_exit();
+ /* Ack only at the end to avoid potential reentry */
+ ack_APIC_irq();
+}
+
+extern void ioapic_zap_locks(void);
+
+#endif /* _ASM_X86_APIC_H */
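Editor's note: the entering_ack_irq()/exiting_irq() pair above is the standard bracket around an APIC-driven interrupt body. A minimal sketch of how a vector handler would use it (the handler name and its body are hypothetical, not part of this patch):

__visible void example_vector_interrupt(struct pt_regs *regs)
{
	entering_ack_irq();	/* ack_APIC_irq() + irq_enter() + exit_idle() */
	/* per-vector work would run here */
	exiting_irq();		/* irq_exit() */
}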
diff --git a/kernel/arch/x86/include/asm/apic_flat_64.h b/kernel/arch/x86/include/asm/apic_flat_64.h
new file mode 100644
index 000000000..a2d312796
--- /dev/null
+++ b/kernel/arch/x86/include/asm/apic_flat_64.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_APIC_FLAT_64_H
+#define _ASM_X86_APIC_FLAT_64_H
+
+extern void flat_init_apic_ldr(void);
+
+#endif
+
diff --git a/kernel/arch/x86/include/asm/apicdef.h b/kernel/arch/x86/include/asm/apicdef.h
new file mode 100644
index 000000000..c46bb99d5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/apicdef.h
@@ -0,0 +1,445 @@
+#ifndef _ASM_X86_APICDEF_H
+#define _ASM_X86_APICDEF_H
+
+/*
+ * Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
+ *
+ * Alan Cox <Alan.Cox@linux.org>, 1995.
+ * Ingo Molnar <mingo@redhat.com>, 1999, 2000
+ */
+
+#define IO_APIC_DEFAULT_PHYS_BASE 0xfec00000
+#define APIC_DEFAULT_PHYS_BASE 0xfee00000
+
+/*
+ * This is the IO-APIC register space as specified
+ * by Intel docs:
+ */
+#define IO_APIC_SLOT_SIZE 1024
+
+#define APIC_ID 0x20
+
+#define APIC_LVR 0x30
+#define APIC_LVR_MASK 0xFF00FF
+#define APIC_LVR_DIRECTED_EOI (1 << 24)
+#define GET_APIC_VERSION(x) ((x) & 0xFFu)
+#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
+#ifdef CONFIG_X86_32
+# define APIC_INTEGRATED(x) ((x) & 0xF0u)
+#else
+# define APIC_INTEGRATED(x) (1)
+#endif
+#define APIC_XAPIC(x) ((x) >= 0x14)
+#define APIC_EXT_SPACE(x) ((x) & 0x80000000)
+#define APIC_TASKPRI 0x80
+#define APIC_TPRI_MASK 0xFFu
+#define APIC_ARBPRI 0x90
+#define APIC_ARBPRI_MASK 0xFFu
+#define APIC_PROCPRI 0xA0
+#define APIC_EOI 0xB0
+#define APIC_EOI_ACK 0x0 /* Docs say 0 for future compat. */
+#define APIC_RRR 0xC0
+#define APIC_LDR 0xD0
+#define APIC_LDR_MASK (0xFFu << 24)
+#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu)
+#define SET_APIC_LOGICAL_ID(x) (((x) << 24))
+#define APIC_ALL_CPUS 0xFFu
+#define APIC_DFR 0xE0
+#define APIC_DFR_CLUSTER 0x0FFFFFFFul
+#define APIC_DFR_FLAT 0xFFFFFFFFul
+#define APIC_SPIV 0xF0
+#define APIC_SPIV_DIRECTED_EOI (1 << 12)
+#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
+#define APIC_SPIV_APIC_ENABLED (1 << 8)
+#define APIC_ISR 0x100
+#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
+#define APIC_TMR 0x180
+#define APIC_IRR 0x200
+#define APIC_ESR 0x280
+#define APIC_ESR_SEND_CS 0x00001
+#define APIC_ESR_RECV_CS 0x00002
+#define APIC_ESR_SEND_ACC 0x00004
+#define APIC_ESR_RECV_ACC 0x00008
+#define APIC_ESR_SENDILL 0x00020
+#define APIC_ESR_RECVILL 0x00040
+#define APIC_ESR_ILLREGA 0x00080
+#define APIC_LVTCMCI 0x2f0
+#define APIC_ICR 0x300
+#define APIC_DEST_SELF 0x40000
+#define APIC_DEST_ALLINC 0x80000
+#define APIC_DEST_ALLBUT 0xC0000
+#define APIC_ICR_RR_MASK 0x30000
+#define APIC_ICR_RR_INVALID 0x00000
+#define APIC_ICR_RR_INPROG 0x10000
+#define APIC_ICR_RR_VALID 0x20000
+#define APIC_INT_LEVELTRIG 0x08000
+#define APIC_INT_ASSERT 0x04000
+#define APIC_ICR_BUSY 0x01000
+#define APIC_DEST_LOGICAL 0x00800
+#define APIC_DEST_PHYSICAL 0x00000
+#define APIC_DM_FIXED 0x00000
+#define APIC_DM_FIXED_MASK 0x00700
+#define APIC_DM_LOWEST 0x00100
+#define APIC_DM_SMI 0x00200
+#define APIC_DM_REMRD 0x00300
+#define APIC_DM_NMI 0x00400
+#define APIC_DM_INIT 0x00500
+#define APIC_DM_STARTUP 0x00600
+#define APIC_DM_EXTINT 0x00700
+#define APIC_VECTOR_MASK 0x000FF
+#define APIC_ICR2 0x310
+#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)
+#define SET_APIC_DEST_FIELD(x) ((x) << 24)
+#define APIC_LVTT 0x320
+#define APIC_LVTTHMR 0x330
+#define APIC_LVTPC 0x340
+#define APIC_LVT0 0x350
+#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18)
+#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3)
+#define SET_APIC_TIMER_BASE(x) (((x) << 18))
+#define APIC_TIMER_BASE_CLKIN 0x0
+#define APIC_TIMER_BASE_TMBASE 0x1
+#define APIC_TIMER_BASE_DIV 0x2
+#define APIC_LVT_TIMER_ONESHOT (0 << 17)
+#define APIC_LVT_TIMER_PERIODIC (1 << 17)
+#define APIC_LVT_TIMER_TSCDEADLINE (2 << 17)
+#define APIC_LVT_MASKED (1 << 16)
+#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
+#define APIC_LVT_REMOTE_IRR (1 << 14)
+#define APIC_INPUT_POLARITY (1 << 13)
+#define APIC_SEND_PENDING (1 << 12)
+#define APIC_MODE_MASK 0x700
+#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7)
+#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8))
+#define APIC_MODE_FIXED 0x0
+#define APIC_MODE_NMI 0x4
+#define APIC_MODE_EXTINT 0x7
+#define APIC_LVT1 0x360
+#define APIC_LVTERR 0x370
+#define APIC_TMICT 0x380
+#define APIC_TMCCT 0x390
+#define APIC_TDCR 0x3E0
+#define APIC_SELF_IPI 0x3F0
+#define APIC_TDR_DIV_TMBASE (1 << 2)
+#define APIC_TDR_DIV_1 0xB
+#define APIC_TDR_DIV_2 0x0
+#define APIC_TDR_DIV_4 0x1
+#define APIC_TDR_DIV_8 0x2
+#define APIC_TDR_DIV_16 0x3
+#define APIC_TDR_DIV_32 0x8
+#define APIC_TDR_DIV_64 0x9
+#define APIC_TDR_DIV_128 0xA
+#define APIC_EFEAT 0x400
+#define APIC_ECTRL 0x410
+#define APIC_EILVTn(n) (0x500 + 0x10 * (n))
+#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
+#define APIC_EILVT_NR_AMD_10H 4
+#define APIC_EILVT_NR_MAX APIC_EILVT_NR_AMD_10H
+#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
+#define APIC_EILVT_MSG_FIX 0x0
+#define APIC_EILVT_MSG_SMI 0x2
+#define APIC_EILVT_MSG_NMI 0x4
+#define APIC_EILVT_MSG_EXT 0x7
+#define APIC_EILVT_MASKED (1 << 16)
+
+#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
+#define APIC_BASE_MSR 0x800
+#define XAPIC_ENABLE (1UL << 11)
+#define X2APIC_ENABLE (1UL << 10)
+
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+# define MAX_LOCAL_APIC 256
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+/*
+ * All x86-64 systems are xAPIC compatible.
+ * In the following, "apicid" is a physical APIC ID.
+ */
+#define XAPIC_DEST_CPUS_SHIFT 4
+#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
+#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
+#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
+#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
+#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
+#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
+
+/*
+ * The local APIC register structure, memory mapped. Not terribly well
+ * tested, but we might eventually use this one in the future - the
+ * reason we cannot use it right now is the P5 APIC: it has an erratum
+ * and cannot take 8-bit reads and writes, only 32-bit ones ...
+ */
+#define u32 unsigned int
+
+struct local_apic {
+
+/*000*/ struct { u32 __reserved[4]; } __reserved_01;
+
+/*010*/ struct { u32 __reserved[4]; } __reserved_02;
+
+/*020*/ struct { /* APIC ID Register */
+ u32 __reserved_1 : 24,
+ phys_apic_id : 4,
+ __reserved_2 : 4;
+ u32 __reserved[3];
+ } id;
+
+/*030*/ const
+ struct { /* APIC Version Register */
+ u32 version : 8,
+ __reserved_1 : 8,
+ max_lvt : 8,
+ __reserved_2 : 8;
+ u32 __reserved[3];
+ } version;
+
+/*040*/ struct { u32 __reserved[4]; } __reserved_03;
+
+/*050*/ struct { u32 __reserved[4]; } __reserved_04;
+
+/*060*/ struct { u32 __reserved[4]; } __reserved_05;
+
+/*070*/ struct { u32 __reserved[4]; } __reserved_06;
+
+/*080*/ struct { /* Task Priority Register */
+ u32 priority : 8,
+ __reserved_1 : 24;
+ u32 __reserved_2[3];
+ } tpr;
+
+/*090*/ const
+ struct { /* Arbitration Priority Register */
+ u32 priority : 8,
+ __reserved_1 : 24;
+ u32 __reserved_2[3];
+ } apr;
+
+/*0A0*/ const
+ struct { /* Processor Priority Register */
+ u32 priority : 8,
+ __reserved_1 : 24;
+ u32 __reserved_2[3];
+ } ppr;
+
+/*0B0*/ struct { /* End Of Interrupt Register */
+ u32 eoi;
+ u32 __reserved[3];
+ } eoi;
+
+/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
+
+/*0D0*/ struct { /* Logical Destination Register */
+ u32 __reserved_1 : 24,
+ logical_dest : 8;
+ u32 __reserved_2[3];
+ } ldr;
+
+/*0E0*/ struct { /* Destination Format Register */
+ u32 __reserved_1 : 28,
+ model : 4;
+ u32 __reserved_2[3];
+ } dfr;
+
+/*0F0*/ struct { /* Spurious Interrupt Vector Register */
+ u32 spurious_vector : 8,
+ apic_enabled : 1,
+ focus_cpu : 1,
+ __reserved_2 : 22;
+ u32 __reserved_3[3];
+ } svr;
+
+/*100*/ struct { /* In Service Register */
+/*170*/ u32 bitfield;
+ u32 __reserved[3];
+ } isr [8];
+
+/*180*/ struct { /* Trigger Mode Register */
+/*1F0*/ u32 bitfield;
+ u32 __reserved[3];
+ } tmr [8];
+
+/*200*/ struct { /* Interrupt Request Register */
+/*270*/ u32 bitfield;
+ u32 __reserved[3];
+ } irr [8];
+
+/*280*/ union { /* Error Status Register */
+ struct {
+ u32 send_cs_error : 1,
+ receive_cs_error : 1,
+ send_accept_error : 1,
+ receive_accept_error : 1,
+ __reserved_1 : 1,
+ send_illegal_vector : 1,
+ receive_illegal_vector : 1,
+ illegal_register_address : 1,
+ __reserved_2 : 24;
+ u32 __reserved_3[3];
+ } error_bits;
+ struct {
+ u32 errors;
+ u32 __reserved_3[3];
+ } all_errors;
+ } esr;
+
+/*290*/ struct { u32 __reserved[4]; } __reserved_08;
+
+/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
+
+/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
+
+/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
+
+/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
+
+/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
+
+/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
+
+/*300*/ struct { /* Interrupt Command Register 1 */
+ u32 vector : 8,
+ delivery_mode : 3,
+ destination_mode : 1,
+ delivery_status : 1,
+ __reserved_1 : 1,
+ level : 1,
+ trigger : 1,
+ __reserved_2 : 2,
+ shorthand : 2,
+ __reserved_3 : 12;
+ u32 __reserved_4[3];
+ } icr1;
+
+/*310*/ struct { /* Interrupt Command Register 2 */
+ union {
+ u32 __reserved_1 : 24,
+ phys_dest : 4,
+ __reserved_2 : 4;
+ u32 __reserved_3 : 24,
+ logical_dest : 8;
+ } dest;
+ u32 __reserved_4[3];
+ } icr2;
+
+/*320*/ struct { /* LVT - Timer */
+ u32 vector : 8,
+ __reserved_1 : 4,
+ delivery_status : 1,
+ __reserved_2 : 3,
+ mask : 1,
+ timer_mode : 1,
+ __reserved_3 : 14;
+ u32 __reserved_4[3];
+ } lvt_timer;
+
+/*330*/ struct { /* LVT - Thermal Sensor */
+ u32 vector : 8,
+ delivery_mode : 3,
+ __reserved_1 : 1,
+ delivery_status : 1,
+ __reserved_2 : 3,
+ mask : 1,
+ __reserved_3 : 15;
+ u32 __reserved_4[3];
+ } lvt_thermal;
+
+/*340*/ struct { /* LVT - Performance Counter */
+ u32 vector : 8,
+ delivery_mode : 3,
+ __reserved_1 : 1,
+ delivery_status : 1,
+ __reserved_2 : 3,
+ mask : 1,
+ __reserved_3 : 15;
+ u32 __reserved_4[3];
+ } lvt_pc;
+
+/*350*/ struct { /* LVT - LINT0 */
+ u32 vector : 8,
+ delivery_mode : 3,
+ __reserved_1 : 1,
+ delivery_status : 1,
+ polarity : 1,
+ remote_irr : 1,
+ trigger : 1,
+ mask : 1,
+ __reserved_2 : 15;
+ u32 __reserved_3[3];
+ } lvt_lint0;
+
+/*360*/ struct { /* LVT - LINT1 */
+ u32 vector : 8,
+ delivery_mode : 3,
+ __reserved_1 : 1,
+ delivery_status : 1,
+ polarity : 1,
+ remote_irr : 1,
+ trigger : 1,
+ mask : 1,
+ __reserved_2 : 15;
+ u32 __reserved_3[3];
+ } lvt_lint1;
+
+/*370*/ struct { /* LVT - Error */
+ u32 vector : 8,
+ __reserved_1 : 4,
+ delivery_status : 1,
+ __reserved_2 : 3,
+ mask : 1,
+ __reserved_3 : 15;
+ u32 __reserved_4[3];
+ } lvt_error;
+
+/*380*/ struct { /* Timer Initial Count Register */
+ u32 initial_count;
+ u32 __reserved_2[3];
+ } timer_icr;
+
+/*390*/ const
+ struct { /* Timer Current Count Register */
+ u32 curr_count;
+ u32 __reserved_2[3];
+ } timer_ccr;
+
+/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
+
+/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
+
+/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
+
+/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
+
+/*3E0*/ struct { /* Timer Divide Configuration Register */
+ u32 divisor : 4,
+ __reserved_1 : 28;
+ u32 __reserved_2[3];
+ } timer_dcr;
+
+/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
+
+} __attribute__ ((packed));
+
+#undef u32
+
+#ifdef CONFIG_X86_32
+ #define BAD_APICID 0xFFu
+#else
+ #define BAD_APICID 0xFFFFu
+#endif
+
+enum ioapic_irq_destination_types {
+ dest_Fixed = 0,
+ dest_LowestPrio = 1,
+ dest_SMI = 2,
+ dest__reserved_1 = 3,
+ dest_NMI = 4,
+ dest_INIT = 5,
+ dest__reserved_2 = 6,
+ dest_ExtINT = 7
+};
+
+#endif /* _ASM_X86_APICDEF_H */
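Editor's note: the ICR constants above are meant to be OR-ed into a single command word, with the destination APIC ID packed into ICR2 via SET_APIC_DEST_FIELD(). An illustrative composition (the helper names are made up):

/* Low ICR word: fixed delivery, physical destination mode, given vector. */
static inline u32 example_icr_low(u8 vector)
{
	return APIC_DM_FIXED | APIC_DEST_PHYSICAL | (vector & APIC_VECTOR_MASK);
}

/* High ICR word: destination APIC ID in bits 31:24. */
static inline u32 example_icr_high(u8 dest_apicid)
{
	return SET_APIC_DEST_FIELD(dest_apicid);
}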
diff --git a/kernel/arch/x86/include/asm/apm.h b/kernel/arch/x86/include/asm/apm.h
new file mode 100644
index 000000000..20370c6db
--- /dev/null
+++ b/kernel/arch/x86/include/asm/apm.h
@@ -0,0 +1,73 @@
+/*
+ * Machine specific APM BIOS functions for the generic subarchitecture.
+ * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+
+#ifndef _ASM_X86_MACH_DEFAULT_APM_H
+#define _ASM_X86_MACH_DEFAULT_APM_H
+
+#ifdef APM_ZERO_SEGS
+# define APM_DO_ZERO_SEGS \
+ "pushl %%ds\n\t" \
+ "pushl %%es\n\t" \
+ "xorl %%edx, %%edx\n\t" \
+ "mov %%dx, %%ds\n\t" \
+ "mov %%dx, %%es\n\t" \
+ "mov %%dx, %%fs\n\t" \
+ "mov %%dx, %%gs\n\t"
+# define APM_DO_POP_SEGS \
+ "popl %%es\n\t" \
+ "popl %%ds\n\t"
+#else
+# define APM_DO_ZERO_SEGS
+# define APM_DO_POP_SEGS
+#endif
+
+static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+ u32 *eax, u32 *ebx, u32 *ecx,
+ u32 *edx, u32 *esi)
+{
+ /*
+ * N.B. We do NOT need a cld after the BIOS call
+ * because we always save and restore the flags.
+ */
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+ "lcall *%%cs:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+ APM_DO_POP_SEGS
+ : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx),
+ "=S" (*esi)
+ : "a" (func), "b" (ebx_in), "c" (ecx_in)
+ : "memory", "cc");
+}
+
+static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+ u32 ecx_in, u32 *eax)
+{
+ int cx, dx, si;
+ u8 error;
+
+ /*
+ * N.B. We do NOT need a cld after the BIOS call
+ * because we always save and restore the flags.
+ */
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+ "lcall *%%cs:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+ APM_DO_POP_SEGS
+ : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx),
+ "=S" (si)
+ : "a" (func), "b" (ebx_in), "c" (ecx_in)
+ : "memory", "cc");
+ return error;
+}
+
+#endif /* _ASM_X86_MACH_DEFAULT_APM_H */
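Editor's note: both helpers above funnel an APM function code through %eax into the 32-bit BIOS entry point, and the byte they derive from the carry flag is the error indicator. A hedged sketch of a caller (the 0x5300 "installation check" code and the wrapper name are assumptions; real callers live in apm.c):

static u8 example_apm_installation_check(u32 *eax_out)
{
	/* 0x5300 is assumed to be the APM installation-check function. */
	return apm_bios_call_simple_asm(0x5300, 0, 0, eax_out);
}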
diff --git a/kernel/arch/x86/include/asm/arch_hweight.h b/kernel/arch/x86/include/asm/arch_hweight.h
new file mode 100644
index 000000000..9686c3d9f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/arch_hweight.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_HWEIGHT_H
+#define _ASM_X86_HWEIGHT_H
+
+#ifdef CONFIG_64BIT
+/* popcnt %edi, %eax -- redundant REX prefix for alignment */
+#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
+/* popcnt %rdi, %rax */
+#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
+#define REG_IN "D"
+#define REG_OUT "a"
+#else
+/* popcnt %eax, %eax */
+#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
+#define REG_IN "a"
+#define REG_OUT "a"
+#endif
+
+/*
+ * __sw_hweightXX are called from within the alternatives below
+ * and callee-clobbered registers need to be taken care of. See
+ * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
+ * compiler switches.
+ */
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+ unsigned int res = 0;
+
+ asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
+ : "="REG_OUT (res)
+ : REG_IN (w));
+
+ return res;
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+ return __arch_hweight32(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+ return __arch_hweight32(w & 0xff);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+ unsigned long res = 0;
+
+#ifdef CONFIG_X86_32
+ return __arch_hweight32((u32)w) +
+ __arch_hweight32((u32)(w >> 32));
+#else
+ asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
+ : "="REG_OUT (res)
+ : REG_IN (w));
+#endif /* CONFIG_X86_32 */
+
+ return res;
+}
+
+#endif
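Editor's note: these helpers are normally reached through hweight32()/hweight64() from <linux/bitops.h>; the alternative patches in a POPCNT instruction when the CPU supports it, otherwise the software fallback is called. A trivial usage sketch (the function name is made up):

/* Count the bits set in a 64-bit bitmap word, e.g. a presence map. */
static inline unsigned long example_count_present(u64 map)
{
	return __arch_hweight64(map);
}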
diff --git a/kernel/arch/x86/include/asm/archrandom.h b/kernel/arch/x86/include/asm/archrandom.h
new file mode 100644
index 000000000..69f1366f1
--- /dev/null
+++ b/kernel/arch/x86/include/asm/archrandom.h
@@ -0,0 +1,136 @@
+/*
+ * This file is part of the Linux kernel.
+ *
+ * Copyright (c) 2011-2014, Intel Corporation
+ * Authors: Fenghua Yu <fenghua.yu@intel.com>,
+ * H. Peter Anvin <hpa@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef ASM_X86_ARCHRANDOM_H
+#define ASM_X86_ARCHRANDOM_H
+
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
+#include <asm/nops.h>
+
+#define RDRAND_RETRY_LOOPS 10
+
+#define RDRAND_INT ".byte 0x0f,0xc7,0xf0"
+#define RDSEED_INT ".byte 0x0f,0xc7,0xf8"
+#ifdef CONFIG_X86_64
+# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0"
+# define RDSEED_LONG ".byte 0x48,0x0f,0xc7,0xf8"
+#else
+# define RDRAND_LONG RDRAND_INT
+# define RDSEED_LONG RDSEED_INT
+#endif
+
+#ifdef CONFIG_ARCH_RANDOM
+
+/* Instead of arch_get_random_long() when alternatives haven't run. */
+static inline int rdrand_long(unsigned long *v)
+{
+ int ok;
+ asm volatile("1: " RDRAND_LONG "\n\t"
+ "jc 2f\n\t"
+ "decl %0\n\t"
+ "jnz 1b\n\t"
+ "2:"
+ : "=r" (ok), "=a" (*v)
+ : "0" (RDRAND_RETRY_LOOPS));
+ return ok;
+}
+
+/* A single attempt at RDSEED */
+static inline bool rdseed_long(unsigned long *v)
+{
+ unsigned char ok;
+ asm volatile(RDSEED_LONG "\n\t"
+ "setc %0"
+ : "=qm" (ok), "=a" (*v));
+ return ok;
+}
+
+#define GET_RANDOM(name, type, rdrand, nop) \
+static inline int name(type *v) \
+{ \
+ int ok; \
+ alternative_io("movl $0, %0\n\t" \
+ nop, \
+ "\n1: " rdrand "\n\t" \
+ "jc 2f\n\t" \
+ "decl %0\n\t" \
+ "jnz 1b\n\t" \
+ "2:", \
+ X86_FEATURE_RDRAND, \
+ ASM_OUTPUT2("=r" (ok), "=a" (*v)), \
+ "0" (RDRAND_RETRY_LOOPS)); \
+ return ok; \
+}
+
+#define GET_SEED(name, type, rdseed, nop) \
+static inline int name(type *v) \
+{ \
+ unsigned char ok; \
+ alternative_io("movb $0, %0\n\t" \
+ nop, \
+ rdseed "\n\t" \
+ "setc %0", \
+ X86_FEATURE_RDSEED, \
+ ASM_OUTPUT2("=q" (ok), "=a" (*v))); \
+ return ok; \
+}
+
+#ifdef CONFIG_X86_64
+
+GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
+GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
+
+GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5);
+GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
+
+#else
+
+GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
+GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
+
+GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP4);
+GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
+
+#endif /* CONFIG_X86_64 */
+
+#define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND)
+#define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
+
+#else
+
+static inline int rdrand_long(unsigned long *v)
+{
+ return 0;
+}
+
+static inline bool rdseed_long(unsigned long *v)
+{
+ return false;
+}
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+extern void x86_init_rdrand(struct cpuinfo_x86 *c);
+
+#endif /* ASM_X86_ARCHRANDOM_H */
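Editor's note: with CONFIG_ARCH_RANDOM enabled, arch_get_random_long() returns non-zero only when RDRAND produced a value within RDRAND_RETRY_LOOPS attempts, so callers must always check the result. A hedged sketch (the helper name and the fallback policy are assumptions):

static inline unsigned long example_hw_seed(unsigned long fallback)
{
	unsigned long v;

	/* Assumes CONFIG_ARCH_RANDOM; otherwise this helper is not built. */
	if (arch_has_random() && arch_get_random_long(&v))
		return v;
	return fallback;
}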
diff --git a/kernel/arch/x86/include/asm/asm-offsets.h b/kernel/arch/x86/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/kernel/arch/x86/include/asm/asm.h b/kernel/arch/x86/include/asm/asm.h
new file mode 100644
index 000000000..7730c1c5c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/asm.h
@@ -0,0 +1,83 @@
+#ifndef _ASM_X86_ASM_H
+#define _ASM_X86_ASM_H
+
+#ifdef __ASSEMBLY__
+# define __ASM_FORM(x) x
+# define __ASM_FORM_RAW(x) x
+# define __ASM_FORM_COMMA(x) x,
+#else
+# define __ASM_FORM(x) " " #x " "
+# define __ASM_FORM_RAW(x) #x
+# define __ASM_FORM_COMMA(x) " " #x ","
+#endif
+
+#ifdef CONFIG_X86_32
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
+#else
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
+#endif
+
+#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
+ inst##q##__VA_ARGS__)
+#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
+
+#define _ASM_PTR __ASM_SEL(.long, .quad)
+#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
+
+#define _ASM_MOV __ASM_SIZE(mov)
+#define _ASM_INC __ASM_SIZE(inc)
+#define _ASM_DEC __ASM_SIZE(dec)
+#define _ASM_ADD __ASM_SIZE(add)
+#define _ASM_SUB __ASM_SIZE(sub)
+#define _ASM_XADD __ASM_SIZE(xadd)
+
+#define _ASM_AX __ASM_REG(ax)
+#define _ASM_BX __ASM_REG(bx)
+#define _ASM_CX __ASM_REG(cx)
+#define _ASM_DX __ASM_REG(dx)
+#define _ASM_SP __ASM_REG(sp)
+#define _ASM_BP __ASM_REG(bp)
+#define _ASM_SI __ASM_REG(si)
+#define _ASM_DI __ASM_REG(di)
+
+/* Exception table entry */
+#ifdef __ASSEMBLY__
+# define _ASM_EXTABLE(from,to) \
+ .pushsection "__ex_table","a" ; \
+ .balign 8 ; \
+ .long (from) - . ; \
+ .long (to) - . ; \
+ .popsection
+
+# define _ASM_EXTABLE_EX(from,to) \
+ .pushsection "__ex_table","a" ; \
+ .balign 8 ; \
+ .long (from) - . ; \
+ .long (to) - . + 0x7ffffff0 ; \
+ .popsection
+
+# define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist","aw" ; \
+ _ASM_ALIGN ; \
+ _ASM_PTR (entry); \
+ .popsection
+#else
+# define _ASM_EXTABLE(from,to) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 8\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - .\n" \
+ " .popsection\n"
+
+# define _ASM_EXTABLE_EX(from,to) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 8\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - . + 0x7ffffff0\n" \
+ " .popsection\n"
+/* For C file, we already have NOKPROBE_SYMBOL macro */
+#endif
+
+#endif /* _ASM_X86_ASM_H */
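Editor's note: _ASM_EXTABLE() registers a fixup target for a potentially faulting instruction, which is how the uaccess helpers survive bad pointers. A hedged sketch of that pattern from C inline assembly (the names and the inline -EFAULT constant are illustrative, not the kernel's own helper):

static inline int example_safe_load(const unsigned long *addr,
				    unsigned long *val)
{
	int err = 0;

	asm volatile("1:	mov %2,%1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	mov %3,%0\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : "+r" (err), "=r" (*val)
		     : "m" (*addr), "i" (-14 /* -EFAULT */));
	return err;
}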
diff --git a/kernel/arch/x86/include/asm/atomic.h b/kernel/arch/x86/include/asm/atomic.h
new file mode 100644
index 000000000..5e5cd123f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/atomic.h
@@ -0,0 +1,238 @@
+#ifndef _ASM_X86_ATOMIC_H
+#define _ASM_X86_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
+#include <asm/rmwcc.h>
+#include <asm/barrier.h>
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc.
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read(const atomic_t *v)
+{
+ return ACCESS_ONCE((v)->counter);
+}
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+ v->counter = i;
+}
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+ asm volatile(LOCK_PREFIX "addl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i));
+}
+
+/**
+ * atomic_sub - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ asm volatile(LOCK_PREFIX "subl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i));
+}
+
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic_sub_and_test(int i, atomic_t *v)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic_inc(atomic_t *v)
+{
+ asm volatile(LOCK_PREFIX "incl %0"
+ : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic_dec(atomic_t *v)
+{
+ asm volatile(LOCK_PREFIX "decl %0"
+ : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
+}
+
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic_inc_and_test(atomic_t *v)
+{
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
+}
+
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
+}
+
+/**
+ * atomic_add_return - add integer and return
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ return i + xadd(&v->counter, i);
+}
+
+/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ return atomic_add_return(-i, v);
+}
+
+#define atomic_inc_return(v) (atomic_add_return(1, v))
+#define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}
+
+/**
+ * __atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the old value of @v.
+ */
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c;
+}
+
+/**
+ * atomic_inc_short - increment of a short integer
+ * @v: pointer to type short int
+ *
+ * Atomically adds 1 to @v
+ * Returns the new value of @v
+ */
+static inline short int atomic_inc_short(short int *v)
+{
+ asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
+ return *v;
+}
+
+/* These are x86-specific, used by some header files */
+#define atomic_clear_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "andl %0,%1" \
+ : : "r" (~(mask)), "m" (*(addr)) : "memory")
+
+#define atomic_set_mask(mask, addr) \
+ asm volatile(LOCK_PREFIX "orl %0,%1" \
+ : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+ : "memory")
+
+#ifdef CONFIG_X86_32
+# include <asm/atomic64_32.h>
+#else
+# include <asm/atomic64_64.h>
+#endif
+
+#endif /* _ASM_X86_ATOMIC_H */
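Editor's note: a classic use of these primitives is reference counting, where atomic_inc() takes a reference and atomic_dec_and_test() tells the caller when the last one was dropped. Illustrative sketch (the object type and release hook are hypothetical):

struct example_obj {
	atomic_t refs;			/* initialised with ATOMIC_INIT(1) */
};

static inline void example_get(struct example_obj *obj)
{
	atomic_inc(&obj->refs);
}

static inline void example_put(struct example_obj *obj,
			       void (*release)(struct example_obj *))
{
	if (atomic_dec_and_test(&obj->refs))
		release(obj);		/* last reference gone */
}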
diff --git a/kernel/arch/x86/include/asm/atomic64_32.h b/kernel/arch/x86/include/asm/atomic64_32.h
new file mode 100644
index 000000000..b154de75c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/atomic64_32.h
@@ -0,0 +1,316 @@
+#ifndef _ASM_X86_ATOMIC64_32_H
+#define _ASM_X86_ATOMIC64_32_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/processor.h>
+//#include <asm/cmpxchg.h>
+
+/* A 64-bit atomic type */
+
+typedef struct {
+ u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(val) { (val) }
+
+#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
+#ifndef ATOMIC64_EXPORT
+#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
+#else
+#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
+ ATOMIC64_EXPORT(atomic64_##sym)
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+#define __alternative_atomic64(f, g, out, in...) \
+ asm volatile("call %P[func]" \
+ : out : [func] "i" (atomic64_##g##_cx8), ## in)
+
+#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
+#else
+#define __alternative_atomic64(f, g, out, in...) \
+ alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
+ X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
+
+#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
+ ATOMIC64_DECL_ONE(sym##_386)
+
+ATOMIC64_DECL_ONE(add_386);
+ATOMIC64_DECL_ONE(sub_386);
+ATOMIC64_DECL_ONE(inc_386);
+ATOMIC64_DECL_ONE(dec_386);
+#endif
+
+#define alternative_atomic64(f, out, in...) \
+ __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+
+ATOMIC64_DECL(read);
+ATOMIC64_DECL(set);
+ATOMIC64_DECL(xchg);
+ATOMIC64_DECL(add_return);
+ATOMIC64_DECL(sub_return);
+ATOMIC64_DECL(inc_return);
+ATOMIC64_DECL(dec_return);
+ATOMIC64_DECL(dec_if_positive);
+ATOMIC64_DECL(inc_not_zero);
+ATOMIC64_DECL(add_unless);
+
+#undef ATOMIC64_DECL
+#undef ATOMIC64_DECL_ONE
+#undef __ATOMIC64_DECL
+#undef ATOMIC64_EXPORT
+
+/**
+ * atomic64_cmpxchg - cmpxchg atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @o: expected value
+ * @n: new value
+ *
+ * Atomically sets @v to @n if it was equal to @o and returns
+ * the old value.
+ */
+
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+{
+ return cmpxchg64(&v->counter, o, n);
+}
+
+/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
+ *
+ * Atomically xchgs the value of @v to @n and returns
+ * the old value.
+ */
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
+{
+ long long o;
+ unsigned high = (unsigned)(n >> 32);
+ unsigned low = (unsigned)n;
+ alternative_atomic64(xchg, "=&A" (o),
+ "S" (v), "b" (low), "c" (high)
+ : "memory");
+ return o;
+}
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @i: value to assign
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+ unsigned high = (unsigned)(i >> 32);
+ unsigned low = (unsigned)i;
+ alternative_atomic64(set, /* no output */,
+ "S" (v), "b" (low), "c" (high)
+ : "eax", "edx", "memory");
+}
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically reads the value of @v and returns it.
+ */
+static inline long long atomic64_read(const atomic64_t *v)
+{
+ long long r;
+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
+ return r;
+}
+
+/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns @i + *@v
+ */
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+ alternative_atomic64(add_return,
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
+ return i;
+}
+
+/*
+ * Other variants with different arithmetic operators:
+ */
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+ alternative_atomic64(sub_return,
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
+ return i;
+}
+
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+ long long a;
+ alternative_atomic64(inc_return, "=&A" (a),
+ "S" (v) : "memory", "ecx");
+ return a;
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+ long long a;
+ alternative_atomic64(dec_return, "=&A" (a),
+ "S" (v) : "memory", "ecx");
+ return a;
+}
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline long long atomic64_add(long long i, atomic64_t *v)
+{
+ __alternative_atomic64(add, add_return,
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
+ return i;
+}
+
+/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline long long atomic64_sub(long long i, atomic64_t *v)
+{
+ __alternative_atomic64(sub, sub_return,
+ ASM_OUTPUT2("+A" (i), "+c" (v)),
+ ASM_NO_INPUT_CLOBBER("memory"));
+ return i;
+}
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic64_inc(atomic64_t *v)
+{
+ __alternative_atomic64(inc, inc_return, /* no output */,
+ "S" (v) : "memory", "eax", "ecx", "edx");
+}
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic64_dec(atomic64_t *v)
+{
+ __alternative_atomic64(dec, dec_return, /* no output */,
+ "S" (v) : "memory", "eax", "ecx", "edx");
+}
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic64_dec_and_test(atomic64_t *v)
+{
+ return atomic64_dec_return(v) == 0;
+}
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_inc_and_test(atomic64_t *v)
+{
+ return atomic64_inc_return(v) == 0;
+}
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int atomic64_add_negative(long long i, atomic64_t *v)
+{
+ return atomic64_add_return(i, v) < 0;
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if the add was done, zero otherwise.
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+ unsigned low = (unsigned)u;
+ unsigned high = (unsigned)(u >> 32);
+ alternative_atomic64(add_unless,
+ ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
+ "S" (v) : "memory");
+ return (int)a;
+}
+
+
+static inline int atomic64_inc_not_zero(atomic64_t *v)
+{
+ int r;
+ alternative_atomic64(inc_not_zero, "=&a" (r),
+ "S" (v) : "ecx", "edx", "memory");
+ return r;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long r;
+ alternative_atomic64(dec_if_positive, "=&A" (r),
+ "S" (v) : "ecx", "memory");
+ return r;
+}
+
+#undef alternative_atomic64
+#undef __alternative_atomic64
+
+#endif /* _ASM_X86_ATOMIC64_32_H */
diff --git a/kernel/arch/x86/include/asm/atomic64_64.h b/kernel/arch/x86/include/asm/atomic64_64.h
new file mode 100644
index 000000000..f8d273e18
--- /dev/null
+++ b/kernel/arch/x86/include/asm/atomic64_64.h
@@ -0,0 +1,223 @@
+#ifndef _ASM_X86_ATOMIC64_64_H
+#define _ASM_X86_ATOMIC64_64_H
+
+#include <linux/types.h>
+#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
+
+/* The 64-bit atomic type */
+
+#define ATOMIC64_INIT(i) { (i) }
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically reads the value of @v.
+ * Doesn't imply a read memory barrier.
+ */
+static inline long atomic64_read(const atomic64_t *v)
+{
+ return ACCESS_ONCE((v)->counter);
+}
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic64_set(atomic64_t *v, long i)
+{
+ v->counter = i;
+}
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v.
+ */
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+static inline void atomic64_sub(long i, atomic64_t *v)
+{
+ asm volatile(LOCK_PREFIX "subq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+}
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
+}
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic64_inc(atomic64_t *v)
+{
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+}
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1.
+ */
+static inline void atomic64_dec(atomic64_t *v)
+{
+ asm volatile(LOCK_PREFIX "decq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+}
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic64_dec_and_test(atomic64_t *v)
+{
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
+}
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_inc_and_test(atomic64_t *v)
+{
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
+}
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int atomic64_add_negative(long i, atomic64_t *v)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
+}
+
+/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+ return i + xadd(&v->counter, i);
+}
+
+static inline long atomic64_sub_return(long i, atomic64_t *v)
+{
+ return atomic64_add_return(-i, v);
+}
+
+#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
+#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+{
+ return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long new)
+{
+ return xchg(&v->counter, new);
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if the add was done, zero otherwise.
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long c, old, dec;
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
+#endif /* _ASM_X86_ATOMIC64_64_H */
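Editor's note: the cmpxchg loop used by atomic64_add_unless() and atomic64_dec_if_positive() above is a general template for building other lock-free read-modify-write operations on top of atomic64_cmpxchg(). A hedged sketch of an atomic maximum built the same way (the function name is made up):

static inline void example_atomic64_max(atomic64_t *v, long new)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (c >= new)
			break;			/* current value already larger */
		old = atomic64_cmpxchg(v, c, new);
		if (likely(old == c))
			break;			/* update succeeded */
		c = old;			/* lost the race, retry */
	}
}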
diff --git a/kernel/arch/x86/include/asm/barrier.h b/kernel/arch/x86/include/asm/barrier.h
new file mode 100644
index 000000000..959e45b81
--- /dev/null
+++ b/kernel/arch/x86/include/asm/barrier.h
@@ -0,0 +1,105 @@
+#ifndef _ASM_X86_BARRIER_H
+#define _ASM_X86_BARRIER_H
+
+#include <asm/alternative.h>
+#include <asm/nops.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#ifdef CONFIG_X86_32
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#else
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#endif
+
+#ifdef CONFIG_X86_PPRO_FENCE
+#define dma_rmb() rmb()
+#else
+#define dma_rmb() barrier()
+#endif
+#define dma_wmb() barrier()
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() dma_rmb()
+#define smp_wmb() barrier()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#else /* !SMP */
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif /* SMP */
+
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
+#if defined(CONFIG_X86_PPRO_FENCE)
+
+/*
+ * For this option x86 doesn't have a strong TSO memory
+ * model and we should fall back to full barriers.
+ */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
+#else /* regular x86 TSO memory ordering */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
+#endif
+
+/* Atomic operations are already serializing on x86 */
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ */
+static __always_inline void rdtsc_barrier(void)
+{
+ alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+ "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif /* _ASM_X86_BARRIER_H */
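Editor's note: smp_store_release()/smp_load_acquire() are the intended pairing for publish/consume patterns; on x86's TSO model they reduce to a compiler barrier plus a plain store or load. A hedged sketch with hypothetical shared variables:

static int example_payload;
static int example_ready;

static void example_publish(int val)
{
	example_payload = val;
	smp_store_release(&example_ready, 1);	/* payload visible before flag */
}

static int example_consume(void)
{
	if (smp_load_acquire(&example_ready))	/* pairs with the release above */
		return example_payload;
	return -1;				/* nothing published yet */
}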
diff --git a/kernel/arch/x86/include/asm/bios_ebda.h b/kernel/arch/x86/include/asm/bios_ebda.h
new file mode 100644
index 000000000..aa6a3170a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/bios_ebda.h
@@ -0,0 +1,60 @@
+#ifndef _ASM_X86_BIOS_EBDA_H
+#define _ASM_X86_BIOS_EBDA_H
+
+#include <asm/io.h>
+
+/*
+ * Returns physical address of EBDA. Returns 0 if there is no EBDA.
+ */
+static inline unsigned int get_bios_ebda(void)
+{
+ /*
+ * There is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E.
+ */
+ unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
+ address <<= 4;
+ return address; /* 0 means none */
+}
+
+/*
+ * Return the sanitized length of the EBDA in bytes, if it exists.
+ */
+static inline unsigned int get_bios_ebda_length(void)
+{
+ unsigned int address;
+ unsigned int length;
+
+ address = get_bios_ebda();
+ if (!address)
+ return 0;
+
+ /* EBDA length is byte 0 of the EBDA (stored in KiB) */
+ length = *(unsigned char *)phys_to_virt(address);
+ length <<= 10;
+
+ /* Trim the length if it extends beyond 640KiB */
+ length = min_t(unsigned int, (640 * 1024) - address, length);
+ return length;
+}
+
+void reserve_ebda_region(void);
+
+#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
+/*
+ * This is obviously not a great place for this, but we want to be
+ * able to scatter it around anywhere in the kernel.
+ */
+void check_for_bios_corruption(void);
+void start_periodic_check_for_corruption(void);
+#else
+static inline void check_for_bios_corruption(void)
+{
+}
+
+static inline void start_periodic_check_for_corruption(void)
+{
+}
+#endif
+
+#endif /* _ASM_X86_BIOS_EBDA_H */
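Editor's note: get_bios_ebda() and get_bios_ebda_length() together bound the EBDA so early boot code can keep allocations away from it. A hedged sketch of a reservation helper (the memblock_reserve() call site is an assumption; the real logic lives in reserve_ebda_region()):

static void __init example_reserve_ebda(void)
{
	unsigned int ebda = get_bios_ebda();
	unsigned int len  = get_bios_ebda_length();

	if (ebda && len)
		memblock_reserve(ebda, len);	/* keep allocators off the EBDA */
}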
diff --git a/kernel/arch/x86/include/asm/bitops.h b/kernel/arch/x86/include/asm/bitops.h
new file mode 100644
index 000000000..cfe3b954d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/bitops.h
@@ -0,0 +1,509 @@
+#ifndef _ASM_X86_BITOPS_H
+#define _ASM_X86_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+#include <asm/alternative.h>
+#include <asm/rmwcc.h>
+#include <asm/barrier.h>
+
+#if BITS_PER_LONG == 32
+# define _BITOPS_LONG_SHIFT 5
+#elif BITS_PER_LONG == 64
+# define _BITOPS_LONG_SHIFT 6
+#else
+# error "Unexpected BITS_PER_LONG"
+#endif
+
+#define BIT_64(n) (U64_C(1) << (n))
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
+/* Technically wrong, but this avoids compilation errors on some gcc
+ versions. */
+#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
+#else
+#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+#endif
+
+#define ADDR BITOP_ADDR(addr)
+
+/*
+ * We do the locked ops that don't return the old value as
+ * a mask operation on a byte.
+ */
+#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK(nr) (1 << ((nr) & 7))
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __always_inline void
+set_bit(long nr, volatile unsigned long *addr)
+{
+ if (IS_IMMEDIATE(nr)) {
+ asm volatile(LOCK_PREFIX "orb %1,%0"
+ : CONST_MASK_ADDR(nr, addr)
+ : "iq" ((u8)CONST_MASK(nr))
+ : "memory");
+ } else {
+ asm volatile(LOCK_PREFIX "bts %1,%0"
+ : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+ }
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(long nr, volatile unsigned long *addr)
+{
+ asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
+ * in order to ensure changes are visible on other processors.
+ */
+static __always_inline void
+clear_bit(long nr, volatile unsigned long *addr)
+{
+ if (IS_IMMEDIATE(nr)) {
+ asm volatile(LOCK_PREFIX "andb %1,%0"
+ : CONST_MASK_ADDR(nr, addr)
+ : "iq" ((u8)~CONST_MASK(nr)));
+ } else {
+ asm volatile(LOCK_PREFIX "btr %1,%0"
+ : BITOP_ADDR(addr)
+ : "Ir" (nr));
+ }
+}
+
+/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+ barrier();
+ clear_bit(nr, addr);
+}
+
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
+{
+ asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
+}
+
+/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ *
+ * No memory barrier is required here, because x86 cannot reorder stores past
+ * older loads. Same principle as spin_unlock.
+ */
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+ barrier();
+ __clear_bit(nr, addr);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(long nr, volatile unsigned long *addr)
+{
+ asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(long nr, volatile unsigned long *addr)
+{
+ if (IS_IMMEDIATE(nr)) {
+ asm volatile(LOCK_PREFIX "xorb %1,%0"
+ : CONST_MASK_ADDR(nr, addr)
+ : "iq" ((u8)CONST_MASK(nr)));
+ } else {
+ asm volatile(LOCK_PREFIX "btc %1,%0"
+ : BITOP_ADDR(addr)
+ : "Ir" (nr));
+ }
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on x86.
+ */
+static __always_inline int
+test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+ return test_and_set_bit(nr, addr);
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+ int oldbit;
+
+ asm("bts %2,%1\n\t"
+ "sbb %0,%0"
+ : "=r" (oldbit), ADDR
+ : "Ir" (nr));
+ return oldbit;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ *
+ * Note: the operation is performed atomically with respect to
+ * the local CPU, but not other CPUs. Portable code should not
+ * rely on this behaviour.
+ * KVM relies on this behaviour on x86 for modifying memory that is also
+ * accessed from a hypervisor on the same CPU if running in a VM: don't change
+ * this without also updating arch/x86/kernel/kvm.c
+ */
+static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+ int oldbit;
+
+ asm volatile("btr %2,%1\n\t"
+ "sbb %0,%0"
+ : "=r" (oldbit), ADDR
+ : "Ir" (nr));
+ return oldbit;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+ int oldbit;
+
+ asm volatile("btc %2,%1\n\t"
+ "sbb %0,%0"
+ : "=r" (oldbit), ADDR
+ : "Ir" (nr) : "memory");
+
+ return oldbit;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
+}
+
+static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
+{
+ return ((1UL << (nr & (BITS_PER_LONG-1))) &
+ (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
+}
+
+static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+{
+ int oldbit;
+
+ asm volatile("bt %2,%1\n\t"
+ "sbb %0,%0"
+ : "=r" (oldbit)
+ : "m" (*(unsigned long *)addr), "Ir" (nr));
+
+ return oldbit;
+}
+
+#if 0 /* Fool kernel-doc since it doesn't do macros yet */
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static int test_bit(int nr, const volatile unsigned long *addr);
+#endif
+
+#define test_bit(nr, addr) \
+ (__builtin_constant_p((nr)) \
+ ? constant_test_bit((nr), (addr)) \
+ : variable_test_bit((nr), (addr)))
+
+/**
+ * __ffs - find first set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ asm("rep; bsf %1,%0"
+ : "=r" (word)
+ : "rm" (word));
+ return word;
+}
+
+/**
+ * ffz - find first zero bit in word
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+ asm("rep; bsf %1,%0"
+ : "=r" (word)
+ : "r" (~word));
+ return word;
+}
+
+/*
+ * __fls: find last set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ asm("bsr %1,%0"
+ : "=r" (word)
+ : "rm" (word));
+ return word;
+}
+
+#undef ADDR
+
+#ifdef __KERNEL__
+/**
+ * ffs - find first set bit in word
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs
+ * routines, and therefore differs in spirit from the other bitops.
+ *
+ * ffs(value) returns 0 if value is 0 or the position of the first
+ * set bit if value is nonzero. The first (least significant) bit
+ * is at position 1.
+ */
+static inline int ffs(int x)
+{
+ int r;
+
+#ifdef CONFIG_X86_64
+ /*
+ * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architects say the
+	 * register is actually written back with its previous value, except
+	 * that the top 32 bits are cleared.
+ *
+ * We cannot do this on 32 bits because at the very least some
+ * 486 CPUs did not behave this way.
+ */
+ asm("bsfl %1,%0"
+ : "=r" (r)
+ : "rm" (x), "0" (-1));
+#elif defined(CONFIG_X86_CMOV)
+ asm("bsfl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "r" (-1));
+#else
+ asm("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+#endif
+ return r + 1;
+}
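A quick standalone check of the 1-based convention documented above, assuming a hosted C environment with the libc ffs() and the compiler builtin (the test program is illustrative and not part of this header):

#include <assert.h>
#include <strings.h>	/* libc ffs() */

int main(void)
{
	assert(ffs(0) == 0);		/* 0 has no set bit */
	assert(ffs(1) == 1);		/* least significant bit is position 1 */
	assert(ffs(0x80) == 8);
	assert(__builtin_ffs(0x10) == 5);
	return 0;
}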
+
+/**
+ * fls - find last set bit in word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffs, but returns the position of the most significant set bit.
+ *
+ * fls(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 32.
+ */
+static inline int fls(int x)
+{
+ int r;
+
+#ifdef CONFIG_X86_64
+ /*
+ * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architects say the
+	 * register is actually written back with its previous value, except
+	 * that the top 32 bits are cleared.
+ *
+ * We cannot do this on 32 bits because at the very least some
+ * 486 CPUs did not behave this way.
+ */
+ asm("bsrl %1,%0"
+ : "=r" (r)
+ : "rm" (x), "0" (-1));
+#elif defined(CONFIG_X86_CMOV)
+ asm("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
+#else
+ asm("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+#endif
+ return r + 1;
+}
+
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#ifdef CONFIG_X86_64
+static __always_inline int fls64(__u64 x)
+{
+ int bitpos = -1;
+ /*
+ * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
+	 * dest reg is undefined if x==0, but their CPU architects say the
+	 * register is actually written back with its previous value.
+ */
+ asm("bsrq %1,%q0"
+ : "+r" (bitpos)
+ : "rm" (x));
+ return bitpos + 1;
+}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif
+
+#include <asm-generic/bitops/find.h>
+
+#include <asm-generic/bitops/sched.h>
+
+#include <asm/arch_hweight.h>
+
+#include <asm-generic/bitops/const_hweight.h>
+
+#include <asm-generic/bitops/le.h>
+
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_X86_BITOPS_H */
diff --git a/kernel/arch/x86/include/asm/boot.h b/kernel/arch/x86/include/asm/boot.h
new file mode 100644
index 000000000..4fa687a47
--- /dev/null
+++ b/kernel/arch/x86/include/asm/boot.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_X86_BOOT_H
+#define _ASM_X86_BOOT_H
+
+
+#include <asm/pgtable_types.h>
+#include <uapi/asm/boot.h>
+
+/* Physical address where kernel should be loaded. */
+#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
+/* Minimum kernel alignment, as a power of two */
+#ifdef CONFIG_X86_64
+#define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
+#else
+#define MIN_KERNEL_ALIGN_LG2 (PAGE_SHIFT + THREAD_SIZE_ORDER)
+#endif
+#define MIN_KERNEL_ALIGN (_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
+
+#if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
+ (CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
+#error "Invalid value for CONFIG_PHYSICAL_ALIGN"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#define BOOT_HEAP_SIZE 0x400000
+#else /* !CONFIG_KERNEL_BZIP2 */
+
+#define BOOT_HEAP_SIZE 0x8000
+
+#endif /* !CONFIG_KERNEL_BZIP2 */
+
+#ifdef CONFIG_X86_64
+#define BOOT_STACK_SIZE 0x4000
+#else
+#define BOOT_STACK_SIZE 0x1000
+#endif
+
+#endif /* _ASM_X86_BOOT_H */
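LOAD_PHYSICAL_ADDR above rounds CONFIG_PHYSICAL_START up to the next CONFIG_PHYSICAL_ALIGN boundary. A small standalone sketch of that round-up arithmetic (the helper name and the values are illustrative, not kernel config symbols):

#include <assert.h>

/* Round addr up to the next multiple of align; align must be a power of two. */
static unsigned long round_up_to(unsigned long addr, unsigned long align)
{
	return (addr + (align - 1)) & ~(align - 1);
}

int main(void)
{
	assert(round_up_to(0x100000, 0x200000) == 0x200000);
	assert(round_up_to(0x200000, 0x200000) == 0x200000);	/* already aligned */
	return 0;
}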
diff --git a/kernel/arch/x86/include/asm/bootparam_utils.h b/kernel/arch/x86/include/asm/bootparam_utils.h
new file mode 100644
index 000000000..4a8cb8d7c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/bootparam_utils.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_X86_BOOTPARAM_UTILS_H
+#define _ASM_X86_BOOTPARAM_UTILS_H
+
+#include <asm/bootparam.h>
+
+/*
+ * This file is included from multiple environments. Do not add
+ * the #includes that would be needed to make it standalone.
+ */
+
+/*
+ * Deal with bootloaders which fail to initialize unknown fields in
+ * boot_params to zero. The fields in this list are taken from
+ * analysis of kexec-tools; if other broken bootloaders initialize a
+ * different set of fields we will need to figure out how to disambiguate.
+ *
+ * Note: efi_info is commonly left uninitialized, but that field has a
+ * private magic, so it is better to leave it unchanged.
+ */
+static void sanitize_boot_params(struct boot_params *boot_params)
+{
+ /*
+ * IMPORTANT NOTE TO BOOTLOADER AUTHORS: do not simply clear
+ * this field. The purpose of this field is to guarantee
+ * compliance with the x86 boot spec located in
+ * Documentation/x86/boot.txt . That spec says that the
+ * *whole* structure should be cleared, after which only the
+ * portion defined by struct setup_header (boot_params->hdr)
+ * should be copied in.
+ *
+ * If you're having an issue because the sentinel is set, you
+ * need to change the whole structure to be cleared, not this
+ * (or any other) individual field, or you will soon have
+ * problems again.
+ */
+ if (boot_params->sentinel) {
+ /* fields in boot_params are left uninitialized, clear them */
+ memset(&boot_params->ext_ramdisk_image, 0,
+ (char *)&boot_params->efi_info -
+ (char *)&boot_params->ext_ramdisk_image);
+ memset(&boot_params->kbd_status, 0,
+ (char *)&boot_params->hdr -
+ (char *)&boot_params->kbd_status);
+ memset(&boot_params->_pad7[0], 0,
+ (char *)&boot_params->edd_mbr_sig_buffer[0] -
+ (char *)&boot_params->_pad7[0]);
+ memset(&boot_params->_pad8[0], 0,
+ (char *)&boot_params->eddbuf[0] -
+ (char *)&boot_params->_pad8[0]);
+ memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+ }
+}
+
+#endif /* _ASM_X86_BOOTPARAM_UTILS_H */
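sanitize_boot_params() clears the byte ranges between selected members by subtracting member addresses. A standalone sketch of the same pattern using offsetof; the struct and field names below are invented for illustration and are not boot_params fields:

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct demo {		/* stand-in for a partially trusted structure */
	int keep_a;
	int scrub_b;	/* first field of the range to clear */
	int scrub_c;
	int keep_d;	/* first field after the range */
};

int main(void)
{
	struct demo d = { 1, 2, 3, 4 };

	/* Clear everything from scrub_b up to (but not including) keep_d. */
	memset(&d.scrub_b, 0,
	       offsetof(struct demo, keep_d) - offsetof(struct demo, scrub_b));
	assert(d.keep_a == 1 && d.scrub_b == 0 && d.scrub_c == 0 && d.keep_d == 4);
	return 0;
}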
diff --git a/kernel/arch/x86/include/asm/bug.h b/kernel/arch/x86/include/asm/bug.h
new file mode 100644
index 000000000..ba38ebbac
--- /dev/null
+++ b/kernel/arch/x86/include/asm/bug.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_X86_BUG_H
+#define _ASM_X86_BUG_H
+
+#define HAVE_ARCH_BUG
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#ifdef CONFIG_X86_32
+# define __BUG_C0 "2:\t.long 1b, %c0\n"
+#else
+# define __BUG_C0 "2:\t.long 1b - 2b, %c0 - 2b\n"
+#endif
+
+#define BUG() \
+do { \
+ asm volatile("1:\tud2\n" \
+ ".pushsection __bug_table,\"a\"\n" \
+ __BUG_C0 \
+ "\t.word %c1, 0\n" \
+ "\t.org 2b+%c2\n" \
+ ".popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (sizeof(struct bug_entry))); \
+ unreachable(); \
+} while (0)
+
+#else
+#define BUG() \
+do { \
+ asm volatile("ud2"); \
+ unreachable(); \
+} while (0)
+#endif
+
+#include <asm-generic/bug.h>
+
+#endif /* _ASM_X86_BUG_H */
diff --git a/kernel/arch/x86/include/asm/bugs.h b/kernel/arch/x86/include/asm/bugs.h
new file mode 100644
index 000000000..08abf6390
--- /dev/null
+++ b/kernel/arch/x86/include/asm/bugs.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_BUGS_H
+#define _ASM_X86_BUGS_H
+
+extern void check_bugs(void);
+
+#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
+int ppro_with_ram_bug(void);
+#else
+static inline int ppro_with_ram_bug(void) { return 0; }
+#endif
+
+#endif /* _ASM_X86_BUGS_H */
diff --git a/kernel/arch/x86/include/asm/cache.h b/kernel/arch/x86/include/asm/cache.h
new file mode 100644
index 000000000..48f99f154
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cache.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_X86_CACHE_H
+#define _ASM_X86_CACHE_H
+
+#include <linux/linkage.h>
+
+/* L1 cache line size */
+#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
+
+#ifdef CONFIG_X86_VSMP
+#ifdef CONFIG_SMP
+#define __cacheline_aligned_in_smp \
+ __attribute__((__aligned__(INTERNODE_CACHE_BYTES))) \
+ __page_aligned_data
+#endif
+#endif
+
+#endif /* _ASM_X86_CACHE_H */
diff --git a/kernel/arch/x86/include/asm/cacheflush.h b/kernel/arch/x86/include/asm/cacheflush.h
new file mode 100644
index 000000000..47c8e32f6
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cacheflush.h
@@ -0,0 +1,107 @@
+#ifndef _ASM_X86_CACHEFLUSH_H
+#define _ASM_X86_CACHEFLUSH_H
+
+/* Caches aren't brain-dead on the intel. */
+#include <asm-generic/cacheflush.h>
+#include <asm/special_insns.h>
+
+/*
+ * The set_memory_* API can be used to change various attributes of a virtual
+ * address range. The attributes include:
+ * Cacheability : UnCached, WriteCombining, WriteBack
+ * Executability : eXecutable, NoteXecutable
+ * Read/Write : ReadOnly, ReadWrite
+ * Presence : NotPresent
+ *
+ * Within a category, the attributes are mutually exclusive.
+ *
+ * The implementation of this API will take care of various aspects that
+ * are associated with changing such attributes, such as:
+ * - Flushing TLBs
+ * - Flushing CPU caches
+ * - Making sure aliases of the memory behind the mapping don't violate
+ * coherency rules as defined by the CPU in the system.
+ *
+ * What this API does not do:
+ * - Provide exclusion between various callers - including callers that
+ * operate on other mappings of the same physical page
+ * - Restore default attributes when a page is freed
+ * - Guarantee that mappings other than the requested one are
+ * in any state, other than that these do not violate rules for
+ * the CPU you have. Do not depend on any effects on other mappings;
+ * CPUs other than the one you have may have more relaxed rules.
+ * The caller is required to take care of these.
+ */
+
+int _set_memory_uc(unsigned long addr, int numpages);
+int _set_memory_wc(unsigned long addr, int numpages);
+int _set_memory_wb(unsigned long addr, int numpages);
+int set_memory_uc(unsigned long addr, int numpages);
+int set_memory_wc(unsigned long addr, int numpages);
+int set_memory_wb(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
+int set_memory_4k(unsigned long addr, int numpages);
+
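A minimal, hypothetical module fragment (not from this tree) showing the usual shape of a set_memory_* call pair: flip the 1:1 mapping of one page to read-only, and restore it to read-write before freeing. Standard module boilerplate is assumed and the demo names are made up:

#include <linux/module.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>

static unsigned long demo_page;

static int __init ro_demo_init(void)
{
	demo_page = __get_free_page(GFP_KERNEL);
	if (!demo_page)
		return -ENOMEM;

	set_memory_ro(demo_page, 1);	/* one page, now read-only in the 1:1 map */
	return 0;
}

static void __exit ro_demo_exit(void)
{
	set_memory_rw(demo_page, 1);	/* restore before handing the page back */
	free_page(demo_page);
}

module_init(ro_demo_init);
module_exit(ro_demo_exit);
MODULE_LICENSE("GPL");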
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wc(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+
+int set_pages_array_uc(struct page **pages, int addrinarray);
+int set_pages_array_wc(struct page **pages, int addrinarray);
+int set_pages_array_wb(struct page **pages, int addrinarray);
+
+/*
+ * For legacy compatibility with the old APIs, a few functions
+ * are provided that work on a "struct page".
+ * These functions operate ONLY on the 1:1 kernel mapping of the
+ * memory that the struct page represents, and internally just
+ * call the set_memory_* function. See the description of the
+ * set_memory_* function for more details on conventions.
+ *
+ * These APIs should be considered *deprecated* and are likely going to
+ * be removed in the future.
+ * The reason for this is the implicit operation on the 1:1 mapping only,
+ * making this not a generally useful API.
+ *
+ * Specifically, many users of the old APIs had a virtual address,
+ * called virt_to_page() or vmalloc_to_page() on that address to
+ * get a struct page* that the old API required.
+ * To convert these cases, use set_memory_*() on the original
+ * virtual address, do not use these functions.
+ */
+
+int set_pages_uc(struct page *page, int numpages);
+int set_pages_wb(struct page *page, int numpages);
+int set_pages_x(struct page *page, int numpages);
+int set_pages_nx(struct page *page, int numpages);
+int set_pages_ro(struct page *page, int numpages);
+int set_pages_rw(struct page *page, int numpages);
+
+
+void clflush_cache_range(void *addr, unsigned int size);
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+extern const int rodata_test_data;
+extern int kernel_set_to_readonly;
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_RODATA_TEST
+int rodata_test(void);
+#else
+static inline int rodata_test(void)
+{
+ return 0;
+}
+#endif
+
+#endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/kernel/arch/x86/include/asm/calgary.h b/kernel/arch/x86/include/asm/calgary.h
new file mode 100644
index 000000000..0d467b338
--- /dev/null
+++ b/kernel/arch/x86/include/asm/calgary.h
@@ -0,0 +1,70 @@
+/*
+ * Derived from include/asm-powerpc/iommu.h
+ *
+ * Copyright IBM Corporation, 2006-2007
+ *
+ * Author: Jon Mason <jdmason@us.ibm.com>
+ * Author: Muli Ben-Yehuda <muli@il.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_CALGARY_H
+#define _ASM_X86_CALGARY_H
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/timer.h>
+#include <asm/types.h>
+
+struct iommu_table {
+ struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
+ unsigned long it_base; /* mapped address of tce table */
+ unsigned long it_hint; /* Hint for next alloc */
+ unsigned long *it_map; /* A simple allocation bitmap for now */
+ void __iomem *bbar; /* Bridge BAR */
+ u64 tar_val; /* Table Address Register */
+ struct timer_list watchdog_timer;
+ spinlock_t it_lock; /* Protects it_map */
+ unsigned int it_size; /* Size of iommu table in entries */
+ unsigned char it_busno; /* Bus number this table belongs to */
+};
+
+struct cal_chipset_ops {
+ void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
+ void (*tce_cache_blast)(struct iommu_table *tbl);
+ void (*dump_error_regs)(struct iommu_table *tbl);
+};
+
+#define TCE_TABLE_SIZE_UNSPECIFIED ~0
+#define TCE_TABLE_SIZE_64K 0
+#define TCE_TABLE_SIZE_128K 1
+#define TCE_TABLE_SIZE_256K 2
+#define TCE_TABLE_SIZE_512K 3
+#define TCE_TABLE_SIZE_1M 4
+#define TCE_TABLE_SIZE_2M 5
+#define TCE_TABLE_SIZE_4M 6
+#define TCE_TABLE_SIZE_8M 7
+
+extern int use_calgary;
+
+#ifdef CONFIG_CALGARY_IOMMU
+extern int detect_calgary(void);
+#else
+static inline int detect_calgary(void) { return -ENODEV; }
+#endif
+
+#endif /* _ASM_X86_CALGARY_H */
diff --git a/kernel/arch/x86/include/asm/calling.h b/kernel/arch/x86/include/asm/calling.h
new file mode 100644
index 000000000..1c8b50edb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/calling.h
@@ -0,0 +1,247 @@
+/*
+
+ x86 function call convention, 64-bit:
+ -------------------------------------
+ arguments | callee-saved | extra caller-saved | return
+ [callee-clobbered] | | [callee-clobbered] |
+ ---------------------------------------------------------------------------
+ rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11 | rax, rdx [**]
+
+ ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
+ functions when it sees tail-call optimization possibilities) rflags is
+ clobbered. Leftover arguments are passed over the stack frame.)
+
+ [*] In the frame-pointers case rbp is fixed to the stack frame.
+
+ [**] for struct return values wider than 64 bits the return convention is a
+ bit more complex: up to 128 bits width we return small structures
+ straight in rax, rdx. For structures larger than that (3 words or
+ larger) the caller puts a pointer to an on-stack return struct
+ [allocated in the caller's stack frame] into the first argument - i.e.
+ into rdi. All other arguments shift up by one in this case.
+ Fortunately this case is rare in the kernel.
+
+For 32-bit we have the following conventions - kernel is built with
+-mregparm=3 and -freg-struct-return:
+
+ x86 function calling convention, 32-bit:
+ ----------------------------------------
+ arguments | callee-saved | extra caller-saved | return
+ [callee-clobbered] | | [callee-clobbered] |
+ -------------------------------------------------------------------------
+ eax edx ecx | ebx edi esi ebp [*] | <none> | eax, edx [**]
+
+ ( here too esp is obviously invariant across normal function calls. eflags
+ is clobbered. Leftover arguments are passed over the stack frame. )
+
+ [*] In the frame-pointers case ebp is fixed to the stack frame.
+
+ [**] We build with -freg-struct-return, which on 32-bit means similar
+ semantics as on 64-bit: edx can be used for a second return value
+ (i.e. covering integer and structure sizes up to 64 bits) - after that
+ it gets more complex and more expensive: 3-word or larger struct returns
+ get done in the caller's frame and the pointer to the return struct goes
+ into regparm0, i.e. eax - the other arguments shift up and the
+ function's register parameters degenerate to regparm=2 in essence.
+
+*/
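A tiny userspace illustration of the 64-bit argument-register order described above (System V AMD64, x86-64 Linux only): the function is written in assembly and simply returns its third argument, which arrives in rdx. The file and symbol names are illustrative:

/* third_arg.c -- build with: gcc third_arg.c (x86-64 Linux) */
#include <assert.h>

long third_arg(long a, long b, long c);

asm(".text\n"
    ".globl third_arg\n"
    "third_arg:\n"
    "	movq %rdx, %rax\n"	/* arg 3 arrives in rdx, return value leaves in rax */
    "	ret\n");

int main(void)
{
	assert(third_arg(10, 20, 30) == 30);
	return 0;
}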
+
+#include <asm/dwarf2.h>
+
+#ifdef CONFIG_X86_64
+
+/*
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
+ */
+
+/* The layout forms the "struct pt_regs" on the stack: */
+/*
+ * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+ * unless syscall needs a complete, fully filled "struct pt_regs".
+ */
+#define R15 0*8
+#define R14 1*8
+#define R13 2*8
+#define R12 3*8
+#define RBP 4*8
+#define RBX 5*8
+/* These regs are callee-clobbered. Always saved on kernel entry. */
+#define R11 6*8
+#define R10 7*8
+#define R9 8*8
+#define R8 9*8
+#define RAX 10*8
+#define RCX 11*8
+#define RDX 12*8
+#define RSI 13*8
+#define RDI 14*8
+/*
+ * On syscall entry, this is syscall#. On CPU exception, this is error code.
+ * On hw interrupt, it's IRQ number:
+ */
+#define ORIG_RAX 15*8
+/* Return frame for iretq */
+#define RIP 16*8
+#define CS 17*8
+#define EFLAGS 18*8
+#define RSP 19*8
+#define SS 20*8
+
+#define SIZEOF_PTREGS 21*8
+
+ .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
+ subq $15*8+\addskip, %rsp
+ CFI_ADJUST_CFA_OFFSET 15*8+\addskip
+ .endm
+
+ .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
+ .if \r11
+ movq_cfi r11, 6*8+\offset
+ .endif
+ .if \r8910
+ movq_cfi r10, 7*8+\offset
+ movq_cfi r9, 8*8+\offset
+ movq_cfi r8, 9*8+\offset
+ .endif
+ .if \rax
+ movq_cfi rax, 10*8+\offset
+ .endif
+ .if \rcx
+ movq_cfi rcx, 11*8+\offset
+ .endif
+ movq_cfi rdx, 12*8+\offset
+ movq_cfi rsi, 13*8+\offset
+ movq_cfi rdi, 14*8+\offset
+ .endm
+ .macro SAVE_C_REGS offset=0
+ SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
+ .endm
+ .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
+ SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
+ .endm
+ .macro SAVE_C_REGS_EXCEPT_R891011
+ SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
+ .endm
+ .macro SAVE_C_REGS_EXCEPT_RCX_R891011
+ SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
+ .endm
+ .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
+ SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
+ .endm
+
+ .macro SAVE_EXTRA_REGS offset=0
+ movq_cfi r15, 0*8+\offset
+ movq_cfi r14, 1*8+\offset
+ movq_cfi r13, 2*8+\offset
+ movq_cfi r12, 3*8+\offset
+ movq_cfi rbp, 4*8+\offset
+ movq_cfi rbx, 5*8+\offset
+ .endm
+ .macro SAVE_EXTRA_REGS_RBP offset=0
+ movq_cfi rbp, 4*8+\offset
+ .endm
+
+ .macro RESTORE_EXTRA_REGS offset=0
+ movq_cfi_restore 0*8+\offset, r15
+ movq_cfi_restore 1*8+\offset, r14
+ movq_cfi_restore 2*8+\offset, r13
+ movq_cfi_restore 3*8+\offset, r12
+ movq_cfi_restore 4*8+\offset, rbp
+ movq_cfi_restore 5*8+\offset, rbx
+ .endm
+
+ .macro ZERO_EXTRA_REGS
+ xorl %r15d, %r15d
+ xorl %r14d, %r14d
+ xorl %r13d, %r13d
+ xorl %r12d, %r12d
+ xorl %ebp, %ebp
+ xorl %ebx, %ebx
+ .endm
+
+ .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
+ .if \rstor_r11
+ movq_cfi_restore 6*8, r11
+ .endif
+ .if \rstor_r8910
+ movq_cfi_restore 7*8, r10
+ movq_cfi_restore 8*8, r9
+ movq_cfi_restore 9*8, r8
+ .endif
+ .if \rstor_rax
+ movq_cfi_restore 10*8, rax
+ .endif
+ .if \rstor_rcx
+ movq_cfi_restore 11*8, rcx
+ .endif
+ .if \rstor_rdx
+ movq_cfi_restore 12*8, rdx
+ .endif
+ movq_cfi_restore 13*8, rsi
+ movq_cfi_restore 14*8, rdi
+ .endm
+ .macro RESTORE_C_REGS
+ RESTORE_C_REGS_HELPER 1,1,1,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RAX
+ RESTORE_C_REGS_HELPER 0,1,1,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RCX
+ RESTORE_C_REGS_HELPER 1,0,1,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_R11
+ RESTORE_C_REGS_HELPER 1,1,0,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RCX_R11
+ RESTORE_C_REGS_HELPER 1,0,0,1,1
+ .endm
+ .macro RESTORE_RSI_RDI
+ RESTORE_C_REGS_HELPER 0,0,0,0,0
+ .endm
+ .macro RESTORE_RSI_RDI_RDX
+ RESTORE_C_REGS_HELPER 0,0,0,0,1
+ .endm
+
+ .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
+ addq $15*8+\addskip, %rsp
+ CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
+ .endm
+
+ .macro icebp
+ .byte 0xf1
+ .endm
+
+#else /* CONFIG_X86_64 */
+
+/*
+ * For 32-bit, only simplified versions of SAVE_ALL/RESTORE_ALL are provided.
+ * These differ from the entry_32.S versions in that they do not change the
+ * segment registers, so they are only suitable for in-kernel use, not when transitioning
+ * from or to user space. The resulting stack frame is not a standard
+ * pt_regs frame. The main use case is calling C code from assembler
+ * when all the registers need to be preserved.
+ */
+
+ .macro SAVE_ALL
+ pushl_cfi_reg eax
+ pushl_cfi_reg ebp
+ pushl_cfi_reg edi
+ pushl_cfi_reg esi
+ pushl_cfi_reg edx
+ pushl_cfi_reg ecx
+ pushl_cfi_reg ebx
+ .endm
+
+ .macro RESTORE_ALL
+ popl_cfi_reg ebx
+ popl_cfi_reg ecx
+ popl_cfi_reg edx
+ popl_cfi_reg esi
+ popl_cfi_reg edi
+ popl_cfi_reg ebp
+ popl_cfi_reg eax
+ .endm
+
+#endif /* CONFIG_X86_64 */
+
diff --git a/kernel/arch/x86/include/asm/ce4100.h b/kernel/arch/x86/include/asm/ce4100.h
new file mode 100644
index 000000000..e656ad8c0
--- /dev/null
+++ b/kernel/arch/x86/include/asm/ce4100.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_CE4100_H_
+#define _ASM_CE4100_H_
+
+int ce4100_pci_init(void);
+
+#endif
diff --git a/kernel/arch/x86/include/asm/checksum.h b/kernel/arch/x86/include/asm/checksum.h
new file mode 100644
index 000000000..5f5bb0f97
--- /dev/null
+++ b/kernel/arch/x86/include/asm/checksum.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_32
+# include <asm/checksum_32.h>
+#else
+# include <asm/checksum_64.h>
+#endif
diff --git a/kernel/arch/x86/include/asm/checksum_32.h b/kernel/arch/x86/include/asm/checksum_32.h
new file mode 100644
index 000000000..f50de6951
--- /dev/null
+++ b/kernel/arch/x86/include/asm/checksum_32.h
@@ -0,0 +1,202 @@
+#ifndef _ASM_X86_CHECKSUM_32_H
+#define _ASM_X86_CHECKSUM_32_H
+
+#include <linux/in6.h>
+
+#include <asm/uaccess.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or,
+ * even better, a 64-bit) boundary.
+ */
+
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
+ * access_ok().
+ */
+static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
+{
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+}
+
+static inline __wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst,
+ int len, __wsum sum,
+ int *err_ptr)
+{
+ __wsum ret;
+
+ might_sleep();
+ stac();
+ ret = csum_partial_copy_generic((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ clac();
+
+ return ret;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
+ * Arnt Gulbrandsen.
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum;
+
+ asm volatile("movl (%1), %0 ;\n"
+ "subl $4, %2 ;\n"
+ "jbe 2f ;\n"
+ "addl 4(%1), %0 ;\n"
+ "adcl 8(%1), %0 ;\n"
+ "adcl 12(%1), %0;\n"
+ "1: adcl 16(%1), %0 ;\n"
+ "lea 4(%1), %1 ;\n"
+ "decl %2 ;\n"
+ "jne 1b ;\n"
+ "adcl $0, %0 ;\n"
+ "movl %0, %2 ;\n"
+ "shrl $16, %0 ;\n"
+ "addw %w2, %w0 ;\n"
+ "adcl $0, %0 ;\n"
+ "notl %0 ;\n"
+ "2: ;\n"
+ /* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+ return (__force __sum16)sum;
+}
+
+/*
+ * Fold a partial checksum
+ */
+
+static inline __sum16 csum_fold(__wsum sum)
+{
+ asm("addl %1, %0 ;\n"
+ "adcl $0xffff, %0 ;\n"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000));
+ return (__force __sum16)(~(__force u32)sum >> 16);
+}
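The fold collapses a 32-bit one's-complement accumulator into 16 bits with an end-around carry and then inverts it. A portable standalone version of the same step (the fold_csum name is illustrative; this is not the inline-asm implementation above):

#include <assert.h>
#include <stdint.h>

static uint16_t fold_csum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the high half into the low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry from the fold */
	return (uint16_t)~sum;
}

int main(void)
{
	assert(fold_csum(0x0001ffffu) == (uint16_t)0xfffe);	/* 0x1ffff folds to 0x0001 */
	return 0;
}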
+
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ asm("addl %1, %0 ;\n"
+ "adcl %2, %0 ;\n"
+ "adcl %3, %0 ;\n"
+ "adcl $0, %0 ;\n"
+ : "=r" (sum)
+ : "g" (daddr), "g"(saddr),
+ "g" ((len + proto) << 8), "0" (sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
+{
+ asm("addl 0(%1), %0 ;\n"
+ "adcl 4(%1), %0 ;\n"
+ "adcl 8(%1), %0 ;\n"
+ "adcl 12(%1), %0 ;\n"
+ "adcl 0(%2), %0 ;\n"
+ "adcl 4(%2), %0 ;\n"
+ "adcl 8(%2), %0 ;\n"
+ "adcl 12(%2), %0 ;\n"
+ "adcl %3, %0 ;\n"
+ "adcl %4, %0 ;\n"
+ "adcl $0, %0 ;\n"
+ : "=&r" (sum)
+ : "r" (saddr), "r" (daddr),
+ "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
+ : "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static inline __wsum csum_and_copy_to_user(const void *src,
+ void __user *dst,
+ int len, __wsum sum,
+ int *err_ptr)
+{
+ __wsum ret;
+
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ stac();
+ ret = csum_partial_copy_generic(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+ clac();
+ return ret;
+ }
+
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return (__force __wsum)-1; /* invalid checksum */
+}
+
+#endif /* _ASM_X86_CHECKSUM_32_H */
diff --git a/kernel/arch/x86/include/asm/checksum_64.h b/kernel/arch/x86/include/asm/checksum_64.h
new file mode 100644
index 000000000..cd00e1774
--- /dev/null
+++ b/kernel/arch/x86/include/asm/checksum_64.h
@@ -0,0 +1,198 @@
+#ifndef _ASM_X86_CHECKSUM_64_H
+#define _ASM_X86_CHECKSUM_64_H
+
+/*
+ * Checksums for x86-64
+ * Copyright 2002 by Andi Kleen, SuSE Labs
+ * with some code from asm-x86/checksum.h
+ */
+
+#include <linux/compiler.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+/**
+ * csum_fold - Fold and invert a 32bit checksum.
+ * @sum: 32bit unfolded sum
+ *
+ * Fold a 32bit running checksum to 16bit and invert it. This is usually
+ * the last step before putting a checksum into a packet.
+ * Make sure not to mix with 64bit checksums.
+ */
+static inline __sum16 csum_fold(__wsum sum)
+{
+ asm(" addl %1,%0\n"
+ " adcl $0xffff,%0"
+ : "=r" (sum)
+ : "r" ((__force u32)sum << 16),
+ "0" ((__force u32)sum & 0xffff0000));
+ return (__force __sum16)(~(__force u32)sum >> 16);
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ *
+ * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
+ * Arnt Gulbrandsen.
+ */
+
+/**
+ * ip_fast_csum - Compute the IPv4 header checksum efficiently.
+ * @iph: ipv4 header
+ * @ihl: length of header / 4
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ unsigned int sum;
+
+ asm(" movl (%1), %0\n"
+ " subl $4, %2\n"
+ " jbe 2f\n"
+ " addl 4(%1), %0\n"
+ " adcl 8(%1), %0\n"
+ " adcl 12(%1), %0\n"
+ "1: adcl 16(%1), %0\n"
+ " lea 4(%1), %1\n"
+ " decl %2\n"
+ " jne 1b\n"
+ " adcl $0, %0\n"
+ " movl %0, %2\n"
+ " shrl $16, %0\n"
+ " addw %w2, %w0\n"
+ " adcl $0, %0\n"
+ " notl %0\n"
+ "2:"
+ /* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl)
+ : "1" (iph), "2" (ihl)
+ : "memory");
+ return (__force __sum16)sum;
+}
+
+/**
+ * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
+ * @saddr: source address
+ * @daddr: destination address
+ * @len: length of packet
+ * @proto: ip protocol of packet
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
+ * Returns the pseudo header checksum of the input data. The result
+ * is 32bit unfolded.
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ asm(" addl %1, %0\n"
+ " adcl %2, %0\n"
+ " adcl %3, %0\n"
+ " adcl $0, %0\n"
+ : "=r" (sum)
+ : "g" (daddr), "g" (saddr),
+ "g" ((len + proto)<<8), "0" (sum));
+ return sum;
+}
+
+
+/**
+ * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
+ * @saddr: source address
+ * @daddr: destination address
+ * @len: length of packet
+ * @proto: ip protocol of packet
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
+ * Returns the 16bit pseudo header checksum of the input data, already
+ * complemented and ready to be filled in.
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/**
+ * csum_partial - Compute an internet checksum.
+ * @buff: buffer to be checksummed
+ * @len: length of buffer.
+ * @sum: initial sum to be added in (32bit unfolded)
+ *
+ * Returns the 32bit unfolded internet checksum of the buffer.
+ * Before filling it in it needs to be csum_fold()'ed.
+ * buff should be aligned to a 64bit boundary if possible.
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
+#define HAVE_CSUM_COPY_USER 1
+
+
+/* Do not call this directly. Use the wrappers below */
+extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
+
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum isum, int *errp);
+extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
+ int len, __wsum isum, int *errp);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum);
+
+/* Old names. To be removed. */
+#define csum_and_copy_to_user csum_partial_copy_to_user
+#define csum_and_copy_from_user csum_partial_copy_from_user
+
+/**
+ * ip_compute_csum - Compute a 16bit IP checksum.
+ * @buff: buffer address.
+ * @len: length of buffer.
+ *
+ * Returns the 16bit folded/inverted checksum of the passed buffer.
+ * Ready to fill in.
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+/**
+ * csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
+ * @saddr: source address
+ * @daddr: destination address
+ * @len: length of packet
+ * @proto: protocol of packet
+ * @sum: initial sum (32bit unfolded) to be added in
+ *
+ * Computes an IPv6 pseudo header checksum. This sum is added into the
+ * checksum of UDP/TCP packets and contains some link layer information.
+ * Returns the unfolded 32bit checksum.
+ */
+
+struct in6_addr;
+
+#define _HAVE_ARCH_IPV6_CSUM 1
+extern __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+ __u32 len, unsigned short proto, __wsum sum);
+
+static inline unsigned add32_with_carry(unsigned a, unsigned b)
+{
+ asm("addl %2,%0\n\t"
+ "adcl $0,%0"
+ : "=r" (a)
+ : "0" (a), "rm" (b));
+ return a;
+}
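add32_with_carry() does a 32-bit add and folds the carry bit back into the low end, which is what keeps a one's-complement running sum correct. A portable sketch of the same operation (the add32_carry name is illustrative):

#include <assert.h>
#include <stdint.h>

static uint32_t add32_carry(uint32_t a, uint32_t b)
{
	uint64_t sum = (uint64_t)a + b;

	return (uint32_t)(sum + (sum >> 32));	/* end-around carry */
}

int main(void)
{
	assert(add32_carry(0xffffffffu, 1) == 1);	/* the carry wraps around */
	assert(add32_carry(2, 3) == 5);
	return 0;
}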
+
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+ return (__force __wsum)add32_with_carry((__force unsigned)csum,
+ (__force unsigned)addend);
+}
+
+#endif /* _ASM_X86_CHECKSUM_64_H */
diff --git a/kernel/arch/x86/include/asm/clocksource.h b/kernel/arch/x86/include/asm/clocksource.h
new file mode 100644
index 000000000..eda81dc0f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/clocksource.h
@@ -0,0 +1,15 @@
+/* x86-specific clocksource additions */
+
+#ifndef _ASM_X86_CLOCKSOURCE_H
+#define _ASM_X86_CLOCKSOURCE_H
+
+#define VCLOCK_NONE 0 /* No vDSO clock available. */
+#define VCLOCK_TSC 1 /* vDSO should use vread_tsc. */
+#define VCLOCK_HPET 2 /* vDSO should use vread_hpet. */
+#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
+
+struct arch_clocksource_data {
+ int vclock_mode;
+};
+
+#endif /* _ASM_X86_CLOCKSOURCE_H */
diff --git a/kernel/arch/x86/include/asm/cmdline.h b/kernel/arch/x86/include/asm/cmdline.h
new file mode 100644
index 000000000..e01f7f7cc
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cmdline.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_CMDLINE_H
+#define _ASM_X86_CMDLINE_H
+
+int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
+
+#endif /* _ASM_X86_CMDLINE_H */
diff --git a/kernel/arch/x86/include/asm/cmpxchg.h b/kernel/arch/x86/include/asm/cmpxchg.h
new file mode 100644
index 000000000..99c105d78
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cmpxchg.h
@@ -0,0 +1,233 @@
+#ifndef ASM_X86_CMPXCHG_H
+#define ASM_X86_CMPXCHG_H
+
+#include <linux/compiler.h>
+#include <asm/alternative.h> /* Provides LOCK_PREFIX */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * Non-existent functions to indicate usage errors at link time
+ * (or compile time, if the compiler implements __compiletime_error()).
+ */
+extern void __xchg_wrong_size(void)
+ __compiletime_error("Bad argument size for xchg");
+extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+extern void __xadd_wrong_size(void)
+ __compiletime_error("Bad argument size for xadd");
+extern void __add_wrong_size(void)
+ __compiletime_error("Bad argument size for add");
+
+/*
+ * Constants for operation sizes. On 32-bit, the 64-bit size is set to
+ * -1 because sizeof will never return -1, thereby making those switch
+ * case statements guaranteed dead code which the compiler will
+ * eliminate, and allowing the "missing symbol in the default case" to
+ * indicate a usage error.
+ */
+#define __X86_CASE_B 1
+#define __X86_CASE_W 2
+#define __X86_CASE_L 4
+#ifdef CONFIG_64BIT
+#define __X86_CASE_Q 8
+#else
+#define __X86_CASE_Q -1 /* sizeof will never return -1 */
+#endif
+
+/*
+ * An exchange-type operation, which takes a value and a pointer, and
+ * returns the old value.
+ */
+#define __xchg_op(ptr, arg, op, lock) \
+ ({ \
+ __typeof__ (*(ptr)) __ret = (arg); \
+ switch (sizeof(*(ptr))) { \
+ case __X86_CASE_B: \
+ asm volatile (lock #op "b %b0, %1\n" \
+ : "+q" (__ret), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case __X86_CASE_W: \
+ asm volatile (lock #op "w %w0, %1\n" \
+ : "+r" (__ret), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case __X86_CASE_L: \
+ asm volatile (lock #op "l %0, %1\n" \
+ : "+r" (__ret), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ case __X86_CASE_Q: \
+ asm volatile (lock #op "q %q0, %1\n" \
+ : "+r" (__ret), "+m" (*(ptr)) \
+ : : "memory", "cc"); \
+ break; \
+ default: \
+ __ ## op ## _wrong_size(); \
+ } \
+ __ret; \
+ })
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+ * use "asm volatile" and "memory" clobbers to prevent gcc from moving
+ * information around.
+ */
+#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __raw_cmpxchg(ptr, old, new, size, lock) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ switch (size) { \
+ case __X86_CASE_B: \
+ { \
+ volatile u8 *__ptr = (volatile u8 *)(ptr); \
+ asm volatile(lock "cmpxchgb %2,%1" \
+ : "=a" (__ret), "+m" (*__ptr) \
+ : "q" (__new), "0" (__old) \
+ : "memory"); \
+ break; \
+ } \
+ case __X86_CASE_W: \
+ { \
+ volatile u16 *__ptr = (volatile u16 *)(ptr); \
+ asm volatile(lock "cmpxchgw %2,%1" \
+ : "=a" (__ret), "+m" (*__ptr) \
+ : "r" (__new), "0" (__old) \
+ : "memory"); \
+ break; \
+ } \
+ case __X86_CASE_L: \
+ { \
+ volatile u32 *__ptr = (volatile u32 *)(ptr); \
+ asm volatile(lock "cmpxchgl %2,%1" \
+ : "=a" (__ret), "+m" (*__ptr) \
+ : "r" (__new), "0" (__old) \
+ : "memory"); \
+ break; \
+ } \
+ case __X86_CASE_Q: \
+ { \
+ volatile u64 *__ptr = (volatile u64 *)(ptr); \
+ asm volatile(lock "cmpxchgq %2,%1" \
+ : "=a" (__ret), "+m" (*__ptr) \
+ : "r" (__new), "0" (__old) \
+ : "memory"); \
+ break; \
+ } \
+ default: \
+ __cmpxchg_wrong_size(); \
+ } \
+ __ret; \
+})
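The compare-and-exchange semantics above (compare OLD with *ptr, store NEW only on a match, and return the value that was seen) are what retry loops are built on. A userspace sketch of such a loop using C11 atomics rather than the kernel macro (the helper name is illustrative):

#include <assert.h>
#include <stdatomic.h>

/* Add delta to *counter with a cmpxchg-style retry loop; returns the old value. */
static long fetch_add_via_cas(_Atomic long *counter, long delta)
{
	long old = atomic_load(counter);

	/* On failure, atomic_compare_exchange_weak reloads 'old' with the current value. */
	while (!atomic_compare_exchange_weak(counter, &old, old + delta))
		;
	return old;
}

int main(void)
{
	_Atomic long c = 5;

	assert(fetch_add_via_cas(&c, 3) == 5);
	assert(atomic_load(&c) == 8);
	return 0;
}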
+
+#define __cmpxchg(ptr, old, new, size) \
+ __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
+
+#define __sync_cmpxchg(ptr, old, new, size) \
+ __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+
+#define __cmpxchg_local(ptr, old, new, size) \
+ __raw_cmpxchg((ptr), (old), (new), (size), "")
+
+#ifdef CONFIG_X86_32
+# include <asm/cmpxchg_32.h>
+#else
+# include <asm/cmpxchg_64.h>
+#endif
+
+#define cmpxchg(ptr, old, new) \
+ __cmpxchg(ptr, old, new, sizeof(*(ptr)))
+
+#define sync_cmpxchg(ptr, old, new) \
+ __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
+
+#define cmpxchg_local(ptr, old, new) \
+ __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
+
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
+#define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
+
+#define __add(ptr, inc, lock) \
+ ({ \
+ __typeof__ (*(ptr)) __ret = (inc); \
+ switch (sizeof(*(ptr))) { \
+ case __X86_CASE_B: \
+ asm volatile (lock "addb %b1, %0\n" \
+ : "+m" (*(ptr)) : "qi" (inc) \
+ : "memory", "cc"); \
+ break; \
+ case __X86_CASE_W: \
+ asm volatile (lock "addw %w1, %0\n" \
+ : "+m" (*(ptr)) : "ri" (inc) \
+ : "memory", "cc"); \
+ break; \
+ case __X86_CASE_L: \
+ asm volatile (lock "addl %1, %0\n" \
+ : "+m" (*(ptr)) : "ri" (inc) \
+ : "memory", "cc"); \
+ break; \
+ case __X86_CASE_Q: \
+ asm volatile (lock "addq %1, %0\n" \
+ : "+m" (*(ptr)) : "ri" (inc) \
+ : "memory", "cc"); \
+ break; \
+ default: \
+ __add_wrong_size(); \
+ } \
+ __ret; \
+ })
+
+/*
+ * add_*() adds "inc" to "*ptr"
+ *
+ * __add() takes a lock prefix
+ * add_smp() is locked when multiple CPUs are online
+ * add_sync() is always locked
+ */
+#define add_smp(ptr, inc) __add((ptr), (inc), LOCK_PREFIX)
+#define add_sync(ptr, inc) __add((ptr), (inc), "lock; ")
+
+#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \
+({ \
+ bool __ret; \
+ __typeof__(*(p1)) __old1 = (o1), __new1 = (n1); \
+ __typeof__(*(p2)) __old2 = (o2), __new2 = (n2); \
+ BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
+ BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
+ VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long))); \
+ VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2)); \
+ asm volatile(pfx "cmpxchg%c4b %2; sete %0" \
+ : "=a" (__ret), "+d" (__old2), \
+ "+m" (*(p1)), "+m" (*(p2)) \
+ : "i" (2 * sizeof(long)), "a" (__old1), \
+ "b" (__new1), "c" (__new2)); \
+ __ret; \
+})
+
+#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+ __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
+
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+ __cmpxchg_double(, p1, p2, o1, o2, n1, n2)
+
+#endif /* ASM_X86_CMPXCHG_H */
diff --git a/kernel/arch/x86/include/asm/cmpxchg_32.h b/kernel/arch/x86/include/asm/cmpxchg_32.h
new file mode 100644
index 000000000..f7e142926
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cmpxchg_32.h
@@ -0,0 +1,114 @@
+#ifndef _ASM_X86_CMPXCHG_32_H
+#define _ASM_X86_CMPXCHG_32_H
+
+/*
+ * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
+ * you need to test for the feature in boot_cpu_data.
+ */
+
+/*
+ * CMPXCHG8B only writes to the target if we had the previous
+ * value in registers, otherwise it acts as a read and gives us the
+ * "new previous" value. That is why there is a loop. Preloading
+ * EDX:EAX is a performance optimization: in the common case it means
+ * we need only one locked operation.
+ *
+ * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
+ * least an FPU save and/or %cr0.ts manipulation.
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow the
+ * instruction to be executed atomically. We need to have the reader
+ * side to see the coherent 64bit value.
+ */
+static inline void set_64bit(volatile u64 *ptr, u64 value)
+{
+ u32 low = value;
+ u32 high = value >> 32;
+ u64 prev = *ptr;
+
+ asm volatile("\n1:\t"
+ LOCK_PREFIX "cmpxchg8b %0\n\t"
+ "jnz 1b"
+ : "=m" (*ptr), "+A" (prev)
+ : "b" (low), "c" (high)
+ : "memory");
+}
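The cmpxchg8b loop above turns an unconditional 64-bit store into a retry loop on CPUs that cannot store 64 bits atomically with a plain mov. A portable C11 sketch of the same retry pattern (not the kernel code; the helper name is illustrative):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static void store_64bit_cas(_Atomic uint64_t *ptr, uint64_t val)
{
	uint64_t prev = atomic_load_explicit(ptr, memory_order_relaxed);

	/* Retry until the exchange succeeds; prev is refreshed on each failure. */
	while (!atomic_compare_exchange_weak(ptr, &prev, val))
		;
}

int main(void)
{
	_Atomic uint64_t v = 0;

	store_64bit_cas(&v, 0x123456789abcdef0ULL);
	assert(atomic_load(&v) == 0x123456789abcdef0ULL);
	return 0;
}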
+
+#ifdef CONFIG_X86_CMPXCHG64
+#define cmpxchg64(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
+ (unsigned long long)(n)))
+#define cmpxchg64_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
+ (unsigned long long)(n)))
+#endif
+
+static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
+{
+ u64 prev;
+ asm volatile(LOCK_PREFIX "cmpxchg8b %1"
+ : "=A" (prev),
+ "+m" (*ptr)
+ : "b" ((u32)new),
+ "c" ((u32)(new >> 32)),
+ "0" (old)
+ : "memory");
+ return prev;
+}
+
+static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+{
+ u64 prev;
+ asm volatile("cmpxchg8b %1"
+ : "=A" (prev),
+ "+m" (*ptr)
+ : "b" ((u32)new),
+ "c" ((u32)(new >> 32)),
+ "0" (old)
+ : "memory");
+ return prev;
+}
+
+#ifndef CONFIG_X86_CMPXCHG64
+/*
+ * Building a kernel capable of running on 80386 and 80486. It may be
+ * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPUs.
+ */
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __typeof__(*(ptr)) __old = (o); \
+ __typeof__(*(ptr)) __new = (n); \
+ alternative_io(LOCK_PREFIX_HERE \
+ "call cmpxchg8b_emu", \
+ "lock; cmpxchg8b (%%esi)" , \
+ X86_FEATURE_CX8, \
+ "=A" (__ret), \
+ "S" ((ptr)), "0" (__old), \
+ "b" ((unsigned int)__new), \
+ "c" ((unsigned int)(__new>>32)) \
+ : "memory"); \
+ __ret; })
+
+
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __typeof__(*(ptr)) __old = (o); \
+ __typeof__(*(ptr)) __new = (n); \
+ alternative_io("call cmpxchg8b_emu", \
+ "cmpxchg8b (%%esi)" , \
+ X86_FEATURE_CX8, \
+ "=A" (__ret), \
+ "S" ((ptr)), "0" (__old), \
+ "b" ((unsigned int)__new), \
+ "c" ((unsigned int)(__new>>32)) \
+ : "memory"); \
+ __ret; })
+
+#endif
+
+#define system_has_cmpxchg_double() cpu_has_cx8
+
+#endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/kernel/arch/x86/include/asm/cmpxchg_64.h b/kernel/arch/x86/include/asm/cmpxchg_64.h
new file mode 100644
index 000000000..1af94697a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cmpxchg_64.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_X86_CMPXCHG_64_H
+#define _ASM_X86_CMPXCHG_64_H
+
+static inline void set_64bit(volatile u64 *ptr, u64 val)
+{
+ *ptr = val;
+}
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+})
+
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+})
+
+#define system_has_cmpxchg_double() cpu_has_cx16
+
+#endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/kernel/arch/x86/include/asm/compat.h b/kernel/arch/x86/include/asm/compat.h
new file mode 100644
index 000000000..acdee0922
--- /dev/null
+++ b/kernel/arch/x86/include/asm/compat.h
@@ -0,0 +1,324 @@
+#ifndef _ASM_X86_COMPAT_H
+#define _ASM_X86_COMPAT_H
+
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <asm/processor.h>
+#include <asm/user32.h>
+#include <asm/unistd.h>
+
+#define COMPAT_USER_HZ 100
+#define COMPAT_UTS_MACHINE "i686\0\0"
+
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_time_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u16 __compat_uid_t;
+typedef u16 __compat_gid_t;
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+typedef u16 compat_mode_t;
+typedef u32 compat_ino_t;
+typedef u16 compat_dev_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef u16 compat_nlink_t;
+typedef u16 compat_ipc_pid_t;
+typedef s32 compat_daddr_t;
+typedef u32 compat_caddr_t;
+typedef __kernel_fsid_t compat_fsid_t;
+typedef s32 compat_timer_t;
+typedef s32 compat_key_t;
+
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef s64 __attribute__((aligned(4))) compat_s64;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u64 __attribute__((aligned(4))) compat_u64;
+typedef u32 compat_uptr_t;
+
+struct compat_timespec {
+ compat_time_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct compat_timeval {
+ compat_time_t tv_sec;
+ s32 tv_usec;
+};
+
+struct compat_stat {
+ compat_dev_t st_dev;
+ u16 __pad1;
+ compat_ino_t st_ino;
+ compat_mode_t st_mode;
+ compat_nlink_t st_nlink;
+ __compat_uid_t st_uid;
+ __compat_gid_t st_gid;
+ compat_dev_t st_rdev;
+ u16 __pad2;
+ u32 st_size;
+ u32 st_blksize;
+ u32 st_blocks;
+ u32 st_atime;
+ u32 st_atime_nsec;
+ u32 st_mtime;
+ u32 st_mtime_nsec;
+ u32 st_ctime;
+ u32 st_ctime_nsec;
+ u32 __unused4;
+ u32 __unused5;
+};
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+ compat_pid_t l_pid;
+};
+
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+
+/*
+ * IA32 uses 4 byte alignment for 64 bit quantities,
+ * so we need to pack this structure.
+ */
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+} __attribute__((packed));
+
+struct compat_statfs {
+ int f_type;
+ int f_bsize;
+ int f_blocks;
+ int f_bfree;
+ int f_bavail;
+ int f_files;
+ int f_ffree;
+ compat_fsid_t f_fsid;
+ int f_namelen; /* SunOS ignores this field. */
+ int f_frsize;
+ int f_flags;
+ int f_spare[4];
+};
+
+#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
+#define COMPAT_RLIM_INFINITY 0xffffffff
+
+typedef u32 compat_old_sigset_t; /* at least 32 bits */
+
+#define _COMPAT_NSIG 64
+#define _COMPAT_NSIG_BPW 32
+
+typedef u32 compat_sigset_word;
+
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ unsigned int _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ int _overrun_incr; /* amount to add to overrun */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ unsigned int _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ unsigned int _pid; /* which child */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+ /* SIGCHLD (x32 version) */
+ struct {
+ unsigned int _pid; /* which child */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ unsigned int _addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ struct {
+ unsigned int _call_addr; /* calling insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+ } _sifields;
+} compat_siginfo_t;
+
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
+
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ unsigned short mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ compat_ulong_t unused1;
+ compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_time_t sem_otime;
+ compat_ulong_t __unused1;
+ compat_time_t sem_ctime;
+ compat_ulong_t __unused2;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_time_t msg_stime;
+ compat_ulong_t __unused1;
+ compat_time_t msg_rtime;
+ compat_ulong_t __unused2;
+ compat_time_t msg_ctime;
+ compat_ulong_t __unused3;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_time_t shm_atime;
+ compat_ulong_t __unused1;
+ compat_time_t shm_dtime;
+ compat_ulong_t __unused2;
+ compat_time_t shm_ctime;
+ compat_ulong_t __unused3;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+/*
+ * The type of struct elf_prstatus.pr_reg in compatible core dumps.
+ */
+#ifdef CONFIG_X86_X32_ABI
+typedef struct user_regs_struct compat_elf_gregset_t;
+
+#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
+#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
+#define SET_PR_FPVALID(S,V) \
+ do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
+ while (0)
+
+#define COMPAT_USE_64BIT_TIME \
+ (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
+#else
+typedef struct user_regs_struct32 compat_elf_gregset_t;
+#endif
+
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)uptr;
+}
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
+
+static inline void __user *arch_compat_alloc_user_space(long len)
+{
+ compat_uptr_t sp;
+
+ if (test_thread_flag(TIF_IA32)) {
+ sp = task_pt_regs(current)->sp;
+ } else {
+ /* -128 for the x32 ABI redzone */
+ sp = task_pt_regs(current)->sp - 128;
+ }
+
+ return (void __user *)round_down(sp - len, 16);
+}
+
+static inline bool is_x32_task(void)
+{
+#ifdef CONFIG_X86_X32_ABI
+ if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
+ return true;
+#endif
+ return false;
+}
+
+static inline bool is_compat_task(void)
+{
+ return is_ia32_task() || is_x32_task();
+}
+
+#endif /* _ASM_X86_COMPAT_H */
diff --git a/kernel/arch/x86/include/asm/context_tracking.h b/kernel/arch/x86/include/asm/context_tracking.h
new file mode 100644
index 000000000..1fe49704b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/context_tracking.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_X86_CONTEXT_TRACKING_H
+#define _ASM_X86_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+# define SCHEDULE_USER call schedule_user
+#else
+# define SCHEDULE_USER call schedule
+#endif
+
+#endif
diff --git a/kernel/arch/x86/include/asm/cpu.h b/kernel/arch/x86/include/asm/cpu.h
new file mode 100644
index 000000000..bf2caa1de
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cpu.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_X86_CPU_H
+#define _ASM_X86_CPU_H
+
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/topology.h>
+#include <linux/nodemask.h>
+#include <linux/percpu.h>
+
+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu) boot_cpu_physical_apicid
+#define safe_smp_processor_id() 0
+#define stack_smp_processor_id() 0
+
+#endif /* CONFIG_SMP */
+
+struct x86_cpu {
+ struct cpu cpu;
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int arch_register_cpu(int num);
+extern void arch_unregister_cpu(int);
+extern void start_cpu0(void);
+#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
+extern int _debug_hotplug_cpu(int cpu, int action);
+#endif
+#endif
+
+int mwait_usable(const struct cpuinfo_x86 *);
+
+#endif /* _ASM_X86_CPU_H */
diff --git a/kernel/arch/x86/include/asm/cpu_device_id.h b/kernel/arch/x86/include/asm/cpu_device_id.h
new file mode 100644
index 000000000..ff501e511
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cpu_device_id.h
@@ -0,0 +1,13 @@
+#ifndef _CPU_DEVICE_ID
+#define _CPU_DEVICE_ID 1
+
+/*
+ * Declare drivers belonging to specific x86 CPUs
+ * Similar in spirit to pci_device_id and related PCI functions
+ */
+
+#include <linux/mod_devicetable.h>
+
+extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
+
+#endif
diff --git a/kernel/arch/x86/include/asm/cpufeature.h b/kernel/arch/x86/include/asm/cpufeature.h
new file mode 100644
index 000000000..3d6606fb9
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cpufeature.h
@@ -0,0 +1,603 @@
+/*
+ * Defines x86 CPU feature bits
+ */
+#ifndef _ASM_X86_CPUFEATURE_H
+#define _ASM_X86_CPUFEATURE_H
+
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#include <asm/required-features.h>
+#endif
+
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#include <asm/disabled-features.h>
+#endif
+
+#define NCAPINTS 13 /* N 32-bit words worth of info */
+#define NBUGINTS 1 /* N 32-bit bug flags */
+
+/*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name. If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
+ */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
+ /* (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
+/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
+#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
+/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
+/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
+#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
+#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
+#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
+#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
+#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
+#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
+#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
+#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If set, HyperThreading is not valid */
+#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT ( 6*32+26) /* data breakpoint extension */
+#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
+
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc, word 7
+ */
+#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
+#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
+#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
+#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
+#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
+#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+
+/* Virtualization flags: Linux defined, word 8 */
+#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */
+#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
+#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
+#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
+#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
+#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
+
+/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
+#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
+#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
+#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
+#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+
+/*
+ * BUG word(s)
+ */
+#define X86_BUG(x) (NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
+#include <asm/asm.h>
+#include <linux/bitops.h>
+
+#ifdef CONFIG_X86_FEATURE_NAMES
+extern const char * const x86_cap_flags[NCAPINTS*32];
+extern const char * const x86_power_flags[32];
+#define X86_CAP_FMT "%s"
+#define x86_cap_flag(flag) x86_cap_flags[flag]
+#else
+#define X86_CAP_FMT "%d:%d"
+#define x86_cap_flag(flag) ((flag) >> 5), ((flag) & 31)
+#endif
+
+/*
+ * In order to save room, we index into this array by doing
+ * X86_BUG_<name> - NCAPINTS*32.
+ */
+extern const char * const x86_bug_flags[NBUGINTS*32];
+
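+/*
+ * Editor's note (sketch, not part of the original header): since
+ * X86_BUG(x) numbers bits starting at NCAPINTS*32, a bug flag name is
+ * looked up by subtracting that offset first, e.g.
+ *
+ *	const char *name = x86_bug_flags[X86_BUG_F00F - NCAPINTS*32];
+ */
+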
+#define test_cpu_cap(c, bit) \
+ test_bit(bit, (unsigned long *)((c)->x86_capability))
+
+#define REQUIRED_MASK_BIT_SET(bit) \
+ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
+ (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
+ (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
+ (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
+ (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
+ (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
+ (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
+ (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
+ (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
+ (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
+
+#define DISABLED_MASK_BIT_SET(bit) \
+ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \
+ (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \
+ (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \
+ (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \
+ (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \
+ (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \
+ (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \
+ (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \
+ (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \
+ (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )
+
+#define cpu_has(c, bit) \
+ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
+ test_cpu_cap(c, bit))
+
+#define this_cpu_has(bit) \
+ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
+ x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
+
+/*
+ * This macro is for detection of features which need kernel
+ * infrastructure to be used. It may *not* directly test the CPU
+ * itself. Use the cpu_has() family if you want true runtime
+ * testing of CPU features, like in hypervisor code where you are
+ * supporting a possible guest feature where host support for it
+ * is not relevant.
+ */
+#define cpu_feature_enabled(bit) \
+ (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
+ cpu_has(&boot_cpu_data, bit))
+
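+/*
+ * Usage sketch (editor's addition; setup_smap() is a hypothetical
+ * helper): code gating the kernel's own use of a feature also honours
+ * the compile-time disabled mask,
+ *
+ *	if (cpu_feature_enabled(X86_FEATURE_SMAP))
+ *		setup_smap();
+ *
+ * while code probing the hardware on behalf of a guest, as described
+ * above, keeps using cpu_has(&boot_cpu_data, X86_FEATURE_SMAP).
+ */
+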
+#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
+
+#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
+#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
+#define setup_clear_cpu_cap(bit) do { \
+ clear_cpu_cap(&boot_cpu_data, bit); \
+ set_bit(bit, (unsigned long *)cpu_caps_cleared); \
+} while (0)
+#define setup_force_cpu_cap(bit) do { \
+ set_cpu_cap(&boot_cpu_data, bit); \
+ set_bit(bit, (unsigned long *)cpu_caps_set); \
+} while (0)
+
+#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
+#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
+#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
+#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
+#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
+#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
+#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
+#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
+#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
+#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
+#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
+#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
+#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
+#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
+#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
+#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
+#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
+#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
+#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
+#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
+#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
+#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
+#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
+#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
+#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
+#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
+#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
+#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
+#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
+#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
+#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
+#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
+#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
+#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
+#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
+#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
+#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
+#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
+#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
+#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
+#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
+#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
+#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
+#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
+#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
+#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
+#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
+#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
+#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
+#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
+#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
+
+#if __GNUC__ >= 4
+extern void warn_pre_alternatives(void);
+extern bool __static_cpu_has_safe(u16 bit);
+
+/*
+ * Static testing of CPU features. Used in the same way as boot_cpu_has().
+ * These are only valid after alternatives have run, but will statically
+ * patch the target code for additional performance.
+ */
+static __always_inline __pure bool __static_cpu_has(u16 bit)
+{
+#ifdef CC_HAVE_ASM_GOTO
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+
+ /*
+ * Catch too early usage of this before alternatives
+ * have run.
+ */
+ asm_volatile_goto("1: jmp %l[t_warn]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+ " .long 0\n" /* no replacement */
+ " .word %P0\n" /* 1: do replace */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 0\n" /* replacement len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ /* skipping size check since replacement size = 0 */
+ : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
+
+#endif
+
+ asm_volatile_goto("1: jmp %l[t_no]\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+ " .long 0\n" /* no replacement */
+ " .word %P0\n" /* feature bit */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 0\n" /* replacement len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ /* skipping size check since replacement size = 0 */
+ : : "i" (bit) : : t_no);
+ return true;
+ t_no:
+ return false;
+
+#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+ t_warn:
+ warn_pre_alternatives();
+ return false;
+#endif
+
+#else /* CC_HAVE_ASM_GOTO */
+
+ u8 flag;
+ /* Open-coded due to __stringify() in ALTERNATIVE() */
+ asm volatile("1: movb $0,%0\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n"
+ " .long 3f - .\n"
+ " .word %P1\n" /* feature bit */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 4f - 3f\n" /* replacement len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+ : "=qm" (flag) : "i" (bit));
+ return flag;
+
+#endif /* CC_HAVE_ASM_GOTO */
+}
+
+#define static_cpu_has(bit) \
+( \
+ __builtin_constant_p(boot_cpu_has(bit)) ? \
+ boot_cpu_has(bit) : \
+ __builtin_constant_p(bit) ? \
+ __static_cpu_has(bit) : \
+ boot_cpu_has(bit) \
+)
+
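+/*
+ * Usage sketch (editor's addition; the two path functions are
+ * hypothetical): hot-path code that only runs after alternatives have
+ * been applied can branch through the patched test,
+ *
+ *	if (static_cpu_has(X86_FEATURE_XMM2))
+ *		sse2_path();
+ *	else
+ *		generic_path();
+ *
+ * and the conditional is rewritten into a direct jump at boot.
+ */
+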
+static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+{
+#ifdef CC_HAVE_ASM_GOTO
+ asm_volatile_goto("1: jmp %l[t_dynamic]\n"
+ "2:\n"
+ ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+ "((5f-4f) - (2b-1b)),0x90\n"
+ "3:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 4f - .\n" /* repl offset */
+ " .word %P1\n" /* always replace */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 5f - 4f\n" /* repl len */
+ " .byte 3b - 2b\n" /* pad len */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "4: jmp %l[t_no]\n"
+ "5:\n"
+ ".previous\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 0\n" /* no replacement */
+ " .word %P0\n" /* feature bit */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 0\n" /* repl len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
+ : : t_dynamic, t_no);
+ return true;
+ t_no:
+ return false;
+ t_dynamic:
+ return __static_cpu_has_safe(bit);
+#else
+ u8 flag;
+ /* Open-coded due to __stringify() in ALTERNATIVE() */
+ asm volatile("1: movb $2,%0\n"
+ "2:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 3f - .\n" /* repl offset */
+ " .word %P2\n" /* always replace */
+ " .byte 2b - 1b\n" /* source len */
+ " .byte 4f - 3f\n" /* replacement len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "3: movb $0,%0\n"
+ "4:\n"
+ ".previous\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 5f - .\n" /* repl offset */
+ " .word %P1\n" /* feature bit */
+ " .byte 4b - 3b\n" /* src len */
+ " .byte 6f - 5f\n" /* repl len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "5: movb $1,%0\n"
+ "6:\n"
+ ".previous\n"
+ : "=qm" (flag)
+ : "i" (bit), "i" (X86_FEATURE_ALWAYS));
+ return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
+#endif /* CC_HAVE_ASM_GOTO */
+}
+
+#define static_cpu_has_safe(bit) \
+( \
+ __builtin_constant_p(boot_cpu_has(bit)) ? \
+ boot_cpu_has(bit) : \
+ _static_cpu_has_safe(bit) \
+)
+#else
+/*
+ * gcc 3.x is too stupid to do the static test; fall back to dynamic.
+ */
+#define static_cpu_has(bit) boot_cpu_has(bit)
+#define static_cpu_has_safe(bit) boot_cpu_has(bit)
+#endif
+
+#define cpu_has_bug(c, bit) cpu_has(c, (bit))
+#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
+#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))
+
+#define static_cpu_has_bug(bit) static_cpu_has((bit))
+#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
+#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
+
+#define MAX_CPU_FEATURES (NCAPINTS * 32)
+#define cpu_have_feature boot_cpu_has
+
+#define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X"
+#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
+ boot_cpu_data.x86_model
+
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+#endif /* _ASM_X86_CPUFEATURE_H */
diff --git a/kernel/arch/x86/include/asm/cpumask.h b/kernel/arch/x86/include/asm/cpumask.h
new file mode 100644
index 000000000..61c852fa3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/cpumask.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+extern void setup_cpu_local_masks(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */
diff --git a/kernel/arch/x86/include/asm/crash.h b/kernel/arch/x86/include/asm/crash.h
new file mode 100644
index 000000000..f498411f2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/crash.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_X86_CRASH_H
+#define _ASM_X86_CRASH_H
+
+int crash_load_segments(struct kimage *image);
+int crash_copy_backup_region(struct kimage *image);
+int crash_setup_memmap_entries(struct kimage *image,
+ struct boot_params *params);
+
+#endif /* _ASM_X86_CRASH_H */
diff --git a/kernel/arch/x86/include/asm/crypto/aes.h b/kernel/arch/x86/include/asm/crypto/aes.h
new file mode 100644
index 000000000..80545a1cb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/crypto/aes.h
@@ -0,0 +1,11 @@
+#ifndef ASM_X86_AES_H
+#define ASM_X86_AES_H
+
+#include <linux/crypto.h>
+#include <crypto/aes.h>
+
+void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
+ const u8 *src);
+void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
+ const u8 *src);
+#endif
diff --git a/kernel/arch/x86/include/asm/crypto/camellia.h b/kernel/arch/x86/include/asm/crypto/camellia.h
new file mode 100644
index 000000000..bb93333d9
--- /dev/null
+++ b/kernel/arch/x86/include/asm/crypto/camellia.h
@@ -0,0 +1,101 @@
+#ifndef ASM_X86_CAMELLIA_H
+#define ASM_X86_CAMELLIA_H
+
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+
+#define CAMELLIA_MIN_KEY_SIZE 16
+#define CAMELLIA_MAX_KEY_SIZE 32
+#define CAMELLIA_BLOCK_SIZE 16
+#define CAMELLIA_TABLE_BYTE_LEN 272
+#define CAMELLIA_PARALLEL_BLOCKS 2
+
+struct camellia_ctx {
+ u64 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
+ u32 key_length;
+};
+
+struct camellia_lrw_ctx {
+ struct lrw_table_ctx lrw_table;
+ struct camellia_ctx camellia_ctx;
+};
+
+struct camellia_xts_ctx {
+ struct camellia_ctx tweak_ctx;
+ struct camellia_ctx crypt_ctx;
+};
+
+extern int __camellia_setkey(struct camellia_ctx *cctx,
+ const unsigned char *key,
+ unsigned int key_len, u32 *flags);
+
+extern int lrw_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+extern void lrw_camellia_exit_tfm(struct crypto_tfm *tfm);
+
+extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+/* regular block cipher functions */
+asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* 2-way parallel cipher functions */
+asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* 16-way parallel cipher functions (avx/aes-ni) */
+asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk(ctx, dst, src, false);
+}
+
+static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk(ctx, dst, src, true);
+}
+
+static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk_2way(ctx, dst, src, false);
+}
+
+static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __camellia_enc_blk_2way(ctx, dst, src, true);
+}
+
+/* glue helpers */
+extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
+extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+
+#endif /* ASM_X86_CAMELLIA_H */
diff --git a/kernel/arch/x86/include/asm/crypto/glue_helper.h b/kernel/arch/x86/include/asm/crypto/glue_helper.h
new file mode 100644
index 000000000..1eef55596
--- /dev/null
+++ b/kernel/arch/x86/include/asm/crypto/glue_helper.h
@@ -0,0 +1,145 @@
+/*
+ * Shared glue code for 128bit block ciphers
+ */
+
+#ifndef _CRYPTO_GLUE_HELPER_H
+#define _CRYPTO_GLUE_HELPER_H
+
+#include <linux/kernel.h>
+#include <linux/crypto.h>
+#include <asm/i387.h>
+#include <crypto/b128ops.h>
+
+typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
+typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
+#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
+#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
+#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))
+
+struct common_glue_func_entry {
+ unsigned int num_blocks; /* number of blocks that @fn will process */
+ union {
+ common_glue_func_t ecb;
+ common_glue_cbc_func_t cbc;
+ common_glue_ctr_func_t ctr;
+ common_glue_xts_func_t xts;
+ } fn_u;
+};
+
+struct common_glue_ctx {
+ unsigned int num_funcs;
+ int fpu_blocks_limit; /* -1 means fpu not needed at all */
+
+ /*
+ * The first funcs entry must have the largest num_blocks and the last
+ * funcs entry must have num_blocks == 1 (see the sketch below).
+ */
+ struct common_glue_func_entry funcs[];
+};
+
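+/*
+ * Editor's sketch of a table obeying the ordering rule above (the
+ * cipher_enc_* functions are hypothetical, not from this header):
+ *
+ *	static const struct common_glue_ctx example_enc = {
+ *		.num_funcs = 2,
+ *		.fpu_blocks_limit = 8,
+ *		.funcs = { {
+ *			.num_blocks = 8,
+ *			.fn_u = { .ecb = GLUE_FUNC_CAST(cipher_enc_8way) }
+ *		}, {
+ *			.num_blocks = 1,
+ *			.fn_u = { .ecb = GLUE_FUNC_CAST(cipher_enc_blk) }
+ *		} }
+ *	};
+ */
+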
+static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
+ struct blkcipher_desc *desc,
+ bool fpu_enabled, unsigned int nbytes)
+{
+ if (likely(fpu_blocks_limit < 0))
+ return false;
+
+ if (fpu_enabled)
+ return true;
+
+ /*
+ * Vector registers are only used when the chunk to be processed is
+ * large enough, so do not enable the FPU until it is necessary.
+ */
+ if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
+ return false;
+
+ if (desc) {
+ /* prevent sleeping if FPU is in use */
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ }
+
+ kernel_fpu_begin();
+ return true;
+}
+
+static inline void glue_fpu_end(bool fpu_enabled)
+{
+ if (fpu_enabled)
+ kernel_fpu_end();
+}
+
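+/*
+ * Editor's sketch of the intended call pattern (illustrative only):
+ * the FPU is enabled lazily for the first large-enough chunk and
+ * released once at the end of the walk:
+ *
+ *	bool fpu_enabled = false;
+ *
+ *	while (nbytes) {
+ *		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+ *					     desc, fpu_enabled, nbytes);
+ *		... process one chunk and update nbytes ...
+ *	}
+ *	glue_fpu_end(fpu_enabled);
+ */
+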
+static inline void le128_to_be128(be128 *dst, const le128 *src)
+{
+ dst->a = cpu_to_be64(le64_to_cpu(src->a));
+ dst->b = cpu_to_be64(le64_to_cpu(src->b));
+}
+
+static inline void be128_to_le128(le128 *dst, const be128 *src)
+{
+ dst->a = cpu_to_le64(be64_to_cpu(src->a));
+ dst->b = cpu_to_le64(be64_to_cpu(src->b));
+}
+
+static inline void le128_inc(le128 *i)
+{
+ u64 a = le64_to_cpu(i->a);
+ u64 b = le64_to_cpu(i->b);
+
+ b++;
+ if (!b)
+ a++;
+
+ i->a = cpu_to_le64(a);
+ i->b = cpu_to_le64(b);
+}
+
+static inline void le128_gf128mul_x_ble(le128 *dst, const le128 *src)
+{
+ u64 a = le64_to_cpu(src->a);
+ u64 b = le64_to_cpu(src->b);
+ u64 _tt = ((s64)a >> 63) & 0x87;
+
+ dst->a = cpu_to_le64((a << 1) ^ (b >> 63));
+ dst->b = cpu_to_le64((b << 1) ^ _tt);
+}
+
+extern int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+
+extern int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes);
+
+extern int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes);
+
+extern int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+
+extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+ struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ common_glue_func_t tweak_fn, void *tweak_ctx,
+ void *crypt_ctx);
+
+extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv, common_glue_func_t fn);
+
+#endif /* _CRYPTO_GLUE_HELPER_H */
diff --git a/kernel/arch/x86/include/asm/crypto/serpent-avx.h b/kernel/arch/x86/include/asm/crypto/serpent-avx.h
new file mode 100644
index 000000000..33c2b8a43
--- /dev/null
+++ b/kernel/arch/x86/include/asm/crypto/serpent-avx.h
@@ -0,0 +1,48 @@
+#ifndef ASM_X86_SERPENT_AVX_H
+#define ASM_X86_SERPENT_AVX_H
+
+#include <linux/crypto.h>
+#include <crypto/serpent.h>
+
+#define SERPENT_PARALLEL_BLOCKS 8
+
+struct serpent_lrw_ctx {
+ struct lrw_table_ctx lrw_table;
+ struct serpent_ctx serpent_ctx;
+};
+
+struct serpent_xts_ctx {
+ struct serpent_ctx tweak_ctx;
+ struct serpent_ctx crypt_ctx;
+};
+
+asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, le128 *iv);
+
+extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+
+extern int lrw_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+extern void lrw_serpent_exit_tfm(struct crypto_tfm *tfm);
+
+extern int xts_serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+#endif
diff --git a/kernel/arch/x86/include/asm/crypto/serpent-sse2.h b/kernel/arch/x86/include/asm/crypto/serpent-sse2.h
new file mode 100644
index 000000000..e6e77dffb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/crypto/serpent-sse2.h
@@ -0,0 +1,63 @@
+#ifndef ASM_X86_SERPENT_SSE2_H
+#define ASM_X86_SERPENT_SSE2_H
+
+#include <linux/crypto.h>
+#include <crypto/serpent.h>
+
+#ifdef CONFIG_X86_32
+
+#define SERPENT_PARALLEL_BLOCKS 4
+
+asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __serpent_enc_blk_4way(ctx, dst, src, false);
+}
+
+static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __serpent_enc_blk_4way(ctx, dst, src, true);
+}
+
+static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ serpent_dec_blk_4way(ctx, dst, src);
+}
+
+#else
+
+#define SERPENT_PARALLEL_BLOCKS 8
+
+asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __serpent_enc_blk_8way(ctx, dst, src, false);
+}
+
+static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ __serpent_enc_blk_8way(ctx, dst, src, true);
+}
+
+static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
+ const u8 *src)
+{
+ serpent_dec_blk_8way(ctx, dst, src);
+}
+
+#endif
+
+#endif
diff --git a/kernel/arch/x86/include/asm/crypto/twofish.h b/kernel/arch/x86/include/asm/crypto/twofish.h
new file mode 100644
index 000000000..878c51cee
--- /dev/null
+++ b/kernel/arch/x86/include/asm/crypto/twofish.h
@@ -0,0 +1,46 @@
+#ifndef ASM_X86_TWOFISH_H
+#define ASM_X86_TWOFISH_H
+
+#include <linux/crypto.h>
+#include <crypto/twofish.h>
+#include <crypto/lrw.h>
+#include <crypto/b128ops.h>
+
+struct twofish_lrw_ctx {
+ struct lrw_table_ctx lrw_table;
+ struct twofish_ctx twofish_ctx;
+};
+
+struct twofish_xts_ctx {
+ struct twofish_ctx tweak_ctx;
+ struct twofish_ctx crypt_ctx;
+};
+
+/* regular block cipher functions from twofish_x86_64 module */
+asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src);
+asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* 3-way parallel cipher functions */
+asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src, bool xor);
+asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
+ const u8 *src);
+
+/* helpers from twofish_x86_64-3way module */
+extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
+extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+ le128 *iv);
+
+extern int lrw_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+extern void lrw_twofish_exit_tfm(struct crypto_tfm *tfm);
+
+extern int xts_twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+
+#endif /* ASM_X86_TWOFISH_H */
diff --git a/kernel/arch/x86/include/asm/current.h b/kernel/arch/x86/include/asm/current.h
new file mode 100644
index 000000000..9476c04ee
--- /dev/null
+++ b/kernel/arch/x86/include/asm/current.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_X86_CURRENT_H
+#define _ASM_X86_CURRENT_H
+
+#include <linux/compiler.h>
+#include <asm/percpu.h>
+
+#ifndef __ASSEMBLY__
+struct task_struct;
+
+DECLARE_PER_CPU(struct task_struct *, current_task);
+
+static __always_inline struct task_struct *get_current(void)
+{
+ return this_cpu_read_stable(current_task);
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_CURRENT_H */
diff --git a/kernel/arch/x86/include/asm/debugreg.h b/kernel/arch/x86/include/asm/debugreg.h
new file mode 100644
index 000000000..12cb66f6d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/debugreg.h
@@ -0,0 +1,123 @@
+#ifndef _ASM_X86_DEBUGREG_H
+#define _ASM_X86_DEBUGREG_H
+
+
+#include <linux/bug.h>
+#include <uapi/asm/debugreg.h>
+
+DECLARE_PER_CPU(unsigned long, cpu_dr7);
+
+#ifndef CONFIG_PARAVIRT
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register) \
+ (var) = native_get_debugreg(register)
+#define set_debugreg(value, register) \
+ native_set_debugreg(register, value)
+#endif
+
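+/*
+ * Usage sketch (editor's addition, not part of the original header):
+ * read DR6 and clear DR7 through the accessors above:
+ *
+ *	unsigned long dr6;
+ *
+ *	get_debugreg(dr6, 6);
+ *	set_debugreg(0UL, 7);
+ */
+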
+static inline unsigned long native_get_debugreg(int regno)
+{
+ unsigned long val = 0; /* Damn you, gcc! */
+
+ switch (regno) {
+ case 0:
+ asm("mov %%db0, %0" :"=r" (val));
+ break;
+ case 1:
+ asm("mov %%db1, %0" :"=r" (val));
+ break;
+ case 2:
+ asm("mov %%db2, %0" :"=r" (val));
+ break;
+ case 3:
+ asm("mov %%db3, %0" :"=r" (val));
+ break;
+ case 6:
+ asm("mov %%db6, %0" :"=r" (val));
+ break;
+ case 7:
+ asm("mov %%db7, %0" :"=r" (val));
+ break;
+ default:
+ BUG();
+ }
+ return val;
+}
+
+static inline void native_set_debugreg(int regno, unsigned long value)
+{
+ switch (regno) {
+ case 0:
+ asm("mov %0, %%db0" ::"r" (value));
+ break;
+ case 1:
+ asm("mov %0, %%db1" ::"r" (value));
+ break;
+ case 2:
+ asm("mov %0, %%db2" ::"r" (value));
+ break;
+ case 3:
+ asm("mov %0, %%db3" ::"r" (value));
+ break;
+ case 6:
+ asm("mov %0, %%db6" ::"r" (value));
+ break;
+ case 7:
+ asm("mov %0, %%db7" ::"r" (value));
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void hw_breakpoint_disable(void)
+{
+ /* Zero the control register for HW Breakpoint */
+ set_debugreg(0UL, 7);
+
+ /* Zero-out the individual HW breakpoint address registers */
+ set_debugreg(0UL, 0);
+ set_debugreg(0UL, 1);
+ set_debugreg(0UL, 2);
+ set_debugreg(0UL, 3);
+}
+
+static inline int hw_breakpoint_active(void)
+{
+ return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
+}
+
+extern void aout_dump_debugregs(struct user *dump);
+
+extern void hw_breakpoint_restore(void);
+
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU(int, debug_stack_usage);
+static inline void debug_stack_usage_inc(void)
+{
+ __this_cpu_inc(debug_stack_usage);
+}
+static inline void debug_stack_usage_dec(void)
+{
+ __this_cpu_dec(debug_stack_usage);
+}
+int is_debug_stack(unsigned long addr);
+void debug_stack_set_zero(void);
+void debug_stack_reset(void);
+#else /* !X86_64 */
+static inline int is_debug_stack(unsigned long addr) { return 0; }
+static inline void debug_stack_set_zero(void) { }
+static inline void debug_stack_reset(void) { }
+static inline void debug_stack_usage_inc(void) { }
+static inline void debug_stack_usage_dec(void) { }
+#endif /* X86_64 */
+
+#ifdef CONFIG_CPU_SUP_AMD
+extern void set_dr_addr_mask(unsigned long mask, int dr);
+#else
+static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
+#endif
+
+#endif /* _ASM_X86_DEBUGREG_H */
diff --git a/kernel/arch/x86/include/asm/delay.h b/kernel/arch/x86/include/asm/delay.h
new file mode 100644
index 000000000..9b3b4f275
--- /dev/null
+++ b/kernel/arch/x86/include/asm/delay.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_X86_DELAY_H
+#define _ASM_X86_DELAY_H
+
+#include <asm-generic/delay.h>
+
+void use_tsc_delay(void);
+
+#endif /* _ASM_X86_DELAY_H */
diff --git a/kernel/arch/x86/include/asm/desc.h b/kernel/arch/x86/include/asm/desc.h
new file mode 100644
index 000000000..a0bf89fd2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/desc.h
@@ -0,0 +1,519 @@
+#ifndef _ASM_X86_DESC_H
+#define _ASM_X86_DESC_H
+
+#include <asm/desc_defs.h>
+#include <asm/ldt.h>
+#include <asm/mmu.h>
+
+#include <linux/smp.h>
+#include <linux/percpu.h>
+
+static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
+{
+ desc->limit0 = info->limit & 0x0ffff;
+
+ desc->base0 = (info->base_addr & 0x0000ffff);
+ desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
+
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
+
+ desc->s = 1;
+ desc->dpl = 0x3;
+ desc->p = info->seg_not_present ^ 1;
+ desc->limit = (info->limit & 0xf0000) >> 16;
+ desc->avl = info->useable;
+ desc->d = info->seg_32bit;
+ desc->g = info->limit_in_pages;
+
+ desc->base2 = (info->base_addr & 0xff000000) >> 24;
+ /*
+ * Don't allow setting of the lm bit. It would confuse
+ * user_64bit_mode and would get overridden by sysret anyway.
+ */
+ desc->l = 0;
+}
+
+extern struct desc_ptr idt_descr;
+extern gate_desc idt_table[];
+extern struct desc_ptr debug_idt_descr;
+extern gate_desc debug_idt_table[];
+
+struct gdt_page {
+ struct desc_struct gdt[GDT_ENTRIES];
+} __attribute__((aligned(PAGE_SIZE)));
+
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
+
+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+{
+ return per_cpu(gdt_page, cpu).gdt;
+}
+
+#ifdef CONFIG_X86_64
+
+static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
+ unsigned dpl, unsigned ist, unsigned seg)
+{
+ gate->offset_low = PTR_LOW(func);
+ gate->segment = __KERNEL_CS;
+ gate->ist = ist;
+ gate->p = 1;
+ gate->dpl = dpl;
+ gate->zero0 = 0;
+ gate->zero1 = 0;
+ gate->type = type;
+ gate->offset_middle = PTR_MIDDLE(func);
+ gate->offset_high = PTR_HIGH(func);
+}
+
+#else
+static inline void pack_gate(gate_desc *gate, unsigned char type,
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
+{
+ gate->a = (seg << 16) | (base & 0xffff);
+ gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
+}
+
+#endif
+
+static inline int desc_empty(const void *ptr)
+{
+ const u32 *desc = ptr;
+
+ return !(desc[0] | desc[1]);
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define load_TR_desc() native_load_tr_desc()
+#define load_gdt(dtr) native_load_gdt(dtr)
+#define load_idt(dtr) native_load_idt(dtr)
+#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
+
+#define store_gdt(dtr) native_store_gdt(dtr)
+#define store_idt(dtr) native_store_idt(dtr)
+#define store_tr(tr) (tr = native_store_tr())
+
+#define load_TLS(t, cpu) native_load_tls(t, cpu)
+#define set_ldt native_set_ldt
+
+#define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)
+
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+#endif /* CONFIG_PARAVIRT */
+
+#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
+
+static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
+{
+ memcpy(&idt[entry], gate, sizeof(*gate));
+}
+
+static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
+{
+ memcpy(&ldt[entry], desc, 8);
+}
+
+static inline void
+native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
+{
+ unsigned int size;
+
+ switch (type) {
+ case DESC_TSS: size = sizeof(tss_desc); break;
+ case DESC_LDT: size = sizeof(ldt_desc); break;
+ default: size = sizeof(*gdt); break;
+ }
+
+ memcpy(&gdt[entry], desc, size);
+}
+
+static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+ unsigned long limit, unsigned char type,
+ unsigned char flags)
+{
+ desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
+ desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
+ (limit & 0x000f0000) | ((type & 0xff) << 8) |
+ ((flags & 0xf) << 20);
+ desc->p = 1;
+}
+
+
+static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
+{
+#ifdef CONFIG_X86_64
+ struct ldttss_desc64 *desc = d;
+
+ memset(desc, 0, sizeof(*desc));
+
+ desc->limit0 = size & 0xFFFF;
+ desc->base0 = PTR_LOW(addr);
+ desc->base1 = PTR_MIDDLE(addr) & 0xFF;
+ desc->type = type;
+ desc->p = 1;
+ desc->limit1 = (size >> 16) & 0xF;
+ desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
+ desc->base3 = PTR_HIGH(addr);
+#else
+ pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
+#endif
+}
+
+static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
+{
+ struct desc_struct *d = get_cpu_gdt_table(cpu);
+ tss_desc tss;
+
+ /*
+ * sizeof(unsigned long) coming from an extra "long" at the end
+ * of the iobitmap. See tss_struct definition in processor.h
+ *
+ * The -1 is because the segment base+limit should point to the
+ * address of the last valid byte.
+ */
+ set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
+ IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
+ sizeof(unsigned long) - 1);
+ write_gdt_entry(d, entry, &tss, DESC_TSS);
+}
+
+#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
+static inline void native_set_ldt(const void *addr, unsigned int entries)
+{
+ if (likely(entries == 0))
+ asm volatile("lldt %w0"::"q" (0));
+ else {
+ unsigned cpu = smp_processor_id();
+ ldt_desc ldt;
+
+ set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
+ entries * LDT_ENTRY_SIZE - 1);
+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
+ &ldt, DESC_LDT);
+ asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+ }
+}
+
+static inline void native_load_tr_desc(void)
+{
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static inline void native_load_gdt(const struct desc_ptr *dtr)
+{
+ asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct desc_ptr *dtr)
+{
+ asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static inline void native_store_gdt(struct desc_ptr *dtr)
+{
+ asm volatile("sgdt %0":"=m" (*dtr));
+}
+
+static inline void native_store_idt(struct desc_ptr *dtr)
+{
+ asm volatile("sidt %0":"=m" (*dtr));
+}
+
+static inline unsigned long native_store_tr(void)
+{
+ unsigned long tr;
+
+ asm volatile("str %0":"=r" (tr));
+
+ return tr;
+}
+
+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ unsigned int i;
+
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
+}
+
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
+#define LDT_empty(info) \
+ ((info)->base_addr == 0 && \
+ (info)->limit == 0 && \
+ (info)->contents == 0 && \
+ (info)->read_exec_only == 1 && \
+ (info)->seg_32bit == 0 && \
+ (info)->limit_in_pages == 0 && \
+ (info)->seg_not_present == 1 && \
+ (info)->useable == 0)
+
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
+static inline bool LDT_zero(const struct user_desc *info)
+{
+ return (info->base_addr == 0 &&
+ info->limit == 0 &&
+ info->contents == 0 &&
+ info->read_exec_only == 0 &&
+ info->seg_32bit == 0 &&
+ info->limit_in_pages == 0 &&
+ info->seg_not_present == 0 &&
+ info->useable == 0);
+}
+
+static inline void clear_LDT(void)
+{
+ set_ldt(NULL, 0);
+}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+static inline void load_LDT_nolock(mm_context_t *pc)
+{
+ set_ldt(pc->ldt, pc->size);
+}
+
+static inline void load_LDT(mm_context_t *pc)
+{
+ preempt_disable();
+ load_LDT_nolock(pc);
+ preempt_enable();
+}
+
+static inline unsigned long get_desc_base(const struct desc_struct *desc)
+{
+ return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+}
+
+static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
+{
+ desc->base0 = base & 0xffff;
+ desc->base1 = (base >> 16) & 0xff;
+ desc->base2 = (base >> 24) & 0xff;
+}
+
+static inline unsigned long get_desc_limit(const struct desc_struct *desc)
+{
+ return desc->limit0 | (desc->limit << 16);
+}
+
+static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+{
+ desc->limit0 = limit & 0xffff;
+ desc->limit = (limit >> 16) & 0xf;
+}
+
+#ifdef CONFIG_X86_64
+static inline void set_nmi_gate(int gate, void *addr)
+{
+ gate_desc s;
+
+ pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
+ write_idt_entry(debug_idt_table, gate, &s);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern struct desc_ptr trace_idt_descr;
+extern gate_desc trace_idt_table[];
+static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+{
+ write_idt_entry(trace_idt_table, entry, gate);
+}
+
+static inline void _trace_set_gate(int gate, unsigned type, void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+{
+ gate_desc s;
+
+ pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
+ /*
+ * does not need to be atomic because it is only done once at
+ * setup time
+ */
+ write_trace_idt_entry(gate, &s);
+}
+#else
+static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+{
+}
+
+#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
+#endif
+
+static inline void _set_gate(int gate, unsigned type, void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+{
+ gate_desc s;
+
+ pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
+ /*
+ * does not need to be atomic because it is only done once at
+ * setup time
+ */
+ write_idt_entry(idt_table, gate, &s);
+ write_trace_idt_entry(gate, &s);
+}
+
+/*
+ * This needs to use 'idt_table' rather than 'idt', and
+ * thus use the _nonmapped_ version of the IDT, as the
+ * Pentium F0 0F bugfix may have resulted in the mapped
+ * IDT being write-protected.
+ */
+#define set_intr_gate_notrace(n, addr) \
+ do { \
+ BUG_ON((unsigned)n > 0xFF); \
+ _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
+ __KERNEL_CS); \
+ } while (0)
+
+#define set_intr_gate(n, addr) \
+ do { \
+ set_intr_gate_notrace(n, addr); \
+ _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
+ 0, 0, __KERNEL_CS); \
+ } while (0)
+
+extern int first_system_vector;
+/* used_vectors is a bitmap of vectors that are not managed by the per-cpu vector_irq arrays */
+extern unsigned long used_vectors[];
+
+static inline void alloc_system_vector(int vector)
+{
+ if (!test_bit(vector, used_vectors)) {
+ set_bit(vector, used_vectors);
+ if (first_system_vector > vector)
+ first_system_vector = vector;
+ } else {
+ BUG();
+ }
+}
+
+#define alloc_intr_gate(n, addr) \
+ do { \
+ alloc_system_vector(n); \
+ set_intr_gate(n, addr); \
+ } while (0)
+
+/*
+ * This routine sets up an interrupt gate at descriptor privilege level 3.
+ */
+static inline void set_system_intr_gate(unsigned int n, void *addr)
+{
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+}
+
+static inline void set_system_trap_gate(unsigned int n, void *addr)
+{
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+}
+
+static inline void set_trap_gate(unsigned int n, void *addr)
+{
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+}
+
+static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+{
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
+}
+
+static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
+{
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+}
+
+static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
+{
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+}
+
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU(u32, debug_idt_ctr);
+static inline bool is_debug_idt_enabled(void)
+{
+ if (this_cpu_read(debug_idt_ctr))
+ return true;
+
+ return false;
+}
+
+static inline void load_debug_idt(void)
+{
+ load_idt((const struct desc_ptr *)&debug_idt_descr);
+}
+#else
+static inline bool is_debug_idt_enabled(void)
+{
+ return false;
+}
+
+static inline void load_debug_idt(void)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern atomic_t trace_idt_ctr;
+static inline bool is_trace_idt_enabled(void)
+{
+ if (atomic_read(&trace_idt_ctr))
+ return true;
+
+ return false;
+}
+
+static inline void load_trace_idt(void)
+{
+ load_idt((const struct desc_ptr *)&trace_idt_descr);
+}
+#else
+static inline bool is_trace_idt_enabled(void)
+{
+ return false;
+}
+
+static inline void load_trace_idt(void)
+{
+}
+#endif
+
+/*
+ * load_current_idt() must be called with interrupts disabled
+ * to avoid races. That way the IDT will always be set back to the expected
+ * descriptor. It's also called when a CPU is being initialized, and
+ * that doesn't need to disable interrupts, as nothing should be
+ * bothering the CPU then.
+ */
+static inline void load_current_idt(void)
+{
+ if (is_debug_idt_enabled())
+ load_debug_idt();
+ else if (is_trace_idt_enabled())
+ load_trace_idt();
+ else
+ load_idt((const struct desc_ptr *)&idt_descr);
+}
+#endif /* _ASM_X86_DESC_H */
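For illustration, the descriptor accessors above scatter a 32-bit segment base
across the base0/base1/base2 fields and reassemble it on read. A minimal
stand-alone sketch of that round trip, using a hypothetical struct that only
mirrors the three base fields (plain C, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    struct fake_desc { uint16_t base0; uint8_t base1; uint8_t base2; };

    static void fake_set_base(struct fake_desc *d, uint32_t base)
    {
        d->base0 = base & 0xffff;        /* bits  0..15 */
        d->base1 = (base >> 16) & 0xff;  /* bits 16..23 */
        d->base2 = (base >> 24) & 0xff;  /* bits 24..31 */
    }

    static uint32_t fake_get_base(const struct fake_desc *d)
    {
        return d->base0 | ((uint32_t)d->base1 << 16) | ((uint32_t)d->base2 << 24);
    }

    int main(void)
    {
        struct fake_desc d;

        fake_set_base(&d, 0x12345678);
        assert(fake_get_base(&d) == 0x12345678);
        return 0;
    }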
diff --git a/kernel/arch/x86/include/asm/desc_defs.h b/kernel/arch/x86/include/asm/desc_defs.h
new file mode 100644
index 000000000..278441f39
--- /dev/null
+++ b/kernel/arch/x86/include/asm/desc_defs.h
@@ -0,0 +1,101 @@
+/* Written 2000 by Andi Kleen */
+#ifndef _ASM_X86_DESC_DEFS_H
+#define _ASM_X86_DESC_DEFS_H
+
+/*
+ * Segment descriptor structure definitions, usable from both x86_64 and i386
+ * archs.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * FIXME: Accessing the desc_struct through its fields is more elegant,
+ * and should be the one valid thing to do. However, a lot of open code
+ * still touches the a and b accessors, and doing this allows us to do it
+ * incrementally. We keep the signature as a struct, rather than a union,
+ * so we can get rid of it transparently in the future -- glommer
+ */
+/* 8 byte segment descriptor */
+struct desc_struct {
+ union {
+ struct {
+ unsigned int a;
+ unsigned int b;
+ };
+ struct {
+ u16 limit0;
+ u16 base0;
+ unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+ unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+ };
+ };
+} __attribute__((packed));
+
+#define GDT_ENTRY_INIT(flags, base, limit) { { { \
+ .a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
+ .b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
+ ((limit) & 0xf0000) | ((base) & 0xff000000), \
+ } } }
+
+enum {
+ GATE_INTERRUPT = 0xE,
+ GATE_TRAP = 0xF,
+ GATE_CALL = 0xC,
+ GATE_TASK = 0x5,
+};
+
+/* 16byte gate */
+struct gate_struct64 {
+ u16 offset_low;
+ u16 segment;
+ unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
+ u16 offset_middle;
+ u32 offset_high;
+ u32 zero1;
+} __attribute__((packed));
+
+#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF)
+#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF)
+#define PTR_HIGH(x) ((unsigned long long)(x) >> 32)
+
+enum {
+ DESC_TSS = 0x9,
+ DESC_LDT = 0x2,
+ DESCTYPE_S = 0x10, /* !system */
+};
+
+/* LDT or TSS descriptor in the GDT. 16 bytes. */
+struct ldttss_desc64 {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+ u32 base3;
+ u32 zero1;
+} __attribute__((packed));
+
+#ifdef CONFIG_X86_64
+typedef struct gate_struct64 gate_desc;
+typedef struct ldttss_desc64 ldt_desc;
+typedef struct ldttss_desc64 tss_desc;
+#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
+#define gate_segment(g) ((g).segment)
+#else
+typedef struct desc_struct gate_desc;
+typedef struct desc_struct ldt_desc;
+typedef struct desc_struct tss_desc;
+#define gate_offset(g) (((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
+#define gate_segment(g) ((g).a >> 16)
+#endif
+
+struct desc_ptr {
+ unsigned short size;
+ unsigned long address;
+} __attribute__((packed)) ;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_DESC_DEFS_H */
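For illustration, on 64-bit kernels the gate_offset() macro above rebuilds the
handler address from the three offset fields of struct gate_struct64. A short
stand-alone sketch of that reassembly (hypothetical helper name, not part of
the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t gate_offset64(uint16_t low, uint16_t middle, uint32_t high)
    {
        return (uint64_t)low | ((uint64_t)middle << 16) | ((uint64_t)high << 32);
    }

    int main(void)
    {
        /* 0xffffffff81000010 split as high=0xffffffff, middle=0x8100, low=0x0010 */
        assert(gate_offset64(0x0010, 0x8100, 0xffffffff) == 0xffffffff81000010ULL);
        return 0;
    }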
diff --git a/kernel/arch/x86/include/asm/device.h b/kernel/arch/x86/include/asm/device.h
new file mode 100644
index 000000000..03dd72957
--- /dev/null
+++ b/kernel/arch/x86/include/asm/device.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_DEVICE_H
+#define _ASM_X86_DEVICE_H
+
+struct dev_archdata {
+#ifdef CONFIG_X86_DEV_DMA_OPS
+ struct dma_map_ops *dma_ops;
+#endif
+#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
+ void *iommu; /* hook for IOMMU specific extension */
+#endif
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_X86_DEVICE_H */
diff --git a/kernel/arch/x86/include/asm/disabled-features.h b/kernel/arch/x86/include/asm/disabled-features.h
new file mode 100644
index 000000000..f226df064
--- /dev/null
+++ b/kernel/arch/x86/include/asm/disabled-features.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#define _ASM_X86_DISABLED_FEATURES_H
+
+/* These features, although they might be available in a CPU,
+ * will not be used because the compile options to support
+ * them are not present.
+ *
+ * This code allows them to be checked and disabled at
+ * compile time without an explicit #ifdef. Use
+ * cpu_feature_enabled().
+ */
+
+#ifdef CONFIG_X86_INTEL_MPX
+# define DISABLE_MPX 0
+#else
+# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
+#endif
+
+#ifdef CONFIG_X86_64
+# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
+# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
+# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
+# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+#else
+# define DISABLE_VME 0
+# define DISABLE_K6_MTRR 0
+# define DISABLE_CYRIX_ARR 0
+# define DISABLE_CENTAUR_MCR 0
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Make sure to add features to the correct mask
+ */
+#define DISABLED_MASK0 (DISABLE_VME)
+#define DISABLED_MASK1 0
+#define DISABLED_MASK2 0
+#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
+#define DISABLED_MASK4 0
+#define DISABLED_MASK5 0
+#define DISABLED_MASK6 0
+#define DISABLED_MASK7 0
+#define DISABLED_MASK8 0
+#define DISABLED_MASK9 (DISABLE_MPX)
+
+#endif /* _ASM_X86_DISABLED_FEATURES_H */
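For illustration, each DISABLE_* constant above is the bit (1 << (feature & 31))
inside the 32-bit capability word that holds that feature, and the DISABLED_MASKn
words let a feature be masked out at compile time even when the CPU reports it.
A hedged user-space sketch of that masking idea (the feature word/bit numbers
below are illustrative, not taken from cpufeature.h):

    #include <assert.h>
    #include <stdint.h>

    #define FAKE_FEATURE_MPX     (9 * 32 + 14)               /* word 9, bit 14 (illustrative) */
    #define FAKE_DISABLE_MPX     (1u << (FAKE_FEATURE_MPX & 31))
    #define FAKE_DISABLED_MASK9  (FAKE_DISABLE_MPX)

    static int fake_feature_enabled(uint32_t capability_word9, unsigned bit)
    {
        /* enabled only if the CPU reports the bit AND it is not compiled out */
        return ((capability_word9 & ~FAKE_DISABLED_MASK9) >> bit) & 1;
    }

    int main(void)
    {
        uint32_t word9 = 1u << 14;                    /* CPU claims the feature... */
        assert(fake_feature_enabled(word9, 14) == 0); /* ...but it is disabled     */
        return 0;
    }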
diff --git a/kernel/arch/x86/include/asm/div64.h b/kernel/arch/x86/include/asm/div64.h
new file mode 100644
index 000000000..ced283ac7
--- /dev/null
+++ b/kernel/arch/x86/include/asm/div64.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_X86_DIV64_H
+#define _ASM_X86_DIV64_H
+
+#ifdef CONFIG_X86_32
+
+#include <linux/types.h>
+#include <linux/log2.h>
+
+/*
+ * do_div() is NOT a C function. It wants to return
+ * two values (the quotient and the remainder), but
+ * since that doesn't work very well in C, what it
+ * does is:
+ *
+ * - modifies the 64-bit dividend _in_place_
+ * - returns the 32-bit remainder
+ *
+ * This ends up being the most efficient "calling
+ * convention" on x86.
+ */
+#define do_div(n, base) \
+({ \
+ unsigned long __upper, __low, __high, __mod, __base; \
+ __base = (base); \
+ if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
+ __mod = n & (__base - 1); \
+ n >>= ilog2(__base); \
+ } else { \
+ asm("" : "=a" (__low), "=d" (__high) : "A" (n));\
+ __upper = __high; \
+ if (__high) { \
+ __upper = __high % (__base); \
+ __high = __high / (__base); \
+ } \
+ asm("divl %2" : "=a" (__low), "=d" (__mod) \
+ : "rm" (__base), "0" (__low), "1" (__upper)); \
+ asm("" : "=A" (n) : "a" (__low), "d" (__high)); \
+ } \
+ __mod; \
+})
+
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+ union {
+ u64 v64;
+ u32 v32[2];
+ } d = { dividend };
+ u32 upper;
+
+ upper = d.v32[1];
+ d.v32[1] = 0;
+ if (upper >= divisor) {
+ d.v32[1] = upper / divisor;
+ upper %= divisor;
+ }
+ asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
+ "rm" (divisor), "0" (d.v32[0]), "1" (upper));
+ return d.v64;
+}
+#define div_u64_rem div_u64_rem
+
+#else
+# include <asm-generic/div64.h>
+#endif /* CONFIG_X86_32 */
+
+#endif /* _ASM_X86_DIV64_H */
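For illustration, div_u64_rem() above returns the 64-bit quotient and stores
the 32-bit remainder through the pointer, once the dividend's upper half has
been reduced below the divisor so a single divl cannot overflow. A portable
sketch of the same contract without inline asm (hypothetical name, not part of
the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t sketch_div_u64_rem(uint64_t dividend, uint32_t divisor,
                                       uint32_t *remainder)
    {
        *remainder = (uint32_t)(dividend % divisor);
        return dividend / divisor;
    }

    int main(void)
    {
        uint32_t rem;
        uint64_t q = sketch_div_u64_rem(10000000001ULL, 3, &rem);

        assert(q == 3333333333ULL && rem == 2);
        return 0;
    }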
diff --git a/kernel/arch/x86/include/asm/dma-mapping.h b/kernel/arch/x86/include/asm/dma-mapping.h
new file mode 100644
index 000000000..808dae63e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/dma-mapping.h
@@ -0,0 +1,176 @@
+#ifndef _ASM_X86_DMA_MAPPING_H
+#define _ASM_X86_DMA_MAPPING_H
+
+/*
+ * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
+ * Documentation/DMA-API.txt for documentation.
+ */
+
+#include <linux/kmemcheck.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+#include <asm/io.h>
+#include <asm/swiotlb.h>
+#include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
+
+#ifdef CONFIG_ISA
+# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
+#else
+# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
+#endif
+
+#define DMA_ERROR_CODE 0
+
+extern int iommu_merge;
+extern struct device x86_dma_fallback_dev;
+extern int panic_on_overflow;
+
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+#ifndef CONFIG_X86_DEV_DMA_OPS
+ return dma_ops;
+#else
+ if (unlikely(!dev) || !dev->archdata.dma_ops)
+ return dma_ops;
+ else
+ return dev->archdata.dma_ops;
+#endif
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ debug_dma_mapping_error(dev, dma_addr);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+
+ return (dma_addr == DMA_ERROR_CODE);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+extern int dma_supported(struct device *hwdev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs);
+
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs);
+
+#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
+extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
+extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+#else
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr;
+}
+#endif /* CONFIG_X86_DMA_REMAP */
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ flush_write_buffers();
+}
+
+static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+ gfp_t gfp)
+{
+ unsigned long dma_mask = 0;
+
+ dma_mask = dev->coherent_dma_mask;
+ if (!dma_mask)
+ dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
+
+ return dma_mask;
+}
+
+static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+{
+ unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+
+ if (dma_mask <= DMA_BIT_MASK(24))
+ gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
+ if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+ gfp |= GFP_DMA32;
+#endif
+ return gfp;
+}
+
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *
+dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+ return memory;
+
+ if (!dev)
+ dev = &x86_dma_fallback_dev;
+
+ if (!is_device_dma_capable(dev))
+ return NULL;
+
+ if (!ops->alloc)
+ return NULL;
+
+ memory = ops->alloc(dev, size, dma_handle,
+ dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+ return memory;
+}
+
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ WARN_ON(irqs_disabled()); /* for portability */
+
+ if (dma_release_from_coherent(dev, get_order(size), vaddr))
+ return;
+
+ debug_dma_free_coherent(dev, size, vaddr, bus);
+ if (ops->free)
+ ops->free(dev, size, vaddr, bus, attrs);
+}
+
+#endif
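For illustration, dma_alloc_coherent_gfp_flags() above widens the allocation
flags from the device's coherent DMA mask: a mask that fits in 24 bits forces
GFP_DMA, and on 64-bit kernels a mask that fits in 32 bits (without GFP_DMA)
adds GFP_DMA32. A hedged user-space sketch of that decision with stand-in flag
values (not the kernel's GFP constants):

    #include <assert.h>
    #include <stdint.h>

    #define SK_GFP_DMA      0x01u                     /* stand-in flag values */
    #define SK_GFP_DMA32    0x02u
    #define SK_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    static unsigned sketch_coherent_gfp_flags(uint64_t coherent_mask, unsigned gfp)
    {
        if (coherent_mask <= SK_BIT_MASK(24))
            gfp |= SK_GFP_DMA;
        if (coherent_mask <= SK_BIT_MASK(32) && !(gfp & SK_GFP_DMA))
            gfp |= SK_GFP_DMA32;                      /* 64-bit kernel branch */
        return gfp;
    }

    int main(void)
    {
        assert(sketch_coherent_gfp_flags(SK_BIT_MASK(24), 0) == SK_GFP_DMA);
        assert(sketch_coherent_gfp_flags(SK_BIT_MASK(32), 0) == SK_GFP_DMA32);
        assert(sketch_coherent_gfp_flags(SK_BIT_MASK(64), 0) == 0);
        return 0;
    }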
diff --git a/kernel/arch/x86/include/asm/dma.h b/kernel/arch/x86/include/asm/dma.h
new file mode 100644
index 000000000..fe884e18f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/dma.h
@@ -0,0 +1,317 @@
+/*
+ * linux/include/asm/dma.h: Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ */
+
+#ifndef _ASM_X86_DMA_H
+#define _ASM_X86_DMA_H
+
+#include <linux/spinlock.h> /* And spinlocks */
+#include <asm/io.h> /* need byte IO */
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
+
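For illustration, the mapping described above means that for channels 0-3 a
24-bit physical address is programmed as a page register byte (A23..A16) plus
the MSB and LSB of the low 16 bits, which is exactly how set_dma_page() and
set_dma_addr() further down split it. A small stand-alone sketch of that split
(hypothetical helper, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    struct dma_prog { uint8_t page, msb, lsb; };

    static struct dma_prog split_dma_addr_ch0_3(uint32_t phys)
    {
        struct dma_prog p;

        p.page = (phys >> 16) & 0xff;  /* A23..A16 -> page register */
        p.msb  = (phys >> 8) & 0xff;   /* A15..A8  -> address MSB   */
        p.lsb  = phys & 0xff;          /* A7..A0   -> address LSB   */
        return p;
    }

    int main(void)
    {
        struct dma_prog p = split_dma_addr_ch0_3(0x123456);

        assert(p.page == 0x12 && p.msb == 0x34 && p.lsb == 0x56);
        return 0;
    }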
+#define MAX_DMA_CHANNELS 8
+
+/* 16MB ISA DMA zone */
+#define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT)
+
+/* 4GB broken PCI/AGP hardware bus master zone */
+#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+
+#ifdef CONFIG_X86_32
+/* The maximum address that we can perform a DMA transfer to on this platform */
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000)
+#else
+/* Compat define for old dma zone */
+#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
+#endif
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_PAGE_0 0x87 /* DMA page registers */
+#define DMA_PAGE_1 0x83
+#define DMA_PAGE_2 0x81
+#define DMA_PAGE_3 0x82
+#define DMA_PAGE_5 0x8B
+#define DMA_PAGE_6 0x89
+#define DMA_PAGE_7 0x8A
+
+/* I/O to memory, no autoinit, increment, single mode */
+#define DMA_MODE_READ 0x44
+/* memory to I/O, no autoinit, increment, single mode */
+#define DMA_MODE_WRITE 0x48
+/* pass thru DREQ->HRQ, DACK<-HLDA only */
+#define DMA_MODE_CASCADE 0xC0
+
+#define DMA_AUTOINIT 0x10
+
+
+#ifdef CONFIG_ISA_DMA_API
+extern spinlock_t dma_spin_lock;
+
+static inline unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+}
+
+static inline void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+#endif /* CONFIG_ISA_DMA_API */
+
+/* enable/disable a specific DMA channel */
+static inline void enable_dma(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(dmanr, DMA1_MASK_REG);
+ else
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+}
+
+static inline void disable_dma(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while holding the DMA lock ! ---
+ */
+static inline void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static inline void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr <= 3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static inline void set_dma_page(unsigned int dmanr, char pagenr)
+{
+ switch (dmanr) {
+ case 0:
+ dma_outb(pagenr, DMA_PAGE_0);
+ break;
+ case 1:
+ dma_outb(pagenr, DMA_PAGE_1);
+ break;
+ case 2:
+ dma_outb(pagenr, DMA_PAGE_2);
+ break;
+ case 3:
+ dma_outb(pagenr, DMA_PAGE_3);
+ break;
+ case 5:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_5);
+ break;
+ case 6:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_6);
+ break;
+ case 7:
+ dma_outb(pagenr & 0xfe, DMA_PAGE_7);
+ break;
+ }
+}
+
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
+{
+ set_dma_page(dmanr, a>>16);
+ if (dmanr <= 3) {
+ dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
+ dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
+ } else {
+ dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ }
+}
+
+
+/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static inline void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ dma_outb((count >> 8) & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ } else {
+ dma_outb((count >> 1) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 9) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static inline int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port;
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
+ : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr <= 3) ? count : (count << 1);
+}
+
+
+/* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
+#ifdef CONFIG_ISA_DMA_API
+extern int request_dma(unsigned int dmanr, const char *device_id);
+extern void free_dma(unsigned int dmanr);
+#endif
+
+/* From PCI */
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* _ASM_X86_DMA_H */
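For illustration, the helpers in this header are normally used together under
the DMA spinlock in a fixed sequence. A hedged fragment showing a single-cycle
device-to-memory transfer on channel 2, assuming kernel context and an
ISA-reachable buffer whose physical address and length are in 'phys' and 'len'
(a sketch, not code from this patch):

    unsigned long flags = claim_dma_lock();

    disable_dma(2);                  /* mask the channel while reprogramming        */
    clear_dma_ff(2);                 /* put the LSB/MSB flip-flop in a known state  */
    set_dma_mode(2, DMA_MODE_READ);  /* I/O -> memory, single mode                  */
    set_dma_addr(2, phys);           /* programs page register + 16-bit address     */
    set_dma_count(2, len);           /* helper writes len - 1 to the count register */
    enable_dma(2);                   /* unmask; the device may now raise DREQ       */

    release_dma_lock(flags);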
diff --git a/kernel/arch/x86/include/asm/dmi.h b/kernel/arch/x86/include/asm/dmi.h
new file mode 100644
index 000000000..535192f6b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/dmi.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_X86_DMI_H
+#define _ASM_X86_DMI_H
+
+#include <linux/compiler.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+
+static __always_inline __init void *dmi_alloc(unsigned len)
+{
+ return extend_brk(len, sizeof(int));
+}
+
+/* Use early IO mappings for DMI because it's initialized early */
+#define dmi_early_remap early_ioremap
+#define dmi_early_unmap early_iounmap
+#define dmi_remap ioremap
+#define dmi_unmap iounmap
+
+#endif /* _ASM_X86_DMI_H */
diff --git a/kernel/arch/x86/include/asm/dwarf2.h b/kernel/arch/x86/include/asm/dwarf2.h
new file mode 100644
index 000000000..de1cdaf4d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/dwarf2.h
@@ -0,0 +1,170 @@
+#ifndef _ASM_X86_DWARF2_H
+#define _ASM_X86_DWARF2_H
+
+#ifndef __ASSEMBLY__
+#warning "asm/dwarf2.h should be only included in pure assembly files"
+#endif
+
+/*
+ * Macros for dwarf2 CFI unwind table entries.
+ * See "as.info" for details on these pseudo ops. Unfortunately
+ * they are only supported in very new binutils, so define them
+ * away for older versions.
+ */
+
+#ifdef CONFIG_AS_CFI
+
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_REMEMBER_STATE .cfi_remember_state
+#define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
+#define CFI_ESCAPE .cfi_escape
+
+#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
+#define CFI_SIGNAL_FRAME .cfi_signal_frame
+#else
+#define CFI_SIGNAL_FRAME
+#endif
+
+#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
+ /*
+ * Emit CFI data in .debug_frame sections, not .eh_frame sections.
+ * The latter we currently just discard since we don't do DWARF
+ * unwinding at runtime. So only the offline DWARF information is
+ * useful to anyone. Note we should not use this directive if this
+ * file is used in the vDSO assembly, or if vmlinux.lds.S gets
+ * changed so it doesn't discard .eh_frame.
+ */
+ .cfi_sections .debug_frame
+#endif
+
+#else
+
+/*
+ * Due to the structure of pre-existing code, don't use the assembler line
+ * comment character # to ignore the arguments. Instead, use a dummy macro.
+ */
+.macro cfi_ignore a=0, b=0, c=0, d=0
+.endm
+
+#define CFI_STARTPROC cfi_ignore
+#define CFI_ENDPROC cfi_ignore
+#define CFI_DEF_CFA cfi_ignore
+#define CFI_DEF_CFA_REGISTER cfi_ignore
+#define CFI_DEF_CFA_OFFSET cfi_ignore
+#define CFI_ADJUST_CFA_OFFSET cfi_ignore
+#define CFI_OFFSET cfi_ignore
+#define CFI_REL_OFFSET cfi_ignore
+#define CFI_REGISTER cfi_ignore
+#define CFI_RESTORE cfi_ignore
+#define CFI_REMEMBER_STATE cfi_ignore
+#define CFI_RESTORE_STATE cfi_ignore
+#define CFI_UNDEFINED cfi_ignore
+#define CFI_ESCAPE cfi_ignore
+#define CFI_SIGNAL_FRAME cfi_ignore
+
+#endif
+
+/*
+ * An attempt to make CFI annotations more or less
+ * correct and shorter. It is implied that you know
+ * what you're doing if you use them.
+ */
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_X86_64
+ .macro pushq_cfi reg
+ pushq \reg
+ CFI_ADJUST_CFA_OFFSET 8
+ .endm
+
+ .macro pushq_cfi_reg reg
+ pushq %\reg
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET \reg, 0
+ .endm
+
+ .macro popq_cfi reg
+ popq \reg
+ CFI_ADJUST_CFA_OFFSET -8
+ .endm
+
+ .macro popq_cfi_reg reg
+ popq %\reg
+ CFI_ADJUST_CFA_OFFSET -8
+ CFI_RESTORE \reg
+ .endm
+
+ .macro pushfq_cfi
+ pushfq
+ CFI_ADJUST_CFA_OFFSET 8
+ .endm
+
+ .macro popfq_cfi
+ popfq
+ CFI_ADJUST_CFA_OFFSET -8
+ .endm
+
+ .macro movq_cfi reg offset=0
+ movq %\reg, \offset(%rsp)
+ CFI_REL_OFFSET \reg, \offset
+ .endm
+
+ .macro movq_cfi_restore offset reg
+ movq \offset(%rsp), %\reg
+ CFI_RESTORE \reg
+ .endm
+#else /*!CONFIG_X86_64*/
+ .macro pushl_cfi reg
+ pushl \reg
+ CFI_ADJUST_CFA_OFFSET 4
+ .endm
+
+ .macro pushl_cfi_reg reg
+ pushl %\reg
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET \reg, 0
+ .endm
+
+ .macro popl_cfi reg
+ popl \reg
+ CFI_ADJUST_CFA_OFFSET -4
+ .endm
+
+ .macro popl_cfi_reg reg
+ popl %\reg
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE \reg
+ .endm
+
+ .macro pushfl_cfi
+ pushfl
+ CFI_ADJUST_CFA_OFFSET 4
+ .endm
+
+ .macro popfl_cfi
+ popfl
+ CFI_ADJUST_CFA_OFFSET -4
+ .endm
+
+ .macro movl_cfi reg offset=0
+ movl %\reg, \offset(%esp)
+ CFI_REL_OFFSET \reg, \offset
+ .endm
+
+ .macro movl_cfi_restore offset reg
+ movl \offset(%esp), %\reg
+ CFI_RESTORE \reg
+ .endm
+#endif /*!CONFIG_X86_64*/
+#endif /*__ASSEMBLY__*/
+
+#endif /* _ASM_X86_DWARF2_H */
diff --git a/kernel/arch/x86/include/asm/e820.h b/kernel/arch/x86/include/asm/e820.h
new file mode 100644
index 000000000..3ab053787
--- /dev/null
+++ b/kernel/arch/x86/include/asm/e820.h
@@ -0,0 +1,69 @@
+#ifndef _ASM_X86_E820_H
+#define _ASM_X86_E820_H
+
+#ifdef CONFIG_EFI
+#include <linux/numa.h>
+#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
+#else /* ! CONFIG_EFI */
+#define E820_X_MAX E820MAX
+#endif
+#include <uapi/asm/e820.h>
+#ifndef __ASSEMBLY__
+/* see comment in arch/x86/kernel/e820.c */
+extern struct e820map e820;
+extern struct e820map e820_saved;
+
+extern unsigned long pci_mem_start;
+extern int e820_any_mapped(u64 start, u64 end, unsigned type);
+extern int e820_all_mapped(u64 start, u64 end, unsigned type);
+extern void e820_add_region(u64 start, u64 size, int type);
+extern void e820_print_map(char *who);
+extern int
+sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
+extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
+ unsigned new_type);
+extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
+ int checktype);
+extern void update_e820(void);
+extern void e820_setup_gap(void);
+extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
+ unsigned long start_addr, unsigned long long end_addr);
+struct setup_data;
+extern void parse_e820_ext(u64 phys_addr, u32 data_len);
+
+#if defined(CONFIG_X86_64) || \
+ (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
+extern void e820_mark_nosave_regions(unsigned long limit_pfn);
+#else
+static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
+{
+}
+#endif
+
+extern unsigned long e820_end_of_ram_pfn(void);
+extern unsigned long e820_end_of_low_ram_pfn(void);
+extern u64 early_reserve_e820(u64 sizet, u64 align);
+
+void memblock_x86_fill(void);
+void memblock_find_dma_reserve(void);
+
+extern void finish_e820_parsing(void);
+extern void e820_reserve_resources(void);
+extern void e820_reserve_resources_late(void);
+extern void setup_memory_map(void);
+extern char *default_machine_specific_memory_setup(void);
+
+/*
+ * Returns true iff the specified range [s,e) is completely contained inside
+ * the ISA region.
+ */
+static inline bool is_ISA_range(u64 s, u64 e)
+{
+ return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
+}
+
+#endif /* __ASSEMBLY__ */
+#include <linux/ioport.h>
+
+#define HIGH_MEMORY (1024*1024)
+#endif /* _ASM_X86_E820_H */
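For illustration, is_ISA_range() above treats its argument as the half-open
range [s, e) and checks containment in the legacy ISA hole, whose bounds are
conventionally ISA_START_ADDRESS (0xa0000) and ISA_END_ADDRESS (0x100000). A
tiny stand-alone sketch with those values written out (treat the constants as
assumptions of this note, not redefinitions):

    #include <assert.h>
    #include <stdint.h>

    #define SK_ISA_START 0xa0000ULL   /* assumed legacy ISA hole bounds */
    #define SK_ISA_END   0x100000ULL

    static int sk_is_ISA_range(uint64_t s, uint64_t e)
    {
        return s >= SK_ISA_START && e <= SK_ISA_END;
    }

    int main(void)
    {
        assert(sk_is_ISA_range(0xc0000, 0xc8000));   /* video BIOS shadow area    */
        assert(!sk_is_ISA_range(0x9f000, 0xa1000));  /* starts below the ISA hole */
        return 0;
    }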
diff --git a/kernel/arch/x86/include/asm/edac.h b/kernel/arch/x86/include/asm/edac.h
new file mode 100644
index 000000000..e9b57ecc7
--- /dev/null
+++ b/kernel/arch/x86/include/asm/edac.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_X86_EDAC_H
+#define _ASM_X86_EDAC_H
+
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static inline void atomic_scrub(void *va, u32 size)
+{
+ u32 i, *virt_addr = va;
+
+ /*
+ * Very carefully read and write to memory atomically so we
+ * are interrupt, DMA and SMP safe.
+ */
+ for (i = 0; i < size / 4; i++, virt_addr++)
+ asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
+}
+
+#endif /* _ASM_X86_EDAC_H */
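For illustration, atomic_scrub() above relies on "lock; addl $0, mem" being a
full atomic read-modify-write of each 32-bit word: the value is unchanged, but
the locked cycle forces the location to be read and written back, which lets
the memory controller correct a single-bit ECC error. A hedged user-space
analogue using the GCC/Clang __atomic builtins instead of inline asm:

    #include <assert.h>
    #include <stdint.h>

    static void scrub_sketch(void *va, uint32_t size)
    {
        uint32_t i, *p = va;

        /* atomic read-modify-write of every dword; adding 0 leaves the data intact */
        for (i = 0; i < size / 4; i++, p++)
            __atomic_fetch_add(p, 0, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        uint32_t buf[4] = { 1, 2, 3, 4 };

        scrub_sketch(buf, sizeof(buf));
        assert(buf[0] == 1 && buf[3] == 4);
        return 0;
    }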
diff --git a/kernel/arch/x86/include/asm/efi.h b/kernel/arch/x86/include/asm/efi.h
new file mode 100644
index 000000000..3738b138b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/efi.h
@@ -0,0 +1,197 @@
+#ifndef _ASM_X86_EFI_H
+#define _ASM_X86_EFI_H
+
+#include <asm/i387.h>
+#include <asm/pgtable.h>
+
+/*
+ * We map the EFI regions needed for runtime services non-contiguously,
+ * with preserved alignment on virtual addresses starting from -4G down
+ * for a total max space of 64G. This way, we provide for stable runtime
+ * services addresses across kernels so that a kexec'd kernel can still
+ * use them.
+ *
+ * This is the main reason why we're doing stable VA mappings for RT
+ * services.
+ *
+ * This flag is used in conjunction with a chicken bit called
+ * "efi=old_map" which can be used as a fallback to the old runtime
+ * services mapping method in case there's some b0rkage with a
+ * particular EFI implementation (haha, it is hard to hold up the
+ * sarcasm here...).
+ */
+#define EFI_OLD_MEMMAP EFI_ARCH_1
+
+#define EFI32_LOADER_SIGNATURE "EL32"
+#define EFI64_LOADER_SIGNATURE "EL64"
+
+#ifdef CONFIG_X86_32
+
+
+extern unsigned long asmlinkage efi_call_phys(void *, ...);
+
+/*
+ * Wrap all the virtual calls in a way that forces the parameters onto the stack.
+ */
+
+/* Use this macro if your virtual call returns a non-void value */
+#define efi_call_virt(f, args...) \
+({ \
+ efi_status_t __s; \
+ kernel_fpu_begin(); \
+ __s = ((efi_##f##_t __attribute__((regparm(0)))*) \
+ efi.systab->runtime->f)(args); \
+ kernel_fpu_end(); \
+ __s; \
+})
+
+/* Use this macro if your virtual call does not return any value */
+#define __efi_call_virt(f, args...) \
+({ \
+ kernel_fpu_begin(); \
+ ((efi_##f##_t __attribute__((regparm(0)))*) \
+ efi.systab->runtime->f)(args); \
+ kernel_fpu_end(); \
+})
+
+#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
+
+#else /* !CONFIG_X86_32 */
+
+#define EFI_LOADER_SIGNATURE "EL64"
+
+extern u64 asmlinkage efi_call(void *fp, ...);
+
+#define efi_call_phys(f, args...) efi_call((f), args)
+
+#define efi_call_virt(f, ...) \
+({ \
+ efi_status_t __s; \
+ \
+ efi_sync_low_kernel_mappings(); \
+ preempt_disable(); \
+ __kernel_fpu_begin(); \
+ __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
+ __kernel_fpu_end(); \
+ preempt_enable(); \
+ __s; \
+})
+
+/*
+ * All X86_64 virt calls return non-void values. Thus, use non-void call for
+ * virt calls that would be void on X86_32.
+ */
+#define __efi_call_virt(f, args...) efi_call_virt(f, args)
+
+extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
+ u32 type, u64 attribute);
+
+#endif /* CONFIG_X86_32 */
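For illustration, both variants of efi_call_virt() above take the name of a
runtime-service member plus its arguments, wrap the call in
kernel_fpu_begin/end (and, on 64-bit, the mapping sync and preemption
disabling), and yield an efi_status_t. A hedged usage fragment, assuming
kernel context and the usual GetTime argument types (a sketch, not code from
this patch):

    efi_time_t tm;
    efi_time_cap_t cap;
    efi_status_t status;

    /* expands to a call through efi.systab->runtime->get_time */
    status = efi_call_virt(get_time, &tm, &cap);
    if (status != EFI_SUCCESS)
        pr_err("efi: GetTime failed\n");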
+
+extern struct efi_scratch efi_scratch;
+extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
+extern int __init efi_memblock_x86_reserve_range(void);
+extern pgd_t * __init efi_call_phys_prolog(void);
+extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
+extern void __init efi_unmap_memmap(void);
+extern void __init efi_memory_uc(u64 addr, unsigned long size);
+extern void __init efi_map_region(efi_memory_desc_t *md);
+extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
+extern void efi_sync_low_kernel_mappings(void);
+extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+extern void __init old_map_region(efi_memory_desc_t *md);
+extern void __init runtime_code_page_mkexec(void);
+extern void __init efi_runtime_mkexec(void);
+extern void __init efi_dump_pagetable(void);
+extern void __init efi_apply_memmap_quirks(void);
+extern int __init efi_reuse_config(u64 tables, int nr_tables);
+extern void efi_delete_dummy_variable(void);
+
+struct efi_setup_data {
+ u64 fw_vendor;
+ u64 runtime;
+ u64 tables;
+ u64 smbios;
+ u64 reserved[8];
+};
+
+extern u64 efi_setup;
+
+#ifdef CONFIG_EFI
+
+static inline bool efi_is_native(void)
+{
+ return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+}
+
+static inline bool efi_runtime_supported(void)
+{
+ if (efi_is_native())
+ return true;
+
+ if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
+ return true;
+
+ return false;
+}
+
+extern struct console early_efi_console;
+extern void parse_efi_setup(u64 phys_addr, u32 data_len);
+
+#ifdef CONFIG_EFI_MIXED
+extern void efi_thunk_runtime_setup(void);
+extern efi_status_t efi_thunk_set_virtual_address_map(
+ void *phys_set_virtual_address_map,
+ unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map);
+#else
+static inline void efi_thunk_runtime_setup(void) {}
+static inline efi_status_t efi_thunk_set_virtual_address_map(
+ void *phys_set_virtual_address_map,
+ unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map)
+{
+ return EFI_SUCCESS;
+}
+#endif /* CONFIG_EFI_MIXED */
+
+
+/* arch specific definitions used by the stub code */
+
+struct efi_config {
+ u64 image_handle;
+ u64 table;
+ u64 allocate_pool;
+ u64 allocate_pages;
+ u64 get_memory_map;
+ u64 free_pool;
+ u64 free_pages;
+ u64 locate_handle;
+ u64 handle_protocol;
+ u64 exit_boot_services;
+ u64 text_output;
+ efi_status_t (*call)(unsigned long, ...);
+ bool is64;
+} __packed;
+
+__pure const struct efi_config *__efi_early(void);
+
+#define efi_call_early(f, ...) \
+ __efi_early()->call(__efi_early()->f, __VA_ARGS__);
+
+extern bool efi_reboot_required(void);
+
+#else
+static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
+static inline bool efi_reboot_required(void)
+{
+ return false;
+}
+#endif /* CONFIG_EFI */
+
+#endif /* _ASM_X86_EFI_H */
diff --git a/kernel/arch/x86/include/asm/elf.h b/kernel/arch/x86/include/asm/elf.h
new file mode 100644
index 000000000..f161c189c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/elf.h
@@ -0,0 +1,371 @@
+#ifndef _ASM_X86_ELF_H
+#define _ASM_X86_ELF_H
+
+/*
+ * ELF register definitions..
+ */
+#include <linux/thread_info.h>
+
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/auxvec.h>
+
+typedef unsigned long elf_greg_t;
+
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct user_i387_struct elf_fpregset_t;
+
+#ifdef __i386__
+
+typedef struct user_fxsr_struct elf_fpxregset_t;
+
+#define R_386_NONE 0
+#define R_386_32 1
+#define R_386_PC32 2
+#define R_386_GOT32 3
+#define R_386_PLT32 4
+#define R_386_COPY 5
+#define R_386_GLOB_DAT 6
+#define R_386_JMP_SLOT 7
+#define R_386_RELATIVE 8
+#define R_386_GOTOFF 9
+#define R_386_GOTPC 10
+#define R_386_NUM 11
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_386
+
+#else
+
+/* x86-64 relocation types */
+#define R_X86_64_NONE 0 /* No reloc */
+#define R_X86_64_64 1 /* Direct 64 bit */
+#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
+#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
+#define R_X86_64_PLT32 4 /* 32 bit PLT address */
+#define R_X86_64_COPY 5 /* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
+#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
+#define R_X86_64_RELATIVE 8 /* Adjust by program base */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
+ offset to GOT */
+#define R_X86_64_32 10 /* Direct 32 bit zero extended */
+#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
+#define R_X86_64_16 12 /* Direct 16 bit zero extended */
+#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
+#define R_X86_64_8 14 /* Direct 8 bit sign extended */
+#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
+
+#define R_X86_64_NUM 16
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_X86_64
+
+#endif
+
+#include <asm/vdso.h>
+
+#ifdef CONFIG_X86_64
+extern unsigned int vdso64_enabled;
+#endif
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+extern unsigned int vdso32_enabled;
+#endif
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch_ia32(x) \
+ (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
+
+#include <asm/processor.h>
+
+#ifdef CONFIG_X86_32
+#include <asm/desc.h>
+
+#define elf_check_arch(x) elf_check_arch_ia32(x)
+
+/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
+   contains a pointer to a function which might be registered using `atexit'.
+   This provides a means for the dynamic linker to call DT_FINI functions for
+   shared libraries that have been loaded before the code runs.
+
+   A value of 0 tells us we have no such handler.
+
+   We might as well make sure everything else is cleared too (except for %esp),
+   just to make things more deterministic.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { \
+ _r->bx = 0; _r->cx = 0; _r->dx = 0; \
+ _r->si = 0; _r->di = 0; _r->bp = 0; \
+ _r->ax = 0; \
+} while (0)
+
+/*
+ * regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ * now struct_user_regs, they are different)
+ */
+
+#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
+do { \
+ pr_reg[0] = regs->bx; \
+ pr_reg[1] = regs->cx; \
+ pr_reg[2] = regs->dx; \
+ pr_reg[3] = regs->si; \
+ pr_reg[4] = regs->di; \
+ pr_reg[5] = regs->bp; \
+ pr_reg[6] = regs->ax; \
+ pr_reg[7] = regs->ds & 0xffff; \
+ pr_reg[8] = regs->es & 0xffff; \
+ pr_reg[9] = regs->fs & 0xffff; \
+ pr_reg[11] = regs->orig_ax; \
+ pr_reg[12] = regs->ip; \
+ pr_reg[13] = regs->cs & 0xffff; \
+ pr_reg[14] = regs->flags; \
+ pr_reg[15] = regs->sp; \
+ pr_reg[16] = regs->ss & 0xffff; \
+} while (0);
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+do { \
+ ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+ pr_reg[10] = get_user_gs(regs); \
+} while (0);
+
+#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
+do { \
+ ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+ savesegment(gs, pr_reg[10]); \
+} while (0);
+
+#define ELF_PLATFORM (utsname()->machine)
+#define set_personality_64bit() do { } while (0)
+
+#else /* CONFIG_X86_32 */
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+ ((x)->e_machine == EM_X86_64)
+
+#define compat_elf_check_arch(x) \
+ (elf_check_arch_ia32(x) || \
+ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
+
+#if __USER32_DS != __USER_DS
+# error "The following code assumes __USER32_DS == __USER_DS"
+#endif
+
+static inline void elf_common_init(struct thread_struct *t,
+ struct pt_regs *regs, const u16 ds)
+{
+ /* Commented-out registers are cleared in stub_execve */
+ /*regs->ax = regs->bx =*/ regs->cx = regs->dx = 0;
+ regs->si = regs->di /*= regs->bp*/ = 0;
+ regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
+ /*regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;*/
+ t->fs = t->gs = 0;
+ t->fsindex = t->gsindex = 0;
+ t->ds = t->es = ds;
+}
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+ elf_common_init(&current->thread, _r, 0)
+
+#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
+ elf_common_init(&current->thread, regs, __USER_DS)
+
+void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
+#define compat_start_thread start_thread_ia32
+
+void set_personality_ia32(bool);
+#define COMPAT_SET_PERSONALITY(ex) \
+ set_personality_ia32((ex).e_machine == EM_X86_64)
+
+#define COMPAT_ELF_PLATFORM ("i686")
+
+/*
+ * regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ * now struct_user_regs, they are different). Assumes current is the process
+ * getting dumped.
+ */
+
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+do { \
+ unsigned v; \
+ (pr_reg)[0] = (regs)->r15; \
+ (pr_reg)[1] = (regs)->r14; \
+ (pr_reg)[2] = (regs)->r13; \
+ (pr_reg)[3] = (regs)->r12; \
+ (pr_reg)[4] = (regs)->bp; \
+ (pr_reg)[5] = (regs)->bx; \
+ (pr_reg)[6] = (regs)->r11; \
+ (pr_reg)[7] = (regs)->r10; \
+ (pr_reg)[8] = (regs)->r9; \
+ (pr_reg)[9] = (regs)->r8; \
+ (pr_reg)[10] = (regs)->ax; \
+ (pr_reg)[11] = (regs)->cx; \
+ (pr_reg)[12] = (regs)->dx; \
+ (pr_reg)[13] = (regs)->si; \
+ (pr_reg)[14] = (regs)->di; \
+ (pr_reg)[15] = (regs)->orig_ax; \
+ (pr_reg)[16] = (regs)->ip; \
+ (pr_reg)[17] = (regs)->cs; \
+ (pr_reg)[18] = (regs)->flags; \
+ (pr_reg)[19] = (regs)->sp; \
+ (pr_reg)[20] = (regs)->ss; \
+ (pr_reg)[21] = current->thread.fs; \
+ (pr_reg)[22] = current->thread.gs; \
+ asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
+ asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
+ asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
+ asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
+} while (0);
+
+/* I'm not sure if we can use '-' here */
+#define ELF_PLATFORM ("x86_64")
+extern void set_personality_64bit(void);
+extern unsigned int sysctl_vsyscall32;
+extern int force_personality32;
+
+#endif /* !CONFIG_X86_32 */
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+/* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+
+#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+ For the moment, we have only optimizations for the Intel generations,
+ but that could change... */
+
+#define SET_PERSONALITY(ex) set_personality_64bit()
+
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ */
+#define elf_read_implies_exec(ex, executable_stack) \
+ (executable_stack != EXSTACK_DISABLE_X)
+
+struct task_struct;
+
+#define ARCH_DLINFO_IA32 \
+do { \
+ if (vdso32_enabled) { \
+ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
+ } \
+} while (0)
+
+#ifdef CONFIG_X86_32
+
+#define STACK_RND_MASK (0x7ff)
+
+#define ARCH_DLINFO ARCH_DLINFO_IA32
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+
+#else /* CONFIG_X86_32 */
+
+/* 1GB for 64bit, 8MB for 32bit */
+#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
+
+#define ARCH_DLINFO \
+do { \
+ if (vdso64_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long __force)current->mm->context.vdso); \
+} while (0)
+
+/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
+#define ARCH_DLINFO_X32 \
+do { \
+ if (vdso64_enabled) \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (unsigned long __force)current->mm->context.vdso); \
+} while (0)
+
+#define AT_SYSINFO 32
+
+#define COMPAT_ARCH_DLINFO \
+if (test_thread_flag(TIF_X32)) \
+ ARCH_DLINFO_X32; \
+else \
+ ARCH_DLINFO_IA32
+
+#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
+#endif /* !CONFIG_X86_32 */
+
+#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
+
+#define VDSO_ENTRY \
+ ((unsigned long)current->mm->context.vdso + \
+ selected_vdso32->sym___kernel_vsyscall)
+
+struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
+
+/*
+ * True on X86_32 or when emulating IA32 on X86_64
+ */
+static inline int mmap_is_ia32(void)
+{
+#ifdef CONFIG_X86_32
+ return 1;
+#endif
+#ifdef CONFIG_IA32_EMULATION
+ if (test_thread_flag(TIF_ADDR32))
+ return 1;
+#endif
+ return 0;
+}
+
+/* Do not change the values. See get_align_mask() */
+enum align_flags {
+ ALIGN_VA_32 = BIT(0),
+ ALIGN_VA_64 = BIT(1),
+};
+
+struct va_alignment {
+ int flags;
+ unsigned long mask;
+ unsigned long bits;
+} ____cacheline_aligned;
+
+extern struct va_alignment va_align;
+extern unsigned long align_vdso_addr(unsigned long);
+#endif /* _ASM_X86_ELF_H */
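For illustration, elf_check_arch()/compat_elf_check_arch() above accept or
reject a binary purely on the e_machine field of its ELF header. A hedged
user-space sketch of the same test using glibc's <elf.h> constants (the helper
names are made up, not from this patch):

    #include <assert.h>
    #include <elf.h>
    #include <string.h>

    /* mirrors elf_check_arch() on 64-bit and elf_check_arch_ia32() above */
    static int accepts_native64(const Elf64_Ehdr *eh)
    {
        return eh->e_machine == EM_X86_64;
    }

    static int accepts_ia32(const Elf32_Ehdr *eh)
    {
        return eh->e_machine == EM_386 || eh->e_machine == EM_486;
    }

    int main(void)
    {
        Elf64_Ehdr eh64;
        Elf32_Ehdr eh32;

        memset(&eh64, 0, sizeof(eh64));
        memset(&eh32, 0, sizeof(eh32));
        eh64.e_machine = EM_X86_64;
        eh32.e_machine = EM_386;
        assert(accepts_native64(&eh64) && accepts_ia32(&eh32));
        return 0;
    }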
diff --git a/kernel/arch/x86/include/asm/emergency-restart.h b/kernel/arch/x86/include/asm/emergency-restart.h
new file mode 100644
index 000000000..77a99ac06
--- /dev/null
+++ b/kernel/arch/x86/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_EMERGENCY_RESTART_H
+#define _ASM_X86_EMERGENCY_RESTART_H
+
+extern void machine_emergency_restart(void);
+
+#endif /* _ASM_X86_EMERGENCY_RESTART_H */
diff --git a/kernel/arch/x86/include/asm/entry_arch.h b/kernel/arch/x86/include/asm/entry_arch.h
new file mode 100644
index 000000000..dc5fa6614
--- /dev/null
+++ b/kernel/arch/x86/include/asm/entry_arch.h
@@ -0,0 +1,53 @@
+/*
+ * This file is designed to contain the BUILD_INTERRUPT specifications for
+ * all of the extra named interrupt vectors used by the architecture.
+ * Usually these are the Inter-Processor Interrupts (IPIs).
+ */
+
+/*
+ * The following vectors are part of the Linux architecture; there
+ * is no hardware IRQ pin equivalent for them. They are triggered
+ * through the ICC by us (IPIs).
+ */
+#ifdef CONFIG_SMP
+BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
+BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
+BUILD_INTERRUPT3(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR,
+ smp_irq_move_cleanup_interrupt)
+BUILD_INTERRUPT3(reboot_interrupt, REBOOT_VECTOR, smp_reboot_interrupt)
+#endif
+
+BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
+
+#ifdef CONFIG_HAVE_KVM
+BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
+ smp_kvm_posted_intr_ipi)
+#endif
+
+/*
+ * Every Pentium local APIC has two 'local interrupts', each with a
+ * soft-definable vector. One of them is a timer interrupt, the
+ * other is error counter overflow. Linux uses the local APIC timer
+ * interrupt to get a much simpler SMP time architecture:
+ */
+#ifdef CONFIG_X86_LOCAL_APIC
+
+BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
+BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
+BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+
+#ifdef CONFIG_IRQ_WORK
+BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
+#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
+#endif
+
+#endif
diff --git a/kernel/arch/x86/include/asm/espfix.h b/kernel/arch/x86/include/asm/espfix.h
new file mode 100644
index 000000000..99efebb2f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/espfix.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_ESPFIX_H
+#define _ASM_X86_ESPFIX_H
+
+#ifdef CONFIG_X86_64
+
+#include <asm/percpu.h>
+
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
+
+extern void init_espfix_bsp(void);
+extern void init_espfix_ap(void);
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_ESPFIX_H */
diff --git a/kernel/arch/x86/include/asm/exec.h b/kernel/arch/x86/include/asm/exec.h
new file mode 100644
index 000000000..54c2e1db2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/exec.h
@@ -0,0 +1 @@
+/* define arch_align_stack() here */
diff --git a/kernel/arch/x86/include/asm/fb.h b/kernel/arch/x86/include/asm/fb.h
new file mode 100644
index 000000000..c3dd5e71f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/fb.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_X86_FB_H
+#define _ASM_X86_FB_H
+
+#include <linux/fb.h>
+#include <linux/fs.h>
+#include <asm/page.h>
+
+static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
+ unsigned long off)
+{
+ unsigned long prot;
+
+ prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
+ if (boot_cpu_data.x86 > 3)
+ pgprot_val(vma->vm_page_prot) =
+ prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
+}
+
+extern int fb_is_primary_device(struct fb_info *info);
+
+#endif /* _ASM_X86_FB_H */
diff --git a/kernel/arch/x86/include/asm/fixmap.h b/kernel/arch/x86/include/asm/fixmap.h
new file mode 100644
index 000000000..f80d70009
--- /dev/null
+++ b/kernel/arch/x86/include/asm/fixmap.h
@@ -0,0 +1,170 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ */
+
+#ifndef _ASM_X86_FIXMAP_H
+#define _ASM_X86_FIXMAP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/acpi.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#include <asm/pvclock.h>
+#ifdef CONFIG_X86_32
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#else
+#include <uapi/asm/vsyscall.h>
+#endif
+
+/*
+ * We can't declare FIXADDR_TOP as a variable for x86_64 because vsyscall
+ * uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
+ * Because of this, FIXADDR_TOP x86 integration was left as later work.
+ */
+#ifdef CONFIG_X86_32
+/* used by vmalloc.c, vsyscall.lds.S.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+extern unsigned long __FIXADDR_TOP;
+#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
+#else
+#define FIXADDR_TOP (round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
+ PAGE_SIZE)
+#endif
+
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ * For x86_32: we allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(): we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_X86_32
+ FIX_HOLE,
+#else
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+ VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
+#endif
+#ifdef CONFIG_PARAVIRT_CLOCK
+ PVCLOCK_FIXMAP_BEGIN,
+ PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
+#endif
+#endif
+ FIX_DBGP_BASE,
+ FIX_EARLYCON_MEM_BASE,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+ FIX_OHCI1394_BASE,
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+ FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ FIX_IO_APIC_BASE_0,
+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
+#endif
+ FIX_RO_IDT, /* Virtual mapping for read-only IDT */
+#ifdef CONFIG_X86_32
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#ifdef CONFIG_PCI_MMCONFIG
+ FIX_PCIE_MCFG,
+#endif
+#endif
+#ifdef CONFIG_PARAVIRT
+ FIX_PARAVIRT_BOOTMAP,
+#endif
+ FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
+ FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
+#ifdef CONFIG_X86_INTEL_MID
+ FIX_LNW_VRTC,
+#endif
+ __end_of_permanent_fixed_addresses,
+
+ /*
+ * 512 temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ *
+ * If necessary we round it up to the next 512 pages boundary so
+ * that we can have a single pgd entry and a single pte table:
+ */
+#define NR_FIX_BTMAPS 64
+#define FIX_BTMAPS_SLOTS 8
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+ FIX_BTMAP_END =
+ (__end_of_permanent_fixed_addresses ^
+ (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) &
+ -PTRS_PER_PTE
+ ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS -
+ (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
+ : __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+#ifdef CONFIG_X86_32
+ FIX_WP_TEST,
+#endif
+#ifdef CONFIG_INTEL_TXT
+ FIX_TBOOT_BASE,
+#endif
+ __end_of_fixed_addresses
+};
+
+
+extern void reserve_top_address(unsigned long reserve);
+
+#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+extern int fixmaps_set;
+
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
+void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
+void native_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags);
+
+#ifndef CONFIG_PARAVIRT
+static inline void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ native_set_fixmap(idx, phys, flags);
+}
+#endif
+
+#include <asm-generic/fixmap.h>
+
+#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
+#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0))
+
+void __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_X86_FIXMAP_H */
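
A minimal stand-alone sketch of the compile-time allocation scheme described in the fixmap header above: each fixmap index corresponds to one page counted down from a constant top address, so converting an index to a virtual address is pure arithmetic (mirroring __fix_to_virt() from asm-generic/fixmap.h). The constants and the tiny enum here are illustrative placeholders, not the kernel's values.

#include <stdio.h>

#define PAGE_SHIFT   12
#define FIXADDR_TOP  0xfffff000UL   /* placeholder top-of-fixmap address */

enum fixed_addresses_demo { FIX_HOLE, FIX_DBGP_BASE, FIX_EARLYCON_MEM_BASE, DEMO_NR };

/* Same shape as __fix_to_virt(): index N lives N pages below the top. */
static unsigned long fix_to_virt_demo(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < DEMO_NR; i++)
		printf("fixmap slot %u -> %#lx\n", i, fix_to_virt_demo(i));
	return 0;
}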
diff --git a/kernel/arch/x86/include/asm/floppy.h b/kernel/arch/x86/include/asm/floppy.h
new file mode 100644
index 000000000..1c7eefe32
--- /dev/null
+++ b/kernel/arch/x86/include/asm/floppy.h
@@ -0,0 +1,281 @@
+/*
+ * Architecture specific parts of the Floppy driver
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995
+ */
+#ifndef _ASM_X86_FLOPPY_H
+#define _ASM_X86_FLOPPY_H
+
+#include <linux/vmalloc.h>
+
+/*
+ * The DMA channel used by the floppy controller cannot access data at
+ * addresses >= 16MB
+ *
+ * We went back to the 1MB limit, as some people had problems with the floppy
+ * driver otherwise. It doesn't matter much for performance anyway, as most
+ * floppy accesses go through the track buffer.
+ */
+#define _CROSS_64KB(a, s, vdma) \
+ (!(vdma) && \
+ ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
+
+#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1)
+
+
+#define SW fd_routine[use_virtual_dma & 1]
+#define CSW fd_routine[can_use_virtual_dma & 1]
+
+
+#define fd_inb(port) inb_p(port)
+#define fd_outb(value, port) outb_p(value, port)
+
+#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy")
+#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
+#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
+#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
+#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
+#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
+#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
+
+#define FLOPPY_CAN_FALLBACK_ON_NODMA
+
+static int virtual_dma_count;
+static int virtual_dma_residue;
+static char *virtual_dma_addr;
+static int virtual_dma_mode;
+static int doing_pdma;
+
+static irqreturn_t floppy_hardint(int irq, void *dev_id)
+{
+ unsigned char st;
+
+#undef TRACE_FLPY_INT
+
+#ifdef TRACE_FLPY_INT
+ static int calls;
+ static int bytes;
+ static int dma_wait;
+#endif
+ if (!doing_pdma)
+ return floppy_interrupt(irq, dev_id);
+
+#ifdef TRACE_FLPY_INT
+ if (!calls)
+ bytes = virtual_dma_count;
+#endif
+
+ {
+ int lcount;
+ char *lptr;
+
+ st = 1;
+ for (lcount = virtual_dma_count, lptr = virtual_dma_addr;
+ lcount; lcount--, lptr++) {
+ st = inb(virtual_dma_port + 4) & 0xa0;
+ if (st != 0xa0)
+ break;
+ if (virtual_dma_mode)
+ outb_p(*lptr, virtual_dma_port + 5);
+ else
+ *lptr = inb_p(virtual_dma_port + 5);
+ }
+ virtual_dma_count = lcount;
+ virtual_dma_addr = lptr;
+ st = inb(virtual_dma_port + 4);
+ }
+
+#ifdef TRACE_FLPY_INT
+ calls++;
+#endif
+ if (st == 0x20)
+ return IRQ_HANDLED;
+ if (!(st & 0x20)) {
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count = 0;
+#ifdef TRACE_FLPY_INT
+ printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+ virtual_dma_count, virtual_dma_residue, calls, bytes,
+ dma_wait);
+ calls = 0;
+ dma_wait = 0;
+#endif
+ doing_pdma = 0;
+ floppy_interrupt(irq, dev_id);
+ return IRQ_HANDLED;
+ }
+#ifdef TRACE_FLPY_INT
+ if (!virtual_dma_count)
+ dma_wait++;
+#endif
+ return IRQ_HANDLED;
+}
+
+static void fd_disable_dma(void)
+{
+ if (!(can_use_virtual_dma & 1))
+ disable_dma(FLOPPY_DMA);
+ doing_pdma = 0;
+ virtual_dma_residue += virtual_dma_count;
+ virtual_dma_count = 0;
+}
+
+static int vdma_request_dma(unsigned int dmanr, const char *device_id)
+{
+ return 0;
+}
+
+static void vdma_nop(unsigned int dummy)
+{
+}
+
+
+static int vdma_get_dma_residue(unsigned int dummy)
+{
+ return virtual_dma_count + virtual_dma_residue;
+}
+
+
+static int fd_request_irq(void)
+{
+ if (can_use_virtual_dma)
+ return request_irq(FLOPPY_IRQ, floppy_hardint,
+ 0, "floppy", NULL);
+ else
+ return request_irq(FLOPPY_IRQ, floppy_interrupt,
+ 0, "floppy", NULL);
+}
+
+static unsigned long dma_mem_alloc(unsigned long size)
+{
+ return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size));
+}
+
+
+static unsigned long vdma_mem_alloc(unsigned long size)
+{
+	return (unsigned long)vmalloc(size);
+}
+
+#define nodma_mem_alloc(size) vdma_mem_alloc(size)
+
+static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
+{
+ if ((unsigned long)addr >= (unsigned long)high_memory)
+ vfree((void *)addr);
+ else
+ free_pages(addr, get_order(size));
+}
+
+#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
+
+static void _fd_chose_dma_mode(char *addr, unsigned long size)
+{
+ if (can_use_virtual_dma == 2) {
+ if ((unsigned long)addr >= (unsigned long)high_memory ||
+ isa_virt_to_bus(addr) >= 0x1000000 ||
+ _CROSS_64KB(addr, size, 0))
+ use_virtual_dma = 1;
+ else
+ use_virtual_dma = 0;
+ } else {
+ use_virtual_dma = can_use_virtual_dma & 1;
+ }
+}
+
+#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
+
+
+static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+ doing_pdma = 1;
+ virtual_dma_port = io;
+ virtual_dma_mode = (mode == DMA_MODE_WRITE);
+ virtual_dma_addr = addr;
+ virtual_dma_count = size;
+ virtual_dma_residue = 0;
+ return 0;
+}
+
+static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
+{
+#ifdef FLOPPY_SANITY_CHECK
+ if (CROSS_64KB(addr, size)) {
+ printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
+ return -1;
+ }
+#endif
+ /* actual, physical DMA */
+ doing_pdma = 0;
+ clear_dma_ff(FLOPPY_DMA);
+ set_dma_mode(FLOPPY_DMA, mode);
+ set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr));
+ set_dma_count(FLOPPY_DMA, size);
+ enable_dma(FLOPPY_DMA);
+ return 0;
+}
+
+static struct fd_routine_l {
+ int (*_request_dma)(unsigned int dmanr, const char *device_id);
+ void (*_free_dma)(unsigned int dmanr);
+ int (*_get_dma_residue)(unsigned int dummy);
+ unsigned long (*_dma_mem_alloc)(unsigned long size);
+ int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+} fd_routine[] = {
+ {
+ request_dma,
+ free_dma,
+ get_dma_residue,
+ dma_mem_alloc,
+ hard_dma_setup
+ },
+ {
+ vdma_request_dma,
+ vdma_nop,
+ vdma_get_dma_residue,
+ vdma_mem_alloc,
+ vdma_dma_setup
+ }
+};
+
+
+static int FDC1 = 0x3f0;
+static int FDC2 = -1;
+
+/*
+ * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
+ * is needed to prevent corrupted CMOS RAM in case "insmod floppy"
+ * coincides with another rtc CMOS user. Paul G.
+ */
+#define FLOPPY0_TYPE \
+({ \
+ unsigned long flags; \
+ unsigned char val; \
+ spin_lock_irqsave(&rtc_lock, flags); \
+ val = (CMOS_READ(0x10) >> 4) & 15; \
+ spin_unlock_irqrestore(&rtc_lock, flags); \
+ val; \
+})
+
+#define FLOPPY1_TYPE \
+({ \
+ unsigned long flags; \
+ unsigned char val; \
+ spin_lock_irqsave(&rtc_lock, flags); \
+ val = CMOS_READ(0x10) & 15; \
+ spin_unlock_irqrestore(&rtc_lock, flags); \
+ val; \
+})
+
+#define N_FDC 2
+#define N_DRIVE 8
+
+#define EXTRA_FLOPPY_PARAMS
+
+#endif /* _ASM_X86_FLOPPY_H */
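
A hedged, stand-alone illustration of the nibble decoding done by FLOPPY0_TYPE/FLOPPY1_TYPE above: CMOS register 0x10 packs the first drive's type in the high nibble and the second drive's in the low nibble. The sample byte below is made up, and the rtc_lock protection is irrelevant here since this operates on a plain local value.

#include <stdio.h>

int main(void)
{
	unsigned char cmos_0x10 = 0x42;                 /* hypothetical CMOS value */
	unsigned char drive_a = (cmos_0x10 >> 4) & 15;  /* as FLOPPY0_TYPE */
	unsigned char drive_b = cmos_0x10 & 15;         /* as FLOPPY1_TYPE */

	printf("drive A type=%u, drive B type=%u\n", drive_a, drive_b);
	return 0;
}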
diff --git a/kernel/arch/x86/include/asm/fpu-internal.h b/kernel/arch/x86/include/asm/fpu-internal.h
new file mode 100644
index 000000000..da5e96756
--- /dev/null
+++ b/kernel/arch/x86/include/asm/fpu-internal.h
@@ -0,0 +1,626 @@
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef _FPU_INTERNAL_H
+#define _FPU_INTERNAL_H
+
+#include <linux/kernel_stat.h>
+#include <linux/regset.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <asm/asm.h>
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <asm/user.h>
+#include <asm/uaccess.h>
+#include <asm/xsave.h>
+#include <asm/smap.h>
+
+#ifdef CONFIG_X86_64
+# include <asm/sigcontext32.h>
+# include <asm/user32.h>
+struct ksignal;
+int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+ compat_sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct ksignal *ksig,
+ compat_sigset_t *set, struct pt_regs *regs);
+#else
+# define user_i387_ia32_struct user_i387_struct
+# define user32_fxsr_struct user_fxsr_struct
+# define ia32_setup_frame __setup_frame
+# define ia32_setup_rt_frame __setup_rt_frame
+#endif
+
+extern unsigned int mxcsr_feature_mask;
+extern void fpu_init(void);
+extern void eager_fpu_init(void);
+
+DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
+
+extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
+ struct task_struct *tsk);
+extern void convert_to_fxsr(struct task_struct *tsk,
+ const struct user_i387_ia32_struct *env);
+
+extern user_regset_active_fn fpregs_active, xfpregs_active;
+extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
+ xstateregs_get;
+extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
+ xstateregs_set;
+
+/*
+ * xstateregs_active == fpregs_active. Please refer to the comment
+ * at the definition of fpregs_active.
+ */
+#define xstateregs_active fpregs_active
+
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_soft_fpu(struct i387_soft_struct *soft);
+#else
+static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
+#endif
+
+/*
+ * Must be run with preemption disabled: this clears the fpu_owner_task
+ * on this CPU.
+ *
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, its state will still be saved
+ * at the next context switch.
+ */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+ per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
+/*
+ * Used to indicate that the FPU state in memory is newer than the FPU
+ * state in registers, and the FPU state should be reloaded next time the
+ * task is run. Only safe on the current task, or non-running tasks.
+ */
+static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
+{
+ tsk->thread.fpu.last_cpu = ~0;
+}
+
+static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
+{
+ return new == this_cpu_read_stable(fpu_owner_task) &&
+ cpu == new->thread.fpu.last_cpu;
+}
+
+static inline int is_ia32_compat_frame(void)
+{
+ return config_enabled(CONFIG_IA32_EMULATION) &&
+ test_thread_flag(TIF_IA32);
+}
+
+static inline int is_ia32_frame(void)
+{
+ return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+}
+
+static inline int is_x32_frame(void)
+{
+ return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+}
+
+#define X87_FSW_ES (1 << 7) /* Exception Summary */
+
+static __always_inline __pure bool use_eager_fpu(void)
+{
+ return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+}
+
+static __always_inline __pure bool use_xsaveopt(void)
+{
+ return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+}
+
+static __always_inline __pure bool use_xsave(void)
+{
+ return static_cpu_has_safe(X86_FEATURE_XSAVE);
+}
+
+static __always_inline __pure bool use_fxsr(void)
+{
+ return static_cpu_has_safe(X86_FEATURE_FXSR);
+}
+
+static inline void fx_finit(struct i387_fxsave_struct *fx)
+{
+ fx->cwd = 0x37f;
+ fx->mxcsr = MXCSR_DEFAULT;
+}
+
+extern void __sanitize_i387_state(struct task_struct *);
+
+static inline void sanitize_i387_state(struct task_struct *tsk)
+{
+ if (!use_xsaveopt())
+ return;
+ __sanitize_i387_state(tsk);
+}
+
+#define user_insn(insn, output, input...) \
+({ \
+ int err; \
+ asm volatile(ASM_STAC "\n" \
+ "1:" #insn "\n\t" \
+ "2: " ASM_CLAC "\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $-1,%[err]\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [err] "=r" (err), output \
+ : "0"(0), input); \
+ err; \
+})
+
+#define check_insn(insn, output, input...) \
+({ \
+ int err; \
+ asm volatile("1:" #insn "\n\t" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $-1,%[err]\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [err] "=r" (err), output \
+ : "0"(0), input); \
+ err; \
+})
+
+static inline int fsave_user(struct i387_fsave_struct __user *fx)
+{
+ return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
+}
+
+static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+{
+ if (config_enabled(CONFIG_X86_32))
+ return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+
+ /* See comment in fpu_fxsave() below. */
+ return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
+}
+
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+{
+ if (config_enabled(CONFIG_X86_32))
+ return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+ /* See comment in fpu_fxsave() below. */
+ return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+ "m" (*fx));
+}
+
+static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
+{
+ if (config_enabled(CONFIG_X86_32))
+ return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+ /* See comment in fpu_fxsave() below. */
+ return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+ "m" (*fx));
+}
+
+static inline int frstor_checking(struct i387_fsave_struct *fx)
+{
+ return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
+static inline int frstor_user(struct i387_fsave_struct __user *fx)
+{
+ return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
+static inline void fpu_fxsave(struct fpu *fpu)
+{
+ if (config_enabled(CONFIG_X86_32))
+ asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ else {
+ /* Using "rex64; fxsave %0" is broken because, if the memory
+ * operand uses any extended registers for addressing, a second
+ * REX prefix will be generated (to the assembler, rex64
+ * followed by semicolon is a separate instruction), and hence
+ * the 64-bitness is lost.
+ *
+ * Using "fxsaveq %0" would be the ideal choice, but is only
+ * supported starting with gas 2.16.
+ *
+ * Using, as a workaround, the properly prefixed form below
+ * isn't accepted by any binutils version so far released,
+ * complaining that the same type of prefix is used twice if
+ * an extended register is needed for addressing (fix submitted
+ * to mainline 2005-11-21).
+ *
+ * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
+ *
+ * This, however, we can work around by forcing the compiler to
+ * select an addressing mode that doesn't require extended
+ * registers.
+ */
+ asm volatile( "rex64/fxsave (%[fx])"
+ : "=m" (fpu->state->fxsave)
+ : [fx] "R" (&fpu->state->fxsave));
+ }
+}
+
+/*
+ * These must be called with preempt disabled. They return
+ * 'true' if the FPU state is still intact.
+ */
+static inline int fpu_save_init(struct fpu *fpu)
+{
+ if (use_xsave()) {
+ fpu_xsave(fpu);
+
+ /*
+ * xsave header may indicate the init state of the FP.
+ */
+ if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
+ return 1;
+ } else if (use_fxsr()) {
+ fpu_fxsave(fpu);
+ } else {
+ asm volatile("fnsave %[fx]; fwait"
+ : [fx] "=m" (fpu->state->fsave));
+ return 0;
+ }
+
+ /*
+ * If exceptions are pending, we need to clear them so
+ * that we don't randomly get exceptions later.
+ *
+ * FIXME! Is this perhaps only true for the old-style
+ * irq13 case? Maybe we could leave the x87 state
+ * intact otherwise?
+ */
+ if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
+ asm volatile("fnclex");
+ return 0;
+ }
+ return 1;
+}
+
+static inline int __save_init_fpu(struct task_struct *tsk)
+{
+ return fpu_save_init(&tsk->thread.fpu);
+}
+
+static inline int fpu_restore_checking(struct fpu *fpu)
+{
+ if (use_xsave())
+ return fpu_xrstor_checking(&fpu->state->xsave);
+ else if (use_fxsr())
+ return fxrstor_checking(&fpu->state->fxsave);
+ else
+ return frstor_checking(&fpu->state->fsave);
+}
+
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+ /*
+ * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+ * pending. Clear the x87 state here by setting it to fixed values.
+	 * "m" is an arbitrary memory operand that should already be in L1.
+ */
+ if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+ "fildl %P[addr]" /* set F?P to defined value */
+ : : [addr] "m" (tsk->thread.fpu.has_fpu));
+ }
+
+ return fpu_restore_checking(&tsk->thread.fpu);
+}
+
+/*
+ * Software FPU state helpers. Careful: these need to
+ * be preemption protection *and* they need to be
+ * properly paired with the CR0.TS changes!
+ */
+static inline int __thread_has_fpu(struct task_struct *tsk)
+{
+ return tsk->thread.fpu.has_fpu;
+}
+
+/* Must be paired with an 'stts' after! */
+static inline void __thread_clear_has_fpu(struct task_struct *tsk)
+{
+ tsk->thread.fpu.has_fpu = 0;
+ this_cpu_write(fpu_owner_task, NULL);
+}
+
+/* Must be paired with a 'clts' before! */
+static inline void __thread_set_has_fpu(struct task_struct *tsk)
+{
+ tsk->thread.fpu.has_fpu = 1;
+ this_cpu_write(fpu_owner_task, tsk);
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own.
+ */
+static inline void __thread_fpu_end(struct task_struct *tsk)
+{
+ __thread_clear_has_fpu(tsk);
+ if (!use_eager_fpu())
+ stts();
+}
+
+static inline void __thread_fpu_begin(struct task_struct *tsk)
+{
+ if (!use_eager_fpu())
+ clts();
+ __thread_set_has_fpu(tsk);
+}
+
+static inline void drop_fpu(struct task_struct *tsk)
+{
+ /*
+ * Forget coprocessor state..
+ */
+ preempt_disable();
+ tsk->thread.fpu_counter = 0;
+
+ if (__thread_has_fpu(tsk)) {
+ /* Ignore delayed exceptions from user space */
+ asm volatile("1: fwait\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b));
+ __thread_fpu_end(tsk);
+ }
+
+ clear_stopped_child_used_math(tsk);
+ preempt_enable();
+}
+
+static inline void restore_init_xstate(void)
+{
+ if (use_xsave())
+ xrstor_state(init_xstate_buf, -1);
+ else
+ fxrstor_checking(&init_xstate_buf->i387);
+}
+
+/*
+ * Reset the FPU state in the eager case and drop it in the lazy case (later use
+ * will reinit it).
+ */
+static inline void fpu_reset_state(struct task_struct *tsk)
+{
+ if (!use_eager_fpu())
+ drop_fpu(tsk);
+ else
+ restore_init_xstate();
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ * - switch_fpu_prepare() saves the old state and
+ * sets the new state of the CR0.TS bit. This is
+ * done within the context of the old process.
+ *
+ * - switch_fpu_finish() restores the new state as
+ * necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+{
+ fpu_switch_t fpu;
+
+ /*
+	 * If the task has used the FPU, pre-load its state on eager-FPU
+	 * (xsave) processors, or if the past 5 consecutive context
+	 * switches used the FPU.
+ */
+ fpu.preload = tsk_used_math(new) &&
+ (use_eager_fpu() || new->thread.fpu_counter > 5);
+
+ if (__thread_has_fpu(old)) {
+ if (!__save_init_fpu(old))
+ task_disable_lazy_fpu_restore(old);
+ else
+ old->thread.fpu.last_cpu = cpu;
+
+ /* But leave fpu_owner_task! */
+ old->thread.fpu.has_fpu = 0;
+
+ /* Don't change CR0.TS if we just switch! */
+ if (fpu.preload) {
+ new->thread.fpu_counter++;
+ __thread_set_has_fpu(new);
+ prefetch(new->thread.fpu.state);
+ } else if (!use_eager_fpu())
+ stts();
+ } else {
+ old->thread.fpu_counter = 0;
+ task_disable_lazy_fpu_restore(old);
+ if (fpu.preload) {
+ new->thread.fpu_counter++;
+ if (fpu_lazy_restore(new, cpu))
+ fpu.preload = 0;
+ else
+ prefetch(new->thread.fpu.state);
+ __thread_fpu_begin(new);
+ }
+ }
+ return fpu;
+}
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
+{
+ if (fpu.preload) {
+ if (unlikely(restore_fpu_checking(new)))
+ fpu_reset_state(new);
+ }
+}
+
+/*
+ * Signal frame handlers...
+ */
+extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
+extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
+
+static inline int xstate_sigframe_size(void)
+{
+ return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+}
+
+static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
+{
+ void __user *buf_fx = buf;
+ int size = xstate_sigframe_size();
+
+ if (ia32_frame && use_fxsr()) {
+ buf_fx = buf + sizeof(struct i387_fsave_struct);
+ size += sizeof(struct i387_fsave_struct);
+ }
+
+ return __restore_xstate_sig(buf, buf_fx, size);
+}
+
+/*
+ * Needs to be preemption-safe.
+ *
+ * NOTE! user_fpu_begin() must be used only immediately before restoring
+ * the save state. It does not do any saving/restoring on its own. In
+ * lazy FPU mode, it is just an optimization to avoid a #NM exception;
+ * the task can lose the FPU right after preempt_enable() anyway.
+ */
+static inline void user_fpu_begin(void)
+{
+ preempt_disable();
+ if (!user_has_fpu())
+ __thread_fpu_begin(current);
+ preempt_enable();
+}
+
+static inline void __save_fpu(struct task_struct *tsk)
+{
+ if (use_xsave()) {
+ if (unlikely(system_state == SYSTEM_BOOTING))
+ xsave_state_booting(&tsk->thread.fpu.state->xsave, -1);
+ else
+ xsave_state(&tsk->thread.fpu.state->xsave, -1);
+ } else
+ fpu_fxsave(&tsk->thread.fpu);
+}
+
+/*
+ * i387 state interaction
+ */
+static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
+{
+ if (cpu_has_fxsr) {
+ return tsk->thread.fpu.state->fxsave.cwd;
+ } else {
+ return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
+ }
+}
+
+static inline unsigned short get_fpu_swd(struct task_struct *tsk)
+{
+ if (cpu_has_fxsr) {
+ return tsk->thread.fpu.state->fxsave.swd;
+ } else {
+ return (unsigned short)tsk->thread.fpu.state->fsave.swd;
+ }
+}
+
+static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
+{
+ if (cpu_has_xmm) {
+ return tsk->thread.fpu.state->fxsave.mxcsr;
+ } else {
+ return MXCSR_DEFAULT;
+ }
+}
+
+static bool fpu_allocated(struct fpu *fpu)
+{
+ return fpu->state != NULL;
+}
+
+static inline int fpu_alloc(struct fpu *fpu)
+{
+ if (fpu_allocated(fpu))
+ return 0;
+ fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
+ if (!fpu->state)
+ return -ENOMEM;
+ WARN_ON((unsigned long)fpu->state & 15);
+ return 0;
+}
+
+static inline void fpu_free(struct fpu *fpu)
+{
+ if (fpu->state) {
+ kmem_cache_free(task_xstate_cachep, fpu->state);
+ fpu->state = NULL;
+ }
+}
+
+static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
+{
+ if (use_eager_fpu()) {
+ memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
+ __save_fpu(dst);
+ } else {
+ struct fpu *dfpu = &dst->thread.fpu;
+ struct fpu *sfpu = &src->thread.fpu;
+
+ unlazy_fpu(src);
+ memcpy(dfpu->state, sfpu->state, xstate_size);
+ }
+}
+
+static inline unsigned long
+alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
+ unsigned long *size)
+{
+ unsigned long frame_size = xstate_sigframe_size();
+
+ *buf_fx = sp = round_down(sp - frame_size, 64);
+ if (ia32_frame && use_fxsr()) {
+ frame_size += sizeof(struct i387_fsave_struct);
+ sp -= sizeof(struct i387_fsave_struct);
+ }
+
+ *size = frame_size;
+ return sp;
+}
+
+#endif
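
A hedged sketch of how a context-switch path could use the two-stage FPU switching API declared in this header. The surrounding function is invented for illustration (it is not the kernel's __switch_to()); only switch_fpu_prepare() and switch_fpu_finish() come from the header above, and the sketch assumes the usual kernel environment.

/* Illustrative only: assumes kernel context and the declarations above. */
static struct task_struct *
demo_switch_to(struct task_struct *prev, struct task_struct *next, int cpu)
{
	fpu_switch_t fpu;

	/* Stage 1: save prev's FPU state, decide whether to preload next's. */
	fpu = switch_fpu_prepare(prev, next, cpu);

	/* ... the real code switches stacks, segments, TLS, etc. here ... */

	/* Stage 2: restore next's register state if it was marked for preload. */
	switch_fpu_finish(next, fpu);

	return prev;
}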
diff --git a/kernel/arch/x86/include/asm/frame.h b/kernel/arch/x86/include/asm/frame.h
new file mode 100644
index 000000000..3b629f47e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/frame.h
@@ -0,0 +1,26 @@
+#ifdef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/dwarf2.h>
+
+/* The annotation hides the frame from the unwinder and makes it look
+   like an ordinary ebp save/restore. This avoids some special cases for
+   the frame pointer code later. */
+#ifdef CONFIG_FRAME_POINTER
+ .macro FRAME
+ __ASM_SIZE(push,_cfi) %__ASM_REG(bp)
+ CFI_REL_OFFSET __ASM_REG(bp), 0
+ __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp)
+ .endm
+ .macro ENDFRAME
+ __ASM_SIZE(pop,_cfi) %__ASM_REG(bp)
+ CFI_RESTORE __ASM_REG(bp)
+ .endm
+#else
+ .macro FRAME
+ .endm
+ .macro ENDFRAME
+ .endm
+#endif
+
+#endif /* __ASSEMBLY__ */
diff --git a/kernel/arch/x86/include/asm/ftrace.h b/kernel/arch/x86/include/asm/ftrace.h
new file mode 100644
index 000000000..f45acad3c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/ftrace.h
@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_FTRACE_H
+#define _ASM_X86_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_FENTRY
+# define MCOUNT_ADDR ((long)(__fentry__))
+#else
+# define MCOUNT_ADDR ((long)(mcount))
+#endif
+#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
+#ifndef __ASSEMBLY__
+extern void mcount(void);
+extern atomic_t modifying_ftrace_code;
+extern void __fentry__(void);
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /*
+ * addr is the address of the mcount call instruction.
+ * recordmcount does the necessary offset calculation.
+ */
+ return addr;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+struct dyn_arch_ftrace {
+ /* No extra data needed for x86 */
+};
+
+int ftrace_int3_handler(struct pt_regs *regs);
+
+#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+
+#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
+#include <asm/compat.h>
+
+/*
+ * Because ia32 syscalls do not map to x86_64 syscall numbers,
+ * this screws up the trace output when tracing an ia32 task.
+ * Instead of reporting bogus syscalls, just do not trace them.
+ *
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ if (is_compat_task())
+ return true;
+ return false;
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
+#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
+
+#endif /* _ASM_X86_FTRACE_H */
diff --git a/kernel/arch/x86/include/asm/futex.h b/kernel/arch/x86/include/asm/futex.h
new file mode 100644
index 000000000..b4c1f5453
--- /dev/null
+++ b/kernel/arch/x86/include/asm/futex.h
@@ -0,0 +1,117 @@
+#ifndef _ASM_X86_FUTEX_H
+#define _ASM_X86_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+
+#include <asm/asm.h>
+#include <asm/errno.h>
+#include <asm/processor.h>
+#include <asm/smap.h>
+
+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" insn "\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
+
+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+ "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
+ "\tjnz\t1b\n" \
+ "3:\t" ASM_CLAC "\n" \
+ "\t.section .fixup,\"ax\"\n" \
+ "4:\tmov\t%5, %1\n" \
+ "\tjmp\t3b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+ "+m" (*uaddr), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret, tem;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
+}
+
+#endif
+#endif /* _ASM_X86_FUTEX_H */
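
A stand-alone sketch of the encoded_op unpacking performed by futex_atomic_op_inuser() above. The word packs op:4, cmp:4, oparg:12, cmparg:12 from the top down, and the two 12-bit arguments are recovered with sign-extending shifts; the packed value below is built by hand for illustration.

#include <stdio.h>

static void decode_futex_op(int encoded_op)
{
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;    /* sign-extended 12-bit field */
	int cmparg = (encoded_op << 20) >> 20;   /* sign-extended 12-bit field */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
}

int main(void)
{
	/* example packed word: op=1, cmp=5, oparg=1, cmparg=0 */
	int encoded = (1 << 28) | (5 << 24) | (1 << 12) | 0;

	decode_futex_op(encoded);   /* prints: op=1 cmp=5 oparg=1 cmparg=0 */
	return 0;
}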
diff --git a/kernel/arch/x86/include/asm/gart.h b/kernel/arch/x86/include/asm/gart.h
new file mode 100644
index 000000000..156cd5d18
--- /dev/null
+++ b/kernel/arch/x86/include/asm/gart.h
@@ -0,0 +1,113 @@
+#ifndef _ASM_X86_GART_H
+#define _ASM_X86_GART_H
+
+#include <asm/e820.h>
+
+extern void set_up_gart_resume(u32, u32);
+
+extern int fallback_aper_order;
+extern int fallback_aper_force;
+extern int fix_aperture;
+
+/* PTE bits. */
+#define GPTE_VALID 1
+#define GPTE_COHERENT 2
+
+/* Aperture control register bits. */
+#define GARTEN (1<<0)
+#define DISGARTCPU (1<<4)
+#define DISGARTIO (1<<5)
+#define DISTLBWALKPRB (1<<6)
+
+/* GART cache control register bits. */
+#define INVGART (1<<0)
+#define GARTPTEERR (1<<1)
+
+/* K8 On-cpu GART registers */
+#define AMD64_GARTAPERTURECTL 0x90
+#define AMD64_GARTAPERTUREBASE 0x94
+#define AMD64_GARTTABLEBASE 0x98
+#define AMD64_GARTCACHECTL 0x9c
+
+#ifdef CONFIG_GART_IOMMU
+extern int gart_iommu_aperture;
+extern int gart_iommu_aperture_allowed;
+extern int gart_iommu_aperture_disabled;
+
+extern void early_gart_iommu_check(void);
+extern int gart_iommu_init(void);
+extern void __init gart_parse_options(char *);
+extern int gart_iommu_hole_init(void);
+
+#else
+#define gart_iommu_aperture 0
+#define gart_iommu_aperture_allowed 0
+#define gart_iommu_aperture_disabled 1
+
+static inline void early_gart_iommu_check(void)
+{
+}
+static inline void gart_parse_options(char *options)
+{
+}
+static inline int gart_iommu_hole_init(void)
+{
+ return -ENODEV;
+}
+#endif
+
+extern int agp_amd64_init(void);
+
+static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
+{
+ u32 ctl;
+
+ /*
+ * Don't enable translation but enable GART IO and CPU accesses.
+	 * Also, set DISTLBWALKPRB since the GART table memory is UC.
+ */
+ ctl = order << 1;
+
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+}
+
+static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
+{
+ u32 tmp, ctl;
+
+ /* address of the mappings table */
+ addr >>= 12;
+ tmp = (u32) addr<<4;
+ tmp &= ~0xf;
+ pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
+
+ /* Enable GART translation for this hammer. */
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
+ ctl |= GARTEN | DISTLBWALKPRB;
+ ctl &= ~(DISGARTCPU | DISGARTIO);
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+}
+
+static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
+{
+ if (!aper_base)
+ return 0;
+
+ if (aper_base + aper_size > 0x100000000ULL) {
+ printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
+ return 0;
+ }
+ if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
+ printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
+ return 0;
+ }
+ if (aper_size < min_size) {
+		printk(KERN_INFO "Aperture too small (%d MB), minimum %d MB\n",
+ aper_size>>20, min_size>>20);
+ return 0;
+ }
+
+ return 1;
+}
+
+#endif /* _ASM_X86_GART_H */
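
A stand-alone illustration of the register packing done by enable_gart_translation() above: the GART table's physical address is shifted right by 12 and then left by 4, which suggests address bits [39:12] land in AMD64_GARTTABLEBASE bits [31:4] (an inference from the shifts, not a statement from this header). The sample address is made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t table_phys = 0x12345000ULL;   /* hypothetical, 4 KiB aligned */
	uint32_t reg;
	uint64_t unpacked;

	reg = (uint32_t)((table_phys >> 12) << 4) & ~0xfu;   /* as in the header */
	unpacked = ((uint64_t)(reg >> 4)) << 12;             /* reverse, for checking */

	printf("GARTTABLEBASE=%#x, decodes back to %#llx\n",
	       reg, (unsigned long long)unpacked);
	return 0;
}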
diff --git a/kernel/arch/x86/include/asm/genapic.h b/kernel/arch/x86/include/asm/genapic.h
new file mode 100644
index 000000000..4b8b98fa7
--- /dev/null
+++ b/kernel/arch/x86/include/asm/genapic.h
@@ -0,0 +1 @@
+#include <asm/apic.h>
diff --git a/kernel/arch/x86/include/asm/geode.h b/kernel/arch/x86/include/asm/geode.h
new file mode 100644
index 000000000..7cd73552a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/geode.h
@@ -0,0 +1,36 @@
+/*
+ * AMD Geode definitions
+ * Copyright (C) 2006, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_X86_GEODE_H
+#define _ASM_X86_GEODE_H
+
+#include <asm/processor.h>
+#include <linux/io.h>
+#include <linux/cs5535.h>
+
+static inline int is_geode_gx(void)
+{
+ return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) &&
+ (boot_cpu_data.x86 == 5) &&
+ (boot_cpu_data.x86_model == 5));
+}
+
+static inline int is_geode_lx(void)
+{
+ return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
+ (boot_cpu_data.x86 == 5) &&
+ (boot_cpu_data.x86_model == 10));
+}
+
+static inline int is_geode(void)
+{
+ return (is_geode_gx() || is_geode_lx());
+}
+
+#endif /* _ASM_X86_GEODE_H */
diff --git a/kernel/arch/x86/include/asm/gpio.h b/kernel/arch/x86/include/asm/gpio.h
new file mode 100644
index 000000000..b3799d88f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/gpio.h
@@ -0,0 +1,4 @@
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/kernel/arch/x86/include/asm/hardirq.h b/kernel/arch/x86/include/asm/hardirq.h
new file mode 100644
index 000000000..0f5fb6b65
--- /dev/null
+++ b/kernel/arch/x86/include/asm/hardirq.h
@@ -0,0 +1,63 @@
+#ifndef _ASM_X86_HARDIRQ_H
+#define _ASM_X86_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+ unsigned int __softirq_pending;
+ unsigned int __nmi_count; /* arch dependent */
+#ifdef CONFIG_X86_LOCAL_APIC
+ unsigned int apic_timer_irqs; /* arch dependent */
+ unsigned int irq_spurious_count;
+ unsigned int icr_read_retry_count;
+#endif
+#ifdef CONFIG_HAVE_KVM
+ unsigned int kvm_posted_intr_ipis;
+#endif
+ unsigned int x86_platform_ipis; /* arch dependent */
+ unsigned int apic_perf_irqs;
+ unsigned int apic_irq_work_irqs;
+#ifdef CONFIG_SMP
+ unsigned int irq_resched_count;
+ unsigned int irq_call_count;
+ /*
+ * irq_tlb_count is double-counted in irq_call_count, so it must be
+ * subtracted from irq_call_count when displaying irq_call_count
+ */
+ unsigned int irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unsigned int irq_thermal_count;
+#endif
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unsigned int irq_threshold_count;
+#endif
+#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
+ unsigned int irq_hv_callback_count;
+#endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+
+#define __ARCH_IRQ_STAT
+
+#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
+
+#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) \
+ this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) this_cpu_or(irq_stat.__softirq_pending, (x))
+
+extern void ack_bad_irq(unsigned int irq);
+
+extern u64 arch_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu arch_irq_stat_cpu
+
+extern u64 arch_irq_stat(void);
+#define arch_irq_stat arch_irq_stat
+
+#endif /* _ASM_X86_HARDIRQ_H */
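
A hedged usage sketch of the inc_irq_stat() helper defined above: an interrupt handler bumps its per-CPU counter by naming the irq_cpustat_t field directly, and the macro turns that into a this_cpu_inc() on irq_stat. The wrapper function is invented for illustration and assumes kernel context.

/* Illustrative only: assumes kernel context and CONFIG_X86_LOCAL_APIC. */
static void demo_note_apic_timer_tick(void)
{
	/* Expands to this_cpu_inc(irq_stat.apic_timer_irqs). */
	inc_irq_stat(apic_timer_irqs);
}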
diff --git a/kernel/arch/x86/include/asm/highmem.h b/kernel/arch/x86/include/asm/highmem.h
new file mode 100644
index 000000000..04e9d0231
--- /dev/null
+++ b/kernel/arch/x86/include/asm/highmem.h
@@ -0,0 +1,80 @@
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ * Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_X86_HIGHMEM_H
+#define _ASM_X86_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/interrupt.h>
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#include <asm/tlbflush.h>
+#include <asm/paravirt.h>
+#include <asm/fixmap.h>
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM.
+ */
+/*
+ * Ordering is:
+ *
+ * high memory on: high_memory off:
+ * FIXADDR_TOP FIXADDR_TOP
+ * fixed addresses fixed addresses
+ * FIXADDR_START FIXADDR_START
+ * temp fixed addresses/persistent kmap area VMALLOC_END
+ * PKMAP_BASE temp fixed addresses/vmalloc area
+ * VMALLOC_END VMALLOC_START
+ * vmalloc area high_memory
+ * VMALLOC_START
+ * high_memory
+ *
+ * The temp fixed area is only used during boot for early_ioremap(), and
+ * it is unused once ioremap() is functional. The vmalloc/pkmap areas become
+ * available after early boot, so the temp fixed area is available for re-use.
+ */
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+void *kmap(struct page *page);
+void kunmap(struct page *page);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+struct page *kmap_atomic_to_page(void *ptr);
+
+#define flush_cache_kmaps() do { } while (0)
+
+extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_HIGHMEM_H */
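
A minimal stand-alone sketch of the PKMAP_NR()/PKMAP_ADDR() round trip declared above. PKMAP_BASE and PAGE_SHIFT are placeholder values here; the point is only that the two macros are inverses of each other.

#include <stdio.h>

#define PAGE_SHIFT     12
#define PKMAP_BASE     0xff800000UL     /* placeholder base of the pkmap area */
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

int main(void)
{
	unsigned long nr = 5;
	unsigned long addr = PKMAP_ADDR(nr);

	printf("slot %lu -> %#lx -> slot %lu\n",
	       nr, addr, (unsigned long)PKMAP_NR(addr));
	return 0;
}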
diff --git a/kernel/arch/x86/include/asm/hpet.h b/kernel/arch/x86/include/asm/hpet.h
new file mode 100644
index 000000000..36f712594
--- /dev/null
+++ b/kernel/arch/x86/include/asm/hpet.h
@@ -0,0 +1,118 @@
+#ifndef _ASM_X86_HPET_H
+#define _ASM_X86_HPET_H
+
+#include <linux/msi.h>
+
+#ifdef CONFIG_HPET_TIMER
+
+#define HPET_MMAP_SIZE 1024
+
+#define HPET_ID 0x000
+#define HPET_PERIOD 0x004
+#define HPET_CFG 0x010
+#define HPET_STATUS 0x020
+#define HPET_COUNTER 0x0f0
+
+#define HPET_Tn_CFG(n) (0x100 + 0x20 * n)
+#define HPET_Tn_CMP(n) (0x108 + 0x20 * n)
+#define HPET_Tn_ROUTE(n) (0x110 + 0x20 * n)
+
+#define HPET_T0_CFG 0x100
+#define HPET_T0_CMP 0x108
+#define HPET_T0_ROUTE 0x110
+#define HPET_T1_CFG 0x120
+#define HPET_T1_CMP 0x128
+#define HPET_T1_ROUTE 0x130
+#define HPET_T2_CFG 0x140
+#define HPET_T2_CMP 0x148
+#define HPET_T2_ROUTE 0x150
+
+#define HPET_ID_REV 0x000000ff
+#define HPET_ID_NUMBER 0x00001f00
+#define HPET_ID_64BIT 0x00002000
+#define HPET_ID_LEGSUP 0x00008000
+#define HPET_ID_VENDOR 0xffff0000
+#define HPET_ID_NUMBER_SHIFT 8
+#define HPET_ID_VENDOR_SHIFT 16
+
+#define HPET_CFG_ENABLE 0x001
+#define HPET_CFG_LEGACY 0x002
+#define HPET_LEGACY_8254 2
+#define HPET_LEGACY_RTC 8
+
+#define HPET_TN_LEVEL 0x0002
+#define HPET_TN_ENABLE 0x0004
+#define HPET_TN_PERIODIC 0x0008
+#define HPET_TN_PERIODIC_CAP 0x0010
+#define HPET_TN_64BIT_CAP 0x0020
+#define HPET_TN_SETVAL 0x0040
+#define HPET_TN_32BIT 0x0100
+#define HPET_TN_ROUTE 0x3e00
+#define HPET_TN_FSB 0x4000
+#define HPET_TN_FSB_CAP 0x8000
+#define HPET_TN_ROUTE_SHIFT 9
+
+/* Max HPET Period is 10^8 femto sec as in HPET spec */
+#define HPET_MAX_PERIOD 100000000UL
+/*
+ * Min HPET period is 10^5 femto sec just for safety. If it is less than this,
+ * then the 32-bit HPET counter wraps around in less than 0.5 sec.
+ */
+#define HPET_MIN_PERIOD 100000UL
+
+/* hpet memory map physical address */
+extern unsigned long hpet_address;
+extern unsigned long force_hpet_address;
+extern int boot_hpet_disable;
+extern u8 hpet_blockid;
+extern int hpet_force_user;
+extern u8 hpet_msi_disable;
+extern int is_hpet_enabled(void);
+extern int hpet_enable(void);
+extern void hpet_disable(void);
+extern unsigned int hpet_readl(unsigned int a);
+extern void force_hpet_resume(void);
+
+struct irq_data;
+extern void hpet_msi_unmask(struct irq_data *data);
+extern void hpet_msi_mask(struct irq_data *data);
+struct hpet_dev;
+extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
+extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
+
+#ifdef CONFIG_PCI_MSI
+extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
+#else
+static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+
+#include <linux/interrupt.h>
+
+typedef irqreturn_t (*rtc_irq_handler)(int interrupt, void *cookie);
+extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
+extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
+extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
+ unsigned char sec);
+extern int hpet_set_periodic_freq(unsigned long freq);
+extern int hpet_rtc_dropped_irq(void);
+extern int hpet_rtc_timer_init(void);
+extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
+extern int hpet_register_irq_handler(rtc_irq_handler handler);
+extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
+
+#endif /* CONFIG_HPET_EMULATE_RTC */
+
+#else /* CONFIG_HPET_TIMER */
+
+static inline int hpet_enable(void) { return 0; }
+static inline int is_hpet_enabled(void) { return 0; }
+#define hpet_readl(a) 0
+#define default_setup_hpet_msi NULL
+
+#endif
+#endif /* _ASM_X86_HPET_H */
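
A quick stand-alone check that the per-timer offset formulas above agree with the fixed HPET_T0/T1/T2 register offsets; the macros are re-declared locally so the snippet compiles on its own.

#include <assert.h>
#include <stdio.h>

#define HPET_Tn_CFG(n)   (0x100 + 0x20 * (n))
#define HPET_Tn_CMP(n)   (0x108 + 0x20 * (n))
#define HPET_Tn_ROUTE(n) (0x110 + 0x20 * (n))

int main(void)
{
	assert(HPET_Tn_CFG(1) == 0x120 && HPET_Tn_CFG(2) == 0x140);     /* HPET_T1/T2_CFG */
	assert(HPET_Tn_CMP(1) == 0x128 && HPET_Tn_ROUTE(2) == 0x150);   /* HPET_T1_CMP, T2_ROUTE */
	printf("per-timer HPET offsets match the fixed defines\n");
	return 0;
}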
diff --git a/kernel/arch/x86/include/asm/hugetlb.h b/kernel/arch/x86/include/asm/hugetlb.h
new file mode 100644
index 000000000..68c05398b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/hugetlb.h
@@ -0,0 +1,99 @@
+#ifndef _ASM_X86_HUGETLB_H
+#define _ASM_X86_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len) {
+ return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_X86_HUGETLB_H */
diff --git a/kernel/arch/x86/include/asm/hw_breakpoint.h b/kernel/arch/x86/include/asm/hw_breakpoint.h
new file mode 100644
index 000000000..6c98be864
--- /dev/null
+++ b/kernel/arch/x86/include/asm/hw_breakpoint.h
@@ -0,0 +1,76 @@
+#ifndef _I386_HW_BREAKPOINT_H
+#define _I386_HW_BREAKPOINT_H
+
+#include <uapi/asm/hw_breakpoint.h>
+
+#define __ARCH_HW_BREAKPOINT_H
+
+/*
+ * The name should probably be something dealt with
+ * at a higher level, while interacting with the user
+ * (display/resolving).
+ */
+struct arch_hw_breakpoint {
+ unsigned long address;
+ unsigned long mask;
+ u8 len;
+ u8 type;
+};
+
+#include <linux/kdebug.h>
+#include <linux/percpu.h>
+#include <linux/list.h>
+
+/* Available HW breakpoint length encodings */
+#define X86_BREAKPOINT_LEN_X 0x40
+#define X86_BREAKPOINT_LEN_1 0x40
+#define X86_BREAKPOINT_LEN_2 0x44
+#define X86_BREAKPOINT_LEN_4 0x4c
+
+#ifdef CONFIG_X86_64
+#define X86_BREAKPOINT_LEN_8 0x48
+#endif
+
+/* Available HW breakpoint type encodings */
+
+/* trigger on instruction execute */
+#define X86_BREAKPOINT_EXECUTE 0x80
+/* trigger on memory write */
+#define X86_BREAKPOINT_WRITE 0x81
+/* trigger on memory read or write */
+#define X86_BREAKPOINT_RW 0x83
+
+/* Total number of available HW breakpoint registers */
+#define HBP_NUM 4
+
+static inline int hw_breakpoint_slots(int type)
+{
+ return HBP_NUM;
+}
+
+struct perf_event;
+struct pmu;
+
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+
+
+int arch_install_hw_breakpoint(struct perf_event *bp);
+void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+void hw_breakpoint_pmu_read(struct perf_event *bp);
+void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);
+
+extern void
+arch_fill_perf_breakpoint(struct perf_event *bp);
+
+unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type);
+int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, unsigned *type);
+
+extern int arch_bp_generic_fields(int x86_len, int x86_type,
+ int *gen_len, int *gen_type);
+
+extern struct pmu perf_ops_bp;
+
+#endif /* _I386_HW_BREAKPOINT_H */
diff --git a/kernel/arch/x86/include/asm/hw_irq.h b/kernel/arch/x86/include/asm/hw_irq.h
new file mode 100644
index 000000000..e9571ddab
--- /dev/null
+++ b/kernel/arch/x86/include/asm/hw_irq.h
@@ -0,0 +1,197 @@
+#ifndef _ASM_X86_HW_IRQ_H
+#define _ASM_X86_HW_IRQ_H
+
+/*
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ * moved some of the old arch/i386/kernel/irq.h to here. VY
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
+ *
+ * hacked by Andi Kleen for x86-64.
+ * unified by tglx
+ */
+
+#include <asm/irq_vectors.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/percpu.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+
+#include <linux/atomic.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+
+/* Interrupt handlers registered during init_IRQ */
+extern asmlinkage void apic_timer_interrupt(void);
+extern asmlinkage void x86_platform_ipi(void);
+extern asmlinkage void kvm_posted_intr_ipi(void);
+extern asmlinkage void error_interrupt(void);
+extern asmlinkage void irq_work_interrupt(void);
+
+extern asmlinkage void spurious_interrupt(void);
+extern asmlinkage void thermal_interrupt(void);
+extern asmlinkage void reschedule_interrupt(void);
+
+extern asmlinkage void invalidate_interrupt(void);
+extern asmlinkage void invalidate_interrupt0(void);
+extern asmlinkage void invalidate_interrupt1(void);
+extern asmlinkage void invalidate_interrupt2(void);
+extern asmlinkage void invalidate_interrupt3(void);
+extern asmlinkage void invalidate_interrupt4(void);
+extern asmlinkage void invalidate_interrupt5(void);
+extern asmlinkage void invalidate_interrupt6(void);
+extern asmlinkage void invalidate_interrupt7(void);
+extern asmlinkage void invalidate_interrupt8(void);
+extern asmlinkage void invalidate_interrupt9(void);
+extern asmlinkage void invalidate_interrupt10(void);
+extern asmlinkage void invalidate_interrupt11(void);
+extern asmlinkage void invalidate_interrupt12(void);
+extern asmlinkage void invalidate_interrupt13(void);
+extern asmlinkage void invalidate_interrupt14(void);
+extern asmlinkage void invalidate_interrupt15(void);
+extern asmlinkage void invalidate_interrupt16(void);
+extern asmlinkage void invalidate_interrupt17(void);
+extern asmlinkage void invalidate_interrupt18(void);
+extern asmlinkage void invalidate_interrupt19(void);
+extern asmlinkage void invalidate_interrupt20(void);
+extern asmlinkage void invalidate_interrupt21(void);
+extern asmlinkage void invalidate_interrupt22(void);
+extern asmlinkage void invalidate_interrupt23(void);
+extern asmlinkage void invalidate_interrupt24(void);
+extern asmlinkage void invalidate_interrupt25(void);
+extern asmlinkage void invalidate_interrupt26(void);
+extern asmlinkage void invalidate_interrupt27(void);
+extern asmlinkage void invalidate_interrupt28(void);
+extern asmlinkage void invalidate_interrupt29(void);
+extern asmlinkage void invalidate_interrupt30(void);
+extern asmlinkage void invalidate_interrupt31(void);
+
+extern asmlinkage void irq_move_cleanup_interrupt(void);
+extern asmlinkage void reboot_interrupt(void);
+extern asmlinkage void threshold_interrupt(void);
+
+extern asmlinkage void call_function_interrupt(void);
+extern asmlinkage void call_function_single_interrupt(void);
+
+#ifdef CONFIG_TRACING
+/* Interrupt handlers registered during init_IRQ */
+extern void trace_apic_timer_interrupt(void);
+extern void trace_x86_platform_ipi(void);
+extern void trace_error_interrupt(void);
+extern void trace_irq_work_interrupt(void);
+extern void trace_spurious_interrupt(void);
+extern void trace_thermal_interrupt(void);
+extern void trace_reschedule_interrupt(void);
+extern void trace_threshold_interrupt(void);
+extern void trace_call_function_interrupt(void);
+extern void trace_call_function_single_interrupt(void);
+#define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt
+#define trace_reboot_interrupt reboot_interrupt
+#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
+#endif /* CONFIG_TRACING */
+
+#ifdef CONFIG_IRQ_REMAP
+/* Intel specific interrupt remapping information */
+struct irq_2_iommu {
+ struct intel_iommu *iommu;
+ u16 irte_index;
+ u16 sub_handle;
+ u8 irte_mask;
+};
+
+/* AMD specific interrupt remapping information */
+struct irq_2_irte {
+ u16 devid; /* Device ID for IRTE table */
+ u16 index; /* Index into IRTE table*/
+};
+#endif /* CONFIG_IRQ_REMAP */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+struct irq_data;
+
+struct irq_cfg {
+ cpumask_var_t domain;
+ cpumask_var_t old_domain;
+ u8 vector;
+ u8 move_in_progress : 1;
+#ifdef CONFIG_IRQ_REMAP
+ u8 remapped : 1;
+ union {
+ struct irq_2_iommu irq_2_iommu;
+ struct irq_2_irte irq_2_irte;
+ };
+#endif
+ union {
+#ifdef CONFIG_X86_IO_APIC
+ struct {
+ struct list_head irq_2_pin;
+ };
+#endif
+ };
+};
+
+extern struct irq_cfg *irq_cfg(unsigned int irq);
+extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
+extern struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
+extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+extern void clear_irq_vector(int irq, struct irq_cfg *cfg);
+extern void setup_vector_irq(int cpu);
+#ifdef CONFIG_SMP
+extern void send_cleanup_vector(struct irq_cfg *);
+extern void irq_complete_move(struct irq_cfg *cfg);
+#else
+static inline void send_cleanup_vector(struct irq_cfg *c) { }
+static inline void irq_complete_move(struct irq_cfg *c) { }
+#endif
+
+extern int apic_retrigger_irq(struct irq_data *data);
+extern void apic_ack_edge(struct irq_data *data);
+extern int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ unsigned int *dest_id);
+#else /* CONFIG_X86_LOCAL_APIC */
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+/* Statistics */
+extern atomic_t irq_err_count;
+extern atomic_t irq_mis_count;
+
+/* EISA */
+extern void eisa_set_level_irq(unsigned int irq);
+
+/* SMP */
+extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
+extern __visible void smp_spurious_interrupt(struct pt_regs *);
+extern __visible void smp_x86_platform_ipi(struct pt_regs *);
+extern __visible void smp_error_interrupt(struct pt_regs *);
+#ifdef CONFIG_X86_IO_APIC
+extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
+#endif
+#ifdef CONFIG_SMP
+extern __visible void smp_reschedule_interrupt(struct pt_regs *);
+extern __visible void smp_call_function_interrupt(struct pt_regs *);
+extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
+extern __visible void smp_invalidate_interrupt(struct pt_regs *);
+#endif
+
+extern char irq_entries_start[];
+#ifdef CONFIG_TRACING
+#define trace_irq_entries_start irq_entries_start
+#endif
+
+#define VECTOR_UNDEFINED (-1)
+#define VECTOR_RETRIGGERED (-2)
+
+typedef int vector_irq_t[NR_VECTORS];
+DECLARE_PER_CPU(vector_irq_t, vector_irq);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_HW_IRQ_H */
diff --git a/kernel/arch/x86/include/asm/hypertransport.h b/kernel/arch/x86/include/asm/hypertransport.h
new file mode 100644
index 000000000..334b1a885
--- /dev/null
+++ b/kernel/arch/x86/include/asm/hypertransport.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_X86_HYPERTRANSPORT_H
+#define _ASM_X86_HYPERTRANSPORT_H
+
+/*
+ * Constants for x86 Hypertransport Interrupts.
+ */
+
+#define HT_IRQ_LOW_BASE 0xf8000000
+
+#define HT_IRQ_LOW_VECTOR_SHIFT 16
+#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
+#define HT_IRQ_LOW_VECTOR(v) \
+ (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
+
+#define HT_IRQ_LOW_DEST_ID_SHIFT 8
+#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
+#define HT_IRQ_LOW_DEST_ID(v) \
+ (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
+
+#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
+#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
+
+#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
+#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
+
+
+#define HT_IRQ_LOW_MT_FIXED 0x0000000
+#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
+#define HT_IRQ_LOW_MT_SMI 0x0000008
+#define HT_IRQ_LOW_MT_NMI 0x000000c
+#define HT_IRQ_LOW_MT_INIT 0x0000010
+#define HT_IRQ_LOW_MT_STARTUP 0x0000014
+#define HT_IRQ_LOW_MT_EXTINT 0x0000018
+#define HT_IRQ_LOW_MT_LINT1 0x000008c
+#define HT_IRQ_LOW_MT_LINT0 0x0000098
+
+#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
+
+
+#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
+#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
+#define HT_IRQ_HIGH_DEST_ID(v) \
+ ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
+
+#endif /* _ASM_X86_HYPERTRANSPORT_H */
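The HT_IRQ_LOW_* macros above each shift a value into its field and mask it, so the low word of a HyperTransport interrupt message is simply an OR of the pieces. A minimal stand-alone sketch (the vector and destination ID below are made-up values, and the constants are copied from the header so the program compiles on its own):

#include <stdio.h>

/* Same field encoding as the HT_IRQ_LOW_* definitions above. */
#define HT_IRQ_LOW_BASE		0xf8000000u
#define HT_IRQ_LOW_VECTOR(v)	(((v) << 16) & 0x00ff0000u)
#define HT_IRQ_LOW_DEST_ID(v)	(((v) << 8)  & 0x0000ff00u)
#define HT_IRQ_LOW_DM_PHYSICAL	0x0000000u
#define HT_IRQ_LOW_RQEOI_EDGE	0x0000000u
#define HT_IRQ_LOW_MT_FIXED	0x0000000u

int main(void)
{
	unsigned int vector = 0x31, dest = 0x02;

	/* Compose the low dword: base bits plus vector and destination fields. */
	unsigned int low = HT_IRQ_LOW_BASE |
			   HT_IRQ_LOW_VECTOR(vector) |
			   HT_IRQ_LOW_DEST_ID(dest) |
			   HT_IRQ_LOW_DM_PHYSICAL |
			   HT_IRQ_LOW_RQEOI_EDGE |
			   HT_IRQ_LOW_MT_FIXED;

	printf("low word = 0x%08x\n", low);	/* prints 0xf8310200 */
	return 0;
}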
diff --git a/kernel/arch/x86/include/asm/hypervisor.h b/kernel/arch/x86/include/asm/hypervisor.h
new file mode 100644
index 000000000..055ea9941
--- /dev/null
+++ b/kernel/arch/x86/include/asm/hypervisor.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2008, VMware, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _ASM_X86_HYPERVISOR_H
+#define _ASM_X86_HYPERVISOR_H
+
+#ifdef CONFIG_HYPERVISOR_GUEST
+
+#include <asm/kvm_para.h>
+#include <asm/xen/hypervisor.h>
+
+/*
+ * x86 hypervisor information
+ */
+struct hypervisor_x86 {
+ /* Hypervisor name */
+ const char *name;
+
+ /* Detection routine */
+ uint32_t (*detect)(void);
+
+ /* Adjust CPU feature bits (run once per CPU) */
+ void (*set_cpu_features)(struct cpuinfo_x86 *);
+
+ /* Platform setup (run once per boot) */
+ void (*init_platform)(void);
+
+ /* X2APIC detection (run once per boot) */
+ bool (*x2apic_available)(void);
+};
+
+extern const struct hypervisor_x86 *x86_hyper;
+
+/* Recognized hypervisors */
+extern const struct hypervisor_x86 x86_hyper_vmware;
+extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
+extern const struct hypervisor_x86 x86_hyper_xen;
+extern const struct hypervisor_x86 x86_hyper_kvm;
+
+extern void init_hypervisor(struct cpuinfo_x86 *c);
+extern void init_hypervisor_platform(void);
+extern bool hypervisor_x2apic_available(void);
+#else
+static inline void init_hypervisor(struct cpuinfo_x86 *c) { }
+static inline void init_hypervisor_platform(void) { }
+static inline bool hypervisor_x2apic_available(void) { return false; }
+#endif /* CONFIG_HYPERVISOR_GUEST */
+#endif /* _ASM_X86_HYPERVISOR_H */
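struct hypervisor_x86 is a table of callbacks; the selection logic lives elsewhere in the kernel. A minimal sketch of how a detection pass over the recognized hypervisors might look, assuming detect() returns a non-zero priority when its hypervisor is present (the candidate array and its ordering here are illustrative, not the kernel's exact code):

/* Illustrative sketch only: iterate the recognized hypervisors and keep
 * the one whose detect() callback reports the highest non-zero priority. */
static const struct hypervisor_x86 *detect_hypervisor_sketch(void)
{
	const struct hypervisor_x86 *candidates[] = {
		&x86_hyper_xen, &x86_hyper_vmware,
		&x86_hyper_ms_hyperv, &x86_hyper_kvm,
	};
	const struct hypervisor_x86 *best = NULL;
	uint32_t best_pri = 0, pri;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(candidates); i++) {
		pri = candidates[i]->detect();
		if (pri > best_pri) {
			best_pri = pri;
			best = candidates[i];
		}
	}
	return best;	/* NULL means "bare metal" */
}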
diff --git a/kernel/arch/x86/include/asm/i387.h b/kernel/arch/x86/include/asm/i387.h
new file mode 100644
index 000000000..6eb6fcb83
--- /dev/null
+++ b/kernel/arch/x86/include/asm/i387.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef _ASM_X86_I387_H
+#define _ASM_X86_I387_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+
+struct pt_regs;
+struct user_i387_struct;
+
+extern int init_fpu(struct task_struct *child);
+extern void fpu_finit(struct fpu *fpu);
+extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
+extern void math_state_restore(void);
+
+extern bool irq_fpu_usable(void);
+
+/*
+ * Careful: __kernel_fpu_begin/end() must be called with preempt disabled;
+ * they don't touch the preempt state on their own.
+ * If you enable preemption after __kernel_fpu_begin(), a preempt notifier
+ * should call __kernel_fpu_end() to prevent the kernel/user FPU state
+ * from getting corrupted. KVM, for example, uses this model.
+ *
+ * All other cases use kernel_fpu_begin/end() which disable preemption
+ * during kernel FPU usage.
+ */
+extern void __kernel_fpu_begin(void);
+extern void __kernel_fpu_end(void);
+
+static inline void kernel_fpu_begin(void)
+{
+ preempt_disable();
+ WARN_ON_ONCE(!irq_fpu_usable());
+ __kernel_fpu_begin();
+}
+
+static inline void kernel_fpu_end(void)
+{
+ __kernel_fpu_end();
+ preempt_enable();
+}
+
+/* Must be called with preempt disabled */
+extern void kernel_fpu_disable(void);
+extern void kernel_fpu_enable(void);
+
+/*
+ * Some instructions, like VIA's padlock instructions, generate a spurious
+ * DNA fault but don't modify the SSE registers. These instructions are
+ * also used from interrupt context. To prevent such kernel instructions in
+ * interrupt context from interacting wrongly with other user/kernel FPU
+ * usage, they should only be used within irq_ts_save()/irq_ts_restore().
+ */
+static inline int irq_ts_save(void)
+{
+ /*
+ * If in process context and not atomic, we can take a spurious DNA fault.
+ * Otherwise, doing clts() in process context requires disabling preemption
+ * or some heavy lifting like kernel_fpu_begin()
+ */
+ if (!in_atomic())
+ return 0;
+
+ if (read_cr0() & X86_CR0_TS) {
+ clts();
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void irq_ts_restore(int TS_state)
+{
+ if (TS_state)
+ stts();
+}
+
+/*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ */
+static inline int user_has_fpu(void)
+{
+ return current->thread.fpu.has_fpu;
+}
+
+extern void unlazy_fpu(struct task_struct *tsk);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_I387_H */
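The comments above prescribe two usage patterns: kernel_fpu_begin()/kernel_fpu_end() around ordinary in-kernel SIMD work, and irq_ts_save()/irq_ts_restore() around instructions that may raise a spurious DNA fault. A hedged sketch of both, assuming kernel context; the actual FPU work is a placeholder:

/* Sketch: ordinary kernel FPU/SIMD section. kernel_fpu_begin() disables
 * preemption and warns if the FPU is not usable in this context. */
static void do_simd_work_sketch(void)
{
	if (!irq_fpu_usable())
		return;		/* fall back to a non-SIMD path */

	kernel_fpu_begin();
	/* ... SSE/AVX work goes here (placeholder) ... */
	kernel_fpu_end();
}

/* Sketch: guarding a padlock-style instruction that can raise a spurious
 * DNA fault, as the comment above recommends. */
static void do_padlock_work_sketch(void)
{
	int ts = irq_ts_save();
	/* ... issue the instruction here (placeholder) ... */
	irq_ts_restore(ts);
}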
diff --git a/kernel/arch/x86/include/asm/i8259.h b/kernel/arch/x86/include/asm/i8259.h
new file mode 100644
index 000000000..ccffa5375
--- /dev/null
+++ b/kernel/arch/x86/include/asm/i8259.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_X86_I8259_H
+#define _ASM_X86_I8259_H
+
+#include <linux/delay.h>
+
+extern unsigned int cached_irq_mask;
+
+#define __byte(x, y) (((unsigned char *)&(y))[x])
+#define cached_master_mask (__byte(0, cached_irq_mask))
+#define cached_slave_mask (__byte(1, cached_irq_mask))
+
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD 0x20
+#define PIC_MASTER_IMR 0x21
+#define PIC_MASTER_ISR PIC_MASTER_CMD
+#define PIC_MASTER_POLL PIC_MASTER_ISR
+#define PIC_MASTER_OCW3 PIC_MASTER_ISR
+#define PIC_SLAVE_CMD 0xa0
+#define PIC_SLAVE_IMR 0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR 2
+#define MASTER_ICW4_DEFAULT 0x01
+#define SLAVE_ICW4_DEFAULT 0x01
+#define PIC_ICW4_AEOI 2
+
+extern raw_spinlock_t i8259A_lock;
+
+/* the PIC may need a careful delay on some platforms, hence specific calls */
+static inline unsigned char inb_pic(unsigned int port)
+{
+ unsigned char value = inb(port);
+
+ /*
+ * the delay for some accesses to the PIC on the motherboard or in the
+ * chipset must be at least one microsecond, so be safe here:
+ */
+ udelay(2);
+
+ return value;
+}
+
+static inline void outb_pic(unsigned char value, unsigned int port)
+{
+ outb(value, port);
+ /*
+ * the delay for some accesses to the PIC on the motherboard or in the
+ * chipset must be at least one microsecond, so be safe here:
+ */
+ udelay(2);
+}
+
+extern struct irq_chip i8259A_chip;
+
+struct legacy_pic {
+ int nr_legacy_irqs;
+ struct irq_chip *chip;
+ void (*mask)(unsigned int irq);
+ void (*unmask)(unsigned int irq);
+ void (*mask_all)(void);
+ void (*restore_mask)(void);
+ void (*init)(int auto_eoi);
+ int (*irq_pending)(unsigned int irq);
+ void (*make_irq)(unsigned int irq);
+};
+
+extern struct legacy_pic *legacy_pic;
+extern struct legacy_pic null_legacy_pic;
+
+static inline int nr_legacy_irqs(void)
+{
+ return legacy_pic->nr_legacy_irqs;
+}
+
+#endif /* _ASM_X86_I8259_H */
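inb_pic()/outb_pic() are plain port accessors with a settle delay; callers are expected to serialize PIC access themselves. A hedged usage sketch, assuming i8259A_lock (exported above) is the lock to hold and that masking is done by updating the cached mask and rewriting the IMR:

/* Sketch: mask IRQ line 3 on the master PIC by setting its bit in the
 * cached mask and writing the mask register, under i8259A_lock. */
static void mask_master_irq3_sketch(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&i8259A_lock, flags);
	cached_master_mask |= 1 << 3;	/* __byte(0, cached_irq_mask) */
	outb_pic(cached_master_mask, PIC_MASTER_IMR);
	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}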
diff --git a/kernel/arch/x86/include/asm/ia32.h b/kernel/arch/x86/include/asm/ia32.h
new file mode 100644
index 000000000..d0e8e0141
--- /dev/null
+++ b/kernel/arch/x86/include/asm/ia32.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_X86_IA32_H
+#define _ASM_X86_IA32_H
+
+
+#ifdef CONFIG_IA32_EMULATION
+
+#include <linux/compat.h>
+
+/*
+ * 32 bit structures for IA32 support.
+ */
+
+#include <asm/sigcontext32.h>
+
+/* signal.h */
+
+struct ucontext_ia32 {
+ unsigned int uc_flags;
+ unsigned int uc_link;
+ compat_stack_t uc_stack;
+ struct sigcontext_ia32 uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct ucontext_x32 {
+ unsigned int uc_flags;
+ unsigned int uc_link;
+ compat_stack_t uc_stack;
+ unsigned int uc__pad0; /* needed for alignment */
+ struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+/* This matches struct stat64 in glibc2.2, hence the absolutely
+ * insane amounts of padding around dev_t's.
+ */
+struct stat64 {
+ unsigned long long st_dev;
+ unsigned char __pad0[4];
+
+#define STAT64_HAS_BROKEN_ST_INO 1
+ unsigned int __st_ino;
+
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned int st_uid;
+ unsigned int st_gid;
+
+ unsigned long long st_rdev;
+ unsigned char __pad3[4];
+
+ long long st_size;
+ unsigned int st_blksize;
+
+ long long st_blocks; /* Number of 512-byte blocks allocated */
+
+ unsigned st_atime;
+ unsigned st_atime_nsec;
+ unsigned st_mtime;
+ unsigned st_mtime_nsec;
+ unsigned st_ctime;
+ unsigned st_ctime_nsec;
+
+ unsigned long long st_ino;
+} __attribute__((packed));
+
+#define IA32_STACK_TOP IA32_PAGE_OFFSET
+
+#ifdef __KERNEL__
+struct linux_binprm;
+extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
+ unsigned long stack_top, int exec_stack);
+struct mm_struct;
+extern void ia32_pick_mmap_layout(struct mm_struct *mm);
+
+#endif
+
+#endif /* CONFIG_IA32_EMULATION */
+
+#endif /* _ASM_X86_IA32_H */
diff --git a/kernel/arch/x86/include/asm/ia32_unistd.h b/kernel/arch/x86/include/asm/ia32_unistd.h
new file mode 100644
index 000000000..b0d5716ca
--- /dev/null
+++ b/kernel/arch/x86/include/asm/ia32_unistd.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_X86_IA32_UNISTD_H
+#define _ASM_X86_IA32_UNISTD_H
+
+/*
+ * This file contains the system call numbers of the ia32 compat ABI,
+ * this is for the kernel only.
+ */
+#define __SYSCALL_ia32_NR(x) (x)
+#include <asm/unistd_32_ia32.h>
+
+#endif /* _ASM_X86_IA32_UNISTD_H */
diff --git a/kernel/arch/x86/include/asm/idle.h b/kernel/arch/x86/include/asm/idle.h
new file mode 100644
index 000000000..c5d178537
--- /dev/null
+++ b/kernel/arch/x86/include/asm/idle.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_X86_IDLE_H
+#define _ASM_X86_IDLE_H
+
+#define IDLE_START 1
+#define IDLE_END 2
+
+struct notifier_block;
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+
+#ifdef CONFIG_X86_64
+void enter_idle(void);
+void exit_idle(void);
+#else /* !CONFIG_X86_64 */
+static inline void enter_idle(void) { }
+static inline void exit_idle(void) { }
+static inline void __exit_idle(void) { }
+#endif /* CONFIG_X86_64 */
+
+void amd_e400_remove_cpu(int cpu);
+
+#endif /* _ASM_X86_IDLE_H */
diff --git a/kernel/arch/x86/include/asm/imr.h b/kernel/arch/x86/include/asm/imr.h
new file mode 100644
index 000000000..cd2ce4068
--- /dev/null
+++ b/kernel/arch/x86/include/asm/imr.h
@@ -0,0 +1,60 @@
+/*
+ * imr.h: Isolated Memory Region API
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _IMR_H
+#define _IMR_H
+
+#include <linux/types.h>
+
+/*
+ * IMR agent access mask bits
+ * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register
+ * definitions.
+ */
+#define IMR_ESRAM_FLUSH BIT(31)
+#define IMR_CPU_SNOOP BIT(30) /* Applicable only to write */
+#define IMR_RMU BIT(29)
+#define IMR_VC1_SAI_ID3 BIT(15)
+#define IMR_VC1_SAI_ID2 BIT(14)
+#define IMR_VC1_SAI_ID1 BIT(13)
+#define IMR_VC1_SAI_ID0 BIT(12)
+#define IMR_VC0_SAI_ID3 BIT(11)
+#define IMR_VC0_SAI_ID2 BIT(10)
+#define IMR_VC0_SAI_ID1 BIT(9)
+#define IMR_VC0_SAI_ID0 BIT(8)
+#define IMR_CPU_0 BIT(1) /* SMM mode */
+#define IMR_CPU BIT(0) /* Non SMM mode */
+#define IMR_ACCESS_NONE 0
+
+/*
+ * Read/Write access-all bits here include some reserved bits.
+ * These are the values firmware uses and are accepted by hardware.
+ * The kernel defines read/write access-all in the same way as firmware
+ * in order to have a consistent and crisp definition across firmware,
+ * bootloader and kernel.
+ */
+#define IMR_READ_ACCESS_ALL 0xBFFFFFFF
+#define IMR_WRITE_ACCESS_ALL 0xFFFFFFFF
+
+/* Number of IMRs provided by Quark X1000 SoC */
+#define QUARK_X1000_IMR_MAX 0x08
+#define QUARK_X1000_IMR_REGBASE 0x40
+
+/* IMR alignment bits - only bits 31:10 are checked for IMR validity */
+#define IMR_ALIGN 0x400
+#define IMR_MASK (IMR_ALIGN - 1)
+
+int imr_add_range(phys_addr_t base, size_t size,
+ unsigned int rmask, unsigned int wmask, bool lock);
+
+int imr_remove_range(phys_addr_t base, size_t size);
+
+#endif /* _IMR_H */
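imr_add_range() operates on physical ranges that must respect the 1 KiB granularity implied by IMR_ALIGN/IMR_MASK. A hedged usage sketch, assuming kernel context; the base address and size are made-up values:

/* Sketch: lock down a 64 KiB region for CPU-only access. The address and
 * size here are illustrative; both must be IMR_ALIGN (1 KiB) aligned. */
static int protect_region_sketch(void)
{
	phys_addr_t base = 0x00100000;	/* example: 1 MiB */
	size_t size = 0x10000;		/* example: 64 KiB */

	if ((base & IMR_MASK) || (size & IMR_MASK))
		return -EINVAL;		/* not 1 KiB aligned */

	/* Read and write restricted to the CPU core agent, locked until reset;
	 * the return convention (0 or negative errno) is assumed here. */
	return imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
}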
diff --git a/kernel/arch/x86/include/asm/inat.h b/kernel/arch/x86/include/asm/inat.h
new file mode 100644
index 000000000..74a2e312e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/inat.h
@@ -0,0 +1,221 @@
+#ifndef _ASM_X86_INAT_H
+#define _ASM_X86_INAT_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <asm/inat_types.h>
+
+/*
+ * Internal bits. Don't use bitmasks directly, because these bits are
+ * unstable. You should use checking functions.
+ */
+
+#define INAT_OPCODE_TABLE_SIZE 256
+#define INAT_GROUP_TABLE_SIZE 8
+
+/* Legacy last prefixes */
+#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */
+#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */
+#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */
+/* Other Legacy prefixes */
+#define INAT_PFX_LOCK 4 /* 0xF0 */
+#define INAT_PFX_CS 5 /* 0x2E */
+#define INAT_PFX_DS 6 /* 0x3E */
+#define INAT_PFX_ES 7 /* 0x26 */
+#define INAT_PFX_FS 8 /* 0x64 */
+#define INAT_PFX_GS 9 /* 0x65 */
+#define INAT_PFX_SS 10 /* 0x36 */
+#define INAT_PFX_ADDRSZ 11 /* 0x67 */
+/* x86-64 REX prefix */
+#define INAT_PFX_REX 12 /* 0x4X */
+/* AVX VEX prefixes */
+#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
+#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
+
+#define INAT_LSTPFX_MAX 3
+#define INAT_LGCPFX_MAX 11
+
+/* Immediate size */
+#define INAT_IMM_BYTE 1
+#define INAT_IMM_WORD 2
+#define INAT_IMM_DWORD 3
+#define INAT_IMM_QWORD 4
+#define INAT_IMM_PTR 5
+#define INAT_IMM_VWORD32 6
+#define INAT_IMM_VWORD 7
+
+/* Legacy prefix */
+#define INAT_PFX_OFFS 0
+#define INAT_PFX_BITS 4
+#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
+#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
+/* Escape opcodes */
+#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS)
+#define INAT_ESC_BITS 2
+#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1)
+#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS)
+/* Group opcodes (1-16) */
+#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS)
+#define INAT_GRP_BITS 5
+#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1)
+#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS)
+/* Immediates */
+#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS)
+#define INAT_IMM_BITS 3
+#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
+/* Flags */
+#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
+#define INAT_MODRM (1 << (INAT_FLAG_OFFS))
+#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1))
+#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2))
+#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
+#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
+#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
+#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
+/* Attribute making macros for attribute tables */
+#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
+#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
+#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
+#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
+
+/* Attribute search APIs */
+extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
+extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
+ int lpfx_id,
+ insn_attr_t esc_attr);
+extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
+ int lpfx_id,
+ insn_attr_t esc_attr);
+extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
+ insn_byte_t vex_m,
+ insn_byte_t vex_pp);
+
+/* Attribute checking functions */
+static inline int inat_is_legacy_prefix(insn_attr_t attr)
+{
+ attr &= INAT_PFX_MASK;
+ return attr && attr <= INAT_LGCPFX_MAX;
+}
+
+static inline int inat_is_address_size_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
+}
+
+static inline int inat_is_operand_size_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
+}
+
+static inline int inat_is_rex_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
+}
+
+static inline int inat_last_prefix_id(insn_attr_t attr)
+{
+ if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
+ return 0;
+ else
+ return attr & INAT_PFX_MASK;
+}
+
+static inline int inat_is_vex_prefix(insn_attr_t attr)
+{
+ attr &= INAT_PFX_MASK;
+ return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3;
+}
+
+static inline int inat_is_vex3_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
+}
+
+static inline int inat_is_escape(insn_attr_t attr)
+{
+ return attr & INAT_ESC_MASK;
+}
+
+static inline int inat_escape_id(insn_attr_t attr)
+{
+ return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
+}
+
+static inline int inat_is_group(insn_attr_t attr)
+{
+ return attr & INAT_GRP_MASK;
+}
+
+static inline int inat_group_id(insn_attr_t attr)
+{
+ return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
+}
+
+static inline int inat_group_common_attribute(insn_attr_t attr)
+{
+ return attr & ~INAT_GRP_MASK;
+}
+
+static inline int inat_has_immediate(insn_attr_t attr)
+{
+ return attr & INAT_IMM_MASK;
+}
+
+static inline int inat_immediate_size(insn_attr_t attr)
+{
+ return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
+}
+
+static inline int inat_has_modrm(insn_attr_t attr)
+{
+ return attr & INAT_MODRM;
+}
+
+static inline int inat_is_force64(insn_attr_t attr)
+{
+ return attr & INAT_FORCE64;
+}
+
+static inline int inat_has_second_immediate(insn_attr_t attr)
+{
+ return attr & INAT_SCNDIMM;
+}
+
+static inline int inat_has_moffset(insn_attr_t attr)
+{
+ return attr & INAT_MOFFSET;
+}
+
+static inline int inat_has_variant(insn_attr_t attr)
+{
+ return attr & INAT_VARIANT;
+}
+
+static inline int inat_accept_vex(insn_attr_t attr)
+{
+ return attr & INAT_VEXOK;
+}
+
+static inline int inat_must_vex(insn_attr_t attr)
+{
+ return attr & INAT_VEXONLY;
+}
+#endif
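The attribute word packs several fields at fixed offsets: prefix id, escape id, group id, immediate size, and flags. A small stand-alone check, copying the same offsets as above so it compiles on its own, shows that INAT_MAKE_GROUP() and the group-id extraction round-trip and that INAT_MODRM is set implicitly for groups:

#include <assert.h>
#include <stdio.h>

/* Same bit layout as the inat.h definitions above. */
#define INAT_PFX_OFFS	0
#define INAT_PFX_BITS	4
#define INAT_ESC_OFFS	(INAT_PFX_OFFS + INAT_PFX_BITS)	/* 4 */
#define INAT_ESC_BITS	2
#define INAT_GRP_OFFS	(INAT_ESC_OFFS + INAT_ESC_BITS)	/* 6 */
#define INAT_GRP_BITS	5
#define INAT_GRP_MASK	(((1 << INAT_GRP_BITS) - 1) << INAT_GRP_OFFS)
#define INAT_IMM_OFFS	(INAT_GRP_OFFS + INAT_GRP_BITS)	/* 11 */
#define INAT_IMM_BITS	3
#define INAT_FLAG_OFFS	(INAT_IMM_OFFS + INAT_IMM_BITS)	/* 14 */
#define INAT_MODRM	(1 << INAT_FLAG_OFFS)
#define INAT_MAKE_GROUP(grp)	((grp << INAT_GRP_OFFS) | INAT_MODRM)

int main(void)
{
	unsigned int attr = INAT_MAKE_GROUP(5);	/* e.g. opcode group 5 */

	/* Group encoding implies a ModRM byte and round-trips the group id. */
	assert(attr & INAT_MODRM);
	assert(((attr & INAT_GRP_MASK) >> INAT_GRP_OFFS) == 5);
	printf("attr = 0x%05x, group id = %u\n",
	       attr, (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS);
	return 0;
}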
diff --git a/kernel/arch/x86/include/asm/inat_types.h b/kernel/arch/x86/include/asm/inat_types.h
new file mode 100644
index 000000000..cb3c20ce3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/inat_types.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_X86_INAT_TYPES_H
+#define _ASM_X86_INAT_TYPES_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+/* Instruction attributes */
+typedef unsigned int insn_attr_t;
+typedef unsigned char insn_byte_t;
+typedef signed int insn_value_t;
+
+#endif
diff --git a/kernel/arch/x86/include/asm/init.h b/kernel/arch/x86/include/asm/init.h
new file mode 100644
index 000000000..223042086
--- /dev/null
+++ b/kernel/arch/x86/include/asm/init.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_INIT_H
+#define _ASM_X86_INIT_H
+
+struct x86_mapping_info {
+ void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
+ void *context; /* context for alloc_pgt_page */
+ unsigned long pmd_flag; /* page flag for PMD entry */
+ bool kernel_mapping; /* kernel mapping or ident mapping */
+};
+
+int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
+ unsigned long addr, unsigned long end);
+
+#endif /* _ASM_X86_INIT_H */
diff --git a/kernel/arch/x86/include/asm/insn.h b/kernel/arch/x86/include/asm/insn.h
new file mode 100644
index 000000000..e7814b74c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/insn.h
@@ -0,0 +1,201 @@
+#ifndef _ASM_X86_INSN_H
+#define _ASM_X86_INSN_H
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ */
+
+/* insn_attr_t is defined in inat.h */
+#include <asm/inat.h>
+
+struct insn_field {
+ union {
+ insn_value_t value;
+ insn_byte_t bytes[4];
+ };
+ /* !0 if we've run insn_get_xxx() for this field */
+ unsigned char got;
+ unsigned char nbytes;
+};
+
+struct insn {
+ struct insn_field prefixes; /*
+ * Prefixes
+ * prefixes.bytes[3]: last prefix
+ */
+ struct insn_field rex_prefix; /* REX prefix */
+ struct insn_field vex_prefix; /* VEX prefix */
+ struct insn_field opcode; /*
+ * opcode.bytes[0]: opcode1
+ * opcode.bytes[1]: opcode2
+ * opcode.bytes[2]: opcode3
+ */
+ struct insn_field modrm;
+ struct insn_field sib;
+ struct insn_field displacement;
+ union {
+ struct insn_field immediate;
+ struct insn_field moffset1; /* for 64bit MOV */
+ struct insn_field immediate1; /* for 64bit imm or off16/32 */
+ };
+ union {
+ struct insn_field moffset2; /* for 64bit MOV */
+ struct insn_field immediate2; /* for 64bit imm or seg16 */
+ };
+
+ insn_attr_t attr;
+ unsigned char opnd_bytes;
+ unsigned char addr_bytes;
+ unsigned char length;
+ unsigned char x86_64;
+
+ const insn_byte_t *kaddr; /* kernel address of insn to analyze */
+ const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */
+ const insn_byte_t *next_byte;
+};
+
+#define MAX_INSN_SIZE 15
+
+#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
+
+#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
+#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
+#define X86_SIB_BASE(sib) ((sib) & 0x07)
+
+#define X86_REX_W(rex) ((rex) & 8)
+#define X86_REX_R(rex) ((rex) & 4)
+#define X86_REX_X(rex) ((rex) & 2)
+#define X86_REX_B(rex) ((rex) & 1)
+
+/* VEX bit flags */
+#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */
+#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */
+#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */
+#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
+#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
+/* VEX bit fields */
+#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
+#define X86_VEX2_M 1 /* VEX2.M always 1 */
+#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
+
+extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
+extern void insn_get_prefixes(struct insn *insn);
+extern void insn_get_opcode(struct insn *insn);
+extern void insn_get_modrm(struct insn *insn);
+extern void insn_get_sib(struct insn *insn);
+extern void insn_get_displacement(struct insn *insn);
+extern void insn_get_immediate(struct insn *insn);
+extern void insn_get_length(struct insn *insn);
+
+/* Attribute will be determined after getting ModRM (for opcode groups) */
+static inline void insn_get_attribute(struct insn *insn)
+{
+ insn_get_modrm(insn);
+}
+
+/* Instruction uses RIP-relative addressing */
+extern int insn_rip_relative(struct insn *insn);
+
+/* Init insn for kernel text */
+static inline void kernel_insn_init(struct insn *insn,
+ const void *kaddr, int buf_len)
+{
+#ifdef CONFIG_X86_64
+ insn_init(insn, kaddr, buf_len, 1);
+#else /* CONFIG_X86_32 */
+ insn_init(insn, kaddr, buf_len, 0);
+#endif
+}
+
+static inline int insn_is_avx(struct insn *insn)
+{
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+ return (insn->vex_prefix.value != 0);
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+ insn->displacement.got && insn->immediate.got;
+}
+
+static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
+{
+ if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
+ return X86_VEX2_M;
+ else
+ return X86_VEX3_M(insn->vex_prefix.bytes[1]);
+}
+
+static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
+{
+ if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
+ return X86_VEX_P(insn->vex_prefix.bytes[1]);
+ else
+ return X86_VEX_P(insn->vex_prefix.bytes[2]);
+}
+
+/* Get the last prefix id from last prefix or VEX prefix */
+static inline int insn_last_prefix_id(struct insn *insn)
+{
+ if (insn_is_avx(insn))
+ return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
+
+ if (insn->prefixes.bytes[3])
+ return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
+
+ return 0;
+}
+
+/* Offset of each field from kaddr */
+static inline int insn_offset_rex_prefix(struct insn *insn)
+{
+ return insn->prefixes.nbytes;
+}
+static inline int insn_offset_vex_prefix(struct insn *insn)
+{
+ return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
+}
+static inline int insn_offset_opcode(struct insn *insn)
+{
+ return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
+}
+static inline int insn_offset_modrm(struct insn *insn)
+{
+ return insn_offset_opcode(insn) + insn->opcode.nbytes;
+}
+static inline int insn_offset_sib(struct insn *insn)
+{
+ return insn_offset_modrm(insn) + insn->modrm.nbytes;
+}
+static inline int insn_offset_displacement(struct insn *insn)
+{
+ return insn_offset_sib(insn) + insn->sib.nbytes;
+}
+static inline int insn_offset_immediate(struct insn *insn)
+{
+ return insn_offset_displacement(insn) + insn->displacement.nbytes;
+}
+
+#endif /* _ASM_X86_INSN_H */
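The decoder is used by initializing a struct insn over a byte buffer and then asking for the fields needed; each insn_get_*() call decodes its prerequisites first. A hedged sketch, assuming kernel context, of measuring the length of the instruction at a kernel-text address:

/* Sketch: decode the instruction at 'addr' (kernel text) and report its
 * length. insn_get_length() internally decodes all preceding fields. */
static int insn_length_sketch(void *addr)
{
	struct insn insn;

	kernel_insn_init(&insn, addr, MAX_INSN_SIZE);
	insn_get_length(&insn);

	if (!insn_complete(&insn))
		return -EINVAL;		/* buffer too short or bad encoding */

	return insn.length;
}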
diff --git a/kernel/arch/x86/include/asm/inst.h b/kernel/arch/x86/include/asm/inst.h
new file mode 100644
index 000000000..3e115273e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/inst.h
@@ -0,0 +1,310 @@
+/*
+ * Generate .byte code for some instructions not supported by old
+ * binutils.
+ */
+#ifndef X86_ASM_INST_H
+#define X86_ASM_INST_H
+
+#ifdef __ASSEMBLY__
+
+#define REG_NUM_INVALID 100
+
+#define REG_TYPE_R32 0
+#define REG_TYPE_R64 1
+#define REG_TYPE_XMM 2
+#define REG_TYPE_INVALID 100
+
+ .macro R32_NUM opd r32
+ \opd = REG_NUM_INVALID
+ .ifc \r32,%eax
+ \opd = 0
+ .endif
+ .ifc \r32,%ecx
+ \opd = 1
+ .endif
+ .ifc \r32,%edx
+ \opd = 2
+ .endif
+ .ifc \r32,%ebx
+ \opd = 3
+ .endif
+ .ifc \r32,%esp
+ \opd = 4
+ .endif
+ .ifc \r32,%ebp
+ \opd = 5
+ .endif
+ .ifc \r32,%esi
+ \opd = 6
+ .endif
+ .ifc \r32,%edi
+ \opd = 7
+ .endif
+#ifdef CONFIG_X86_64
+ .ifc \r32,%r8d
+ \opd = 8
+ .endif
+ .ifc \r32,%r9d
+ \opd = 9
+ .endif
+ .ifc \r32,%r10d
+ \opd = 10
+ .endif
+ .ifc \r32,%r11d
+ \opd = 11
+ .endif
+ .ifc \r32,%r12d
+ \opd = 12
+ .endif
+ .ifc \r32,%r13d
+ \opd = 13
+ .endif
+ .ifc \r32,%r14d
+ \opd = 14
+ .endif
+ .ifc \r32,%r15d
+ \opd = 15
+ .endif
+#endif
+ .endm
+
+ .macro R64_NUM opd r64
+ \opd = REG_NUM_INVALID
+#ifdef CONFIG_X86_64
+ .ifc \r64,%rax
+ \opd = 0
+ .endif
+ .ifc \r64,%rcx
+ \opd = 1
+ .endif
+ .ifc \r64,%rdx
+ \opd = 2
+ .endif
+ .ifc \r64,%rbx
+ \opd = 3
+ .endif
+ .ifc \r64,%rsp
+ \opd = 4
+ .endif
+ .ifc \r64,%rbp
+ \opd = 5
+ .endif
+ .ifc \r64,%rsi
+ \opd = 6
+ .endif
+ .ifc \r64,%rdi
+ \opd = 7
+ .endif
+ .ifc \r64,%r8
+ \opd = 8
+ .endif
+ .ifc \r64,%r9
+ \opd = 9
+ .endif
+ .ifc \r64,%r10
+ \opd = 10
+ .endif
+ .ifc \r64,%r11
+ \opd = 11
+ .endif
+ .ifc \r64,%r12
+ \opd = 12
+ .endif
+ .ifc \r64,%r13
+ \opd = 13
+ .endif
+ .ifc \r64,%r14
+ \opd = 14
+ .endif
+ .ifc \r64,%r15
+ \opd = 15
+ .endif
+#endif
+ .endm
+
+ .macro XMM_NUM opd xmm
+ \opd = REG_NUM_INVALID
+ .ifc \xmm,%xmm0
+ \opd = 0
+ .endif
+ .ifc \xmm,%xmm1
+ \opd = 1
+ .endif
+ .ifc \xmm,%xmm2
+ \opd = 2
+ .endif
+ .ifc \xmm,%xmm3
+ \opd = 3
+ .endif
+ .ifc \xmm,%xmm4
+ \opd = 4
+ .endif
+ .ifc \xmm,%xmm5
+ \opd = 5
+ .endif
+ .ifc \xmm,%xmm6
+ \opd = 6
+ .endif
+ .ifc \xmm,%xmm7
+ \opd = 7
+ .endif
+ .ifc \xmm,%xmm8
+ \opd = 8
+ .endif
+ .ifc \xmm,%xmm9
+ \opd = 9
+ .endif
+ .ifc \xmm,%xmm10
+ \opd = 10
+ .endif
+ .ifc \xmm,%xmm11
+ \opd = 11
+ .endif
+ .ifc \xmm,%xmm12
+ \opd = 12
+ .endif
+ .ifc \xmm,%xmm13
+ \opd = 13
+ .endif
+ .ifc \xmm,%xmm14
+ \opd = 14
+ .endif
+ .ifc \xmm,%xmm15
+ \opd = 15
+ .endif
+ .endm
+
+ .macro REG_TYPE type reg
+ R32_NUM reg_type_r32 \reg
+ R64_NUM reg_type_r64 \reg
+ XMM_NUM reg_type_xmm \reg
+ .if reg_type_r64 <> REG_NUM_INVALID
+ \type = REG_TYPE_R64
+ .elseif reg_type_r32 <> REG_NUM_INVALID
+ \type = REG_TYPE_R32
+ .elseif reg_type_xmm <> REG_NUM_INVALID
+ \type = REG_TYPE_XMM
+ .else
+ \type = REG_TYPE_INVALID
+ .endif
+ .endm
+
+ .macro PFX_OPD_SIZE
+ .byte 0x66
+ .endm
+
+ .macro PFX_REX opd1 opd2 W=0
+ .if ((\opd1 | \opd2) & 8) || \W
+ .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
+ .endif
+ .endm
+
+ .macro MODRM mod opd1 opd2
+ .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
+ .endm
+
+ .macro PSHUFB_XMM xmm1 xmm2
+ XMM_NUM pshufb_opd1 \xmm1
+ XMM_NUM pshufb_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX pshufb_opd1 pshufb_opd2
+ .byte 0x0f, 0x38, 0x00
+ MODRM 0xc0 pshufb_opd1 pshufb_opd2
+ .endm
+
+ .macro PCLMULQDQ imm8 xmm1 xmm2
+ XMM_NUM clmul_opd1 \xmm1
+ XMM_NUM clmul_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX clmul_opd1 clmul_opd2
+ .byte 0x0f, 0x3a, 0x44
+ MODRM 0xc0 clmul_opd1 clmul_opd2
+ .byte \imm8
+ .endm
+
+ .macro PEXTRD imm8 xmm gpr
+ R32_NUM extrd_opd1 \gpr
+ XMM_NUM extrd_opd2 \xmm
+ PFX_OPD_SIZE
+ PFX_REX extrd_opd1 extrd_opd2
+ .byte 0x0f, 0x3a, 0x16
+ MODRM 0xc0 extrd_opd1 extrd_opd2
+ .byte \imm8
+ .endm
+
+ .macro AESKEYGENASSIST rcon xmm1 xmm2
+ XMM_NUM aeskeygen_opd1 \xmm1
+ XMM_NUM aeskeygen_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aeskeygen_opd1 aeskeygen_opd2
+ .byte 0x0f, 0x3a, 0xdf
+ MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2
+ .byte \rcon
+ .endm
+
+ .macro AESIMC xmm1 xmm2
+ XMM_NUM aesimc_opd1 \xmm1
+ XMM_NUM aesimc_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesimc_opd1 aesimc_opd2
+ .byte 0x0f, 0x38, 0xdb
+ MODRM 0xc0 aesimc_opd1 aesimc_opd2
+ .endm
+
+ .macro AESENC xmm1 xmm2
+ XMM_NUM aesenc_opd1 \xmm1
+ XMM_NUM aesenc_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesenc_opd1 aesenc_opd2
+ .byte 0x0f, 0x38, 0xdc
+ MODRM 0xc0 aesenc_opd1 aesenc_opd2
+ .endm
+
+ .macro AESENCLAST xmm1 xmm2
+ XMM_NUM aesenclast_opd1 \xmm1
+ XMM_NUM aesenclast_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesenclast_opd1 aesenclast_opd2
+ .byte 0x0f, 0x38, 0xdd
+ MODRM 0xc0 aesenclast_opd1 aesenclast_opd2
+ .endm
+
+ .macro AESDEC xmm1 xmm2
+ XMM_NUM aesdec_opd1 \xmm1
+ XMM_NUM aesdec_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesdec_opd1 aesdec_opd2
+ .byte 0x0f, 0x38, 0xde
+ MODRM 0xc0 aesdec_opd1 aesdec_opd2
+ .endm
+
+ .macro AESDECLAST xmm1 xmm2
+ XMM_NUM aesdeclast_opd1 \xmm1
+ XMM_NUM aesdeclast_opd2 \xmm2
+ PFX_OPD_SIZE
+ PFX_REX aesdeclast_opd1 aesdeclast_opd2
+ .byte 0x0f, 0x38, 0xdf
+ MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
+ .endm
+
+ .macro MOVQ_R64_XMM opd1 opd2
+ REG_TYPE movq_r64_xmm_opd1_type \opd1
+ .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+ XMM_NUM movq_r64_xmm_opd1 \opd1
+ R64_NUM movq_r64_xmm_opd2 \opd2
+ .else
+ R64_NUM movq_r64_xmm_opd1 \opd1
+ XMM_NUM movq_r64_xmm_opd2 \opd2
+ .endif
+ PFX_OPD_SIZE
+ PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
+ .if movq_r64_xmm_opd1_type == REG_TYPE_XMM
+ .byte 0x0f, 0x7e
+ .else
+ .byte 0x0f, 0x6e
+ .endif
+ MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
+ .endm
+#endif
+
+#endif
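These macros emit raw opcode bytes at assembly time; the REX and ModRM bytes follow the standard x86 encoding rules, with opd1 landing in ModRM.rm and opd2 in ModRM.reg. A small stand-alone C mirror of PFX_REX and MODRM (operand numbers as the *_NUM macros would produce them) makes the byte math easy to check:

#include <stdio.h>

/* C mirror of the PFX_REX macro: a REX byte is emitted only when an
 * extended register (number >= 8) or REX.W is involved. */
static unsigned int rex_byte(int opd1, int opd2, int w)
{
	if (((opd1 | opd2) & 8) || w)
		return 0x40 | ((opd1 & 8) >> 3) | ((opd2 & 8) >> 1) | (w << 3);
	return 0;	/* no REX prefix needed */
}

/* C mirror of the MODRM macro: opd1 -> rm field, opd2 -> reg field. */
static unsigned int modrm_byte(int mod, int opd1, int opd2)
{
	return mod | (opd1 & 7) | ((opd2 & 7) << 3);
}

int main(void)
{
	/* Extended register number 8 as opd1, register 0 as opd2, REX.W set. */
	printf("REX   = 0x%02x\n", rex_byte(8, 0, 1));		/* 0x49 */
	printf("ModRM = 0x%02x\n", modrm_byte(0xc0, 8, 0));	/* 0xc0 */
	return 0;
}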
diff --git a/kernel/arch/x86/include/asm/intel-mid.h b/kernel/arch/x86/include/asm/intel-mid.h
new file mode 100644
index 000000000..7c5af123b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/intel-mid.h
@@ -0,0 +1,151 @@
+/*
+ * intel-mid.h: Intel MID specific setup code
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _ASM_X86_INTEL_MID_H
+#define _ASM_X86_INTEL_MID_H
+
+#include <linux/sfi.h>
+#include <linux/platform_device.h>
+
+extern int intel_mid_pci_init(void);
+extern int get_gpio_by_name(const char *name);
+extern void intel_scu_device_register(struct platform_device *pdev);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
+
+/*
+ * This defines the array of device platform data that IAFW exports
+ * through the SFI "DEVS" table; we use name and type to match a device
+ * with its platform data.
+ */
+struct devs_id {
+ char name[SFI_NAME_LEN + 1];
+ u8 type;
+ u8 delay;
+ void *(*get_platform_data)(void *info);
+ /* Custom handler for devices */
+ void (*device_handler)(struct sfi_device_table_entry *pentry,
+ struct devs_id *dev);
+};
+
+#define sfi_device(i) \
+ static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
+ __attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
+
+/*
+ * Medfield is the follow-up to Moorestown; it combines a two-chip solution
+ * into one. It also adds always-on, constant TSC and LAPIC timers. Medfield
+ * is the platform name, and the chip is called Penwell; we treat
+ * Medfield/Penwell as a variant of Moorestown. Penwell can be identified
+ * via MSRs.
+ */
+enum intel_mid_cpu_type {
+ /* 1 was Moorestown */
+ INTEL_MID_CPU_CHIP_PENWELL = 2,
+ INTEL_MID_CPU_CHIP_CLOVERVIEW,
+ INTEL_MID_CPU_CHIP_TANGIER,
+};
+
+extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+
+/**
+ * struct intel_mid_ops - Interface between intel-mid & sub archs
+ * @arch_setup: arch_setup function to re-initialize platform
+ * structures (x86_init, x86_platform_init)
+ *
+ * This structure can be extended if any new interface is required
+ * between intel-mid & its sub arch files.
+ */
+struct intel_mid_ops {
+ void (*arch_setup)(void);
+};
+
+/* Helper API's for INTEL_MID_OPS_INIT */
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
+ [cpuid] = get_##cpuname##_ops
+
+/* Maximum number of CPU ops */
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+
+/*
+ * For every new CPU addition, a weak get_<cpuname>_ops() function needs to be
+ * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
+ */
+#define INTEL_MID_OPS_INIT {\
+ DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
+ DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
+ DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+};
+
+#ifdef CONFIG_X86_INTEL_MID
+
+static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
+{
+ return __intel_mid_cpu_chip;
+}
+
+static inline bool intel_mid_has_msic(void)
+{
+ return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL);
+}
+
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define intel_mid_identify_cpu() (0)
+#define intel_mid_has_msic() (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
+enum intel_mid_timer_options {
+ INTEL_MID_TIMER_DEFAULT,
+ INTEL_MID_TIMER_APBT_ONLY,
+ INTEL_MID_TIMER_LAPIC_APBT,
+};
+
+extern enum intel_mid_timer_options intel_mid_timer_options;
+
+/*
+ * Penwell uses a spread-spectrum clock, so the frequency number is not
+ * exactly the same as the value reported by the MSR per the SDM.
+ */
+#define FSB_FREQ_83SKU 83200
+#define FSB_FREQ_100SKU 99840
+#define FSB_FREQ_133SKU 133000
+
+#define FSB_FREQ_167SKU 167000
+#define FSB_FREQ_200SKU 200000
+#define FSB_FREQ_267SKU 267000
+#define FSB_FREQ_333SKU 333000
+#define FSB_FREQ_400SKU 400000
+
+/* Bus Select SoC Fuse value */
+#define BSEL_SOC_FUSE_MASK 0x7
+#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */
+#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */
+#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */
+
+#define SFI_MTMR_MAX_NUM 8
+#define SFI_MRTC_MAX 8
+
+extern void intel_scu_devices_create(void);
+extern void intel_scu_devices_destroy(void);
+
+/* VRTC timer */
+#define MRST_VRTC_MAP_SZ (1024)
+/*#define MRST_VRTC_PGOFFSET (0xc00) */
+
+extern void intel_mid_rtc_init(void);
+
+/* the offset for the mapping of global gpio pin to irq */
+#define INTEL_MID_IRQ_OFFSET 0x100
+
+#endif /* _ASM_X86_INTEL_MID_H */
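intel_mid_identify_cpu() is the usual way platform code branches on the SoC variant. A hedged sketch; the timer-option mapping chosen here is illustrative, not the platform code's actual policy:

/* Sketch: pick a timer option based on the detected MID SoC. */
static void choose_timer_sketch(void)
{
	switch (intel_mid_identify_cpu()) {
	case INTEL_MID_CPU_CHIP_PENWELL:
		intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
		break;
	case INTEL_MID_CPU_CHIP_TANGIER:
		intel_mid_timer_options = INTEL_MID_TIMER_APBT_ONLY;
		break;
	default:
		intel_mid_timer_options = INTEL_MID_TIMER_DEFAULT;
		break;
	}
}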
diff --git a/kernel/arch/x86/include/asm/intel_mid_vrtc.h b/kernel/arch/x86/include/asm/intel_mid_vrtc.h
new file mode 100644
index 000000000..86ff4685c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/intel_mid_vrtc.h
@@ -0,0 +1,9 @@
+#ifndef _INTEL_MID_VRTC_H
+#define _INTEL_MID_VRTC_H
+
+extern unsigned char vrtc_cmos_read(unsigned char reg);
+extern void vrtc_cmos_write(unsigned char val, unsigned char reg);
+extern void vrtc_get_time(struct timespec *now);
+extern int vrtc_set_mmss(const struct timespec *now);
+
+#endif
diff --git a/kernel/arch/x86/include/asm/intel_scu_ipc.h b/kernel/arch/x86/include/asm/intel_scu_ipc.h
new file mode 100644
index 000000000..925b605eb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/intel_scu_ipc.h
@@ -0,0 +1,73 @@
+#ifndef _ASM_X86_INTEL_SCU_IPC_H_
+#define _ASM_X86_INTEL_SCU_IPC_H_
+
+#include <linux/notifier.h>
+
+#define IPCMSG_WARM_RESET 0xF0
+#define IPCMSG_COLD_RESET 0xF1
+#define IPCMSG_SOFT_RESET 0xF2
+#define IPCMSG_COLD_BOOT 0xF3
+
+#define IPCMSG_VRTC 0xFA /* Set vRTC device */
+ /* Command id associated with message IPCMSG_VRTC */
+ #define IPC_CMD_VRTC_SETTIME 1 /* Set time */
+ #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
+
+/* Read single register */
+int intel_scu_ipc_ioread8(u16 addr, u8 *data);
+
+/* Read two sequential registers */
+int intel_scu_ipc_ioread16(u16 addr, u16 *data);
+
+/* Read four sequential registers */
+int intel_scu_ipc_ioread32(u16 addr, u32 *data);
+
+/* Read a vector */
+int intel_scu_ipc_readv(u16 *addr, u8 *data, int len);
+
+/* Write single register */
+int intel_scu_ipc_iowrite8(u16 addr, u8 data);
+
+/* Write two sequential registers */
+int intel_scu_ipc_iowrite16(u16 addr, u16 data);
+
+/* Write four sequential registers */
+int intel_scu_ipc_iowrite32(u16 addr, u32 data);
+
+/* Write a vector */
+int intel_scu_ipc_writev(u16 *addr, u8 *data, int len);
+
+/* Update single register based on the mask */
+int intel_scu_ipc_update_register(u16 addr, u8 data, u8 mask);
+
+/* Issue commands to the SCU with or without data */
+int intel_scu_ipc_simple_command(int cmd, int sub);
+int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
+ u32 *out, int outlen);
+/* I2C control api */
+int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data);
+
+/* Update FW version */
+int intel_scu_ipc_fw_update(u8 *buffer, u32 length);
+
+extern struct blocking_notifier_head intel_scu_notifier;
+
+static inline void intel_scu_notifier_add(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&intel_scu_notifier, nb);
+}
+
+static inline void intel_scu_notifier_remove(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&intel_scu_notifier, nb);
+}
+
+static inline int intel_scu_notifier_post(unsigned long v, void *p)
+{
+ return blocking_notifier_call_chain(&intel_scu_notifier, v, p);
+}
+
+#define SCU_AVAILABLE 1
+#define SCU_DOWN 2
+
+#endif
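The SCU notifier chain lets drivers learn when the SCU comes up or goes down; registering a callback through the helpers above is the usual pattern. A hedged sketch, with the callback bodies left as placeholders:

/* Sketch: get told when the SCU becomes available or goes down. */
static int scu_event_sketch(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	if (event == SCU_AVAILABLE) {
		/* e.g. it is now safe to issue intel_scu_ipc_* calls */
	} else if (event == SCU_DOWN) {
		/* stop issuing IPC commands until the SCU returns */
	}
	return NOTIFY_OK;
}

static struct notifier_block scu_nb_sketch = {
	.notifier_call = scu_event_sketch,
};

static void register_scu_listener_sketch(void)
{
	intel_scu_notifier_add(&scu_nb_sketch);
}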
diff --git a/kernel/arch/x86/include/asm/io.h b/kernel/arch/x86/include/asm/io.h
new file mode 100644
index 000000000..34a5b9370
--- /dev/null
+++ b/kernel/arch/x86/include/asm/io.h
@@ -0,0 +1,347 @@
+#ifndef _ASM_X86_IO_H
+#define _ASM_X86_IO_H
+
+/*
+ * This file contains the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl and the "string versions" of the same
+ * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
+ * versions of the single-IO instructions (inb_p/inw_p/..).
+ *
+ * This file is not meant to be obfuscating: it's just complicated
+ * to (a) handle it all in a way that lets gcc optimize it as well as
+ * possible and (b) avoid writing the same thing over and over again
+ * with slight variations and possibly making a mistake somewhere.
+ */
+
+/*
+ * Thanks to James van Artsdalen for a better timing-fix than
+ * the two short jumps: using outb's to a nonexistent port seems
+ * to guarantee better timings even on fast machines.
+ *
+ * On the other hand, I'd like to be sure of a non-existent port:
+ * I feel a bit unsafe about using 0x80 (should be safe, though)
+ *
+ * Linus
+ */
+
+ /*
+ * Bit simplified and optimized by Jan Hubicka
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
+ *
+ * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
+ * isa_read[wl] and isa_write[wl] fixed
+ * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ */
+
+#define ARCH_HAS_IOREMAP_WC
+
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <asm/page.h>
+#include <asm/early_ioremap.h>
+
+#define build_mmio_read(name, size, type, reg, barrier) \
+static inline type name(const volatile void __iomem *addr) \
+{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
+:"m" (*(volatile type __force *)addr) barrier); return ret; }
+
+#define build_mmio_write(name, size, type, reg, barrier) \
+static inline void name(type val, volatile void __iomem *addr) \
+{ asm volatile("mov" size " %0,%1": :reg (val), \
+"m" (*(volatile type __force *)addr) barrier); }
+
+build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
+build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
+build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
+
+build_mmio_read(__readb, "b", unsigned char, "=q", )
+build_mmio_read(__readw, "w", unsigned short, "=r", )
+build_mmio_read(__readl, "l", unsigned int, "=r", )
+
+build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
+build_mmio_write(writew, "w", unsigned short, "r", :"memory")
+build_mmio_write(writel, "l", unsigned int, "r", :"memory")
+
+build_mmio_write(__writeb, "b", unsigned char, "q", )
+build_mmio_write(__writew, "w", unsigned short, "r", )
+build_mmio_write(__writel, "l", unsigned int, "r", )
+
+#define readb_relaxed(a) __readb(a)
+#define readw_relaxed(a) __readw(a)
+#define readl_relaxed(a) __readl(a)
+#define __raw_readb __readb
+#define __raw_readw __readw
+#define __raw_readl __readl
+
+#define writeb_relaxed(v, a) __writeb(v, a)
+#define writew_relaxed(v, a) __writew(v, a)
+#define writel_relaxed(v, a) __writel(v, a)
+#define __raw_writeb __writeb
+#define __raw_writew __writew
+#define __raw_writel __writel
+
+#define mmiowb() barrier()
+
+#ifdef CONFIG_X86_64
+
+build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
+build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
+
+#define readq_relaxed(a) readq(a)
+#define writeq_relaxed(v, a) writeq(v, a)
+
+#define __raw_readq(a) readq(a)
+#define __raw_writeq(val, addr) writeq(val, addr)
+
+/* Let people know that we have them */
+#define readq readq
+#define writeq writeq
+
+#endif
+
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline phys_addr_t virt_to_phys(volatile void *address)
+{
+ return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline void *phys_to_virt(phys_addr_t address)
+{
+ return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with physical addresses.
+ * However, we truncate the address to unsigned int to avoid undesirable
+ * promotions in legacy drivers.
+ */
+static inline unsigned int isa_virt_to_bus(volatile void *address)
+{
+ return (unsigned int)virt_to_phys(address);
+}
+#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * If the area you are trying to map is a PCI BAR you should have a
+ * look at pci_iomap().
+ */
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+ unsigned long prot_val);
+
+/*
+ * The default ioremap() behavior is non-cached:
+ */
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+{
+ return ioremap_nocache(offset, size);
+}
+
+extern void iounmap(volatile void __iomem *addr);
+
+extern void set_iounmap_nonlazy(void);
+
+#ifdef __KERNEL__
+
+#include <asm-generic/iomap.h>
+
+#include <linux/vmalloc.h>
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+static inline void
+memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
+{
+ memset((void __force *)addr, val, count);
+}
+
+static inline void
+memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
+{
+ memcpy(dst, (const void __force *)src, count);
+}
+
+static inline void
+memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
+{
+ memcpy((void __force *)dst, src, count);
+}
+
+/*
+ * ISA space is 'always mapped' on a typical x86 system, no need to
+ * explicitly ioremap() it. The fact that the ISA IO space is mapped
+ * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
+ * are physical addresses. The following constant pointer can be
+ * used as the IO-area pointer (it can be iounmapped as well, so the
+ * analogy with PCI holds quite well):
+ */
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
+
+/*
+ * Cache management
+ *
+ * This is needed for two cases:
+ * 1. Out-of-order aware processors
+ * 2. Accidentally out-of-order processors (PPro errata #51)
+ */
+
+static inline void flush_write_buffers(void)
+{
+#if defined(CONFIG_X86_PPRO_FENCE)
+ asm volatile("lock; addl $0,0(%%esp)": : :"memory");
+#endif
+}
+
+#endif /* __KERNEL__ */
+
+extern void native_io_delay(void);
+
+extern int io_delay_type;
+extern void io_delay_init(void);
+
+#if defined(CONFIG_PARAVIRT)
+#include <asm/paravirt.h>
+#else
+
+static inline void slow_down_io(void)
+{
+ native_io_delay();
+#ifdef REALLY_SLOW_IO
+ native_io_delay();
+ native_io_delay();
+ native_io_delay();
+#endif
+}
+
+#endif
+
+#define BUILDIO(bwl, bw, type) \
+static inline void out##bwl(unsigned type value, int port) \
+{ \
+ asm volatile("out" #bwl " %" #bw "0, %w1" \
+ : : "a"(value), "Nd"(port)); \
+} \
+ \
+static inline unsigned type in##bwl(int port) \
+{ \
+ unsigned type value; \
+ asm volatile("in" #bwl " %w1, %" #bw "0" \
+ : "=a"(value) : "Nd"(port)); \
+ return value; \
+} \
+ \
+static inline void out##bwl##_p(unsigned type value, int port) \
+{ \
+ out##bwl(value, port); \
+ slow_down_io(); \
+} \
+ \
+static inline unsigned type in##bwl##_p(int port) \
+{ \
+ unsigned type value = in##bwl(port); \
+ slow_down_io(); \
+ return value; \
+} \
+ \
+static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+{ \
+ asm volatile("rep; outs" #bwl \
+ : "+S"(addr), "+c"(count) : "d"(port)); \
+} \
+ \
+static inline void ins##bwl(int port, void *addr, unsigned long count) \
+{ \
+ asm volatile("rep; ins" #bwl \
+ : "+D"(addr), "+c"(count) : "d"(port)); \
+}
+
+BUILDIO(b, b, char)
+BUILDIO(w, w, short)
+BUILDIO(l, , int)
+
+extern void *xlate_dev_mem_ptr(phys_addr_t phys);
+extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
+
+extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+ enum page_cache_mode pcm);
+extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
+
+extern bool is_early_ioremap_ptep(pte_t *ptep);
+
+#ifdef CONFIG_XEN
+#include <xen/xen.h>
+struct bio_vec;
+
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+#endif /* CONFIG_XEN */
+
+#define IO_SPACE_LIMIT 0xffff
+
+#ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_add(unsigned long base,
+ unsigned long size);
+extern void arch_phys_wc_del(int handle);
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
+#endif /* _ASM_X86_IO_H */
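BUILDIO() stamps out the whole port-I/O family from one template; BUILDIO(b, b, char), for instance, expands to outb()/inb()/outb_p()/inb_p()/outsb()/insb(). For the MMIO side, the documented pattern is ioremap() plus the read*/write* accessors. A hedged driver-style sketch; the physical address, window size and register offset below are placeholders:

/* Sketch: map a device's MMIO window, poke one register, and unmap. */
static int mmio_poke_sketch(void)
{
	void __iomem *regs;

	regs = ioremap(0xfed00000, 0x1000);	/* non-cached by default */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical enable bit */
	(void)readl(regs + 0x10);	/* read back to post the write */

	iounmap(regs);
	return 0;
}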
diff --git a/kernel/arch/x86/include/asm/io_apic.h b/kernel/arch/x86/include/asm/io_apic.h
new file mode 100644
index 000000000..2f91685fe
--- /dev/null
+++ b/kernel/arch/x86/include/asm/io_apic.h
@@ -0,0 +1,289 @@
+#ifndef _ASM_X86_IO_APIC_H
+#define _ASM_X86_IO_APIC_H
+
+#include <linux/types.h>
+#include <asm/mpspec.h>
+#include <asm/apicdef.h>
+#include <asm/irq_vectors.h>
+#include <asm/x86_init.h>
+/*
+ * Intel IO-APIC support for SMP and UP systems.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
+ */
+
+/* I/O Unit Redirection Table */
+#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
+#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
+#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
+#define IO_APIC_REDIR_SEND_PENDING (1 << 12)
+#define IO_APIC_REDIR_REMOTE_IRR (1 << 14)
+#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
+#define IO_APIC_REDIR_MASKED (1 << 16)
+
+/*
+ * The structure of the IO-APIC:
+ */
+union IO_APIC_reg_00 {
+ u32 raw;
+ struct {
+ u32 __reserved_2 : 14,
+ LTS : 1,
+ delivery_type : 1,
+ __reserved_1 : 8,
+ ID : 8;
+ } __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_01 {
+ u32 raw;
+ struct {
+ u32 version : 8,
+ __reserved_2 : 7,
+ PRQ : 1,
+ entries : 8,
+ __reserved_1 : 8;
+ } __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_02 {
+ u32 raw;
+ struct {
+ u32 __reserved_2 : 24,
+ arbitration : 4,
+ __reserved_1 : 4;
+ } __attribute__ ((packed)) bits;
+};
+
+union IO_APIC_reg_03 {
+ u32 raw;
+ struct {
+ u32 boot_DT : 1,
+ __reserved_1 : 31;
+ } __attribute__ ((packed)) bits;
+};
+
+struct IO_APIC_route_entry {
+ __u32 vector : 8,
+ delivery_mode : 3, /* 000: FIXED
+ * 001: lowest prio
+ * 111: ExtINT
+ */
+ dest_mode : 1, /* 0: physical, 1: logical */
+ delivery_status : 1,
+ polarity : 1,
+ irr : 1,
+ trigger : 1, /* 0: edge, 1: level */
+ mask : 1, /* 0: enabled, 1: disabled */
+ __reserved_2 : 15;
+
+ __u32 __reserved_3 : 24,
+ dest : 8;
+} __attribute__ ((packed));
+
+struct IR_IO_APIC_route_entry {
+ __u64 vector : 8,
+ zero : 3,
+ index2 : 1,
+ delivery_status : 1,
+ polarity : 1,
+ irr : 1,
+ trigger : 1,
+ mask : 1,
+ reserved : 31,
+ format : 1,
+ index : 15;
+} __attribute__ ((packed));
+
+#define IOAPIC_AUTO -1
+#define IOAPIC_EDGE 0
+#define IOAPIC_LEVEL 1
+#define IOAPIC_MAP_ALLOC 0x1
+#define IOAPIC_MAP_CHECK 0x2
+
+#ifdef CONFIG_X86_IO_APIC
+
+/*
+ * # of IO-APICs and # of IRQ routing registers
+ */
+extern int nr_ioapics;
+
+extern int mpc_ioapic_id(int ioapic);
+extern unsigned int mpc_ioapic_addr(int ioapic);
+extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic);
+
+#define MP_MAX_IOAPIC_PIN 127
+
+/* # of MP IRQ source entries */
+extern int mp_irq_entries;
+
+/* MP IRQ source entries */
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
+
+/* Older SiS APIC requires we rewrite the index register */
+extern int sis_apic_bug;
+
+/* 1 if "noapic" boot option passed */
+extern int skip_ioapic_setup;
+
+/* 1 if "noapic" boot option passed */
+extern int noioapicquirk;
+
+/* -1 if "noapic" boot option passed */
+extern int noioapicreroute;
+
+extern unsigned long io_apic_irqs;
+
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs))
+
+/*
+ * If we use the IO-APIC for IRQ routing, disable automatic
+ * assignment of PCI IRQ's.
+ */
+#define io_apic_assign_pci_irqs \
+ (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
+
+struct irq_cfg;
+extern void ioapic_insert_resources(void);
+extern int arch_early_ioapic_init(void);
+
+extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
+ unsigned int, int,
+ struct io_apic_irq_attr *);
+extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
+
+extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
+
+extern int save_ioapic_entries(void);
+extern void mask_ioapic_entries(void);
+extern int restore_ioapic_entries(void);
+
+extern void setup_ioapic_ids_from_mpc(void);
+extern void setup_ioapic_ids_from_mpc_nocheck(void);
+
+struct io_apic_irq_attr {
+ int ioapic;
+ int ioapic_pin;
+ int trigger;
+ int polarity;
+};
+
+enum ioapic_domain_type {
+ IOAPIC_DOMAIN_INVALID,
+ IOAPIC_DOMAIN_LEGACY,
+ IOAPIC_DOMAIN_STRICT,
+ IOAPIC_DOMAIN_DYNAMIC,
+};
+
+struct device_node;
+struct irq_domain;
+struct irq_domain_ops;
+
+struct ioapic_domain_cfg {
+ enum ioapic_domain_type type;
+ const struct irq_domain_ops *ops;
+ struct device_node *dev;
+};
+
+struct mp_ioapic_gsi {
+ u32 gsi_base;
+ u32 gsi_end;
+};
+extern u32 gsi_top;
+
+extern int mp_find_ioapic(u32 gsi);
+extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
+extern u32 mp_pin_to_gsi(int ioapic, int pin);
+extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags);
+extern void mp_unmap_irq(int irq);
+extern int mp_register_ioapic(int id, u32 address, u32 gsi_base,
+ struct ioapic_domain_cfg *cfg);
+extern int mp_unregister_ioapic(u32 gsi_base);
+extern int mp_ioapic_registered(u32 gsi_base);
+extern int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq);
+extern void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq);
+extern int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node);
+extern void __init pre_init_apic_IRQ0(void);
+
+extern void mp_save_irq(struct mpc_intsrc *m);
+
+extern void disable_ioapic_support(void);
+
+extern void __init native_io_apic_init_mappings(void);
+extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
+extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
+extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
+extern void native_disable_io_apic(void);
+extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern int native_ioapic_set_affinity(struct irq_data *,
+ const struct cpumask *,
+ bool);
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+ return x86_io_apic_ops.read(apic, reg);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ x86_io_apic_ops.write(apic, reg, value);
+}
+
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+ x86_io_apic_ops.modify(apic, reg, value);
+}
+
+extern void io_apic_eoi(unsigned int apic, unsigned int vector);
+
+extern void setup_IO_APIC(void);
+extern void enable_IO_APIC(void);
+extern void disable_IO_APIC(void);
+extern void setup_ioapic_dest(void);
+extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin);
+extern void print_IO_APICs(void);
+#else /* !CONFIG_X86_IO_APIC */
+
+#define IO_APIC_IRQ(x) 0
+#define io_apic_assign_pci_irqs 0
+#define setup_ioapic_ids_from_mpc x86_init_noop
+static inline void ioapic_insert_resources(void) { }
+static inline int arch_early_ioapic_init(void) { return 0; }
+static inline void print_IO_APICs(void) {}
+#define gsi_top (NR_IRQS_LEGACY)
+static inline int mp_find_ioapic(u32 gsi) { return 0; }
+static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; }
+static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; }
+static inline void mp_unmap_irq(int irq) { }
+
+static inline int save_ioapic_entries(void)
+{
+ return -ENOMEM;
+}
+
+static inline void mask_ioapic_entries(void) { }
+static inline int restore_ioapic_entries(void)
+{
+ return -ENOMEM;
+}
+
+static inline void mp_save_irq(struct mpc_intsrc *m) { }
+static inline void disable_ioapic_support(void) { }
+#define native_io_apic_init_mappings NULL
+#define native_io_apic_read NULL
+#define native_io_apic_write NULL
+#define native_io_apic_modify NULL
+#define native_disable_io_apic NULL
+#define native_io_apic_print_entries NULL
+#define native_ioapic_set_affinity NULL
+#define native_setup_ioapic_entry NULL
+#define native_eoi_ioapic_pin NULL
+
+static inline void setup_IO_APIC(void) { }
+static inline void enable_IO_APIC(void) { }
+static inline void setup_ioapic_dest(void) { }
+
+#endif
+
+#endif /* _ASM_X86_IO_APIC_H */
diff --git a/kernel/arch/x86/include/asm/iomap.h b/kernel/arch/x86/include/asm/iomap.h
new file mode 100644
index 000000000..363e33eb6
--- /dev/null
+++ b/kernel/arch/x86/include/asm/iomap.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_X86_IOMAP_H
+#define _ASM_X86_IOMAP_H
+
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+void __iomem *
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+
+void
+iounmap_atomic(void __iomem *kvaddr);
+
+int
+iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+
+void
+iomap_free(resource_size_t base, unsigned long size);
+
+#endif /* _ASM_X86_IOMAP_H */
diff --git a/kernel/arch/x86/include/asm/iommu.h b/kernel/arch/x86/include/asm/iommu.h
new file mode 100644
index 000000000..345c99cef
--- /dev/null
+++ b/kernel/arch/x86/include/asm/iommu.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_IOMMU_H
+#define _ASM_X86_IOMMU_H
+
+extern struct dma_map_ops nommu_dma_ops;
+extern int force_iommu, no_iommu;
+extern int iommu_detected;
+extern int iommu_pass_through;
+
+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+
+#endif /* _ASM_X86_IOMMU_H */
diff --git a/kernel/arch/x86/include/asm/iommu_table.h b/kernel/arch/x86/include/asm/iommu_table.h
new file mode 100644
index 000000000..e37d6b3ad
--- /dev/null
+++ b/kernel/arch/x86/include/asm/iommu_table.h
@@ -0,0 +1,101 @@
+#ifndef _ASM_X86_IOMMU_TABLE_H
+#define _ASM_X86_IOMMU_TABLE_H
+
+#include <asm/swiotlb.h>
+
+/*
+ * History lesson:
+ * The execution chain of IOMMUs in 2.6.36 looks as so:
+ *
+ * [xen-swiotlb]
+ * |
+ * +----[swiotlb *]--+
+ * / | \
+ * / | \
+ * [GART] [Calgary] [Intel VT-d]
+ * /
+ * /
+ * [AMD-Vi]
+ *
+ * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip
+ * over the rest of the IOMMUs and unconditionally initialize the SWIOTLB.
+ * It would also surreptitiously set swiotlb=1 if there was more than 4GB
+ * of memory and the user did not pass in 'iommu=off'. The swiotlb flag
+ * would be turned off by all IOMMUs except the Calgary one.
+ *
+ * The IOMMU_INIT* macros allow a similar tree (or a more complex one, if
+ * desired) to be built by declaring which detection routine we depend on.
+ *
+ * All an IOMMU driver needs to do is use one of the macros below, and
+ * pci-dma.c will take care of the rest.
+ */
+
+struct iommu_table_entry {
+ initcall_t detect;
+ initcall_t depend;
+	void (*early_init)(void); /* No memory allocator available. */
+ void (*late_init)(void); /* Yes, can allocate memory. */
+#define IOMMU_FINISH_IF_DETECTED (1<<0)
+#define IOMMU_DETECTED (1<<1)
+ int flags;
+};
+/*
+ * This macro fills out an entry in the .iommu_table section with the
+ * fields of 'struct iommu_table_entry'. The entries placed in the
+ * .iommu_table section are in no particular order, so at boot time they
+ * have to be sorted based on their dependency information.
+ */
+
+#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
+ static const struct iommu_table_entry \
+ __iommu_entry_##_detect __used \
+ __attribute__ ((unused, __section__(".iommu_table"), \
+ aligned((sizeof(void *))))) \
+ = {_detect, _depend, _early_init, _late_init, \
+ _finish ? IOMMU_FINISH_IF_DETECTED : 0}
+/*
+ * The simplest IOMMU definition. Provide the detection routine
+ * and it will be run after the SWIOTLB and the other IOMMUs
+ * that utilize this macro. If the IOMMU is detected (i.e., the
+ * detect routine returns a positive value), the other IOMMUs
+ * are also checked. You can use IOMMU_INIT_POST_FINISH if you prefer
+ * to stop detecting the other IOMMUs after yours has been detected.
+ */
+#define IOMMU_INIT_POST(_detect) \
+ __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 0)
+
+#define IOMMU_INIT_POST_FINISH(_detect) \
+ __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 1)
+
+/*
+ * A more sophisticated version of IOMMU_INIT. This variant requires:
+ *  a). A detection routine function.
+ *  b). The name of the detection routine we depend on, which is
+ *      guaranteed to run before ours.
+ *  c). The early init routine, which is called from pci_iommu_alloc if
+ *      the detection routine returned a positive value. At this point
+ *      no memory allocator is available yet.
+ *  d). Similar to the early init routine, except that this one is called
+ *      from pci_iommu_init, where a memory allocator is available.
+ *
+ * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
+ * in that the former will continue detecting other IOMMUs in the call
+ * list after the detection routine returns a positive number, while the
+ * latter will stop the execution chain upon first successful detection.
+ * Both variants will still call the 'init' and 'late_init' functions if
+ * they are set.
+ */
+#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init) \
+ __IOMMU_INIT(_detect, _depend, _init, _late_init, 1)
+
+#define IOMMU_INIT(_detect, _depend, _init, _late_init) \
+ __IOMMU_INIT(_detect, _depend, _init, _late_init, 0)
+
+void sort_iommu_table(struct iommu_table_entry *start,
+ struct iommu_table_entry *finish);
+
+void check_iommu_entries(struct iommu_table_entry *start,
+ struct iommu_table_entry *finish);
+
+#endif /* _ASM_X86_IOMMU_TABLE_H */
diff --git a/kernel/arch/x86/include/asm/iosf_mbi.h b/kernel/arch/x86/include/asm/iosf_mbi.h
new file mode 100644
index 000000000..57995f059
--- /dev/null
+++ b/kernel/arch/x86/include/asm/iosf_mbi.h
@@ -0,0 +1,145 @@
+/*
+ * iosf_mbi.h: Intel OnChip System Fabric MailBox access support
+ */
+
+#ifndef IOSF_MBI_SYMS_H
+#define IOSF_MBI_SYMS_H
+
+#define MBI_MCR_OFFSET 0xD0
+#define MBI_MDR_OFFSET 0xD4
+#define MBI_MCRX_OFFSET 0xD8
+
+#define MBI_RD_MASK 0xFEFFFFFF
+#define MBI_WR_MASK 0X01000000
+
+#define MBI_MASK_HI 0xFFFFFF00
+#define MBI_MASK_LO 0x000000FF
+#define MBI_ENABLE 0xF0
+
+/* Baytrail available units */
+#define BT_MBI_UNIT_AUNIT 0x00
+#define BT_MBI_UNIT_SMC 0x01
+#define BT_MBI_UNIT_CPU 0x02
+#define BT_MBI_UNIT_BUNIT 0x03
+#define BT_MBI_UNIT_PMC 0x04
+#define BT_MBI_UNIT_GFX 0x06
+#define BT_MBI_UNIT_SMI 0x0C
+#define BT_MBI_UNIT_USB 0x43
+#define BT_MBI_UNIT_SATA 0xA3
+#define BT_MBI_UNIT_PCIE 0xA6
+
+/* Baytrail read/write opcodes */
+#define BT_MBI_AUNIT_READ 0x10
+#define BT_MBI_AUNIT_WRITE 0x11
+#define BT_MBI_SMC_READ 0x10
+#define BT_MBI_SMC_WRITE 0x11
+#define BT_MBI_CPU_READ 0x10
+#define BT_MBI_CPU_WRITE 0x11
+#define BT_MBI_BUNIT_READ 0x10
+#define BT_MBI_BUNIT_WRITE 0x11
+#define BT_MBI_PMC_READ 0x06
+#define BT_MBI_PMC_WRITE 0x07
+#define BT_MBI_GFX_READ 0x00
+#define BT_MBI_GFX_WRITE 0x01
+#define BT_MBI_SMIO_READ 0x06
+#define BT_MBI_SMIO_WRITE 0x07
+#define BT_MBI_USB_READ 0x06
+#define BT_MBI_USB_WRITE 0x07
+#define BT_MBI_SATA_READ 0x00
+#define BT_MBI_SATA_WRITE 0x01
+#define BT_MBI_PCIE_READ 0x00
+#define BT_MBI_PCIE_WRITE 0x01
+
+/* Quark available units */
+#define QRK_MBI_UNIT_HBA 0x00
+#define QRK_MBI_UNIT_HB 0x03
+#define QRK_MBI_UNIT_RMU 0x04
+#define QRK_MBI_UNIT_MM 0x05
+#define QRK_MBI_UNIT_MMESRAM 0x05
+#define QRK_MBI_UNIT_SOC 0x31
+
+/* Quark read/write opcodes */
+#define QRK_MBI_HBA_READ 0x10
+#define QRK_MBI_HBA_WRITE 0x11
+#define QRK_MBI_HB_READ 0x10
+#define QRK_MBI_HB_WRITE 0x11
+#define QRK_MBI_RMU_READ 0x10
+#define QRK_MBI_RMU_WRITE 0x11
+#define QRK_MBI_MM_READ 0x10
+#define QRK_MBI_MM_WRITE 0x11
+#define QRK_MBI_MMESRAM_READ 0x12
+#define QRK_MBI_MMESRAM_WRITE 0x13
+#define QRK_MBI_SOC_READ 0x06
+#define QRK_MBI_SOC_WRITE 0x07
+
+#if IS_ENABLED(CONFIG_IOSF_MBI)
+
+bool iosf_mbi_available(void);
+
+/**
+ * iosf_mbi_read() - MailBox Interface read command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be read
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
+
+/**
+ * iosf_mbi_write() - MailBox unmasked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be written
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+
+/**
+ * iosf_mbi_modify() - MailBox masked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data being modified
+ * @mask: mask indicating bits in mdr to be modified
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
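+
+/*
+ * Editorial example (not part of the original header): a read-modify-write
+ * of a single subunit register built from the accessors above. The offset
+ * 0x3c and the bit being set are made-up values for illustration; real code
+ * would normally prefer iosf_mbi_modify() so the masked update happens in
+ * one locked operation.
+ */
+static inline int iosf_mbi_example_set_bit(void)
+{
+	u32 val;
+	int ret;
+
+	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_PMC_READ, 0x3c, &val);
+	if (ret)
+		return ret;
+
+	val |= 0x1;
+	return iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_PMC_WRITE, 0x3c, val);
+}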
+
+#else /* CONFIG_IOSF_MBI is not enabled */
+static inline
+bool iosf_mbi_available(void)
+{
+ return false;
+}
+
+static inline
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ WARN(1, "IOSF_MBI driver not available");
+ return -EPERM;
+}
+
+static inline
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ WARN(1, "IOSF_MBI driver not available");
+ return -EPERM;
+}
+
+static inline
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ WARN(1, "IOSF_MBI driver not available");
+ return -EPERM;
+}
+#endif /* CONFIG_IOSF_MBI */
+
+#endif /* IOSF_MBI_SYMS_H */
diff --git a/kernel/arch/x86/include/asm/ipi.h b/kernel/arch/x86/include/asm/ipi.h
new file mode 100644
index 000000000..615fa9061
--- /dev/null
+++ b/kernel/arch/x86/include/asm/ipi.h
@@ -0,0 +1,162 @@
+#ifndef _ASM_X86_IPI_H
+#define _ASM_X86_IPI_H
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC InterProcessor Interrupt code.
+ *
+ * Moved to include file by James Cleverdon from
+ * arch/x86-64/kernel/smp.c
+ *
+ * Copyrights from kernel/smp.c:
+ *
+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ * (c) 2002,2003 Andi Kleen, SuSE Labs.
+ * Subject to the GNU Public License, v.2
+ */
+
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/smp.h>
+
+/*
+ * the following functions deal with sending IPIs between CPUs.
+ *
+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
+ */
+
+static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
+ unsigned int dest)
+{
+ unsigned int icr = shortcut | dest;
+
+ switch (vector) {
+ default:
+ icr |= APIC_DM_FIXED | vector;
+ break;
+ case NMI_VECTOR:
+ icr |= APIC_DM_NMI;
+ break;
+ }
+ return icr;
+}
+
+static inline int __prepare_ICR2(unsigned int mask)
+{
+ return SET_APIC_DEST_FIELD(mask);
+}
+
+static inline void __xapic_wait_icr_idle(void)
+{
+ while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
+ cpu_relax();
+}
+
+static inline void
+__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
+{
+ /*
+ * Subtle. In the case of the 'never do double writes' workaround
+ * we have to lock out interrupts to be safe. As we don't care
+	 * about the value read, we use an atomic rmw access to avoid costly
+ * cli/sti. Otherwise we use an even cheaper single atomic write
+ * to the APIC.
+ */
+ unsigned int cfg;
+
+ /*
+ * Wait for idle.
+ */
+ __xapic_wait_icr_idle();
+
+ /*
+ * No need to touch the target chip field
+ */
+ cfg = __prepare_ICR(shortcut, vector, dest);
+
+ /*
+ * Send the IPI. The write to APIC_ICR fires this off.
+ */
+ native_apic_mem_write(APIC_ICR, cfg);
+}
+
+/*
+ * This is used to send an IPI with no shorthand notation (the destination is
+ * specified in bits 56 to 63 of the ICR).
+ */
+static inline void
+__default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
+{
+ unsigned long cfg;
+
+ /*
+ * Wait for idle.
+ */
+ if (unlikely(vector == NMI_VECTOR))
+ safe_apic_wait_icr_idle();
+ else
+ __xapic_wait_icr_idle();
+
+ /*
+ * prepare target chip field
+ */
+ cfg = __prepare_ICR2(mask);
+ native_apic_mem_write(APIC_ICR2, cfg);
+
+ /*
+ * program the ICR
+ */
+ cfg = __prepare_ICR(0, vector, dest);
+
+ /*
+ * Send the IPI. The write to APIC_ICR fires this off.
+ */
+ native_apic_mem_write(APIC_ICR, cfg);
+}
+
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+ int vector);
+
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
+extern int no_broadcast;
+
+static inline void __default_local_send_IPI_allbutself(int vector)
+{
+ if (no_broadcast || vector == NMI_VECTOR)
+ apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+ else
+ __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
+}
+
+static inline void __default_local_send_IPI_all(int vector)
+{
+ if (no_broadcast || vector == NMI_VECTOR)
+ apic->send_IPI_mask(cpu_online_mask, vector);
+ else
+ __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
+}
+
+#ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
+#endif
+
+#endif
+
+#endif /* _ASM_X86_IPI_H */
diff --git a/kernel/arch/x86/include/asm/irq.h b/kernel/arch/x86/include/asm/irq.h
new file mode 100644
index 000000000..a80cbb88e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/irq.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_X86_IRQ_H
+#define _ASM_X86_IRQ_H
+/*
+ * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
+ *
+ * IRQ/IPI changes taken from work by Thomas Radke
+ * <tomsoft@informatik.tu-chemnitz.de>
+ */
+
+#include <asm/apicdef.h>
+#include <asm/irq_vectors.h>
+
+static inline int irq_canonicalize(int irq)
+{
+ return ((irq == 2) ? 9 : irq);
+}
+
+#ifdef CONFIG_X86_32
+extern void irq_ctx_init(int cpu);
+#else
+# define irq_ctx_init(cpu) do { } while (0)
+#endif
+
+#define __ARCH_HAS_DO_SOFTIRQ
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpumask.h>
+extern int check_irq_vectors_for_cpu_disable(void);
+extern void fixup_irqs(void);
+extern void irq_force_complete_move(int);
+#endif
+
+extern void (*x86_platform_ipi_callback)(void);
+extern void native_init_IRQ(void);
+extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+extern __visible unsigned int do_IRQ(struct pt_regs *regs);
+
+/* Interrupt vector management */
+extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
+extern int vector_used_by_percpu_irq(unsigned int vector);
+
+extern void init_ISA_irqs(void);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(bool);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
+#endif /* _ASM_X86_IRQ_H */
diff --git a/kernel/arch/x86/include/asm/irq_regs.h b/kernel/arch/x86/include/asm/irq_regs.h
new file mode 100644
index 000000000..d82250b1d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/irq_regs.h
@@ -0,0 +1,31 @@
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the per-cpu area.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_X86_IRQ_REGS_H
+#define _ASM_X86_IRQ_REGS_H
+
+#include <asm/percpu.h>
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+DECLARE_PER_CPU(struct pt_regs *, irq_regs);
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return this_cpu_read(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = get_irq_regs();
+ this_cpu_write(irq_regs, new_regs);
+
+ return old_regs;
+}
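+
+/*
+ * Editorial example (not part of the original header): the usual pattern in
+ * an interrupt entry path - publish the incoming frame, handle the
+ * interrupt, then restore whatever was current before, so that nested
+ * interrupts unwind correctly.
+ */
+static inline void example_irq_entry(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	/* ... handle the interrupt; get_irq_regs() now returns 'regs' ... */
+
+	set_irq_regs(old_regs);
+}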
+
+#endif /* _ASM_X86_IRQ_REGS_H */
diff --git a/kernel/arch/x86/include/asm/irq_remapping.h b/kernel/arch/x86/include/asm/irq_remapping.h
new file mode 100644
index 000000000..6224d316c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/irq_remapping.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This header file contains the interface of the interrupt remapping code to
+ * the x86 interrupt management code.
+ */
+
+#ifndef __X86_IRQ_REMAPPING_H
+#define __X86_IRQ_REMAPPING_H
+
+#include <asm/io_apic.h>
+
+struct IO_APIC_route_entry;
+struct io_apic_irq_attr;
+struct irq_chip;
+struct msi_msg;
+struct pci_dev;
+struct irq_cfg;
+
+#ifdef CONFIG_IRQ_REMAP
+
+extern void set_irq_remapping_broken(void);
+extern int irq_remapping_prepare(void);
+extern int irq_remapping_enable(void);
+extern void irq_remapping_disable(void);
+extern int irq_remapping_reenable(int);
+extern int irq_remap_enable_fault_handling(void);
+extern int setup_ioapic_remapped_entry(int irq,
+ struct IO_APIC_route_entry *entry,
+ unsigned int destination,
+ int vector,
+ struct io_apic_irq_attr *attr);
+extern void free_remapped_irq(int irq);
+extern void compose_remapped_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id);
+extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
+extern void panic_if_irq_remap(const char *msg);
+extern bool setup_remapped_irq(int irq,
+ struct irq_cfg *cfg,
+ struct irq_chip *chip);
+
+void irq_remap_modify_chip_defaults(struct irq_chip *chip);
+
+#else /* CONFIG_IRQ_REMAP */
+
+static inline void set_irq_remapping_broken(void) { }
+static inline int irq_remapping_prepare(void) { return -ENODEV; }
+static inline int irq_remapping_enable(void) { return -ENODEV; }
+static inline void irq_remapping_disable(void) { }
+static inline int irq_remapping_reenable(int eim) { return -ENODEV; }
+static inline int irq_remap_enable_fault_handling(void) { return -ENODEV; }
+static inline int setup_ioapic_remapped_entry(int irq,
+ struct IO_APIC_route_entry *entry,
+ unsigned int destination,
+ int vector,
+ struct io_apic_irq_attr *attr)
+{
+ return -ENODEV;
+}
+static inline void free_remapped_irq(int irq) { }
+static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
+ unsigned int irq, unsigned int dest,
+ struct msi_msg *msg, u8 hpet_id)
+{
+}
+static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
+{
+ return -ENODEV;
+}
+
+static inline void panic_if_irq_remap(const char *msg)
+{
+}
+
+static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+{
+}
+
+static inline bool setup_remapped_irq(int irq,
+ struct irq_cfg *cfg,
+ struct irq_chip *chip)
+{
+ return false;
+}
+#endif /* CONFIG_IRQ_REMAP */
+
+#define dmar_alloc_hwirq() irq_alloc_hwirq(-1)
+#define dmar_free_hwirq irq_free_hwirq
+
+#endif /* __X86_IRQ_REMAPPING_H */
diff --git a/kernel/arch/x86/include/asm/irq_vectors.h b/kernel/arch/x86/include/asm/irq_vectors.h
new file mode 100644
index 000000000..666c89ec4
--- /dev/null
+++ b/kernel/arch/x86/include/asm/irq_vectors.h
@@ -0,0 +1,172 @@
+#ifndef _ASM_X86_IRQ_VECTORS_H
+#define _ASM_X86_IRQ_VECTORS_H
+
+#include <linux/threads.h>
+/*
+ * Linux IRQ vector layout.
+ *
+ * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
+ * be defined by Linux. They are used as a jump table by the CPU when a
+ * given vector is triggered - by a CPU-external, CPU-internal or
+ * software-triggered event.
+ *
+ * Linux sets the kernel code address each entry jumps to early during
+ * bootup, and never changes them. This is the general layout of the
+ * IDT entries:
+ *
+ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
+ * Vectors 32 ... 127 : device interrupts
+ * Vector 128 : legacy int80 syscall interface
+ * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts
+ * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
+ *
+ * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
+ *
+ * This file enumerates the exact layout of them:
+ */
+
+#define NMI_VECTOR 0x02
+#define MCE_VECTOR 0x12
+
+/*
+ * IDT vectors usable for external interrupt sources start at 0x20.
+ * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
+ */
+#define FIRST_EXTERNAL_VECTOR 0x20
+/*
+ * We start allocating at 0x21 to spread out vectors evenly between
+ * priority levels. (0x80 is the syscall vector)
+ */
+#define VECTOR_OFFSET_START 1
+
+/*
+ * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
+ * triggering cleanup after irq migration. 0x21-0x2f will still be used
+ * for device interrupts.
+ */
+#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
+
+#define IA32_SYSCALL_VECTOR 0x80
+#ifdef CONFIG_X86_32
+# define SYSCALL_VECTOR 0x80
+#endif
+
+/*
+ * Vectors 0x30-0x3f are used for ISA interrupts.
+ * round up to the next 16-vector boundary
+ */
+#define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15)
+
+#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
+#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
+#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
+#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
+#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
+#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
+#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
+#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
+#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
+#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
+#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
+#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
+#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
+#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
+#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
+
+/*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ * Some of the following vectors are 'rare'; they are merged
+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.
+ */
+
+#define SPURIOUS_APIC_VECTOR 0xff
+/*
+ * Sanity check
+ */
+#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
+# error SPURIOUS_APIC_VECTOR definition error
+#endif
+
+#define ERROR_APIC_VECTOR 0xfe
+#define RESCHEDULE_VECTOR 0xfd
+#define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
+#define THERMAL_APIC_VECTOR 0xfa
+#define THRESHOLD_APIC_VECTOR 0xf9
+#define REBOOT_VECTOR 0xf8
+
+/*
+ * Generic system vector for platform specific use
+ */
+#define X86_PLATFORM_IPI_VECTOR 0xf7
+
+/* Vector for KVM to deliver posted interrupt IPI */
+#ifdef CONFIG_HAVE_KVM
+#define POSTED_INTR_VECTOR 0xf2
+#endif
+
+/*
+ * IRQ work vector:
+ */
+#define IRQ_WORK_VECTOR 0xf6
+
+#define UV_BAU_MESSAGE 0xf5
+
+/* Vector on which hypervisor callbacks will be delivered */
+#define HYPERVISOR_CALLBACK_VECTOR 0xf3
+
+/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+#define LOCAL_TIMER_VECTOR 0xef
+
+#define NR_VECTORS 256
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR
+#else
+#define FIRST_SYSTEM_VECTOR NR_VECTORS
+#endif
+
+#define FPU_IRQ 13
+
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
+
+#ifndef __ASSEMBLY__
+static inline int invalid_vm86_irq(int irq)
+{
+ return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
+}
+#endif
+
+/*
+ * Size the maximum number of interrupts.
+ *
+ * If the irq_desc[] array has a sparse layout, we can size things
+ * generously - it scales up linearly with the maximum number of CPUs,
+ * and the maximum number of IO-APICs, whichever is higher.
+ *
+ * In other cases we size more conservatively, to not create too large
+ * static arrays.
+ */
+
+#define NR_IRQS_LEGACY 16
+
+#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
+
+#ifdef CONFIG_X86_IO_APIC
+# define CPU_VECTOR_LIMIT (64 * NR_CPUS)
+# define NR_IRQS \
+ (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
+ (NR_VECTORS + CPU_VECTOR_LIMIT) : \
+ (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
+#else /* !CONFIG_X86_IO_APIC: */
+# define NR_IRQS NR_IRQS_LEGACY
+#endif
+
+#endif /* _ASM_X86_IRQ_VECTORS_H */
diff --git a/kernel/arch/x86/include/asm/irq_work.h b/kernel/arch/x86/include/asm/irq_work.h
new file mode 100644
index 000000000..78162f8e2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/irq_work.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_IRQ_WORK_H
+#define _ASM_IRQ_WORK_H
+
+#include <asm/processor.h>
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return cpu_has_apic;
+}
+
+#endif /* _ASM_IRQ_WORK_H */
diff --git a/kernel/arch/x86/include/asm/irqflags.h b/kernel/arch/x86/include/asm/irqflags.h
new file mode 100644
index 000000000..b77f5edb0
--- /dev/null
+++ b/kernel/arch/x86/include/asm/irqflags.h
@@ -0,0 +1,198 @@
+#ifndef _X86_IRQFLAGS_H_
+#define _X86_IRQFLAGS_H_
+
+#include <asm/processor-flags.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * Interrupt control:
+ */
+
+static inline unsigned long native_save_fl(void)
+{
+ unsigned long flags;
+
+ /*
+ * "=rm" is safe here, because "pop" adjusts the stack before
+ * it evaluates its effective address -- this is part of the
+ * documented behavior of the "pop" instruction.
+ */
+ asm volatile("# __raw_save_flags\n\t"
+ "pushf ; pop %0"
+ : "=rm" (flags)
+ : /* no input */
+ : "memory");
+
+ return flags;
+}
+
+static inline void native_restore_fl(unsigned long flags)
+{
+ asm volatile("push %0 ; popf"
+ : /* no output */
+ :"g" (flags)
+ :"memory", "cc");
+}
+
+static inline void native_irq_disable(void)
+{
+ asm volatile("cli": : :"memory");
+}
+
+static inline void native_irq_enable(void)
+{
+ asm volatile("sti": : :"memory");
+}
+
+static inline void native_safe_halt(void)
+{
+ asm volatile("sti; hlt": : :"memory");
+}
+
+static inline void native_halt(void)
+{
+ asm volatile("hlt": : :"memory");
+}
+
+#endif
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+ return native_save_fl();
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+ native_restore_fl(flags);
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+ native_irq_disable();
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+ native_irq_enable();
+}
+
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static inline void arch_safe_halt(void)
+{
+ native_safe_halt();
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+static inline void halt(void)
+{
+ native_halt();
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags = arch_local_save_flags();
+ arch_local_irq_disable();
+ return flags;
+}
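+
+/*
+ * Editorial example (not part of the original header): the canonical
+ * save/disable ... restore pattern built from the helpers above.
+ */
+static inline void example_irq_protected_section(void)
+{
+	unsigned long flags;
+
+	flags = arch_local_irq_save();	/* save IF and disable interrupts */
+	/* ... code that must not be interrupted ... */
+	arch_local_irq_restore(flags);	/* re-enable only if they were on before */
+}
+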
+#else
+
+#define ENABLE_INTERRUPTS(x) sti
+#define DISABLE_INTERRUPTS(x) cli
+
+#ifdef CONFIG_X86_64
+#define SWAPGS swapgs
+/*
+ * Currently paravirt can't handle swapgs nicely when we
+ * don't have a stack we can rely on (such as a user space
+ * stack). So we either find a way around these or just fault
+ * and emulate if a guest tries to call swapgs directly.
+ *
+ * Either way, this is a good way to document that we don't
+ * have a reliable stack. x86_64 only.
+ */
+#define SWAPGS_UNSAFE_STACK swapgs
+
+#define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */
+
+#define INTERRUPT_RETURN jmp native_iret
+#define USERGS_SYSRET64 \
+ swapgs; \
+ sysretq;
+#define USERGS_SYSRET32 \
+ swapgs; \
+ sysretl
+
+#else
+#define INTERRUPT_RETURN iret
+#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+#define GET_CR0_INTO_EAX movl %cr0, %eax
+#endif
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
+#ifndef __ASSEMBLY__
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & X86_EFLAGS_IF);
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ unsigned long flags = arch_local_save_flags();
+
+ return arch_irqs_disabled_flags(flags);
+}
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_TRACE_IRQFLAGS
+# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
+# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
+#else
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_OFF
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_X86_64
+# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
+# define LOCKDEP_SYS_EXIT_IRQ \
+ TRACE_IRQS_ON; \
+ sti; \
+ call lockdep_sys_exit_thunk; \
+ cli; \
+ TRACE_IRQS_OFF;
+# else
+# define LOCKDEP_SYS_EXIT \
+ pushl %eax; \
+ pushl %ecx; \
+ pushl %edx; \
+ call lockdep_sys_exit; \
+ popl %edx; \
+ popl %ecx; \
+ popl %eax;
+# define LOCKDEP_SYS_EXIT_IRQ
+# endif
+#else
+# define LOCKDEP_SYS_EXIT
+# define LOCKDEP_SYS_EXIT_IRQ
+#endif
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/kernel/arch/x86/include/asm/ist.h b/kernel/arch/x86/include/asm/ist.h
new file mode 100644
index 000000000..c9803f1a2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/ist.h
@@ -0,0 +1,23 @@
+/*
+ * Include file for the interface to IST BIOS
+ * Copyright 2002 Andy Grover <andrew.grover@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef _ASM_X86_IST_H
+#define _ASM_X86_IST_H
+
+#include <uapi/asm/ist.h>
+
+
+extern struct ist_info ist_info;
+
+#endif /* _ASM_X86_IST_H */
diff --git a/kernel/arch/x86/include/asm/jump_label.h b/kernel/arch/x86/include/asm/jump_label.h
new file mode 100644
index 000000000..a4c1cf7e9
--- /dev/null
+++ b/kernel/arch/x86/include/asm/jump_label.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_X86_JUMP_LABEL_H
+#define _ASM_X86_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <asm/nops.h>
+#include <asm/asm.h>
+
+#define JUMP_LABEL_NOP_SIZE 5
+
+#ifdef CONFIG_X86_64
+# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
+#else
+# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
+#endif
+
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm_volatile_goto("1:"
+ ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+ _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
+ ".popsection \n\t"
+ : : "i" (key) : : l_yes);
+ return false;
+l_yes:
+ return true;
+}
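+
+/*
+ * Editorial example (not part of the original header): arch_static_branch()
+ * is normally reached only through static_key_false() in
+ * <linux/jump_label.h>; it is open-coded here purely for illustration.
+ */
+static __always_inline void example_static_branch_usage(struct static_key *key)
+{
+	if (arch_static_branch(key)) {
+		/*
+		 * Unlikely path: taken only after the 5-byte NOP has been
+		 * patched into a jump to this label.
+		 */
+	}
+}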
+
+#ifdef CONFIG_X86_64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/kernel/arch/x86/include/asm/kasan.h b/kernel/arch/x86/include/asm/kasan.h
new file mode 100644
index 000000000..8b22422fb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kasan.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_X86_KASAN_H
+#define _ASM_X86_KASAN_H
+
+/*
+ * The compiler computes shadow addresses assuming that addresses start
+ * at 0. Kernel addresses don't start at 0, so the shadow for the kernel
+ * really starts at the compiler's shadow offset +
+ * ('kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT).
+ */
+#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
+ (0xffff800000000000ULL >> 3))
+/* 47 bits for kernel address -> (47 - 3) bits for shadow */
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + (1ULL << (47 - 3)))
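+
+/*
+ * Editorial example (not part of the original header), assuming the usual
+ * mapping shadow(addr) = KASAN_SHADOW_OFFSET + (addr >> 3) described above:
+ * for a kernel address such as 0xffff880000000000 the shadow byte lives at
+ * KASAN_SHADOW_START + ((0xffff880000000000 - 0xffff800000000000) >> 3),
+ * i.e. KASAN_SHADOW_START + 0x10000000000.
+ */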
+
+#ifndef __ASSEMBLY__
+
+extern pte_t kasan_zero_pte[];
+extern pte_t kasan_zero_pmd[];
+extern pte_t kasan_zero_pud[];
+
+#ifdef CONFIG_KASAN
+void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_init(void);
+#else
+static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_init(void) { }
+#endif
+
+#endif
+
+#endif
diff --git a/kernel/arch/x86/include/asm/kbdleds.h b/kernel/arch/x86/include/asm/kbdleds.h
new file mode 100644
index 000000000..f27ac5ff5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kbdleds.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_X86_KBDLEDS_H
+#define _ASM_X86_KBDLEDS_H
+
+/*
+ * Some laptops take the 789uiojklm,. keys as number pad when NumLock is on.
+ * This seems a good reason to start with NumLock off. That's why on X86 we
+ * ask the bios for the correct state.
+ */
+
+#include <asm/setup.h>
+
+static inline int kbd_defleds(void)
+{
+ return boot_params.kbd_status & 0x20 ? (1 << VC_NUMLOCK) : 0;
+}
+
+#endif /* _ASM_X86_KBDLEDS_H */
diff --git a/kernel/arch/x86/include/asm/kdebug.h b/kernel/arch/x86/include/asm/kdebug.h
new file mode 100644
index 000000000..32ce71375
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kdebug.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_X86_KDEBUG_H
+#define _ASM_X86_KDEBUG_H
+
+#include <linux/notifier.h>
+
+struct pt_regs;
+
+/* Grossly misnamed. */
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_INT3,
+ DIE_DEBUG,
+ DIE_PANIC,
+ DIE_NMI,
+ DIE_DIE,
+ DIE_KERNELDEBUG,
+ DIE_TRAP,
+ DIE_GPF,
+ DIE_CALL,
+ DIE_PAGE_FAULT,
+ DIE_NMIUNKNOWN,
+};
+
+extern void printk_address(unsigned long address);
+extern void die(const char *, struct pt_regs *,long);
+extern int __must_check __die(const char *, struct pt_regs *, long);
+extern void show_trace(struct task_struct *t, struct pt_regs *regs,
+ unsigned long *sp, unsigned long bp);
+extern void __show_regs(struct pt_regs *regs, int all);
+extern unsigned long oops_begin(void);
+extern void oops_end(unsigned long, struct pt_regs *, int signr);
+#ifdef CONFIG_KEXEC
+extern int in_crash_kexec;
+#else
+/* no crash dump is ever in progress if no crash kernel can be kexec'd */
+#define in_crash_kexec 0
+#endif
+
+#endif /* _ASM_X86_KDEBUG_H */
diff --git a/kernel/arch/x86/include/asm/kexec-bzimage64.h b/kernel/arch/x86/include/asm/kexec-bzimage64.h
new file mode 100644
index 000000000..d1b5d194e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kexec-bzimage64.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_KEXEC_BZIMAGE64_H
+#define _ASM_KEXEC_BZIMAGE64_H
+
+extern struct kexec_file_ops kexec_bzImage64_ops;
+
+#endif /* _ASM_KEXEC_BZIMAGE64_H */
diff --git a/kernel/arch/x86/include/asm/kexec.h b/kernel/arch/x86/include/asm/kexec.h
new file mode 100644
index 000000000..d2434c1ca
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kexec.h
@@ -0,0 +1,216 @@
+#ifndef _ASM_X86_KEXEC_H
+#define _ASM_X86_KEXEC_H
+
+#ifdef CONFIG_X86_32
+# define PA_CONTROL_PAGE 0
+# define VA_CONTROL_PAGE 1
+# define PA_PGD 2
+# define PA_SWAP_PAGE 3
+# define PAGES_NR 4
+#else
+# define PA_CONTROL_PAGE 0
+# define VA_CONTROL_PAGE 1
+# define PA_TABLE_PAGE 2
+# define PA_SWAP_PAGE 3
+# define PAGES_NR 4
+#endif
+
+# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
+
+#ifndef __ASSEMBLY__
+
+#include <linux/string.h>
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/bootparam.h>
+
+struct kimage;
+
+/*
+ * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
+ * i.e. the highest page that is mapped directly into kernel memory so
+ * that kmap is not required.
+ *
+ * So far x86_64 is limited to 40 physical address bits.
+ */
+#ifdef CONFIG_X86_32
+/* Maximum physical address we can use pages from */
+# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+/* Maximum address we can reach in physical address mode */
+# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+/* Maximum address we can use for the control code buffer */
+# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+
+# define KEXEC_CONTROL_PAGE_SIZE 4096
+
+/* The native architecture */
+# define KEXEC_ARCH KEXEC_ARCH_386
+
+/* We can also handle crash dumps from 64 bit kernel. */
+# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
+#else
+/* Maximum physical address we can use pages from */
+# define KEXEC_SOURCE_MEMORY_LIMIT (MAXMEM-1)
+/* Maximum address we can reach in physical address mode */
+# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
+/* Maximum address we can use for the control pages */
+# define KEXEC_CONTROL_MEMORY_LIMIT (MAXMEM-1)
+
+/* Allocate one page for the pdp and the second for the code */
+# define KEXEC_CONTROL_PAGE_SIZE (4096UL + 4096UL)
+
+/* The native architecture */
+# define KEXEC_ARCH KEXEC_ARCH_X86_64
+#endif
+
+/* Memory to backup during crash kdump */
+#define KEXEC_BACKUP_SRC_START (0UL)
+#define KEXEC_BACKUP_SRC_END (640 * 1024UL) /* 640K */
+
+/*
+ * The CPU does not save ss and sp on the stack if execution was already
+ * in kernel mode when the NMI occurred. This code fixes that up.
+ */
+static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+#ifdef CONFIG_X86_32
+ newregs->sp = (unsigned long)&(oldregs->sp);
+ asm volatile("xorl %%eax, %%eax\n\t"
+ "movw %%ss, %%ax\n\t"
+ :"=a"(newregs->ss));
+#endif
+}
+
+/*
+ * This function captures the register state if we got here via panic;
+ * otherwise (coming in via a kernel-mode exception) it just fixes up
+ * ss and sp.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs) {
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ crash_fixup_ss_esp(newregs, oldregs);
+ } else {
+#ifdef CONFIG_X86_32
+ asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
+ asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
+ asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
+ asm volatile("movl %%esi,%0" : "=m"(newregs->si));
+ asm volatile("movl %%edi,%0" : "=m"(newregs->di));
+ asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
+ asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
+ asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
+ asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
+ asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
+ asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
+ asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
+ asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
+#else
+ asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
+ asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
+ asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
+ asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
+ asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
+ asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
+ asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
+ asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
+ asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
+ asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
+ asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
+ asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
+ asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
+ asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
+ asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
+ asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
+ asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
+ asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
+ asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
+#endif
+ newregs->ip = (unsigned long)current_text_addr();
+ }
+}
+
+#ifdef CONFIG_X86_32
+asmlinkage unsigned long
+relocate_kernel(unsigned long indirection_page,
+ unsigned long control_page,
+ unsigned long start_address,
+ unsigned int has_pae,
+ unsigned int preserve_context);
+#else
+unsigned long
+relocate_kernel(unsigned long indirection_page,
+ unsigned long page_list,
+ unsigned long start_address,
+ unsigned int preserve_context);
+#endif
+
+#define ARCH_HAS_KIMAGE_ARCH
+
+#ifdef CONFIG_X86_32
+struct kimage_arch {
+ pgd_t *pgd;
+#ifdef CONFIG_X86_PAE
+ pmd_t *pmd0;
+ pmd_t *pmd1;
+#endif
+ pte_t *pte0;
+ pte_t *pte1;
+};
+#else
+struct kimage_arch {
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ /* Details of backup region */
+ unsigned long backup_src_start;
+ unsigned long backup_src_sz;
+
+ /* Physical address of backup segment */
+ unsigned long backup_load_addr;
+
+ /* Core ELF header buffer */
+ void *elf_headers;
+ unsigned long elf_headers_sz;
+ unsigned long elf_load_addr;
+};
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_X86_64
+/*
+ * Number of elements and order of elements in this structure should match
+ * with the ones in arch/x86/purgatory/entry64.S. If you make a change here
+ * make an appropriate change in purgatory too.
+ */
+struct kexec_entry64_regs {
+ uint64_t rax;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rbx;
+ uint64_t rsp;
+ uint64_t rbp;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rip;
+};
+#endif
+
+typedef void crash_vmclear_fn(void);
+extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_KEXEC_H */
diff --git a/kernel/arch/x86/include/asm/kgdb.h b/kernel/arch/x86/include/asm/kgdb.h
new file mode 100644
index 000000000..332f98c91
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kgdb.h
@@ -0,0 +1,89 @@
+#ifndef _ASM_X86_KGDB_H
+#define _ASM_X86_KGDB_H
+
+/*
+ * Copyright (C) 2001-2004 Amit S. Kale
+ * Copyright (C) 2008 Wind River Systems, Inc.
+ */
+
+/*
+ * BUFMAX defines the maximum number of characters in the inbound/outbound
+ * buffers. At least NUMREGBYTES*2 are needed for register packets, and a
+ * longer buffer is needed to list all threads.
+ */
+#define BUFMAX 1024
+
+/*
+ * Note that this register image is in a different order than
+ * the register image that Linux produces at interrupt time.
+ *
+ * Linux's register image is defined by struct pt_regs in ptrace.h.
+ * Just why GDB uses a different order is a historical mystery.
+ */
+#ifdef CONFIG_X86_32
+enum regnames {
+ GDB_AX, /* 0 */
+ GDB_CX, /* 1 */
+ GDB_DX, /* 2 */
+ GDB_BX, /* 3 */
+ GDB_SP, /* 4 */
+ GDB_BP, /* 5 */
+ GDB_SI, /* 6 */
+ GDB_DI, /* 7 */
+ GDB_PC, /* 8 also known as eip */
+ GDB_PS, /* 9 also known as eflags */
+ GDB_CS, /* 10 */
+ GDB_SS, /* 11 */
+ GDB_DS, /* 12 */
+ GDB_ES, /* 13 */
+ GDB_FS, /* 14 */
+ GDB_GS, /* 15 */
+};
+#define GDB_ORIG_AX 41
+#define DBG_MAX_REG_NUM 16
+#define NUMREGBYTES ((GDB_GS+1)*4)
+#else /* ! CONFIG_X86_32 */
+enum regnames {
+ GDB_AX, /* 0 */
+ GDB_BX, /* 1 */
+ GDB_CX, /* 2 */
+ GDB_DX, /* 3 */
+ GDB_SI, /* 4 */
+ GDB_DI, /* 5 */
+ GDB_BP, /* 6 */
+ GDB_SP, /* 7 */
+ GDB_R8, /* 8 */
+ GDB_R9, /* 9 */
+ GDB_R10, /* 10 */
+ GDB_R11, /* 11 */
+ GDB_R12, /* 12 */
+ GDB_R13, /* 13 */
+ GDB_R14, /* 14 */
+ GDB_R15, /* 15 */
+ GDB_PC, /* 16 */
+ GDB_PS, /* 17 */
+ GDB_CS, /* 18 */
+ GDB_SS, /* 19 */
+ GDB_DS, /* 20 */
+ GDB_ES, /* 21 */
+ GDB_FS, /* 22 */
+ GDB_GS, /* 23 */
+};
+#define GDB_ORIG_AX 57
+#define DBG_MAX_REG_NUM 24
+/* 17 64 bit regs and 5 32 bit regs */
+#define NUMREGBYTES ((17 * 8) + (5 * 4))
+#endif /* ! CONFIG_X86_32 */
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(" int $3");
+}
+#define BREAK_INSTR_SIZE 1
+#define CACHE_FLUSH_IS_SAFE 1
+#define GDB_ADJUSTS_BREAK_OFFSET
+
+extern int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
+
+#endif /* _ASM_X86_KGDB_H */
diff --git a/kernel/arch/x86/include/asm/kmap_types.h b/kernel/arch/x86/include/asm/kmap_types.h
new file mode 100644
index 000000000..9e00a731a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kmap_types.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_KMAP_TYPES_H
+#define _ASM_X86_KMAP_TYPES_H
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
+#define __WITH_KM_FENCE
+#endif
+
+#include <asm-generic/kmap_types.h>
+
+#undef __WITH_KM_FENCE
+
+#endif /* _ASM_X86_KMAP_TYPES_H */
diff --git a/kernel/arch/x86/include/asm/kmemcheck.h b/kernel/arch/x86/include/asm/kmemcheck.h
new file mode 100644
index 000000000..ed01518f2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kmemcheck.h
@@ -0,0 +1,42 @@
+#ifndef ASM_X86_KMEMCHECK_H
+#define ASM_X86_KMEMCHECK_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_KMEMCHECK
+bool kmemcheck_active(struct pt_regs *regs);
+
+void kmemcheck_show(struct pt_regs *regs);
+void kmemcheck_hide(struct pt_regs *regs);
+
+bool kmemcheck_fault(struct pt_regs *regs,
+ unsigned long address, unsigned long error_code);
+bool kmemcheck_trap(struct pt_regs *regs);
+#else
+static inline bool kmemcheck_active(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void kmemcheck_show(struct pt_regs *regs)
+{
+}
+
+static inline void kmemcheck_hide(struct pt_regs *regs)
+{
+}
+
+static inline bool kmemcheck_fault(struct pt_regs *regs,
+ unsigned long address, unsigned long error_code)
+{
+ return false;
+}
+
+static inline bool kmemcheck_trap(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_KMEMCHECK */
+
+#endif
diff --git a/kernel/arch/x86/include/asm/kprobes.h b/kernel/arch/x86/include/asm/kprobes.h
new file mode 100644
index 000000000..4421b5da4
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kprobes.h
@@ -0,0 +1,120 @@
+#ifndef _ASM_X86_KPROBES_H
+#define _ASM_X86_KPROBES_H
+/*
+ * Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * See arch/x86/kernel/kprobes.c for x86 kprobes history.
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+#include <asm/insn.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
+struct pt_regs;
+struct kprobe;
+
+typedef u8 kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION 0xcc
+#define RELATIVEJUMP_OPCODE 0xe9
+#define RELATIVEJUMP_SIZE 5
+#define RELATIVECALL_OPCODE 0xe8
+#define RELATIVE_ADDR_SIZE 4
+#define MAX_STACK_SIZE 64
+#define MIN_STACK_SIZE(ADDR) \
+ (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+ THREAD_SIZE - (unsigned long)(ADDR))) \
+ ? (MAX_STACK_SIZE) \
+ : (((unsigned long)current_thread_info()) + \
+ THREAD_SIZE - (unsigned long)(ADDR)))
+
+#define flush_insn_slot(p) do { } while (0)
+
+/* optinsn template addresses */
+extern __visible kprobe_opcode_t optprobe_template_entry;
+extern __visible kprobe_opcode_t optprobe_template_val;
+extern __visible kprobe_opcode_t optprobe_template_call;
+extern __visible kprobe_opcode_t optprobe_template_end;
+#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
+#define MAX_OPTINSN_SIZE \
+ (((unsigned long)&optprobe_template_end - \
+ (unsigned long)&optprobe_template_entry) + \
+ MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
+
+extern const int kretprobe_blacklist_size;
+
+void arch_remove_kprobe(struct kprobe *p);
+asmlinkage void kretprobe_trampoline(void);
+
+/* Architecture specific copy of the original instruction */
+struct arch_specific_insn {
+ /* copy of the original instruction */
+ kprobe_opcode_t *insn;
+ /*
+ * boostable = -1: This instruction type is not boostable.
+ * boostable = 0: This instruction type is boostable.
+ * boostable = 1: This instruction has been boosted: we have
+ * added a relative jump after the instruction copy in insn,
+ * so no single-step and fixup are needed (unless there's
+ * a post_handler or break_handler).
+ */
+ int boostable;
+ bool if_modifier;
+};
+
+struct arch_optimized_insn {
+ /* copy of the original instructions */
+ kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE];
+ /* detour code buffer */
+ kprobe_opcode_t *insn;
+ /* the size of instructions copied to detour code buffer */
+ size_t size;
+};
+
+/* Return true (!0) if optinsn is prepared for optimization. */
+static inline int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
+{
+ return optinsn->size;
+}
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+ unsigned long old_flags;
+ unsigned long saved_flags;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned long kprobe_status;
+ unsigned long kprobe_old_flags;
+ unsigned long kprobe_saved_flags;
+ unsigned long *jprobe_saved_sp;
+ struct pt_regs jprobe_saved_regs;
+ kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+ struct prev_kprobe prev_kprobe;
+};
+
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+extern int kprobe_int3_handler(struct pt_regs *regs);
+extern int kprobe_debug_handler(struct pt_regs *regs);
+#endif /* _ASM_X86_KPROBES_H */
diff --git a/kernel/arch/x86/include/asm/kvm_emulate.h b/kernel/arch/x86/include/asm/kvm_emulate.h
new file mode 100644
index 000000000..57a9d94fe
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kvm_emulate.h
@@ -0,0 +1,428 @@
+/******************************************************************************
+ * x86_emulate.h
+ *
+ * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
+ *
+ * Copyright (c) 2005 Keir Fraser
+ *
+ * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
+ */
+
+#ifndef _ASM_X86_KVM_X86_EMULATE_H
+#define _ASM_X86_KVM_X86_EMULATE_H
+
+#include <asm/desc_defs.h>
+
+struct x86_emulate_ctxt;
+enum x86_intercept;
+enum x86_intercept_stage;
+
+struct x86_exception {
+ u8 vector;
+ bool error_code_valid;
+ u16 error_code;
+ bool nested_page_fault;
+ u64 address; /* cr2 or nested page fault gpa */
+};
+
+/*
+ * This struct is used to carry enough information from the instruction
+ * decoder to main KVM so that a decision can be made whether the
+ * instruction needs to be intercepted or not.
+ */
+struct x86_instruction_info {
+ u8 intercept; /* which intercept */
+ u8 rep_prefix; /* rep prefix? */
+ u8 modrm_mod; /* mod part of modrm */
+ u8 modrm_reg; /* index of register used */
+ u8 modrm_rm; /* rm part of modrm */
+ u64 src_val; /* value of source operand */
+ u64 dst_val; /* value of destination operand */
+ u8 src_bytes; /* size of source operand */
+ u8 dst_bytes; /* size of destination operand */
+ u8 ad_bytes; /* size of src/dst address */
+ u64 next_rip; /* rip following the instruction */
+};
+
+/*
+ * x86_emulate_ops:
+ *
+ * These operations represent the instruction emulator's interface to memory.
+ * There are two categories of operation: those that act on ordinary memory
+ * regions (*_std), and those that act on memory regions known to require
+ * special treatment or emulation (*_emulated).
+ *
+ * The emulator assumes that an instruction accesses only one 'emulated memory'
+ * location, that this location is the given linear faulting address (cr2), and
+ * that this is one of the instruction's data operands. Instruction fetches and
+ * stack operations are assumed never to access emulated memory. The emulator
+ * automatically deduces which operand of a string-move operation is accessing
+ * emulated memory, and assumes that the other operand accesses normal memory.
+ *
+ * NOTES:
+ * 1. The emulator isn't very smart about emulated vs. standard memory.
+ * 'Emulated memory' access addresses should be checked for sanity.
+ * 'Normal memory' accesses may fault, and the caller must arrange to
+ * detect and handle reentrancy into the emulator via recursive faults.
+ * Accesses may be unaligned and may cross page boundaries.
+ * 2. If the access fails (cannot emulate, or a standard access faults) then
+ * it is up to the memop to propagate the fault to the guest VM via
+ * some out-of-band mechanism, unknown to the emulator. The memop signals
+ * failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
+ * then immediately bail.
+ * 3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
+ * cmpxchg8b_emulated need support 8-byte accesses.
+ * 4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
+ */
+/* Access completed successfully: continue emulation as normal. */
+#define X86EMUL_CONTINUE 0
+/* Access is unhandleable: bail from emulation and return error to caller. */
+#define X86EMUL_UNHANDLEABLE 1
+/* Terminate emulation but return success to the caller. */
+#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
+#define X86EMUL_RETRY_INSTR 3 /* retry the instruction for some reason */
+#define X86EMUL_CMPXCHG_FAILED 4 /* cmpxchg did not see expected value */
+#define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */
+#define X86EMUL_INTERCEPTED 6 /* Intercepted by nested VMCB/VMCS */
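As a rough illustration of this return-code contract, a backing implementation of one of the *_emulated callbacks below could look like the following sketch; emul_ram, emul_ram_size and the literal vector 14 (#PF) are illustrative assumptions, not part of this header.

/* Hypothetical read_emulated-style callback (sketch only). */
static int example_read_emulated(struct x86_emulate_ctxt *ctxt,
				 unsigned long addr, void *val,
				 unsigned int bytes,
				 struct x86_exception *fault)
{
	extern u8 emul_ram[];			/* assumed backing store */
	extern unsigned long emul_ram_size;	/* assumed size */

	if (addr + bytes > emul_ram_size) {
		/* Tell the emulator to bail and inject a fault. */
		fault->vector = 14;		/* #PF, illustrative */
		fault->error_code_valid = true;
		fault->error_code = 0;
		fault->nested_page_fault = false;
		fault->address = addr;
		return X86EMUL_PROPAGATE_FAULT;
	}
	memcpy(val, emul_ram + addr, bytes);	/* needs <linux/string.h> */
	return X86EMUL_CONTINUE;
}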
+
+struct x86_emulate_ops {
+ /*
+ * read_gpr: read a general purpose register (rax - r15)
+ *
+ * @reg: gpr number.
+ */
+ ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
+ /*
+ * write_gpr: write a general purpose register (rax - r15)
+ *
+ * @reg: gpr number.
+ * @val: value to write.
+ */
+ void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
+ /*
+ * read_std: Read bytes of standard (non-emulated/special) memory.
+ * Used for descriptor reading.
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*read_std)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val,
+ unsigned int bytes,
+ struct x86_exception *fault);
+
+ /*
+ * write_std: Write bytes of standard (non-emulated/special) memory.
+ * Used for descriptor writing.
+ * @addr: [IN ] Linear address to which to write.
+	 * @val:   [IN ] Value to write to memory.
+ * @bytes: [IN ] Number of bytes to write to memory.
+ */
+ int (*write_std)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val, unsigned int bytes,
+ struct x86_exception *fault);
+ /*
+ * fetch: Read bytes of standard (non-emulated/special) memory.
+ * Used for instruction fetch.
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*fetch)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val, unsigned int bytes,
+ struct x86_exception *fault);
+
+ /*
+ * read_emulated: Read bytes from emulated/special memory area.
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, void *val, unsigned int bytes,
+ struct x86_exception *fault);
+
+ /*
+ * write_emulated: Write bytes to emulated/special memory area.
+ * @addr: [IN ] Linear address to which to write.
+ * @val: [IN ] Value to write to memory (low-order bytes used as
+ * required).
+ * @bytes: [IN ] Number of bytes to write to memory.
+ */
+ int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr, const void *val,
+ unsigned int bytes,
+ struct x86_exception *fault);
+
+ /*
+ * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
+ * emulated/special memory area.
+ * @addr: [IN ] Linear address to access.
+ * @old: [IN ] Value expected to be current at @addr.
+ * @new: [IN ] Value to write to @addr.
+ * @bytes: [IN ] Number of bytes to access using CMPXCHG.
+ */
+ int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
+ unsigned long addr,
+ const void *old,
+ const void *new,
+ unsigned int bytes,
+ struct x86_exception *fault);
+ void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);
+
+ int (*pio_in_emulated)(struct x86_emulate_ctxt *ctxt,
+ int size, unsigned short port, void *val,
+ unsigned int count);
+
+ int (*pio_out_emulated)(struct x86_emulate_ctxt *ctxt,
+ int size, unsigned short port, const void *val,
+ unsigned int count);
+
+ bool (*get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector,
+ struct desc_struct *desc, u32 *base3, int seg);
+ void (*set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector,
+ struct desc_struct *desc, u32 base3, int seg);
+ unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
+ int seg);
+ void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+ void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+ void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+ void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+ ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
+ int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
+ int (*cpl)(struct x86_emulate_ctxt *ctxt);
+ int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
+ int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
+ int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
+ int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+ int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
+ int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
+ void (*halt)(struct x86_emulate_ctxt *ctxt);
+ void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
+ int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
+ void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
+ void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
+ int (*intercept)(struct x86_emulate_ctxt *ctxt,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage);
+
+ void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+ void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+};
+
+typedef u32 __attribute__((vector_size(16))) sse128_t;
+
+/* Type, address-of, and value of an instruction's operand. */
+struct operand {
+ enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
+ unsigned int bytes;
+ unsigned int count;
+ union {
+ unsigned long orig_val;
+ u64 orig_val64;
+ };
+ union {
+ unsigned long *reg;
+ struct segmented_address {
+ ulong ea;
+ unsigned seg;
+ } mem;
+ unsigned xmm;
+ unsigned mm;
+ } addr;
+ union {
+ unsigned long val;
+ u64 val64;
+ char valptr[sizeof(sse128_t)];
+ sse128_t vec_val;
+ u64 mm_val;
+ void *data;
+ };
+};
+
+struct fetch_cache {
+ u8 data[15];
+ u8 *ptr;
+ u8 *end;
+};
+
+struct read_cache {
+ u8 data[1024];
+ unsigned long pos;
+ unsigned long end;
+};
+
+/* Execution mode, passed to the emulator. */
+enum x86emul_mode {
+ X86EMUL_MODE_REAL, /* Real mode. */
+ X86EMUL_MODE_VM86, /* Virtual 8086 mode. */
+ X86EMUL_MODE_PROT16, /* 16-bit protected mode. */
+ X86EMUL_MODE_PROT32, /* 32-bit protected mode. */
+ X86EMUL_MODE_PROT64, /* 64-bit (long) mode. */
+};
+
+struct x86_emulate_ctxt {
+ const struct x86_emulate_ops *ops;
+
+ /* Register state before/after emulation. */
+ unsigned long eflags;
+ unsigned long eip; /* eip before instruction emulation */
+ /* Emulated execution mode, represented by an X86EMUL_MODE value. */
+ enum x86emul_mode mode;
+
+ /* interruptibility state, as a result of execution of STI or MOV SS */
+ int interruptibility;
+
+ bool guest_mode; /* guest running a nested guest */
+ bool perm_ok; /* do not check permissions if true */
+ bool ud; /* inject an #UD if host doesn't support insn */
+
+ bool have_exception;
+ struct x86_exception exception;
+
+ /*
+ * decode cache
+ */
+
+ /* current opcode length in bytes */
+ u8 opcode_len;
+ u8 b;
+ u8 intercept;
+ u8 op_bytes;
+ u8 ad_bytes;
+ struct operand src;
+ struct operand src2;
+ struct operand dst;
+ int (*execute)(struct x86_emulate_ctxt *ctxt);
+ int (*check_perm)(struct x86_emulate_ctxt *ctxt);
+ /*
+ * The following six fields are cleared together,
+ * the rest are initialized unconditionally in x86_decode_insn
+ * or elsewhere
+ */
+ bool rip_relative;
+ u8 rex_prefix;
+ u8 lock_prefix;
+ u8 rep_prefix;
+ /* bitmaps of registers in _regs[] that can be read */
+ u32 regs_valid;
+ /* bitmaps of registers in _regs[] that have been written */
+ u32 regs_dirty;
+ /* modrm */
+ u8 modrm;
+ u8 modrm_mod;
+ u8 modrm_reg;
+ u8 modrm_rm;
+ u8 modrm_seg;
+ u8 seg_override;
+ u64 d;
+ unsigned long _eip;
+ struct operand memop;
+ /* Fields above regs are cleared together. */
+ unsigned long _regs[NR_VCPU_REGS];
+ struct operand *memopp;
+ struct fetch_cache fetch;
+ struct read_cache io_read;
+ struct read_cache mem_read;
+};
+
+/* Repeat String Operation Prefix */
+#define REPE_PREFIX 0xf3
+#define REPNE_PREFIX 0xf2
+
+/* CPUID vendors */
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
+
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
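These constants are the vendor strings as little-endian ASCII words (0x756e6547 is "Genu", 0x49656e69 is "ineI", 0x6c65746e is "ntel"); a caller compares all three registers from CPUID leaf 0, roughly as in this sketch (the helper name is illustrative):

/* Sketch: does CPUID leaf 0 report the GenuineIntel vendor string? */
static inline bool example_is_genuine_intel(u32 ebx, u32 ecx, u32 edx)
{
	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
	       ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	       edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}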
+
+enum x86_intercept_stage {
+ X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */
+ X86_ICPT_PRE_EXCEPT,
+ X86_ICPT_POST_EXCEPT,
+ X86_ICPT_POST_MEMACCESS,
+};
+
+enum x86_intercept {
+ x86_intercept_none,
+ x86_intercept_cr_read,
+ x86_intercept_cr_write,
+ x86_intercept_clts,
+ x86_intercept_lmsw,
+ x86_intercept_smsw,
+ x86_intercept_dr_read,
+ x86_intercept_dr_write,
+ x86_intercept_lidt,
+ x86_intercept_sidt,
+ x86_intercept_lgdt,
+ x86_intercept_sgdt,
+ x86_intercept_lldt,
+ x86_intercept_sldt,
+ x86_intercept_ltr,
+ x86_intercept_str,
+ x86_intercept_rdtsc,
+ x86_intercept_rdpmc,
+ x86_intercept_pushf,
+ x86_intercept_popf,
+ x86_intercept_cpuid,
+ x86_intercept_rsm,
+ x86_intercept_iret,
+ x86_intercept_intn,
+ x86_intercept_invd,
+ x86_intercept_pause,
+ x86_intercept_hlt,
+ x86_intercept_invlpg,
+ x86_intercept_invlpga,
+ x86_intercept_vmrun,
+ x86_intercept_vmload,
+ x86_intercept_vmsave,
+ x86_intercept_vmmcall,
+ x86_intercept_stgi,
+ x86_intercept_clgi,
+ x86_intercept_skinit,
+ x86_intercept_rdtscp,
+ x86_intercept_icebp,
+ x86_intercept_wbinvd,
+ x86_intercept_monitor,
+ x86_intercept_mwait,
+ x86_intercept_rdmsr,
+ x86_intercept_wrmsr,
+ x86_intercept_in,
+ x86_intercept_ins,
+ x86_intercept_out,
+ x86_intercept_outs,
+
+ nr_x86_intercepts
+};
+
+/* Host execution mode. */
+#if defined(CONFIG_X86_32)
+#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
+#elif defined(CONFIG_X86_64)
+#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
+#endif
+
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
+#define EMULATION_FAILED -1
+#define EMULATION_OK 0
+#define EMULATION_RESTART 1
+#define EMULATION_INTERCEPTED 2
+void init_decode_cache(struct x86_emulate_ctxt *ctxt);
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
+int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+ u16 tss_selector, int idt_index, int reason,
+ bool has_error_code, u32 error_code);
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
+void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
+void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
+
+#endif /* _ASM_X86_KVM_X86_EMULATE_H */
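For context, the decode/execute entry points declared above are typically driven in a small loop; the sketch below is a simplified, hypothetical caller (not the in-tree KVM driver code) showing how the EMULATION_* results are meant to be consumed.

/* Simplified sketch of a caller driving the emulator once. */
static int example_run_emulator(struct x86_emulate_ctxt *ctxt,
				void *insn, int insn_len)
{
	int rc;

	init_decode_cache(ctxt);
	rc = x86_decode_insn(ctxt, insn, insn_len);
	if (rc != EMULATION_OK)
		return rc;		/* EMULATION_FAILED */

	rc = x86_emulate_insn(ctxt);
	if (rc == EMULATION_RESTART)
		return rc;	/* e.g. a string op: call again to continue */
	if (rc == EMULATION_INTERCEPTED)
		return rc;	/* a nested hypervisor claimed the instruction */
	return rc;		/* EMULATION_OK or EMULATION_FAILED */
}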
diff --git a/kernel/arch/x86/include/asm/kvm_guest.h b/kernel/arch/x86/include/asm/kvm_guest.h
new file mode 100644
index 000000000..a92b1763c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kvm_guest.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_KVM_GUEST_H
+#define _ASM_X86_KVM_GUEST_H
+
+int kvm_setup_vsyscall_timeinfo(void);
+
+#endif /* _ASM_X86_KVM_GUEST_H */
diff --git a/kernel/arch/x86/include/asm/kvm_host.h b/kernel/arch/x86/include/asm/kvm_host.h
new file mode 100644
index 000000000..41b06fca3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kvm_host.h
@@ -0,0 +1,1185 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ *
+ * This header defines architecture specific interfaces, x86 version
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef _ASM_X86_KVM_HOST_H
+#define _ASM_X86_KVM_HOST_H
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/mmu_notifier.h>
+#include <linux/tracepoint.h>
+#include <linux/cpumask.h>
+#include <linux/irq_work.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+#include <linux/perf_event.h>
+#include <linux/pvclock_gtod.h>
+#include <linux/clocksource.h>
+
+#include <asm/pvclock-abi.h>
+#include <asm/desc.h>
+#include <asm/mtrr.h>
+#include <asm/msr-index.h>
+#include <asm/asm.h>
+
+#define KVM_MAX_VCPUS 255
+#define KVM_SOFT_MAX_VCPUS 160
+#define KVM_USER_MEM_SLOTS 509
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 3
+#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+
+#define KVM_PIO_PAGE_OFFSET 1
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+
+#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
+
+#define CR0_RESERVED_BITS \
+ (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
+ | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
+ | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
+
+#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
+#define CR3_PCID_INVD BIT_64(63)
+#define CR4_RESERVED_BITS \
+ (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
+ | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
+ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
+ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
+ | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))
+
+#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
+
+
+
+#define INVALID_PAGE (~(hpa_t)0)
+#define VALID_PAGE(x) ((x) != INVALID_PAGE)
+
+#define UNMAPPED_GVA (~(gpa_t)0)
+
+/* KVM Hugepage definitions for x86 */
+#define KVM_NR_PAGE_SIZES 3
+#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
+#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
+#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
+
+static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
+{
+ /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
+ return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+ (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+}
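A quick worked example of these definitions, assuming the usual PAGE_SHIFT of 12:

/*
 * KVM_HPAGE_GFN_SHIFT(2) = 9,  KVM_HPAGE_SIZE(2) = 2 MiB,  KVM_PAGES_PER_HPAGE(2) = 512
 * KVM_HPAGE_GFN_SHIFT(3) = 18, KVM_HPAGE_SIZE(3) = 1 GiB,  KVM_PAGES_PER_HPAGE(3) = 262144
 * so gfn_to_index(gfn, base_gfn, 2) is the 4 KiB-page offset of gfn within
 * the 2 MiB region that starts at base_gfn.
 */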
+
+#define KVM_PERMILLE_MMU_PAGES 20
+#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MMU_HASH_SHIFT 10
+#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
+#define KVM_MIN_FREE_MMU_PAGES 5
+#define KVM_REFILL_PAGES 25
+#define KVM_MAX_CPUID_ENTRIES 80
+#define KVM_NR_FIXED_MTRR_REGION 88
+#define KVM_NR_VAR_MTRR 8
+
+#define ASYNC_PF_PER_VCPU 64
+
+enum kvm_reg {
+ VCPU_REGS_RAX = 0,
+ VCPU_REGS_RCX = 1,
+ VCPU_REGS_RDX = 2,
+ VCPU_REGS_RBX = 3,
+ VCPU_REGS_RSP = 4,
+ VCPU_REGS_RBP = 5,
+ VCPU_REGS_RSI = 6,
+ VCPU_REGS_RDI = 7,
+#ifdef CONFIG_X86_64
+ VCPU_REGS_R8 = 8,
+ VCPU_REGS_R9 = 9,
+ VCPU_REGS_R10 = 10,
+ VCPU_REGS_R11 = 11,
+ VCPU_REGS_R12 = 12,
+ VCPU_REGS_R13 = 13,
+ VCPU_REGS_R14 = 14,
+ VCPU_REGS_R15 = 15,
+#endif
+ VCPU_REGS_RIP,
+ NR_VCPU_REGS
+};
+
+enum kvm_reg_ex {
+ VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+ VCPU_EXREG_CR3,
+ VCPU_EXREG_RFLAGS,
+ VCPU_EXREG_SEGMENTS,
+};
+
+enum {
+ VCPU_SREG_ES,
+ VCPU_SREG_CS,
+ VCPU_SREG_SS,
+ VCPU_SREG_DS,
+ VCPU_SREG_FS,
+ VCPU_SREG_GS,
+ VCPU_SREG_TR,
+ VCPU_SREG_LDTR,
+};
+
+#include <asm/kvm_emulate.h>
+
+#define KVM_NR_MEM_OBJS 40
+
+#define KVM_NR_DB_REGS 4
+
+#define DR6_BD (1 << 13)
+#define DR6_BS (1 << 14)
+#define DR6_RTM (1 << 16)
+#define DR6_FIXED_1 0xfffe0ff0
+#define DR6_INIT 0xffff0ff0
+#define DR6_VOLATILE 0x0001e00f
+
+#define DR7_BP_EN_MASK 0x000000ff
+#define DR7_GE (1 << 9)
+#define DR7_GD (1 << 13)
+#define DR7_FIXED_1 0x00000400
+#define DR7_VOLATILE 0xffff2bff
+
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
+
+/* apic attention bits */
+#define KVM_APIC_CHECK_VAPIC 0
+/*
+ * The following bit is set with PV-EOI, unset on EOI.
+ * We detect PV-EOI changes by guest by comparing
+ * this bit with PV-EOI in guest memory.
+ * See the implementation in apic_update_pv_eoi.
+ */
+#define KVM_APIC_PV_EOI_PENDING 1
+
+/*
+ * We don't want allocation failures within the mmu code, so we preallocate
+ * enough memory for a single page fault in a cache.
+ */
+struct kvm_mmu_memory_cache {
+ int nobjs;
+ void *objects[KVM_NR_MEM_OBJS];
+};
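A hedged sketch of how such a pre-filled cache is consumed (the helper name is illustrative; the actual fill and allocation routines live in mmu.c, not in this header):

/* Sketch: pop one preallocated object from the cache. */
static inline void *example_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	BUG_ON(!mc->nobjs);
	return mc->objects[--mc->nobjs];
}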
+
+/*
+ * kvm_mmu_page_role, below, is defined as:
+ *
+ * bits 0:3 - page table level for this shadow (1-4)
+ * bit    4 - whether the guest uses PAE paging (cr4_pae)
+ * bits 5:6 - page table quadrant for 2-level guests
+ * bit   13 - direct mapping of virtual to physical mapping at gfn
+ *            used for real mode and two-dimensional paging
+ * bits 14:16 - common access permissions for all ptes in this shadow page
+ */
+union kvm_mmu_page_role {
+ unsigned word;
+ struct {
+ unsigned level:4;
+ unsigned cr4_pae:1;
+ unsigned quadrant:2;
+ unsigned pad_for_nice_hex_output:6;
+ unsigned direct:1;
+ unsigned access:3;
+ unsigned invalid:1;
+ unsigned nxe:1;
+ unsigned cr0_wp:1;
+ unsigned smep_andnot_wp:1;
+ unsigned smap_andnot_wp:1;
+ };
+};
+
+struct kvm_mmu_page {
+ struct list_head link;
+ struct hlist_node hash_link;
+
+ /*
+ * The following two entries are used to key the shadow page in the
+ * hash table.
+ */
+ gfn_t gfn;
+ union kvm_mmu_page_role role;
+
+ u64 *spt;
+ /* hold the gfn of each spte inside spt */
+ gfn_t *gfns;
+ bool unsync;
+ int root_count; /* Currently serving as active root */
+ unsigned int unsync_children;
+ unsigned long parent_ptes; /* Reverse mapping for parent_pte */
+
+ /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
+ unsigned long mmu_valid_gen;
+
+ DECLARE_BITMAP(unsync_child_bitmap, 512);
+
+#ifdef CONFIG_X86_32
+ /*
+ * Used out of the mmu-lock to avoid reading spte values while an
+ * update is in progress; see the comments in __get_spte_lockless().
+ */
+ int clear_spte_count;
+#endif
+
+ /* Number of writes since the last time traversal visited this page. */
+ int write_flooding_count;
+};
+
+struct kvm_pio_request {
+ unsigned long count;
+ int in;
+ int port;
+ int size;
+};
+
+/*
+ * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
+ * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
+ * mode.
+ */
+struct kvm_mmu {
+ void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
+ unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+ u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
+ int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+ bool prefault);
+ void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault);
+ gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
+ struct x86_exception *exception);
+ gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+ struct x86_exception *exception);
+ int (*sync_page)(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *sp);
+ void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+ void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ u64 *spte, const void *pte);
+ hpa_t root_hpa;
+ int root_level;
+ int shadow_root_level;
+ union kvm_mmu_page_role base_role;
+ bool direct_map;
+
+ /*
+ * Bitmap; bit set = permission fault
+ * Byte index: page fault error code [4:1]
+ * Bit index: pte permissions in ACC_* format
+ */
+ u8 permissions[16];
+
+ u64 *pae_root;
+ u64 *lm_root;
+ u64 rsvd_bits_mask[2][4];
+ u64 bad_mt_xwr;
+
+ /*
+ * Bitmap: bit set = last pte in walk
+ * index[0:1]: level (zero-based)
+ * index[2]: pte.ps
+ */
+ u8 last_pte_bitmap;
+
+ bool nx;
+
+ u64 pdptrs[4]; /* pae */
+};
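The permissions[] encoding documented above boils down to a single table lookup at fault time; ignoring the SMEP/SMAP refinements, the check is conceptually along these lines (a sketch, not the exact in-tree permission_fault() helper):

/* Sketch: would this pte_access/error-code combination fault? */
static inline bool example_permission_fault(struct kvm_mmu *mmu,
					    unsigned pte_access, unsigned pfec)
{
	/* byte index = error code bits [4:1], bit index = ACC_* permissions */
	return (mmu->permissions[(pfec >> 1) & 0xf] >> pte_access) & 1;
}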
+
+enum pmc_type {
+ KVM_PMC_GP = 0,
+ KVM_PMC_FIXED,
+};
+
+struct kvm_pmc {
+ enum pmc_type type;
+ u8 idx;
+ u64 counter;
+ u64 eventsel;
+ struct perf_event *perf_event;
+ struct kvm_vcpu *vcpu;
+};
+
+struct kvm_pmu {
+ unsigned nr_arch_gp_counters;
+ unsigned nr_arch_fixed_counters;
+ unsigned available_event_types;
+ u64 fixed_ctr_ctrl;
+ u64 global_ctrl;
+ u64 global_status;
+ u64 global_ovf_ctrl;
+ u64 counter_bitmask[2];
+ u64 global_ctrl_mask;
+ u64 reserved_bits;
+ u8 version;
+ struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+ struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
+ struct irq_work irq_work;
+ u64 reprogram_pmi;
+};
+
+enum {
+ KVM_DEBUGREG_BP_ENABLED = 1,
+ KVM_DEBUGREG_WONT_EXIT = 2,
+ KVM_DEBUGREG_RELOAD = 4,
+};
+
+struct kvm_vcpu_arch {
+ /*
+ * rip and regs accesses must go through
+ * kvm_{register,rip}_{read,write} functions.
+ */
+ unsigned long regs[NR_VCPU_REGS];
+ u32 regs_avail;
+ u32 regs_dirty;
+
+ unsigned long cr0;
+ unsigned long cr0_guest_owned_bits;
+ unsigned long cr2;
+ unsigned long cr3;
+ unsigned long cr4;
+ unsigned long cr4_guest_owned_bits;
+ unsigned long cr8;
+ u32 hflags;
+ u64 efer;
+ u64 apic_base;
+ struct kvm_lapic *apic; /* kernel irqchip context */
+ unsigned long apic_attention;
+ int32_t apic_arb_prio;
+ int mp_state;
+ u64 ia32_misc_enable_msr;
+ bool tpr_access_reporting;
+ u64 ia32_xss;
+
+ /*
+ * Paging state of the vcpu
+ *
+ * If the vcpu runs in guest mode with two level paging this still saves
+ * the paging mode of the l1 guest. This context is always used to
+ * handle faults.
+ */
+ struct kvm_mmu mmu;
+
+ /*
+ * Paging state of an L2 guest (used for nested npt)
+ *
+ * This context will save all necessary information to walk page tables
+	 * of an L2 guest. This context is only initialized for page table
+ * walking and not for faulting since we never handle l2 page faults on
+ * the host.
+ */
+ struct kvm_mmu nested_mmu;
+
+ /*
+ * Pointer to the mmu context currently used for
+ * gva_to_gpa translations.
+ */
+ struct kvm_mmu *walk_mmu;
+
+ struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
+ struct kvm_mmu_memory_cache mmu_page_cache;
+ struct kvm_mmu_memory_cache mmu_page_header_cache;
+
+ struct fpu guest_fpu;
+ bool eager_fpu;
+ u64 xcr0;
+ u64 guest_supported_xcr0;
+ u32 guest_xstate_size;
+
+ struct kvm_pio_request pio;
+ void *pio_data;
+
+ u8 event_exit_inst_len;
+
+ struct kvm_queued_exception {
+ bool pending;
+ bool has_error_code;
+ bool reinject;
+ u8 nr;
+ u32 error_code;
+ } exception;
+
+ struct kvm_queued_interrupt {
+ bool pending;
+ bool soft;
+ u8 nr;
+ } interrupt;
+
+ int halt_request; /* real mode on Intel only */
+
+ int cpuid_nent;
+ struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+
+ int maxphyaddr;
+
+ /* emulate context */
+
+ struct x86_emulate_ctxt emulate_ctxt;
+ bool emulate_regs_need_sync_to_vcpu;
+ bool emulate_regs_need_sync_from_vcpu;
+ int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
+
+ gpa_t time;
+ struct pvclock_vcpu_time_info hv_clock;
+ unsigned int hw_tsc_khz;
+ struct gfn_to_hva_cache pv_time;
+ bool pv_time_enabled;
+ /* set guest stopped flag in pvclock flags field */
+ bool pvclock_set_guest_stopped_request;
+
+ struct {
+ u64 msr_val;
+ u64 last_steal;
+ u64 accum_steal;
+ struct gfn_to_hva_cache stime;
+ struct kvm_steal_time steal;
+ } st;
+
+ u64 last_guest_tsc;
+ u64 last_host_tsc;
+ u64 tsc_offset_adjustment;
+ u64 this_tsc_nsec;
+ u64 this_tsc_write;
+ u64 this_tsc_generation;
+ bool tsc_catchup;
+ bool tsc_always_catchup;
+ s8 virtual_tsc_shift;
+ u32 virtual_tsc_mult;
+ u32 virtual_tsc_khz;
+ s64 ia32_tsc_adjust_msr;
+
+ atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
+ unsigned nmi_pending; /* NMI queued after currently running handler */
+ bool nmi_injected; /* Trying to inject an NMI this entry */
+
+ struct mtrr_state_type mtrr_state;
+ u64 pat;
+
+ unsigned switch_db_regs;
+ unsigned long db[KVM_NR_DB_REGS];
+ unsigned long dr6;
+ unsigned long dr7;
+ unsigned long eff_db[KVM_NR_DB_REGS];
+ unsigned long guest_debug_dr7;
+
+ u64 mcg_cap;
+ u64 mcg_status;
+ u64 mcg_ctl;
+ u64 *mce_banks;
+
+ /* Cache MMIO info */
+ u64 mmio_gva;
+ unsigned access;
+ gfn_t mmio_gfn;
+ u64 mmio_gen;
+
+ struct kvm_pmu pmu;
+
+ /* used for guest single stepping over the given code position */
+ unsigned long singlestep_rip;
+
+ /* fields used by HYPER-V emulation */
+ u64 hv_vapic;
+
+ cpumask_var_t wbinvd_dirty_mask;
+
+ unsigned long last_retry_eip;
+ unsigned long last_retry_addr;
+
+ struct {
+ bool halted;
+ gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+ struct gfn_to_hva_cache data;
+ u64 msr_val;
+ u32 id;
+ bool send_user_only;
+ } apf;
+
+ /* OSVW MSRs (AMD only) */
+ struct {
+ u64 length;
+ u64 status;
+ } osvw;
+
+ struct {
+ u64 msr_val;
+ struct gfn_to_hva_cache data;
+ } pv_eoi;
+
+ /*
+	 * Indicates whether the access faulted on its page table in the
+	 * guest; it is set while fixing a page fault and is used to detect
+	 * unhandleable instructions.
+ */
+ bool write_fault_to_shadow_pgtable;
+
+ /* set at EPT violation at this point */
+ unsigned long exit_qualification;
+
+ /* pv related host specific info */
+ struct {
+ bool pv_unhalted;
+ } pv;
+};
+
+struct kvm_lpage_info {
+ int write_count;
+};
+
+struct kvm_arch_memory_slot {
+ unsigned long *rmap[KVM_NR_PAGE_SIZES];
+ struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+};
+
+/*
+ * As the mode we use the number of bits allocated in the LDR for the
+ * logical processor ID. It happens that these are all powers of two.
+ * This makes it very easy to detect cases where the APICs are
+ * configured for multiple modes; in that case, we cannot use the map and
+ * hence cannot use kvm_irq_delivery_to_apic_fast either.
+ */
+#define KVM_APIC_MODE_XAPIC_CLUSTER 4
+#define KVM_APIC_MODE_XAPIC_FLAT 8
+#define KVM_APIC_MODE_X2APIC 16
+
+struct kvm_apic_map {
+ struct rcu_head rcu;
+ u8 mode;
+ struct kvm_lapic *phys_map[256];
+ /* first index is cluster id second is cpu id in a cluster */
+ struct kvm_lapic *logical_map[16][16];
+};
+
+struct kvm_arch {
+ unsigned int n_used_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+ unsigned int n_max_mmu_pages;
+ unsigned int indirect_shadow_pages;
+ unsigned long mmu_valid_gen;
+ struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ /*
+ * Hash table of struct kvm_mmu_page.
+ */
+ struct list_head active_mmu_pages;
+ struct list_head zapped_obsolete_pages;
+
+ struct list_head assigned_dev_head;
+ struct iommu_domain *iommu_domain;
+ bool iommu_noncoherent;
+#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
+ atomic_t noncoherent_dma_count;
+ struct kvm_pic *vpic;
+ struct kvm_ioapic *vioapic;
+ struct kvm_pit *vpit;
+ atomic_t vapics_in_nmi_mode;
+ struct mutex apic_map_lock;
+ struct kvm_apic_map *apic_map;
+
+ unsigned int tss_addr;
+ bool apic_access_page_done;
+
+ gpa_t wall_clock;
+
+ bool ept_identity_pagetable_done;
+ gpa_t ept_identity_map_addr;
+
+ unsigned long irq_sources_bitmap;
+ s64 kvmclock_offset;
+ raw_spinlock_t tsc_write_lock;
+ u64 last_tsc_nsec;
+ u64 last_tsc_write;
+ u32 last_tsc_khz;
+ u64 cur_tsc_nsec;
+ u64 cur_tsc_write;
+ u64 cur_tsc_offset;
+ u64 cur_tsc_generation;
+ int nr_vcpus_matched_tsc;
+
+ spinlock_t pvclock_gtod_sync_lock;
+ bool use_master_clock;
+ u64 master_kernel_ns;
+ cycle_t master_cycle_now;
+ struct delayed_work kvmclock_update_work;
+ struct delayed_work kvmclock_sync_work;
+
+ struct kvm_xen_hvm_config xen_hvm_config;
+
+ /* reads protected by irq_srcu, writes by irq_lock */
+ struct hlist_head mask_notifier_list;
+
+ /* fields used by HYPER-V emulation */
+ u64 hv_guest_os_id;
+ u64 hv_hypercall;
+ u64 hv_tsc_page;
+
+ #ifdef CONFIG_KVM_MMU_AUDIT
+ int audit_point;
+ #endif
+
+ bool boot_vcpu_runs_old_kvmclock;
+};
+
+struct kvm_vm_stat {
+ u32 mmu_shadow_zapped;
+ u32 mmu_pte_write;
+ u32 mmu_pte_updated;
+ u32 mmu_pde_zapped;
+ u32 mmu_flooded;
+ u32 mmu_recycled;
+ u32 mmu_cache_miss;
+ u32 mmu_unsync;
+ u32 remote_tlb_flush;
+ u32 lpages;
+};
+
+struct kvm_vcpu_stat {
+ u32 pf_fixed;
+ u32 pf_guest;
+ u32 tlb_flush;
+ u32 invlpg;
+
+ u32 exits;
+ u32 io_exits;
+ u32 mmio_exits;
+ u32 signal_exits;
+ u32 irq_window_exits;
+ u32 nmi_window_exits;
+ u32 halt_exits;
+ u32 halt_successful_poll;
+ u32 halt_wakeup;
+ u32 request_irq_exits;
+ u32 irq_exits;
+ u32 host_state_reload;
+ u32 efer_reload;
+ u32 fpu_reload;
+ u32 insn_emulation;
+ u32 insn_emulation_fail;
+ u32 hypercalls;
+ u32 irq_injections;
+ u32 nmi_injections;
+};
+
+struct x86_instruction_info;
+
+struct msr_data {
+ bool host_initiated;
+ u32 index;
+ u64 data;
+};
+
+struct kvm_lapic_irq {
+ u32 vector;
+ u32 delivery_mode;
+ u32 dest_mode;
+ u32 level;
+ u32 trig_mode;
+ u32 shorthand;
+ u32 dest_id;
+};
+
+struct kvm_x86_ops {
+ int (*cpu_has_kvm_support)(void); /* __init */
+ int (*disabled_by_bios)(void); /* __init */
+ int (*hardware_enable)(void);
+ void (*hardware_disable)(void);
+ void (*check_processor_compatibility)(void *rtn);
+ int (*hardware_setup)(void); /* __init */
+ void (*hardware_unsetup)(void); /* __exit */
+ bool (*cpu_has_accelerated_tpr)(void);
+ void (*cpuid_update)(struct kvm_vcpu *vcpu);
+
+ /* Create, but do not attach this VCPU */
+ struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
+ void (*vcpu_free)(struct kvm_vcpu *vcpu);
+ void (*vcpu_reset)(struct kvm_vcpu *vcpu);
+
+ void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
+ void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+ void (*vcpu_put)(struct kvm_vcpu *vcpu);
+
+ void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
+ int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+ int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
+ void (*get_segment)(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg);
+ int (*get_cpl)(struct kvm_vcpu *vcpu);
+ void (*set_segment)(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg);
+ void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
+ void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
+ void (*decache_cr3)(struct kvm_vcpu *vcpu);
+ void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
+ void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
+ void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
+ int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
+ void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
+ void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ u64 (*get_dr6)(struct kvm_vcpu *vcpu);
+ void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
+ void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
+ void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
+ void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
+ unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
+ void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+ void (*fpu_activate)(struct kvm_vcpu *vcpu);
+ void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
+
+ void (*tlb_flush)(struct kvm_vcpu *vcpu);
+
+ void (*run)(struct kvm_vcpu *vcpu);
+ int (*handle_exit)(struct kvm_vcpu *vcpu);
+ void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+ void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
+ u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
+ void (*patch_hypercall)(struct kvm_vcpu *vcpu,
+ unsigned char *hypercall_addr);
+ void (*set_irq)(struct kvm_vcpu *vcpu);
+ void (*set_nmi)(struct kvm_vcpu *vcpu);
+ void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
+ bool has_error_code, u32 error_code,
+ bool reinject);
+ void (*cancel_injection)(struct kvm_vcpu *vcpu);
+ int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
+ int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+ bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
+ void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
+ void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
+ void (*enable_irq_window)(struct kvm_vcpu *vcpu);
+ void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+ int (*vm_has_apicv)(struct kvm *kvm);
+ void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
+ void (*hwapic_isr_update)(struct kvm *kvm, int isr);
+ void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
+ void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
+ void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
+ void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
+ int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
+ int (*get_tdp_level)(void);
+ u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+ int (*get_lpage_level)(void);
+ bool (*rdtscp_supported)(void);
+ bool (*invpcid_supported)(void);
+ void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
+
+ void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
+
+ void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
+
+ bool (*has_wbinvd_exit)(void);
+
+ void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
+ u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
+ void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+
+ u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
+ u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
+
+ void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+ int (*check_intercept)(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage);
+ void (*handle_external_intr)(struct kvm_vcpu *vcpu);
+ bool (*mpx_supported)(void);
+ bool (*xsaves_supported)(void);
+
+ int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+
+ void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
+
+ /*
+ * Arch-specific dirty logging hooks. These hooks are only supposed to
+ * be valid if the specific arch has hardware-accelerated dirty logging
+ * mechanism. Currently only for PML on VMX.
+ *
+ * - slot_enable_log_dirty:
+ * called when enabling log dirty mode for the slot.
+ * - slot_disable_log_dirty:
+ * called when disabling log dirty mode for the slot.
+ * also called when slot is created with log dirty disabled.
+ * - flush_log_dirty:
+ * called before reporting dirty_bitmap to userspace.
+ * - enable_log_dirty_pt_masked:
+ * called when reenabling log dirty for the GFNs in the mask after
+ * corresponding bits are cleared in slot->dirty_bitmap.
+ */
+ void (*slot_enable_log_dirty)(struct kvm *kvm,
+ struct kvm_memory_slot *slot);
+ void (*slot_disable_log_dirty)(struct kvm *kvm,
+ struct kvm_memory_slot *slot);
+ void (*flush_log_dirty)(struct kvm *kvm);
+ void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t offset, unsigned long mask);
+};
+
+struct kvm_arch_async_pf {
+ u32 token;
+ gfn_t gfn;
+ unsigned long cr3;
+ bool direct_map;
+};
+
+extern struct kvm_x86_ops *kvm_x86_ops;
+
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+ s64 adjustment)
+{
+ kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+ kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
+}
+
+int kvm_mmu_module_init(void);
+void kvm_mmu_module_exit(void);
+
+void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
+int kvm_mmu_create(struct kvm_vcpu *vcpu);
+void kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+ u64 dirty_mask, u64 nx_mask, u64 x_mask);
+
+void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
+void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *memslot);
+void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
+void kvm_mmu_zap_all(struct kvm *kvm);
+void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+
+int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
+
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const void *val, int bytes);
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
+
+struct kvm_irq_mask_notifier {
+ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
+ int irq;
+ struct hlist_node link;
+};
+
+void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+ struct kvm_irq_mask_notifier *kimn);
+void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ bool mask);
+
+extern bool tdp_enabled;
+
+u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
+
+/* control of guest tsc rate supported? */
+extern bool kvm_has_tsc_control;
+/* minimum supported tsc_khz for guests */
+extern u32 kvm_min_guest_tsc_khz;
+/* maximum supported tsc_khz for guests */
+extern u32 kvm_max_guest_tsc_khz;
+
+enum emulation_result {
+ EMULATE_DONE, /* no further processing */
+ EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */
+ EMULATE_FAIL, /* can't emulate this instruction */
+};
+
+#define EMULTYPE_NO_DECODE (1 << 0)
+#define EMULTYPE_TRAP_UD (1 << 1)
+#define EMULTYPE_SKIP (1 << 2)
+#define EMULTYPE_RETRY (1 << 3)
+#define EMULTYPE_NO_REEXECUTE (1 << 4)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+ int emulation_type, void *insn, int insn_len);
+
+static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+ int emulation_type)
+{
+ return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+
+void kvm_enable_efer_bits(u64);
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
+int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
+int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+
+struct x86_emulate_ctxt;
+
+int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
+void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
+
+void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
+
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
+ int reason, bool has_error_code, u32 error_code);
+
+int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
+int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
+int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
+
+int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
+
+unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
+void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+bool kvm_rdpmc(struct kvm_vcpu *vcpu);
+
+void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
+void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
+int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ gfn_t gfn, void *data, int offset, int len,
+ u32 access);
+bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
+bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
+
+static inline int __kvm_irq_line_state(unsigned long *irq_state,
+ int irq_source_id, int level)
+{
+ /* Logical OR for level trig interrupt */
+ if (level)
+ __set_bit(irq_source_id, irq_state);
+ else
+ __clear_bit(irq_source_id, irq_state);
+
+ return !!(*irq_state);
+}
+
+int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
+void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
+
+void kvm_inject_nmi(struct kvm_vcpu *vcpu);
+
+int fx_init(struct kvm_vcpu *vcpu);
+
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *new, int bytes);
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
+int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
+void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
+int kvm_mmu_load(struct kvm_vcpu *vcpu);
+void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+ struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+ struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+ struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+ struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+ struct x86_exception *exception);
+
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
+
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+ void *insn, int insn_len);
+void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
+
+void kvm_enable_tdp(void);
+void kvm_disable_tdp(void);
+
+static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+ struct x86_exception *exception)
+{
+ return gpa;
+}
+
+static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+{
+ struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
+
+ return (struct kvm_mmu_page *)page_private(page);
+}
+
+static inline u16 kvm_read_ldt(void)
+{
+ u16 ldt;
+ asm("sldt %0" : "=g"(ldt));
+ return ldt;
+}
+
+static inline void kvm_load_ldt(u16 sel)
+{
+ asm("lldt %0" : : "rm"(sel));
+}
+
+#ifdef CONFIG_X86_64
+static inline unsigned long read_msr(unsigned long msr)
+{
+ u64 value;
+
+ rdmsrl(msr, value);
+ return value;
+}
+#endif
+
+static inline u32 get_rdx_init_val(void)
+{
+ return 0x600; /* P6 family */
+}
+
+static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+{
+ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+}
+
+static inline u64 get_canonical(u64 la)
+{
+ return ((int64_t)la << 16) >> 16;
+}
+
+static inline bool is_noncanonical_address(u64 la)
+{
+#ifdef CONFIG_X86_64
+ return get_canonical(la) != la;
+#else
+ return false;
+#endif
+}
+
+#define TSS_IOPB_BASE_OFFSET 0x66
+#define TSS_BASE_SIZE 0x68
+#define TSS_IOPB_SIZE (65536 / 8)
+#define TSS_REDIRECTION_SIZE (256 / 8)
+#define RMODE_TSS_SIZE \
+ (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+
+enum {
+ TASK_SWITCH_CALL = 0,
+ TASK_SWITCH_IRET = 1,
+ TASK_SWITCH_JMP = 2,
+ TASK_SWITCH_GATE = 3,
+};
+
+#define HF_GIF_MASK (1 << 0)
+#define HF_HIF_MASK (1 << 1)
+#define HF_VINTR_MASK (1 << 2)
+#define HF_NMI_MASK (1 << 3)
+#define HF_IRET_MASK (1 << 4)
+#define HF_GUEST_MASK (1 << 5) /* VCPU is in guest-mode */
+
+/*
+ * Hardware virtualization extension instructions may fault if a
+ * reboot turns off virtualization while processes are running.
+ * Trap the fault and ignore the instruction if that happens.
+ */
+asmlinkage void kvm_spurious_fault(void);
+
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
+ "666: " insn "\n\t" \
+ "668: \n\t" \
+ ".pushsection .fixup, \"ax\" \n" \
+ "667: \n\t" \
+ cleanup_insn "\n\t" \
+ "cmpb $0, kvm_rebooting \n\t" \
+ "jne 668b \n\t" \
+ __ASM_SIZE(push) " $666b \n\t" \
+ "call kvm_spurious_fault \n\t" \
+ ".popsection \n\t" \
+ _ASM_EXTABLE(666b, 667b)
+
+#define __kvm_handle_fault_on_reboot(insn) \
+ ____kvm_handle_fault_on_reboot(insn, "")
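In practice this wrapper goes around a single virtualization instruction inside inline asm, roughly as in the sketch below (vmxoff is shown for illustration; the real users hide it behind their own __ex()-style macros):

/* Sketch: execute VMXOFF, tolerating the fault a reboot can cause. */
static inline void example_vmxoff(void)
{
	asm volatile(__kvm_handle_fault_on_reboot("vmxoff") : : : "cc");
}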
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
+void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
+ unsigned long address);
+
+void kvm_define_shared_msr(unsigned index, u32 msr);
+int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+
+unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+ struct kvm_async_pf *work);
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
+int kvm_is_in_guest(void);
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+
+#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/kernel/arch/x86/include/asm/kvm_para.h b/kernel/arch/x86/include/asm/kvm_para.h
new file mode 100644
index 000000000..c1adf33fd
--- /dev/null
+++ b/kernel/arch/x86/include/asm/kvm_para.h
@@ -0,0 +1,137 @@
+#ifndef _ASM_X86_KVM_PARA_H
+#define _ASM_X86_KVM_PARA_H
+
+#include <asm/processor.h>
+#include <asm/alternative.h>
+#include <uapi/asm/kvm_para.h>
+
+extern void kvmclock_init(void);
+extern int kvm_register_clock(char *txt);
+
+#ifdef CONFIG_KVM_GUEST
+bool kvm_check_and_clear_guest_paused(void);
+#else
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_GUEST */
+
+#ifdef CONFIG_DEBUG_RODATA
+#define KVM_HYPERCALL \
+ ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
+#else
+/* On AMD processors, vmcall will generate a trap that we will
+ * then rewrite to the appropriate instruction.
+ */
+#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
+#endif
+
+/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
+ * instruction. The hypervisor may replace it with something else, but only
+ * these two instructions are guaranteed to be supported.
+ *
+ * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
+ * The hypercall number should be placed in rax and the return value will be
+ * placed in rax. No other registers will be clobbered unless explicitly
+ * noted by the particular hypercall.
+ */
+
+static inline long kvm_hypercall0(unsigned int nr)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr)
+ : "memory");
+ return ret;
+}
+
+static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1)
+ : "memory");
+ return ret;
+}
+
+static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
+ unsigned long p2)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2)
+ : "memory");
+ return ret;
+}
+
+static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+ : "memory");
+ return ret;
+}
+
+static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ long ret;
+ asm volatile(KVM_HYPERCALL
+ : "=a"(ret)
+ : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
+ : "memory");
+ return ret;
+}
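A guest would normally probe for the hypervisor first and then issue hypercalls by number, along these lines (KVM_HC_KICK_CPU comes from the generic uapi kvm_para.h; the surrounding helper is illustrative):

/* Sketch: kick a halted remote vCPU, if we are running under KVM. */
static inline void example_kick_vcpu(int apicid)
{
	if (kvm_para_available())
		kvm_hypercall2(KVM_HC_KICK_CPU, 0, apicid);
}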
+
+#ifdef CONFIG_KVM_GUEST
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
+void __init kvm_guest_init(void);
+void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wake(u32 token);
+u32 kvm_read_and_reset_pf_reason(void);
+extern void kvm_disable_steal_time(void);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init kvm_spinlock_init(void);
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#else /* CONFIG_KVM_GUEST */
+#define kvm_guest_init() do {} while (0)
+#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wake(T) do {} while(0)
+
+static inline bool kvm_para_available(void)
+{
+ return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+static inline u32 kvm_read_and_reset_pf_reason(void)
+{
+ return 0;
+}
+
+static inline void kvm_disable_steal_time(void)
+{
+ return;
+}
+#endif
+
+#endif /* _ASM_X86_KVM_PARA_H */
diff --git a/kernel/arch/x86/include/asm/lguest.h b/kernel/arch/x86/include/asm/lguest.h
new file mode 100644
index 000000000..3bbc07a57
--- /dev/null
+++ b/kernel/arch/x86/include/asm/lguest.h
@@ -0,0 +1,89 @@
+#ifndef _ASM_X86_LGUEST_H
+#define _ASM_X86_LGUEST_H
+
+#define GDT_ENTRY_LGUEST_CS 10
+#define GDT_ENTRY_LGUEST_DS 11
+#define LGUEST_CS (GDT_ENTRY_LGUEST_CS * 8)
+#define LGUEST_DS (GDT_ENTRY_LGUEST_DS * 8)
+
+#ifndef __ASSEMBLY__
+#include <asm/desc.h>
+
+#define GUEST_PL 1
+
+/* Page for Switcher text itself, then two pages per cpu */
+#define TOTAL_SWITCHER_PAGES (1 + 2 * nr_cpu_ids)
+
+/* Where we map the Switcher, in both Host and Guest. */
+extern unsigned long switcher_addr;
+
+/* Found in switcher.S */
+extern unsigned long default_idt_entries[];
+
+/* Declarations for definitions in arch/x86/lguest/head_32.S */
+extern char lguest_noirq_iret[];
+extern const char lgstart_cli[], lgend_cli[];
+extern const char lgstart_pushf[], lgend_pushf[];
+
+extern void lguest_iret(void);
+extern void lguest_init(void);
+
+struct lguest_regs {
+ /* Manually saved part. */
+ unsigned long eax, ebx, ecx, edx;
+ unsigned long esi, edi, ebp;
+ unsigned long gs;
+ unsigned long fs, ds, es;
+ unsigned long trapnum, errcode;
+ /* Trap pushed part */
+ unsigned long eip;
+ unsigned long cs;
+ unsigned long eflags;
+ unsigned long esp;
+ unsigned long ss;
+};
+
+/* This is a guest-specific page (mapped ro) into the guest. */
+struct lguest_ro_state {
+ /* Host information we need to restore when we switch back. */
+ u32 host_cr3;
+ struct desc_ptr host_idt_desc;
+ struct desc_ptr host_gdt_desc;
+ u32 host_sp;
+
+ /* Fields which are used when guest is running. */
+ struct desc_ptr guest_idt_desc;
+ struct desc_ptr guest_gdt_desc;
+ struct x86_hw_tss guest_tss;
+ struct desc_struct guest_idt[IDT_ENTRIES];
+ struct desc_struct guest_gdt[GDT_ENTRIES];
+};
+
+struct lg_cpu_arch {
+ /* The GDT entries copied into lguest_ro_state when running. */
+ struct desc_struct gdt[GDT_ENTRIES];
+
+ /* The IDT entries: some copied into lguest_ro_state when running. */
+ struct desc_struct idt[IDT_ENTRIES];
+
+ /* The address of the last guest-visible pagefault (ie. cr2). */
+ unsigned long last_pagefault;
+};
+
+static inline void lguest_set_ts(void)
+{
+ u32 cr0;
+
+ cr0 = read_cr0();
+ if (!(cr0 & 8))
+ write_cr0(cr0 | 8);
+}
+
+/* Full 4G segment descriptors, suitable for CS and DS. */
+#define FULL_EXEC_SEGMENT \
+ ((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff))
+#define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_LGUEST_H */
diff --git a/kernel/arch/x86/include/asm/lguest_hcall.h b/kernel/arch/x86/include/asm/lguest_hcall.h
new file mode 100644
index 000000000..ef01fef3e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/lguest_hcall.h
@@ -0,0 +1,75 @@
+/* Architecture specific portion of the lguest hypercalls */
+#ifndef _ASM_X86_LGUEST_HCALL_H
+#define _ASM_X86_LGUEST_HCALL_H
+
+#define LHCALL_FLUSH_ASYNC 0
+#define LHCALL_LGUEST_INIT 1
+#define LHCALL_SHUTDOWN 2
+#define LHCALL_NEW_PGTABLE 4
+#define LHCALL_FLUSH_TLB 5
+#define LHCALL_LOAD_IDT_ENTRY 6
+#define LHCALL_SET_STACK 7
+#define LHCALL_TS 8
+#define LHCALL_SET_CLOCKEVENT 9
+#define LHCALL_HALT 10
+#define LHCALL_SET_PMD 13
+#define LHCALL_SET_PTE 14
+#define LHCALL_SET_PGD 15
+#define LHCALL_LOAD_TLS 16
+#define LHCALL_LOAD_GDT_ENTRY 18
+#define LHCALL_SEND_INTERRUPTS 19
+
+#define LGUEST_TRAP_ENTRY 0x1F
+
+/* Argument number 3 to LHCALL_SHUTDOWN */
+#define LGUEST_SHUTDOWN_POWEROFF 1
+#define LGUEST_SHUTDOWN_RESTART 2
+
+#ifndef __ASSEMBLY__
+#include <asm/hw_irq.h>
+
+/*G:030
+ * But first, how does our Guest contact the Host to ask for privileged
+ * operations? There are two ways: the direct way is to make a "hypercall",
+ * to make requests of the Host Itself.
+ *
+ * Our hypercall mechanism uses the highest unused trap code (traps 32 and
+ * above are used by real hardware interrupts). Seventeen hypercalls are
+ * available: the hypercall number is put in the %eax register, and the
+ * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
+ * If a return value makes sense, it's returned in %eax.
+ *
+ * Grossly invalid calls result in Sudden Death at the hands of the vengeful
+ * Host, rather than returning failure. This reflects Winston Churchill's
+ * definition of a gentleman: "someone who is only rude intentionally".
+ */
+static inline unsigned long
+hcall(unsigned long call,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4)
+{
+ /* "int" is the Intel instruction to trigger a trap. */
+ asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+ /* The call in %eax (aka "a") might be overwritten */
+ : "=a"(call)
+ /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
+ : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
+ /* "memory" means this might write somewhere in memory.
+ * This isn't true for all calls, but it's safe to tell
+ * gcc that it might happen so it doesn't get clever. */
+ : "memory");
+ return call;
+}
+/*:*/
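As a usage illustration, a guest power-off path passes the hypercall number and its arguments straight through, roughly as below (the message string and the __pa() conversion mirror typical callers and are shown as an assumption, not as code from this patch):

/* Sketch: ask the Host to power the Guest off. */
static inline void example_lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
}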
+
+/* Can't use our min() macro here: needs to be a constant */
+#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
+
+#define LHCALL_RING_SIZE 64
+struct hcall_args {
+ /* These map directly onto eax/ebx/ecx/edx/esi in struct lguest_regs */
+ unsigned long arg0, arg1, arg2, arg3, arg4;
+};
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_X86_LGUEST_HCALL_H */
diff --git a/kernel/arch/x86/include/asm/linkage.h b/kernel/arch/x86/include/asm/linkage.h
new file mode 100644
index 000000000..79327e948
--- /dev/null
+++ b/kernel/arch/x86/include/asm/linkage.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_LINKAGE_H
+#define _ASM_X86_LINKAGE_H
+
+#include <linux/stringify.h>
+
+#undef notrace
+#define notrace __attribute__((no_instrument_function))
+
+#ifdef CONFIG_X86_32
+#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
+
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ *
+ * NOTE! On x86-64, all the arguments are in registers, so this
+ * only matters on a 32-bit kernel.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
+
+#endif /* CONFIG_X86_32 */
+
+#ifdef __ASSEMBLY__
+
+#define GLOBAL(name) \
+ .globl name; \
+ name:
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
+#define __ALIGN .p2align 4, 0x90
+#define __ALIGN_STR __stringify(__ALIGN)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_LINKAGE_H */
+
diff --git a/kernel/arch/x86/include/asm/livepatch.h b/kernel/arch/x86/include/asm/livepatch.h
new file mode 100644
index 000000000..2d29197bd
--- /dev/null
+++ b/kernel/arch/x86/include/asm/livepatch.h
@@ -0,0 +1,46 @@
+/*
+ * livepatch.h - x86-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_X86_LIVEPATCH_H
+#define _ASM_X86_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+#ifndef CC_USING_FENTRY
+ return 1;
+#endif
+ return 0;
+}
+int klp_write_module_reloc(struct module *mod, unsigned long type,
+ unsigned long loc, unsigned long value);
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->ip = ip;
+}
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/kernel/arch/x86/include/asm/local.h b/kernel/arch/x86/include/asm/local.h
new file mode 100644
index 000000000..4ad656084
--- /dev/null
+++ b/kernel/arch/x86/include/asm/local.h
@@ -0,0 +1,161 @@
+#ifndef _ASM_X86_LOCAL_H
+#define _ASM_X86_LOCAL_H
+
+#include <linux/percpu.h>
+
+#include <linux/atomic.h>
+#include <asm/asm.h>
+
+typedef struct {
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
+
+static inline void local_inc(local_t *l)
+{
+ asm volatile(_ASM_INC "%0"
+ : "+m" (l->a.counter));
+}
+
+static inline void local_dec(local_t *l)
+{
+ asm volatile(_ASM_DEC "%0"
+ : "+m" (l->a.counter));
+}
+
+static inline void local_add(long i, local_t *l)
+{
+ asm volatile(_ASM_ADD "%1,%0"
+ : "+m" (l->a.counter)
+ : "ir" (i));
+}
+
+static inline void local_sub(long i, local_t *l)
+{
+ asm volatile(_ASM_SUB "%1,%0"
+ : "+m" (l->a.counter)
+ : "ir" (i));
+}
+
+/**
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer to type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int local_sub_and_test(long i, local_t *l)
+{
+ GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
+}
+
+/**
+ * local_dec_and_test - decrement and test
+ * @l: pointer to type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int local_dec_and_test(local_t *l)
+{
+ GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
+}
+
+/**
+ * local_inc_and_test - increment and test
+ * @l: pointer to type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int local_inc_and_test(local_t *l)
+{
+ GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
+}
+
+/**
+ * local_add_negative - add and test if negative
+ * @i: integer value to add
+ * @l: pointer to type local_t
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int local_add_negative(long i, local_t *l)
+{
+ GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
+}
+
+/**
+ * local_add_return - add and return
+ * @i: integer value to add
+ * @l: pointer to type local_t
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static inline long local_add_return(long i, local_t *l)
+{
+ long __i = i;
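+ /* xadd exchanges and adds: afterwards "i" holds the old counter value,
+  * so old value + __i below is the counter's new value. */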
+ asm volatile(_ASM_XADD "%0, %1;"
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+}
+
+static inline long local_sub_return(long i, local_t *l)
+{
+ return local_add_return(-i, l);
+}
+
+#define local_inc_return(l) (local_add_return(1, l))
+#define local_dec_return(l) (local_sub_return(1, l))
+
+#define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+/* Always has a lock prefix */
+#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read((l)); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = local_cmpxchg((l), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+/* On x86_32, these are no better than the atomic variants.
+ * On x86-64 these are better than the atomic variants on SMP kernels
+ * because they don't use a lock prefix.
+ */
+#define __local_inc(l) local_inc(l)
+#define __local_dec(l) local_dec(l)
+#define __local_add(i, l) local_add((i), (l))
+#define __local_sub(i, l) local_sub((i), (l))
+
+#endif /* _ASM_X86_LOCAL_H */
diff --git a/kernel/arch/x86/include/asm/local64.h b/kernel/arch/x86/include/asm/local64.h
new file mode 100644
index 000000000..36c93b5cc
--- /dev/null
+++ b/kernel/arch/x86/include/asm/local64.h
@@ -0,0 +1 @@
+#include <asm-generic/local64.h>
diff --git a/kernel/arch/x86/include/asm/mach_timer.h b/kernel/arch/x86/include/asm/mach_timer.h
new file mode 100644
index 000000000..88d0c3c74
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mach_timer.h
@@ -0,0 +1,48 @@
+/*
+ * Machine specific calibrate_tsc() for generic.
+ * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+/* ------ Calibrate the TSC -------
+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
+ * output busy loop as low as possible. We avoid reading the CTC registers
+ * directly because of the awkward 8-bit access mechanism of the 82C54
+ * device.
+ */
+#ifndef _ASM_X86_MACH_DEFAULT_MACH_TIMER_H
+#define _ASM_X86_MACH_DEFAULT_MACH_TIMER_H
+
+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
+#define CALIBRATE_LATCH \
+ ((PIT_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
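+/* e.g. with PIT_TICK_RATE == 1193182 Hz: (1193182 * 30 + 500) / 1000 == 35795 ticks */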
+
+static inline void mach_prepare_counter(void)
+{
+ /* Set the Gate high, disable speaker */
+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+ /*
+ * Now let's take care of CTC channel 2
+ *
+ * Set the Gate high, program CTC channel 2 for mode 0,
+ * (interrupt on terminal count mode), binary count,
+ * load 5 * LATCH count, (LSB and MSB) to begin countdown.
+ *
+ * Some devices need a delay here.
+ */
+ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
+ outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
+ outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
+}
+
+static inline void mach_countup(unsigned long *count_p)
+{
+ unsigned long count = 0;
+ do {
+ count++;
+ } while ((inb_p(0x61) & 0x20) == 0);
+ *count_p = count;
+}
+
+#endif /* _ASM_X86_MACH_DEFAULT_MACH_TIMER_H */
diff --git a/kernel/arch/x86/include/asm/mach_traps.h b/kernel/arch/x86/include/asm/mach_traps.h
new file mode 100644
index 000000000..a01e7ec7d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mach_traps.h
@@ -0,0 +1,43 @@
+/*
+ * Machine specific NMI handling for generic.
+ * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H
+#define _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H
+
+#include <asm/mc146818rtc.h>
+
+#define NMI_REASON_PORT 0x61
+
+#define NMI_REASON_SERR 0x80
+#define NMI_REASON_IOCHK 0x40
+#define NMI_REASON_MASK (NMI_REASON_SERR | NMI_REASON_IOCHK)
+
+#define NMI_REASON_CLEAR_SERR 0x04
+#define NMI_REASON_CLEAR_IOCHK 0x08
+#define NMI_REASON_CLEAR_MASK 0x0f
+
+static inline unsigned char default_get_nmi_reason(void)
+{
+ return inb(NMI_REASON_PORT);
+}
+
+static inline void reassert_nmi(void)
+{
+ int old_reg = -1;
+
+ if (do_i_have_lock_cmos())
+ old_reg = current_lock_cmos_reg();
+ else
+ lock_cmos(0); /* register doesn't matter here */
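+ /*
+  * Bit 7 of port 0x70 is the NMI mask bit: selecting CMOS register 0x0f
+  * with the mask set (0x8f) and then clear (0x0f) re-enables NMI so a
+  * still-pending NMI is asserted again.
+  */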
+ outb(0x8f, 0x70);
+ inb(0x71); /* dummy */
+ outb(0x0f, 0x70);
+ inb(0x71); /* dummy */
+ if (old_reg >= 0)
+ outb(old_reg, 0x70);
+ else
+ unlock_cmos();
+}
+
+#endif /* _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H */
diff --git a/kernel/arch/x86/include/asm/math_emu.h b/kernel/arch/x86/include/asm/math_emu.h
new file mode 100644
index 000000000..031f6266f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/math_emu.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_X86_MATH_EMU_H
+#define _ASM_X86_MATH_EMU_H
+
+#include <asm/ptrace.h>
+#include <asm/vm86.h>
+
+/* This structure matches the layout of the data saved to the stack
+   following a device-not-present interrupt; part of it is saved
+   automatically by the 80386/80486.
+ */
+struct math_emu_info {
+ long ___orig_eip;
+ union {
+ struct pt_regs *regs;
+ struct kernel_vm86_regs *vm86;
+ };
+};
+#endif /* _ASM_X86_MATH_EMU_H */
diff --git a/kernel/arch/x86/include/asm/mc146818rtc.h b/kernel/arch/x86/include/asm/mc146818rtc.h
new file mode 100644
index 000000000..0f555cc31
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mc146818rtc.h
@@ -0,0 +1,103 @@
+/*
+ * Machine dependent access functions for RTC registers.
+ */
+#ifndef _ASM_X86_MC146818RTC_H
+#define _ASM_X86_MC146818RTC_H
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <linux/mc146818rtc.h>
+
+#ifndef RTC_PORT
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_ALWAYS_BCD 1 /* RTC register values are always in BCD */
+#endif
+
+#if defined(CONFIG_X86_32)
+/*
+ * This lock provides nmi access to the CMOS/RTC registers. It has some
+ * special properties. It is owned by a CPU and stores the index register
+ * currently being accessed (if owned). The idea here is that it works
+ * like a normal lock (normally). However, in an NMI, the NMI code will
+ * first check to see if its CPU owns the lock, meaning that the NMI
+ * interrupted during the read/write of the device. If it does, it goes ahead
+ * and performs the access and then restores the index register. If it does
+ * not, it locks normally.
+ *
+ * Note that since we are working with NMIs, we need this lock even in
+ * a non-SMP machine just to mark that the lock is owned.
+ *
+ * This only works with compare-and-swap. There is no other way to
+ * atomically claim the lock and set the owner.
+ */
+#include <linux/smp.h>
+extern volatile unsigned long cmos_lock;
+
+/*
+ * All of these below must be called with interrupts off, preempt
+ * disabled, etc.
+ */
+
+static inline void lock_cmos(unsigned char reg)
+{
+ unsigned long new;
+ new = ((smp_processor_id() + 1) << 8) | reg;
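+ /* owner (cpu + 1) goes in bits 15:8, the index register in bits 7:0;
+  * a value of 0 means the lock is free. */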
+ for (;;) {
+ if (cmos_lock) {
+ cpu_relax();
+ continue;
+ }
+ if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0)
+ return;
+ }
+}
+
+static inline void unlock_cmos(void)
+{
+ cmos_lock = 0;
+}
+
+static inline int do_i_have_lock_cmos(void)
+{
+ return (cmos_lock >> 8) == (smp_processor_id() + 1);
+}
+
+static inline unsigned char current_lock_cmos_reg(void)
+{
+ return cmos_lock & 0xff;
+}
+
+#define lock_cmos_prefix(reg) \
+ do { \
+ unsigned long cmos_flags; \
+ local_irq_save(cmos_flags); \
+ lock_cmos(reg)
+
+#define lock_cmos_suffix(reg) \
+ unlock_cmos(); \
+ local_irq_restore(cmos_flags); \
+ } while (0)
+#else
+#define lock_cmos_prefix(reg) do {} while (0)
+#define lock_cmos_suffix(reg) do {} while (0)
+#define lock_cmos(reg) do { } while (0)
+#define unlock_cmos() do { } while (0)
+#define do_i_have_lock_cmos() 0
+#define current_lock_cmos_reg() 0
+#endif
+
+/*
+ * All machines supported so far access the RTC index register via
+ * an ISA port access, but the way to access the data register differs ...
+ */
+#define CMOS_READ(addr) rtc_cmos_read(addr)
+#define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr)
+unsigned char rtc_cmos_read(unsigned char addr);
+void rtc_cmos_write(unsigned char val, unsigned char addr);
+
+extern int mach_set_rtc_mmss(const struct timespec *now);
+extern void mach_get_cmos_time(struct timespec *now);
+
+#define RTC_IRQ 8
+
+#endif /* _ASM_X86_MC146818RTC_H */
diff --git a/kernel/arch/x86/include/asm/mce.h b/kernel/arch/x86/include/asm/mce.h
new file mode 100644
index 000000000..1f5a86d51
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mce.h
@@ -0,0 +1,258 @@
+#ifndef _ASM_X86_MCE_H
+#define _ASM_X86_MCE_H
+
+#include <uapi/asm/mce.h>
+
+/*
+ * Machine Check support for x86
+ */
+
+/* MCG_CAP register defines */
+#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
+#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
+#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
+#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
+#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
+#define MCG_EXT_CNT_SHIFT 16
+#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
+#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_ELOG_P (1ULL<<26) /* Extended error log supported */
+
+/* MCG_STATUS register defines */
+#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+
+/* MCi_STATUS register defines */
+#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
+#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
+#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
+#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
+#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
+#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
+#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+
+/* AMD-specific bits */
+#define MCI_STATUS_DEFERRED (1ULL<<44) /* uncorrected error, handling deferred */
+#define MCI_STATUS_POISON (1ULL<<43) /* access to poisoned data detected */
+
+/*
+ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
+ * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
+ * errors to indicate that errors are being filtered by hardware.
+ * We should mask out bit 12 when looking for specific signatures
+ * of uncorrected errors - so the F bit is deliberately skipped
+ * in this #define.
+ */
+#define MCACOD 0xefff /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
+
+/* MCi_MISC register defines */
+#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
+#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
+#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
+#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
+#define MCI_MISC_ADDR_PHYS 2 /* physical address */
+#define MCI_MISC_ADDR_MEM 3 /* memory address */
+#define MCI_MISC_ADDR_GENERIC 7 /* generic */
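+/*
+ * MCI_MISC_ADDR_LSB (bits 5:0) is the least significant valid bit of the
+ * reported address; MCI_MISC_ADDR_MODE (bits 8:6) is one of the types above.
+ */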
+
+/* CTL2 register defines */
+#define MCI_CTL2_CMCI_EN (1ULL << 30)
+#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
+
+#define MCJ_CTX_MASK 3
+#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM 0 /* inject context: random */
+#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
+#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
+#define MCJ_EXCEPTION 0x8 /* raise as exception */
+#define MCJ_IRQ_BROADCAST 0x10 /* do IRQ broadcasting */
+
+#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
+
+/* Software defined banks */
+#define MCE_EXTENDED_BANK 128
+#define MCE_THERMAL_BANK (MCE_EXTENDED_BANK + 0)
+
+#define MCE_LOG_LEN 32
+#define MCE_LOG_SIGNATURE "MACHINECHECK"
+
+/*
+ * This structure contains all data related to the MCE log. Also
+ * carries a signature to make it easier to find from external
+ * debugging tools. Each entry is only valid when its finished flag
+ * is set.
+ */
+struct mce_log {
+ char signature[12]; /* "MACHINECHECK" */
+ unsigned len; /* = MCE_LOG_LEN */
+ unsigned next;
+ unsigned flags;
+ unsigned recordlen; /* length of struct mce */
+ struct mce entry[MCE_LOG_LEN];
+};
+
+struct mca_config {
+ bool dont_log_ce;
+ bool cmci_disabled;
+ bool ignore_ce;
+ bool disabled;
+ bool ser;
+ bool bios_cmci_threshold;
+ u8 banks;
+ s8 bootlog;
+ int tolerant;
+ int monarch_timeout;
+ int panic_timeout;
+ u32 rip_msr;
+};
+
+struct mce_vendor_flags {
+ __u64 overflow_recov : 1, /* cpuid_ebx(80000007) */
+ __reserved_0 : 63;
+};
+extern struct mce_vendor_flags mce_flags;
+
+extern struct mca_config mca_cfg;
+extern void mce_register_decode_chain(struct notifier_block *nb);
+extern void mce_unregister_decode_chain(struct notifier_block *nb);
+
+#include <linux/percpu.h>
+#include <linux/atomic.h>
+
+extern int mce_p5_enabled;
+
+#ifdef CONFIG_X86_MCE
+int mcheck_init(void);
+void mcheck_cpu_init(struct cpuinfo_x86 *c);
+void mcheck_vendor_init_severity(void);
+#else
+static inline int mcheck_init(void) { return 0; }
+static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
+static inline void mcheck_vendor_init_severity(void) {}
+#endif
+
+#ifdef CONFIG_X86_ANCIENT_MCE
+void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
+void winchip_mcheck_init(struct cpuinfo_x86 *c);
+static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
+#else
+static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void enable_p5_mce(void) {}
+#endif
+
+void mce_setup(struct mce *m);
+void mce_log(struct mce *m);
+DECLARE_PER_CPU(struct device *, mce_device);
+
+/*
+ * Maximum number of banks.
+ * This is the limit of the current register layout on
+ * Intel CPUs.
+ */
+#define MAX_NR_BANKS 32
+
+#ifdef CONFIG_X86_MCE_INTEL
+void mce_intel_feature_init(struct cpuinfo_x86 *c);
+void cmci_clear(void);
+void cmci_reenable(void);
+void cmci_rediscover(void);
+void cmci_recheck(void);
+#else
+static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
+static inline void cmci_clear(void) {}
+static inline void cmci_reenable(void) {}
+static inline void cmci_rediscover(void) {}
+static inline void cmci_recheck(void) {}
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+void mce_amd_feature_init(struct cpuinfo_x86 *c);
+#else
+static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
+#endif
+
+int mce_available(struct cpuinfo_x86 *c);
+
+DECLARE_PER_CPU(unsigned, mce_exception_count);
+DECLARE_PER_CPU(unsigned, mce_poll_count);
+
+typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
+DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
+
+enum mcp_flags {
+ MCP_TIMESTAMP = BIT(0), /* log time stamp */
+ MCP_UC = BIT(1), /* log uncorrected errors */
+ MCP_DONTLOG = BIT(2), /* only clear, don't log */
+};
+bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+
+int mce_notify_irq(void);
+
+DECLARE_PER_CPU(struct mce, injectm);
+
+extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
+ const char __user *ubuf,
+ size_t usize, loff_t *off));
+
+/* Disable CMCI/polling for MCA bank claimed by firmware */
+extern void mce_disable_bank(int bank);
+
+/*
+ * Exception handler
+ */
+
+/* Call the installed machine check handler for this CPU setup. */
+extern void (*machine_check_vector)(struct pt_regs *, long error_code);
+void do_machine_check(struct pt_regs *, long);
+
+/*
+ * Threshold handler
+ */
+
+extern void (*mce_threshold_vector)(void);
+extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
+
+/*
+ * Thermal handler
+ */
+
+void intel_init_thermal(struct cpuinfo_x86 *c);
+
+void mce_log_therm_throt_event(__u64 status);
+
+/* Interrupt Handler for core thermal thresholds */
+extern int (*platform_thermal_notify)(__u64 msr_val);
+
+/* Interrupt Handler for package thermal thresholds */
+extern int (*platform_thermal_package_notify)(__u64 msr_val);
+
+/* Callback support for rate control: returns true if the
+ * callback performs its own rate control */
+extern bool (*platform_thermal_package_rate_control)(void);
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+extern void mcheck_intel_therm_init(void);
+#else
+static inline void mcheck_intel_therm_init(void) { }
+#endif
+
+/*
+ * Used by APEI to report memory error via /dev/mcelog
+ */
+
+struct cper_sec_mem_err;
+extern void apei_mce_report_mem_error(int corrected,
+ struct cper_sec_mem_err *mem_err);
+
+#endif /* _ASM_X86_MCE_H */
diff --git a/kernel/arch/x86/include/asm/microcode.h b/kernel/arch/x86/include/asm/microcode.h
new file mode 100644
index 000000000..2fb20d6f7
--- /dev/null
+++ b/kernel/arch/x86/include/asm/microcode.h
@@ -0,0 +1,165 @@
+#ifndef _ASM_X86_MICROCODE_H
+#define _ASM_X86_MICROCODE_H
+
+#define native_rdmsr(msr, val1, val2) \
+do { \
+ u64 __val = native_read_msr((msr)); \
+ (void)((val1) = (u32)__val); \
+ (void)((val2) = (u32)(__val >> 32)); \
+} while (0)
+
+#define native_wrmsr(msr, low, high) \
+ native_write_msr(msr, low, high)
+
+#define native_wrmsrl(msr, val) \
+ native_write_msr((msr), \
+ (u32)((u64)(val)), \
+ (u32)((u64)(val) >> 32))
+
+struct cpu_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int rev;
+};
+
+struct device;
+
+enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
+extern bool dis_ucode_ldr;
+
+struct microcode_ops {
+ enum ucode_state (*request_microcode_user) (int cpu,
+ const void __user *buf, size_t size);
+
+ enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
+ bool refresh_fw);
+
+ void (*microcode_fini_cpu) (int cpu);
+
+ /*
+ * The generic 'microcode_core' part guarantees that
+ * the callbacks below run on a target cpu when they
+ * are being called.
+ * See also the "Synchronization" section in microcode_core.c.
+ */
+ int (*apply_microcode) (int cpu);
+ int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
+};
+
+struct ucode_cpu_info {
+ struct cpu_signature cpu_sig;
+ int valid;
+ void *mc;
+};
+extern struct ucode_cpu_info ucode_cpu_info[];
+
+#ifdef CONFIG_MICROCODE_INTEL
+extern struct microcode_ops * __init init_intel_microcode(void);
+#else
+static inline struct microcode_ops * __init init_intel_microcode(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_MICROCODE_INTEL */
+
+#ifdef CONFIG_MICROCODE_AMD
+extern struct microcode_ops * __init init_amd_microcode(void);
+extern void __exit exit_amd_microcode(void);
+#else
+static inline struct microcode_ops * __init init_amd_microcode(void)
+{
+ return NULL;
+}
+static inline void __exit exit_amd_microcode(void) {}
+#endif
+
+#ifdef CONFIG_MICROCODE_EARLY
+#define MAX_UCODE_COUNT 128
+
+#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
+#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
+#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
+#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
+#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
+#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
+#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
+
+#define CPUID_IS(a, b, c, ebx, ecx, edx) \
+ (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
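+
+/*
+ * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX (e.g. "Genu",
+ * "ineI", "ntel"); the XORs above are all zero only on an exact match of
+ * all three registers.
+ */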
+
+/*
+ * During the early microcode-loading phase on the BSP, boot_cpu_data is not
+ * set up yet, so x86_vendor() is used to get the BSP's vendor id.
+ *
+ * In the 32-bit AP case, accessing boot_cpu_data would need a linear address.
+ * To keep the code simple, x86_vendor() is used for the APs as well.
+ *
+ * x86_vendor() reads the vendor information directly from CPUID.
+ */
+static inline int x86_vendor(void)
+{
+ u32 eax = 0x00000000;
+ u32 ebx, ecx = 0, edx;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
+ return X86_VENDOR_INTEL;
+
+ if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
+ return X86_VENDOR_AMD;
+
+ return X86_VENDOR_UNKNOWN;
+}
+
+static inline unsigned int __x86_family(unsigned int sig)
+{
+ unsigned int x86;
+
+ x86 = (sig >> 8) & 0xf;
+
+ if (x86 == 0xf)
+ x86 += (sig >> 20) & 0xff;
+
+ return x86;
+}
+
+static inline unsigned int x86_family(void)
+{
+ u32 eax = 0x00000001;
+ u32 ebx, ecx = 0, edx;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ return __x86_family(eax);
+}
+
+static inline unsigned int x86_model(unsigned int sig)
+{
+ unsigned int x86, model;
+
+ x86 = __x86_family(sig);
+
+ model = (sig >> 4) & 0xf;
+
+ if (x86 == 0x6 || x86 == 0xf)
+ model += ((sig >> 16) & 0xf) << 4;
+
+ return model;
+}
+
+extern void __init load_ucode_bsp(void);
+extern void load_ucode_ap(void);
+extern int __init save_microcode_in_initrd(void);
+void reload_early_microcode(void);
+#else
+static inline void __init load_ucode_bsp(void) {}
+static inline void load_ucode_ap(void) {}
+static inline int __init save_microcode_in_initrd(void)
+{
+ return 0;
+}
+static inline void reload_early_microcode(void) {}
+#endif
+
+#endif /* _ASM_X86_MICROCODE_H */
diff --git a/kernel/arch/x86/include/asm/microcode_amd.h b/kernel/arch/x86/include/asm/microcode_amd.h
new file mode 100644
index 000000000..af935397e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/microcode_amd.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_X86_MICROCODE_AMD_H
+#define _ASM_X86_MICROCODE_AMD_H
+
+#include <asm/microcode.h>
+
+#define UCODE_MAGIC 0x00414d44
+#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
+#define UCODE_UCODE_TYPE 0x00000001
+
+#define SECTION_HDR_SIZE 8
+#define CONTAINER_HDR_SZ 12
+
+struct equiv_cpu_entry {
+ u32 installed_cpu;
+ u32 fixed_errata_mask;
+ u32 fixed_errata_compare;
+ u16 equiv_cpu;
+ u16 res;
+} __attribute__((packed));
+
+struct microcode_header_amd {
+ u32 data_code;
+ u32 patch_id;
+ u16 mc_patch_data_id;
+ u8 mc_patch_data_len;
+ u8 init_flag;
+ u32 mc_patch_data_checksum;
+ u32 nb_dev_id;
+ u32 sb_dev_id;
+ u16 processor_rev_id;
+ u8 nb_rev_id;
+ u8 sb_rev_id;
+ u8 bios_api_rev;
+ u8 reserved1[3];
+ u32 match_reg[8];
+} __attribute__((packed));
+
+struct microcode_amd {
+ struct microcode_header_amd hdr;
+ unsigned int mpb[0];
+};
+
+static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
+ unsigned int sig)
+{
+ int i = 0;
+
+ if (!equiv_cpu_table)
+ return 0;
+
+ while (equiv_cpu_table[i].installed_cpu != 0) {
+ if (sig == equiv_cpu_table[i].installed_cpu)
+ return equiv_cpu_table[i].equiv_cpu;
+
+ i++;
+ }
+ return 0;
+}
+
+extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
+extern int apply_microcode_amd(int cpu);
+extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+
+#define PATCH_MAX_SIZE PAGE_SIZE
+extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
+
+#ifdef CONFIG_MICROCODE_AMD_EARLY
+extern void __init load_ucode_amd_bsp(void);
+extern void load_ucode_amd_ap(void);
+extern int __init save_microcode_in_initrd_amd(void);
+void reload_ucode_amd(void);
+#else
+static inline void __init load_ucode_amd_bsp(void) {}
+static inline void load_ucode_amd_ap(void) {}
+static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
+static inline void reload_ucode_amd(void) {}
+#endif
+
+#endif /* _ASM_X86_MICROCODE_AMD_H */
diff --git a/kernel/arch/x86/include/asm/microcode_intel.h b/kernel/arch/x86/include/asm/microcode_intel.h
new file mode 100644
index 000000000..2b9209c46
--- /dev/null
+++ b/kernel/arch/x86/include/asm/microcode_intel.h
@@ -0,0 +1,92 @@
+#ifndef _ASM_X86_MICROCODE_INTEL_H
+#define _ASM_X86_MICROCODE_INTEL_H
+
+#include <asm/microcode.h>
+
+struct microcode_header_intel {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
+};
+
+struct microcode_intel {
+ struct microcode_header_intel hdr;
+ unsigned int bits[0];
+};
+
+/* The microcode format was extended starting with Prescott processors */
+struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+};
+
+struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[0];
+};
+
+#define DEFAULT_UCODE_DATASIZE (2000)
+#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
+#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
+#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
+#define DWSIZE (sizeof(u32))
+
+#define get_totalsize(mc) \
+ (((struct microcode_intel *)mc)->hdr.datasize ? \
+ ((struct microcode_intel *)mc)->hdr.totalsize : \
+ DEFAULT_UCODE_TOTALSIZE)
+
+#define get_datasize(mc) \
+ (((struct microcode_intel *)mc)->hdr.datasize ? \
+ ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
+
+#define sigmatch(s1, s2, p1, p2) \
+ (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))
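+
+/*
+ * Two signatures match when the CPU signatures are equal and their
+ * platform-id masks intersect (or both platform ids are zero).
+ */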
+
+#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
+
+extern int get_matching_microcode(unsigned int csig, int cpf, int rev, void *mc);
+extern int microcode_sanity_check(void *mc, int print_err);
+extern int get_matching_sig(unsigned int csig, int cpf, int rev, void *mc);
+
+static inline int
+revision_is_newer(struct microcode_header_intel *mc_header, int rev)
+{
+ return (mc_header->rev <= rev) ? 0 : 1;
+}
+
+#ifdef CONFIG_MICROCODE_INTEL_EARLY
+extern void __init load_ucode_intel_bsp(void);
+extern void load_ucode_intel_ap(void);
+extern void show_ucode_info_early(void);
+extern int __init save_microcode_in_initrd_intel(void);
+void reload_ucode_intel(void);
+#else
+static inline __init void load_ucode_intel_bsp(void) {}
+static inline void load_ucode_intel_ap(void) {}
+static inline void show_ucode_info_early(void) {}
+static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
+static inline void reload_ucode_intel(void) {}
+#endif
+
+#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+extern int save_mc_for_early(u8 *mc);
+#else
+static inline int save_mc_for_early(u8 *mc)
+{
+ return 0;
+}
+#endif
+
+#endif /* _ASM_X86_MICROCODE_INTEL_H */
diff --git a/kernel/arch/x86/include/asm/misc.h b/kernel/arch/x86/include/asm/misc.h
new file mode 100644
index 000000000..475f5bbc7
--- /dev/null
+++ b/kernel/arch/x86/include/asm/misc.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_MISC_H
+#define _ASM_X86_MISC_H
+
+int num_digits(int val);
+
+#endif /* _ASM_X86_MISC_H */
diff --git a/kernel/arch/x86/include/asm/mmconfig.h b/kernel/arch/x86/include/asm/mmconfig.h
new file mode 100644
index 000000000..04a3fed22
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mmconfig.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_MMCONFIG_H
+#define _ASM_X86_MMCONFIG_H
+
+#ifdef CONFIG_PCI_MMCONFIG
+extern void fam10h_check_enable_mmcfg(void);
+extern void check_enable_amd_mmconf_dmi(void);
+#else
+static inline void fam10h_check_enable_mmcfg(void) { }
+static inline void check_enable_amd_mmconf_dmi(void) { }
+#endif
+
+#endif /* _ASM_X86_MMCONFIG_H */
diff --git a/kernel/arch/x86/include/asm/mmu.h b/kernel/arch/x86/include/asm/mmu.h
new file mode 100644
index 000000000..09b9620a7
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mmu.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_X86_MMU_H
+#define _ASM_X86_MMU_H
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+
+/*
+ * The x86 doesn't have an MMU context, but
+ * we put the segment information here.
+ */
+typedef struct {
+ void *ldt;
+ int size;
+
+#ifdef CONFIG_X86_64
+ /* True if mm supports a task running in 32 bit compatibility mode. */
+ unsigned short ia32_compat;
+#endif
+
+ struct mutex lock;
+ void __user *vdso;
+
+ atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
+} mm_context_t;
+
+#ifdef CONFIG_SMP
+void leave_mm(int cpu);
+#else
+static inline void leave_mm(int cpu)
+{
+}
+#endif
+
+#endif /* _ASM_X86_MMU_H */
diff --git a/kernel/arch/x86/include/asm/mmu_context.h b/kernel/arch/x86/include/asm/mmu_context.h
new file mode 100644
index 000000000..883f6b933
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mmu_context.h
@@ -0,0 +1,175 @@
+#ifndef _ASM_X86_MMU_CONTEXT_H
+#define _ASM_X86_MMU_CONTEXT_H
+
+#include <asm/desc.h>
+#include <linux/atomic.h>
+#include <linux/mm_types.h>
+
+#include <trace/events/tlb.h>
+
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/paravirt.h>
+#include <asm/mpx.h>
+#ifndef CONFIG_PARAVIRT
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+}
+#endif /* !CONFIG_PARAVIRT */
+
+#ifdef CONFIG_PERF_EVENTS
+extern struct static_key rdpmc_always_available;
+
+static inline void load_mm_cr4(struct mm_struct *mm)
+{
+ if (static_key_true(&rdpmc_always_available) ||
+ atomic_read(&mm->context.perf_rdpmc_allowed))
+ cr4_set_bits(X86_CR4_PCE);
+ else
+ cr4_clear_bits(X86_CR4_PCE);
+}
+#else
+static inline void load_mm_cr4(struct mm_struct *mm) {}
+#endif
+
+/*
+ * Used for LDT copy/destruction.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+ if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned cpu = smp_processor_id();
+
+ if (likely(prev != next)) {
+#ifdef CONFIG_SMP
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ this_cpu_write(cpu_tlbstate.active_mm, next);
+#endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /* Re-load page tables */
+ load_cr3(next->pgd);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+
+ /* Stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
+ /* Load per-mm CR4 state */
+ load_mm_cr4(next);
+
+ /*
+ * Load the LDT, if the LDT is different.
+ *
+ * It's possible that prev->context.ldt doesn't match
+ * the LDT register. This can happen if leave_mm(prev)
+ * was called and then modify_ldt changed
+ * prev->context.ldt but suppressed an IPI to this CPU.
+ * In this case, prev->context.ldt != NULL, because we
+ * never free an LDT while the mm still exists. That
+ * means that next->context.ldt != prev->context.ldt,
+ * because mms never share an LDT.
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
+ }
+#ifdef CONFIG_SMP
+ else {
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
+
+ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+ /*
+ * On established mms, the mm_cpumask is only changed
+ * from irq context, from ptep_clear_flush() while in
+ * lazy tlb mode, and here. Irqs are blocked during
+ * schedule, protecting us from simultaneous changes.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ /*
+ * We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
+ load_cr3(next->pgd);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ load_mm_cr4(next);
+ load_LDT_nolock(&next->context);
+ }
+ }
+#endif
+}
+
+#define activate_mm(prev, next) \
+do { \
+ paravirt_activate_mm((prev), (next)); \
+ switch_mm((prev), (next), NULL); \
+} while (0)
+
+#ifdef CONFIG_X86_32
+#define deactivate_mm(tsk, mm) \
+do { \
+ lazy_load_gs(0); \
+} while (0)
+#else
+#define deactivate_mm(tsk, mm) \
+do { \
+ load_gs_index(0); \
+ loadsegment(fs, 0); \
+} while (0)
+#endif
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ paravirt_arch_dup_mmap(oldmm, mm);
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+ paravirt_arch_exit_mmap(mm);
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+ struct vm_area_struct *vma)
+{
+ mpx_mm_init(mm);
+}
+
+static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ /*
+ * mpx_notify_unmap() goes and reads a rarely-hot
+ * cacheline in the mm_struct. That can be expensive
+ * enough to be seen in profiles.
+ *
+ * The mpx_notify_unmap() call and its contents have been
+ * observed to affect munmap() performance on hardware
+ * where MPX is not present.
+ *
+ * The unlikely() optimizes for the fast case: no MPX
+ * in the CPU, or no MPX use in the process. Even if
+ * we get this wrong (in the unlikely event that MPX
+ * is widely enabled on some system) the overhead of
+ * MPX itself (reading bounds tables) is expected to
+ * overwhelm the overhead of getting this unlikely()
+ * consistently wrong.
+ */
+ if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+ mpx_notify_unmap(mm, vma, start, end);
+}
+
+#endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/kernel/arch/x86/include/asm/mmx.h b/kernel/arch/x86/include/asm/mmx.h
new file mode 100644
index 000000000..5cbf3135b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mmx.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_MMX_H
+#define _ASM_X86_MMX_H
+
+/*
+ * MMX 3Dnow! helper operations
+ */
+
+#include <linux/types.h>
+
+extern void *_mmx_memcpy(void *to, const void *from, size_t size);
+extern void mmx_clear_page(void *page);
+extern void mmx_copy_page(void *to, void *from);
+
+#endif /* _ASM_X86_MMX_H */
diff --git a/kernel/arch/x86/include/asm/mmzone.h b/kernel/arch/x86/include/asm/mmzone.h
new file mode 100644
index 000000000..d497bc425
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mmzone.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_32
+# include <asm/mmzone_32.h>
+#else
+# include <asm/mmzone_64.h>
+#endif
diff --git a/kernel/arch/x86/include/asm/mmzone_32.h b/kernel/arch/x86/include/asm/mmzone_32.h
new file mode 100644
index 000000000..1ec990bd7
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mmzone_32.h
@@ -0,0 +1,55 @@
+/*
+ * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
+ *
+ */
+
+#ifndef _ASM_X86_MMZONE_32_H
+#define _ASM_X86_MMZONE_32_H
+
+#include <asm/smp.h>
+
+#ifdef CONFIG_NUMA
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[nid])
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_DISCONTIGMEM
+
+/*
+ * generic node memory support, the following assumptions apply:
+ *
+ * 1) memory comes in 64Mb contiguous chunks which are either present or not
+ * 2) we will not have more than 64Gb in total
+ *
+ * for now assume that 64Gb is max amount of RAM for whole system
+ * 64Gb / 4096bytes/page = 16777216 pages
+ */
+#define MAX_NR_PAGES 16777216
+#define MAX_SECTIONS 1024
+#define PAGES_PER_SECTION (MAX_NR_PAGES/MAX_SECTIONS)
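+/* 16777216 pages / 1024 sections = 16384 pages, i.e. 64 MB per section
+   with 4 KB pages */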
+
+extern s8 physnode_map[];
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+#ifdef CONFIG_NUMA
+ return((int) physnode_map[(pfn) / PAGES_PER_SECTION]);
+#else
+ return 0;
+#endif
+}
+
+static inline int pfn_valid(int pfn)
+{
+ int nid = pfn_to_nid(pfn);
+
+ if (nid >= 0)
+ return (pfn < node_end_pfn(nid));
+ return 0;
+}
+
+#define early_pfn_valid(pfn) pfn_valid((pfn))
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+#endif /* _ASM_X86_MMZONE_32_H */
diff --git a/kernel/arch/x86/include/asm/mmzone_64.h b/kernel/arch/x86/include/asm/mmzone_64.h
new file mode 100644
index 000000000..129d9aa3c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mmzone_64.h
@@ -0,0 +1,17 @@
+/* K8 NUMA support */
+/* Copyright 2002,2003 by Andi Kleen, SuSE Labs */
+/* 2.5 Version loosely based on the NUMAQ Code by Pat Gaughen. */
+#ifndef _ASM_X86_MMZONE_64_H
+#define _ASM_X86_MMZONE_64_H
+
+#ifdef CONFIG_NUMA
+
+#include <linux/mmdebug.h>
+#include <asm/smp.h>
+
+extern struct pglist_data *node_data[];
+
+#define NODE_DATA(nid) (node_data[nid])
+
+#endif
+#endif /* _ASM_X86_MMZONE_64_H */
diff --git a/kernel/arch/x86/include/asm/module.h b/kernel/arch/x86/include/asm/module.h
new file mode 100644
index 000000000..e3b7819ca
--- /dev/null
+++ b/kernel/arch/x86/include/asm/module.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_X86_MODULE_H
+#define _ASM_X86_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_X86_64
+/* X86_64 does not define MODULE_PROC_FAMILY */
+#elif defined CONFIG_M486
+#define MODULE_PROC_FAMILY "486 "
+#elif defined CONFIG_M586
+#define MODULE_PROC_FAMILY "586 "
+#elif defined CONFIG_M586TSC
+#define MODULE_PROC_FAMILY "586TSC "
+#elif defined CONFIG_M586MMX
+#define MODULE_PROC_FAMILY "586MMX "
+#elif defined CONFIG_MCORE2
+#define MODULE_PROC_FAMILY "CORE2 "
+#elif defined CONFIG_MATOM
+#define MODULE_PROC_FAMILY "ATOM "
+#elif defined CONFIG_M686
+#define MODULE_PROC_FAMILY "686 "
+#elif defined CONFIG_MPENTIUMII
+#define MODULE_PROC_FAMILY "PENTIUMII "
+#elif defined CONFIG_MPENTIUMIII
+#define MODULE_PROC_FAMILY "PENTIUMIII "
+#elif defined CONFIG_MPENTIUMM
+#define MODULE_PROC_FAMILY "PENTIUMM "
+#elif defined CONFIG_MPENTIUM4
+#define MODULE_PROC_FAMILY "PENTIUM4 "
+#elif defined CONFIG_MK6
+#define MODULE_PROC_FAMILY "K6 "
+#elif defined CONFIG_MK7
+#define MODULE_PROC_FAMILY "K7 "
+#elif defined CONFIG_MK8
+#define MODULE_PROC_FAMILY "K8 "
+#elif defined CONFIG_MELAN
+#define MODULE_PROC_FAMILY "ELAN "
+#elif defined CONFIG_MCRUSOE
+#define MODULE_PROC_FAMILY "CRUSOE "
+#elif defined CONFIG_MEFFICEON
+#define MODULE_PROC_FAMILY "EFFICEON "
+#elif defined CONFIG_MWINCHIPC6
+#define MODULE_PROC_FAMILY "WINCHIPC6 "
+#elif defined CONFIG_MWINCHIP3D
+#define MODULE_PROC_FAMILY "WINCHIP3D "
+#elif defined CONFIG_MCYRIXIII
+#define MODULE_PROC_FAMILY "CYRIXIII "
+#elif defined CONFIG_MVIAC3_2
+#define MODULE_PROC_FAMILY "VIAC3-2 "
+#elif defined CONFIG_MVIAC7
+#define MODULE_PROC_FAMILY "VIAC7 "
+#elif defined CONFIG_MGEODEGX1
+#define MODULE_PROC_FAMILY "GEODEGX1 "
+#elif defined CONFIG_MGEODE_LX
+#define MODULE_PROC_FAMILY "GEODE "
+#else
+#error unknown processor family
+#endif
+
+#ifdef CONFIG_X86_32
+# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+#endif
+
+#endif /* _ASM_X86_MODULE_H */
diff --git a/kernel/arch/x86/include/asm/mpspec.h b/kernel/arch/x86/include/asm/mpspec.h
new file mode 100644
index 000000000..b07233b64
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mpspec.h
@@ -0,0 +1,153 @@
+#ifndef _ASM_X86_MPSPEC_H
+#define _ASM_X86_MPSPEC_H
+
+
+#include <asm/mpspec_def.h>
+#include <asm/x86_init.h>
+#include <asm/apicdef.h>
+
+extern int apic_version[];
+extern int pic_mode;
+
+#ifdef CONFIG_X86_32
+
+/*
+ * Summit or generic (i.e. installer) kernels need lots of bus entries.
+ * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
+ */
+#if CONFIG_BASE_SMALL == 0
+# define MAX_MP_BUSSES 260
+#else
+# define MAX_MP_BUSSES 32
+#endif
+
+#define MAX_IRQ_SOURCES 256
+
+extern unsigned int def_to_bigsmp;
+
+#else /* CONFIG_X86_64: */
+
+#define MAX_MP_BUSSES 256
+/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
+#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_EISA
+extern int mp_bus_id_to_type[MAX_MP_BUSSES];
+#endif
+
+extern DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
+
+extern unsigned int boot_cpu_physical_apicid;
+extern unsigned long mp_lapic_addr;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+extern int smp_found_config;
+#else
+# define smp_found_config 0
+#endif
+
+static inline void get_smp_config(void)
+{
+ x86_init.mpparse.get_smp_config(0);
+}
+
+static inline void early_get_smp_config(void)
+{
+ x86_init.mpparse.get_smp_config(1);
+}
+
+static inline void find_smp_config(void)
+{
+ x86_init.mpparse.find_smp_config();
+}
+
+#ifdef CONFIG_X86_MPPARSE
+extern void early_reserve_e820_mpc_new(void);
+extern int enable_update_mptable;
+extern int default_mpc_apic_id(struct mpc_cpu *m);
+extern void default_smp_read_mpc_oem(struct mpc_table *mpc);
+# ifdef CONFIG_X86_IO_APIC
+extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str);
+# else
+# define default_mpc_oem_bus_info NULL
+# endif
+extern void default_find_smp_config(void);
+extern void default_get_smp_config(unsigned int early);
+#else
+static inline void early_reserve_e820_mpc_new(void) { }
+#define enable_update_mptable 0
+#define default_mpc_apic_id NULL
+#define default_smp_read_mpc_oem NULL
+#define default_mpc_oem_bus_info NULL
+#define default_find_smp_config x86_init_noop
+#define default_get_smp_config x86_init_uint_noop
+#endif
+
+int generic_processor_info(int apicid, int version);
+
+#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_LOCAL_APIC)
+
+struct physid_mask {
+ unsigned long mask[PHYSID_ARRAY_SIZE];
+};
+
+typedef struct physid_mask physid_mask_t;
+
+#define physid_set(physid, map) set_bit(physid, (map).mask)
+#define physid_clear(physid, map) clear_bit(physid, (map).mask)
+#define physid_isset(physid, map) test_bit(physid, (map).mask)
+#define physid_test_and_set(physid, map) \
+ test_and_set_bit(physid, (map).mask)
+
+#define physids_and(dst, src1, src2) \
+ bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
+
+#define physids_or(dst, src1, src2) \
+ bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_LOCAL_APIC)
+
+#define physids_clear(map) \
+ bitmap_zero((map).mask, MAX_LOCAL_APIC)
+
+#define physids_complement(dst, src) \
+ bitmap_complement((dst).mask, (src).mask, MAX_LOCAL_APIC)
+
+#define physids_empty(map) \
+ bitmap_empty((map).mask, MAX_LOCAL_APIC)
+
+#define physids_equal(map1, map2) \
+ bitmap_equal((map1).mask, (map2).mask, MAX_LOCAL_APIC)
+
+#define physids_weight(map) \
+ bitmap_weight((map).mask, MAX_LOCAL_APIC)
+
+#define physids_shift_right(d, s, n) \
+ bitmap_shift_right((d).mask, (s).mask, n, MAX_LOCAL_APIC)
+
+#define physids_shift_left(d, s, n) \
+ bitmap_shift_left((d).mask, (s).mask, n, MAX_LOCAL_APIC)
+
+static inline unsigned long physids_coerce(physid_mask_t *map)
+{
+ return map->mask[0];
+}
+
+static inline void physids_promote(unsigned long physids, physid_mask_t *map)
+{
+ physids_clear(*map);
+ map->mask[0] = physids;
+}
+
+static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
+{
+ physids_clear(*map);
+ physid_set(physid, *map);
+}
+
+#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
+#define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
+
+extern physid_mask_t phys_cpu_present_map;
+
+#endif /* _ASM_X86_MPSPEC_H */
diff --git a/kernel/arch/x86/include/asm/mpspec_def.h b/kernel/arch/x86/include/asm/mpspec_def.h
new file mode 100644
index 000000000..b31f8c098
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mpspec_def.h
@@ -0,0 +1,173 @@
+#ifndef _ASM_X86_MPSPEC_DEF_H
+#define _ASM_X86_MPSPEC_DEF_H
+
+/*
+ * Structure definitions for SMP machines following the
+ * Intel Multiprocessing Specification 1.1 and 1.4.
+ */
+
+/*
+ * This tag identifies where the SMP configuration
+ * information is.
+ */
+
+#define SMP_MAGIC_IDENT (('_'<<24) | ('P'<<16) | ('M'<<8) | '_')
+
+#ifdef CONFIG_X86_32
+# define MAX_MPC_ENTRY 1024
+#endif
+
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+ char signature[4]; /* "_MP_" */
+ unsigned int physptr; /* Configuration table address */
+ unsigned char length; /* Our length (paragraphs) */
+ unsigned char specification; /* Specification version */
+ unsigned char checksum; /* Checksum (makes sum 0) */
+ unsigned char feature1; /* Standard or configuration ? */
+ unsigned char feature2; /* Bit7 set for IMCR|PIC */
+ unsigned char feature3; /* Unused (0) */
+ unsigned char feature4; /* Unused (0) */
+ unsigned char feature5; /* Unused (0) */
+};
+
+#define MPC_SIGNATURE "PCMP"
+
+struct mpc_table {
+ char signature[4];
+ unsigned short length; /* Size of table */
+ char spec; /* 0x01 */
+ char checksum;
+ char oem[8];
+ char productid[12];
+ unsigned int oemptr; /* 0 if not present */
+ unsigned short oemsize; /* 0 if not present */
+ unsigned short oemcount;
+ unsigned int lapic; /* APIC address */
+ unsigned int reserved;
+};
+
+/* Followed by entries */
+
+#define MP_PROCESSOR 0
+#define MP_BUS 1
+#define MP_IOAPIC 2
+#define MP_INTSRC 3
+#define MP_LINTSRC 4
+/* Used by IBM NUMA-Q to describe node locality */
+#define MP_TRANSLATION 192
+
+#define CPU_ENABLED 1 /* Processor is available */
+#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */
+
+#define CPU_STEPPING_MASK 0x000F
+#define CPU_MODEL_MASK 0x00F0
+#define CPU_FAMILY_MASK 0x0F00
+
+struct mpc_cpu {
+ unsigned char type;
+ unsigned char apicid; /* Local APIC number */
+ unsigned char apicver; /* Its version */
+ unsigned char cpuflag;
+ unsigned int cpufeature;
+ unsigned int featureflag; /* CPUID feature value */
+ unsigned int reserved[2];
+};
+
+struct mpc_bus {
+ unsigned char type;
+ unsigned char busid;
+ unsigned char bustype[6];
+};
+
+/* List of Bus Type string values, Intel MP Spec. */
+#define BUSTYPE_EISA "EISA"
+#define BUSTYPE_ISA "ISA"
+#define BUSTYPE_INTERN "INTERN" /* Internal BUS */
+#define BUSTYPE_MCA "MCA" /* Obsolete */
+#define BUSTYPE_VL "VL" /* Local bus */
+#define BUSTYPE_PCI "PCI"
+#define BUSTYPE_PCMCIA "PCMCIA"
+#define BUSTYPE_CBUS "CBUS"
+#define BUSTYPE_CBUSII "CBUSII"
+#define BUSTYPE_FUTURE "FUTURE"
+#define BUSTYPE_MBI "MBI"
+#define BUSTYPE_MBII "MBII"
+#define BUSTYPE_MPI "MPI"
+#define BUSTYPE_MPSA "MPSA"
+#define BUSTYPE_NUBUS "NUBUS"
+#define BUSTYPE_TC "TC"
+#define BUSTYPE_VME "VME"
+#define BUSTYPE_XPRESS "XPRESS"
+
+#define MPC_APIC_USABLE 0x01
+
+struct mpc_ioapic {
+ unsigned char type;
+ unsigned char apicid;
+ unsigned char apicver;
+ unsigned char flags;
+ unsigned int apicaddr;
+};
+
+struct mpc_intsrc {
+ unsigned char type;
+ unsigned char irqtype;
+ unsigned short irqflag;
+ unsigned char srcbus;
+ unsigned char srcbusirq;
+ unsigned char dstapic;
+ unsigned char dstirq;
+};
+
+enum mp_irq_source_types {
+ mp_INT = 0,
+ mp_NMI = 1,
+ mp_SMI = 2,
+ mp_ExtINT = 3
+};
+
+#define MP_IRQDIR_DEFAULT 0
+#define MP_IRQDIR_HIGH 1
+#define MP_IRQDIR_LOW 3
+
+#define MP_APIC_ALL 0xFF
+
+struct mpc_lintsrc {
+ unsigned char type;
+ unsigned char irqtype;
+ unsigned short irqflag;
+ unsigned char srcbusid;
+ unsigned char srcbusirq;
+ unsigned char destapic;
+ unsigned char destapiclint;
+};
+
+#define MPC_OEM_SIGNATURE "_OEM"
+
+struct mpc_oemtable {
+ char signature[4];
+ unsigned short length; /* Size of table */
+ char rev; /* 0x01 */
+ char checksum;
+ char mpc[8];
+};
+
+/*
+ * Default configurations
+ *
+ * 1 2 CPU ISA 82489DX
+ * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining
+ * 3 2 CPU EISA 82489DX
+ * 4 2 CPU MCA 82489DX
+ * 5 2 CPU ISA+PCI
+ * 6 2 CPU EISA+PCI
+ * 7 2 CPU MCA+PCI
+ */
+
+enum mp_bustype {
+ MP_BUS_ISA = 1,
+ MP_BUS_EISA,
+ MP_BUS_PCI,
+};
+#endif /* _ASM_X86_MPSPEC_DEF_H */
diff --git a/kernel/arch/x86/include/asm/mpx.h b/kernel/arch/x86/include/asm/mpx.h
new file mode 100644
index 000000000..a952a13d5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mpx.h
@@ -0,0 +1,103 @@
+#ifndef _ASM_X86_MPX_H
+#define _ASM_X86_MPX_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+#include <asm/insn.h>
+
+/*
+ * NULL is theoretically a valid place to put the bounds
+ * directory, so point this at an invalid address.
+ */
+#define MPX_INVALID_BOUNDS_DIR ((void __user *)-1)
+#define MPX_BNDCFG_ENABLE_FLAG 0x1
+#define MPX_BD_ENTRY_VALID_FLAG 0x1
+
+#ifdef CONFIG_X86_64
+
+/* The upper 28 bits [47:20] of the virtual address are used to
+ * index into the bounds directory (BD) on 64-bit.
+ */
+#define MPX_BD_ENTRY_OFFSET 28
+#define MPX_BD_ENTRY_SHIFT 3
+/* Bits [19:3] of the virtual address are used to index into the
+ * bounds table (BT) on 64-bit.
+ */
+#define MPX_BT_ENTRY_OFFSET 17
+#define MPX_BT_ENTRY_SHIFT 5
+#define MPX_IGN_BITS 3
+#define MPX_BD_ENTRY_TAIL 3
+
+#else
+
+#define MPX_BD_ENTRY_OFFSET 20
+#define MPX_BD_ENTRY_SHIFT 2
+#define MPX_BT_ENTRY_OFFSET 10
+#define MPX_BT_ENTRY_SHIFT 4
+#define MPX_IGN_BITS 2
+#define MPX_BD_ENTRY_TAIL 2
+
+#endif
+
+#define MPX_BD_SIZE_BYTES (1UL<<(MPX_BD_ENTRY_OFFSET+MPX_BD_ENTRY_SHIFT))
+#define MPX_BT_SIZE_BYTES (1UL<<(MPX_BT_ENTRY_OFFSET+MPX_BT_ENTRY_SHIFT))
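+/* On 64-bit this works out to a 2 GB (1UL << 31) bounds directory and 4 MB
+   (1UL << 22) bounds tables; on 32-bit, 4 MB and 16 KB respectively. */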
+
+#define MPX_BNDSTA_TAIL 2
+#define MPX_BNDCFG_TAIL 12
+#define MPX_BNDSTA_ADDR_MASK (~((1UL<<MPX_BNDSTA_TAIL)-1))
+#define MPX_BNDCFG_ADDR_MASK (~((1UL<<MPX_BNDCFG_TAIL)-1))
+#define MPX_BT_ADDR_MASK (~((1UL<<MPX_BD_ENTRY_TAIL)-1))
+
+#define MPX_BNDSTA_ERROR_CODE 0x3
+
+#define MPX_BD_ENTRY_MASK ((1<<MPX_BD_ENTRY_OFFSET)-1)
+#define MPX_BT_ENTRY_MASK ((1<<MPX_BT_ENTRY_OFFSET)-1)
+#define MPX_GET_BD_ENTRY_OFFSET(addr) ((((addr)>>(MPX_BT_ENTRY_OFFSET+ \
+ MPX_IGN_BITS)) & MPX_BD_ENTRY_MASK) << MPX_BD_ENTRY_SHIFT)
+#define MPX_GET_BT_ENTRY_OFFSET(addr) ((((addr)>>MPX_IGN_BITS) & \
+ MPX_BT_ENTRY_MASK) << MPX_BT_ENTRY_SHIFT)
+
+#ifdef CONFIG_X86_INTEL_MPX
+siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
+ struct xsave_struct *xsave_buf);
+int mpx_handle_bd_fault(struct xsave_struct *xsave_buf);
+static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
+{
+ return (mm->bd_addr != MPX_INVALID_BOUNDS_DIR);
+}
+static inline void mpx_mm_init(struct mm_struct *mm)
+{
+ /*
+ * NULL is theoretically a valid place to put the bounds
+ * directory, so point this at an invalid address.
+ */
+ mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
+}
+void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+#else
+static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
+ struct xsave_struct *xsave_buf)
+{
+ return NULL;
+}
+static inline int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
+{
+ return -EINVAL;
+}
+static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
+{
+ return 0;
+}
+static inline void mpx_mm_init(struct mm_struct *mm)
+{
+}
+static inline void mpx_notify_unmap(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+#endif /* CONFIG_X86_INTEL_MPX */
+
+#endif /* _ASM_X86_MPX_H */
diff --git a/kernel/arch/x86/include/asm/mshyperv.h b/kernel/arch/x86/include/asm/mshyperv.h
new file mode 100644
index 000000000..c163215ab
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mshyperv.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_X86_MSHYPER_H
+#define _ASM_X86_MSHYPER_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/hyperv.h>
+
+struct ms_hyperv_info {
+ u32 features;
+ u32 hints;
+};
+
+extern struct ms_hyperv_info ms_hyperv;
+
+void hyperv_callback_vector(void);
+#ifdef CONFIG_TRACING
+#define trace_hyperv_callback_vector hyperv_callback_vector
+#endif
+void hyperv_vector_handler(struct pt_regs *regs);
+void hv_setup_vmbus_irq(void (*handler)(void));
+void hv_remove_vmbus_irq(void);
+
+#endif
diff --git a/kernel/arch/x86/include/asm/msidef.h b/kernel/arch/x86/include/asm/msidef.h
new file mode 100644
index 000000000..4cc48af23
--- /dev/null
+++ b/kernel/arch/x86/include/asm/msidef.h
@@ -0,0 +1,56 @@
+#ifndef _ASM_X86_MSIDEF_H
+#define _ASM_X86_MSIDEF_H
+
+/*
+ * Constants for Intel APIC based MSI messages.
+ */
+
+/*
+ * Shifts for MSI data
+ */
+
+#define MSI_DATA_VECTOR_SHIFT 0
+#define MSI_DATA_VECTOR_MASK 0x000000ff
+#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
+ MSI_DATA_VECTOR_MASK)
+
+#define MSI_DATA_DELIVERY_MODE_SHIFT 8
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT 14
+#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT 15
+#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ * Shift/mask fields for msi address
+ */
+
+#define MSI_ADDR_BASE_HI 0
+#define MSI_ADDR_BASE_LO 0xfee00000
+
+#define MSI_ADDR_DEST_MODE_SHIFT 2
+#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT 3
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+ /* dedicated cpu */
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+ /* lowest priority */
+
+#define MSI_ADDR_DEST_ID_SHIFT 12
+#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
+#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
+ MSI_ADDR_DEST_ID_MASK)
+#define MSI_ADDR_EXT_DEST_ID(dest) ((dest) & 0xffffff00)
+
+#define MSI_ADDR_IR_EXT_INT (1 << 4)
+#define MSI_ADDR_IR_SHV (1 << 3)
+#define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13)
+#define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5)
+#endif /* _ASM_X86_MSIDEF_H */
diff --git a/kernel/arch/x86/include/asm/msr.h b/kernel/arch/x86/include/asm/msr.h
new file mode 100644
index 000000000..de36f22eb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/msr.h
@@ -0,0 +1,291 @@
+#ifndef _ASM_X86_MSR_H
+#define _ASM_X86_MSR_H
+
+#include <uapi/asm/msr.h>
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/errno.h>
+#include <asm/cpumask.h>
+
+struct msr {
+ union {
+ struct {
+ u32 l;
+ u32 h;
+ };
+ u64 q;
+ };
+};
+
+struct msr_info {
+ u32 msr_no;
+ struct msr reg;
+ struct msr *msrs;
+ int err;
+};
+
+struct msr_regs_info {
+ u32 *regs;
+ int err;
+};
+
+static inline unsigned long long native_read_tscp(unsigned int *aux)
+{
+ unsigned long low, high;
+ asm volatile(".byte 0x0f,0x01,0xf9"
+ : "=a" (low), "=d" (high), "=c" (*aux));
+ return low | ((u64)high << 32);
+}
+
+/*
+ * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
+ * constraint has different meanings. For i386, "A" means exactly
+ * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
+ * it means rax *or* rdx.
+ */
+#ifdef CONFIG_X86_64
+#define DECLARE_ARGS(val, low, high) unsigned low, high
+#define EAX_EDX_VAL(val, low, high) ((low) | ((u64)(high) << 32))
+#define EAX_EDX_ARGS(val, low, high) "a" (low), "d" (high)
+#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
+#else
+#define DECLARE_ARGS(val, low, high) unsigned long long val
+#define EAX_EDX_VAL(val, low, high) (val)
+#define EAX_EDX_ARGS(val, low, high) "A" (val)
+#define EAX_EDX_RET(val, low, high) "=A" (val)
+#endif
+
+static inline unsigned long long native_read_msr(unsigned int msr)
+{
+ DECLARE_ARGS(val, low, high);
+
+ asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+ return EAX_EDX_VAL(val, low, high);
+}
+
+static inline unsigned long long native_read_msr_safe(unsigned int msr,
+ int *err)
+{
+ DECLARE_ARGS(val, low, high);
+
+ asm volatile("2: rdmsr ; xor %[err],%[err]\n"
+ "1:\n\t"
+ ".section .fixup,\"ax\"\n\t"
+ "3: mov %[fault],%[err] ; jmp 1b\n\t"
+ ".previous\n\t"
+ _ASM_EXTABLE(2b, 3b)
+ : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+ : "c" (msr), [fault] "i" (-EIO));
+ return EAX_EDX_VAL(val, low, high);
+}
+
+static inline void native_write_msr(unsigned int msr,
+ unsigned low, unsigned high)
+{
+ asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+
+/* Can be uninlined because referenced by paravirt */
+notrace static inline int native_write_msr_safe(unsigned int msr,
+ unsigned low, unsigned high)
+{
+ int err;
+ asm volatile("2: wrmsr ; xor %[err],%[err]\n"
+ "1:\n\t"
+ ".section .fixup,\"ax\"\n\t"
+ "3: mov %[fault],%[err] ; jmp 1b\n\t"
+ ".previous\n\t"
+ _ASM_EXTABLE(2b, 3b)
+ : [err] "=a" (err)
+ : "c" (msr), "0" (low), "d" (high),
+ [fault] "i" (-EIO)
+ : "memory");
+ return err;
+}
+
+extern unsigned long long native_read_tsc(void);
+
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
+
+static __always_inline unsigned long long __native_read_tsc(void)
+{
+ DECLARE_ARGS(val, low, high);
+
+ asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
+
+ return EAX_EDX_VAL(val, low, high);
+}
+
+static inline unsigned long long native_read_pmc(int counter)
+{
+ DECLARE_ARGS(val, low, high);
+
+ asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
+ return EAX_EDX_VAL(val, low, high);
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#include <linux/errno.h>
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr, low, high) \
+do { \
+ u64 __val = native_read_msr((msr)); \
+ (void)((low) = (u32)__val); \
+ (void)((high) = (u32)(__val >> 32)); \
+} while (0)
+
+static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
+{
+ native_write_msr(msr, low, high);
+}
+
+#define rdmsrl(msr, val) \
+ ((val) = native_read_msr((msr)))
+
+#define wrmsrl(msr, val) \
+ native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
+
+/* wrmsr with exception handling */
+static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
+{
+ return native_write_msr_safe(msr, low, high);
+}
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr, low, high) \
+({ \
+ int __err; \
+ u64 __val = native_read_msr_safe((msr), &__err); \
+ (*low) = (u32)__val; \
+ (*high) = (u32)(__val >> 32); \
+ __err; \
+})
+
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+ int err;
+
+ *p = native_read_msr_safe(msr, &err);
+ return err;
+}
+
+#define rdtscl(low) \
+ ((low) = (u32)__native_read_tsc())
+
+#define rdtscll(val) \
+ ((val) = __native_read_tsc())
+
+#define rdpmc(counter, low, high) \
+do { \
+ u64 _l = native_read_pmc((counter)); \
+ (low) = (u32)_l; \
+ (high) = (u32)(_l >> 32); \
+} while (0)
+
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
+#define rdtscp(low, high, aux) \
+do { \
+ unsigned long long _val = native_read_tscp(&(aux)); \
+ (low) = (u32)_val; \
+ (high) = (u32)(_val >> 32); \
+} while (0)
+
+#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
+
+#endif /* !CONFIG_PARAVIRT */
+
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
+ (u32)((val) >> 32))
+
+#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
+
+#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
+
+struct msr *msrs_alloc(void);
+void msrs_free(struct msr *msrs);
+int msr_set_bit(u32 msr, u8 bit);
+int msr_clear_bit(u32 msr, u8 bit);
+
+#ifdef CONFIG_SMP
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
+#else /* CONFIG_SMP */
+static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+ rdmsr(msr_no, *l, *h);
+ return 0;
+}
+static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+ wrmsr(msr_no, l, h);
+ return 0;
+}
+static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+ rdmsrl(msr_no, *q);
+ return 0;
+}
+static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+ wrmsrl(msr_no, q);
+ return 0;
+}
+static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
+ struct msr *msrs)
+{
+ rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+}
+static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
+ struct msr *msrs)
+{
+ wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+}
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
+ u32 *l, u32 *h)
+{
+ return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+ return wrmsr_safe(msr_no, l, h);
+}
+static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+ return rdmsrl_safe(msr_no, q);
+}
+static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+ return wrmsrl_safe(msr_no, q);
+}
+static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+{
+ return rdmsr_safe_regs(regs);
+}
+static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+{
+ return wrmsr_safe_regs(regs);
+}
+#endif /* CONFIG_SMP */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_MSR_H */
diff --git a/kernel/arch/x86/include/asm/mtrr.h b/kernel/arch/x86/include/asm/mtrr.h
new file mode 100644
index 000000000..f768f6298
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mtrr.h
@@ -0,0 +1,130 @@
+/* Generic MTRR (Memory Type Range Register) ioctls.
+
+ Copyright (C) 1997-1999 Richard Gooch
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this library; if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Richard Gooch may be reached by email at rgooch@atnf.csiro.au
+ The postal address is:
+ Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
+*/
+#ifndef _ASM_X86_MTRR_H
+#define _ASM_X86_MTRR_H
+
+#include <uapi/asm/mtrr.h>
+
+
+/*
+ * The following functions are for use by other drivers that cannot use
+ * arch_phys_wc_add and arch_phys_wc_del.
+ */
+# ifdef CONFIG_MTRR
+extern u8 mtrr_type_lookup(u64 addr, u64 end);
+extern void mtrr_save_fixed_ranges(void *);
+extern void mtrr_save_state(void);
+extern int mtrr_add(unsigned long base, unsigned long size,
+ unsigned int type, bool increment);
+extern int mtrr_add_page(unsigned long base, unsigned long size,
+ unsigned int type, bool increment);
+extern int mtrr_del(int reg, unsigned long base, unsigned long size);
+extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
+extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
+extern void set_mtrr_aps_delayed_init(void);
+extern void mtrr_aps_init(void);
+extern void mtrr_bp_restore(void);
+extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
+extern int amd_special_default_mtrr(void);
+extern int phys_wc_to_mtrr_index(int handle);
+# else
+static inline u8 mtrr_type_lookup(u64 addr, u64 end)
+{
+ /*
+ * Return no-MTRRs:
+ */
+ return 0xff;
+}
+#define mtrr_save_fixed_ranges(arg) do {} while (0)
+#define mtrr_save_state() do {} while (0)
+static inline int mtrr_add(unsigned long base, unsigned long size,
+ unsigned int type, bool increment)
+{
+ return -ENODEV;
+}
+static inline int mtrr_add_page(unsigned long base, unsigned long size,
+ unsigned int type, bool increment)
+{
+ return -ENODEV;
+}
+static inline int mtrr_del(int reg, unsigned long base, unsigned long size)
+{
+ return -ENODEV;
+}
+static inline int mtrr_del_page(int reg, unsigned long base, unsigned long size)
+{
+ return -ENODEV;
+}
+static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
+{
+ return 0;
+}
+static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
+{
+}
+static inline int phys_wc_to_mtrr_index(int handle)
+{
+ return -1;
+}
+
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
+#define set_mtrr_aps_delayed_init() do {} while (0)
+#define mtrr_aps_init() do {} while (0)
+#define mtrr_bp_restore() do {} while (0)
+# endif
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+struct mtrr_sentry32 {
+ compat_ulong_t base; /* Base address */
+ compat_uint_t size; /* Size of region */
+ compat_uint_t type; /* Type of region */
+};
+
+struct mtrr_gentry32 {
+ compat_ulong_t regnum; /* Register number */
+ compat_uint_t base; /* Base address */
+ compat_uint_t size; /* Size of region */
+ compat_uint_t type; /* Type of region */
+};
+
+#define MTRR_IOCTL_BASE 'M'
+
+#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
+#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
+#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
+#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
+#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
+#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
+#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
+#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
+#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
+#define MTRRIOC32_KILL_PAGE_ENTRY \
+ _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
+#endif /* CONFIG_COMPAT */
+
+#endif /* _ASM_X86_MTRR_H */
diff --git a/kernel/arch/x86/include/asm/mutex.h b/kernel/arch/x86/include/asm/mutex.h
new file mode 100644
index 000000000..7d3a48275
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mutex.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_32
+# include <asm/mutex_32.h>
+#else
+# include <asm/mutex_64.h>
+#endif
diff --git a/kernel/arch/x86/include/asm/mutex_32.h b/kernel/arch/x86/include/asm/mutex_32.h
new file mode 100644
index 000000000..85e6cda45
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mutex_32.h
@@ -0,0 +1,110 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_X86_MUTEX_32_H
+#define _ASM_X86_MUTEX_32_H
+
+#include <asm/alternative.h>
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
+ " jns 1f \n" \
+ " call " #fail_fn "\n" \
+ "1:\n" \
+ : "=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ *
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
+ */
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return -1;
+ else
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value
+ * to 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
+ " jg 1f\n" \
+ " call " #fail_fn "\n" \
+ "1:\n" \
+ : "=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it to 0 on failure.
+ */
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
+{
+ /* cmpxchg because it never induces a false contention state. */
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+
+ return 0;
+}
+
+#endif /* _ASM_X86_MUTEX_32_H */
diff --git a/kernel/arch/x86/include/asm/mutex_64.h b/kernel/arch/x86/include/asm/mutex_64.h
new file mode 100644
index 000000000..07537a442
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mutex_64.h
@@ -0,0 +1,127 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_X86_MUTEX_64_H
+#define _ASM_X86_MUTEX_64_H
+
+/**
+ * __mutex_fastpath_lock - decrement and call function if negative
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is negative
+ *
+ * Atomically decrements @v and calls <fail_fn> if the result is negative.
+ */
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_lock(atomic_t *v,
+ void (*fail_fn)(atomic_t *))
+{
+ asm_volatile_goto(LOCK_PREFIX " decl %0\n"
+ " jns %l[exit]\n"
+ : : "m" (v->counter)
+ : "memory", "cc"
+ : exit);
+ fail_fn(v);
+exit:
+ return;
+}
+#else
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
+ " jns 1f \n" \
+ " call " #fail_fn "\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+#endif
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ *
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
+ */
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return -1;
+ else
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - increment and call function if nonpositive
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is nonpositive
+ *
+ * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
+ */
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_unlock(atomic_t *v,
+ void (*fail_fn)(atomic_t *))
+{
+ asm_volatile_goto(LOCK_PREFIX " incl %0\n"
+ " jg %l[exit]\n"
+ : : "m" (v->counter)
+ : "memory", "cc"
+ : exit);
+ fail_fn(v);
+exit:
+ return;
+}
+#else
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(void (*)(atomic_t *), fail_fn); \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
+ " jg 1f\n" \
+ " call " #fail_fn "\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+#endif
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
+ * if it wasn't 1 originally. [the fallback function is never used on
+ * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
+ */
+static inline int __mutex_fastpath_trylock(atomic_t *count,
+ int (*fail_fn)(atomic_t *))
+{
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+ else
+ return 0;
+}
+
+#endif /* _ASM_X86_MUTEX_64_H */
diff --git a/kernel/arch/x86/include/asm/mwait.h b/kernel/arch/x86/include/asm/mwait.h
new file mode 100644
index 000000000..653dfa766
--- /dev/null
+++ b/kernel/arch/x86/include/asm/mwait.h
@@ -0,0 +1,67 @@
+#ifndef _ASM_X86_MWAIT_H
+#define _ASM_X86_MWAIT_H
+
+#include <linux/sched.h>
+
+#define MWAIT_SUBSTATE_MASK 0xf
+#define MWAIT_CSTATE_MASK 0xf
+#define MWAIT_SUBSTATE_SIZE 4
+#define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
+#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)
+
+#define CPUID_MWAIT_LEAF 5
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
+#define CPUID5_ECX_INTERRUPT_BREAK 0x2
+
+#define MWAIT_ECX_INTERRUPT_BREAK 0x1
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+{
+ /* "monitor %eax, %ecx, %edx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc8;"
+ :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+}
+
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+ trace_hardirqs_on();
+ /* "mwait %eax, %ecx;" */
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+ if (!current_set_polling_and_test()) {
+ if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
+ mb();
+ clflush((void *)&current_thread_info()->flags);
+ mb();
+ }
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ if (!need_resched())
+ __mwait(eax, ecx);
+ }
+ current_clr_polling();
+}
+
+#endif /* _ASM_X86_MWAIT_H */
diff --git a/kernel/arch/x86/include/asm/nmi.h b/kernel/arch/x86/include/asm/nmi.h
new file mode 100644
index 000000000..5f2fc4441
--- /dev/null
+++ b/kernel/arch/x86/include/asm/nmi.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_X86_NMI_H
+#define _ASM_X86_NMI_H
+
+#include <linux/irq_work.h>
+#include <linux/pm.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
+struct ctl_table;
+extern int proc_nmi_enabled(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int unknown_nmi_panic;
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#define NMI_FLAG_FIRST 1
+
+enum {
+ NMI_LOCAL=0,
+ NMI_UNKNOWN,
+ NMI_SERR,
+ NMI_IO_CHECK,
+ NMI_MAX
+};
+
+#define NMI_DONE 0
+#define NMI_HANDLED 1
+
+typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
+
+struct nmiaction {
+ struct list_head list;
+ nmi_handler_t handler;
+ u64 max_duration;
+ struct irq_work irq_work;
+ unsigned long flags;
+ const char *name;
+};
+
+#define register_nmi_handler(t, fn, fg, n, init...) \
+({ \
+ static struct nmiaction init fn##_na = { \
+ .handler = (fn), \
+ .name = (n), \
+ .flags = (fg), \
+ }; \
+ __register_nmi_handler((t), &fn##_na); \
+})
+
+int __register_nmi_handler(unsigned int, struct nmiaction *);
+
+void unregister_nmi_handler(unsigned int, const char *);
+
+void stop_nmi(void);
+void restart_nmi(void);
+void local_touch_nmi(void);
+
+#endif /* _ASM_X86_NMI_H */
diff --git a/kernel/arch/x86/include/asm/nops.h b/kernel/arch/x86/include/asm/nops.h
new file mode 100644
index 000000000..aff2b3356
--- /dev/null
+++ b/kernel/arch/x86/include/asm/nops.h
@@ -0,0 +1,146 @@
+#ifndef _ASM_X86_NOPS_H
+#define _ASM_X86_NOPS_H
+
+/*
+ * Define nops for use with alternative() and for tracing.
+ *
+ * *_NOP5_ATOMIC must be a single instruction.
+ */
+
+#define NOP_DS_PREFIX 0x3e
+
+/* generic versions from gas
+ 1: nop
+ the following instructions are NOT nops in 64-bit mode,
+ for 64-bit mode use K8 or P6 nops instead
+ 2: movl %esi,%esi
+ 3: leal 0x00(%esi),%esi
+ 4: leal 0x00(,%esi,1),%esi
+ 6: leal 0x00000000(%esi),%esi
+ 7: leal 0x00000000(,%esi,1),%esi
+*/
+#define GENERIC_NOP1 0x90
+#define GENERIC_NOP2 0x89,0xf6
+#define GENERIC_NOP3 0x8d,0x76,0x00
+#define GENERIC_NOP4 0x8d,0x74,0x26,0x00
+#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4
+#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
+#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
+#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7
+#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4
+
+/* Opteron 64bit nops
+ 1: nop
+ 2: osp nop
+ 3: osp osp nop
+ 4: osp osp osp nop
+*/
+#define K8_NOP1 GENERIC_NOP1
+#define K8_NOP2 0x66,K8_NOP1
+#define K8_NOP3 0x66,K8_NOP2
+#define K8_NOP4 0x66,K8_NOP3
+#define K8_NOP5 K8_NOP3,K8_NOP2
+#define K8_NOP6 K8_NOP3,K8_NOP3
+#define K8_NOP7 K8_NOP4,K8_NOP3
+#define K8_NOP8 K8_NOP4,K8_NOP4
+#define K8_NOP5_ATOMIC 0x66,K8_NOP4
+
+/* K7 nops
+ uses eax dependencies (arbitrary choice)
+ 1: nop
+ 2: movl %eax,%eax
+ 3: leal (,%eax,1),%eax
+ 4: leal 0x00(,%eax,1),%eax
+ 6: leal 0x00000000(%eax),%eax
+ 7: leal 0x00000000(,%eax,1),%eax
+*/
+#define K7_NOP1 GENERIC_NOP1
+#define K7_NOP2 0x8b,0xc0
+#define K7_NOP3 0x8d,0x04,0x20
+#define K7_NOP4 0x8d,0x44,0x20,0x00
+#define K7_NOP5 K7_NOP4,K7_NOP1
+#define K7_NOP6 0x8d,0x80,0,0,0,0
+#define K7_NOP7 0x8D,0x04,0x05,0,0,0,0
+#define K7_NOP8 K7_NOP7,K7_NOP1
+#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4
+
+/* P6 nops
+ uses eax dependencies (Intel-recommended choice)
+ 1: nop
+ 2: osp nop
+ 3: nopl (%eax)
+ 4: nopl 0x00(%eax)
+ 5: nopl 0x00(%eax,%eax,1)
+ 6: osp nopl 0x00(%eax,%eax,1)
+ 7: nopl 0x00000000(%eax)
+ 8: nopl 0x00000000(%eax,%eax,1)
+ Note: All the above are assumed to be a single instruction.
+ There is kernel code that depends on this.
+*/
+#define P6_NOP1 GENERIC_NOP1
+#define P6_NOP2 0x66,0x90
+#define P6_NOP3 0x0f,0x1f,0x00
+#define P6_NOP4 0x0f,0x1f,0x40,0
+#define P6_NOP5 0x0f,0x1f,0x44,0x00,0
+#define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0
+#define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0
+#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0
+#define P6_NOP5_ATOMIC P6_NOP5
+
+#ifdef __ASSEMBLY__
+#define _ASM_MK_NOP(x) .byte x
+#else
+#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
+#endif
+
+#if defined(CONFIG_MK7)
+#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC)
+#elif defined(CONFIG_X86_P6_NOP)
+#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC)
+#elif defined(CONFIG_X86_64)
+#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC)
+#else
+#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1)
+#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2)
+#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3)
+#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4)
+#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5)
+#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6)
+#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7)
+#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8)
+#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC)
+#endif
+
+#define ASM_NOP_MAX 8
+#define NOP_ATOMIC5 (ASM_NOP_MAX+1) /* Entry for the 5-byte atomic NOP */
+
+#ifndef __ASSEMBLY__
+extern const unsigned char * const *ideal_nops;
+extern void arch_init_ideal_nops(void);
+#endif
+
+#endif /* _ASM_X86_NOPS_H */
diff --git a/kernel/arch/x86/include/asm/numa.h b/kernel/arch/x86/include/asm/numa.h
new file mode 100644
index 000000000..01b493e5a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/numa.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_X86_NUMA_H
+#define _ASM_X86_NUMA_H
+
+#include <linux/nodemask.h>
+
+#include <asm/topology.h>
+#include <asm/apicdef.h>
+
+#ifdef CONFIG_NUMA
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+/*
+ * Too small node sizes may confuse the VM badly. Usually they
+ * result from BIOS bugs. So dont recognize nodes as standalone
+ * NUMA entities that have less than this amount of RAM listed:
+ */
+#define NODE_MIN_SIZE (4*1024*1024)
+
+extern int numa_off;
+
+/*
+ * __apicid_to_node[] stores the raw mapping between physical apicid and
+ * node and is used to initialize cpu_to_node mapping.
+ *
+ * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
+ * should be accessed by the accessors - set_apicid_to_node() and
+ * numa_cpu_node().
+ */
+extern s16 __apicid_to_node[MAX_LOCAL_APIC];
+extern nodemask_t numa_nodes_parsed __initdata;
+
+extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+extern void __init numa_set_distance(int from, int to, int distance);
+
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+ __apicid_to_node[apicid] = node;
+}
+
+extern int numa_cpu_node(int cpu);
+
+#else /* CONFIG_NUMA */
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+}
+
+static inline int numa_cpu_node(int cpu)
+{
+ return NUMA_NO_NODE;
+}
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_X86_32
+# include <asm/numa_32.h>
+#endif
+
+#ifdef CONFIG_NUMA
+extern void numa_set_node(int cpu, int node);
+extern void numa_clear_node(int cpu);
+extern void __init init_cpu_to_node(void);
+extern void numa_add_cpu(int cpu);
+extern void numa_remove_cpu(int cpu);
+#else /* CONFIG_NUMA */
+static inline void numa_set_node(int cpu, int node) { }
+static inline void numa_clear_node(int cpu) { }
+static inline void init_cpu_to_node(void) { }
+static inline void numa_add_cpu(int cpu) { }
+static inline void numa_remove_cpu(int cpu) { }
+#endif /* CONFIG_NUMA */
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+void debug_cpumask_set_cpu(int cpu, int node, bool enable);
+#endif
+
+#ifdef CONFIG_NUMA_EMU
+#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
+#define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL))
+void numa_emu_cmdline(char *);
+#endif /* CONFIG_NUMA_EMU */
+
+#endif /* _ASM_X86_NUMA_H */
diff --git a/kernel/arch/x86/include/asm/numa_32.h b/kernel/arch/x86/include/asm/numa_32.h
new file mode 100644
index 000000000..e7d6b8254
--- /dev/null
+++ b/kernel/arch/x86/include/asm/numa_32.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_NUMA_32_H
+#define _ASM_X86_NUMA_32_H
+
+#ifdef CONFIG_HIGHMEM
+extern void set_highmem_pages_init(void);
+#else
+static inline void set_highmem_pages_init(void)
+{
+}
+#endif
+
+#endif /* _ASM_X86_NUMA_32_H */
diff --git a/kernel/arch/x86/include/asm/numachip/numachip.h b/kernel/arch/x86/include/asm/numachip/numachip.h
new file mode 100644
index 000000000..1c6f7f621
--- /dev/null
+++ b/kernel/arch/x86/include/asm/numachip/numachip.h
@@ -0,0 +1,19 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-specific header file
+ *
+ * Copyright (C) 2012 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ */
+
+#ifndef _ASM_X86_NUMACHIP_NUMACHIP_H
+#define _ASM_X86_NUMACHIP_NUMACHIP_H
+
+extern int __init pci_numachip_init(void);
+
+#endif /* _ASM_X86_NUMACHIP_NUMACHIP_H */
diff --git a/kernel/arch/x86/include/asm/numachip/numachip_csr.h b/kernel/arch/x86/include/asm/numachip/numachip_csr.h
new file mode 100644
index 000000000..660f843df
--- /dev/null
+++ b/kernel/arch/x86/include/asm/numachip/numachip_csr.h
@@ -0,0 +1,167 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Numascale NumaConnect-Specific Header file
+ *
+ * Copyright (C) 2011 Numascale AS. All rights reserved.
+ *
+ * Send feedback to <support@numascale.com>
+ *
+ */
+
+#ifndef _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
+#define _ASM_X86_NUMACHIP_NUMACHIP_CSR_H
+
+#include <linux/numa.h>
+#include <linux/percpu.h>
+#include <linux/io.h>
+#include <linux/swab.h>
+#include <asm/types.h>
+#include <asm/processor.h>
+
+#define CSR_NODE_SHIFT 16
+#define CSR_NODE_BITS(p) (((unsigned long)(p)) << CSR_NODE_SHIFT)
+#define CSR_NODE_MASK 0x0fff /* 4K nodes */
+
+/* 32K CSR space, b15 indicates geo/non-geo */
+#define CSR_OFFSET_MASK 0x7fffUL
+
+/* Global CSR space covers all 4K possible nodes with 64K CSR space per node */
+#define NUMACHIP_GCSR_BASE 0x3fff00000000ULL
+#define NUMACHIP_GCSR_LIM 0x3fff0fffffffULL
+#define NUMACHIP_GCSR_SIZE (NUMACHIP_GCSR_LIM - NUMACHIP_GCSR_BASE + 1)
+
+/*
+ * Local CSR space starts in global CSR space with "nodeid" = 0xfff0, however
+ * when using the direct mapping on x86_64, both start and size needs to be
+ * aligned with PMD_SIZE which is 2M
+ */
+#define NUMACHIP_LCSR_BASE 0x3ffffe000000ULL
+#define NUMACHIP_LCSR_LIM 0x3fffffffffffULL
+#define NUMACHIP_LCSR_SIZE (NUMACHIP_LCSR_LIM - NUMACHIP_LCSR_BASE + 1)
+
+static inline void *gcsr_address(int node, unsigned long offset)
+{
+ return __va(NUMACHIP_GCSR_BASE | (1UL << 15) |
+ CSR_NODE_BITS(node & CSR_NODE_MASK) | (offset & CSR_OFFSET_MASK));
+}
+
+static inline void *lcsr_address(unsigned long offset)
+{
+ return __va(NUMACHIP_LCSR_BASE | (1UL << 15) |
+ CSR_NODE_BITS(0xfff0) | (offset & CSR_OFFSET_MASK));
+}
+
+static inline unsigned int read_gcsr(int node, unsigned long offset)
+{
+ return swab32(readl(gcsr_address(node, offset)));
+}
+
+static inline void write_gcsr(int node, unsigned long offset, unsigned int val)
+{
+ writel(swab32(val), gcsr_address(node, offset));
+}
+
+static inline unsigned int read_lcsr(unsigned long offset)
+{
+ return swab32(readl(lcsr_address(offset)));
+}
+
+static inline void write_lcsr(unsigned long offset, unsigned int val)
+{
+ writel(swab32(val), lcsr_address(offset));
+}
+
+/* ========================================================================= */
+/* CSR_G0_STATE_CLEAR */
+/* ========================================================================= */
+
+#define CSR_G0_STATE_CLEAR (0x000 + (0 << 12))
+union numachip_csr_g0_state_clear {
+ unsigned int v;
+ struct numachip_csr_g0_state_clear_s {
+ unsigned int _state:2;
+ unsigned int _rsvd_2_6:5;
+ unsigned int _lost:1;
+ unsigned int _rsvd_8_31:24;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G0_NODE_IDS */
+/* ========================================================================= */
+
+#define CSR_G0_NODE_IDS (0x008 + (0 << 12))
+union numachip_csr_g0_node_ids {
+ unsigned int v;
+ struct numachip_csr_g0_node_ids_s {
+ unsigned int _initialid:16;
+ unsigned int _nodeid:12;
+ unsigned int _rsvd_28_31:4;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_EXT_IRQ_GEN */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_GEN (0x030 + (3 << 12))
+union numachip_csr_g3_ext_irq_gen {
+ unsigned int v;
+ struct numachip_csr_g3_ext_irq_gen_s {
+ unsigned int _vector:8;
+ unsigned int _msgtype:3;
+ unsigned int _index:5;
+ unsigned int _destination_apic_id:16;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_EXT_IRQ_STATUS */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_STATUS (0x034 + (3 << 12))
+union numachip_csr_g3_ext_irq_status {
+ unsigned int v;
+ struct numachip_csr_g3_ext_irq_status_s {
+ unsigned int _result:32;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_EXT_IRQ_DEST */
+/* ========================================================================= */
+
+#define CSR_G3_EXT_IRQ_DEST (0x038 + (3 << 12))
+union numachip_csr_g3_ext_irq_dest {
+ unsigned int v;
+ struct numachip_csr_g3_ext_irq_dest_s {
+ unsigned int _irq:8;
+ unsigned int _rsvd_8_31:24;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_NC_ATT_MAP_SELECT */
+/* ========================================================================= */
+
+#define CSR_G3_NC_ATT_MAP_SELECT (0x7fc + (3 << 12))
+union numachip_csr_g3_nc_att_map_select {
+ unsigned int v;
+ struct numachip_csr_g3_nc_att_map_select_s {
+ unsigned int _upper_address_bits:4;
+ unsigned int _select_ram:4;
+ unsigned int _rsvd_8_31:24;
+ } s;
+};
+
+/* ========================================================================= */
+/* CSR_G3_NC_ATT_MAP_SELECT_0-255 */
+/* ========================================================================= */
+
+#define CSR_G3_NC_ATT_MAP_SELECT_0 (0x800 + (3 << 12))
+
+#endif /* _ASM_X86_NUMACHIP_NUMACHIP_CSR_H */
+
diff --git a/kernel/arch/x86/include/asm/olpc.h b/kernel/arch/x86/include/asm/olpc.h
new file mode 100644
index 000000000..72f9adf6e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/olpc.h
@@ -0,0 +1,132 @@
+/* OLPC machine specific definitions */
+
+#ifndef _ASM_X86_OLPC_H
+#define _ASM_X86_OLPC_H
+
+#include <asm/geode.h>
+
+struct olpc_platform_t {
+ int flags;
+ uint32_t boardrev;
+ int ecver;
+};
+
+#define OLPC_F_PRESENT 0x01
+#define OLPC_F_DCON 0x02
+#define OLPC_F_EC_WIDE_SCI 0x04
+
+#ifdef CONFIG_OLPC
+
+extern struct olpc_platform_t olpc_platform_info;
+
+/*
+ * OLPC board IDs contain the major build number within the mask 0x0ff0,
+ * and the minor build number within 0x000f. Pre-builds have a minor
+ * number less than 8, and normal builds start at 8. For example, 0x0B10
+ * is a PreB1, and 0x0C18 is a C1.
+ */
+
+static inline uint32_t olpc_board(uint8_t id)
+{
+ return (id << 4) | 0x8;
+}
+
+static inline uint32_t olpc_board_pre(uint8_t id)
+{
+ return id << 4;
+}
+
+static inline int machine_is_olpc(void)
+{
+ return (olpc_platform_info.flags & OLPC_F_PRESENT) ? 1 : 0;
+}
+
+/*
+ * The DCON is OLPC's Display Controller. It has a number of unique
+ * features that we might want to take advantage of..
+ */
+static inline int olpc_has_dcon(void)
+{
+ return (olpc_platform_info.flags & OLPC_F_DCON) ? 1 : 0;
+}
+
+/*
+ * The "Mass Production" version of OLPC's XO is identified as being model
+ * C2. During the prototype phase, the following models (in chronological
+ * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models
+ * were based on Geode GX CPUs, and models after that were based upon
+ * Geode LX CPUs. There were also some hand-assembled models floating
+ * around, referred to as PreB1, PreB2, etc.
+ */
+static inline int olpc_board_at_least(uint32_t rev)
+{
+ return olpc_platform_info.boardrev >= rev;
+}
+
+extern void olpc_ec_wakeup_set(u16 value);
+extern void olpc_ec_wakeup_clear(u16 value);
+extern bool olpc_ec_wakeup_available(void);
+
+extern int olpc_ec_mask_write(u16 bits);
+extern int olpc_ec_sci_query(u16 *sci_value);
+
+#else
+
+static inline int machine_is_olpc(void)
+{
+ return 0;
+}
+
+static inline int olpc_has_dcon(void)
+{
+ return 0;
+}
+
+static inline void olpc_ec_wakeup_set(u16 value) { }
+static inline void olpc_ec_wakeup_clear(u16 value) { }
+
+static inline bool olpc_ec_wakeup_available(void)
+{
+ return false;
+}
+
+#endif
+
+#ifdef CONFIG_OLPC_XO1_PM
+extern void do_olpc_suspend_lowlevel(void);
+extern void olpc_xo1_pm_wakeup_set(u16 value);
+extern void olpc_xo1_pm_wakeup_clear(u16 value);
+#endif
+
+extern int pci_olpc_init(void);
+
+/* SCI source values */
+
+#define EC_SCI_SRC_EMPTY 0x00
+#define EC_SCI_SRC_GAME 0x01
+#define EC_SCI_SRC_BATTERY 0x02
+#define EC_SCI_SRC_BATSOC 0x04
+#define EC_SCI_SRC_BATERR 0x08
+#define EC_SCI_SRC_EBOOK 0x10 /* XO-1 only */
+#define EC_SCI_SRC_WLAN 0x20 /* XO-1 only */
+#define EC_SCI_SRC_ACPWR 0x40
+#define EC_SCI_SRC_BATCRIT 0x80
+#define EC_SCI_SRC_GPWAKE 0x100 /* XO-1.5 only */
+#define EC_SCI_SRC_ALL 0x1FF
+
+/* GPIO assignments */
+
+#define OLPC_GPIO_MIC_AC 1
+#define OLPC_GPIO_DCON_STAT0 5
+#define OLPC_GPIO_DCON_STAT1 6
+#define OLPC_GPIO_DCON_IRQ 7
+#define OLPC_GPIO_THRM_ALRM geode_gpio(10)
+#define OLPC_GPIO_DCON_LOAD 11
+#define OLPC_GPIO_DCON_BLANK 12
+#define OLPC_GPIO_SMB_CLK 14
+#define OLPC_GPIO_SMB_DATA 15
+#define OLPC_GPIO_WORKAUX geode_gpio(24)
+#define OLPC_GPIO_LID 26
+#define OLPC_GPIO_ECSCI 27
+
+#endif /* _ASM_X86_OLPC_H */
diff --git a/kernel/arch/x86/include/asm/olpc_ofw.h b/kernel/arch/x86/include/asm/olpc_ofw.h
new file mode 100644
index 000000000..24487712e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/olpc_ofw.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_X86_OLPC_OFW_H
+#define _ASM_X86_OLPC_OFW_H
+
+/* index into the page table containing the entry OFW occupies */
+#define OLPC_OFW_PDE_NR 1022
+
+#define OLPC_OFW_SIG 0x2057464F /* aka "OFW " */
+
+#ifdef CONFIG_OLPC
+
+extern bool olpc_ofw_is_installed(void);
+
+/* run an OFW command by calling into the firmware */
+#define olpc_ofw(name, args, res) \
+ __olpc_ofw((name), ARRAY_SIZE(args), args, ARRAY_SIZE(res), res)
+
+extern int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res,
+ void **res);
+
+/* determine whether OFW is available and lives in the proper memory */
+extern void olpc_ofw_detect(void);
+
+/* install OFW's pde permanently into the kernel's pgtable */
+extern void setup_olpc_ofw_pgd(void);
+
+/* check if OFW was detected during boot */
+extern bool olpc_ofw_present(void);
+
+extern void olpc_dt_build_devicetree(void);
+
+#else /* !CONFIG_OLPC */
+static inline void olpc_ofw_detect(void) { }
+static inline void setup_olpc_ofw_pgd(void) { }
+static inline void olpc_dt_build_devicetree(void) { }
+#endif /* !CONFIG_OLPC */
+
+#endif /* _ASM_X86_OLPC_OFW_H */
diff --git a/kernel/arch/x86/include/asm/page.h b/kernel/arch/x86/include/asm/page.h
new file mode 100644
index 000000000..802dde30c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/page.h
@@ -0,0 +1,76 @@
+#ifndef _ASM_X86_PAGE_H
+#define _ASM_X86_PAGE_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#include <asm/page_types.h>
+
+#ifdef CONFIG_X86_64
+#include <asm/page_64.h>
+#else
+#include <asm/page_32.h>
+#endif /* CONFIG_X86_64 */
+
+#ifndef __ASSEMBLY__
+
+struct page;
+
+#include <linux/range.h>
+extern struct range pfn_mapped[];
+extern int nr_pfn_mapped;
+
+static inline void clear_user_page(void *page, unsigned long vaddr,
+ struct page *pg)
+{
+ clear_page(page);
+}
+
+static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
+ struct page *topage)
+{
+ copy_page(to, from);
+}
+
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+#define __pa(x) __phys_addr((unsigned long)(x))
+#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
+/* __pa_symbol should be used for C visible symbols.
+ This seems to be the official gcc blessed way to do such arithmetic. */
+/*
+ * We need __phys_reloc_hide() here because gcc may assume that there is no
+ * overflow during __pa() calculation and can optimize it unexpectedly.
+ * Newer versions of gcc provide -fno-strict-overflow switch to handle this
+ * case properly. Once all supported versions of gcc understand it, we can
+ * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
+ */
+#define __pa_symbol(x) \
+ __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
+
+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+
+#define __boot_va(x) __va(x)
+#define __boot_pa(x) __pa(x)
+
+/*
+ * virt_to_page(kaddr) returns a valid pointer if and only if
+ * virt_addr_valid(kaddr) returns true.
+ */
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+extern bool __virt_addr_valid(unsigned long kaddr);
+#define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
+
+#endif /* __ASSEMBLY__ */
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_X86_PAGE_H */
diff --git a/kernel/arch/x86/include/asm/page_32.h b/kernel/arch/x86/include/asm/page_32.h
new file mode 100644
index 000000000..904f528cc
--- /dev/null
+++ b/kernel/arch/x86/include/asm/page_32.h
@@ -0,0 +1,48 @@
+#ifndef _ASM_X86_PAGE_32_H
+#define _ASM_X86_PAGE_32_H
+
+#include <asm/page_32_types.h>
+
+#ifndef __ASSEMBLY__
+
+#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern unsigned long __phys_addr(unsigned long);
+#else
+#define __phys_addr(x) __phys_addr_nodebug(x)
+#endif
+#define __phys_addr_symbol(x) __phys_addr(x)
+#define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
+#endif /* CONFIG_FLATMEM */
+
+#ifdef CONFIG_X86_USE_3DNOW
+#include <asm/mmx.h>
+
+static inline void clear_page(void *page)
+{
+ mmx_clear_page(page);
+}
+
+static inline void copy_page(void *to, void *from)
+{
+ mmx_copy_page(to, from);
+}
+#else /* !CONFIG_X86_USE_3DNOW */
+#include <linux/string.h>
+
+static inline void clear_page(void *page)
+{
+ memset(page, 0, PAGE_SIZE);
+}
+
+static inline void copy_page(void *to, void *from)
+{
+ memcpy(to, from, PAGE_SIZE);
+}
+#endif /* CONFIG_X86_3DNOW */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_32_H */
diff --git a/kernel/arch/x86/include/asm/page_32_types.h b/kernel/arch/x86/include/asm/page_32_types.h
new file mode 100644
index 000000000..3a52ee0e7
--- /dev/null
+++ b/kernel/arch/x86/include/asm/page_32_types.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_X86_PAGE_32_DEFS_H
+#define _ASM_X86_PAGE_32_DEFS_H
+
+#include <linux/const.h>
+
+/*
+ * This handles the memory map.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+
+#define __START_KERNEL_map __PAGE_OFFSET
+
+#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 0
+#define DEBUG_STACK 0
+#define MCE_STACK 0
+#define N_EXCEPTION_STACKS 1
+
+#ifdef CONFIG_X86_PAE
+/* 44=32+12, the limit we can fit into an unsigned long pfn */
+#define __PHYSICAL_MASK_SHIFT 44
+#define __VIRTUAL_MASK_SHIFT 32
+
+#else /* !CONFIG_X86_PAE */
+#define __PHYSICAL_MASK_SHIFT 32
+#define __VIRTUAL_MASK_SHIFT 32
+#endif /* CONFIG_X86_PAE */
+
+/*
+ * Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S)
+ */
+#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+extern unsigned int __VMALLOC_RESERVE;
+extern int sysctl_legacy_va_layout;
+
+extern void find_low_pfn_range(void);
+extern void setup_bootmem_allocator(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_32_DEFS_H */
diff --git a/kernel/arch/x86/include/asm/page_64.h b/kernel/arch/x86/include/asm/page_64.h
new file mode 100644
index 000000000..b3bebf9e5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/page_64.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_X86_PAGE_64_H
+#define _ASM_X86_PAGE_64_H
+
+#include <asm/page_64_types.h>
+
+#ifndef __ASSEMBLY__
+
+/* duplicated to the one in bootmem.h */
+extern unsigned long max_pfn;
+extern unsigned long phys_base;
+
+static inline unsigned long __phys_addr_nodebug(unsigned long x)
+{
+ unsigned long y = x - __START_KERNEL_map;
+
+ /* use the carry flag to determine if x was < __START_KERNEL_map */
+ x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
+
+ return x;
+}
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern unsigned long __phys_addr(unsigned long);
+extern unsigned long __phys_addr_symbol(unsigned long);
+#else
+#define __phys_addr(x) __phys_addr_nodebug(x)
+#define __phys_addr_symbol(x) \
+ ((unsigned long)(x) - __START_KERNEL_map + phys_base)
+#endif
+
+#define __phys_reloc_hide(x) (x)
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn) ((pfn) < max_pfn)
+#endif
+
+void clear_page(void *page);
+void copy_page(void *to, void *from);
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+# define __HAVE_ARCH_GATE_AREA 1
+#endif
+
+#endif /* _ASM_X86_PAGE_64_H */
diff --git a/kernel/arch/x86/include/asm/page_64_types.h b/kernel/arch/x86/include/asm/page_64_types.h
new file mode 100644
index 000000000..4edd53b79
--- /dev/null
+++ b/kernel/arch/x86/include/asm/page_64_types.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_PAGE_64_DEFS_H
+#define _ASM_X86_PAGE_64_DEFS_H
+
+#ifdef CONFIG_KASAN
+#define KASAN_STACK_ORDER 1
+#else
+#define KASAN_STACK_ORDER 0
+#endif
+
+#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define CURRENT_MASK (~(THREAD_SIZE - 1))
+
+#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER)
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+
+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+
+#define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
+#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 2
+#define DEBUG_STACK 3
+#define MCE_STACK 4
+#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
+
+#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+
+/*
+ * Set __PAGE_OFFSET to the most negative possible address +
+ * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
+ * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
+ * what Xen requires.
+ */
+#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
+
+#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+
+/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+#define __PHYSICAL_MASK_SHIFT 46
+#define __VIRTUAL_MASK_SHIFT 47
+
+/*
+ * Kernel image size is limited to 1GiB due to the fixmap living in the
+ * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use
+ * 512MiB by default, leaving 1.5GiB for modules once the page tables
+ * are fully set up. If kernel ASLR is configured, it can extend the
+ * kernel page table mapping, reducing the size of the modules area.
+ */
+#define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024)
+#if defined(CONFIG_RANDOMIZE_BASE) && \
+ CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT
+#define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET
+#else
+#define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT
+#endif
+
+#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/kernel/arch/x86/include/asm/page_types.h b/kernel/arch/x86/include/asm/page_types.h
new file mode 100644
index 000000000..c7c712f26
--- /dev/null
+++ b/kernel/arch/x86/include/asm/page_types.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_X86_PAGE_DEFS_H
+#define _ASM_X86_PAGE_DEFS_H
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+ virtual addresses are 32-bits but physical addresses are larger
+ (ie, 32-bit PAE). */
+#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+
+#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define HUGE_MAX_HSTATE 2
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \
+ CONFIG_PHYSICAL_ALIGN)
+
+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+
+#ifdef CONFIG_X86_64
+#include <asm/page_64_types.h>
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+#else
+#include <asm/page_32_types.h>
+#define IOREMAP_MAX_ORDER (PMD_SHIFT)
+#endif /* CONFIG_X86_64 */
+
+#ifndef __ASSEMBLY__
+
+extern int devmem_is_allowed(unsigned long pagenr);
+
+extern unsigned long max_low_pfn_mapped;
+extern unsigned long max_pfn_mapped;
+
+static inline phys_addr_t get_max_mapped(void)
+{
+ return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+}
+
+bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
+
+extern unsigned long init_memory_mapping(unsigned long start,
+ unsigned long end);
+
+extern void initmem_init(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_DEFS_H */
diff --git a/kernel/arch/x86/include/asm/paravirt.h b/kernel/arch/x86/include/asm/paravirt.h
new file mode 100644
index 000000000..8957810ad
--- /dev/null
+++ b/kernel/arch/x86/include/asm/paravirt.h
@@ -0,0 +1,995 @@
+#ifndef _ASM_X86_PARAVIRT_H
+#define _ASM_X86_PARAVIRT_H
+/* Various instructions on x86 need to be replaced for
+ * para-virtualization: those hooks are defined here. */
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/pgtable_types.h>
+#include <asm/asm.h>
+
+#include <asm/paravirt_types.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
+static inline int paravirt_enabled(void)
+{
+ return pv_info.paravirt_enabled;
+}
+
+static inline void load_sp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+{
+ PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
+}
+
+/* The paravirtualized CPUID instruction. */
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+static inline unsigned long paravirt_get_debugreg(int reg)
+{
+ return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
+}
+#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
+static inline void set_debugreg(unsigned long val, int reg)
+{
+ PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
+}
+
+static inline void clts(void)
+{
+ PVOP_VCALL0(pv_cpu_ops.clts);
+}
+
+static inline unsigned long read_cr0(void)
+{
+ return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
+}
+
+static inline void write_cr0(unsigned long x)
+{
+ PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
+}
+
+static inline unsigned long read_cr2(void)
+{
+ return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
+}
+
+static inline void write_cr2(unsigned long x)
+{
+ PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
+}
+
+static inline unsigned long read_cr3(void)
+{
+ return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
+}
+
+static inline void write_cr3(unsigned long x)
+{
+ PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
+}
+
+static inline unsigned long __read_cr4(void)
+{
+ return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
+}
+static inline unsigned long __read_cr4_safe(void)
+{
+ return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
+}
+
+static inline void __write_cr4(unsigned long x)
+{
+ PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
+}
+
+#ifdef CONFIG_X86_64
+static inline unsigned long read_cr8(void)
+{
+ return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
+}
+
+static inline void write_cr8(unsigned long x)
+{
+ PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
+}
+#endif
+
+static inline void arch_safe_halt(void)
+{
+ PVOP_VCALL0(pv_irq_ops.safe_halt);
+}
+
+static inline void halt(void)
+{
+ PVOP_VCALL0(pv_irq_ops.halt);
+}
+
+static inline void wbinvd(void)
+{
+ PVOP_VCALL0(pv_cpu_ops.wbinvd);
+}
+
+#define get_kernel_rpl() (pv_info.kernel_rpl)
+
+static inline u64 paravirt_read_msr(unsigned msr, int *err)
+{
+ return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
+}
+
+static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
+{
+ return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
+}
+
+/* These should all do BUG_ON(_err), but our headers are too tangled. */
+#define rdmsr(msr, val1, val2) \
+do { \
+ int _err; \
+ u64 _l = paravirt_read_msr(msr, &_err); \
+ val1 = (u32)_l; \
+ val2 = _l >> 32; \
+} while (0)
+
+#define wrmsr(msr, val1, val2) \
+do { \
+ paravirt_write_msr(msr, val1, val2); \
+} while (0)
+
+#define rdmsrl(msr, val) \
+do { \
+ int _err; \
+ val = paravirt_read_msr(msr, &_err); \
+} while (0)
+
+#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr, a, b) \
+({ \
+ int _err; \
+ u64 _l = paravirt_read_msr(msr, &_err); \
+ (*a) = (u32)_l; \
+ (*b) = _l >> 32; \
+ _err; \
+})
+
+static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+{
+ int err;
+
+ *p = paravirt_read_msr(msr, &err);
+ return err;
+}
+
+static inline u64 paravirt_read_tsc(void)
+{
+ return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
+}
+
+#define rdtscl(low) \
+do { \
+ u64 _l = paravirt_read_tsc(); \
+ low = (int)_l; \
+} while (0)
+
+#define rdtscll(val) (val = paravirt_read_tsc())
+
+static inline unsigned long long paravirt_sched_clock(void)
+{
+ return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
+}
+
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
+}
+
+static inline unsigned long long paravirt_read_pmc(int counter)
+{
+ return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
+}
+
+#define rdpmc(counter, low, high) \
+do { \
+ u64 _l = paravirt_read_pmc(counter); \
+ low = (u32)_l; \
+ high = _l >> 32; \
+} while (0)
+
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
+static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
+{
+ return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
+}
+
+#define rdtscp(low, high, aux) \
+do { \
+ int __aux; \
+ unsigned long __val = paravirt_rdtscp(&__aux); \
+ (low) = (u32)__val; \
+ (high) = (u32)(__val >> 32); \
+ (aux) = __aux; \
+} while (0)
+
+#define rdtscpll(val, aux) \
+do { \
+ unsigned long __aux; \
+ val = paravirt_rdtscp(&__aux); \
+ (aux) = __aux; \
+} while (0)
+
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+ PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+ PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
+}
+
+static inline void load_TR_desc(void)
+{
+ PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
+}
+static inline void load_gdt(const struct desc_ptr *dtr)
+{
+ PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
+}
+static inline void load_idt(const struct desc_ptr *dtr)
+{
+ PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
+}
+static inline void set_ldt(const void *addr, unsigned entries)
+{
+ PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
+}
+static inline void store_idt(struct desc_ptr *dtr)
+{
+ PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
+}
+static inline unsigned long paravirt_store_tr(void)
+{
+ return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
+}
+#define store_tr(tr) ((tr) = paravirt_store_tr())
+static inline void load_TLS(struct thread_struct *t, unsigned cpu)
+{
+ PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
+}
+
+#ifdef CONFIG_X86_64
+static inline void load_gs_index(unsigned int gs)
+{
+ PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
+}
+#endif
+
+static inline void write_ldt_entry(struct desc_struct *dt, int entry,
+ const void *desc)
+{
+ PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
+}
+
+static inline void write_gdt_entry(struct desc_struct *dt, int entry,
+ void *desc, int type)
+{
+ PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
+}
+
+static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
+{
+ PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
+}
+static inline void set_iopl_mask(unsigned mask)
+{
+ PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
+}
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void)
+{
+ pv_cpu_ops.io_delay();
+#ifdef REALLY_SLOW_IO
+ pv_cpu_ops.io_delay();
+ pv_cpu_ops.io_delay();
+ pv_cpu_ops.io_delay();
+#endif
+}
+
+#ifdef CONFIG_SMP
+static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
+ unsigned long start_esp)
+{
+ PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
+ phys_apicid, start_eip, start_esp);
+}
+#endif
+
+static inline void paravirt_activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+ PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
+}
+
+static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
+}
+
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
+{
+ PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
+}
+
+static inline void __flush_tlb(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
+}
+static inline void __flush_tlb_global(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
+}
+static inline void __flush_tlb_single(unsigned long addr)
+{
+ PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
+}
+
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
+}
+
+static inline int paravirt_pgd_alloc(struct mm_struct *mm)
+{
+ return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
+}
+
+static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
+}
+
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
+{
+ PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
+}
+static inline void paravirt_release_pte(unsigned long pfn)
+{
+ PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
+}
+
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
+{
+ PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
+}
+
+static inline void paravirt_release_pmd(unsigned long pfn)
+{
+ PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+}
+
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
+{
+ PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+}
+static inline void paravirt_release_pud(unsigned long pfn)
+{
+ PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
+}
+
+static inline void pte_update(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
+}
+static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
+}
+
+static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
+}
+
+static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
+}
+
+static inline pte_t __pte(pteval_t val)
+{
+ pteval_t ret;
+
+ if (sizeof(pteval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pteval_t,
+ pv_mmu_ops.make_pte,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALLEE1(pteval_t,
+ pv_mmu_ops.make_pte,
+ val);
+
+ return (pte_t) { .pte = ret };
+}
+
+static inline pteval_t pte_val(pte_t pte)
+{
+ pteval_t ret;
+
+ if (sizeof(pteval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
+ pte.pte, (u64)pte.pte >> 32);
+ else
+ ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
+ pte.pte);
+
+ return ret;
+}
+
+static inline pgd_t __pgd(pgdval_t val)
+{
+ pgdval_t ret;
+
+ if (sizeof(pgdval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
+ val);
+
+ return (pgd_t) { ret };
+}
+
+static inline pgdval_t pgd_val(pgd_t pgd)
+{
+ pgdval_t ret;
+
+ if (sizeof(pgdval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
+ pgd.pgd, (u64)pgd.pgd >> 32);
+ else
+ ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
+ pgd.pgd);
+
+ return ret;
+}
+
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pteval_t ret;
+
+ ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
+ mm, addr, ptep);
+
+ return (pte_t) { .pte = ret };
+}
+
+static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ if (sizeof(pteval_t) > sizeof(long))
+ /* 5 arg words */
+ pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
+ else
+ PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
+ mm, addr, ptep, pte.pte);
+}
+
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+ if (sizeof(pteval_t) > sizeof(long))
+ PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
+ pte.pte, (u64)pte.pte >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
+ pte.pte);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ if (sizeof(pteval_t) > sizeof(long))
+ /* 5 arg words */
+ pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
+ else
+ PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ if (sizeof(pmdval_t) > sizeof(long))
+ /* 5 arg words */
+ pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
+ else
+ PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+ native_pmd_val(pmd));
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ pmdval_t val = native_pmd_val(pmd);
+
+ if (sizeof(pmdval_t) > sizeof(long))
+ PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
+}
+
+#if CONFIG_PGTABLE_LEVELS >= 3
+static inline pmd_t __pmd(pmdval_t val)
+{
+ pmdval_t ret;
+
+ if (sizeof(pmdval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
+ val);
+
+ return (pmd_t) { ret };
+}
+
+static inline pmdval_t pmd_val(pmd_t pmd)
+{
+ pmdval_t ret;
+
+ if (sizeof(pmdval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
+ pmd.pmd, (u64)pmd.pmd >> 32);
+ else
+ ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
+ pmd.pmd);
+
+ return ret;
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ pudval_t val = native_pud_val(pud);
+
+ if (sizeof(pudval_t) > sizeof(long))
+ PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
+ val, (u64)val >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
+ val);
+}
+#if CONFIG_PGTABLE_LEVELS == 4
+static inline pud_t __pud(pudval_t val)
+{
+ pudval_t ret;
+
+ if (sizeof(pudval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
+ val);
+
+ return (pud_t) { ret };
+}
+
+static inline pudval_t pud_val(pud_t pud)
+{
+ pudval_t ret;
+
+ if (sizeof(pudval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
+ pud.pud, (u64)pud.pud >> 32);
+ else
+ ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
+ pud.pud);
+
+ return ret;
+}
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ pgdval_t val = native_pgd_val(pgd);
+
+ if (sizeof(pgdval_t) > sizeof(long))
+ PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
+ val, (u64)val >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
+ val);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+ set_pgd(pgdp, __pgd(0));
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+}
+
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+
+#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
+
+#ifdef CONFIG_X86_PAE
+/* Special-case pte-setting operations for PAE, which can't update a
+ 64-bit pte atomically */
+static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
+ pte.pte, pte.pte >> 32);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
+}
+#else /* !CONFIG_X86_PAE */
+static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ set_pte(ptep, pte);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ set_pte_at(mm, addr, ptep, __pte(0));
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd(0));
+}
+#endif /* CONFIG_X86_PAE */
+
+#define __HAVE_ARCH_START_CONTEXT_SWITCH
+static inline void arch_start_context_switch(struct task_struct *prev)
+{
+ PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
+}
+
+static inline void arch_end_context_switch(struct task_struct *next)
+{
+ PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
+}
+
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+ PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+}
+
+static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ pv_mmu_ops.set_fixmap(idx, phys, flags);
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+
+static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
+ __ticket_t ticket)
+{
+ PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
+}
+
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
+ __ticket_t ticket)
+{
+ PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
+}
+
+#endif
+
+#ifdef CONFIG_X86_32
+#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
+#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
+
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
+#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
+
+#define PV_FLAGS_ARG "0"
+#define PV_EXTRA_CLOBBERS
+#define PV_VEXTRA_CLOBBERS
+#else
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS \
+ "push %rcx;" \
+ "push %rdx;" \
+ "push %rsi;" \
+ "push %rdi;" \
+ "push %r8;" \
+ "push %r9;" \
+ "push %r10;" \
+ "push %r11;"
+#define PV_RESTORE_ALL_CALLER_REGS \
+ "pop %r11;" \
+ "pop %r10;" \
+ "pop %r9;" \
+ "pop %r8;" \
+ "pop %rdi;" \
+ "pop %rsi;" \
+ "pop %rdx;" \
+ "pop %rcx;"
+
+/* We save some registers, but saving all of them would be too much. We
+ * clobber all caller-saved registers except the argument register %rdi. */
+#define PV_SAVE_REGS "pushq %%rdi;"
+#define PV_RESTORE_REGS "popq %%rdi;"
+#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
+#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
+#define PV_FLAGS_ARG "D"
+#endif
+
+/*
+ * Generate a thunk around a function which saves all caller-save
+ * registers except for the return value. This allows C functions to
+ * be called from assembler code where fewer than normal registers are
+ * available. It may also help code generation around calls from C
+ * code if the common case doesn't use many registers.
+ *
+ * When a callee is wrapped in a thunk, the caller can assume that all
+ * arg regs and all scratch registers are preserved across the
+ * call. The return value in rax/eax will not be saved, even for void
+ * functions.
+ */
+#define PV_CALLEE_SAVE_REGS_THUNK(func) \
+ extern typeof(func) __raw_callee_save_##func; \
+ \
+ asm(".pushsection .text;" \
+ ".globl __raw_callee_save_" #func " ; " \
+ "__raw_callee_save_" #func ": " \
+ PV_SAVE_ALL_CALLER_REGS \
+ "call " #func ";" \
+ PV_RESTORE_ALL_CALLER_REGS \
+ "ret;" \
+ ".popsection")
+
+/* Get a reference to a callee-save function */
+#define PV_CALLEE_SAVE(func) \
+ ((struct paravirt_callee_save) { __raw_callee_save_##func })
+
+/* Promise that "func" already uses the right calling convention */
+#define __PV_IS_CALLEE_SAVE(func) \
+ ((struct paravirt_callee_save) { func })
+
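+/*
+ * Illustrative sketch (not part of this patch): a hypothetical helper
+ * "my_save_fl" wrapped so that asm callers may assume every register is
+ * preserved, then installed as a callee-save pv op:
+ *
+ *	static unsigned long my_save_fl(void) { return native_save_fl(); }
+ *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
+ *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
+ */
+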
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+ return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long f)
+{
+ PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+ PVOP_VCALLEE0(pv_irq_ops.irq_disable);
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+ PVOP_VCALLEE0(pv_irq_ops.irq_enable);
+}
+
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+ unsigned long f;
+
+ f = arch_local_save_flags();
+ arch_local_irq_disable();
+ return f;
+}
+
+
+/* Make sure as little as possible of this mess escapes. */
+#undef PARAVIRT_CALL
+#undef __PVOP_CALL
+#undef __PVOP_VCALL
+#undef PVOP_VCALL0
+#undef PVOP_CALL0
+#undef PVOP_VCALL1
+#undef PVOP_CALL1
+#undef PVOP_VCALL2
+#undef PVOP_CALL2
+#undef PVOP_VCALL3
+#undef PVOP_CALL3
+#undef PVOP_VCALL4
+#undef PVOP_CALL4
+
+extern void default_banner(void);
+
+#else /* __ASSEMBLY__ */
+
+#define _PVSITE(ptype, clobbers, ops, word, algn) \
+771:; \
+ ops; \
+772:; \
+ .pushsection .parainstructions,"a"; \
+ .align algn; \
+ word 771b; \
+ .byte ptype; \
+ .byte 772b-771b; \
+ .short clobbers; \
+ .popsection
+
+
+#define COND_PUSH(set, mask, reg) \
+ .if ((~(set)) & mask); push %reg; .endif
+#define COND_POP(set, mask, reg) \
+ .if ((~(set)) & mask); pop %reg; .endif
+
+#ifdef CONFIG_X86_64
+
+#define PV_SAVE_REGS(set) \
+ COND_PUSH(set, CLBR_RAX, rax); \
+ COND_PUSH(set, CLBR_RCX, rcx); \
+ COND_PUSH(set, CLBR_RDX, rdx); \
+ COND_PUSH(set, CLBR_RSI, rsi); \
+ COND_PUSH(set, CLBR_RDI, rdi); \
+ COND_PUSH(set, CLBR_R8, r8); \
+ COND_PUSH(set, CLBR_R9, r9); \
+ COND_PUSH(set, CLBR_R10, r10); \
+ COND_PUSH(set, CLBR_R11, r11)
+#define PV_RESTORE_REGS(set) \
+ COND_POP(set, CLBR_R11, r11); \
+ COND_POP(set, CLBR_R10, r10); \
+ COND_POP(set, CLBR_R9, r9); \
+ COND_POP(set, CLBR_R8, r8); \
+ COND_POP(set, CLBR_RDI, rdi); \
+ COND_POP(set, CLBR_RSI, rsi); \
+ COND_POP(set, CLBR_RDX, rdx); \
+ COND_POP(set, CLBR_RCX, rcx); \
+ COND_POP(set, CLBR_RAX, rax)
+
+#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
+#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
+#define PARA_INDIRECT(addr) *addr(%rip)
+#else
+#define PV_SAVE_REGS(set) \
+ COND_PUSH(set, CLBR_EAX, eax); \
+ COND_PUSH(set, CLBR_EDI, edi); \
+ COND_PUSH(set, CLBR_ECX, ecx); \
+ COND_PUSH(set, CLBR_EDX, edx)
+#define PV_RESTORE_REGS(set) \
+ COND_POP(set, CLBR_EDX, edx); \
+ COND_POP(set, CLBR_ECX, ecx); \
+ COND_POP(set, CLBR_EDI, edi); \
+ COND_POP(set, CLBR_EAX, eax)
+
+#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+#define PARA_INDIRECT(addr) *%cs:addr
+#endif
+
+#define INTERRUPT_RETURN \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
+
+#define DISABLE_INTERRUPTS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+
+#define ENABLE_INTERRUPTS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+
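+/*
+ * For illustration only: the entry code invokes these as, e.g.,
+ *	ENABLE_INTERRUPTS(CLBR_NONE)
+ * and on native hardware the whole site is roughly patched down to a
+ * bare "sti" plus NOP padding.
+ */
+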
+#define USERGS_SYSRET32 \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
+
+#ifdef CONFIG_X86_32
+#define GET_CR0_INTO_EAX \
+ push %ecx; push %edx; \
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
+ pop %edx; pop %ecx
+
+#define ENABLE_INTERRUPTS_SYSEXIT \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
+
+
+#else /* !CONFIG_X86_32 */
+
+/*
+ * If swapgs is used while the userspace stack is still current,
+ * there's no way to call a pvop. The PV replacement *must* be
+ * inlined, or the swapgs instruction must be trapped and emulated.
+ */
+#define SWAPGS_UNSAFE_STACK \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
+ swapgs)
+
+/*
+ * Note: swapgs is very special, and in practice is either going to be
+ * implemented with a single "swapgs" instruction or something equally
+ * special. Either way, we don't need to save any registers for it.
+ */
+#define SWAPGS \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
+ )
+
+#define GET_CR2_INTO_RAX \
+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
+
+#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
+ CLBR_NONE, \
+ call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
+
+#define USERGS_SYSRET64 \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+#endif /* CONFIG_X86_32 */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_PARAVIRT */
+# define default_banner x86_init_noop
+#ifndef __ASSEMBLY__
+static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+}
+
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
+{
+}
+#endif /* __ASSEMBLY__ */
+#endif /* !CONFIG_PARAVIRT */
+#endif /* _ASM_X86_PARAVIRT_H */
diff --git a/kernel/arch/x86/include/asm/paravirt_types.h b/kernel/arch/x86/include/asm/paravirt_types.h
new file mode 100644
index 000000000..f7b0b5c11
--- /dev/null
+++ b/kernel/arch/x86/include/asm/paravirt_types.h
@@ -0,0 +1,708 @@
+#ifndef _ASM_X86_PARAVIRT_TYPES_H
+#define _ASM_X86_PARAVIRT_TYPES_H
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0
+#define CLBR_EAX (1 << 0)
+#define CLBR_ECX (1 << 1)
+#define CLBR_EDX (1 << 2)
+#define CLBR_EDI (1 << 3)
+
+#ifdef CONFIG_X86_32
+/* CLBR_ANY should match all the regs the platform has. For i386, that's all of them. */
+#define CLBR_ANY ((1 << 4) - 1)
+
+#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
+#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
+#define CLBR_SCRATCH (0)
+#else
+#define CLBR_RAX CLBR_EAX
+#define CLBR_RCX CLBR_ECX
+#define CLBR_RDX CLBR_EDX
+#define CLBR_RDI CLBR_EDI
+#define CLBR_RSI (1 << 4)
+#define CLBR_R8 (1 << 5)
+#define CLBR_R9 (1 << 6)
+#define CLBR_R10 (1 << 7)
+#define CLBR_R11 (1 << 8)
+
+#define CLBR_ANY ((1 << 9) - 1)
+
+#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
+ CLBR_RCX | CLBR_R8 | CLBR_R9)
+#define CLBR_RET_REG (CLBR_RAX)
+#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
+
+#endif /* X86_64 */
+
+#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/desc_defs.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable_types.h>
+
+struct page;
+struct thread_struct;
+struct desc_ptr;
+struct tss_struct;
+struct mm_struct;
+struct desc_struct;
+struct task_struct;
+struct cpumask;
+
+/*
+ * Wrapper type for pointers to code which uses the non-standard
+ * calling convention. See PV_CALLEE_SAVE_REGS_THUNK in paravirt.h.
+ */
+struct paravirt_callee_save {
+ void *func;
+};
+
+/* general info */
+struct pv_info {
+ unsigned int kernel_rpl;
+ int shared_kernel_pmd;
+
+#ifdef CONFIG_X86_64
+ u16 extra_user_64bit_cs; /* __USER_CS if none */
+#endif
+
+ int paravirt_enabled;
+ const char *name;
+};
+
+struct pv_init_ops {
+ /*
+ * Patch may replace one of the defined code sequences with
+ * arbitrary code, subject to the same register constraints.
+ * This generally means the code is not free to clobber any
+ * registers other than EAX. The patch function should return
+ * the number of bytes of code generated, as we nop pad the
+ * rest in generic code.
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned long addr, unsigned len);
+};
+
+
+struct pv_lazy_ops {
+ /* Set deferred update mode, used for batching operations. */
+ void (*enter)(void);
+ void (*leave)(void);
+ void (*flush)(void);
+};
+
+struct pv_time_ops {
+ unsigned long long (*sched_clock)(void);
+ unsigned long long (*steal_clock)(int cpu);
+ unsigned long (*get_tsc_khz)(void);
+};
+
+struct pv_cpu_ops {
+ /* hooks for various privileged instructions */
+ unsigned long (*get_debugreg)(int regno);
+ void (*set_debugreg)(int regno, unsigned long value);
+
+ void (*clts)(void);
+
+ unsigned long (*read_cr0)(void);
+ void (*write_cr0)(unsigned long);
+
+ unsigned long (*read_cr4_safe)(void);
+ unsigned long (*read_cr4)(void);
+ void (*write_cr4)(unsigned long);
+
+#ifdef CONFIG_X86_64
+ unsigned long (*read_cr8)(void);
+ void (*write_cr8)(unsigned long);
+#endif
+
+ /* Segment descriptor handling */
+ void (*load_tr_desc)(void);
+ void (*load_gdt)(const struct desc_ptr *);
+ void (*load_idt)(const struct desc_ptr *);
+ /* store_gdt has been removed. */
+ void (*store_idt)(struct desc_ptr *);
+ void (*set_ldt)(const void *desc, unsigned entries);
+ unsigned long (*store_tr)(void);
+ void (*load_tls)(struct thread_struct *t, unsigned int cpu);
+#ifdef CONFIG_X86_64
+ void (*load_gs_index)(unsigned int idx);
+#endif
+ void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
+ const void *desc);
+ void (*write_gdt_entry)(struct desc_struct *,
+ int entrynum, const void *desc, int size);
+ void (*write_idt_entry)(gate_desc *,
+ int entrynum, const gate_desc *gate);
+ void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
+ void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
+
+ void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
+
+ void (*set_iopl_mask)(unsigned mask);
+
+ void (*wbinvd)(void);
+ void (*io_delay)(void);
+
+ /* cpuid emulation, mostly so that caps bits can be disabled */
+ void (*cpuid)(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx);
+
+ /* MSR, PMC and TSC operations.
+ err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
+ u64 (*read_msr)(unsigned int msr, int *err);
+ int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
+
+ u64 (*read_tsc)(void);
+ u64 (*read_pmc)(int counter);
+ unsigned long long (*read_tscp)(unsigned int *aux);
+
+ /*
+ * Atomically enable interrupts and return to userspace. This
+ * is only ever used to return to 32-bit processes; in a
+ * 64-bit kernel, it's used for 32-on-64 compat processes, but
+ * never native 64-bit processes. (Jump, not call.)
+ */
+ void (*irq_enable_sysexit)(void);
+
+ /*
+ * Switch to usermode gs and return to 64-bit usermode using
+ * sysret. Only used in 64-bit kernels to return to 64-bit
+ * processes. Usermode register state, including %rsp, must
+ * already be restored.
+ */
+ void (*usergs_sysret64)(void);
+
+ /*
+ * Switch to usermode gs and return to 32-bit usermode using
+ * sysret. Used to return to 32-on-64 compat processes.
+ * Other usermode register state, including %esp, must already
+ * be restored.
+ */
+ void (*usergs_sysret32)(void);
+
+ /* Normal iret. Jump to this with the standard iret stack
+ frame set up. */
+ void (*iret)(void);
+
+ void (*swapgs)(void);
+
+ void (*start_context_switch)(struct task_struct *prev);
+ void (*end_context_switch)(struct task_struct *next);
+};
+
+struct pv_irq_ops {
+ /*
+ * Get/set interrupt state. save_fl and restore_fl are only
+ * expected to use X86_EFLAGS_IF; all other bits
+ * returned from save_fl are undefined, and may be ignored by
+ * restore_fl.
+ *
+ * NOTE: the callers of these functions expect the callee to preserve
+ * more registers than the standard C calling convention requires.
+ */
+ struct paravirt_callee_save save_fl;
+ struct paravirt_callee_save restore_fl;
+ struct paravirt_callee_save irq_disable;
+ struct paravirt_callee_save irq_enable;
+
+ void (*safe_halt)(void);
+ void (*halt)(void);
+
+#ifdef CONFIG_X86_64
+ void (*adjust_exception_frame)(void);
+#endif
+};
+
+struct pv_apic_ops {
+#ifdef CONFIG_X86_LOCAL_APIC
+ void (*startup_ipi_hook)(int phys_apicid,
+ unsigned long start_eip,
+ unsigned long start_esp);
+#endif
+};
+
+struct pv_mmu_ops {
+ unsigned long (*read_cr2)(void);
+ void (*write_cr2)(unsigned long);
+
+ unsigned long (*read_cr3)(void);
+ void (*write_cr3)(unsigned long);
+
+ /*
+ * Hooks for intercepting the creation/use/destruction of an
+ * mm_struct.
+ */
+ void (*activate_mm)(struct mm_struct *prev,
+ struct mm_struct *next);
+ void (*dup_mmap)(struct mm_struct *oldmm,
+ struct mm_struct *mm);
+ void (*exit_mmap)(struct mm_struct *mm);
+
+
+ /* TLB operations */
+ void (*flush_tlb_user)(void);
+ void (*flush_tlb_kernel)(void);
+ void (*flush_tlb_single)(unsigned long addr);
+ void (*flush_tlb_others)(const struct cpumask *cpus,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+
+ /* Hooks for allocating and freeing a pagetable top-level */
+ int (*pgd_alloc)(struct mm_struct *mm);
+ void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
+
+ /*
+ * Hooks for allocating/releasing pagetable pages when they're
+ * attached to a pagetable
+ */
+ void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+ void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+ void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+ void (*release_pte)(unsigned long pfn);
+ void (*release_pmd)(unsigned long pfn);
+ void (*release_pud)(unsigned long pfn);
+
+ /* Pagetable manipulation functions */
+ void (*set_pte)(pte_t *ptep, pte_t pteval);
+ void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval);
+ void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+ void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmdval);
+ void (*pte_update)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*pte_update_defer)(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+ void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp);
+ void (*pmd_update_defer)(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp);
+
+ pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+ struct paravirt_callee_save pte_val;
+ struct paravirt_callee_save make_pte;
+
+ struct paravirt_callee_save pgd_val;
+ struct paravirt_callee_save make_pgd;
+
+#if CONFIG_PGTABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+ void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
+ void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+ void (*pmd_clear)(pmd_t *pmdp);
+
+#endif /* CONFIG_X86_PAE */
+
+ void (*set_pud)(pud_t *pudp, pud_t pudval);
+
+ struct paravirt_callee_save pmd_val;
+ struct paravirt_callee_save make_pmd;
+
+#if CONFIG_PGTABLE_LEVELS == 4
+ struct paravirt_callee_save pud_val;
+ struct paravirt_callee_save make_pud;
+
+ void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
+#endif /* CONFIG_PGTABLE_LEVELS == 4 */
+#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
+
+ struct pv_lazy_ops lazy_mode;
+
+ /* dom0 ops */
+
+ /* Sometimes the physical address is a pfn, and sometimes it's
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
+};
+
+struct arch_spinlock;
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#else
+typedef u16 __ticket_t;
+#endif
+
+struct pv_lock_ops {
+ struct paravirt_callee_save lock_spinning;
+ void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+};
+
+/* This contains all the paravirt structures: each function gets a
+ * convenient number from its offset, which we use to indicate
+ * what to patch. */
+struct paravirt_patch_template {
+ struct pv_init_ops pv_init_ops;
+ struct pv_time_ops pv_time_ops;
+ struct pv_cpu_ops pv_cpu_ops;
+ struct pv_irq_ops pv_irq_ops;
+ struct pv_apic_ops pv_apic_ops;
+ struct pv_mmu_ops pv_mmu_ops;
+ struct pv_lock_ops pv_lock_ops;
+};
+
+extern struct pv_info pv_info;
+extern struct pv_init_ops pv_init_ops;
+extern struct pv_time_ops pv_time_ops;
+extern struct pv_cpu_ops pv_cpu_ops;
+extern struct pv_irq_ops pv_irq_ops;
+extern struct pv_apic_ops pv_apic_ops;
+extern struct pv_mmu_ops pv_mmu_ops;
+extern struct pv_lock_ops pv_lock_ops;
+
+#define PARAVIRT_PATCH(x) \
+ (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
+
+#define paravirt_type(op) \
+ [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
+ [paravirt_opptr] "i" (&(op))
+#define paravirt_clobber(clobber) \
+ [paravirt_clobber] "i" (clobber)
+
+/*
+ * Generate some code, and mark it as patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber) \
+ "771:\n\t" insn_string "\n" "772:\n" \
+ ".pushsection .parainstructions,\"a\"\n" \
+ _ASM_ALIGN "\n" \
+ _ASM_PTR " 771b\n" \
+ " .byte " type "\n" \
+ " .byte 772b-771b\n" \
+ " .short " clobber "\n" \
+ ".popsection\n"
+
+/* Generate patchable code, with the default asm parameters. */
+#define paravirt_alt(insn_string) \
+ _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
+
+/* Simple instruction patching code. */
+#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
+
+#define DEF_NATIVE(ops, name, code) \
+ __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+ asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
+
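+/*
+ * Sketch, for illustration only, of how the native patch sites use this
+ * (the real definitions live in arch/x86/kernel/paravirt_patch_*.c):
+ *
+ *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
+ *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
+ *
+ * native_patch() can then copy start_pv_irq_ops_irq_disable..end_* over
+ * the call site via paravirt_patch_insns().
+ */
+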
+unsigned paravirt_patch_nop(void);
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ignore(unsigned len);
+unsigned paravirt_patch_call(void *insnbuf,
+ const void *target, u16 tgt_clobbers,
+ unsigned long addr, u16 site_clobbers,
+ unsigned len);
+unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
+ unsigned long addr, unsigned len);
+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+ unsigned long addr, unsigned len);
+
+unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+ const char *start, const char *end);
+
+unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+ unsigned long addr, unsigned len);
+
+int paravirt_disable_iospace(void);
+
+/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
+
+/*
+ * These macros are intended to wrap calls through one of the paravirt
+ * ops structs, so that they can be later identified and patched at
+ * runtime.
+ *
+ * Normally, a call to a pv_op function is a simple indirect call:
+ * (pv_op_struct.operations)(args...).
+ *
+ * Unfortunately, this is a relatively slow operation for modern CPUs,
+ * because it cannot necessarily determine what the destination
+ * address is. In this case, the address is a runtime constant, so at
+ * the very least we can patch the call to be a simple direct call, or
+ * ideally, patch an inline implementation into the callsite. (Direct
+ * calls are essentially free, because the call and return addresses
+ * are completely predictable.)
+ *
+ * For i386, these macros rely on the standard gcc "regparm(3)" calling
+ * convention, in which the first three arguments are placed in %eax,
+ * %edx, %ecx (in that order), and the remaining arguments are placed
+ * on the stack. All caller-save registers (eax,edx,ecx) are expected
+ * to be modified (either clobbered or used for return values).
+ * x86_64, on the other hand, already specifies a register-based calling
+ * convention, returning in %rax, with parameters going in %rdi, %rsi,
+ * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
+ * special handling for dealing with 4 arguments, unlike i386.
+ * However, x86_64 also has to clobber all caller-saved registers, which
+ * unfortunately amounts to quite a few (r8 - r11).
+ *
+ * The call instruction itself is marked by placing its start address
+ * and size into the .parainstructions section, so that
+ * apply_paravirt() in arch/x86/kernel/alternative.c can do the
+ * appropriate patching under the control of the backend pv_init_ops
+ * implementation.
+ *
+ * Unfortunately there's no way to get gcc to generate the args setup
+ * for the call, and then allow the call itself to be generated by an
+ * inline asm. Because of this, we must do the complete arg setup and
+ * return value handling from within these macros. This is fairly
+ * cumbersome.
+ *
+ * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
+ * It could be extended to more arguments, but there would be little
+ * to be gained from that. For each number of arguments, there are
+ * the two VCALL and CALL variants for void and non-void functions.
+ *
+ * When there is a return value, the invoker of the macro must specify
+ * the return type. The macro then uses sizeof() on that type to
+ * determine whether it's a 32- or 64-bit value, and places the return
+ * value in the right register(s) (just %eax for 32-bit, and %edx:%eax
+ * for 64-bit). On x86_64, it just returns in %rax regardless of
+ * the return value size.
+ *
+ * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
+ * arguments, in low,high order.
+ *
+ * Small structures are passed and returned in registers. The macro
+ * calling convention can't directly deal with this, so the wrapper
+ * functions must do this.
+ *
+ * These PVOP_* macros are only defined within this header. This
+ * means that all uses must be wrapped in inline functions. This also
+ * makes sure the incoming and outgoing types are always correct.
+ */
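+/*
+ * Sketch of typical use (taken from the wrappers in paravirt.h above),
+ * shown here only for illustration:
+ *
+ *	static inline unsigned long read_cr3(void)
+ *	{
+ *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
+ *	}
+ *
+ * which emits an indirect call through &pv_mmu_ops.read_cr3 and records
+ * the site in .parainstructions so it can later be patched.
+ */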
+#ifdef CONFIG_X86_32
+#define PVOP_VCALL_ARGS \
+ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
+#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
+
+#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
+ "=c" (__ecx)
+#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
+
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
+#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
+
+#define EXTRA_CLOBBERS
+#define VEXTRA_CLOBBERS
+#else /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
+#define PVOP_VCALL_ARGS \
+ unsigned long __edi = __edi, __esi = __esi, \
+ __edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
+
+#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
+ "=S" (__esi), "=d" (__edx), \
+ "=c" (__ecx)
+#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
+
+/* void functions are still allowed [re]ax for scratch */
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
+#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
+
+#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
+#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_PARAVIRT_DEBUG
+#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
+#else
+#define PVOP_TEST_NULL(op) ((void)op)
+#endif
+
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
+ pre, post, ...) \
+ ({ \
+ rettype __ret; \
+ PVOP_CALL_ARGS; \
+ PVOP_TEST_NULL(op); \
+ /* This is 32-bit specific, but is okay in 64-bit */ \
+ /* since this condition will never hold */ \
+ if (sizeof(rettype) > sizeof(unsigned long)) { \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ __ret = (rettype)((((u64)__edx) << 32) | __eax); \
+ } else { \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ __ret = (rettype)__eax; \
+ } \
+ __ret; \
+ })
+
+#define __PVOP_CALL(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
+ EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
+ PVOP_CALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
+ ({ \
+ PVOP_VCALL_ARGS; \
+ PVOP_TEST_NULL(op); \
+ asm volatile(pre \
+ paravirt_alt(PARAVIRT_CALL) \
+ post \
+ : call_clbr \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ })
+
+#define __PVOP_VCALL(op, pre, post, ...) \
+ ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
+ VEXTRA_CLOBBERS, \
+ pre, post, ##__VA_ARGS__)
+
+#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
+ ____PVOP_VCALL(op.func, CLBR_RET_REG, \
+ PVOP_VCALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+
+
+#define PVOP_CALL0(rettype, op) \
+ __PVOP_CALL(rettype, op, "", "")
+#define PVOP_VCALL0(op) \
+ __PVOP_VCALL(op, "", "")
+
+#define PVOP_CALLEE0(rettype, op) \
+ __PVOP_CALLEESAVE(rettype, op, "", "")
+#define PVOP_VCALLEE0(op) \
+ __PVOP_VCALLEESAVE(op, "", "")
+
+
+#define PVOP_CALL1(rettype, op, arg1) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALL1(op, arg1) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+#define PVOP_CALLEE1(rettype, op, arg1) \
+ __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALLEE1(op, arg1) \
+ __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+
+
+#define PVOP_CALL2(rettype, op, arg1, arg2) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALL2(op, arg1, arg2) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
+ __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALLEE2(op, arg1, arg2) \
+ __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+
+#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
+#define PVOP_VCALL3(op, arg1, arg2, arg3) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
+
+/* The 4-argument case is the only place i386 and x86_64 differ; x86_64 can be much simpler. */
+#ifdef CONFIG_X86_32
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
+ __PVOP_CALL(rettype, op, \
+ "push %[_arg4];", "lea 4(%%esp),%%esp;", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
+ __PVOP_VCALL(op, \
+ "push %[_arg4];", "lea 4(%%esp),%%esp;", \
+ "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
+ "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+#else
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
+ __PVOP_CALL(rettype, op, "", "", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
+ __PVOP_VCALL(op, "", "", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
+#endif
+
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+ PARAVIRT_LAZY_NONE,
+ PARAVIRT_LAZY_MMU,
+ PARAVIRT_LAZY_CPU,
+};
+
+enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
+void paravirt_start_context_switch(struct task_struct *prev);
+void paravirt_end_context_switch(struct task_struct *next);
+
+void paravirt_enter_lazy_mmu(void);
+void paravirt_leave_lazy_mmu(void);
+void paravirt_flush_lazy_mmu(void);
+
+void _paravirt_nop(void);
+u32 _paravirt_ident_32(u32);
+u64 _paravirt_ident_64(u64);
+
+#define paravirt_nop ((void *)_paravirt_nop)
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch_site {
+ u8 *instr; /* original instructions */
+ u8 instrtype; /* type of this instruction */
+ u8 len; /* length of original instruction */
+ u16 clobbers; /* what registers you may clobber */
+};
+
+extern struct paravirt_patch_site __parainstructions[],
+ __parainstructions_end[];
+
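+/*
+ * Rough sketch, for illustration only, of how the boot code consumes this
+ * table (see apply_paravirt() in arch/x86/kernel/alternative.c):
+ *
+ *	for (p = __parainstructions; p < __parainstructions_end; p++)
+ *		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ *					 (unsigned long)p->instr, p->len);
+ *
+ * with the resulting insnbuf copied back over p->instr and NOP-padded.
+ */
+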
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_PARAVIRT_TYPES_H */
diff --git a/kernel/arch/x86/include/asm/parport.h b/kernel/arch/x86/include/asm/parport.h
new file mode 100644
index 000000000..0d2d3b291
--- /dev/null
+++ b/kernel/arch/x86/include/asm/parport.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_X86_PARPORT_H
+#define _ASM_X86_PARPORT_H
+
+static int parport_pc_find_isa_ports(int autoirq, int autodma);
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+ return parport_pc_find_isa_ports(autoirq, autodma);
+}
+
+#endif /* _ASM_X86_PARPORT_H */
diff --git a/kernel/arch/x86/include/asm/pat.h b/kernel/arch/x86/include/asm/pat.h
new file mode 100644
index 000000000..91bc4ba95
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pat.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_X86_PAT_H
+#define _ASM_X86_PAT_H
+
+#include <linux/types.h>
+#include <asm/pgtable_types.h>
+
+#ifdef CONFIG_X86_PAT
+extern int pat_enabled;
+#else
+static const int pat_enabled;
+#endif
+
+extern void pat_init(void);
+void pat_init_cache_modes(void);
+
+extern int reserve_memtype(u64 start, u64 end,
+ enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
+extern int free_memtype(u64 start, u64 end);
+
+extern int kernel_map_sync_memtype(u64 base, unsigned long size,
+ enum page_cache_mode pcm);
+
+int io_reserve_memtype(resource_size_t start, resource_size_t end,
+ enum page_cache_mode *pcm);
+
+void io_free_memtype(resource_size_t start, resource_size_t end);
+
+#endif /* _ASM_X86_PAT_H */
diff --git a/kernel/arch/x86/include/asm/pci-direct.h b/kernel/arch/x86/include/asm/pci-direct.h
new file mode 100644
index 000000000..b1e7a45d8
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pci-direct.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_X86_PCI_DIRECT_H
+#define _ASM_X86_PCI_DIRECT_H
+
+#include <linux/types.h>
+
+/* Direct PCI access. This is used for PCI accesses in early boot before
+ the PCI subsystem works. */
+
+extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
+extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
+extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
+extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
+extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
+extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val);
+
+extern int early_pci_allowed(void);
+
+extern unsigned int pci_early_dump_regs;
+extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
+extern void early_dump_pci_devices(void);
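+
+/*
+ * Illustrative (hypothetical) early-boot use of the accessors above,
+ * assuming PCI_VENDOR_ID from <uapi/linux/pci_regs.h>:
+ *
+ *	u32 id = read_pci_config(0, 0, 0, PCI_VENDOR_ID);
+ *	if (id != 0xffffffff)
+ *		...device 00:00.0 is present...
+ */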
+#endif /* _ASM_X86_PCI_DIRECT_H */
diff --git a/kernel/arch/x86/include/asm/pci-functions.h b/kernel/arch/x86/include/asm/pci-functions.h
new file mode 100644
index 000000000..ed0bab427
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pci-functions.h
@@ -0,0 +1,19 @@
+/*
+ * PCI BIOS function numbering for conventional PCI BIOS
+ * systems
+ */
+
+#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX
+#define PCIBIOS_PCI_BIOS_PRESENT 0xb101
+#define PCIBIOS_FIND_PCI_DEVICE 0xb102
+#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103
+#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
+#define PCIBIOS_READ_CONFIG_BYTE 0xb108
+#define PCIBIOS_READ_CONFIG_WORD 0xb109
+#define PCIBIOS_READ_CONFIG_DWORD 0xb10a
+#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b
+#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c
+#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d
+#define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e
+#define PCIBIOS_SET_PCI_HW_INT 0xb10f
+
diff --git a/kernel/arch/x86/include/asm/pci.h b/kernel/arch/x86/include/asm/pci.h
new file mode 100644
index 000000000..4e370a5d8
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pci.h
@@ -0,0 +1,158 @@
+#ifndef _ASM_X86_PCI_H
+#define _ASM_X86_PCI_H
+
+#include <linux/mm.h> /* for struct page */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+#include <asm/x86_init.h>
+
+#ifdef __KERNEL__
+
+struct pci_sysdata {
+ int domain; /* PCI domain */
+ int node; /* NUMA node */
+#ifdef CONFIG_ACPI
+ struct acpi_device *companion; /* ACPI companion device */
+#endif
+#ifdef CONFIG_X86_64
+ void *iommu; /* IOMMU private data */
+#endif
+};
+
+extern int pci_routeirq;
+extern int noioapicquirk;
+extern int noioapicreroute;
+
+#ifdef CONFIG_PCI
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_domain_nr(struct pci_bus *bus)
+{
+ struct pci_sysdata *sd = bus->sysdata;
+ return sd->domain;
+}
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+#endif
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ already-configured bus numbers - to be used for buggy BIOSes
+ or architectures with incomplete PCI setup by the loader */
+
+extern unsigned int pcibios_assign_all_busses(void);
+extern int pci_legacy_init(void);
+# ifdef CONFIG_ACPI
+# define x86_default_pci_init pci_acpi_init
+# else
+# define x86_default_pci_init pci_legacy_init
+# endif
+#else
+# define pcibios_assign_all_busses() 0
+# define x86_default_pci_init NULL
+#endif
+
+extern unsigned long pci_mem_start;
+#define PCIBIOS_MIN_IO 0x1000
+#define PCIBIOS_MIN_MEM (pci_mem_start)
+
+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
+
+extern int pcibios_enabled;
+void pcibios_config_init(void);
+void pcibios_scan_root(int bus);
+
+void pcibios_set_master(struct pci_dev *dev);
+struct irq_routing_table *pcibios_get_irq_routing_table(void);
+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
+
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state,
+ int write_combine);
+
+
+#ifdef CONFIG_PCI
+extern void early_quirks(void);
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ *strat = PCI_DMA_BURST_INFINITY;
+ *strategy_parameter = ~0UL;
+}
+#else
+static inline void early_quirks(void) { }
+#endif
+
+extern void pci_iommu_alloc(void);
+
+#ifdef CONFIG_PCI_MSI
+/* implemented in arch/x86/kernel/apic/io_apic.c */
+struct msi_desc;
+void native_compose_msi_msg(struct pci_dev *pdev, unsigned int irq,
+ unsigned int dest, struct msi_msg *msg, u8 hpet_id);
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void native_teardown_msi_irq(unsigned int irq);
+void native_restore_msi_irqs(struct pci_dev *dev);
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ unsigned int irq_base, unsigned int irq_offset);
+#else
+#define native_compose_msi_msg NULL
+#define native_setup_msi_irqs NULL
+#define native_teardown_msi_irq NULL
+#endif
+
+#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
+
+#endif /* __KERNEL__ */
+
+#ifdef CONFIG_X86_64
+#include <asm/pci_64.h>
+#endif
+
+/* implement the pci_ DMA API in terms of the generic device dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
+/* generic pci stuff */
+#include <asm-generic/pci.h>
+
+#ifdef CONFIG_NUMA
+/* Returns the node based on pci bus */
+static inline int __pcibus_to_node(const struct pci_bus *bus)
+{
+ const struct pci_sysdata *sd = bus->sysdata;
+
+ return sd->node;
+}
+
+static inline const struct cpumask *
+cpumask_of_pcibus(const struct pci_bus *bus)
+{
+ int node;
+
+ node = __pcibus_to_node(bus);
+ return (node == -1) ? cpu_online_mask :
+ cpumask_of_node(node);
+}
+#endif
+
+struct pci_setup_rom {
+ struct setup_data data;
+ uint16_t vendor;
+ uint16_t devid;
+ uint64_t pcilen;
+ unsigned long segment;
+ unsigned long bus;
+ unsigned long device;
+ unsigned long function;
+ uint8_t romdata[0];
+};
+
+#endif /* _ASM_X86_PCI_H */
diff --git a/kernel/arch/x86/include/asm/pci_64.h b/kernel/arch/x86/include/asm/pci_64.h
new file mode 100644
index 000000000..fe15cfb21
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pci_64.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_X86_PCI_64_H
+#define _ASM_X86_PCI_64_H
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_CALGARY_IOMMU
+static inline void *pci_iommu(struct pci_bus *bus)
+{
+ struct pci_sysdata *sd = bus->sysdata;
+ return sd->iommu;
+}
+
+static inline void set_pci_iommu(struct pci_bus *bus, void *val)
+{
+ struct pci_sysdata *sd = bus->sysdata;
+ sd->iommu = val;
+}
+#endif /* CONFIG_CALGARY_IOMMU */
+
+extern int (*pci_config_read)(int seg, int bus, int dev, int fn,
+ int reg, int len, u32 *value);
+extern int (*pci_config_write)(int seg, int bus, int dev, int fn,
+ int reg, int len, u32 value);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_PCI_64_H */
diff --git a/kernel/arch/x86/include/asm/pci_x86.h b/kernel/arch/x86/include/asm/pci_x86.h
new file mode 100644
index 000000000..164e3f8d3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pci_x86.h
@@ -0,0 +1,210 @@
+/*
+ * Low-Level PCI Access for i386 machines.
+ *
+ * (c) 1999 Martin Mares <mj@ucw.cz>
+ */
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
+#else
+#define DBG(fmt, ...) \
+do { \
+ if (0) \
+ printk(fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define PCI_PROBE_BIOS 0x0001
+#define PCI_PROBE_CONF1 0x0002
+#define PCI_PROBE_CONF2 0x0004
+#define PCI_PROBE_MMCONF 0x0008
+#define PCI_PROBE_MASK 0x000f
+#define PCI_PROBE_NOEARLY 0x0010
+
+#define PCI_NO_CHECKS 0x0400
+#define PCI_USE_PIRQ_MASK 0x0800
+#define PCI_ASSIGN_ROMS 0x1000
+#define PCI_BIOS_IRQ_SCAN 0x2000
+#define PCI_ASSIGN_ALL_BUSSES 0x4000
+#define PCI_CAN_SKIP_ISA_ALIGN 0x8000
+#define PCI_USE__CRS 0x10000
+#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000
+#define PCI_HAS_IO_ECS 0x40000
+#define PCI_NOASSIGN_ROMS 0x80000
+#define PCI_ROOT_NO_CRS 0x100000
+#define PCI_NOASSIGN_BARS 0x200000
+
+extern unsigned int pci_probe;
+extern unsigned long pirq_table_addr;
+
+enum pci_bf_sort_state {
+ pci_bf_sort_default,
+ pci_force_nobf,
+ pci_force_bf,
+ pci_dmi_bf,
+};
+
+/* pci-i386.c */
+
+void pcibios_resource_survey(void);
+void pcibios_set_cache_line_size(void);
+
+/* pci-pc.c */
+
+extern int pcibios_last_bus;
+extern struct pci_ops pci_root_ops;
+
+void pcibios_scan_specific_bus(int busn);
+
+/* pci-irq.c */
+
+struct irq_info {
+ u8 bus, devfn; /* Bus, device and function */
+ struct {
+ u8 link; /* IRQ line ID, chipset dependent,
+ 0 = not routed */
+ u16 bitmap; /* Available IRQs */
+ } __attribute__((packed)) irq[4];
+ u8 slot; /* Slot number, 0=onboard */
+ u8 rfu;
+} __attribute__((packed));
+
+struct irq_routing_table {
+ u32 signature; /* PIRQ_SIGNATURE should be here */
+ u16 version; /* PIRQ_VERSION */
+ u16 size; /* Table size in bytes */
+ u8 rtr_bus, rtr_devfn; /* Where the interrupt router lies */
+ u16 exclusive_irqs; /* IRQs devoted exclusively to
+ PCI usage */
+ u16 rtr_vendor, rtr_device; /* Vendor and device ID of
+ interrupt router */
+ u32 miniport_data; /* Crap */
+ u8 rfu[11];
+ u8 checksum; /* Modulo 256 checksum must give 0 */
+ struct irq_info slots[0];
+} __attribute__((packed));
+
+extern unsigned int pcibios_irq_mask;
+
+extern raw_spinlock_t pci_config_lock;
+
+extern int (*pcibios_enable_irq)(struct pci_dev *dev);
+extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+
+extern bool mp_should_keep_irq(struct device *dev);
+
+struct pci_raw_ops {
+ int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val);
+ int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 val);
+};
+
+extern const struct pci_raw_ops *raw_pci_ops;
+extern const struct pci_raw_ops *raw_pci_ext_ops;
+
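For orientation, a minimal sketch of how the raw_pci_ops hook above is typically consumed once one of the probe routines declared further down has installed it. The wrapper name and error handling are assumptions, and <linux/errno.h> is assumed for -EINVAL:

	static inline int example_raw_read(unsigned int domain, unsigned int bus,
					   unsigned int devfn, int reg, u32 *val)
	{
		/* no low-level accessor installed yet */
		if (!raw_pci_ops)
			return -EINVAL;
		/* read one 32-bit config dword */
		return raw_pci_ops->read(domain, bus, devfn, reg, 4, val);
	}
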
+extern const struct pci_raw_ops pci_mmcfg;
+extern const struct pci_raw_ops pci_direct_conf1;
+extern bool port_cf9_safe;
+
+/* arch_initcall level */
+extern int pci_direct_probe(void);
+extern void pci_direct_init(int type);
+extern void pci_pcbios_init(void);
+extern void __init dmi_check_pciprobe(void);
+extern void __init dmi_check_skip_isa_align(void);
+
+/* some commonly used subsys_initcalls */
+extern int __init pci_acpi_init(void);
+extern void __init pcibios_irq_init(void);
+extern int __init pcibios_init(void);
+extern int pci_legacy_init(void);
+extern void pcibios_fixup_irqs(void);
+
+/* pci-mmconfig.c */
+
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct pci_mmcfg_region {
+ struct list_head list;
+ struct resource res;
+ u64 address;
+ char __iomem *virt;
+ u16 segment;
+ u8 start_bus;
+ u8 end_bus;
+ char name[PCI_MMCFG_RESOURCE_NAME_LEN];
+};
+
+extern int __init pci_mmcfg_arch_init(void);
+extern void __init pci_mmcfg_arch_free(void);
+extern int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg);
+extern void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg);
+extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
+ phys_addr_t addr);
+extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end);
+extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
+
+extern struct list_head pci_mmcfg_list;
+
+#define PCI_MMCFG_BUS_OFFSET(bus) ((bus) << 20)
+
+/*
+ * AMD Fam10h CPUs are buggy: they cannot access MMIO config space
+ * on their northbridge except through the %eax register. As such, you MUST
+ * NOT use normal IOMEM accesses; use only the magic mmio-config
+ * accessor functions below.
+ * In practice, just use pci_config_* and nothing else.
+ */
+static inline unsigned char mmio_config_readb(void __iomem *pos)
+{
+ u8 val;
+ asm volatile("movb (%1),%%al" : "=a" (val) : "r" (pos));
+ return val;
+}
+
+static inline unsigned short mmio_config_readw(void __iomem *pos)
+{
+ u16 val;
+ asm volatile("movw (%1),%%ax" : "=a" (val) : "r" (pos));
+ return val;
+}
+
+static inline unsigned int mmio_config_readl(void __iomem *pos)
+{
+ u32 val;
+ asm volatile("movl (%1),%%eax" : "=a" (val) : "r" (pos));
+ return val;
+}
+
+static inline void mmio_config_writeb(void __iomem *pos, u8 val)
+{
+ asm volatile("movb %%al,(%1)" : : "a" (val), "r" (pos) : "memory");
+}
+
+static inline void mmio_config_writew(void __iomem *pos, u16 val)
+{
+ asm volatile("movw %%ax,(%1)" : : "a" (val), "r" (pos) : "memory");
+}
+
+static inline void mmio_config_writel(void __iomem *pos, u32 val)
+{
+ asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory");
+}
+
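As a hedged illustration of the accessors above (not part of this header): reading the 32-bit vendor/device ID word of bus 0, device 0, function 0 through an already-mapped MMCONFIG window might look like the sketch below. Here cfg_base is an assumed ioremap()ed base, while PCI_DEVFN() and PCI_VENDOR_ID come from the generic PCI headers:

	static inline u32 example_read_vendor_devid(void __iomem *cfg_base)
	{
		/* ECAM layout: bus << 20 | device << 15 | function << 12 */
		void __iomem *fn0 = cfg_base + PCI_MMCFG_BUS_OFFSET(0) +
				    (PCI_DEVFN(0, 0) << 12);

		/* low 16 bits: vendor ID, high 16 bits: device ID */
		return mmio_config_readl(fn0 + PCI_VENDOR_ID);
	}
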
+#ifdef CONFIG_PCI
+# ifdef CONFIG_ACPI
+# define x86_default_pci_init pci_acpi_init
+# else
+# define x86_default_pci_init pci_legacy_init
+# endif
+# define x86_default_pci_init_irq pcibios_irq_init
+# define x86_default_pci_fixup_irqs pcibios_fixup_irqs
+#else
+# define x86_default_pci_init NULL
+# define x86_default_pci_init_irq NULL
+# define x86_default_pci_fixup_irqs NULL
+#endif
diff --git a/kernel/arch/x86/include/asm/percpu.h b/kernel/arch/x86/include/asm/percpu.h
new file mode 100644
index 000000000..e0ba66ca6
--- /dev/null
+++ b/kernel/arch/x86/include/asm/percpu.h
@@ -0,0 +1,620 @@
+#ifndef _ASM_X86_PERCPU_H
+#define _ASM_X86_PERCPU_H
+
+#ifdef CONFIG_X86_64
+#define __percpu_seg gs
+#define __percpu_mov_op movq
+#else
+#define __percpu_seg fs
+#define __percpu_mov_op movl
+#endif
+
+#ifdef __ASSEMBLY__
+
+/*
+ * PER_CPU finds an address of a per-cpu variable.
+ *
+ * Args:
+ * var - variable name
+ * reg - 32bit register
+ *
+ * The resulting address is stored in the "reg" argument.
+ *
+ * Example:
+ * PER_CPU(cpu_gdt_descr, %ebx)
+ */
+#ifdef CONFIG_SMP
+#define PER_CPU(var, reg) \
+ __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
+ lea var(reg), reg
+#define PER_CPU_VAR(var) %__percpu_seg:var
+#else /* ! SMP */
+#define PER_CPU(var, reg) __percpu_mov_op $var, reg
+#define PER_CPU_VAR(var) var
+#endif /* SMP */
+
+#ifdef CONFIG_X86_64_SMP
+#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
+#else
+#define INIT_PER_CPU_VAR(var) var
+#endif
+
+#else /* ...!ASSEMBLY */
+
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+
+#ifdef CONFIG_SMP
+#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
+#define __my_cpu_offset this_cpu_read(this_cpu_off)
+
+/*
+ * Compared to the generic __my_cpu_offset version, the following
+ * saves one instruction and avoids clobbering a temp register.
+ */
+#define arch_raw_cpu_ptr(ptr) \
+({ \
+ unsigned long tcp_ptr__; \
+ asm volatile("add " __percpu_arg(1) ", %0" \
+ : "=r" (tcp_ptr__) \
+ : "m" (this_cpu_off), "0" (ptr)); \
+ (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
+})
+#else
+#define __percpu_prefix ""
+#endif
+
+#define __percpu_arg(x) __percpu_prefix "%" #x
+
+/*
+ * Initialized pointers to per-cpu variables needed for the boot
+ * processor need to use these macros to get the proper address
+ * offset from __per_cpu_load on SMP.
+ *
+ * There also must be an entry in vmlinux_64.lds.S
+ */
+#define DECLARE_INIT_PER_CPU(var) \
+ extern typeof(var) init_per_cpu_var(var)
+
+#ifdef CONFIG_X86_64_SMP
+#define init_per_cpu_var(var) init_per_cpu__##var
+#else
+#define init_per_cpu_var(var) var
+#endif
+
+/* For arch-specific code, we can use direct single-insn ops (they
+ * don't give an lvalue though). */
+extern void __bad_percpu_size(void);
+
+#define percpu_to_op(op, var, val) \
+do { \
+ typedef typeof(var) pto_T__; \
+ if (0) { \
+ pto_T__ pto_tmp__; \
+ pto_tmp__ = (val); \
+ (void)pto_tmp__; \
+ } \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b %1,"__percpu_arg(0) \
+ : "+m" (var) \
+ : "qi" ((pto_T__)(val))); \
+ break; \
+ case 2: \
+ asm(op "w %1,"__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pto_T__)(val))); \
+ break; \
+ case 4: \
+ asm(op "l %1,"__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pto_T__)(val))); \
+ break; \
+ case 8: \
+ asm(op "q %1,"__percpu_arg(0) \
+ : "+m" (var) \
+ : "re" ((pto_T__)(val))); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+} while (0)
+
+/*
+ * Generate a percpu add-to-memory instruction, and optimize the code
+ * to inc/dec when 1 is added or subtracted.
+ */
+#define percpu_add_op(var, val) \
+do { \
+ typedef typeof(var) pao_T__; \
+ const int pao_ID__ = (__builtin_constant_p(val) && \
+ ((val) == 1 || (val) == -1)) ? \
+ (int)(val) : 0; \
+ if (0) { \
+ pao_T__ pao_tmp__; \
+ pao_tmp__ = (val); \
+ (void)pao_tmp__; \
+ } \
+ switch (sizeof(var)) { \
+ case 1: \
+ if (pao_ID__ == 1) \
+ asm("incb "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decb "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addb %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "qi" ((pao_T__)(val))); \
+ break; \
+ case 2: \
+ if (pao_ID__ == 1) \
+ asm("incw "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decw "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addw %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pao_T__)(val))); \
+ break; \
+ case 4: \
+ if (pao_ID__ == 1) \
+ asm("incl "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decl "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addl %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pao_T__)(val))); \
+ break; \
+ case 8: \
+ if (pao_ID__ == 1) \
+ asm("incq "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decq "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addq %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "re" ((pao_T__)(val))); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+} while (0)
+
+#define percpu_from_op(op, var) \
+({ \
+ typeof(var) pfo_ret__; \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_arg(1)",%0" \
+ : "=q" (pfo_ret__) \
+ : "m" (var)); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_arg(1)",%0" \
+ : "=r" (pfo_ret__) \
+ : "m" (var)); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_arg(1)",%0" \
+ : "=r" (pfo_ret__) \
+ : "m" (var)); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_arg(1)",%0" \
+ : "=r" (pfo_ret__) \
+ : "m" (var)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+ pfo_ret__; \
+})
+
+#define percpu_stable_op(op, var) \
+({ \
+ typeof(var) pfo_ret__; \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_arg(P1)",%0" \
+ : "=q" (pfo_ret__) \
+ : "p" (&(var))); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_arg(P1)",%0" \
+ : "=r" (pfo_ret__) \
+ : "p" (&(var))); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_arg(P1)",%0" \
+ : "=r" (pfo_ret__) \
+ : "p" (&(var))); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_arg(P1)",%0" \
+ : "=r" (pfo_ret__) \
+ : "p" (&(var))); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+ pfo_ret__; \
+})
+
+#define percpu_unary_op(op, var) \
+({ \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm(op "b "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 2: \
+ asm(op "w "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 4: \
+ asm(op "l "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_arg(0) \
+ : "+m" (var)); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+})
+
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val) \
+({ \
+ typeof(var) paro_ret__ = val; \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm("xaddb %0, "__percpu_arg(1) \
+ : "+q" (paro_ret__), "+m" (var) \
+ : : "memory"); \
+ break; \
+ case 2: \
+ asm("xaddw %0, "__percpu_arg(1) \
+ : "+r" (paro_ret__), "+m" (var) \
+ : : "memory"); \
+ break; \
+ case 4: \
+ asm("xaddl %0, "__percpu_arg(1) \
+ : "+r" (paro_ret__), "+m" (var) \
+ : : "memory"); \
+ break; \
+ case 8: \
+ asm("xaddq %0, "__percpu_arg(1) \
+ : "+re" (paro_ret__), "+m" (var) \
+ : : "memory"); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+ paro_ret__ += val; \
+ paro_ret__; \
+})
+
+/*
+ * xchg is implemented here using cmpxchg without a lock prefix. A plain
+ * xchg is expensive due to its implied lock prefix, and the processor
+ * cannot prefetch cachelines when xchg is used.
+ */
+#define percpu_xchg_op(var, nval) \
+({ \
+ typeof(var) pxo_ret__; \
+ typeof(var) pxo_new__ = (nval); \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm("\n\tmov "__percpu_arg(1)",%%al" \
+ "\n1:\tcmpxchgb %2, "__percpu_arg(1) \
+ "\n\tjnz 1b" \
+ : "=&a" (pxo_ret__), "+m" (var) \
+ : "q" (pxo_new__) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm("\n\tmov "__percpu_arg(1)",%%ax" \
+ "\n1:\tcmpxchgw %2, "__percpu_arg(1) \
+ "\n\tjnz 1b" \
+ : "=&a" (pxo_ret__), "+m" (var) \
+ : "r" (pxo_new__) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm("\n\tmov "__percpu_arg(1)",%%eax" \
+ "\n1:\tcmpxchgl %2, "__percpu_arg(1) \
+ "\n\tjnz 1b" \
+ : "=&a" (pxo_ret__), "+m" (var) \
+ : "r" (pxo_new__) \
+ : "memory"); \
+ break; \
+ case 8: \
+ asm("\n\tmov "__percpu_arg(1)",%%rax" \
+ "\n1:\tcmpxchgq %2, "__percpu_arg(1) \
+ "\n\tjnz 1b" \
+ : "=&a" (pxo_ret__), "+m" (var) \
+ : "r" (pxo_new__) \
+ : "memory"); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+ pxo_ret__; \
+})
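A small usage sketch for the xchg-via-cmpxchg operation above, going through the generic this_cpu_xchg() wrapper that ends up here. The per-cpu variable name is an assumption made purely for illustration:

	DEFINE_PER_CPU(unsigned int, example_pending);

	static inline unsigned int example_take_pending(void)
	{
		/* return the previous per-cpu value and leave 0 behind */
		return this_cpu_xchg(example_pending, 0);
	}
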
+
+/*
+ * cmpxchg has no such implied lock semantics; as a result it is much
+ * more efficient for cpu-local operations.
+ */
+#define percpu_cmpxchg_op(var, oval, nval) \
+({ \
+ typeof(var) pco_ret__; \
+ typeof(var) pco_old__ = (oval); \
+ typeof(var) pco_new__ = (nval); \
+ switch (sizeof(var)) { \
+ case 1: \
+ asm("cmpxchgb %2, "__percpu_arg(1) \
+ : "=a" (pco_ret__), "+m" (var) \
+ : "q" (pco_new__), "0" (pco_old__) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm("cmpxchgw %2, "__percpu_arg(1) \
+ : "=a" (pco_ret__), "+m" (var) \
+ : "r" (pco_new__), "0" (pco_old__) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm("cmpxchgl %2, "__percpu_arg(1) \
+ : "=a" (pco_ret__), "+m" (var) \
+ : "r" (pco_new__), "0" (pco_old__) \
+ : "memory"); \
+ break; \
+ case 8: \
+ asm("cmpxchgq %2, "__percpu_arg(1) \
+ : "=a" (pco_ret__), "+m" (var) \
+ : "r" (pco_new__), "0" (pco_old__) \
+ : "memory"); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+ pco_ret__; \
+})
+
+/*
+ * this_cpu_read() makes gcc load the percpu variable every time it is
+ * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read_stable() is more efficient and can be used if its value
+ * is guaranteed to be valid across cpus. The current users include
+ * get_current() and get_thread_info() both of which are actually
+ * per-thread variables implemented as per-cpu variables and thus
+ * stable for the duration of the respective task.
+ */
+#define this_cpu_read_stable(var) percpu_stable_op("mov", var)
+
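To make the distinction concrete, a sketch contrasting the two read flavours; the names below are assumptions, not taken from this header, and the pattern only holds for values that never change for the running task:

	DECLARE_PER_CPU(struct task_struct *, example_current);

	static __always_inline struct task_struct *example_get_current(void)
	{
		/* may be cached/hoisted by the compiler across the function;
		 * safe only because the value is stable for the running task */
		return this_cpu_read_stable(example_current);
	}

	static inline struct task_struct *example_peek_current(void)
	{
		/* forces a fresh load from the per-cpu segment every time */
		return this_cpu_read(example_current);
	}
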
+#define raw_cpu_read_1(pcp) percpu_from_op("mov", pcp)
+#define raw_cpu_read_2(pcp) percpu_from_op("mov", pcp)
+#define raw_cpu_read_4(pcp) percpu_from_op("mov", pcp)
+
+#define raw_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define raw_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define raw_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define raw_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define raw_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define raw_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
+#define raw_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define raw_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define raw_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define raw_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define raw_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define raw_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define raw_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
+#define raw_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
+#define raw_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)
+
+#define this_cpu_read_1(pcp) percpu_from_op("mov", pcp)
+#define this_cpu_read_2(pcp) percpu_from_op("mov", pcp)
+#define this_cpu_read_4(pcp) percpu_from_op("mov", pcp)
+#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
+
+#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \
+({ \
+ bool __ret; \
+ typeof(pcp1) __o1 = (o1), __n1 = (n1); \
+ typeof(pcp2) __o2 = (o2), __n2 = (n2); \
+ asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
+ : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
+ : "b" (__n1), "c" (__n2), "a" (__o1)); \
+ __ret; \
+})
+
+#define raw_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
+#define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
+#endif /* CONFIG_X86_CMPXCHG64 */
+
+/*
+ * Per-cpu atomic 64-bit operations are only available under 64-bit;
+ * 32-bit must fall back to the generic operations.
+ */
+#ifdef CONFIG_X86_64
+#define raw_cpu_read_8(pcp) percpu_from_op("mov", pcp)
+#define raw_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define raw_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
+#define raw_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define raw_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define raw_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+#define this_cpu_read_8(pcp) percpu_from_op("mov", pcp)
+#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
+#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
+#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate the cmpxchg16b instruction. The
+ * instruction is not supported on early AMD64 processors, so we must be
+ * able to emulate it in software. The address used in the cmpxchg16b
+ * instruction must be aligned to a 16-byte boundary.
+ */
+#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2) \
+({ \
+ bool __ret; \
+ typeof(pcp1) __o1 = (o1), __n1 = (n1); \
+ typeof(pcp2) __o2 = (o2), __n2 = (n2); \
+ alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
+ "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \
+ X86_FEATURE_CX16, \
+ ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \
+ "+m" (pcp2), "+d" (__o2)), \
+ "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \
+ __ret; \
+})
+
+#define raw_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
+#define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
+
+#endif
+
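A hedged sketch of how the double-word compare-and-exchange above is typically used through the generic this_cpu_cmpxchg_double() wrapper. The structure and field names are assumptions; the real requirement is two adjacent per-cpu words, aligned as a pair:

	struct example_pair {
		void		*ptr;
		unsigned long	seq;
	} __aligned(2 * sizeof(void *));

	DEFINE_PER_CPU_ALIGNED(struct example_pair, example_pair);

	static inline bool example_publish(void *old_ptr, unsigned long old_seq,
					   void *new_ptr, unsigned long new_seq)
	{
		/* succeeds only if both words still hold the old values */
		return this_cpu_cmpxchg_double(example_pair.ptr, example_pair.seq,
					       old_ptr, old_seq,
					       new_ptr, new_seq);
	}
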
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var) \
+({ \
+ int old__; \
+ asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
+ : "=r" (old__), "+m" (var) \
+ : "dIr" (bit)); \
+ old__; \
+})
+
+static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
+ const unsigned long __percpu *addr)
+{
+ unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
+
+#ifdef CONFIG_X86_64
+ return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
+#else
+ return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
+#endif
+}
+
+static inline int x86_this_cpu_variable_test_bit(int nr,
+ const unsigned long __percpu *addr)
+{
+ int oldbit;
+
+ asm volatile("bt "__percpu_arg(2)",%1\n\t"
+ "sbb %0,%0"
+ : "=r" (oldbit)
+ : "m" (*(unsigned long *)addr), "Ir" (nr));
+
+ return oldbit;
+}
+
+#define x86_this_cpu_test_bit(nr, addr) \
+ (__builtin_constant_p((nr)) \
+ ? x86_this_cpu_constant_test_bit((nr), (addr)) \
+ : x86_this_cpu_variable_test_bit((nr), (addr)))
+
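For completeness, a sketch of the constant/variable dispatch above in use; the per-cpu bitmap name is an assumption:

	DECLARE_PER_CPU(unsigned long, example_flags[2]);

	static inline bool example_flag_set(int nr)
	{
		/* a compile-time constant nr picks the raw_cpu_read variant,
		 * a runtime nr the bt-instruction variant */
		return x86_this_cpu_test_bit(nr, example_flags);
	}
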
+
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_SMP
+
+/*
+ * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu
+ * variables that are initialized and accessed before there are per_cpu
+ * areas allocated.
+ */
+
+#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
+ DEFINE_PER_CPU(_type, _name) = _initvalue; \
+ __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
+ { [0 ... NR_CPUS-1] = _initvalue }; \
+ __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
+
+#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
+ DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \
+ __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
+ { [0 ... NR_CPUS-1] = _initvalue }; \
+ __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
+
+#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
+ EXPORT_PER_CPU_SYMBOL(_name)
+
+#define DECLARE_EARLY_PER_CPU(_type, _name) \
+ DECLARE_PER_CPU(_type, _name); \
+ extern __typeof__(_type) *_name##_early_ptr; \
+ extern __typeof__(_type) _name##_early_map[]
+
+#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
+ DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \
+ extern __typeof__(_type) *_name##_early_ptr; \
+ extern __typeof__(_type) _name##_early_map[]
+
+#define early_per_cpu_ptr(_name) (_name##_early_ptr)
+#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
+#define early_per_cpu(_name, _cpu) \
+ *(early_per_cpu_ptr(_name) ? \
+ &early_per_cpu_ptr(_name)[_cpu] : \
+ &per_cpu(_name, _cpu))
+
+#else /* !CONFIG_SMP */
+#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
+ DEFINE_PER_CPU(_type, _name) = _initvalue
+
+#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \
+ DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
+
+#define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \
+ EXPORT_PER_CPU_SYMBOL(_name)
+
+#define DECLARE_EARLY_PER_CPU(_type, _name) \
+ DECLARE_PER_CPU(_type, _name)
+
+#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \
+ DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
+
+#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
+#define early_per_cpu_ptr(_name) NULL
+/* no early_per_cpu_map() */
+
+#endif /* !CONFIG_SMP */
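Tying the EARLY_PER_CPU machinery above together, a minimal sketch; the variable name is made up, and real users look like the x86 APIC-id maps. A value written during very early boot lands in the static _early_map[]; once the per-cpu areas are set up and the early pointer is cleared, the same accessor reads the real per-cpu variable:

	DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, example_cpu_to_node, 0);

	static inline u16 example_get_node(int cpu)
	{
		/* valid both before and after the per-cpu areas exist */
		return early_per_cpu(example_cpu_to_node, cpu);
	}
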
+
+#endif /* _ASM_X86_PERCPU_H */
diff --git a/kernel/arch/x86/include/asm/perf_event.h b/kernel/arch/x86/include/asm/perf_event.h
new file mode 100644
index 000000000..dc0f6ed35
--- /dev/null
+++ b/kernel/arch/x86/include/asm/perf_event.h
@@ -0,0 +1,290 @@
+#ifndef _ASM_X86_PERF_EVENT_H
+#define _ASM_X86_PERF_EVENT_H
+
+/*
+ * Performance event hw details:
+ */
+
+#define INTEL_PMC_MAX_GENERIC 32
+#define INTEL_PMC_MAX_FIXED 3
+#define INTEL_PMC_IDX_FIXED 32
+
+#define X86_PMC_IDX_MAX 64
+
+#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
+#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
+
+#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
+#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
+
+#define ARCH_PERFMON_EVENTSEL_EVENT 0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK 0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR (1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS (1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE (1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL (1ULL << 19)
+#define ARCH_PERFMON_EVENTSEL_INT (1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
+
+#define HSW_IN_TX (1ULL << 32)
+#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
+
+#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
+#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
+#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)
+
+#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
+#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
+ (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
+
+#define AMD64_EVENTSEL_EVENT \
+ (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK \
+ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK \
+ (ARCH_PERFMON_EVENTSEL_EVENT | \
+ ARCH_PERFMON_EVENTSEL_UMASK | \
+ ARCH_PERFMON_EVENTSEL_EDGE | \
+ ARCH_PERFMON_EVENTSEL_INV | \
+ ARCH_PERFMON_EVENTSEL_CMASK)
+#define X86_ALL_EVENT_FLAGS \
+ (ARCH_PERFMON_EVENTSEL_EDGE | \
+ ARCH_PERFMON_EVENTSEL_INV | \
+ ARCH_PERFMON_EVENTSEL_CMASK | \
+ ARCH_PERFMON_EVENTSEL_ANY | \
+ ARCH_PERFMON_EVENTSEL_PIN_CONTROL | \
+ HSW_IN_TX | \
+ HSW_IN_TX_CHECKPOINTED)
+#define AMD64_RAW_EVENT_MASK \
+ (X86_RAW_EVENT_MASK | \
+ AMD64_EVENTSEL_EVENT)
+#define AMD64_RAW_EVENT_MASK_NB \
+ (AMD64_EVENTSEL_EVENT | \
+ ARCH_PERFMON_EVENTSEL_UMASK)
+#define AMD64_NUM_COUNTERS 4
+#define AMD64_NUM_COUNTERS_CORE 6
+#define AMD64_NUM_COUNTERS_NB 4
+
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
+ (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
+
+#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
+#define ARCH_PERFMON_EVENTS_COUNT 7
+
+/*
+ * Intel "Architectural Performance Monitoring" CPUID
+ * detection/enumeration details:
+ */
+union cpuid10_eax {
+ struct {
+ unsigned int version_id:8;
+ unsigned int num_counters:8;
+ unsigned int bit_width:8;
+ unsigned int mask_length:8;
+ } split;
+ unsigned int full;
+};
+
+union cpuid10_ebx {
+ struct {
+ unsigned int no_unhalted_core_cycles:1;
+ unsigned int no_instructions_retired:1;
+ unsigned int no_unhalted_reference_cycles:1;
+ unsigned int no_llc_reference:1;
+ unsigned int no_llc_misses:1;
+ unsigned int no_branch_instruction_retired:1;
+ unsigned int no_branch_misses_retired:1;
+ } split;
+ unsigned int full;
+};
+
+union cpuid10_edx {
+ struct {
+ unsigned int num_counters_fixed:5;
+ unsigned int bit_width_fixed:8;
+ unsigned int reserved:19;
+ } split;
+ unsigned int full;
+};
+
+struct x86_pmu_capability {
+ int version;
+ int num_counters_gp;
+ int num_counters_fixed;
+ int bit_width_gp;
+ int bit_width_fixed;
+ unsigned int events_mask;
+ int events_mask_len;
+};
+
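A sketch of how the CPUID leaf 0xa layout above might be consumed at init time (similar in spirit to the PMU setup code; the function name and exact field handling are assumptions, and cpuid() is the helper from <asm/processor.h>):

	static inline void example_query_arch_pmu(struct x86_pmu_capability *cap)
	{
		union cpuid10_eax eax;
		union cpuid10_ebx ebx;
		union cpuid10_edx edx;
		unsigned int unused;

		cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);

		cap->version		= eax.split.version_id;
		cap->num_counters_gp	= eax.split.num_counters;
		cap->bit_width_gp	= eax.split.bit_width;
		cap->events_mask_len	= eax.split.mask_length;
		cap->events_mask	= ebx.full;	/* set bit = event NOT available */
		cap->num_counters_fixed	= edx.split.num_counters_fixed;
		cap->bit_width_fixed	= edx.split.bit_width_fixed;
	}
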
+/*
+ * Fixed-purpose performance events:
+ */
+
+/*
+ * All 3 fixed-mode PMCs are configured via this single MSR:
+ */
+#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d
+
+/*
+ * The counts are available in three separate MSRs:
+ */
+
+/* Instr_Retired.Any: */
+#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)
+
+/* CPU_CLK_Unhalted.Core: */
+#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)
+
+/* CPU_CLK_Unhalted.Ref: */
+#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
+
+/*
+ * We model BTS tracing as another fixed-mode PMC.
+ *
+ * We choose a value in the middle of the fixed event range, since lower
+ * values are used by actual fixed events and higher values are used
+ * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
+ */
+#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)
+
+/*
+ * IBS cpuid feature detection
+ */
+
+#define IBS_CPUID_FEATURES 0x8000001b
+
+/*
+ * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
+ * bit 0 is used to indicate the existence of IBS.
+ */
+#define IBS_CAPS_AVAIL (1U<<0)
+#define IBS_CAPS_FETCHSAM (1U<<1)
+#define IBS_CAPS_OPSAM (1U<<2)
+#define IBS_CAPS_RDWROPCNT (1U<<3)
+#define IBS_CAPS_OPCNT (1U<<4)
+#define IBS_CAPS_BRNTRGT (1U<<5)
+#define IBS_CAPS_OPCNTEXT (1U<<6)
+#define IBS_CAPS_RIPINVALIDCHK (1U<<7)
+#define IBS_CAPS_OPBRNFUSE (1U<<8)
+#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
+#define IBS_CAPS_OPDATA4 (1U<<10)
+
+#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
+ | IBS_CAPS_FETCHSAM \
+ | IBS_CAPS_OPSAM)
+
+/*
+ * IBS APIC setup
+ */
+#define IBSCTL 0x1cc
+#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
+#define IBSCTL_LVT_OFFSET_MASK 0x0F
+
+/* ibs fetch bits/masks */
+#define IBS_FETCH_RAND_EN (1ULL<<57)
+#define IBS_FETCH_VAL (1ULL<<49)
+#define IBS_FETCH_ENABLE (1ULL<<48)
+#define IBS_FETCH_CNT 0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
+
+/* ibs op bits/masks */
+/* lower 4 bits of the current count are ignored: */
+#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
+#define IBS_OP_CNT_CTL (1ULL<<19)
+#define IBS_OP_VAL (1ULL<<18)
+#define IBS_OP_ENABLE (1ULL<<17)
+#define IBS_OP_MAX_CNT 0x0000FFFFULL
+#define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
+#define IBS_RIP_INVALID (1ULL<<38)
+
+#ifdef CONFIG_X86_LOCAL_APIC
+extern u32 get_ibs_caps(void);
+#else
+static inline u32 get_ibs_caps(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PERF_EVENTS
+extern void perf_events_lapic_init(void);
+
+/*
+ * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
+ * unused and ABI specified to be 0, so nobody should care what we do with
+ * them.
+ *
+ * EXACT - the IP points to the exact instruction that triggered the
+ * event (HW bugs exempt).
+ * VM - original X86_VM_MASK; see set_linear_ip().
+ */
+#define PERF_EFLAGS_EXACT (1UL << 3)
+#define PERF_EFLAGS_VM (1UL << 5)
+
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+
+#include <asm/stacktrace.h>
+
+/*
+ * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
+ * and the comment with PERF_EFLAGS_EXACT.
+ */
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->ip = (__ip); \
+ (regs)->bp = caller_frame_pointer(); \
+ (regs)->cs = __KERNEL_CS; \
+ regs->flags = 0; \
+ asm volatile( \
+ _ASM_MOV "%%"_ASM_SP ", %0\n" \
+ : "=m" ((regs)->sp) \
+ :: "memory" \
+ ); \
+}
+
+struct perf_guest_switch_msr {
+ unsigned msr;
+ u64 host, guest;
+};
+
+extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
+extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
+extern void perf_check_microcode(void);
+#else
+static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+{
+ *nr = 0;
+ return NULL;
+}
+
+static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
+{
+ memset(cap, 0, sizeof(*cap));
+}
+
+static inline void perf_events_lapic_init(void) { }
+static inline void perf_check_microcode(void) { }
+#endif
+
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+#else
+ static inline void amd_pmu_enable_virt(void) { }
+ static inline void amd_pmu_disable_virt(void) { }
+#endif
+
+#define arch_perf_out_copy_user copy_from_user_nmi
+
+#endif /* _ASM_X86_PERF_EVENT_H */
diff --git a/kernel/arch/x86/include/asm/perf_event_p4.h b/kernel/arch/x86/include/asm/perf_event_p4.h
new file mode 100644
index 000000000..d725382c2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/perf_event_p4.h
@@ -0,0 +1,876 @@
+/*
+ * Netburst Performance Events (P4, old Xeon)
+ */
+
+#ifndef PERF_EVENT_P4_H
+#define PERF_EVENT_P4_H
+
+#include <linux/cpu.h>
+#include <linux/bitops.h>
+
+/*
+ * NetBurst shares its performance MSRs between the two
+ * logical processors when HT is turned on. (Note: Atom
+ * with HT support, by contrast, does not share the perf
+ * MSRs; every thread has its own set.)
+ */
+#define ARCH_P4_TOTAL_ESCR (46)
+#define ARCH_P4_RESERVED_ESCR (2) /* IQ_ESCR(0,1) not always present */
+#define ARCH_P4_MAX_ESCR (ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
+#define ARCH_P4_MAX_CCCR (18)
+
+#define ARCH_P4_CNTRVAL_BITS (40)
+#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
+
+#define P4_ESCR_EVENT_MASK 0x7e000000ULL
+#define P4_ESCR_EVENT_SHIFT 25
+#define P4_ESCR_EVENTMASK_MASK 0x01fffe00ULL
+#define P4_ESCR_EVENTMASK_SHIFT 9
+#define P4_ESCR_TAG_MASK 0x000001e0ULL
+#define P4_ESCR_TAG_SHIFT 5
+#define P4_ESCR_TAG_ENABLE 0x00000010ULL
+#define P4_ESCR_T0_OS 0x00000008ULL
+#define P4_ESCR_T0_USR 0x00000004ULL
+#define P4_ESCR_T1_OS 0x00000002ULL
+#define P4_ESCR_T1_USR 0x00000001ULL
+
+#define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
+#define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
+#define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
+
+#define P4_CCCR_OVF 0x80000000ULL
+#define P4_CCCR_CASCADE 0x40000000ULL
+#define P4_CCCR_OVF_PMI_T0 0x04000000ULL
+#define P4_CCCR_OVF_PMI_T1 0x08000000ULL
+#define P4_CCCR_FORCE_OVF 0x02000000ULL
+#define P4_CCCR_EDGE 0x01000000ULL
+#define P4_CCCR_THRESHOLD_MASK 0x00f00000ULL
+#define P4_CCCR_THRESHOLD_SHIFT 20
+#define P4_CCCR_COMPLEMENT 0x00080000ULL
+#define P4_CCCR_COMPARE 0x00040000ULL
+#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000ULL
+#define P4_CCCR_ESCR_SELECT_SHIFT 13
+#define P4_CCCR_ENABLE 0x00001000ULL
+#define P4_CCCR_THREAD_SINGLE 0x00010000ULL
+#define P4_CCCR_THREAD_BOTH 0x00020000ULL
+#define P4_CCCR_THREAD_ANY 0x00030000ULL
+#define P4_CCCR_RESERVED 0x00000fffULL
+
+#define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
+#define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
+
+#define P4_GEN_ESCR_EMASK(class, name, bit) \
+ class##__##name = ((1ULL << bit) << P4_ESCR_EVENTMASK_SHIFT)
+#define P4_ESCR_EMASK_BIT(class, name) class##__##name
+
+/*
+ * The config field is 64 bits wide and consists of
+ * HT << 63 | ESCR << 32 | CCCR
+ * where HT is the HyperThreading bit (ESCR has that bit
+ * reserved, so we may use it for our own purposes).
+ *
+ * Note that this packs the register *values*, NOT the addresses
+ * of the respective ESCR and CCCR; the packed value must be
+ * unpacked and written to the proper register addresses.
+ *
+ * The basic idea is to pack as much info as possible.
+ */
+#define p4_config_pack_escr(v) (((u64)(v)) << 32)
+#define p4_config_pack_cccr(v) (((u64)(v)) & 0xffffffffULL)
+#define p4_config_unpack_escr(v) (((u64)(v)) >> 32)
+#define p4_config_unpack_cccr(v) (((u64)(v)) & 0xffffffffULL)
+
+#define p4_config_unpack_emask(v) \
+ ({ \
+ u32 t = p4_config_unpack_escr((v)); \
+ t = t & P4_ESCR_EVENTMASK_MASK; \
+ t = t >> P4_ESCR_EVENTMASK_SHIFT; \
+ t; \
+ })
+
+#define p4_config_unpack_event(v) \
+ ({ \
+ u32 t = p4_config_unpack_escr((v)); \
+ t = t & P4_ESCR_EVENT_MASK; \
+ t = t >> P4_ESCR_EVENT_SHIFT; \
+ t; \
+ })
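A small worked example of the packing scheme described above; the ESCR/CCCR field values are made up purely for illustration, using only the macros defined in this header:

	static inline u64 example_p4_config(void)
	{
		u32 escr = P4_ESCR_EVENT(0x0c) |	/* event select code   */
			   P4_ESCR_EMASK(1 << 0) |	/* one EventMask bit   */
			   P4_ESCR_T0_OS | P4_ESCR_T0_USR;
		u32 cccr = P4_CCCR_ESEL(0x07) |		/* ESCR select         */
			   P4_CCCR_ENABLE;

		/* high 32 bits <- ESCR value, low 32 bits <- CCCR value */
		return p4_config_pack_escr(escr) | p4_config_pack_cccr(cccr);
	}

	/* p4_config_unpack_event() on the value above yields 0x0c again */
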
+
+#define P4_CONFIG_HT_SHIFT 63
+#define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT)
+
+/*
+ * If an event has an alias it should be marked
+ * with a special bit. (Don't forget to check
+ * P4_PEBS_CONFIG_MASK and related bits on
+ * modification.)
+ */
+#define P4_CONFIG_ALIASABLE (1ULL << 9)
+
+/*
+ * The bits we allow to pass for RAW events
+ */
+#define P4_CONFIG_MASK_ESCR \
+ P4_ESCR_EVENT_MASK | \
+ P4_ESCR_EVENTMASK_MASK | \
+ P4_ESCR_TAG_MASK | \
+ P4_ESCR_TAG_ENABLE
+
+#define P4_CONFIG_MASK_CCCR \
+ P4_CCCR_EDGE | \
+ P4_CCCR_THRESHOLD_MASK | \
+ P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE | \
+ P4_CCCR_THREAD_ANY | \
+ P4_CCCR_RESERVED
+
+/* some dangerous bits are reserved for kernel internals */
+#define P4_CONFIG_MASK \
+ (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \
+ (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR))
+
+/*
+ * In case of event aliasing we need to preserve some
+ * caller bits, otherwise the mapping won't be complete.
+ */
+#define P4_CONFIG_EVENT_ALIAS_MASK \
+ (p4_config_pack_escr(P4_CONFIG_MASK_ESCR) | \
+ p4_config_pack_cccr(P4_CCCR_EDGE | \
+ P4_CCCR_THRESHOLD_MASK | \
+ P4_CCCR_COMPLEMENT | \
+ P4_CCCR_COMPARE))
+
+#define P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS \
+ ((P4_CONFIG_HT) | \
+ p4_config_pack_escr(P4_ESCR_T0_OS | \
+ P4_ESCR_T0_USR | \
+ P4_ESCR_T1_OS | \
+ P4_ESCR_T1_USR) | \
+ p4_config_pack_cccr(P4_CCCR_OVF | \
+ P4_CCCR_CASCADE | \
+ P4_CCCR_FORCE_OVF | \
+ P4_CCCR_THREAD_ANY | \
+ P4_CCCR_OVF_PMI_T0 | \
+ P4_CCCR_OVF_PMI_T1 | \
+ P4_CONFIG_ALIASABLE))
+
+static inline bool p4_is_event_cascaded(u64 config)
+{
+ u32 cccr = p4_config_unpack_cccr(config);
+ return !!(cccr & P4_CCCR_CASCADE);
+}
+
+static inline int p4_ht_config_thread(u64 config)
+{
+ return !!(config & P4_CONFIG_HT);
+}
+
+static inline u64 p4_set_ht_bit(u64 config)
+{
+ return config | P4_CONFIG_HT;
+}
+
+static inline u64 p4_clear_ht_bit(u64 config)
+{
+ return config & ~P4_CONFIG_HT;
+}
+
+static inline int p4_ht_active(void)
+{
+#ifdef CONFIG_SMP
+ return smp_num_siblings > 1;
+#endif
+ return 0;
+}
+
+static inline int p4_ht_thread(int cpu)
+{
+#ifdef CONFIG_SMP
+ if (smp_num_siblings == 2)
+ return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
+#endif
+ return 0;
+}
+
+static inline int p4_should_swap_ts(u64 config, int cpu)
+{
+ return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
+}
+
+static inline u32 p4_default_cccr_conf(int cpu)
+{
+ /*
+ * Note that P4_CCCR_THREAD_ANY is "required" on
+ * non-HT machines (on HT machines we count TS events
+ * regardless of the state of the second logical processor).
+ */
+ u32 cccr = P4_CCCR_THREAD_ANY;
+
+ if (!p4_ht_thread(cpu))
+ cccr |= P4_CCCR_OVF_PMI_T0;
+ else
+ cccr |= P4_CCCR_OVF_PMI_T1;
+
+ return cccr;
+}
+
+static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
+{
+ u32 escr = 0;
+
+ if (!p4_ht_thread(cpu)) {
+ if (!exclude_os)
+ escr |= P4_ESCR_T0_OS;
+ if (!exclude_usr)
+ escr |= P4_ESCR_T0_USR;
+ } else {
+ if (!exclude_os)
+ escr |= P4_ESCR_T1_OS;
+ if (!exclude_usr)
+ escr |= P4_ESCR_T1_USR;
+ }
+
+ return escr;
+}
+
+/*
+ * These are the events which should be used in the "Event Select"
+ * field of the ESCR register; they act as unique keys which allow
+ * the kernel to determine which CCCR and COUNTER should be
+ * used to track an event.
+ */
+enum P4_EVENTS {
+ P4_EVENT_TC_DELIVER_MODE,
+ P4_EVENT_BPU_FETCH_REQUEST,
+ P4_EVENT_ITLB_REFERENCE,
+ P4_EVENT_MEMORY_CANCEL,
+ P4_EVENT_MEMORY_COMPLETE,
+ P4_EVENT_LOAD_PORT_REPLAY,
+ P4_EVENT_STORE_PORT_REPLAY,
+ P4_EVENT_MOB_LOAD_REPLAY,
+ P4_EVENT_PAGE_WALK_TYPE,
+ P4_EVENT_BSQ_CACHE_REFERENCE,
+ P4_EVENT_IOQ_ALLOCATION,
+ P4_EVENT_IOQ_ACTIVE_ENTRIES,
+ P4_EVENT_FSB_DATA_ACTIVITY,
+ P4_EVENT_BSQ_ALLOCATION,
+ P4_EVENT_BSQ_ACTIVE_ENTRIES,
+ P4_EVENT_SSE_INPUT_ASSIST,
+ P4_EVENT_PACKED_SP_UOP,
+ P4_EVENT_PACKED_DP_UOP,
+ P4_EVENT_SCALAR_SP_UOP,
+ P4_EVENT_SCALAR_DP_UOP,
+ P4_EVENT_64BIT_MMX_UOP,
+ P4_EVENT_128BIT_MMX_UOP,
+ P4_EVENT_X87_FP_UOP,
+ P4_EVENT_TC_MISC,
+ P4_EVENT_GLOBAL_POWER_EVENTS,
+ P4_EVENT_TC_MS_XFER,
+ P4_EVENT_UOP_QUEUE_WRITES,
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE,
+ P4_EVENT_RETIRED_BRANCH_TYPE,
+ P4_EVENT_RESOURCE_STALL,
+ P4_EVENT_WC_BUFFER,
+ P4_EVENT_B2B_CYCLES,
+ P4_EVENT_BNR,
+ P4_EVENT_SNOOP,
+ P4_EVENT_RESPONSE,
+ P4_EVENT_FRONT_END_EVENT,
+ P4_EVENT_EXECUTION_EVENT,
+ P4_EVENT_REPLAY_EVENT,
+ P4_EVENT_INSTR_RETIRED,
+ P4_EVENT_UOPS_RETIRED,
+ P4_EVENT_UOP_TYPE,
+ P4_EVENT_BRANCH_RETIRED,
+ P4_EVENT_MISPRED_BRANCH_RETIRED,
+ P4_EVENT_X87_ASSIST,
+ P4_EVENT_MACHINE_CLEAR,
+ P4_EVENT_INSTR_COMPLETED,
+};
+
+#define P4_OPCODE(event) event##_OPCODE
+#define P4_OPCODE_ESEL(opcode) ((opcode & 0x00ff) >> 0)
+#define P4_OPCODE_EVNT(opcode) ((opcode & 0xff00) >> 8)
+#define P4_OPCODE_PACK(event, sel) (((event) << 8) | sel)
+
+/*
+ * The comments below each event list the ESCR restriction
+ * for that event and the counter indices usable per ESCR.
+ *
+ * MSR_P4_IQ_ESCR0 and MSR_P4_IQ_ESCR1 are available only on early
+ * processor builds (family 0FH, models 01H-02H). These MSRs
+ * are not available on later versions, so we do not use
+ * them at all.
+ *
+ * Also note that the P4_CCCR_ENABLE bit does not work properly
+ * in CCCR1, so we should not use that CCCR or its respective
+ * counter either.
+ */
+enum P4_EVENT_OPCODES {
+ P4_OPCODE(P4_EVENT_TC_DELIVER_MODE) = P4_OPCODE_PACK(0x01, 0x01),
+ /*
+ * MSR_P4_TC_ESCR0: 4, 5
+ * MSR_P4_TC_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST) = P4_OPCODE_PACK(0x03, 0x00),
+ /*
+ * MSR_P4_BPU_ESCR0: 0, 1
+ * MSR_P4_BPU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_ITLB_REFERENCE) = P4_OPCODE_PACK(0x18, 0x03),
+ /*
+ * MSR_P4_ITLB_ESCR0: 0, 1
+ * MSR_P4_ITLB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_MEMORY_CANCEL) = P4_OPCODE_PACK(0x02, 0x05),
+ /*
+ * MSR_P4_DAC_ESCR0: 8, 9
+ * MSR_P4_DAC_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_MEMORY_COMPLETE) = P4_OPCODE_PACK(0x08, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY) = P4_OPCODE_PACK(0x04, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY) = P4_OPCODE_PACK(0x05, 0x02),
+ /*
+ * MSR_P4_SAAT_ESCR0: 8, 9
+ * MSR_P4_SAAT_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY) = P4_OPCODE_PACK(0x03, 0x02),
+ /*
+ * MSR_P4_MOB_ESCR0: 0, 1
+ * MSR_P4_MOB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE) = P4_OPCODE_PACK(0x01, 0x04),
+ /*
+ * MSR_P4_PMH_ESCR0: 0, 1
+ * MSR_P4_PMH_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE) = P4_OPCODE_PACK(0x0c, 0x07),
+ /*
+ * MSR_P4_BSU_ESCR0: 0, 1
+ * MSR_P4_BSU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_IOQ_ALLOCATION) = P4_OPCODE_PACK(0x03, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x1a, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY) = P4_OPCODE_PACK(0x17, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_ALLOCATION) = P4_OPCODE_PACK(0x05, 0x07),
+ /*
+ * MSR_P4_BSU_ESCR0: 0, 1
+ */
+
+ P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES) = P4_OPCODE_PACK(0x06, 0x07),
+ /*
+ * NOTE: no ESCR name in docs, it's guessed
+ * MSR_P4_BSU_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST) = P4_OPCODE_PACK(0x34, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_PACKED_SP_UOP) = P4_OPCODE_PACK(0x08, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_PACKED_DP_UOP) = P4_OPCODE_PACK(0x0c, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_SCALAR_SP_UOP) = P4_OPCODE_PACK(0x0a, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_SCALAR_DP_UOP) = P4_OPCODE_PACK(0x0e, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_64BIT_MMX_UOP) = P4_OPCODE_PACK(0x02, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_128BIT_MMX_UOP) = P4_OPCODE_PACK(0x1a, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_X87_FP_UOP) = P4_OPCODE_PACK(0x04, 0x01),
+ /*
+ * MSR_P4_FIRM_ESCR0: 8, 9
+ * MSR_P4_FIRM_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_TC_MISC) = P4_OPCODE_PACK(0x06, 0x01),
+ /*
+ * MSR_P4_TC_ESCR0: 4, 5
+ * MSR_P4_TC_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS) = P4_OPCODE_PACK(0x13, 0x06),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_TC_MS_XFER) = P4_OPCODE_PACK(0x05, 0x00),
+ /*
+ * MSR_P4_MS_ESCR0: 4, 5
+ * MSR_P4_MS_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES) = P4_OPCODE_PACK(0x09, 0x00),
+ /*
+ * MSR_P4_MS_ESCR0: 4, 5
+ * MSR_P4_MS_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x05, 0x02),
+ /*
+ * MSR_P4_TBPU_ESCR0: 4, 5
+ * MSR_P4_TBPU_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE) = P4_OPCODE_PACK(0x04, 0x02),
+ /*
+ * MSR_P4_TBPU_ESCR0: 4, 5
+ * MSR_P4_TBPU_ESCR1: 6, 7
+ */
+
+ P4_OPCODE(P4_EVENT_RESOURCE_STALL) = P4_OPCODE_PACK(0x01, 0x01),
+ /*
+ * MSR_P4_ALF_ESCR0: 12, 13, 16
+ * MSR_P4_ALF_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_WC_BUFFER) = P4_OPCODE_PACK(0x05, 0x05),
+ /*
+ * MSR_P4_DAC_ESCR0: 8, 9
+ * MSR_P4_DAC_ESCR1: 10, 11
+ */
+
+ P4_OPCODE(P4_EVENT_B2B_CYCLES) = P4_OPCODE_PACK(0x16, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_BNR) = P4_OPCODE_PACK(0x08, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_SNOOP) = P4_OPCODE_PACK(0x06, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_RESPONSE) = P4_OPCODE_PACK(0x04, 0x03),
+ /*
+ * MSR_P4_FSB_ESCR0: 0, 1
+ * MSR_P4_FSB_ESCR1: 2, 3
+ */
+
+ P4_OPCODE(P4_EVENT_FRONT_END_EVENT) = P4_OPCODE_PACK(0x08, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_EXECUTION_EVENT) = P4_OPCODE_PACK(0x0c, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_REPLAY_EVENT) = P4_OPCODE_PACK(0x09, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_INSTR_RETIRED) = P4_OPCODE_PACK(0x02, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_UOPS_RETIRED) = P4_OPCODE_PACK(0x01, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_UOP_TYPE) = P4_OPCODE_PACK(0x02, 0x02),
+ /*
+ * MSR_P4_RAT_ESCR0: 12, 13, 16
+ * MSR_P4_RAT_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_BRANCH_RETIRED) = P4_OPCODE_PACK(0x06, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED) = P4_OPCODE_PACK(0x03, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_X87_ASSIST) = P4_OPCODE_PACK(0x03, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_MACHINE_CLEAR) = P4_OPCODE_PACK(0x02, 0x05),
+ /*
+ * MSR_P4_CRU_ESCR2: 12, 13, 16
+ * MSR_P4_CRU_ESCR3: 14, 15, 17
+ */
+
+ P4_OPCODE(P4_EVENT_INSTR_COMPLETED) = P4_OPCODE_PACK(0x07, 0x04),
+ /*
+ * MSR_P4_CRU_ESCR0: 12, 13, 16
+ * MSR_P4_CRU_ESCR1: 14, 15, 17
+ */
+};
+
+/*
+ * A caller should use the P4_ESCR_EMASK_BIT() helper to
+ * pick the EventMask bit needed, for example
+ *
+ * P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD)
+ */
+enum P4_ESCR_EMASKS {
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DD, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DB, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DI, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BD, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BB, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BI, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, ID, 6),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BPU_FETCH_REQUEST, TCMISS, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, MISS, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT_UK, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, 64K_CONF, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, LSC, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, SSC, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STA, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STD, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, DTMISS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, ITMISS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS, 10),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, DEFAULT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_READ, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_UC, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WC, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WT, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WP, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WB, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OWN, 13),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OTHER, 14),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, PREFETCH, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN, 13),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER, 14),
+ P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1, 12),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2, 13),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE, 7),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE, 8),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE, 9),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE, 10),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0, 11),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1, 12),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2, 13),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SSE_INPUT_ASSIST, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_SP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_DP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_SP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_DP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_64BIT_MMX_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_128BIT_MMX_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_FP_UOP, ALL, 15),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_MISC, FLUSH, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_TC_MS_XFER, CISC, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CALL, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_RESOURCE_STALL, SBFULL, 5),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_EVICTS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS0, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS1, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS2, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS3, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS0, 4),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS1, 5),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS2, 6),
+ P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS3, 7),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSTAG, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSNTAG, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSTAG, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, BOGUS, 1),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGLOADS, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGSTORES, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNP, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNM, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTP, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTM, 3),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS, 0),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSU, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSO, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAO, 2),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAU, 3),
+ P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, PREA, 4),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, CLEAR, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, MOCLEAR, 1),
+ P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, SMCLEAR, 2),
+
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, NBOGUS, 0),
+ P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, BOGUS, 1),
+};
+
+/*
+ * Note that the UOP and PEBS bits are reserved for now,
+ * in case we need them later.
+ */
+#define P4_PEBS_CONFIG_ENABLE (1ULL << 7)
+#define P4_PEBS_CONFIG_UOP_TAG (1ULL << 8)
+#define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL
+#define P4_PEBS_CONFIG_MASK 0xFFLL
+
+/*
+ * Note: only counters MSR_IQ_COUNTER4 (16) and
+ * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling.
+ */
+#define P4_PEBS_ENABLE 0x02000000ULL
+#define P4_PEBS_ENABLE_UOP_TAG 0x01000000ULL
+
+#define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
+#define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK)
+
+#define p4_config_pebs_has(v, mask) (p4_config_unpack_pebs(v) & (mask))
+
+enum P4_PEBS_METRIC {
+ P4_PEBS_METRIC__none,
+
+ P4_PEBS_METRIC__1stl_cache_load_miss_retired,
+ P4_PEBS_METRIC__2ndl_cache_load_miss_retired,
+ P4_PEBS_METRIC__dtlb_load_miss_retired,
+ P4_PEBS_METRIC__dtlb_store_miss_retired,
+ P4_PEBS_METRIC__dtlb_all_miss_retired,
+ P4_PEBS_METRIC__tagged_mispred_branch,
+ P4_PEBS_METRIC__mob_load_replay_retired,
+ P4_PEBS_METRIC__split_load_retired,
+ P4_PEBS_METRIC__split_store_retired,
+
+ P4_PEBS_METRIC__max
+};
+
+/*
+ * Notes on internal configuration of ESCR+CCCR tuples
+ *
+ * Since the P4 performance-register architecture is quite
+ * different from the "architectural" one, and we have only
+ * 64 bits to keep the configuration of a performance event,
+ * the following trick is used.
+ *
+ * 1) Since only the low 32 bits of the ESCR and CCCR registers
+ * are meaningful, we pack them into a single 64-bit
+ * configuration. The low 32 bits of such a config correspond
+ * to the low 32 bits of the CCCR register and the high 32 bits
+ * correspond to the low 32 bits of the ESCR register.
+ *
+ * 2) The meaning of every bit of such a config field can
+ * be found in the Intel SDM, but note that we "borrow"
+ * some reserved bits for our own usage and clear them or
+ * set them to proper values when we do a real write to the
+ * hardware registers.
+ *
+ * 3) The bits of the config field are laid out as follows;
+ * each field should be either 0 or set to one of the
+ * predefined values:
+ *
+ * Low 32 bits
+ * -----------
+ * 0-6: P4_PEBS_METRIC enum
+ * 7-11: reserved
+ * 12: reserved (Enable)
+ * 13-15: reserved (ESCR select)
+ * 16-17: Active Thread
+ * 18: Compare
+ * 19: Complement
+ * 20-23: Threshold
+ * 24: Edge
+ * 25: reserved (FORCE_OVF)
+ * 26: reserved (OVF_PMI_T0)
+ * 27: reserved (OVF_PMI_T1)
+ * 28-29: reserved
+ * 30: reserved (Cascade)
+ * 31: reserved (OVF)
+ *
+ * High 32 bits
+ * ------------
+ * 0: reserved (T1_USR)
+ * 1: reserved (T1_OS)
+ * 2: reserved (T0_USR)
+ * 3: reserved (T0_OS)
+ * 4: Tag Enable
+ * 5-8: Tag Value
+ * 9-24: Event Mask (may use P4_ESCR_EMASK_BIT helper)
+ * 25-30: enum P4_EVENTS
+ * 31: reserved (HT thread)
+ */
+
+#endif /* PERF_EVENT_P4_H */
+
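As an illustration of the packing scheme described in the comment above, here is a minimal standalone user-space sketch (the sample config value is made up, not a real event encoding): the low 32 bits act as the CCCR image and the high 32 bits as the ESCR image, with the event number in bits 25-30 and the event mask in bits 9-24 of the high half.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t config = 0x0400020000039000ULL;	/* made-up example value */

	uint32_t cccr = (uint32_t)(config & 0xffffffffULL);	/* low 32 bits: CCCR image */
	uint32_t escr = (uint32_t)(config >> 32);		/* high 32 bits: ESCR image */

	unsigned int event = (escr >> 25) & 0x3f;	/* bits 25-30: enum P4_EVENTS */
	unsigned int emask = (escr >> 9) & 0xffff;	/* bits 9-24: event mask */

	printf("cccr=%#x escr=%#x event=%u emask=%#x\n",
	       (unsigned)cccr, (unsigned)escr, event, emask);
	return 0;
}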
diff --git a/kernel/arch/x86/include/asm/pgalloc.h b/kernel/arch/x86/include/asm/pgalloc.h
new file mode 100644
index 000000000..bf7f8b55b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgalloc.h
@@ -0,0 +1,148 @@
+#ifndef _ASM_X86_PGALLOC_H
+#define _ASM_X86_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/mm.h> /* for struct page */
+#include <linux/pagemap.h>
+
+static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)
+static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+ unsigned long start, unsigned long count) {}
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
+static inline void paravirt_release_pte(unsigned long pfn) {}
+static inline void paravirt_release_pmd(unsigned long pfn) {}
+static inline void paravirt_release_pud(unsigned long pfn) {}
+#endif
+
+/*
+ * Flags to use when allocating a user page table page.
+ */
+extern gfp_t __userpte_alloc_gfp;
+
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+
+/* Should really implement gc for free page table pages. This could be
+ done with a reference count in struct page. */
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address)
+{
+ ___pte_free_tlb(tlb, pte);
+}
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+ struct page *pte)
+{
+ unsigned long pfn = page_to_pfn(pte);
+
+ paravirt_alloc_pte(mm, pfn);
+ set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#if CONFIG_PGTABLE_LEVELS > 2
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *page;
+ page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_pages(page, 0);
+ return NULL;
+ }
+ return (pmd_t *)page_address(page);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+ pgtable_pmd_page_dtor(virt_to_page(pmd));
+ free_page((unsigned long)pmd);
+}
+
+extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long address)
+{
+ ___pmd_free_tlb(tlb, pmd);
+}
+
+#ifdef CONFIG_X86_PAE
+extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+#else /* !CONFIG_X86_PAE */
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+}
+#endif /* CONFIG_X86_PAE */
+
+#if CONFIG_PGTABLE_LEVELS > 3
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ free_page((unsigned long)pud);
+}
+
+extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
+{
+ ___pud_free_tlb(tlb, pud);
+}
+
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+
+#endif /* _ASM_X86_PGALLOC_H */
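A rough idea of how the generic mm layer is expected to use these hooks when a pte page is missing: a simplified kernel-context sketch (locking, memory barriers and error paths are omitted), not code from the tree.

static int example_alloc_pte(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);	/* one zeroed pte page */

	if (!new)
		return -ENOMEM;

	if (pmd_none(*pmd))
		pmd_populate(mm, pmd, new);	/* hook the pte page into the pmd */
	else
		pte_free(mm, new);		/* someone else populated it first */

	return 0;
}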
diff --git a/kernel/arch/x86/include/asm/pgtable-2level.h b/kernel/arch/x86/include/asm/pgtable-2level.h
new file mode 100644
index 000000000..fd74a1195
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable-2level.h
@@ -0,0 +1,80 @@
+#ifndef _ASM_X86_PGTABLE_2LEVEL_H
+#define _ASM_X86_PGTABLE_2LEVEL_H
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void native_set_pte(pte_t *ptep , pte_t pte)
+{
+ *ptep = pte;
+}
+
+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ native_set_pte(ptep, pte);
+}
+
+static inline void native_pmd_clear(pmd_t *pmdp)
+{
+ native_set_pmd(pmdp, __pmd(0));
+}
+
+static inline void native_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *xp)
+{
+ *xp = native_make_pte(0);
+}
+
+#ifdef CONFIG_SMP
+static inline pte_t native_ptep_get_and_clear(pte_t *xp)
+{
+ return __pte(xchg(&xp->pte_low, 0));
+}
+#else
+#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
+#endif
+
+#ifdef CONFIG_SMP
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+{
+ return __pmd(xchg((pmdval_t *)xp, 0));
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
+/* Bit manipulation helper on pte/pgoff entry */
+static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
+ unsigned long mask, unsigned int leftshift)
+{
+ return ((value >> rightshift) & mask) << leftshift;
+}
+
+/* Encode and de-code a swap entry */
+#define SWP_TYPE_BITS 5
+#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
+ & ((1U << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type) << (_PAGE_BIT_PRESENT + 1)) \
+ | ((offset) << SWP_OFFSET_SHIFT) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
+#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+
+#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
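A worked example of the swap-entry encoding above, as a standalone user-space sketch. It assumes _PAGE_BIT_PRESENT == 0 and _PAGE_BIT_PROTNONE == 8 (the values from pgtable_types.h later in this patch), so the type occupies bits 1-5 and the offset starts at bit 9.

#include <stdio.h>

#define SWP_TYPE_BITS    5
#define SWP_TYPE_SHIFT   1	/* _PAGE_BIT_PRESENT + 1 */
#define SWP_OFFSET_SHIFT 9	/* _PAGE_BIT_PROTNONE + 1 */

int main(void)
{
	unsigned long type = 3, offset = 0x1234;
	unsigned long val = (type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);

	/* decode again with the same shifts the macros use */
	printf("entry=%#lx type=%lu offset=%#lx\n", val,
	       (val >> SWP_TYPE_SHIFT) & ((1UL << SWP_TYPE_BITS) - 1),
	       val >> SWP_OFFSET_SHIFT);
	return 0;
}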
diff --git a/kernel/arch/x86/include/asm/pgtable-2level_types.h b/kernel/arch/x86/include/asm/pgtable-2level_types.h
new file mode 100644
index 000000000..392576433
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable-2level_types.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
+#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef unsigned long pteval_t;
+typedef unsigned long pmdval_t;
+typedef unsigned long pudval_t;
+typedef unsigned long pgdval_t;
+typedef unsigned long pgprotval_t;
+
+typedef union {
+ pteval_t pte;
+ pteval_t pte_low;
+} pte_t;
+#endif /* !__ASSEMBLY__ */
+
+#define SHARED_KERNEL_PMD 0
+
+/*
+ * traditional i386 two-level paging structure:
+ */
+
+#define PGDIR_SHIFT 22
+#define PTRS_PER_PGD 1024
+
+
+/*
+ * the i386 is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+
+#define PTRS_PER_PTE 1024
+
+#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */
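With PGDIR_SHIFT == 22 and 1024 entries per level, a 32-bit virtual address splits into a 10-bit pgd index, a 10-bit pte index and a 12-bit page offset. A small standalone sketch of that split (the address is an arbitrary example):

#include <stdio.h>

#define PGDIR_SHIFT 22
#define PAGE_SHIFT  12

int main(void)
{
	unsigned long addr = 0xc0123456UL;	/* arbitrary example address */

	printf("pgd=%lu pte=%lu offset=%#lx\n",
	       addr >> PGDIR_SHIFT,			/* bits 22-31 */
	       (addr >> PAGE_SHIFT) & 0x3ff,		/* bits 12-21 */
	       addr & 0xfff);				/* bits  0-11 */
	return 0;
}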
diff --git a/kernel/arch/x86/include/asm/pgtable-3level.h b/kernel/arch/x86/include/asm/pgtable-3level.h
new file mode 100644
index 000000000..cdaa58c9b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable-3level.h
@@ -0,0 +1,187 @@
+#ifndef _ASM_X86_PGTABLE_3LEVEL_H
+#define _ASM_X86_PGTABLE_3LEVEL_H
+
+/*
+ * Intel Physical Address Extension (PAE) Mode - three-level page
+ * tables on PPro+ CPUs.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
+ __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e))
+
+/* Rules for using set_pte: the pte being assigned *must* be
+ * either not present or in a state where the hardware will
+ * not attempt to update the pte. In places where this is
+ * not possible, use pte_get_and_clear to obtain the old pte
+ * value and then use set_pte to update it. -ben
+ */
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
+{
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
+}
+
+#define pmd_read_atomic pmd_read_atomic
+/*
+ * pte_offset_map_lock() on 32bit PAE kernels was reading the pmd_t with
+ * a plain "*pmdp" dereference done by gcc. The problem is that in certain
+ * places where pte_offset_map_lock() is called, concurrent page faults are
+ * allowed if the mmap_sem is held for reading. An example is mincore
+ * vs page faults vs MADV_DONTNEED. On the page fault side
+ * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_t with a plain "*pmdp" on the mincore side, an SMP race can happen
+ * because gcc will not read the 64 bits of the pmd atomically. To fix
+ * this, all places running pte_offset_map_lock() while holding the
+ * mmap_sem in read mode shall read the pmdp pointer using this
+ * function to know whether the pmd is null or not, and in turn whether
+ * they can run pte_offset_map_lock(), pmd_trans_huge() or other pmd
+ * operations.
+ *
+ * Without THP, if the mmap_sem is held for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs. So
+ * we can always return atomic pmd values with this function.
+ *
+ * With THP, if the mmap_sem is held for reading, the pmd can become
+ * trans_huge or none or point to a pte (and in turn become "stable")
+ * at any time under pmd_read_atomic. We could read it truly
+ * atomically here with an atomic64_read for the THP-enabled case (and
+ * it would be a whole lot simpler), but to avoid using cmpxchg8b we
+ * only return an atomic pmdval if the low part of the pmdval is later
+ * found stable (i.e. pointing to a pte). And we return a none
+ * pmdval if the low part of the pmd is none. In some cases the high
+ * and low parts of the pmdval returned may not be consistent if THP is
+ * enabled (the low part may point to a previously mapped hugepage,
+ * while the high part may point to a more recently mapped hugepage),
+ * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
+ * of the pmd to be read atomically to decide whether the pmd is unstable
+ * or not, with the only exception of when the low part of the pmd is
+ * zero, in which case we return a none pmd.
+ */
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ pmdval_t ret;
+ u32 *tmp = (u32 *)pmdp;
+
+ ret = (pmdval_t) (*tmp);
+ if (ret) {
+ /*
+ * If the low part is null, we must not read the high part
+ * or we can end up with a partial pmd.
+ */
+ smp_rmb();
+ ret |= ((pmdval_t)*(tmp + 1)) << 32;
+ }
+
+ return (pmd_t) { ret };
+}
+
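A simplified kernel-context sketch of how a walker that only holds mmap_sem for read is expected to consume pmd_read_atomic() (pmd_none() and pmd_trans_huge() are defined in pgtable.h; the THP check assumes CONFIG_TRANSPARENT_HUGEPAGE):

static int example_pmd_points_to_pte(pmd_t *pmdp)
{
	pmd_t pmdval = pmd_read_atomic(pmdp);	/* snapshot, never a plain "*pmdp" */

	if (pmd_none(pmdval))
		return 0;		/* nothing mapped yet */
	if (pmd_trans_huge(pmdval))
		return 0;		/* huge pmd, no pte page to walk */
	return 1;			/* low part points to a stable pte page */
}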
+static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
+}
+
+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
+}
+
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
+{
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
+}
+
+/*
+ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
+ * entry, so clear the bottom half first and enforce ordering with a compiler
+ * barrier.
+ */
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ ptep->pte_low = 0;
+ smp_wmb();
+ ptep->pte_high = 0;
+}
+
+static inline void native_pmd_clear(pmd_t *pmd)
+{
+ u32 *tmp = (u32 *)pmd;
+ *tmp = 0;
+ smp_wmb();
+ *(tmp + 1) = 0;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+
+ /*
+ * According to Intel App note "TLBs, Paging-Structure Caches,
+ * and Their Invalidation", April 2007, document 317080-001,
+ * section 8.1: in PAE mode we explicitly have to flush the
+ * TLB via cr3 if the top-level pgd is changed...
+ *
+ * Currently all places where pud_clear() is called either have
+ * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or
+ * pud_clear_bad()), so we don't need TLB flush here.
+ */
+}
+
+#ifdef CONFIG_SMP
+static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
+{
+ pte_t res;
+
+ /* xchg acts as a barrier before the setting of the high bits */
+ res.pte_low = xchg(&ptep->pte_low, 0);
+ res.pte_high = ptep->pte_high;
+ ptep->pte_high = 0;
+
+ return res;
+}
+#else
+#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
+#endif
+
+#ifdef CONFIG_SMP
+union split_pmd {
+ struct {
+ u32 pmd_low;
+ u32 pmd_high;
+ };
+ pmd_t pmd;
+};
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
+{
+ union split_pmd res, *orig = (union split_pmd *)pmdp;
+
+ /* xchg acts as a barrier before setting of the high bits */
+ res.pmd_low = xchg(&orig->pmd_low, 0);
+ res.pmd_high = orig->pmd_high;
+ orig->pmd_high = 0;
+
+ return res.pmd;
+}
+#else
+#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
+#endif
+
+/* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
+#define __swp_type(x) (((x).val) & 0x1f)
+#define __swp_offset(x) ((x).val >> 5)
+#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
+#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
+#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
+
+#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/kernel/arch/x86/include/asm/pgtable-3level_types.h b/kernel/arch/x86/include/asm/pgtable-3level_types.h
new file mode 100644
index 000000000..bcc89625e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable-3level_types.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
+#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pudval_t;
+typedef u64 pgdval_t;
+typedef u64 pgprotval_t;
+
+typedef union {
+ struct {
+ unsigned long pte_low, pte_high;
+ };
+ pteval_t pte;
+} pte_t;
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_PARAVIRT
+#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
+#else
+#define SHARED_KERNEL_PMD 1
+#endif
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT 30
+#define PTRS_PER_PGD 4
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT 21
+#define PTRS_PER_PMD 512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE 512
+
+
+#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
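With the PAE values above, a 32-bit virtual address splits into a 2-bit pgd index, a 9-bit pmd index, a 9-bit pte index and a 12-bit page offset. A small standalone sketch (arbitrary example address):

#include <stdio.h>

#define PGDIR_SHIFT 30
#define PMD_SHIFT   21
#define PAGE_SHIFT  12

int main(void)
{
	unsigned long addr = 0xb7654321UL;	/* arbitrary example address */

	printf("pgd=%lu pmd=%lu pte=%lu offset=%#lx\n",
	       addr >> PGDIR_SHIFT,			/* 0..3   */
	       (addr >> PMD_SHIFT) & 0x1ff,		/* 0..511 */
	       (addr >> PAGE_SHIFT) & 0x1ff,		/* 0..511 */
	       addr & 0xfff);
	return 0;
}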
diff --git a/kernel/arch/x86/include/asm/pgtable.h b/kernel/arch/x86/include/asm/pgtable.h
new file mode 100644
index 000000000..fe57e7a98
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable.h
@@ -0,0 +1,881 @@
+#ifndef _ASM_X86_PGTABLE_H
+#define _ASM_X86_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/e820.h>
+
+#include <asm/pgtable_types.h>
+
+/*
+ * Macro to mark a page protection value as UC-
+ */
+#define pgprot_noncached(prot) \
+ ((boot_cpu_data.x86 > 3) \
+ ? (__pgprot(pgprot_val(prot) | \
+ cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \
+ : (prot))
+
+#ifndef __ASSEMBLY__
+#include <asm/x86_init.h>
+
+void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ __visible;
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern spinlock_t pgd_lock;
+extern struct list_head pgd_list;
+
+extern struct mm_struct *pgd_page_get_mm(struct page *page);
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* !CONFIG_PARAVIRT */
+#define set_pte(ptep, pte) native_set_pte(ptep, pte)
+#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
+#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)
+
+#define set_pte_atomic(ptep, pte) \
+ native_set_pte_atomic(ptep, pte)
+
+#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
+#define pgd_clear(pgd) native_pgd_clear(pgd)
+#endif
+
+#ifndef set_pud
+# define set_pud(pudp, pud) native_set_pud(pudp, pud)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pud_clear(pud) native_pud_clear(pud)
+#endif
+
+#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
+#define pmd_clear(pmd) native_pmd_clear(pmd)
+
+#define pte_update(mm, addr, ptep) do { } while (0)
+#define pte_update_defer(mm, addr, ptep) do { } while (0)
+#define pmd_update(mm, addr, ptep) do { } while (0)
+#define pmd_update_defer(mm, addr, ptep) do { } while (0)
+
+#define pgd_val(x) native_pgd_val(x)
+#define __pgd(x) native_make_pgd(x)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_val(x) native_pud_val(x)
+#define __pud(x) native_make_pud(x)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_val(x) native_pmd_val(x)
+#define __pmd(x) native_make_pmd(x)
+#endif
+
+#define pte_val(x) native_pte_val(x)
+#define __pte(x) native_make_pte(x)
+
+#define arch_end_context_switch(prev) do {} while(0)
+
+#endif /* CONFIG_PARAVIRT */
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_DIRTY;
+}
+
+static inline int pmd_young(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_ACCESSED;
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_RW;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_PSE;
+}
+
+static inline int pte_global(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_GLOBAL;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return !(pte_flags(pte) & _PAGE_NX);
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SPECIAL;
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
+static inline int pmd_large(pmd_t pte)
+{
+ return pmd_flags(pte) & _PAGE_PSE;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+ return pmd_val(pmd) & _PAGE_SPLITTING;
+}
+
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return pmd_val(pmd) & _PAGE_PSE;
+}
+
+static inline int has_transparent_hugepage(void)
+{
+ return cpu_has_pse;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+{
+ pteval_t v = native_pte_val(pte);
+
+ return native_make_pte(v | set);
+}
+
+static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
+{
+ pteval_t v = native_pte_val(pte);
+
+ return native_make_pte(v & ~clear);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_RW);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_NX);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_RW);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_PSE);
+}
+
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_PSE);
+}
+
+static inline pte_t pte_mkglobal(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_GLOBAL);
+}
+
+static inline pte_t pte_clrglobal(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_GLOBAL);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SPECIAL);
+}
+
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return __pmd(v | set);
+}
+
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return __pmd(v & ~clear);
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_PSE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_RW);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
+}
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
+}
+
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+/*
+ * Mask out unsupported bits in a present pgprot. Non-present pgprots
+ * can use those bits for other purposes, so leave them be.
+ */
+static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
+{
+ pgprotval_t protval = pgprot_val(pgprot);
+
+ if (protval & _PAGE_PRESENT)
+ protval &= __supported_pte_mask;
+
+ return protval;
+}
+
+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+ return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+ massage_pgprot(pgprot));
+}
+
+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+{
+ return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
+ massage_pgprot(pgprot));
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pteval_t val = pte_val(pte);
+
+ /*
+ * Chop off the NX bit (if present), and add the NX portion of
+ * the newprot (if present):
+ */
+ val &= _PAGE_CHG_MASK;
+ val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
+
+ return __pte(val);
+}
+
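A minimal kernel-context sketch of what pte_modify() is used for: mprotect-style code swaps the protection bits while everything in _PAGE_CHG_MASK (pfn, accessed, dirty, ...) is preserved. PAGE_SHARED here is just an example target protection.

static pte_t example_make_writable(pte_t old)
{
	pte_t new = pte_modify(old, PAGE_SHARED);	/* new permissions */

	/* same page frame before and after */
	BUG_ON(pte_pfn(new) != pte_pfn(old));
	return new;
}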
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ pmdval_t val = pmd_val(pmd);
+
+ val &= _HPAGE_CHG_MASK;
+ val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+
+ return __pmd(val);
+}
+
+/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
+ pgprotval_t addbits = pgprot_val(newprot);
+ return __pgprot(preservebits | addbits);
+}
+
+#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
+
+#define canon_pgprot(p) __pgprot(massage_pgprot(p))
+
+static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
+ enum page_cache_mode pcm,
+ enum page_cache_mode new_pcm)
+{
+ /*
+ * PAT type is always WB for untracked ranges, so no need to check.
+ */
+ if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
+ return 1;
+
+ /*
+ * Certain new memtypes are not allowed with certain
+ * requested memtype:
+ * - request is uncached, return cannot be write-back
+ * - request is write-combine, return cannot be write-back
+ */
+ if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
+ new_pcm == _PAGE_CACHE_MODE_WB) ||
+ (pcm == _PAGE_CACHE_MODE_WC &&
+ new_pcm == _PAGE_CACHE_MODE_WB)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+pmd_t *populate_extra_pmd(unsigned long vaddr);
+pte_t *populate_extra_pte(unsigned long vaddr);
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_X86_32
+# include <asm/pgtable_32.h>
+#else
+# include <asm/pgtable_64.h>
+#endif
+
+#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
+#include <linux/mmdebug.h>
+#include <linux/log2.h>
+
+static inline int pte_none(pte_t pte)
+{
+ return !pte.pte;
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t a, pte_t b)
+{
+ return a.pte == b.pte;
+}
+
+static inline int pte_present(pte_t a)
+{
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+}
+
+#define pte_accessible pte_accessible
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
+{
+ if (pte_flags(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_flags(a) & _PAGE_PROTNONE) &&
+ mm_tlb_flush_pending(mm))
+ return true;
+
+ return false;
+}
+
+static inline int pte_hidden(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_HIDDEN;
+}
+
+static inline int pmd_present(pmd_t pmd)
+{
+ /*
+ * Checking for _PAGE_PSE is needed too because
+ * split_huge_page will temporarily clear the present bit (but
+ * the _PAGE_PSE flag will remain set at all times while the
+ * _PAGE_PRESENT bit is clear).
+ */
+ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+ == _PAGE_PROTNONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+ == _PAGE_PROTNONE;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pmd_none(pmd_t pmd)
+{
+ /* Only check low word on 32-bit platforms, since it might be
+ out of sync with upper half. */
+ return (unsigned long)native_pmd_val(pmd) == 0;
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
+
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this function returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+static inline unsigned long pmd_index(unsigned long address)
+{
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * (Currently stuck as a macro because of indirect forward reference
+ * to linux/mm.h:page_to_nid())
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this function returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+static inline unsigned long pte_index(unsigned long address)
+{
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline unsigned long pages_to_mb(unsigned long npg)
+{
+ return npg >> (20 - PAGE_SHIFT);
+}
+
+#if CONFIG_PGTABLE_LEVELS > 2
+static inline int pud_none(pud_t pud)
+{
+ return native_pud_val(pud) == 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+ return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline int pud_large(pud_t pud)
+{
+ return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+ (_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+#else
+static inline int pud_large(pud_t pud)
+{
+ return 0;
+}
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_PGTABLE_LEVELS > 3
+static inline int pgd_present(pgd_t pgd)
+{
+ return pgd_flags(pgd) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+ return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+
+/* to find an entry in a page-table-directory. */
+static inline unsigned long pud_index(unsigned long address)
+{
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+ return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+ return !native_pgd_val(pgd);
+}
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+
+
+#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
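How the lookup helpers compose: a simplified kernel-context sketch of a software walk from a virtual address down to its pte (no locking, no huge-page handling), roughly what lookup_address()-style code does with pgd_offset()/pud_offset()/pmd_offset()/pte_offset_kernel().

static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);	/* may still be pte_none() */
}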
+#ifndef __ASSEMBLY__
+
+extern int direct_gbpages;
+void init_mem_mapping(void);
+void early_alloc_pgt_buf(void);
+
+/* local pte updates need not use xchg for locking */
+static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
+{
+ pte_t res = *ptep;
+
+ /* Pure native function needs no input for mm, addr */
+ native_pte_clear(NULL, 0, ptep);
+ return res;
+}
+
+static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
+{
+ pmd_t res = *pmdp;
+
+ native_pmd_clear(pmdp);
+ return res;
+}
+
+static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep , pte_t pte)
+{
+ native_set_pte(ptep, pte);
+}
+
+static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp , pmd_t pmd)
+{
+ native_set_pmd(pmdp, pmd);
+}
+
+#ifndef CONFIG_PARAVIRT
+/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces. It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush. The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep) do { } while (0)
+#define pte_update_defer(mm, addr, ptep) do { } while (0)
+#endif
+
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPUs that might be updating the dirty
+ * bit at the same time.
+ */
+struct vm_area_struct;
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty);
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+extern int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = native_ptep_get_and_clear(ptep);
+ pte_update(mm, addr, ptep);
+ return pte;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ int full)
+{
+ pte_t pte;
+ if (full) {
+ /*
+ * Full address destruction in progress; paravirt does not
+ * care about updates and native needs no locking
+ */
+ pte = native_local_ptep_get_and_clear(ptep);
+ } else {
+ pte = ptep_get_and_clear(mm, addr, ptep);
+ }
+ return pte;
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
+ pte_update(mm, addr, ptep);
+}
+
+#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+
+#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_RW;
+}
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+ pmd_update(mm, addr, pmdp);
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
+ pmd_update(mm, addr, pmdp);
+}
+
+/*
+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
+ *
+ * dst - pointer to pgd range anywhere on a pgd page
+ * src - ""
+ * count - the number of pgds to copy.
+ *
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+{
+ memcpy(dst, src, count * sizeof(pgd_t));
+}
+
+#define PTE_SHIFT ilog2(PTRS_PER_PTE)
+static inline int page_level_shift(enum pg_level level)
+{
+ return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
+}
+static inline unsigned long page_level_size(enum pg_level level)
+{
+ return 1UL << page_level_shift(level);
+}
+static inline unsigned long page_level_mask(enum pg_level level)
+{
+ return ~(page_level_size(level) - 1);
+}
+
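A worked example of the three helpers above on 64-bit, where PTRS_PER_PTE == 512 so PTE_SHIFT == 9 (the PG_LEVEL_* numbering below assumes the usual x86 enum pg_level, which is not part of this hunk):

	page_level_shift(PG_LEVEL_4K) = (12 - 9) + 1 * 9 = 12  ->  size 4 KiB
	page_level_shift(PG_LEVEL_2M) = (12 - 9) + 2 * 9 = 21  ->  size 2 MiB
	page_level_shift(PG_LEVEL_1G) = (12 - 9) + 3 * 9 = 30  ->  size 1 GiB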
+/*
+ * The x86 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd)
+{
+}
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+#endif
+
+#include <asm-generic/pgtable.h>
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_PGTABLE_H */
diff --git a/kernel/arch/x86/include/asm/pgtable_32.h b/kernel/arch/x86/include/asm/pgtable_32.h
new file mode 100644
index 000000000..b6c0b4048
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable_32.h
@@ -0,0 +1,78 @@
+#ifndef _ASM_X86_PGTABLE_32_H
+#define _ASM_X86_PGTABLE_32_H
+
+#include <asm/pgtable_32_types.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <linux/threads.h>
+#include <asm/paravirt.h>
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct mm_struct;
+struct vm_area_struct;
+
+extern pgd_t swapper_pg_dir[1024];
+extern pgd_t initial_page_table[1024];
+
+static inline void pgtable_cache_init(void) { }
+static inline void check_pgt_cache(void) { }
+void paging_init(void);
+
+/*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without an 'access_ok(VERIFY_WRITE,..)'
+ */
+#undef TEST_ACCESS_OK
+
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level.h>
+#else
+# include <asm/pgtable-2level.h>
+#endif
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+ pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte))
+#else
+#define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
+#define pte_unmap(pte) do { } while (0)
+#endif
+
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr) \
+do { \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
+ __flush_tlb_one((vaddr)); \
+} while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+ */
+#ifdef CONFIG_FLATMEM
+#define kern_addr_valid(addr) (1)
+#else
+#define kern_addr_valid(kaddr) (0)
+#endif
+
+#endif /* _ASM_X86_PGTABLE_32_H */
diff --git a/kernel/arch/x86/include/asm/pgtable_32_types.h b/kernel/arch/x86/include/asm/pgtable_32_types.h
new file mode 100644
index 000000000..9fb2f2bc8
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable_32_types.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_X86_PGTABLE_32_DEFS_H
+#define _ASM_X86_PGTABLE_32_DEFS_H
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode': it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level_types.h>
+# define PMD_SIZE (1UL << PMD_SHIFT)
+# define PMD_MASK (~(PMD_SIZE - 1))
+#else
+# include <asm/pgtable-2level_types.h>
+#endif
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+
+#ifndef __ASSEMBLY__
+extern bool __vmalloc_start_set; /* set once high_memory is set */
+#endif
+
+#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
+ & PMD_MASK)
+
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+#endif
+
+#define MODULES_VADDR VMALLOC_START
+#define MODULES_END VMALLOC_END
+#define MODULES_LEN (MODULES_VADDR - MODULES_END)
+
+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
+#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
diff --git a/kernel/arch/x86/include/asm/pgtable_64.h b/kernel/arch/x86/include/asm/pgtable_64.h
new file mode 100644
index 000000000..2ee781114
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable_64.h
@@ -0,0 +1,183 @@
+#ifndef _ASM_X86_PGTABLE_64_H
+#define _ASM_X86_PGTABLE_64_H
+
+#include <linux/const.h>
+#include <asm/pgtable_64_types.h>
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the x86-64 page table tree.
+ */
+#include <asm/processor.h>
+#include <linux/bitops.h>
+#include <linux/threads.h>
+
+extern pud_t level3_kernel_pgt[512];
+extern pud_t level3_ident_pgt[512];
+extern pmd_t level2_kernel_pgt[512];
+extern pmd_t level2_fixmap_pgt[512];
+extern pmd_t level2_ident_pgt[512];
+extern pte_t level1_fixmap_pgt[512];
+extern pgd_t init_level4_pgt[];
+
+#define swapper_pg_dir init_level4_pgt
+
+extern void paging_init(void);
+
+#define pte_ERROR(e) \
+ pr_err("%s:%d: bad pte %p(%016lx)\n", \
+ __FILE__, __LINE__, &(e), pte_val(e))
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %p(%016lx)\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e))
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %p(%016lx)\n", \
+ __FILE__, __LINE__, &(e), pud_val(e))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %p(%016lx)\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e))
+
+struct mm_struct;
+
+void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
+
+
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ *ptep = native_make_pte(0);
+}
+
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
+{
+ *ptep = pte;
+}
+
+static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+ native_set_pte(ptep, pte);
+}
+
+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void native_pmd_clear(pmd_t *pmd)
+{
+ native_set_pmd(pmd, native_make_pmd(0));
+}
+
+static inline pte_t native_ptep_get_and_clear(pte_t *xp)
+{
+#ifdef CONFIG_SMP
+ return native_make_pte(xchg(&xp->pte, 0));
+#else
+ /* native_local_ptep_get_and_clear,
+ but duplicated because of cyclic dependency */
+ pte_t ret = *xp;
+ native_pte_clear(NULL, 0, xp);
+ return ret;
+#endif
+}
+
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+{
+#ifdef CONFIG_SMP
+ return native_make_pmd(xchg(&xp->pmd, 0));
+#else
+ /* native_local_pmdp_get_and_clear,
+ but duplicated because of cyclic dependency */
+ pmd_t ret = *xp;
+ native_pmd_clear(xp);
+ return ret;
+#endif
+}
+
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
+{
+ *pudp = pud;
+}
+
+static inline void native_pud_clear(pud_t *pud)
+{
+ native_set_pud(pud, native_make_pud(0));
+}
+
+static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ *pgdp = pgd;
+}
+
+static inline void native_pgd_clear(pgd_t *pgd)
+{
+ native_set_pgd(pgd, native_make_pgd(0));
+}
+
+extern void sync_global_pgds(unsigned long start, unsigned long end,
+ int removed);
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+/*
+ * Level 4 access.
+ */
+static inline int pgd_large(pgd_t pgd) { return 0; }
+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
+
+/* PUD - Level3 access */
+
+/* PMD - Level 2 access */
+
+/* PTE - Level 1 access. */
+
+/* x86-64 always has all page tables mapped. */
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_unmap(pte) ((void)(pte))/* NOP */
+
+/* Encode and de-code a swap entry */
+#define SWP_TYPE_BITS 5
+#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
+ & ((1U << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type) << (_PAGE_BIT_PRESENT + 1)) \
+ | ((offset) << SWP_OFFSET_SHIFT) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+
+extern int kern_addr_valid(unsigned long addr);
+extern void cleanup_highmap(void);
+
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#define pgtable_cache_init() do { } while (0)
+#define check_pgt_cache() do { } while (0)
+
+#define PAGE_AGP PAGE_KERNEL_NOCACHE
+#define HAVE_PAGE_AGP 1
+
+/* fs/proc/kcore.c */
+#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
+#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
+
+#define __HAVE_ARCH_PTE_SAME
+
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/kernel/arch/x86/include/asm/pgtable_64_types.h b/kernel/arch/x86/include/asm/pgtable_64_types.h
new file mode 100644
index 000000000..e6844dfb4
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable_64_types.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_X86_PGTABLE_64_DEFS_H
+#define _ASM_X86_PGTABLE_64_DEFS_H
+
+#include <asm/sparsemem.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long pteval_t;
+typedef unsigned long pmdval_t;
+typedef unsigned long pudval_t;
+typedef unsigned long pgdval_t;
+typedef unsigned long pgprotval_t;
+
+typedef struct { pteval_t pte; } pte_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#define SHARED_KERNEL_PMD 0
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT 39
+#define PTRS_PER_PGD 512
+
+/*
+ * 3rd level page
+ */
+#define PUD_SHIFT 30
+#define PTRS_PER_PUD 512
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT 21
+#define PTRS_PER_PMD 512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE 512
+
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define VMALLOC_START _AC(0xffffc90000000000, UL)
+#define VMALLOC_END _AC(0xffffe8ffffffffff, UL)
+#define VMEMMAP_START _AC(0xffffea0000000000, UL)
+#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+#define MODULES_END _AC(0xffffffffff000000, UL)
+#define MODULES_LEN (MODULES_END - MODULES_VADDR)
+#define ESPFIX_PGD_ENTRY _AC(-2, UL)
+#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
+#define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
+#define EFI_VA_END (-68 * (_AC(1, UL) << 30))
+
+#define EARLY_DYNAMIC_PAGE_TABLES 64
+
+#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
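With the shifts above, a 64-bit virtual address decomposes into four 9-bit indexes (512 entries per level) plus a 12-bit page offset. A small standalone sketch using an address inside the vmalloc range defined above:

#include <stdio.h>

#define PGDIR_SHIFT 39
#define PUD_SHIFT   30
#define PMD_SHIFT   21
#define PAGE_SHIFT  12

int main(void)
{
	unsigned long long addr = 0xffffc90000123456ULL;	/* inside VMALLOC_START..VMALLOC_END */

	printf("pgd=%llu pud=%llu pmd=%llu pte=%llu offset=%#llx\n",
	       (addr >> PGDIR_SHIFT) & 0x1ff,
	       (addr >> PUD_SHIFT) & 0x1ff,
	       (addr >> PMD_SHIFT) & 0x1ff,
	       (addr >> PAGE_SHIFT) & 0x1ff,
	       addr & 0xfff);
	return 0;
}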
diff --git a/kernel/arch/x86/include/asm/pgtable_types.h b/kernel/arch/x86/include/asm/pgtable_types.h
new file mode 100644
index 000000000..78f0c8cbe
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pgtable_types.h
@@ -0,0 +1,423 @@
+#ifndef _ASM_X86_PGTABLE_DEFS_H
+#define _ASM_X86_PGTABLE_DEFS_H
+
+#include <linux/const.h>
+#include <asm/page_types.h>
+
+#define FIRST_USER_ADDRESS 0UL
+
+#define _PAGE_BIT_PRESENT 0 /* is present */
+#define _PAGE_BIT_RW 1 /* writeable */
+#define _PAGE_BIT_USER 2 /* userspace addressable */
+#define _PAGE_BIT_PWT 3 /* page write through */
+#define _PAGE_BIT_PCD 4 /* page cache disabled */
+#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
+#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_PAT 7 /* on 4KB pages */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_SOFTW1 9 /* available for programmer */
+#define _PAGE_BIT_SOFTW2 10 /* " */
+#define _PAGE_BIT_SOFTW3 11 /* " */
+#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
+#define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
+#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+/* If _PAGE_BIT_PRESENT is clear, we use these: */
+/* - if the user mapped it with PROT_NONE; pte_present gives true */
+#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
+
+#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
+#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
+#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
+#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
+#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
+#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
+#define __HAVE_ARCH_PTE_SPECIAL
+
+#ifdef CONFIG_KMEMCHECK
+#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#else
+#define _PAGE_HIDDEN (_AT(pteval_t, 0))
+#endif
+
+/*
+ * The same hidden bit is used by kmemcheck, but since kmemcheck
+ * works on kernel pages while soft-dirty engine on user space,
+ * they do not conflict with each other.
+ */
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
+#else
+#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
+#endif
+
+/*
+ * Tracking the soft dirty bit when a page goes to swap is tricky.
+ * We need a bit which can be stored in the pte _and_ does not conflict
+ * with the swap entry format. On x86 bits 6 and 7 are *not* involved
+ * in the swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ *
+ * Please note that this bit must be treated as a swap dirty page
+ * mark if and only if the PTE has its present bit clear!
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
+#endif
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+#else
+#define _PAGE_NX (_AT(pteval_t, 0))
+#endif
+
+#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
+ _PAGE_DIRTY)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SOFT_DIRTY)
+#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
+
+/*
+ * The cache modes defined here are used to translate between pure SW usage
+ * and the HW defined cache mode bits and/or PAT entries.
+ *
+ * The resulting bits for PWT, PCD and PAT should be chosen such that
+ * the WB mode is at index 0 (all bits clear). This is the default
+ * right now and would likely break too much if changed.
+ */
+#ifndef __ASSEMBLY__
+enum page_cache_mode {
+ _PAGE_CACHE_MODE_WB = 0,
+ _PAGE_CACHE_MODE_WC = 1,
+ _PAGE_CACHE_MODE_UC_MINUS = 2,
+ _PAGE_CACHE_MODE_UC = 3,
+ _PAGE_CACHE_MODE_WT = 4,
+ _PAGE_CACHE_MODE_WP = 5,
+ _PAGE_CACHE_MODE_NUM = 8
+};
+#endif
+
+#define _PAGE_CACHE_MASK (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
+#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC))
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
+ _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+#define PAGE_COPY PAGE_COPY_NOEXEC
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
+#define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+
+#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
+#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
+#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
+#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define __PAGE_KERNEL_IO (__PAGE_KERNEL)
+#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE)
+
+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR)
+
+#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+
+/* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+/*
+ * Early identity mapping pte attribute macros.
+ */
+#ifdef CONFIG_X86_64
+#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#else
+#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+#endif
+
+#ifdef CONFIG_X86_32
+# include <asm/pgtable_32_types.h>
+#else
+# include <asm/pgtable_64_types.h>
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
+#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
+
+/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
+#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
+
+typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
+
+typedef struct { pgdval_t pgd; } pgd_t;
+
+static inline pgd_t native_make_pgd(pgdval_t val)
+{
+ return (pgd_t) { val };
+}
+
+static inline pgdval_t native_pgd_val(pgd_t pgd)
+{
+ return pgd.pgd;
+}
+
+static inline pgdval_t pgd_flags(pgd_t pgd)
+{
+ return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+}
+
+#if CONFIG_PGTABLE_LEVELS > 3
+typedef struct { pudval_t pud; } pud_t;
+
+static inline pud_t native_make_pud(pmdval_t val)
+{
+ return (pud_t) { val };
+}
+
+static inline pudval_t native_pud_val(pud_t pud)
+{
+ return pud.pud;
+}
+#else
+#include <asm-generic/pgtable-nopud.h>
+
+static inline pudval_t native_pud_val(pud_t pud)
+{
+ return native_pgd_val(pud.pgd);
+}
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+typedef struct { pmdval_t pmd; } pmd_t;
+
+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+ return (pmd_t) { val };
+}
+
+static inline pmdval_t native_pmd_val(pmd_t pmd)
+{
+ return pmd.pmd;
+}
+#else
+#include <asm-generic/pgtable-nopmd.h>
+
+static inline pmdval_t native_pmd_val(pmd_t pmd)
+{
+ return native_pgd_val(pmd.pud.pgd);
+}
+#endif
+
+static inline pudval_t pud_flags(pud_t pud)
+{
+ return native_pud_val(pud) & PTE_FLAGS_MASK;
+}
+
+static inline pmdval_t pmd_flags(pmd_t pmd)
+{
+ return native_pmd_val(pmd) & PTE_FLAGS_MASK;
+}
+
+static inline pte_t native_make_pte(pteval_t val)
+{
+ return (pte_t) { .pte = val };
+}
+
+static inline pteval_t native_pte_val(pte_t pte)
+{
+ return pte.pte;
+}
+
+static inline pteval_t pte_flags(pte_t pte)
+{
+ return native_pte_val(pte) & PTE_FLAGS_MASK;
+}
+
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
+extern uint8_t __pte2cachemode_tbl[8];
+
+#define __pte2cm_idx(cb) \
+ ((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \
+ (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \
+ (((cb) >> _PAGE_BIT_PWT) & 1))
+#define __cm_idx2pte(i) \
+ ((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \
+ (((i) & 2) << (_PAGE_BIT_PCD - 1)) | \
+ (((i) & 1) << _PAGE_BIT_PWT))
+
+static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
+{
+ if (likely(pcm == 0))
+ return 0;
+ return __cachemode2pte_tbl[pcm];
+}
+static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
+{
+ return __pgprot(cachemode2protval(pcm));
+}
+static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
+{
+ unsigned long masked;
+
+ masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
+ if (likely(masked == 0))
+ return 0;
+ return __pte2cachemode_tbl[__pte2cm_idx(masked)];
+}
+static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
+{
+ pgprot_t new;
+ unsigned long val;
+
+ val = pgprot_val(pgprot);
+ pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+ return new;
+}
+static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
+{
+ pgprot_t new;
+ unsigned long val;
+
+ val = pgprot_val(pgprot);
+ pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ ((val & _PAGE_PAT_LARGE) >>
+ (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+ return new;
+}
+
+
+typedef struct page *pgtable_t;
+
+extern pteval_t __supported_pte_mask;
+extern void set_nx(void);
+extern int nx_enabled;
+
+#define pgprot_writecombine pgprot_writecombine
+extern pgprot_t pgprot_writecombine(pgprot_t prot);
+
+/* Indicate that x86 has its own track and untrack pfn vma functions */
+#define __HAVE_PFNMAP_TRACKING
+
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
+
+/* Install a pte for a particular vaddr in kernel space. */
+void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+
+#ifdef CONFIG_X86_32
+extern void native_pagetable_init(void);
+#else
+#define native_pagetable_init paging_init
+#endif
+
+struct seq_file;
+extern void arch_report_meminfo(struct seq_file *m);
+
+enum pg_level {
+ PG_LEVEL_NONE,
+ PG_LEVEL_4K,
+ PG_LEVEL_2M,
+ PG_LEVEL_1G,
+ PG_LEVEL_NUM
+};
+
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
+/*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
+ * as a pte too.
+ */
+extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level);
+extern pmd_t *lookup_pmd_address(unsigned long address);
+extern phys_addr_t slow_virt_to_phys(void *__address);
+extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ unsigned numpages, unsigned long page_flags);
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+ unsigned numpages);
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PGTABLE_DEFS_H */
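As an aside on the cache-mode plumbing above: __pte2cm_idx() packs the PAT, PCD and PWT page-flag bits into a dense 3-bit index for __pte2cachemode_tbl[], and __cm_idx2pte() is its inverse. A minimal user-space sketch of the same packing, not part of the patch; the bit positions mirror _PAGE_BIT_PWT=3, _PAGE_BIT_PCD=4, _PAGE_BIT_PAT=7 from this header:

#include <assert.h>

#define BIT_PWT	3	/* _PAGE_BIT_PWT */
#define BIT_PCD	4	/* _PAGE_BIT_PCD */
#define BIT_PAT	7	/* _PAGE_BIT_PAT */

static unsigned int pte2cm_idx(unsigned long cb)
{
	return ((cb >> (BIT_PAT - 2)) & 4) |	/* PAT -> index bit 2 */
	       ((cb >> (BIT_PCD - 1)) & 2) |	/* PCD -> index bit 1 */
	       ((cb >> BIT_PWT) & 1);		/* PWT -> index bit 0 */
}

static unsigned long cm_idx2pte(unsigned int idx)
{
	return ((idx & 4UL) << (BIT_PAT - 2)) |
	       ((idx & 2UL) << (BIT_PCD - 1)) |
	       ((idx & 1UL) << BIT_PWT);
}

int main(void)
{
	unsigned int i;

	/* every PAT/PCD/PWT combination survives the round trip */
	for (i = 0; i < 8; i++)
		assert(pte2cm_idx(cm_idx2pte(i)) == i);
	return 0;
}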
diff --git a/kernel/arch/x86/include/asm/platform_sst_audio.h b/kernel/arch/x86/include/asm/platform_sst_audio.h
new file mode 100644
index 000000000..7249e6d09
--- /dev/null
+++ b/kernel/arch/x86/include/asm/platform_sst_audio.h
@@ -0,0 +1,140 @@
+/*
+ * platform_sst_audio.h: sst audio platform data header file
+ *
+ * Copyright (C) 2012-14 Intel Corporation
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ * Omair Mohammed Abdullah <omair.m.abdullah@intel.com>
+ * Vinod Koul <vinod.koul@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _PLATFORM_SST_AUDIO_H_
+#define _PLATFORM_SST_AUDIO_H_
+
+#include <linux/sfi.h>
+
+#define MAX_NUM_STREAMS_MRFLD 25
+#define MAX_NUM_STREAMS MAX_NUM_STREAMS_MRFLD
+
+enum sst_audio_task_id_mrfld {
+ SST_TASK_ID_NONE = 0,
+ SST_TASK_ID_SBA = 1,
+ SST_TASK_ID_MEDIA = 3,
+ SST_TASK_ID_MAX = SST_TASK_ID_MEDIA,
+};
+
+/* Device IDs for Merrifield are Pipe IDs,
+ * ref: DSP spec v0.75 */
+enum sst_audio_device_id_mrfld {
+ /* Output pipeline IDs */
+ PIPE_ID_OUT_START = 0x0,
+ PIPE_CODEC_OUT0 = 0x2,
+ PIPE_CODEC_OUT1 = 0x3,
+ PIPE_SPROT_LOOP_OUT = 0x4,
+ PIPE_MEDIA_LOOP1_OUT = 0x5,
+ PIPE_MEDIA_LOOP2_OUT = 0x6,
+ PIPE_VOIP_OUT = 0xC,
+ PIPE_PCM0_OUT = 0xD,
+ PIPE_PCM1_OUT = 0xE,
+ PIPE_PCM2_OUT = 0xF,
+ PIPE_MEDIA0_OUT = 0x12,
+ PIPE_MEDIA1_OUT = 0x13,
+/* Input Pipeline IDs */
+ PIPE_ID_IN_START = 0x80,
+ PIPE_CODEC_IN0 = 0x82,
+ PIPE_CODEC_IN1 = 0x83,
+ PIPE_SPROT_LOOP_IN = 0x84,
+ PIPE_MEDIA_LOOP1_IN = 0x85,
+ PIPE_MEDIA_LOOP2_IN = 0x86,
+ PIPE_VOIP_IN = 0x8C,
+ PIPE_PCM0_IN = 0x8D,
+ PIPE_PCM1_IN = 0x8E,
+ PIPE_MEDIA0_IN = 0x8F,
+ PIPE_MEDIA1_IN = 0x90,
+ PIPE_MEDIA2_IN = 0x91,
+ PIPE_RSVD = 0xFF,
+};
+
+/* The stream map for each platform consists of an array of the
+ * stream map structure below.
+ */
+struct sst_dev_stream_map {
+ u8 dev_num; /* device id */
+ u8 subdev_num; /* substream */
+ u8 direction;
+ u8 device_id; /* fw id */
+ u8 task_id; /* fw task */
+ u8 status;
+};
+
+struct sst_platform_data {
+ /* Intel software platform id */
+ struct sst_dev_stream_map *pdev_strm_map;
+ unsigned int strm_map_size;
+};
+
+struct sst_info {
+ u32 iram_start;
+ u32 iram_end;
+ bool iram_use;
+ u32 dram_start;
+ u32 dram_end;
+ bool dram_use;
+ u32 imr_start;
+ u32 imr_end;
+ bool imr_use;
+ u32 mailbox_start;
+ bool use_elf;
+ bool lpe_viewpt_rqd;
+ unsigned int max_streams;
+ u32 dma_max_len;
+ u8 num_probes;
+};
+
+struct sst_lib_dnld_info {
+ unsigned int mod_base;
+ unsigned int mod_end;
+ unsigned int mod_table_offset;
+ unsigned int mod_table_size;
+ bool mod_ddr_dnld;
+};
+
+struct sst_res_info {
+ unsigned int shim_offset;
+ unsigned int shim_size;
+ unsigned int shim_phy_addr;
+ unsigned int ssp0_offset;
+ unsigned int ssp0_size;
+ unsigned int dma0_offset;
+ unsigned int dma0_size;
+ unsigned int dma1_offset;
+ unsigned int dma1_size;
+ unsigned int iram_offset;
+ unsigned int iram_size;
+ unsigned int dram_offset;
+ unsigned int dram_size;
+ unsigned int mbox_offset;
+ unsigned int mbox_size;
+ unsigned int acpi_lpe_res_index;
+ unsigned int acpi_ddr_index;
+ unsigned int acpi_ipc_irq_index;
+};
+
+struct sst_ipc_info {
+ int ipc_offset;
+ unsigned int mbox_recv_off;
+};
+
+struct sst_platform_info {
+ const struct sst_info *probe_data;
+ const struct sst_ipc_info *ipc_info;
+ const struct sst_res_info *res_info;
+ const struct sst_lib_dnld_info *lib_info;
+ const char *platform;
+};
+int add_sst_platform_device(void);
+#endif
+
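For orientation, this is roughly how a board file could feed the structures above to the SST driver; a hypothetical sketch, not part of the patch, and every value below is made up for illustration:

/* one hypothetical playback stream routed to the MEDIA firmware task */
static struct sst_dev_stream_map example_strm_map[] = {
	{
		.dev_num    = 0,			/* PCM device 0 */
		.subdev_num = 0,
		.direction  = 0,			/* 0 = playback, assumed */
		.device_id  = PIPE_MEDIA1_OUT,		/* firmware pipe id */
		.task_id    = SST_TASK_ID_MEDIA,	/* firmware task */
		.status     = 1,			/* marked in use, assumed */
	},
};

static struct sst_platform_data example_pdata = {
	.pdev_strm_map = example_strm_map,
	.strm_map_size = ARRAY_SIZE(example_strm_map),
};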
diff --git a/kernel/arch/x86/include/asm/pm-trace.h b/kernel/arch/x86/include/asm/pm-trace.h
new file mode 100644
index 000000000..7b7ac42c3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pm-trace.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_X86_PM_TRACE_H
+#define _ASM_X86_PM_TRACE_H
+
+#include <asm/asm.h>
+
+#define TRACE_RESUME(user) \
+do { \
+ if (pm_trace_enabled) { \
+ const void *tracedata; \
+ asm volatile(_ASM_MOV " $1f,%0\n" \
+ ".section .tracedata,\"a\"\n" \
+ "1:\t.word %c1\n\t" \
+ _ASM_PTR " %c2\n" \
+ ".previous" \
+ :"=r" (tracedata) \
+ : "i" (__LINE__), "i" (__FILE__)); \
+ generate_pm_trace(tracedata, user); \
+ } \
+} while (0)
+
+#define TRACE_SUSPEND(user) TRACE_RESUME(user)
+
+#endif /* _ASM_X86_PM_TRACE_H */
diff --git a/kernel/arch/x86/include/asm/pmc_atom.h b/kernel/arch/x86/include/asm/pmc_atom.h
new file mode 100644
index 000000000..bc0fc0866
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pmc_atom.h
@@ -0,0 +1,129 @@
+/*
+ * Intel Atom SOC Power Management Controller Header File
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef PMC_ATOM_H
+#define PMC_ATOM_H
+
+/* ValleyView Power Control Unit PCI Device ID */
+#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
+
+/* PMC Memory mapped IO registers */
+#define PMC_BASE_ADDR_OFFSET 0x44
+#define PMC_BASE_ADDR_MASK 0xFFFFFE00
+#define PMC_MMIO_REG_LEN 0x100
+#define PMC_REG_BIT_WIDTH 32
+
+/* BIOS uses FUNC_DIS to disable specific function */
+#define PMC_FUNC_DIS 0x34
+#define PMC_FUNC_DIS_2 0x38
+
+/* S0ix wake event control */
+#define PMC_S0IX_WAKE_EN 0x3C
+
+#define BIT_LPC_CLOCK_RUN BIT(4)
+#define BIT_SHARED_IRQ_GPSC BIT(5)
+#define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18)
+#define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19)
+#define BIT_SHARED_IRQ_GPSS BIT(20)
+
+#define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \
+ BIT_SHARED_IRQ_GPSC | \
+ BIT_ORED_DEDICATED_IRQ_GPSS | \
+ BIT_ORED_DEDICATED_IRQ_GPSC | \
+ BIT_SHARED_IRQ_GPSS)
+
+/* The timers accumulate time spent in sleep state */
+#define PMC_S0IR_TMR 0x80
+#define PMC_S0I1_TMR 0x84
+#define PMC_S0I2_TMR 0x88
+#define PMC_S0I3_TMR 0x8C
+#define PMC_S0_TMR 0x90
+/* Sleep state counter is in units of 32us */
+#define PMC_TMR_SHIFT 5
+
+/* Power status of power islands */
+#define PMC_PSS 0x98
+
+#define PMC_PSS_BIT_GBE BIT(0)
+#define PMC_PSS_BIT_SATA BIT(1)
+#define PMC_PSS_BIT_HDA BIT(2)
+#define PMC_PSS_BIT_SEC BIT(3)
+#define PMC_PSS_BIT_PCIE BIT(4)
+#define PMC_PSS_BIT_LPSS BIT(5)
+#define PMC_PSS_BIT_LPE BIT(6)
+#define PMC_PSS_BIT_DFX BIT(7)
+#define PMC_PSS_BIT_USH_CTRL BIT(8)
+#define PMC_PSS_BIT_USH_SUS BIT(9)
+#define PMC_PSS_BIT_USH_VCCS BIT(10)
+#define PMC_PSS_BIT_USH_VCCA BIT(11)
+#define PMC_PSS_BIT_OTG_CTRL BIT(12)
+#define PMC_PSS_BIT_OTG_VCCS BIT(13)
+#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14)
+#define PMC_PSS_BIT_OTG_VCCA BIT(15)
+#define PMC_PSS_BIT_USB BIT(16)
+#define PMC_PSS_BIT_USB_SUS BIT(17)
+
+/* These registers reflect D3 status of functions */
+#define PMC_D3_STS_0 0xA0
+
+#define BIT_LPSS1_F0_DMA BIT(0)
+#define BIT_LPSS1_F1_PWM1 BIT(1)
+#define BIT_LPSS1_F2_PWM2 BIT(2)
+#define BIT_LPSS1_F3_HSUART1 BIT(3)
+#define BIT_LPSS1_F4_HSUART2 BIT(4)
+#define BIT_LPSS1_F5_SPI BIT(5)
+#define BIT_LPSS1_F6_XXX BIT(6)
+#define BIT_LPSS1_F7_XXX BIT(7)
+#define BIT_SCC_EMMC BIT(8)
+#define BIT_SCC_SDIO BIT(9)
+#define BIT_SCC_SDCARD BIT(10)
+#define BIT_SCC_MIPI BIT(11)
+#define BIT_HDA BIT(12)
+#define BIT_LPE BIT(13)
+#define BIT_OTG BIT(14)
+#define BIT_USH BIT(15)
+#define BIT_GBE BIT(16)
+#define BIT_SATA BIT(17)
+#define BIT_USB_EHCI BIT(18)
+#define BIT_SEC BIT(19)
+#define BIT_PCIE_PORT0 BIT(20)
+#define BIT_PCIE_PORT1 BIT(21)
+#define BIT_PCIE_PORT2 BIT(22)
+#define BIT_PCIE_PORT3 BIT(23)
+#define BIT_LPSS2_F0_DMA BIT(24)
+#define BIT_LPSS2_F1_I2C1 BIT(25)
+#define BIT_LPSS2_F2_I2C2 BIT(26)
+#define BIT_LPSS2_F3_I2C3 BIT(27)
+#define BIT_LPSS2_F4_I2C4 BIT(28)
+#define BIT_LPSS2_F5_I2C5 BIT(29)
+#define BIT_LPSS2_F6_I2C6 BIT(30)
+#define BIT_LPSS2_F7_I2C7 BIT(31)
+
+#define PMC_D3_STS_1 0xA4
+#define BIT_SMB BIT(0)
+#define BIT_OTG_SS_PHY BIT(1)
+#define BIT_USH_SS_PHY BIT(2)
+#define BIT_DFX BIT(3)
+
+/* PMC I/O Registers */
+#define ACPI_BASE_ADDR_OFFSET 0x40
+#define ACPI_BASE_ADDR_MASK 0xFFFFFE00
+#define ACPI_MMIO_REG_LEN 0x100
+
+#define PM1_CNT 0x4
+#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_S5 0x1C00
+#define SLEEP_ENABLE 0x2000
+#endif /* PMC_ATOM_H */
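A short sketch of how the residency timers above are meant to be read, not part of the patch: the counters tick once per 32us, so a raw value is converted to microseconds by shifting left by PMC_TMR_SHIFT. Here pmc_regs is an assumed ioremap() of the PMC_MMIO_REG_LEN-byte window located via PMC_BASE_ADDR_OFFSET.

#include <linux/io.h>
#include <linux/types.h>

static u64 example_s0i3_residency_us(void __iomem *pmc_regs)
{
	u32 raw = readl(pmc_regs + PMC_S0I3_TMR);

	/* each counter tick is 32us, i.e. 1 << PMC_TMR_SHIFT microseconds */
	return (u64)raw << PMC_TMR_SHIFT;
}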
diff --git a/kernel/arch/x86/include/asm/posix_types.h b/kernel/arch/x86/include/asm/posix_types.h
new file mode 100644
index 000000000..f565f6dd5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/posix_types.h
@@ -0,0 +1,5 @@
+# ifdef CONFIG_X86_32
+# include <asm/posix_types_32.h>
+# else
+# include <asm/posix_types_64.h>
+# endif
diff --git a/kernel/arch/x86/include/asm/preempt.h b/kernel/arch/x86/include/asm/preempt.h
new file mode 100644
index 000000000..6ef9a161d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/preempt.h
@@ -0,0 +1,125 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/rmwcc.h>
+#include <asm/percpu.h>
+#include <linux/thread_info.h>
+
+DECLARE_PER_CPU(int, __preempt_count);
+
+/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
+
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
+static __always_inline int preempt_count(void)
+{
+ return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline void preempt_count_set(int pc)
+{
+ raw_cpu_write_4(__preempt_count, pc);
+}
+
+/*
+ * must be macros to avoid header recursion hell
+ */
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
+ per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+} while (0)
+
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
+{
+ raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+ raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+ return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+ raw_cpu_add_4(__preempt_count, val);
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+ raw_cpu_add_4(__preempt_count, -val);
+}
+
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
+static __always_inline bool ____preempt_count_dec_and_test(void)
+{
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+ if (____preempt_count_dec_and_test())
+ return true;
+#ifdef CONFIG_PREEMPT_LAZY
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+ return false;
+#endif
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+#ifdef CONFIG_PREEMPT_LAZY
+ return unlikely(!raw_cpu_read_4(__preempt_count) || \
+ test_thread_flag(TIF_NEED_RESCHED_LAZY));
+#else
+ return unlikely(!raw_cpu_read_4(__preempt_count));
+#endif
+}
+
+#ifdef CONFIG_PREEMPT
+ extern asmlinkage void ___preempt_schedule(void);
+# define __preempt_schedule() asm ("call ___preempt_schedule")
+ extern asmlinkage void preempt_schedule(void);
+# ifdef CONFIG_CONTEXT_TRACKING
+ extern asmlinkage void ___preempt_schedule_context(void);
+# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
+ extern asmlinkage void preempt_schedule_context(void);
+# endif
+#endif
+
+#endif /* __ASM_PREEMPT_H */
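The folding trick described in the comments above can be seen in isolation with plain integers. A user-space sketch, not part of the patch; PREEMPT_NEED_RESCHED is assumed to be the top bit, kept set while no reschedule is needed:

#include <stdio.h>

#define NEED_RESCHED_BIT 0x80000000u	/* set while NO reschedule is needed */

int main(void)
{
	unsigned int count = NEED_RESCHED_BIT;	/* preemptible, nothing pending */

	count += 1;			/* preempt_disable() */
	count &= ~NEED_RESCHED_BIT;	/* set_preempt_need_resched() */
	count -= 1;			/* the preempt_enable() decrement */

	/*
	 * Hitting zero proves two things at once: the disable depth is zero
	 * and the inverted bit is clear, so we both can and should resched.
	 */
	printf("reschedule now: %s\n", count == 0 ? "yes" : "no");
	return 0;
}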
diff --git a/kernel/arch/x86/include/asm/probe_roms.h b/kernel/arch/x86/include/asm/probe_roms.h
new file mode 100644
index 000000000..4950a0b1d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/probe_roms.h
@@ -0,0 +1,8 @@
+#ifndef _PROBE_ROMS_H_
+#define _PROBE_ROMS_H_
+struct pci_dev;
+
+extern void __iomem *pci_map_biosrom(struct pci_dev *pdev);
+extern void pci_unmap_biosrom(void __iomem *rom);
+extern size_t pci_biosrom_size(struct pci_dev *pdev);
+#endif
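A hypothetical caller of the helpers above, not part of the patch, would map the ROM, use it, and drop the mapping:

#include <linux/pci.h>
#include <linux/printk.h>
#include <asm/probe_roms.h>

static void example_report_rom(struct pci_dev *pdev)
{
	void __iomem *rom = pci_map_biosrom(pdev);

	if (!rom)
		return;		/* no option ROM shadowed for this device */
	pr_info("%s: option ROM of %zu bytes\n",
		pci_name(pdev), pci_biosrom_size(pdev));
	pci_unmap_biosrom(rom);
}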
diff --git a/kernel/arch/x86/include/asm/processor-cyrix.h b/kernel/arch/x86/include/asm/processor-cyrix.h
new file mode 100644
index 000000000..1198f2a0e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/processor-cyrix.h
@@ -0,0 +1,38 @@
+/*
+ * NSC/Cyrix CPU indexed register access. Must be inlined rather than
+ * implemented as macros to ensure correct access ordering.
+ * The access order is always 0x22 (=offset), then 0x23 (=value).
+ *
+ * When using the old macros a line like
+ * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
+ * gets expanded to:
+ * do {
+ * outb((CX86_CCR2), 0x22);
+ * outb((({
+ * outb((CX86_CCR2), 0x22);
+ * inb(0x23);
+ * }) | 0x88), 0x23);
+ * } while (0);
+ *
+ * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
+ */
+
+static inline u8 getCx86(u8 reg)
+{
+ outb(reg, 0x22);
+ return inb(0x23);
+}
+
+static inline void setCx86(u8 reg, u8 data)
+{
+ outb(reg, 0x22);
+ outb(data, 0x23);
+}
+
+#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+#define setCx86_old(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+} while (0)
+
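With the inline helpers, the very expression criticised in the comment becomes safe, because the inner getCx86() is fully evaluated before setCx86() writes to the ports. A hypothetical caller, not part of the patch:

static void example_set_ccr2_bits(void)
{
	/* port access order is now the required 0x22, 0x23, 0x22, 0x23 */
	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
}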
diff --git a/kernel/arch/x86/include/asm/processor-flags.h b/kernel/arch/x86/include/asm/processor-flags.h
new file mode 100644
index 000000000..39fb618e2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/processor-flags.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_X86_PROCESSOR_FLAGS_H
+#define _ASM_X86_PROCESSOR_FLAGS_H
+
+#include <uapi/asm/processor-flags.h>
+
+#ifdef CONFIG_VM86
+#define X86_VM_MASK X86_EFLAGS_VM
+#else
+#define X86_VM_MASK 0 /* No VM86 support */
+#endif
+#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/kernel/arch/x86/include/asm/processor.h b/kernel/arch/x86/include/asm/processor.h
new file mode 100644
index 000000000..23ba6765b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/processor.h
@@ -0,0 +1,977 @@
+#ifndef _ASM_X86_PROCESSOR_H
+#define _ASM_X86_PROCESSOR_H
+
+#include <asm/processor-flags.h>
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
+#include <asm/vm86.h>
+#include <asm/math_emu.h>
+#include <asm/segment.h>
+#include <asm/types.h>
+#include <asm/sigcontext.h>
+#include <asm/current.h>
+#include <asm/cpufeature.h>
+#include <asm/page.h>
+#include <asm/pgtable_types.h>
+#include <asm/percpu.h>
+#include <asm/msr.h>
+#include <asm/desc_defs.h>
+#include <asm/nops.h>
+#include <asm/special_insns.h>
+
+#include <linux/personality.h>
+#include <linux/cpumask.h>
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/math64.h>
+#include <linux/err.h>
+#include <linux/irqflags.h>
+
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be quite expensive on some Nehalem processors.
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN 0
+
+#define HBP_NUM 4
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+static inline void *current_text_addr(void)
+{
+ void *pc;
+
+ asm volatile("mov $1f, %0; 1:":"=r" (pc));
+
+ return pc;
+}
+
+#ifdef CONFIG_X86_VSMP
+# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
+# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
+#else
+# define ARCH_MIN_TASKALIGN 16
+# define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
+
+enum tlb_infos {
+ ENTRIES,
+ NR_INFO
+};
+
+extern u16 __read_mostly tlb_lli_4k[NR_INFO];
+extern u16 __read_mostly tlb_lli_2m[NR_INFO];
+extern u16 __read_mostly tlb_lli_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4k[NR_INFO];
+extern u16 __read_mostly tlb_lld_2m[NR_INFO];
+extern u16 __read_mostly tlb_lld_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_1g[NR_INFO];
+
+/*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ * Members of this structure are referenced in head.S, so think twice
+ * before touching them. [mj]
+ */
+
+struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+#ifdef CONFIG_X86_32
+ char wp_works_ok; /* It doesn't on 386's */
+
+ /* Problems on some 486Dx4's and old 386's: */
+ char rfu;
+ char pad0;
+ char pad1;
+#else
+ /* Number of 4K pages in DTLB/ITLB combined (in pages): */
+ int x86_tlbsize;
+#endif
+ __u8 x86_virt_bits;
+ __u8 x86_phys_bits;
+ /* CPUID returned core id bits: */
+ __u8 x86_coreid_bits;
+ /* Max extended CPUID function supported: */
+ __u32 extended_cpuid_level;
+ /* Maximum supported CPUID level, -1=no CPUID: */
+ int cpuid_level;
+ __u32 x86_capability[NCAPINTS + NBUGINTS];
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ /* in KB - valid for CPUs which support this call: */
+ int x86_cache_size;
+ int x86_cache_alignment; /* In bytes */
+ /* Cache QoS architectural values: */
+ int x86_cache_max_rmid; /* max index */
+ int x86_cache_occ_scale; /* scale to bytes */
+ int x86_power;
+ unsigned long loops_per_jiffy;
+ /* cpuid returned max cores value: */
+ u16 x86_max_cores;
+ u16 apicid;
+ u16 initial_apicid;
+ u16 x86_clflush_size;
+ /* number of cores as seen by the OS: */
+ u16 booted_cores;
+ /* Physical processor id: */
+ u16 phys_proc_id;
+ /* Core id: */
+ u16 cpu_core_id;
+ /* Compute unit id */
+ u8 compute_unit_id;
+ /* Index into per_cpu list: */
+ u16 cpu_index;
+ u32 microcode;
+};
+
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_CYRIX 1
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UMC 3
+#define X86_VENDOR_CENTAUR 5
+#define X86_VENDOR_TRANSMETA 7
+#define X86_VENDOR_NSC 8
+#define X86_VENDOR_NUM 9
+
+#define X86_VENDOR_UNKNOWN 0xff
+
+/*
+ * capabilities of CPUs
+ */
+extern struct cpuinfo_x86 boot_cpu_data;
+extern struct cpuinfo_x86 new_cpu_data;
+
+extern struct tss_struct doublefault_tss;
+extern __u32 cpu_caps_cleared[NCAPINTS];
+extern __u32 cpu_caps_set[NCAPINTS];
+
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
+#define cpu_data(cpu) per_cpu(cpu_info, cpu)
+#else
+#define cpu_info boot_cpu_data
+#define cpu_data(cpu) boot_cpu_data
+#endif
+
+extern const struct seq_operations cpuinfo_op;
+
+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+extern void cpu_detect(struct cpuinfo_x86 *c);
+extern void fpu_detect(struct cpuinfo_x86 *c);
+
+extern void early_cpu_init(void);
+extern void identify_boot_cpu(void);
+extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+extern void print_cpu_info(struct cpuinfo_x86 *);
+void print_cpu_msr(struct cpuinfo_x86 *);
+extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
+
+extern void detect_extended_topology(struct cpuinfo_x86 *c);
+extern void detect_ht(struct cpuinfo_x86 *c);
+
+#ifdef CONFIG_X86_32
+extern int have_cpuid_p(void);
+#else
+static inline int have_cpuid_p(void)
+{
+ return 1;
+}
+#endif
+static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ /* ecx is often an input as well as an output. */
+ asm volatile("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx)
+ : "memory");
+}
+
+static inline void load_cr3(pgd_t *pgdir)
+{
+ write_cr3(__pa(pgdir));
+}
+
+#ifdef CONFIG_X86_32
+/* This is the TSS defined by the hardware. */
+struct x86_hw_tss {
+ unsigned short back_link, __blh;
+ unsigned long sp0;
+ unsigned short ss0, __ss0h;
+ unsigned long sp1;
+
+ /*
+ * We don't use ring 1, so ss1 is a convenient scratch space in
+ * the same cacheline as sp0. We use ss1 to cache the value in
+ * MSR_IA32_SYSENTER_CS. When we context switch
+ * MSR_IA32_SYSENTER_CS, we first check if the new value being
+ * written matches ss1, and, if it's not, then we wrmsr the new
+ * value and update ss1.
+ *
+ * The only reason we context switch MSR_IA32_SYSENTER_CS is
+ * that we set it to zero in vm86 tasks to avoid corrupting the
+ * stack if we were to go through the sysenter path from vm86
+ * mode.
+ */
+ unsigned short ss1; /* MSR_IA32_SYSENTER_CS */
+
+ unsigned short __ss1h;
+ unsigned long sp2;
+ unsigned short ss2, __ss2h;
+ unsigned long __cr3;
+ unsigned long ip;
+ unsigned long flags;
+ unsigned long ax;
+ unsigned long cx;
+ unsigned long dx;
+ unsigned long bx;
+ unsigned long sp;
+ unsigned long bp;
+ unsigned long si;
+ unsigned long di;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace;
+ unsigned short io_bitmap_base;
+
+} __attribute__((packed));
+#else
+struct x86_hw_tss {
+ u32 reserved1;
+ u64 sp0;
+ u64 sp1;
+ u64 sp2;
+ u64 reserved2;
+ u64 ist[7];
+ u32 reserved3;
+ u32 reserved4;
+ u16 reserved5;
+ u16 io_bitmap_base;
+
+} __attribute__((packed)) ____cacheline_aligned;
+#endif
+
+/*
+ * IO-bitmap sizes:
+ */
+#define IO_BITMAP_BITS 65536
+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
+#define INVALID_IO_BITMAP_OFFSET 0x8000
+
+struct tss_struct {
+ /*
+ * The hardware state:
+ */
+ struct x86_hw_tss x86_tss;
+
+ /*
+ * The extra 1 is there because the CPU will access an
+ * additional byte beyond the end of the IO permission
+ * bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+
+ /*
+ * Space for the temporary SYSENTER stack:
+ */
+ unsigned long SYSENTER_stack[64];
+
+} ____cacheline_aligned;
+
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+
+#ifdef CONFIG_X86_32
+DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#endif
+
+/*
+ * Save the original ist values for checking stack pointers during debugging
+ */
+struct orig_ist {
+ unsigned long ist[7];
+};
+
+#define MXCSR_DEFAULT 0x1f80
+
+struct i387_fsave_struct {
+ u32 cwd; /* FPU Control Word */
+ u32 swd; /* FPU Status Word */
+ u32 twd; /* FPU Tag Word */
+ u32 fip; /* FPU IP Offset */
+ u32 fcs; /* FPU IP Selector */
+ u32 foo; /* FPU Operand Pointer Offset */
+ u32 fos; /* FPU Operand Pointer Selector */
+
+ /* 8*10 bytes for each FP-reg = 80 bytes: */
+ u32 st_space[20];
+
+ /* Software status information [not touched by FSAVE ]: */
+ u32 status;
+};
+
+struct i387_fxsave_struct {
+ u16 cwd; /* Control Word */
+ u16 swd; /* Status Word */
+ u16 twd; /* Tag Word */
+ u16 fop; /* Last Instruction Opcode */
+ union {
+ struct {
+ u64 rip; /* Instruction Pointer */
+ u64 rdp; /* Data Pointer */
+ };
+ struct {
+ u32 fip; /* FPU IP Offset */
+ u32 fcs; /* FPU IP Selector */
+ u32 foo; /* FPU Operand Offset */
+ u32 fos; /* FPU Operand Selector */
+ };
+ };
+ u32 mxcsr; /* MXCSR Register State */
+ u32 mxcsr_mask; /* MXCSR Mask */
+
+ /* 8*16 bytes for each FP-reg = 128 bytes: */
+ u32 st_space[32];
+
+ /* 16*16 bytes for each XMM-reg = 256 bytes: */
+ u32 xmm_space[64];
+
+ u32 padding[12];
+
+ union {
+ u32 padding1[12];
+ u32 sw_reserved[12];
+ };
+
+} __attribute__((aligned(16)));
+
+struct i387_soft_struct {
+ u32 cwd;
+ u32 swd;
+ u32 twd;
+ u32 fip;
+ u32 fcs;
+ u32 foo;
+ u32 fos;
+ /* 8*10 bytes for each FP-reg = 80 bytes: */
+ u32 st_space[20];
+ u8 ftop;
+ u8 changed;
+ u8 lookahead;
+ u8 no_update;
+ u8 rm;
+ u8 alimit;
+ struct math_emu_info *info;
+ u32 entry_eip;
+};
+
+struct ymmh_struct {
+ /* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+ u32 ymmh_space[64];
+};
+
+/* We don't support LWP yet: */
+struct lwp_struct {
+ u8 reserved[128];
+};
+
+struct bndreg {
+ u64 lower_bound;
+ u64 upper_bound;
+} __packed;
+
+struct bndcsr {
+ u64 bndcfgu;
+ u64 bndstatus;
+} __packed;
+
+struct xsave_hdr_struct {
+ u64 xstate_bv;
+ u64 xcomp_bv;
+ u64 reserved[6];
+} __attribute__((packed));
+
+struct xsave_struct {
+ struct i387_fxsave_struct i387;
+ struct xsave_hdr_struct xsave_hdr;
+ struct ymmh_struct ymmh;
+ struct lwp_struct lwp;
+ struct bndreg bndreg[4];
+ struct bndcsr bndcsr;
+ /* new processor state extensions will go here */
+} __attribute__ ((packed, aligned (64)));
+
+union thread_xstate {
+ struct i387_fsave_struct fsave;
+ struct i387_fxsave_struct fxsave;
+ struct i387_soft_struct soft;
+ struct xsave_struct xsave;
+};
+
+struct fpu {
+ unsigned int last_cpu;
+ unsigned int has_fpu;
+ union thread_xstate *state;
+};
+
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU(struct orig_ist, orig_ist);
+
+union irq_stack_union {
+ char irq_stack[IRQ_STACK_SIZE];
+ /*
+ * GCC hardcodes the stack canary as %gs:40. Since the
+ * irq_stack is the object at %gs:0, we reserve the bottom
+ * 48 bytes of the irq stack for the canary.
+ */
+ struct {
+ char gs_base[40];
+ unsigned long stack_canary;
+ };
+};
+
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
+DECLARE_INIT_PER_CPU(irq_stack_union);
+
+DECLARE_PER_CPU(char *, irq_stack_ptr);
+DECLARE_PER_CPU(unsigned int, irq_count);
+extern asmlinkage void ignore_sysret(void);
+#else /* X86_64 */
+#ifdef CONFIG_CC_STACKPROTECTOR
+/*
+ * Make sure the stack canary segment base is cache-aligned:
+ * "For Intel Atom processors, avoid non zero segment base address
+ * that is not aligned to cache line boundary at all cost."
+ * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
+ */
+struct stack_canary {
+ char __pad[20]; /* canary at %gs:20 */
+ unsigned long canary;
+};
+DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
+#endif
+/*
+ * per-CPU IRQ handling stacks
+ */
+struct irq_stack {
+ u32 stack[THREAD_SIZE/sizeof(u32)];
+} __aligned(THREAD_SIZE);
+
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
+DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
+#endif /* X86_64 */
+
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
+
+struct perf_event;
+
+struct thread_struct {
+ /* Cached TLS descriptors: */
+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+ unsigned long sp0;
+ unsigned long sp;
+#ifdef CONFIG_X86_32
+ unsigned long sysenter_cs;
+#else
+ unsigned short es;
+ unsigned short ds;
+ unsigned short fsindex;
+ unsigned short gsindex;
+#endif
+#ifdef CONFIG_X86_32
+ unsigned long ip;
+#endif
+#ifdef CONFIG_X86_64
+ unsigned long fs;
+#endif
+ unsigned long gs;
+ /* Save middle states of ptrace breakpoints */
+ struct perf_event *ptrace_bps[HBP_NUM];
+ /* Debug status used for traps, single steps, etc... */
+ unsigned long debugreg6;
+ /* Keep track of the exact dr7 value set by the user */
+ unsigned long ptrace_dr7;
+ /* Fault info: */
+ unsigned long cr2;
+ unsigned long trap_nr;
+ unsigned long error_code;
+ /* floating point and extended processor state */
+ struct fpu fpu;
+#ifdef CONFIG_X86_32
+ /* Virtual 86 mode info */
+ struct vm86_struct __user *vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags;
+ unsigned long v86mask;
+ unsigned long saved_sp0;
+ unsigned int saved_fs;
+ unsigned int saved_gs;
+#endif
+ /* IO permissions: */
+ unsigned long *io_bitmap_ptr;
+ unsigned long iopl;
+ /* Max allowed port in the bitmap, in bytes: */
+ unsigned io_bitmap_max;
+ /*
+ * fpu_counter contains the number of consecutive context switches
+ * during which the FPU is used. If this is over a threshold, the
+ * lazy FPU saving becomes unlazy to save the trap. This is an
+ * unsigned char so that after 256 times the counter wraps and the
+ * behavior turns lazy again; this is to deal with bursty apps that
+ * only use the FPU for a short time.
+ */
+ unsigned char fpu_counter;
+};
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static inline void native_set_iopl_mask(unsigned mask)
+{
+#ifdef CONFIG_X86_32
+ unsigned int reg;
+
+ asm volatile ("pushfl;"
+ "popl %0;"
+ "andl %1, %0;"
+ "orl %2, %0;"
+ "pushl %0;"
+ "popfl"
+ : "=&r" (reg)
+ : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+#endif
+}
+
+static inline void
+native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+ tss->x86_tss.sp0 = thread->sp0;
+#ifdef CONFIG_X86_32
+ /* Only happens when SEP is enabled, no need to test "SEP"arately: */
+ if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+ tss->x86_tss.ss1 = thread->sysenter_cs;
+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ }
+#endif
+}
+
+static inline void native_swapgs(void)
+{
+#ifdef CONFIG_X86_64
+ asm volatile("swapgs" ::: "memory");
+#endif
+}
+
+static inline unsigned long current_top_of_stack(void)
+{
+#ifdef CONFIG_X86_64
+ return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
+#else
+ /* sp0 on x86_32 is special in and around vm86 mode. */
+ return this_cpu_read_stable(cpu_current_top_of_stack);
+#endif
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __cpuid native_cpuid
+#define paravirt_enabled() 0
+
+static inline void load_sp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+{
+ native_load_sp0(tss, thread);
+}
+
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+unsigned long get_wchan(struct task_struct *p);
+
+/*
+ * Generic CPUID function.
+ * Clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(unsigned int op,
+ unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ *eax = op;
+ *ecx = 0;
+ __cpuid(eax, ebx, ecx, edx);
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(unsigned int op, int count,
+ unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ *eax = op;
+ *ecx = count;
+ __cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+
+ return eax;
+}
+
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+
+ return ebx;
+}
+
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+
+ return ecx;
+}
+
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+
+ return edx;
+}
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ asm volatile("rep; nop" ::: "memory");
+}
+
+static inline void cpu_relax(void)
+{
+ rep_nop();
+}
+
+#define cpu_relax_lowlatency() cpu_relax()
+
+/* Stop speculative execution and prefetching of modified code. */
+static inline void sync_core(void)
+{
+ int tmp;
+
+#ifdef CONFIG_M486
+ /*
+ * Do a CPUID if available, otherwise do a jump. The jump
+ * can conveniently enough be the jump around CPUID.
+ */
+ asm volatile("cmpl %2,%1\n\t"
+ "jl 1f\n\t"
+ "cpuid\n"
+ "1:"
+ : "=a" (tmp)
+ : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
+ : "ebx", "ecx", "edx", "memory");
+#else
+ /*
+ * CPUID is a barrier to speculative execution.
+ * Prefetched instructions are automatically
+ * invalidated when modified.
+ */
+ asm volatile("cpuid"
+ : "=a" (tmp)
+ : "0" (1)
+ : "ebx", "ecx", "edx", "memory");
+#endif
+}
+
+extern void select_idle_routine(const struct cpuinfo_x86 *c);
+extern void init_amd_e400_c1e_mask(void);
+
+extern unsigned long boot_option_idle_override;
+extern bool amd_e400_c1e_detected;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
+ IDLE_POLL};
+
+extern void enable_sep_cpu(void);
+extern int sysenter_setup(void);
+
+extern void early_trap_init(void);
+void early_trap_pf_init(void);
+
+/* Defined in head.S */
+extern struct desc_ptr early_gdt_descr;
+
+extern void cpu_set_gdt(int);
+extern void switch_to_new_gdt(int);
+extern void load_percpu_segment(int);
+extern void cpu_init(void);
+
+static inline unsigned long get_debugctlmsr(void)
+{
+ unsigned long debugctlmsr = 0;
+
+#ifndef CONFIG_X86_DEBUGCTLMSR
+ if (boot_cpu_data.x86 < 6)
+ return 0;
+#endif
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+
+ return debugctlmsr;
+}
+
+static inline void update_debugctlmsr(unsigned long debugctlmsr)
+{
+#ifndef CONFIG_X86_DEBUGCTLMSR
+ if (boot_cpu_data.x86 < 6)
+ return;
+#endif
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+}
+
+extern void set_task_blockstep(struct task_struct *task, bool on);
+
+/*
+ * From the system description table in the BIOS. Mostly for MCA use, but
+ * others may find it useful:
+ */
+extern unsigned int machine_id;
+extern unsigned int machine_submodel_id;
+extern unsigned int BIOS_revision;
+
+/* Boot loader type from the setup header: */
+extern int bootloader_type;
+extern int bootloader_version;
+
+extern char ignore_fpu_irq;
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+#ifdef CONFIG_X86_32
+# define BASE_PREFETCH ""
+# define ARCH_HAS_PREFETCH
+#else
+# define BASE_PREFETCH "prefetcht0 %P1"
+#endif
+
+/*
+ * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
+ *
+ * It's not worth caring about 3dnow prefetches for the K6
+ * because they are microcoded there and very slow.
+ */
+static inline void prefetch(const void *x)
+{
+ alternative_input(BASE_PREFETCH, "prefetchnta %P1",
+ X86_FEATURE_XMM,
+ "m" (*(const char *)x));
+}
+
+/*
+ * 3dnow prefetch to get an exclusive cache line.
+ * Useful for spinlocks to avoid one state transition in the
+ * cache coherency protocol:
+ */
+static inline void prefetchw(const void *x)
+{
+ alternative_input(BASE_PREFETCH, "prefetchw %P1",
+ X86_FEATURE_3DNOWPREFETCH,
+ "m" (*(const char *)x));
+}
+
+static inline void spin_lock_prefetch(const void *x)
+{
+ prefetchw(x);
+}
+
+#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
+ TOP_OF_KERNEL_STACK_PADDING)
+
+#ifdef CONFIG_X86_32
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE PAGE_OFFSET
+#define TASK_SIZE_MAX TASK_SIZE
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+#define INIT_THREAD { \
+ .sp0 = TOP_OF_INIT_STACK, \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+}
+
+extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+/*
+ * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
+ * on the stack (interrupt gate does not save these registers
+ * when switching to the same priv ring).
+ * Therefore beware: accessing the ss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain the
+ * completely wrong values.
+ */
+#define task_pt_regs(task) \
+({ \
+ unsigned long __ptr = (unsigned long)task_stack_page(task); \
+ __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
+ ((struct pt_regs *)__ptr) - 1; \
+})
+
+#define KSTK_ESP(task) (task_pt_regs(task)->sp)
+
+#else
+/*
+ * User space process size: 47 bits minus one guard page. The guard
+ * page is necessary on Intel CPUs: if a SYSCALL instruction is at
+ * the highest possible canonical userspace address, then that
+ * syscall will enter the kernel with a non-canonical return
+ * address, and SYSRET will explode dangerously. We avoid this
+ * particular problem by preventing anything from being mapped
+ * at the maximum canonical address.
+ */
+#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+ 0xc0000000 : 0xFFFFe000)
+
+#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE_MAX
+
+#define INIT_THREAD { \
+ .sp0 = TOP_OF_INIT_STACK \
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ * What is this good for? It will always be the scheduler or ret_from_fork.
+ */
+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
+
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
+extern unsigned long KSTK_ESP(struct task_struct *task);
+
+#endif /* CONFIG_X86_64 */
+
+extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+ unsigned long new_sp);
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
+#define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+/* Get/set a process' ability to use the timestamp counter instruction */
+#define GET_TSC_CTL(adr) get_tsc_mode((adr))
+#define SET_TSC_CTL(val) set_tsc_mode((val))
+
+extern int get_tsc_mode(unsigned long adr);
+extern int set_tsc_mode(unsigned int val);
+
+/* Register/unregister a process' MPX related resource */
+#define MPX_ENABLE_MANAGEMENT(tsk) mpx_enable_management((tsk))
+#define MPX_DISABLE_MANAGEMENT(tsk) mpx_disable_management((tsk))
+
+#ifdef CONFIG_X86_INTEL_MPX
+extern int mpx_enable_management(struct task_struct *tsk);
+extern int mpx_disable_management(struct task_struct *tsk);
+#else
+static inline int mpx_enable_management(struct task_struct *tsk)
+{
+ return -EINVAL;
+}
+static inline int mpx_disable_management(struct task_struct *tsk)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_X86_INTEL_MPX */
+
+extern u16 amd_get_nb_id(int cpu);
+
+static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
+{
+ uint32_t base, eax, signature[3];
+
+ for (base = 0x40000000; base < 0x40010000; base += 0x100) {
+ cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
+
+ if (!memcmp(sig, signature, 12) &&
+ (leaves == 0 || ((eax - base) >= leaves)))
+ return base;
+ }
+
+ return 0;
+}
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+#ifdef CONFIG_XEN
+bool xen_set_default_idle(void);
+#else
+#define xen_set_default_idle 0
+#endif
+
+void stop_this_cpu(void *dummy);
+void df_debug(struct pt_regs *regs, long error_code);
+#endif /* _ASM_X86_PROCESSOR_H */
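As a usage note for the cpuid() wrappers above, not part of the patch: leaf 0 returns the maximum basic leaf in EAX and the 12-byte vendor string in EBX, EDX, ECX, in that order, so a hypothetical dump helper looks like this:

static void example_print_vendor(void)
{
	unsigned int max_leaf, vendor[4] = { 0 };	/* 12 chars + NUL pad */

	/* string order is EBX, EDX, ECX, hence the shuffled pointers */
	cpuid(0, &max_leaf, &vendor[0], &vendor[2], &vendor[1]);
	pr_info("max basic leaf %u, vendor '%s'\n", max_leaf, (char *)vendor);
}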
diff --git a/kernel/arch/x86/include/asm/prom.h b/kernel/arch/x86/include/asm/prom.h
new file mode 100644
index 000000000..1d081ac1c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/prom.h
@@ -0,0 +1,41 @@
+/*
+ * Definitions for Device tree / OpenFirmware handling on X86
+ *
+ * based on arch/powerpc/include/asm/prom.h which is
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_X86_PROM_H
+#define _ASM_X86_PROM_H
+#ifndef __ASSEMBLY__
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include <asm/irq.h>
+#include <linux/atomic.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_OF
+extern int of_ioapic;
+extern u64 initial_dtb;
+extern void add_dtb(u64 data);
+void x86_of_pci_init(void);
+void x86_dtb_init(void);
+#else
+static inline void add_dtb(u64 data) { }
+static inline void x86_of_pci_init(void) { }
+static inline void x86_dtb_init(void) { }
+#define of_ioapic 0
+#endif
+
+extern char cmd_line[COMMAND_LINE_SIZE];
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/kernel/arch/x86/include/asm/proto.h b/kernel/arch/x86/include/asm/proto.h
new file mode 100644
index 000000000..a90f8972d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/proto.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_X86_PROTO_H
+#define _ASM_X86_PROTO_H
+
+#include <asm/ldt.h>
+
+/* misc architecture specific prototypes */
+
+void system_call(void);
+void syscall_init(void);
+
+void ia32_syscall(void);
+void ia32_cstar_target(void);
+void ia32_sysenter_target(void);
+
+void x86_configure_nx(void);
+void x86_report_nx(void);
+
+extern int reboot_force;
+
+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
+
+#endif /* _ASM_X86_PROTO_H */
diff --git a/kernel/arch/x86/include/asm/ptrace.h b/kernel/arch/x86/include/asm/ptrace.h
new file mode 100644
index 000000000..5fabf1362
--- /dev/null
+++ b/kernel/arch/x86/include/asm/ptrace.h
@@ -0,0 +1,257 @@
+#ifndef _ASM_X86_PTRACE_H
+#define _ASM_X86_PTRACE_H
+
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+#ifdef __i386__
+
+struct pt_regs {
+ unsigned long bx;
+ unsigned long cx;
+ unsigned long dx;
+ unsigned long si;
+ unsigned long di;
+ unsigned long bp;
+ unsigned long ax;
+ unsigned long ds;
+ unsigned long es;
+ unsigned long fs;
+ unsigned long gs;
+ unsigned long orig_ax;
+ unsigned long ip;
+ unsigned long cs;
+ unsigned long flags;
+ unsigned long sp;
+ unsigned long ss;
+};
+
+#else /* __i386__ */
+
+struct pt_regs {
+/*
+ * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+ * unless syscall needs a complete, fully filled "struct pt_regs".
+ */
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long bp;
+ unsigned long bx;
+/* These regs are callee-clobbered. Always saved on kernel entry. */
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long ax;
+ unsigned long cx;
+ unsigned long dx;
+ unsigned long si;
+ unsigned long di;
+/*
+ * On syscall entry, this is syscall#. On CPU exception, this is error code.
+ * On hw interrupt, it's IRQ number:
+ */
+ unsigned long orig_ax;
+/* Return frame for iretq */
+ unsigned long ip;
+ unsigned long cs;
+ unsigned long flags;
+ unsigned long sp;
+ unsigned long ss;
+/* top of stack page */
+};
+
+#endif /* !__i386__ */
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt_types.h>
+#endif
+
+struct cpuinfo_x86;
+struct task_struct;
+
+extern unsigned long profile_pc(struct pt_regs *regs);
+#define profile_pc profile_pc
+
+extern unsigned long
+convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
+extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code, int si_code);
+
+
+extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
+extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
+ unsigned long phase1_result);
+
+extern long syscall_trace_enter(struct pt_regs *);
+extern void syscall_trace_leave(struct pt_regs *);
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->ax;
+}
+
+/*
+ * user_mode(regs) determines whether a register set came from user
+ * mode. On x86_32, this is true if V8086 mode was enabled OR if the
+ * register set was from protected mode with RPL-3 CS value. This
+ * tricky test checks that with one comparison.
+ *
+ * On x86_64, vm86 mode is mercifully nonexistent, and we don't need
+ * the extra check.
+ */
+static inline int user_mode(struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
+#else
+ return !!(regs->cs & 3);
+#endif
+}
+
+static inline int v8086_mode(struct pt_regs *regs)
+{
+#ifdef CONFIG_X86_32
+ return (regs->flags & X86_VM_MASK);
+#else
+ return 0; /* No V86 mode support in long mode */
+#endif
+}
+
+#ifdef CONFIG_X86_64
+static inline bool user_64bit_mode(struct pt_regs *regs)
+{
+#ifndef CONFIG_PARAVIRT
+ /*
+ * On non-paravirt systems, this is the only long mode CPL 3
+ * selector. We do not allow long mode selectors in the LDT.
+ */
+ return regs->cs == __USER_CS;
+#else
+ /* Headers are too twisted for this to go in paravirt.h. */
+ return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
+#endif
+}
+
+#define current_user_stack_pointer() current_pt_regs()->sp
+#define compat_user_stack_pointer() current_pt_regs()->sp
+#endif
+
+#ifdef CONFIG_X86_32
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
+#else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+#endif
+
+#define GET_IP(regs) ((regs)->ip)
+#define GET_FP(regs) ((regs)->bp)
+#define GET_USP(regs) ((regs)->sp)
+
+#include <asm-generic/ptrace.h>
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+extern const char *regs_query_register_name(unsigned int offset);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which register value is gotten.
+ * @offset: offset number of the register.
+ *
+ * regs_get_register returns the value of a register. The @offset is the
+ * offset of the register within the struct pt_regs pointed to by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+#ifdef CONFIG_X86_32
+ /*
+ * Traps from the kernel do not save sp and ss.
+ * Use the helper function to retrieve sp.
+ */
+ if (offset == offsetof(struct pt_regs, sp) &&
+ regs->cs == __KERNEL_CS)
+ return kernel_stack_pointer(regs);
+#endif
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
+ * If @addr is within the kernel stack, it returns true. If not, returns false.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
+ * specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+ addr += n;
+ if (regs_within_kernel_stack(regs, (unsigned long)addr))
+ return *addr;
+ else
+ return 0;
+}
+
+#define arch_has_single_step() (1)
+#ifdef CONFIG_X86_DEBUGCTLMSR
+#define arch_has_block_step() (1)
+#else
+#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
+#endif
+
+#define ARCH_HAS_USER_SINGLE_STEP_INFO
+
+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set. The
+ * ptracer can change arbitrary register values, which is usually okay
+ * because the usual ptrace stops run off the signal delivery path which
+ * forces IRET; however, ptrace_event() stops happen in arbitrary places
+ * in the kernel and don't force IRET path.
+ *
+ * So force IRET path after a ptrace stop.
+ */
+#define arch_ptrace_stop_needed(code, info) \
+({ \
+ force_iret(); \
+ false; \
+})
+
+struct user_desc;
+extern int do_get_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *info);
+extern int do_set_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *info, int can_allocate);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_X86_PTRACE_H */
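The register-by-offset helpers above are typically driven from a register name, e.g. by kprobes fetch code. A minimal sketch, not part of the patch:

static unsigned long example_read_reg_by_name(struct pt_regs *regs,
					      const char *name)
{
	int offset = regs_query_register_offset(name);

	if (offset < 0)		/* unknown register name */
		return 0;
	return regs_get_register(regs, offset);
}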
diff --git a/kernel/arch/x86/include/asm/pvclock-abi.h b/kernel/arch/x86/include/asm/pvclock-abi.h
new file mode 100644
index 000000000..6167fd798
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pvclock-abi.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_X86_PVCLOCK_ABI_H
+#define _ASM_X86_PVCLOCK_ABI_H
+#ifndef __ASSEMBLY__
+
+/*
+ * These structs MUST NOT be changed.
+ * They are the ABI between hypervisor and guest OS.
+ * Both Xen and KVM are using this.
+ *
+ * pvclock_vcpu_time_info holds the system time and the tsc timestamp
+ * of the last update. So the guest can use the tsc delta to get a
+ * more precise system time. There is one per virtual cpu.
+ *
+ * pvclock_wall_clock references the point in time when the system
+ * time was zero (usually boot time), thus the guest calculates the
+ * current wall clock by adding the system time.
+ *
+ * Protocol for the "version" fields is: hypervisor raises it (making
+ * it uneven) before it starts updating the fields and raises it again
+ * (making it even) when it is done. Thus the guest can make sure the
+ * time values it got are consistent by checking the version before
+ * and after reading them.
+ */
+
+struct pvclock_vcpu_time_info {
+ u32 version;
+ u32 pad0;
+ u64 tsc_timestamp;
+ u64 system_time;
+ u32 tsc_to_system_mul;
+ s8 tsc_shift;
+ u8 flags;
+ u8 pad[2];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+ u32 version;
+ u32 sec;
+ u32 nsec;
+} __attribute__((__packed__));
+
+#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define PVCLOCK_GUEST_STOPPED (1 << 1)
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PVCLOCK_ABI_H */
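To make the even/odd version protocol above concrete, a guest-side reader retries until it observes the same even version before and after copying the fields. A minimal sketch, assuming generic read barriers (the helpers in pvclock.h below implement the real thing):

/* Hedged sketch of the version handshake; not the in-tree implementation. */
static u64 read_system_time(volatile struct pvclock_vcpu_time_info *ti)
{
	u32 version;
	u64 system_time;

	do {
		version = ti->version;	/* odd means an update is in flight */
		rmb();			/* read the payload after the version */
		system_time = ti->system_time;
		rmb();			/* then re-check the version */
	} while ((version & 1) || version != ti->version);

	return system_time;
}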
diff --git a/kernel/arch/x86/include/asm/pvclock.h b/kernel/arch/x86/include/asm/pvclock.h
new file mode 100644
index 000000000..d6b078e9f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/pvclock.h
@@ -0,0 +1,107 @@
+#ifndef _ASM_X86_PVCLOCK_H
+#define _ASM_X86_PVCLOCK_H
+
+#include <linux/clocksource.h>
+#include <asm/pvclock-abi.h>
+
+/* some helper functions for xen and kvm pv clock sources */
+cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
+void pvclock_set_flags(u8 flags);
+unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
+void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
+ struct pvclock_vcpu_time_info *vcpu,
+ struct timespec *ts);
+void pvclock_resume(void);
+
+void pvclock_touch_watchdogs(void);
+
+/*
+ * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result.
+ */
+static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
+{
+ u64 product;
+#ifdef __i386__
+ u32 tmp1, tmp2;
+#else
+ ulong tmp;
+#endif
+
+ if (shift < 0)
+ delta >>= -shift;
+ else
+ delta <<= shift;
+
+#ifdef __i386__
+ __asm__ (
+ "mul %5 ; "
+ "mov %4,%%eax ; "
+ "mov %%edx,%4 ; "
+ "mul %5 ; "
+ "xor %5,%5 ; "
+ "add %4,%%eax ; "
+ "adc %5,%%edx ; "
+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
+ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
+#elif defined(__x86_64__)
+ __asm__ (
+ "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
+ : [lo]"=a"(product),
+ [hi]"=d"(tmp)
+ : "0"(delta),
+ [mul_frac]"rm"((u64)mul_frac));
+#else
+#error implement me!
+#endif
+
+ return product;
+}
+
+static __always_inline
+u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
+{
+ u64 delta = __native_read_tsc() - src->tsc_timestamp;
+ return pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+}
+
+static __always_inline
+unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
+ cycle_t *cycles, u8 *flags)
+{
+ unsigned version;
+ cycle_t ret, offset;
+ u8 ret_flags;
+
+ version = src->version;
+ /* Note: emulated platforms which do not advertise SSE2 support
+ * result in kvmclock not using the necessary RDTSC barriers.
+ * Without barriers, the RDTSC instruction may read the time stamp
+ * counter outside the rdtsc_barrier()-protected section below,
+ * violating monotonicity.
+ */
+ rdtsc_barrier();
+ offset = pvclock_get_nsec_offset(src);
+ ret = src->system_time + offset;
+ ret_flags = src->flags;
+ rdtsc_barrier();
+
+ *cycles = ret;
+ *flags = ret_flags;
+ return version;
+}
+
+struct pvclock_vsyscall_time_info {
+ struct pvclock_vcpu_time_info pvti;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
+#define PVCLOCK_VSYSCALL_NR_PAGES (((NR_CPUS-1)/(PAGE_SIZE/PVTI_SIZE))+1)
+
+int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
+ int size);
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
+
+#endif /* _ASM_X86_PVCLOCK_H */
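The inline assembly in pvclock_scale_delta() computes a 64x32-bit multiply and keeps bits 32..95 of the product. For readers who find the asm opaque, an equivalent portable sketch (assuming a compiler that provides unsigned __int128, which the header itself does not rely on):

/* Hedged, portable restatement of pvclock_scale_delta(); illustration only. */
static inline u64 scale_delta_portable(u64 delta, u32 mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	/* delta * mul_frac fits in 96 bits; keep bits 32..95 of the product. */
	return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
}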
diff --git a/kernel/arch/x86/include/asm/qrwlock.h b/kernel/arch/x86/include/asm/qrwlock.h
new file mode 100644
index 000000000..ae0e241e2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/qrwlock.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_X86_QRWLOCK_H
+#define _ASM_X86_QRWLOCK_H
+
+#include <asm-generic/qrwlock_types.h>
+
+#ifndef CONFIG_X86_PPRO_FENCE
+#define queue_write_unlock queue_write_unlock
+static inline void queue_write_unlock(struct qrwlock *lock)
+{
+ barrier();
+ ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
+}
+#endif
+
+#include <asm-generic/qrwlock.h>
+
+#endif /* _ASM_X86_QRWLOCK_H */
diff --git a/kernel/arch/x86/include/asm/realmode.h b/kernel/arch/x86/include/asm/realmode.h
new file mode 100644
index 000000000..9c6b890d5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/realmode.h
@@ -0,0 +1,64 @@
+#ifndef _ARCH_X86_REALMODE_H
+#define _ARCH_X86_REALMODE_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* This must match data at realmode.S */
+struct real_mode_header {
+ u32 text_start;
+ u32 ro_end;
+ /* SMP trampoline */
+ u32 trampoline_start;
+ u32 trampoline_status;
+ u32 trampoline_header;
+#ifdef CONFIG_X86_64
+ u32 trampoline_pgd;
+#endif
+ /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+ u32 wakeup_start;
+ u32 wakeup_header;
+#endif
+ /* APM/BIOS reboot */
+ u32 machine_real_restart_asm;
+#ifdef CONFIG_X86_64
+ u32 machine_real_restart_seg;
+#endif
+};
+
+/* This must match data at trampoline_32/64.S */
+struct trampoline_header {
+#ifdef CONFIG_X86_32
+ u32 start;
+ u16 gdt_pad;
+ u16 gdt_limit;
+ u32 gdt_base;
+#else
+ u64 start;
+ u64 efer;
+ u32 cr4;
+#endif
+};
+
+extern struct real_mode_header *real_mode_header;
+extern unsigned char real_mode_blob_end[];
+
+extern unsigned long init_rsp;
+extern unsigned long initial_code;
+extern unsigned long initial_gs;
+
+extern unsigned char real_mode_blob[];
+extern unsigned char real_mode_relocs[];
+
+#ifdef CONFIG_X86_32
+extern unsigned char startup_32_smp[];
+extern unsigned char boot_gdt[];
+#else
+extern unsigned char secondary_startup_64[];
+#endif
+
+void reserve_real_mode(void);
+void setup_real_mode(void);
+
+#endif /* _ARCH_X86_REALMODE_H */
diff --git a/kernel/arch/x86/include/asm/reboot.h b/kernel/arch/x86/include/asm/reboot.h
new file mode 100644
index 000000000..a82c4f1b4
--- /dev/null
+++ b/kernel/arch/x86/include/asm/reboot.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_X86_REBOOT_H
+#define _ASM_X86_REBOOT_H
+
+#include <linux/kdebug.h>
+
+struct pt_regs;
+
+struct machine_ops {
+ void (*restart)(char *cmd);
+ void (*halt)(void);
+ void (*power_off)(void);
+ void (*shutdown)(void);
+ void (*crash_shutdown)(struct pt_regs *);
+ void (*emergency_restart)(void);
+};
+
+extern struct machine_ops machine_ops;
+
+void native_machine_crash_shutdown(struct pt_regs *regs);
+void native_machine_shutdown(void);
+void __noreturn machine_real_restart(unsigned int type);
+/* These must match dispatch in arch/x86/realmode/rm/reboot.S */
+#define MRR_BIOS 0
+#define MRR_APM 1
+
+typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
+void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+
+#endif /* _ASM_X86_REBOOT_H */
diff --git a/kernel/arch/x86/include/asm/reboot_fixups.h b/kernel/arch/x86/include/asm/reboot_fixups.h
new file mode 100644
index 000000000..765debe4c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/reboot_fixups.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_REBOOT_FIXUPS_H
+#define _ASM_X86_REBOOT_FIXUPS_H
+
+extern void mach_reboot_fixups(void);
+
+#endif /* _ASM_X86_REBOOT_FIXUPS_H */
diff --git a/kernel/arch/x86/include/asm/required-features.h b/kernel/arch/x86/include/asm/required-features.h
new file mode 100644
index 000000000..5c6e4fb37
--- /dev/null
+++ b/kernel/arch/x86/include/asm/required-features.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#define _ASM_X86_REQUIRED_FEATURES_H
+
+/* Define the minimum CPUID feature set for the kernel.  These bits are
+ checked really early to display a visible error message before the
+ kernel dies.  Make sure to assign features to the proper mask!
+
+ Some requirements that are not expressed in CPUID are also covered by
+ CONFIG_X86_MINIMUM_CPU_FAMILY, which is checked as well.
+
+ The real information is in arch/x86/Kconfig.cpu; this just converts
+ the CONFIGs into a bitmask */
+
+#ifndef CONFIG_MATH_EMULATION
+# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
+#else
+# define NEED_FPU 0
+#endif
+
+#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
+# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
+#else
+# define NEED_PAE 0
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
+#else
+# define NEED_CX8 0
+#endif
+
+#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
+# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
+#else
+# define NEED_CMOV 0
+#endif
+
+#ifdef CONFIG_X86_USE_3DNOW
+# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
+#else
+# define NEED_3DNOW 0
+#endif
+
+#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
+# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
+#else
+# define NEED_NOPL 0
+#endif
+
+#ifdef CONFIG_MATOM
+# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
+#else
+# define NEED_MOVBE 0
+#endif
+
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_PARAVIRT
+/* Paravirtualized systems may not have PSE or PGE available */
+#define NEED_PSE 0
+#define NEED_PGE 0
+#else
+#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
+#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
+#endif
+#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
+#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
+#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
+#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
+#define NEED_LM (1<<(X86_FEATURE_LM & 31))
+#else
+#define NEED_PSE 0
+#define NEED_MSR 0
+#define NEED_PGE 0
+#define NEED_FXSR 0
+#define NEED_XMM 0
+#define NEED_XMM2 0
+#define NEED_LM 0
+#endif
+
+#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
+ NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
+ NEED_XMM|NEED_XMM2)
+#define SSE_MASK (NEED_XMM|NEED_XMM2)
+
+#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
+
+#define REQUIRED_MASK2 0
+#define REQUIRED_MASK3 (NEED_NOPL)
+#define REQUIRED_MASK4 (NEED_MOVBE)
+#define REQUIRED_MASK5 0
+#define REQUIRED_MASK6 0
+#define REQUIRED_MASK7 0
+#define REQUIRED_MASK8 0
+#define REQUIRED_MASK9 0
+
+#endif /* _ASM_X86_REQUIRED_FEATURES_H */
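These REQUIRED_MASKn values are consumed by the very early CPU verification code. As a rough sketch of what that check amounts to (the real check runs far earlier, before the normal console is available; the function below is illustrative only and assumes the usual cpuid() helper from <asm/processor.h>):

/* Hedged sketch: compare CPUID leaf 1 feature bits against REQUIRED_MASK0. */
static void __init check_required_features(void)
{
	u32 eax, ebx, ecx, edx;

	cpuid(1, &eax, &ebx, &ecx, &edx);
	if ((edx & REQUIRED_MASK0) != REQUIRED_MASK0)
		panic("CPU lacks required features: %08x",
		      REQUIRED_MASK0 & ~edx);
}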
diff --git a/kernel/arch/x86/include/asm/rio.h b/kernel/arch/x86/include/asm/rio.h
new file mode 100644
index 000000000..97bab6388
--- /dev/null
+++ b/kernel/arch/x86/include/asm/rio.h
@@ -0,0 +1,63 @@
+/*
+ * Derived from include/asm-x86/mach-summit/mach_mpparse.h
+ * and include/asm-x86/mach-default/bios_ebda.h
+ *
+ * Author: Laurent Vivier <Laurent.Vivier@bull.net>
+ */
+
+#ifndef _ASM_X86_RIO_H
+#define _ASM_X86_RIO_H
+
+#define RIO_TABLE_VERSION 3
+
+struct rio_table_hdr {
+ u8 version; /* Version number of this data structure */
+ u8 num_scal_dev; /* # of Scalability devices */
+ u8 num_rio_dev; /* # of RIO I/O devices */
+} __attribute__((packed));
+
+struct scal_detail {
+ u8 node_id; /* Scalability Node ID */
+ u32 CBAR; /* Address of 1MB register space */
+ u8 port0node; /* Node ID port connected to: 0xFF=None */
+ u8 port0port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port1node; /* Node ID port connected to: 0xFF = None */
+ u8 port1port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port2node; /* Node ID port connected to: 0xFF = None */
+ u8 port2port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
+} __attribute__((packed));
+
+struct rio_detail {
+ u8 node_id; /* RIO Node ID */
+ u32 BBAR; /* Address of 1MB register space */
+ u8 type; /* Type of device */
+ u8 owner_id; /* Node ID of Hurricane that owns this */
+ /* node */
+ u8 port0node; /* Node ID port connected to: 0xFF=None */
+ u8 port0port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port1node; /* Node ID port connected to: 0xFF=None */
+ u8 port1port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 first_slot; /* Lowest slot number below this Calgary */
+ u8 status; /* Bit 0 = 1 : the XAPIC is used */
+ /* = 0 : the XAPIC is not used, ie: */
+ /* ints fwded to another XAPIC */
+ /* Bits 1:7 Reserved */
+ u8 WP_index; /* instance index - lower ones have */
+ /* lower slot numbers/PCI bus numbers */
+ u8 chassis_num; /* 1 based Chassis number */
+} __attribute__((packed));
+
+enum {
+ HURR_SCALABILTY = 0, /* Hurricane Scalability info */
+ HURR_RIOIB = 2, /* Hurricane RIOIB info */
+ COMPAT_CALGARY = 4, /* Compatibility Calgary */
+ ALT_CALGARY = 5, /* Second Planar Calgary */
+};
+
+#endif /* _ASM_X86_RIO_H */
diff --git a/kernel/arch/x86/include/asm/rmwcc.h b/kernel/arch/x86/include/asm/rmwcc.h
new file mode 100644
index 000000000..8f7866a5b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/rmwcc.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_X86_RMWcc
+#define _ASM_X86_RMWcc
+
+#ifdef CC_HAVE_ASM_GOTO
+
+#define __GEN_RMWcc(fullop, var, cc, ...) \
+do { \
+ asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
+ : : "m" (var), ## __VA_ARGS__ \
+ : "memory" : cc_label); \
+ return 0; \
+cc_label: \
+ return 1; \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+ __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+
+#else /* !CC_HAVE_ASM_GOTO */
+
+#define __GEN_RMWcc(fullop, var, cc, ...) \
+do { \
+ char c; \
+ asm volatile (fullop "; set" cc " %1" \
+ : "+m" (var), "=qm" (c) \
+ : __VA_ARGS__ : "memory"); \
+ return c != 0; \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
+ __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
+ __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+
+#endif /* CC_HAVE_ASM_GOTO */
+
+#endif /* _ASM_X86_RMWcc */
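These macros let a caller return the condition-flag result of a locked read-modify-write instruction without a separate compare. As an example of the intended usage pattern (roughly how the x86 atomics layer uses it, shown here as a hedged sketch rather than the exact in-tree definition):

/* Hedged usage sketch: "decrement and test for zero" via GEN_UNARY_RMWcc. */
static inline int my_atomic_dec_and_test(atomic_t *v)
{
	/* Emits "lock decl v->counter" and returns whether ZF was set. */
	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}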
diff --git a/kernel/arch/x86/include/asm/rtc.h b/kernel/arch/x86/include/asm/rtc.h
new file mode 100644
index 000000000..f71c3b0ed
--- /dev/null
+++ b/kernel/arch/x86/include/asm/rtc.h
@@ -0,0 +1 @@
+#include <asm-generic/rtc.h>
diff --git a/kernel/arch/x86/include/asm/rwsem.h b/kernel/arch/x86/include/asm/rwsem.h
new file mode 100644
index 000000000..cad82c9c2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/rwsem.h
@@ -0,0 +1,225 @@
+/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ *
+ * Derived from asm-x86/semaphore.h
+ *
+ *
+ * The MSW of the count is the negated number of active writers and waiting
+ * lockers, and the LSW is the total number of active locks.
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer subtracts WRITE_BIAS, the count becomes 0xffff0001 for the
+ * case of an uncontended lock; contention can be detected because XADD
+ * returns the old value. Readers increment by 1 and see a positive value
+ * when uncontended, negative if there are writers (and possibly readers)
+ * waiting, in which case the reader goes to sleep.
+ *
+ * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
+ * be extended to 65534 by manually checking the whole MSW rather than relying
+ * on the S flag.
+ *
+ * The value of ACTIVE_BIAS supports up to 65535 active processes.
+ *
+ * This should be totally fair - if anything is waiting, a process that wants a
+ * lock will go to the back of the queue. When the currently active lock is
+ * released, if there's a writer at the front of the queue, then that and only
+ * that will be woken up; if there's a bunch of consecutive readers at the
+ * front, then they'll all be woken up, but no other readers will be.
+ */
+
+#ifndef _ASM_X86_RWSEM_H
+#define _ASM_X86_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
+#endif
+
+#ifdef __KERNEL__
+#include <asm/asm.h>
+
+/*
+ * The bias values and the counter type limit the number of
+ * potential readers/writers to 32767 for 32 bits and 2147483647
+ * for 64 bits.
+ */
+
+#ifdef CONFIG_X86_64
+# define RWSEM_ACTIVE_MASK 0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK 0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000L
+#define RWSEM_ACTIVE_BIAS 0x00000001L
+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
+ /* adds 0x00000001 */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+ "1:\n\t"
+ "# ending down_read\n\t"
+ : "+m" (sem->count)
+ : "a" (sem)
+ : "memory", "cc");
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ long result, tmp;
+ asm volatile("# beginning __down_read_trylock\n\t"
+ " mov %0,%1\n\t"
+ "1:\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
+ " jle 2f\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+ "2:\n\t"
+ "# ending __down_read_trylock\n\t"
+ : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "i" (RWSEM_ACTIVE_READ_BIAS)
+ : "memory", "cc");
+ return result >= 0 ? 1 : 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ long tmp;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* adds 0xffff0001, returns the old value */
+ " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
+ /* was the active mask 0 before? */
+ " jz 1f\n"
+ " call call_rwsem_down_write_failed\n"
+ "1:\n"
+ "# ending down_write"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS)
+ : "memory", "cc");
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ __down_write_nested(sem, 0);
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ long result, tmp;
+ asm volatile("# beginning __down_write_trylock\n\t"
+ " mov %0,%1\n\t"
+ "1:\n\t"
+ " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
+ /* was the active mask 0 before? */
+ " jnz 2f\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+ "2:\n\t"
+ " sete %b1\n\t"
+ " movzbl %b1, %k1\n\t"
+ "# ending __down_write_trylock\n\t"
+ : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+ : "er" (RWSEM_ACTIVE_WRITE_BIAS)
+ : "memory", "cc");
+ return result;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ long tmp;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+ "1:\n"
+ "# ending __up_read\n"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
+ : "memory", "cc");
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ long tmp;
+ asm volatile("# beginning __up_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
+ /* subtracts 0xffff0001, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n" /* expects old value in %edx */
+ "1:\n\t"
+ "# ending __up_write\n"
+ : "+m" (sem->count), "=d" (tmp)
+ : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+ : "memory", "cc");
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+ */
+ " jns 1f\n\t"
+ " call call_rwsem_downgrade_wake\n"
+ "1:\n\t"
+ "# ending __downgrade_write\n"
+ : "+m" (sem->count)
+ : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
+ : "memory", "cc");
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
+{
+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
+ : "+m" (sem->count)
+ : "er" (delta));
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
+{
+ return delta + xadd(&sem->count, delta);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_X86_RWSEM_H */
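To make the bias arithmetic described at the top of this file concrete, here is a worked sequence of counter values for the 32-bit layout (a sketch for illustration; the slowpath bookkeeping after a task blocks is omitted):

/*
 *   operation                         resulting sem->count
 *   ---------                         --------------------
 *   initial state                     0x00000000
 *   __down_read()  (uncontended)      0x00000001   += ACTIVE_READ_BIAS
 *   __up_read()                       0x00000000   -= ACTIVE_READ_BIAS
 *   __down_write() (uncontended)      0xffff0001   += ACTIVE_WRITE_BIAS
 *   __down_read()  by another task    0xffff0002   negative, so the reader
 *                                                  takes the slowpath and sleeps
 */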
diff --git a/kernel/arch/x86/include/asm/seccomp.h b/kernel/arch/x86/include/asm/seccomp.h
new file mode 100644
index 000000000..0c8c7c886
--- /dev/null
+++ b/kernel/arch/x86/include/asm/seccomp.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_X86_SECCOMP_H
+#define _ASM_X86_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#ifdef CONFIG_X86_32
+#define __NR_seccomp_sigreturn __NR_sigreturn
+#endif
+
+#ifdef CONFIG_COMPAT
+#include <asm/ia32_unistd.h>
+#define __NR_seccomp_read_32 __NR_ia32_read
+#define __NR_seccomp_write_32 __NR_ia32_write
+#define __NR_seccomp_exit_32 __NR_ia32_exit
+#define __NR_seccomp_sigreturn_32 __NR_ia32_sigreturn
+#endif
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_X86_SECCOMP_H */
diff --git a/kernel/arch/x86/include/asm/sections.h b/kernel/arch/x86/include/asm/sections.h
new file mode 100644
index 000000000..0a5242428
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sections.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_SECTIONS_H
+#define _ASM_X86_SECTIONS_H
+
+#include <asm-generic/sections.h>
+#include <asm/uaccess.h>
+
+extern char __brk_base[], __brk_limit[];
+extern struct exception_table_entry __stop___ex_table[];
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+extern char __end_rodata_hpage_align[];
+#endif
+
+#endif /* _ASM_X86_SECTIONS_H */
diff --git a/kernel/arch/x86/include/asm/segment.h b/kernel/arch/x86/include/asm/segment.h
new file mode 100644
index 000000000..7d5a1929d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/segment.h
@@ -0,0 +1,300 @@
+#ifndef _ASM_X86_SEGMENT_H
+#define _ASM_X86_SEGMENT_H
+
+#include <linux/const.h>
+
+/*
+ * Constructor for a conventional segment GDT (or LDT) entry.
+ * This is a macro so it can be used in initializers.
+ */
+#define GDT_ENTRY(flags, base, limit) \
+ ((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
+ (((flags) & _AC(0x0000f0ff,ULL)) << 40) | \
+ (((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \
+ (((base) & _AC(0x00ffffff,ULL)) << 16) | \
+ (((limit) & _AC(0x0000ffff,ULL))))
+
+/* Simple and small GDT entries for booting only: */
+
+#define GDT_ENTRY_BOOT_CS 2
+#define GDT_ENTRY_BOOT_DS 3
+#define GDT_ENTRY_BOOT_TSS 4
+#define __BOOT_CS (GDT_ENTRY_BOOT_CS*8)
+#define __BOOT_DS (GDT_ENTRY_BOOT_DS*8)
+#define __BOOT_TSS (GDT_ENTRY_BOOT_TSS*8)
+
+/*
+ * Bottom two bits of selector give the ring
+ * privilege level
+ */
+#define SEGMENT_RPL_MASK 0x3
+
+/* User mode is privilege level 3: */
+#define USER_RPL 0x3
+
+/* Bit 2 is Table Indicator (TI): selects between LDT or GDT */
+#define SEGMENT_TI_MASK 0x4
+/* LDT segment has TI set ... */
+#define SEGMENT_LDT 0x4
+/* ... GDT has it cleared */
+#define SEGMENT_GDT 0x0
+
+#define GDT_ENTRY_INVALID_SEG 0
+
+#ifdef CONFIG_X86_32
+/*
+ * The layout of the per-CPU GDT under Linux:
+ *
+ * 0 - null <=== cacheline #1
+ * 1 - reserved
+ * 2 - reserved
+ * 3 - reserved
+ *
+ * 4 - unused <=== cacheline #2
+ * 5 - unused
+ *
+ * ------- start of TLS (Thread-Local Storage) segments:
+ *
+ * 6 - TLS segment #1 [ glibc's TLS segment ]
+ * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
+ * 8 - TLS segment #3 <=== cacheline #3
+ * 9 - reserved
+ * 10 - reserved
+ * 11 - reserved
+ *
+ * ------- start of kernel segments:
+ *
+ * 12 - kernel code segment <=== cacheline #4
+ * 13 - kernel data segment
+ * 14 - default user CS
+ * 15 - default user DS
+ * 16 - TSS <=== cacheline #5
+ * 17 - LDT
+ * 18 - PNPBIOS support (16->32 gate)
+ * 19 - PNPBIOS support
+ * 20 - PNPBIOS support <=== cacheline #6
+ * 21 - PNPBIOS support
+ * 22 - PNPBIOS support
+ * 23 - APM BIOS support
+ * 24 - APM BIOS support <=== cacheline #7
+ * 25 - APM BIOS support
+ *
+ * 26 - ESPFIX small SS
+ * 27 - per-cpu [ offset to per-cpu data area ]
+ * 28 - stack_canary-20 [ for stack protector ] <=== cacheline #8
+ * 29 - unused
+ * 30 - unused
+ * 31 - TSS for double fault handler
+ */
+#define GDT_ENTRY_TLS_MIN 6
+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+#define GDT_ENTRY_KERNEL_CS 12
+#define GDT_ENTRY_KERNEL_DS 13
+#define GDT_ENTRY_DEFAULT_USER_CS 14
+#define GDT_ENTRY_DEFAULT_USER_DS 15
+#define GDT_ENTRY_TSS 16
+#define GDT_ENTRY_LDT 17
+#define GDT_ENTRY_PNPBIOS_CS32 18
+#define GDT_ENTRY_PNPBIOS_CS16 19
+#define GDT_ENTRY_PNPBIOS_DS 20
+#define GDT_ENTRY_PNPBIOS_TS1 21
+#define GDT_ENTRY_PNPBIOS_TS2 22
+#define GDT_ENTRY_APMBIOS_BASE 23
+
+#define GDT_ENTRY_ESPFIX_SS 26
+#define GDT_ENTRY_PERCPU 27
+#define GDT_ENTRY_STACK_CANARY 28
+
+#define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+/*
+ * Number of entries in the GDT table:
+ */
+#define GDT_ENTRIES 32
+
+/*
+ * Segment selector values corresponding to the above entries:
+ */
+
+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8)
+
+/* segment for calling fn: */
+#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32*8)
+/* code segment for BIOS: */
+#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16*8)
+
+/* "Is this PNP code selector (PNP_CS32 or PNP_CS16)?" */
+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == PNP_CS32)
+
+/* data segment for BIOS: */
+#define PNP_DS (GDT_ENTRY_PNPBIOS_DS*8)
+/* transfer data segment: */
+#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1*8)
+/* another data segment: */
+#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2*8)
+
+#ifdef CONFIG_SMP
+# define __KERNEL_PERCPU (GDT_ENTRY_PERCPU*8)
+#else
+# define __KERNEL_PERCPU 0
+#endif
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+# define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8)
+#else
+# define __KERNEL_STACK_CANARY 0
+#endif
+
+#else /* 64-bit: */
+
+#include <asm/cache.h>
+
+#define GDT_ENTRY_KERNEL32_CS 1
+#define GDT_ENTRY_KERNEL_CS 2
+#define GDT_ENTRY_KERNEL_DS 3
+
+/*
+ * We cannot use the same code segment descriptor for user and kernel mode,
+ * not even in long flat mode, because of different DPL.
+ *
+ * GDT layout to get 64-bit SYSCALL/SYSRET support right. SYSRET hardcodes
+ * selectors:
+ *
+ * if returning to 32-bit userspace: cs = STAR.SYSRET_CS,
+ * if returning to 64-bit userspace: cs = STAR.SYSRET_CS+16,
+ *
+ * ss = STAR.SYSRET_CS+8 (in either case)
+ *
+ * thus USER_DS should be between 32-bit and 64-bit code selectors:
+ */
+#define GDT_ENTRY_DEFAULT_USER32_CS 4
+#define GDT_ENTRY_DEFAULT_USER_DS 5
+#define GDT_ENTRY_DEFAULT_USER_CS 6
+
+/* Needs two entries */
+#define GDT_ENTRY_TSS 8
+/* Needs two entries */
+#define GDT_ENTRY_LDT 10
+
+#define GDT_ENTRY_TLS_MIN 12
+#define GDT_ENTRY_TLS_MAX 14
+
+/* Abused to load per CPU data from limit */
+#define GDT_ENTRY_PER_CPU 15
+
+/*
+ * Number of entries in the GDT table:
+ */
+#define GDT_ENTRIES 16
+
+/*
+ * Segment selector values corresponding to the above entries:
+ *
+ * Note, selectors also need to have a correct RPL,
+ * expressed with the +3 value for user-space selectors:
+ */
+#define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS*8)
+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
+#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8 + 3)
+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
+#define __USER32_DS __USER_DS
+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
+#define __PER_CPU_SEG (GDT_ENTRY_PER_CPU*8 + 3)
+
+/* TLS indexes for 64-bit - hardcoded in arch_prctl(): */
+#define FS_TLS 0
+#define GS_TLS 1
+
+#define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
+#define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
+
+#endif
+
+#ifndef CONFIG_PARAVIRT
+# define get_kernel_rpl() 0
+#endif
+
+#define IDT_ENTRIES 256
+#define NUM_EXCEPTION_VECTORS 32
+
+/* Bitmask of exception vectors which push an error code on the stack: */
+#define EXCEPTION_ERRCODE_MASK 0x00027d00
+
+#define GDT_SIZE (GDT_ENTRIES*8)
+#define GDT_ENTRY_TLS_ENTRIES 3
+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
+
+#ifdef __KERNEL__
+
+/*
+ * early_idt_handler_array is an array of entry points referenced in the
+ * early IDT. For simplicity, it's a real array with one entry point
+ * every nine bytes. That leaves room for an optional 'push $0' if the
+ * vector has no error code (two bytes), a 'push $vector_number' (two
+ * bytes), and a jump to the common entry code (up to five bytes).
+ */
+#define EARLY_IDT_HANDLER_SIZE 9
+
+#ifndef __ASSEMBLY__
+
+extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
+#ifdef CONFIG_TRACING
+# define trace_early_idt_handler_array early_idt_handler_array
+#endif
+
+/*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong.
+ */
+#define loadsegment(seg, value) \
+do { \
+ unsigned short __val = (value); \
+ \
+ asm volatile(" \n" \
+ "1: movl %k0,%%" #seg " \n" \
+ \
+ ".section .fixup,\"ax\" \n" \
+ "2: xorl %k0,%k0 \n" \
+ " jmp 1b \n" \
+ ".previous \n" \
+ \
+ _ASM_EXTABLE(1b, 2b) \
+ \
+ : "+r" (__val) : : "memory"); \
+} while (0)
+
+/*
+ * Save a segment register away:
+ */
+#define savesegment(seg, value) \
+ asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
+
+/*
+ * x86-32 user GS accessors:
+ */
+#ifdef CONFIG_X86_32
+# ifdef CONFIG_X86_32_LAZY_GS
+# define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; })
+# define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
+# define task_user_gs(tsk) ((tsk)->thread.gs)
+# define lazy_save_gs(v) savesegment(gs, (v))
+# define lazy_load_gs(v) loadsegment(gs, (v))
+# else /* X86_32_LAZY_GS */
+# define get_user_gs(regs) (u16)((regs)->gs)
+# define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
+# define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
+# define lazy_save_gs(v) do { } while (0)
+# define lazy_load_gs(v) do { } while (0)
+# endif /* X86_32_LAZY_GS */
+#endif /* X86_32 */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_SEGMENT_H */
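As an illustration of the GDT_ENTRY() constructor at the top of this file, the flat 4 GiB code and data descriptors used during early boot can be written as below; the flag values decode to the usual present/DPL0/granularity attribute bits, and the macro names here are a hedged sketch, not definitions from this header.

/* Hedged sketch: flat 32-bit code/data descriptors built with GDT_ENTRY(). */
/* 0xc09b: present, DPL 0, code, exec/read, 4 KiB granularity, 32-bit */
#define FLAT_BOOT_CS_DESC	GDT_ENTRY(0xc09b, 0, 0xfffff)
/* 0xc093: present, DPL 0, data, read/write, 4 KiB granularity, 32-bit */
#define FLAT_BOOT_DS_DESC	GDT_ENTRY(0xc093, 0, 0xfffff)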
diff --git a/kernel/arch/x86/include/asm/serial.h b/kernel/arch/x86/include/asm/serial.h
new file mode 100644
index 000000000..8378b8c91
--- /dev/null
+++ b/kernel/arch/x86/include/asm/serial.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_X86_SERIAL_H
+#define _ASM_X86_SERIAL_H
+
+/*
+ * This assumes you have a 1.8432 MHz clock for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires a faster clock.
+ */
+#define BASE_BAUD (1843200/16)
+
+/* Standard COM flags (except for COM4, because of the 8514 problem) */
+#ifdef CONFIG_SERIAL_DETECT_IRQ
+# define STD_COMX_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
+# define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | 0 | UPF_AUTO_IRQ)
+#else
+# define STD_COMX_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | 0 )
+# define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | 0 | 0 )
+#endif
+
+#define SERIAL_PORT_DFNS \
+ /* UART CLK PORT IRQ FLAGS */ \
+ { .uart = 0, BASE_BAUD, 0x3F8, 4, STD_COMX_FLAGS }, /* ttyS0 */ \
+ { .uart = 0, BASE_BAUD, 0x2F8, 3, STD_COMX_FLAGS }, /* ttyS1 */ \
+ { .uart = 0, BASE_BAUD, 0x3E8, 4, STD_COMX_FLAGS }, /* ttyS2 */ \
+ { .uart = 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
+
+#endif /* _ASM_X86_SERIAL_H */
diff --git a/kernel/arch/x86/include/asm/setup.h b/kernel/arch/x86/include/asm/setup.h
new file mode 100644
index 000000000..f69e06b28
--- /dev/null
+++ b/kernel/arch/x86/include/asm/setup.h
@@ -0,0 +1,131 @@
+#ifndef _ASM_X86_SETUP_H
+#define _ASM_X86_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 2048
+
+#include <linux/linkage.h>
+
+#ifdef __i386__
+
+#include <linux/pfn.h>
+/*
+ * Reserved space for vmalloc and iomap - defined in asm/page.h
+ */
+#define MAXMEM_PFN PFN_DOWN(MAXMEM)
+#define MAX_NONPAE_PFN (1 << 20)
+
+#endif /* __i386__ */
+
+#define PARAM_SIZE 4096 /* sizeof(struct boot_params) */
+
+#define OLD_CL_MAGIC 0xA33F
+#define OLD_CL_ADDRESS 0x020 /* Relative to real mode data */
+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
+
+#ifndef __ASSEMBLY__
+#include <asm/bootparam.h>
+#include <asm/x86_init.h>
+
+extern u64 relocated_ramdisk;
+
+/* Interrupt control for vSMPowered x86_64 systems */
+#ifdef CONFIG_X86_64
+void vsmp_init(void);
+#else
+static inline void vsmp_init(void) { }
+#endif
+
+void setup_bios_corruption_check(void);
+
+extern unsigned long saved_video_mode;
+
+extern void reserve_standard_io_resources(void);
+extern void i386_reserve_resources(void);
+extern void setup_default_timer_irq(void);
+
+#ifdef CONFIG_X86_INTEL_MID
+extern void x86_intel_mid_early_setup(void);
+#else
+static inline void x86_intel_mid_early_setup(void) { }
+#endif
+
+#ifdef CONFIG_X86_INTEL_CE
+extern void x86_ce4100_early_setup(void);
+#else
+static inline void x86_ce4100_early_setup(void) { }
+#endif
+
+#ifndef _SETUP
+
+#include <asm/espfix.h>
+
+/*
+ * This is set up by the setup-routine at boot-time
+ */
+extern struct boot_params boot_params;
+
+static inline bool kaslr_enabled(void)
+{
+ return !!(boot_params.hdr.loadflags & KASLR_FLAG);
+}
+
+/*
+ * Do NOT EVER look at the BIOS memory size location.
+ * It does not work on many machines.
+ */
+#define LOWMEMSIZE() (0x9f000)
+
+/* exceedingly early brk-like allocator */
+extern unsigned long _brk_end;
+void *extend_brk(size_t size, size_t align);
+
+/*
+ * Reserve space in the brk section. The name must be unique within
+ * the file, and somewhat descriptive. The size is in bytes. Must be
+ * used at file scope.
+ *
+ * (This uses a temporary function to wrap the asm so we can pass it the
+ * size parameter; otherwise we couldn't. We can't use a
+ * "section" attribute on a normal variable because it always ends up
+ * being @progbits, which ends up allocating space in the vmlinux
+ * executable.)
+ */
+#define RESERVE_BRK(name,sz) \
+ static void __section(.discard.text) __used notrace \
+ __brk_reservation_fn_##name##__(void) { \
+ asm volatile ( \
+ ".pushsection .brk_reservation,\"aw\",@nobits;" \
+ ".brk." #name ":" \
+ " 1:.skip %c0;" \
+ " .size .brk." #name ", . - 1b;" \
+ " .popsection" \
+ : : "i" (sz)); \
+ }
+
+/* Helper for reserving space for arrays of things */
+#define RESERVE_BRK_ARRAY(type, name, entries) \
+ type *name; \
+ RESERVE_BRK(name, sizeof(type) * entries)
+
+extern void probe_roms(void);
+#ifdef __i386__
+
+asmlinkage void __init i386_start_kernel(void);
+
+#else
+asmlinkage void __init x86_64_start_kernel(char *real_mode);
+asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
+
+#endif /* __i386__ */
+#endif /* _SETUP */
+#else
+#define RESERVE_BRK(name,sz) \
+ .pushsection .brk_reservation,"aw",@nobits; \
+.brk.name: \
+1: .skip sz; \
+ .size .brk.name,.-1b; \
+ .popsection
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_SETUP_H */
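RESERVE_BRK() above is meant to be used at file scope in early-boot code, with extend_brk() later handing out chunks from the reserved pool. A hedged usage sketch (the reservation name and sizes are illustrative only):

/* Hedged usage sketch: reserve 64 KiB of early brk space at file scope. */
RESERVE_BRK(my_early_pool, 65536);

static void __init my_early_setup(void)
{
	/* Carve a page-aligned 4 KiB chunk out of the brk reservation. */
	void *buf = extend_brk(4096, 4096);

	memset(buf, 0, 4096);
}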
diff --git a/kernel/arch/x86/include/asm/setup_arch.h b/kernel/arch/x86/include/asm/setup_arch.h
new file mode 100644
index 000000000..38846208b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/setup_arch.h
@@ -0,0 +1,3 @@
+/* Hook to call BIOS initialisation function */
+
+/* no action for generic */
diff --git a/kernel/arch/x86/include/asm/shmparam.h b/kernel/arch/x86/include/asm/shmparam.h
new file mode 100644
index 000000000..0880cf091
--- /dev/null
+++ b/kernel/arch/x86/include/asm/shmparam.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_SHMPARAM_H
+#define _ASM_X86_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* _ASM_X86_SHMPARAM_H */
diff --git a/kernel/arch/x86/include/asm/sigcontext.h b/kernel/arch/x86/include/asm/sigcontext.h
new file mode 100644
index 000000000..6fe6b182c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sigcontext.h
@@ -0,0 +1,79 @@
+#ifndef _ASM_X86_SIGCONTEXT_H
+#define _ASM_X86_SIGCONTEXT_H
+
+#include <uapi/asm/sigcontext.h>
+
+#ifdef __i386__
+struct sigcontext {
+ unsigned short gs, __gsh;
+ unsigned short fs, __fsh;
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned long di;
+ unsigned long si;
+ unsigned long bp;
+ unsigned long sp;
+ unsigned long bx;
+ unsigned long dx;
+ unsigned long cx;
+ unsigned long ax;
+ unsigned long trapno;
+ unsigned long err;
+ unsigned long ip;
+ unsigned short cs, __csh;
+ unsigned long flags;
+ unsigned long sp_at_signal;
+ unsigned short ss, __ssh;
+
+ /*
+ * fpstate is really (struct _fpstate *) or (struct _xstate *)
+ * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
+ * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
+ * of extended memory layout. See comments at the definition of
+ * (struct _fpx_sw_bytes)
+ */
+ void __user *fpstate; /* zero when no FPU/extended context */
+ unsigned long oldmask;
+ unsigned long cr2;
+};
+#else /* __i386__ */
+struct sigcontext {
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long di;
+ unsigned long si;
+ unsigned long bp;
+ unsigned long bx;
+ unsigned long dx;
+ unsigned long ax;
+ unsigned long cx;
+ unsigned long sp;
+ unsigned long ip;
+ unsigned long flags;
+ unsigned short cs;
+ unsigned short __pad2; /* Was called gs, but was always zero. */
+ unsigned short __pad1; /* Was called fs, but was always zero. */
+ unsigned short ss;
+ unsigned long err;
+ unsigned long trapno;
+ unsigned long oldmask;
+ unsigned long cr2;
+
+ /*
+ * fpstate is really (struct _fpstate *) or (struct _xstate *)
+ * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
+ * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
+ * of extended memory layout. See comments at the definition of
+ * (struct _fpx_sw_bytes)
+ */
+ void __user *fpstate; /* zero when no FPU/extended context */
+ unsigned long reserved1[8];
+};
+#endif /* !__i386__ */
+#endif /* _ASM_X86_SIGCONTEXT_H */
diff --git a/kernel/arch/x86/include/asm/sigframe.h b/kernel/arch/x86/include/asm/sigframe.h
new file mode 100644
index 000000000..7c7c27c97
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sigframe.h
@@ -0,0 +1,83 @@
+#ifndef _ASM_X86_SIGFRAME_H
+#define _ASM_X86_SIGFRAME_H
+
+#include <asm/sigcontext.h>
+#include <asm/siginfo.h>
+#include <asm/ucontext.h>
+
+#ifdef CONFIG_X86_32
+#define sigframe_ia32 sigframe
+#define rt_sigframe_ia32 rt_sigframe
+#define sigcontext_ia32 sigcontext
+#define _fpstate_ia32 _fpstate
+#define ucontext_ia32 ucontext
+#else /* !CONFIG_X86_32 */
+
+#ifdef CONFIG_IA32_EMULATION
+#include <asm/ia32.h>
+#endif /* CONFIG_IA32_EMULATION */
+
+#endif /* CONFIG_X86_32 */
+
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+struct sigframe_ia32 {
+ u32 pretcode;
+ int sig;
+ struct sigcontext_ia32 sc;
+ /*
+ * fpstate is unused. fpstate is moved/allocated after
+ * retcode[] below. This movement allows the FP state and the
+ * future state extensions (xsave) to stay together.
+ * At the same time, retaining the unused fpstate prevents changing
+ * the offset of extramask[] in the sigframe and thus prevents any
+ * legacy application from accessing/modifying it.
+ */
+ struct _fpstate_ia32 fpstate_unused;
+#ifdef CONFIG_IA32_EMULATION
+ unsigned int extramask[_COMPAT_NSIG_WORDS-1];
+#else /* !CONFIG_IA32_EMULATION */
+ unsigned long extramask[_NSIG_WORDS-1];
+#endif /* CONFIG_IA32_EMULATION */
+ char retcode[8];
+ /* fp state follows here */
+};
+
+struct rt_sigframe_ia32 {
+ u32 pretcode;
+ int sig;
+ u32 pinfo;
+ u32 puc;
+#ifdef CONFIG_IA32_EMULATION
+ compat_siginfo_t info;
+#else /* !CONFIG_IA32_EMULATION */
+ struct siginfo info;
+#endif /* CONFIG_IA32_EMULATION */
+ struct ucontext_ia32 uc;
+ char retcode[8];
+ /* fp state follows here */
+};
+#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
+
+#ifdef CONFIG_X86_64
+
+struct rt_sigframe {
+ char __user *pretcode;
+ struct ucontext uc;
+ struct siginfo info;
+ /* fp state follows here */
+};
+
+#ifdef CONFIG_X86_X32_ABI
+
+struct rt_sigframe_x32 {
+ u64 pretcode;
+ struct ucontext_x32 uc;
+ compat_siginfo_t info;
+ /* fp state follows here */
+};
+
+#endif /* CONFIG_X86_X32_ABI */
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_SIGFRAME_H */
diff --git a/kernel/arch/x86/include/asm/sighandling.h b/kernel/arch/x86/include/asm/sighandling.h
new file mode 100644
index 000000000..89db46752
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sighandling.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_X86_SIGHANDLING_H
+#define _ASM_X86_SIGHANDLING_H
+
+#include <linux/compiler.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+
+#include <asm/processor-flags.h>
+
+#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
+ X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
+ X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
+ X86_EFLAGS_CF | X86_EFLAGS_RF)
+
+void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc);
+int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+ struct pt_regs *regs, unsigned long mask);
+
+#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/kernel/arch/x86/include/asm/signal.h b/kernel/arch/x86/include/asm/signal.h
new file mode 100644
index 000000000..b1b08a28c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/signal.h
@@ -0,0 +1,117 @@
+#ifndef _ASM_X86_SIGNAL_H
+#define _ASM_X86_SIGNAL_H
+
+#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
+
+/* Most things should be clean enough to redefine this at will, if care
+ is taken to make libc match. */
+
+#define _NSIG 64
+
+#ifdef __i386__
+# define _NSIG_BPW 32
+#else
+# define _NSIG_BPW 64
+#endif
+
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+/*
+ * Because some traps use the IST stack, we must keep preemption
+ * disabled while calling do_trap(), but do_trap() may call
+ * force_sig_info() which will grab the signal spin_locks for the
+ * task, which in PREEMPT_RT_FULL are mutexes. When
+ * ARCH_RT_DELAYS_SIGNAL_SEND is defined, force_sig_info() will set
+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit from the
+ * trap.
+ */
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
+#define ARCH_RT_DELAYS_SIGNAL_SEND
+#endif
+
+#ifndef CONFIG_COMPAT
+typedef sigset_t compat_sigset_t;
+#endif
+
+#endif /* __ASSEMBLY__ */
+#include <uapi/asm/signal.h>
+#ifndef __ASSEMBLY__
+extern void do_notify_resume(struct pt_regs *, void *, __u32);
+
+#define __ARCH_HAS_SA_RESTORER
+
+#include <asm/sigcontext.h>
+
+#ifdef __i386__
+
+#define __HAVE_ARCH_SIG_BITOPS
+
+#define sigaddset(set,sig) \
+ (__builtin_constant_p(sig) \
+ ? __const_sigaddset((set), (sig)) \
+ : __gen_sigaddset((set), (sig)))
+
+static inline void __gen_sigaddset(sigset_t *set, int _sig)
+{
+ asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
+}
+
+static inline void __const_sigaddset(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW);
+}
+
+#define sigdelset(set, sig) \
+ (__builtin_constant_p(sig) \
+ ? __const_sigdelset((set), (sig)) \
+ : __gen_sigdelset((set), (sig)))
+
+
+static inline void __gen_sigdelset(sigset_t *set, int _sig)
+{
+ asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
+}
+
+static inline void __const_sigdelset(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW));
+}
+
+static inline int __const_sigismember(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
+}
+
+static inline int __gen_sigismember(sigset_t *set, int _sig)
+{
+ int ret;
+ asm("btl %2,%1\n\tsbbl %0,%0"
+ : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+ return ret;
+}
+
+#define sigismember(set, sig) \
+ (__builtin_constant_p(sig) \
+ ? __const_sigismember((set), (sig)) \
+ : __gen_sigismember((set), (sig)))
+
+struct pt_regs;
+
+#else /* __i386__ */
+
+#undef __HAVE_ARCH_SIG_BITOPS
+
+#endif /* !__i386__ */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_SIGNAL_H */
diff --git a/kernel/arch/x86/include/asm/simd.h b/kernel/arch/x86/include/asm/simd.h
new file mode 100644
index 000000000..ee80b92f0
--- /dev/null
+++ b/kernel/arch/x86/include/asm/simd.h
@@ -0,0 +1,11 @@
+
+#include <asm/i387.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ return irq_fpu_usable();
+}
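A typical usage pattern for may_use_simd(): test it, and only then bracket the vectorized work with kernel_fpu_begin()/kernel_fpu_end(), falling back to a scalar path otherwise. The checksum routine names in this sketch are placeholders:

/* Hedged sketch: guard SIMD work behind may_use_simd(). */
static void do_checksum(const u8 *data, size_t len)
{
	if (may_use_simd()) {
		kernel_fpu_begin();		/* save FPU/SIMD state */
		checksum_simd(data, len);	/* placeholder SIMD routine */
		kernel_fpu_end();
	} else {
		checksum_scalar(data, len);	/* placeholder fallback */
	}
}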
diff --git a/kernel/arch/x86/include/asm/smap.h b/kernel/arch/x86/include/asm/smap.h
new file mode 100644
index 000000000..ba665ebd1
--- /dev/null
+++ b/kernel/arch/x86/include/asm/smap.h
@@ -0,0 +1,79 @@
+/*
+ * Supervisor Mode Access Prevention support
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: H. Peter Anvin <hpa@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _ASM_X86_SMAP_H
+#define _ASM_X86_SMAP_H
+
+#include <linux/stringify.h>
+#include <asm/nops.h>
+#include <asm/cpufeature.h>
+
+/* "Raw" instruction opcodes */
+#define __ASM_CLAC .byte 0x0f,0x01,0xca
+#define __ASM_STAC .byte 0x0f,0x01,0xcb
+
+#ifdef __ASSEMBLY__
+
+#include <asm/alternative-asm.h>
+
+#ifdef CONFIG_X86_SMAP
+
+#define ASM_CLAC \
+ ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+
+#define ASM_STAC \
+ ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
+
+#else /* CONFIG_X86_SMAP */
+
+#define ASM_CLAC
+#define ASM_STAC
+
+#endif /* CONFIG_X86_SMAP */
+
+#else /* __ASSEMBLY__ */
+
+#include <asm/alternative.h>
+
+#ifdef CONFIG_X86_SMAP
+
+static __always_inline void clac(void)
+{
+ /* Note: a barrier is implicit in alternative() */
+ alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+}
+
+static __always_inline void stac(void)
+{
+ /* Note: a barrier is implicit in alternative() */
+ alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+}
+
+/* These macros can be used in asm() statements */
+#define ASM_CLAC \
+ ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+#define ASM_STAC \
+ ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+
+#else /* CONFIG_X86_SMAP */
+
+static inline void clac(void) { }
+static inline void stac(void) { }
+
+#define ASM_CLAC
+#define ASM_STAC
+
+#endif /* CONFIG_X86_SMAP */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_SMAP_H */
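The stac()/clac() pair above opens a narrow window in which supervisor code may touch user memory while SMAP is enabled. A hedged sketch of the pattern follows; real uaccess code never dereferences a user pointer directly like this, it adds exception-table fixups for faults.

/* Hedged sketch only: open the SMAP window around a user-memory access. */
static inline void probe_user_word(const u32 __user *uaddr, u32 *val)
{
	stac();					/* permit user accesses */
	*val = *(const u32 __force *)uaddr;	/* real code adds fault fixups */
	clac();					/* forbid them again */
}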
diff --git a/kernel/arch/x86/include/asm/smp.h b/kernel/arch/x86/include/asm/smp.h
new file mode 100644
index 000000000..17a8dced1
--- /dev/null
+++ b/kernel/arch/x86/include/asm/smp.h
@@ -0,0 +1,235 @@
+#ifndef _ASM_X86_SMP_H
+#define _ASM_X86_SMP_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+#include <asm/percpu.h>
+
+/*
+ * We need the APIC definitions automatically as part of 'smp.h'
+ */
+#ifdef CONFIG_X86_LOCAL_APIC
+# include <asm/mpspec.h>
+# include <asm/apic.h>
+# ifdef CONFIG_X86_IO_APIC
+# include <asm/io_apic.h>
+# endif
+#endif
+#include <asm/thread_info.h>
+#include <asm/cpumask.h>
+#include <asm/cpufeature.h>
+
+extern int smp_num_siblings;
+extern unsigned int num_processors;
+
+static inline bool cpu_has_ht_siblings(void)
+{
+ bool has_siblings = false;
+#ifdef CONFIG_SMP
+ has_siblings = cpu_has_ht && smp_num_siblings > 1;
+#endif
+ return has_siblings;
+}
+
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
+/* cpus sharing the last level cache: */
+DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
+DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+static inline struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return per_cpu(cpu_sibling_map, cpu);
+}
+
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+ return per_cpu(cpu_core_map, cpu);
+}
+
+static inline struct cpumask *cpu_llc_shared_mask(int cpu)
+{
+ return per_cpu(cpu_llc_shared_map, cpu);
+}
+
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
+DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
+#endif
+
+/* Static state in head.S used to set up a CPU */
+extern unsigned long stack_start; /* Initial stack pointer address */
+
+struct task_struct;
+
+struct smp_ops {
+ void (*smp_prepare_boot_cpu)(void);
+ void (*smp_prepare_cpus)(unsigned max_cpus);
+ void (*smp_cpus_done)(unsigned max_cpus);
+
+ void (*stop_other_cpus)(int wait);
+ void (*smp_send_reschedule)(int cpu);
+
+ int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int cpu);
+ void (*play_dead)(void);
+
+ void (*send_call_func_ipi)(const struct cpumask *mask);
+ void (*send_call_func_single_ipi)(int cpu);
+};
+
+/* Globals due to paravirt */
+extern void set_cpu_sibling_map(int cpu);
+
+#ifdef CONFIG_SMP
+#ifndef CONFIG_PARAVIRT
+#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
+#endif
+extern struct smp_ops smp_ops;
+
+static inline void smp_send_stop(void)
+{
+ smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+ smp_ops.stop_other_cpus(1);
+}
+
+static inline void smp_prepare_boot_cpu(void)
+{
+ smp_ops.smp_prepare_boot_cpu();
+}
+
+static inline void smp_prepare_cpus(unsigned int max_cpus)
+{
+ smp_ops.smp_prepare_cpus(max_cpus);
+}
+
+static inline void smp_cpus_done(unsigned int max_cpus)
+{
+ smp_ops.smp_cpus_done(max_cpus);
+}
+
+static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ return smp_ops.cpu_up(cpu, tidle);
+}
+
+static inline int __cpu_disable(void)
+{
+ return smp_ops.cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+ smp_ops.cpu_die(cpu);
+}
+
+static inline void play_dead(void)
+{
+ smp_ops.play_dead();
+}
+
+static inline void smp_send_reschedule(int cpu)
+{
+ smp_ops.smp_send_reschedule(cpu);
+}
+
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_ops.send_call_func_single_ipi(cpu);
+}
+
+static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_ops.send_call_func_ipi(mask);
+}
+
+void cpu_disable_common(void);
+void native_smp_prepare_boot_cpu(void);
+void native_smp_prepare_cpus(unsigned int max_cpus);
+void native_smp_cpus_done(unsigned int max_cpus);
+void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+int native_cpu_disable(void);
+int common_cpu_die(unsigned int cpu);
+void native_cpu_die(unsigned int cpu);
+void native_play_dead(void);
+void play_dead_common(void);
+void wbinvd_on_cpu(int cpu);
+int wbinvd_on_all_cpus(void);
+
+void native_send_call_func_ipi(const struct cpumask *mask);
+void native_send_call_func_single_ipi(int cpu);
+void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
+
+void smp_store_boot_cpu_info(void);
+void smp_store_cpu_info(int id);
+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
+
+#else /* !CONFIG_SMP */
+#define wbinvd_on_cpu(cpu) wbinvd()
+static inline int wbinvd_on_all_cpus(void)
+{
+ wbinvd();
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+extern unsigned disabled_cpus;
+
+#ifdef CONFIG_X86_32_SMP
+/*
+ * This function is needed by all SMP systems. It must _always_ be valid
+ * from the initial startup. We map APIC_BASE very early in page_setup(),
+ * so this is correct in the x86 case.
+ */
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
+extern int safe_smp_processor_id(void);
+
+#elif defined(CONFIG_X86_64_SMP)
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
+
+#define stack_smp_processor_id() \
+({ \
+ struct thread_info *ti; \
+ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+ ti->cpu; \
+})
+#define safe_smp_processor_id() smp_processor_id()
+
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#ifndef CONFIG_X86_64
+static inline int logical_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
+}
+
+#endif
+
+extern int hard_smp_processor_id(void);
+
+#else /* CONFIG_X86_LOCAL_APIC */
+
+# ifndef CONFIG_SMP
+# define hard_smp_processor_id() 0
+# endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_DEBUG_NMI_SELFTEST
+extern void nmi_selftest(void);
+#else
+#define nmi_selftest() do { } while (0)
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_SMP_H */
diff --git a/kernel/arch/x86/include/asm/sparsemem.h b/kernel/arch/x86/include/asm/sparsemem.h
new file mode 100644
index 000000000..4517d6b93
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sparsemem.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_X86_SPARSEMEM_H
+#define _ASM_X86_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * generic non-linear memory support:
+ *
+ * 1) we will not split memory into more chunks than will fit into the flags
+ * field of the struct page
+ *
+ * SECTION_SIZE_BITS 2^n: size of each section
+ * MAX_PHYSADDR_BITS 2^n: max size of physical address space
+ * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space
+ *
+ */
+
+#ifdef CONFIG_X86_32
+# ifdef CONFIG_X86_PAE
+# define SECTION_SIZE_BITS 29
+# define MAX_PHYSADDR_BITS 36
+# define MAX_PHYSMEM_BITS 36
+# else
+# define SECTION_SIZE_BITS 26
+# define MAX_PHYSADDR_BITS 32
+# define MAX_PHYSMEM_BITS 32
+# endif
+#else /* CONFIG_X86_32 */
+# define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */
+# define MAX_PHYSADDR_BITS 44
+# define MAX_PHYSMEM_BITS 46
+#endif
+
+#endif /* CONFIG_SPARSEMEM */
+#endif /* _ASM_X86_SPARSEMEM_H */
diff --git a/kernel/arch/x86/include/asm/special_insns.h b/kernel/arch/x86/include/asm/special_insns.h
new file mode 100644
index 000000000..aeb4666e0
--- /dev/null
+++ b/kernel/arch/x86/include/asm/special_insns.h
@@ -0,0 +1,231 @@
+#ifndef _ASM_X86_SPECIAL_INSNS_H
+#define _ASM_X86_SPECIAL_INSNS_H
+
+
+#ifdef __KERNEL__
+
+#include <asm/nops.h>
+
+static inline void native_clts(void)
+{
+ asm volatile("clts");
+}
+
+/*
+ * Volatile isn't enough to prevent the compiler from reordering the
+ * read/write functions for the control registers and messing everything up.
+ * A memory clobber would solve the problem, but would prevent reordering of
+ * all loads and stores around it, which can hurt performance. The solution is
+ * to use a variable and mimic reads and writes to it to enforce serialization.
+ */
+extern unsigned long __force_order;
+
+static inline unsigned long native_read_cr0(void)
+{
+ unsigned long val;
+ asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return val;
+}
+
+static inline void native_write_cr0(unsigned long val)
+{
+ asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr2(void)
+{
+ unsigned long val;
+ asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return val;
+}
+
+static inline void native_write_cr2(unsigned long val)
+{
+ asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr3(void)
+{
+ unsigned long val;
+ asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return val;
+}
+
+static inline void native_write_cr3(unsigned long val)
+{
+ asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
+}
+
+static inline unsigned long native_read_cr4(void)
+{
+ unsigned long val;
+ asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
+ return val;
+}
+
+static inline unsigned long native_read_cr4_safe(void)
+{
+ unsigned long val;
+ /* This could fault if %cr4 does not exist. On x86_64, CR4 always
+ * exists, so it will never fault. */
+#ifdef CONFIG_X86_32
+ asm volatile("1: mov %%cr4, %0\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b)
+ : "=r" (val), "=m" (__force_order) : "0" (0));
+#else
+ val = native_read_cr4();
+#endif
+ return val;
+}
+
+static inline void native_write_cr4(unsigned long val)
+{
+ asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
+}
+
+#ifdef CONFIG_X86_64
+static inline unsigned long native_read_cr8(void)
+{
+ unsigned long cr8;
+ asm volatile("movq %%cr8,%0" : "=r" (cr8));
+ return cr8;
+}
+
+static inline void native_write_cr8(unsigned long val)
+{
+ asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
+}
+#endif
+
+static inline void native_wbinvd(void)
+{
+ asm volatile("wbinvd": : :"memory");
+}
+
+extern asmlinkage void native_load_gs_index(unsigned);
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
+static inline unsigned long read_cr0(void)
+{
+ return native_read_cr0();
+}
+
+static inline void write_cr0(unsigned long x)
+{
+ native_write_cr0(x);
+}
+
+static inline unsigned long read_cr2(void)
+{
+ return native_read_cr2();
+}
+
+static inline void write_cr2(unsigned long x)
+{
+ native_write_cr2(x);
+}
+
+static inline unsigned long read_cr3(void)
+{
+ return native_read_cr3();
+}
+
+static inline void write_cr3(unsigned long x)
+{
+ native_write_cr3(x);
+}
+
+static inline unsigned long __read_cr4(void)
+{
+ return native_read_cr4();
+}
+
+static inline unsigned long __read_cr4_safe(void)
+{
+ return native_read_cr4_safe();
+}
+
+static inline void __write_cr4(unsigned long x)
+{
+ native_write_cr4(x);
+}
+
+static inline void wbinvd(void)
+{
+ native_wbinvd();
+}
+
+#ifdef CONFIG_X86_64
+
+static inline unsigned long read_cr8(void)
+{
+ return native_read_cr8();
+}
+
+static inline void write_cr8(unsigned long x)
+{
+ native_write_cr8(x);
+}
+
+static inline void load_gs_index(unsigned selector)
+{
+ native_load_gs_index(selector);
+}
+
+#endif
+
+/* Clear the 'TS' bit */
+static inline void clts(void)
+{
+ native_clts();
+}
+
+#endif /* CONFIG_PARAVIRT */
+
+#define stts() write_cr0(read_cr0() | X86_CR0_TS)
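+
+/*
+ * Usage sketch (illustrative only; the toy_* helpers are made up): the
+ * lazy-FPU pattern that stts()/clts() serve. Setting CR0.TS makes the next
+ * FPU instruction raise #NM, so the kernel can defer restoring FPU state
+ * until a task actually touches the FPU; clearing TS re-enables it.
+ */
+static inline void toy_drop_fpu(void)
+{
+ stts(); /* next FPU instruction will trap with #NM */
+}
+
+static inline void toy_take_fpu(void)
+{
+ clts(); /* FPU usable again without trapping */
+}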
+
+static inline void clflush(volatile void *__p)
+{
+ asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
+}
+
+static inline void clflushopt(volatile void *__p)
+{
+ alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
+ ".byte 0x66; clflush %P0",
+ X86_FEATURE_CLFLUSHOPT,
+ "+m" (*(volatile char __force *)__p));
+}
+
+static inline void clwb(volatile void *__p)
+{
+ volatile struct { char x[64]; } *p = __p;
+
+ asm volatile(ALTERNATIVE_2(
+ ".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
+ ".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
+ X86_FEATURE_CLFLUSHOPT,
+ ".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
+ X86_FEATURE_CLWB)
+ : [p] "+m" (*p)
+ : [pax] "a" (p));
+}
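+
+/*
+ * Usage sketch (illustrative; toy_flush_buffer() is not a real kernel helper
+ * and the 64-byte line size is assumed only for this example): flushing an
+ * arbitrarily sized buffer one cache line at a time with clflush().
+ */
+static inline void toy_flush_buffer(void *addr, unsigned long len)
+{
+ char *p = (char *)((unsigned long)addr & ~63UL); /* round down to a line */
+ char *end = (char *)addr + len;
+
+ for (; p < end; p += 64)
+ clflush(p);
+}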
+
+static inline void pcommit_sfence(void)
+{
+ alternative(ASM_NOP7,
+ ".byte 0x66, 0x0f, 0xae, 0xf8\n\t" /* pcommit */
+ "sfence",
+ X86_FEATURE_PCOMMIT);
+}
+
+#define nop() asm volatile ("nop")
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_SPECIAL_INSNS_H */
diff --git a/kernel/arch/x86/include/asm/spinlock.h b/kernel/arch/x86/include/asm/spinlock.h
new file mode 100644
index 000000000..64b611782
--- /dev/null
+++ b/kernel/arch/x86/include/asm/spinlock.h
@@ -0,0 +1,223 @@
+#ifndef _ASM_X86_SPINLOCK_H
+#define _ASM_X86_SPINLOCK_H
+
+#include <linux/jump_label.h>
+#include <linux/atomic.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <linux/compiler.h>
+#include <asm/paravirt.h>
+#include <asm/bitops.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ *
+ * Simple spin lock operations. There are two variants, one clears IRQs
+ * on the local processor, one does not.
+ *
+ * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+
+#ifdef CONFIG_X86_32
+# define LOCK_PTR_REG "a"
+#else
+# define LOCK_PTR_REG "D"
+#endif
+
+#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
+/*
+ * On PPro SMP, we use a locked operation to unlock
+ * (PPro errata 66, 92)
+ */
+# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
+#else
+# define UNLOCK_LOCK_PREFIX
+#endif
+
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+{
+ set_bit(0, (volatile unsigned long *)&lock->tickets.head);
+}
+
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+ __ticket_t ticket)
+{
+}
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+ __ticket_t ticket)
+{
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline int __tickets_equal(__ticket_t one, __ticket_t two)
+{
+ return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+ __ticket_t head)
+{
+ if (head & TICKET_SLOWPATH_FLAG) {
+ arch_spinlock_t old, new;
+
+ old.tickets.head = head;
+ new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
+ old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
+ new.tickets.tail = old.tickets.tail;
+
+ /* try to clear slowpath flag when there are no contenders */
+ cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
+ }
+}
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return __tickets_equal(lock.tickets.head, lock.tickets.tail);
+}
+
+/*
+ * Ticket locks are conceptually two parts, one indicating the current head of
+ * the queue, and the other indicating the current tail. The lock is acquired
+ * by atomically noting the tail and incrementing it by one (thus adding
+ * ourselves to the queue and noting our position), then waiting until the head
+ * becomes equal to the initial value of the tail.
+ *
+ * We use an xadd covering *both* parts of the lock, to increment the tail and
+ * also load the position of the head, which takes care of memory ordering
+ * issues and should be optimal for the uncontended case. Note the tail must be
+ * in the high part, because a wide xadd increment of the low part would carry
+ * up and contaminate the high part.
+ */
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
+
+ inc = xadd(&lock->tickets, inc);
+ if (likely(inc.head == inc.tail))
+ goto out;
+
+ for (;;) {
+ unsigned count = SPIN_THRESHOLD;
+
+ do {
+ inc.head = READ_ONCE(lock->tickets.head);
+ if (__tickets_equal(inc.head, inc.tail))
+ goto clear_slowpath;
+ cpu_relax();
+ } while (--count);
+ __ticket_lock_spinning(lock, inc.tail);
+ }
+clear_slowpath:
+ __ticket_check_and_clear_slowpath(lock, inc.head);
+out:
+ barrier(); /* make sure nothing creeps before the lock is taken */
+}
+
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ arch_spinlock_t old, new;
+
+ old.tickets = READ_ONCE(lock->tickets);
+ if (!__tickets_equal(old.tickets.head, old.tickets.tail))
+ return 0;
+
+ new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
+ new.head_tail &= ~TICKET_SLOWPATH_FLAG;
+
+ /* cmpxchg is a full barrier, so nothing can move before it */
+ return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
+}
+
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ if (TICKET_SLOWPATH_FLAG &&
+ static_key_false(&paravirt_ticketlocks_enabled)) {
+ __ticket_t head;
+
+ BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+ head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
+
+ if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
+ head &= ~TICKET_SLOWPATH_FLAG;
+ __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
+ }
+ } else
+ __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
+}
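+
+/*
+ * Illustrative sketch of the fast path described above (toy_ticket_lock() is
+ * made up; the real entry point is arch_spin_lock()): because the tail sits
+ * in the high half of head_tail, a single xadd both takes a ticket and tells
+ * us the head that was current at that moment. The paravirt slowpath is
+ * simply left out here.
+ */
+static __always_inline void toy_ticket_lock(arch_spinlock_t *lock)
+{
+ register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
+
+ inc = xadd(&lock->tickets, inc); /* our ticket + the old head */
+
+ while (!__tickets_equal(inc.head, inc.tail)) { /* spin until served */
+ cpu_relax();
+ inc.head = READ_ONCE(lock->tickets.head);
+ }
+ barrier(); /* critical section starts here */
+}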
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+
+ return !__tickets_equal(tmp.tail, tmp.head);
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+
+ tmp.head &= ~TICKET_SLOWPATH_FLAG;
+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
+}
+#define arch_spin_is_contended arch_spin_is_contended
+
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+ unsigned long flags)
+{
+ arch_spin_lock(lock);
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ __ticket_t head = READ_ONCE(lock->tickets.head);
+
+ for (;;) {
+ struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+ /*
+ * We need to check "unlocked" in a loop; tmp.head == head
+ * can be a false positive because of overflow.
+ */
+ if (__tickets_equal(tmp.head, tmp.tail) ||
+ !__tickets_equal(tmp.head, head))
+ break;
+
+ cpu_relax();
+ }
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ *
+ * On x86, we implement read-write locks using the generic qrwlock with
+ * x86 specific optimization.
+ */
+
+#include <asm/qrwlock.h>
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* _ASM_X86_SPINLOCK_H */
diff --git a/kernel/arch/x86/include/asm/spinlock_types.h b/kernel/arch/x86/include/asm/spinlock_types.h
new file mode 100644
index 000000000..5f9d7572d
--- /dev/null
+++ b/kernel/arch/x86/include/asm/spinlock_types.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_X86_SPINLOCK_TYPES_H
+#define _ASM_X86_SPINLOCK_TYPES_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC 2
+#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
+#else
+#define __TICKET_LOCK_INC 1
+#define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
+typedef u8 __ticket_t;
+typedef u16 __ticketpair_t;
+#else
+typedef u16 __ticket_t;
+typedef u32 __ticketpair_t;
+#endif
+
+#define TICKET_LOCK_INC ((__ticket_t)__TICKET_LOCK_INC)
+
+#define TICKET_SHIFT (sizeof(__ticket_t) * 8)
+
+typedef struct arch_spinlock {
+ union {
+ __ticketpair_t head_tail;
+ struct __raw_tickets {
+ __ticket_t head, tail;
+ } tickets;
+ };
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
+
+#include <asm-generic/qrwlock_types.h>
+
+#endif /* _ASM_X86_SPINLOCK_TYPES_H */
diff --git a/kernel/arch/x86/include/asm/sta2x11.h b/kernel/arch/x86/include/asm/sta2x11.h
new file mode 100644
index 000000000..e9d32df89
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sta2x11.h
@@ -0,0 +1,12 @@
+/*
+ * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
+ */
+#ifndef __ASM_STA2X11_H
+#define __ASM_STA2X11_H
+
+#include <linux/pci.h>
+
+/* This needs to be called from the MFD to configure its sub-devices */
+struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
+
+#endif /* __ASM_STA2X11_H */
diff --git a/kernel/arch/x86/include/asm/stackprotector.h b/kernel/arch/x86/include/asm/stackprotector.h
new file mode 100644
index 000000000..64fb5cbe5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/stackprotector.h
@@ -0,0 +1,129 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary
+ * and unfortunately gcc requires it to be at a fixed offset from %gs.
+ * On x86_64, the offset is 40 bytes and on x86_32 20 bytes. x86_64
+ * and x86_32 use segment registers differently and thus handle this
+ * requirement differently.
+ *
+ * On x86_64, %gs is shared by percpu area and stack canary. All
+ * percpu symbols are zero based and %gs points to the base of percpu
+ * area. The first occupant of the percpu area is always
+ * irq_stack_union which contains stack_canary at offset 40. Userland
+ * %gs is always saved and restored on kernel entry and exit using
+ * swapgs, so stack protector doesn't add any complexity there.
+ *
+ * On x86_32, it's slightly more complicated. As in x86_64, %gs is
+ * used for userland TLS. Unfortunately, some processors are much
+ * slower at loading segment registers with a different value when
+ * entering and leaving the kernel, so the kernel uses %fs for percpu
+ * area and manages %gs lazily so that %gs is switched only when
+ * necessary, usually during task switch.
+ *
+ * As gcc requires the stack canary at %gs:20, %gs can't be managed
+ * lazily if stack protector is enabled, so the kernel saves and
+ * restores userland %gs on kernel entry and exit. This behavior is
+ * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
+ * system.h to hide the details.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+
+#include <asm/tsc.h>
+#include <asm/processor.h>
+#include <asm/percpu.h>
+#include <asm/desc.h>
+#include <linux/random.h>
+
+/*
+ * 24 byte read-only segment initializer for stack canary. Linker
+ * can't handle the address bit shifting. Address will be set in
+ * head_32 for boot CPU and setup_per_cpu_areas() for others.
+ */
+#define GDT_STACK_CANARY_INIT \
+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ u64 uninitialized_var(canary);
+ u64 tsc;
+
+#ifdef CONFIG_X86_64
+ BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+#endif
+ /*
+ * We use both the random pool and the current TSC as sources
+ * of randomness. The TSC only matters for very early init,
+ * where it already has some randomness on most systems. Later
+ * on during the bootup the random pool has true entropy too.
+ *
+ * For preempt-rt we need to weaken the randomness a bit, as
+ * we can't call into the random generator from atomic context
+ * due to locking constraints. We just leave canary
+ * uninitialized and use the TSC based randomness on top of
+ * it.
+ */
+#ifndef CONFIG_PREEMPT_RT_FULL
+ get_random_bytes(&canary, sizeof(canary));
+#endif
+ tsc = __native_read_tsc();
+ canary += tsc + (tsc << 32UL);
+
+ current->stack_canary = canary;
+#ifdef CONFIG_X86_64
+ this_cpu_write(irq_stack_union.stack_canary, canary);
+#else
+ this_cpu_write(stack_canary.canary, canary);
+#endif
+}
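+
+/*
+ * Conceptual sketch (illustrative only; this is not what gcc literally
+ * emits, and toy_canary_pattern() is made up): with stack protector on,
+ * every instrumented function behaves roughly like this -- copy the canary
+ * onto the stack on entry, compare on exit, and bail out on a mismatch.
+ * On x86_64 the load and compare really go through %gs:40, i.e.
+ * irq_stack_union.stack_canary, as described at the top of this file.
+ */
+static __always_inline void toy_canary_pattern(void)
+{
+ unsigned long canary = current->stack_canary; /* "%gs:40", conceptually */
+ char buf[16];
+
+ buf[0] = 0; /* ...function body that might overflow buf... */
+
+ if (canary != current->stack_canary) { /* epilogue check */
+ /* the real instrumentation calls __stack_chk_fail() here */
+ for (;;)
+ cpu_relax();
+ }
+}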
+
+static inline void setup_stack_canary_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+ unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
+ struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
+ struct desc_struct desc;
+
+ desc = gdt_table[GDT_ENTRY_STACK_CANARY];
+ set_desc_base(&desc, canary);
+ write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
+#endif
+}
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+ asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
+#endif
+}
+
+#else /* CC_STACKPROTECTOR */
+
+#define GDT_STACK_CANARY_INIT
+
+/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
+
+static inline void setup_stack_canary_segment(int cpu)
+{ }
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+ asm volatile ("mov %0, %%gs" : : "r" (0));
+#endif
+}
+
+#endif /* CC_STACKPROTECTOR */
+#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/kernel/arch/x86/include/asm/stacktrace.h b/kernel/arch/x86/include/asm/stacktrace.h
new file mode 100644
index 000000000..70bbe3904
--- /dev/null
+++ b/kernel/arch/x86/include/asm/stacktrace.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
+
+#ifndef _ASM_X86_STACKTRACE_H
+#define _ASM_X86_STACKTRACE_H
+
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+
+extern int kstack_depth_to_print;
+
+struct thread_info;
+struct stacktrace_ops;
+
+typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+ unsigned long *stack,
+ unsigned long bp,
+ const struct stacktrace_ops *ops,
+ void *data,
+ unsigned long *end,
+ int *graph);
+
+extern unsigned long
+print_context_stack(struct thread_info *tinfo,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph);
+
+extern unsigned long
+print_context_stack_bp(struct thread_info *tinfo,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph);
+
+/* Generic stack tracer with callbacks */
+
+struct stacktrace_ops {
+ void (*address)(void *data, unsigned long address, int reliable);
+ /* On negative return stop dumping */
+ int (*stack)(void *data, char *name);
+ walk_stack_t walk_stack;
+};
+
+void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data);
+
+#ifdef CONFIG_X86_32
+#define STACKSLOTS_PER_LINE 8
+#define get_bp(bp) asm("movl %%ebp, %0" : "=r" (bp) :)
+#else
+#define STACKSLOTS_PER_LINE 4
+#define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
+#endif
+
+#ifdef CONFIG_FRAME_POINTER
+static inline unsigned long
+stack_frame(struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long bp;
+
+ if (regs)
+ return regs->bp;
+
+ if (task == current) {
+ /* Grab bp right from our regs */
+ get_bp(bp);
+ return bp;
+ }
+
+ /* bp is the last reg pushed by switch_to */
+ return *(unsigned long *)task->thread.sp;
+}
+#else
+static inline unsigned long
+stack_frame(struct task_struct *task, struct pt_regs *regs)
+{
+ return 0;
+}
+#endif
+
+extern void
+show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *stack, unsigned long bp, char *log_lvl);
+
+extern void
+show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *sp, unsigned long bp, char *log_lvl);
+
+extern unsigned int code_bytes;
+
+/* The form of the top of the frame on the stack */
+struct stack_frame {
+ struct stack_frame *next_frame;
+ unsigned long return_address;
+};
+
+struct stack_frame_ia32 {
+ u32 next_frame;
+ u32 return_address;
+};
+
+static inline unsigned long caller_frame_pointer(void)
+{
+ struct stack_frame *frame;
+
+ get_bp(frame);
+
+#ifdef CONFIG_FRAME_POINTER
+ frame = frame->next_frame;
+#endif
+
+ return (unsigned long)frame;
+}
+
+#endif /* _ASM_X86_STACKTRACE_H */
diff --git a/kernel/arch/x86/include/asm/string.h b/kernel/arch/x86/include/asm/string.h
new file mode 100644
index 000000000..09224d7a5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/string.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_32
+# include <asm/string_32.h>
+#else
+# include <asm/string_64.h>
+#endif
diff --git a/kernel/arch/x86/include/asm/string_32.h b/kernel/arch/x86/include/asm/string_32.h
new file mode 100644
index 000000000..3d3e8353e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/string_32.h
@@ -0,0 +1,342 @@
+#ifndef _ASM_X86_STRING_32_H
+#define _ASM_X86_STRING_32_H
+
+#ifdef __KERNEL__
+
+/* Let gcc decide whether to inline or use the out of line functions */
+
+#define __HAVE_ARCH_STRCPY
+extern char *strcpy(char *dest, const char *src);
+
+#define __HAVE_ARCH_STRNCPY
+extern char *strncpy(char *dest, const char *src, size_t count);
+
+#define __HAVE_ARCH_STRCAT
+extern char *strcat(char *dest, const char *src);
+
+#define __HAVE_ARCH_STRNCAT
+extern char *strncat(char *dest, const char *src, size_t count);
+
+#define __HAVE_ARCH_STRCMP
+extern int strcmp(const char *cs, const char *ct);
+
+#define __HAVE_ARCH_STRNCMP
+extern int strncmp(const char *cs, const char *ct, size_t count);
+
+#define __HAVE_ARCH_STRCHR
+extern char *strchr(const char *s, int c);
+
+#define __HAVE_ARCH_STRLEN
+extern size_t strlen(const char *s);
+
+static __always_inline void *__memcpy(void *to, const void *from, size_t n)
+{
+ int d0, d1, d2;
+ asm volatile("rep ; movsl\n\t"
+ "movl %4,%%ecx\n\t"
+ "andl $3,%%ecx\n\t"
+ "jz 1f\n\t"
+ "rep ; movsb\n\t"
+ "1:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
+ : "memory");
+ return to;
+}
+
+/*
+ * This looks ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+static __always_inline void *__constant_memcpy(void *to, const void *from,
+ size_t n)
+{
+ long esi, edi;
+ if (!n)
+ return to;
+
+ switch (n) {
+ case 1:
+ *(char *)to = *(char *)from;
+ return to;
+ case 2:
+ *(short *)to = *(short *)from;
+ return to;
+ case 4:
+ *(int *)to = *(int *)from;
+ return to;
+ case 3:
+ *(short *)to = *(short *)from;
+ *((char *)to + 2) = *((char *)from + 2);
+ return to;
+ case 5:
+ *(int *)to = *(int *)from;
+ *((char *)to + 4) = *((char *)from + 4);
+ return to;
+ case 6:
+ *(int *)to = *(int *)from;
+ *((short *)to + 2) = *((short *)from + 2);
+ return to;
+ case 8:
+ *(int *)to = *(int *)from;
+ *((int *)to + 1) = *((int *)from + 1);
+ return to;
+ }
+
+ esi = (long)from;
+ edi = (long)to;
+ if (n >= 5 * 4) {
+ /* large block: use rep prefix */
+ int ecx;
+ asm volatile("rep ; movsl"
+ : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
+ : "0" (n / 4), "1" (edi), "2" (esi)
+ : "memory"
+ );
+ } else {
+ /* small block: don't clobber ecx + smaller code */
+ if (n >= 4 * 4)
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ if (n >= 3 * 4)
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ if (n >= 2 * 4)
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ if (n >= 1 * 4)
+ asm volatile("movsl"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ }
+ switch (n % 4) {
+ /* tail */
+ case 0:
+ return to;
+ case 1:
+ asm volatile("movsb"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ return to;
+ case 2:
+ asm volatile("movsw"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ return to;
+ default:
+ asm volatile("movsw\n\tmovsb"
+ : "=&D"(edi), "=&S"(esi)
+ : "0"(edi), "1"(esi)
+ : "memory");
+ return to;
+ }
+}
+
+#define __HAVE_ARCH_MEMCPY
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+/*
+ * This CPU favours 3DNow strongly (e.g. AMD Athlon)
+ */
+
+static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
+{
+ if (len < 512)
+ return __constant_memcpy(to, from, len);
+ return _mmx_memcpy(to, from, len);
+}
+
+static inline void *__memcpy3d(void *to, const void *from, size_t len)
+{
+ if (len < 512)
+ return __memcpy(to, from, len);
+ return _mmx_memcpy(to, from, len);
+}
+
+#define memcpy(t, f, n) \
+ (__builtin_constant_p((n)) \
+ ? __constant_memcpy3d((t), (f), (n)) \
+ : __memcpy3d((t), (f), (n)))
+
+#else
+
+/*
+ * No 3DNow!
+ */
+
+#ifndef CONFIG_KMEMCHECK
+
+#if (__GNUC__ >= 4)
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
+#else
+#define memcpy(t, f, n) \
+ (__builtin_constant_p((n)) \
+ ? __constant_memcpy((t), (f), (n)) \
+ : __memcpy((t), (f), (n)))
+#endif
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(t, f, n) __memcpy((t), (f), (n))
+#endif
+
+#endif
+
+#define __HAVE_ARCH_MEMMOVE
+void *memmove(void *dest, const void *src, size_t n);
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *cs, int c, size_t count);
+
+static inline void *__memset_generic(void *s, char c, size_t count)
+{
+ int d0, d1;
+ asm volatile("rep\n\t"
+ "stosb"
+ : "=&c" (d0), "=&D" (d1)
+ : "a" (c), "1" (s), "0" (count)
+ : "memory");
+ return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s, c, count) __memset_generic((s), (c), (count))
+
+/*
+ * memset(x, 0, y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time.
+ */
+static __always_inline
+void *__constant_c_memset(void *s, unsigned long c, size_t count)
+{
+ int d0, d1;
+ asm volatile("rep ; stosl\n\t"
+ "testb $2,%b3\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b3\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1)
+ : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
+ : "memory");
+ return s;
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+extern size_t strnlen(const char *s, size_t count);
+/* end of additional stuff */
+
+#define __HAVE_ARCH_STRSTR
+extern char *strstr(const char *cs, const char *ct);
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as we by now know that both pattern and count are constant.
+ */
+static __always_inline
+void *__constant_c_and_count_memset(void *s, unsigned long pattern,
+ size_t count)
+{
+ switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern & 0xff;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern & 0xffff;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern & 0xffff;
+ *((unsigned char *)s + 2) = pattern & 0xff;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ }
+
+#define COMMON(x) \
+ asm volatile("rep ; stosl" \
+ x \
+ : "=&c" (d0), "=&D" (d1) \
+ : "a" (eax), "0" (count/4), "1" ((long)s) \
+ : "memory")
+
+ {
+ int d0, d1;
+#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
+ /* Workaround for broken gcc 4.0 */
+ register unsigned long eax asm("%eax") = pattern;
+#else
+ unsigned long eax = pattern;
+#endif
+
+ switch (count % 4) {
+ case 0:
+ COMMON("");
+ return s;
+ case 1:
+ COMMON("\n\tstosb");
+ return s;
+ case 2:
+ COMMON("\n\tstosw");
+ return s;
+ default:
+ COMMON("\n\tstosw\n\tstosb");
+ return s;
+ }
+ }
+
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+ (__builtin_constant_p(count) \
+ ? __constant_c_and_count_memset((s), (c), (count)) \
+ : __constant_c_memset((s), (c), (count)))
+
+#define __memset(s, c, count) \
+ (__builtin_constant_p(count) \
+ ? __constant_count_memset((s), (c), (count)) \
+ : __memset_generic((s), (c), (count)))
+
+#define __HAVE_ARCH_MEMSET
+#if (__GNUC__ >= 4)
+#define memset(s, c, count) __builtin_memset(s, c, count)
+#else
+#define memset(s, c, count) \
+ (__builtin_constant_p(c) \
+ ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
+ (count)) \
+ : __memset((s), (c), (count)))
+#endif
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+extern void *memscan(void *addr, int c, size_t size);
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_STRING_32_H */
diff --git a/kernel/arch/x86/include/asm/string_64.h b/kernel/arch/x86/include/asm/string_64.h
new file mode 100644
index 000000000..e46611969
--- /dev/null
+++ b/kernel/arch/x86/include/asm/string_64.h
@@ -0,0 +1,84 @@
+#ifndef _ASM_X86_STRING_64_H
+#define _ASM_X86_STRING_64_H
+
+#ifdef __KERNEL__
+
+/* Written 2002 by Andi Kleen */
+
+/* Only used for special circumstances. Stolen from i386/string.h */
+static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
+{
+ unsigned long d0, d1, d2;
+ asm volatile("rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
+ : "memory");
+ return to;
+}
+
+/* Even with __builtin_ the compiler may decide to use the out of line
+ function. */
+
+#define __HAVE_ARCH_MEMCPY 1
+extern void *__memcpy(void *to, const void *from, size_t len);
+
+#ifndef CONFIG_KMEMCHECK
+#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
+extern void *memcpy(void *to, const void *from, size_t len);
+#else
+#define memcpy(dst, src, len) \
+({ \
+ size_t __len = (len); \
+ void *__ret; \
+ if (__builtin_constant_p(len) && __len >= 64) \
+ __ret = __memcpy((dst), (src), __len); \
+ else \
+ __ret = __builtin_memcpy((dst), (src), __len); \
+ __ret; \
+})
+#endif
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
+#endif
+
+#define __HAVE_ARCH_MEMSET
+void *memset(void *s, int c, size_t n);
+void *__memset(void *s, int c, size_t n);
+
+#define __HAVE_ARCH_MEMMOVE
+void *memmove(void *dest, const void *src, size_t count);
+void *__memmove(void *dest, const void *src, size_t count);
+
+int memcmp(const void *cs, const void *ct, size_t count);
+size_t strlen(const char *s);
+char *strcpy(char *dest, const char *src);
+char *strcat(char *dest, const char *src);
+int strcmp(const char *cs, const char *ct);
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#undef memcpy
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_STRING_64_H */
diff --git a/kernel/arch/x86/include/asm/suspend.h b/kernel/arch/x86/include/asm/suspend.h
new file mode 100644
index 000000000..2fab6c2c3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/suspend.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_X86_32
+# include <asm/suspend_32.h>
+#else
+# include <asm/suspend_64.h>
+#endif
diff --git a/kernel/arch/x86/include/asm/suspend_32.h b/kernel/arch/x86/include/asm/suspend_32.h
new file mode 100644
index 000000000..552d6c90a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/suspend_32.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2001-2002 Pavel Machek <pavel@suse.cz>
+ * Based on code
+ * Copyright 2001 Patrick Mochel <mochel@osdl.org>
+ */
+#ifndef _ASM_X86_SUSPEND_32_H
+#define _ASM_X86_SUSPEND_32_H
+
+#include <asm/desc.h>
+#include <asm/i387.h>
+
+/* image of the saved processor state */
+struct saved_context {
+ u16 es, fs, gs, ss;
+ unsigned long cr0, cr2, cr3, cr4;
+ u64 misc_enable;
+ bool misc_enable_saved;
+ struct desc_ptr gdt_desc;
+ struct desc_ptr idt;
+ u16 ldt;
+ u16 tss;
+ unsigned long tr;
+ unsigned long safety;
+ unsigned long return_address;
+} __attribute__((packed));
+
+#endif /* _ASM_X86_SUSPEND_32_H */
diff --git a/kernel/arch/x86/include/asm/suspend_64.h b/kernel/arch/x86/include/asm/suspend_64.h
new file mode 100644
index 000000000..bc6232834
--- /dev/null
+++ b/kernel/arch/x86/include/asm/suspend_64.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2001-2003 Pavel Machek <pavel@suse.cz>
+ * Based on code
+ * Copyright 2001 Patrick Mochel <mochel@osdl.org>
+ */
+#ifndef _ASM_X86_SUSPEND_64_H
+#define _ASM_X86_SUSPEND_64_H
+
+#include <asm/desc.h>
+#include <asm/i387.h>
+
+/*
+ * Image of the saved processor state, used by the low level ACPI suspend to
+ * RAM code and by the low level hibernation code.
+ *
+ * If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that
+ * __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c,
+ * still work as required.
+ */
+struct saved_context {
+ struct pt_regs regs;
+ u16 ds, es, fs, gs, ss;
+ unsigned long gs_base, gs_kernel_base, fs_base;
+ unsigned long cr0, cr2, cr3, cr4, cr8;
+ u64 misc_enable;
+ bool misc_enable_saved;
+ unsigned long efer;
+ u16 gdt_pad; /* Unused */
+ struct desc_ptr gdt_desc;
+ u16 idt_pad;
+ u16 idt_limit;
+ unsigned long idt_base;
+ u16 ldt;
+ u16 tss;
+ unsigned long tr;
+ unsigned long safety;
+ unsigned long return_address;
+} __attribute__((packed));
+
+#define loaddebug(thread,register) \
+ set_debugreg((thread)->debugreg##register, register)
+
+/* routines for saving/restoring kernel state */
+extern int acpi_save_state_mem(void);
+extern char core_restore_code;
+extern char restore_registers;
+
+#endif /* _ASM_X86_SUSPEND_64_H */
diff --git a/kernel/arch/x86/include/asm/svm.h b/kernel/arch/x86/include/asm/svm.h
new file mode 100644
index 000000000..6136d99f5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/svm.h
@@ -0,0 +1,279 @@
+#ifndef __SVM_H
+#define __SVM_H
+
+#include <uapi/asm/svm.h>
+
+
+enum {
+ INTERCEPT_INTR,
+ INTERCEPT_NMI,
+ INTERCEPT_SMI,
+ INTERCEPT_INIT,
+ INTERCEPT_VINTR,
+ INTERCEPT_SELECTIVE_CR0,
+ INTERCEPT_STORE_IDTR,
+ INTERCEPT_STORE_GDTR,
+ INTERCEPT_STORE_LDTR,
+ INTERCEPT_STORE_TR,
+ INTERCEPT_LOAD_IDTR,
+ INTERCEPT_LOAD_GDTR,
+ INTERCEPT_LOAD_LDTR,
+ INTERCEPT_LOAD_TR,
+ INTERCEPT_RDTSC,
+ INTERCEPT_RDPMC,
+ INTERCEPT_PUSHF,
+ INTERCEPT_POPF,
+ INTERCEPT_CPUID,
+ INTERCEPT_RSM,
+ INTERCEPT_IRET,
+ INTERCEPT_INTn,
+ INTERCEPT_INVD,
+ INTERCEPT_PAUSE,
+ INTERCEPT_HLT,
+ INTERCEPT_INVLPG,
+ INTERCEPT_INVLPGA,
+ INTERCEPT_IOIO_PROT,
+ INTERCEPT_MSR_PROT,
+ INTERCEPT_TASK_SWITCH,
+ INTERCEPT_FERR_FREEZE,
+ INTERCEPT_SHUTDOWN,
+ INTERCEPT_VMRUN,
+ INTERCEPT_VMMCALL,
+ INTERCEPT_VMLOAD,
+ INTERCEPT_VMSAVE,
+ INTERCEPT_STGI,
+ INTERCEPT_CLGI,
+ INTERCEPT_SKINIT,
+ INTERCEPT_RDTSCP,
+ INTERCEPT_ICEBP,
+ INTERCEPT_WBINVD,
+ INTERCEPT_MONITOR,
+ INTERCEPT_MWAIT,
+ INTERCEPT_MWAIT_COND,
+ INTERCEPT_XSETBV,
+};
+
+
+struct __attribute__ ((__packed__)) vmcb_control_area {
+ u32 intercept_cr;
+ u32 intercept_dr;
+ u32 intercept_exceptions;
+ u64 intercept;
+ u8 reserved_1[42];
+ u16 pause_filter_count;
+ u64 iopm_base_pa;
+ u64 msrpm_base_pa;
+ u64 tsc_offset;
+ u32 asid;
+ u8 tlb_ctl;
+ u8 reserved_2[3];
+ u32 int_ctl;
+ u32 int_vector;
+ u32 int_state;
+ u8 reserved_3[4];
+ u32 exit_code;
+ u32 exit_code_hi;
+ u64 exit_info_1;
+ u64 exit_info_2;
+ u32 exit_int_info;
+ u32 exit_int_info_err;
+ u64 nested_ctl;
+ u8 reserved_4[16];
+ u32 event_inj;
+ u32 event_inj_err;
+ u64 nested_cr3;
+ u64 lbr_ctl;
+ u32 clean;
+ u32 reserved_5;
+ u64 next_rip;
+ u8 insn_len;
+ u8 insn_bytes[15];
+ u8 reserved_6[800];
+};
+
+
+#define TLB_CONTROL_DO_NOTHING 0
+#define TLB_CONTROL_FLUSH_ALL_ASID 1
+#define TLB_CONTROL_FLUSH_ASID 3
+#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
+
+#define V_TPR_MASK 0x0f
+
+#define V_IRQ_SHIFT 8
+#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
+
+#define V_INTR_PRIO_SHIFT 16
+#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
+
+#define V_IGN_TPR_SHIFT 20
+#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+
+#define V_INTR_MASKING_SHIFT 24
+#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+
+#define SVM_INTERRUPT_SHADOW_MASK 1
+
+#define SVM_IOIO_STR_SHIFT 2
+#define SVM_IOIO_REP_SHIFT 3
+#define SVM_IOIO_SIZE_SHIFT 4
+#define SVM_IOIO_ASIZE_SHIFT 7
+
+#define SVM_IOIO_TYPE_MASK 1
+#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
+#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
+#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
+#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+
+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
+struct __attribute__ ((__packed__)) vmcb_seg {
+ u16 selector;
+ u16 attrib;
+ u32 limit;
+ u64 base;
+};
+
+struct __attribute__ ((__packed__)) vmcb_save_area {
+ struct vmcb_seg es;
+ struct vmcb_seg cs;
+ struct vmcb_seg ss;
+ struct vmcb_seg ds;
+ struct vmcb_seg fs;
+ struct vmcb_seg gs;
+ struct vmcb_seg gdtr;
+ struct vmcb_seg ldtr;
+ struct vmcb_seg idtr;
+ struct vmcb_seg tr;
+ u8 reserved_1[43];
+ u8 cpl;
+ u8 reserved_2[4];
+ u64 efer;
+ u8 reserved_3[112];
+ u64 cr4;
+ u64 cr3;
+ u64 cr0;
+ u64 dr7;
+ u64 dr6;
+ u64 rflags;
+ u64 rip;
+ u8 reserved_4[88];
+ u64 rsp;
+ u8 reserved_5[24];
+ u64 rax;
+ u64 star;
+ u64 lstar;
+ u64 cstar;
+ u64 sfmask;
+ u64 kernel_gs_base;
+ u64 sysenter_cs;
+ u64 sysenter_esp;
+ u64 sysenter_eip;
+ u64 cr2;
+ u8 reserved_6[32];
+ u64 g_pat;
+ u64 dbgctl;
+ u64 br_from;
+ u64 br_to;
+ u64 last_excp_from;
+ u64 last_excp_to;
+};
+
+struct __attribute__ ((__packed__)) vmcb {
+ struct vmcb_control_area control;
+ struct vmcb_save_area save;
+};
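+
+/*
+ * Usage sketch (illustrative; toy_enable_intercepts() is made up -- KVM has
+ * its own helpers for this): the 'intercept' word is a bitmap indexed by the
+ * INTERCEPT_* enum above, so arming an intercept is a single bit set.
+ */
+static inline void toy_enable_intercepts(struct vmcb *vmcb)
+{
+ vmcb->control.intercept |= 1ULL << INTERCEPT_CPUID; /* vmexit on CPUID */
+ vmcb->control.intercept |= 1ULL << INTERCEPT_HLT; /* vmexit on HLT */
+}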
+
+#define SVM_CPUID_FEATURE_SHIFT 2
+#define SVM_CPUID_FUNC 0x8000000a
+
+#define SVM_VM_CR_SVM_DISABLE 4
+
+#define SVM_SELECTOR_S_SHIFT 4
+#define SVM_SELECTOR_DPL_SHIFT 5
+#define SVM_SELECTOR_P_SHIFT 7
+#define SVM_SELECTOR_AVL_SHIFT 8
+#define SVM_SELECTOR_L_SHIFT 9
+#define SVM_SELECTOR_DB_SHIFT 10
+#define SVM_SELECTOR_G_SHIFT 11
+
+#define SVM_SELECTOR_TYPE_MASK (0xf)
+#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
+#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
+#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
+#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
+#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
+#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
+#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)
+
+#define SVM_SELECTOR_WRITE_MASK (1 << 1)
+#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
+#define SVM_SELECTOR_CODE_MASK (1 << 3)
+
+#define INTERCEPT_CR0_READ 0
+#define INTERCEPT_CR3_READ 3
+#define INTERCEPT_CR4_READ 4
+#define INTERCEPT_CR8_READ 8
+#define INTERCEPT_CR0_WRITE (16 + 0)
+#define INTERCEPT_CR3_WRITE (16 + 3)
+#define INTERCEPT_CR4_WRITE (16 + 4)
+#define INTERCEPT_CR8_WRITE (16 + 8)
+
+#define INTERCEPT_DR0_READ 0
+#define INTERCEPT_DR1_READ 1
+#define INTERCEPT_DR2_READ 2
+#define INTERCEPT_DR3_READ 3
+#define INTERCEPT_DR4_READ 4
+#define INTERCEPT_DR5_READ 5
+#define INTERCEPT_DR6_READ 6
+#define INTERCEPT_DR7_READ 7
+#define INTERCEPT_DR0_WRITE (16 + 0)
+#define INTERCEPT_DR1_WRITE (16 + 1)
+#define INTERCEPT_DR2_WRITE (16 + 2)
+#define INTERCEPT_DR3_WRITE (16 + 3)
+#define INTERCEPT_DR4_WRITE (16 + 4)
+#define INTERCEPT_DR5_WRITE (16 + 5)
+#define INTERCEPT_DR6_WRITE (16 + 6)
+#define INTERCEPT_DR7_WRITE (16 + 7)
+
+#define SVM_EVTINJ_VEC_MASK 0xff
+
+#define SVM_EVTINJ_TYPE_SHIFT 8
+#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_VALID (1 << 31)
+#define SVM_EVTINJ_VALID_ERR (1 << 11)
+
+#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
+#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK
+
+#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
+#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
+#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
+#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
+
+#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
+#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+
+#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
+#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
+
+#define SVM_EXITINFO_REG_MASK 0x0F
+
+#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
+
+#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
+#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8"
+#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
+#define SVM_CLGI ".byte 0x0f, 0x01, 0xdd"
+#define SVM_STGI ".byte 0x0f, 0x01, 0xdc"
+#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
+
+#endif
diff --git a/kernel/arch/x86/include/asm/swiotlb.h b/kernel/arch/x86/include/asm/swiotlb.h
new file mode 100644
index 000000000..ab05d73e2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/swiotlb.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_X86_SWIOTLB_H
+#define _ASM_X86_SWIOTLB_H
+
+#include <linux/swiotlb.h>
+
+#ifdef CONFIG_SWIOTLB
+extern int swiotlb;
+extern int __init pci_swiotlb_detect_override(void);
+extern int __init pci_swiotlb_detect_4gb(void);
+extern void __init pci_swiotlb_init(void);
+extern void __init pci_swiotlb_late_init(void);
+#else
+#define swiotlb 0
+static inline int pci_swiotlb_detect_override(void)
+{
+ return 0;
+}
+static inline int pci_swiotlb_detect_4gb(void)
+{
+ return 0;
+}
+static inline void pci_swiotlb_init(void)
+{
+}
+static inline void pci_swiotlb_late_init(void)
+{
+}
+#endif
+
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs);
+extern void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs);
+
+#endif /* _ASM_X86_SWIOTLB_H */
diff --git a/kernel/arch/x86/include/asm/switch_to.h b/kernel/arch/x86/include/asm/switch_to.h
new file mode 100644
index 000000000..751bf4b7b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/switch_to.h
@@ -0,0 +1,133 @@
+#ifndef _ASM_X86_SWITCH_TO_H
+#define _ASM_X86_SWITCH_TO_H
+
+struct task_struct; /* one of the stranger aspects of C forward declarations */
+__visible struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next);
+struct tss_struct;
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss);
+
+#ifdef CONFIG_X86_32
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary \
+ "movl %P[task_canary](%[next]), %%ebx\n\t" \
+ "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam \
+ , [stack_canary] "=m" (stack_canary.canary)
+#define __switch_canary_iparam \
+ , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
+/*
+ * Saving eflags is important. It not only switches IOPL between tasks,
+ * it also protects other tasks from NT leaking through sysenter etc.
+ */
+#define switch_to(prev, next, last) \
+do { \
+ /* \
+ * Context-switching clobbers all registers, so we clobber \
+ * them explicitly, via unused output variables. \
+ * (EAX and EBP are not listed because EBP is saved/restored \
+ * explicitly for wchan access and EAX is the return value of \
+ * __switch_to()) \
+ */ \
+ unsigned long ebx, ecx, edx, esi, edi; \
+ \
+ asm volatile("pushfl\n\t" /* save flags */ \
+ "pushl %%ebp\n\t" /* save EBP */ \
+ "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
+ "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
+ "pushl %[next_ip]\n\t" /* restore EIP */ \
+ __switch_canary \
+ "jmp __switch_to\n" /* regparm call */ \
+ "1:\t" \
+ "popl %%ebp\n\t" /* restore EBP */ \
+ "popfl\n" /* restore flags */ \
+ \
+ /* output parameters */ \
+ : [prev_sp] "=m" (prev->thread.sp), \
+ [prev_ip] "=m" (prev->thread.ip), \
+ "=a" (last), \
+ \
+ /* clobbered output registers: */ \
+ "=b" (ebx), "=c" (ecx), "=d" (edx), \
+ "=S" (esi), "=D" (edi) \
+ \
+ __switch_canary_oparam \
+ \
+ /* input parameters: */ \
+ : [next_sp] "m" (next->thread.sp), \
+ [next_ip] "m" (next->thread.ip), \
+ \
+ /* regparm parameters for __switch_to(): */ \
+ [prev] "a" (prev), \
+ [next] "d" (next) \
+ \
+ __switch_canary_iparam \
+ \
+ : /* reloaded segment registers */ \
+ "memory"); \
+} while (0)
+
+#else /* CONFIG_X86_32 */
+
+/* frame pointer must be last for get_wchan */
+#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+
+#define __EXTRA_CLOBBER \
+ , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
+ "r12", "r13", "r14", "r15", "flags"
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary \
+ "movq %P[task_canary](%%rsi),%%r8\n\t" \
+ "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam \
+ , [gs_canary] "=m" (irq_stack_union.stack_canary)
+#define __switch_canary_iparam \
+ , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
+/*
+ * There is no need to save or restore flags, because flags are always
+ * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
+ * has no effect.
+ */
+#define switch_to(prev, next, last) \
+ asm volatile(SAVE_CONTEXT \
+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
+ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
+ "call __switch_to\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+ __switch_canary \
+ "movq %P[thread_info](%%rsi),%%r8\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
+ RESTORE_CONTEXT \
+ : "=a" (last) \
+ __switch_canary_oparam \
+ : [next] "S" (next), [prev] "D" (prev), \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)), \
+ [_tif_fork] "i" (_TIF_FORK), \
+ [thread_info] "i" (offsetof(struct task_struct, stack)), \
+ [current_task] "m" (current_task) \
+ __switch_canary_iparam \
+ : "memory", "cc" __EXTRA_CLOBBER)
+
+#endif /* CONFIG_X86_32 */
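+
+/*
+ * Usage sketch (illustrative and heavily simplified; toy_switch() is made up
+ * and assumes <linux/sched.h> has already been included by the caller): this
+ * is roughly how the scheduler core uses switch_to(). 'last' receives the
+ * task we switched away from, which only becomes known once this CPU
+ * eventually switches back to us.
+ */
+static inline struct task_struct *toy_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct task_struct *last;
+
+ switch_to(prev, next, last); /* execution resumes here later */
+ return last;
+}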
+
+#endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/kernel/arch/x86/include/asm/sync_bitops.h b/kernel/arch/x86/include/asm/sync_bitops.h
new file mode 100644
index 000000000..f28a24b51
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sync_bitops.h
@@ -0,0 +1,130 @@
+#ifndef _ASM_X86_SYNC_BITOPS_H
+#define _ASM_X86_SYNC_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+#define ADDR (*(volatile long *)addr)
+
+/**
+ * sync_set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void sync_set_bit(long nr, volatile unsigned long *addr)
+{
+ asm volatile("lock; bts %1,%0"
+ : "+m" (ADDR)
+ : "Ir" (nr)
+ : "memory");
+}
+
+/**
+ * sync_clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * sync_clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
+{
+ asm volatile("lock; btr %1,%0"
+ : "+m" (ADDR)
+ : "Ir" (nr)
+ : "memory");
+}
+
+/**
+ * sync_change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * sync_change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void sync_change_bit(long nr, volatile unsigned long *addr)
+{
+ asm volatile("lock; btc %1,%0"
+ : "+m" (ADDR)
+ : "Ir" (nr)
+ : "memory");
+}
+
+/**
+ * sync_test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+ int oldbit;
+
+ asm volatile("lock; bts %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "+m" (ADDR)
+ : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+/**
+ * sync_test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+ int oldbit;
+
+ asm volatile("lock; btr %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "+m" (ADDR)
+ : "Ir" (nr) : "memory");
+ return oldbit;
+}
+
+/**
+ * sync_test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+ int oldbit;
+
+ asm volatile("lock; btc %2,%1\n\tsbbl %0,%0"
+ : "=r" (oldbit), "+m" (ADDR)
+ : "Ir" (nr) : "memory");
+ return oldbit;
+}
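+
+/*
+ * Usage sketch (illustrative; the toy_* names are made up): a shared bitmap
+ * of pending events of the kind these helpers are meant for, e.g. memory
+ * shared with a hypervisor or another domain.
+ */
+static unsigned long toy_pending[4]; /* 4 * BITS_PER_LONG shared event bits */
+
+static inline void toy_post_event(int evt)
+{
+ sync_set_bit(evt, toy_pending); /* atomically visible to the peer */
+}
+
+static inline int toy_consume_event(int evt)
+{
+ return sync_test_and_clear_bit(evt, toy_pending); /* returns old value */
+}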
+
+#define sync_test_bit(nr, addr) test_bit(nr, addr)
+
+#undef ADDR
+
+#endif /* _ASM_X86_SYNC_BITOPS_H */
diff --git a/kernel/arch/x86/include/asm/sys_ia32.h b/kernel/arch/x86/include/asm/sys_ia32.h
new file mode 100644
index 000000000..82c34ee25
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sys_ia32.h
@@ -0,0 +1,55 @@
+/*
+ * sys_ia32.h - Linux ia32 syscall interfaces
+ *
+ * Copyright (c) 2008 Jaswinder Singh Rajput
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#ifndef _ASM_X86_SYS_IA32_H
+#define _ASM_X86_SYS_IA32_H
+
+#ifdef CONFIG_COMPAT
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <asm/compat.h>
+#include <asm/ia32.h>
+
+/* ia32/sys_ia32.c */
+asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
+asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
+
+asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
+asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
+asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
+asmlinkage long sys32_fstatat(unsigned int, const char __user *,
+ struct stat64 __user *, int);
+struct mmap_arg_struct32;
+asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
+
+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+
+asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
+asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
+
+long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
+long sys32_vm86_warning(void);
+
+asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
+asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
+ unsigned, unsigned, int);
+asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
+asmlinkage long sys32_fallocate(int, int, unsigned,
+ unsigned, unsigned, unsigned);
+
+/* ia32/ia32_signal.c */
+asmlinkage long sys32_sigreturn(void);
+asmlinkage long sys32_rt_sigreturn(void);
+
+#endif /* CONFIG_COMPAT */
+
+#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/kernel/arch/x86/include/asm/syscall.h b/kernel/arch/x86/include/asm/syscall.h
new file mode 100644
index 000000000..d6a756ae0
--- /dev/null
+++ b/kernel/arch/x86/include/asm/syscall.h
@@ -0,0 +1,243 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_X86_SYSCALL_H
+#define _ASM_X86_SYSCALL_H
+
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <asm/asm-offsets.h> /* For NR_syscalls */
+#include <asm/thread_info.h> /* for TS_COMPAT */
+#include <asm/unistd.h>
+
+typedef void (*sys_call_ptr_t)(void);
+extern const sys_call_ptr_t sys_call_table[];
+
+/*
+ * Only the low 32 bits of orig_ax are meaningful, so we return int.
+ * This importantly ignores the high bits on 64-bit, so comparisons
+ * sign-extend the low 32 bits.
+ */
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->orig_ax;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->ax = regs->orig_ax;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->ax;
+#ifdef CONFIG_IA32_EMULATION
+ /*
+ * TS_COMPAT is set for 32-bit syscall entries and then
+ * remains set until we return to user mode.
+ */
+ if (task_thread_info(task)->status & TS_COMPAT)
+ /*
+ * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
+ * and will match correctly in comparisons.
+ */
+ error = (long) (int) error;
+#endif
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->ax;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->ax = (long) error ?: val;
+}
+
+#ifdef CONFIG_X86_32
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ memcpy(&regs->bx + i, args, n * sizeof(args[0]));
+}
+
+static inline int syscall_get_arch(void)
+{
+ return AUDIT_ARCH_I386;
+}
+
+#else /* CONFIG_X86_64 */
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+ if (task_thread_info(task)->status & TS_COMPAT)
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ *args++ = regs->bx;
+ case 1:
+ if (!n--) break;
+ *args++ = regs->cx;
+ case 2:
+ if (!n--) break;
+ *args++ = regs->dx;
+ case 3:
+ if (!n--) break;
+ *args++ = regs->si;
+ case 4:
+ if (!n--) break;
+ *args++ = regs->di;
+ case 5:
+ if (!n--) break;
+ *args++ = regs->bp;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+ else
+# endif
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ *args++ = regs->di;
+ case 1:
+ if (!n--) break;
+ *args++ = regs->si;
+ case 2:
+ if (!n--) break;
+ *args++ = regs->dx;
+ case 3:
+ if (!n--) break;
+ *args++ = regs->r10;
+ case 4:
+ if (!n--) break;
+ *args++ = regs->r8;
+ case 5:
+ if (!n--) break;
+ *args++ = regs->r9;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+}
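+
+/*
+ * Usage sketch (illustrative; toy_show_syscall() is made up and printk() is
+ * assumed to be visible via the includes above): a tracer-style helper that
+ * reads the number and first three arguments of the syscall 'task' is
+ * currently making, using the accessors defined in this header.
+ */
+static inline void toy_show_syscall(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long args[3];
+
+ syscall_get_arguments(task, regs, 0, 3, args);
+ printk(KERN_DEBUG "syscall %d(%lx, %lx, %lx)\n",
+ syscall_get_nr(task, regs), args[0], args[1], args[2]);
+}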
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+ if (task_thread_info(task)->status & TS_COMPAT)
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ regs->bx = *args++;
+ case 1:
+ if (!n--) break;
+ regs->cx = *args++;
+ case 2:
+ if (!n--) break;
+ regs->dx = *args++;
+ case 3:
+ if (!n--) break;
+ regs->si = *args++;
+ case 4:
+ if (!n--) break;
+ regs->di = *args++;
+ case 5:
+ if (!n--) break;
+ regs->bp = *args++;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+ else
+# endif
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ regs->di = *args++;
+ case 1:
+ if (!n--) break;
+ regs->si = *args++;
+ case 2:
+ if (!n--) break;
+ regs->dx = *args++;
+ case 3:
+ if (!n--) break;
+ regs->r10 = *args++;
+ case 4:
+ if (!n--) break;
+ regs->r8 = *args++;
+ case 5:
+ if (!n--) break;
+ regs->r9 = *args++;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+static inline int syscall_get_arch(void)
+{
+#ifdef CONFIG_IA32_EMULATION
+ /*
+ * TS_COMPAT is set for 32-bit syscall entry and then
+ * remains set until we return to user mode.
+ *
+ * TIF_IA32 tasks should always have TS_COMPAT set at
+ * system call time.
+ *
+ * x32 tasks should be considered AUDIT_ARCH_X86_64.
+ */
+ if (task_thread_info(current)->status & TS_COMPAT)
+ return AUDIT_ARCH_I386;
+#endif
+ /* Both x32 and x86_64 are considered "64-bit". */
+ return AUDIT_ARCH_X86_64;
+}
+#endif /* CONFIG_X86_32 */
+
+#endif /* _ASM_X86_SYSCALL_H */
diff --git a/kernel/arch/x86/include/asm/syscalls.h b/kernel/arch/x86/include/asm/syscalls.h
new file mode 100644
index 000000000..592a6a672
--- /dev/null
+++ b/kernel/arch/x86/include/asm/syscalls.h
@@ -0,0 +1,54 @@
+/*
+ * syscalls.h - Linux syscall interfaces (arch-specific)
+ *
+ * Copyright (c) 2008 Jaswinder Singh Rajput
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#ifndef _ASM_X86_SYSCALLS_H
+#define _ASM_X86_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+/* Common in X86_32 and X86_64 */
+/* kernel/ioport.c */
+asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
+asmlinkage long sys_iopl(unsigned int);
+
+/* kernel/ldt.c */
+asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
+
+/* kernel/signal.c */
+asmlinkage long sys_rt_sigreturn(void);
+
+/* kernel/tls.c */
+asmlinkage long sys_set_thread_area(struct user_desc __user *);
+asmlinkage long sys_get_thread_area(struct user_desc __user *);
+
+/* X86_32 only */
+#ifdef CONFIG_X86_32
+
+/* kernel/signal.c */
+asmlinkage unsigned long sys_sigreturn(void);
+
+/* kernel/vm86_32.c */
+asmlinkage long sys_vm86old(struct vm86_struct __user *);
+asmlinkage long sys_vm86(unsigned long, unsigned long);
+
+#else /* CONFIG_X86_32 */
+
+/* X86_64 only */
+/* kernel/process_64.c */
+asmlinkage long sys_arch_prctl(int, unsigned long);
+
+/* kernel/sys_x86_64.c */
+asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+
+#endif /* CONFIG_X86_32 */
+#endif /* _ASM_X86_SYSCALLS_H */
diff --git a/kernel/arch/x86/include/asm/sysfb.h b/kernel/arch/x86/include/asm/sysfb.h
new file mode 100644
index 000000000..2aeb3e255
--- /dev/null
+++ b/kernel/arch/x86/include/asm/sysfb.h
@@ -0,0 +1,98 @@
+#ifndef _ARCH_X86_KERNEL_SYSFB_H
+#define _ARCH_X86_KERNEL_SYSFB_H
+
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/screen_info.h>
+
+enum {
+ M_I17, /* 17-Inch iMac */
+ M_I20, /* 20-Inch iMac */
+ M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
+ M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, gen 8,1 */
+ M_I24_10_1, /* 24-Inch iMac, gen 10,1 */
+ M_I27_11_1, /* 27-Inch iMac, gen 11,1 */
+ M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, gen 3,1 */
+ M_MINI_4_1, /* Mac Mini, gen 4,1 */
+ M_MB, /* MacBook */
+ M_MB_2, /* MacBook, 2nd rev. */
+ M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
+ M_MB_SR, /* MacBook, 2nd gen (Santa Rosa) */
+ M_MBA, /* MacBook Air */
+ M_MBA_3, /* MacBook Air, 3rd rev. */
+ M_MBP, /* MacBook Pro */
+ M_MBP_2, /* MacBook Pro, 2nd gen */
+ M_MBP_2_2, /* MacBook Pro, gen 2,2 */
+ M_MBP_SR, /* MacBook Pro (Santa Rosa) */
+ M_MBP_4, /* MacBook Pro, 4th gen */
+ M_MBP_5_1, /* MacBook Pro, gen 5,1 */
+ M_MBP_5_2, /* MacBook Pro, gen 5,2 */
+ M_MBP_5_3, /* MacBook Pro, gen 5,3 */
+ M_MBP_6_1, /* MacBook Pro, gen 6,1 */
+ M_MBP_6_2, /* MacBook Pro, gen 6,2 */
+ M_MBP_7_1, /* MacBook Pro, gen 7,1 */
+ M_MBP_8_2, /* MacBook Pro, gen 8,2 */
+ M_UNKNOWN /* placeholder */
+};
+
+struct efifb_dmi_info {
+ char *optname;
+ unsigned long base;
+ int stride;
+ int width;
+ int height;
+ int flags;
+};
+
+#ifdef CONFIG_EFI
+
+extern struct efifb_dmi_info efifb_dmi_list[];
+void sysfb_apply_efi_quirks(void);
+
+#else /* CONFIG_EFI */
+
+static inline void sysfb_apply_efi_quirks(void)
+{
+}
+
+#endif /* CONFIG_EFI */
+
+#ifdef CONFIG_X86_SYSFB
+
+bool parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode);
+int create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode);
+
+#else /* CONFIG_X86_SYSFB */
+
+static inline bool parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode)
+{
+ return false;
+}
+
+static inline int create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_X86_SYSFB */
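
As a hedged illustration of how the two hooks above are meant to be chained at boot, here is a minimal sketch. It assumes the global screen_info exported by <linux/screen_info.h> (already included in this header); the function name is hypothetical and not part of the diff.

/* Illustrative sketch only: probe the firmware framebuffer and register it. */
static int example_sysfb_probe(void)
{
	struct simplefb_platform_data mode;

	sysfb_apply_efi_quirks();	/* fix up known-bad EFI/DMI data first */

	if (!parse_mode(&screen_info, &mode))
		return -ENODEV;		/* no recognisable framebuffer mode */

	return create_simplefb(&screen_info, &mode);
}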
+
+#endif /* _ARCH_X86_KERNEL_SYSFB_H */
diff --git a/kernel/arch/x86/include/asm/tce.h b/kernel/arch/x86/include/asm/tce.h
new file mode 100644
index 000000000..7a6677c1a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/tce.h
@@ -0,0 +1,48 @@
+/*
+ * This file is derived from asm-powerpc/tce.h.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Muli Ben-Yehuda <muli@il.ibm.com>
+ * Author: Jon Mason <jdmason@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_TCE_H
+#define _ASM_X86_TCE_H
+
+extern unsigned int specified_table_size;
+struct iommu_table;
+
+#define TCE_ENTRY_SIZE 8 /* in bytes */
+
+#define TCE_READ_SHIFT 0
+#define TCE_WRITE_SHIFT 1
+#define TCE_HUBID_SHIFT 2 /* unused */
+#define TCE_RSVD_SHIFT 8 /* unused */
+#define TCE_RPN_SHIFT 12
+#define TCE_UNUSED_SHIFT 48 /* unused */
+
+#define TCE_RPN_MASK 0x0000fffffffff000ULL
+
+extern void tce_build(struct iommu_table *tbl, unsigned long index,
+ unsigned int npages, unsigned long uaddr, int direction);
+extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
+extern void * __init alloc_tce_table(void);
+extern void __init free_tce_table(void *tbl);
+extern int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar);
+
+#endif /* _ASM_X86_TCE_H */
diff --git a/kernel/arch/x86/include/asm/thread_info.h b/kernel/arch/x86/include/asm/thread_info.h
new file mode 100644
index 000000000..606144afb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/thread_info.h
@@ -0,0 +1,303 @@
+/* thread_info.h: low-level thread information
+ *
+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef _ASM_X86_THREAD_INFO_H
+#define _ASM_X86_THREAD_INFO_H
+
+#include <linux/compiler.h>
+#include <asm/page.h>
+#include <asm/percpu.h>
+#include <asm/types.h>
+
+/*
+ * TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
+ * reserve at the top of the kernel stack. We do it because of a nasty
+ * 32-bit corner case. On x86_32, the hardware stack frame is
+ * variable-length. Except for vm86 mode, struct pt_regs assumes a
+ * maximum-length frame. If we enter from CPL 0, the top 8 bytes of
+ * pt_regs don't actually exist. Ordinarily this doesn't matter, but it
+ * does in at least one case:
+ *
+ * If we take an NMI early enough in SYSENTER, then we can end up with
+ * pt_regs that extends above sp0. On the way out, in the espfix code,
+ * we can read the saved SS value, but that value will be above sp0.
+ * Without this offset, that can result in a page fault. (We are
+ * careful that, in this case, the value we read doesn't matter.)
+ *
+ * In vm86 mode, the hardware frame is much longer still, but we neither
+ * access the extra members from NMI context, nor do we write such a
+ * frame at sp0 at all.
+ *
+ * x86_64 has a fixed-length stack frame.
+ */
+#ifdef CONFIG_X86_32
+# define TOP_OF_KERNEL_STACK_PADDING 8
+#else
+# define TOP_OF_KERNEL_STACK_PADDING 0
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ */
+#ifndef __ASSEMBLY__
+struct task_struct;
+#include <asm/processor.h>
+#include <linux/atomic.h>
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ __u32 flags; /* low level flags */
+ __u32 status; /* thread synchronous flags */
+ __u32 cpu; /* current CPU */
+ int saved_preempt_count;
+ int preempt_lazy_count; /* 0 => lazy preemptable
+ <0 => BUG */
+ mm_segment_t addr_limit;
+ void __user *sysenter_return;
+ unsigned int sig_on_uaccess_error:1;
+ unsigned int uaccess_err:1; /* uaccess failed */
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .flags = 0, \
+ .cpu = 0, \
+ .saved_preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/asm-offsets.h>
+
+#endif
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files
+ * may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ * Warning: layout of LSW is hardcoded in entry.S
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+#define TIF_SECCOMP 8 /* secure computing */
+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
+#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+#define TIF_UPROBE 12 /* breakpointed or singlestepping */
+#define TIF_NOTSC 16 /* TSC is not accessible in userland */
+#define TIF_IA32 17 /* IA32 compatibility process */
+#define TIF_FORK 18 /* ret_from_fork */
+#define TIF_NOHZ 19 /* in adaptive nohz mode */
+#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
+#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
+#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
+#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
+#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
+#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
+#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
+#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
+#define TIF_X32 30 /* 32-bit native x86-64 binary */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_NOTSC (1 << TIF_NOTSC)
+#define _TIF_IA32 (1 << TIF_IA32)
+#define _TIF_FORK (1 << TIF_FORK)
+#define _TIF_NOHZ (1 << TIF_NOHZ)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
+#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
+#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
+#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_ADDR32 (1 << TIF_ADDR32)
+#define _TIF_X32 (1 << TIF_X32)
+
+/* work to do in syscall_trace_enter() */
+#define _TIF_WORK_SYSCALL_ENTRY \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
+
+/* work to do in syscall_trace_leave() */
+#define _TIF_WORK_SYSCALL_EXIT \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK \
+ (0x0000FFFF & \
+ ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT| \
+ _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
+
+/* work to do on any return to user space */
+#define _TIF_ALLWORK_MASK \
+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
+
+/* Only used for 64 bit */
+#define _TIF_DO_NOTIFY_MASK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
+ _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
+
+/* flags to check in __switch_to() */
+#define _TIF_WORK_CTXSW \
+ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
+
+#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+#define STACK_WARN (THREAD_SIZE/8)
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+
+DECLARE_PER_CPU(unsigned long, kernel_stack);
+
+static inline struct thread_info *current_thread_info(void)
+{
+ return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
+}
+
+static inline unsigned long current_stack_pointer(void)
+{
+ unsigned long sp;
+#ifdef CONFIG_X86_64
+ asm("mov %%rsp,%0" : "=g" (sp));
+#else
+ asm("mov %%esp,%0" : "=g" (sp));
+#endif
+ return sp;
+}
+
+#else /* !__ASSEMBLY__ */
+
+/* Load thread_info address into "reg" */
+#define GET_THREAD_INFO(reg) \
+ _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+ _ASM_SUB $(THREAD_SIZE),reg ;
+
+/*
+ * ASM operand which evaluates to a 'thread_info' address of
+ * the current task, if it is known that "reg" is exactly "off"
+ * bytes below the top of the stack currently.
+ *
+ * ( The kernel stack's size is known at build time; it is usually
+ * 2 or 4 pages, and the bottom of the kernel stack contains
+ * the thread_info structure. So to access the thread_info very
+ * quickly from assembly code we can calculate down from the
+ * top of the kernel stack to the bottom, using constant,
+ * build-time calculations only. )
+ *
+ * For example, to fetch the current thread_info->flags value into %eax
+ * on x86-64 defconfig kernels, in syscall entry code where RSP is
+ * currently at exactly SIZEOF_PTREGS bytes away from the top of the
+ * stack:
+ *
+ * mov ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS), %eax
+ *
+ * will translate to:
+ *
+ * 8b 84 24 b8 c0 ff ff mov -0x3f48(%rsp), %eax
+ *
+ * which is below the current RSP by almost 16K.
+ */
+#define ASM_THREAD_INFO(field, reg, off) ((field)+(off)-THREAD_SIZE)(reg)
+
+#endif
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
+#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK 1
+static inline void set_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->status |= TS_RESTORE_SIGMASK;
+ WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
+}
+static inline void clear_restore_sigmask(void)
+{
+ current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
+}
+static inline bool test_restore_sigmask(void)
+{
+ return current_thread_info()->status & TS_RESTORE_SIGMASK;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ struct thread_info *ti = current_thread_info();
+ if (!(ti->status & TS_RESTORE_SIGMASK))
+ return false;
+ ti->status &= ~TS_RESTORE_SIGMASK;
+ return true;
+}
+
+static inline bool is_ia32_task(void)
+{
+#ifdef CONFIG_X86_32
+ return true;
+#endif
+#ifdef CONFIG_IA32_EMULATION
+ if (current_thread_info()->status & TS_COMPAT)
+ return true;
+#endif
+ return false;
+}
+
+/*
+ * Force syscall return via IRET by making it look as if there was
+ * some work pending. IRET is our most capable (but slowest) syscall
+ * return path, which is able to restore modified SS, CS and certain
+ * EFLAGS values that other (fast) syscall return instructions
+ * are not able to restore properly.
+ */
+#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef __ASSEMBLY__
+extern void arch_task_cache_init(void);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+extern void arch_release_task_struct(struct task_struct *tsk);
+#endif
+#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/kernel/arch/x86/include/asm/time.h b/kernel/arch/x86/include/asm/time.h
new file mode 100644
index 000000000..92b8aec06
--- /dev/null
+++ b/kernel/arch/x86/include/asm/time.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_TIME_H
+#define _ASM_X86_TIME_H
+
+#include <linux/clocksource.h>
+#include <asm/mc146818rtc.h>
+
+extern void hpet_time_init(void);
+extern void time_init(void);
+
+extern struct clock_event_device *global_clock_event;
+
+#endif /* _ASM_X86_TIME_H */
diff --git a/kernel/arch/x86/include/asm/timer.h b/kernel/arch/x86/include/asm/timer.h
new file mode 100644
index 000000000..a04eabd43
--- /dev/null
+++ b/kernel/arch/x86/include/asm/timer.h
@@ -0,0 +1,37 @@
+#ifndef _ASM_X86_TIMER_H
+#define _ASM_X86_TIMER_H
+#include <linux/pm.h>
+#include <linux/percpu.h>
+#include <linux/interrupt.h>
+#include <linux/math64.h>
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+unsigned long long native_sched_clock(void);
+extern int recalibrate_cpu_khz(void);
+
+extern int no_timer_check;
+
+/*
+ * We use the full linear equation: f(x) = a + b*x, in order to allow
+ * a continuous function in the face of dynamic freq changes.
+ *
+ * Continuity means that when our frequency (and therefore our slope b)
+ * changes, we want to ensure that f(t) == f'(t), which gives:
+ * a + b*t == a' + b'*t.
+ *
+ * Without an offset (a) the above would not be possible.
+ *
+ * See the comment near cycles_2_ns() for details on how we compute (b).
+ */
+struct cyc2ns_data {
+ u32 cyc2ns_mul;
+ u32 cyc2ns_shift;
+ u64 cyc2ns_offset;
+ u32 __count;
+ /* u32 hole */
+}; /* 24 bytes -- do not grow */
+
+extern struct cyc2ns_data *cyc2ns_read_begin(void);
+extern void cyc2ns_read_end(struct cyc2ns_data *);
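
The struct above encodes the slope (b) as a fixed-point multiplier/shift pair and the offset (a) directly. A minimal sketch of how a reader would apply it follows; it assumes the mul_u64_u32_shr() helper from <linux/math64.h> (included above), and the function name is hypothetical.

/* Illustrative sketch only: apply f(x) = a + b*x to a raw cycle count. */
static u64 example_cycles_to_ns(u64 cycles)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	u64 ns;

	/* offset (a) plus cycles scaled by the fixed-point slope (b) */
	ns = data->cyc2ns_offset +
	     mul_u64_u32_shr(cycles, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_read_end(data);
	return ns;
}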
+
+#endif /* _ASM_X86_TIMER_H */
diff --git a/kernel/arch/x86/include/asm/timex.h b/kernel/arch/x86/include/asm/timex.h
new file mode 100644
index 000000000..1375cfc93
--- /dev/null
+++ b/kernel/arch/x86/include/asm/timex.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_TIMEX_H
+#define _ASM_X86_TIMEX_H
+
+#include <asm/processor.h>
+#include <asm/tsc.h>
+
+/* Assume we use the PIT time source for the clock tick */
+#define CLOCK_TICK_RATE PIT_TICK_RATE
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+#endif /* _ASM_X86_TIMEX_H */
diff --git a/kernel/arch/x86/include/asm/tlb.h b/kernel/arch/x86/include/asm/tlb.h
new file mode 100644
index 000000000..c7797307f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/tlb.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_X86_TLB_H
+#define _ASM_X86_TLB_H
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+#define tlb_flush(tlb) \
+{ \
+ if (!tlb->fullmm && !tlb->need_flush_all) \
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
+ else \
+ flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
+}
+
+#include <asm-generic/tlb.h>
+
+#endif /* _ASM_X86_TLB_H */
diff --git a/kernel/arch/x86/include/asm/tlbflush.h b/kernel/arch/x86/include/asm/tlbflush.h
new file mode 100644
index 000000000..cd791948b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/tlbflush.h
@@ -0,0 +1,269 @@
+#ifndef _ASM_X86_TLBFLUSH_H
+#define _ASM_X86_TLBFLUSH_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include <asm/processor.h>
+#include <asm/special_insns.h>
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+struct tlb_state {
+#ifdef CONFIG_SMP
+ struct mm_struct *active_mm;
+ int state;
+#endif
+
+ /*
+ * Access to this CR4 shadow and to H/W CR4 is protected by
+ * disabling interrupts when modifying either one.
+ */
+ unsigned long cr4;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+
+/* Initialize cr4 shadow for this CPU. */
+static inline void cr4_init_shadow(void)
+{
+ this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+}
+
+/* Set in this cpu's CR4. */
+static inline void cr4_set_bits(unsigned long mask)
+{
+ unsigned long cr4;
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ if ((cr4 | mask) != cr4) {
+ cr4 |= mask;
+ this_cpu_write(cpu_tlbstate.cr4, cr4);
+ __write_cr4(cr4);
+ }
+}
+
+/* Clear in this cpu's CR4. */
+static inline void cr4_clear_bits(unsigned long mask)
+{
+ unsigned long cr4;
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ if ((cr4 & ~mask) != cr4) {
+ cr4 &= ~mask;
+ this_cpu_write(cpu_tlbstate.cr4, cr4);
+ __write_cr4(cr4);
+ }
+}
+
+/* Read the CR4 shadow. */
+static inline unsigned long cr4_read_shadow(void)
+{
+ return this_cpu_read(cpu_tlbstate.cr4);
+}
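
A minimal sketch of how a caller might toggle a CR4 feature bit through these shadow-aware helpers. X86_CR4_TSD is used purely as an example bit, the function names are hypothetical, and callers would normally run with preemption disabled since the helpers act on the current CPU's shadow.

/* Illustrative sketch only: forbid/allow RDTSC from user space on this CPU. */
static inline void example_forbid_user_rdtsc(void)
{
	cr4_set_bits(X86_CR4_TSD);	/* updates the shadow, then hardware CR4 */
}

static inline void example_allow_user_rdtsc(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}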
+
+/*
+ * Save some of the CR4 feature set we're using (e.g. the Pentium 4MB
+ * enable and the PPro Global page enable), so that any CPUs that boot
+ * up after us can get the correct flags. This should only be used
+ * during boot on the boot CPU.
+ */
+extern unsigned long mmu_cr4_features;
+extern u32 *trampoline_cr4_features;
+
+static inline void cr4_set_bits_and_update_boot(unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
+ cr4_set_bits(mask);
+}
+
+static inline void __native_flush_tlb(void)
+{
+ native_write_cr3(native_read_cr3());
+}
+
+static inline void __native_flush_tlb_global_irq_disabled(void)
+{
+ unsigned long cr4;
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ /* clear PGE */
+ native_write_cr4(cr4 & ~X86_CR4_PGE);
+ /* write old PGE again and flush TLBs */
+ native_write_cr4(cr4);
+}
+
+static inline void __native_flush_tlb_global(void)
+{
+ unsigned long flags;
+
+ /*
+ * Read-modify-write to CR4 - protect it from preemption and
+ * from interrupts. (Use the raw variant because this code can
+ * be called from deep inside debugging code.)
+ */
+ raw_local_irq_save(flags);
+
+ __native_flush_tlb_global_irq_disabled();
+
+ raw_local_irq_restore(flags);
+}
+
+static inline void __native_flush_tlb_single(unsigned long addr)
+{
+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+ if (cpu_has_pge)
+ __flush_tlb_global();
+ else
+ __flush_tlb();
+}
+
+static inline void __flush_tlb_one(unsigned long addr)
+{
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
+ __flush_tlb_single(addr);
+}
+
+#define TLB_FLUSH_ALL -1UL
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb() flushes the current mm struct TLBs
+ * - flush_tlb_all() flushes all processes TLBs
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
+ *
+ * ...but the i386 has somewhat limited TLB flushing capabilities,
+ * and page-granular flushes are available only on i486 and up.
+ */
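
To make the calling pattern behind the list above concrete, here is a minimal sketch. The function name is hypothetical, and it assumes the caller has already rewritten the PTE that maps addr within vma.

/* Illustrative sketch only: drop the stale translation for one user page. */
static inline void example_pte_was_updated(struct vm_area_struct *vma,
					   unsigned long addr)
{
	flush_tlb_page(vma, addr);	/* local flush on UP, IPIs on SMP */
}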
+
+#ifndef CONFIG_SMP
+
+/* "_up" is for UniProcessor.
+ *
+ * This is a helper for other header functions. *Not* intended to be called
+ * directly. All global TLB flushes need to either call this, or to bump the
+ * vm statistics themselves.
+ */
+static inline void __flush_tlb_up(void)
+{
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ __flush_tlb();
+}
+
+static inline void flush_tlb_all(void)
+{
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ __flush_tlb_all();
+}
+
+static inline void flush_tlb(void)
+{
+ __flush_tlb_up();
+}
+
+static inline void local_flush_tlb(void)
+{
+ __flush_tlb_up();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mm == current->active_mm)
+ __flush_tlb_up();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb_one(addr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_mm == current->active_mm)
+ __flush_tlb_up();
+}
+
+static inline void flush_tlb_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end, unsigned long vmflag)
+{
+ if (mm == current->active_mm)
+ __flush_tlb_up();
+}
+
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void reset_lazy_tlbstate(void)
+{
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+#else /* SMP */
+
+#include <asm/smp.h>
+
+#define local_flush_tlb() __flush_tlb()
+
+#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end) \
+ flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned long vmflag);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#define flush_tlb() flush_tlb_current_task()
+
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+
+#define TLBSTATE_OK 1
+#define TLBSTATE_LAZY 2
+
+static inline void reset_lazy_tlbstate(void)
+{
+ this_cpu_write(cpu_tlbstate.state, 0);
+ this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
+}
+
+#endif /* SMP */
+
+#ifndef CONFIG_PARAVIRT
+#define flush_tlb_others(mask, mm, start, end) \
+ native_flush_tlb_others(mask, mm, start, end)
+#endif
+
+#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/kernel/arch/x86/include/asm/topology.h b/kernel/arch/x86/include/asm/topology.h
new file mode 100644
index 000000000..0e8f04f2c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/topology.h
@@ -0,0 +1,138 @@
+/*
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _ASM_X86_TOPOLOGY_H
+#define _ASM_X86_TOPOLOGY_H
+
+#ifdef CONFIG_X86_32
+# ifdef CONFIG_X86_HT
+# define ENABLE_TOPO_DEFINES
+# endif
+#else
+# ifdef CONFIG_SMP
+# define ENABLE_TOPO_DEFINES
+# endif
+#endif
+
+/*
+ * NUMA_NO_NODE is defined in <linux/numa.h> (it was moved there from
+ * this header) to preserve its visibility; it may be used independently
+ * of CONFIG_NUMA.
+ */
+#include <linux/numa.h>
+
+#ifdef CONFIG_NUMA
+#include <linux/cpumask.h>
+
+#include <asm/mpspec.h>
+
+/* Mappings between logical cpu number and node number */
+DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+/*
+ * override generic percpu implementation of cpu_to_node
+ */
+extern int __cpu_to_node(int cpu);
+#define cpu_to_node __cpu_to_node
+
+extern int early_cpu_to_node(int cpu);
+
+#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
+
+/* Same as cpu_to_node(), but usable before the per_cpu areas are set up */
+static inline int early_cpu_to_node(int cpu)
+{
+ return early_per_cpu(x86_cpu_to_node_map, cpu);
+}
+
+#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+
+/* Mappings between node number and cpus on that node. */
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+extern const struct cpumask *cpumask_of_node(int node);
+#else
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+ return node_to_cpumask_map[node];
+}
+#endif
+
+extern void setup_node_to_cpumask_map(void);
+
+/*
+ * Returns the number of the node containing Node 'node'. This
+ * architecture is flat, so it is a pretty simple function!
+ */
+#define parent_node(node) (node)
+
+#define pcibus_to_node(bus) __pcibus_to_node(bus)
+
+extern int __node_distance(int, int);
+#define node_distance(a, b) __node_distance(a, b)
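
A minimal usage sketch of the node-to-cpumask mapping declared above; for_each_cpu() and cpu_online() come from <linux/cpumask.h> (included above), and the function name is hypothetical.

/* Illustrative sketch only: count the online CPUs that belong to 'node'. */
static inline int example_online_cpus_on_node(int node)
{
	int cpu, n = 0;

	for_each_cpu(cpu, cpumask_of_node(node))
		if (cpu_online(cpu))
			n++;
	return n;
}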
+
+#else /* !CONFIG_NUMA */
+
+static inline int numa_node_id(void)
+{
+ return 0;
+}
+/*
+ * indicate override:
+ */
+#define numa_node_id numa_node_id
+
+static inline int early_cpu_to_node(int cpu)
+{
+ return 0;
+}
+
+static inline void setup_node_to_cpumask_map(void) { }
+
+#endif
+
+#include <asm-generic/topology.h>
+
+extern const struct cpumask *cpu_coregroup_mask(int cpu);
+
+#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
+#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
+#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#endif
+
+static inline void arch_fix_phys_package_id(int num, u32 slot)
+{
+}
+
+struct pci_bus;
+int x86_pci_root_bus_node(int bus);
+void x86_pci_root_bus_resources(int bus, struct list_head *resources);
+
+#endif /* _ASM_X86_TOPOLOGY_H */
diff --git a/kernel/arch/x86/include/asm/trace/exceptions.h b/kernel/arch/x86/include/asm/trace/exceptions.h
new file mode 100644
index 000000000..2fbc66c78
--- /dev/null
+++ b/kernel/arch/x86/include/asm/trace/exceptions.h
@@ -0,0 +1,52 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_PAGE_FAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGE_FAULT_H
+
+#include <linux/tracepoint.h>
+
+extern void trace_irq_vector_regfunc(void);
+extern void trace_irq_vector_unregfunc(void);
+
+DECLARE_EVENT_CLASS(x86_exceptions,
+
+ TP_PROTO(unsigned long address, struct pt_regs *regs,
+ unsigned long error_code),
+
+ TP_ARGS(address, regs, error_code),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, address )
+ __field( unsigned long, ip )
+ __field( unsigned long, error_code )
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->ip = regs->ip;
+ __entry->error_code = error_code;
+ ),
+
+ TP_printk("address=%pf ip=%pf error_code=0x%lx",
+ (void *)__entry->address, (void *)__entry->ip,
+ __entry->error_code) );
+
+#define DEFINE_PAGE_FAULT_EVENT(name) \
+DEFINE_EVENT_FN(x86_exceptions, name, \
+ TP_PROTO(unsigned long address, struct pt_regs *regs, \
+ unsigned long error_code), \
+ TP_ARGS(address, regs, error_code), \
+ trace_irq_vector_regfunc, \
+ trace_irq_vector_unregfunc);
+
+DEFINE_PAGE_FAULT_EVENT(page_fault_user);
+DEFINE_PAGE_FAULT_EVENT(page_fault_kernel);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE exceptions
+#endif /* _TRACE_PAGE_FAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/arch/x86/include/asm/trace/irq_vectors.h b/kernel/arch/x86/include/asm/trace/irq_vectors.h
new file mode 100644
index 000000000..4cab89000
--- /dev/null
+++ b/kernel/arch/x86/include/asm/trace/irq_vectors.h
@@ -0,0 +1,115 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq_vectors
+
+#if !defined(_TRACE_IRQ_VECTORS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IRQ_VECTORS_H
+
+#include <linux/tracepoint.h>
+
+extern void trace_irq_vector_regfunc(void);
+extern void trace_irq_vector_unregfunc(void);
+
+DECLARE_EVENT_CLASS(x86_irq_vector,
+
+ TP_PROTO(int vector),
+
+ TP_ARGS(vector),
+
+ TP_STRUCT__entry(
+ __field( int, vector )
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ ),
+
+ TP_printk("vector=%d", __entry->vector) );
+
+#define DEFINE_IRQ_VECTOR_EVENT(name) \
+DEFINE_EVENT_FN(x86_irq_vector, name##_entry, \
+ TP_PROTO(int vector), \
+ TP_ARGS(vector), \
+ trace_irq_vector_regfunc, \
+ trace_irq_vector_unregfunc); \
+DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
+ TP_PROTO(int vector), \
+ TP_ARGS(vector), \
+ trace_irq_vector_regfunc, \
+ trace_irq_vector_unregfunc);
+
+
+/*
+ * local_timer - called when entering/exiting a local timer interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(local_timer);
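
Each DEFINE_IRQ_VECTOR_EVENT(name) above generates trace_name_entry()/trace_name_exit() hooks that take the vector number. A minimal sketch of how a vector handler brackets its work with them; the handler body is elided and the function name is hypothetical.

/* Illustrative sketch only: emit the entry/exit events around the real work. */
static void example_local_timer_interrupt(int vector)
{
	trace_local_timer_entry(vector);
	/* ... acknowledge and service the local APIC timer here ... */
	trace_local_timer_exit(vector);
}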
+
+/*
+ * reschedule - called when entering/exiting a reschedule vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(reschedule);
+
+/*
+ * spurious_apic - called when entering/exiting a spurious apic vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(spurious_apic);
+
+/*
+ * error_apic - called when entering/exiting an error apic vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(error_apic);
+
+/*
+ * x86_platform_ipi - called when entering/exiting an x86 platform ipi interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
+
+/*
+ * irq_work - called when entering/exiting an irq work interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(irq_work);
+
+/*
+ * We must disallow sampling irq_work_exit() because perf event sampling
+ * itself can cause irq_work, which would lead to an infinite loop:
+ *
+ * 1) irq_work_exit happens
+ * 2) generates perf sample
+ * 3) generates irq_work
+ * 4) goto 1
+ */
+TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+
+/*
+ * call_function - called when entering/exiting a call function interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(call_function);
+
+/*
+ * call_function_single - called when entering/exiting a call function
+ * single interrupt vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(call_function_single);
+
+/*
+ * threshold_apic - called when entering/exiting a threshold apic interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
+
+/*
+ * thermal_apic - called when entering/exiting a thermal apic interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE irq_vectors
+#endif /* _TRACE_IRQ_VECTORS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/arch/x86/include/asm/trace_clock.h b/kernel/arch/x86/include/asm/trace_clock.h
new file mode 100644
index 000000000..beab86cc2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/trace_clock.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_X86_TRACE_CLOCK_H
+#define _ASM_X86_TRACE_CLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_X86_TSC
+
+extern u64 notrace trace_clock_x86_tsc(void);
+
+# define ARCH_TRACE_CLOCKS \
+ { trace_clock_x86_tsc, "x86-tsc", .in_ns = 0 },
+
+#else /* !CONFIG_X86_TSC */
+
+#define ARCH_TRACE_CLOCKS
+
+#endif
+
+#endif /* _ASM_X86_TRACE_CLOCK_H */
diff --git a/kernel/arch/x86/include/asm/traps.h b/kernel/arch/x86/include/asm/traps.h
new file mode 100644
index 000000000..4e49d7dff
--- /dev/null
+++ b/kernel/arch/x86/include/asm/traps.h
@@ -0,0 +1,144 @@
+#ifndef _ASM_X86_TRAPS_H
+#define _ASM_X86_TRAPS_H
+
+#include <linux/context_tracking_state.h>
+#include <linux/kprobes.h>
+
+#include <asm/debugreg.h>
+#include <asm/siginfo.h> /* TRAP_TRACE, ... */
+
+#define dotraplinkage __visible
+
+asmlinkage void divide_error(void);
+asmlinkage void debug(void);
+asmlinkage void nmi(void);
+asmlinkage void int3(void);
+asmlinkage void xen_debug(void);
+asmlinkage void xen_int3(void);
+asmlinkage void xen_stack_segment(void);
+asmlinkage void overflow(void);
+asmlinkage void bounds(void);
+asmlinkage void invalid_op(void);
+asmlinkage void device_not_available(void);
+#ifdef CONFIG_X86_64
+asmlinkage void double_fault(void);
+#endif
+asmlinkage void coprocessor_segment_overrun(void);
+asmlinkage void invalid_TSS(void);
+asmlinkage void segment_not_present(void);
+asmlinkage void stack_segment(void);
+asmlinkage void general_protection(void);
+asmlinkage void page_fault(void);
+asmlinkage void async_page_fault(void);
+asmlinkage void spurious_interrupt_bug(void);
+asmlinkage void coprocessor_error(void);
+asmlinkage void alignment_check(void);
+#ifdef CONFIG_X86_MCE
+asmlinkage void machine_check(void);
+#endif /* CONFIG_X86_MCE */
+asmlinkage void simd_coprocessor_error(void);
+
+#ifdef CONFIG_TRACING
+asmlinkage void trace_page_fault(void);
+#define trace_stack_segment stack_segment
+#define trace_divide_error divide_error
+#define trace_bounds bounds
+#define trace_invalid_op invalid_op
+#define trace_device_not_available device_not_available
+#define trace_coprocessor_segment_overrun coprocessor_segment_overrun
+#define trace_invalid_TSS invalid_TSS
+#define trace_segment_not_present segment_not_present
+#define trace_general_protection general_protection
+#define trace_spurious_interrupt_bug spurious_interrupt_bug
+#define trace_coprocessor_error coprocessor_error
+#define trace_alignment_check alignment_check
+#define trace_simd_coprocessor_error simd_coprocessor_error
+#define trace_async_page_fault async_page_fault
+#endif
+
+dotraplinkage void do_divide_error(struct pt_regs *, long);
+dotraplinkage void do_debug(struct pt_regs *, long);
+dotraplinkage void do_nmi(struct pt_regs *, long);
+dotraplinkage void do_int3(struct pt_regs *, long);
+dotraplinkage void do_overflow(struct pt_regs *, long);
+dotraplinkage void do_bounds(struct pt_regs *, long);
+dotraplinkage void do_invalid_op(struct pt_regs *, long);
+dotraplinkage void do_device_not_available(struct pt_regs *, long);
+dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
+dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
+dotraplinkage void do_segment_not_present(struct pt_regs *, long);
+dotraplinkage void do_stack_segment(struct pt_regs *, long);
+#ifdef CONFIG_X86_64
+dotraplinkage void do_double_fault(struct pt_regs *, long);
+asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
+#endif
+dotraplinkage void do_general_protection(struct pt_regs *, long);
+dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
+#ifdef CONFIG_TRACING
+dotraplinkage void trace_do_page_fault(struct pt_regs *, unsigned long);
+#else
+static inline void trace_do_page_fault(struct pt_regs *regs, unsigned long error)
+{
+ do_page_fault(regs, error);
+}
+#endif
+dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
+dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
+dotraplinkage void do_alignment_check(struct pt_regs *, long);
+#ifdef CONFIG_X86_MCE
+dotraplinkage void do_machine_check(struct pt_regs *, long);
+#endif
+dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
+#ifdef CONFIG_X86_32
+dotraplinkage void do_iret_error(struct pt_regs *, long);
+#endif
+
+static inline int get_si_code(unsigned long condition)
+{
+ if (condition & DR_STEP)
+ return TRAP_TRACE;
+ else if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3))
+ return TRAP_HWBKPT;
+ else
+ return TRAP_BRKPT;
+}
+
+extern int panic_on_unrecovered_nmi;
+
+void math_emulate(struct math_emu_info *);
+#ifndef CONFIG_X86_32
+asmlinkage void smp_thermal_interrupt(void);
+asmlinkage void mce_threshold_interrupt(void);
+#endif
+
+extern enum ctx_state ist_enter(struct pt_regs *regs);
+extern void ist_exit(struct pt_regs *regs, enum ctx_state prev_state);
+extern void ist_begin_non_atomic(struct pt_regs *regs);
+extern void ist_end_non_atomic(void);
+
+/* Interrupts/Exceptions */
+enum {
+ X86_TRAP_DE = 0, /* 0, Divide-by-zero */
+ X86_TRAP_DB, /* 1, Debug */
+ X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
+ X86_TRAP_BP, /* 3, Breakpoint */
+ X86_TRAP_OF, /* 4, Overflow */
+ X86_TRAP_BR, /* 5, Bound Range Exceeded */
+ X86_TRAP_UD, /* 6, Invalid Opcode */
+ X86_TRAP_NM, /* 7, Device Not Available */
+ X86_TRAP_DF, /* 8, Double Fault */
+ X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
+ X86_TRAP_TS, /* 10, Invalid TSS */
+ X86_TRAP_NP, /* 11, Segment Not Present */
+ X86_TRAP_SS, /* 12, Stack Segment Fault */
+ X86_TRAP_GP, /* 13, General Protection Fault */
+ X86_TRAP_PF, /* 14, Page Fault */
+ X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
+ X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
+ X86_TRAP_AC, /* 17, Alignment Check */
+ X86_TRAP_MC, /* 18, Machine Check */
+ X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
+ X86_TRAP_IRET = 32, /* 32, IRET Exception */
+};
+
+#endif /* _ASM_X86_TRAPS_H */
diff --git a/kernel/arch/x86/include/asm/tsc.h b/kernel/arch/x86/include/asm/tsc.h
new file mode 100644
index 000000000..94605c0e9
--- /dev/null
+++ b/kernel/arch/x86/include/asm/tsc.h
@@ -0,0 +1,71 @@
+/*
+ * x86 TSC related functions
+ */
+#ifndef _ASM_X86_TSC_H
+#define _ASM_X86_TSC_H
+
+#include <asm/processor.h>
+
+#define NS_SCALE 10 /* 2^10, carefully chosen */
+#define US_SCALE 32 /* 2^32, arbitrarily chosen */
+
+/*
+ * Standard way to access the cycle counter.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+extern void disable_TSC(void);
+
+static inline cycles_t get_cycles(void)
+{
+ unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+ if (!cpu_has_tsc)
+ return 0;
+#endif
+ rdtscll(ret);
+
+ return ret;
+}
+
+static __always_inline cycles_t vget_cycles(void)
+{
+ /*
+ * We only do VDSOs on TSC capable CPUs, so this shouldn't
+ * access boot_cpu_data (which is not VDSO-safe):
+ */
+#ifndef CONFIG_X86_TSC
+ if (!cpu_has_tsc)
+ return 0;
+#endif
+ return (cycles_t)__native_read_tsc();
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(char *reason);
+extern int unsynchronized_tsc(void);
+extern int check_tsc_unstable(void);
+extern int check_tsc_disabled(void);
+extern unsigned long native_calibrate_tsc(void);
+
+extern int tsc_clocksource_reliable;
+
+/*
+ * Boot-time check whether the TSCs are synchronized across
+ * all CPUs/cores:
+ */
+extern void check_tsc_sync_source(int cpu);
+extern void check_tsc_sync_target(void);
+
+extern int notsc_setup(char *);
+extern void tsc_save_sched_clock_state(void);
+extern void tsc_restore_sched_clock_state(void);
+
+/* MSR based TSC calibration for Intel Atom SoC platforms */
+unsigned long try_msr_calibrate_tsc(void);
+
+#endif /* _ASM_X86_TSC_H */
diff --git a/kernel/arch/x86/include/asm/uaccess.h b/kernel/arch/x86/include/asm/uaccess.h
new file mode 100644
index 000000000..a8df874f3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uaccess.h
@@ -0,0 +1,749 @@
+#ifndef _ASM_X86_UACCESS_H
+#define _ASM_X86_UACCESS_H
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <linux/string.h>
+#include <asm/asm.h>
+#include <asm/page.h>
+#include <asm/smap.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(-1UL)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
+#define __addr_ok(addr) \
+ ((unsigned long __force)(addr) < user_addr_max())
+
+/*
+ * Test whether a block of memory is a valid user space address.
+ * Returns 0 if the range is valid, nonzero otherwise.
+ */
+static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
+{
+ /*
+ * If we have used "sizeof()" for the size,
+ * we know it won't overflow the limit (but
+ * it might overflow the 'addr', so it's
+ * important to subtract the size from the
+ * limit, not add it to the address).
+ */
+ if (__builtin_constant_p(size))
+ return addr > limit - size;
+
+ /* Arbitrary sizes? Be careful about overflow */
+ addr += size;
+ if (addr < size)
+ return true;
+ return addr > limit;
+}
+
+#define __range_not_ok(addr, size, limit) \
+({ \
+ __chk_user_ptr(addr); \
+ __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
+})
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
+ * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ * to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#define access_ok(type, addr, size) \
+ likely(!__range_not_ok(addr, size, user_addr_max()))
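
A minimal sketch of the intended split: one up-front access_ok() check, then the unchecked __get_user() variant defined further down in this header for the actual transfer. The function name is hypothetical.

/* Illustrative sketch only: validate once, then use the no-check accessor. */
static inline int example_read_user_int(const int __user *uptr, int *val)
{
	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
		return -EFAULT;
	return __get_user(*val, uptr);	/* 0 on success, -EFAULT on fault */
}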
+
+/*
+ * The exception table consists of pairs of addresses relative to the
+ * exception table entry itself: the first is the address of an
+ * instruction that is allowed to fault, and the second is the address
+ * at which the program should continue. No registers are modified,
+ * so it is entirely up to the continuation code to figure out what to
+ * do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ int insn, fixup;
+};
+/* This is not the generic standard exception_table_entry format */
+#define ARCH_HAS_SORT_EXTABLE
+#define ARCH_HAS_SEARCH_EXTABLE
+
+extern int fixup_exception(struct pt_regs *regs);
+extern int early_fixup_exception(unsigned long *ip);
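
Because both fields are offsets relative to the entry itself, recovering an absolute address is a single addition. A minimal sketch, with a hypothetical helper name mirroring what the fixup code has to compute:

/* Illustrative sketch only: relative offset -> absolute faulting address. */
static inline unsigned long example_ex_insn_addr(const struct exception_table_entry *e)
{
	return (unsigned long)&e->insn + e->insn;
}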
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+
+extern int __get_user_1(void);
+extern int __get_user_2(void);
+extern int __get_user_4(void);
+extern int __get_user_8(void);
+extern int __get_user_bad(void);
+
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __inttype(x) \
+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+/*
+ * Careful: we have to cast the result to the type of the pointer
+ * for sign reasons.
+ *
+ * The use of _ASM_DX as the register specifier is a bit of a
+ * simplification, as gcc only cares about it as the starting point
+ * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
+ * (%ecx being the next register in gcc's x86 register sequence), and
+ * %rdx on 64 bits.
+ *
+ * Clang/LLVM cares about the size of the register, but still wants
+ * the base register for something that ends up being a pair.
+ */
+#define get_user(x, ptr) \
+({ \
+ int __ret_gu; \
+ register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
+ __chk_user_ptr(ptr); \
+ might_fault(); \
+ asm volatile("call __get_user_%P3" \
+ : "=a" (__ret_gu), "=r" (__val_gu) \
+ : "0" (ptr), "i" (sizeof(*(ptr)))); \
+ (x) = (__force __typeof__(*(ptr))) __val_gu; \
+ __ret_gu; \
+})
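
A minimal usage sketch of the checked accessors, which perform their own range check and therefore need no prior access_ok(); put_user() is defined just below, and the function name is hypothetical.

/* Illustrative sketch only: read a user word, double it, write it back. */
static inline int example_double_user_word(int __user *uptr)
{
	int v;

	if (get_user(v, uptr))
		return -EFAULT;
	return put_user(v * 2, uptr);	/* both return 0 or -EFAULT */
}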
+
+#define __put_user_x(size, x, ptr, __ret_pu) \
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+
+
+#ifdef CONFIG_X86_32
+#define __put_user_asm_u64(x, addr, err, errret) \
+ asm volatile(ASM_STAC "\n" \
+ "1: movl %%eax,0(%2)\n" \
+ "2: movl %%edx,4(%2)\n" \
+ "3: " ASM_CLAC "\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+ " jmp 3b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=r" (err) \
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex_u64(x, addr) \
+ asm volatile(ASM_STAC "\n" \
+ "1: movl %%eax,0(%1)\n" \
+ "2: movl %%edx,4(%1)\n" \
+ "3: " ASM_CLAC "\n" \
+ _ASM_EXTABLE_EX(1b, 2b) \
+ _ASM_EXTABLE_EX(2b, 3b) \
+ : : "A" (x), "r" (addr))
+
+#define __put_user_x8(x, ptr, __ret_pu) \
+ asm volatile("call __put_user_8" : "=a" (__ret_pu) \
+ : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+#else
+#define __put_user_asm_u64(x, ptr, retval, errret) \
+ __put_user_asm(x, ptr, retval, "q", "", "er", errret)
+#define __put_user_asm_ex_u64(x, addr) \
+ __put_user_asm_ex(x, addr, "q", "", "er")
+#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
+#endif
+
+extern void __put_user_bad(void);
+
+/*
+ * Strange magic calling convention: pointer in %ecx,
+ * value in %eax(:%edx), return value in %eax. clobbers %rbx
+ */
+extern void __put_user_1(void);
+extern void __put_user_2(void);
+extern void __put_user_4(void);
+extern void __put_user_8(void);
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({ \
+ int __ret_pu; \
+ __typeof__(*(ptr)) __pu_val; \
+ __chk_user_ptr(ptr); \
+ might_fault(); \
+ __pu_val = x; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_user_x(1, __pu_val, ptr, __ret_pu); \
+ break; \
+ case 2: \
+ __put_user_x(2, __pu_val, ptr, __ret_pu); \
+ break; \
+ case 4: \
+ __put_user_x(4, __pu_val, ptr, __ret_pu); \
+ break; \
+ case 8: \
+ __put_user_x8(__pu_val, ptr, __ret_pu); \
+ break; \
+ default: \
+ __put_user_x(X, __pu_val, ptr, __ret_pu); \
+ break; \
+ } \
+ __ret_pu; \
+})
+
+#define __put_user_size(x, ptr, size, retval, errret) \
+do { \
+ retval = 0; \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ case 1: \
+ __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
+ break; \
+ case 2: \
+ __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
+ break; \
+ case 4: \
+ __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
+ break; \
+ case 8: \
+ __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
+ errret); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ } \
+} while (0)
+
+#define __put_user_size_ex(x, ptr, size) \
+do { \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ case 1: \
+ __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
+ break; \
+ case 2: \
+ __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
+ break; \
+ case 4: \
+ __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
+ break; \
+ case 8: \
+ __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ } \
+} while (0)
+
+#ifdef CONFIG_X86_32
+#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
+#else
+#define __get_user_asm_u64(x, ptr, retval, errret) \
+ __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+ __get_user_asm_ex(x, ptr, "q", "", "=r")
+#endif
+
+#define __get_user_size(x, ptr, size, retval, errret) \
+do { \
+ retval = 0; \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ case 1: \
+ __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
+ break; \
+ case 2: \
+ __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
+ break; \
+ case 4: \
+ __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
+ break; \
+ case 8: \
+ __get_user_asm_u64(x, ptr, retval, errret); \
+ break; \
+ default: \
+ (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+ asm volatile(ASM_STAC "\n" \
+ "1: mov"itype" %2,%"rtype"1\n" \
+ "2: " ASM_CLAC "\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " xor"itype" %"rtype"1,%"rtype"1\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (err), ltype(x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __get_user_size_ex(x, ptr, size) \
+do { \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ case 1: \
+ __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
+ break; \
+ case 2: \
+ __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
+ break; \
+ case 4: \
+ __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
+ break; \
+ case 8: \
+ __get_user_asm_ex_u64(x, ptr); \
+ break; \
+ default: \
+ (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+ asm volatile("1: mov"itype" %1,%"rtype"0\n" \
+ "2:\n" \
+ _ASM_EXTABLE_EX(1b, 2b) \
+ : ltype(x) : "m" (__m(addr)))
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ int __pu_err; \
+ __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+ __pu_err; \
+})
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+/* FIXME: this hack is definitely wrong -AK */
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+ asm volatile(ASM_STAC "\n" \
+ "1: mov"itype" %"rtype"1,%2\n" \
+ "2: " ASM_CLAC "\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+ : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+ asm volatile("1: mov"itype" %"rtype"0,%1\n" \
+ "2:\n" \
+ _ASM_EXTABLE_EX(1b, 2b) \
+ : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try do { \
+ current_thread_info()->uaccess_err = 0; \
+ stac(); \
+ barrier();
+
+#define uaccess_catch(err) \
+ clac(); \
+ (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
+} while (0)
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+
+/*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ * get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try uaccess_try
+#define get_user_catch(err) uaccess_catch(err)
+
+#define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+ (x) = (__force __typeof__(*(ptr)))__gue_val; \
+} while (0)
+
+#define put_user_try uaccess_try
+#define put_user_catch(err) uaccess_catch(err)
+
+#define put_user_ex(x, ptr) \
+ __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
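+
+/*
+ * Illustrative sketch (editor's example, not from the original source) of
+ * the try/catch style accessors; "sc" stands for a hypothetical
+ * struct sigcontext __user pointer that already passed access_ok():
+ *
+ *	int err = 0;
+ *
+ *	put_user_try {
+ *		put_user_ex(regs->ip, &sc->ip);
+ *		put_user_ex(regs->sp, &sc->sp);
+ *	} put_user_catch(err);
+ *
+ *	return err;
+ */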
+
+extern unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+extern __must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
+
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
+unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+
+extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+
+#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
+({ \
+ int __ret = 0; \
+ __typeof__(ptr) __uval = (uval); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ switch (size) { \
+ case 1: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "q" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 2: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 4: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 8: \
+ { \
+ if (!IS_ENABLED(CONFIG_X86_64)) \
+ __cmpxchg_wrong_size(); \
+ \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ default: \
+ __cmpxchg_wrong_size(); \
+ } \
+ *__uval = __old; \
+ __ret; \
+})
+
+#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
+({ \
+ access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
+ __user_atomic_cmpxchg_inatomic((uval), (ptr), \
+ (old), (new), sizeof(*(ptr))) : \
+ -EFAULT; \
+})
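+
+/*
+ * Example (editor's sketch, not part of the original header): a futex-style
+ * compare-and-swap on a user-space word. "uaddr", "oldval" and "newval" are
+ * hypothetical; the macro does its own access_ok() check, returns -EFAULT on
+ * a fault and stores the value actually found at "uaddr" through "&cur":
+ *
+ *	u32 cur;
+ *
+ *	if (user_atomic_cmpxchg_inatomic(&cur, uaddr, oldval, newval))
+ *		return -EFAULT;
+ *	if (cur != oldval)
+ *		return -EAGAIN;
+ */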
+
+/*
+ * movsl can be slow when source and dest are not both 8-byte aligned
+ */
+#ifdef CONFIG_X86_INTEL_USERCOPY
+extern struct movsl_mask {
+ int mask;
+} ____cacheline_aligned_in_smp movsl_mask;
+#endif
+
+#define ARCH_HAS_NOCACHE_UACCESS 1
+
+#ifdef CONFIG_X86_32
+# include <asm/uaccess_32.h>
+#else
+# include <asm/uaccess_64.h>
+#endif
+
+unsigned long __must_check _copy_from_user(void *to, const void __user *from,
+ unsigned n);
+unsigned long __must_check _copy_to_user(void __user *to, const void *from,
+ unsigned n);
+
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+# define copy_user_diag __compiletime_error
+#else
+# define copy_user_diag __compiletime_warning
+#endif
+
+extern void copy_user_diag("copy_from_user() buffer size is too small")
+copy_from_user_overflow(void);
+extern void copy_user_diag("copy_to_user() buffer size is too small")
+copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+
+#undef copy_user_diag
+
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+
+extern void
+__compiletime_warning("copy_from_user() buffer size is not provably correct")
+__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
+#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
+
+extern void
+__compiletime_warning("copy_to_user() buffer size is not provably correct")
+__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
+#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
+
+#else
+
+static inline void
+__copy_from_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+#define __copy_to_user_overflow __copy_from_user_overflow
+
+#endif
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(to);
+
+ might_fault();
+
+ /*
+ * While we would like to have the compiler do the checking for us
+ * even in the non-constant size case, any false positives there are
+ * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
+ * without - the [hopefully] dangerous looking nature of the warning
+	 * would make people go look at the respective call sites over and
+ * over again just to find that there's no problem).
+ *
+ * And there are cases where it's just not realistic for the compiler
+ * to prove the count to be in range. For example when multiple call
+ * sites of a helper function - perhaps in different source files -
+ * all doing proper range checking, yet the helper function not doing
+ * so again.
+ *
+ * Therefore limit the compile time checking to the constant size
+ * case, and do only runtime checking for non-constant sizes.
+ */
+
+ if (likely(sz < 0 || sz >= n))
+ n = _copy_from_user(to, from, n);
+	else if (__builtin_constant_p(n))
+ copy_from_user_overflow();
+ else
+ __copy_from_user_overflow(sz, n);
+
+ return n;
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ might_fault();
+
+ /* See the comment in copy_from_user() above. */
+ if (likely(sz < 0 || sz >= n))
+ n = _copy_to_user(to, from, n);
+	else if (__builtin_constant_p(n))
+ copy_to_user_overflow();
+ else
+ __copy_to_user_overflow(sz, n);
+
+ return n;
+}
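+
+/*
+ * Usage sketch (editor's example, not part of the original header): a
+ * hypothetical handler moving "len" bytes between a kernel buffer "kbuf"
+ * and a user buffer "ubuf". Both helpers return the number of bytes that
+ * could NOT be copied, so a non-zero result means -EFAULT:
+ *
+ *	if (copy_from_user(kbuf, ubuf, len))
+ *		return -EFAULT;
+ *	...operate on kbuf...
+ *	if (copy_to_user(ubuf, kbuf, len))
+ *		return -EFAULT;
+ */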
+
+#undef __copy_from_user_overflow
+#undef __copy_to_user_overflow
+
+#endif /* _ASM_X86_UACCESS_H */
+
diff --git a/kernel/arch/x86/include/asm/uaccess_32.h b/kernel/arch/x86/include/asm/uaccess_32.h
new file mode 100644
index 000000000..7c8ad3451
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uaccess_32.h
@@ -0,0 +1,189 @@
+#ifndef _ASM_X86_UACCESS_32_H
+#define _ASM_X86_UACCESS_32_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/thread_info.h>
+#include <linux/string.h>
+#include <asm/asm.h>
+#include <asm/page.h>
+
+unsigned long __must_check __copy_to_user_ll
+ (void __user *to, const void *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll
+ (void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nozero
+ (void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache
+ (void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache_nozero
+ (void *to, const void __user *from, unsigned long n);
+
+/**
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ * The caller should also make sure the user space address is pinned
+ * so that we don't take a page fault and sleep.
+ *
+ * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
+ * we return the initial request size (1, 2 or 4), as copy_*_user should do.
+ * If a store crosses a page boundary and gets a fault, the x86 will not write
+ * anything, so this is accurate.
+ */
+
+static __always_inline unsigned long __must_check
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+ switch (n) {
+ case 1:
+ __put_user_size(*(u8 *)from, (u8 __user *)to,
+ 1, ret, 1);
+ return ret;
+ case 2:
+ __put_user_size(*(u16 *)from, (u16 __user *)to,
+ 2, ret, 2);
+ return ret;
+ case 4:
+ __put_user_size(*(u32 *)from, (u32 __user *)to,
+ 4, ret, 4);
+ return ret;
+ }
+ }
+ return __copy_to_user_ll(to, from, n);
+}
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+static __always_inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ return __copy_to_user_inatomic(to, from, n);
+}
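+
+/*
+ * Example (editor's sketch, not from the original source): the unchecked
+ * variant is only safe once the whole destination range has been validated;
+ * "ubuf", "kbuf" and "len" are hypothetical:
+ *
+ *	if (!access_ok(VERIFY_WRITE, ubuf, len))
+ *		return -EFAULT;
+ *	if (__copy_to_user(ubuf, kbuf, len))
+ *		return -EFAULT;
+ */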
+
+static __always_inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+ * constant, that shouldn't be a problem.
+ */
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+ switch (n) {
+ case 1:
+ __get_user_size(*(u8 *)to, from, 1, ret, 1);
+ return ret;
+ case 2:
+ __get_user_size(*(u16 *)to, from, 2, ret, 2);
+ return ret;
+ case 4:
+ __get_user_size(*(u32 *)to, from, 4, ret, 4);
+ return ret;
+ }
+ }
+ return __copy_from_user_ll_nozero(to, from, n);
+}
+
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to: Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from user space to kernel space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ *
+ * An alternate version - __copy_from_user_inatomic() - may be called from
+ * atomic context and will fail rather than sleep. In this case the
+ * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
+ * for explanation of why this is needed.
+ */
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ might_fault();
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+ switch (n) {
+ case 1:
+ __get_user_size(*(u8 *)to, from, 1, ret, 1);
+ return ret;
+ case 2:
+ __get_user_size(*(u16 *)to, from, 2, ret, 2);
+ return ret;
+ case 4:
+ __get_user_size(*(u32 *)to, from, 4, ret, 4);
+ return ret;
+ }
+ }
+ return __copy_from_user_ll(to, from, n);
+}
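+
+/*
+ * Sketch of the atomic-context variant (editor's example, not part of the
+ * original header): __copy_from_user_inatomic() neither sleeps nor zero-pads
+ * the tail, so callers typically bracket it with pagefault_disable() and
+ * fall back to the sleeping copy if it reports uncopied bytes:
+ *
+ *	pagefault_disable();
+ *	left = __copy_from_user_inatomic(kbuf, ubuf, len);
+ *	pagefault_enable();
+ *	if (left)
+ *		...retry with copy_from_user() outside the atomic section...
+ */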
+
+static __always_inline unsigned long __copy_from_user_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ might_fault();
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+ switch (n) {
+ case 1:
+ __get_user_size(*(u8 *)to, from, 1, ret, 1);
+ return ret;
+ case 2:
+ __get_user_size(*(u16 *)to, from, 2, ret, 2);
+ return ret;
+ case 4:
+ __get_user_size(*(u32 *)to, from, 4, ret, 4);
+ return ret;
+ }
+ }
+ return __copy_from_user_ll_nocache(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+{
+ return __copy_from_user_ll_nocache_nozero(to, from, n);
+}
+
+#endif /* _ASM_X86_UACCESS_32_H */
diff --git a/kernel/arch/x86/include/asm/uaccess_64.h b/kernel/arch/x86/include/asm/uaccess_64.h
new file mode 100644
index 000000000..f2f9b39b2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uaccess_64.h
@@ -0,0 +1,236 @@
+#ifndef _ASM_X86_UACCESS_64_H
+#define _ASM_X86_UACCESS_64_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/lockdep.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+#include <asm/page.h>
+
+/*
+ * Copy To/From Userspace
+ */
+
+/* Handles exceptions in both to and from, but doesn't do access_ok */
+__must_check unsigned long
+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+__must_check unsigned long
+copy_user_generic_string(void *to, const void *from, unsigned len);
+__must_check unsigned long
+copy_user_generic_unrolled(void *to, const void *from, unsigned len);
+
+static __always_inline __must_check unsigned long
+copy_user_generic(void *to, const void *from, unsigned len)
+{
+ unsigned ret;
+
+ /*
+ * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
+ * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
+ * Otherwise, use copy_user_generic_unrolled.
+ */
+ alternative_call_2(copy_user_generic_unrolled,
+ copy_user_generic_string,
+ X86_FEATURE_REP_GOOD,
+ copy_user_enhanced_fast_string,
+ X86_FEATURE_ERMS,
+ ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
+ "=d" (len)),
+ "1" (to), "2" (from), "3" (len)
+ : "memory", "rcx", "r8", "r9", "r10", "r11");
+ return ret;
+}
+
+__must_check unsigned long
+copy_in_user(void __user *to, const void __user *from, unsigned len);
+
+static __always_inline __must_check
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+{
+ int ret = 0;
+
+ if (!__builtin_constant_p(size))
+ return copy_user_generic(dst, (__force void *)src, size);
+ switch (size) {
+ case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ return ret;
+ case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ return ret;
+ case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ return ret;
+ case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ return ret;
+ case 10:
+ __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ ret, "q", "", "=r", 10);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+ (u16 __user *)(8 + (char __user *)src),
+ ret, "w", "w", "=r", 2);
+ return ret;
+ case 16:
+ __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+ (u64 __user *)(8 + (char __user *)src),
+ ret, "q", "", "=r", 8);
+ return ret;
+ default:
+ return copy_user_generic(dst, (__force void *)src, size);
+ }
+}
+
+static __always_inline __must_check
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+ might_fault();
+ return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+{
+ int ret = 0;
+
+ if (!__builtin_constant_p(size))
+ return copy_user_generic((__force void *)dst, src, size);
+ switch (size) {
+ case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+ return ret;
+ case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+ case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ case 10:
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 10);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+ __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ case 16:
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 16);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+ __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ default:
+ return copy_user_generic((__force void *)dst, src, size);
+ }
+}
+
+static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+ might_fault();
+ return __copy_to_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+{
+ int ret = 0;
+
+ might_fault();
+ if (!__builtin_constant_p(size))
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
+ switch (size) {
+ case 1: {
+ u8 tmp;
+ __get_user_asm(tmp, (u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+ return ret;
+ }
+ case 2: {
+ u16 tmp;
+ __get_user_asm(tmp, (u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ }
+
+ case 4: {
+ u32 tmp;
+ __get_user_asm(tmp, (u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+ }
+ case 8: {
+ u64 tmp;
+ __get_user_asm(tmp, (u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ }
+ default:
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
+ }
+}
+
+static __must_check __always_inline int
+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
+{
+ return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __must_check __always_inline int
+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+{
+ return __copy_to_user_nocheck(dst, src, size);
+}
+
+extern long __copy_user_nocache(void *dst, const void __user *src,
+ unsigned size, int zerorest);
+
+static inline int
+__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
+{
+ might_fault();
+ return __copy_user_nocache(dst, src, size, 1);
+}
+
+static inline int
+__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+ unsigned size)
+{
+ return __copy_user_nocache(dst, src, size, 0);
+}
+
+unsigned long
+copy_user_handle_tail(char *to, char *from, unsigned len);
+
+#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/kernel/arch/x86/include/asm/unaligned.h b/kernel/arch/x86/include/asm/unaligned.h
new file mode 100644
index 000000000..a7bd416b4
--- /dev/null
+++ b/kernel/arch/x86/include/asm/unaligned.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_UNALIGNED_H
+#define _ASM_X86_UNALIGNED_H
+
+/*
+ * The x86 can do unaligned accesses itself.
+ */
+
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
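+
+/*
+ * Example (editor's sketch, not part of the original header): on x86 these
+ * compile to plain loads and stores, but the accessors keep code that walks
+ * packed or wire-format structures portable; "p" is a hypothetical pointer
+ * into such a buffer:
+ *
+ *	u32 val = get_unaligned((u32 *)p);
+ *	put_unaligned(val + 1, (u32 *)p);
+ */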
+
+#endif /* _ASM_X86_UNALIGNED_H */
diff --git a/kernel/arch/x86/include/asm/unistd.h b/kernel/arch/x86/include/asm/unistd.h
new file mode 100644
index 000000000..2b19caa40
--- /dev/null
+++ b/kernel/arch/x86/include/asm/unistd.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_X86_UNISTD_H
+#define _ASM_X86_UNISTD_H 1
+
+#include <uapi/asm/unistd.h>
+
+
+# ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
+# else
+# define __SYSCALL_MASK (~0)
+# endif
+
+# ifdef CONFIG_X86_32
+
+# include <asm/unistd_32.h>
+# define __ARCH_WANT_STAT64
+# define __ARCH_WANT_SYS_IPC
+# define __ARCH_WANT_SYS_OLD_MMAP
+# define __ARCH_WANT_SYS_OLD_SELECT
+
+# else
+
+# include <asm/unistd_64.h>
+# include <asm/unistd_64_x32.h>
+# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_COMPAT_SYS_GETDENTS64
+# define __ARCH_WANT_COMPAT_SYS_PREADV64
+# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
+
+# endif
+
+# define __ARCH_WANT_OLD_READDIR
+# define __ARCH_WANT_OLD_STAT
+# define __ARCH_WANT_SYS_ALARM
+# define __ARCH_WANT_SYS_FADVISE64
+# define __ARCH_WANT_SYS_GETHOSTNAME
+# define __ARCH_WANT_SYS_GETPGRP
+# define __ARCH_WANT_SYS_LLSEEK
+# define __ARCH_WANT_SYS_NICE
+# define __ARCH_WANT_SYS_OLDUMOUNT
+# define __ARCH_WANT_SYS_OLD_GETRLIMIT
+# define __ARCH_WANT_SYS_OLD_UNAME
+# define __ARCH_WANT_SYS_PAUSE
+# define __ARCH_WANT_SYS_SIGNAL
+# define __ARCH_WANT_SYS_SIGPENDING
+# define __ARCH_WANT_SYS_SIGPROCMASK
+# define __ARCH_WANT_SYS_SOCKETCALL
+# define __ARCH_WANT_SYS_TIME
+# define __ARCH_WANT_SYS_UTIME
+# define __ARCH_WANT_SYS_WAITPID
+# define __ARCH_WANT_SYS_FORK
+# define __ARCH_WANT_SYS_VFORK
+# define __ARCH_WANT_SYS_CLONE
+
+#endif /* _ASM_X86_UNISTD_H */
diff --git a/kernel/arch/x86/include/asm/uprobes.h b/kernel/arch/x86/include/asm/uprobes.h
new file mode 100644
index 000000000..74f4c2ff6
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uprobes.h
@@ -0,0 +1,67 @@
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+/*
+ * User-space Probes (UProbes) for x86
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2008-2011
+ * Authors:
+ * Srikar Dronamraju
+ * Jim Keniston
+ */
+
+#include <linux/notifier.h>
+
+typedef u8 uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 16
+#define UPROBE_XOL_SLOT_BYTES 128 /* to keep it cache aligned */
+
+#define UPROBE_SWBP_INSN 0xcc
+#define UPROBE_SWBP_INSN_SIZE 1
+
+struct uprobe_xol_ops;
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+
+ const struct uprobe_xol_ops *ops;
+
+ union {
+ struct {
+ s32 offs;
+ u8 ilen;
+ u8 opc1;
+ } branch;
+ struct {
+ u8 fixups;
+ u8 ilen;
+ } defparam;
+ };
+};
+
+struct arch_uprobe_task {
+#ifdef CONFIG_X86_64
+ unsigned long saved_scratch_register;
+#endif
+ unsigned int saved_trap_nr;
+ unsigned int saved_tf;
+};
+
+#endif /* _ASM_UPROBES_H */
diff --git a/kernel/arch/x86/include/asm/user.h b/kernel/arch/x86/include/asm/user.h
new file mode 100644
index 000000000..ccab4af16
--- /dev/null
+++ b/kernel/arch/x86/include/asm/user.h
@@ -0,0 +1,63 @@
+#ifndef _ASM_X86_USER_H
+#define _ASM_X86_USER_H
+
+#ifdef CONFIG_X86_32
+# include <asm/user_32.h>
+#else
+# include <asm/user_64.h>
+#endif
+
+#include <asm/types.h>
+
+struct user_ymmh_regs {
+ /* 16 * 16 bytes for each YMMH-reg */
+ __u32 ymmh_space[64];
+};
+
+struct user_xsave_hdr {
+ __u64 xstate_bv;
+ __u64 reserved1[2];
+ __u64 reserved2[5];
+};
+
+/*
+ * The structure layout of user_xstateregs, used for exporting the
+ * extended register state through ptrace and core-dump (NT_X86_XSTATE note)
+ * interfaces will be the same as the memory layout of xsave used by the processor
+ * (except for the bytes 464..511, which can be used by the software) and hence
+ * the size of this structure varies depending on the features supported by the
+ * processor and OS. The size of the structure that users need to use can be
+ * obtained by doing:
+ * cpuid_count(0xd, 0, &eax, &ptrace_xstateregs_struct_size, &ecx, &edx);
+ * i.e., cpuid.(eax=0xd,ecx=0).ebx will be the size that user (debuggers, etc.)
+ * need to use.
+ *
+ * For now, only the first 8 bytes of the software usable bytes[464..471] will
+ * be used and will be set to OS enabled xstate mask (which is same as the
+ * 64bit mask returned by the xgetbv's xCR0). Users (analyzing core dump
+ * remotely, etc.) can use this mask as well as the mask saved in the
+ * xstate_hdr bytes and interpret what states the processor/OS supports
+ * and what states are in modified/initialized conditions for the
+ * particular process/thread.
+ *
+ * Also when the user modifies certain state FP/SSE/etc through the
+ * ptrace interface, they must ensure that the xsave_hdr.xstate_bv
+ * bytes[512..519] of the memory layout are updated correspondingly.
+ * i.e., for example when FP state is modified to a non-init state,
+ * xsave_hdr.xstate_bv's bit 0 must be set to '1', when SSE is modified to
+ * non-init state, xsave_hdr.xstate_bv's bit 1 must be set to '1', etc.
+ */
+#define USER_XSTATE_FX_SW_WORDS 6
+#define USER_XSTATE_XCR0_WORD 0
+
+struct user_xstateregs {
+ struct {
+ __u64 fpx_space[58];
+ __u64 xstate_fx_sw[USER_XSTATE_FX_SW_WORDS];
+ } i387;
+ struct user_xsave_hdr xsave_hdr;
+ struct user_ymmh_regs ymmh;
+ /* further processor state extensions go here */
+};
+
+#endif /* _ASM_X86_USER_H */
diff --git a/kernel/arch/x86/include/asm/user32.h b/kernel/arch/x86/include/asm/user32.h
new file mode 100644
index 000000000..14cbb73eb
--- /dev/null
+++ b/kernel/arch/x86/include/asm/user32.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_X86_USER32_H
+#define _ASM_X86_USER32_H
+
+/* IA32 compatible user structures for ptrace.
+ * These should be used for 32bit coredumps too. */
+
+struct user_i387_ia32_struct {
+ u32 cwd;
+ u32 swd;
+ u32 twd;
+ u32 fip;
+ u32 fcs;
+ u32 foo;
+ u32 fos;
+ u32 st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+};
+
+/* FSAVE frame with extensions */
+struct user32_fxsr_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd; /* not compatible to 64bit twd */
+ unsigned short fop;
+ int fip;
+ int fcs;
+ int foo;
+ int fos;
+ int mxcsr;
+ int reserved;
+ int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+ int padding[56];
+};
+
+struct user_regs_struct32 {
+ __u32 ebx, ecx, edx, esi, edi, ebp, eax;
+ unsigned short ds, __ds, es, __es;
+ unsigned short fs, __fs, gs, __gs;
+ __u32 orig_eax, eip;
+ unsigned short cs, __cs;
+ __u32 eflags, esp;
+ unsigned short ss, __ss;
+};
+
+struct user32 {
+ struct user_regs_struct32 regs; /* Where the registers are actually stored */
+ int u_fpvalid; /* True if math co-processor being used. */
+ /* for this mess. Not yet used. */
+ struct user_i387_ia32_struct i387; /* Math Co-processor registers. */
+/* The rest of this junk is to help gdb figure out what goes where */
+ __u32 u_tsize; /* Text segment size (pages). */
+ __u32 u_dsize; /* Data segment size (pages). */
+ __u32 u_ssize; /* Stack segment size (pages). */
+ __u32 start_code; /* Starting virtual address of text. */
+ __u32 start_stack; /* Starting virtual address of stack area.
+ This is actually the bottom of the stack,
+ the top of the stack is always found in the
+ esp register. */
+ __u32 signal; /* Signal that caused the core dump. */
+	int reserved;			/* No longer used */
+ __u32 u_ar0; /* Used by gdb to help find the values for */
+ /* the registers. */
+ __u32 u_fpstate; /* Math Co-processor pointer. */
+ __u32 magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+ int u_debugreg[8];
+};
+
+
+#endif /* _ASM_X86_USER32_H */
diff --git a/kernel/arch/x86/include/asm/user_32.h b/kernel/arch/x86/include/asm/user_32.h
new file mode 100644
index 000000000..bebfd8644
--- /dev/null
+++ b/kernel/arch/x86/include/asm/user_32.h
@@ -0,0 +1,131 @@
+#ifndef _ASM_X86_USER_32_H
+#define _ASM_X86_USER_32_H
+
+#include <asm/page.h>
+/* Core file format: The core file is written in such a way that gdb
+ can understand it and provide useful information to the user (under
+ linux we use the 'trad-core' bfd). There are quite a number of
+ obstacles to being able to view the contents of the floating point
+ registers, and until these are solved you will not be able to view the
+ contents of them. Actually, you can read in the core file and look at
+ the contents of the user struct to find out what the floating point
+ registers contain.
+ The actual file contents are as follows:
+ UPAGE: 1 page consisting of a user struct that tells gdb what is present
+ in the file. Directly after this is a copy of the task_struct, which
+ is currently not used by gdb, but it may come in useful at some point.
+ All of the registers are stored as part of the upage. The upage should
+ always be only one page.
+ DATA: The data area is stored. We use current->end_text to
+ current->brk to pick up all of the user variables, plus any memory
+ that may have been malloced. No attempt is made to determine if a page
+ is demand-zero or if a page is totally unused, we just cover the entire
+ range. All of the addresses are rounded in such a way that an integral
+ number of pages is written.
+ STACK: We need the stack information in order to get a meaningful
+ backtrace. We need to write the data from (esp) to
+ current->start_stack, so we round each of these off in order to be able
+ to write an integer number of pages.
+ The minimum core file size is 3 pages, or 12288 bytes.
+*/
+
+/*
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
+ * interacting with the FXSR-format floating point environment. Floating
+ * point data can be accessed in the regular format in the usual manner,
+ * and both the standard and SIMD floating point data can be accessed via
+ * the new ptrace requests. In either case, changes to the FPU environment
+ * will be reflected in the task's state as expected.
+ */
+
+struct user_i387_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+};
+
+struct user_fxsr_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd;
+ unsigned short fop;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long mxcsr;
+ long reserved;
+ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+ long padding[56];
+};
+
+/*
+ * This is the old layout of "struct pt_regs", and
+ * is still the layout used by user mode (the new
+ * pt_regs doesn't have all registers as the kernel
+ * doesn't use the extra segment registers)
+ */
+struct user_regs_struct {
+ unsigned long bx;
+ unsigned long cx;
+ unsigned long dx;
+ unsigned long si;
+ unsigned long di;
+ unsigned long bp;
+ unsigned long ax;
+ unsigned long ds;
+ unsigned long es;
+ unsigned long fs;
+ unsigned long gs;
+ unsigned long orig_ax;
+ unsigned long ip;
+ unsigned long cs;
+ unsigned long flags;
+ unsigned long sp;
+ unsigned long ss;
+};
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+ this will be used by gdb to figure out where the data and stack segments
+ are within the file, and what virtual addresses to use. */
+struct user{
+/* We start with the registers, to mimic the way that "memory" is returned
+ from the ptrace(3,...) function. */
+ struct user_regs_struct regs; /* Where the registers are actually stored */
+/* ptrace does not yet supply these. Someday.... */
+ int u_fpvalid; /* True if math co-processor being used. */
+ /* for this mess. Not yet used. */
+ struct user_i387_struct i387; /* Math Co-processor registers. */
+/* The rest of this junk is to help gdb figure out what goes where */
+ unsigned long int u_tsize; /* Text segment size (pages). */
+ unsigned long int u_dsize; /* Data segment size (pages). */
+ unsigned long int u_ssize; /* Stack segment size (pages). */
+ unsigned long start_code; /* Starting virtual address of text. */
+ unsigned long start_stack; /* Starting virtual address of stack area.
+ This is actually the bottom of the stack,
+ the top of the stack is always found in the
+ esp register. */
+ long int signal; /* Signal that caused the core dump. */
+ int reserved; /* No longer used */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
+ /* the registers. */
+ struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */
+ unsigned long magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+ int u_debugreg[8];
+};
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _ASM_X86_USER_32_H */
diff --git a/kernel/arch/x86/include/asm/user_64.h b/kernel/arch/x86/include/asm/user_64.h
new file mode 100644
index 000000000..faf2cd3e0
--- /dev/null
+++ b/kernel/arch/x86/include/asm/user_64.h
@@ -0,0 +1,137 @@
+#ifndef _ASM_X86_USER_64_H
+#define _ASM_X86_USER_64_H
+
+#include <asm/types.h>
+#include <asm/page.h>
+/* Core file format: The core file is written in such a way that gdb
+ can understand it and provide useful information to the user.
+ There are quite a number of obstacles to being able to view the
+ contents of the floating point registers, and until these are
+ solved you will not be able to view the contents of them.
+ Actually, you can read in the core file and look at the contents of
+ the user struct to find out what the floating point registers
+ contain.
+
+ The actual file contents are as follows:
+ UPAGE: 1 page consisting of a user struct that tells gdb what is present
+ in the file. Directly after this is a copy of the task_struct, which
+ is currently not used by gdb, but it may come in useful at some point.
+ All of the registers are stored as part of the upage. The upage should
+ always be only one page.
+ DATA: The data area is stored. We use current->end_text to
+ current->brk to pick up all of the user variables, plus any memory
+ that may have been malloced. No attempt is made to determine if a page
+ is demand-zero or if a page is totally unused, we just cover the entire
+ range. All of the addresses are rounded in such a way that an integral
+ number of pages is written.
+ STACK: We need the stack information in order to get a meaningful
+ backtrace. We need to write the data from (esp) to
+ current->start_stack, so we round each of these off in order to be able
+ to write an integer number of pages.
+ The minimum core file size is 3 pages, or 12288 bytes. */
+
+/*
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ *
+ * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
+ * interacting with the FXSR-format floating point environment. Floating
+ * point data can be accessed in the regular format in the usual manner,
+ * and both the standard and SIMD floating point data can be accessed via
+ * the new ptrace requests. In either case, changes to the FPU environment
+ * will be reflected in the task's state as expected.
+ *
+ * x86-64 support by Andi Kleen.
+ */
+
+/* This matches the 64bit FXSAVE format as defined by AMD. It is the same
+ as the 32bit format defined by Intel, except that the selector:offset pairs
+ for data and eip are replaced with flat 64bit pointers. */
+struct user_i387_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd; /* Note this is not the same as
+ the 32bit/x87/FSAVE twd */
+ unsigned short fop;
+ __u64 rip;
+ __u64 rdp;
+ __u32 mxcsr;
+ __u32 mxcsr_mask;
+ __u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
+ __u32 padding[24];
+};
+
+/*
+ * Segment register layout in coredumps.
+ */
+struct user_regs_struct {
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long bp;
+ unsigned long bx;
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+ unsigned long r8;
+ unsigned long ax;
+ unsigned long cx;
+ unsigned long dx;
+ unsigned long si;
+ unsigned long di;
+ unsigned long orig_ax;
+ unsigned long ip;
+ unsigned long cs;
+ unsigned long flags;
+ unsigned long sp;
+ unsigned long ss;
+ unsigned long fs_base;
+ unsigned long gs_base;
+ unsigned long ds;
+ unsigned long es;
+ unsigned long fs;
+ unsigned long gs;
+};
+
+/* When the kernel dumps core, it starts by dumping the user struct -
+ this will be used by gdb to figure out where the data and stack segments
+ are within the file, and what virtual addresses to use. */
+
+struct user {
+/* We start with the registers, to mimic the way that "memory" is returned
+ from the ptrace(3,...) function. */
+ struct user_regs_struct regs; /* Where the registers are actually stored */
+/* ptrace does not yet supply these. Someday.... */
+ int u_fpvalid; /* True if math co-processor being used. */
+ /* for this mess. Not yet used. */
+ int pad0;
+ struct user_i387_struct i387; /* Math Co-processor registers. */
+/* The rest of this junk is to help gdb figure out what goes where */
+ unsigned long int u_tsize; /* Text segment size (pages). */
+ unsigned long int u_dsize; /* Data segment size (pages). */
+ unsigned long int u_ssize; /* Stack segment size (pages). */
+ unsigned long start_code; /* Starting virtual address of text. */
+ unsigned long start_stack; /* Starting virtual address of stack area.
+ This is actually the bottom of the stack,
+ the top of the stack is always found in the
+ esp register. */
+ long int signal; /* Signal that caused the core dump. */
+ int reserved; /* No longer used */
+ int pad1;
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
+ /* the registers. */
+ struct user_i387_struct *u_fpstate; /* Math Co-processor pointer. */
+ unsigned long magic; /* To uniquely identify a core file */
+ char u_comm[32]; /* User command that was responsible */
+ unsigned long u_debugreg[8];
+ unsigned long error_code; /* CPU error code or 0 */
+	unsigned long fault_address;	/* CR2 or 0 */
+};
+#define NBPG PAGE_SIZE
+#define UPAGES 1
+#define HOST_TEXT_START_ADDR (u.start_code)
+#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
+
+#endif /* _ASM_X86_USER_64_H */
diff --git a/kernel/arch/x86/include/asm/uv/bios.h b/kernel/arch/x86/include/asm/uv/bios.h
new file mode 100644
index 000000000..71605c7d5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uv/bios.h
@@ -0,0 +1,114 @@
+#ifndef _ASM_X86_UV_BIOS_H
+#define _ASM_X86_UV_BIOS_H
+
+/*
+ * UV BIOS layer definitions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
+ */
+
+#include <linux/rtc.h>
+
+/*
+ * Values for the BIOS calls. It is passed as the first argument in the
+ * BIOS call. Passing any other value in the first argument will result
+ * in a BIOS_STATUS_UNIMPLEMENTED return status.
+ */
+enum uv_bios_cmd {
+ UV_BIOS_COMMON,
+ UV_BIOS_GET_SN_INFO,
+ UV_BIOS_FREQ_BASE,
+ UV_BIOS_WATCHLIST_ALLOC,
+ UV_BIOS_WATCHLIST_FREE,
+ UV_BIOS_MEMPROTECT,
+ UV_BIOS_GET_PARTITION_ADDR,
+ UV_BIOS_SET_LEGACY_VGA_TARGET
+};
+
+/*
+ * Status values returned from a BIOS call.
+ */
+enum {
+ BIOS_STATUS_MORE_PASSES = 1,
+ BIOS_STATUS_SUCCESS = 0,
+ BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
+ BIOS_STATUS_EINVAL = -EINVAL,
+ BIOS_STATUS_UNAVAIL = -EBUSY
+};
+
+/*
+ * The UV system table describes specific firmware
+ * capabilities available to the Linux kernel at runtime.
+ */
+struct uv_systab {
+ char signature[4]; /* must be "UVST" */
+ u32 revision; /* distinguish different firmware revs */
+ u64 function; /* BIOS runtime callback function ptr */
+};
+
+enum {
+ BIOS_FREQ_BASE_PLATFORM = 0,
+ BIOS_FREQ_BASE_INTERVAL_TIMER = 1,
+ BIOS_FREQ_BASE_REALTIME_CLOCK = 2
+};
+
+union partition_info_u {
+ u64 val;
+ struct {
+ u64 hub_version : 8,
+ partition_id : 16,
+ coherence_id : 16,
+ region_size : 24;
+ };
+};
+
+enum uv_memprotect {
+ UV_MEMPROT_RESTRICT_ACCESS,
+ UV_MEMPROT_ALLOW_AMO,
+ UV_MEMPROT_ALLOW_RW
+};
+
+/*
+ * bios calls have 6 parameters
+ */
+extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+
+extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *, long *);
+extern s64 uv_bios_freq_base(u64, u64 *);
+extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
+ unsigned long *);
+extern int uv_bios_mq_watchlist_free(int, int);
+extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
+extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
+extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus);
+
+extern void uv_bios_init(void);
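+
+/*
+ * Example (editor's sketch, not part of the original header): querying the
+ * real-time clock frequency through the wrappers above; "ticks_per_sec" is
+ * a hypothetical local:
+ *
+ *	u64 ticks_per_sec;
+ *
+ *	if (uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
+ *			      &ticks_per_sec) != BIOS_STATUS_SUCCESS)
+ *		return -EINVAL;
+ */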
+
+extern unsigned long sn_rtc_cycles_per_second;
+extern int uv_type;
+extern long sn_partition_id;
+extern long sn_coherency_id;
+extern long sn_region_size;
+extern long system_serial_number;
+#define partition_coherence_id() (sn_coherency_id)
+
+extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
+
+#endif /* _ASM_X86_UV_BIOS_H */
diff --git a/kernel/arch/x86/include/asm/uv/uv.h b/kernel/arch/x86/include/asm/uv/uv.h
new file mode 100644
index 000000000..062921ef3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uv/uv.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_X86_UV_UV_H
+#define _ASM_X86_UV_UV_H
+
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+
+struct cpumask;
+struct mm_struct;
+
+#ifdef CONFIG_X86_UV
+
+extern enum uv_system_type get_uv_system_type(void);
+extern int is_uv_system(void);
+extern void uv_cpu_init(void);
+extern void uv_nmi_init(void);
+extern void uv_system_init(void);
+extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end,
+ unsigned int cpu);
+
+#else /* X86_UV */
+
+static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline int is_uv_system(void) { return 0; }
+static inline void uv_cpu_init(void) { }
+static inline void uv_system_init(void) { }
+static inline const struct cpumask *
+uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
+ unsigned long start, unsigned long end, unsigned int cpu)
+{ return cpumask; }
+
+#endif /* X86_UV */
+
+#endif /* _ASM_X86_UV_UV_H */
diff --git a/kernel/arch/x86/include/asm/uv/uv_bau.h b/kernel/arch/x86/include/asm/uv/uv_bau.h
new file mode 100644
index 000000000..ebb40118a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uv/uv_bau.h
@@ -0,0 +1,791 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV Broadcast Assist Unit definitions
+ *
+ * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_X86_UV_UV_BAU_H
+#define _ASM_X86_UV_UV_BAU_H
+
+#include <linux/bitmap.h>
+#define BITSPERBYTE 8
+
+/*
+ * Broadcast Assist Unit messaging structures
+ *
+ * Selective Broadcast activations are induced by software action
+ * specifying a particular 8-descriptor "set" via a 6-bit index written
+ * to an MMR.
+ * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
+ * each 6-bit index value. These descriptor sets are mapped in sequence
+ * starting with set 0 located at the address specified in the
+ * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
+ * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
+ *
+ * We will use one set for sending BAU messages from each of the
+ * cpu's on the uvhub.
+ *
+ * TLB shootdown will use the first of the 8 descriptors of each set.
+ * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
+ */
+
+#define MAX_CPUS_PER_UVHUB 128
+#define MAX_CPUS_PER_SOCKET 64
+#define ADP_SZ 64 /* hardware-provided max. */
+#define UV_CPUS_PER_AS 32 /* hardware-provided max. */
+#define ITEMS_PER_DESC 8
+/* the 'throttle' to prevent the hardware stay-busy bug */
+#define MAX_BAU_CONCURRENT 3
+#define UV_ACT_STATUS_MASK 0x3
+#define UV_ACT_STATUS_SIZE 2
+#define UV_DISTRIBUTION_SIZE 256
+#define UV_SW_ACK_NPENDING 8
+#define UV1_NET_ENDPOINT_INTD 0x38
+#define UV2_NET_ENDPOINT_INTD 0x28
+#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
+ UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
+#define UV_DESC_PSHIFT 49
+#define UV_PAYLOADQ_PNODE_SHIFT 49
+#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
+#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
+#define UV_BAU_TUNABLES_DIR "sgi_uv"
+#define UV_BAU_TUNABLES_FILE "bau_tunables"
+#define WHITESPACE " \t\n"
+#define uv_mmask ((1UL << uv_hub_info->m_val) - 1)
+#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
+#define cpubit_isset(cpu, bau_local_cpumask) \
+ test_bit((cpu), (bau_local_cpumask).bits)
+
+/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
+/*
+ * UV2: Bit 19 selects between
+ * (0): 10 microsecond timebase and
+ * (1): 80 microseconds
+ * we're using 560us, similar to UV1: 65 units of 10us
+ */
+#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
+#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
+
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
+ UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
+ UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)
+/* assuming UV3 is the same */
+
+#define BAU_MISC_CONTROL_MULT_MASK 3
+
+#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
+/* [30:28] URGENCY_7 an index into a table of times */
+#define BAU_URGENCY_7_SHIFT 28
+#define BAU_URGENCY_7_MASK 7
+
+#define UVH_TRANSACTION_TIMEOUT 0x000000b200UL
+/* [45:40] BAU - BAU transaction timeout select - a multiplier */
+#define BAU_TRANS_SHIFT 40
+#define BAU_TRANS_MASK 0x3f
+
+/*
+ * shorten some awkward names
+ */
+#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
+#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
+#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
+#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
+#define PREFETCH_HINT_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT
+#define SB_STATUS_SHFT UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
+#define write_gmmr uv_write_global_mmr64
+#define write_lmmr uv_write_local_mmr
+#define read_lmmr uv_read_local_mmr
+#define read_gmmr uv_read_global_mmr64
+
+/*
+ * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
+ */
+#define DS_IDLE 0
+#define DS_ACTIVE 1
+#define DS_DESTINATION_TIMEOUT 2
+#define DS_SOURCE_TIMEOUT 3
+/*
+ * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
+ * values 1 and 3 will not occur
+ * Decoded meaning ERROR BUSY AUX ERR
+ * ------------------------------- ---- ----- -------
+ * IDLE 0 0 0
+ * BUSY (active) 0 1 0
+ * SW Ack Timeout (destination) 1 0 0
+ * SW Ack INTD rejected (strong NACK) 1 0 1
+ * Source Side Time Out Detected 1 1 0
+ * Destination Side PUT Failed 1 1 1
+ */
+#define UV2H_DESC_IDLE 0
+#define UV2H_DESC_BUSY 2
+#define UV2H_DESC_DEST_TIMEOUT 4
+#define UV2H_DESC_DEST_STRONG_NACK 5
+#define UV2H_DESC_SOURCE_TIMEOUT 6
+#define UV2H_DESC_DEST_PUT_ERR 7
+
+/*
+ * delay for 'plugged' timeout retries, in microseconds
+ */
+#define PLUGGED_DELAY 10
+
+/*
+ * thresholds at which to use IPI to free resources
+ */
+/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
+#define PLUGSB4RESET 100
+/* after this many consecutive timeouts, use IPI to release resources */
+#define TIMEOUTSB4RESET 1
+/* after this many uses of IPI to release resources, give up the request */
+#define IPI_RESET_LIMIT 1
+/* after this # consecutive successes, bump up the throttle if it was lowered */
+#define COMPLETE_THRESHOLD 5
+/* after this # of giveups (fall back to kernel IPI's) disable the use of
+ the BAU for a period of time */
+#define GIVEUP_LIMIT 100
+
+#define UV_LB_SUBNODEID 0x10
+
+/* these two are the same for UV1 and UV2: */
+#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
+#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
+/* 4 bits of software ack period */
+#define UV2_ACK_MASK 0x7UL
+#define UV2_ACK_UNITS_SHFT 3
+#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
+
+/*
+ * number of entries in the destination side payload queue
+ */
+#define DEST_Q_SIZE 20
+/*
+ * number of destination side software ack resources
+ */
+#define DEST_NUM_RESOURCES 8
+/*
+ * completion statuses for sending a TLB flush message
+ */
+#define FLUSH_RETRY_PLUGGED 1
+#define FLUSH_RETRY_TIMEOUT 2
+#define FLUSH_GIVEUP 3
+#define FLUSH_COMPLETE 4
+
+/*
+ * tuning the action when the numalink network is extremely delayed
+ */
+#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in
+ microseconds */
+#define CONGESTED_REPS 10 /* long delays averaged over
+ this many broadcasts */
+#define DISABLED_PERIOD 10 /* time for the bau to be
+ disabled, in seconds */
+/* see msg_type: */
+#define MSG_NOOP 0
+#define MSG_REGULAR 1
+#define MSG_RETRY 2
+
+/*
+ * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
+ * If the 'multilevel' flag in the header portion of the descriptor
+ * has been set to 0, then endpoint multi-unicast mode is selected.
+ * The distribution specification (32 bytes) is interpreted as a 256-bit
+ * distribution vector. Adjacent bits correspond to consecutive even numbered
+ * nodeIDs. The result of adding the index of a given bit to the 15-bit
+ * 'base_dest_nasid' field of the header corresponds to the
+ * destination nodeID associated with that specified bit.
+ */
+struct pnmask {
+ unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
+};
+
+/*
+ * mask of cpu's on a uvhub
+ * (during initialization we need to check that unsigned long has
+ * enough bits for max. cpu's per uvhub)
+ */
+struct bau_local_cpumask {
+ unsigned long bits;
+};
+
+/*
+ * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
+ * only 12 bytes (96 bits) of the payload area are usable.
+ * An additional 3 bytes (bits 27:4) of the header address are carried
+ * to the next bytes of the destination payload queue.
+ * And an additional 2 bytes of the header Suppl_A field are also
+ * carried to the destination payload queue.
+ * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
+ * of the destination payload queue, which is written by the hardware
+ * with the s/w ack resource bit vector.
+ * [ effective message contents (16 bytes (128 bits) maximum), not counting
+ * the s/w ack bit vector ]
+ */
+
+/*
+ * The payload is software-defined for INTD transactions
+ */
+struct bau_msg_payload {
+ unsigned long address; /* signifies a page or all
+ TLB's of the cpu */
+ /* 64 bits */
+ unsigned short sending_cpu; /* filled in by sender */
+ /* 16 bits */
+ unsigned short acknowledge_count; /* filled in by destination */
+ /* 16 bits */
+ unsigned int reserved1:32; /* not usable */
+};
+
+
+/*
+ * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * see table 4.2.3.0.1 in the broadcast_assist spec.
+ */
+struct uv1_bau_msg_header {
+ unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
+ /* bits 5:0 */
+ unsigned int base_dest_nasid:15; /* nasid of the first bit */
+ /* bits 20:6 */ /* in uvhub map */
+ unsigned int command:8; /* message type */
+ /* bits 28:21 */
+ /* 0x38: SN3net EndPoint Message */
+ unsigned int rsvd_1:3; /* must be zero */
+ /* bits 31:29 */
+ /* int will align on 32 bits */
+ unsigned int rsvd_2:9; /* must be zero */
+ /* bits 40:32 */
+ /* Suppl_A is 56-41 */
+ unsigned int sequence:16; /* message sequence number */
+ /* bits 56:41 */ /* becomes bytes 16-17 of msg */
+ /* Address field (96:57) is
+ never used as an address
+ (these are address bits
+ 42:3) */
+
+ unsigned int rsvd_3:1; /* must be zero */
+ /* bit 57 */
+ /* address bits 27:4 are payload */
+ /* these next 24 (58-81) bits become bytes 12-14 of msg */
+ /* bits 65:58 land in byte 12 */
+ unsigned int replied_to:1; /* sent as 0 by the source to
+ byte 12 */
+ /* bit 58 */
+ unsigned int msg_type:3; /* software type of the
+ message */
+ /* bits 61:59 */
+ unsigned int canceled:1; /* message canceled, resource
+ is to be freed*/
+ /* bit 62 */
+ unsigned int payload_1a:1; /* not currently used */
+ /* bit 63 */
+ unsigned int payload_1b:2; /* not currently used */
+ /* bits 65:64 */
+
+ /* bits 73:66 land in byte 13 */
+ unsigned int payload_1ca:6; /* not currently used */
+ /* bits 71:66 */
+ unsigned int payload_1c:2; /* not currently used */
+ /* bits 73:72 */
+
+ /* bits 81:74 land in byte 14 */
+ unsigned int payload_1d:6; /* not currently used */
+ /* bits 79:74 */
+ unsigned int payload_1e:2; /* not currently used */
+ /* bits 81:80 */
+
+ unsigned int rsvd_4:7; /* must be zero */
+ /* bits 88:82 */
+ unsigned int swack_flag:1; /* software acknowledge flag */
+ /* bit 89 */
+					/* INTD transactions at
+ destination are to wait for
+ software acknowledge */
+ unsigned int rsvd_5:6; /* must be zero */
+ /* bits 95:90 */
+ unsigned int rsvd_6:5; /* must be zero */
+ /* bits 100:96 */
+ unsigned int int_both:1; /* if 1, interrupt both sockets
+ on the uvhub */
+ /* bit 101*/
+ unsigned int fairness:3; /* usually zero */
+ /* bits 104:102 */
+ unsigned int multilevel:1; /* multi-level multicast
+ format */
+ /* bit 105 */
+ /* 0 for TLB: endpoint multi-unicast messages */
+ unsigned int chaining:1; /* next descriptor is part of
+ this activation*/
+ /* bit 106 */
+ unsigned int rsvd_7:21; /* must be zero */
+ /* bits 127:107 */
+};
+
+/*
+ * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * see figure 9-2 of harp_sys.pdf
+ * assuming UV3 is the same
+ */
+struct uv2_3_bau_msg_header {
+ unsigned int base_dest_nasid:15; /* nasid of the first bit */
+ /* bits 14:0 */ /* in uvhub map */
+ unsigned int dest_subnodeid:5; /* must be 0x10, for the LB */
+ /* bits 19:15 */
+ unsigned int rsvd_1:1; /* must be zero */
+ /* bit 20 */
+ /* Address bits 59:21 */
+ /* bits 25:2 of address (44:21) are payload */
+ /* these next 24 bits become bytes 12-14 of msg */
+ /* bits 28:21 land in byte 12 */
+ unsigned int replied_to:1; /* sent as 0 by the source to
+ byte 12 */
+ /* bit 21 */
+ unsigned int msg_type:3; /* software type of the
+ message */
+ /* bits 24:22 */
+ unsigned int canceled:1; /* message canceled, resource
+ is to be freed*/
+ /* bit 25 */
+ unsigned int payload_1:3; /* not currently used */
+ /* bits 28:26 */
+
+ /* bits 36:29 land in byte 13 */
+ unsigned int payload_2a:3; /* not currently used */
+ unsigned int payload_2b:5; /* not currently used */
+ /* bits 36:29 */
+
+ /* bits 44:37 land in byte 14 */
+ unsigned int payload_3:8; /* not currently used */
+ /* bits 44:37 */
+
+ unsigned int rsvd_2:7; /* reserved */
+ /* bits 51:45 */
+ unsigned int swack_flag:1; /* software acknowledge flag */
+ /* bit 52 */
+ unsigned int rsvd_3a:3; /* must be zero */
+ unsigned int rsvd_3b:8; /* must be zero */
+ unsigned int rsvd_3c:8; /* must be zero */
+ unsigned int rsvd_3d:3; /* must be zero */
+ /* bits 74:53 */
+ unsigned int fairness:3; /* usually zero */
+ /* bits 77:75 */
+
+ unsigned int sequence:16; /* message sequence number */
+ /* bits 93:78 Suppl_A */
+ unsigned int chaining:1; /* next descriptor is part of
+ this activation*/
+ /* bit 94 */
+ unsigned int multilevel:1; /* multi-level multicast
+ format */
+ /* bit 95 */
+ unsigned int rsvd_4:24; /* ordered / source node /
+ source subnode / aging
+ must be zero */
+ /* bits 119:96 */
+ unsigned int command:8; /* message type */
+ /* bits 127:120 */
+};
+
+/*
+ * The activation descriptor:
+ * The format of the message to send, plus all accompanying control
+ * Should be 64 bytes
+ */
+struct bau_desc {
+ struct pnmask distribution;
+ /*
+ * message template, consisting of header and payload:
+ */
+ union bau_msg_header {
+ struct uv1_bau_msg_header uv1_hdr;
+ struct uv2_3_bau_msg_header uv2_3_hdr;
+ } header;
+
+ struct bau_msg_payload payload;
+};
+/* UV1:
+ * -payload-- ---------header------
+ * bytes 0-11 bits 41-56 bits 58-81
+ * A B (2) C (3)
+ *
+ * A/B/C are moved to:
+ * A C B
+ * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
+ * ------------payload queue-----------
+ */
+/* UV2:
+ * -payload-- ---------header------
+ * bytes 0-11 bits 70-78 bits 21-44
+ * A B (2) C (3)
+ *
+ * A/B/C are moved to:
+ * A C B
+ * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
+ * ------------payload queue-----------
+ */
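+
+/*
+ * Illustrative sketch (not part of the original interface): the header
+ * union above is selected by hub type.  The field chosen here is only
+ * an example of the pattern; the real descriptor setup is done by the
+ * BAU driver, and "uvhub_version == 1 means UV1" is an assumption of
+ * this sketch.
+ */
+static inline void bau_desc_mark_swack(struct bau_desc *bd, int uvhub_version)
+{
+	if (uvhub_version == 1)
+		bd->header.uv1_hdr.swack_flag = 1;	/* UV1 bit layout */
+	else
+		bd->header.uv2_3_hdr.swack_flag = 1;	/* UV2/UV3 bit layout */
+}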
+
+/*
+ * The payload queue on the destination side is an array of these.
+ * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
+ * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
+ * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
+ * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
+ * swack_vec and payload_2)
+ * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
+ * Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
+ * operation."
+ */
+struct bau_pq_entry {
+ unsigned long address; /* signifies a page or all TLB's
+ of the cpu */
+ /* 64 bits, bytes 0-7 */
+ unsigned short sending_cpu; /* cpu that sent the message */
+ /* 16 bits, bytes 8-9 */
+ unsigned short acknowledge_count; /* filled in by destination */
+ /* 16 bits, bytes 10-11 */
+ /* these next 3 bytes come from bits 58-81 of the message header */
+ unsigned short replied_to:1; /* sent as 0 by the source */
+ unsigned short msg_type:3; /* software message type */
+ unsigned short canceled:1; /* sent as 0 by the source */
+ unsigned short unused1:3; /* not currently using */
+ /* byte 12 */
+ unsigned char unused2a; /* not currently using */
+ /* byte 13 */
+ unsigned char unused2; /* not currently using */
+ /* byte 14 */
+ unsigned char swack_vec; /* filled in by the hardware */
+ /* byte 15 (bits 127:120) */
+ unsigned short sequence; /* message sequence number */
+ /* bytes 16-17 */
+ unsigned char unused4[2]; /* not currently using bytes 18-19 */
+ /* bytes 18-19 */
+ int number_of_cpus; /* filled in at destination */
+ /* 32 bits, bytes 20-23 (aligned) */
+ unsigned char unused5[8]; /* not using */
+ /* bytes 24-31 */
+};
+
+struct msg_desc {
+ struct bau_pq_entry *msg;
+ int msg_slot;
+ struct bau_pq_entry *queue_first;
+ struct bau_pq_entry *queue_last;
+};
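+
+/*
+ * Illustrative sketch (not part of the original interface): stepping a
+ * msg_desc through the circular payload queue described above.  Only
+ * the wrap-around arithmetic is shown; the real message handling lives
+ * in the BAU driver.
+ */
+static inline struct bau_pq_entry *bau_next_pq_entry(struct msg_desc *mdp)
+{
+	struct bau_pq_entry *next = mdp->msg + 1;
+
+	if (next > mdp->queue_last)	/* wrap to the start of the queue */
+		next = mdp->queue_first;
+	return next;
+}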
+
+struct reset_args {
+ int sender;
+};
+
+/*
+ * This structure is allocated per_cpu for UV TLB shootdown statistics.
+ */
+struct ptc_stats {
+ /* sender statistics */
+ unsigned long s_giveup; /* number of fall backs to
+ IPI-style flushes */
+ unsigned long s_requestor; /* number of shootdown
+ requests */
+ unsigned long s_stimeout; /* source side timeouts */
+ unsigned long s_dtimeout; /* destination side timeouts */
+ unsigned long s_strongnacks; /* number of strong nack's */
+ unsigned long s_time; /* time spent in sending side */
+ unsigned long s_retriesok; /* successful retries */
+ unsigned long s_ntargcpu; /* total number of cpu's
+ targeted */
+ unsigned long s_ntargself; /* times the sending cpu was
+ targeted */
+ unsigned long s_ntarglocals; /* targets of cpus on the local
+ blade */
+ unsigned long s_ntargremotes; /* targets of cpus on remote
+ blades */
+ unsigned long s_ntarglocaluvhub; /* targets of the local hub */
+	unsigned long s_ntargremoteuvhub; /* remote hubs targeted */
+ unsigned long s_ntarguvhub; /* total number of uvhubs
+ targeted */
+ unsigned long s_ntarguvhub16; /* number of times target
+ hubs >= 16*/
+ unsigned long s_ntarguvhub8; /* number of times target
+ hubs >= 8 */
+ unsigned long s_ntarguvhub4; /* number of times target
+ hubs >= 4 */
+ unsigned long s_ntarguvhub2; /* number of times target
+ hubs >= 2 */
+ unsigned long s_ntarguvhub1; /* number of times target
+ hubs == 1 */
+ unsigned long s_resets_plug; /* ipi-style resets from plug
+ state */
+ unsigned long s_resets_timeout; /* ipi-style resets from
+ timeouts */
+ unsigned long s_busy; /* status stayed busy past
+ s/w timer */
+ unsigned long s_throttles; /* waits in throttle */
+ unsigned long s_retry_messages; /* retry broadcasts */
+ unsigned long s_bau_reenabled; /* for bau enable/disable */
+ unsigned long s_bau_disabled; /* for bau enable/disable */
+ unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
+ unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
+ unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
+ unsigned long s_overipilimit; /* over the ipi reset limit */
+ unsigned long s_giveuplimit; /* disables, over giveup limit*/
+ unsigned long s_enters; /* entries to the driver */
+ unsigned long s_ipifordisabled; /* fall back to IPI; disabled */
+ unsigned long s_plugged; /* plugged by h/w bug*/
+ unsigned long s_congested; /* giveup on long wait */
+ /* destination statistics */
+ unsigned long d_alltlb; /* times all tlb's on this
+ cpu were flushed */
+ unsigned long d_onetlb; /* times just one tlb on this
+ cpu was flushed */
+ unsigned long d_multmsg; /* interrupts with multiple
+ messages */
+ unsigned long d_nomsg; /* interrupts with no message */
+ unsigned long d_time; /* time spent on destination
+ side */
+ unsigned long d_requestee; /* number of messages
+ processed */
+ unsigned long d_retries; /* number of retry messages
+ processed */
+ unsigned long d_canceled; /* number of messages canceled
+ by retries */
+ unsigned long d_nocanceled; /* retries that found nothing
+ to cancel */
+ unsigned long d_resets; /* number of ipi-style requests
+ processed */
+ unsigned long d_rcanceled; /* number of messages canceled
+ by resets */
+};
+
+struct tunables {
+ int *tunp;
+ int deflt;
+};
+
+struct hub_and_pnode {
+ short uvhub;
+ short pnode;
+};
+
+struct socket_desc {
+ short num_cpus;
+ short cpu_number[MAX_CPUS_PER_SOCKET];
+};
+
+struct uvhub_desc {
+ unsigned short socket_mask;
+ short num_cpus;
+ short uvhub;
+ short pnode;
+ struct socket_desc socket[2];
+};
+
+/*
+ * One per cpu; used to locate the software tables.
+ */
+struct bau_control {
+ struct bau_desc *descriptor_base;
+ struct bau_pq_entry *queue_first;
+ struct bau_pq_entry *queue_last;
+ struct bau_pq_entry *bau_msg_head;
+ struct bau_control *uvhub_master;
+ struct bau_control *socket_master;
+ struct ptc_stats *statp;
+ cpumask_t *cpumask;
+ unsigned long timeout_interval;
+ unsigned long set_bau_on_time;
+ atomic_t active_descriptor_count;
+ int plugged_tries;
+ int timeout_tries;
+ int ipi_attempts;
+ int conseccompletes;
+ short nobau;
+ short baudisabled;
+ short cpu;
+ short osnode;
+ short uvhub_cpu;
+ short uvhub;
+ short uvhub_version;
+ short cpus_in_socket;
+ short cpus_in_uvhub;
+ short partition_base_pnode;
+ short busy; /* all were busy (war) */
+ unsigned short message_number;
+ unsigned short uvhub_quiesce;
+ short socket_acknowledge_count[DEST_Q_SIZE];
+ cycles_t send_message;
+ cycles_t period_end;
+ cycles_t period_time;
+ raw_spinlock_t uvhub_lock;
+ raw_spinlock_t queue_lock;
+ raw_spinlock_t disable_lock;
+ /* tunables */
+ int max_concurr;
+ int max_concurr_const;
+ int plugged_delay;
+ int plugsb4reset;
+ int timeoutsb4reset;
+ int ipi_reset_limit;
+ int complete_threshold;
+ int cong_response_us;
+ int cong_reps;
+ cycles_t disabled_period;
+ int period_giveups;
+ int giveup_limit;
+ long period_requests;
+ struct hub_and_pnode *thp;
+};
+
+static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
+}
+
+static inline void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
+}
+
+static inline void write_mmr_activation(unsigned long index)
+{
+ write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+}
+
+static inline void write_gmmr_activation(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
+}
+
+static inline void write_mmr_payload_first(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
+}
+
+static inline void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
+}
+
+static inline void write_mmr_payload_last(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
+}
+
+static inline void write_mmr_misc_control(int pnode, unsigned long mmr_image)
+{
+ write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+}
+
+static inline unsigned long read_mmr_misc_control(int pnode)
+{
+ return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
+}
+
+static inline void write_mmr_sw_ack(unsigned long mr)
+{
+ uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
+static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
+{
+ write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
+static inline unsigned long read_mmr_sw_ack(void)
+{
+ return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+}
+
+static inline unsigned long read_gmmr_sw_ack(int pnode)
+{
+ return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+}
+
+static inline void write_mmr_data_config(int pnode, unsigned long mr)
+{
+ uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
+}
+
+static inline int bau_uvhub_isset(int uvhub, struct pnmask *dstp)
+{
+ return constant_test_bit(uvhub, &dstp->bits[0]);
+}
+static inline void bau_uvhub_set(int pnode, struct pnmask *dstp)
+{
+ __set_bit(pnode, &dstp->bits[0]);
+}
+static inline void bau_uvhubs_clear(struct pnmask *dstp,
+ int nbits)
+{
+ bitmap_zero(&dstp->bits[0], nbits);
+}
+static inline int bau_uvhub_weight(struct pnmask *dstp)
+{
+ return bitmap_weight((unsigned long *)&dstp->bits[0],
+ UV_DISTRIBUTION_SIZE);
+}
+
+static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
+{
+ bitmap_zero(&dstp->bits, nbits);
+}
+
+extern void uv_bau_message_intr1(void);
+#ifdef CONFIG_TRACING
+#define trace_uv_bau_message_intr1 uv_bau_message_intr1
+#endif
+extern void uv_bau_timeout_intr1(void);
+
+struct atomic_short {
+ short counter;
+};
+
+/*
+ * atomic_read_short - read a short atomic variable
+ * @v: pointer of type atomic_short
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read_short(const struct atomic_short *v)
+{
+ return v->counter;
+}
+
+/*
+ * atom_asr - add and return a short int
+ * @i: short value to add
+ * @v: pointer of type atomic_short
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+static inline int atom_asr(short i, struct atomic_short *v)
+{
+ short __i = i;
+ asm volatile(LOCK_PREFIX "xaddw %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+}
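+
+/*
+ * Illustrative sketch (not part of the original interface): because
+ * atom_asr() returns the post-add value, a caller can bump a shared
+ * short and test the new count in a single atomic step.  The
+ * quiesce_count name is hypothetical.
+ */
+static inline int atomic_short_inc_return(struct atomic_short *quiesce_count)
+{
+	return atom_asr(1, quiesce_count);	/* count including this cpu */
+}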
+
+/*
+ * conditionally add 1 to *v, unless *v is >= u
+ * return 0 if we cannot add 1 to *v because it is >= u
+ * return 1 if we can add 1 to *v because it is < u
+ * the add is atomic
+ *
+ * This is close to atomic_add_unless(), but this allows the 'u' value
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
+{
+ raw_spin_lock(lock);
+ if (atomic_read(v) >= u) {
+ raw_spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+ raw_spin_unlock(lock);
+ return 1;
+}
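+
+/*
+ * Illustrative sketch (not part of the original interface): using
+ * atomic_inc_unless_ge() as a concurrency throttle -- spin until fewer
+ * than 'limit' descriptors are active, then claim a slot.  Assumes
+ * cpu_relax() is visible to the includer; the real throttling logic is
+ * in the BAU driver.
+ */
+static inline void bau_throttle_example(struct bau_control *hmaster, int limit)
+{
+	while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
+				     &hmaster->active_descriptor_count,
+				     limit))
+		cpu_relax();
+}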
+
+#endif /* _ASM_X86_UV_UV_BAU_H */
diff --git a/kernel/arch/x86/include/asm/uv/uv_hub.h b/kernel/arch/x86/include/asm/uv/uv_hub.h
new file mode 100644
index 000000000..c2729abe0
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uv/uv_hub.h
@@ -0,0 +1,680 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV architectural definitions
+ *
+ * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_X86_UV_UV_HUB_H
+#define _ASM_X86_UV_UV_HUB_H
+
+#ifdef CONFIG_X86_64
+#include <linux/numa.h>
+#include <linux/percpu.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <asm/types.h>
+#include <asm/percpu.h>
+#include <asm/uv/uv_mmrs.h>
+#include <asm/irq_vectors.h>
+#include <asm/io_apic.h>
+
+
+/*
+ * Addressing Terminology
+ *
+ * M - The low M bits of a physical address represent the offset
+ * into the blade local memory. RAM memory on a blade is physically
+ * contiguous (although various IO spaces may punch holes in
+ *		it).
+ *
+ * N - Number of bits in the node portion of a socket physical
+ * address.
+ *
+ * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of
+ * routers always have low bit of 1, C/MBricks have low bit
+ * equal to 0. Most addressing macros that target UV hub chips
+ * right shift the NASID by 1 to exclude the always-zero bit.
+ * NASIDs contain up to 15 bits.
+ *
+ * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead
+ * of nasids.
+ *
+ * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
+ * of the nasid for socket usage.
+ *
+ * GPA - (global physical address) a socket physical address converted
+ * so that it can be used by the GRU as a global address. Socket
+ * physical addresses 1) need additional NASID (node) bits added
+ * to the high end of the address, and 2) unaliased if the
+ * partition does not have a physical address 0. In addition, on
+ * UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
+ *
+ *
+ * NumaLink Global Physical Address Format:
+ * +--------------------------------+---------------------+
+ * |00..000| GNODE | NodeOffset |
+ * +--------------------------------+---------------------+
+ * |<-------53 - M bits --->|<--------M bits ----->
+ *
+ * M - number of node offset bits (35 .. 40)
+ *
+ *
+ * Memory/UV-HUB Processor Socket Address Format:
+ * +----------------+---------------+---------------------+
+ * |00..000000000000| PNODE | NodeOffset |
+ * +----------------+---------------+---------------------+
+ * <--- N bits --->|<--------M bits ----->
+ *
+ * M - number of node offset bits (35 .. 40)
+ * N - number of PNODE bits (0 .. 10)
+ *
+ * Note: M + N cannot currently exceed 44 (x86_64) or 46 (IA64).
+ * The actual values are configuration dependent and are set at
+ * boot time. M & N values are set by the hardware/BIOS at boot.
+ *
+ *
+ * APICID format
+ * NOTE!!!!!! This is the current format of the APICID. However, code
+ * should assume that this will change in the future. Use functions
+ * in this file for all APICID bit manipulations and conversion.
+ *
+ * 1111110000000000
+ * 5432109876543210
+ * pppppppppplc0cch Nehalem-EX (12 bits in hdw reg)
+ * ppppppppplcc0cch Westmere-EX (12 bits in hdw reg)
+ * pppppppppppcccch SandyBridge (15 bits in hdw reg)
+ * sssssssssss
+ *
+ * p = pnode bits
+ * l = socket number on board
+ * c = core
+ * h = hyperthread
+ * s = bits that are in the SOCKET_ID CSR
+ *
+ * Note: Processor may support fewer bits in the APICID register. The ACPI
+ * tables hold all 16 bits. Software needs to be aware of this.
+ *
+ * Unless otherwise specified, all references to APICID refer to
+ * the FULL value contained in ACPI tables, not the subset in the
+ * processor APICID register.
+ */
+
+
+/*
+ * Maximum number of bricks in all partitions and in all coherency domains.
+ * This is the total number of bricks accessible in the numalink fabric. It
+ * includes all C & M bricks. Routers are NOT included.
+ *
+ * This value is also the value of the maximum number of non-router NASIDs
+ * in the numalink fabric.
+ *
+ * NOTE: a brick may contain 1 or 2 OS nodes. Don't get these confused.
+ */
+#define UV_MAX_NUMALINK_BLADES 16384
+
+/*
+ * Maximum number of C/Mbricks within a software SSI (hardware may support
+ * more).
+ */
+#define UV_MAX_SSI_BLADES 256
+
+/*
+ * The largest possible NASID of a C or M brick (+ 2)
+ */
+#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_BLADES * 2)
+
+struct uv_scir_s {
+ struct timer_list timer;
+ unsigned long offset;
+ unsigned long last;
+ unsigned long idle_on;
+ unsigned long idle_off;
+ unsigned char state;
+ unsigned char enabled;
+};
+
+/*
+ * The following defines attributes of the HUB chip. These attributes are
+ * frequently referenced and are kept in the per-cpu data areas of each cpu.
+ * They are kept together in a struct to minimize cache misses.
+ */
+struct uv_hub_info_s {
+ unsigned long global_mmr_base;
+ unsigned long gpa_mask;
+ unsigned int gnode_extra;
+ unsigned char hub_revision;
+ unsigned char apic_pnode_shift;
+ unsigned char m_shift;
+ unsigned char n_lshift;
+ unsigned long gnode_upper;
+ unsigned long lowmem_remap_top;
+ unsigned long lowmem_remap_base;
+ unsigned short pnode;
+ unsigned short pnode_mask;
+ unsigned short coherency_domain_number;
+ unsigned short numa_blade_id;
+ unsigned char blade_processor_id;
+ unsigned char m_val;
+ unsigned char n_val;
+ struct uv_scir_s scir;
+};
+
+DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+#define uv_hub_info this_cpu_ptr(&__uv_hub_info)
+#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
+
+/*
+ * Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
+ * hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
+ * This is a software convention - NOT the hardware revision numbers in
+ * the hub chip.
+ */
+#define UV1_HUB_REVISION_BASE 1
+#define UV2_HUB_REVISION_BASE 3
+#define UV3_HUB_REVISION_BASE 5
+
+static inline int is_uv1_hub(void)
+{
+ return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
+}
+
+static inline int is_uv2_hub(void)
+{
+ return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
+ (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
+}
+
+static inline int is_uv3_hub(void)
+{
+ return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
+}
+
+static inline int is_uv_hub(void)
+{
+ return uv_hub_info->hub_revision;
+}
+
+/* code common to uv2 and uv3 only */
+static inline int is_uvx_hub(void)
+{
+ return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
+}
+
+union uvh_apicid {
+ unsigned long v;
+ struct uvh_apicid_s {
+ unsigned long local_apic_mask : 24;
+ unsigned long local_apic_shift : 5;
+ unsigned long unused1 : 3;
+ unsigned long pnode_mask : 24;
+ unsigned long pnode_shift : 5;
+ unsigned long unused2 : 3;
+ } s;
+};
+
+/*
+ * Local & Global MMR space macros.
+ * Note: macros are intended to be used ONLY by inline functions
+ * in this file - not by other kernel code.
+ * n - NASID (full 15-bit global nasid)
+ * g - GNODE (full 15-bit global nasid, right shifted 1)
+ *		p - PNODE (local part of the nasid, right shifted 1)
+ */
+#define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask)
+#define UV_PNODE_TO_GNODE(p)	((p) | uv_hub_info->gnode_extra)
+#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
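+
+/*
+ * Illustrative sketch (not part of the original interface): the three
+ * macros above compose as NASID -> PNODE -> GNODE -> NASID.  Within the
+ * local partition, where gnode_extra supplies the bits removed by
+ * pnode_mask, the round trip recovers the original C/MBrick nasid
+ * (its low bit is always zero, so shifting it out loses nothing).
+ */
+static inline int uv_nasid_round_trip(int nasid)
+{
+	int pnode = UV_NASID_TO_PNODE(nasid);
+
+	return UV_PNODE_TO_NASID(pnode);
+}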
+
+#define UV1_LOCAL_MMR_BASE 0xf4000000UL
+#define UV1_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV1_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
+#define UV1_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
+
+#define UV2_LOCAL_MMR_BASE 0xfa000000UL
+#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
+
+#define UV3_LOCAL_MMR_BASE 0xfa000000UL
+#define UV3_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV3_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
+ (is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
+ UV3_LOCAL_MMR_BASE))
+#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\
+ (is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\
+ UV3_GLOBAL_MMR32_BASE))
+#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
+ (is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
+ UV3_LOCAL_MMR_SIZE))
+#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
+ (is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\
+ UV3_GLOBAL_MMR32_SIZE))
+#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
+
+#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
+
+#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
+#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
+
+#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))
+
+#define UV_GLOBAL_MMR64_PNODE_BITS(p) \
+ (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT)
+
+#define UVH_APICID 0x002D0E00L
+#define UV_APIC_PNODE_SHIFT 6
+
+#define UV_APICID_HIBIT_MASK 0xffff0000
+
+/* Local Bus from cpu's perspective */
+#define LOCAL_BUS_BASE 0x1c00000
+#define LOCAL_BUS_SIZE (4 * 1024 * 1024)
+
+/*
+ * System Controller Interface Reg
+ *
+ * Note there are NO leds on a UV system. This register is only
+ * used by the system controller to monitor system-wide operation.
+ * There are 64 regs per node.  With Nehalem cpus (2 sockets per node,
+ * 8 cores per socket, 2 threads per core) there are 32 cpu threads on
+ * a node.
+ *
+ * The window is located at the top of ACPI MMR space
+ */
+#define SCIR_WINDOW_COUNT 64
+#define SCIR_LOCAL_MMR_BASE (LOCAL_BUS_BASE + \
+ LOCAL_BUS_SIZE - \
+ SCIR_WINDOW_COUNT)
+
+#define SCIR_CPU_HEARTBEAT 0x01 /* timer interrupt */
+#define SCIR_CPU_ACTIVITY 0x02 /* not idle */
+#define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */
+
+/* Loop through all installed blades */
+#define for_each_possible_blade(bid) \
+ for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)
+
+/*
+ * Macros for converting between kernel virtual addresses, socket local physical
+ * addresses, and UV global physical addresses.
+ * Note: use the standard __pa() & __va() macros for converting
+ * between socket virtual and socket physical addresses.
+ */
+
+/* socket phys RAM --> UV global physical address */
+static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
+{
+ if (paddr < uv_hub_info->lowmem_remap_top)
+ paddr |= uv_hub_info->lowmem_remap_base;
+ paddr |= uv_hub_info->gnode_upper;
+ paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+ ((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
+ return paddr;
+}
+
+
+/* socket virtual --> UV global physical address */
+static inline unsigned long uv_gpa(void *v)
+{
+ return uv_soc_phys_ram_to_gpa(__pa(v));
+}
+
+/* Top two bits indicate the requested address is in MMR space. */
+static inline int
+uv_gpa_in_mmr_space(unsigned long gpa)
+{
+ return (gpa >> 62) == 0x3UL;
+}
+
+/* UV global physical address --> socket phys RAM */
+static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
+{
+ unsigned long paddr;
+ unsigned long remap_base = uv_hub_info->lowmem_remap_base;
+ unsigned long remap_top = uv_hub_info->lowmem_remap_top;
+
+ gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+ ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
+ paddr = gpa & uv_hub_info->gpa_mask;
+ if (paddr >= remap_base && paddr < remap_base + remap_top)
+ paddr -= remap_base;
+ return paddr;
+}
+
+
+/* gpa -> gnode */
+static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
+{
+ return gpa >> uv_hub_info->n_lshift;
+}
+
+/* gpa -> pnode */
+static inline int uv_gpa_to_pnode(unsigned long gpa)
+{
+ unsigned long n_mask = (1UL << uv_hub_info->n_val) - 1;
+
+ return uv_gpa_to_gnode(gpa) & n_mask;
+}
+
+/* gpa -> node offset */
+static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
+{
+ return (gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift;
+}
+
+/* pnode, offset --> socket virtual */
+static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
+{
+ return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
+}
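+
+/*
+ * Illustrative sketch (not part of the original interface):
+ * uv_gpa_to_soc_phys_ram() is documented above as the inverse of
+ * uv_soc_phys_ram_to_gpa() for ordinary socket RAM, so a kernel virtual
+ * address can be round-tripped through gpa form.  The same gpa can also
+ * be split with uv_gpa_to_pnode()/uv_gpa_to_offset() to find its home
+ * blade.
+ */
+static inline void *uv_vaddr_round_trip(void *vaddr)
+{
+	unsigned long gpa = uv_gpa(vaddr);
+
+	return __va(uv_gpa_to_soc_phys_ram(gpa));	/* undoes uv_gpa() */
+}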
+
+
+/*
+ * Extract a PNODE from an APICID (full apicid, not processor subset)
+ */
+static inline int uv_apicid_to_pnode(int apicid)
+{
+ return (apicid >> uv_hub_info->apic_pnode_shift);
+}
+
+/*
+ * Convert an apicid to the socket number on the blade
+ */
+static inline int uv_apicid_to_socket(int apicid)
+{
+ if (is_uv1_hub())
+ return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
+ else
+ return 0;
+}
+
+/*
+ * Access global MMRs using the low memory MMR32 space. This region supports
+ * faster MMR access but not all MMRs are accessible in this space.
+ */
+static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset)
+{
+ return __va(UV_GLOBAL_MMR32_BASE |
+ UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset);
+}
+
+static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val)
+{
+ writeq(val, uv_global_mmr32_address(pnode, offset));
+}
+
+static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset)
+{
+ return readq(uv_global_mmr32_address(pnode, offset));
+}
+
+/*
+ * Access Global MMR space using the MMR space located at the top of physical
+ * memory.
+ */
+static inline volatile void __iomem *uv_global_mmr64_address(int pnode, unsigned long offset)
+{
+ return __va(UV_GLOBAL_MMR64_BASE |
+ UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset);
+}
+
+static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val)
+{
+ writeq(val, uv_global_mmr64_address(pnode, offset));
+}
+
+static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset)
+{
+ return readq(uv_global_mmr64_address(pnode, offset));
+}
+
+/*
+ * Global MMR space addresses when referenced by the GRU. (GRU does
+ * NOT use socket addressing).
+ */
+static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
+{
+ return UV_GLOBAL_GRU_MMR_BASE | offset |
+ ((unsigned long)pnode << uv_hub_info->m_val);
+}
+
+static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
+{
+ writeb(val, uv_global_mmr64_address(pnode, offset));
+}
+
+static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset)
+{
+ return readb(uv_global_mmr64_address(pnode, offset));
+}
+
+/*
+ * Access hub local MMRs. Faster than using global space but only local MMRs
+ * are accessible.
+ */
+static inline unsigned long *uv_local_mmr_address(unsigned long offset)
+{
+ return __va(UV_LOCAL_MMR_BASE | offset);
+}
+
+static inline unsigned long uv_read_local_mmr(unsigned long offset)
+{
+ return readq(uv_local_mmr_address(offset));
+}
+
+static inline void uv_write_local_mmr(unsigned long offset, unsigned long val)
+{
+ writeq(val, uv_local_mmr_address(offset));
+}
+
+static inline unsigned char uv_read_local_mmr8(unsigned long offset)
+{
+ return readb(uv_local_mmr_address(offset));
+}
+
+static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
+{
+ writeb(val, uv_local_mmr_address(offset));
+}
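+
+/*
+ * Illustrative sketch (not part of the original interface): per the
+ * comments above, the local window is simply the faster path to this
+ * hub's own MMRs; addressing this hub's pnode through the global 64-bit
+ * window reaches the same register.  The offset would normally be a
+ * UVH_* constant from uv_mmrs.h.
+ */
+static inline unsigned long uv_read_own_mmr_via_global(unsigned long offset)
+{
+	/* equivalent to uv_read_local_mmr(offset), just via the global window */
+	return uv_read_global_mmr64(uv_hub_info->pnode, offset);
+}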
+
+/*
+ * Structures and definitions for converting between cpu, node, pnode, and blade
+ * numbers.
+ */
+struct uv_blade_info {
+ unsigned short nr_possible_cpus;
+ unsigned short nr_online_cpus;
+ unsigned short pnode;
+ short memory_nid;
+ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
+ unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
+};
+extern struct uv_blade_info *uv_blade_info;
+extern short *uv_node_to_blade;
+extern short *uv_cpu_to_blade;
+extern short uv_possible_blades;
+
+/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
+static inline int uv_blade_processor_id(void)
+{
+ return uv_hub_info->blade_processor_id;
+}
+
+/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
+static inline int uv_numa_blade_id(void)
+{
+ return uv_hub_info->numa_blade_id;
+}
+
+/* Convert a cpu number to the UV blade number */
+static inline int uv_cpu_to_blade_id(int cpu)
+{
+ return uv_cpu_to_blade[cpu];
+}
+
+/* Convert linux node number to the UV blade number */
+static inline int uv_node_to_blade_id(int nid)
+{
+ return uv_node_to_blade[nid];
+}
+
+/* Convert a blade id to the PNODE of the blade */
+static inline int uv_blade_to_pnode(int bid)
+{
+ return uv_blade_info[bid].pnode;
+}
+
+/* Nid of memory node on blade. -1 if no blade-local memory */
+static inline int uv_blade_to_memory_nid(int bid)
+{
+ return uv_blade_info[bid].memory_nid;
+}
+
+/* Determine the number of possible cpus on a blade */
+static inline int uv_blade_nr_possible_cpus(int bid)
+{
+ return uv_blade_info[bid].nr_possible_cpus;
+}
+
+/* Determine the number of online cpus on a blade */
+static inline int uv_blade_nr_online_cpus(int bid)
+{
+ return uv_blade_info[bid].nr_online_cpus;
+}
+
+/* Convert a cpu id to the PNODE of the blade containing the cpu */
+static inline int uv_cpu_to_pnode(int cpu)
+{
+ return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
+}
+
+/* Convert a linux node number to the PNODE of the blade */
+static inline int uv_node_to_pnode(int nid)
+{
+ return uv_blade_info[uv_node_to_blade_id(nid)].pnode;
+}
+
+/* Maximum possible number of blades */
+static inline int uv_num_possible_blades(void)
+{
+ return uv_possible_blades;
+}
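+
+/*
+ * Illustrative sketch (not part of the original interface): combining
+ * for_each_possible_blade() with the conversion helpers above, here to
+ * count online cpus on blades that have blade-local memory.
+ */
+static inline int uv_online_cpus_on_memory_blades(void)
+{
+	int bid, cnt = 0;
+
+	for_each_possible_blade(bid)
+		if (uv_blade_to_memory_nid(bid) >= 0)	/* -1 means no memory */
+			cnt += uv_blade_nr_online_cpus(bid);
+	return cnt;
+}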
+
+/* Per Hub NMI support */
+extern void uv_nmi_setup(void);
+
+/* The BMC sets a bit in this MMR (making it non-zero) before sending an NMI */
+#define UVH_NMI_MMR UVH_SCRATCH5
+#define UVH_NMI_MMR_CLEAR UVH_SCRATCH5_ALIAS
+#define UVH_NMI_MMR_SHIFT 63
+#define UVH_NMI_MMR_TYPE "SCRATCH5"
+
+/* Newer SMM NMI handler, not present in all systems */
+#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0
+#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS
+#define UVH_NMI_MMRX_SHIFT (is_uv1_hub() ? \
+ UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\
+ UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
+#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"
+
+/* Non-zero indicates newer SMM NMI handler present */
+#define UVH_NMI_MMRX_SUPPORTED UVH_EXTIO_INT0_BROADCAST
+
+/* Indicates to BIOS that we want to use the newer SMM NMI handler */
+#define UVH_NMI_MMRX_REQ UVH_SCRATCH5_ALIAS_2
+#define UVH_NMI_MMRX_REQ_SHIFT 62
+
+struct uv_hub_nmi_s {
+ raw_spinlock_t nmi_lock;
+ atomic_t in_nmi; /* flag this node in UV NMI IRQ */
+ atomic_t cpu_owner; /* last locker of this struct */
+ atomic_t read_mmr_count; /* count of MMR reads */
+ atomic_t nmi_count; /* count of true UV NMIs */
+ unsigned long nmi_value; /* last value read from NMI MMR */
+};
+
+struct uv_cpu_nmi_s {
+ struct uv_hub_nmi_s *hub;
+ int state;
+ int pinging;
+ int queries;
+ int pings;
+};
+
+DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+
+#define uv_hub_nmi (uv_cpu_nmi.hub)
+#define uv_cpu_nmi_per(cpu) (per_cpu(uv_cpu_nmi, cpu))
+#define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub)
+
+/* uv_cpu_nmi_states */
+#define UV_NMI_STATE_OUT 0
+#define UV_NMI_STATE_IN 1
+#define UV_NMI_STATE_DUMP 2
+#define UV_NMI_STATE_DUMP_DONE 3
+
+/* Update SCIR state */
+static inline void uv_set_scir_bits(unsigned char value)
+{
+ if (uv_hub_info->scir.state != value) {
+ uv_hub_info->scir.state = value;
+ uv_write_local_mmr8(uv_hub_info->scir.offset, value);
+ }
+}
+
+static inline unsigned long uv_scir_offset(int apicid)
+{
+ return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
+}
+
+static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
+{
+ if (uv_cpu_hub_info(cpu)->scir.state != value) {
+ uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
+ uv_cpu_hub_info(cpu)->scir.offset, value);
+ uv_cpu_hub_info(cpu)->scir.state = value;
+ }
+}
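+
+/*
+ * Illustrative sketch (not part of the original interface): a heartbeat
+ * toggles SCIR_CPU_HEARTBEAT and reports SCIR_CPU_ACTIVITY while the
+ * cpu is not idle, per the System Controller Interface Reg comment
+ * above.  The real heartbeat is timer-driven elsewhere in the UV
+ * support code; 'is_idle' here is a hypothetical input.
+ */
+static inline void uv_scir_heartbeat_example(int is_idle)
+{
+	unsigned char bits = uv_hub_info->scir.state ^ SCIR_CPU_HEARTBEAT;
+
+	if (is_idle)
+		bits &= ~SCIR_CPU_ACTIVITY;
+	else
+		bits |= SCIR_CPU_ACTIVITY;
+	uv_set_scir_bits(bits);
+}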
+
+extern unsigned int uv_apicid_hibits;
+static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
+{
+ apicid |= uv_apicid_hibits;
+ return (1UL << UVH_IPI_INT_SEND_SHFT) |
+ ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
+ (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
+ (vector << UVH_IPI_INT_VECTOR_SHFT);
+}
+
+static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
+{
+ unsigned long val;
+ unsigned long dmode = dest_Fixed;
+
+ if (vector == NMI_VECTOR)
+ dmode = dest_NMI;
+
+ val = uv_hub_ipi_value(apicid, vector, dmode);
+ uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+}
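+
+/*
+ * Illustrative sketch (not part of the original interface): sending a
+ * vector to a specific cpu means resolving its blade pnode and apicid
+ * first.  The apicid lookup via per_cpu(x86_cpu_to_apicid, cpu) is an
+ * assumption borrowed from <asm/smp.h>, not something declared here.
+ */
+static inline void uv_send_ipi_to_cpu(int cpu, int vector)
+{
+	int pnode = uv_cpu_to_pnode(cpu);
+	int apicid = per_cpu(x86_cpu_to_apicid, cpu);
+
+	uv_hub_send_ipi(pnode, apicid, vector);
+}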
+
+/*
+ * Get the minimum revision number of the hub chips within the partition.
+ * 1 - UV1 rev 1.0 initial silicon
+ * 2 - UV1 rev 2.0 production silicon
+ * 3 - UV2 rev 1.0 initial silicon
+ * 5 - UV3 rev 1.0 initial silicon
+ */
+static inline int uv_get_min_hub_revision_id(void)
+{
+ return uv_hub_info->hub_revision;
+}
+
+#endif /* CONFIG_X86_64 */
+#endif /* _ASM_X86_UV_UV_HUB_H */
diff --git a/kernel/arch/x86/include/asm/uv/uv_irq.h b/kernel/arch/x86/include/asm/uv/uv_irq.h
new file mode 100644
index 000000000..d6b17c760
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uv/uv_irq.h
@@ -0,0 +1,38 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV IRQ definitions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_X86_UV_UV_IRQ_H
+#define _ASM_X86_UV_UV_IRQ_H
+
+/* If a generic version of this structure gets defined, eliminate this one. */
+struct uv_IO_APIC_route_entry {
+ __u64 vector : 8,
+ delivery_mode : 3,
+ dest_mode : 1,
+ delivery_status : 1,
+ polarity : 1,
+ __reserved_1 : 1,
+ trigger : 1,
+ mask : 1,
+ __reserved_2 : 15,
+ dest : 32;
+};
+
+enum {
+ UV_AFFINITY_ALL,
+ UV_AFFINITY_NODE,
+ UV_AFFINITY_CPU
+};
+
+extern int uv_irq_2_mmr_info(int, unsigned long *, int *);
+extern int uv_setup_irq(char *, int, int, unsigned long, int);
+extern void uv_teardown_irq(unsigned int);
+
+#endif /* _ASM_X86_UV_UV_IRQ_H */
diff --git a/kernel/arch/x86/include/asm/uv/uv_mmrs.h b/kernel/arch/x86/include/asm/uv/uv_mmrs.h
new file mode 100644
index 000000000..ddd8db6b6
--- /dev/null
+++ b/kernel/arch/x86/include/asm/uv/uv_mmrs.h
@@ -0,0 +1,2928 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV MMR definitions
+ *
+ * Copyright (C) 2007-2014 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_X86_UV_UV_MMRS_H
+#define _ASM_X86_UV_UV_MMRS_H
+
+/*
+ * This file contains MMR definitions for all UV hub types.
+ *
+ * To minimize coding differences between hub types, the symbols are
+ * grouped by architecture types.
+ *
+ * UVH - definitions common to all UV hub types.
+ * UVXH - definitions common to all UV eXtended hub types (currently 2 & 3).
+ * UV1H - definitions specific to UV type 1 hub.
+ * UV2H - definitions specific to UV type 2 hub.
+ * UV3H - definitions specific to UV type 3 hub.
+ *
+ * So in general, MMR addresses and structures are identical on all hub types.
+ * These MMRs are identified as:
+ * #define UVH_xxx <address>
+ * union uvh_xxx {
+ * unsigned long v;
+ * struct uvh_int_cmpd_s {
+ * } s;
+ * };
+ *
+ * If the MMR exists on all hub types but has different addresses:
+ * #define UV1Hxxx a
+ * #define UV2Hxxx b
+ * #define UV3Hxxx c
+ * #define UVHxxx (is_uv1_hub() ? UV1Hxxx :
+ * (is_uv2_hub() ? UV2Hxxx :
+ * UV3Hxxx))
+ *
+ * If the MMR exists on all hub types > 1 but has different addresses:
+ * #define UV2Hxxx b
+ * #define UV3Hxxx c
+ * #define UVXHxxx (is_uv2_hub() ? UV2Hxxx :
+ *					UV3Hxxx)
+ *
+ * union uvh_xxx {
+ * unsigned long v;
+ * struct uvh_xxx_s { # Common fields only
+ * } s;
+ * struct uv1h_xxx_s { # Full UV1 definition (*)
+ * } s1;
+ * struct uv2h_xxx_s { # Full UV2 definition (*)
+ * } s2;
+ * struct uv3h_xxx_s { # Full UV3 definition (*)
+ * } s3;
+ * };
+ * (* - if present and different than the common struct)
+ *
+ * Only essential differences are enumerated. For example, if the address is
+ * the same for all UV's, only a single #define is generated. Likewise,
+ * if the contents are the same for all hubs, only the "s" structure is
+ * generated.
+ *
+ * If the MMR exists on ONLY 1 type of hub, no generic definition is
+ * generated:
+ * #define UVnH_xxx <uvn address>
+ * union uvnh_xxx {
+ * unsigned long v;
+ * struct uvh_int_cmpd_s {
+ * } sn;
+ * };
+ *
+ * (GEN Flags: mflags_opt= undefs=0 UV23=UVXH)
+ */
+
+#define UV_MMR_ENABLE (1UL << 63)
+
+#define UV1_HUB_PART_NUMBER 0x88a5
+#define UV2_HUB_PART_NUMBER 0x8eb8
+#define UV2_HUB_PART_NUMBER_X 0x1111
+#define UV3_HUB_PART_NUMBER 0x9578
+#define UV3_HUB_PART_NUMBER_X 0x4321
+
+/* Compat: Indicate which UV Hubs are supported. */
+#define UV2_HUB_IS_SUPPORTED 1
+#define UV3_HUB_IS_SUPPORTED 1
+
+/* ========================================================================= */
+/* UVH_BAU_DATA_BROADCAST */
+/* ========================================================================= */
+#define UVH_BAU_DATA_BROADCAST 0x61688UL
+#define UVH_BAU_DATA_BROADCAST_32 0x440
+
+#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
+#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
+
+union uvh_bau_data_broadcast_u {
+ unsigned long v;
+ struct uvh_bau_data_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s;
+};
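+
+/*
+ * Illustrative sketch (not part of the original interface): each union
+ * above lets callers work on either the raw 64-bit MMR image ('v') or
+ * the named bit fields ('s').  The modified image would typically be
+ * written back with uv_write_global_mmr64() from uv_hub.h, which this
+ * header does not itself include.
+ */
+static inline unsigned long uvh_bau_data_broadcast_set_enable(unsigned long v)
+{
+	union uvh_bau_data_broadcast_u bcast;
+
+	bcast.v = v;			/* raw image read from the MMR */
+	bcast.s.enable = 1;		/* set the named field */
+	return bcast.v;			/* image to write back */
+}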
+
+/* ========================================================================= */
+/* UVH_BAU_DATA_CONFIG */
+/* ========================================================================= */
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x438
+
+#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
+#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
+#define UVH_BAU_DATA_CONFIG_DESTMODE_SHFT 11
+#define UVH_BAU_DATA_CONFIG_STATUS_SHFT 12
+#define UVH_BAU_DATA_CONFIG_P_SHFT 13
+#define UVH_BAU_DATA_CONFIG_T_SHFT 15
+#define UVH_BAU_DATA_CONFIG_M_SHFT 16
+#define UVH_BAU_DATA_CONFIG_APIC_ID_SHFT 32
+#define UVH_BAU_DATA_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_BAU_DATA_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_BAU_DATA_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_BAU_DATA_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_BAU_DATA_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_BAU_DATA_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_BAU_DATA_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_BAU_DATA_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_bau_data_config_u {
+ unsigned long v;
+ struct uvh_bau_data_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED0 */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0 0x70000UL
+#define UVH_EVENT_OCCURRED0_32 0x5e8
+
+#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
+#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
+
+#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
+#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
+#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
+#define UV1H_EVENT_OCCURRED0_RH_HCERR_SHFT 4
+#define UV1H_EVENT_OCCURRED0_XN_HCERR_SHFT 5
+#define UV1H_EVENT_OCCURRED0_SI_HCERR_SHFT 6
+#define UV1H_EVENT_OCCURRED0_LB_AOERR0_SHFT 7
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
+#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
+#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
+#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
+#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 15
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 16
+#define UV1H_EVENT_OCCURRED0_LH_AOERR1_SHFT 17
+#define UV1H_EVENT_OCCURRED0_RH_AOERR1_SHFT 18
+#define UV1H_EVENT_OCCURRED0_XN_AOERR1_SHFT 19
+#define UV1H_EVENT_OCCURRED0_SI_AOERR1_SHFT 20
+#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_SHFT 21
+#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 22
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 23
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 24
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 25
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 26
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 27
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 28
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 29
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 30
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 31
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 32
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 33
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 34
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 35
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 36
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 37
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 38
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 39
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 40
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 41
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 42
+#define UV1H_EVENT_OCCURRED0_LTC_INT_SHFT 43
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 44
+#define UV1H_EVENT_OCCURRED0_IPI_INT_SHFT 45
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 46
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 47
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 48
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 49
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_SHFT 50
+#define UV1H_EVENT_OCCURRED0_RTC0_SHFT 51
+#define UV1H_EVENT_OCCURRED0_RTC1_SHFT 52
+#define UV1H_EVENT_OCCURRED0_RTC2_SHFT 53
+#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
+#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
+#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
+#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
+#define UV1H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000010UL
+#define UV1H_EVENT_OCCURRED0_XN_HCERR_MASK 0x0000000000000020UL
+#define UV1H_EVENT_OCCURRED0_SI_HCERR_MASK 0x0000000000000040UL
+#define UV1H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000080UL
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
+#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
+#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
+#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
+#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
+#define UV1H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000000008000UL
+#define UV1H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000000010000UL
+#define UV1H_EVENT_OCCURRED0_LH_AOERR1_MASK 0x0000000000020000UL
+#define UV1H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000040000UL
+#define UV1H_EVENT_OCCURRED0_XN_AOERR1_MASK 0x0000000000080000UL
+#define UV1H_EVENT_OCCURRED0_SI_AOERR1_MASK 0x0000000000100000UL
+#define UV1H_EVENT_OCCURRED0_RH_VPI_INT_MASK 0x0000000000200000UL
+#define UV1H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000000400000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000000800000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000001000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000002000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000004000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000000008000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000000010000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000000020000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000000040000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000000080000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000000100000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000000200000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000000400000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000000800000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000001000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000002000000000UL
+#define UV1H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000004000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0000008000000000UL
+#define UV1H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0000010000000000UL
+#define UV1H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0000020000000000UL
+#define UV1H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0000040000000000UL
+#define UV1H_EVENT_OCCURRED0_LTC_INT_MASK 0x0000080000000000UL
+#define UV1H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0000100000000000UL
+#define UV1H_EVENT_OCCURRED0_IPI_INT_MASK 0x0000200000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0000400000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0000800000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0001000000000000UL
+#define UV1H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0002000000000000UL
+#define UV1H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0004000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC0_MASK 0x0008000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC1_MASK 0x0010000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC2_MASK 0x0020000000000000UL
+#define UV1H_EVENT_OCCURRED0_RTC3_MASK 0x0040000000000000UL
+#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
+#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
+
+#define UVXH_EVENT_OCCURRED0_QP_HCERR_SHFT 1
+#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
+#define UVXH_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
+#define UVXH_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
+#define UVXH_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
+#define UVXH_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
+#define UVXH_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
+#define UVXH_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
+#define UVXH_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
+#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
+#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
+#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
+#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
+#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
+#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
+#define UVXH_EVENT_OCCURRED0_IPI_INT_SHFT 53
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
+#define UVXH_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
+#define UVXH_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
+#define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
+#define UVXH_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
+#define UVXH_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
+#define UVXH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
+#define UVXH_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
+#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
+#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
+#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
+#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
+#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
+#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
+#define UVXH_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
+#define UVXH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
+
+union uvh_event_occurred0_u {
+ unsigned long v;
+ struct uvh_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW, W1C */
+ unsigned long rsvd_1_10:10;
+ unsigned long rh_aoerr0:1; /* RW, W1C */
+ unsigned long rsvd_12_63:52;
+ } s;
+ struct uvxh_event_occurred0_s {
+ unsigned long lb_hcerr:1; /* RW */
+ unsigned long qp_hcerr:1; /* RW */
+ unsigned long rh_hcerr:1; /* RW */
+ unsigned long lh0_hcerr:1; /* RW */
+ unsigned long lh1_hcerr:1; /* RW */
+ unsigned long gr0_hcerr:1; /* RW */
+ unsigned long gr1_hcerr:1; /* RW */
+ unsigned long ni0_hcerr:1; /* RW */
+ unsigned long ni1_hcerr:1; /* RW */
+ unsigned long lb_aoerr0:1; /* RW */
+ unsigned long qp_aoerr0:1; /* RW */
+ unsigned long rh_aoerr0:1; /* RW */
+ unsigned long lh0_aoerr0:1; /* RW */
+ unsigned long lh1_aoerr0:1; /* RW */
+ unsigned long gr0_aoerr0:1; /* RW */
+ unsigned long gr1_aoerr0:1; /* RW */
+ unsigned long xb_aoerr0:1; /* RW */
+ unsigned long rt_aoerr0:1; /* RW */
+ unsigned long ni0_aoerr0:1; /* RW */
+ unsigned long ni1_aoerr0:1; /* RW */
+ unsigned long lb_aoerr1:1; /* RW */
+ unsigned long qp_aoerr1:1; /* RW */
+ unsigned long rh_aoerr1:1; /* RW */
+ unsigned long lh0_aoerr1:1; /* RW */
+ unsigned long lh1_aoerr1:1; /* RW */
+ unsigned long gr0_aoerr1:1; /* RW */
+ unsigned long gr1_aoerr1:1; /* RW */
+ unsigned long xb_aoerr1:1; /* RW */
+ unsigned long rt_aoerr1:1; /* RW */
+ unsigned long ni0_aoerr1:1; /* RW */
+ unsigned long ni1_aoerr1:1; /* RW */
+ unsigned long system_shutdown_int:1; /* RW */
+ unsigned long lb_irq_int_0:1; /* RW */
+ unsigned long lb_irq_int_1:1; /* RW */
+ unsigned long lb_irq_int_2:1; /* RW */
+ unsigned long lb_irq_int_3:1; /* RW */
+ unsigned long lb_irq_int_4:1; /* RW */
+ unsigned long lb_irq_int_5:1; /* RW */
+ unsigned long lb_irq_int_6:1; /* RW */
+ unsigned long lb_irq_int_7:1; /* RW */
+ unsigned long lb_irq_int_8:1; /* RW */
+ unsigned long lb_irq_int_9:1; /* RW */
+ unsigned long lb_irq_int_10:1; /* RW */
+ unsigned long lb_irq_int_11:1; /* RW */
+ unsigned long lb_irq_int_12:1; /* RW */
+ unsigned long lb_irq_int_13:1; /* RW */
+ unsigned long lb_irq_int_14:1; /* RW */
+ unsigned long lb_irq_int_15:1; /* RW */
+ unsigned long l1_nmi_int:1; /* RW */
+ unsigned long stop_clock:1; /* RW */
+ unsigned long asic_to_l1:1; /* RW */
+ unsigned long l1_to_asic:1; /* RW */
+ unsigned long la_seq_trigger:1; /* RW */
+ unsigned long ipi_int:1; /* RW */
+ unsigned long extio_int0:1; /* RW */
+ unsigned long extio_int1:1; /* RW */
+ unsigned long extio_int2:1; /* RW */
+ unsigned long extio_int3:1; /* RW */
+ unsigned long profile_int:1; /* RW */
+ unsigned long rsvd_59_63:5;
+ } sx;
+};
+
+/* ========================================================================= */
+/* UVH_EVENT_OCCURRED0_ALIAS */
+/* ========================================================================= */
+#define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+
+
+/* ========================================================================= */
+/* UVH_EXTIO_INT0_BROADCAST */
+/* ========================================================================= */
+#define UVH_EXTIO_INT0_BROADCAST 0x61448UL
+#define UVH_EXTIO_INT0_BROADCAST_32 0x3f0
+
+#define UVH_EXTIO_INT0_BROADCAST_ENABLE_SHFT 0
+#define UVH_EXTIO_INT0_BROADCAST_ENABLE_MASK 0x0000000000000001UL
+
+union uvh_extio_int0_broadcast_u {
+ unsigned long v;
+ struct uvh_extio_int0_broadcast_s {
+ unsigned long enable:1; /* RW */
+ unsigned long rsvd_1_63:63;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
+
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int0_config_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
+
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR0_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR0_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR0_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR0_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR0_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR0_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR0_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR0_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR0_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr0_tlb_int1_config_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_MMR_CONTROL */
+/* ========================================================================= */
+#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
+#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
+#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
+#define UVH_GR0_TLB_MMR_CONTROL \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \
+ UV3H_GR0_TLB_MMR_CONTROL))
+
+#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVH_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+
+#define UV1H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV1H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_SHFT 54
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_SHFT 56
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_SHFT 60
+#define UV1H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV1H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_MASK 0x0040000000000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
+#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
+
+#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
+#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
+#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
+#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+
+#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
+union uvh_gr0_tlb_mmr_control_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long rsvd_32_48:17;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52_63:12;
+ } s;
+ struct uv1h_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long rsvd_32_47:16;
+ unsigned long mmr_inj_con:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long mmr_inj_tlbram:1; /* RW */
+ unsigned long rsvd_53:1;
+ unsigned long mmr_inj_tlbpgsize:1; /* RW */
+ unsigned long rsvd_55:1;
+ unsigned long mmr_inj_tlbrreg:1; /* RW */
+ unsigned long rsvd_57_59:3;
+ unsigned long mmr_inj_tlblruv:1; /* RW */
+ unsigned long rsvd_61_63:3;
+ } s1;
+ struct uvxh_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long rsvd_48:1;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52:1;
+ unsigned long rsvd_53_63:11;
+ } sx;
+ struct uv2h_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long mmr_inj_con:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long mmr_inj_tlbram:1; /* RW */
+ unsigned long rsvd_53_63:11;
+ } s2;
+ struct uv3h_gr0_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long ecc_sel:1; /* RW */
+ unsigned long rsvd_22_29:8;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long undef_48:1; /* Undefined */
+ unsigned long rsvd_49_51:3;
+ unsigned long undef_52:1; /* Undefined */
+ unsigned long rsvd_53_63:11;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_MMR_READ_DATA_HI */
+/* ========================================================================= */
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \
+ UV3H_GR0_TLB_MMR_READ_DATA_HI))
+
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
+
+union uvh_gr0_tlb_mmr_read_data_hi_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s;
+ struct uv1h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s1;
+ struct uvxh_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } sx;
+ struct uv2h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s2;
+ struct uv3h_gr0_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long aa_ext:1; /* RO */
+ unsigned long undef_46_54:9; /* Undefined */
+ unsigned long way_ecc:9; /* RO */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_GR0_TLB_MMR_READ_DATA_LO */
+/* ========================================================================= */
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO \
+ (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO : \
+ (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \
+ UV3H_GR0_TLB_MMR_READ_DATA_LO))
+
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+union uvh_gr0_tlb_mmr_read_data_lo_u {
+ unsigned long v;
+ struct uvh_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s;
+ struct uv1h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s1;
+ struct uvxh_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } sx;
+ struct uv2h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s2;
+ struct uv3h_gr0_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT0_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
+
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT0_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT0_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT0_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT0_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT0_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT0_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT0_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT0_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT0_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT0_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int0_config_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_int0_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_INT1_CONFIG */
+/* ========================================================================= */
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
+
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
+#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_SHFT 11
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_SHFT 12
+#define UVH_GR1_TLB_INT1_CONFIG_P_SHFT 13
+#define UVH_GR1_TLB_INT1_CONFIG_T_SHFT 15
+#define UVH_GR1_TLB_INT1_CONFIG_M_SHFT 16
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_SHFT 32
+#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_GR1_TLB_INT1_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_GR1_TLB_INT1_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_GR1_TLB_INT1_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_GR1_TLB_INT1_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_GR1_TLB_INT1_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_GR1_TLB_INT1_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_INT1_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_gr1_tlb_int1_config_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_int1_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_MMR_CONTROL */
+/* ========================================================================= */
+#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
+#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
+#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
+#define UVH_GR1_TLB_MMR_CONTROL \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \
+ UV3H_GR1_TLB_MMR_CONTROL))
+
+#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+
+#define UV1H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV1H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV1H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_SHFT 54
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_SHFT 56
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_SHFT 60
+#define UV1H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV1H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBPGSIZE_MASK 0x0040000000000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
+#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
+
+#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
+#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_SHFT 48
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_SHFT 52
+#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
+#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
+
+#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
+#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
+#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
+#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
+#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
+#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
+
+union uvh_gr1_tlb_mmr_control_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long rsvd_32_48:17;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52_63:12;
+ } s;
+ struct uv1h_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long rsvd_32_47:16;
+ unsigned long mmr_inj_con:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long mmr_inj_tlbram:1; /* RW */
+ unsigned long rsvd_53:1;
+ unsigned long mmr_inj_tlbpgsize:1; /* RW */
+ unsigned long rsvd_55:1;
+ unsigned long mmr_inj_tlbrreg:1; /* RW */
+ unsigned long rsvd_57_59:3;
+ unsigned long mmr_inj_tlblruv:1; /* RW */
+ unsigned long rsvd_61_63:3;
+ } s1;
+ struct uvxh_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long rsvd_48:1;
+ unsigned long rsvd_49_51:3;
+ unsigned long rsvd_52:1;
+ unsigned long rsvd_53_63:11;
+ } sx;
+ struct uv2h_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long rsvd_21_29:9;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long mmr_inj_con:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long mmr_inj_tlbram:1; /* RW */
+ unsigned long rsvd_53_63:11;
+ } s2;
+ struct uv3h_gr1_tlb_mmr_control_s {
+ unsigned long index:12; /* RW */
+ unsigned long mem_sel:2; /* RW */
+ unsigned long rsvd_14_15:2;
+ unsigned long auto_valid_en:1; /* RW */
+ unsigned long rsvd_17_19:3;
+ unsigned long mmr_hash_index_en:1; /* RW */
+ unsigned long ecc_sel:1; /* RW */
+ unsigned long rsvd_22_29:8;
+ unsigned long mmr_write:1; /* WP */
+ unsigned long mmr_read:1; /* WP */
+ unsigned long mmr_op_done:1; /* RW */
+ unsigned long rsvd_33_47:15;
+ unsigned long undef_48:1; /* Undefined */
+ unsigned long rsvd_49_51:3;
+ unsigned long undef_52:1; /* Undefined */
+ unsigned long rsvd_53_63:11;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_MMR_READ_DATA_HI */
+/* ========================================================================= */
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \
+ UV3H_GR1_TLB_MMR_READ_DATA_HI))
+
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
+
+union uvh_gr1_tlb_mmr_read_data_hi_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s;
+ struct uv1h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s1;
+ struct uvxh_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } sx;
+ struct uv2h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long rsvd_45_63:19;
+ } s2;
+ struct uv3h_gr1_tlb_mmr_read_data_hi_s {
+ unsigned long pfn:41; /* RO */
+ unsigned long gaa:2; /* RO */
+ unsigned long dirty:1; /* RO */
+ unsigned long larger:1; /* RO */
+ unsigned long aa_ext:1; /* RO */
+ unsigned long undef_46_54:9; /* Undefined */
+ unsigned long way_ecc:9; /* RO */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_GR1_TLB_MMR_READ_DATA_LO */
+/* ========================================================================= */
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO \
+ (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO : \
+ (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \
+ UV3H_GR1_TLB_MMR_READ_DATA_LO))
+
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
+
+union uvh_gr1_tlb_mmr_read_data_lo_u {
+ unsigned long v;
+ struct uvh_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s;
+ struct uv1h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s1;
+ struct uvxh_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } sx;
+ struct uv2h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s2;
+ struct uv3h_gr1_tlb_mmr_read_data_lo_s {
+ unsigned long vpn:39; /* RO */
+ unsigned long asid:24; /* RO */
+ unsigned long valid:1; /* RO */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_INT_CMPB */
+/* ========================================================================= */
+#define UVH_INT_CMPB 0x22080UL
+
+#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpb_u {
+ unsigned long v;
+ struct uvh_int_cmpb_s {
+ unsigned long real_time_cmpb:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_INT_CMPC */
+/* ========================================================================= */
+#define UVH_INT_CMPC 0x22100UL
+
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
+
+#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0
+#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpc_u {
+ unsigned long v;
+ struct uvh_int_cmpc_s {
+ unsigned long real_time_cmpc:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_INT_CMPD */
+/* ========================================================================= */
+#define UVH_INT_CMPD 0x22180UL
+
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
+
+#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0
+#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL
+
+union uvh_int_cmpd_u {
+ unsigned long v;
+ struct uvh_int_cmpd_s {
+ unsigned long real_time_cmpd:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_IPI_INT */
+/* ========================================================================= */
+#define UVH_IPI_INT 0x60500UL
+#define UVH_IPI_INT_32 0x348
+
+#define UVH_IPI_INT_VECTOR_SHFT 0
+#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
+#define UVH_IPI_INT_DESTMODE_SHFT 11
+#define UVH_IPI_INT_APIC_ID_SHFT 16
+#define UVH_IPI_INT_SEND_SHFT 63
+#define UVH_IPI_INT_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_IPI_INT_DELIVERY_MODE_MASK 0x0000000000000700UL
+#define UVH_IPI_INT_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_IPI_INT_APIC_ID_MASK 0x0000ffffffff0000UL
+#define UVH_IPI_INT_SEND_MASK 0x8000000000000000UL
+
+union uvh_ipi_int_u {
+ unsigned long v;
+ struct uvh_ipi_int_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long delivery_mode:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long rsvd_12_15:4;
+ unsigned long apic_id:32; /* RW */
+ unsigned long rsvd_48_62:15;
+ unsigned long send:1; /* WP */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
+
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_MASK 0x000007fffffffff0UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_MASK 0x7ffe000000000000UL
+
+union uvh_lb_bau_intd_payload_queue_first_u {
+ unsigned long v;
+ struct uvh_lb_bau_intd_payload_queue_first_s {
+ unsigned long rsvd_0_3:4;
+ unsigned long address:39; /* RW */
+ unsigned long rsvd_43_48:6;
+ unsigned long node_id:14; /* RW */
+ unsigned long rsvd_63:1;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
+
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
+
+union uvh_lb_bau_intd_payload_queue_last_u {
+ unsigned long v;
+ struct uvh_lb_bau_intd_payload_queue_last_s {
+ unsigned long rsvd_0_3:4;
+ unsigned long address:39; /* RW */
+ unsigned long rsvd_43_63:21;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
+
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
+
+union uvh_lb_bau_intd_payload_queue_tail_u {
+ unsigned long v;
+ struct uvh_lb_bau_intd_payload_queue_tail_s {
+ unsigned long rsvd_0_3:4;
+ unsigned long address:39; /* RW */
+ unsigned long rsvd_43_63:21;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
+
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_SHFT 2
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_SHFT 3
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_SHFT 4
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_SHFT 5
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_SHFT 6
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_SHFT 7
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_SHFT 8
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_SHFT 9
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_SHFT 10
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_SHFT 11
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_SHFT 12
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_SHFT 13
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_SHFT 14
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_SHFT 15
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_MASK 0x0000000000000001UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_MASK 0x0000000000000002UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_2_MASK 0x0000000000000004UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_3_MASK 0x0000000000000008UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_4_MASK 0x0000000000000010UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_5_MASK 0x0000000000000020UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_6_MASK 0x0000000000000040UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_7_MASK 0x0000000000000080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_0_MASK 0x0000000000000100UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_1_MASK 0x0000000000000200UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_2_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_3_MASK 0x0000000000000800UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_4_MASK 0x0000000000001000UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_5_MASK 0x0000000000002000UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_6_MASK 0x0000000000004000UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_TIMEOUT_7_MASK 0x0000000000008000UL
+
+union uvh_lb_bau_intd_software_acknowledge_u {
+ unsigned long v;
+ struct uvh_lb_bau_intd_software_acknowledge_s {
+ unsigned long pending_0:1; /* RW, W1C */
+ unsigned long pending_1:1; /* RW, W1C */
+ unsigned long pending_2:1; /* RW, W1C */
+ unsigned long pending_3:1; /* RW, W1C */
+ unsigned long pending_4:1; /* RW, W1C */
+ unsigned long pending_5:1; /* RW, W1C */
+ unsigned long pending_6:1; /* RW, W1C */
+ unsigned long pending_7:1; /* RW, W1C */
+ unsigned long timeout_0:1; /* RW, W1C */
+ unsigned long timeout_1:1; /* RW, W1C */
+ unsigned long timeout_2:1; /* RW, W1C */
+ unsigned long timeout_3:1; /* RW, W1C */
+ unsigned long timeout_4:1; /* RW, W1C */
+ unsigned long timeout_5:1; /* RW, W1C */
+ unsigned long timeout_6:1; /* RW, W1C */
+ unsigned long timeout_7:1; /* RW, W1C */
+ unsigned long rsvd_16_63:48;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
+/* ========================================================================= */
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
+
+
+/* ========================================================================= */
+/* UVH_LB_BAU_MISC_CONTROL */
+/* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV1H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
+#define UV1H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV2H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV3H_LB_BAU_MISC_CONTROL_32 0x320170UL
+
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UVH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV1H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV1H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV1H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV1H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV1H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV1H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV1H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV1H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV2H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV2H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV2H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV2H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV2H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV2H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV2H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
+#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37
+#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
+#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
+#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
+#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
+
+union uvh_lb_bau_misc_control_u {
+ unsigned long v;
+ struct uvh_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long rsvd_29_47:19;
+ unsigned long fun:16; /* RW */
+ } s;
+ struct uv1h_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long rsvd_29_47:19;
+ unsigned long fun:16; /* RW */
+ } s1;
+ struct uvxh_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long enable_automatic_apic_mode_selection:1;/* RW */
+ unsigned long apic_mode_status:1; /* RO */
+ unsigned long suppress_interrupts_to_self:1; /* RW */
+ unsigned long enable_lock_based_system_flush:1;/* RW */
+ unsigned long enable_extended_sb_status:1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self:1;/* RW */
+ unsigned long use_legacy_descriptor_formats:1;/* RW */
+ unsigned long rsvd_36_47:12;
+ unsigned long fun:16; /* RW */
+ } sx;
+ struct uv2h_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long enable_automatic_apic_mode_selection:1;/* RW */
+ unsigned long apic_mode_status:1; /* RO */
+ unsigned long suppress_interrupts_to_self:1; /* RW */
+ unsigned long enable_lock_based_system_flush:1;/* RW */
+ unsigned long enable_extended_sb_status:1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self:1;/* RW */
+ unsigned long use_legacy_descriptor_formats:1;/* RW */
+ unsigned long rsvd_36_47:12;
+ unsigned long fun:16; /* RW */
+ } s2;
+ struct uv3h_lb_bau_misc_control_s {
+ unsigned long rejection_delay:8; /* RW */
+ unsigned long apic_mode:1; /* RW */
+ unsigned long force_broadcast:1; /* RW */
+ unsigned long force_lock_nop:1; /* RW */
+ unsigned long qpi_agent_presence_vector:3; /* RW */
+ unsigned long descriptor_fetch_mode:1; /* RW */
+ unsigned long enable_intd_soft_ack_mode:1; /* RW */
+ unsigned long intd_soft_ack_timeout_period:4; /* RW */
+ unsigned long enable_dual_mapping_mode:1; /* RW */
+ unsigned long vga_io_port_decode_enable:1; /* RW */
+ unsigned long vga_io_port_16_bit_decode:1; /* RW */
+ unsigned long suppress_dest_registration:1; /* RW */
+ unsigned long programmed_initial_priority:3; /* RW */
+ unsigned long use_incoming_priority:1; /* RW */
+ unsigned long enable_programmed_initial_priority:1;/* RW */
+ unsigned long enable_automatic_apic_mode_selection:1;/* RW */
+ unsigned long apic_mode_status:1; /* RO */
+ unsigned long suppress_interrupts_to_self:1; /* RW */
+ unsigned long enable_lock_based_system_flush:1;/* RW */
+ unsigned long enable_extended_sb_status:1; /* RW */
+ unsigned long suppress_int_prio_udt_to_self:1;/* RW */
+ unsigned long use_legacy_descriptor_formats:1;/* RW */
+ unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
+ unsigned long enable_intd_prefetch_hint:1; /* RW */
+ unsigned long thread_kill_timebase:8; /* RW */
+ unsigned long rsvd_46_47:2;
+ unsigned long fun:16; /* RW */
+ } s3;
+};
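+
+/*
+ * Editor's illustration, not part of the original header: the _SHFT/_MASK
+ * pairs and the per-revision union views are two equivalent ways to pull a
+ * field out of a raw MMR value.  A minimal sketch using the UV2 names
+ * defined above; the helper names are hypothetical and the caller is
+ * assumed to have already read the register.
+ */
+static inline unsigned long uv2_misc_apic_mode_masked(unsigned long v)
+{
+	/* mask/shift form */
+	return (v & UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK) >>
+		UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT;
+}
+
+static inline unsigned long uv2_misc_apic_mode_union(unsigned long v)
+{
+	/* bitfield form through the union view */
+	union uvh_lb_bau_misc_control_u mc = { .v = v };
+	return mc.s2.apic_mode;
+}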
+
+/* ========================================================================= */
+/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
+/* ========================================================================= */
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
+
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_SHFT 63
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_MASK 0x000000000000003fUL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_MASK 0x4000000000000000UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INIT_MASK 0x8000000000000000UL
+
+union uvh_lb_bau_sb_activation_control_u {
+ unsigned long v;
+ struct uvh_lb_bau_sb_activation_control_s {
+ unsigned long index:6; /* RW */
+ unsigned long rsvd_6_61:56;
+ unsigned long push:1; /* WP */
+ unsigned long init:1; /* WP */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
+/* ========================================================================= */
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
+
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
+
+union uvh_lb_bau_sb_activation_status_0_u {
+ unsigned long v;
+ struct uvh_lb_bau_sb_activation_status_0_s {
+ unsigned long status:64; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
+/* ========================================================================= */
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
+
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
+
+union uvh_lb_bau_sb_activation_status_1_u {
+ unsigned long v;
+ struct uvh_lb_bau_sb_activation_status_1_s {
+ unsigned long status:64; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
+/* ========================================================================= */
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
+
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+
+union uvh_lb_bau_sb_descriptor_base_u {
+ unsigned long v;
+ struct uvh_lb_bau_sb_descriptor_base_s {
+ unsigned long rsvd_0_11:12;
+ unsigned long page_address:31; /* RW */
+ unsigned long rsvd_43_48:6;
+ unsigned long node_id:14; /* RW */
+ unsigned long rsvd_63:1;
+ } s;
+};
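+
+/*
+ * Editor's illustration, not part of the original header: composing a value
+ * to write back into the descriptor base register from its two fields.  A
+ * sketch only; "paddr" is assumed to be the page-aligned physical address
+ * of the descriptor table, matching the reserved low 12 bits.
+ */
+static inline unsigned long uv_make_sb_descriptor_base(unsigned long paddr,
+						       unsigned long node_id)
+{
+	return (paddr & UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK) |
+	       ((node_id << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT) &
+		UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK);
+}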
+
+/* ========================================================================= */
+/* UVH_NODE_ID */
+/* ========================================================================= */
+#define UVH_NODE_ID 0x0UL
+#define UV1H_NODE_ID 0x0UL
+#define UV2H_NODE_ID 0x0UL
+#define UV3H_NODE_ID 0x0UL
+
+#define UVH_NODE_ID_FORCE1_SHFT 0
+#define UVH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVH_NODE_ID_REVISION_SHFT 28
+#define UVH_NODE_ID_NODE_ID_SHFT 32
+#define UVH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+
+#define UV1H_NODE_ID_FORCE1_SHFT 0
+#define UV1H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV1H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV1H_NODE_ID_REVISION_SHFT 28
+#define UV1H_NODE_ID_NODE_ID_SHFT 32
+#define UV1H_NODE_ID_NODES_PER_BIT_SHFT 48
+#define UV1H_NODE_ID_NI_PORT_SHFT 56
+#define UV1H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV1H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV1H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV1H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV1H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
+#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
+
+#define UVXH_NODE_ID_FORCE1_SHFT 0
+#define UVXH_NODE_ID_MANUFACTURER_SHFT 1
+#define UVXH_NODE_ID_PART_NUMBER_SHFT 12
+#define UVXH_NODE_ID_REVISION_SHFT 28
+#define UVXH_NODE_ID_NODE_ID_SHFT 32
+#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UVXH_NODE_ID_NI_PORT_SHFT 57
+#define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
+#define UV2H_NODE_ID_FORCE1_SHFT 0
+#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV2H_NODE_ID_REVISION_SHFT 28
+#define UV2H_NODE_ID_NODE_ID_SHFT 32
+#define UV2H_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UV2H_NODE_ID_NI_PORT_SHFT 57
+#define UV2H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV2H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV2H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV2H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV2H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
+#define UV3H_NODE_ID_FORCE1_SHFT 0
+#define UV3H_NODE_ID_MANUFACTURER_SHFT 1
+#define UV3H_NODE_ID_PART_NUMBER_SHFT 12
+#define UV3H_NODE_ID_REVISION_SHFT 28
+#define UV3H_NODE_ID_NODE_ID_SHFT 32
+#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
+#define UV3H_NODE_ID_RESERVED_2_SHFT 49
+#define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50
+#define UV3H_NODE_ID_NI_PORT_SHFT 57
+#define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
+#define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
+#define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
+#define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
+#define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
+#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
+#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
+#define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
+#define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
+
+union uvh_node_id_u {
+ unsigned long v;
+ struct uvh_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47_63:17;
+ } s;
+ struct uv1h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47:1;
+ unsigned long nodes_per_bit:7; /* RW */
+ unsigned long rsvd_55:1;
+ unsigned long ni_port:4; /* RO */
+ unsigned long rsvd_60_63:4;
+ } s1;
+ struct uvxh_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47_49:3;
+ unsigned long nodes_per_bit:7; /* RO */
+ unsigned long ni_port:5; /* RO */
+ unsigned long rsvd_62_63:2;
+ } sx;
+ struct uv2h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47_49:3;
+ unsigned long nodes_per_bit:7; /* RO */
+ unsigned long ni_port:5; /* RO */
+ unsigned long rsvd_62_63:2;
+ } s2;
+ struct uv3h_node_id_s {
+ unsigned long force1:1; /* RO */
+ unsigned long manufacturer:11; /* RO */
+ unsigned long part_number:16; /* RO */
+ unsigned long revision:4; /* RO */
+ unsigned long node_id:15; /* RW */
+ unsigned long rsvd_47:1;
+ unsigned long router_select:1; /* RO */
+ unsigned long rsvd_49:1;
+ unsigned long nodes_per_bit:7; /* RO */
+ unsigned long ni_port:5; /* RO */
+ unsigned long rsvd_62_63:2;
+ } s3;
+};
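+
+/*
+ * Editor's illustration, not part of the original header: the node_id field
+ * sits at the same bit positions on every hub revision, so the common "s"
+ * view (or the UVH_ macros) is sufficient for decoding it.
+ */
+static inline unsigned long uv_node_id_from_mmr(unsigned long v)
+{
+	return (v & UVH_NODE_ID_NODE_ID_MASK) >> UVH_NODE_ID_NODE_ID_SHFT;
+}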
+
+/* ========================================================================= */
+/* UVH_NODE_PRESENT_TABLE */
+/* ========================================================================= */
+#define UVH_NODE_PRESENT_TABLE 0x1400UL
+#define UVH_NODE_PRESENT_TABLE_DEPTH 16
+
+#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
+#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
+
+union uvh_node_present_table_u {
+ unsigned long v;
+ struct uvh_node_present_table_s {
+ unsigned long nodes:64; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
+
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
+
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+
+#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UV1H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+
+#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+
+#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+
+union uvh_rh_gam_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s;
+ struct uv1h_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_11:2;
+ unsigned long mmiol_cfg:1; /* RW */
+ unsigned long rsvd_13_63:51;
+ } s1;
+ struct uvxh_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } sx;
+ struct uv2h_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s2;
+ struct uv3h_rh_gam_config_mmr_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long n_skt:4; /* RW */
+ unsigned long rsvd_10_63:54;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_MASK 0x0001000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_gru_overlay_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s;
+ struct uv1h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_47:2;
+ unsigned long gr4:1; /* RW */
+ unsigned long rsvd_49_51:3;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uvxh_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } sx;
+ struct uv2h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s2;
+ struct uv3h_rh_gam_gru_overlay_config_mmr_s {
+ unsigned long rsvd_0_27:28;
+ unsigned long base:18; /* RW */
+ unsigned long rsvd_46_51:6;
+ unsigned long n_gru:4; /* RW */
+ unsigned long rsvd_56_61:6;
+ unsigned long mode:1; /* RW */
+ unsigned long enable:1; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003fffc0000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 27
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_SHFT 52
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff8000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_N_IO_MASK 0x00f0000000000000UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_mmioh_overlay_config_mmr_u {
+ unsigned long v;
+ struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
+ unsigned long rsvd_0_29:30;
+ unsigned long base:16; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
+ unsigned long rsvd_0_26:27;
+ unsigned long base:19; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4; /* RW */
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s2;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_mmr_overlay_config_mmr_u {
+ unsigned long v;
+ struct uvh_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } s;
+ struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long dual_hub:1; /* RW */
+ unsigned long rsvd_47_62:16;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } sx;
+ struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } s2;
+ struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long rsvd_46_62:17;
+ unsigned long enable:1; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UVH_RTC */
+/* ========================================================================= */
+#define UVH_RTC 0x340000UL
+
+#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
+#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
+
+union uvh_rtc_u {
+ unsigned long v;
+ struct uvh_rtc_s {
+ unsigned long real_time_clock:56; /* RW */
+ unsigned long rsvd_56_63:8;
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_RTC1_INT_CONFIG */
+/* ========================================================================= */
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
+
+#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
+#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
+#define UVH_RTC1_INT_CONFIG_DESTMODE_SHFT 11
+#define UVH_RTC1_INT_CONFIG_STATUS_SHFT 12
+#define UVH_RTC1_INT_CONFIG_P_SHFT 13
+#define UVH_RTC1_INT_CONFIG_T_SHFT 15
+#define UVH_RTC1_INT_CONFIG_M_SHFT 16
+#define UVH_RTC1_INT_CONFIG_APIC_ID_SHFT 32
+#define UVH_RTC1_INT_CONFIG_VECTOR_MASK 0x00000000000000ffUL
+#define UVH_RTC1_INT_CONFIG_DM_MASK 0x0000000000000700UL
+#define UVH_RTC1_INT_CONFIG_DESTMODE_MASK 0x0000000000000800UL
+#define UVH_RTC1_INT_CONFIG_STATUS_MASK 0x0000000000001000UL
+#define UVH_RTC1_INT_CONFIG_P_MASK 0x0000000000002000UL
+#define UVH_RTC1_INT_CONFIG_T_MASK 0x0000000000008000UL
+#define UVH_RTC1_INT_CONFIG_M_MASK 0x0000000000010000UL
+#define UVH_RTC1_INT_CONFIG_APIC_ID_MASK 0xffffffff00000000UL
+
+union uvh_rtc1_int_config_u {
+ unsigned long v;
+ struct uvh_rtc1_int_config_s {
+ unsigned long vector_:8; /* RW */
+ unsigned long dm:3; /* RW */
+ unsigned long destmode:1; /* RW */
+ unsigned long status:1; /* RO */
+ unsigned long p:1; /* RO */
+ unsigned long rsvd_14:1;
+ unsigned long t:1; /* RO */
+ unsigned long m:1; /* RW */
+ unsigned long rsvd_17_31:15;
+ unsigned long apic_id:32; /* RW */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SCRATCH5 */
+/* ========================================================================= */
+#define UVH_SCRATCH5 0x2d0200UL
+#define UVH_SCRATCH5_32 0x778
+
+#define UVH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
+
+union uvh_scratch5_u {
+ unsigned long v;
+ struct uvh_scratch5_s {
+ unsigned long scratch5:64; /* RW, W1CS */
+ } s;
+};
+
+/* ========================================================================= */
+/* UVH_SCRATCH5_ALIAS */
+/* ========================================================================= */
+#define UVH_SCRATCH5_ALIAS 0x2d0208UL
+#define UVH_SCRATCH5_ALIAS_32 0x780
+
+
+/* ========================================================================= */
+/* UVH_SCRATCH5_ALIAS_2 */
+/* ========================================================================= */
+#define UVH_SCRATCH5_ALIAS_2 0x2d0210UL
+#define UVH_SCRATCH5_ALIAS_2_32 0x788
+
+
+/* ========================================================================= */
+/* UVXH_EVENT_OCCURRED2 */
+/* ========================================================================= */
+#define UVXH_EVENT_OCCURRED2 0x70100UL
+#define UVXH_EVENT_OCCURRED2_32 0xb68
+
+#define UVXH_EVENT_OCCURRED2_RTC_0_SHFT 0
+#define UVXH_EVENT_OCCURRED2_RTC_1_SHFT 1
+#define UVXH_EVENT_OCCURRED2_RTC_2_SHFT 2
+#define UVXH_EVENT_OCCURRED2_RTC_3_SHFT 3
+#define UVXH_EVENT_OCCURRED2_RTC_4_SHFT 4
+#define UVXH_EVENT_OCCURRED2_RTC_5_SHFT 5
+#define UVXH_EVENT_OCCURRED2_RTC_6_SHFT 6
+#define UVXH_EVENT_OCCURRED2_RTC_7_SHFT 7
+#define UVXH_EVENT_OCCURRED2_RTC_8_SHFT 8
+#define UVXH_EVENT_OCCURRED2_RTC_9_SHFT 9
+#define UVXH_EVENT_OCCURRED2_RTC_10_SHFT 10
+#define UVXH_EVENT_OCCURRED2_RTC_11_SHFT 11
+#define UVXH_EVENT_OCCURRED2_RTC_12_SHFT 12
+#define UVXH_EVENT_OCCURRED2_RTC_13_SHFT 13
+#define UVXH_EVENT_OCCURRED2_RTC_14_SHFT 14
+#define UVXH_EVENT_OCCURRED2_RTC_15_SHFT 15
+#define UVXH_EVENT_OCCURRED2_RTC_16_SHFT 16
+#define UVXH_EVENT_OCCURRED2_RTC_17_SHFT 17
+#define UVXH_EVENT_OCCURRED2_RTC_18_SHFT 18
+#define UVXH_EVENT_OCCURRED2_RTC_19_SHFT 19
+#define UVXH_EVENT_OCCURRED2_RTC_20_SHFT 20
+#define UVXH_EVENT_OCCURRED2_RTC_21_SHFT 21
+#define UVXH_EVENT_OCCURRED2_RTC_22_SHFT 22
+#define UVXH_EVENT_OCCURRED2_RTC_23_SHFT 23
+#define UVXH_EVENT_OCCURRED2_RTC_24_SHFT 24
+#define UVXH_EVENT_OCCURRED2_RTC_25_SHFT 25
+#define UVXH_EVENT_OCCURRED2_RTC_26_SHFT 26
+#define UVXH_EVENT_OCCURRED2_RTC_27_SHFT 27
+#define UVXH_EVENT_OCCURRED2_RTC_28_SHFT 28
+#define UVXH_EVENT_OCCURRED2_RTC_29_SHFT 29
+#define UVXH_EVENT_OCCURRED2_RTC_30_SHFT 30
+#define UVXH_EVENT_OCCURRED2_RTC_31_SHFT 31
+#define UVXH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
+#define UVXH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
+#define UVXH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
+#define UVXH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
+#define UVXH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
+#define UVXH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
+#define UVXH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
+#define UVXH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
+#define UVXH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
+#define UVXH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
+#define UVXH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
+#define UVXH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
+#define UVXH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
+
+union uvxh_event_occurred2_u {
+ unsigned long v;
+ struct uvxh_event_occurred2_s {
+ unsigned long rtc_0:1; /* RW */
+ unsigned long rtc_1:1; /* RW */
+ unsigned long rtc_2:1; /* RW */
+ unsigned long rtc_3:1; /* RW */
+ unsigned long rtc_4:1; /* RW */
+ unsigned long rtc_5:1; /* RW */
+ unsigned long rtc_6:1; /* RW */
+ unsigned long rtc_7:1; /* RW */
+ unsigned long rtc_8:1; /* RW */
+ unsigned long rtc_9:1; /* RW */
+ unsigned long rtc_10:1; /* RW */
+ unsigned long rtc_11:1; /* RW */
+ unsigned long rtc_12:1; /* RW */
+ unsigned long rtc_13:1; /* RW */
+ unsigned long rtc_14:1; /* RW */
+ unsigned long rtc_15:1; /* RW */
+ unsigned long rtc_16:1; /* RW */
+ unsigned long rtc_17:1; /* RW */
+ unsigned long rtc_18:1; /* RW */
+ unsigned long rtc_19:1; /* RW */
+ unsigned long rtc_20:1; /* RW */
+ unsigned long rtc_21:1; /* RW */
+ unsigned long rtc_22:1; /* RW */
+ unsigned long rtc_23:1; /* RW */
+ unsigned long rtc_24:1; /* RW */
+ unsigned long rtc_25:1; /* RW */
+ unsigned long rtc_26:1; /* RW */
+ unsigned long rtc_27:1; /* RW */
+ unsigned long rtc_28:1; /* RW */
+ unsigned long rtc_29:1; /* RW */
+ unsigned long rtc_30:1; /* RW */
+ unsigned long rtc_31:1; /* RW */
+ unsigned long rsvd_32_63:32;
+ } sx;
+};
+
+/* ========================================================================= */
+/* UVXH_EVENT_OCCURRED2_ALIAS */
+/* ========================================================================= */
+#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
+#define UVXH_EVENT_OCCURRED2_ALIAS_32 0xb70
+
+
+/* ========================================================================= */
+/* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */
+/* ========================================================================= */
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+union uvxh_lb_bau_sb_activation_status_2_u {
+ unsigned long v;
+ struct uvxh_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error:64; /* RW */
+ } sx;
+ struct uv2h_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error:64; /* RW */
+ } s2;
+ struct uv3h_lb_bau_sb_activation_status_2_s {
+ unsigned long aux_error:64; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK */
+/* ========================================================================= */
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x9f0
+
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
+#define UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
+
+union uv1h_lb_target_physical_apic_id_mask_u {
+ unsigned long v;
+ struct uv1h_lb_target_physical_apic_id_mask_s {
+ unsigned long bit_enables:32; /* RW */
+ unsigned long rsvd_32_63:32;
+ } s1;
+};
+
+/* ========================================================================= */
+/* UV3H_GR0_GAM_GR_CONFIG */
+/* ========================================================================= */
+#define UV3H_GR0_GAM_GR_CONFIG 0xc00028UL
+
+#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_SHFT 0
+#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UV3H_GR0_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_GR0_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+union uv3h_gr0_gam_gr_config_u {
+ unsigned long v;
+ struct uv3h_gr0_gam_gr_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long undef_6_9:4; /* Undefined */
+ unsigned long subspace:1; /* RW */
+ unsigned long reserved:53;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_GR1_GAM_GR_CONFIG */
+/* ========================================================================= */
+#define UV3H_GR1_GAM_GR_CONFIG 0x1000028UL
+
+#define UV3H_GR1_GAM_GR_CONFIG_M_SKT_SHFT 0
+#define UV3H_GR1_GAM_GR_CONFIG_SUBSPACE_SHFT 10
+#define UV3H_GR1_GAM_GR_CONFIG_M_SKT_MASK 0x000000000000003fUL
+#define UV3H_GR1_GAM_GR_CONFIG_SUBSPACE_MASK 0x0000000000000400UL
+
+union uv3h_gr1_gam_gr_config_u {
+ unsigned long v;
+ struct uv3h_gr1_gam_gr_config_s {
+ unsigned long m_skt:6; /* RW */
+ unsigned long undef_6_9:4; /* Undefined */
+ unsigned long subspace:1; /* RW */
+ unsigned long reserved:53;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uv3h_rh_gam_mmioh_overlay_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uv3h_rh_gam_mmioh_overlay_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+
+union uv3h_rh_gam_mmioh_redirect_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+};
+
+/* ========================================================================= */
+/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+
+union uv3h_rh_gam_mmioh_redirect_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+};
+
+
+#endif /* _ASM_X86_UV_UV_MMRS_H */
diff --git a/kernel/arch/x86/include/asm/vdso.h b/kernel/arch/x86/include/asm/vdso.h
new file mode 100644
index 000000000..8021bd28c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/vdso.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_X86_VDSO_H
+#define _ASM_X86_VDSO_H
+
+#include <asm/page_types.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#ifndef __ASSEMBLER__
+
+#include <linux/mm_types.h>
+
+struct vdso_image {
+ void *data;
+ unsigned long size; /* Always a multiple of PAGE_SIZE */
+
+ /* text_mapping.pages is big enough for data/size page pointers */
+ struct vm_special_mapping text_mapping;
+
+ unsigned long alt, alt_len;
+
+ long sym_vvar_start; /* Negative offset to the vvar area */
+
+ long sym_vvar_page;
+ long sym_hpet_page;
+ long sym_VDSO32_NOTE_MASK;
+ long sym___kernel_sigreturn;
+ long sym___kernel_rt_sigreturn;
+ long sym___kernel_vsyscall;
+ long sym_VDSO32_SYSENTER_RETURN;
+};
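+
+/*
+ * Editor's note, illustrative only: the sym_* members are byte offsets
+ * relative to the start of the mapped vdso text, and sym_vvar_start is
+ * negative (the vvar area sits just below the text).  A mapping routine
+ * might turn them into addresses roughly like this, where "addr" is a
+ * hypothetical name for the base of the whole vvar+text mapping:
+ *
+ *	unsigned long text_start = addr - image->sym_vvar_start;
+ *	unsigned long vvar_page  = text_start + image->sym_vvar_page;
+ */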
+
+#ifdef CONFIG_X86_64
+extern const struct vdso_image vdso_image_64;
+#endif
+
+#ifdef CONFIG_X86_X32
+extern const struct vdso_image vdso_image_x32;
+#endif
+
+#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_int80;
+#ifdef CONFIG_COMPAT
+extern const struct vdso_image vdso_image_32_syscall;
+#endif
+extern const struct vdso_image vdso_image_32_sysenter;
+
+extern const struct vdso_image *selected_vdso32;
+#endif
+
+extern void __init init_vdso_image(const struct vdso_image *image);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _ASM_X86_VDSO_H */
diff --git a/kernel/arch/x86/include/asm/vga.h b/kernel/arch/x86/include/asm/vga.h
new file mode 100644
index 000000000..c4b9dc2f6
--- /dev/null
+++ b/kernel/arch/x86/include/asm/vga.h
@@ -0,0 +1,20 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+#ifndef _ASM_X86_VGA_H
+#define _ASM_X86_VGA_H
+
+/*
+ * On the PC, we can just recalculate addresses and then
+ * access the videoram directly without any black magic.
+ */
+
+#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x, y) (*(y) = (x))
+
+#endif /* _ASM_X86_VGA_H */
diff --git a/kernel/arch/x86/include/asm/vgtod.h b/kernel/arch/x86/include/asm/vgtod.h
new file mode 100644
index 000000000..f556c4843
--- /dev/null
+++ b/kernel/arch/x86/include/asm/vgtod.h
@@ -0,0 +1,94 @@
+#ifndef _ASM_X86_VGTOD_H
+#define _ASM_X86_VGTOD_H
+
+#include <linux/compiler.h>
+#include <linux/clocksource.h>
+
+#ifdef BUILD_VDSO32_64
+typedef u64 gtod_long_t;
+#else
+typedef unsigned long gtod_long_t;
+#endif
+/*
+ * vsyscall_gtod_data is accessed by both 32-bit and 64-bit code at the same
+ * time, so be careful when modifying this structure.
+ */
+struct vsyscall_gtod_data {
+ unsigned seq;
+
+ int vclock_mode;
+ cycle_t cycle_last;
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+
+ /* open coded 'struct timespec' */
+ u64 wall_time_snsec;
+ gtod_long_t wall_time_sec;
+ gtod_long_t monotonic_time_sec;
+ u64 monotonic_time_snsec;
+ gtod_long_t wall_time_coarse_sec;
+ gtod_long_t wall_time_coarse_nsec;
+ gtod_long_t monotonic_time_coarse_sec;
+ gtod_long_t monotonic_time_coarse_nsec;
+
+ int tz_minuteswest;
+ int tz_dsttime;
+};
+extern struct vsyscall_gtod_data vsyscall_gtod_data;
+
+static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
+{
+ unsigned ret;
+
+repeat:
+ ret = ACCESS_ONCE(s->seq);
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
+ smp_rmb();
+ return ret;
+}
+
+static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
+ unsigned start)
+{
+ smp_rmb();
+ return unlikely(s->seq != start);
+}
+
+static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
+{
+ ++s->seq;
+ smp_wmb();
+}
+
+static inline void gtod_write_end(struct vsyscall_gtod_data *s)
+{
+ smp_wmb();
+ ++s->seq;
+}
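+
+/*
+ * Editor's illustration, not part of the original header: readers pair the
+ * two read helpers above in the usual seqcount loop, retrying whenever a
+ * concurrent gtod_write_begin()/gtod_write_end() section was observed.
+ *
+ *	gtod_long_t sec;
+ *	unsigned seq;
+ *
+ *	do {
+ *		seq = gtod_read_begin(&vsyscall_gtod_data);
+ *		sec = vsyscall_gtod_data.wall_time_sec;
+ *	} while (unlikely(gtod_read_retry(&vsyscall_gtod_data, seq)));
+ */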
+
+#ifdef CONFIG_X86_64
+
+#define VGETCPU_CPU_MASK 0xfff
+
+static inline unsigned int __getcpu(void)
+{
+ unsigned int p;
+
+ /*
+ * Load per CPU data from GDT. LSL is faster than RDTSCP and
+ * works on all CPUs. This is volatile so that it orders
+ * correctly wrt barrier() and to keep gcc from cleverly
+ * hoisting it out of the calling function.
+ */
+ asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+
+ return p;
+}
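+
+/*
+ * Editor's illustration, not part of the original header: the segment limit
+ * returned by __getcpu() encodes both the CPU and the node number; callers
+ * split it roughly like this (the node shift of 12 matches the width of
+ * VGETCPU_CPU_MASK):
+ *
+ *	unsigned int p    = __getcpu();
+ *	unsigned int cpu  = p & VGETCPU_CPU_MASK;
+ *	unsigned int node = p >> 12;
+ */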
+
+#endif /* CONFIG_X86_64 */
+
+#endif /* _ASM_X86_VGTOD_H */
diff --git a/kernel/arch/x86/include/asm/virtext.h b/kernel/arch/x86/include/asm/virtext.h
new file mode 100644
index 000000000..cce9ee68e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/virtext.h
@@ -0,0 +1,132 @@
+/* CPU virtualization extensions handling
+ *
+ * This should carry the code for handling CPU virtualization extensions
+ * that needs to live in the kernel core.
+ *
+ * Author: Eduardo Habkost <ehabkost@redhat.com>
+ *
+ * Copyright (C) 2008, Red Hat Inc.
+ *
+ * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+#ifndef _ASM_X86_VIRTEX_H
+#define _ASM_X86_VIRTEX_H
+
+#include <asm/processor.h>
+
+#include <asm/vmx.h>
+#include <asm/svm.h>
+#include <asm/tlbflush.h>
+
+/*
+ * VMX functions:
+ */
+
+static inline int cpu_has_vmx(void)
+{
+ unsigned long ecx = cpuid_ecx(1);
+ return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
+}
+
+
+/** Disable VMX on the current CPU
+ *
+ * vmxoff causes an undefined-opcode exception if vmxon was not run
+ * on the CPU previously. Only call this function if you know VMX
+ * is enabled.
+ */
+static inline void cpu_vmxoff(void)
+{
+ asm volatile (ASM_VMX_VMXOFF : : : "cc");
+ cr4_clear_bits(X86_CR4_VMXE);
+}
+
+static inline int cpu_vmx_enabled(void)
+{
+ return __read_cr4() & X86_CR4_VMXE;
+}
+
+/** Disable VMX if it is enabled on the current CPU
+ *
+ * You shouldn't call this if cpu_has_vmx() returns 0.
+ */
+static inline void __cpu_emergency_vmxoff(void)
+{
+ if (cpu_vmx_enabled())
+ cpu_vmxoff();
+}
+
+/** Disable VMX if it is supported and enabled on the current CPU
+ */
+static inline void cpu_emergency_vmxoff(void)
+{
+ if (cpu_has_vmx())
+ __cpu_emergency_vmxoff();
+}
+
+
+
+
+/*
+ * SVM functions:
+ */
+
+/** Check if the CPU has SVM support
+ *
+ * You can use the 'msg' arg to get a message describing the problem,
+ * if the function returns zero. Simply pass NULL if you are not interested
+ * in the messages; gcc should take care of not generating code for
+ * the messages in this case.
+ */
+static inline int cpu_has_svm(const char **msg)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+ if (msg)
+ *msg = "not amd";
+ return 0;
+ }
+
+ cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
+ if (eax < SVM_CPUID_FUNC) {
+ if (msg)
+ *msg = "can't execute cpuid_8000000a";
+ return 0;
+ }
+
+ cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+ if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
+ if (msg)
+ *msg = "svm not available";
+ return 0;
+ }
+ return 1;
+}
+
+
+/** Disable SVM on the current CPU
+ *
+ * You should call this only if cpu_has_svm() returned true.
+ */
+static inline void cpu_svm_disable(void)
+{
+ uint64_t efer;
+
+ wrmsrl(MSR_VM_HSAVE_PA, 0);
+ rdmsrl(MSR_EFER, efer);
+ wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+}
+
+/** Makes sure SVM is disabled, if it is supported on the CPU
+ */
+static inline void cpu_emergency_svm_disable(void)
+{
+ if (cpu_has_svm(NULL))
+ cpu_svm_disable();
+}
+
+#endif /* _ASM_X86_VIRTEX_H */
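
A hedged usage sketch of the helpers above, e.g. from a crash/kexec path that must hand over the CPU with virtualization extensions off; the function name and the printk are illustrative only.

static void example_disable_virt_for_handover(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg))
		printk(KERN_INFO "SVM not usable: %s\n", msg);

	cpu_emergency_vmxoff();		/* no-op unless VMX is supported and on */
	cpu_emergency_svm_disable();	/* no-op unless SVM is supported */
}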
diff --git a/kernel/arch/x86/include/asm/vm86.h b/kernel/arch/x86/include/asm/vm86.h
new file mode 100644
index 000000000..1d8de3f3f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/vm86.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_X86_VM86_H
+#define _ASM_X86_VM86_H
+
+
+#include <asm/ptrace.h>
+#include <uapi/asm/vm86.h>
+
+/*
+ * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
+ * mode - the main change is that the old segment descriptors aren't
+ * useful any more and are forced to be zero by the kernel (and the
+ * hardware when a trap occurs), and the real segment descriptors are
+ * at the end of the structure. Look at ptrace.h to see the "normal"
+ * setup. For user space layout see 'struct vm86_regs' above.
+ */
+
+struct kernel_vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+ struct pt_regs pt;
+/*
+ * these are specific to v86 mode:
+ */
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+};
+
+struct kernel_vm86_struct {
+ struct kernel_vm86_regs regs;
+/*
+ * the below part remains on the kernel stack while we are in VM86 mode.
+ * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
+ * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
+ * 'struct kernel_vm86_regs' with the then actual values.
+ * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
+ * in kernel space, hence we need not re-fetch the data from userspace.
+ */
+#define VM86_TSS_ESP0 flags
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct revectored_struct int_revectored;
+ struct revectored_struct int21_revectored;
+ struct vm86plus_info_struct vm86plus;
+ struct pt_regs *regs32; /* here we save the pointer to the old regs */
+/*
+ * The below is not part of the structure, but the stack layout continues
+ * this way. In front of 'return-eip' may be some data, depending on
+ * compilation, so we don't rely on this and save the pointer to 'oldregs'
+ * in 'regs32' above.
+ * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
+
+ long return-eip; from call to vm86()
+ struct pt_regs oldregs; user space registers as saved by syscall
+ */
+};
+
+#ifdef CONFIG_VM86
+
+void handle_vm86_fault(struct kernel_vm86_regs *, long);
+int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
+struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
+
+struct task_struct;
+void release_vm86_irqs(struct task_struct *);
+
+#else
+
+#define handle_vm86_fault(a, b)
+#define release_vm86_irqs(a)
+
+static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
+{
+ return 0;
+}
+
+#endif /* CONFIG_VM86 */
+
+#endif /* _ASM_X86_VM86_H */
diff --git a/kernel/arch/x86/include/asm/vmx.h b/kernel/arch/x86/include/asm/vmx.h
new file mode 100644
index 000000000..da772edd1
--- /dev/null
+++ b/kernel/arch/x86/include/asm/vmx.h
@@ -0,0 +1,494 @@
+/*
+ * vmx.h: VMX Architecture related definitions
+ * Copyright (c) 2004, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * A few random additions are:
+ * Copyright (C) 2006 Qumranet
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ */
+#ifndef VMX_H
+#define VMX_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/vmx.h>
+
+/*
+ * Definitions of Primary Processor-Based VM-Execution Controls.
+ */
+#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
+#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
+#define CPU_BASED_HLT_EXITING 0x00000080
+#define CPU_BASED_INVLPG_EXITING 0x00000200
+#define CPU_BASED_MWAIT_EXITING 0x00000400
+#define CPU_BASED_RDPMC_EXITING 0x00000800
+#define CPU_BASED_RDTSC_EXITING 0x00001000
+#define CPU_BASED_CR3_LOAD_EXITING 0x00008000
+#define CPU_BASED_CR3_STORE_EXITING 0x00010000
+#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
+#define CPU_BASED_CR8_STORE_EXITING 0x00100000
+#define CPU_BASED_TPR_SHADOW 0x00200000
+#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
+#define CPU_BASED_MOV_DR_EXITING 0x00800000
+#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
+#define CPU_BASED_USE_IO_BITMAPS 0x02000000
+#define CPU_BASED_USE_MSR_BITMAPS 0x10000000
+#define CPU_BASED_MONITOR_EXITING 0x20000000
+#define CPU_BASED_PAUSE_EXITING 0x40000000
+#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
+
+#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172
+
+/*
+ * Definitions of Secondary Processor-Based VM-Execution Controls.
+ */
+#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
+#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
+#define SECONDARY_EXEC_RDTSCP 0x00000008
+#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
+#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
+#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
+#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
+#define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
+#define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
+#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
+#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
+#define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
+#define SECONDARY_EXEC_ENABLE_PML 0x00020000
+#define SECONDARY_EXEC_XSAVES 0x00100000
+
+
+#define PIN_BASED_EXT_INTR_MASK 0x00000001
+#define PIN_BASED_NMI_EXITING 0x00000008
+#define PIN_BASED_VIRTUAL_NMIS 0x00000020
+#define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
+#define PIN_BASED_POSTED_INTR 0x00000080
+
+#define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016
+
+#define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
+#define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
+#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
+#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
+#define VM_EXIT_SAVE_IA32_PAT 0x00040000
+#define VM_EXIT_LOAD_IA32_PAT 0x00080000
+#define VM_EXIT_SAVE_IA32_EFER 0x00100000
+#define VM_EXIT_LOAD_IA32_EFER 0x00200000
+#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
+#define VM_EXIT_CLEAR_BNDCFGS 0x00800000
+
+#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
+
+#define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
+#define VM_ENTRY_IA32E_MODE 0x00000200
+#define VM_ENTRY_SMM 0x00000400
+#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
+#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
+#define VM_ENTRY_LOAD_IA32_PAT 0x00004000
+#define VM_ENTRY_LOAD_IA32_EFER 0x00008000
+#define VM_ENTRY_LOAD_BNDCFGS 0x00010000
+
+#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
+
+#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
+#define VMX_MISC_SAVE_EFER_LMA 0x00000020
+#define VMX_MISC_ACTIVITY_HLT 0x00000040
+
+/* VMCS Encodings */
+enum vmcs_field {
+ VIRTUAL_PROCESSOR_ID = 0x00000000,
+ POSTED_INTR_NV = 0x00000002,
+ GUEST_ES_SELECTOR = 0x00000800,
+ GUEST_CS_SELECTOR = 0x00000802,
+ GUEST_SS_SELECTOR = 0x00000804,
+ GUEST_DS_SELECTOR = 0x00000806,
+ GUEST_FS_SELECTOR = 0x00000808,
+ GUEST_GS_SELECTOR = 0x0000080a,
+ GUEST_LDTR_SELECTOR = 0x0000080c,
+ GUEST_TR_SELECTOR = 0x0000080e,
+ GUEST_INTR_STATUS = 0x00000810,
+ GUEST_PML_INDEX = 0x00000812,
+ HOST_ES_SELECTOR = 0x00000c00,
+ HOST_CS_SELECTOR = 0x00000c02,
+ HOST_SS_SELECTOR = 0x00000c04,
+ HOST_DS_SELECTOR = 0x00000c06,
+ HOST_FS_SELECTOR = 0x00000c08,
+ HOST_GS_SELECTOR = 0x00000c0a,
+ HOST_TR_SELECTOR = 0x00000c0c,
+ IO_BITMAP_A = 0x00002000,
+ IO_BITMAP_A_HIGH = 0x00002001,
+ IO_BITMAP_B = 0x00002002,
+ IO_BITMAP_B_HIGH = 0x00002003,
+ MSR_BITMAP = 0x00002004,
+ MSR_BITMAP_HIGH = 0x00002005,
+ VM_EXIT_MSR_STORE_ADDR = 0x00002006,
+ VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007,
+ VM_EXIT_MSR_LOAD_ADDR = 0x00002008,
+ VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009,
+ VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a,
+ VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b,
+ PML_ADDRESS = 0x0000200e,
+ PML_ADDRESS_HIGH = 0x0000200f,
+ TSC_OFFSET = 0x00002010,
+ TSC_OFFSET_HIGH = 0x00002011,
+ VIRTUAL_APIC_PAGE_ADDR = 0x00002012,
+ VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013,
+ APIC_ACCESS_ADDR = 0x00002014,
+ APIC_ACCESS_ADDR_HIGH = 0x00002015,
+ POSTED_INTR_DESC_ADDR = 0x00002016,
+ POSTED_INTR_DESC_ADDR_HIGH = 0x00002017,
+ EPT_POINTER = 0x0000201a,
+ EPT_POINTER_HIGH = 0x0000201b,
+ EOI_EXIT_BITMAP0 = 0x0000201c,
+ EOI_EXIT_BITMAP0_HIGH = 0x0000201d,
+ EOI_EXIT_BITMAP1 = 0x0000201e,
+ EOI_EXIT_BITMAP1_HIGH = 0x0000201f,
+ EOI_EXIT_BITMAP2 = 0x00002020,
+ EOI_EXIT_BITMAP2_HIGH = 0x00002021,
+ EOI_EXIT_BITMAP3 = 0x00002022,
+ EOI_EXIT_BITMAP3_HIGH = 0x00002023,
+ VMREAD_BITMAP = 0x00002026,
+ VMWRITE_BITMAP = 0x00002028,
+ XSS_EXIT_BITMAP = 0x0000202C,
+ XSS_EXIT_BITMAP_HIGH = 0x0000202D,
+ GUEST_PHYSICAL_ADDRESS = 0x00002400,
+ GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401,
+ VMCS_LINK_POINTER = 0x00002800,
+ VMCS_LINK_POINTER_HIGH = 0x00002801,
+ GUEST_IA32_DEBUGCTL = 0x00002802,
+ GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
+ GUEST_IA32_PAT = 0x00002804,
+ GUEST_IA32_PAT_HIGH = 0x00002805,
+ GUEST_IA32_EFER = 0x00002806,
+ GUEST_IA32_EFER_HIGH = 0x00002807,
+ GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
+ GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
+ GUEST_PDPTR0 = 0x0000280a,
+ GUEST_PDPTR0_HIGH = 0x0000280b,
+ GUEST_PDPTR1 = 0x0000280c,
+ GUEST_PDPTR1_HIGH = 0x0000280d,
+ GUEST_PDPTR2 = 0x0000280e,
+ GUEST_PDPTR2_HIGH = 0x0000280f,
+ GUEST_PDPTR3 = 0x00002810,
+ GUEST_PDPTR3_HIGH = 0x00002811,
+ GUEST_BNDCFGS = 0x00002812,
+ GUEST_BNDCFGS_HIGH = 0x00002813,
+ HOST_IA32_PAT = 0x00002c00,
+ HOST_IA32_PAT_HIGH = 0x00002c01,
+ HOST_IA32_EFER = 0x00002c02,
+ HOST_IA32_EFER_HIGH = 0x00002c03,
+ HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
+ HOST_IA32_PERF_GLOBAL_CTRL_HIGH = 0x00002c05,
+ PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
+ CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
+ EXCEPTION_BITMAP = 0x00004004,
+ PAGE_FAULT_ERROR_CODE_MASK = 0x00004006,
+ PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008,
+ CR3_TARGET_COUNT = 0x0000400a,
+ VM_EXIT_CONTROLS = 0x0000400c,
+ VM_EXIT_MSR_STORE_COUNT = 0x0000400e,
+ VM_EXIT_MSR_LOAD_COUNT = 0x00004010,
+ VM_ENTRY_CONTROLS = 0x00004012,
+ VM_ENTRY_MSR_LOAD_COUNT = 0x00004014,
+ VM_ENTRY_INTR_INFO_FIELD = 0x00004016,
+ VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018,
+ VM_ENTRY_INSTRUCTION_LEN = 0x0000401a,
+ TPR_THRESHOLD = 0x0000401c,
+ SECONDARY_VM_EXEC_CONTROL = 0x0000401e,
+ PLE_GAP = 0x00004020,
+ PLE_WINDOW = 0x00004022,
+ VM_INSTRUCTION_ERROR = 0x00004400,
+ VM_EXIT_REASON = 0x00004402,
+ VM_EXIT_INTR_INFO = 0x00004404,
+ VM_EXIT_INTR_ERROR_CODE = 0x00004406,
+ IDT_VECTORING_INFO_FIELD = 0x00004408,
+ IDT_VECTORING_ERROR_CODE = 0x0000440a,
+ VM_EXIT_INSTRUCTION_LEN = 0x0000440c,
+ VMX_INSTRUCTION_INFO = 0x0000440e,
+ GUEST_ES_LIMIT = 0x00004800,
+ GUEST_CS_LIMIT = 0x00004802,
+ GUEST_SS_LIMIT = 0x00004804,
+ GUEST_DS_LIMIT = 0x00004806,
+ GUEST_FS_LIMIT = 0x00004808,
+ GUEST_GS_LIMIT = 0x0000480a,
+ GUEST_LDTR_LIMIT = 0x0000480c,
+ GUEST_TR_LIMIT = 0x0000480e,
+ GUEST_GDTR_LIMIT = 0x00004810,
+ GUEST_IDTR_LIMIT = 0x00004812,
+ GUEST_ES_AR_BYTES = 0x00004814,
+ GUEST_CS_AR_BYTES = 0x00004816,
+ GUEST_SS_AR_BYTES = 0x00004818,
+ GUEST_DS_AR_BYTES = 0x0000481a,
+ GUEST_FS_AR_BYTES = 0x0000481c,
+ GUEST_GS_AR_BYTES = 0x0000481e,
+ GUEST_LDTR_AR_BYTES = 0x00004820,
+ GUEST_TR_AR_BYTES = 0x00004822,
+ GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
+ GUEST_ACTIVITY_STATE = 0X00004826,
+ GUEST_SYSENTER_CS = 0x0000482A,
+ VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
+ HOST_IA32_SYSENTER_CS = 0x00004c00,
+ CR0_GUEST_HOST_MASK = 0x00006000,
+ CR4_GUEST_HOST_MASK = 0x00006002,
+ CR0_READ_SHADOW = 0x00006004,
+ CR4_READ_SHADOW = 0x00006006,
+ CR3_TARGET_VALUE0 = 0x00006008,
+ CR3_TARGET_VALUE1 = 0x0000600a,
+ CR3_TARGET_VALUE2 = 0x0000600c,
+ CR3_TARGET_VALUE3 = 0x0000600e,
+ EXIT_QUALIFICATION = 0x00006400,
+ GUEST_LINEAR_ADDRESS = 0x0000640a,
+ GUEST_CR0 = 0x00006800,
+ GUEST_CR3 = 0x00006802,
+ GUEST_CR4 = 0x00006804,
+ GUEST_ES_BASE = 0x00006806,
+ GUEST_CS_BASE = 0x00006808,
+ GUEST_SS_BASE = 0x0000680a,
+ GUEST_DS_BASE = 0x0000680c,
+ GUEST_FS_BASE = 0x0000680e,
+ GUEST_GS_BASE = 0x00006810,
+ GUEST_LDTR_BASE = 0x00006812,
+ GUEST_TR_BASE = 0x00006814,
+ GUEST_GDTR_BASE = 0x00006816,
+ GUEST_IDTR_BASE = 0x00006818,
+ GUEST_DR7 = 0x0000681a,
+ GUEST_RSP = 0x0000681c,
+ GUEST_RIP = 0x0000681e,
+ GUEST_RFLAGS = 0x00006820,
+ GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822,
+ GUEST_SYSENTER_ESP = 0x00006824,
+ GUEST_SYSENTER_EIP = 0x00006826,
+ HOST_CR0 = 0x00006c00,
+ HOST_CR3 = 0x00006c02,
+ HOST_CR4 = 0x00006c04,
+ HOST_FS_BASE = 0x00006c06,
+ HOST_GS_BASE = 0x00006c08,
+ HOST_TR_BASE = 0x00006c0a,
+ HOST_GDTR_BASE = 0x00006c0c,
+ HOST_IDTR_BASE = 0x00006c0e,
+ HOST_IA32_SYSENTER_ESP = 0x00006c10,
+ HOST_IA32_SYSENTER_EIP = 0x00006c12,
+ HOST_RSP = 0x00006c14,
+ HOST_RIP = 0x00006c16,
+};
+
+/*
+ * Interruption-information format
+ */
+#define INTR_INFO_VECTOR_MASK 0xff /* 7:0 */
+#define INTR_INFO_INTR_TYPE_MASK 0x700 /* 10:8 */
+#define INTR_INFO_DELIVER_CODE_MASK 0x800 /* 11 */
+#define INTR_INFO_UNBLOCK_NMI 0x1000 /* 12 */
+#define INTR_INFO_VALID_MASK 0x80000000 /* 31 */
+#define INTR_INFO_RESVD_BITS_MASK 0x7ffff000
+
+#define VECTORING_INFO_VECTOR_MASK INTR_INFO_VECTOR_MASK
+#define VECTORING_INFO_TYPE_MASK INTR_INFO_INTR_TYPE_MASK
+#define VECTORING_INFO_DELIVER_CODE_MASK INTR_INFO_DELIVER_CODE_MASK
+#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK
+
+#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
+#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
+#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
+#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
+#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
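
/*
 * Illustrative sketch (not part of the header): how the masks above are
 * meant to be combined when looking at a value read from VM_EXIT_INTR_INFO;
 * the helper name is an assumption.
 */
static inline int intr_info_is_valid_hard_exception(u32 intr_info)
{
	if (!(intr_info & INTR_INFO_VALID_MASK))
		return 0;
	return (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_HARD_EXCEPTION;
}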
+
+/* GUEST_INTERRUPTIBILITY_INFO flags. */
+#define GUEST_INTR_STATE_STI 0x00000001
+#define GUEST_INTR_STATE_MOV_SS 0x00000002
+#define GUEST_INTR_STATE_SMI 0x00000004
+#define GUEST_INTR_STATE_NMI 0x00000008
+
+/* GUEST_ACTIVITY_STATE flags */
+#define GUEST_ACTIVITY_ACTIVE 0
+#define GUEST_ACTIVITY_HLT 1
+#define GUEST_ACTIVITY_SHUTDOWN 2
+#define GUEST_ACTIVITY_WAIT_SIPI 3
+
+/*
+ * Exit Qualifications for MOV for Control Register Access
+ */
+#define CONTROL_REG_ACCESS_NUM 0x7 /* 2:0, number of control reg.*/
+#define CONTROL_REG_ACCESS_TYPE 0x30 /* 5:4, access type */
+#define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose reg. */
+#define LMSW_SOURCE_DATA_SHIFT 16
+#define LMSW_SOURCE_DATA (0xFFFF << LMSW_SOURCE_DATA_SHIFT) /* 16:31 lmsw source */
+#define REG_EAX (0 << 8)
+#define REG_ECX (1 << 8)
+#define REG_EDX (2 << 8)
+#define REG_EBX (3 << 8)
+#define REG_ESP (4 << 8)
+#define REG_EBP (5 << 8)
+#define REG_ESI (6 << 8)
+#define REG_EDI (7 << 8)
+#define REG_R8 (8 << 8)
+#define REG_R9 (9 << 8)
+#define REG_R10 (10 << 8)
+#define REG_R11 (11 << 8)
+#define REG_R12 (12 << 8)
+#define REG_R13 (13 << 8)
+#define REG_R14 (14 << 8)
+#define REG_R15 (15 << 8)
+
+/*
+ * Exit Qualifications for MOV for Debug Register Access
+ */
+#define DEBUG_REG_ACCESS_NUM 0x7 /* 2:0, number of debug reg. */
+#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */
+#define TYPE_MOV_TO_DR (0 << 4)
+#define TYPE_MOV_FROM_DR (1 << 4)
+#define DEBUG_REG_ACCESS_REG(eq) (((eq) >> 8) & 0xf) /* 11:8, general purpose reg. */
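
/*
 * Illustrative sketch (not part of the header): decoding a MOV-CR exit
 * qualification with the masks above. The shifts are implied by
 * CONTROL_REG_ACCESS_TYPE (bits 5:4) and CONTROL_REG_ACCESS_REG (bits 11:8);
 * the access-type encoding (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS,
 * 3 = LMSW) follows the SDM and is an assumption here.
 */
static inline void decode_cr_access(unsigned long qual,
				    int *cr, int *type, int *gpr)
{
	*cr   = qual & CONTROL_REG_ACCESS_NUM;
	*type = (qual & CONTROL_REG_ACCESS_TYPE) >> 4;
	*gpr  = (qual & CONTROL_REG_ACCESS_REG) >> 8;
}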
+
+
+/*
+ * Exit Qualifications for APIC-Access
+ */
+#define APIC_ACCESS_OFFSET 0xfff /* 11:0, offset within the APIC page */
+#define APIC_ACCESS_TYPE 0xf000 /* 15:12, access type */
+#define TYPE_LINEAR_APIC_INST_READ (0 << 12)
+#define TYPE_LINEAR_APIC_INST_WRITE (1 << 12)
+#define TYPE_LINEAR_APIC_INST_FETCH (2 << 12)
+#define TYPE_LINEAR_APIC_EVENT (3 << 12)
+#define TYPE_PHYSICAL_APIC_EVENT (10 << 12)
+#define TYPE_PHYSICAL_APIC_INST (15 << 12)
+
+/* segment AR */
+#define SEGMENT_AR_L_MASK (1 << 13)
+
+#define AR_TYPE_ACCESSES_MASK 1
+#define AR_TYPE_READABLE_MASK (1 << 1)
+#define AR_TYPE_WRITEABLE_MASK (1 << 2)
+#define AR_TYPE_CODE_MASK (1 << 3)
+#define AR_TYPE_MASK 0x0f
+#define AR_TYPE_BUSY_64_TSS 11
+#define AR_TYPE_BUSY_32_TSS 11
+#define AR_TYPE_BUSY_16_TSS 3
+#define AR_TYPE_LDT 2
+
+#define AR_UNUSABLE_MASK (1 << 16)
+#define AR_S_MASK (1 << 4)
+#define AR_P_MASK (1 << 7)
+#define AR_L_MASK (1 << 13)
+#define AR_DB_MASK (1 << 14)
+#define AR_G_MASK (1 << 15)
+#define AR_DPL_SHIFT 5
+#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)
+
+#define AR_RESERVD_MASK 0xfffe0f00
+
+#define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
+#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1)
+#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 2)
+
+#define VMX_NR_VPIDS (1 << 16)
+#define VMX_VPID_EXTENT_SINGLE_CONTEXT 1
+#define VMX_VPID_EXTENT_ALL_CONTEXT 2
+
+#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
+#define VMX_EPT_EXTENT_CONTEXT 1
+#define VMX_EPT_EXTENT_GLOBAL 2
+#define VMX_EPT_EXTENT_SHIFT 24
+
+#define VMX_EPT_EXECUTE_ONLY_BIT (1ull)
+#define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6)
+#define VMX_EPTP_UC_BIT (1ull << 8)
+#define VMX_EPTP_WB_BIT (1ull << 14)
+#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
+#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
+#define VMX_EPT_INVEPT_BIT (1ull << 20)
+#define VMX_EPT_AD_BIT (1ull << 21)
+#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
+#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
+
+#define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT (1ull << 9) /* (41 - 32) */
+#define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT (1ull << 10) /* (42 - 32) */
+
+#define VMX_EPT_DEFAULT_GAW 3
+#define VMX_EPT_MAX_GAW 0x4
+#define VMX_EPT_MT_EPTE_SHIFT 3
+#define VMX_EPT_GAW_EPTP_SHIFT 3
+#define VMX_EPT_AD_ENABLE_BIT (1ull << 6)
+#define VMX_EPT_DEFAULT_MT 0x6ull
+#define VMX_EPT_READABLE_MASK 0x1ull
+#define VMX_EPT_WRITABLE_MASK 0x2ull
+#define VMX_EPT_EXECUTABLE_MASK 0x4ull
+#define VMX_EPT_IPAT_BIT (1ull << 6)
+#define VMX_EPT_ACCESS_BIT (1ull << 8)
+#define VMX_EPT_DIRTY_BIT (1ull << 9)
+
+#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
+
+
+#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
+#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
+#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
+#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
+#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
+#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
+#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
+#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
+#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
+
+struct vmx_msr_entry {
+ u32 index;
+ u32 reserved;
+ u64 value;
+} __aligned(16);
+
+/*
+ * Exit Qualifications for entry failure during or after loading guest state
+ */
+#define ENTRY_FAIL_DEFAULT 0
+#define ENTRY_FAIL_PDPTE 2
+#define ENTRY_FAIL_NMI 3
+#define ENTRY_FAIL_VMCS_LINK_PTR 4
+
+/*
+ * VM-instruction error numbers
+ */
+enum vm_instruction_error_number {
+ VMXERR_VMCALL_IN_VMX_ROOT_OPERATION = 1,
+ VMXERR_VMCLEAR_INVALID_ADDRESS = 2,
+ VMXERR_VMCLEAR_VMXON_POINTER = 3,
+ VMXERR_VMLAUNCH_NONCLEAR_VMCS = 4,
+ VMXERR_VMRESUME_NONLAUNCHED_VMCS = 5,
+ VMXERR_VMRESUME_AFTER_VMXOFF = 6,
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD = 7,
+ VMXERR_ENTRY_INVALID_HOST_STATE_FIELD = 8,
+ VMXERR_VMPTRLD_INVALID_ADDRESS = 9,
+ VMXERR_VMPTRLD_VMXON_POINTER = 10,
+ VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID = 11,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT = 12,
+ VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT = 13,
+ VMXERR_VMXON_IN_VMX_ROOT_OPERATION = 15,
+ VMXERR_ENTRY_INVALID_EXECUTIVE_VMCS_POINTER = 16,
+ VMXERR_ENTRY_NONLAUNCHED_EXECUTIVE_VMCS = 17,
+ VMXERR_ENTRY_EXECUTIVE_VMCS_POINTER_NOT_VMXON_POINTER = 18,
+ VMXERR_VMCALL_NONCLEAR_VMCS = 19,
+ VMXERR_VMCALL_INVALID_VM_EXIT_CONTROL_FIELDS = 20,
+ VMXERR_VMCALL_INCORRECT_MSEG_REVISION_ID = 22,
+ VMXERR_VMXOFF_UNDER_DUAL_MONITOR_TREATMENT_OF_SMIS_AND_SMM = 23,
+ VMXERR_VMCALL_INVALID_SMM_MONITOR_FEATURES = 24,
+ VMXERR_ENTRY_INVALID_VM_EXECUTION_CONTROL_FIELDS_IN_EXECUTIVE_VMCS = 25,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
+};
+
+#endif
diff --git a/kernel/arch/x86/include/asm/vsyscall.h b/kernel/arch/x86/include/asm/vsyscall.h
new file mode 100644
index 000000000..6ba66ee79
--- /dev/null
+++ b/kernel/arch/x86/include/asm/vsyscall.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_X86_VSYSCALL_H
+#define _ASM_X86_VSYSCALL_H
+
+#include <linux/seqlock.h>
+#include <uapi/asm/vsyscall.h>
+
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+extern void map_vsyscall(void);
+
+/*
+ * Called on instruction fetch fault in vsyscall page.
+ * Returns true if handled.
+ */
+extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
+#else
+static inline void map_vsyscall(void) {}
+static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+{
+ return false;
+}
+#endif
+
+#endif /* _ASM_X86_VSYSCALL_H */
diff --git a/kernel/arch/x86/include/asm/vvar.h b/kernel/arch/x86/include/asm/vvar.h
new file mode 100644
index 000000000..3f32dfc2a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/vvar.h
@@ -0,0 +1,51 @@
+/*
+ * vvar.h: Shared vDSO/kernel variable declarations
+ * Copyright (c) 2011 Andy Lutomirski
+ * Subject to the GNU General Public License, version 2
+ *
+ * A handful of variables are accessible (read-only) from userspace
+ * code in the vsyscall page and the vdso. They are declared here.
+ * Some other file must define them with DEFINE_VVAR.
+ *
+ * In normal kernel code, they are used like any other variable.
+ * In user code, they are accessed through the VVAR macro.
+ *
+ * These variables live in a page of kernel data that has an extra RO
+ * mapping for userspace. Each variable needs a unique offset within
+ * that page; specify that offset with the DECLARE_VVAR macro. (If
+ * you mess up, the linker will catch it.)
+ */
+
+#ifndef _ASM_X86_VVAR_H
+#define _ASM_X86_VVAR_H
+
+#if defined(__VVAR_KERNEL_LDS)
+
+/* The kernel linker script defines its own magic to put vvars in the
+ * right place.
+ */
+#define DECLARE_VVAR(offset, type, name) \
+ EMIT_VVAR(name, offset)
+
+#else
+
+extern char __vvar_page;
+
+#define DECLARE_VVAR(offset, type, name) \
+ extern type vvar_ ## name __attribute__((visibility("hidden")));
+
+#define VVAR(name) (vvar_ ## name)
+
+#define DEFINE_VVAR(type, name) \
+ type name \
+ __attribute__((section(".vvar_" #name), aligned(16))) __visible
+
+#endif
+
+/* DECLARE_VVAR(offset, type, name) */
+
+DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
+
+#undef DECLARE_VVAR
+
+#endif
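
To illustrate the two sides of the mechanism described above, a sketch assuming the usual split between a kernel .c file and vDSO code; the wrapper name is illustrative.

/* Kernel side: instantiate the variable declared above. */
DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);

/* vDSO side: reach it through the hidden-visibility alias. */
static const struct vsyscall_gtod_data *gtod(void)
{
	return &VVAR(vsyscall_gtod_data);
}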
diff --git a/kernel/arch/x86/include/asm/word-at-a-time.h b/kernel/arch/x86/include/asm/word-at-a-time.h
new file mode 100644
index 000000000..5b2389815
--- /dev/null
+++ b/kernel/arch/x86/include/asm/word-at-a-time.h
@@ -0,0 +1,105 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, dummy;
+
+ asm(
+ "1:\tmov %2,%0\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\t"
+ "lea %2,%1\n\t"
+ "and %3,%1\n\t"
+ "mov (%1),%0\n\t"
+ "leal %2,%%ecx\n\t"
+ "andl %4,%%ecx\n\t"
+ "shll $3,%%ecx\n\t"
+ "shr %%cl,%0\n\t"
+ "jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ :"=&r" (ret),"=&c" (dummy)
+ :"m" (*(unsigned long *)addr),
+ "i" (-sizeof(unsigned long)),
+ "i" (sizeof(unsigned long)-1));
+ return ret;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
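
A sketch of the intended calling pattern, assuming a word-aligned, NUL-terminated string; real users also handle the unaligned head and use load_unaligned_zeropad() near page boundaries.

static inline size_t wordwise_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	const unsigned long *p = (const unsigned long *)s;
	unsigned long val, bits;
	size_t len = 0;

	for (;;) {
		val = *p++;
		if (has_zero(val, &bits, &constants)) {
			bits = prep_zero_mask(val, bits, &constants);
			return len + find_zero(create_zero_mask(bits));
		}
		len += sizeof(unsigned long);
	}
}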
diff --git a/kernel/arch/x86/include/asm/x2apic.h b/kernel/arch/x86/include/asm/x2apic.h
new file mode 100644
index 000000000..f90f0a587
--- /dev/null
+++ b/kernel/arch/x86/include/asm/x2apic.h
@@ -0,0 +1,49 @@
+/*
+ * Common bits for X2APIC cluster/physical modes.
+ */
+
+#ifndef _ASM_X86_X2APIC_H
+#define _ASM_X86_X2APIC_H
+
+#include <asm/apic.h>
+#include <asm/ipi.h>
+#include <linux/cpumask.h>
+
+static int x2apic_apic_id_valid(int apicid)
+{
+ return 1;
+}
+
+static int x2apic_apic_id_registered(void)
+{
+ return 1;
+}
+
+static void
+__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
+{
+ unsigned long cfg = __prepare_ICR(0, vector, dest);
+ native_x2apic_icr_write(cfg, apicid);
+}
+
+static unsigned int x2apic_get_apic_id(unsigned long id)
+{
+ return id;
+}
+
+static unsigned long x2apic_set_apic_id(unsigned int id)
+{
+ return id;
+}
+
+static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
+{
+ return initial_apicid >> index_msb;
+}
+
+static void x2apic_send_IPI_self(int vector)
+{
+ apic_write(APIC_SELF_IPI, vector);
+}
+
+#endif /* _ASM_X86_X2APIC_H */
diff --git a/kernel/arch/x86/include/asm/x86_init.h b/kernel/arch/x86/include/asm/x86_init.h
new file mode 100644
index 000000000..f58a9c7a3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/x86_init.h
@@ -0,0 +1,216 @@
+#ifndef _ASM_X86_PLATFORM_H
+#define _ASM_X86_PLATFORM_H
+
+#include <asm/pgtable_types.h>
+#include <asm/bootparam.h>
+
+struct mpc_bus;
+struct mpc_cpu;
+struct mpc_table;
+struct cpuinfo_x86;
+
+/**
+ * struct x86_init_mpparse - platform specific mpparse ops
+ * @mpc_record: platform specific mpc record accounting
+ * @setup_ioapic_ids: platform specific ioapic id override
+ * @mpc_apic_id: platform specific mpc apic id assignment
+ * @smp_read_mpc_oem: platform specific oem mpc table setup
+ * @mpc_oem_pci_bus: platform specific pci bus setup (default NULL)
+ * @mpc_oem_bus_info: platform specific mpc bus info
+ * @find_smp_config: find the smp configuration
+ * @get_smp_config: get the smp configuration
+ */
+struct x86_init_mpparse {
+ void (*mpc_record)(unsigned int mode);
+ void (*setup_ioapic_ids)(void);
+ int (*mpc_apic_id)(struct mpc_cpu *m);
+ void (*smp_read_mpc_oem)(struct mpc_table *mpc);
+ void (*mpc_oem_pci_bus)(struct mpc_bus *m);
+ void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
+ void (*find_smp_config)(void);
+ void (*get_smp_config)(unsigned int early);
+};
+
+/**
+ * struct x86_init_resources - platform specific resource related ops
+ * @probe_roms: probe BIOS roms
+ * @reserve_resources: reserve the standard resources for the
+ * platform
+ * @memory_setup: platform specific memory setup
+ *
+ */
+struct x86_init_resources {
+ void (*probe_roms)(void);
+ void (*reserve_resources)(void);
+ char *(*memory_setup)(void);
+};
+
+/**
+ * struct x86_init_irqs - platform specific interrupt setup
+ * @pre_vector_init: init code to run before interrupt vectors
+ * are set up.
+ * @intr_init: interrupt init code
+ * @trap_init: platform specific trap setup
+ */
+struct x86_init_irqs {
+ void (*pre_vector_init)(void);
+ void (*intr_init)(void);
+ void (*trap_init)(void);
+};
+
+/**
+ * struct x86_init_oem - oem platform specific customizing functions
+ * @arch_setup: platform specific architecture setup
+ * @banner: print a platform specific banner
+ */
+struct x86_init_oem {
+ void (*arch_setup)(void);
+ void (*banner)(void);
+};
+
+/**
+ * struct x86_init_paging - platform specific paging functions
+ * @pagetable_init: platform specific paging initialization call to set up
+ * the kernel pagetables and prepare accessor functions.
+ * Callback must call paging_init(). Called once after the
+ * direct mapping for phys memory is available.
+ */
+struct x86_init_paging {
+ void (*pagetable_init)(void);
+};
+
+/**
+ * struct x86_init_timers - platform specific timer setup
+ * @setup_percpu_clockev: set up the per cpu clock event device for the
+ * boot cpu
+ * @tsc_pre_init: platform function called before TSC init
+ * @timer_init: initialize the platform timer (default PIT/HPET)
+ * @wallclock_init: init the wallclock device
+ */
+struct x86_init_timers {
+ void (*setup_percpu_clockev)(void);
+ void (*tsc_pre_init)(void);
+ void (*timer_init)(void);
+ void (*wallclock_init)(void);
+};
+
+/**
+ * struct x86_init_iommu - platform specific iommu setup
+ * @iommu_init: platform specific iommu setup
+ */
+struct x86_init_iommu {
+ int (*iommu_init)(void);
+};
+
+/**
+ * struct x86_init_pci - platform specific pci init functions
+ * @arch_init: platform specific pci arch init call
+ * @init: platform specific pci subsystem init
+ * @init_irq: platform specific pci irq init
+ * @fixup_irqs: platform specific pci irq fixup
+ */
+struct x86_init_pci {
+ int (*arch_init)(void);
+ int (*init)(void);
+ void (*init_irq)(void);
+ void (*fixup_irqs)(void);
+};
+
+/**
+ * struct x86_init_ops - functions for platform specific setup
+ *
+ */
+struct x86_init_ops {
+ struct x86_init_resources resources;
+ struct x86_init_mpparse mpparse;
+ struct x86_init_irqs irqs;
+ struct x86_init_oem oem;
+ struct x86_init_paging paging;
+ struct x86_init_timers timers;
+ struct x86_init_iommu iommu;
+ struct x86_init_pci pci;
+};
+
+/**
+ * struct x86_cpuinit_ops - platform specific cpu hotplug setups
+ * @setup_percpu_clockev: set up the per cpu clock event device
+ * @early_percpu_clock_init: early init of the per cpu clock event device
+ */
+struct x86_cpuinit_ops {
+ void (*setup_percpu_clockev)(void);
+ void (*early_percpu_clock_init)(void);
+ void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
+};
+
+struct timespec;
+
+/**
+ * struct x86_platform_ops - platform specific runtime functions
+ * @calibrate_tsc: calibrate TSC
+ * @get_wallclock: get time from HW clock like RTC etc.
+ * @set_wallclock: set time back to HW clock
+ * @is_untracked_pat_range: exclude from PAT logic
+ * @nmi_init: enable NMI on cpus
+ * @i8042_detect: pre-detect if i8042 controller exists
+ * @save_sched_clock_state: save state for sched_clock() on suspend
+ * @restore_sched_clock_state: restore state for sched_clock() on resume
+ * @apic_post_init: adjust apic if needed
+ */
+struct x86_platform_ops {
+ unsigned long (*calibrate_tsc)(void);
+ void (*get_wallclock)(struct timespec *ts);
+ int (*set_wallclock)(const struct timespec *ts);
+ void (*iommu_shutdown)(void);
+ bool (*is_untracked_pat_range)(u64 start, u64 end);
+ void (*nmi_init)(void);
+ unsigned char (*get_nmi_reason)(void);
+ int (*i8042_detect)(void);
+ void (*save_sched_clock_state)(void);
+ void (*restore_sched_clock_state)(void);
+ void (*apic_post_init)(void);
+};
+
+struct pci_dev;
+struct msi_msg;
+
+struct x86_msi_ops {
+ int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+ void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
+ unsigned int dest, struct msi_msg *msg,
+ u8 hpet_id);
+ void (*teardown_msi_irq)(unsigned int irq);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+ void (*restore_msi_irqs)(struct pci_dev *dev);
+ int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
+};
+
+struct IO_APIC_route_entry;
+struct io_apic_irq_attr;
+struct irq_data;
+struct cpumask;
+
+struct x86_io_apic_ops {
+ void (*init) (void);
+ unsigned int (*read) (unsigned int apic, unsigned int reg);
+ void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
+ void (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
+ void (*disable)(void);
+ void (*print_entries)(unsigned int apic, unsigned int nr_entries);
+ int (*set_affinity)(struct irq_data *data,
+ const struct cpumask *mask,
+ bool force);
+ int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
+ unsigned int destination, int vector,
+ struct io_apic_irq_attr *attr);
+ void (*eoi_ioapic_pin)(int apic, int pin, int vector);
+};
+
+extern struct x86_init_ops x86_init;
+extern struct x86_cpuinit_ops x86_cpuinit;
+extern struct x86_platform_ops x86_platform;
+extern struct x86_msi_ops x86_msi;
+extern struct x86_io_apic_ops x86_io_apic_ops;
+extern void x86_init_noop(void);
+extern void x86_init_uint_noop(unsigned int unused);
+
+#endif
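
A sketch of how a platform typically consumes these hooks, overriding only what differs from the defaults; the platform and helper names are hypothetical.

static int example_no_i8042(void)
{
	return 0;	/* assumption: report "no i8042 controller present" */
}

static void example_platform_setup(void)
{
	x86_init.timers.timer_init = x86_init_noop;	/* no PIT/HPET on this board */
	x86_platform.i8042_detect  = example_no_i8042;
}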
diff --git a/kernel/arch/x86/include/asm/xcr.h b/kernel/arch/x86/include/asm/xcr.h
new file mode 100644
index 000000000..f2cba4e79
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xcr.h
@@ -0,0 +1,49 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2008 rPath, Inc. - All Rights Reserved
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2 or (at your
+ * option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * asm-x86/xcr.h
+ *
+ * Definitions for the eXtended Control Register instructions
+ */
+
+#ifndef _ASM_X86_XCR_H
+#define _ASM_X86_XCR_H
+
+#define XCR_XFEATURE_ENABLED_MASK 0x00000000
+
+#ifdef __KERNEL__
+# ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+static inline u64 xgetbv(u32 index)
+{
+ u32 eax, edx;
+
+ asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
+ : "=a" (eax), "=d" (edx)
+ : "c" (index));
+ return eax + ((u64)edx << 32);
+}
+
+static inline void xsetbv(u32 index, u64 value)
+{
+ u32 eax = value;
+ u32 edx = value >> 32;
+
+ asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
+ : : "a" (eax), "d" (edx), "c" (index));
+}
+
+# endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_XCR_H */
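
A sketch of the intended use of xgetbv()/xsetbv(), assuming the caller has already verified XSAVE support via CPUID and set CR4.OSXSAVE; the feature-bit parameter stands in for whichever XCR0 bit the caller wants enabled.

static inline void example_enable_xcr0_bit(u64 feature_bit)
{
	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	if (!(xcr0 & feature_bit))
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0 | feature_bit);
}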
diff --git a/kernel/arch/x86/include/asm/xen/cpuid.h b/kernel/arch/x86/include/asm/xen/cpuid.h
new file mode 100644
index 000000000..0d809e9fc
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/cpuid.h
@@ -0,0 +1,91 @@
+/******************************************************************************
+ * arch-x86/cpuid.h
+ *
+ * CPUID interface to Xen.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2007 Citrix Systems, Inc.
+ *
+ * Authors:
+ * Keir Fraser <keir@xen.org>
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
+#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
+
+/*
+ * For compatibility with other hypervisor interfaces, the Xen cpuid leaves
+ * can be found at the first otherwise unused 0x100 aligned boundary starting
+ * from 0x40000000.
+ *
+ * e.g. if viridian extensions are enabled for an HVM domain, the Xen cpuid
+ * leaves will start at 0x40000100
+ */
+
+#define XEN_CPUID_FIRST_LEAF 0x40000000
+#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i))
+
+/*
+ * Leaf 1 (0x40000x00)
+ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
+ * are supported by the Xen host.
+ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
+ * of a Xen host.
+ */
+#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
+#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
+#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
+
+/*
+ * Leaf 2 (0x40000x01)
+ * EAX[31:16]: Xen major version.
+ * EAX[15: 0]: Xen minor version.
+ * EBX-EDX: Reserved (currently all zeroes).
+ */
+
+/*
+ * Leaf 3 (0x40000x02)
+ * EAX: Number of hypercall transfer pages. This register is always guaranteed
+ * to specify one hypercall page.
+ * EBX: Base address of Xen-specific MSRs.
+ * ECX: Features 1. Unused bits are set to zero.
+ * EDX: Features 2. Unused bits are set to zero.
+ */
+
+/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
+#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
+#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
+
+/*
+ * Leaf 5 (0x40000x04)
+ * HVM-specific features
+ */
+
+/* EAX Features */
+/* Virtualized APIC registers */
+#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
+/* Virtualized x2APIC accesses */
+#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1)
+/* Memory mapped from other domains has valid IOMMU entries */
+#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
+
+#define XEN_CPUID_MAX_NUM_LEAVES 4
+
+#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
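
A sketch of the leaf discovery the comments above describe, probing each 0x100-aligned base for the "XenVMMXenVMM" signature; cpuid() is the usual helper from <asm/processor.h>, and the function name is illustrative.

static uint32_t example_xen_cpuid_base(void)
{
	uint32_t base, eax, ebx, ecx, edx;

	for (base = XEN_CPUID_FIRST_LEAF;
	     base < XEN_CPUID_FIRST_LEAF + 0x10000; base += 0x100) {
		cpuid(base, &eax, &ebx, &ecx, &edx);
		if (ebx == XEN_CPUID_SIGNATURE_EBX &&
		    ecx == XEN_CPUID_SIGNATURE_ECX &&
		    edx == XEN_CPUID_SIGNATURE_EDX)
			return base;	/* eax now holds the largest supported leaf */
	}
	return 0;
}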
diff --git a/kernel/arch/x86/include/asm/xen/events.h b/kernel/arch/x86/include/asm/xen/events.h
new file mode 100644
index 000000000..608a79d5a
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/events.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_X86_XEN_EVENTS_H
+#define _ASM_X86_XEN_EVENTS_H
+
+enum ipi_vector {
+ XEN_RESCHEDULE_VECTOR,
+ XEN_CALL_FUNCTION_VECTOR,
+ XEN_CALL_FUNCTION_SINGLE_VECTOR,
+ XEN_SPIN_UNLOCK_VECTOR,
+ XEN_IRQ_WORK_VECTOR,
+ XEN_NMI_VECTOR,
+
+ XEN_NR_IPIS,
+};
+
+static inline int xen_irqs_disabled(struct pt_regs *regs)
+{
+ return raw_irqs_disabled_flags(regs->flags);
+}
+
+/* No need for a barrier -- XCHG is a barrier on x86. */
+#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+
+#endif /* _ASM_X86_XEN_EVENTS_H */
diff --git a/kernel/arch/x86/include/asm/xen/hypercall.h b/kernel/arch/x86/include/asm/xen/hypercall.h
new file mode 100644
index 000000000..ca08a27b9
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/hypercall.h
@@ -0,0 +1,603 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_X86_XEN_HYPERCALL_H
+#define _ASM_X86_XEN_HYPERCALL_H
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <trace/events/xen.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/physdev.h>
+#include <xen/interface/platform.h>
+#include <xen/interface/xen-mca.h>
+
+/*
+ * The hypercall asms have to meet several constraints:
+ * - Work on 32- and 64-bit.
+ * The two architectures put their arguments in different sets of
+ * registers.
+ *
+ * - Work around asm syntax quirks
+ * It isn't possible to specify one of the rNN registers in a
+ * constraint, so we use explicit register variables to get the
+ * args into the right place.
+ *
+ * - Mark all registers as potentially clobbered
+ * Even unused parameters can be clobbered by the hypervisor, so we
+ * need to make sure gcc knows it.
+ *
+ * - Avoid compiler bugs.
+ * This is the tricky part. Because x86_32 has such a constrained
+ * register set, gcc versions below 4.3 have trouble generating
+ * code when all the arg registers and memory are trashed by the
+ * asm. There are syntactically simpler ways of achieving the
+ * semantics below, but they cause the compiler to crash.
+ *
+ * The only combination I found which works is:
+ * - assign the __argX variables first
+ * - list all actually used parameters as "+r" (__argX)
+ * - clobber the rest
+ *
+ * The result certainly isn't pretty, and it really shows up cpp's
+ * weakness as a macro language. Sorry. (But let's just give thanks
+ * there aren't more than 5 arguments...)
+ */
+
+extern struct { char _entry[32]; } hypercall_page[];
+
+#define __HYPERCALL "call hypercall_page+%c[offset]"
+#define __HYPERCALL_ENTRY(x) \
+ [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
+
+#ifdef CONFIG_X86_32
+#define __HYPERCALL_RETREG "eax"
+#define __HYPERCALL_ARG1REG "ebx"
+#define __HYPERCALL_ARG2REG "ecx"
+#define __HYPERCALL_ARG3REG "edx"
+#define __HYPERCALL_ARG4REG "esi"
+#define __HYPERCALL_ARG5REG "edi"
+#else
+#define __HYPERCALL_RETREG "rax"
+#define __HYPERCALL_ARG1REG "rdi"
+#define __HYPERCALL_ARG2REG "rsi"
+#define __HYPERCALL_ARG3REG "rdx"
+#define __HYPERCALL_ARG4REG "r10"
+#define __HYPERCALL_ARG5REG "r8"
+#endif
+
+#define __HYPERCALL_DECLS \
+ register unsigned long __res asm(__HYPERCALL_RETREG); \
+ register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
+ register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
+ register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
+ register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
+ register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
+
+#define __HYPERCALL_0PARAM "=r" (__res)
+#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
+#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
+#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
+#define __HYPERCALL_4PARAM __HYPERCALL_3PARAM, "+r" (__arg4)
+#define __HYPERCALL_5PARAM __HYPERCALL_4PARAM, "+r" (__arg5)
+
+#define __HYPERCALL_0ARG()
+#define __HYPERCALL_1ARG(a1) \
+ __HYPERCALL_0ARG() __arg1 = (unsigned long)(a1);
+#define __HYPERCALL_2ARG(a1,a2) \
+ __HYPERCALL_1ARG(a1) __arg2 = (unsigned long)(a2);
+#define __HYPERCALL_3ARG(a1,a2,a3) \
+ __HYPERCALL_2ARG(a1,a2) __arg3 = (unsigned long)(a3);
+#define __HYPERCALL_4ARG(a1,a2,a3,a4) \
+ __HYPERCALL_3ARG(a1,a2,a3) __arg4 = (unsigned long)(a4);
+#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
+ __HYPERCALL_4ARG(a1,a2,a3,a4) __arg5 = (unsigned long)(a5);
+
+#define __HYPERCALL_CLOBBER5 "memory"
+#define __HYPERCALL_CLOBBER4 __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
+#define __HYPERCALL_CLOBBER3 __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
+#define __HYPERCALL_CLOBBER2 __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
+#define __HYPERCALL_CLOBBER1 __HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
+#define __HYPERCALL_CLOBBER0 __HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
+
+#define _hypercall0(type, name) \
+({ \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_0ARG(); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_0PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER0); \
+ (type)__res; \
+})
+
+#define _hypercall1(type, name, a1) \
+({ \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_1ARG(a1); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_1PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER1); \
+ (type)__res; \
+})
+
+#define _hypercall2(type, name, a1, a2) \
+({ \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_2ARG(a1, a2); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_2PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER2); \
+ (type)__res; \
+})
+
+#define _hypercall3(type, name, a1, a2, a3) \
+({ \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_3ARG(a1, a2, a3); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_3PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER3); \
+ (type)__res; \
+})
+
+#define _hypercall4(type, name, a1, a2, a3, a4) \
+({ \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_4ARG(a1, a2, a3, a4); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_4PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER4); \
+ (type)__res; \
+})
+
+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
+({ \
+ __HYPERCALL_DECLS; \
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5); \
+ asm volatile (__HYPERCALL \
+ : __HYPERCALL_5PARAM \
+ : __HYPERCALL_ENTRY(name) \
+ : __HYPERCALL_CLOBBER5); \
+ (type)__res; \
+})
+
+static inline long
+privcmd_call(unsigned call,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4,
+ unsigned long a5)
+{
+ __HYPERCALL_DECLS;
+ __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+
+ asm volatile("call *%[call]"
+ : __HYPERCALL_5PARAM
+ : [call] "a" (&hypercall_page[call])
+ : __HYPERCALL_CLOBBER5);
+
+ return (long)__res;
+}
+
+static inline int
+HYPERVISOR_set_trap_table(struct trap_info *table)
+{
+ return _hypercall1(int, set_trap_table, table);
+}
+
+static inline int
+HYPERVISOR_mmu_update(struct mmu_update *req, int count,
+ int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
+ int *success_count, domid_t domid)
+{
+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
+}
+
+static inline int
+HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
+{
+ return _hypercall2(int, set_gdt, frame_list, entries);
+}
+
+static inline int
+HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
+{
+ return _hypercall2(int, stack_switch, ss, esp);
+}
+
+#ifdef CONFIG_X86_32
+static inline int
+HYPERVISOR_set_callbacks(unsigned long event_selector,
+ unsigned long event_address,
+ unsigned long failsafe_selector,
+ unsigned long failsafe_address)
+{
+ return _hypercall4(int, set_callbacks,
+ event_selector, event_address,
+ failsafe_selector, failsafe_address);
+}
+#else /* CONFIG_X86_64 */
+static inline int
+HYPERVISOR_set_callbacks(unsigned long event_address,
+ unsigned long failsafe_address,
+ unsigned long syscall_address)
+{
+ return _hypercall3(int, set_callbacks,
+ event_address, failsafe_address,
+ syscall_address);
+}
+#endif /* CONFIG_X86_{32,64} */
+
+static inline int
+HYPERVISOR_callback_op(int cmd, void *arg)
+{
+ return _hypercall2(int, callback_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_fpu_taskswitch(int set)
+{
+ return _hypercall1(int, fpu_taskswitch, set);
+}
+
+static inline int
+HYPERVISOR_sched_op(int cmd, void *arg)
+{
+ return _hypercall2(int, sched_op, cmd, arg);
+}
+
+static inline long
+HYPERVISOR_set_timer_op(u64 timeout)
+{
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
+}
+
+static inline int
+HYPERVISOR_mca(struct xen_mc *mc_op)
+{
+ mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
+ return _hypercall1(int, mca, mc_op);
+}
+
+static inline int
+HYPERVISOR_dom0_op(struct xen_platform_op *platform_op)
+{
+ platform_op->interface_version = XENPF_INTERFACE_VERSION;
+ return _hypercall1(int, dom0_op, platform_op);
+}
+
+static inline int
+HYPERVISOR_set_debugreg(int reg, unsigned long value)
+{
+ return _hypercall2(int, set_debugreg, reg, value);
+}
+
+static inline unsigned long
+HYPERVISOR_get_debugreg(int reg)
+{
+ return _hypercall1(unsigned long, get_debugreg, reg);
+}
+
+static inline int
+HYPERVISOR_update_descriptor(u64 ma, u64 desc)
+{
+ if (sizeof(u64) == sizeof(long))
+ return _hypercall2(int, update_descriptor, ma, desc);
+ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
+}
+
+static inline int
+HYPERVISOR_memory_op(unsigned int cmd, void *arg)
+{
+ return _hypercall2(int, memory_op, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_multicall(void *call_list, uint32_t nr_calls)
+{
+ return _hypercall2(int, multicall, call_list, nr_calls);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
+ unsigned long flags)
+{
+ if (sizeof(new_val) == sizeof(long))
+ return _hypercall3(int, update_va_mapping, va,
+ new_val.pte, flags);
+ else
+ return _hypercall4(int, update_va_mapping, va,
+ new_val.pte, new_val.pte >> 32, flags);
+}
+extern int __must_check xen_event_channel_op_compat(int, void *);
+
+static inline int
+HYPERVISOR_event_channel_op(int cmd, void *arg)
+{
+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS))
+ rc = xen_event_channel_op_compat(cmd, arg);
+ return rc;
+}
+
+static inline int
+HYPERVISOR_xen_version(int cmd, void *arg)
+{
+ return _hypercall2(int, xen_version, cmd, arg);
+}
+
+static inline int
+HYPERVISOR_console_io(int cmd, int count, char *str)
+{
+ return _hypercall3(int, console_io, cmd, count, str);
+}
+
+extern int __must_check xen_physdev_op_compat(int, void *);
+
+static inline int
+HYPERVISOR_physdev_op(int cmd, void *arg)
+{
+ int rc = _hypercall2(int, physdev_op, cmd, arg);
+ if (unlikely(rc == -ENOSYS))
+ rc = xen_physdev_op_compat(cmd, arg);
+ return rc;
+}
+
+static inline int
+HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
+{
+ return _hypercall3(int, grant_table_op, cmd, uop, count);
+}
+
+static inline int
+HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
+ unsigned long flags, domid_t domid)
+{
+ if (sizeof(new_val) == sizeof(long))
+ return _hypercall4(int, update_va_mapping_otherdomain, va,
+ new_val.pte, flags, domid);
+ else
+ return _hypercall5(int, update_va_mapping_otherdomain, va,
+ new_val.pte, new_val.pte >> 32,
+ flags, domid);
+}
+
+static inline int
+HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
+{
+ return _hypercall2(int, vm_assist, cmd, type);
+}
+
+static inline int
+HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
+{
+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
+}
+
+#ifdef CONFIG_X86_64
+static inline int
+HYPERVISOR_set_segment_base(int reg, unsigned long value)
+{
+ return _hypercall2(int, set_segment_base, reg, value);
+}
+#endif
+
+static inline int
+HYPERVISOR_suspend(unsigned long start_info_mfn)
+{
+ struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+ /*
+ * For a PV guest the tools require that the start_info mfn be
+ * present in rdx/edx when the hypercall is made. Per the
+ * hypercall calling convention this is the third hypercall
+ * argument, which is start_info_mfn here.
+ */
+ return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
+}
+
+static inline int
+HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
+{
+ return _hypercall2(int, nmi_op, op, arg);
+}
+
+static inline unsigned long __must_check
+HYPERVISOR_hvm_op(int op, void *arg)
+{
+ return _hypercall2(unsigned long, hvm_op, op, arg);
+}
+
+static inline int
+HYPERVISOR_tmem_op(
+ struct tmem_op *op)
+{
+ return _hypercall1(int, tmem_op, op);
+}
+
+static inline void
+MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
+{
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = set;
+
+ trace_xen_mc_entry(mcl, 1);
+}
+
+static inline void
+MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping;
+ mcl->args[0] = va;
+ if (sizeof(new_val) == sizeof(long)) {
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = flags;
+ } else {
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = new_val.pte >> 32;
+ mcl->args[3] = flags;
+ }
+
+ trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
+}
+
+static inline void
+MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
+ void *uop, unsigned int count)
+{
+ mcl->op = __HYPERVISOR_grant_table_op;
+ mcl->args[0] = cmd;
+ mcl->args[1] = (unsigned long)uop;
+ mcl->args[2] = count;
+
+ trace_xen_mc_entry(mcl, 3);
+}
+
+static inline void
+MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
+ pte_t new_val, unsigned long flags,
+ domid_t domid)
+{
+ mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
+ mcl->args[0] = va;
+ if (sizeof(new_val) == sizeof(long)) {
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = flags;
+ mcl->args[3] = domid;
+ } else {
+ mcl->args[1] = new_val.pte;
+ mcl->args[2] = new_val.pte >> 32;
+ mcl->args[3] = flags;
+ mcl->args[4] = domid;
+ }
+
+ trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
+}
+
+static inline void
+MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
+ struct desc_struct desc)
+{
+ mcl->op = __HYPERVISOR_update_descriptor;
+ if (sizeof(maddr) == sizeof(long)) {
+ mcl->args[0] = maddr;
+ mcl->args[1] = *(unsigned long *)&desc;
+ } else {
+ mcl->args[0] = maddr;
+ mcl->args[1] = maddr >> 32;
+ mcl->args[2] = desc.a;
+ mcl->args[3] = desc.b;
+ }
+
+ trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
+}
+
+static inline void
+MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
+{
+ mcl->op = __HYPERVISOR_memory_op;
+ mcl->args[0] = cmd;
+ mcl->args[1] = (unsigned long)arg;
+
+ trace_xen_mc_entry(mcl, 2);
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+ int count, int *success_count, domid_t domid)
+{
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)req;
+ mcl->args[1] = count;
+ mcl->args[2] = (unsigned long)success_count;
+ mcl->args[3] = domid;
+
+ trace_xen_mc_entry(mcl, 4);
+}
+
+static inline void
+MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
+ int *success_count, domid_t domid)
+{
+ mcl->op = __HYPERVISOR_mmuext_op;
+ mcl->args[0] = (unsigned long)op;
+ mcl->args[1] = count;
+ mcl->args[2] = (unsigned long)success_count;
+ mcl->args[3] = domid;
+
+ trace_xen_mc_entry(mcl, 4);
+}
+
+static inline void
+MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
+{
+ mcl->op = __HYPERVISOR_set_gdt;
+ mcl->args[0] = (unsigned long)frames;
+ mcl->args[1] = entries;
+
+ trace_xen_mc_entry(mcl, 2);
+}
+
+static inline void
+MULTI_stack_switch(struct multicall_entry *mcl,
+ unsigned long ss, unsigned long esp)
+{
+ mcl->op = __HYPERVISOR_stack_switch;
+ mcl->args[0] = ss;
+ mcl->args[1] = esp;
+
+ trace_xen_mc_entry(mcl, 2);
+}
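+
+/*
+ * The MULTI_* helpers above only fill in one struct multicall_entry; the
+ * batch is flushed later with a single multicall hypercall.  A rough usage
+ * sketch; the xen_mc_entry()/xen_mc_issue() helpers live outside this
+ * header (arch/x86/xen/multicalls.h) and appear here for illustration only:
+ *
+ *   struct multicall_space mcs = xen_mc_entry(0);
+ *
+ *   MULTI_fpu_taskswitch(mcs.mc, 1);
+ *   xen_mc_issue(PARAVIRT_LAZY_CPU);
+ */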
+
+#endif /* _ASM_X86_XEN_HYPERCALL_H */
diff --git a/kernel/arch/x86/include/asm/xen/hypervisor.h b/kernel/arch/x86/include/asm/xen/hypervisor.h
new file mode 100644
index 000000000..d866959e5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/hypervisor.h
@@ -0,0 +1,60 @@
+/******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_X86_XEN_HYPERVISOR_H
+#define _ASM_X86_XEN_HYPERVISOR_H
+
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+
+#include <asm/processor.h>
+
+static inline uint32_t xen_cpuid_base(void)
+{
+ return hypervisor_cpuid_base("XenVMMXenVMM", 2);
+}
+
+#ifdef CONFIG_XEN
+extern bool xen_hvm_need_lapic(void);
+
+static inline bool xen_x2apic_para_available(void)
+{
+ return xen_hvm_need_lapic();
+}
+#else
+static inline bool xen_x2apic_para_available(void)
+{
+ return (xen_cpuid_base() != 0);
+}
+#endif
+
+#endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/kernel/arch/x86/include/asm/xen/interface.h b/kernel/arch/x86/include/asm/xen/interface.h
new file mode 100644
index 000000000..3400dbaec
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/interface.h
@@ -0,0 +1,189 @@
+/******************************************************************************
+ * arch-x86_32.h
+ *
+ * Guest OS interface to x86 Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef _ASM_X86_XEN_INTERFACE_H
+#define _ASM_X86_XEN_INTERFACE_H
+
+#ifdef __XEN__
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef struct { type *p; } __guest_handle_ ## name
+#else
+#define __DEFINE_GUEST_HANDLE(name, type) \
+ typedef type * __guest_handle_ ## name
+#endif
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name) \
+ __DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name) __guest_handle_ ## name
+
+#ifdef __XEN__
+#if defined(__i386__)
+#define set_xen_guest_handle(hnd, val) \
+ do { \
+ if (sizeof(hnd) == 8) \
+ *(uint64_t *)&(hnd) = 0; \
+ (hnd).p = val; \
+ } while (0)
+#elif defined(__x86_64__)
+#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
+#endif
+#else
+#if defined(__i386__)
+#define set_xen_guest_handle(hnd, val) \
+ do { \
+ if (sizeof(hnd) == 8) \
+ *(uint64_t *)&(hnd) = 0; \
+ (hnd) = val; \
+ } while (0)
+#elif defined(__x86_64__)
+#define set_xen_guest_handle(hnd, val) do { (hnd) = val; } while (0)
+#endif
+#endif
+
+#ifndef __ASSEMBLY__
+/* Explicitly size integers that represent pfns in the public interface
+ * with Xen so that on ARM we can have one ABI that works for 32 and 64
+ * bit guests. */
+typedef unsigned long xen_pfn_t;
+#define PRI_xen_pfn "lx"
+typedef unsigned long xen_ulong_t;
+#define PRI_xen_ulong "lx"
+typedef long xen_long_t;
+#define PRI_xen_long "lx"
+
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint, unsigned int);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(void);
+DEFINE_GUEST_HANDLE(uint64_t);
+DEFINE_GUEST_HANDLE(uint32_t);
+DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
+#endif
+
+#ifndef HYPERVISOR_VIRT_START
+#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
+#endif
+
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT)
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 32
+
+/*
+ * SEGMENT DESCRIPTOR TABLES
+ */
+/*
+ * A number of GDT entries are reserved by Xen. These are not situated at the
+ * start of the GDT because some stupid OSes export hard-coded selector values
+ * in their ABI. These hard-coded values are always near the start of the GDT,
+ * so Xen places itself out of the way, at the far end of the GDT.
+ */
+#define FIRST_RESERVED_GDT_PAGE 14
+#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
+#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
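+
+/*
+ * Worked out, the reservation above starts at:
+ *   FIRST_RESERVED_GDT_BYTE  = 14 * 4096 = 57344
+ *   FIRST_RESERVED_GDT_ENTRY = 57344 / 8 = 7168
+ * so Xen's descriptors occupy GDT entries 7168 and up, well clear of the
+ * low selector values that some guests hard-code.
+ */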
+
+/*
+ * Send an array of these to HYPERVISOR_set_trap_table()
+ * The privilege level specifies which modes may enter a trap via a software
+ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
+ * privilege levels as follows:
+ * Level == 0: No one may enter
+ * Level == 1: Kernel may enter
+ * Level == 2: Kernel may enter
+ * Level == 3: Everyone may enter
+ */
+#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
+#define TI_GET_IF(_ti) ((_ti)->flags & 4)
+#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
+#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
+
+#ifndef __ASSEMBLY__
+struct trap_info {
+ uint8_t vector; /* exception vector */
+ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
+ uint16_t cs; /* code selector */
+ unsigned long address; /* code offset */
+};
+DEFINE_GUEST_HANDLE_STRUCT(trap_info);
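+
+/*
+ * Illustrative only: a guest building its virtual IDT fills in one of
+ * these per vector and hands the array to HYPERVISOR_set_trap_table().
+ * The handler name entry_int80 below is hypothetical:
+ *
+ *   struct trap_info ti = {
+ *       .vector  = 0x80,
+ *       .flags   = 0,
+ *       .cs      = FLAT_KERNEL_CS,
+ *       .address = (unsigned long)entry_int80,
+ *   };
+ *   TI_SET_DPL(&ti, 3);      (everyone may enter via "int $0x80")
+ */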
+
+struct arch_shared_info {
+ unsigned long max_pfn; /* max pfn that appears in table */
+ /* Frame containing list of mfns containing list of mfns containing p2m. */
+ unsigned long pfn_to_mfn_frame_list_list;
+ unsigned long nmi_reason;
+};
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_X86_32
+#include <asm/xen/interface_32.h>
+#else
+#include <asm/xen/interface_64.h>
+#endif
+
+#include <asm/pvclock-abi.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * The following is all CPU context. Note that the fpu_ctxt block is filled
+ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
+ */
+struct vcpu_guest_context {
+ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
+ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
+#define VGCF_I387_VALID (1<<0)
+#define VGCF_HVM_GUEST (1<<1)
+#define VGCF_IN_KERNEL (1<<2)
+ unsigned long flags; /* VGCF_* flags */
+ struct cpu_user_regs user_regs; /* User-level CPU registers */
+ struct trap_info trap_ctxt[256]; /* Virtual IDT */
+ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
+ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
+ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
+ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
+ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
+ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
+#ifdef __i386__
+ unsigned long event_callback_cs; /* CS:EIP of event callback */
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
+ unsigned long failsafe_callback_eip;
+#else
+ unsigned long event_callback_eip;
+ unsigned long failsafe_callback_eip;
+ unsigned long syscall_callback_eip;
+#endif
+ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
+#ifdef __x86_64__
+ /* Segment base addresses. */
+ uint64_t fs_base;
+ uint64_t gs_base_kernel;
+ uint64_t gs_base_user;
+#endif
+};
+DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Prefix forces emulation of some non-trapping instructions.
+ * Currently only CPUID.
+ */
+#ifdef __ASSEMBLY__
+#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
+#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
+#else
+#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
+#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
+#endif
+
+#endif /* _ASM_X86_XEN_INTERFACE_H */
diff --git a/kernel/arch/x86/include/asm/xen/interface_32.h b/kernel/arch/x86/include/asm/xen/interface_32.h
new file mode 100644
index 000000000..8413688b2
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/interface_32.h
@@ -0,0 +1,102 @@
+/******************************************************************************
+ * arch-x86_32.h
+ *
+ * Guest OS interface to x86 32-bit Xen.
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+#ifndef _ASM_X86_XEN_INTERFACE_32_H
+#define _ASM_X86_XEN_INTERFACE_32_H
+
+
+/*
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
+#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
+#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
+#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
+#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
+#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
+
+#define FLAT_KERNEL_CS FLAT_RING1_CS
+#define FLAT_KERNEL_DS FLAT_RING1_DS
+#define FLAT_KERNEL_SS FLAT_RING1_SS
+#define FLAT_USER_CS FLAT_RING3_CS
+#define FLAT_USER_DS FLAT_RING3_DS
+#define FLAT_USER_SS FLAT_RING3_SS
+
+/* And the trap vector is... */
+#define TRAP_INSTR "int $0x82"
+
+#define __MACH2PHYS_VIRT_START 0xF5800000
+#define __MACH2PHYS_VIRT_END 0xF6800000
+
+#define __MACH2PHYS_SHIFT 2
+
+/*
+ * Virtual addresses beyond this are not modifiable by guest OSes. The
+ * machine->physical mapping table starts at this address, read-only.
+ */
+#define __HYPERVISOR_VIRT_START 0xF5800000
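+
+/*
+ * That read-only table spans __MACH2PHYS_VIRT_START..__MACH2PHYS_VIRT_END,
+ * i.e. 16 MB of 4-byte entries (__MACH2PHYS_SHIFT == 2): 4M entries,
+ * enough to map 16 GB of machine memory at 4 kB per frame.
+ */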
+
+#ifndef __ASSEMBLY__
+
+struct cpu_user_regs {
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t ebp;
+ uint32_t eax;
+ uint16_t error_code; /* private */
+ uint16_t entry_vector; /* private */
+ uint32_t eip;
+ uint16_t cs;
+ uint8_t saved_upcall_mask;
+ uint8_t _pad0;
+ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
+ uint32_t esp;
+ uint16_t ss, _pad1;
+ uint16_t es, _pad2;
+ uint16_t ds, _pad3;
+ uint16_t fs, _pad4;
+ uint16_t gs, _pad5;
+};
+DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
+
+typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
+};
+
+struct xen_callback {
+ unsigned long cs;
+ unsigned long eip;
+};
+typedef struct xen_callback xen_callback_t;
+
+#define XEN_CALLBACK(__cs, __eip) \
+ ((struct xen_callback){ .cs = (__cs), .eip = (unsigned long)(__eip) })
+#endif /* !__ASSEMBLY__ */
+
+
+/*
+ * Page-directory addresses above 4GB do not fit into architectural %cr3.
+ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
+ * must use the following accessor macros to pack/unpack valid MFNs.
+ *
+ * Note that Xen is using the fact that the pagetable base is always
+ * page-aligned, and putting the 12 MSB of the address into the 12 LSB
+ * of cr3.
+ */
+#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
+#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
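+
+/*
+ * Worked example (hypothetical pfn): a page-directory frame at machine
+ * address 0x123456000 (above 4GB) has pfn 0x123456.  Packing rotates the
+ * high bits into the low, always-zero, page-offset part of cr3:
+ *
+ *   xen_pfn_to_cr3(0x123456)   == (0x123456 << 12) | (0x123456 >> 20)
+ *                              == 0x23456000 | 0x1 == 0x23456001
+ *   xen_cr3_to_pfn(0x23456001) == 0x00023456 | 0x00100000 == 0x00123456
+ */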
+
+#endif /* _ASM_X86_XEN_INTERFACE_32_H */
diff --git a/kernel/arch/x86/include/asm/xen/interface_64.h b/kernel/arch/x86/include/asm/xen/interface_64.h
new file mode 100644
index 000000000..839a4811c
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/interface_64.h
@@ -0,0 +1,148 @@
+#ifndef _ASM_X86_XEN_INTERFACE_64_H
+#define _ASM_X86_XEN_INTERFACE_64_H
+
+/*
+ * 64-bit segment selectors
+ * These flat segments are in the Xen-private section of every GDT. Since these
+ * are also present in the initial GDT, many OSes will be able to avoid
+ * installing their own GDT.
+ */
+
+#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
+#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
+#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
+#define FLAT_RING3_DS64 0x0000 /* NULL selector */
+#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
+#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
+
+#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
+#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
+#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
+#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
+#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
+#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
+#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
+#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
+#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
+
+#define FLAT_USER_DS64 FLAT_RING3_DS64
+#define FLAT_USER_DS32 FLAT_RING3_DS32
+#define FLAT_USER_DS FLAT_USER_DS64
+#define FLAT_USER_CS64 FLAT_RING3_CS64
+#define FLAT_USER_CS32 FLAT_RING3_CS32
+#define FLAT_USER_CS FLAT_USER_CS64
+#define FLAT_USER_SS64 FLAT_RING3_SS64
+#define FLAT_USER_SS32 FLAT_RING3_SS32
+#define FLAT_USER_SS FLAT_USER_SS64
+
+#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
+#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
+#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
+#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
+#define __MACH2PHYS_SHIFT 3
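+
+/*
+ * The m2p window above is 0xFFFF804000000000 - 0xFFFF800000000000 = 256 GB
+ * of 8-byte entries (__MACH2PHYS_SHIFT == 3): 32G entries, enough to map
+ * 128 TB of machine memory at 4 kB per frame.
+ */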
+
+/*
+ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
+ * @which == SEGBASE_* ; @base == 64-bit base address
+ * Returns 0 on success.
+ */
+#define SEGBASE_FS 0
+#define SEGBASE_GS_USER 1
+#define SEGBASE_GS_KERNEL 2
+#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
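+
+/*
+ * Typical call from a 64-bit PV guest switching its FS base (the variable
+ * names here are illustrative):
+ *
+ *   ret = HYPERVISOR_set_segment_base(SEGBASE_FS, new_fs_base);
+ */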
+
+/*
+ * int HYPERVISOR_iret(void)
+ * All arguments are on the kernel stack, in the following format.
+ * Never returns if successful. Current kernel context is lost.
+ * The saved CS is mapped as follows:
+ * RING0 -> RING3 kernel mode.
+ * RING1 -> RING3 kernel mode.
+ * RING2 -> RING3 kernel mode.
+ * RING3 -> RING3 user mode.
+ * However RING0 indicates that the guest kernel should return to itself
+ * directly with
+ * orb $3,1*8(%rsp)
+ * iretq
+ * If flags contains VGCF_in_syscall:
+ * Restore RAX, RIP, RFLAGS, RSP.
+ * Discard R11, RCX, CS, SS.
+ * Otherwise:
+ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
+ * All other registers are saved on hypercall entry and restored to user.
+ */
+/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
+#define _VGCF_in_syscall 8
+#define VGCF_in_syscall (1<<_VGCF_in_syscall)
+#define VGCF_IN_SYSCALL VGCF_in_syscall
+
+#ifndef __ASSEMBLY__
+
+struct iret_context {
+ /* Top of stack (%rsp at point of hypercall). */
+ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
+ /* Bottom of iret stack frame. */
+};
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
+#define __DECL_REG(name) union { \
+ uint64_t r ## name, e ## name; \
+ uint32_t _e ## name; \
+}
+#else
+/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
+#define __DECL_REG(name) uint64_t r ## name
+#endif
+
+struct cpu_user_regs {
+ uint64_t r15;
+ uint64_t r14;
+ uint64_t r13;
+ uint64_t r12;
+ __DECL_REG(bp);
+ __DECL_REG(bx);
+ uint64_t r11;
+ uint64_t r10;
+ uint64_t r9;
+ uint64_t r8;
+ __DECL_REG(ax);
+ __DECL_REG(cx);
+ __DECL_REG(dx);
+ __DECL_REG(si);
+ __DECL_REG(di);
+ uint32_t error_code; /* private */
+ uint32_t entry_vector; /* private */
+ __DECL_REG(ip);
+ uint16_t cs, _pad0[1];
+ uint8_t saved_upcall_mask;
+ uint8_t _pad1[3];
+ __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
+ __DECL_REG(sp);
+ uint16_t ss, _pad2[3];
+ uint16_t es, _pad3[3];
+ uint16_t ds, _pad4[3];
+ uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
+ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_user. */
+};
+DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
+
+#undef __DECL_REG
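+
+/*
+ * With gcc, the anonymous unions above let either width of a register name
+ * refer to the same 64-bit slot.  Illustrative only (little-endian layout):
+ *
+ *   struct cpu_user_regs regs = { };
+ *
+ *   regs.rax = 0x1122334455667788UL;
+ *   regs.eax  holds the same 64-bit value, and
+ *   regs._eax holds its low 32 bits, 0x55667788.
+ */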
+
+#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
+#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
+
+struct arch_vcpu_info {
+ unsigned long cr2;
+ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
+};
+
+typedef unsigned long xen_callback_t;
+
+#define XEN_CALLBACK(__cs, __rip) \
+ ((unsigned long)(__rip))
+
+#endif /* !__ASSEMBLY__ */
+
+
+#endif /* _ASM_X86_XEN_INTERFACE_64_H */
diff --git a/kernel/arch/x86/include/asm/xen/page-coherent.h b/kernel/arch/x86/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000..acd844c01
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/page-coherent.h
@@ -0,0 +1,38 @@
+#ifndef _ASM_X86_XEN_PAGE_COHERENT_H
+#define _ASM_X86_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ void *vstart = (void*)__get_free_pages(flags, get_order(size));
+ *dma_handle = virt_to_phys(vstart);
+ return vstart;
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ free_pages((unsigned long) cpu_addr, get_order(size));
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ dma_addr_t dev_addr, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs) { }
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs) { }
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+
+#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */
diff --git a/kernel/arch/x86/include/asm/xen/page.h b/kernel/arch/x86/include/asm/xen/page.h
new file mode 100644
index 000000000..c44a5d53e
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/page.h
@@ -0,0 +1,277 @@
+#ifndef _ASM_X86_XEN_PAGE_H
+#define _ASM_X86_XEN_PAGE_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <xen/interface/xen.h>
+#include <xen/grant_table.h>
+#include <xen/features.h>
+
+/* Xen machine address */
+typedef struct xmaddr {
+ phys_addr_t maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+ phys_addr_t paddr;
+} xpaddr_t;
+
+#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1))
+#define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2))
+#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
+#define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT)
+
+/* Maximum amount of memory we can handle in a domain in pages */
+#define MAX_DOMAIN_PAGES \
+ ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
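+
+/*
+ * E.g. with a (purely illustrative) CONFIG_XEN_MAX_DOMAIN_MEMORY of 128,
+ * this works out to 128 GB / 4 kB = 33554432 pages.
+ */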
+
+extern unsigned long *machine_to_phys_mapping;
+extern unsigned long machine_to_phys_nr;
+extern unsigned long *xen_p2m_addr;
+extern unsigned long xen_p2m_size;
+extern unsigned long xen_max_p2m_pfn;
+
+extern unsigned long get_phys_to_machine(unsigned long pfn);
+extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern unsigned long set_phys_range_identity(unsigned long pfn_s,
+ unsigned long pfn_e);
+
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count);
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_unmap_grant_ref *kunmap_ops,
+ struct page **pages, unsigned int count);
+
+/*
+ * Helper functions to write or read unsigned long values to/from
+ * memory, when the access may fault.
+ */
+static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
+{
+ return __put_user(val, (unsigned long __user *)addr);
+}
+
+static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+{
+ return __get_user(*val, (unsigned long __user *)addr);
+}
+
+/*
+ * When to use pfn_to_mfn(), __pfn_to_mfn() or get_phys_to_machine():
+ * - pfn_to_mfn() returns either INVALID_P2M_ENTRY or the mfn. No indicator
+ * bits (identity or foreign) are set.
+ * - __pfn_to_mfn() returns the raw entry found in the p2m table: any identity
+ *   or foreign indicator bit that is set remains set. __pfn_to_mfn()
+ *   encapsulates get_phys_to_machine(), which is called in special cases only.
+ * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
+ *   cases that need extended handling.
+ */
+static inline unsigned long __pfn_to_mfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (pfn < xen_p2m_size)
+ mfn = xen_p2m_addr[pfn];
+ else if (unlikely(pfn < xen_max_p2m_pfn))
+ return get_phys_to_machine(pfn);
+ else
+ return IDENTITY_FRAME(pfn);
+
+ if (unlikely(mfn == INVALID_P2M_ENTRY))
+ return get_phys_to_machine(pfn);
+
+ return mfn;
+}
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+ unsigned long mfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return pfn;
+
+ mfn = __pfn_to_mfn(pfn);
+
+ if (mfn != INVALID_P2M_ENTRY)
+ mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
+
+ return mfn;
+}
+
+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 1;
+
+ return __pfn_to_mfn(pfn) != INVALID_P2M_ENTRY;
+}
+
+static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
+{
+ unsigned long pfn;
+ int ret;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ if (unlikely(mfn >= machine_to_phys_nr))
+ return ~0;
+
+ /*
+ * The array access can fail (e.g., device space beyond end of RAM).
+ * In such cases it doesn't matter what we return (we return garbage),
+ * but we must handle the fault without crashing!
+ */
+ ret = xen_safe_read_ulong(&machine_to_phys_mapping[mfn], &pfn);
+ if (ret < 0)
+ return ~0;
+
+ return pfn;
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ pfn = mfn_to_pfn_no_overrides(mfn);
+ if (__pfn_to_mfn(pfn) != mfn)
+ pfn = ~0;
+
+ /*
+ * pfn is ~0 if there are no entries in the m2p for mfn or the
+ * entry doesn't map back to the mfn.
+ */
+ if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
+ pfn = mfn;
+
+ return pfn;
+}
+
+static inline xmaddr_t phys_to_machine(xpaddr_t phys)
+{
+ unsigned offset = phys.paddr & ~PAGE_MASK;
+ return XMADDR(PFN_PHYS(pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
+}
+
+static inline xpaddr_t machine_to_phys(xmaddr_t machine)
+{
+ unsigned offset = machine.maddr & ~PAGE_MASK;
+ return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
+}
+
+/*
+ * We detect special mappings in one of two ways:
+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
+ * to be outside our maximum possible pseudophys range.
+ * 2. If the MFN belongs to a different domain then we will certainly
+ * not have MFN in our p2m table. Conversely, if the page is ours,
+ * then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
+ */
+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+{
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ pfn = mfn_to_pfn(mfn);
+ if (__pfn_to_mfn(pfn) != mfn)
+ return -1; /* force !pfn_valid() */
+ return pfn;
+}
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
+#define virt_to_pfn(v) (PFN_DOWN(__pa(v)))
+#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
+
+static inline unsigned long pte_mfn(pte_t pte)
+{
+ return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+ pte_t pte;
+
+ pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
+ massage_pgprot(pgprot);
+
+ return pte;
+}
+
+static inline pteval_t pte_val_ma(pte_t pte)
+{
+ return pte.pte;
+}
+
+static inline pte_t __pte_ma(pteval_t x)
+{
+ return (pte_t) { .pte = x };
+}
+
+#define pmd_val_ma(v) ((v).pmd)
+#ifdef __PAGETABLE_PUD_FOLDED
+#define pud_val_ma(v) ((v).pgd.pgd)
+#else
+#define pud_val_ma(v) ((v).pud)
+#endif
+#define __pmd_ma(x) ((pmd_t) { (x) } )
+
+#define pgd_val_ma(x) ((x).pgd)
+
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
+
+xmaddr_t arbitrary_virt_to_machine(void *address);
+unsigned long arbitrary_virt_to_mfn(void *vaddr);
+void make_lowmem_page_readonly(void *vaddr);
+void make_lowmem_page_readwrite(void *vaddr);
+
+#define xen_remap(cookie, size) ioremap((cookie), (size));
+#define xen_unmap(cookie) iounmap((cookie))
+
+static inline bool xen_arch_need_swiotlb(struct device *dev,
+ unsigned long pfn,
+ unsigned long mfn)
+{
+ return false;
+}
+
+static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+ return __get_free_pages(__GFP_NOWARN, order);
+}
+
+#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/kernel/arch/x86/include/asm/xen/pci.h b/kernel/arch/x86/include/asm/xen/pci.h
new file mode 100644
index 000000000..968d57dd5
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/pci.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_X86_XEN_PCI_H
+#define _ASM_X86_XEN_PCI_H
+
+#if defined(CONFIG_PCI_XEN)
+extern int __init pci_xen_init(void);
+extern int __init pci_xen_hvm_init(void);
+#define pci_xen 1
+#else
+#define pci_xen 0
+#define pci_xen_init (0)
+static inline int pci_xen_hvm_init(void)
+{
+ return -1;
+}
+#endif
+#if defined(CONFIG_XEN_DOM0)
+int __init pci_xen_initial_domain(void);
+int xen_find_device_domain_owner(struct pci_dev *dev);
+int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
+int xen_unregister_device_domain_owner(struct pci_dev *dev);
+#else
+static inline int __init pci_xen_initial_domain(void)
+{
+ return -1;
+}
+static inline int xen_find_device_domain_owner(struct pci_dev *dev)
+{
+ return -1;
+}
+static inline int xen_register_device_domain_owner(struct pci_dev *dev,
+ uint16_t domain)
+{
+ return -1;
+}
+static inline int xen_unregister_device_domain_owner(struct pci_dev *dev)
+{
+ return -1;
+}
+#endif
+
+#if defined(CONFIG_PCI_MSI)
+#if defined(CONFIG_PCI_XEN)
+/*
+ * drivers/pci/xen-pcifront.c points this structure at its own
+ * implementations of these operations.
+ */
+struct xen_pci_frontend_ops {
+ int (*enable_msi)(struct pci_dev *dev, int vectors[]);
+ void (*disable_msi)(struct pci_dev *dev);
+ int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
+ void (*disable_msix)(struct pci_dev *dev);
+};
+
+extern struct xen_pci_frontend_ops *xen_pci_frontend;
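+
+/*
+ * A minimal sketch of how the frontend hooks in; the real registration
+ * lives in drivers/pci/xen-pcifront.c and the function names below are
+ * hypothetical:
+ *
+ *   static struct xen_pci_frontend_ops pcifront_ops = {
+ *       .enable_msi   = pcifront_enable_msi,
+ *       .disable_msi  = pcifront_disable_msi,
+ *       .enable_msix  = pcifront_enable_msix,
+ *       .disable_msix = pcifront_disable_msix,
+ *   };
+ *
+ *   xen_pci_frontend = &pcifront_ops;
+ */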
+
+static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
+ int vectors[])
+{
+ if (xen_pci_frontend && xen_pci_frontend->enable_msi)
+ return xen_pci_frontend->enable_msi(dev, vectors);
+ return -ENODEV;
+}
+static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
+{
+ if (xen_pci_frontend && xen_pci_frontend->disable_msi)
+ xen_pci_frontend->disable_msi(dev);
+}
+static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
+ int vectors[], int nvec)
+{
+ if (xen_pci_frontend && xen_pci_frontend->enable_msix)
+ return xen_pci_frontend->enable_msix(dev, vectors, nvec);
+ return -ENODEV;
+}
+static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
+{
+ if (xen_pci_frontend && xen_pci_frontend->disable_msix)
+ xen_pci_frontend->disable_msix(dev);
+}
+#endif /* CONFIG_PCI_XEN */
+#endif /* CONFIG_PCI_MSI */
+
+#endif /* _ASM_X86_XEN_PCI_H */
diff --git a/kernel/arch/x86/include/asm/xen/swiotlb-xen.h b/kernel/arch/x86/include/asm/xen/swiotlb-xen.h
new file mode 100644
index 000000000..ee52fcac6
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_SWIOTLB_XEN_H
+#define _ASM_X86_SWIOTLB_XEN_H
+
+#ifdef CONFIG_SWIOTLB_XEN
+extern int xen_swiotlb;
+extern int __init pci_xen_swiotlb_detect(void);
+extern void __init pci_xen_swiotlb_init(void);
+extern int pci_xen_swiotlb_init_late(void);
+#else
+#define xen_swiotlb (0)
+static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
+static inline void __init pci_xen_swiotlb_init(void) { }
+static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
+#endif
+
+#endif /* _ASM_X86_SWIOTLB_XEN_H */
diff --git a/kernel/arch/x86/include/asm/xen/trace_types.h b/kernel/arch/x86/include/asm/xen/trace_types.h
new file mode 100644
index 000000000..21e1874c0
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xen/trace_types.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_XEN_TRACE_TYPES_H
+#define _ASM_XEN_TRACE_TYPES_H
+
+enum xen_mc_flush_reason {
+ XEN_MC_FL_NONE, /* explicit flush */
+ XEN_MC_FL_BATCH, /* out of hypercall space */
+ XEN_MC_FL_ARGS, /* out of argument space */
+ XEN_MC_FL_CALLBACK, /* out of callback space */
+};
+
+enum xen_mc_extend_args {
+ XEN_MC_XE_OK,
+ XEN_MC_XE_BAD_OP,
+ XEN_MC_XE_NO_SPACE
+};
+typedef void (*xen_mc_callback_fn_t)(void *);
+
+#endif /* _ASM_XEN_TRACE_TYPES_H */
diff --git a/kernel/arch/x86/include/asm/xor.h b/kernel/arch/x86/include/asm/xor.h
new file mode 100644
index 000000000..d8829751b
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xor.h
@@ -0,0 +1,499 @@
+#ifdef CONFIG_KMEMCHECK
+/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
+# include <asm-generic/xor.h>
+#elif !defined(_ASM_X86_XOR_H)
+#define _ASM_X86_XOR_H
+
+/*
+ * Optimized RAID-5 checksumming functions for SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Cache avoiding checksumming functions utilizing KNI instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+/*
+ * Based on
+ * High-speed RAID5 checksumming functions utilizing SSE instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+/*
+ * x86-64 changes / gcc fixes from Andi Kleen.
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ *
+ * This hasn't been optimized for the hammer yet, but there are likely
+ * no advantages to be gained from x86-64 here anyway.
+ */
+
+#include <asm/i387.h>
+
+#ifdef CONFIG_X86_32
+/* reduce register pressure */
+# define XOR_CONSTANT_CONSTRAINT "i"
+#else
+# define XOR_CONSTANT_CONSTRAINT "re"
+#endif
+
+#define OFFS(x) "16*("#x")"
+#define PF_OFFS(x) "256+16*("#x")"
+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
+#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
+#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
+#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
+#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
+#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
+#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
+#define NOP(x)
+
+#define BLK64(pf, op, i) \
+ pf(i) \
+ op(i, 0) \
+ op(i + 1, 1) \
+ op(i + 2, 2) \
+ op(i + 3, 3)
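+
+/*
+ * BLK64 groups four consecutive 16-byte SSE operations (64 bytes) behind a
+ * single prefetch slot; the *_pf64 routines below are built from it, hence
+ * the "prefetch64-sse" template name.
+ */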
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines),
+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ PF3(i) \
+ PF3(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1),
+ [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(PF3, XO3, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1),
+ [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ PF1(i) \
+ PF1(i + 2) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ PF2(i) \
+ PF2(i + 2) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ PF3(i) \
+ PF3(i + 2) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ PF4(i) \
+ PF4(i + 2) \
+ PF0(i + 4) \
+ PF0(i + 6) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ XO4(i, 0) \
+ XO4(i + 1, 1) \
+ XO4(i + 2, 2) \
+ XO4(i + 3, 3) \
+ ST(i, 0) \
+ ST(i + 1, 1) \
+ ST(i + 2, 2) \
+ ST(i + 3, 3) \
+
+
+ PF0(0)
+ PF0(2)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " add %[inc], %[p5] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
+ [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 8;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ BLK64(PF0, LD, i) \
+ BLK64(PF1, XO1, i) \
+ BLK64(PF2, XO2, i) \
+ BLK64(PF3, XO3, i) \
+ BLK64(PF4, XO4, i) \
+ BLK64(NOP, ST, i) \
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " add %[inc], %[p1] ;\n"
+ " add %[inc], %[p2] ;\n"
+ " add %[inc], %[p3] ;\n"
+ " add %[inc], %[p4] ;\n"
+ " add %[inc], %[p5] ;\n"
+ " dec %[cnt] ;\n"
+ " jnz 1b ;\n"
+ : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
+ [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
+ : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_sse_pf64 = {
+ .name = "prefetch64-sse",
+ .do_2 = xor_sse_2_pf64,
+ .do_3 = xor_sse_3_pf64,
+ .do_4 = xor_sse_4_pf64,
+ .do_5 = xor_sse_5_pf64,
+};
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef NOP
+#undef BLK64
+#undef BLOCK
+
+#undef XOR_CONSTANT_CONSTRAINT
+
+#ifdef CONFIG_X86_32
+# include <asm/xor_32.h>
+#else
+# include <asm/xor_64.h>
+#endif
+
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+ AVX_SELECT(FASTEST)
+
+#endif /* _ASM_X86_XOR_H */
diff --git a/kernel/arch/x86/include/asm/xor_32.h b/kernel/arch/x86/include/asm/xor_32.h
new file mode 100644
index 000000000..ce05722e3
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xor_32.h
@@ -0,0 +1,567 @@
+#ifndef _ASM_X86_XOR_32_H
+#define _ASM_X86_XOR_32_H
+
+/*
+ * Optimized RAID-5 checksumming functions for MMX.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * High-speed RAID5 checksumming functions utilizing MMX instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+#define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
+#define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
+#define XO1(x, y) " pxor 8*("#x")(%2), %%mm"#y" ;\n"
+#define XO2(x, y) " pxor 8*("#x")(%3), %%mm"#y" ;\n"
+#define XO3(x, y) " pxor 8*("#x")(%4), %%mm"#y" ;\n"
+#define XO4(x, y) " pxor 8*("#x")(%5), %%mm"#y" ;\n"
+
+#include <asm/i387.h>
+
+static void
+xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 7;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ XO1(i, 0) \
+ ST(i, 0) \
+ XO1(i+1, 1) \
+ ST(i+1, 1) \
+ XO1(i + 2, 2) \
+ ST(i + 2, 2) \
+ XO1(i + 3, 3) \
+ ST(i + 3, 3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 7;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ ST(i, 0) \
+ XO2(i + 1, 1) \
+ ST(i + 1, 1) \
+ XO2(i + 2, 2) \
+ ST(i + 2, 2) \
+ XO2(i + 3, 3) \
+ ST(i + 3, 3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 7;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ XO3(i, 0) \
+ ST(i, 0) \
+ XO3(i + 1, 1) \
+ ST(i + 1, 1) \
+ XO3(i + 2, 2) \
+ ST(i + 2, 2) \
+ XO3(i + 3, 3) \
+ ST(i + 3, 3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " addl $128, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+
+static void
+xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 7;
+
+ kernel_fpu_begin();
+
+ /* Make sure GCC forgets anything it knows about p4 or p5,
+ such that it won't pass to the asm volatile below a
+ register that is shared with any other variable. That's
+ because we modify p4 and p5 there, but we can't mark them
+ as read/write, otherwise we'd overflow the 10-asm-operands
+ limit of GCC < 3.1. */
+ asm("" : "+r" (p4), "+r" (p5));
+
+ asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+ LD(i, 0) \
+ LD(i + 1, 1) \
+ LD(i + 2, 2) \
+ LD(i + 3, 3) \
+ XO1(i, 0) \
+ XO1(i + 1, 1) \
+ XO1(i + 2, 2) \
+ XO1(i + 3, 3) \
+ XO2(i, 0) \
+ XO2(i + 1, 1) \
+ XO2(i + 2, 2) \
+ XO2(i + 3, 3) \
+ XO3(i, 0) \
+ XO3(i + 1, 1) \
+ XO3(i + 2, 2) \
+ XO3(i + 3, 3) \
+ XO4(i, 0) \
+ ST(i, 0) \
+ XO4(i + 1, 1) \
+ ST(i + 1, 1) \
+ XO4(i + 2, 2) \
+ ST(i + 2, 2) \
+ XO4(i + 3, 3) \
+ ST(i + 3, 3)
+
+ " .align 32 ;\n"
+ " 1: ;\n"
+
+ BLOCK(0)
+ BLOCK(4)
+ BLOCK(8)
+ BLOCK(12)
+
+ " addl $128, %1 ;\n"
+ " addl $128, %2 ;\n"
+ " addl $128, %3 ;\n"
+ " addl $128, %4 ;\n"
+ " addl $128, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ : "r" (p4), "r" (p5)
+ : "memory");
+
+ /* p4 and p5 were modified, and now the variables are dead.
+ Clobber them just to be sure nobody does something stupid
+ like assuming they have some legal value. */
+ asm("" : "=r" (p4), "=r" (p5));
+
+ kernel_fpu_end();
+}
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef BLOCK
+
+static void
+xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned long lines = bytes >> 6;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+ " .align 32 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned long lines = bytes >> 6;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ :
+ : "memory" );
+
+ kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 6;
+
+ kernel_fpu_begin();
+
+ asm volatile(
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor (%4), %%mm0 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " pxor 8(%4), %%mm1 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " pxor 16(%4), %%mm2 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 24(%4), %%mm3 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " pxor 32(%4), %%mm4 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " pxor 40(%4), %%mm5 ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%4), %%mm6 ;\n"
+ " pxor 56(%4), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " addl $64, %4 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
+ :
+ : "memory");
+
+ kernel_fpu_end();
+}
+
+static void
+xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned long lines = bytes >> 6;
+
+ kernel_fpu_begin();
+
+ /* Make sure GCC forgets anything it knows about p4 or p5,
+ such that it won't pass to the asm volatile below a
+ register that is shared with any other variable. That's
+ because we modify p4 and p5 there, but we can't mark them
+ as read/write, otherwise we'd overflow the 10-asm-operands
+ limit of GCC < 3.1. */
+ asm("" : "+r" (p4), "+r" (p5));
+
+ asm volatile(
+ " .align 32,0x90 ;\n"
+ " 1: ;\n"
+ " movq (%1), %%mm0 ;\n"
+ " movq 8(%1), %%mm1 ;\n"
+ " pxor (%2), %%mm0 ;\n"
+ " pxor 8(%2), %%mm1 ;\n"
+ " movq 16(%1), %%mm2 ;\n"
+ " pxor (%3), %%mm0 ;\n"
+ " pxor 8(%3), %%mm1 ;\n"
+ " pxor 16(%2), %%mm2 ;\n"
+ " pxor (%4), %%mm0 ;\n"
+ " pxor 8(%4), %%mm1 ;\n"
+ " pxor 16(%3), %%mm2 ;\n"
+ " movq 24(%1), %%mm3 ;\n"
+ " pxor (%5), %%mm0 ;\n"
+ " pxor 8(%5), %%mm1 ;\n"
+ " movq %%mm0, (%1) ;\n"
+ " pxor 16(%4), %%mm2 ;\n"
+ " pxor 24(%2), %%mm3 ;\n"
+ " movq %%mm1, 8(%1) ;\n"
+ " pxor 16(%5), %%mm2 ;\n"
+ " pxor 24(%3), %%mm3 ;\n"
+ " movq 32(%1), %%mm4 ;\n"
+ " movq %%mm2, 16(%1) ;\n"
+ " pxor 24(%4), %%mm3 ;\n"
+ " pxor 32(%2), %%mm4 ;\n"
+ " movq 40(%1), %%mm5 ;\n"
+ " pxor 24(%5), %%mm3 ;\n"
+ " pxor 32(%3), %%mm4 ;\n"
+ " pxor 40(%2), %%mm5 ;\n"
+ " movq %%mm3, 24(%1) ;\n"
+ " pxor 32(%4), %%mm4 ;\n"
+ " pxor 40(%3), %%mm5 ;\n"
+ " movq 48(%1), %%mm6 ;\n"
+ " movq 56(%1), %%mm7 ;\n"
+ " pxor 32(%5), %%mm4 ;\n"
+ " pxor 40(%4), %%mm5 ;\n"
+ " pxor 48(%2), %%mm6 ;\n"
+ " pxor 56(%2), %%mm7 ;\n"
+ " movq %%mm4, 32(%1) ;\n"
+ " pxor 48(%3), %%mm6 ;\n"
+ " pxor 56(%3), %%mm7 ;\n"
+ " pxor 40(%5), %%mm5 ;\n"
+ " pxor 48(%4), %%mm6 ;\n"
+ " pxor 56(%4), %%mm7 ;\n"
+ " movq %%mm5, 40(%1) ;\n"
+ " pxor 48(%5), %%mm6 ;\n"
+ " pxor 56(%5), %%mm7 ;\n"
+ " movq %%mm6, 48(%1) ;\n"
+ " movq %%mm7, 56(%1) ;\n"
+
+ " addl $64, %1 ;\n"
+ " addl $64, %2 ;\n"
+ " addl $64, %3 ;\n"
+ " addl $64, %4 ;\n"
+ " addl $64, %5 ;\n"
+ " decl %0 ;\n"
+ " jnz 1b ;\n"
+ : "+r" (lines),
+ "+r" (p1), "+r" (p2), "+r" (p3)
+ : "r" (p4), "r" (p5)
+ : "memory");
+
+ /* p4 and p5 were modified, and now the variables are dead.
+ Clobber them just to be sure nobody does something stupid
+ like assuming they have some legal value. */
+ asm("" : "=r" (p4), "=r" (p5));
+
+ kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_pII_mmx = {
+ .name = "pII_mmx",
+ .do_2 = xor_pII_mmx_2,
+ .do_3 = xor_pII_mmx_3,
+ .do_4 = xor_pII_mmx_4,
+ .do_5 = xor_pII_mmx_5,
+};
+
+static struct xor_block_template xor_block_p5_mmx = {
+ .name = "p5_mmx",
+ .do_2 = xor_p5_mmx_2,
+ .do_3 = xor_p5_mmx_3,
+ .do_4 = xor_p5_mmx_4,
+ .do_5 = xor_p5_mmx_5,
+};
+
+static struct xor_block_template xor_block_pIII_sse = {
+ .name = "pIII_sse",
+ .do_2 = xor_sse_2,
+ .do_3 = xor_sse_3,
+ .do_4 = xor_sse_4,
+ .do_5 = xor_sse_5,
+};
+
+/* Also try the AVX routines */
+#include <asm/xor_avx.h>
+
+/* Also try the generic routines. */
+#include <asm-generic/xor.h>
+
+/* We force the use of the SSE xor block because it can write around L2.
+ We may also be able to load into the L1 only depending on how the cpu
+ deals with a load to a line that is being prefetched. */
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+do { \
+ AVX_XOR_SPEED; \
+ if (cpu_has_xmm) { \
+ xor_speed(&xor_block_pIII_sse); \
+ xor_speed(&xor_block_sse_pf64); \
+ } else if (cpu_has_mmx) { \
+ xor_speed(&xor_block_pII_mmx); \
+ xor_speed(&xor_block_p5_mmx); \
+ } else { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ } \
+} while (0)
+
+#endif /* _ASM_X86_XOR_32_H */
diff --git a/kernel/arch/x86/include/asm/xor_64.h b/kernel/arch/x86/include/asm/xor_64.h
new file mode 100644
index 000000000..546f1e3b8
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xor_64.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_X86_XOR_64_H
+#define _ASM_X86_XOR_64_H
+
+static struct xor_block_template xor_block_sse = {
+ .name = "generic_sse",
+ .do_2 = xor_sse_2,
+ .do_3 = xor_sse_3,
+ .do_4 = xor_sse_4,
+ .do_5 = xor_sse_5,
+};
+
+
+/* Also try the AVX routines */
+#include <asm/xor_avx.h>
+
+/* We force the use of the SSE xor block because it can write around L2.
+ We may also be able to load into the L1 only depending on how the cpu
+ deals with a load to a line that is being prefetched. */
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+do { \
+ AVX_XOR_SPEED; \
+ xor_speed(&xor_block_sse_pf64); \
+ xor_speed(&xor_block_sse); \
+} while (0)
+
+#endif /* _ASM_X86_XOR_64_H */
diff --git a/kernel/arch/x86/include/asm/xor_avx.h b/kernel/arch/x86/include/asm/xor_avx.h
new file mode 100644
index 000000000..492b29802
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xor_avx.h
@@ -0,0 +1,184 @@
+#ifndef _ASM_X86_XOR_AVX_H
+#define _ASM_X86_XOR_AVX_H
+
+/*
+ * Optimized RAID-5 checksumming functions for AVX
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ *
+ * Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifdef CONFIG_AS_AVX
+
+#include <linux/compiler.h>
+#include <asm/i387.h>
+
+#define BLOCK4(i) \
+ BLOCK(32 * i, 0) \
+ BLOCK(32 * (i + 1), 1) \
+ BLOCK(32 * (i + 2), 2) \
+ BLOCK(32 * (i + 3), 3)
+
+#define BLOCK16() \
+ BLOCK4(0) \
+ BLOCK4(4) \
+ BLOCK4(8) \
+ BLOCK4(12)
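/*
 * For reference, BLOCK16() above expands to sixteen BLOCK(offset, reg)
 * invocations that cycle through %ymm0-%ymm3:
 *
 *   BLOCK(  0, 0) BLOCK( 32, 1) BLOCK( 64, 2) BLOCK( 96, 3)
 *   BLOCK(128, 0) BLOCK(160, 1) BLOCK(192, 2) BLOCK(224, 3)
 *   BLOCK(256, 0) BLOCK(288, 1) BLOCK(320, 2) BLOCK(352, 3)
 *   BLOCK(384, 0) BLOCK(416, 1) BLOCK(448, 2) BLOCK(480, 3)
 *
 * i.e. 16 x 32 = 512 bytes per loop iteration, which is why the routines
 * below compute lines as bytes >> 9 and advance every pointer by 512.
 */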
+
+static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1)
+{
+ unsigned long lines = bytes >> 9;
+
+ kernel_fpu_begin();
+
+ while (lines--) {
+#undef BLOCK
+#define BLOCK(i, reg) \
+do { \
+ asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p1[i / sizeof(*p1)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p0[i / sizeof(*p0)])); \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : \
+ "=m" (p0[i / sizeof(*p0)])); \
+} while (0);
+
+ BLOCK16()
+
+ p0 = (unsigned long *)((uintptr_t)p0 + 512);
+ p1 = (unsigned long *)((uintptr_t)p1 + 512);
+ }
+
+ kernel_fpu_end();
+}
+
+static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1,
+ unsigned long *p2)
+{
+ unsigned long lines = bytes >> 9;
+
+ kernel_fpu_begin();
+
+ while (lines--) {
+#undef BLOCK
+#define BLOCK(i, reg) \
+do { \
+ asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p2[i / sizeof(*p2)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p1[i / sizeof(*p1)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p0[i / sizeof(*p0)])); \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : \
+ "=m" (p0[i / sizeof(*p0)])); \
+} while (0);
+
+ BLOCK16()
+
+ p0 = (unsigned long *)((uintptr_t)p0 + 512);
+ p1 = (unsigned long *)((uintptr_t)p1 + 512);
+ p2 = (unsigned long *)((uintptr_t)p2 + 512);
+ }
+
+ kernel_fpu_end();
+}
+
+static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1,
+ unsigned long *p2, unsigned long *p3)
+{
+ unsigned long lines = bytes >> 9;
+
+ kernel_fpu_begin();
+
+ while (lines--) {
+#undef BLOCK
+#define BLOCK(i, reg) \
+do { \
+ asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p3[i / sizeof(*p3)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p2[i / sizeof(*p2)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p1[i / sizeof(*p1)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p0[i / sizeof(*p0)])); \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : \
+ "=m" (p0[i / sizeof(*p0)])); \
+} while (0);
+
+ BLOCK16();
+
+ p0 = (unsigned long *)((uintptr_t)p0 + 512);
+ p1 = (unsigned long *)((uintptr_t)p1 + 512);
+ p2 = (unsigned long *)((uintptr_t)p2 + 512);
+ p3 = (unsigned long *)((uintptr_t)p3 + 512);
+ }
+
+ kernel_fpu_end();
+}
+
+static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1,
+ unsigned long *p2, unsigned long *p3, unsigned long *p4)
+{
+ unsigned long lines = bytes >> 9;
+
+ kernel_fpu_begin();
+
+ while (lines--) {
+#undef BLOCK
+#define BLOCK(i, reg) \
+do { \
+ asm volatile("vmovdqa %0, %%ymm" #reg : : "m" (p4[i / sizeof(*p4)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p3[i / sizeof(*p3)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p2[i / sizeof(*p2)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p1[i / sizeof(*p1)])); \
+ asm volatile("vxorps %0, %%ymm" #reg ", %%ymm" #reg : : \
+ "m" (p0[i / sizeof(*p0)])); \
+ asm volatile("vmovdqa %%ymm" #reg ", %0" : \
+ "=m" (p0[i / sizeof(*p0)])); \
+} while (0);
+
+ BLOCK16()
+
+ p0 = (unsigned long *)((uintptr_t)p0 + 512);
+ p1 = (unsigned long *)((uintptr_t)p1 + 512);
+ p2 = (unsigned long *)((uintptr_t)p2 + 512);
+ p3 = (unsigned long *)((uintptr_t)p3 + 512);
+ p4 = (unsigned long *)((uintptr_t)p4 + 512);
+ }
+
+ kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_avx = {
+ .name = "avx",
+ .do_2 = xor_avx_2,
+ .do_3 = xor_avx_3,
+ .do_4 = xor_avx_4,
+ .do_5 = xor_avx_5,
+};
+
+#define AVX_XOR_SPEED \
+do { \
+ if (cpu_has_avx && cpu_has_osxsave) \
+ xor_speed(&xor_block_avx); \
+} while (0)
+
+#define AVX_SELECT(FASTEST) \
+ (cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
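/*
 * A hypothetical usage sketch: AVX_XOR_SPEED adds the AVX template to the
 * xor_speed() benchmark run, while AVX_SELECT lets the AVX template
 * override whichever template the benchmark picked, provided the CPU
 * advertises AVX and the OS has enabled it via OSXSAVE.
 */
static inline struct xor_block_template *
xor_pick_template(struct xor_block_template *fastest)
{
	/* &xor_block_avx when AVX is usable, otherwise the caller's choice. */
	return AVX_SELECT(fastest);
}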
+
+#else
+
+#define AVX_XOR_SPEED {}
+
+#define AVX_SELECT(FASTEST) (FASTEST)
+
+#endif
+#endif
diff --git a/kernel/arch/x86/include/asm/xsave.h b/kernel/arch/x86/include/asm/xsave.h
new file mode 100644
index 000000000..c9a6d68b8
--- /dev/null
+++ b/kernel/arch/x86/include/asm/xsave.h
@@ -0,0 +1,257 @@
+#ifndef __ASM_X86_XSAVE_H
+#define __ASM_X86_XSAVE_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+
+#define XSTATE_CPUID 0x0000000d
+
+#define XSTATE_FP 0x1
+#define XSTATE_SSE 0x2
+#define XSTATE_YMM 0x4
+#define XSTATE_BNDREGS 0x8
+#define XSTATE_BNDCSR 0x10
+#define XSTATE_OPMASK 0x20
+#define XSTATE_ZMM_Hi256 0x40
+#define XSTATE_Hi16_ZMM 0x80
+
+#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
+#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
+/* Bit 63 of XCR0 is reserved for future expansion */
+#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))
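/*
 * Worked out: XSTATE_FPSSE is 0x03, XSTATE_AVX512 is 0x20|0x40|0x80 = 0xe0,
 * and XSTATE_EXTEND_MASK keeps every XCR0 bit except x87 (0x1), SSE (0x2)
 * and the reserved bit 63, i.e. all of the "extended" state components.
 */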
+
+#define FXSAVE_SIZE 512
+
+#define XSAVE_HDR_SIZE 64
+#define XSAVE_HDR_OFFSET FXSAVE_SIZE
+
+#define XSAVE_YMM_SIZE 256
+#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
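/*
 * Resulting layout of the standard-format xsave area, in bytes:
 *
 *   [  0, 512)   legacy FXSAVE image     (FXSAVE_SIZE)
 *   [512, 576)   xsave header            (XSAVE_HDR_OFFSET + XSAVE_HDR_SIZE)
 *   [576, 832)   YMM high halves         (XSAVE_YMM_OFFSET + XSAVE_YMM_SIZE)
 *
 * Components beyond YMM (MPX, AVX-512) follow at CPUID-enumerated offsets.
 */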
+
+/* Supported features that allow lazy state saving */
+#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
+ | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
+
+/* Supported features which require eager state saving */
+#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
+
+/* All currently supported features */
+#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
+
+#ifdef CONFIG_X86_64
+#define REX_PREFIX "0x48, "
+#else
+#define REX_PREFIX
+#endif
+
+extern unsigned int xstate_size;
+extern u64 pcntxt_mask;
+extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+extern struct xsave_struct *init_xstate_buf;
+
+extern void xsave_init(void);
+extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
+extern int init_fpu(struct task_struct *child);
+
+/* These macros all use (%edi)/(%rdi) as the single memory argument. */
+#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
+#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
+#define XSAVES ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
+#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
+#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
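/*
 * Decoded, for reference: 0x0f,0xae is the xsave/xsaveopt/xrstor opcode
 * group and 0x0f,0xc7 the xsaves/xrstors group; the ModRM bytes 0x27, 0x37,
 * 0x2f and 0x1f select the /4, /6, /5 and /3 forms with rm=111, i.e. a
 * (%edi)/(%rdi) memory operand.  REX_PREFIX (0x48 on 64-bit) turns them
 * into the 64-bit XSAVE64/XRSTOR64-style forms.  The "D" constraints in the
 * callers below are what guarantee the pointer really is in %edi/%rdi.
 */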
+
+#define xstate_fault ".section .fixup,\"ax\"\n" \
+ "3: movl $-1,%[err]\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [err] "=r" (err)
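/*
 * When pasted into one of the callers below, xstate_fault combines with the
 * caller's local labels roughly like this (XSAVE shown as the example):
 *
 *   1: xsave (%rdi)              <- the instruction that may fault
 *   2: ...                       <- normal continuation
 *   .section .fixup,"ax"
 *   3: movl $-1, %[err]          <- reached only through the fixup
 *      jmp 2b
 *   .previous
 *   _ASM_EXTABLE(1b, 3b)            "if 1: faults, resume at 3:"
 */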
+
+/*
+ * This function is used only at boot time, before the x86 capability bits
+ * are set up and alternatives can be patched in.
+ */
+static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
+{
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+ int err = 0;
+
+ WARN_ON(system_state != SYSTEM_BOOTING);
+
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ asm volatile("1:"XSAVES"\n\t"
+ "2:\n\t"
+ xstate_fault
+ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+ else
+ asm volatile("1:"XSAVE"\n\t"
+ "2:\n\t"
+ xstate_fault
+ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+ return err;
+}
+
+/*
+ * This function is used only at boot time, before the x86 capability bits
+ * are set up and alternatives can be patched in.
+ */
+static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
+{
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+ int err = 0;
+
+ WARN_ON(system_state != SYSTEM_BOOTING);
+
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ asm volatile("1:"XRSTORS"\n\t"
+ "2:\n\t"
+ xstate_fault
+ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+ else
+ asm volatile("1:"XRSTOR"\n\t"
+ "2:\n\t"
+ xstate_fault
+ : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+ return err;
+}
+
+/*
+ * Save processor xstate to xsave area.
+ */
+static inline int xsave_state(struct xsave_struct *fx, u64 mask)
+{
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+ int err = 0;
+
+	/*
+	 * If xsaves is enabled, it replaces xsaveopt because it supports
+	 * the compacted format and supervisor states in addition to the
+	 * modified-state optimization that xsaveopt provides.
+	 *
+	 * Otherwise, if xsaveopt is enabled, it replaces xsave because
+	 * xsaveopt supports the modified-state optimization, which plain
+	 * xsave does not.
+	 *
+	 * If neither xsaves nor xsaveopt is enabled, use xsave.
+	 */
+ alternative_input_2(
+ "1:"XSAVE,
+ XSAVEOPT,
+ X86_FEATURE_XSAVEOPT,
+ XSAVES,
+ X86_FEATURE_XSAVES,
+ [fx] "D" (fx), "a" (lmask), "d" (hmask) :
+ "memory");
+ asm volatile("2:\n\t"
+ xstate_fault
+ : "0" (0)
+ : "memory");
+
+ return err;
+}
+
+/*
+ * Restore processor xstate from xsave area.
+ */
+static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
+{
+ int err = 0;
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
+	/*
+	 * Use xrstors to restore the context if it is enabled: xrstors
+	 * understands the compacted xsave area format, which plain xrstor
+	 * does not.
+	 */
+ alternative_input(
+ "1: " XRSTOR,
+ XRSTORS,
+ X86_FEATURE_XSAVES,
+ "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+ : "memory");
+
+ asm volatile("2:\n"
+ xstate_fault
+ : "0" (0)
+ : "memory");
+
+ return err;
+}
+
+/*
+ * Save the xstate context of the old process during a context switch.
+ */
+static inline void fpu_xsave(struct fpu *fpu)
+{
+ xsave_state(&fpu->state->xsave, -1);
+}
+
+/*
+ * Restore the xstate context of the new process during a context switch.
+ */
+static inline int fpu_xrstor_checking(struct xsave_struct *fx)
+{
+ return xrstor_state(fx, -1);
+}
+
+/*
+ * Save xstate to the user space xsave area.
+ *
+ * We don't use the modified-state optimization because the last
+ * xrstor/xrstors may have been done on behalf of a different application,
+ * so its tracking cannot be trusted here.
+ *
+ * We don't use the compacted xsave area format, for backward compatibility
+ * with old applications that don't understand it.
+ */
+static inline int xsave_user(struct xsave_struct __user *buf)
+{
+ int err;
+
+ /*
+ * Clear the xsave header first, so that reserved fields are
+ * initialized to zero.
+ */
+ err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
+ if (unlikely(err))
+ return -EFAULT;
+
+ __asm__ __volatile__(ASM_STAC "\n"
+ "1:"XSAVE"\n"
+ "2: " ASM_CLAC "\n"
+ xstate_fault
+ : "D" (buf), "a" (-1), "d" (-1), "0" (0)
+ : "memory");
+ return err;
+}
+
+/*
+ * Restore xstate from the user space xsave area.
+ */
+static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+{
+ int err = 0;
+ struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
+ __asm__ __volatile__(ASM_STAC "\n"
+ "1:"XRSTOR"\n"
+ "2: " ASM_CLAC "\n"
+ xstate_fault
+ : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
+ : "memory"); /* memory required? */
+ return err;
+}
+
+void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
+void setup_xstate_comp(void);
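/*
 * A hypothetical usage sketch for get_xsave_addr(): look up the saved YMM
 * component of a task's xsave image.  The task_struct/fpu path mirrors the
 * fpu_xsave() helper above and is an assumption of this sketch;
 * get_xsave_addr() may return NULL when the component is unavailable.
 */
static inline void *task_ymm_state(struct task_struct *tsk)
{
	return get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_YMM);
}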
+
+#endif