Diffstat (limited to 'kernel/drivers/cpufreq')
-rw-r--r--  kernel/drivers/cpufreq/Kconfig  305
-rw-r--r--  kernel/drivers/cpufreq/Kconfig.arm  274
-rw-r--r--  kernel/drivers/cpufreq/Kconfig.powerpc  55
-rw-r--r--  kernel/drivers/cpufreq/Kconfig.x86  297
-rw-r--r--  kernel/drivers/cpufreq/Makefile  107
-rw-r--r--  kernel/drivers/cpufreq/acpi-cpufreq.c  1011
-rw-r--r--  kernel/drivers/cpufreq/amd_freq_sensitivity.c  148
-rw-r--r--  kernel/drivers/cpufreq/arm_big_little.c  605
-rw-r--r--  kernel/drivers/cpufreq/arm_big_little.h  43
-rw-r--r--  kernel/drivers/cpufreq/arm_big_little_dt.c  117
-rw-r--r--  kernel/drivers/cpufreq/at32ap-cpufreq.c  127
-rw-r--r--  kernel/drivers/cpufreq/blackfin-cpufreq.c  217
-rw-r--r--  kernel/drivers/cpufreq/cpufreq-dt.c  422
-rw-r--r--  kernel/drivers/cpufreq/cpufreq-nforce2.c  445
-rw-r--r--  kernel/drivers/cpufreq/cpufreq.c  2511
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_conservative.c  408
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_governor.c  449
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_governor.h  280
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_ondemand.c  631
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_opp.c  110
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_performance.c  60
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_powersave.c  64
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_stats.c  357
-rw-r--r--  kernel/drivers/cpufreq/cpufreq_userspace.c  123
-rw-r--r--  kernel/drivers/cpufreq/cris-artpec3-cpufreq.c  92
-rw-r--r--  kernel/drivers/cpufreq/cris-etraxfs-cpufreq.c  91
-rw-r--r--  kernel/drivers/cpufreq/davinci-cpufreq.c  181
-rw-r--r--  kernel/drivers/cpufreq/dbx500-cpufreq.c  83
-rw-r--r--  kernel/drivers/cpufreq/e_powersaver.c  441
-rw-r--r--  kernel/drivers/cpufreq/elanfreq.c  232
-rw-r--r--  kernel/drivers/cpufreq/exynos-cpufreq.c  239
-rw-r--r--  kernel/drivers/cpufreq/exynos-cpufreq.h  98
-rw-r--r--  kernel/drivers/cpufreq/exynos4210-cpufreq.c  184
-rw-r--r--  kernel/drivers/cpufreq/exynos4x12-cpufreq.c  236
-rw-r--r--  kernel/drivers/cpufreq/exynos5250-cpufreq.c  210
-rw-r--r--  kernel/drivers/cpufreq/exynos5440-cpufreq.c  454
-rw-r--r--  kernel/drivers/cpufreq/freq_table.c  311
-rw-r--r--  kernel/drivers/cpufreq/gx-suspmod.c  502
-rw-r--r--  kernel/drivers/cpufreq/highbank-cpufreq.c  109
-rw-r--r--  kernel/drivers/cpufreq/hisi-acpu-cpufreq.c  42
-rw-r--r--  kernel/drivers/cpufreq/ia64-acpi-cpufreq.c  376
-rw-r--r--  kernel/drivers/cpufreq/imx6q-cpufreq.c  368
-rw-r--r--  kernel/drivers/cpufreq/integrator-cpufreq.c  239
-rw-r--r--  kernel/drivers/cpufreq/intel_pstate.c  1282
-rw-r--r--  kernel/drivers/cpufreq/kirkwood-cpufreq.c  194
-rw-r--r--  kernel/drivers/cpufreq/longhaul.c  1016
-rw-r--r--  kernel/drivers/cpufreq/longhaul.h  353
-rw-r--r--  kernel/drivers/cpufreq/longrun.c  324
-rw-r--r--  kernel/drivers/cpufreq/loongson2_cpufreq.c  200
-rw-r--r--  kernel/drivers/cpufreq/ls1x-cpufreq.c  222
-rw-r--r--  kernel/drivers/cpufreq/maple-cpufreq.c  246
-rw-r--r--  kernel/drivers/cpufreq/omap-cpufreq.c  205
-rw-r--r--  kernel/drivers/cpufreq/p4-clockmod.c  286
-rw-r--r--  kernel/drivers/cpufreq/pasemi-cpufreq.c  291
-rw-r--r--  kernel/drivers/cpufreq/pcc-cpufreq.c  619
-rw-r--r--  kernel/drivers/cpufreq/pmac32-cpufreq.c  686
-rw-r--r--  kernel/drivers/cpufreq/pmac64-cpufreq.c  676
-rw-r--r--  kernel/drivers/cpufreq/powernow-k6.c  309
-rw-r--r--  kernel/drivers/cpufreq/powernow-k7.c  709
-rw-r--r--  kernel/drivers/cpufreq/powernow-k7.h  43
-rw-r--r--  kernel/drivers/cpufreq/powernow-k8.c  1249
-rw-r--r--  kernel/drivers/cpufreq/powernow-k8.h  190
-rw-r--r--  kernel/drivers/cpufreq/powernv-cpufreq.c  445
-rw-r--r--  kernel/drivers/cpufreq/ppc_cbe_cpufreq.c  170
-rw-r--r--  kernel/drivers/cpufreq/ppc_cbe_cpufreq.h  24
-rw-r--r--  kernel/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c  115
-rw-r--r--  kernel/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c  156
-rw-r--r--  kernel/drivers/cpufreq/pxa2xx-cpufreq.c  452
-rw-r--r--  kernel/drivers/cpufreq/pxa3xx-cpufreq.c  228
-rw-r--r--  kernel/drivers/cpufreq/qoriq-cpufreq.c  374
-rw-r--r--  kernel/drivers/cpufreq/s3c2410-cpufreq.c  158
-rw-r--r--  kernel/drivers/cpufreq/s3c2412-cpufreq.c  254
-rw-r--r--  kernel/drivers/cpufreq/s3c2416-cpufreq.c  491
-rw-r--r--  kernel/drivers/cpufreq/s3c2440-cpufreq.c  307
-rw-r--r--  kernel/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c  198
-rw-r--r--  kernel/drivers/cpufreq/s3c24xx-cpufreq.c  674
-rw-r--r--  kernel/drivers/cpufreq/s3c64xx-cpufreq.c  227
-rw-r--r--  kernel/drivers/cpufreq/s5pv210-cpufreq.c  662
-rw-r--r--  kernel/drivers/cpufreq/sa1100-cpufreq.c  220
-rw-r--r--  kernel/drivers/cpufreq/sa1110-cpufreq.c  374
-rw-r--r--  kernel/drivers/cpufreq/sc520_freq.c  140
-rw-r--r--  kernel/drivers/cpufreq/sfi-cpufreq.c  136
-rw-r--r--  kernel/drivers/cpufreq/sh-cpufreq.c  177
-rw-r--r--  kernel/drivers/cpufreq/sparc-us2e-cpufreq.c  378
-rw-r--r--  kernel/drivers/cpufreq/sparc-us3-cpufreq.c  237
-rw-r--r--  kernel/drivers/cpufreq/spear-cpufreq.c  246
-rw-r--r--  kernel/drivers/cpufreq/speedstep-centrino.c  566
-rw-r--r--  kernel/drivers/cpufreq/speedstep-ich.c  387
-rw-r--r--  kernel/drivers/cpufreq/speedstep-lib.c  482
-rw-r--r--  kernel/drivers/cpufreq/speedstep-lib.h  49
-rw-r--r--  kernel/drivers/cpufreq/speedstep-smi.c  396
-rw-r--r--  kernel/drivers/cpufreq/tegra-cpufreq.c  218
-rw-r--r--  kernel/drivers/cpufreq/unicore2-cpufreq.c  80
-rw-r--r--  kernel/drivers/cpufreq/vexpress-spc-cpufreq.c  69
94 files changed, 31844 insertions, 0 deletions
diff --git a/kernel/drivers/cpufreq/Kconfig b/kernel/drivers/cpufreq/Kconfig
new file mode 100644
index 000000000..659879a56
--- /dev/null
+++ b/kernel/drivers/cpufreq/Kconfig
@@ -0,0 +1,305 @@
+menu "CPU Frequency scaling"
+
+config CPU_FREQ
+ bool "CPU Frequency scaling"
+ select SRCU
+ help
+ CPU Frequency scaling allows you to change the clock speed of
+ CPUs on the fly. This is a nice method to save power, because
+ the lower the CPU clock speed, the less power the CPU consumes.
+
+ Note that this driver doesn't automatically change the CPU
+ clock speed, you need to either enable a dynamic cpufreq governor
+ (see below) after boot, or use a userspace tool.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If in doubt, say N.
+
+if CPU_FREQ
+
+config CPU_FREQ_GOV_COMMON
+ bool
+
+config CPU_FREQ_BOOST_SW
+ bool
+ depends on THERMAL
+
+config CPU_FREQ_STAT
+ tristate "CPU frequency translation statistics"
+ default y
+ help
+ This driver exports CPU frequency statistics through the sysfs
+ file system.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_stats.
+
+ If in doubt, say N.
+
+config CPU_FREQ_STAT_DETAILS
+ bool "CPU frequency translation statistics details"
+ depends on CPU_FREQ_STAT
+ help
+ This will show the detailed CPU frequency translation table
+ in the sysfs file system.
+
+ If in doubt, say N.
+
+choice
+ prompt "Default CPUFreq governor"
+ default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
+ default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+ help
+ This option sets which CPUFreq governor shall be loaded at
+ startup. If in doubt, select 'performance'.
+
+config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+ bool "performance"
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'performance' as default. This sets
+ the frequency statically to the highest frequency supported by
+ the CPU.
+
+config CPU_FREQ_DEFAULT_GOV_POWERSAVE
+ bool "powersave"
+ select CPU_FREQ_GOV_POWERSAVE
+ help
+ Use the CPUFreq governor 'powersave' as default. This sets
+ the frequency statically to the lowest frequency supported by
+ the CPU.
+
+config CPU_FREQ_DEFAULT_GOV_USERSPACE
+ bool "userspace"
+ select CPU_FREQ_GOV_USERSPACE
+ help
+ Use the CPUFreq governor 'userspace' as default. This allows
+ you to set the CPU frequency manually, or lets a userspace
+ program set the CPU frequency dynamically, without having to
+ enable the userspace governor explicitly.
+
+config CPU_FREQ_DEFAULT_GOV_ONDEMAND
+ bool "ondemand"
+ select CPU_FREQ_GOV_ONDEMAND
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'ondemand' as default. This allows
+ you to get a full dynamic frequency capable system by simply
+ loading your cpufreq low-level hardware driver.
+ Be aware that not all cpufreq drivers support the ondemand
+ governor. If unsure have a look at the help section of the
+ driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+ bool "conservative"
+ select CPU_FREQ_GOV_CONSERVATIVE
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'conservative' as default. This allows
+ you to get a full dynamic frequency capable system by simply
+ loading your cpufreq low-level hardware driver.
+ Be aware that not all cpufreq drivers support the conservative
+ governor. If unsure have a look at the help section of the
+ driver. Fallback governor will be the performance governor.
+endchoice
+
+config CPU_FREQ_GOV_PERFORMANCE
+ tristate "'performance' governor"
+ help
+ This cpufreq governor sets the frequency statically to the
+ highest available CPU frequency.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_performance.
+
+ If in doubt, say Y.
+
+config CPU_FREQ_GOV_POWERSAVE
+ tristate "'powersave' governor"
+ help
+ This cpufreq governor sets the frequency statically to the
+ lowest available CPU frequency.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_powersave.
+
+ If in doubt, say Y.
+
+config CPU_FREQ_GOV_USERSPACE
+ tristate "'userspace' governor for userspace frequency scaling"
+ help
+ Enable this cpufreq governor when you either want to set the
+ CPU frequency manually or when a userspace program shall
+ be able to set the CPU dynamically, like on LART
+ <http://www.lartmaker.nl/>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_userspace.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say Y.
+
+config CPU_FREQ_GOV_ONDEMAND
+ tristate "'ondemand' cpufreq policy governor"
+ select CPU_FREQ_GOV_COMMON
+ help
+ 'ondemand' - This driver adds a dynamic cpufreq policy governor.
+ The governor polls periodically and changes the frequency
+ based on CPU utilization.
+ Support for this governor depends on the CPU's capability to
+ do fast frequency switching (i.e., very low latency frequency
+ transitions).
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_ondemand.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
+config CPU_FREQ_GOV_CONSERVATIVE
+ tristate "'conservative' cpufreq governor"
+ depends on CPU_FREQ
+ select CPU_FREQ_GOV_COMMON
+ help
+ 'conservative' - this driver is rather similar to the 'ondemand'
+ governor both in its source code and its purpose; the difference
+ is its optimisation for better suitability in a battery-powered
+ environment. The frequency is gracefully increased and decreased
+ rather than jumping to 100% when speed is required.
+
+ If you have a desktop machine then you should really be considering
+ the 'ondemand' governor instead. However, if you are using a laptop,
+ PDA or even an AMD64-based computer (due to the unacceptable
+ step-by-step latency issues between the minimum and maximum frequency
+ transitions in the CPU) you will probably want to use this governor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_conservative.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
+comment "CPU frequency scaling drivers"
+
+config CPUFREQ_DT
+ tristate "Generic DT based cpufreq driver"
+ depends on HAVE_CLK && OF
+ # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
+ depends on !CPU_THERMAL || THERMAL
+ select PM_OPP
+ help
+ This adds a generic DT based cpufreq driver for frequency management.
+ It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+ systems which share clock and voltage across all CPUs.
+
+ If in doubt, say N.
+
+if X86
+source "drivers/cpufreq/Kconfig.x86"
+endif
+
+if ARM || ARM64
+source "drivers/cpufreq/Kconfig.arm"
+endif
+
+if PPC32 || PPC64
+source "drivers/cpufreq/Kconfig.powerpc"
+endif
+
+if AVR32
+config AVR32_AT32AP_CPUFREQ
+ bool "CPU frequency driver for AT32AP"
+ depends on PLATFORM_AT32AP
+ default n
+ help
+ This enables the CPU frequency driver for AT32AP processors.
+ If in doubt, say N.
+endif
+
+if IA64
+config IA64_ACPI_CPUFREQ
+ tristate "ACPI Processor P-States driver"
+ depends on ACPI_PROCESSOR
+ help
+ This driver adds a CPUFreq driver which utilizes the ACPI
+ Processor Performance States.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+endif
+
+if MIPS
+config LOONGSON2_CPUFREQ
+ tristate "Loongson2 CPUFreq Driver"
+ help
+ This option adds a CPUFreq driver for Loongson processors which
+ support software-configurable CPU frequency.
+
+ Loongson2F and its successors support this feature.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config LOONGSON1_CPUFREQ
+ tristate "Loongson1 CPUFreq Driver"
+ help
+ This option adds a CPUFreq driver for Loongson1 processors which
+ support software-configurable CPU frequency.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+endif
+
+if SPARC64
+config SPARC_US3_CPUFREQ
+ tristate "UltraSPARC-III CPU Frequency driver"
+ help
+ This adds the CPUFreq driver for UltraSPARC-III processors.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If in doubt, say N.
+
+config SPARC_US2E_CPUFREQ
+ tristate "UltraSPARC-IIe CPU Frequency driver"
+ help
+ This adds the CPUFreq driver for UltraSPARC-IIe processors.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If in doubt, say N.
+endif
+
+if SUPERH
+config SH_CPU_FREQ
+ tristate "SuperH CPU Frequency driver"
+ help
+ This adds the cpufreq driver for SuperH. Any CPU that supports
+ clock rate rounding through the clock framework can use this
+ driver. While it will make the kernel slightly larger, this is
+ harmless for CPUs that don't support rate rounding. The driver
+ will also generate a notice in the boot log before disabling
+ itself if the CPU in question is not capable of rate rounding.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If unsure, say N.
+endif
+
+config QORIQ_CPUFREQ
+ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
+ depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
+ select CLK_QORIQ
+ help
+ This adds the CPUFreq driver support for Freescale QorIQ SoCs
+ which are capable of changing the CPU's frequency dynamically.
+
+endif
+endmenu
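
The static governors above ('performance' and 'powersave') are tiny plugins: each registers a struct cpufreq_governor whose callback pins the policy to one end of its frequency range whenever the core starts the governor or changes the policy limits. A minimal sketch against this tree's governor API, modeled on (and abridged from) the cpufreq_performance.c added in this commit:

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>

static int cpufreq_governor_performance(struct cpufreq_policy *policy,
					unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* Pin the policy to its maximum allowed frequency. */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_performance = {
	.name		= "performance",
	.governor	= cpufreq_governor_performance,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_performance_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_performance);
}
module_init(cpufreq_gov_performance_init);

The dynamic governors ('ondemand', 'conservative') hook the same callback but drive a periodic sampling loop from the shared cpufreq_governor.c code pulled in by CPU_FREQ_GOV_COMMON.
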
diff --git a/kernel/drivers/cpufreq/Kconfig.arm b/kernel/drivers/cpufreq/Kconfig.arm
new file mode 100644
index 000000000..4f3dbc8cf
--- /dev/null
+++ b/kernel/drivers/cpufreq/Kconfig.arm
@@ -0,0 +1,274 @@
+#
+# ARM CPU Frequency scaling drivers
+#
+
+# big LITTLE core layer and glue drivers
+config ARM_BIG_LITTLE_CPUFREQ
+ tristate "Generic ARM big LITTLE CPUfreq driver"
+ depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
+ select PM_OPP
+ help
+ This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+
+config ARM_DT_BL_CPUFREQ
+ tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && OF
+ help
+ This enables probing via DT for Generic CPUfreq driver for ARM
+ big.LITTLE platform. This gets frequency tables from DT.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
+
+config ARM_EXYNOS_CPUFREQ
+ tristate "SAMSUNG EXYNOS CPUfreq Driver"
+ depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
+ depends on THERMAL
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS platforms.
+ Supported SoC versions are:
+ Exynos4210, Exynos4212, Exynos4412, and Exynos5250.
+
+ If in doubt, say N.
+
+config ARM_EXYNOS4210_CPUFREQ
+ bool "SAMSUNG EXYNOS4210"
+ depends on CPU_EXYNOS4210
+ depends on ARM_EXYNOS_CPUFREQ
+ default y
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS4210
+ SoC (S5PV310 or S5PC210).
+
+ If in doubt, say N.
+
+config ARM_EXYNOS4X12_CPUFREQ
+ bool "SAMSUNG EXYNOS4x12"
+ depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
+ depends on ARM_EXYNOS_CPUFREQ
+ default y
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS4X12
+ SoC (EXYNOS4212 or EXYNOS4412).
+
+ If in doubt, say N.
+
+config ARM_EXYNOS5250_CPUFREQ
+ bool "SAMSUNG EXYNOS5250"
+ depends on SOC_EXYNOS5250
+ depends on ARM_EXYNOS_CPUFREQ
+ default y
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS5250
+ SoC.
+
+ If in doubt, say N.
+
+config ARM_EXYNOS_CPU_FREQ_BOOST_SW
+ bool "EXYNOS Frequency Overclocking - Software"
+ depends on ARM_EXYNOS_CPUFREQ && THERMAL
+ select CPU_FREQ_BOOST_SW
+ select EXYNOS_THERMAL
+ help
+ This driver supports software managed overclocking (BOOST).
+ It allows usage of special frequencies for Samsung Exynos
+ processors if thermal conditions are appropriate.
+
+ It requires, for safe operation, thermal framework with properly
+ defined trip points.
+
+ If in doubt, say N.
+
+config ARM_EXYNOS5440_CPUFREQ
+ tristate "SAMSUNG EXYNOS5440"
+ depends on SOC_EXYNOS5440
+ depends on HAVE_CLK && OF
+ select PM_OPP
+ default y
+ help
+ This adds the CPUFreq driver for the Samsung EXYNOS5440
+ SoC. The Exynos5440 clock controller differs from previous
+ Exynos controllers, so this driver does not use the common
+ Exynos framework.
+
+ If in doubt, say N.
+
+config ARM_HIGHBANK_CPUFREQ
+ tristate "Calxeda Highbank-based"
+ depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
+ default m
+ help
+ This adds the CPUFreq driver for Calxeda Highbank SoC
+ based boards.
+
+ If in doubt, say N.
+
+config ARM_HISI_ACPU_CPUFREQ
+ tristate "Hisilicon ACPU CPUfreq driver"
+ depends on ARCH_HISI && CPUFREQ_DT
+ select PM_OPP
+ help
+ This enables the hisilicon ACPU CPUfreq driver.
+
+ If in doubt, say N.
+
+config ARM_IMX6Q_CPUFREQ
+ tristate "Freescale i.MX6 cpufreq support"
+ depends on ARCH_MXC
+ depends on REGULATOR_ANATOP
+ select PM_OPP
+ help
+ This adds cpufreq driver support for Freescale i.MX6 series SoCs.
+
+ If in doubt, say N.
+
+config ARM_INTEGRATOR
+ tristate "CPUfreq driver for ARM Integrator CPUs"
+ depends on ARCH_INTEGRATOR
+ default y
+ help
+ This enables the CPUfreq driver for ARM Integrator CPUs.
+ If in doubt, say Y.
+
+config ARM_KIRKWOOD_CPUFREQ
+ def_bool MACH_KIRKWOOD
+ help
+ This adds the CPUFreq driver for Marvell Kirkwood
+ SoCs.
+
+config ARM_OMAP2PLUS_CPUFREQ
+ bool "TI OMAP2+"
+ depends on ARCH_OMAP2PLUS
+ default ARCH_OMAP2PLUS
+
+config ARM_S3C_CPUFREQ
+ bool
+ help
+ Internal configuration node for common cpufreq on Samsung SoC
+
+config ARM_S3C24XX_CPUFREQ
+ bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
+ depends on ARCH_S3C24XX
+ select ARM_S3C_CPUFREQ
+ help
+ This enables the CPUfreq driver for the Samsung S3C24XX family
+ of CPUs.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If in doubt, say N.
+
+config ARM_S3C24XX_CPUFREQ_DEBUG
+ bool "Debug CPUfreq Samsung driver core"
+ depends on ARM_S3C24XX_CPUFREQ
+ help
+ Enable s3c_freq_dbg for the Samsung S3C CPUfreq core
+
+config ARM_S3C24XX_CPUFREQ_IODEBUG
+ bool "Debug CPUfreq Samsung driver IO timing"
+ depends on ARM_S3C24XX_CPUFREQ
+ help
+ Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core
+
+config ARM_S3C24XX_CPUFREQ_DEBUGFS
+ bool "Export debugfs for CPUFreq"
+ depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS
+ help
+ Export status information via debugfs.
+
+config ARM_S3C2410_CPUFREQ
+ bool
+ depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410
+ select S3C2410_CPUFREQ_UTILS
+ help
+ CPU Frequency scaling support for S3C2410
+
+config ARM_S3C2412_CPUFREQ
+ bool
+ depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412
+ default y
+ select S3C2412_IOTIMING
+ help
+ CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs.
+
+config ARM_S3C2416_CPUFREQ
+ bool "S3C2416 CPU Frequency scaling support"
+ depends on CPU_S3C2416
+ help
+ This adds the CPUFreq driver for the Samsung S3C2416 and
+ S3C2450 SoC. The S3C2416 supports changing the rate of the
+ armdiv clock source and also entering a so called dynamic
+ voltage scaling mode in which it is possible to reduce the
+ core voltage of the CPU.
+
+ If in doubt, say N.
+
+config ARM_S3C2416_CPUFREQ_VCORESCALE
+ bool "Allow voltage scaling for S3C2416 arm core"
+ depends on ARM_S3C2416_CPUFREQ && REGULATOR
+ help
+ Enable CPU voltage scaling when entering the dvs mode.
+ It uses information gathered through existing hardware and
+ tests but not documented in any datasheet.
+
+ If in doubt, say N.
+
+config ARM_S3C2440_CPUFREQ
+ bool "S3C2440/S3C2442 CPU Frequency scaling support"
+ depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442)
+ select S3C2410_CPUFREQ_UTILS
+ default y
+ help
+ CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs.
+
+config ARM_S3C64XX_CPUFREQ
+ bool "Samsung S3C64XX"
+ depends on CPU_S3C6410
+ default y
+ help
+ This adds the CPUFreq driver for Samsung S3C6410 SoC.
+
+ If in doubt, say N.
+
+config ARM_S5PV210_CPUFREQ
+ bool "Samsung S5PV210 and S5PC110"
+ depends on CPU_S5PV210
+ default y
+ help
+ This adds the CPUFreq driver for Samsung S5PV210 and
+ S5PC110 SoCs.
+
+ If in doubt, say N.
+
+config ARM_SA1100_CPUFREQ
+ bool
+
+config ARM_SA1110_CPUFREQ
+ bool
+
+config ARM_SPEAR_CPUFREQ
+ bool "SPEAr CPUFreq support"
+ depends on PLAT_SPEAR
+ default y
+ help
+ This adds the CPUFreq driver support for SPEAr SOCs.
+
+config ARM_TEGRA_CPUFREQ
+ bool "TEGRA CPUFreq support"
+ depends on ARCH_TEGRA
+ default y
+ help
+ This adds the CPUFreq driver support for TEGRA SOCs.
+
+config ARM_PXA2xx_CPUFREQ
+ tristate "Intel PXA2xx CPUfreq driver"
+ depends on PXA27x || PXA25x
+ help
+ This adds the CPUFreq driver support for Intel PXA2xx SoCs.
+
+ If in doubt, say N.
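
Most of the SoC drivers selected above share one shape: build a cpufreq_frequency_table (often from device-tree OPPs), implement a target_index() callback that reprograms the CPU clock (and regulator, where voltage scaling applies), and register a struct cpufreq_driver with the core. A hedged sketch of that skeleton using this tree's generic helpers; the foo_* names and the elided clock programming are placeholders, not any real driver:

#include <linux/cpufreq.h>
#include <linux/module.h>

static struct cpufreq_frequency_table *foo_freq_table; /* built elsewhere, e.g. from OPPs */

static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* Reprogram the CPU clock to foo_freq_table[index].frequency (kHz). */
	return 0;
}

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Transition latency is given in nanoseconds: 300 us here. */
	return cpufreq_generic_init(policy, foo_freq_table, 300 * 1000);
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= foo_target_index,
	.init		= foo_cpufreq_init,
	.name		= "foo-cpufreq",
	.attr		= cpufreq_generic_attr,
};

static int __init foo_cpufreq_module_init(void)
{
	return cpufreq_register_driver(&foo_cpufreq_driver);
}
module_init(foo_cpufreq_module_init);

CPUFREQ_DT in the main Kconfig is essentially this skeleton generalized to pull its frequency table from OPPs described in the device tree, which is why entries such as ARM_HIGHBANK_CPUFREQ above depend on it.
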
diff --git a/kernel/drivers/cpufreq/Kconfig.powerpc b/kernel/drivers/cpufreq/Kconfig.powerpc
new file mode 100644
index 000000000..3a0595b41
--- /dev/null
+++ b/kernel/drivers/cpufreq/Kconfig.powerpc
@@ -0,0 +1,55 @@
+config CPU_FREQ_CBE
+ tristate "CBE frequency scaling"
+ depends on CBE_RAS && PPC_CELL
+ default m
+ help
+ This adds the cpufreq driver for Cell BE processors.
+ For details, take a look at <file:Documentation/cpu-freq/>.
+ If you don't have such a processor, say N.
+
+config CPU_FREQ_CBE_PMI
+ bool "CBE frequency scaling using PMI interface"
+ depends on CPU_FREQ_CBE
+ default n
+ help
+ Select this if you want to use the PMI interface to switch
+ frequencies. Using PMI, the processor will not only be able to run at
+ lower speed, but also at lower core voltage.
+
+config CPU_FREQ_MAPLE
+ bool "Support for Maple 970FX Evaluation Board"
+ depends on PPC_MAPLE
+ help
+ This adds support for frequency switching on Maple 970FX
+ Evaluation Board and compatible boards (IBM JS2x blades).
+
+config CPU_FREQ_PMAC
+ bool "Support for Apple PowerBooks"
+ depends on ADB_PMU && PPC32
+ help
+ This adds support for frequency switching on Apple PowerBooks;
+ this currently includes some models of iBook & Titanium
+ PowerBook.
+
+config CPU_FREQ_PMAC64
+ bool "Support for some Apple G5s"
+ depends on PPC_PMAC && PPC64
+ help
+ This adds support for frequency switching on Apple iMac G5,
+ and some of the more recent desktop G5 machines as well.
+
+config PPC_PASEMI_CPUFREQ
+ bool "Support for PA Semi PWRficient"
+ depends on PPC_PASEMI
+ default y
+ help
+ This adds the support for frequency switching on PA Semi
+ PWRficient processors.
+
+config POWERNV_CPUFREQ
+ tristate "CPU frequency scaling for IBM POWERNV platform"
+ depends on PPC_POWERNV
+ default y
+ help
+ This adds support for CPU frequency switching on the IBM
+ POWERNV platform.
diff --git a/kernel/drivers/cpufreq/Kconfig.x86 b/kernel/drivers/cpufreq/Kconfig.x86
new file mode 100644
index 000000000..8f23161d8
--- /dev/null
+++ b/kernel/drivers/cpufreq/Kconfig.x86
@@ -0,0 +1,297 @@
+#
+# x86 CPU Frequency scaling drivers
+#
+
+config X86_INTEL_PSTATE
+ bool "Intel P state control"
+ depends on X86
+ help
+ This driver provides P-state control for Intel Core processors.
+ It implements an internal governor and, when enabled, becomes
+ the preferred scaling driver and governor for Sandy Bridge
+ processors.
+
+ If in doubt, say N.
+
+config X86_PCC_CPUFREQ
+ tristate "Processor Clocking Control interface driver"
+ depends on ACPI && ACPI_PROCESSOR
+ help
+ This driver adds support for the PCC interface.
+
+ For details, take a look at:
+ <file:Documentation/cpu-freq/pcc-cpufreq.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pcc-cpufreq.
+
+ If in doubt, say N.
+
+config X86_ACPI_CPUFREQ
+ tristate "ACPI Processor P-States driver"
+ depends on ACPI_PROCESSOR
+ help
+ This driver adds a CPUFreq driver which utilizes the ACPI
+ Processor Performance States.
+ This driver also supports Intel Enhanced Speedstep and newer
+ AMD CPUs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called acpi-cpufreq.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_ACPI_CPUFREQ_CPB
+ default y
+ bool "Legacy cpb sysfs knob support for AMD CPUs"
+ depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
+ help
+ The powernow-k8 driver used to provide a sysfs knob called "cpb"
+ to disable the Core Performance Boosting feature of AMD CPUs. This
+ file has now been superseded by the more generic "boost" entry.
+
+ By enabling this option the acpi_cpufreq driver provides the old
+ entry in addition to the new boost ones, for compatibility reasons.
+
+config X86_SFI_CPUFREQ
+ tristate "SFI Performance-States driver"
+ depends on X86_INTEL_MID && SFI
+ help
+ This adds a CPUFreq driver for some Silvermont based Intel Atom
+ architectures like Z34xx and Z35xx which enumerate processor
+ performance states through SFI.
+
+ If in doubt, say N.
+
+config ELAN_CPUFREQ
+ tristate "AMD Elan SC400 and SC410"
+ depends on MELAN
+ ---help---
+ This adds the CPUFreq driver for AMD Elan SC400 and SC410
+ processors.
+
+ You need to specify the processor maximum speed as boot
+ parameter: elanfreq=maxspeed (in kHz) or as module
+ parameter "max_freq".
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config SC520_CPUFREQ
+ tristate "AMD Elan SC520"
+ depends on MELAN
+ ---help---
+ This adds the CPUFreq driver for AMD Elan SC520 processor.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+
+config X86_POWERNOW_K6
+ tristate "AMD Mobile K6-2/K6-3 PowerNow!"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
+ AMD K6-3+ processors.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_POWERNOW_K7
+ tristate "AMD Mobile Athlon/Duron PowerNow!"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for mobile AMD K7 mobile processors.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_POWERNOW_K7_ACPI
+ bool
+ depends on X86_POWERNOW_K7 && ACPI_PROCESSOR
+ depends on !(X86_POWERNOW_K7 = y && ACPI_PROCESSOR = m)
+ depends on X86_32
+ default y
+
+config X86_POWERNOW_K8
+ tristate "AMD Opteron/Athlon64 PowerNow!"
+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
+ help
+ This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+ Support for K10 and newer processors is now in acpi-cpufreq.
+
+ To compile this driver as a module, choose M here: the
+ module will be called powernow-k8.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+config X86_AMD_FREQ_SENSITIVITY
+ tristate "AMD frequency sensitivity feedback powersave bias"
+ depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
+ help
+ This adds AMD-specific powersave bias function to the ondemand
+ governor, which allows it to make more power-conscious frequency
+ change decisions based on feedback from hardware (available on AMD
+ Family 16h and above).
+
+ Hardware feedback tells software how "sensitive" to frequency changes
+ the CPUs' workloads are. CPU-bound workloads will be more sensitive
+ -- they will perform better as frequency increases. Memory/IO-bound
+ workloads will be less sensitive -- they will not necessarily perform
+ better as frequency increases.
+
+ If in doubt, say N.
+
+config X86_GX_SUSPMOD
+ tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+ depends on X86_32 && PCI
+ help
+ This adds the CPUFreq driver for NatSemi Geode processors which
+ support suspend modulation.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_SPEEDSTEP_CENTRINO
+ tristate "Intel Enhanced SpeedStep (deprecated)"
+ select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
+ depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
+ help
+ This driver is deprecated; its functionality is now merged into
+ acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
+ speedstep_centrino.
+ This adds the CPUFreq driver for Enhanced SpeedStep enabled
+ mobile CPUs. This means Intel Pentium M (Centrino) CPUs
+ or 64-bit enabled Intel Xeons.
+
+ To compile this driver as a module, choose M here: the
+ module will be called speedstep-centrino.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_SPEEDSTEP_CENTRINO_TABLE
+ bool "Built-in tables for Banias CPUs"
+ depends on X86_32 && X86_SPEEDSTEP_CENTRINO
+ default y
+ help
+ Use built-in tables for Banias CPUs if ACPI encoding
+ is not available.
+
+ If in doubt, say N.
+
+config X86_SPEEDSTEP_ICH
+ tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for certain mobile Intel Pentium III
+ (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
+ mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
+ ICH3 or ICH4 southbridge.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_SPEEDSTEP_SMI
+ tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for certain mobile Intel Pentium III
+ (Coppermine), all mobile Intel Pentium III-M (Tualatin)
+ on systems which have an Intel 440BX/ZX/MX southbridge.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_P4_CLOCKMOD
+ tristate "Intel Pentium 4 clock modulation"
+ help
+ This adds the CPUFreq driver for Intel Pentium 4 / XEON
+ processors. When enabled it will lower CPU temperature by skipping
+ clocks.
+
+ This driver should only be used in exceptional circumstances
+ when very low power is needed, because it causes severe
+ slowdowns and noticeable latencies. Normally Speedstep should
+ be used instead.
+
+ To compile this driver as a module, choose M here: the
+ module will be called p4-clockmod.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ Unless you are absolutely sure say N.
+
+config X86_CPUFREQ_NFORCE2
+ tristate "nVidia nForce2 FSB changing"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for FSB changing on nVidia nForce2
+ platforms.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_LONGRUN
+ tristate "Transmeta LongRun"
+ depends on X86_32
+ help
+ This adds the CPUFreq driver for Transmeta Crusoe and Efficeon processors
+ which support LongRun.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_LONGHAUL
+ tristate "VIA Cyrix III Longhaul"
+ depends on X86_32 && ACPI_PROCESSOR
+ help
+ This adds the CPUFreq driver for VIA Samuel/CyrixIII,
+ VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
+ processors.
+
+ For details, take a look at <file:Documentation/cpu-freq/>.
+
+ If in doubt, say N.
+
+config X86_E_POWERSAVER
+ tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
+ depends on X86_32 && ACPI_PROCESSOR
+ help
+ This adds the CPUFreq driver for VIA C7 processors. However, this driver
+ does not have any safeguards to prevent operating the CPU out of spec
+ and is thus considered dangerous. Please use the regular ACPI cpufreq
+ driver, enabled by CONFIG_X86_ACPI_CPUFREQ.
+
+ If in doubt, say N.
+
+comment "shared options"
+
+config X86_SPEEDSTEP_LIB
+ tristate
+ default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD)
+
+config X86_SPEEDSTEP_RELAXED_CAP_CHECK
+ bool "Relaxed speedstep capability checks"
+ depends on X86_32 && (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
+ help
+ Don't perform all the checks that would normally be done to
+ verify that a system is SpeedStep capable. Some ancient or
+ strange systems, though speedstep capable, don't always indicate
+ that they are. This option lets the probing code bypass some of
+ those checks if the parameter "relaxed_check=1" is passed to the
+ module.
+
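
The "relaxed_check=1" knob mentioned in X86_SPEEDSTEP_RELAXED_CAP_CHECK is an ordinary module parameter. As a rough sketch of the idiom (the real definition lives in the speedstep library code and may use different permissions):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* 0 = full capability checks (default); 1 = let borderline systems through. */
static int relaxed_check;
module_param(relaxed_check, int, 0444);
MODULE_PARM_DESC(relaxed_check,
		 "Relax the SpeedStep capability checks (pass relaxed_check=1)");

The probing code then consults this flag before failing a capability test it would otherwise treat as fatal.
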
diff --git a/kernel/drivers/cpufreq/Makefile b/kernel/drivers/cpufreq/Makefile
new file mode 100644
index 000000000..cdce92ae2
--- /dev/null
+++ b/kernel/drivers/cpufreq/Makefile
@@ -0,0 +1,107 @@
+# CPUfreq core
+obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o
+obj-$(CONFIG_PM_OPP) += cpufreq_opp.o
+
+# CPUfreq stats
+obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
+
+# CPUfreq governors
+obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
+obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
+obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
+obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
+obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
+
+obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+
+##################################################################################
+# x86 drivers.
+# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
+# K8 systems. This is still the case, but acpi-cpufreq errors out on those
+# systems so that powernow-k8 can load instead. ACPI is preferred to all other
+# hardware-specific drivers.
+# speedstep-* is preferred over p4-clockmod.
+
+obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
+obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
+obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
+obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
+obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
+obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o
+obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
+obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
+obj-$(CONFIG_X86_LONGRUN) += longrun.o
+obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o
+obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o
+obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o
+obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
+obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
+obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
+obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
+obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
+obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
+obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o
+
+##################################################################################
+# ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
+# big LITTLE per platform glues. Keep DT_BL_CPUFREQ as the last entry in all big
+# LITTLE drivers, so that it is probed last.
+obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+
+obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
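+# arm-exynos-cpufreq.o is a kbuild composite object: the common core plus
+# the per-SoC parts selected below.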
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += arm-exynos-cpufreq.o
+arm-exynos-cpufreq-y := exynos-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
+obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o
+obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
+obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
+obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
+obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
+obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
+obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
+obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
+obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
+obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
+obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
+
+##################################################################################
+# PowerPC platform drivers
+obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
+ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
+obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
+obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
+obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
+obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o
+obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
+
+##################################################################################
+# Other platform drivers
+obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o
+obj-$(CONFIG_BFIN_CPU_FREQ) += blackfin-cpufreq.o
+obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
+obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
+obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
+obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
+obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o
+obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
+obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
+obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
+obj-$(CONFIG_UNICORE32) += unicore2-cpufreq.o
diff --git a/kernel/drivers/cpufreq/acpi-cpufreq.c b/kernel/drivers/cpufreq/acpi-cpufreq.c
new file mode 100644
index 000000000..b0c18ed8d
--- /dev/null
+++ b/kernel/drivers/cpufreq/acpi-cpufreq.c
@@ -0,0 +1,1011 @@
+/*
+ * acpi-cpufreq.c - ACPI Processor P-States Driver
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/dmi.h>
+#include <linux/slab.h>
+
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <acpi/processor.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+
+MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
+MODULE_DESCRIPTION("ACPI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+#define PFX "acpi-cpufreq: "
+
+enum {
+ UNDEFINED_CAPABLE = 0,
+ SYSTEM_INTEL_MSR_CAPABLE,
+ SYSTEM_AMD_MSR_CAPABLE,
+ SYSTEM_IO_CAPABLE,
+};
+
+#define INTEL_MSR_RANGE (0xffff)
+#define AMD_MSR_RANGE (0x7)
+
+#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
+
+struct acpi_cpufreq_data {
+ struct acpi_processor_performance *acpi_data;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int resume;
+ unsigned int cpu_feature;
+ cpumask_var_t freqdomain_cpus;
+};
+
+static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
+
+/* acpi_perf_data is a pointer to percpu data. */
+static struct acpi_processor_performance __percpu *acpi_perf_data;
+
+static struct cpufreq_driver acpi_cpufreq_driver;
+
+static unsigned int acpi_pstate_strict;
+static struct msr __percpu *msrs;
+
+static bool boost_state(unsigned int cpu)
+{
+ u32 lo, hi;
+ u64 msr;
+
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+ msr = lo | ((u64)hi << 32);
+ return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+ case X86_VENDOR_AMD:
+ rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+ msr = lo | ((u64)hi << 32);
+ return !(msr & MSR_K7_HWCR_CPB_DIS);
+ }
+ return false;
+}
+
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+ u32 cpu;
+ u32 msr_addr;
+ u64 msr_mask;
+
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ msr_addr = MSR_IA32_MISC_ENABLE;
+ msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+ break;
+ case X86_VENDOR_AMD:
+ msr_addr = MSR_K7_HWCR;
+ msr_mask = MSR_K7_HWCR_CPB_DIS;
+ break;
+ default:
+ return;
+ }
+
+ rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
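+	/*
+	 * Both vendors expose a "boost disable" bit: enabling boost clears
+	 * the mask bit, disabling boost sets it.
+	 */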
+ for_each_cpu(cpu, cpumask) {
+ struct msr *reg = per_cpu_ptr(msrs, cpu);
+ if (enable)
+ reg->q &= ~msr_mask;
+ else
+ reg->q |= msr_mask;
+ }
+
+ wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+static int _store_boost(int val)
+{
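+	/* Block CPU hotplug so the online mask stays stable across the update. */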
+ get_online_cpus();
+ boost_set_msrs(val, cpu_online_mask);
+ put_online_cpus();
+ pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+ return 0;
+}
+
+static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+
+ return cpufreq_show_cpus(data->freqdomain_cpus, buf);
+}
+
+cpufreq_freq_attr_ro(freqdomain_cpus);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_boost(const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val = 0;
+
+ if (!acpi_cpufreq_driver.boost_supported)
+ return -EINVAL;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret || (val > 1))
+ return -EINVAL;
+
+ _store_boost((int) val);
+
+ return count;
+}
+
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+ size_t count)
+{
+ return store_boost(buf, count);
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
+}
+
+cpufreq_freq_attr_rw(cpb);
+#endif
+
+static int check_est_cpu(unsigned int cpuid)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+ return cpu_has(cpu, X86_FEATURE_EST);
+}
+
+static int check_amd_hwpstate_cpu(unsigned int cpuid)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+ return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+}
+
+static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
+{
+ struct acpi_processor_performance *perf;
+ int i;
+
+ perf = data->acpi_data;
+
+ for (i = 0; i < perf->state_count; i++) {
+ if (value == perf->states[i].status)
+ return data->freq_table[i].frequency;
+ }
+ return 0;
+}
+
+static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
+{
+ struct cpufreq_frequency_table *pos;
+ struct acpi_processor_performance *perf;
+
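+	/*
+	 * Only the low bits of the MSR encode the current P-state:
+	 * 16 bits on Intel, a 3-bit P-state field on AMD.
+	 */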
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ msr &= AMD_MSR_RANGE;
+ else
+ msr &= INTEL_MSR_RANGE;
+
+ perf = data->acpi_data;
+
+ cpufreq_for_each_entry(pos, data->freq_table)
+ if (msr == perf->states[pos->driver_data].status)
+ return pos->frequency;
+ return data->freq_table[0].frequency;
+}
+
+static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
+{
+ switch (data->cpu_feature) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ case SYSTEM_AMD_MSR_CAPABLE:
+ return extract_msr(val, data);
+ case SYSTEM_IO_CAPABLE:
+ return extract_io(val, data);
+ default:
+ return 0;
+ }
+}
+
+struct msr_addr {
+ u32 reg;
+};
+
+struct io_addr {
+ u16 port;
+ u8 bit_width;
+};
+
+struct drv_cmd {
+ unsigned int type;
+ const struct cpumask *mask;
+ union {
+ struct msr_addr msr;
+ struct io_addr io;
+ } addr;
+ u32 val;
+};
+
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
+{
+ struct drv_cmd *cmd = _cmd;
+ u32 h;
+
+ switch (cmd->type) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ case SYSTEM_AMD_MSR_CAPABLE:
+ rdmsr(cmd->addr.msr.reg, cmd->val, h);
+ break;
+ case SYSTEM_IO_CAPABLE:
+ acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
+ &cmd->val,
+ (u32)cmd->addr.io.bit_width);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
+{
+ struct drv_cmd *cmd = _cmd;
+ u32 lo, hi;
+
+ switch (cmd->type) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ rdmsr(cmd->addr.msr.reg, lo, hi);
+ lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
+ wrmsr(cmd->addr.msr.reg, lo, hi);
+ break;
+ case SYSTEM_AMD_MSR_CAPABLE:
+ wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+ break;
+ case SYSTEM_IO_CAPABLE:
+ acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
+ cmd->val,
+ (u32)cmd->addr.io.bit_width);
+ break;
+ default:
+ break;
+ }
+}
+
+static void drv_read(struct drv_cmd *cmd)
+{
+ int err;
+ cmd->val = 0;
+
+ err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
+ WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
+}
+
+static void drv_write(struct drv_cmd *cmd)
+{
+ int this_cpu;
+
+ this_cpu = get_cpu();
+ if (cpumask_test_cpu(this_cpu, cmd->mask))
+ do_drv_write(cmd);
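+	/* smp_call_function_many() skips the calling CPU, hence the direct call above. */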
+ smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+ put_cpu();
+}
+
+static u32 get_cur_val(const struct cpumask *mask)
+{
+ struct acpi_processor_performance *perf;
+ struct drv_cmd cmd;
+
+ if (unlikely(cpumask_empty(mask)))
+ return 0;
+
+ switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+ cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
+ break;
+ case SYSTEM_AMD_MSR_CAPABLE:
+ cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+ cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
+ break;
+ case SYSTEM_IO_CAPABLE:
+ cmd.type = SYSTEM_IO_CAPABLE;
+ perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+ cmd.addr.io.port = perf->control_register.address;
+ cmd.addr.io.bit_width = perf->control_register.bit_width;
+ break;
+ default:
+ return 0;
+ }
+
+ cmd.mask = mask;
+ drv_read(&cmd);
+
+ pr_debug("get_cur_val = %u\n", cmd.val);
+
+ return cmd.val;
+}
+
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
+ unsigned int freq;
+ unsigned int cached_freq;
+
+ pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+
+ if (unlikely(data == NULL ||
+ data->acpi_data == NULL || data->freq_table == NULL)) {
+ return 0;
+ }
+
+ cached_freq = data->freq_table[data->acpi_data->state].frequency;
+ freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+ if (freq != cached_freq) {
+ /*
+ * The dreaded BIOS frequency change behind our back.
+ * Force set the frequency on next target call.
+ */
+ data->resume = 1;
+ }
+
+ pr_debug("cur freq = %u\n", freq);
+
+ return freq;
+}
+
+static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
+ struct acpi_cpufreq_data *data)
+{
+ unsigned int cur_freq;
+ unsigned int i;
+
+ for (i = 0; i < 100; i++) {
+ cur_freq = extract_freq(get_cur_val(mask), data);
+ if (cur_freq == freq)
+ return 1;
+ udelay(10);
+ }
+ return 0;
+}
+
+static int acpi_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+ struct acpi_processor_performance *perf;
+ struct drv_cmd cmd;
+ unsigned int next_perf_state = 0; /* Index into perf table */
+ int result = 0;
+
+ if (unlikely(data == NULL ||
+ data->acpi_data == NULL || data->freq_table == NULL)) {
+ return -ENODEV;
+ }
+
+ perf = data->acpi_data;
+ next_perf_state = data->freq_table[index].driver_data;
+ if (perf->state == next_perf_state) {
+ if (unlikely(data->resume)) {
+ pr_debug("Called after resume, resetting to P%d\n",
+ next_perf_state);
+ data->resume = 0;
+ } else {
+ pr_debug("Already at target state (P%d)\n",
+ next_perf_state);
+ goto out;
+ }
+ }
+
+ switch (data->cpu_feature) {
+ case SYSTEM_INTEL_MSR_CAPABLE:
+ cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+ cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
+ cmd.val = (u32) perf->states[next_perf_state].control;
+ break;
+ case SYSTEM_AMD_MSR_CAPABLE:
+ cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+ cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
+ cmd.val = (u32) perf->states[next_perf_state].control;
+ break;
+ case SYSTEM_IO_CAPABLE:
+ cmd.type = SYSTEM_IO_CAPABLE;
+ cmd.addr.io.port = perf->control_register.address;
+ cmd.addr.io.bit_width = perf->control_register.bit_width;
+ cmd.val = (u32) perf->states[next_perf_state].control;
+ break;
+ default:
+ result = -ENODEV;
+ goto out;
+ }
+
+ /* cpufreq holds the hotplug lock, so we are safe from here on */
+ if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
+ cmd.mask = policy->cpus;
+ else
+ cmd.mask = cpumask_of(policy->cpu);
+
+ drv_write(&cmd);
+
+ if (acpi_pstate_strict) {
+ if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+ data)) {
+ pr_debug("acpi_cpufreq_target failed (%d)\n",
+ policy->cpu);
+ result = -EAGAIN;
+ }
+ }
+
+ if (!result)
+ perf->state = next_perf_state;
+
+out:
+ return result;
+}
+
+static unsigned long
+acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
+{
+ struct acpi_processor_performance *perf = data->acpi_data;
+
+ if (cpu_khz) {
+ /* search the closest match to cpu_khz */
+ unsigned int i;
+ unsigned long freq;
+ unsigned long freqn = perf->states[0].core_frequency * 1000;
+
+ for (i = 0; i < (perf->state_count-1); i++) {
+ freq = freqn;
+ freqn = perf->states[i+1].core_frequency * 1000;
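+			/*
+			 * States are sorted highest frequency first; pick
+			 * state i once cpu_khz is above the midpoint of
+			 * states i and i+1.
+			 */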
+ if ((2 * cpu_khz) > (freqn + freq)) {
+ perf->state = i;
+ return freq;
+ }
+ }
+ perf->state = perf->state_count-1;
+ return freqn;
+ } else {
+ /* assume CPU is at P0... */
+ perf->state = 0;
+ return perf->states[0].core_frequency * 1000;
+ }
+}
+
+static void free_acpi_perf_data(void)
+{
+ unsigned int i;
+
+ /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
+ for_each_possible_cpu(i)
+ free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
+ ->shared_cpu_map);
+ free_percpu(acpi_perf_data);
+}
+
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+ void *hcpu)
+{
+ unsigned cpu = (long)hcpu;
+ const struct cpumask *cpumask;
+
+ cpumask = get_cpu_mask(cpu);
+
+ /*
+ * Clear the boost-disable bit on the CPU_DOWN path so that
+ * this cpu cannot block the remaining ones from boosting. On
+ * the CPU_UP path we simply keep the boost-disable flag in
+ * sync with the current global state.
+ */
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ boost_set_msrs(1, cpumask);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+
+static struct notifier_block boost_nb = {
+ .notifier_call = boost_notify,
+};
+
+/*
+ * acpi_cpufreq_early_init - initialize ACPI P-States library
+ *
+ * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
+ * in order to determine correct frequency and voltage pairings. We can
+ * do _PDC and _PSD and find out the processor dependency for the
+ * actual init that will happen later...
+ */
+static int __init acpi_cpufreq_early_init(void)
+{
+ unsigned int i;
+ pr_debug("acpi_cpufreq_early_init\n");
+
+ acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+ if (!acpi_perf_data) {
+ pr_debug("Memory allocation error for acpi_perf_data.\n");
+ return -ENOMEM;
+ }
+ for_each_possible_cpu(i) {
+ if (!zalloc_cpumask_var_node(
+ &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
+ GFP_KERNEL, cpu_to_node(i))) {
+
+ /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
+ free_acpi_perf_data();
+ return -ENOMEM;
+ }
+ }
+
+ /* Do initialization in ACPI core */
+ acpi_processor_preregister_performance(acpi_perf_data);
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Some BIOSes do SW_ANY coordination internally, either setting it up in
+ * hardware or doing it in BIOS firmware, without informing the OS. If not
+ * detected, this has the side effect of making the CPU run at a different
+ * speed than the OS intended. Detect it and handle it cleanly.
+ */
+static int bios_with_sw_any_bug;
+
+static int sw_any_bug_found(const struct dmi_system_id *d)
+{
+ bios_with_sw_any_bug = 1;
+ return 0;
+}
+
+static const struct dmi_system_id sw_any_bug_dmi_table[] = {
+ {
+ .callback = sw_any_bug_found,
+ .ident = "Supermicro Server X6DLP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_BIOS_VERSION, "080010"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
+ },
+ },
+ { }
+};
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+ /* Intel Xeon Processor 7100 Series Specification Update
+ * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+ * AL30: A Machine Check Exception (MCE) Occurring during an
+ * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+ * Both Processor Cores to Lock Up. */
+ if (c->x86_vendor == X86_VENDOR_INTEL) {
+ if ((c->x86 == 15) &&
+ (c->x86_model == 6) &&
+ (c->x86_mask == 8)) {
+ printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+ "Xeon(R) 7100 Errata AL30, processors may "
+ "lock up on frequency changes: disabling "
+ "acpi-cpufreq.\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+#endif
+
+static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int i;
+ unsigned int valid_states = 0;
+ unsigned int cpu = policy->cpu;
+ struct acpi_cpufreq_data *data;
+ unsigned int result = 0;
+ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
+ struct acpi_processor_performance *perf;
+#ifdef CONFIG_SMP
+ static int blacklisted;
+#endif
+
+ pr_debug("acpi_cpufreq_cpu_init\n");
+
+#ifdef CONFIG_SMP
+ if (blacklisted)
+ return blacklisted;
+ blacklisted = acpi_cpufreq_blacklist(c);
+ if (blacklisted)
+ return blacklisted;
+#endif
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
+ result = -ENOMEM;
+ goto err_free;
+ }
+
+ data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
+ per_cpu(acfreq_data, cpu) = data;
+
+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+ acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+ result = acpi_processor_register_performance(data->acpi_data, cpu);
+ if (result)
+ goto err_free_mask;
+
+ perf = data->acpi_data;
+ policy->shared_type = perf->shared_type;
+
+ /*
+ * Will let policy->cpus know about dependency only when software
+ * coordination is required.
+ */
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
+ policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+ cpumask_copy(policy->cpus, perf->shared_cpu_map);
+ }
+ cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);
+
+#ifdef CONFIG_SMP
+ dmi_check_system(sw_any_bug_dmi_table);
+ if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ cpumask_copy(policy->cpus, cpu_core_mask(cpu));
+ }
+
+ if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+ cpumask_clear(policy->cpus);
+ cpumask_set_cpu(cpu, policy->cpus);
+ cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
+ policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+ pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+ }
+#endif
+
+ /* capability check */
+ if (perf->state_count <= 1) {
+ pr_debug("No P-States\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ if (perf->control_register.space_id != perf->status_register.space_id) {
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ switch (perf->control_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 0xf) {
+ pr_debug("AMD K8 systems must use native drivers.\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+ pr_debug("SYSTEM IO addr space\n");
+ data->cpu_feature = SYSTEM_IO_CAPABLE;
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ pr_debug("HARDWARE addr space\n");
+ if (check_est_cpu(cpu)) {
+ data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+ break;
+ }
+ if (check_amd_hwpstate_cpu(cpu)) {
+ data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+ break;
+ }
+ result = -ENODEV;
+ goto err_unreg;
+ default:
+ pr_debug("Unknown addr space %d\n",
+ (u32) (perf->control_register.space_id));
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ data->freq_table = kzalloc(sizeof(*data->freq_table) *
+ (perf->state_count+1), GFP_KERNEL);
+ if (!data->freq_table) {
+ result = -ENOMEM;
+ goto err_unreg;
+ }
+
+ /*
+ * Detect transition latency: ACPI reports it per P-state in
+ * microseconds, while cpufreq uses nanoseconds, hence the factor
+ * of 1000. Use the worst (largest) latency across all P-states.
+ */
+ policy->cpuinfo.transition_latency = 0;
+ for (i = 0; i < perf->state_count; i++) {
+ if ((perf->states[i].transition_latency * 1000) >
+ policy->cpuinfo.transition_latency)
+ policy->cpuinfo.transition_latency =
+ perf->states[i].transition_latency * 1000;
+ }
+
+ /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
+ if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
+ policy->cpuinfo.transition_latency > 20 * 1000) {
+ policy->cpuinfo.transition_latency = 20 * 1000;
+ printk_once(KERN_INFO
+ "P-state transition latency capped at 20 uS\n");
+ }
+
+ /*
+ * Table init: ACPI _PSS entries are sorted by descending frequency,
+ * so skip any entry that fails to strictly decrease; it is either a
+ * duplicate or bogus BIOS data.
+ */
+ for (i = 0; i < perf->state_count; i++) {
+ if (i > 0 && perf->states[i].core_frequency >=
+ data->freq_table[valid_states-1].frequency / 1000)
+ continue;
+
+ data->freq_table[valid_states].driver_data = i;
+ data->freq_table[valid_states].frequency =
+ perf->states[i].core_frequency * 1000;
+ valid_states++;
+ }
+ data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+ perf->state = 0;
+
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
+ if (result)
+ goto err_freqfree;
+
+ if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+ printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
+ switch (perf->control_register.space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ /*
+ * The core will not set policy->cur, because
+ * cpufreq_driver->get is NULL, so we need to set it here.
+ * However, we have to guess it, because the current speed is
+ * unknown and not detectable via IO ports.
+ */
+ policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
+ break;
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+ break;
+ default:
+ break;
+ }
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
+ for (i = 0; i < perf->state_count; i++)
+ pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
+ (i == perf->state ? '*' : ' '), i,
+ (u32) perf->states[i].core_frequency,
+ (u32) perf->states[i].power,
+ (u32) perf->states[i].transition_latency);
+
+ /*
+ * the first call to ->target() should result in us actually
+ * writing something to the appropriate registers.
+ */
+ data->resume = 1;
+
+ return result;
+
+err_freqfree:
+ kfree(data->freq_table);
+err_unreg:
+ acpi_processor_unregister_performance(perf, cpu);
+err_free_mask:
+ free_cpumask_var(data->freqdomain_cpus);
+err_free:
+ kfree(data);
+ per_cpu(acfreq_data, cpu) = NULL;
+
+ return result;
+}
+
+static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+
+ pr_debug("acpi_cpufreq_cpu_exit\n");
+
+ if (data) {
+ per_cpu(acfreq_data, policy->cpu) = NULL;
+ acpi_processor_unregister_performance(data->acpi_data,
+ policy->cpu);
+ free_cpumask_var(data->freqdomain_cpus);
+ kfree(data->freq_table);
+ kfree(data);
+ }
+
+ return 0;
+}
+
+static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+
+ pr_debug("acpi_cpufreq_resume\n");
+
+ data->resume = 1;
+
+ return 0;
+}
+
+static struct freq_attr *acpi_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &freqdomain_cpus,
+ NULL, /* this is a placeholder for cpb, do not remove */
+ NULL,
+};
+
+static struct cpufreq_driver acpi_cpufreq_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
+ .bios_limit = acpi_processor_get_bios_limit,
+ .init = acpi_cpufreq_cpu_init,
+ .exit = acpi_cpufreq_cpu_exit,
+ .resume = acpi_cpufreq_resume,
+ .name = "acpi-cpufreq",
+ .attr = acpi_cpufreq_attr,
+ .set_boost = _store_boost,
+};
+
+static void __init acpi_cpufreq_boost_init(void)
+{
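+ /*
+ * CPB is AMD's Core Performance Boost, IDA is Intel Dynamic
+ * Acceleration (turbo); either one indicates a boost capability
+ * that can be toggled through the boost MSRs below.
+ */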
+ if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+ msrs = msrs_alloc();
+
+ if (!msrs)
+ return;
+
+ acpi_cpufreq_driver.boost_supported = true;
+ acpi_cpufreq_driver.boost_enabled = boost_state(0);
+
+ cpu_notifier_register_begin();
+
+ /* Force all MSRs to the same value */
+ boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
+ cpu_online_mask);
+
+ __register_cpu_notifier(&boost_nb);
+
+ cpu_notifier_register_done();
+ }
+}
+
+static void acpi_cpufreq_boost_exit(void)
+{
+ if (msrs) {
+ unregister_cpu_notifier(&boost_nb);
+
+ msrs_free(msrs);
+ msrs = NULL;
+ }
+}
+
+static int __init acpi_cpufreq_init(void)
+{
+ int ret;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ /* don't keep reloading if cpufreq_driver exists */
+ if (cpufreq_get_current_driver())
+ return -EEXIST;
+
+ pr_debug("acpi_cpufreq_init\n");
+
+ ret = acpi_cpufreq_early_init();
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+ /* This is a sysfs file with a strange name and even stranger
+ * semantics: it is instantiated per CPU, but has a system-global
+ * effect. Enable it only on AMD CPUs for compatibility reasons and
+ * only if configured. This is considered legacy code, which will
+ * probably be removed at some point in the future.
+ */
+ if (check_amd_hwpstate_cpu(0)) {
+ struct freq_attr **iter;
+
+ pr_debug("adding sysfs entry for cpb\n");
+
+ for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
+ ;
+
+ /* make sure there is a terminator behind it */
+ if (iter[1] == NULL)
+ *iter = &cpb;
+ }
+#endif
+ acpi_cpufreq_boost_init();
+
+ ret = cpufreq_register_driver(&acpi_cpufreq_driver);
+ if (ret) {
+ free_acpi_perf_data();
+ acpi_cpufreq_boost_exit();
+ }
+ return ret;
+}
+
+static void __exit acpi_cpufreq_exit(void)
+{
+ pr_debug("acpi_cpufreq_exit\n");
+
+ acpi_cpufreq_boost_exit();
+
+ cpufreq_unregister_driver(&acpi_cpufreq_driver);
+
+ free_acpi_perf_data();
+}
+
+module_param(acpi_pstate_strict, uint, 0644);
+MODULE_PARM_DESC(acpi_pstate_strict,
+ "Value 0 or non-zero. Non-zero -> strict ACPI checks are "
+ "performed during frequency changes.");
+
+late_initcall(acpi_cpufreq_init);
+module_exit(acpi_cpufreq_exit);
+
+static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_ACPI),
+ X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, },
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+MODULE_ALIAS("acpi");
diff --git a/kernel/drivers/cpufreq/amd_freq_sensitivity.c b/kernel/drivers/cpufreq/amd_freq_sensitivity.c
new file mode 100644
index 000000000..f6b79ab00
--- /dev/null
+++ b/kernel/drivers/cpufreq/amd_freq_sensitivity.c
@@ -0,0 +1,148 @@
+/*
+ * amd_freq_sensitivity.c: AMD frequency sensitivity feedback powersave bias
+ * for the ondemand governor.
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Jacob Shin <jacob.shin@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/percpu-defs.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+
+#include "cpufreq_governor.h"
+
+#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
+#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
+#define CLASS_CODE_SHIFT 56
+#define POWERSAVE_BIAS_MAX 1000
+#define POWERSAVE_BIAS_DEF 400
+
+struct cpu_data_t {
+ u64 actual;
+ u64 reference;
+ unsigned int freq_prev;
+};
+
+static DEFINE_PER_CPU(struct cpu_data_t, cpu_data);
+
+static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
+ unsigned int freq_next,
+ unsigned int relation)
+{
+ int sensitivity;
+ long d_actual, d_reference;
+ struct msr actual, reference;
+ struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
+ struct dbs_data *od_data = policy->governor_data;
+ struct od_dbs_tuners *od_tuners = od_data->tuners;
+ struct od_cpu_dbs_info_s *od_info =
+ od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
+
+ if (!od_info->freq_table)
+ return freq_next;
+
+ rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
+ &actual.l, &actual.h);
+ rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
+ &reference.l, &reference.h);
+ actual.h &= 0x00ffffff;
+ reference.h &= 0x00ffffff;
+
+ /* counter wrapped around, so stay on current frequency */
+ if (actual.q < data->actual || reference.q < data->reference) {
+ freq_next = policy->cur;
+ goto out;
+ }
+
+ d_actual = actual.q - data->actual;
+ d_reference = reference.q - data->reference;
+
+ /* divide by 0, so stay on current frequency as well */
+ if (d_reference == 0) {
+ freq_next = policy->cur;
+ goto out;
+ }
+
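+ /*
+ * sensitivity approaches POWERSAVE_BIAS_MAX (1000) for fully
+ * CPU-bound work (d_actual == d_reference) and 0 for fully stalled
+ * work (d_actual == 0); e.g. d_actual = 600 and d_reference = 1000
+ * give 1000 - 1000 * 400 / 1000 = 600.
+ */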
+ sensitivity = POWERSAVE_BIAS_MAX -
+ (POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
+
+ /* clamp() returns the clamped value; assign it back to take effect */
+ sensitivity = clamp(sensitivity, 0, POWERSAVE_BIAS_MAX);
+
+ /* this workload is not CPU bound, so choose a lower freq */
+ if (sensitivity < od_tuners->powersave_bias) {
+ if (data->freq_prev == policy->cur)
+ freq_next = policy->cur;
+
+ if (freq_next > policy->cur)
+ freq_next = policy->cur;
+ else if (freq_next < policy->cur)
+ freq_next = policy->min;
+ else {
+ unsigned int index;
+
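+ /* pick the highest table frequency strictly below policy->cur */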
+ cpufreq_frequency_table_target(policy,
+ od_info->freq_table, policy->cur - 1,
+ CPUFREQ_RELATION_H, &index);
+ freq_next = od_info->freq_table[index].frequency;
+ }
+
+ data->freq_prev = freq_next;
+ } else
+ data->freq_prev = 0;
+
+out:
+ data->actual = actual.q;
+ data->reference = reference.q;
+ return freq_next;
+}
+
+static int __init amd_freq_sensitivity_init(void)
+{
+ u64 val;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+
+ if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ return -ENODEV;
+
+ if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+ return -ENODEV;
+
+ if (!(val >> CLASS_CODE_SHIFT))
+ return -ENODEV;
+
+ od_register_powersave_bias_handler(amd_powersave_bias_target,
+ POWERSAVE_BIAS_DEF);
+ return 0;
+}
+late_initcall(amd_freq_sensitivity_init);
+
+static void __exit amd_freq_sensitivity_exit(void)
+{
+ od_unregister_powersave_bias_handler();
+}
+module_exit(amd_freq_sensitivity_exit);
+
+static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);
+
+MODULE_AUTHOR("Jacob Shin <jacob.shin@amd.com>");
+MODULE_DESCRIPTION("AMD frequency sensitivity feedback powersave bias for "
+ "the ondemand governor.");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/arm_big_little.c b/kernel/drivers/cpufreq/arm_big_little.c
new file mode 100644
index 000000000..e1a6ba66a
--- /dev/null
+++ b/kernel/drivers/cpufreq/arm_big_little.c
@@ -0,0 +1,605 @@
+/*
+ * ARM big.LITTLE Platforms CPUFreq support
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+#include <asm/bL_switcher.h>
+
+#include "arm_big_little.h"
+
+/* Currently we support only two clusters */
+#define A15_CLUSTER 0
+#define A7_CLUSTER 1
+#define MAX_CLUSTERS 2
+
+#ifdef CONFIG_BL_SWITCHER
+static bool bL_switching_enabled;
+#define is_bL_switching_enabled() bL_switching_enabled
+#define set_switching_enabled(x) (bL_switching_enabled = (x))
+#else
+#define is_bL_switching_enabled() false
+#define set_switching_enabled(x) do { } while (0)
+#endif
+
+#define ACTUAL_FREQ(cluster, freq) (((cluster) == A7_CLUSTER) ? ((freq) << 1) : (freq))
+#define VIRT_FREQ(cluster, freq) (((cluster) == A7_CLUSTER) ? ((freq) >> 1) : (freq))
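+
+/*
+ * In switcher mode both clusters share one virtual frequency scale:
+ * A7 physical rates are halved when presented to cpufreq (VIRT_FREQ)
+ * and doubled again when programmed (ACTUAL_FREQ), while A15 rates map
+ * 1:1. E.g. a physical 1000 MHz on the A7 is exposed as a virtual
+ * 500 MHz.
+ */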
+
+static struct cpufreq_arm_bL_ops *arm_bL_ops;
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static atomic_t cluster_usage[MAX_CLUSTERS + 1];
+
+static unsigned int clk_big_min; /* Minimum clock frequency (Big) */
+static unsigned int clk_little_max; /* Maximum clock frequency (Little) */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+
+static inline int raw_cpu_to_cluster(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static inline int cpu_to_cluster(int cpu)
+{
+ return is_bL_switching_enabled() ?
+ MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
+}
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+
+ if ((cluster == per_cpu(physical_cluster, j)) &&
+ (max_freq < cpu_freq))
+ max_freq = cpu_freq;
+ }
+
+ pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
+ max_freq);
+
+ return max_freq;
+}
+
+static unsigned int clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+ u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A7 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+ pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
+ cur_cluster, rate);
+
+ return rate;
+}
+
+static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
+{
+ if (is_bL_switching_enabled()) {
+ pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
+ cpu));
+
+ return per_cpu(cpu_last_req_freq, cpu);
+ } else {
+ return clk_get_cpu_rate(cpu);
+ }
+}
+
+static unsigned int
+bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+ bool bLs = is_bL_switching_enabled();
+
+ mutex_lock(&cluster_lock[new_cluster]);
+
+ if (bLs) {
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
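+ /*
+ * The cluster clock is shared, so program the highest rate
+ * currently requested by any CPU on the new cluster.
+ */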
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else {
+ new_rate = rate;
+ }
+
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
+ __func__, cpu, old_cluster, new_cluster, new_rate);
+
+ ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
+ if (WARN_ON(ret)) {
+ pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
+ new_cluster);
+ if (bLs) {
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
+ __func__, cpu, old_cluster, new_cluster);
+
+ /* Switch cluster */
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+
+ if (new_rate) {
+ pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
+ __func__, old_cluster, new_rate);
+
+ ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
+ if (ret)
+ pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
+ __func__, ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+
+ return 0;
+}
+
+/* Set clock frequency */
+static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
+ unsigned int freqs_new;
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs_new = freq_table[cur_cluster][index].frequency;
+
+ if (is_bL_switching_enabled()) {
+ if ((actual_cluster == A15_CLUSTER) &&
+ (freqs_new < clk_big_min)) {
+ new_cluster = A7_CLUSTER;
+ } else if ((actual_cluster == A7_CLUSTER) &&
+ (freqs_new > clk_little_max)) {
+ new_cluster = A15_CLUSTER;
+ }
+ }
+
+ return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+}
+
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
+
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
+
+ return count;
+}
+
+/* get the minimum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_min(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ uint32_t min_freq = ~0;
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency < min_freq)
+ min_freq = pos->frequency;
+ return min_freq;
+}
+
+/* get the maximum frequency in the cpufreq_frequency_table */
+static inline u32 get_table_max(struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ uint32_t max_freq = 0;
+ cpufreq_for_each_entry(pos, table)
+ if (pos->frequency > max_freq)
+ max_freq = pos->frequency;
+ return max_freq;
+}
+
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
+ j++) {
+ table[k].frequency = VIRT_FREQ(i,
+ freq_table[i][j].frequency);
+ pr_debug("%s: index: %d, freq: %d\n", __func__, k,
+ table[k].frequency);
+ k++;
+ }
+ }
+
+ table[k].driver_data = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
+
+ pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);
+
+ return 0;
+}
+
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+
+ if (!freq_table[cluster])
+ return;
+
+ clk_put(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ if (arm_bL_ops->free_opp_table)
+ arm_bL_ops->free_opp_table(cpu_dev);
+ dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i;
+
+ if (atomic_dec_return(&cluster_usage[cluster]))
+ return;
+
+ if (cluster < MAX_CLUSTERS)
+ return _put_cluster_clk_and_freq_table(cpu_dev);
+
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
+ }
+
+ /* free virtual table */
+ kfree(freq_table[cluster]);
+}
+
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
+ char name[14] = "cpu-cluster.";
+ int ret;
+
+ if (freq_table[cluster])
+ return 0;
+
+ ret = arm_bL_ops->init_opp_table(cpu_dev);
+ if (ret) {
+ dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
+ __func__, cpu_dev->id, ret);
+ goto out;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+ if (ret) {
+ dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
+ __func__, cpu_dev->id, ret);
+ goto free_opp_table;
+ }
+
+ name[12] = cluster + '0';
+ clk[cluster] = clk_get(cpu_dev, name);
+ if (!IS_ERR(clk[cluster])) {
+ dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
+ __func__, clk[cluster], freq_table[cluster],
+ cluster);
+ return 0;
+ }
+
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+ __func__, cpu_dev->id, cluster);
+ ret = PTR_ERR(clk[cluster]);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+free_opp_table:
+ if (arm_bL_ops->free_opp_table)
+ arm_bL_ops->free_opp_table(cpu_dev);
+out:
+ dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+ cluster);
+ return ret;
+}
+
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+ u32 cluster = cpu_to_cluster(cpu_dev->id);
+ int i, ret;
+
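+ /* Only the first user of a cluster performs the actual setup */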
+ if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+ return 0;
+
+ if (cluster < MAX_CLUSTERS) {
+ ret = _get_cluster_clk_and_freq_table(cpu_dev);
+ if (ret)
+ atomic_dec(&cluster_usage[cluster]);
+ return ret;
+ }
+
+ /*
+ * Get data for all clusters and fill virtual cluster with a merge of
+ * both
+ */
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ ret = _get_cluster_clk_and_freq_table(cdev);
+ if (ret)
+ goto put_clusters;
+ }
+
+ ret = merge_cluster_tables();
+ if (ret)
+ goto put_clusters;
+
+ /* Assuming 2 clusters, set clk_big_min and clk_little_max */
+ clk_big_min = get_table_min(freq_table[0]);
+ clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));
+
+ pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
+ __func__, cluster, clk_big_min, clk_little_max);
+
+ return 0;
+
+put_clusters:
+ for_each_present_cpu(i) {
+ struct device *cdev = get_cpu_device(i);
+ if (!cdev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, i);
+ return -ENODEV;
+ }
+
+ _put_cluster_clk_and_freq_table(cdev);
+ }
+
+ atomic_dec(&cluster_usage[cluster]);
+
+ return ret;
+}
+
+/* Per-CPU initialization */
+static int bL_cpufreq_init(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ struct device *cpu_dev;
+ int ret;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ ret = get_cluster_clk_and_freq_table(cpu_dev);
+ if (ret)
+ return ret;
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
+ if (ret) {
+ dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
+ policy->cpu, cur_cluster);
+ put_cluster_clk_and_freq_table(cpu_dev);
+ return ret;
+ }
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ int cpu;
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+ for_each_cpu(cpu, policy->cpus)
+ per_cpu(physical_cluster, cpu) = cur_cluster;
+ } else {
+ /* Assumption: during init, we are always running on A15 */
+ per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
+ }
+
+ if (arm_bL_ops->get_transition_latency)
+ policy->cpuinfo.transition_latency =
+ arm_bL_ops->get_transition_latency(cpu_dev);
+ else
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ if (is_bL_switching_enabled())
+ per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
+
+ dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
+ return 0;
+}
+
+static int bL_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ policy->cpu);
+ return -ENODEV;
+ }
+
+ put_cluster_clk_and_freq_table(cpu_dev);
+ dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
+
+ return 0;
+}
+
+static struct cpufreq_driver bL_cpufreq_driver = {
+ .name = "arm-big-little",
+ .flags = CPUFREQ_STICKY |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bL_cpufreq_set_target,
+ .get = bL_cpufreq_get_rate,
+ .init = bL_cpufreq_init,
+ .exit = bL_cpufreq_exit,
+ .attr = cpufreq_generic_attr,
+};
+
+static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_debug("%s: action: %ld\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&bL_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bL_switcher_notifier = {
+ .notifier_call = bL_cpufreq_switcher_notifier,
+};
+
+int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
+{
+ int ret, i;
+
+ if (arm_bL_ops) {
+ pr_debug("%s: Already registered: %s, exiting\n", __func__,
+ arm_bL_ops->name);
+ return -EBUSY;
+ }
+
+ if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
+ pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
+ return -ENODEV;
+ }
+
+ arm_bL_ops = ops;
+
+ ret = bL_switcher_get_enabled();
+ set_switching_enabled(ret);
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
+ ret = cpufreq_register_driver(&bL_cpufreq_driver);
+ if (ret) {
+ pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+ __func__, ops->name, ret);
+ arm_bL_ops = NULL;
+ } else {
+ ret = bL_switcher_register_notifier(&bL_switcher_notifier);
+ if (ret) {
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ arm_bL_ops = NULL;
+ } else {
+ pr_info("%s: Registered platform driver: %s\n",
+ __func__, ops->name);
+ }
+ }
+
+ bL_switcher_put_enabled();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bL_cpufreq_register);
+
+void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
+{
+ if (arm_bL_ops != ops) {
+ pr_err("%s: Registered with: %s, can't unregister, exiting\n",
+ __func__, arm_bL_ops->name);
+ return;
+ }
+
+ bL_switcher_get_enabled();
+ bL_switcher_unregister_notifier(&bL_switcher_notifier);
+ cpufreq_unregister_driver(&bL_cpufreq_driver);
+ bL_switcher_put_enabled();
+ pr_info("%s: Un-registered platform driver: %s\n", __func__,
+ arm_bL_ops->name);
+ arm_bL_ops = NULL;
+}
+EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/cpufreq/arm_big_little.h b/kernel/drivers/cpufreq/arm_big_little.h
new file mode 100644
index 000000000..a211f7db9
--- /dev/null
+++ b/kernel/drivers/cpufreq/arm_big_little.h
@@ -0,0 +1,43 @@
+/*
+ * ARM big.LITTLE platform's CPUFreq header file
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef CPUFREQ_ARM_BIG_LITTLE_H
+#define CPUFREQ_ARM_BIG_LITTLE_H
+
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct cpufreq_arm_bL_ops {
+ char name[CPUFREQ_NAME_LEN];
+
+ /*
+ * This must set up the OPP table for cpu_dev, in the same way as
+ * of_init_opp_table() does.
+ */
+ int (*init_opp_table)(struct device *cpu_dev);
+
+ /* Optional */
+ int (*get_transition_latency)(struct device *cpu_dev);
+ void (*free_opp_table)(struct device *cpu_dev);
+};
+
+int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
+void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
+
+#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/kernel/drivers/cpufreq/arm_big_little_dt.c b/kernel/drivers/cpufreq/arm_big_little_dt.c
new file mode 100644
index 000000000..36d91dba2
--- /dev/null
+++ b/kernel/drivers/cpufreq/arm_big_little_dt.c
@@ -0,0 +1,117 @@
+/*
+ * Generic big.LITTLE CPUFreq Interface driver
+ *
+ * It provides the necessary ops to the arm_big_little cpufreq driver and
+ * gets frequency information from the Device Tree. The frequency table in
+ * the DT must be in kHz.
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "arm_big_little.h"
+
+/* get cpu node with valid operating-points */
+static struct device_node *get_cpu_node_with_valid_op(int cpu)
+{
+ struct device_node *np = of_cpu_device_node_get(cpu);
+
+ if (!of_get_property(np, "operating-points", NULL)) {
+ of_node_put(np);
+ np = NULL;
+ }
+
+ return np;
+}
+
+static int dt_init_opp_table(struct device *cpu_dev)
+{
+ struct device_node *np;
+ int ret;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ pr_err("failed to find cpu%d node\n", cpu_dev->id);
+ return -ENOENT;
+ }
+
+ ret = of_init_opp_table(cpu_dev);
+ of_node_put(np);
+
+ return ret;
+}
+
+static int dt_get_transition_latency(struct device *cpu_dev)
+{
+ struct device_node *np;
+ u32 transition_latency = CPUFREQ_ETERNAL;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ pr_info("Failed to find cpu node. Use CPUFREQ_ETERNAL transition latency\n");
+ return CPUFREQ_ETERNAL;
+ }
+
+ of_property_read_u32(np, "clock-latency", &transition_latency);
+ of_node_put(np);
+
+ pr_debug("%s: clock-latency: %d\n", __func__, transition_latency);
+ return transition_latency;
+}
+
+static struct cpufreq_arm_bL_ops dt_bL_ops = {
+ .name = "dt-bl",
+ .get_transition_latency = dt_get_transition_latency,
+ .init_opp_table = dt_init_opp_table,
+ .free_opp_table = of_free_opp_table,
+};
+
+static int generic_bL_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+
+ np = get_cpu_node_with_valid_op(0);
+ if (!np)
+ return -ENODEV;
+
+ of_node_put(np);
+ return bL_cpufreq_register(&dt_bL_ops);
+}
+
+static int generic_bL_remove(struct platform_device *pdev)
+{
+ bL_cpufreq_unregister(&dt_bL_ops);
+ return 0;
+}
+
+static struct platform_driver generic_bL_platdrv = {
+ .driver = {
+ .name = "arm-bL-cpufreq-dt",
+ },
+ .probe = generic_bL_probe,
+ .remove = generic_bL_remove,
+};
+module_platform_driver(generic_bL_platdrv);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/cpufreq/at32ap-cpufreq.c b/kernel/drivers/cpufreq/at32ap-cpufreq.c
new file mode 100644
index 000000000..7b612c8bb
--- /dev/null
+++ b/kernel/drivers/cpufreq/at32ap-cpufreq.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * Based on MIPS implementation arch/mips/kernel/time.c
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*#define DEBUG*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+
+static struct cpufreq_frequency_table *freq_table;
+
+static unsigned int ref_freq;
+static unsigned long loops_per_jiffy_ref;
+
+static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int old_freq, new_freq;
+
+ old_freq = policy->cur;
+ new_freq = freq_table[index].frequency;
+
+ if (!ref_freq) {
+ ref_freq = old_freq;
+ loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
+ }
+
+ if (old_freq < new_freq)
+ boot_cpu_data.loops_per_jiffy = cpufreq_scale(
+ loops_per_jiffy_ref, ref_freq, new_freq);
+ clk_set_rate(policy->clk, new_freq * 1000);
+ if (new_freq < old_freq)
+ boot_cpu_data.loops_per_jiffy = cpufreq_scale(
+ loops_per_jiffy_ref, ref_freq, new_freq);
+
+ return 0;
+}
+
+static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ unsigned int frequency, rate, min_freq;
+ struct clk *cpuclk;
+ int retval, steps, i;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ cpuclk = clk_get(NULL, "cpu");
+ if (IS_ERR(cpuclk)) {
+ pr_debug("cpufreq: could not get CPU clk\n");
+ retval = PTR_ERR(cpuclk);
+ goto out_err;
+ }
+
+ min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ policy->cpuinfo.transition_latency = 0;
+
+ /*
+ * The AVR32 CPU frequency scales in powers of two between the maximum
+ * and the minimum, so reserve one table slot per step plus space for
+ * the table end marker.
+ *
+ * Each candidate frequency is further validated against the clock
+ * before it is appended to the frequency table.
+ */
+ steps = fls(frequency / min_freq) + 1;
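+ /*
+ * E.g. with a 200 MHz maximum and a 25 MHz minimum,
+ * frequency / min_freq = 8 and fls(8) = 4, giving four candidate
+ * rates (200, 100, 50 and 25 MHz) plus one slot for the
+ * CPUFREQ_TABLE_END marker.
+ */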
+ freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
+ GFP_KERNEL);
+ if (!freq_table) {
+ retval = -ENOMEM;
+ goto out_err_put_clk;
+ }
+
+ for (i = 0; i < (steps - 1); i++) {
+ rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
+
+ if (rate != frequency)
+ freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ freq_table[i].frequency = frequency;
+
+ frequency /= 2;
+ }
+
+ policy->clk = cpuclk;
+ freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
+
+ retval = cpufreq_table_validate_and_show(policy, freq_table);
+ if (!retval) {
+ printk(KERN_INFO "cpufreq: AT32AP CPU frequency driver\n");
+ return 0;
+ }
+
+ kfree(freq_table);
+out_err_put_clk:
+ clk_put(cpuclk);
+out_err:
+ return retval;
+}
+
+static struct cpufreq_driver at32_driver = {
+ .name = "at32ap",
+ .init = at32_cpufreq_driver_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = at32_set_target,
+ .get = cpufreq_generic_get,
+ .flags = CPUFREQ_STICKY,
+};
+
+static int __init at32_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&at32_driver);
+}
+late_initcall(at32_cpufreq_init);
diff --git a/kernel/drivers/cpufreq/blackfin-cpufreq.c b/kernel/drivers/cpufreq/blackfin-cpufreq.c
new file mode 100644
index 000000000..a9f8e5bd0
--- /dev/null
+++ b/kernel/drivers/cpufreq/blackfin-cpufreq.c
@@ -0,0 +1,217 @@
+/*
+ * Blackfin core clock scaling
+ *
+ * Copyright 2008-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <asm/blackfin.h>
+#include <asm/time.h>
+#include <asm/dpmc.h>
+
+
+/* this is the table of CCLK frequencies, in Hz */
+/* .driver_data is the entry in the auxiliary dpm_state_table[] */
+static struct cpufreq_frequency_table bfin_freq_table[] = {
+ {
+ .frequency = CPUFREQ_TABLE_END,
+ .driver_data = 0,
+ },
+ {
+ .frequency = CPUFREQ_TABLE_END,
+ .driver_data = 1,
+ },
+ {
+ .frequency = CPUFREQ_TABLE_END,
+ .driver_data = 2,
+ },
+ {
+ .frequency = CPUFREQ_TABLE_END,
+ .driver_data = 0,
+ },
+};
+
+static struct bfin_dpm_state {
+ unsigned int csel; /* system clock divider */
+ unsigned int tscale; /* change the divider on the core timer interrupt */
+} dpm_state_table[3];
+
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+/*
+ * Offset for CYCLES, normalized to the maximum frequency. Used by the
+ * time-ts cycles clocksource, but could be used elsewhere as well.
+ */
+unsigned long long __bfin_cycles_off;
+unsigned int __bfin_cycles_mod;
+#endif
+
+/**************************************************************************/
+static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
+{
+
+ unsigned long csel, min_cclk;
+ int index;
+
+ /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
+#if ANOMALY_05000273 || ANOMALY_05000274 || \
+ (!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \
+ && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
+ min_cclk = sclk * 2;
+#else
+ min_cclk = sclk;
+#endif
+
+#ifndef CONFIG_BF60x
+ csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
+#else
+ csel = bfin_read32(CGU0_DIV) & 0x1F;
+#endif
+
+ for (index = 0; (cclk >> index) >= min_cclk && csel <= 3 && index < 3; index++, csel++) {
+ bfin_freq_table[index].frequency = cclk >> index;
+#ifndef CONFIG_BF60x
+ dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
+#else
+ dpm_state_table[index].csel = csel;
+#endif
+ dpm_state_table[index].tscale = (TIME_SCALE >> index) - 1;
+
+ pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
+ bfin_freq_table[index].frequency,
+ dpm_state_table[index].csel,
+ dpm_state_table[index].tscale);
+ }
+ return;
+}
+
+static void bfin_adjust_core_timer(void *info)
+{
+ unsigned int tscale;
+ unsigned int index = *(unsigned int *)info;
+
+ /* we have to adjust the core timer, because it is using cclk */
+ tscale = dpm_state_table[index].tscale;
+ bfin_write_TSCALE(tscale);
+ return;
+}
+
+static unsigned int bfin_getfreq_khz(unsigned int cpu)
+{
+ /* Both CoreA/B have the same core clock */
+ return get_cclk() / 1000;
+}
+
+#ifdef CONFIG_BF60x
+unsigned long cpu_set_cclk(int cpu, unsigned long new)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(NULL, "CCLK");
+ if (IS_ERR(clk))
+ return -ENODEV;
+
+ ret = clk_set_rate(clk, new);
+ clk_put(clk);
+ return ret;
+}
+#endif
+
+static int bfin_target(struct cpufreq_policy *policy, unsigned int index)
+{
+#ifndef CONFIG_BF60x
+ unsigned int plldiv;
+#endif
+ static unsigned long lpj_ref;
+ static unsigned int lpj_ref_freq;
+ unsigned int old_freq, new_freq;
+ int ret = 0;
+
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+ cycles_t cycles;
+#endif
+
+ old_freq = bfin_getfreq_khz(0);
+ new_freq = bfin_freq_table[index].frequency;
+
+#ifndef CONFIG_BF60x
+ plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
+ bfin_write_PLL_DIV(plldiv);
+#else
+ ret = cpu_set_cclk(policy->cpu, new_freq * 1000);
+ if (ret != 0) {
+ WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
+ return ret;
+ }
+#endif
+ on_each_cpu(bfin_adjust_core_timer, &index, 1);
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+ cycles = get_cycles();
+ SSYNC();
+ cycles += 10; /* ~10 cycles we lose after get_cycles() */
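+ /*
+ * Keep the normalized CYCLES clocksource continuous across the
+ * change: readings are left-shifted by the current table index,
+ * so fold the difference between the old and the new scaling
+ * into the offset.
+ */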
+ __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
+ __bfin_cycles_mod = index;
+#endif
+ if (!lpj_ref_freq) {
+ lpj_ref = loops_per_jiffy;
+ lpj_ref_freq = old_freq;
+ }
+ if (new_freq != old_freq) {
+ loops_per_jiffy = cpufreq_scale(lpj_ref,
+ lpj_ref_freq, new_freq);
+ }
+
+ return ret;
+}
+
+static int __bfin_cpu_init(struct cpufreq_policy *policy)
+{
+
+ unsigned long cclk, sclk;
+
+ cclk = get_cclk() / 1000;
+ sclk = get_sclk() / 1000;
+
+ if (policy->cpu == CPUFREQ_CPU)
+ bfin_init_tables(cclk, sclk);
+
+ policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
+
+ return cpufreq_table_validate_and_show(policy, bfin_freq_table);
+}
+
+static struct cpufreq_driver bfin_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = bfin_target,
+ .get = bfin_getfreq_khz,
+ .init = __bfin_cpu_init,
+ .name = "bfin cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init bfin_cpu_init(void)
+{
+ return cpufreq_register_driver(&bfin_driver);
+}
+
+static void __exit bfin_cpu_exit(void)
+{
+ cpufreq_unregister_driver(&bfin_driver);
+}
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("cpufreq driver for Blackfin");
+MODULE_LICENSE("GPL");
+
+module_init(bfin_cpu_init);
+module_exit(bfin_cpu_exit);
diff --git a/kernel/drivers/cpufreq/cpufreq-dt.c b/kernel/drivers/cpufreq/cpufreq-dt.c
new file mode 100644
index 000000000..bab67db54
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq-dt.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * Copyright (C) 2014 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * The OPP code in function set_target() is reused from
+ * drivers/cpufreq/omap-cpufreq.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpufreq.h>
+#include <linux/cpufreq-dt.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+struct private_data {
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+ struct thermal_cooling_device *cdev;
+ unsigned int voltage_tolerance; /* in percentage */
+};
+
+static int set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct dev_pm_opp *opp;
+ struct cpufreq_frequency_table *freq_table = policy->freq_table;
+ struct clk *cpu_clk = policy->clk;
+ struct private_data *priv = policy->driver_data;
+ struct device *cpu_dev = priv->cpu_dev;
+ struct regulator *cpu_reg = priv->cpu_reg;
+ unsigned long volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
+ long freq_Hz, freq_exact;
+ int ret;
+
+ freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
+ if (freq_Hz <= 0)
+ freq_Hz = freq_table[index].frequency * 1000;
+
+ freq_exact = freq_Hz;
+ new_freq = freq_Hz / 1000;
+ old_freq = clk_get_rate(cpu_clk) / 1000;
+
+ if (!IS_ERR(cpu_reg)) {
+ unsigned long opp_freq;
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(cpu_dev, "failed to find OPP for %ld\n",
+ freq_Hz);
+ return PTR_ERR(opp);
+ }
+ volt = dev_pm_opp_get_voltage(opp);
+ opp_freq = dev_pm_opp_get_freq(opp);
+ rcu_read_unlock();
+ tol = volt * priv->voltage_tolerance / 100;
+ volt_old = regulator_get_voltage(cpu_reg);
+ dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
+ opp_freq / 1000, volt);
+ }
+
+ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+ old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
+
+ /* scaling up? scale voltage before frequency */
+ if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
+ ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale voltage up: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = clk_set_rate(cpu_clk, freq_exact);
+ if (ret) {
+ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+ if (!IS_ERR(cpu_reg) && volt_old > 0)
+ regulator_set_voltage_tol(cpu_reg, volt_old, tol);
+ return ret;
+ }
+
+ /* scaling down? scale voltage after frequency */
+ if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
+ ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale voltage down: %d\n",
+ ret);
+ clk_set_rate(cpu_clk, old_freq * 1000);
+ }
+ }
+
+ return ret;
+}
+
+static int allocate_resources(int cpu, struct device **cdev,
+ struct regulator **creg, struct clk **cclk)
+{
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+ struct clk *cpu_clk;
+ int ret = 0;
+ char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", cpu);
+ return -ENODEV;
+ }
+
+ /* Try "cpu0" for older DTs */
+ if (!cpu)
+ reg = reg_cpu0;
+ else
+ reg = reg_cpu;
+
+try_again:
+ cpu_reg = regulator_get_optional(cpu_dev, reg);
+ if (IS_ERR(cpu_reg)) {
+ /*
+ * If cpu's regulator supply node is present, but regulator is
+ * not yet registered, we should try deferring probe.
+ */
+ if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+ dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
+ cpu);
+ return -EPROBE_DEFER;
+ }
+
+ /* Try with "cpu-supply" */
+ if (reg == reg_cpu0) {
+ reg = reg_cpu;
+ goto try_again;
+ }
+
+ dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
+ cpu, PTR_ERR(cpu_reg));
+ }
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ /* put regulator */
+ if (!IS_ERR(cpu_reg))
+ regulator_put(cpu_reg);
+
+ ret = PTR_ERR(cpu_clk);
+
+ /*
+ * If cpu's clk node is present, but clock is not yet
+ * registered, we should try deferring probe.
+ */
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
+ else
+ dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
+ ret);
+ } else {
+ *cdev = cpu_dev;
+ *creg = cpu_reg;
+ *cclk = cpu_clk;
+ }
+
+ return ret;
+}
+
+static int cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_dt_platform_data *pd;
+ struct cpufreq_frequency_table *freq_table;
+ struct device_node *np;
+ struct private_data *priv;
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+ struct clk *cpu_clk;
+ unsigned long min_uV = ~0, max_uV = 0;
+ unsigned int transition_latency;
+ int ret;
+
+ ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
+ if (ret) {
+ pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
+ return ret;
+ }
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
+ ret = -ENOENT;
+ goto out_put_reg_clk;
+ }
+
+ /* OPPs might be populated at runtime, don't check for error here */
+ of_init_opp_table(cpu_dev);
+
+ /*
+ * But we need the OPP table to function, so if it is not there yet,
+ * give the platform code a chance to provide it and retry later.
+ */
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ pr_debug("OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
+
+ if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ transition_latency = CPUFREQ_ETERNAL;
+
+ if (!IS_ERR(cpu_reg)) {
+ unsigned long opp_freq = 0;
+
+ /*
+ * Disable any OPPs where the connected regulator isn't able to
+ * provide the specified voltage and record minimum and maximum
+ * voltage levels.
+ */
+ while (1) {
+ struct dev_pm_opp *opp;
+ unsigned long opp_uV, tol_uV;
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ break;
+ }
+ opp_uV = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+
+ tol_uV = opp_uV * priv->voltage_tolerance / 100;
+ if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+ opp_uV + tol_uV)) {
+ if (opp_uV < min_uV)
+ min_uV = opp_uV;
+ if (opp_uV > max_uV)
+ max_uV = opp_uV;
+ } else {
+ dev_pm_opp_disable(cpu_dev, opp_freq);
+ }
+
+ opp_freq++;
+ }
+
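+ /*
+ * regulator_set_voltage_time() returns the worst-case voltage
+ * ramp time in microseconds; transition_latency is kept in
+ * nanoseconds, hence the factor of 1000.
+ */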
+ ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ pr_err("failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->cpu_reg = cpu_reg;
+ policy->driver_data = priv;
+
+ policy->clk = cpu_clk;
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+ ret);
+ goto out_free_cpufreq_table;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+
+ pd = cpufreq_get_driver_data();
+ if (!pd || !pd->independent_clocks)
+ cpumask_setall(policy->cpus);
+
+ of_node_put(np);
+
+ return 0;
+
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ of_free_opp_table(cpu_dev);
+ of_node_put(np);
+out_put_reg_clk:
+ clk_put(cpu_clk);
+ if (!IS_ERR(cpu_reg))
+ regulator_put(cpu_reg);
+
+ return ret;
+}
+
+static int cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+
+ cpufreq_cooling_unregister(priv->cdev);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ of_free_opp_table(priv->cpu_dev);
+ clk_put(policy->clk);
+ if (!IS_ERR(priv->cpu_reg))
+ regulator_put(priv->cpu_reg);
+ kfree(priv);
+
+ return 0;
+}
+
+static void cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+ struct device_node *np = of_node_get(priv->cpu_dev->of_node);
+
+ if (WARN_ON(!np))
+ return;
+
+ /*
+ * For now, just loading the cooling device;
+ * thermal DT code takes care of matching them.
+ */
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ priv->cdev = of_cpufreq_cooling_register(np,
+ policy->related_cpus);
+ if (IS_ERR(priv->cdev)) {
+ dev_err(priv->cpu_dev,
+ "running cpufreq without cooling device: %ld\n",
+ PTR_ERR(priv->cdev));
+
+ priv->cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+}
+
+static struct cpufreq_driver dt_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = set_target,
+ .get = cpufreq_generic_get,
+ .init = cpufreq_init,
+ .exit = cpufreq_exit,
+ .ready = cpufreq_ready,
+ .name = "cpufreq-dt",
+ .attr = cpufreq_generic_attr,
+};
+
+static int dt_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+ struct clk *cpu_clk;
+ int ret;
+
+ /*
+ * All per-cluster (CPUs sharing clock/voltages) initialization is done
+ * from ->init(). In probe(), we just need to make sure that clk and
+ * regulators are available. If they are not, defer the probe and retry.
+ *
+ * FIXME: Is checking this only for CPU0 sufficient?
+ */
+ ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+ if (ret)
+ return ret;
+
+ clk_put(cpu_clk);
+ if (!IS_ERR(cpu_reg))
+ regulator_put(cpu_reg);
+
+ dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
+
+ ret = cpufreq_register_driver(&dt_cpufreq_driver);
+ if (ret)
+ dev_err(cpu_dev, "failed register driver: %d\n", ret);
+
+ return ret;
+}
+
+static int dt_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&dt_cpufreq_driver);
+ return 0;
+}
+
+static struct platform_driver dt_cpufreq_platdrv = {
+ .driver = {
+ .name = "cpufreq-dt",
+ },
+ .probe = dt_cpufreq_probe,
+ .remove = dt_cpufreq_remove,
+};
+module_platform_driver(dt_cpufreq_platdrv);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Generic cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/cpufreq-nforce2.c b/kernel/drivers/cpufreq/cpufreq-nforce2.c
new file mode 100644
index 000000000..a2258090b
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq-nforce2.c
@@ -0,0 +1,445 @@
+/*
+ * (C) 2004-2006 Sebastian Witt <se.witt@gmx.net>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ * Based upon reverse engineered information
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#define NFORCE2_XTAL 25
+#define NFORCE2_BOOTFSB 0x48
+#define NFORCE2_PLLENABLE 0xa8
+#define NFORCE2_PLLREG 0xa4
+#define NFORCE2_PLLADR 0xa0
+#define NFORCE2_PLL(mul, div) (0x100000 | (mul << 8) | div)
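+/* FSB (MHz) = 25 MHz crystal * mul / div, e.g. mul = 16, div = 2 -> 200 MHz */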
+
+#define NFORCE2_MIN_FSB 50
+#define NFORCE2_SAFE_DISTANCE 50
+
+/* Delay in ms between FSB changes */
+/* #define NFORCE2_DELAY 10 */
+
+/*
+ * nforce2_chipset:
+ * FSB is changed using the chipset
+ */
+static struct pci_dev *nforce2_dev;
+
+/* fid:
+ * multiplier * 10
+ */
+static int fid;
+
+/* min_fsb, max_fsb:
+ * minimum and maximum FSB (the maximum is the FSB at boot time)
+ */
+static int min_fsb;
+static int max_fsb;
+
+MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
+MODULE_DESCRIPTION("nForce2 FSB changing cpufreq driver");
+MODULE_LICENSE("GPL");
+
+module_param(fid, int, 0444);
+module_param(min_fsb, int, 0444);
+
+MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
+MODULE_PARM_DESC(min_fsb,
+ "Minimum FSB to use, if not defined: current FSB - 50");
+
+#define PFX "cpufreq-nforce2: "
+
+/**
+ * nforce2_calc_fsb - calculate FSB
+ * @pll: PLL value
+ *
+ * Calculates FSB from PLL value
+ */
+static int nforce2_calc_fsb(int pll)
+{
+ unsigned char mul, div;
+
+ mul = (pll >> 8) & 0xff;
+ div = pll & 0xff;
+
+ if (div > 0)
+ return NFORCE2_XTAL * mul / div;
+
+ return 0;
+}
+
+/**
+ * nforce2_calc_pll - calculate PLL value
+ * @fsb: FSB
+ *
+ * Calculate PLL value for given FSB
+ */
+static int nforce2_calc_pll(unsigned int fsb)
+{
+ unsigned char xmul, xdiv;
+ unsigned char mul = 0, div = 0;
+ int tried = 0;
+
+ /* Search for an exact multiplier/divider match for fsb, fsb+1, fsb+2 or fsb+3 */
+ while (((mul == 0) || (div == 0)) && (tried <= 3)) {
+ for (xdiv = 2; xdiv <= 0x80; xdiv++)
+ for (xmul = 1; xmul <= 0xfe; xmul++)
+ if (nforce2_calc_fsb(NFORCE2_PLL(xmul, xdiv)) ==
+ fsb + tried) {
+ mul = xmul;
+ div = xdiv;
+ }
+ tried++;
+ }
+
+ if ((mul == 0) || (div == 0))
+ return -1;
+
+ return NFORCE2_PLL(mul, div);
+}
+
+/**
+ * nforce2_write_pll - write PLL value to chipset
+ * @pll: PLL value
+ *
+ * Writes new FSB PLL value to chipset
+ */
+static void nforce2_write_pll(int pll)
+{
+ int temp;
+
+ /* Set the pll addr. to 0x00 */
+ pci_write_config_dword(nforce2_dev, NFORCE2_PLLADR, 0);
+
+ /* Now write the value in all 64 registers */
+ for (temp = 0; temp <= 0x3f; temp++)
+ pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll);
+
+ return;
+}
+
+/**
+ * nforce2_fsb_read - Read FSB
+ *
+ * Read FSB from chipset
+ * If bootfsb != 0, return FSB at boot-time
+ */
+static unsigned int nforce2_fsb_read(int bootfsb)
+{
+ struct pci_dev *nforce2_sub5;
+ u32 fsb, temp = 0;
+
+ /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
+ nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 0x01EF,
+ PCI_ANY_ID, PCI_ANY_ID, NULL);
+ if (!nforce2_sub5)
+ return 0;
+
+ pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
+ fsb /= 1000000;
+
+ /* Check if PLL register is already set */
+ pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
+
+ if (bootfsb || !temp)
+ return fsb;
+
+ /* Use PLL register FSB value */
+ pci_read_config_dword(nforce2_dev, NFORCE2_PLLREG, &temp);
+ fsb = nforce2_calc_fsb(temp);
+
+ return fsb;
+}
+
+/**
+ * nforce2_set_fsb - set new FSB
+ * @fsb: New FSB
+ *
+ * Sets new FSB
+ */
+static int nforce2_set_fsb(unsigned int fsb)
+{
+ u32 temp = 0;
+ unsigned int tfsb;
+ int diff;
+ int pll = 0;
+
+ if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
+ printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
+ return -EINVAL;
+ }
+
+ tfsb = nforce2_fsb_read(0);
+ if (!tfsb) {
+ printk(KERN_ERR PFX "Error while reading the FSB\n");
+ return -EINVAL;
+ }
+
+ /* First write? Then set actual value */
+ pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
+ if (!temp) {
+ pll = nforce2_calc_pll(tfsb);
+
+ if (pll < 0)
+ return -EINVAL;
+
+ nforce2_write_pll(pll);
+ }
+
+ /* Enable write access */
+ temp = 0x01;
+ pci_write_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8)temp);
+
+ diff = tfsb - fsb;
+
+ if (!diff)
+ return 0;
+
+ while ((tfsb != fsb) && (tfsb <= max_fsb) && (tfsb >= min_fsb)) {
+ if (diff < 0)
+ tfsb++;
+ else
+ tfsb--;
+
+ /* Calculate the PLL reg. value */
+ pll = nforce2_calc_pll(tfsb);
+ if (pll == -1)
+ return -EINVAL;
+
+ nforce2_write_pll(pll);
+#ifdef NFORCE2_DELAY
+ mdelay(NFORCE2_DELAY);
+#endif
+ }
+
+ temp = 0x40;
+ pci_write_config_byte(nforce2_dev, NFORCE2_PLLADR, (u8)temp);
+
+ return 0;
+}
+
+/**
+ * nforce2_get - get the CPU frequency
+ * @cpu: CPU number
+ *
+ * Returns the CPU frequency
+ */
+static unsigned int nforce2_get(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
+ return nforce2_fsb_read(0) * fid * 100;
+}
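+
+/*
+ * Example of the arithmetic above: with an FSB of 133 MHz and a CPU
+ * multiplier of 11.0 (so fid = 110, since fid stores multiplier * 10),
+ * nforce2_get() returns 133 * 110 * 100 = 1463000 kHz, i.e. ~1.46 GHz.
+ * The factor of 100 turns MHz * (multiplier * 10) into kHz.
+ */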
+
+/**
+ * nforce2_target - set a new CPUFreq policy
+ * @policy: new policy
+ * @target_freq: the target frequency
+ * @relation: how that frequency relates to achieved frequency
+ * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ *
+ * Sets a new CPUFreq policy.
+ */
+static int nforce2_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+/* unsigned long flags; */
+ struct cpufreq_freqs freqs;
+ unsigned int target_fsb;
+
+ if ((target_freq > policy->max) || (target_freq < policy->min))
+ return -EINVAL;
+
+ target_fsb = target_freq / (fid * 100);
+
+ freqs.old = nforce2_get(policy->cpu);
+ freqs.new = target_fsb * fid * 100;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
+ freqs.old, freqs.new);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+ /* Disable IRQs */
+ /* local_irq_save(flags); */
+
+ if (nforce2_set_fsb(target_fsb) < 0)
+ printk(KERN_ERR PFX "Changing FSB to %d failed\n",
+ target_fsb);
+ else
+ pr_debug("Changed FSB successfully to %d\n",
+ target_fsb);
+
+ /* Enable IRQs */
+ /* local_irq_restore(flags); */
+
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+
+ return 0;
+}
+
+/**
+ * nforce2_verify - verifies a new CPUFreq policy
+ * @policy: new policy
+ */
+static int nforce2_verify(struct cpufreq_policy *policy)
+{
+ unsigned int fsb_pol_max;
+
+ fsb_pol_max = policy->max / (fid * 100);
+
+ if (policy->min < (fsb_pol_max * fid * 100))
+ policy->max = (fsb_pol_max + 1) * fid * 100;
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static int nforce2_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int fsb;
+ unsigned int rfid;
+
+ /* capability check */
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ /* Get current FSB */
+ fsb = nforce2_fsb_read(0);
+
+ if (!fsb)
+ return -EIO;
+
+ /* FIX: Get FID from CPU */
+ if (!fid) {
+ if (!cpu_khz) {
+ printk(KERN_WARNING PFX
+ "cpu_khz not set, can't calculate multiplier!\n");
+ return -ENODEV;
+ }
+
+ fid = cpu_khz / (fsb * 100);
+ rfid = fid % 5;
+
+ if (rfid) {
+ if (rfid > 2)
+ fid += 5 - rfid;
+ else
+ fid -= rfid;
+ }
+ }
+
+ printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
+ fid / 10, fid % 10);
+
+ /* Set maximum FSB to FSB at boot time */
+ max_fsb = nforce2_fsb_read(1);
+
+ if (!max_fsb)
+ return -EIO;
+
+ if (!min_fsb)
+ min_fsb = max_fsb - NFORCE2_SAFE_DISTANCE;
+
+ if (min_fsb < NFORCE2_MIN_FSB)
+ min_fsb = NFORCE2_MIN_FSB;
+
+ /* cpuinfo and default policy values */
+ policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
+ policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ return 0;
+}
+
+static int nforce2_cpu_exit(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver nforce2_driver = {
+ .name = "nforce2",
+ .verify = nforce2_verify,
+ .target = nforce2_target,
+ .get = nforce2_get,
+ .init = nforce2_cpu_init,
+ .exit = nforce2_cpu_exit,
+};
+
+#ifdef MODULE
+static const struct pci_device_id nforce2_ids[] = {
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2 },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, nforce2_ids);
+#endif
+
+/**
+ * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic
+ *
+ * Detects the nForce2 A2 and C1 steppings
+ */
+static int nforce2_detect_chipset(void)
+{
+ nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_NFORCE2,
+ PCI_ANY_ID, PCI_ANY_ID, NULL);
+
+ if (nforce2_dev == NULL)
+ return -ENODEV;
+
+ printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
+ nforce2_dev->revision);
+ printk(KERN_INFO PFX
+ "FSB changing is maybe unstable and can lead to "
+ "crashes and data loss.\n");
+
+ return 0;
+}
+
+/**
+ * nforce2_init - initializes the nForce2 CPUFreq driver
+ *
+ * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported
+ * devices, -EINVAL on problems during initialization, and zero on
+ * success.
+ */
+static int __init nforce2_init(void)
+{
+ /* TODO: do we need to detect the processor? */
+
+ /* detect chipset */
+ if (nforce2_detect_chipset()) {
+ printk(KERN_INFO PFX "No nForce2 chipset.\n");
+ return -ENODEV;
+ }
+
+ return cpufreq_register_driver(&nforce2_driver);
+}
+
+/**
+ * nforce2_exit - unregisters cpufreq module
+ *
+ * Unregisters nForce2 FSB change support.
+ */
+static void __exit nforce2_exit(void)
+{
+ cpufreq_unregister_driver(&nforce2_driver);
+}
+
+module_init(nforce2_init);
+module_exit(nforce2_exit);
+
diff --git a/kernel/drivers/cpufreq/cpufreq.c b/kernel/drivers/cpufreq/cpufreq.c
new file mode 100644
index 000000000..ce1d93e93
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq.c
@@ -0,0 +1,2511 @@
+/*
+ * linux/drivers/cpufreq/cpufreq.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
+ * Added handling for CPU hotplug
+ * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
+ * Fix handling for CPU hotplug -- affected CPUs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
+#include <linux/tick.h>
+#include <trace/events/power.h>
+
+/* Macros to iterate over lists */
+/* Iterate over the policies of online CPUs */
+static LIST_HEAD(cpufreq_policy_list);
+#define for_each_policy(__policy) \
+ list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
+
+/* Iterate over governors */
+static LIST_HEAD(cpufreq_governor_list);
+#define for_each_governor(__governor) \
+ list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
+
+/**
+ * The "cpufreq driver" - the arch- or hardware-dependent low
+ * level driver of CPUFreq support, and its spinlock. This lock
+ * also protects the cpufreq_cpu_data array.
+ */
+static struct cpufreq_driver *cpufreq_driver;
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
+static DEFINE_RWLOCK(cpufreq_driver_lock);
+DEFINE_MUTEX(cpufreq_governor_lock);
+
+/* This one keeps track of the previously set governor of a removed CPU */
+static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
+
+static inline bool has_target(void)
+{
+ return cpufreq_driver->target_index || cpufreq_driver->target;
+}
+
+/* internal prototypes */
+static int __cpufreq_governor(struct cpufreq_policy *policy,
+ unsigned int event);
+static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
+static void handle_update(struct work_struct *work);
+
+/**
+ * Two notifier lists: the "policy" list is involved in the
+ * validation process for a new CPU frequency policy; the
+ * "transition" list for kernel code that needs to handle
+ * changes to devices when the CPU clock speed changes.
+ * Each list is protected by its own notifier chain's locking.
+ */
+static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
+static struct srcu_notifier_head cpufreq_transition_notifier_list;
+
+static bool init_cpufreq_transition_notifier_list_called;
+static int __init init_cpufreq_transition_notifier_list(void)
+{
+ srcu_init_notifier_head(&cpufreq_transition_notifier_list);
+ init_cpufreq_transition_notifier_list_called = true;
+ return 0;
+}
+pure_initcall(init_cpufreq_transition_notifier_list);
+
+static int off __read_mostly;
+static int cpufreq_disabled(void)
+{
+ return off;
+}
+void disable_cpufreq(void)
+{
+ off = 1;
+}
+static DEFINE_MUTEX(cpufreq_governor_mutex);
+
+bool have_governor_per_policy(void)
+{
+ return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
+}
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
+
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+ if (have_governor_per_policy())
+ return &policy->kobj;
+ else
+ return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = cputime_to_usecs(cur_wall_time);
+
+ return cputime_to_usecs(idle_time);
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else if (!io_busy)
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
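+
+/*
+ * Sketch of how a sampling governor typically consumes get_cpu_idle_time():
+ * read the idle and wall counters at two points and derive the busy
+ * percentage from the deltas. Illustrative only; the variable names are
+ * made up and not part of this file:
+ *
+ * prev_idle = get_cpu_idle_time(cpu, &prev_wall, io_busy);
+ * (a sampling interval elapses)
+ * cur_idle = get_cpu_idle_time(cpu, &cur_wall, io_busy);
+ *
+ * wall_delta = (unsigned int)(cur_wall - prev_wall);
+ * idle_delta = (unsigned int)(cur_idle - prev_idle);
+ * load = 100 * (wall_delta - idle_delta) / wall_delta;
+ */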
+
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate & show the frequency table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency)
+{
+ int ret;
+
+ ret = cpufreq_table_validate_and_show(policy, table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ return ret;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+
+ /*
+ * The driver only supports the SMP configuration where all processors
+ * share the clock and voltage.
+ */
+ cpumask_setall(policy->cpus);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
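+
+/*
+ * Minimal sketch of a driver ->init() built on cpufreq_generic_init().
+ * The table entries and the 100000 ns transition latency are made-up
+ * values for illustration, not taken from any real driver:
+ *
+ * static struct cpufreq_frequency_table foo_freq_table[] = {
+ * { .frequency = 500000 },
+ * { .frequency = 1000000 },
+ * { .frequency = CPUFREQ_TABLE_END },
+ * };
+ *
+ * static int foo_cpufreq_init(struct cpufreq_policy *policy)
+ * {
+ * return cpufreq_generic_init(policy, foo_freq_table, 100000);
+ * }
+ */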
+
+unsigned int cpufreq_generic_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ if (!policy || IS_ERR(policy->clk)) {
+ pr_err("%s: No %s associated to cpu: %d\n",
+ __func__, policy ? "clk" : "policy", cpu);
+ return 0;
+ }
+
+ return clk_get_rate(policy->clk) / 1000;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_get);
+
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+ return per_cpu(cpufreq_cpu_data, cpu);
+}
+
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = NULL;
+ unsigned long flags;
+
+ if (cpu >= nr_cpu_ids)
+ return NULL;
+
+ /* get the cpufreq driver */
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ if (cpufreq_driver) {
+ /* get the CPU */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy)
+ kobject_get(&policy->kobj);
+ }
+
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ return policy;
+}
+EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
+
+void cpufreq_cpu_put(struct cpufreq_policy *policy)
+{
+ kobject_put(&policy->kobj);
+}
+EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+
+/*********************************************************************
+ * EXTERNALLY AFFECTING FREQUENCY CHANGES *
+ *********************************************************************/
+
+/**
+ * adjust_jiffies - adjust the system "loops_per_jiffy"
+ *
+ * This function alters the system "loops_per_jiffy" for the clock
+ * speed change. Note that loops_per_jiffy cannot be updated on SMP
+ * systems as each CPU might be scaled differently. So, use the arch
+ * per-CPU loops_per_jiffy value wherever possible.
+ */
+static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
+{
+#ifndef CONFIG_SMP
+ static unsigned long l_p_j_ref;
+ static unsigned int l_p_j_ref_freq;
+
+ if (ci->flags & CPUFREQ_CONST_LOOPS)
+ return;
+
+ if (!l_p_j_ref_freq) {
+ l_p_j_ref = loops_per_jiffy;
+ l_p_j_ref_freq = ci->old;
+ pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+ l_p_j_ref, l_p_j_ref_freq);
+ }
+ if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
+ loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
+ ci->new);
+ pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+ loops_per_jiffy, ci->new);
+ }
+#endif
+}
+
+static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state)
+{
+ BUG_ON(irqs_disabled());
+
+ if (cpufreq_disabled())
+ return;
+
+ freqs->flags = cpufreq_driver->flags;
+ pr_debug("notification %u of frequency transition to %u kHz\n",
+ state, freqs->new);
+
+ switch (state) {
+
+ case CPUFREQ_PRECHANGE:
+ /* detect if the driver reported a value as "old frequency"
+ * which is not equal to what the cpufreq core thinks is
+ * "old frequency".
+ */
+ if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
+ if ((policy) && (policy->cpu == freqs->cpu) &&
+ (policy->cur) && (policy->cur != freqs->old)) {
+ pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+ freqs->old, policy->cur);
+ freqs->old = policy->cur;
+ }
+ }
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_PRECHANGE, freqs);
+ adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
+ break;
+
+ case CPUFREQ_POSTCHANGE:
+ adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
+ pr_debug("FREQ: %lu - CPU: %lu\n",
+ (unsigned long)freqs->new, (unsigned long)freqs->cpu);
+ trace_cpu_frequency(freqs->new, freqs->cpu);
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_POSTCHANGE, freqs);
+ if (likely(policy) && likely(policy->cpu == freqs->cpu))
+ policy->cur = freqs->new;
+ break;
+ }
+}
+
+/**
+ * cpufreq_notify_transition - call notifier chain and adjust_jiffies
+ * on frequency transition.
+ *
+ * This function calls the transition notifiers and the "adjust_jiffies"
+ * function. It is called twice on all CPU frequency changes that have
+ * external effects.
+ */
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state)
+{
+ for_each_cpu(freqs->cpu, policy->cpus)
+ __cpufreq_notify_transition(policy, freqs, state);
+}
+
+/* Do post notifications when there are chances that transition has failed */
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+ if (!transition_failed)
+ return;
+
+ swap(freqs->old, freqs->new);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
+}
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs)
+{
+
+ /*
+ * Catch double invocations of _begin() which lead to self-deadlock.
+ * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
+ * doesn't invoke _begin() on their behalf, and hence the chances of
+ * double invocations are very low. Moreover, there are scenarios
+ * where these checks can emit false-positive warnings in these
+ * drivers; so we avoid that by skipping them altogether.
+ */
+ WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
+ && current == policy->transition_task);
+
+wait:
+ wait_event(policy->transition_wait, !policy->transition_ongoing);
+
+ spin_lock(&policy->transition_lock);
+
+ if (unlikely(policy->transition_ongoing)) {
+ spin_unlock(&policy->transition_lock);
+ goto wait;
+ }
+
+ policy->transition_ongoing = true;
+ policy->transition_task = current;
+
+ spin_unlock(&policy->transition_lock);
+
+ cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed)
+{
+ if (unlikely(WARN_ON(!policy->transition_ongoing)))
+ return;
+
+ cpufreq_notify_post_transition(policy, freqs, transition_failed);
+
+ policy->transition_ongoing = false;
+ policy->transition_task = NULL;
+
+ wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
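+
+/*
+ * The expected calling pattern for the two helpers above, as seen from a
+ * driver's ->target() callback (schematic sketch; foo_hw_set_freq() is a
+ * hypothetical hardware accessor):
+ *
+ * freqs.old = policy->cur;
+ * freqs.new = target_freq;
+ * cpufreq_freq_transition_begin(policy, &freqs);
+ * ret = foo_hw_set_freq(target_freq);
+ * cpufreq_freq_transition_end(policy, &freqs, ret);
+ *
+ * The cpufreq-nforce2 driver in this series follows the same pattern in
+ * nforce2_target().
+ */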
+
+
+/*********************************************************************
+ * SYSFS INTERFACE *
+ *********************************************************************/
+static ssize_t show_boost(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
+}
+
+static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, enable;
+
+ ret = sscanf(buf, "%d", &enable);
+ if (ret != 1 || enable < 0 || enable > 1)
+ return -EINVAL;
+
+ if (cpufreq_boost_trigger_state(enable)) {
+ pr_err("%s: Cannot %s BOOST!\n",
+ __func__, enable ? "enable" : "disable");
+ return -EINVAL;
+ }
+
+ pr_debug("%s: cpufreq BOOST %s\n",
+ __func__, enable ? "enabled" : "disabled");
+
+ return count;
+}
+define_one_global_rw(boost);
+
+static struct cpufreq_governor *find_governor(const char *str_governor)
+{
+ struct cpufreq_governor *t;
+
+ for_each_governor(t)
+ if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
+ return t;
+
+ return NULL;
+}
+
+/**
+ * cpufreq_parse_governor - parse a governor string
+ */
+static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
+ struct cpufreq_governor **governor)
+{
+ int err = -EINVAL;
+
+ if (!cpufreq_driver)
+ goto out;
+
+ if (cpufreq_driver->setpolicy) {
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
+ *policy = CPUFREQ_POLICY_PERFORMANCE;
+ err = 0;
+ } else if (!strncasecmp(str_governor, "powersave",
+ CPUFREQ_NAME_LEN)) {
+ *policy = CPUFREQ_POLICY_POWERSAVE;
+ err = 0;
+ }
+ } else {
+ struct cpufreq_governor *t;
+
+ mutex_lock(&cpufreq_governor_mutex);
+
+ t = find_governor(str_governor);
+
+ if (t == NULL) {
+ int ret;
+
+ mutex_unlock(&cpufreq_governor_mutex);
+ ret = request_module("cpufreq_%s", str_governor);
+ mutex_lock(&cpufreq_governor_mutex);
+
+ if (ret == 0)
+ t = find_governor(str_governor);
+ }
+
+ if (t != NULL) {
+ *governor = t;
+ err = 0;
+ }
+
+ mutex_unlock(&cpufreq_governor_mutex);
+ }
+out:
+ return err;
+}
+
+/**
+ * cpufreq_per_cpu_attr_read() / show_##file_name() -
+ * print out cpufreq information
+ *
+ * Write out information from cpufreq_driver->policy[cpu]; object must be
+ * "unsigned int".
+ */
+
+#define show_one(file_name, object) \
+static ssize_t show_##file_name \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", policy->object); \
+}
+
+show_one(cpuinfo_min_freq, cpuinfo.min_freq);
+show_one(cpuinfo_max_freq, cpuinfo.max_freq);
+show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
+show_one(scaling_min_freq, min);
+show_one(scaling_max_freq, max);
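+
+/*
+ * For reference, show_one(scaling_min_freq, min) above expands to:
+ *
+ * static ssize_t show_scaling_min_freq
+ * (struct cpufreq_policy *policy, char *buf)
+ * {
+ * return sprintf(buf, "%u\n", policy->min);
+ * }
+ */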
+
+static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
+{
+ ssize_t ret;
+
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+ ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
+ else
+ ret = sprintf(buf, "%u\n", policy->cur);
+ return ret;
+}
+
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
+
+/**
+ * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
+ */
+#define store_one(file_name, object) \
+static ssize_t store_##file_name \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
+{ \
+ int ret, temp; \
+ struct cpufreq_policy new_policy; \
+ \
+ ret = cpufreq_get_policy(&new_policy, policy->cpu); \
+ if (ret) \
+ return -EINVAL; \
+ \
+ ret = sscanf(buf, "%u", &new_policy.object); \
+ if (ret != 1) \
+ return -EINVAL; \
+ \
+ temp = new_policy.object; \
+ ret = cpufreq_set_policy(policy, &new_policy); \
+ if (!ret) \
+ policy->user_policy.object = temp; \
+ \
+ return ret ? ret : count; \
+}
+
+store_one(scaling_min_freq, min);
+store_one(scaling_max_freq, max);
+
+/**
+ * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ unsigned int cur_freq = __cpufreq_get(policy);
+ if (!cur_freq)
+ return sprintf(buf, "<unknown>");
+ return sprintf(buf, "%u\n", cur_freq);
+}
+
+/**
+ * show_scaling_governor - show the current policy for the specified CPU
+ */
+static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
+{
+ if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
+ return sprintf(buf, "powersave\n");
+ else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return sprintf(buf, "performance\n");
+ else if (policy->governor)
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
+ policy->governor->name);
+ return -EINVAL;
+}
+
+/**
+ * store_scaling_governor - store policy for the specified CPU
+ */
+static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ int ret;
+ char str_governor[16];
+ struct cpufreq_policy new_policy;
+
+ ret = cpufreq_get_policy(&new_policy, policy->cpu);
+ if (ret)
+ return ret;
+
+ ret = sscanf(buf, "%15s", str_governor);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (cpufreq_parse_governor(str_governor, &new_policy.policy,
+ &new_policy.governor))
+ return -EINVAL;
+
+ ret = cpufreq_set_policy(policy, &new_policy);
+
+ policy->user_policy.policy = policy->policy;
+ policy->user_policy.governor = policy->governor;
+
+ if (ret)
+ return ret;
+ else
+ return count;
+}
+
+/**
+ * show_scaling_driver - show the cpufreq driver currently loaded
+ */
+static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
+{
+ return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
+}
+
+/**
+ * show_scaling_available_governors - show the available CPUfreq governors
+ */
+static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
+ char *buf)
+{
+ ssize_t i = 0;
+ struct cpufreq_governor *t;
+
+ if (!has_target()) {
+ i += sprintf(buf, "performance powersave");
+ goto out;
+ }
+
+ for_each_governor(t) {
+ if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
+ - (CPUFREQ_NAME_LEN + 2)))
+ goto out;
+ i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
+ }
+out:
+ i += sprintf(&buf[i], "\n");
+ return i;
+}
+
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
+{
+ ssize_t i = 0;
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask) {
+ if (i)
+ i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
+ i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
+ if (i >= (PAGE_SIZE - 5))
+ break;
+ }
+ i += sprintf(&buf[i], "\n");
+ return i;
+}
+EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
+
+/**
+ * show_related_cpus - show the CPUs affected by each transition even if
+ * hw coordination is in use
+ */
+static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ return cpufreq_show_cpus(policy->related_cpus, buf);
+}
+
+/**
+ * show_affected_cpus - show the CPUs affected by each transition
+ */
+static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ return cpufreq_show_cpus(policy->cpus, buf);
+}
+
+static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int freq = 0;
+ unsigned int ret;
+
+ if (!policy->governor || !policy->governor->store_setspeed)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%u", &freq);
+ if (ret != 1)
+ return -EINVAL;
+
+ policy->governor->store_setspeed(policy, freq);
+
+ return count;
+}
+
+static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
+{
+ if (!policy->governor || !policy->governor->show_setspeed)
+ return sprintf(buf, "<unsupported>\n");
+
+ return policy->governor->show_setspeed(policy, buf);
+}
+
+/**
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
+ */
+static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
+{
+ unsigned int limit;
+ int ret;
+ if (cpufreq_driver->bios_limit) {
+ ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+ if (!ret)
+ return sprintf(buf, "%u\n", limit);
+ }
+ return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+}
+
+cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_min_freq);
+cpufreq_freq_attr_ro(cpuinfo_max_freq);
+cpufreq_freq_attr_ro(cpuinfo_transition_latency);
+cpufreq_freq_attr_ro(scaling_available_governors);
+cpufreq_freq_attr_ro(scaling_driver);
+cpufreq_freq_attr_ro(scaling_cur_freq);
+cpufreq_freq_attr_ro(bios_limit);
+cpufreq_freq_attr_ro(related_cpus);
+cpufreq_freq_attr_ro(affected_cpus);
+cpufreq_freq_attr_rw(scaling_min_freq);
+cpufreq_freq_attr_rw(scaling_max_freq);
+cpufreq_freq_attr_rw(scaling_governor);
+cpufreq_freq_attr_rw(scaling_setspeed);
+
+static struct attribute *default_attrs[] = {
+ &cpuinfo_min_freq.attr,
+ &cpuinfo_max_freq.attr,
+ &cpuinfo_transition_latency.attr,
+ &scaling_min_freq.attr,
+ &scaling_max_freq.attr,
+ &affected_cpus.attr,
+ &related_cpus.attr,
+ &scaling_governor.attr,
+ &scaling_driver.attr,
+ &scaling_available_governors.attr,
+ &scaling_setspeed.attr,
+ NULL
+};
+
+#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
+#define to_attr(a) container_of(a, struct freq_attr, attr)
+
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct cpufreq_policy *policy = to_policy(kobj);
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret;
+
+ down_read(&policy->rwsem);
+
+ if (fattr->show)
+ ret = fattr->show(policy, buf);
+ else
+ ret = -EIO;
+
+ up_read(&policy->rwsem);
+
+ return ret;
+}
+
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cpufreq_policy *policy = to_policy(kobj);
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret = -EINVAL;
+
+ get_online_cpus();
+
+ if (!cpu_online(policy->cpu))
+ goto unlock;
+
+ down_write(&policy->rwsem);
+
+ if (fattr->store)
+ ret = fattr->store(policy, buf, count);
+ else
+ ret = -EIO;
+
+ up_write(&policy->rwsem);
+unlock:
+ put_online_cpus();
+
+ return ret;
+}
+
+static void cpufreq_sysfs_release(struct kobject *kobj)
+{
+ struct cpufreq_policy *policy = to_policy(kobj);
+ pr_debug("last reference is dropped\n");
+ complete(&policy->kobj_unregister);
+}
+
+static const struct sysfs_ops sysfs_ops = {
+ .show = show,
+ .store = store,
+};
+
+static struct kobj_type ktype_cpufreq = {
+ .sysfs_ops = &sysfs_ops,
+ .default_attrs = default_attrs,
+ .release = cpufreq_sysfs_release,
+};
+
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
+
+static int cpufreq_global_kobject_usage;
+
+int cpufreq_get_global_kobject(void)
+{
+ if (!cpufreq_global_kobject_usage++)
+ return kobject_add(cpufreq_global_kobject,
+ &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
+
+ return 0;
+}
+EXPORT_SYMBOL(cpufreq_get_global_kobject);
+
+void cpufreq_put_global_kobject(void)
+{
+ if (!--cpufreq_global_kobject_usage)
+ kobject_del(cpufreq_global_kobject);
+}
+EXPORT_SYMBOL(cpufreq_put_global_kobject);
+
+int cpufreq_sysfs_create_file(const struct attribute *attr)
+{
+ int ret = cpufreq_get_global_kobject();
+
+ if (!ret) {
+ ret = sysfs_create_file(cpufreq_global_kobject, attr);
+ if (ret)
+ cpufreq_put_global_kobject();
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_sysfs_create_file);
+
+void cpufreq_sysfs_remove_file(const struct attribute *attr)
+{
+ sysfs_remove_file(cpufreq_global_kobject, attr);
+ cpufreq_put_global_kobject();
+}
+EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
+
+/* symlink affected CPUs */
+static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
+{
+ unsigned int j;
+ int ret = 0;
+
+ for_each_cpu(j, policy->cpus) {
+ struct device *cpu_dev;
+
+ if (j == policy->cpu)
+ continue;
+
+ pr_debug("Adding link for CPU: %u\n", j);
+ cpu_dev = get_cpu_device(j);
+ ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+ "cpufreq");
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
+ struct device *dev)
+{
+ struct freq_attr **drv_attr;
+ int ret = 0;
+
+ /* set up files for this cpu device */
+ drv_attr = cpufreq_driver->attr;
+ while (drv_attr && *drv_attr) {
+ ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+ if (ret)
+ return ret;
+ drv_attr++;
+ }
+ if (cpufreq_driver->get) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
+ if (ret)
+ return ret;
+ }
+
+ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+ if (ret)
+ return ret;
+
+ if (cpufreq_driver->bios_limit) {
+ ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
+ if (ret)
+ return ret;
+ }
+
+ return cpufreq_add_dev_symlink(policy);
+}
+
+static void cpufreq_init_policy(struct cpufreq_policy *policy)
+{
+ struct cpufreq_governor *gov = NULL;
+ struct cpufreq_policy new_policy;
+ int ret = 0;
+
+ memcpy(&new_policy, policy, sizeof(*policy));
+
+ /* Update governor of new_policy to the governor used before hotplug */
+ gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+ if (gov)
+ pr_debug("Restoring governor %s for cpu %d\n",
+ gov->name, policy->cpu);
+ else
+ gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+ new_policy.governor = gov;
+
+ /* Use the default policy if it's valid. */
+ if (cpufreq_driver->setpolicy)
+ cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
+
+ /* set default policy */
+ ret = cpufreq_set_policy(policy, &new_policy);
+ if (ret) {
+ pr_debug("setting policy failed\n");
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+ }
+}
+
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+ unsigned int cpu, struct device *dev)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (ret) {
+ pr_err("%s: Failed to stop governor\n", __func__);
+ return ret;
+ }
+ }
+
+ down_write(&policy->rwsem);
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpumask_set_cpu(cpu, policy->cpus);
+ per_cpu(cpufreq_cpu_data, cpu) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ up_write(&policy->rwsem);
+
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ if (ret) {
+ pr_err("%s: Failed to start governor\n", __func__);
+ return ret;
+ }
+ }
+
+ return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+}
+
+static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ unsigned long flags;
+
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
+
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (policy)
+ policy->governor = NULL;
+
+ return policy;
+}
+
+static struct cpufreq_policy *cpufreq_policy_alloc(void)
+{
+ struct cpufreq_policy *policy;
+
+ policy = kzalloc(sizeof(*policy), GFP_KERNEL);
+ if (!policy)
+ return NULL;
+
+ if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+ goto err_free_policy;
+
+ if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+ goto err_free_cpumask;
+
+ INIT_LIST_HEAD(&policy->policy_list);
+ init_rwsem(&policy->rwsem);
+ spin_lock_init(&policy->transition_lock);
+ init_waitqueue_head(&policy->transition_wait);
+ init_completion(&policy->kobj_unregister);
+ INIT_WORK(&policy->update, handle_update);
+
+ return policy;
+
+err_free_cpumask:
+ free_cpumask_var(policy->cpus);
+err_free_policy:
+ kfree(policy);
+
+ return NULL;
+}
+
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+ struct kobject *kobj;
+ struct completion *cmp;
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_REMOVE_POLICY, policy);
+
+ down_read(&policy->rwsem);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ up_read(&policy->rwsem);
+ kobject_put(kobj);
+
+ /*
+ * We need to make sure that the underlying kobj is
+ * actually not referenced anymore by anybody before we
+ * proceed with unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
+}
+
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
+{
+ free_cpumask_var(policy->related_cpus);
+ free_cpumask_var(policy->cpus);
+ kfree(policy);
+}
+
+static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
+ struct device *cpu_dev)
+{
+ int ret;
+
+ if (WARN_ON(cpu == policy->cpu))
+ return 0;
+
+ /* Move kobject to the new policy->cpu */
+ ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
+ return ret;
+ }
+
+ down_write(&policy->rwsem);
+ policy->cpu = cpu;
+ up_write(&policy->rwsem);
+
+ return 0;
+}
+
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+ unsigned int j, cpu = dev->id;
+ int ret = -ENOMEM;
+ struct cpufreq_policy *policy;
+ unsigned long flags;
+ bool recover_policy = cpufreq_suspended;
+
+ if (cpu_is_offline(cpu))
+ return 0;
+
+ pr_debug("adding CPU %u\n", cpu);
+
+ /* check whether a different CPU already registered this
+ * CPU because it is in the same boat. */
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (unlikely(policy))
+ return 0;
+
+ /* Check if this cpu was hot-unplugged earlier and has siblings */
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_policy(policy) {
+ if (cpumask_test_cpu(cpu, policy->related_cpus)) {
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ ret = cpufreq_add_policy_cpu(policy, cpu, dev);
+ return ret;
+ }
+ }
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ /*
+ * Restore the saved policy when doing light-weight init and fall back
+ * to the full init if that fails.
+ */
+ policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
+ if (!policy) {
+ recover_policy = false;
+ policy = cpufreq_policy_alloc();
+ if (!policy)
+ goto nomem_out;
+ }
+
+ /*
+ * In the resume path, since we restore a saved policy, the assignment
+ * to policy->cpu is like an update of the existing policy, rather than
+ * the creation of a brand new one. So we need to perform this update
+ * by invoking update_policy_cpu().
+ */
+ if (recover_policy && cpu != policy->cpu)
+ WARN_ON(update_policy_cpu(policy, cpu, dev));
+ else
+ policy->cpu = cpu;
+
+ cpumask_copy(policy->cpus, cpumask_of(cpu));
+
+ /* Call the driver. From then on the cpufreq driver must be able
+ * to accept all calls to ->verify and ->setpolicy for this CPU.
+ */
+ ret = cpufreq_driver->init(policy);
+ if (ret) {
+ pr_debug("initialization failed\n");
+ goto err_set_policy_cpu;
+ }
+
+ down_write(&policy->rwsem);
+
+ /* related_cpus should at least contain policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+ * affected cpus must always be the ones which are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+ if (!recover_policy) {
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+
+ /* prepare interface data */
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+ &dev->kobj, "cpufreq");
+ if (ret) {
+ pr_err("%s: failed to init policy->kobj: %d\n",
+ __func__, ret);
+ goto err_init_policy_kobj;
+ }
+ }
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ policy->cur = cpufreq_driver->get(policy->cpu);
+ if (!policy->cur) {
+ pr_err("%s: ->get() failed\n", __func__);
+ goto err_get_freq;
+ }
+ }
+
+ /*
+ * Sometimes boot loaders set the CPU frequency to a value outside of
+ * the frequency table present with the cpufreq core. In such cases the
+ * CPU might be unstable if it has to run at that frequency for a long
+ * duration, so it's better to set it to a frequency which is specified
+ * in the freq-table. This also makes cpufreq stats inconsistent, as
+ * cpufreq-stats would fail to register because the current frequency of
+ * the CPU isn't found in the freq-table.
+ *
+ * Because we don't want this change to affect the boot process badly,
+ * we go for the next freq which is >= policy->cur ('cur' must be set by
+ * now, otherwise we will end up setting the freq to the lowest entry of
+ * the table, as 'cur' is initialized to zero).
+ *
+ * We are passing target-freq as "policy->cur - 1" otherwise
+ * __cpufreq_driver_target() would simply fail, as policy->cur will be
+ * equal to target-freq.
+ */
+ if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
+ && has_target()) {
+ /* Are we running at unknown frequency ? */
+ ret = cpufreq_frequency_table_get_index(policy, policy->cur);
+ if (ret == -EINVAL) {
+ /* Warn user and fix it */
+ pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
+ __func__, policy->cpu, policy->cur);
+ ret = __cpufreq_driver_target(policy, policy->cur - 1,
+ CPUFREQ_RELATION_L);
+
+ /*
+ * Surviving the first few seconds after boot at an
+ * "unknown" frequency does not mean that the system
+ * will remain stable at it for longer. Hence, a BUG_ON().
+ */
+ BUG_ON(ret);
+ pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
+ __func__, policy->cpu, policy->cur);
+ }
+ }
+
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_START, policy);
+
+ if (!recover_policy) {
+ ret = cpufreq_add_dev_interface(policy, dev);
+ if (ret)
+ goto err_out_unregister;
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_CREATE_POLICY, policy);
+ }
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_add(&policy->policy_list, &cpufreq_policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ cpufreq_init_policy(policy);
+
+ if (!recover_policy) {
+ policy->user_policy.policy = policy->policy;
+ policy->user_policy.governor = policy->governor;
+ }
+ up_write(&policy->rwsem);
+
+ kobject_uevent(&policy->kobj, KOBJ_ADD);
+
+ /* Callback for handling stuff after policy is ready */
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
+ pr_debug("initialization complete\n");
+
+ return 0;
+
+err_out_unregister:
+err_get_freq:
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_cpu_data, j) = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!recover_policy) {
+ kobject_put(&policy->kobj);
+ wait_for_completion(&policy->kobj_unregister);
+ }
+err_init_policy_kobj:
+ up_write(&policy->rwsem);
+
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+err_set_policy_cpu:
+ if (recover_policy) {
+ /* Do not leave stale fallback data behind. */
+ per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
+ cpufreq_policy_put_kobj(policy);
+ }
+ cpufreq_policy_free(policy);
+
+nomem_out:
+ return ret;
+}
+
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+ return __cpufreq_add_dev(dev, sif);
+}
+
+static int __cpufreq_remove_dev_prepare(struct device *dev,
+ struct subsys_interface *sif)
+{
+ unsigned int cpu = dev->id, cpus;
+ int ret;
+ unsigned long flags;
+ struct cpufreq_policy *policy;
+
+ pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ /* Save the policy somewhere when doing a light-weight tear-down */
+ if (cpufreq_suspended)
+ per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
+
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!policy) {
+ pr_debug("%s: No cpu_data found\n", __func__);
+ return -EINVAL;
+ }
+
+ if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (ret) {
+ pr_err("%s: Failed to stop governor\n", __func__);
+ return ret;
+ }
+
+ strncpy(per_cpu(cpufreq_cpu_governor, cpu),
+ policy->governor->name, CPUFREQ_NAME_LEN);
+ }
+
+ down_read(&policy->rwsem);
+ cpus = cpumask_weight(policy->cpus);
+ up_read(&policy->rwsem);
+
+ if (cpu != policy->cpu) {
+ sysfs_remove_link(&dev->kobj, "cpufreq");
+ } else if (cpus > 1) {
+ /* Nominate new CPU */
+ int new_cpu = cpumask_any_but(policy->cpus, cpu);
+ struct device *cpu_dev = get_cpu_device(new_cpu);
+
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = update_policy_cpu(policy, new_cpu, cpu_dev);
+ if (ret) {
+ if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+ "cpufreq"))
+ pr_err("%s: Failed to restore kobj link to cpu:%d\n",
+ __func__, cpu_dev->id);
+ return ret;
+ }
+
+ if (!cpufreq_suspended)
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, new_cpu, cpu);
+ } else if (cpufreq_driver->stop_cpu) {
+ cpufreq_driver->stop_cpu(policy);
+ }
+
+ return 0;
+}
+
+static int __cpufreq_remove_dev_finish(struct device *dev,
+ struct subsys_interface *sif)
+{
+ unsigned int cpu = dev->id, cpus;
+ int ret;
+ unsigned long flags;
+ struct cpufreq_policy *policy;
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!policy) {
+ pr_debug("%s: No cpu_data found\n", __func__);
+ return -EINVAL;
+ }
+
+ down_write(&policy->rwsem);
+ cpus = cpumask_weight(policy->cpus);
+
+ if (cpus > 1)
+ cpumask_clear_cpu(cpu, policy->cpus);
+ up_write(&policy->rwsem);
+
+ /* If cpu is last user of policy, free policy */
+ if (cpus == 1) {
+ if (has_target()) {
+ ret = __cpufreq_governor(policy,
+ CPUFREQ_GOV_POLICY_EXIT);
+ if (ret) {
+ pr_err("%s: Failed to exit governor\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (!cpufreq_suspended)
+ cpufreq_policy_put_kobj(policy);
+
+ /*
+ * Perform the ->exit() even during light-weight tear-down,
+ * since this is a core component, and is essential for the
+ * subsequent light-weight ->init() to succeed.
+ */
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ /* Remove policy from list of active policies */
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_del(&policy->policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!cpufreq_suspended)
+ cpufreq_policy_free(policy);
+ } else if (has_target()) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ if (!ret)
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ if (ret) {
+ pr_err("%s: Failed to start governor\n", __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpufreq_remove_dev - remove a CPU device
+ *
+ * Removes the cpufreq interface for a CPU device.
+ */
+static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+{
+ unsigned int cpu = dev->id;
+ int ret;
+
+ if (cpu_is_offline(cpu))
+ return 0;
+
+ ret = __cpufreq_remove_dev_prepare(dev, sif);
+
+ if (!ret)
+ ret = __cpufreq_remove_dev_finish(dev, sif);
+
+ return ret;
+}
+
+static void handle_update(struct work_struct *work)
+{
+ struct cpufreq_policy *policy =
+ container_of(work, struct cpufreq_policy, update);
+ unsigned int cpu = policy->cpu;
+ pr_debug("handle_update for cpu %u called\n", cpu);
+ cpufreq_update_policy(cpu);
+}
+
+/**
+ * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
+ * in deep trouble.
+ * @policy: policy managing CPUs
+ * @new_freq: CPU frequency the CPU actually runs at
+ *
+ * We adjust to the current frequency first, and need to clean up later.
+ * So either call cpufreq_update_policy() or schedule handle_update().
+ */
+static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
+ unsigned int new_freq)
+{
+ struct cpufreq_freqs freqs;
+
+ pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+ policy->cur, new_freq);
+
+ freqs.old = policy->cur;
+ freqs.new = new_freq;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+}
+
+/**
+ * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
+ * @cpu: CPU number
+ *
+ * This is the last known freq, without actually getting it from the driver.
+ * Return value will be same as what is shown in scaling_cur_freq in sysfs.
+ */
+unsigned int cpufreq_quick_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ unsigned int ret_freq = 0;
+
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+ return cpufreq_driver->get(cpu);
+
+ policy = cpufreq_cpu_get(cpu);
+ if (policy) {
+ ret_freq = policy->cur;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get);
+
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->max;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
+
+static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
+{
+ unsigned int ret_freq = 0;
+
+ if (!cpufreq_driver->get)
+ return ret_freq;
+
+ ret_freq = cpufreq_driver->get(policy->cpu);
+
+ if (ret_freq && policy->cur &&
+ !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
+ /*
+ * Verify that no discrepancy exists between the actual
+ * and the saved value.
+ */
+ if (unlikely(ret_freq != policy->cur)) {
+ cpufreq_out_of_sync(policy, ret_freq);
+ schedule_work(&policy->update);
+ }
+ }
+
+ return ret_freq;
+}
+
+/**
+ * cpufreq_get - get the current CPU frequency (in kHz)
+ * @cpu: CPU number
+ *
+ * Get the current frequency of the CPU.
+ */
+unsigned int cpufreq_get(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ down_read(&policy->rwsem);
+ ret_freq = __cpufreq_get(policy);
+ up_read(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_get);
+
+static struct subsys_interface cpufreq_interface = {
+ .name = "cpufreq",
+ .subsys = &cpu_subsys,
+ .add_dev = cpufreq_add_dev,
+ .remove_dev = cpufreq_remove_dev,
+};
+
+/*
+ * In case the platform wants some specific frequency to be configured
+ * during suspend.
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (!policy->suspend_freq) {
+ pr_err("%s: suspend_freq can't be zero\n", __func__);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+ policy->suspend_freq);
+
+ ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+ CPUFREQ_RELATION_H);
+ if (ret)
+ pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+ __func__, policy->suspend_freq, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
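+
+/*
+ * A driver opting into cpufreq_generic_suspend() only needs to pick a
+ * suspend frequency and wire up the callback. Sketch (the foo_* names and
+ * the choice of policy->max are illustrative assumptions):
+ *
+ * policy->suspend_freq = policy->max; (typically done in ->init())
+ *
+ * static struct cpufreq_driver foo_driver = {
+ * ...
+ * .suspend = cpufreq_generic_suspend,
+ * };
+ */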
+
+/**
+ * cpufreq_suspend() - Suspend CPUFreq governors
+ *
+ * Called during system-wide Suspend/Hibernate cycles for suspending governors,
+ * as some platforms can't change frequency after this point in the suspend
+ * cycle: some of the devices (like i2c, regulators, etc.) used for changing
+ * frequency are suspended quickly after this point.
+ */
+void cpufreq_suspend(void)
+{
+ struct cpufreq_policy *policy;
+
+ if (!cpufreq_driver)
+ return;
+
+ if (!has_target())
+ goto suspend;
+
+ pr_debug("%s: Suspending Governors\n", __func__);
+
+ for_each_policy(policy) {
+ if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+ pr_err("%s: Failed to stop governor for policy: %p\n",
+ __func__, policy);
+ else if (cpufreq_driver->suspend
+ && cpufreq_driver->suspend(policy))
+ pr_err("%s: Failed to suspend driver: %p\n", __func__,
+ policy);
+ }
+
+suspend:
+ cpufreq_suspended = true;
+}
+
+/**
+ * cpufreq_resume() - Resume CPUFreq governors
+ *
+ * Called during system-wide Suspend/Hibernate cycles for resuming governors that
+ * are suspended with cpufreq_suspend().
+ */
+void cpufreq_resume(void)
+{
+ struct cpufreq_policy *policy;
+
+ if (!cpufreq_driver)
+ return;
+
+ cpufreq_suspended = false;
+
+ if (!has_target())
+ return;
+
+ pr_debug("%s: Resuming Governors\n", __func__);
+
+ for_each_policy(policy) {
+ if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+ pr_err("%s: Failed to resume driver: %p\n", __func__,
+ policy);
+ else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+ || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+ pr_err("%s: Failed to start governor for policy: %p\n",
+ __func__, policy);
+ }
+
+ /*
+ * Schedule a call to cpufreq_update_policy() for the first online CPU,
+ * as that one won't be hotplugged out on suspend. It will verify that the
+ * current freq is in sync with what we believe it to be.
+ */
+ policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
+ if (WARN_ON(!policy))
+ return;
+
+ schedule_work(&policy->update);
+}
+
+/**
+ * cpufreq_get_current_driver - return current driver's name
+ *
+ * Return the name string of the currently loaded cpufreq driver
+ * or NULL, if none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->name;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
+
+/**
+ * cpufreq_get_driver_data - return current driver data
+ *
+ * Return the private data of the currently loaded cpufreq
+ * driver, or NULL if no cpufreq driver is loaded.
+ */
+void *cpufreq_get_driver_data(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->driver_data;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
+
+/*********************************************************************
+ * NOTIFIER LISTS INTERFACE *
+ *********************************************************************/
+
+/**
+ * cpufreq_register_notifier - register a driver with cpufreq
+ * @nb: notifier function to register
+ * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
+ *
+ * Add a driver to one of two lists: either a list of drivers that
+ * are notified about clock rate changes (once before and once after
+ * the transition), or a list of drivers that are notified about
+ * changes in cpufreq policy.
+ *
+ * This function may sleep, and has the same return conditions as
+ * blocking_notifier_chain_register.
+ */
+int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
+{
+ int ret;
+
+ if (cpufreq_disabled())
+ return -EINVAL;
+
+ WARN_ON(!init_cpufreq_transition_notifier_list_called);
+
+ switch (list) {
+ case CPUFREQ_TRANSITION_NOTIFIER:
+ ret = srcu_notifier_chain_register(
+ &cpufreq_transition_notifier_list, nb);
+ break;
+ case CPUFREQ_POLICY_NOTIFIER:
+ ret = blocking_notifier_chain_register(
+ &cpufreq_policy_notifier_list, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_register_notifier);
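+
+/*
+ * Typical client-side usage of the transition notifier (schematic sketch;
+ * the foo_* names are hypothetical):
+ *
+ * static int foo_cpufreq_callback(struct notifier_block *nb,
+ * unsigned long val, void *data)
+ * {
+ * struct cpufreq_freqs *freqs = data;
+ *
+ * if (val == CPUFREQ_POSTCHANGE)
+ * foo_rescale(freqs->cpu, freqs->new);
+ * return NOTIFY_OK;
+ * }
+ *
+ * static struct notifier_block foo_cpufreq_nb = {
+ * .notifier_call = foo_cpufreq_callback,
+ * };
+ *
+ * cpufreq_register_notifier(&foo_cpufreq_nb, CPUFREQ_TRANSITION_NOTIFIER);
+ */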
+
+/**
+ * cpufreq_unregister_notifier - unregister a driver with cpufreq
+ * @nb: notifier block to be unregistered
+ * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
+ *
+ * Remove a driver from the CPU frequency notifier list.
+ *
+ * This function may sleep, and has the same return conditions as
+ * blocking_notifier_chain_unregister.
+ */
+int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
+{
+ int ret;
+
+ if (cpufreq_disabled())
+ return -EINVAL;
+
+ switch (list) {
+ case CPUFREQ_TRANSITION_NOTIFIER:
+ ret = srcu_notifier_chain_unregister(
+ &cpufreq_transition_notifier_list, nb);
+ break;
+ case CPUFREQ_POLICY_NOTIFIER:
+ ret = blocking_notifier_chain_unregister(
+ &cpufreq_policy_notifier_list, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_unregister_notifier);
+
+
+/*********************************************************************
+ * GOVERNORS *
+ *********************************************************************/
+
+/* Must set freqs->new to intermediate frequency */
+static int __target_intermediate(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int index)
+{
+ int ret;
+
+ freqs->new = cpufreq_driver->get_intermediate(policy, index);
+
+ /* We don't need to switch to intermediate freq */
+ if (!freqs->new)
+ return 0;
+
+ pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
+ __func__, policy->cpu, freqs->old, freqs->new);
+
+ cpufreq_freq_transition_begin(policy, freqs);
+ ret = cpufreq_driver->target_intermediate(policy, index);
+ cpufreq_freq_transition_end(policy, freqs, ret);
+
+ if (ret)
+ pr_err("%s: Failed to change to intermediate frequency: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __target_index(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *freq_table, int index)
+{
+ struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
+ unsigned int intermediate_freq = 0;
+ int retval = -EINVAL;
+ bool notify;
+
+ notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
+ if (notify) {
+ /* Handle switching to intermediate frequency */
+ if (cpufreq_driver->get_intermediate) {
+ retval = __target_intermediate(policy, &freqs, index);
+ if (retval)
+ return retval;
+
+ intermediate_freq = freqs.new;
+ /* Set old freq to intermediate */
+ if (intermediate_freq)
+ freqs.old = freqs.new;
+ }
+
+ freqs.new = freq_table[index].frequency;
+ pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
+ __func__, policy->cpu, freqs.old, freqs.new);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ }
+
+ retval = cpufreq_driver->target_index(policy, index);
+ if (retval)
+ pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
+ retval);
+
+ if (notify) {
+ cpufreq_freq_transition_end(policy, &freqs, retval);
+
+ /*
+ * Failed after setting to intermediate freq? Driver should have
+ * reverted back to initial frequency and so should we. Check
+ * here for intermediate_freq instead of get_intermediate, in
+ * case we haven't switched to the intermediate freq at all.
+ */
+ if (unlikely(retval && intermediate_freq)) {
+ freqs.old = intermediate_freq;
+ freqs.new = policy->restore_freq;
+ cpufreq_freq_transition_begin(policy, &freqs);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ }
+ }
+
+ return retval;
+}
+
+int __cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int old_target_freq = target_freq;
+ int retval = -EINVAL;
+
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ /* Make sure that target_freq is within supported range */
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+
+ pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
+ policy->cpu, target_freq, relation, old_target_freq);
+
+ /*
+ * This might look like a redundant call, as we are checking it again
+ * after finding the index. But it is left intentionally for cases where
+ * exactly the same freq is requested again, so we can save a few
+ * function calls.
+ */
+ if (target_freq == policy->cur)
+ return 0;
+
+ /* Save last value to restore later on errors */
+ policy->restore_freq = policy->cur;
+
+ if (cpufreq_driver->target)
+ retval = cpufreq_driver->target(policy, target_freq, relation);
+ else if (cpufreq_driver->target_index) {
+ struct cpufreq_frequency_table *freq_table;
+ int index;
+
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!freq_table)) {
+ pr_err("%s: Unable to find freq_table\n", __func__);
+ goto out;
+ }
+
+ retval = cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index);
+ if (unlikely(retval)) {
+ pr_err("%s: Unable to find matching freq\n", __func__);
+ goto out;
+ }
+
+ if (freq_table[index].frequency == policy->cur) {
+ retval = 0;
+ goto out;
+ }
+
+ retval = __target_index(policy, freq_table, index);
+ }
+
+out:
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
+
+int cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int ret = -EINVAL;
+
+ down_write(&policy->rwsem);
+
+ ret = __cpufreq_driver_target(policy, target_freq, relation);
+
+ up_write(&policy->rwsem);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_target);
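+/*
+ * Usage sketch (frequency illustrative): governors request a frequency
+ * through the locked wrapper above, e.g.
+ *
+ *	ret = cpufreq_driver_target(policy, 1200000, CPUFREQ_RELATION_L);
+ *
+ * where CPUFREQ_RELATION_L selects the lowest table frequency at or above
+ * the target and CPUFREQ_RELATION_H the highest one at or below it.
+ */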
+
+static int __cpufreq_governor(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ int ret;
+
+ /*
+ * 'gov' only needs to be defined when the default governor is known to
+ * have latency restrictions, e.g. conservative or ondemand. That this
+ * is the case is already ensured in Kconfig.
+ */
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
+ struct cpufreq_governor *gov = &cpufreq_gov_performance;
+#else
+ struct cpufreq_governor *gov = NULL;
+#endif
+
+ /* Don't start any governor operations if we are entering suspend */
+ if (cpufreq_suspended)
+ return 0;
+ /*
+ * The governor might not be initialized here if an ACPI _PPC change
+ * notification happened, so check for it.
+ */
+ if (!policy->governor)
+ return -EINVAL;
+
+ if (policy->governor->max_transition_latency &&
+ policy->cpuinfo.transition_latency >
+ policy->governor->max_transition_latency) {
+ if (!gov)
+ return -EINVAL;
+ else {
+ pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+ policy->governor->name, gov->name);
+ policy->governor = gov;
+ }
+ }
+
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ if (!try_module_get(policy->governor->owner))
+ return -EINVAL;
+
+ pr_debug("__cpufreq_governor for CPU %u, event %u\n",
+ policy->cpu, event);
+
+ mutex_lock(&cpufreq_governor_lock);
+ if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
+ || (!policy->governor_enabled
+ && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
+ mutex_unlock(&cpufreq_governor_lock);
+ return -EBUSY;
+ }
+
+ if (event == CPUFREQ_GOV_STOP)
+ policy->governor_enabled = false;
+ else if (event == CPUFREQ_GOV_START)
+ policy->governor_enabled = true;
+
+ mutex_unlock(&cpufreq_governor_lock);
+
+ ret = policy->governor->governor(policy, event);
+
+ if (!ret) {
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ policy->governor->initialized++;
+ else if (event == CPUFREQ_GOV_POLICY_EXIT)
+ policy->governor->initialized--;
+ } else {
+ /* Restore original values */
+ mutex_lock(&cpufreq_governor_lock);
+ if (event == CPUFREQ_GOV_STOP)
+ policy->governor_enabled = true;
+ else if (event == CPUFREQ_GOV_START)
+ policy->governor_enabled = false;
+ mutex_unlock(&cpufreq_governor_lock);
+ }
+
+ if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
+ ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
+ module_put(policy->governor->owner);
+
+ return ret;
+}
+
+int cpufreq_register_governor(struct cpufreq_governor *governor)
+{
+ int err;
+
+ if (!governor)
+ return -EINVAL;
+
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ mutex_lock(&cpufreq_governor_mutex);
+
+ governor->initialized = 0;
+ err = -EBUSY;
+ if (!find_governor(governor->name)) {
+ err = 0;
+ list_add(&governor->governor_list, &cpufreq_governor_list);
+ }
+
+ mutex_unlock(&cpufreq_governor_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cpufreq_register_governor);
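+/*
+ * Registration sketch (hypothetical "noop" governor; the trivial callback
+ * is illustrative only):
+ *
+ *	static int noop_governor(struct cpufreq_policy *policy,
+ *				 unsigned int event)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct cpufreq_governor cpufreq_gov_noop = {
+ *		.name		= "noop",
+ *		.governor	= noop_governor,
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	ret = cpufreq_register_governor(&cpufreq_gov_noop);
+ */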
+
+void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+{
+ int cpu;
+
+ if (!governor)
+ return;
+
+ if (cpufreq_disabled())
+ return;
+
+ for_each_present_cpu(cpu) {
+ if (cpu_online(cpu))
+ continue;
+ if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
+ strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+ }
+
+ mutex_lock(&cpufreq_governor_mutex);
+ list_del(&governor->governor_list);
+ mutex_unlock(&cpufreq_governor_mutex);
+ return;
+}
+EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
+
+
+/*********************************************************************
+ * POLICY INTERFACE *
+ *********************************************************************/
+
+/**
+ * cpufreq_get_policy - get the current cpufreq_policy
+ * @policy: struct cpufreq_policy into which the current cpufreq_policy
+ * is written
+ * @cpu: CPU whose policy is to be read
+ *
+ * Reads the current cpufreq policy.
+ */
+int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ struct cpufreq_policy *cpu_policy;
+ if (!policy)
+ return -EINVAL;
+
+ cpu_policy = cpufreq_cpu_get(cpu);
+ if (!cpu_policy)
+ return -EINVAL;
+
+ memcpy(policy, cpu_policy, sizeof(*policy));
+
+ cpufreq_cpu_put(cpu_policy);
+ return 0;
+}
+EXPORT_SYMBOL(cpufreq_get_policy);
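+/*
+ * Usage sketch (illustrative): callers pass a stack copy to be filled in,
+ * so no reference is held once this returns:
+ *
+ *	struct cpufreq_policy snapshot;
+ *
+ *	if (!cpufreq_get_policy(&snapshot, cpu))
+ *		pr_info("cpu%u: %u - %u kHz\n", cpu, snapshot.min,
+ *			snapshot.max);
+ */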
+
+/*
+ * policy:     current policy.
+ * new_policy: policy to be set.
+ */
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy)
+{
+ struct cpufreq_governor *old_gov;
+ int ret;
+
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+ new_policy->cpu, new_policy->min, new_policy->max);
+
+ memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
+
+ if (new_policy->min > policy->max || new_policy->max < policy->min)
+ return -EINVAL;
+
+ /* verify the cpu speed can be set within this limit */
+ ret = cpufreq_driver->verify(new_policy);
+ if (ret)
+ return ret;
+
+ /* adjust if necessary - all reasons */
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_ADJUST, new_policy);
+
+ /* adjust if necessary - hardware incompatibility*/
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_INCOMPATIBLE, new_policy);
+
+ /*
+ * verify the cpu speed can be set within this limit, which might be
+ * different to the first one
+ */
+ ret = cpufreq_driver->verify(new_policy);
+ if (ret)
+ return ret;
+
+ /* notification of the new policy */
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_NOTIFY, new_policy);
+
+ policy->min = new_policy->min;
+ policy->max = new_policy->max;
+
+ pr_debug("new min and max freqs are %u - %u kHz\n",
+ policy->min, policy->max);
+
+ if (cpufreq_driver->setpolicy) {
+ policy->policy = new_policy->policy;
+ pr_debug("setting range\n");
+ return cpufreq_driver->setpolicy(new_policy);
+ }
+
+ if (new_policy->governor == policy->governor)
+ goto out;
+
+ pr_debug("governor switch\n");
+
+ /* save old, working values */
+ old_gov = policy->governor;
+ /* end old governor */
+ if (old_gov) {
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ up_write(&policy->rwsem);
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ down_write(&policy->rwsem);
+ }
+
+ /* start new governor */
+ policy->governor = new_policy->governor;
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+ goto out;
+
+ up_write(&policy->rwsem);
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ down_write(&policy->rwsem);
+ }
+
+ /* new governor failed, so re-start old one */
+ pr_debug("starting governor %s failed\n", policy->governor->name);
+ if (old_gov) {
+ policy->governor = old_gov;
+ __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ }
+
+ return -EINVAL;
+
+ out:
+ pr_debug("governor: change or update limits\n");
+ return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+}
+
+/**
+ * cpufreq_update_policy - re-evaluate an existing cpufreq policy
+ * @cpu: CPU which shall be re-evaluated
+ *
+ * Useful for policy notifiers which have different requirements at
+ * different times.
+ */
+int cpufreq_update_policy(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy new_policy;
+ int ret;
+
+ if (!policy)
+ return -ENODEV;
+
+ down_write(&policy->rwsem);
+
+ pr_debug("updating policy for CPU %u\n", cpu);
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.min = policy->user_policy.min;
+ new_policy.max = policy->user_policy.max;
+ new_policy.policy = policy->user_policy.policy;
+ new_policy.governor = policy->user_policy.governor;
+
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ new_policy.cur = cpufreq_driver->get(cpu);
+ if (WARN_ON(!new_policy.cur)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (!policy->cur) {
+ pr_debug("Driver did not initialize current freq\n");
+ policy->cur = new_policy.cur;
+ } else {
+ if (policy->cur != new_policy.cur && has_target())
+ cpufreq_out_of_sync(policy, new_policy.cur);
+ }
+ }
+
+ ret = cpufreq_set_policy(policy, &new_policy);
+
+unlock:
+ up_write(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_update_policy);
+
+static int cpufreq_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct device *dev;
+
+ dev = get_cpu_device(cpu);
+ if (dev) {
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ __cpufreq_add_dev(dev, NULL);
+ break;
+
+ case CPU_DOWN_PREPARE:
+ __cpufreq_remove_dev_prepare(dev, NULL);
+ break;
+
+ case CPU_POST_DEAD:
+ __cpufreq_remove_dev_finish(dev, NULL);
+ break;
+
+ case CPU_DOWN_FAILED:
+ __cpufreq_add_dev(dev, NULL);
+ break;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata cpufreq_cpu_notifier = {
+ .notifier_call = cpufreq_cpu_callback,
+};
+
+/*********************************************************************
+ * BOOST *
+ *********************************************************************/
+static int cpufreq_boost_set_sw(int state)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_policy *policy;
+ int ret = -EINVAL;
+
+ for_each_policy(policy) {
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (freq_table) {
+ ret = cpufreq_frequency_table_cpuinfo(policy,
+ freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n",
+ __func__);
+ break;
+ }
+ policy->user_policy.max = policy->max;
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ }
+ }
+
+ return ret;
+}
+
+int cpufreq_boost_trigger_state(int state)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (cpufreq_driver->boost_enabled == state)
+ return 0;
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ ret = cpufreq_driver->set_boost(state);
+ if (ret) {
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = !state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ pr_err("%s: Cannot %s BOOST\n",
+ __func__, state ? "enable" : "disable");
+ }
+
+ return ret;
+}
+
+int cpufreq_boost_supported(void)
+{
+ if (likely(cpufreq_driver))
+ return cpufreq_driver->boost_supported;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
+
+int cpufreq_boost_enabled(void)
+{
+ return cpufreq_driver->boost_enabled;
+}
+EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
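+/*
+ * Usage sketch (illustrative): the global "boost" sysfs store amounts to
+ *
+ *	if (cpufreq_boost_supported())
+ *		ret = cpufreq_boost_trigger_state(state);
+ *
+ * with state == 1 to enable and state == 0 to disable boost frequencies.
+ */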
+
+/*********************************************************************
+ * REGISTER / UNREGISTER CPUFREQ DRIVER *
+ *********************************************************************/
+
+/**
+ * cpufreq_register_driver - register a CPU Frequency driver
+ * @driver_data: A struct cpufreq_driver containing the values
+ * submitted by the CPU Frequency driver.
+ *
+ * Registers a CPU Frequency driver with this core code. Returns zero on
+ * success, or -EBUSY when another driver got here first (and isn't
+ * unregistered in the meantime).
+ */
+int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+{
+ unsigned long flags;
+ int ret;
+
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ if (!driver_data || !driver_data->verify || !driver_data->init ||
+ !(driver_data->setpolicy || driver_data->target_index ||
+ driver_data->target) ||
+ (driver_data->setpolicy && (driver_data->target_index ||
+ driver_data->target)) ||
+ (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
+ return -EINVAL;
+
+ pr_debug("trying to register driver %s\n", driver_data->name);
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ if (cpufreq_driver) {
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return -EEXIST;
+ }
+ cpufreq_driver = driver_data;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (driver_data->setpolicy)
+ driver_data->flags |= CPUFREQ_CONST_LOOPS;
+
+ if (cpufreq_boost_supported()) {
+ /*
+ * Check if driver provides function to enable boost -
+ * if not, use cpufreq_boost_set_sw as default
+ */
+ if (!cpufreq_driver->set_boost)
+ cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+
+ ret = cpufreq_sysfs_create_file(&boost.attr);
+ if (ret) {
+ pr_err("%s: cannot register global BOOST sysfs file\n",
+ __func__);
+ goto err_null_driver;
+ }
+ }
+
+ ret = subsys_interface_register(&cpufreq_interface);
+ if (ret)
+ goto err_boost_unreg;
+
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
+ list_empty(&cpufreq_policy_list)) {
+ /* if all ->init() calls failed, unregister */
+ pr_debug("%s: No CPU initialized for driver %s\n", __func__,
+ driver_data->name);
+ goto err_if_unreg;
+ }
+
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ pr_debug("driver %s up and running\n", driver_data->name);
+
+ return 0;
+err_if_unreg:
+ subsys_interface_unregister(&cpufreq_interface);
+err_boost_unreg:
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
+err_null_driver:
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_register_driver);
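+/*
+ * Registration sketch for a minimal ->target_index driver (names
+ * hypothetical; the generic table helpers are assumed to suit the
+ * hardware):
+ *
+ *	static struct cpufreq_driver foo_cpufreq_driver = {
+ *		.name		= "foo-cpufreq",
+ *		.init		= foo_cpu_init,
+ *		.verify		= cpufreq_generic_frequency_table_verify,
+ *		.target_index	= foo_target_index,
+ *		.get		= cpufreq_generic_get,
+ *	};
+ *
+ *	ret = cpufreq_register_driver(&foo_cpufreq_driver);
+ *
+ * Per the validation above, ->setpolicy and ->target/->target_index are
+ * mutually exclusive, and ->get_intermediate and ->target_intermediate
+ * must be provided together or not at all.
+ */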
+
+/**
+ * cpufreq_unregister_driver - unregister the current CPUFreq driver
+ *
+ * Unregister the current CPUFreq driver. Only call this if you have
+ * the right to do so, i.e. if you have succeeded in initialising before!
+ * Returns zero if successful, and -EINVAL if the cpufreq_driver is
+ * currently not initialised.
+ */
+int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+{
+ unsigned long flags;
+
+ if (!cpufreq_driver || (driver != cpufreq_driver))
+ return -EINVAL;
+
+ pr_debug("unregistering driver %s\n", driver->name);
+
+ /* Protect against concurrent cpu hotplug */
+ get_online_cpus();
+ subsys_interface_unregister(&cpufreq_interface);
+ if (cpufreq_boost_supported())
+ cpufreq_sysfs_remove_file(&boost.attr);
+
+ unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpufreq_driver = NULL;
+
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ put_online_cpus();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+
+/*
+ * Stop cpufreq at shutdown to make sure it isn't holding any locks
+ * or mutexes when secondary CPUs are halted.
+ */
+static struct syscore_ops cpufreq_syscore_ops = {
+ .shutdown = cpufreq_suspend,
+};
+
+static int __init cpufreq_core_init(void)
+{
+ if (cpufreq_disabled())
+ return -ENODEV;
+
+ cpufreq_global_kobject = kobject_create();
+ BUG_ON(!cpufreq_global_kobject);
+
+ register_syscore_ops(&cpufreq_syscore_ops);
+
+ return 0;
+}
+core_initcall(cpufreq_core_init);
diff --git a/kernel/drivers/cpufreq/cpufreq_conservative.c b/kernel/drivers/cpufreq/cpufreq_conservative.c
new file mode 100644
index 000000000..25a70d06c
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_conservative.c
@@ -0,0 +1,408 @@
+/*
+ * drivers/cpufreq/cpufreq_conservative.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include "cpufreq_governor.h"
+
+/* Conservative governor macros */
+#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
+#define DEF_FREQUENCY_STEP (5)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (10)
+
+static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
+
+static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
+ struct cpufreq_policy *policy)
+{
+ unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
+
+ /* max freq cannot be less than 100. But who knows... */
+ if (unlikely(freq_target == 0))
+ freq_target = DEF_FREQUENCY_STEP;
+
+ return freq_target;
+}
+
+/*
+ * Every sampling_rate, we check whether the current idle time is less than
+ * 20% (default); if it is, we try to increase the frequency. Every
+ * sampling_rate * sampling_down_factor, we check whether the current idle
+ * time is more than 80% (default); if it is, we try to decrease the
+ * frequency.
+ *
+ * Frequency changes happen in steps of 5% (default) of the maximum
+ * frequency, in both directions.
+ */
+static void cs_check_cpu(int cpu, unsigned int load)
+{
+ struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
+ struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ struct dbs_data *dbs_data = policy->governor_data;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+ /*
+ * break out if we 'cannot' reduce the speed as the user might
+ * want freq_step to be zero
+ */
+ if (cs_tuners->freq_step == 0)
+ return;
+
+ /* Check for frequency increase */
+ if (load > cs_tuners->up_threshold) {
+ dbs_info->down_skip = 0;
+
+ /* if we are already at full speed then break out early */
+ if (dbs_info->requested_freq == policy->max)
+ return;
+
+ dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
+
+ if (dbs_info->requested_freq > policy->max)
+ dbs_info->requested_freq = policy->max;
+
+ __cpufreq_driver_target(policy, dbs_info->requested_freq,
+ CPUFREQ_RELATION_H);
+ return;
+ }
+
+ /* if sampling_down_factor is active break out early */
+ if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
+ return;
+ dbs_info->down_skip = 0;
+
+ /* Check for frequency decrease */
+ if (load < cs_tuners->down_threshold) {
+ unsigned int freq_target;
+ /*
+ * if we cannot reduce the frequency anymore, break out early
+ */
+ if (policy->cur == policy->min)
+ return;
+
+ freq_target = get_freq_target(cs_tuners, policy);
+ if (dbs_info->requested_freq > freq_target)
+ dbs_info->requested_freq -= freq_target;
+ else
+ dbs_info->requested_freq = policy->min;
+
+ __cpufreq_driver_target(policy, dbs_info->requested_freq,
+ CPUFREQ_RELATION_L);
+ return;
+ }
+}
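+/*
+ * Worked example (illustrative numbers): with policy->max = 2000000 kHz and
+ * the default freq_step of 5, get_freq_target() yields 100000 kHz, so each
+ * sample above up_threshold raises requested_freq by 100 MHz (capped at
+ * policy->max) and each qualifying sample below down_threshold lowers it by
+ * the same step (floored at policy->min).
+ */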
+
+static void cs_dbs_timer(struct work_struct *work)
+{
+ struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+ struct cs_cpu_dbs_info_s, cdbs.work.work);
+ unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+ cpu);
+ struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
+ bool modify_all = true;
+
+ mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+ if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+ modify_all = false;
+ else
+ dbs_check_cpu(dbs_data, cpu);
+
+ gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
+ mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+}
+
+static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cs_cpu_dbs_info_s *dbs_info =
+ &per_cpu(cs_cpu_dbs_info, freq->cpu);
+ struct cpufreq_policy *policy;
+
+ if (!dbs_info->enable)
+ return 0;
+
+ policy = dbs_info->cdbs.cur_policy;
+
+ /*
+ * We only care if our internally tracked freq moves outside the 'valid'
+ * range of frequencies available to us; otherwise we do not change it.
+ */
+ if (dbs_info->requested_freq > policy->max
+ || dbs_info->requested_freq < policy->min)
+ dbs_info->requested_freq = freq->new;
+
+ return 0;
+}
+
+/************************** sysfs interface ************************/
+static struct common_dbs_data cs_dbs_cdata;
+
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
+{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+
+ cs_tuners->sampling_down_factor = input;
+ return count;
+}
+
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
+{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
+ return count;
+}
+
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
+{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ return -EINVAL;
+
+ cs_tuners->up_threshold = input;
+ return count;
+}
+
+static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
+{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ /* cannot be lower than 11 otherwise freq will not fall */
+ if (ret != 1 || input < 11 || input > 100 ||
+ input >= cs_tuners->up_threshold)
+ return -EINVAL;
+
+ cs_tuners->down_threshold = input;
+ return count;
+}
+
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
+{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input, j;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1)
+ input = 1;
+
+ if (input == cs_tuners->ignore_nice_load) /* nothing to do */
+ return count;
+
+ cs_tuners->ignore_nice_load = input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ for_each_online_cpu(j) {
+ struct cs_cpu_dbs_info_s *dbs_info;
+ dbs_info = &per_cpu(cs_cpu_dbs_info, j);
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall, 0);
+ if (cs_tuners->ignore_nice_load)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ }
+ return count;
+}
+
+static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
+{
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 100)
+ input = 100;
+
+ /*
+ * No need to test here if freq_step is zero, as the user might actually
+ * want this (they would be crazy, though :)).
+ */
+ cs_tuners->freq_step = input;
+ return count;
+}
+
+show_store_one(cs, sampling_rate);
+show_store_one(cs, sampling_down_factor);
+show_store_one(cs, up_threshold);
+show_store_one(cs, down_threshold);
+show_store_one(cs, ignore_nice_load);
+show_store_one(cs, freq_step);
+declare_show_sampling_rate_min(cs);
+
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(down_threshold);
+gov_sys_pol_attr_rw(ignore_nice_load);
+gov_sys_pol_attr_rw(freq_step);
+gov_sys_pol_attr_ro(sampling_rate_min);
+
+static struct attribute *dbs_attributes_gov_sys[] = {
+ &sampling_rate_min_gov_sys.attr,
+ &sampling_rate_gov_sys.attr,
+ &sampling_down_factor_gov_sys.attr,
+ &up_threshold_gov_sys.attr,
+ &down_threshold_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
+ &freq_step_gov_sys.attr,
+ NULL
+};
+
+static struct attribute_group cs_attr_group_gov_sys = {
+ .attrs = dbs_attributes_gov_sys,
+ .name = "conservative",
+};
+
+static struct attribute *dbs_attributes_gov_pol[] = {
+ &sampling_rate_min_gov_pol.attr,
+ &sampling_rate_gov_pol.attr,
+ &sampling_down_factor_gov_pol.attr,
+ &up_threshold_gov_pol.attr,
+ &down_threshold_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
+ &freq_step_gov_pol.attr,
+ NULL
+};
+
+static struct attribute_group cs_attr_group_gov_pol = {
+ .attrs = dbs_attributes_gov_pol,
+ .name = "conservative",
+};
+
+/************************** sysfs end ************************/
+
+static int cs_init(struct dbs_data *dbs_data)
+{
+ struct cs_dbs_tuners *tuners;
+
+ tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
+ if (!tuners) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
+ tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ tuners->ignore_nice_load = 0;
+ tuners->freq_step = DEF_FREQUENCY_STEP;
+
+ dbs_data->tuners = tuners;
+ dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ mutex_init(&dbs_data->mutex);
+ return 0;
+}
+
+static void cs_exit(struct dbs_data *dbs_data)
+{
+ kfree(dbs_data->tuners);
+}
+
+define_get_cpu_dbs_routines(cs_cpu_dbs_info);
+
+static struct notifier_block cs_cpufreq_notifier_block = {
+ .notifier_call = dbs_cpufreq_notifier,
+};
+
+static struct cs_ops cs_ops = {
+ .notifier_block = &cs_cpufreq_notifier_block,
+};
+
+static struct common_dbs_data cs_dbs_cdata = {
+ .governor = GOV_CONSERVATIVE,
+ .attr_group_gov_sys = &cs_attr_group_gov_sys,
+ .attr_group_gov_pol = &cs_attr_group_gov_pol,
+ .get_cpu_cdbs = get_cpu_cdbs,
+ .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+ .gov_dbs_timer = cs_dbs_timer,
+ .gov_check_cpu = cs_check_cpu,
+ .gov_ops = &cs_ops,
+ .init = cs_init,
+ .exit = cs_exit,
+};
+
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_conservative = {
+ .name = "conservative",
+ .governor = cs_cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+};
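+/*
+ * Usage sketch (paths assume a system-wide governor instance, i.e. no
+ * governor-per-policy):
+ *
+ *	# echo conservative > \
+ *		/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
+ *	# echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
+ */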
+
+static int __init cpufreq_gov_dbs_init(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_conservative);
+}
+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_conservative);
+}
+
+MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
+MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
+ "Low Latency Frequency Transition capable processors "
+ "optimised for use in a battery environment");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+fs_initcall(cpufreq_gov_dbs_init);
+#else
+module_init(cpufreq_gov_dbs_init);
+#endif
+module_exit(cpufreq_gov_dbs_exit);
diff --git a/kernel/drivers/cpufreq/cpufreq_governor.c b/kernel/drivers/cpufreq/cpufreq_governor.c
new file mode 100644
index 000000000..1b44496b2
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_governor.c
@@ -0,0 +1,449 @@
+/*
+ * drivers/cpufreq/cpufreq_governor.c
+ *
+ * CPUFREQ governors common code
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/kernel_stat.h>
+#include <linux/slab.h>
+
+#include "cpufreq_governor.h"
+
+static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
+{
+ if (have_governor_per_policy())
+ return dbs_data->cdata->attr_group_gov_pol;
+ else
+ return dbs_data->cdata->attr_group_gov_sys;
+}
+
+void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
+{
+ struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ struct cpufreq_policy *policy;
+ unsigned int sampling_rate;
+ unsigned int max_load = 0;
+ unsigned int ignore_nice;
+ unsigned int j;
+
+ if (dbs_data->cdata->governor == GOV_ONDEMAND) {
+ struct od_cpu_dbs_info_s *od_dbs_info =
+ dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+
+ /*
+ * Sometimes, the ondemand governor uses an additional
+ * multiplier to give long delays. So apply this multiplier to
+ * the 'sampling_rate', so as to keep the wake-up-from-idle
+ * detection logic a bit conservative.
+ */
+ sampling_rate = od_tuners->sampling_rate;
+ sampling_rate *= od_dbs_info->rate_mult;
+
+ ignore_nice = od_tuners->ignore_nice_load;
+ } else {
+ sampling_rate = cs_tuners->sampling_rate;
+ ignore_nice = cs_tuners->ignore_nice_load;
+ }
+
+ policy = cdbs->cur_policy;
+
+ /* Get Absolute Load */
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs;
+ u64 cur_wall_time, cur_idle_time;
+ unsigned int idle_time, wall_time;
+ unsigned int load;
+ int io_busy = 0;
+
+ j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);
+
+ /*
+ * For the purpose of ondemand, waiting for disk IO is
+ * an indication that you're performance critical, and
+ * not that the system is actually idle. So do not add
+ * the iowait time to the cpu idle time.
+ */
+ if (dbs_data->cdata->governor == GOV_ONDEMAND)
+ io_busy = od_tuners->io_is_busy;
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
+
+ wall_time = (unsigned int)
+ (cur_wall_time - j_cdbs->prev_cpu_wall);
+ j_cdbs->prev_cpu_wall = cur_wall_time;
+
+ idle_time = (unsigned int)
+ (cur_idle_time - j_cdbs->prev_cpu_idle);
+ j_cdbs->prev_cpu_idle = cur_idle_time;
+
+ if (ignore_nice) {
+ u64 cur_nice;
+ unsigned long cur_nice_jiffies;
+
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ cdbs->prev_cpu_nice;
+ /*
+ * Assumption: nice time between sampling periods will
+ * be less than 2^32 jiffies on 32-bit systems
+ */
+ cur_nice_jiffies = (unsigned long)
+ cputime64_to_jiffies64(cur_nice);
+
+ cdbs->prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ idle_time += jiffies_to_usecs(cur_nice_jiffies);
+ }
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ /*
+ * If the CPU had gone completely idle, and a task just woke up
+ * on this CPU now, it would be unfair to calculate 'load' the
+ * usual way for this elapsed time-window, because it will show
+ * near-zero load, irrespective of how CPU intensive that task
+ * actually is. This is undesirable for latency-sensitive bursty
+ * workloads.
+ *
+ * To avoid this, we reuse the 'load' from the previous
+ * time-window and give this task a chance to start with a
+ * reasonably high CPU frequency. (However, we shouldn't over-do
+ * this copy, lest we get stuck at a high load (high frequency)
+ * for too long, even when the current system load has actually
+ * dropped down. So we perform the copy only once, upon the
+ * first wake-up from idle.)
+ *
+ * Detecting this situation is easy: the governor's deferrable
+ * timer would not have fired during CPU-idle periods. Hence
+ * an unusually large 'wall_time' (as compared to the sampling
+ * rate) indicates this scenario.
+ *
+ * prev_load can be zero in two cases and we must recalculate it
+ * for both cases:
+ * - during long idle intervals
+ * - explicitly set to zero
+ */
+ if (unlikely(wall_time > (2 * sampling_rate) &&
+ j_cdbs->prev_load)) {
+ load = j_cdbs->prev_load;
+
+ /*
+ * Perform a destructive copy, to ensure that we copy
+ * the previous load only once, upon the first wake-up
+ * from idle.
+ */
+ j_cdbs->prev_load = 0;
+ } else {
+ load = 100 * (wall_time - idle_time) / wall_time;
+ j_cdbs->prev_load = load;
+ }
+
+ if (load > max_load)
+ max_load = load;
+ }
+
+ dbs_data->cdata->gov_check_cpu(cpu, max_load);
+}
+EXPORT_SYMBOL_GPL(dbs_check_cpu);
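+/*
+ * Worked example (illustrative numbers): wall_time = 20000 us and
+ * idle_time = 5000 us give load = 100 * 15000 / 20000 = 75, so with the
+ * ondemand default up_threshold of 80 this sample alone would not trigger
+ * a ramp-up.
+ */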
+
+static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
+ unsigned int delay)
+{
+ struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+
+ mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+}
+
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+ unsigned int delay, bool all_cpus)
+{
+ int i;
+
+ mutex_lock(&cpufreq_governor_lock);
+ if (!policy->governor_enabled)
+ goto out_unlock;
+
+ if (!all_cpus) {
+ /*
+ * Use raw_smp_processor_id() to avoid preemptible warnings.
+ * We know that this is only called with all_cpus == false from
+ * works that have been queued with *_work_on() functions and
+ * those works are canceled during CPU_DOWN_PREPARE so they
+ * can't possibly run on any other CPU.
+ */
+ __gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
+ } else {
+ for_each_cpu(i, policy->cpus)
+ __gov_queue_work(i, dbs_data, delay);
+ }
+
+out_unlock:
+ mutex_unlock(&cpufreq_governor_lock);
+}
+EXPORT_SYMBOL_GPL(gov_queue_work);
+
+static inline void gov_cancel_work(struct dbs_data *dbs_data,
+ struct cpufreq_policy *policy)
+{
+ struct cpu_dbs_common_info *cdbs;
+ int i;
+
+ for_each_cpu(i, policy->cpus) {
+ cdbs = dbs_data->cdata->get_cpu_cdbs(i);
+ cancel_delayed_work_sync(&cdbs->work);
+ }
+}
+
+/* Return whether we need to evaluate the CPU load again or not */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate)
+{
+ if (policy_is_shared(cdbs->cur_policy)) {
+ ktime_t time_now = ktime_get();
+ s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+ /* Do nothing if we have sampled recently */
+ if (delta_us < (s64)(sampling_rate / 2))
+ return false;
+ else
+ cdbs->time_stamp = time_now;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(need_load_eval);
+
+static void set_sampling_rate(struct dbs_data *dbs_data,
+ unsigned int sampling_rate)
+{
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ cs_tuners->sampling_rate = sampling_rate;
+ } else {
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ od_tuners->sampling_rate = sampling_rate;
+ }
+}
+
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ struct common_dbs_data *cdata, unsigned int event)
+{
+ struct dbs_data *dbs_data;
+ struct od_cpu_dbs_info_s *od_dbs_info = NULL;
+ struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
+ struct od_ops *od_ops = NULL;
+ struct od_dbs_tuners *od_tuners = NULL;
+ struct cs_dbs_tuners *cs_tuners = NULL;
+ struct cpu_dbs_common_info *cpu_cdbs;
+ unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
+ int io_busy = 0;
+ int rc;
+
+ if (have_governor_per_policy())
+ dbs_data = policy->governor_data;
+ else
+ dbs_data = cdata->gdbs_data;
+
+ WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
+
+ switch (event) {
+ case CPUFREQ_GOV_POLICY_INIT:
+ if (have_governor_per_policy()) {
+ WARN_ON(dbs_data);
+ } else if (dbs_data) {
+ dbs_data->usage_count++;
+ policy->governor_data = dbs_data;
+ return 0;
+ }
+
+ dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+ if (!dbs_data) {
+ pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ dbs_data->cdata = cdata;
+ dbs_data->usage_count = 1;
+ rc = cdata->init(dbs_data);
+ if (rc) {
+ pr_err("%s: POLICY_INIT: init() failed\n", __func__);
+ kfree(dbs_data);
+ return rc;
+ }
+
+ if (!have_governor_per_policy())
+ WARN_ON(cpufreq_get_global_kobject());
+
+ rc = sysfs_create_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr(dbs_data));
+ if (rc) {
+ cdata->exit(dbs_data);
+ kfree(dbs_data);
+ return rc;
+ }
+
+ policy->governor_data = dbs_data;
+
+ /* policy latency is in ns. Convert it to us first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
+
+ /* Bring kernel and HW constraints together */
+ dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+ MIN_LATENCY_MULTIPLIER * latency);
+ set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
+ latency * LATENCY_MULTIPLIER));
+
+ if ((cdata->governor == GOV_CONSERVATIVE) &&
+ (!policy->governor->initialized)) {
+ struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+
+ cpufreq_register_notifier(cs_ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ if (!have_governor_per_policy())
+ cdata->gdbs_data = dbs_data;
+
+ return 0;
+ case CPUFREQ_GOV_POLICY_EXIT:
+ if (!--dbs_data->usage_count) {
+ sysfs_remove_group(get_governor_parent_kobj(policy),
+ get_sysfs_attr(dbs_data));
+
+ if (!have_governor_per_policy())
+ cpufreq_put_global_kobject();
+
+ if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
+ (policy->governor->initialized == 1)) {
+ struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+
+ cpufreq_unregister_notifier(cs_ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ cdata->exit(dbs_data);
+ kfree(dbs_data);
+ cdata->gdbs_data = NULL;
+ }
+
+ policy->governor_data = NULL;
+ return 0;
+ }
+
+ cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ cs_tuners = dbs_data->tuners;
+ cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ sampling_rate = cs_tuners->sampling_rate;
+ ignore_nice = cs_tuners->ignore_nice_load;
+ } else {
+ od_tuners = dbs_data->tuners;
+ od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ sampling_rate = od_tuners->sampling_rate;
+ ignore_nice = od_tuners->ignore_nice_load;
+ od_ops = dbs_data->cdata->gov_ops;
+ io_busy = od_tuners->io_is_busy;
+ }
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if (!policy->cur)
+ return -EINVAL;
+
+ mutex_lock(&dbs_data->mutex);
+
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs =
+ dbs_data->cdata->get_cpu_cdbs(j);
+ unsigned int prev_load;
+
+ j_cdbs->cpu = j;
+ j_cdbs->cur_policy = policy;
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
+ &j_cdbs->prev_cpu_wall, io_busy);
+
+ prev_load = (unsigned int)
+ (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
+ j_cdbs->prev_load = 100 * prev_load /
+ (unsigned int) j_cdbs->prev_cpu_wall;
+
+ if (ignore_nice)
+ j_cdbs->prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+
+ mutex_init(&j_cdbs->timer_mutex);
+ INIT_DEFERRABLE_WORK(&j_cdbs->work,
+ dbs_data->cdata->gov_dbs_timer);
+ }
+
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ cs_dbs_info->down_skip = 0;
+ cs_dbs_info->enable = 1;
+ cs_dbs_info->requested_freq = policy->cur;
+ } else {
+ od_dbs_info->rate_mult = 1;
+ od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ od_ops->powersave_bias_init_cpu(cpu);
+ }
+
+ mutex_unlock(&dbs_data->mutex);
+
+ /* Initiate timer time stamp */
+ cpu_cdbs->time_stamp = ktime_get();
+
+ gov_queue_work(dbs_data, policy,
+ delay_for_sampling_rate(sampling_rate), true);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
+ cs_dbs_info->enable = 0;
+
+ gov_cancel_work(dbs_data, policy);
+
+ mutex_lock(&dbs_data->mutex);
+ mutex_destroy(&cpu_cdbs->timer_mutex);
+ cpu_cdbs->cur_policy = NULL;
+
+ mutex_unlock(&dbs_data->mutex);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&dbs_data->mutex);
+ if (!cpu_cdbs->cur_policy) {
+ mutex_unlock(&dbs_data->mutex);
+ break;
+ }
+ mutex_lock(&cpu_cdbs->timer_mutex);
+ if (policy->max < cpu_cdbs->cur_policy->cur)
+ __cpufreq_driver_target(cpu_cdbs->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > cpu_cdbs->cur_policy->cur)
+ __cpufreq_driver_target(cpu_cdbs->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ dbs_check_cpu(dbs_data, cpu);
+ mutex_unlock(&cpu_cdbs->timer_mutex);
+ mutex_unlock(&dbs_data->mutex);
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
diff --git a/kernel/drivers/cpufreq/cpufreq_governor.h b/kernel/drivers/cpufreq/cpufreq_governor.h
new file mode 100644
index 000000000..cc401d147
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_governor.h
@@ -0,0 +1,280 @@
+/*
+ * drivers/cpufreq/cpufreq_governor.h
+ *
+ * Header file for CPUFreq governors common code
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
+ * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _CPUFREQ_GOVERNOR_H
+#define _CPUFREQ_GOVERNOR_H
+
+#include <linux/cpufreq.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+/*
+ * The polling frequency depends on the capability of the processor. Default
+ * polling frequency is 1000 times the transition latency of the processor. The
+ * governor will work on any processor with transition latency <= 10ms, using
+ * appropriate sampling rate.
+ *
+ * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work. All times here are in us (micro seconds).
+ */
+#define MIN_SAMPLING_RATE_RATIO (2)
+#define LATENCY_MULTIPLIER (1000)
+#define MIN_LATENCY_MULTIPLIER (20)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
+
+/* Ondemand Sampling types */
+enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
+
+/*
+ * Macro for creating governors sysfs routines
+ *
+ * - gov_sys: One governor instance per whole system
+ * - gov_pol: One governor instance per policy
+ */
+
+/* Create attributes */
+#define gov_sys_attr_ro(_name) \
+static struct global_attr _name##_gov_sys = \
+__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
+
+#define gov_sys_attr_rw(_name) \
+static struct global_attr _name##_gov_sys = \
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_ro(_name) \
+static struct freq_attr _name##_gov_pol = \
+__ATTR(_name, 0444, show_##_name##_gov_pol, NULL)
+
+#define gov_pol_attr_rw(_name) \
+static struct freq_attr _name##_gov_pol = \
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name) \
+ gov_sys_attr_rw(_name); \
+ gov_pol_attr_rw(_name)
+
+#define gov_sys_pol_attr_ro(_name) \
+ gov_sys_attr_ro(_name); \
+ gov_pol_attr_ro(_name)
+
+/* Create show/store routines */
+#define show_one(_gov, file_name) \
+static ssize_t show_##file_name##_gov_sys \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
+ return sprintf(buf, "%u\n", tuners->file_name); \
+} \
+ \
+static ssize_t show_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ struct dbs_data *dbs_data = policy->governor_data; \
+ struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
+ return sprintf(buf, "%u\n", tuners->file_name); \
+}
+
+#define store_one(_gov, file_name) \
+static ssize_t store_##file_name##_gov_sys \
+(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
+{ \
+ struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
+ return store_##file_name(dbs_data, buf, count); \
+} \
+ \
+static ssize_t store_##file_name##_gov_pol \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
+{ \
+ struct dbs_data *dbs_data = policy->governor_data; \
+ return store_##file_name(dbs_data, buf, count); \
+}
+
+#define show_store_one(_gov, file_name) \
+show_one(_gov, file_name); \
+store_one(_gov, file_name)
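+/*
+ * Example: show_store_one(cs, freq_step) in cpufreq_conservative.c expands
+ * to show_freq_step_gov_sys/show_freq_step_gov_pol plus the matching
+ * store_* wrappers; gov_sys_pol_attr_rw(freq_step) then turns those into
+ * the sysfs attributes freq_step_gov_sys and freq_step_gov_pol.
+ */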
+
+/* create helper routines */
+#define define_get_cpu_dbs_routines(_dbs_info) \
+static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu) \
+{ \
+ return &per_cpu(_dbs_info, cpu).cdbs; \
+} \
+ \
+static void *get_cpu_dbs_info_s(int cpu) \
+{ \
+ return &per_cpu(_dbs_info, cpu); \
+}
+
+/*
+ * Abbreviations:
+ * dbs: used as a short form for demand based switching. It helps to keep
+ * variable names smaller and simpler
+ * cdbs: common dbs
+ * od_*: On-demand governor
+ * cs_*: Conservative governor
+ */
+
+/* Per cpu structures */
+struct cpu_dbs_common_info {
+ int cpu;
+ u64 prev_cpu_idle;
+ u64 prev_cpu_wall;
+ u64 prev_cpu_nice;
+ /*
+ * Used to keep track of load in the previous interval. However, when
+ * explicitly set to zero, it is used as a flag to ensure that we copy
+ * the previous load to the current interval only once, upon the first
+ * wake-up from idle.
+ */
+ unsigned int prev_load;
+ struct cpufreq_policy *cur_policy;
+ struct delayed_work work;
+ /*
+ * percpu mutex that serializes governor limit change with gov_dbs_timer
+ * invocation. We do not want gov_dbs_timer to run when user is changing
+ * the governor or limits.
+ */
+ struct mutex timer_mutex;
+ ktime_t time_stamp;
+};
+
+struct od_cpu_dbs_info_s {
+ struct cpu_dbs_common_info cdbs;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_lo;
+ unsigned int freq_lo_jiffies;
+ unsigned int freq_hi_jiffies;
+ unsigned int rate_mult;
+ unsigned int sample_type:1;
+};
+
+struct cs_cpu_dbs_info_s {
+ struct cpu_dbs_common_info cdbs;
+ unsigned int down_skip;
+ unsigned int requested_freq;
+ unsigned int enable:1;
+};
+
+/* Per policy Governors sysfs tunables */
+struct od_dbs_tuners {
+ unsigned int ignore_nice_load;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int powersave_bias;
+ unsigned int io_is_busy;
+};
+
+struct cs_dbs_tuners {
+ unsigned int ignore_nice_load;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int freq_step;
+};
+
+/* Common Governor data across policies */
+struct dbs_data;
+struct common_dbs_data {
+ /* Common across governors */
+ #define GOV_ONDEMAND 0
+ #define GOV_CONSERVATIVE 1
+ int governor;
+ struct attribute_group *attr_group_gov_sys; /* one governor - system */
+ struct attribute_group *attr_group_gov_pol; /* one governor - policy */
+
+ /*
+ * Common data for platforms that don't set
+ * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
+ */
+ struct dbs_data *gdbs_data;
+
+ struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+ void *(*get_cpu_dbs_info_s)(int cpu);
+ void (*gov_dbs_timer)(struct work_struct *work);
+ void (*gov_check_cpu)(int cpu, unsigned int load);
+ int (*init)(struct dbs_data *dbs_data);
+ void (*exit)(struct dbs_data *dbs_data);
+
+ /* Governor specific ops, see below */
+ void *gov_ops;
+};
+
+/* Governor Per policy data */
+struct dbs_data {
+ struct common_dbs_data *cdata;
+ unsigned int min_sampling_rate;
+ int usage_count;
+ void *tuners;
+
+ /* dbs_mutex protects dbs_enable in governor start/stop */
+ struct mutex mutex;
+};
+
+/* Governor specific ops, will be passed to dbs_data->gov_ops */
+struct od_ops {
+ void (*powersave_bias_init_cpu)(int cpu);
+ unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
+ unsigned int freq_next, unsigned int relation);
+ void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
+};
+
+struct cs_ops {
+ struct notifier_block *notifier_block;
+};
+
+static inline int delay_for_sampling_rate(unsigned int sampling_rate)
+{
+ int delay = usecs_to_jiffies(sampling_rate);
+
+ /* We want all CPUs to do sampling nearly on the same jiffy */
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
+
+ return delay;
+}
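+/*
+ * Worked example (illustrative, HZ = 1000): sampling_rate = 10000 us maps
+ * to delay = 10 jiffies; if jiffies % delay == 7 on entry, the SMP branch
+ * returns 3 so that every CPU's next sample lands on the same 10-jiffy
+ * boundary.
+ */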
+
+#define declare_show_sampling_rate_min(_gov) \
+static ssize_t show_sampling_rate_min_gov_sys \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
+ return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
+} \
+ \
+static ssize_t show_sampling_rate_min_gov_pol \
+(struct cpufreq_policy *policy, char *buf) \
+{ \
+ struct dbs_data *dbs_data = policy->governor_data; \
+ return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
+}
+
+extern struct mutex cpufreq_governor_lock;
+
+void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate);
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ struct common_dbs_data *cdata, unsigned int event);
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+ unsigned int delay, bool all_cpus);
+void od_register_powersave_bias_handler(unsigned int (*f)
+ (struct cpufreq_policy *, unsigned int, unsigned int),
+ unsigned int powersave_bias);
+void od_unregister_powersave_bias_handler(void);
+#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/kernel/drivers/cpufreq/cpufreq_ondemand.c b/kernel/drivers/cpufreq/cpufreq_ondemand.c
new file mode 100644
index 000000000..ad3f38fd3
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_ondemand.c
@@ -0,0 +1,631 @@
+/*
+ * drivers/cpufreq/cpufreq_ondemand.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/percpu-defs.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include "cpufreq_governor.h"
+
+/* On-demand governor macros */
+#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (100000)
+#define MICRO_FREQUENCY_UP_THRESHOLD (95)
+#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
+#define MIN_FREQUENCY_UP_THRESHOLD (11)
+#define MAX_FREQUENCY_UP_THRESHOLD (100)
+
+static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
+
+static struct od_ops od_ops;
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static struct cpufreq_governor cpufreq_gov_ondemand;
+#endif
+
+static unsigned int default_powersave_bias;
+
+static void ondemand_powersave_bias_init_cpu(int cpu)
+{
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+
+ dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+ dbs_info->freq_lo = 0;
+}
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on how
+ * efficient idling at a higher frequency/voltage is.
+ * Pavel Machek says this is not so for various generations of AMD and old
+ * Intel systems.
+ * Mike Chan (android.com) claims this is also not true for ARM.
+ * Because of this, whitelist specific known series of CPUs by default, and
+ * leave all others up to the user.
+ */
+static int should_io_be_busy(void)
+{
+#if defined(CONFIG_X86)
+ /*
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
+ */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model >= 15)
+ return 1;
+#endif
+ return 0;
+}
+
+/*
+ * Find right freq to be set now with powersave_bias on.
+ * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
+ * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
+ */
+static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
+ unsigned int freq_next, unsigned int relation)
+{
+ unsigned int freq_req, freq_reduc, freq_avg;
+ unsigned int freq_hi, freq_lo;
+ unsigned int index = 0;
+ unsigned int jiffies_total, jiffies_hi, jiffies_lo;
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ policy->cpu);
+ struct dbs_data *dbs_data = policy->governor_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+ if (!dbs_info->freq_table) {
+ dbs_info->freq_lo = 0;
+ dbs_info->freq_lo_jiffies = 0;
+ return freq_next;
+ }
+
+ cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
+ relation, &index);
+ freq_req = dbs_info->freq_table[index].frequency;
+ freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
+ freq_avg = freq_req - freq_reduc;
+
+ /* Find freq bounds for freq_avg in freq_table */
+ index = 0;
+ cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+ CPUFREQ_RELATION_H, &index);
+ freq_lo = dbs_info->freq_table[index].frequency;
+ index = 0;
+ cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
+ CPUFREQ_RELATION_L, &index);
+ freq_hi = dbs_info->freq_table[index].frequency;
+
+ /* Find out how long we have to be in hi and lo freqs */
+ if (freq_hi == freq_lo) {
+ dbs_info->freq_lo = 0;
+ dbs_info->freq_lo_jiffies = 0;
+ return freq_lo;
+ }
+ jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
+ jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
+ jiffies_hi += ((freq_hi - freq_lo) / 2);
+ jiffies_hi /= (freq_hi - freq_lo);
+ jiffies_lo = jiffies_total - jiffies_hi;
+ dbs_info->freq_lo = freq_lo;
+ dbs_info->freq_lo_jiffies = jiffies_lo;
+ dbs_info->freq_hi_jiffies = jiffies_hi;
+ return freq_hi;
+}
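+/*
+ * Worked example (illustrative numbers): powersave_bias = 100 (i.e. 10%)
+ * and freq_req = 2000000 kHz give freq_avg = 1800000 kHz. If the table
+ * brackets that with freq_lo = 1600000 and freq_hi = 2000000 and
+ * jiffies_total = 10, then jiffies_hi = (200000 * 10 + 200000) / 400000 = 5
+ * (integer division) and jiffies_lo = 5, i.e. half the sampling window at
+ * each frequency, averaging out near freq_avg.
+ */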
+
+static void ondemand_powersave_bias_init(void)
+{
+ int i;
+ for_each_online_cpu(i) {
+ ondemand_powersave_bias_init_cpu(i);
+ }
+}
+
+static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
+{
+ struct dbs_data *dbs_data = policy->governor_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+ if (od_tuners->powersave_bias)
+ freq = od_ops.powersave_bias_target(policy, freq,
+ CPUFREQ_RELATION_H);
+ else if (policy->cur == policy->max)
+ return;
+
+ __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
+ CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
+/*
+ * Every sampling_rate, we check, if current idle time is less than 20%
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
+ */
+static void od_check_cpu(int cpu, unsigned int load)
+{
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+ struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+ struct dbs_data *dbs_data = policy->governor_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+ dbs_info->freq_lo = 0;
+
+ /* Check for frequency increase */
+ if (load > od_tuners->up_threshold) {
+ /* If switching to max speed, apply sampling_down_factor */
+ if (policy->cur < policy->max)
+ dbs_info->rate_mult =
+ od_tuners->sampling_down_factor;
+ dbs_freq_increase(policy, policy->max);
+ } else {
+ /* Calculate the next frequency proportional to load */
+ unsigned int freq_next, min_f, max_f;
+
+ min_f = policy->cpuinfo.min_freq;
+ max_f = policy->cpuinfo.max_freq;
+ freq_next = min_f + load * (max_f - min_f) / 100;
+
+ /* No longer fully busy, reset rate_mult */
+ dbs_info->rate_mult = 1;
+
+ if (!od_tuners->powersave_bias) {
+ __cpufreq_driver_target(policy, freq_next,
+ CPUFREQ_RELATION_C);
+ return;
+ }
+
+ freq_next = od_ops.powersave_bias_target(policy, freq_next,
+ CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
+ }
+}
+
+static void od_dbs_timer(struct work_struct *work)
+{
+ struct od_cpu_dbs_info_s *dbs_info =
+ container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
+ unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+ cpu);
+ struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ int delay = 0, sample_type = core_dbs_info->sample_type;
+ bool modify_all = true;
+
+ mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+ if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
+ modify_all = false;
+ goto max_delay;
+ }
+
+ /* Common NORMAL_SAMPLE setup */
+ core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ if (sample_type == OD_SUB_SAMPLE) {
+ delay = core_dbs_info->freq_lo_jiffies;
+ __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+ core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ } else {
+ dbs_check_cpu(dbs_data, cpu);
+ if (core_dbs_info->freq_lo) {
+ /* Setup timer for SUB_SAMPLE */
+ core_dbs_info->sample_type = OD_SUB_SAMPLE;
+ delay = core_dbs_info->freq_hi_jiffies;
+ }
+ }
+
+max_delay:
+ if (!delay)
+ delay = delay_for_sampling_rate(od_tuners->sampling_rate
+ * core_dbs_info->rate_mult);
+
+ gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
+ mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+}
+
+/************************** sysfs interface ************************/
+static struct common_dbs_data od_dbs_cdata;
+
+/**
+ * update_sampling_rate - update sampling rate effective immediately if needed.
+ * @dbs_data: governor data for the policy
+ * @new_rate: new sampling rate
+ *
+ * If the new rate is smaller than the old one, simply updating
+ * dbs_tuners_int.sampling_rate might not be appropriate. For example, suppose
+ * the original sampling_rate was 1 second and the requested new sampling rate
+ * is 10 ms because the user needs an immediate reaction from the ondemand
+ * governor, but is not sure whether a higher frequency will be required. Then
+ * the governor may change the sampling rate too late, up to 1 second later.
+ * Thus, if we are reducing the sampling rate, we need to make the new value
+ * effective immediately.
+ */
+static void update_sampling_rate(struct dbs_data *dbs_data,
+ unsigned int new_rate)
+{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ int cpu;
+
+ od_tuners->sampling_rate = new_rate = max(new_rate,
+ dbs_data->min_sampling_rate);
+
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy;
+ struct od_cpu_dbs_info_s *dbs_info;
+ unsigned long next_sampling, appointed_at;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ if (policy->governor != &cpufreq_gov_ondemand) {
+ cpufreq_cpu_put(policy);
+ continue;
+ }
+ dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+ cpufreq_cpu_put(policy);
+
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
+
+ if (!delayed_work_pending(&dbs_info->cdbs.work)) {
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ continue;
+ }
+
+ next_sampling = jiffies + usecs_to_jiffies(new_rate);
+ appointed_at = dbs_info->cdbs.work.timer.expires;
+
+ if (time_before(next_sampling, appointed_at)) {
+
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ cancel_delayed_work_sync(&dbs_info->cdbs.work);
+ mutex_lock(&dbs_info->cdbs.timer_mutex);
+
+ gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
+ usecs_to_jiffies(new_rate), true);
+
+ }
+ mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ }
+}
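+
+/*
+ * For example, if sampling_rate was 1000000 us (1 s) and a sample is queued
+ * to fire ~900 ms from now, lowering the rate to 10000 us cancels that work
+ * and requeues it 10 ms out, instead of letting the stale timer expire first.
+ */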
+
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ update_sampling_rate(dbs_data, input);
+ return count;
+}
+
+static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
+{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ unsigned int j;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ od_tuners->io_is_busy = !!input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ for_each_online_cpu(j) {
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ j);
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
+ }
+ return count;
+}
+
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
+{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+ input < MIN_FREQUENCY_UP_THRESHOLD) {
+ return -EINVAL;
+ }
+
+ od_tuners->up_threshold = input;
+ return count;
+}
+
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
+{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int input, j;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+ od_tuners->sampling_down_factor = input;
+
+ /* Reset down sampling multiplier in case it was active */
+ for_each_online_cpu(j) {
+ struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+ j);
+ dbs_info->rate_mult = 1;
+ }
+ return count;
+}
+
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
+{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+	unsigned int j;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1)
+ input = 1;
+
+ if (input == od_tuners->ignore_nice_load) { /* nothing to do */
+ return count;
+ }
+ od_tuners->ignore_nice_load = input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ for_each_online_cpu(j) {
+ struct od_cpu_dbs_info_s *dbs_info;
+ dbs_info = &per_cpu(od_cpu_dbs_info, j);
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
+ if (od_tuners->ignore_nice_load)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+
+ }
+ return count;
+}
+
+static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
+{
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1000)
+ input = 1000;
+
+ od_tuners->powersave_bias = input;
+ ondemand_powersave_bias_init();
+ return count;
+}
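+
+/*
+ * powersave_bias is expressed in units of 0.1%, so the clamp above caps it
+ * at 1000, i.e. a 100% reduction of the computed target frequency.
+ */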
+
+show_store_one(od, sampling_rate);
+show_store_one(od, io_is_busy);
+show_store_one(od, up_threshold);
+show_store_one(od, sampling_down_factor);
+show_store_one(od, ignore_nice_load);
+show_store_one(od, powersave_bias);
+declare_show_sampling_rate_min(od);
+
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(io_is_busy);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(ignore_nice_load);
+gov_sys_pol_attr_rw(powersave_bias);
+gov_sys_pol_attr_ro(sampling_rate_min);
+
+static struct attribute *dbs_attributes_gov_sys[] = {
+ &sampling_rate_min_gov_sys.attr,
+ &sampling_rate_gov_sys.attr,
+ &up_threshold_gov_sys.attr,
+ &sampling_down_factor_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
+ &powersave_bias_gov_sys.attr,
+ &io_is_busy_gov_sys.attr,
+ NULL
+};
+
+static struct attribute_group od_attr_group_gov_sys = {
+ .attrs = dbs_attributes_gov_sys,
+ .name = "ondemand",
+};
+
+static struct attribute *dbs_attributes_gov_pol[] = {
+ &sampling_rate_min_gov_pol.attr,
+ &sampling_rate_gov_pol.attr,
+ &up_threshold_gov_pol.attr,
+ &sampling_down_factor_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
+ &powersave_bias_gov_pol.attr,
+ &io_is_busy_gov_pol.attr,
+ NULL
+};
+
+static struct attribute_group od_attr_group_gov_pol = {
+ .attrs = dbs_attributes_gov_pol,
+ .name = "ondemand",
+};
+
+/************************** sysfs end ************************/
+
+static int od_init(struct dbs_data *dbs_data)
+{
+ struct od_dbs_tuners *tuners;
+ u64 idle_time;
+ int cpu;
+
+ tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
+ if (!tuners) {
+ pr_err("%s: kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ cpu = get_cpu();
+ idle_time = get_cpu_idle_time_us(cpu, NULL);
+ put_cpu();
+ if (idle_time != -1ULL) {
+ /* Idle micro accounting is supported. Use finer thresholds */
+ tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+		/*
+		 * In the nohz/micro-accounting case we set the minimum
+		 * sampling rate to a fixed (very low) value that does not
+		 * depend on HZ. The deferrable timer may skip some samples
+		 * while the CPU is idle/sleeping, as intended.
+		 */
+ dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+ } else {
+ tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+
+ /* For correct statistics, we need 10 ticks for each measure */
+ dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ }
+
+ tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ tuners->ignore_nice_load = 0;
+ tuners->powersave_bias = default_powersave_bias;
+ tuners->io_is_busy = should_io_be_busy();
+
+ dbs_data->tuners = tuners;
+ mutex_init(&dbs_data->mutex);
+ return 0;
+}
+
+static void od_exit(struct dbs_data *dbs_data)
+{
+ kfree(dbs_data->tuners);
+}
+
+define_get_cpu_dbs_routines(od_cpu_dbs_info);
+
+static struct od_ops od_ops = {
+ .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
+ .powersave_bias_target = generic_powersave_bias_target,
+ .freq_increase = dbs_freq_increase,
+};
+
+static struct common_dbs_data od_dbs_cdata = {
+ .governor = GOV_ONDEMAND,
+ .attr_group_gov_sys = &od_attr_group_gov_sys,
+ .attr_group_gov_pol = &od_attr_group_gov_pol,
+ .get_cpu_cdbs = get_cpu_cdbs,
+ .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+ .gov_dbs_timer = od_dbs_timer,
+ .gov_check_cpu = od_check_cpu,
+ .gov_ops = &od_ops,
+ .init = od_init,
+ .exit = od_exit,
+};
+
+static void od_set_powersave_bias(unsigned int powersave_bias)
+{
+ struct cpufreq_policy *policy;
+ struct dbs_data *dbs_data;
+ struct od_dbs_tuners *od_tuners;
+ unsigned int cpu;
+ cpumask_t done;
+
+ default_powersave_bias = powersave_bias;
+ cpumask_clear(&done);
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &done))
+ continue;
+
+ policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
+ if (!policy)
+ continue;
+
+ cpumask_or(&done, &done, policy->cpus);
+
+ if (policy->governor != &cpufreq_gov_ondemand)
+ continue;
+
+ dbs_data = policy->governor_data;
+ od_tuners = dbs_data->tuners;
+ od_tuners->powersave_bias = default_powersave_bias;
+ }
+ put_online_cpus();
+}
+
+void od_register_powersave_bias_handler(unsigned int (*f)
+ (struct cpufreq_policy *, unsigned int, unsigned int),
+ unsigned int powersave_bias)
+{
+ od_ops.powersave_bias_target = f;
+ od_set_powersave_bias(powersave_bias);
+}
+EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
+
+void od_unregister_powersave_bias_handler(void)
+{
+ od_ops.powersave_bias_target = generic_powersave_bias_target;
+ od_set_powersave_bias(0);
+}
+EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
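+
+/*
+ * A minimal sketch of plugging in a platform-specific bias handler, as
+ * amd_freq_sensitivity.c in this tree does; my_bias_target and the bias
+ * value 400 are illustrative only:
+ *
+ *	static unsigned int my_bias_target(struct cpufreq_policy *policy,
+ *			unsigned int freq_next, unsigned int relation)
+ *	{
+ *		return freq_next;	(a possibly adjusted target, in kHz)
+ *	}
+ *
+ *	od_register_powersave_bias_handler(my_bias_target, 400);
+ */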
+
+static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
+struct cpufreq_governor cpufreq_gov_ondemand = {
+ .name = "ondemand",
+ .governor = od_cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+};
+
+static int __init cpufreq_gov_dbs_init(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_ondemand);
+}
+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+}
+
+MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
+ "Low Latency Frequency Transition capable processors");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+fs_initcall(cpufreq_gov_dbs_init);
+#else
+module_init(cpufreq_gov_dbs_init);
+#endif
+module_exit(cpufreq_gov_dbs_exit);
diff --git a/kernel/drivers/cpufreq/cpufreq_opp.c b/kernel/drivers/cpufreq/cpufreq_opp.c
new file mode 100644
index 000000000..773bcde89
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_opp.c
@@ -0,0 +1,110 @@
+/*
+ * Generic OPP helper interface for CPUFreq drivers
+ *
+ * Copyright (C) 2009-2014 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/pm_opp.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+
+/**
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev: device for which we do this operation
+ * @table: Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device. This assumes that the
+ * OPP list is already initialized and ready for usage.
+ *
+ * This function allocates the required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
+ * -ENODATA if the device has no OPPs, -ENOMEM if no memory is available for
+ * the operation (table is not populated), and 0 if successful (table is
+ * populated).
+ *
+ * WARNING: It is important for callers to refresh their copy of the table
+ * if any of the OPP-modifying functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Since we just use the regular accessor functions to access the internal
+ * data structures, we use the RCU read lock inside this function. As a
+ * result, users of this function do NOT need to take explicit locks.
+ */
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ struct dev_pm_opp *opp;
+ struct cpufreq_frequency_table *freq_table = NULL;
+ int i, max_opps, ret = 0;
+ unsigned long rate;
+
+ rcu_read_lock();
+
+ max_opps = dev_pm_opp_get_opp_count(dev);
+ if (max_opps <= 0) {
+ ret = max_opps ? max_opps : -ENODATA;
+ goto out;
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+ if (!freq_table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+ /* find next rate */
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out;
+ }
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = rate / 1000;
+ }
+
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+
+out:
+ rcu_read_unlock();
+ if (ret)
+ kfree(freq_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
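+
+/*
+ * A minimal usage sketch (the names are illustrative): a cpufreq driver's
+ * ->init() callback would typically do
+ *
+ *	struct cpufreq_frequency_table *freq_table;
+ *	int ret;
+ *
+ *	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ *	if (ret)
+ *		return ret;
+ *	ret = cpufreq_table_validate_and_show(policy, freq_table);
+ *
+ * and call dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table) from its
+ * error and ->exit() paths, since this helper only allocates the table.
+ */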
+
+/**
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
+ */
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
diff --git a/kernel/drivers/cpufreq/cpufreq_performance.c b/kernel/drivers/cpufreq/cpufreq_performance.c
new file mode 100644
index 000000000..cf117deb3
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_performance.c
@@ -0,0 +1,60 @@
+/*
+ * linux/drivers/cpufreq/cpufreq_performance.c
+ *
+ * Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+static int cpufreq_governor_performance(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ case CPUFREQ_GOV_LIMITS:
+ pr_debug("setting to %u kHz because of event %u\n",
+ policy->max, event);
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_performance = {
+ .name = "performance",
+ .governor = cpufreq_governor_performance,
+ .owner = THIS_MODULE,
+};
+
+static int __init cpufreq_gov_performance_init(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_performance);
+}
+
+static void __exit cpufreq_gov_performance_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_performance);
+}
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
+MODULE_LICENSE("GPL");
+
+fs_initcall(cpufreq_gov_performance_init);
+module_exit(cpufreq_gov_performance_exit);
diff --git a/kernel/drivers/cpufreq/cpufreq_powersave.c b/kernel/drivers/cpufreq/cpufreq_powersave.c
new file mode 100644
index 000000000..e3b874c23
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_powersave.c
@@ -0,0 +1,64 @@
+/*
+ * linux/drivers/cpufreq/cpufreq_powersave.c
+ *
+ * Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ case CPUFREQ_GOV_LIMITS:
+ pr_debug("setting to %u kHz because of event %u\n",
+ policy->min, event);
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_L);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_powersave = {
+ .name = "powersave",
+ .governor = cpufreq_governor_powersave,
+ .owner = THIS_MODULE,
+};
+
+static int __init cpufreq_gov_powersave_init(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_powersave);
+}
+
+static void __exit cpufreq_gov_powersave_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_powersave);
+}
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+fs_initcall(cpufreq_gov_powersave_init);
+#else
+module_init(cpufreq_gov_powersave_init);
+#endif
+module_exit(cpufreq_gov_powersave_exit);
diff --git a/kernel/drivers/cpufreq/cpufreq_stats.c b/kernel/drivers/cpufreq/cpufreq_stats.c
new file mode 100644
index 000000000..5e370a30a
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_stats.c
@@ -0,0 +1,357 @@
+/*
+ * drivers/cpufreq/cpufreq_stats.c
+ *
+ * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/cputime.h>
+
+static spinlock_t cpufreq_stats_lock;
+
+struct cpufreq_stats {
+ unsigned int total_trans;
+ unsigned long long last_time;
+ unsigned int max_state;
+ unsigned int state_num;
+ unsigned int last_index;
+ u64 *time_in_state;
+ unsigned int *freq_table;
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+ unsigned int *trans_table;
+#endif
+};
+
+static int cpufreq_stats_update(struct cpufreq_stats *stats)
+{
+ unsigned long long cur_time = get_jiffies_64();
+
+ spin_lock(&cpufreq_stats_lock);
+ stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
+ stats->last_time = cur_time;
+ spin_unlock(&cpufreq_stats_lock);
+ return 0;
+}
+
+static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%d\n", policy->stats->total_trans);
+}
+
+static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
+{
+ struct cpufreq_stats *stats = policy->stats;
+ ssize_t len = 0;
+ int i;
+
+ cpufreq_stats_update(stats);
+ for (i = 0; i < stats->state_num; i++) {
+ len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
+ (unsigned long long)
+ jiffies_64_to_clock_t(stats->time_in_state[i]));
+ }
+ return len;
+}
+
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+{
+ struct cpufreq_stats *stats = policy->stats;
+ ssize_t len = 0;
+ int i, j;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+ len += snprintf(buf + len, PAGE_SIZE - len, " : ");
+ for (i = 0; i < stats->state_num; i++) {
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ stats->freq_table[i]);
+ }
+ if (len >= PAGE_SIZE)
+ return PAGE_SIZE;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+
+ for (i = 0; i < stats->state_num; i++) {
+ if (len >= PAGE_SIZE)
+ break;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
+ stats->freq_table[i]);
+
+ for (j = 0; j < stats->state_num; j++) {
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ stats->trans_table[i*stats->max_state+j]);
+ }
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ }
+ if (len >= PAGE_SIZE)
+ return PAGE_SIZE;
+ return len;
+}
+cpufreq_freq_attr_ro(trans_table);
+#endif
+
+cpufreq_freq_attr_ro(total_trans);
+cpufreq_freq_attr_ro(time_in_state);
+
+static struct attribute *default_attrs[] = {
+ &total_trans.attr,
+ &time_in_state.attr,
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+ &trans_table.attr,
+#endif
+ NULL
+};
+static struct attribute_group stats_attr_group = {
+ .attrs = default_attrs,
+ .name = "stats"
+};
+
+static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
+{
+ int index;
+ for (index = 0; index < stats->max_state; index++)
+ if (stats->freq_table[index] == freq)
+ return index;
+ return -1;
+}
+
+static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
+{
+ struct cpufreq_stats *stats = policy->stats;
+
+ /* Already freed */
+ if (!stats)
+ return;
+
+ pr_debug("%s: Free stats table\n", __func__);
+
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ kfree(stats->time_in_state);
+ kfree(stats);
+ policy->stats = NULL;
+}
+
+static void cpufreq_stats_free_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
+
+ __cpufreq_stats_free_table(policy);
+
+ cpufreq_cpu_put(policy);
+}
+
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
+{
+ unsigned int i = 0, count = 0, ret = -ENOMEM;
+ struct cpufreq_stats *stats;
+ unsigned int alloc_size;
+ unsigned int cpu = policy->cpu;
+ struct cpufreq_frequency_table *pos, *table;
+
+ /* We need cpufreq table for creating stats table */
+ table = cpufreq_frequency_get_table(cpu);
+ if (unlikely(!table))
+ return 0;
+
+ /* stats already initialized */
+ if (policy->stats)
+ return -EEXIST;
+
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ /* Find total allocation size */
+ cpufreq_for_each_valid_entry(pos, table)
+ count++;
+
+ alloc_size = count * sizeof(int) + count * sizeof(u64);
+
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+ alloc_size += count * count * sizeof(int);
+#endif
+
+ /* Allocate memory for time_in_state/freq_table/trans_table in one go */
+ stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
+ if (!stats->time_in_state)
+ goto free_stat;
+
+ stats->freq_table = (unsigned int *)(stats->time_in_state + count);
+
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+ stats->trans_table = stats->freq_table + count;
+#endif
+
+ stats->max_state = count;
+
+ /* Find valid-unique entries */
+ cpufreq_for_each_valid_entry(pos, table)
+ if (freq_table_get_index(stats, pos->frequency) == -1)
+ stats->freq_table[i++] = pos->frequency;
+
+ stats->state_num = i;
+ stats->last_time = get_jiffies_64();
+ stats->last_index = freq_table_get_index(stats, policy->cur);
+
+ policy->stats = stats;
+ ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
+ if (!ret)
+ return 0;
+
+ /* We failed, release resources */
+ policy->stats = NULL;
+ kfree(stats->time_in_state);
+free_stat:
+ kfree(stats);
+
+ return ret;
+}
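+
+/*
+ * The single allocation above is laid out as count u64 time_in_state
+ * slots, immediately followed by count unsigned int freq_table entries
+ * and, with CONFIG_CPU_FREQ_STAT_DETAILS, a count * count unsigned int
+ * trans_table, matching the alloc_size computed before the kzalloc().
+ */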
+
+static void cpufreq_stats_create_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+
+ /*
+ * "likely(!policy)" because normally cpufreq_stats will be registered
+ * before cpufreq driver
+ */
+ policy = cpufreq_cpu_get(cpu);
+ if (likely(!policy))
+ return;
+
+ __cpufreq_stats_create_table(policy);
+
+ cpufreq_cpu_put(policy);
+}
+
+static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ int ret = 0;
+ struct cpufreq_policy *policy = data;
+
+ if (val == CPUFREQ_CREATE_POLICY)
+ ret = __cpufreq_stats_create_table(policy);
+ else if (val == CPUFREQ_REMOVE_POLICY)
+ __cpufreq_stats_free_table(policy);
+
+ return ret;
+}
+
+static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
+ struct cpufreq_stats *stats;
+ int old_index, new_index;
+
+ if (!policy) {
+ pr_err("%s: No policy found\n", __func__);
+ return 0;
+ }
+
+ if (val != CPUFREQ_POSTCHANGE)
+ goto put_policy;
+
+ if (!policy->stats) {
+ pr_debug("%s: No stats found\n", __func__);
+ goto put_policy;
+ }
+
+ stats = policy->stats;
+
+ old_index = stats->last_index;
+ new_index = freq_table_get_index(stats, freq->new);
+
+ /* We can't do stats->time_in_state[-1]= .. */
+ if (old_index == -1 || new_index == -1)
+ goto put_policy;
+
+ if (old_index == new_index)
+ goto put_policy;
+
+ cpufreq_stats_update(stats);
+
+ stats->last_index = new_index;
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+ stats->trans_table[old_index * stats->max_state + new_index]++;
+#endif
+ stats->total_trans++;
+
+put_policy:
+ cpufreq_cpu_put(policy);
+ return 0;
+}
+
+static struct notifier_block notifier_policy_block = {
+ .notifier_call = cpufreq_stat_notifier_policy
+};
+
+static struct notifier_block notifier_trans_block = {
+ .notifier_call = cpufreq_stat_notifier_trans
+};
+
+static int __init cpufreq_stats_init(void)
+{
+ int ret;
+ unsigned int cpu;
+
+ spin_lock_init(&cpufreq_stats_lock);
+ ret = cpufreq_register_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ if (ret)
+ return ret;
+
+ for_each_online_cpu(cpu)
+ cpufreq_stats_create_table(cpu);
+
+ ret = cpufreq_register_notifier(&notifier_trans_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ cpufreq_unregister_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ for_each_online_cpu(cpu)
+ cpufreq_stats_free_table(cpu);
+ return ret;
+ }
+
+ return 0;
+}
+static void __exit cpufreq_stats_exit(void)
+{
+ unsigned int cpu;
+
+ cpufreq_unregister_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ cpufreq_unregister_notifier(&notifier_trans_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ for_each_online_cpu(cpu)
+ cpufreq_stats_free_table(cpu);
+}
+
+MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
+MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
+MODULE_LICENSE("GPL");
+
+module_init(cpufreq_stats_init);
+module_exit(cpufreq_stats_exit);
diff --git a/kernel/drivers/cpufreq/cpufreq_userspace.c b/kernel/drivers/cpufreq/cpufreq_userspace.c
new file mode 100644
index 000000000..4dbf1db16
--- /dev/null
+++ b/kernel/drivers/cpufreq/cpufreq_userspace.c
@@ -0,0 +1,123 @@
+/*
+ * linux/drivers/cpufreq/cpufreq_userspace.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
+static DEFINE_MUTEX(userspace_mutex);
+
+/**
+ * cpufreq_set - set the CPU frequency
+ * @policy: pointer to policy struct where freq is being set
+ * @freq: target frequency in kHz
+ *
+ * Sets the CPU frequency to freq.
+ */
+static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
+{
+ int ret = -EINVAL;
+
+ pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
+
+ mutex_lock(&userspace_mutex);
+ if (!per_cpu(cpu_is_managed, policy->cpu))
+ goto err;
+
+ ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
+ err:
+ mutex_unlock(&userspace_mutex);
+ return ret;
+}
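+
+/*
+ * Reached from user space; for example, with this governor active:
+ *
+ *	echo 800000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed
+ *
+ * The cpufreq core routes such a write to the ->store_setspeed hook
+ * registered below, i.e. to cpufreq_set().
+ */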
+
+static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", policy->cur);
+}
+
+static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ unsigned int cpu = policy->cpu;
+ int rc = 0;
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ BUG_ON(!policy->cur);
+ pr_debug("started managing cpu %u\n", cpu);
+
+ mutex_lock(&userspace_mutex);
+ per_cpu(cpu_is_managed, cpu) = 1;
+ mutex_unlock(&userspace_mutex);
+ break;
+ case CPUFREQ_GOV_STOP:
+ pr_debug("managing cpu %u stopped\n", cpu);
+
+ mutex_lock(&userspace_mutex);
+ per_cpu(cpu_is_managed, cpu) = 0;
+ mutex_unlock(&userspace_mutex);
+ break;
+ case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&userspace_mutex);
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
+ cpu, policy->min, policy->max,
+ policy->cur);
+
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_L);
+ mutex_unlock(&userspace_mutex);
+ break;
+ }
+ return rc;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_userspace = {
+ .name = "userspace",
+ .governor = cpufreq_governor_userspace,
+ .store_setspeed = cpufreq_set,
+ .show_setspeed = show_speed,
+ .owner = THIS_MODULE,
+};
+
+static int __init cpufreq_gov_userspace_init(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_userspace);
+}
+
+static void __exit cpufreq_gov_userspace_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_userspace);
+}
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
+ "Russell King <rmk@arm.linux.org.uk>");
+MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+fs_initcall(cpufreq_gov_userspace_init);
+#else
+module_init(cpufreq_gov_userspace_init);
+#endif
+module_exit(cpufreq_gov_userspace_exit);
diff --git a/kernel/drivers/cpufreq/cris-artpec3-cpufreq.c b/kernel/drivers/cpufreq/cris-artpec3-cpufreq.c
new file mode 100644
index 000000000..601b88c49
--- /dev/null
+++ b/kernel/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -0,0 +1,92 @@
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <hwregs/reg_map.h>
+#include <hwregs/reg_rdwr.h>
+#include <hwregs/clkgen_defs.h>
+#include <hwregs/ddr2_defs.h>
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data);
+
+static struct notifier_block cris_sdram_freq_notifier_block = {
+ .notifier_call = cris_sdram_freq_notifier
+};
+
+static struct cpufreq_frequency_table cris_freq_table[] = {
+ {0, 0x01, 6000},
+ {0, 0x02, 200000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
+{
+ reg_clkgen_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
+ return clk_ctrl.pll ? 200000 : 6000;
+}
+
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
+{
+ reg_clkgen_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
+
+ local_irq_disable();
+
+	/* Even on SMP, all cores share the same clock, so all
+	 * settings are made on CPU0. */
+ if (cris_freq_table[state].frequency == 200000)
+ clk_ctrl.pll = 1;
+ else
+ clk_ctrl.pll = 0;
+ REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
+
+ local_irq_enable();
+
+ return 0;
+}
+
+static int cris_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ return cpufreq_generic_init(policy, cris_freq_table, 1000000);
+}
+
+static struct cpufreq_driver cris_freq_driver = {
+ .get = cris_freq_get_cpu_frequency,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cris_freq_target,
+ .init = cris_freq_cpu_init,
+ .name = "cris_freq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init cris_freq_init(void)
+{
+ int ret;
+ ret = cpufreq_register_driver(&cris_freq_driver);
+ cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ return ret;
+}
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ int i;
+ struct cpufreq_freqs *freqs = data;
+ if (val == CPUFREQ_PRECHANGE) {
+ reg_ddr2_rw_cfg cfg =
+ REG_RD(ddr2, regi_ddr2_ctrl, rw_cfg);
+ cfg.ref_interval = (freqs->new == 200000 ? 1560 : 46);
+
+ if (freqs->new == 200000)
+ for (i = 0; i < 50000; i++);
+		REG_WR(ddr2, regi_ddr2_ctrl, rw_cfg, cfg);
+ }
+ return 0;
+}
+
+module_init(cris_freq_init);
diff --git a/kernel/drivers/cpufreq/cris-etraxfs-cpufreq.c b/kernel/drivers/cpufreq/cris-etraxfs-cpufreq.c
new file mode 100644
index 000000000..22b2cdde7
--- /dev/null
+++ b/kernel/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -0,0 +1,91 @@
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <hwregs/reg_map.h>
+#include <arch/hwregs/reg_rdwr.h>
+#include <arch/hwregs/config_defs.h>
+#include <arch/hwregs/bif_core_defs.h>
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data);
+
+static struct notifier_block cris_sdram_freq_notifier_block = {
+ .notifier_call = cris_sdram_freq_notifier
+};
+
+static struct cpufreq_frequency_table cris_freq_table[] = {
+ {0, 0x01, 6000},
+ {0, 0x02, 200000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
+{
+ reg_config_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
+ return clk_ctrl.pll ? 200000 : 6000;
+}
+
+static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state)
+{
+ reg_config_rw_clk_ctrl clk_ctrl;
+ clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
+
+ local_irq_disable();
+
+	/* Even on SMP, all cores share the same clock, so all
+	 * settings are made on CPU0. */
+ if (cris_freq_table[state].frequency == 200000)
+ clk_ctrl.pll = 1;
+ else
+ clk_ctrl.pll = 0;
+ REG_WR(config, regi_config, rw_clk_ctrl, clk_ctrl);
+
+ local_irq_enable();
+
+ return 0;
+}
+
+static int cris_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ return cpufreq_generic_init(policy, cris_freq_table, 1000000);
+}
+
+static struct cpufreq_driver cris_freq_driver = {
+ .get = cris_freq_get_cpu_frequency,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cris_freq_target,
+ .init = cris_freq_cpu_init,
+ .name = "cris_freq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init cris_freq_init(void)
+{
+ int ret;
+ ret = cpufreq_register_driver(&cris_freq_driver);
+ cpufreq_register_notifier(&cris_sdram_freq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ return ret;
+}
+
+static int
+cris_sdram_freq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ int i;
+ struct cpufreq_freqs *freqs = data;
+ if (val == CPUFREQ_PRECHANGE) {
+ reg_bif_core_rw_sdram_timing timing =
+ REG_RD(bif_core, regi_bif_core, rw_sdram_timing);
+ timing.cpd = (freqs->new == 200000 ? 0 : 1);
+
+ if (freqs->new == 200000)
+			for (i = 0; i < 50000; i++);
+ REG_WR(bif_core, regi_bif_core, rw_sdram_timing, timing);
+ }
+ return 0;
+}
+
+module_init(cris_freq_init);
diff --git a/kernel/drivers/cpufreq/davinci-cpufreq.c b/kernel/drivers/cpufreq/davinci-cpufreq.c
new file mode 100644
index 000000000..7e336d20c
--- /dev/null
+++ b/kernel/drivers/cpufreq/davinci-cpufreq.c
@@ -0,0 +1,181 @@
+/*
+ * CPU frequency scaling for DaVinci
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2008 Texas Instruments, Inc.
+ * Updated to support OMAP3
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+
+#include <mach/hardware.h>
+#include <mach/cpufreq.h>
+#include <mach/common.h>
+
+struct davinci_cpufreq {
+ struct device *dev;
+ struct clk *armclk;
+ struct clk *asyncclk;
+ unsigned long asyncrate;
+};
+static struct davinci_cpufreq cpufreq;
+
+static int davinci_verify_speed(struct cpufreq_policy *policy)
+{
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct cpufreq_frequency_table *freq_table = pdata->freq_table;
+ struct clk *armclk = cpufreq.armclk;
+
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ if (policy->cpu)
+ return -EINVAL;
+
+ cpufreq_verify_within_cpu_limits(policy);
+ policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
+ policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+ return 0;
+}
+
+static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
+{
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct clk *armclk = cpufreq.armclk;
+ unsigned int old_freq, new_freq;
+ int ret = 0;
+
+ old_freq = policy->cur;
+ new_freq = pdata->freq_table[idx].frequency;
+
+ /* if moving to higher frequency, up the voltage beforehand */
+ if (pdata->set_voltage && new_freq > old_freq) {
+ ret = pdata->set_voltage(idx);
+ if (ret)
+ return ret;
+ }
+
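+	/*
+	 * Note: DaVinci's ARM clock implements ->set_rate() so that the
+	 * argument is treated as an index into the platform frequency
+	 * table (da850_set_armrate() does this), which is why idx rather
+	 * than a rate in Hz is passed below.
+	 */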
+ ret = clk_set_rate(armclk, idx);
+ if (ret)
+ return ret;
+
+ if (cpufreq.asyncclk) {
+ ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
+ if (ret)
+ return ret;
+ }
+
+ /* if moving to lower freq, lower the voltage after lowering freq */
+ if (pdata->set_voltage && new_freq < old_freq)
+ pdata->set_voltage(idx);
+
+ return 0;
+}
+
+static int davinci_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+ struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
+ struct cpufreq_frequency_table *freq_table = pdata->freq_table;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ /* Finish platform specific initialization */
+ if (pdata->init) {
+ result = pdata->init();
+ if (result)
+ return result;
+ }
+
+ policy->clk = cpufreq.armclk;
+
+	/*
+	 * Measuring time across the target() function yields ~1500-1800 us
+	 * with no drivers on the notification list. The latency is set to
+	 * 2000 us to accommodate drivers being added to the pre/post-change
+	 * notification lists.
+	 */
+ return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
+}
+
+static struct cpufreq_driver davinci_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = davinci_verify_speed,
+ .target_index = davinci_target,
+ .get = cpufreq_generic_get,
+ .init = davinci_cpu_init,
+ .name = "davinci",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init davinci_cpufreq_probe(struct platform_device *pdev)
+{
+ struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
+ struct clk *asyncclk;
+
+ if (!pdata)
+ return -EINVAL;
+ if (!pdata->freq_table)
+ return -EINVAL;
+
+ cpufreq.dev = &pdev->dev;
+
+ cpufreq.armclk = clk_get(NULL, "arm");
+ if (IS_ERR(cpufreq.armclk)) {
+ dev_err(cpufreq.dev, "Unable to get ARM clock\n");
+ return PTR_ERR(cpufreq.armclk);
+ }
+
+ asyncclk = clk_get(cpufreq.dev, "async");
+ if (!IS_ERR(asyncclk)) {
+ cpufreq.asyncclk = asyncclk;
+ cpufreq.asyncrate = clk_get_rate(asyncclk);
+ }
+
+ return cpufreq_register_driver(&davinci_driver);
+}
+
+static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
+{
+ clk_put(cpufreq.armclk);
+
+ if (cpufreq.asyncclk)
+ clk_put(cpufreq.asyncclk);
+
+ return cpufreq_unregister_driver(&davinci_driver);
+}
+
+static struct platform_driver davinci_cpufreq_driver = {
+ .driver = {
+ .name = "cpufreq-davinci",
+ },
+ .remove = __exit_p(davinci_cpufreq_remove),
+};
+
+int __init davinci_cpufreq_init(void)
+{
+ return platform_driver_probe(&davinci_cpufreq_driver,
+ davinci_cpufreq_probe);
+}
+
diff --git a/kernel/drivers/cpufreq/dbx500-cpufreq.c b/kernel/drivers/cpufreq/dbx500-cpufreq.c
new file mode 100644
index 000000000..5c3ec1dd4
--- /dev/null
+++ b/kernel/drivers/cpufreq/dbx500-cpufreq.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010-2012
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Sundar Iyer <sundar.iyer@stericsson.com>
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+static struct cpufreq_frequency_table *freq_table;
+static struct clk *armss_clk;
+
+static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ /* update armss clk frequency */
+ return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
+}
+
+static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
+{
+ policy->clk = armss_clk;
+ return cpufreq_generic_init(policy, freq_table, 20 * 1000);
+}
+
+static struct cpufreq_driver dbx500_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = dbx500_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .init = dbx500_cpufreq_init,
+ .name = "DBX500",
+ .attr = cpufreq_generic_attr,
+};
+
+static int dbx500_cpufreq_probe(struct platform_device *pdev)
+{
+ struct cpufreq_frequency_table *pos;
+
+ freq_table = dev_get_platdata(&pdev->dev);
+ if (!freq_table) {
+ pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n");
+ return -ENODEV;
+ }
+
+ armss_clk = clk_get(&pdev->dev, "armss");
+ if (IS_ERR(armss_clk)) {
+ pr_err("dbx500-cpufreq: Failed to get armss clk\n");
+ return PTR_ERR(armss_clk);
+ }
+
+ pr_info("dbx500-cpufreq: Available frequencies:\n");
+ cpufreq_for_each_entry(pos, freq_table)
+		pr_info(" %d MHz\n", pos->frequency / 1000);
+
+ return cpufreq_register_driver(&dbx500_cpufreq_driver);
+}
+
+static struct platform_driver dbx500_cpufreq_plat_driver = {
+ .driver = {
+ .name = "cpufreq-ux500",
+ },
+ .probe = dbx500_cpufreq_probe,
+};
+
+static int __init dbx500_cpufreq_register(void)
+{
+ return platform_driver_register(&dbx500_cpufreq_plat_driver);
+}
+device_initcall(dbx500_cpufreq_register);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("cpufreq driver for DBX500");
diff --git a/kernel/drivers/cpufreq/e_powersaver.c b/kernel/drivers/cpufreq/e_powersaver.c
new file mode 100644
index 000000000..a0d2a423c
--- /dev/null
+++ b/kernel/drivers/cpufreq/e_powersaver.c
@@ -0,0 +1,441 @@
+/*
+ * Based on documentation provided by Dave Jones. Thanks!
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+#include <asm/tsc.h>
+
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#endif
+
+#define EPS_BRAND_C7M 0
+#define EPS_BRAND_C7 1
+#define EPS_BRAND_EDEN 2
+#define EPS_BRAND_C3 3
+#define EPS_BRAND_C7D 4
+
+struct eps_cpu_data {
+ u32 fsb;
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+ u32 bios_limit;
+#endif
+ struct cpufreq_frequency_table freq_table[];
+};
+
+static struct eps_cpu_data *eps_cpu[NR_CPUS];
+
+/* Module parameters */
+static int freq_failsafe_off;
+static int voltage_failsafe_off;
+static int set_max_voltage;
+
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+static int ignore_acpi_limit;
+
+static struct acpi_processor_performance *eps_acpi_cpu_perf;
+
+/* Minimum necessary to get acpi_processor_get_bios_limit() working */
+static int eps_acpi_init(void)
+{
+ eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf),
+ GFP_KERNEL);
+ if (!eps_acpi_cpu_perf)
+ return -ENOMEM;
+
+ if (!zalloc_cpumask_var(&eps_acpi_cpu_perf->shared_cpu_map,
+ GFP_KERNEL)) {
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ return -ENOMEM;
+ }
+
+ if (acpi_processor_register_performance(eps_acpi_cpu_perf, 0)) {
+ free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ return -EIO;
+ }
+ return 0;
+}
+
+static int eps_acpi_exit(struct cpufreq_policy *policy)
+{
+ if (eps_acpi_cpu_perf) {
+ acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0);
+ free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
+ kfree(eps_acpi_cpu_perf);
+ eps_acpi_cpu_perf = NULL;
+ }
+ return 0;
+}
+#endif
+
+static unsigned int eps_get(unsigned int cpu)
+{
+ struct eps_cpu_data *centaur;
+ u32 lo, hi;
+
+ if (cpu)
+ return 0;
+ centaur = eps_cpu[cpu];
+ if (centaur == NULL)
+ return 0;
+
+ /* Return current frequency */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ return centaur->fsb * ((lo >> 8) & 0xff);
+}
+
+static int eps_set_state(struct eps_cpu_data *centaur,
+ struct cpufreq_policy *policy,
+ u32 dest_state)
+{
+ u32 lo, hi;
+ int i;
+
+ /* Wait while CPU is busy */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ i = 0;
+ while (lo & ((1 << 16) | (1 << 17))) {
+ udelay(16);
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ i++;
+ if (unlikely(i > 64)) {
+ return -ENODEV;
+ }
+ }
+ /* Set new multiplier and voltage */
+ wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0);
+ /* Wait until transition end */
+ i = 0;
+ do {
+ udelay(16);
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ i++;
+ if (unlikely(i > 64)) {
+ return -ENODEV;
+ }
+ } while (lo & ((1 << 16) | (1 << 17)));
+
+#ifdef DEBUG
+ {
+ u8 current_multiplier, current_voltage;
+
+ /* Print voltage and multiplier */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ current_voltage = lo & 0xff;
+ printk(KERN_INFO "eps: Current voltage = %dmV\n",
+ current_voltage * 16 + 700);
+ current_multiplier = (lo >> 8) & 0xff;
+ printk(KERN_INFO "eps: Current multiplier = %d\n",
+ current_multiplier);
+ }
+#endif
+ return 0;
+}
+
+static int eps_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct eps_cpu_data *centaur;
+ unsigned int cpu = policy->cpu;
+ unsigned int dest_state;
+ int ret;
+
+ if (unlikely(eps_cpu[cpu] == NULL))
+ return -ENODEV;
+ centaur = eps_cpu[cpu];
+
+ /* Make frequency transition */
+ dest_state = centaur->freq_table[index].driver_data & 0xffff;
+ ret = eps_set_state(centaur, policy, dest_state);
+ if (ret)
+ printk(KERN_ERR "eps: Timeout!\n");
+ return ret;
+}
+
+static int eps_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int i;
+ u32 lo, hi;
+ u64 val;
+ u8 current_multiplier, current_voltage;
+ u8 max_multiplier, max_voltage;
+ u8 min_multiplier, min_voltage;
+ u8 brand = 0;
+ u32 fsb;
+ struct eps_cpu_data *centaur;
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ struct cpufreq_frequency_table *f_table;
+ int k, step, voltage;
+ int ret;
+ int states;
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+	unsigned int limit = 0;
+#endif
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ /* Check brand */
+ printk(KERN_INFO "eps: Detected VIA ");
+
+ switch (c->x86_model) {
+ case 10:
+ rdmsr(0x1153, lo, hi);
+ brand = (((lo >> 2) ^ lo) >> 18) & 3;
+ printk(KERN_CONT "Model A ");
+ break;
+ case 13:
+ rdmsr(0x1154, lo, hi);
+ brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
+ printk(KERN_CONT "Model D ");
+ break;
+ }
+
+ switch (brand) {
+ case EPS_BRAND_C7M:
+ printk(KERN_CONT "C7-M\n");
+ break;
+ case EPS_BRAND_C7:
+ printk(KERN_CONT "C7\n");
+ break;
+ case EPS_BRAND_EDEN:
+ printk(KERN_CONT "Eden\n");
+ break;
+ case EPS_BRAND_C7D:
+ printk(KERN_CONT "C7-D\n");
+ break;
+ case EPS_BRAND_C3:
+ printk(KERN_CONT "C3\n");
+		return -ENODEV;
+ }
+ /* Enable Enhanced PowerSaver */
+ rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+ val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
+ wrmsrl(MSR_IA32_MISC_ENABLE, val);
+ /* Can be locked at 0 */
+ rdmsrl(MSR_IA32_MISC_ENABLE, val);
+ if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+ printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
+ return -ENODEV;
+ }
+ }
+
+ /* Print voltage and multiplier */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ current_voltage = lo & 0xff;
+ printk(KERN_INFO "eps: Current voltage = %dmV\n",
+ current_voltage * 16 + 700);
+ current_multiplier = (lo >> 8) & 0xff;
+ printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
+
+ /* Print limits */
+ max_voltage = hi & 0xff;
+ printk(KERN_INFO "eps: Highest voltage = %dmV\n",
+ max_voltage * 16 + 700);
+ max_multiplier = (hi >> 8) & 0xff;
+ printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
+ min_voltage = (hi >> 16) & 0xff;
+ printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
+ min_voltage * 16 + 700);
+ min_multiplier = (hi >> 24) & 0xff;
+ printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
+
+ /* Sanity checks */
+ if (current_multiplier == 0 || max_multiplier == 0
+ || min_multiplier == 0)
+ return -EINVAL;
+ if (current_multiplier > max_multiplier
+ || max_multiplier <= min_multiplier)
+ return -EINVAL;
+ if (current_voltage > 0x1f || max_voltage > 0x1f)
+ return -EINVAL;
+ if (max_voltage < min_voltage
+ || current_voltage < min_voltage
+ || current_voltage > max_voltage)
+ return -EINVAL;
+
+ /* Check for systems using underclocked CPU */
+ if (!freq_failsafe_off && max_multiplier != current_multiplier) {
+		printk(KERN_INFO "eps: Your processor is running at a different "
+			"frequency than its maximum. Aborting.\n");
+		printk(KERN_INFO "eps: You can use the freq_failsafe_off option "
+			"to disable this check.\n");
+ return -EINVAL;
+ }
+ if (!voltage_failsafe_off && max_voltage != current_voltage) {
+		printk(KERN_INFO "eps: Your processor is running at a different "
+			"voltage than its maximum. Aborting.\n");
+		printk(KERN_INFO "eps: You can use the voltage_failsafe_off "
+			"option to disable this check.\n");
+ return -EINVAL;
+ }
+
+ /* Calc FSB speed */
+ fsb = cpu_khz / current_multiplier;
+
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+ /* Check for ACPI processor speed limit */
+ if (!ignore_acpi_limit && !eps_acpi_init()) {
+ if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
+ printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n",
+ limit/1000000,
+ (limit%1000000)/10000);
+ eps_acpi_exit(policy);
+ /* Check if max_multiplier is in BIOS limits */
+ if (limit && max_multiplier * fsb > limit) {
+ printk(KERN_INFO "eps: Aborting.\n");
+ return -EINVAL;
+ }
+ }
+ }
+#endif
+
+	/* Allow the user to set a lower maximum voltage than that
+	 * reported by the processor */
+ if (brand == EPS_BRAND_C7M && set_max_voltage) {
+ u32 v;
+
+ /* Change mV to something hardware can use */
+ v = (set_max_voltage - 700) / 16;
+ /* Check if voltage is within limits */
+ if (v >= min_voltage && v <= max_voltage) {
+ printk(KERN_INFO "eps: Setting %dmV as maximum.\n",
+ v * 16 + 700);
+ max_voltage = v;
+ }
+ }
+
+ /* Calc number of p-states supported */
+ if (brand == EPS_BRAND_C7M)
+ states = max_multiplier - min_multiplier + 1;
+ else
+ states = 2;
+
+ /* Allocate private data and frequency table for current cpu */
+ centaur = kzalloc(sizeof(*centaur)
+ + (states + 1) * sizeof(struct cpufreq_frequency_table),
+ GFP_KERNEL);
+ if (!centaur)
+ return -ENOMEM;
+ eps_cpu[0] = centaur;
+
+ /* Copy basic values */
+ centaur->fsb = fsb;
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+ centaur->bios_limit = limit;
+#endif
+
+ /* Fill frequency and MSR value table */
+ f_table = &centaur->freq_table[0];
+ if (brand != EPS_BRAND_C7M) {
+ f_table[0].frequency = fsb * min_multiplier;
+ f_table[0].driver_data = (min_multiplier << 8) | min_voltage;
+ f_table[1].frequency = fsb * max_multiplier;
+ f_table[1].driver_data = (max_multiplier << 8) | max_voltage;
+ f_table[2].frequency = CPUFREQ_TABLE_END;
+ } else {
+ k = 0;
+ step = ((max_voltage - min_voltage) * 256)
+ / (max_multiplier - min_multiplier);
+ for (i = min_multiplier; i <= max_multiplier; i++) {
+ voltage = (k * step) / 256 + min_voltage;
+ f_table[k].frequency = fsb * i;
+ f_table[k].driver_data = (i << 8) | voltage;
+ k++;
+ }
+ f_table[k].frequency = CPUFREQ_TABLE_END;
+ }
+
+ policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
+
+ ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
+ if (ret) {
+ kfree(centaur);
+ return ret;
+ }
+
+ return 0;
+}
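+
+/*
+ * Voltage encoding used above: the hardware works in 16 mV steps starting
+ * at 700 mV, so e.g. set_max_voltage=1004 encodes to (1004 - 700) / 16 = 19
+ * and is printed back as 19 * 16 + 700 = 1004 mV.
+ */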
+
+static int eps_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+
+ /* Bye */
+ kfree(eps_cpu[cpu]);
+ eps_cpu[cpu] = NULL;
+ return 0;
+}
+
+static struct cpufreq_driver eps_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = eps_target,
+ .init = eps_cpu_init,
+ .exit = eps_cpu_exit,
+ .get = eps_get,
+ .name = "e_powersaver",
+ .attr = cpufreq_generic_attr,
+};
+
+
+/* This driver will work only on Centaur C7 processors with
+ * Enhanced SpeedStep/PowerSaver registers */
+static const struct x86_cpu_id eps_cpu_id[] = {
+ { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_EST },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, eps_cpu_id);
+
+static int __init eps_init(void)
+{
+ if (!x86_match_cpu(eps_cpu_id) || boot_cpu_data.x86_model < 10)
+ return -ENODEV;
+ if (cpufreq_register_driver(&eps_driver))
+ return -EINVAL;
+ return 0;
+}
+
+static void __exit eps_exit(void)
+{
+ cpufreq_unregister_driver(&eps_driver);
+}
+
+/* Allow the user to overclock the machine or to raise the frequency after
+ * unloading the module */
+module_param(freq_failsafe_off, int, 0644);
+MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
+module_param(voltage_failsafe_off, int, 0644);
+MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
+#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+module_param(ignore_acpi_limit, int, 0644);
+MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
+#endif
+module_param(set_max_voltage, int, 0644);
+MODULE_PARM_DESC(set_max_voltage, "Set maximum CPU voltage (mV) C7-M only");
+
+MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>");
+MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's.");
+MODULE_LICENSE("GPL");
+
+module_init(eps_init);
+module_exit(eps_exit);
diff --git a/kernel/drivers/cpufreq/elanfreq.c b/kernel/drivers/cpufreq/elanfreq.c
new file mode 100644
index 000000000..1c06e786c
--- /dev/null
+++ b/kernel/drivers/cpufreq/elanfreq.c
@@ -0,0 +1,232 @@
+/*
+ * elanfreq: cpufreq driver for the AMD ELAN family
+ *
+ * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
+ *
+ * Parts of this code are (c) Sven Geggus <sven@geggus.net>
+ *
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/delay.h>
+#include <linux/cpufreq.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
+#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
+
+/* Module parameter */
+static int max_freq;
+
+struct s_elan_multiplier {
+ int clock; /* frequency in kHz */
+ int val40h; /* PMU Force Mode register */
+ int val80h; /* CPU Clock Speed Register */
+};
+
+/*
+ * It is important that the frequencies
+ * are listed in ascending order here!
+ */
+static struct s_elan_multiplier elan_multiplier[] = {
+ {1000, 0x02, 0x18},
+ {2000, 0x02, 0x10},
+ {4000, 0x02, 0x08},
+ {8000, 0x00, 0x00},
+ {16000, 0x00, 0x02},
+ {33000, 0x00, 0x04},
+ {66000, 0x01, 0x04},
+ {99000, 0x01, 0x05}
+};
+
+static struct cpufreq_frequency_table elanfreq_table[] = {
+ {0, 0, 1000},
+ {0, 1, 2000},
+ {0, 2, 4000},
+ {0, 3, 8000},
+ {0, 4, 16000},
+ {0, 5, 33000},
+ {0, 6, 66000},
+ {0, 7, 99000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+
+/**
+ * elanfreq_get_cpu_frequency: determine current cpu speed
+ *
+ * Determines the frequency at which the CPU of the Elan SoC is currently
+ * running. Frequencies from 1 to 33 MHz are generated the normal way; 66
+ * and 99 MHz are the "Hyperspeed Mode", in which the rest of the chip
+ * keeps running at 33 MHz.
+ */
+
+static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
+{
+ u8 clockspeed_reg; /* Clock Speed Register */
+
+ local_irq_disable();
+ outb_p(0x80, REG_CSCIR);
+ clockspeed_reg = inb_p(REG_CSCDR);
+ local_irq_enable();
+
+ if ((clockspeed_reg & 0xE0) == 0xE0)
+ return 0;
+
+ /* Are we in CPU clock multiplied mode (66/99 MHz)? */
+ if ((clockspeed_reg & 0xE0) == 0xC0) {
+ if ((clockspeed_reg & 0x01) == 0)
+ return 66000;
+ else
+ return 99000;
+ }
+
+ /* 33 MHz is not 32 MHz... */
+ if ((clockspeed_reg & 0xE0) == 0xA0)
+ return 33000;
+
+ return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000;
+}
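+
+/*
+ * Decode examples for the read above: (reg & 0xE0) == 0xE0 yields 0
+ * (unknown), 0xC0 is hyperspeed with bit 0 selecting 66 vs. 99 MHz,
+ * 0xA0 is the 33 MHz special case, and the remaining settings use a
+ * power-of-two encoding, e.g. 0x80 gives (1 << 4) * 1000 = 16000 kHz.
+ */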
+
+
+static int elanfreq_target(struct cpufreq_policy *policy,
+ unsigned int state)
+{
+ /*
+ * Access to the Elan's internal registers is indexed via
+ * 0x22: Chip Setup & Control Register Index Register (CSCI)
+ * 0x23: Chip Setup & Control Register Data Register (CSCD)
+ *
+ */
+
+ /*
+ * 0x40 is the Power Management Unit's Force Mode Register.
+ * Bit 6 enables Hyperspeed Mode (66/99 MHz core frequency)
+ */
+
+ local_irq_disable();
+ outb_p(0x40, REG_CSCIR); /* Disable hyperspeed mode */
+ outb_p(0x00, REG_CSCDR);
+ local_irq_enable(); /* wait till internal pipelines and */
+ udelay(1000); /* buffers have cleaned up */
+
+ local_irq_disable();
+
+ /* now, set the CPU clock speed register (0x80) */
+ outb_p(0x80, REG_CSCIR);
+ outb_p(elan_multiplier[state].val80h, REG_CSCDR);
+
+ /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
+ outb_p(0x40, REG_CSCIR);
+ outb_p(elan_multiplier[state].val40h, REG_CSCDR);
+ udelay(10000);
+ local_irq_enable();
+
+ return 0;
+}
+/*
+ * Module init and exit code
+ */
+
+static int elanfreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ struct cpufreq_frequency_table *pos;
+
+ /* capability check */
+ if ((c->x86_vendor != X86_VENDOR_AMD) ||
+ (c->x86 != 4) || (c->x86_model != 10))
+ return -ENODEV;
+
+ /* max freq */
+ if (!max_freq)
+ max_freq = elanfreq_get_cpu_frequency(0);
+
+ /* table init */
+ cpufreq_for_each_entry(pos, elanfreq_table)
+ if (pos->frequency > max_freq)
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ return cpufreq_table_validate_and_show(policy, elanfreq_table);
+}
+
+
+#ifndef MODULE
+/**
+ * elanfreq_setup - elanfreq command line parameter parsing
+ *
+ * elanfreq command line parameter. Use:
+ * elanfreq=66000
+ * to set the maximum CPU frequency to 66 MHz. Note that in
+ * case you do not give this boot parameter, the maximum
+ * frequency will fall back to _current_ CPU frequency which
+ * might be lower. If you build this as a module, use the
+ * max_freq module parameter instead.
+ */
+static int __init elanfreq_setup(char *str)
+{
+ max_freq = simple_strtoul(str, &str, 0);
+ printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
+ return 1;
+}
+__setup("elanfreq=", elanfreq_setup);
+#endif
+
+
+static struct cpufreq_driver elanfreq_driver = {
+ .get = elanfreq_get_cpu_frequency,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = elanfreq_target,
+ .init = elanfreq_cpu_init,
+ .name = "elanfreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id elan_id[] = {
+ { X86_VENDOR_AMD, 4, 10, },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, elan_id);
+
+static int __init elanfreq_init(void)
+{
+ if (!x86_match_cpu(elan_id))
+ return -ENODEV;
+ return cpufreq_register_driver(&elanfreq_driver);
+}
+
+
+static void __exit elanfreq_exit(void)
+{
+ cpufreq_unregister_driver(&elanfreq_driver);
+}
+
+
+module_param(max_freq, int, 0444);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, "
+ "Sven Geggus <sven@geggus.net>");
+MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
+
+module_init(elanfreq_init);
+module_exit(elanfreq_exit);
diff --git a/kernel/drivers/cpufreq/exynos-cpufreq.c b/kernel/drivers/cpufreq/exynos-cpufreq.c
new file mode 100644
index 000000000..82d2fbb20
--- /dev/null
+++ b/kernel/drivers/cpufreq/exynos-cpufreq.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - CPU frequency scaling support for EXYNOS series
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/cpufreq.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpu.h>
+
+#include "exynos-cpufreq.h"
+
+static struct exynos_dvfs_info *exynos_info;
+static struct thermal_cooling_device *cdev;
+static struct regulator *arm_regulator;
+static unsigned int locking_frequency;
+
+static int exynos_cpufreq_get_index(unsigned int freq)
+{
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ struct cpufreq_frequency_table *pos;
+
+ cpufreq_for_each_entry(pos, freq_table)
+ if (pos->frequency == freq)
+ break;
+
+ if (pos->frequency == CPUFREQ_TABLE_END)
+ return -EINVAL;
+
+ return pos - freq_table;
+}
+
+static int exynos_cpufreq_scale(unsigned int target_freq)
+{
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ unsigned int *volt_table = exynos_info->volt_table;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+ unsigned int arm_volt, safe_arm_volt = 0;
+ unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
+ struct device *dev = exynos_info->dev;
+ unsigned int old_freq;
+ int index, old_index;
+ int ret = 0;
+
+ old_freq = policy->cur;
+
+ /*
+ * The policy max may have been changed, so we cannot get a proper
+ * old_index with cpufreq_frequency_table_target(). Thus, ignore
+ * the policy and get the index from the raw frequency table.
+ */
+ old_index = exynos_cpufreq_get_index(old_freq);
+ if (old_index < 0) {
+ ret = old_index;
+ goto out;
+ }
+
+ index = exynos_cpufreq_get_index(target_freq);
+ if (index < 0) {
+ ret = index;
+ goto out;
+ }
+
+ /*
+ * The ARM clock source will temporarily be switched from APLL to
+ * MPLL. To support this level, the regulator must be set to the
+ * voltage required while running from MPLL.
+ */
+ if (exynos_info->need_apll_change != NULL) {
+ if (exynos_info->need_apll_change(old_index, index) &&
+ (freq_table[index].frequency < mpll_freq_khz) &&
+ (freq_table[old_index].frequency < mpll_freq_khz))
+ safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
+ }
+ arm_volt = volt_table[index];
+
+ /* When the new frequency is higher than the current one */
+ if ((target_freq > old_freq) && !safe_arm_volt) {
+ /* First, raise the voltage before increasing the frequency */
+ ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+ if (ret) {
+ dev_err(dev, "failed to set cpu voltage to %d\n",
+ arm_volt);
+ goto out;
+ }
+ }
+
+ if (safe_arm_volt) {
+ ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
+ safe_arm_volt);
+ if (ret) {
+ dev_err(dev, "failed to set cpu voltage to %d\n",
+ safe_arm_volt);
+ goto out;
+ }
+ }
+
+ exynos_info->set_freq(old_index, index);
+
+ /* When the new frequency is lower than the current one */
+ if ((target_freq < old_freq) ||
+ ((target_freq > old_freq) && safe_arm_volt)) {
+ /* Lower the voltage after the frequency change */
+ ret = regulator_set_voltage(arm_regulator, arm_volt,
+ arm_volt);
+ if (ret) {
+ dev_err(dev, "failed to set cpu voltage to %d\n",
+ arm_volt);
+ goto out;
+ }
+ }
+
+out:
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
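+
+/*
+ * A minimal sketch of the ordering rule implemented above, with
+ * hypothetical set_voltage()/set_rate() helpers standing in for the
+ * regulator and clock calls: raise the voltage before a frequency
+ * increase, lower it only after a decrease.
+ */
+#if 0 /* illustrative sketch, not compiled */
+static int dvfs_set(unsigned int old_khz, unsigned int new_khz,
+ unsigned int new_uv)
+{
+ int ret = 0;
+
+ if (new_khz > old_khz) /* speeding up: voltage first */
+ ret = set_voltage(new_uv);
+ if (!ret)
+ ret = set_rate(new_khz);
+ if (!ret && new_khz < old_khz) /* slowing down: voltage last */
+ ret = set_voltage(new_uv);
+ return ret;
+}
+#endif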
+
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
+}
+
+static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->clk = exynos_info->cpu_clk;
+ policy->suspend_freq = locking_frequency;
+ return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
+}
+
+static struct cpufreq_driver exynos_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = exynos_target,
+ .get = cpufreq_generic_get,
+ .init = exynos_cpufreq_cpu_init,
+ .name = "exynos_cpufreq",
+ .attr = cpufreq_generic_attr,
+#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
+ .boost_supported = true,
+#endif
+#ifdef CONFIG_PM
+ .suspend = cpufreq_generic_suspend,
+#endif
+};
+
+static int exynos_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *cpu0;
+ int ret = -EINVAL;
+
+ exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
+ if (!exynos_info)
+ return -ENOMEM;
+
+ exynos_info->dev = &pdev->dev;
+
+ if (of_machine_is_compatible("samsung,exynos4210")) {
+ exynos_info->type = EXYNOS_SOC_4210;
+ ret = exynos4210_cpufreq_init(exynos_info);
+ } else if (of_machine_is_compatible("samsung,exynos4212")) {
+ exynos_info->type = EXYNOS_SOC_4212;
+ ret = exynos4x12_cpufreq_init(exynos_info);
+ } else if (of_machine_is_compatible("samsung,exynos4412")) {
+ exynos_info->type = EXYNOS_SOC_4412;
+ ret = exynos4x12_cpufreq_init(exynos_info);
+ } else if (of_machine_is_compatible("samsung,exynos5250")) {
+ exynos_info->type = EXYNOS_SOC_5250;
+ ret = exynos5250_cpufreq_init(exynos_info);
+ } else {
+ pr_err("%s: Unknown SoC type\n", __func__);
+ goto err_vdd_arm;
+ }
+
+ if (ret)
+ goto err_vdd_arm;
+
+ if (exynos_info->set_freq == NULL) {
+ dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+ goto err_vdd_arm;
+ }
+
+ arm_regulator = regulator_get(NULL, "vdd_arm");
+ if (IS_ERR(arm_regulator)) {
+ dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+ goto err_vdd_arm;
+ }
+
+ /* Done here as we want to capture boot frequency */
+ locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
+
+ ret = cpufreq_register_driver(&exynos_driver);
+ if (ret)
+ goto err_cpufreq_reg;
+
+ cpu0 = of_get_cpu_node(0, NULL);
+ if (!cpu0) {
+ pr_err("failed to find cpu0 node\n");
+ return 0;
+ }
+
+ if (of_find_property(cpu0, "#cooling-cells", NULL)) {
+ cdev = of_cpufreq_cooling_register(cpu0,
+ cpu_present_mask);
+ if (IS_ERR(cdev))
+ pr_err("running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev));
+ }
+
+ return 0;
+
+err_cpufreq_reg:
+ dev_err(&pdev->dev, "failed to register cpufreq driver\n");
+ regulator_put(arm_regulator);
+err_vdd_arm:
+ kfree(exynos_info);
+ return -EINVAL;
+}
+
+static struct platform_driver exynos_cpufreq_platdrv = {
+ .driver = {
+ .name = "exynos-cpufreq",
+ },
+ .probe = exynos_cpufreq_probe,
+};
+module_platform_driver(exynos_cpufreq_platdrv);
diff --git a/kernel/drivers/cpufreq/exynos-cpufreq.h b/kernel/drivers/cpufreq/exynos-cpufreq.h
new file mode 100644
index 000000000..9f2062a7c
--- /dev/null
+++ b/kernel/drivers/cpufreq/exynos-cpufreq.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - CPUFreq support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+enum cpufreq_level_index {
+ L0, L1, L2, L3, L4,
+ L5, L6, L7, L8, L9,
+ L10, L11, L12, L13, L14,
+ L15, L16, L17, L18, L19,
+ L20,
+};
+
+enum exynos_soc_type {
+ EXYNOS_SOC_4210,
+ EXYNOS_SOC_4212,
+ EXYNOS_SOC_4412,
+ EXYNOS_SOC_5250,
+};
+
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+ { \
+ .freq = (f) * 1000, \
+ .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+ (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+ .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+ .mps = ((m) << 16 | (p) << 8 | (s)), \
+ }
+
+struct apll_freq {
+ unsigned int freq;
+ u32 clk_div_cpu0;
+ u32 clk_div_cpu1;
+ u32 mps;
+};
+
+struct exynos_dvfs_info {
+ enum exynos_soc_type type;
+ struct device *dev;
+ unsigned long mpll_freq_khz;
+ unsigned int pll_safe_idx;
+ struct clk *cpu_clk;
+ unsigned int *volt_table;
+ struct cpufreq_frequency_table *freq_table;
+ void (*set_freq)(unsigned int, unsigned int);
+ bool (*need_apll_change)(unsigned int, unsigned int);
+ void __iomem *cmu_regs;
+};
+
+#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
+extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
+extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
+extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#define EXYNOS4_CLKSRC_CPU 0x14200
+#define EXYNOS4_CLKMUX_STATCPU 0x14400
+
+#define EXYNOS4_CLKDIV_CPU 0x14500
+#define EXYNOS4_CLKDIV_CPU1 0x14504
+#define EXYNOS4_CLKDIV_STATCPU 0x14600
+#define EXYNOS4_CLKDIV_STATCPU1 0x14604
+
+#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16)
+#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
+
+#define EXYNOS5_APLL_LOCK 0x00000
+#define EXYNOS5_APLL_CON0 0x00100
+#define EXYNOS5_CLKMUX_STATCPU 0x00400
+#define EXYNOS5_CLKDIV_CPU0 0x00500
+#define EXYNOS5_CLKDIV_CPU1 0x00504
+#define EXYNOS5_CLKDIV_STATCPU0 0x00600
+#define EXYNOS5_CLKDIV_STATCPU1 0x00604
diff --git a/kernel/drivers/cpufreq/exynos4210-cpufreq.c b/kernel/drivers/cpufreq/exynos4210-cpufreq.c
new file mode 100644
index 000000000..843ec824f
--- /dev/null
+++ b/kernel/drivers/cpufreq/exynos4210-cpufreq.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4210 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "exynos-cpufreq.h"
+
+static struct clk *cpu_clk;
+static struct clk *moutcore;
+static struct clk *mout_mpll;
+static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
+
+static unsigned int exynos4210_volt_table[] = {
+ 1250000, 1150000, 1050000, 975000, 950000,
+};
+
+static struct cpufreq_frequency_table exynos4210_freq_table[] = {
+ {0, L0, 1200 * 1000},
+ {0, L1, 1000 * 1000},
+ {0, L2, 800 * 1000},
+ {0, L3, 500 * 1000},
+ {0, L4, 200 * 1000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static struct apll_freq apll_freq_4210[] = {
+ /*
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, RESERVED
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
+ */
+ APLL_FREQ(1200, 0, 3, 7, 3, 4, 1, 7, 0, 5, 0, 0, 150, 3, 1),
+ APLL_FREQ(1000, 0, 3, 7, 3, 4, 1, 7, 0, 4, 0, 0, 250, 6, 1),
+ APLL_FREQ(800, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 200, 6, 1),
+ APLL_FREQ(500, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 250, 6, 2),
+ APLL_FREQ(200, 0, 1, 3, 1, 3, 1, 0, 0, 3, 0, 0, 200, 6, 3),
+};
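+
+/*
+ * For illustration, APLL_FREQ() packs the 800 MHz row above into
+ * .freq = 800000, .clk_div_cpu0 = 0x07133730, .clk_div_cpu1 = 0x3 and
+ * .mps = 0x00c80601 (M = 200, P = 6, S = 1).
+ */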
+
+static void exynos4210_set_clkdiv(unsigned int div_index)
+{
+ unsigned int tmp;
+
+ /* Change Divider - CPU0 */
+
+ tmp = apll_freq_4210[div_index].clk_div_cpu0;
+
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
+
+ do {
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU);
+ } while (tmp & 0x1111111);
+
+ /* Change Divider - CPU1 */
+
+ tmp = apll_freq_4210[div_index].clk_div_cpu1;
+
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
+
+ do {
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
+ } while (tmp & 0x11);
+}
+
+static void exynos4210_set_apll(unsigned int index)
+{
+ unsigned int tmp, freq = apll_freq_4210[index].freq;
+
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ clk_set_parent(moutcore, mout_mpll);
+
+ do {
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
+ >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
+ tmp &= 0x7;
+ } while (tmp != 0x2);
+
+ clk_set_rate(mout_apll, freq * 1000);
+
+ /* MUX_CORE_SEL = APLL */
+ clk_set_parent(moutcore, mout_apll);
+
+ do {
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
+ tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
+ } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
+}
+
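+/*
+ * The frequency table is ordered from the fastest level (L0) down, so
+ * old_index > new_index means the frequency is being raised. In that
+ * case the (larger) dividers for the new level are programmed before
+ * the APLL is sped up; when lowering the frequency the APLL is slowed
+ * down first. Either way the clocks derived from ARMCLK never exceed
+ * their limits during the transition.
+ */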
+static void exynos4210_set_frequency(unsigned int old_index,
+ unsigned int new_index)
+{
+ if (old_index > new_index) {
+ exynos4210_set_clkdiv(new_index);
+ exynos4210_set_apll(new_index);
+ } else if (old_index < new_index) {
+ exynos4210_set_apll(new_index);
+ exynos4210_set_clkdiv(new_index);
+ }
+}
+
+int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ struct device_node *np;
+ unsigned long rate;
+
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-dt driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
+ cpu_clk = clk_get(NULL, "armclk");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ moutcore = clk_get(NULL, "moutcore");
+ if (IS_ERR(moutcore))
+ goto err_moutcore;
+
+ mout_mpll = clk_get(NULL, "mout_mpll");
+ if (IS_ERR(mout_mpll))
+ goto err_mout_mpll;
+
+ rate = clk_get_rate(mout_mpll) / 1000;
+
+ mout_apll = clk_get(NULL, "mout_apll");
+ if (IS_ERR(mout_apll))
+ goto err_mout_apll;
+
+ info->mpll_freq_khz = rate;
+ /* 800 MHz */
+ info->pll_safe_idx = L2;
+ info->cpu_clk = cpu_clk;
+ info->volt_table = exynos4210_volt_table;
+ info->freq_table = exynos4210_freq_table;
+ info->set_freq = exynos4210_set_frequency;
+
+ cpufreq = info;
+
+ return 0;
+
+err_mout_apll:
+ clk_put(mout_mpll);
+err_mout_mpll:
+ clk_put(moutcore);
+err_moutcore:
+ clk_put(cpu_clk);
+
+ pr_debug("%s: failed initialization\n", __func__);
+ return -EINVAL;
+}
diff --git a/kernel/drivers/cpufreq/exynos4x12-cpufreq.c b/kernel/drivers/cpufreq/exynos4x12-cpufreq.c
new file mode 100644
index 000000000..9e78a850e
--- /dev/null
+++ b/kernel/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4X12 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "exynos-cpufreq.h"
+
+static struct clk *cpu_clk;
+static struct clk *moutcore;
+static struct clk *mout_mpll;
+static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
+
+static unsigned int exynos4x12_volt_table[] = {
+ 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
+ 1000000, 987500, 975000, 950000, 925000, 900000, 900000
+};
+
+static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
+ {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000},
+ {0, L1, 1400 * 1000},
+ {0, L2, 1300 * 1000},
+ {0, L3, 1200 * 1000},
+ {0, L4, 1100 * 1000},
+ {0, L5, 1000 * 1000},
+ {0, L6, 900 * 1000},
+ {0, L7, 800 * 1000},
+ {0, L8, 700 * 1000},
+ {0, L9, 600 * 1000},
+ {0, L10, 500 * 1000},
+ {0, L11, 400 * 1000},
+ {0, L12, 300 * 1000},
+ {0, L13, 200 * 1000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static struct apll_freq *apll_freq_4x12;
+
+static struct apll_freq apll_freq_4212[] = {
+ /*
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
+ */
+ APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
+ APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
+ APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
+ APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
+ APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
+ APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
+ APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
+ APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
+ APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
+ APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
+ APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
+ APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
+};
+
+static struct apll_freq apll_freq_4412[] = {
+ /*
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
+ * clock divider for COPY, HPM, CORES
+ * PLL M, P, S
+ */
+ APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
+ APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
+ APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
+ APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
+ APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
+ APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
+ APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
+ APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
+ APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
+ APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
+ APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
+ APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
+};
+
+static void exynos4x12_set_clkdiv(unsigned int div_index)
+{
+ unsigned int tmp;
+
+ /* Change Divider - CPU0 */
+
+ tmp = apll_freq_4x12[div_index].clk_div_cpu0;
+
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
+
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU)
+ & 0x11111111)
+ cpu_relax();
+
+ /* Change Divider - CPU1 */
+ tmp = apll_freq_4x12[div_index].clk_div_cpu1;
+
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
+
+ do {
+ cpu_relax();
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
+ } while (tmp != 0x0);
+}
+
+static void exynos4x12_set_apll(unsigned int index)
+{
+ unsigned int tmp, freq = apll_freq_4x12[index].freq;
+
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ clk_set_parent(moutcore, mout_mpll);
+
+ do {
+ cpu_relax();
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
+ >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
+ tmp &= 0x7;
+ } while (tmp != 0x2);
+
+ clk_set_rate(mout_apll, freq * 1000);
+
+ /* MUX_CORE_SEL = APLL */
+ clk_set_parent(moutcore, mout_apll);
+
+ do {
+ cpu_relax();
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
+ tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
+ } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
+}
+
+static void exynos4x12_set_frequency(unsigned int old_index,
+ unsigned int new_index)
+{
+ if (old_index > new_index) {
+ exynos4x12_set_clkdiv(new_index);
+ exynos4x12_set_apll(new_index);
+ } else if (old_index < new_index) {
+ exynos4x12_set_apll(new_index);
+ exynos4x12_set_clkdiv(new_index);
+ }
+}
+
+int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ struct device_node *np;
+ unsigned long rate;
+
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-dt driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
+ cpu_clk = clk_get(NULL, "armclk");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ moutcore = clk_get(NULL, "moutcore");
+ if (IS_ERR(moutcore))
+ goto err_moutcore;
+
+ mout_mpll = clk_get(NULL, "mout_mpll");
+ if (IS_ERR(mout_mpll))
+ goto err_mout_mpll;
+
+ rate = clk_get_rate(mout_mpll) / 1000;
+
+ mout_apll = clk_get(NULL, "mout_apll");
+ if (IS_ERR(mout_apll))
+ goto err_mout_apll;
+
+ if (info->type == EXYNOS_SOC_4212)
+ apll_freq_4x12 = apll_freq_4212;
+ else
+ apll_freq_4x12 = apll_freq_4412;
+
+ info->mpll_freq_khz = rate;
+ /* 800 MHz */
+ info->pll_safe_idx = L7;
+ info->cpu_clk = cpu_clk;
+ info->volt_table = exynos4x12_volt_table;
+ info->freq_table = exynos4x12_freq_table;
+ info->set_freq = exynos4x12_set_frequency;
+
+ cpufreq = info;
+
+ return 0;
+
+err_mout_apll:
+ clk_put(mout_mpll);
+err_mout_mpll:
+ clk_put(moutcore);
+err_moutcore:
+ clk_put(cpu_clk);
+
+ pr_debug("%s: failed initialization\n", __func__);
+ return -EINVAL;
+}
diff --git a/kernel/drivers/cpufreq/exynos5250-cpufreq.c b/kernel/drivers/cpufreq/exynos5250-cpufreq.c
new file mode 100644
index 000000000..3eafdc7ba
--- /dev/null
+++ b/kernel/drivers/cpufreq/exynos5250-cpufreq.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS5250 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "exynos-cpufreq.h"
+
+static struct clk *cpu_clk;
+static struct clk *moutcore;
+static struct clk *mout_mpll;
+static struct clk *mout_apll;
+static struct exynos_dvfs_info *cpufreq;
+
+static unsigned int exynos5250_volt_table[] = {
+ 1300000, 1250000, 1225000, 1200000, 1150000,
+ 1125000, 1100000, 1075000, 1050000, 1025000,
+ 1012500, 1000000, 975000, 950000, 937500,
+ 925000
+};
+
+static struct cpufreq_frequency_table exynos5250_freq_table[] = {
+ {0, L0, 1700 * 1000},
+ {0, L1, 1600 * 1000},
+ {0, L2, 1500 * 1000},
+ {0, L3, 1400 * 1000},
+ {0, L4, 1300 * 1000},
+ {0, L5, 1200 * 1000},
+ {0, L6, 1100 * 1000},
+ {0, L7, 1000 * 1000},
+ {0, L8, 900 * 1000},
+ {0, L9, 800 * 1000},
+ {0, L10, 700 * 1000},
+ {0, L11, 600 * 1000},
+ {0, L12, 500 * 1000},
+ {0, L13, 400 * 1000},
+ {0, L14, 300 * 1000},
+ {0, L15, 200 * 1000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static struct apll_freq apll_freq_5250[] = {
+ /*
+ * values:
+ * freq
+ * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
+ */
+ APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0),
+ APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0),
+ APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0),
+ APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0),
+ APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0),
+ APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0),
+ APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0),
+ APLL_FREQ(900, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0),
+ APLL_FREQ(800, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0),
+ APLL_FREQ(700, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1),
+ APLL_FREQ(600, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1),
+ APLL_FREQ(500, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1),
+ APLL_FREQ(400, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1),
+ APLL_FREQ(300, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2),
+};
+
+static void set_clkdiv(unsigned int div_index)
+{
+ unsigned int tmp;
+
+ /* Change Divider - CPU0 */
+
+ tmp = apll_freq_5250[div_index].clk_div_cpu0;
+
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0);
+
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0)
+ & 0x11111111)
+ cpu_relax();
+
+ /* Change Divider - CPU1 */
+ tmp = apll_freq_5250[div_index].clk_div_cpu1;
+
+ __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1);
+
+ while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11)
+ cpu_relax();
+}
+
+static void set_apll(unsigned int index)
+{
+ unsigned int tmp;
+ unsigned int freq = apll_freq_5250[index].freq;
+
+ /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ clk_set_parent(moutcore, mout_mpll);
+
+ do {
+ cpu_relax();
+ tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU)
+ >> 16);
+ tmp &= 0x7;
+ } while (tmp != 0x2);
+
+ clk_set_rate(mout_apll, freq * 1000);
+
+ /* MUX_CORE_SEL = APLL */
+ clk_set_parent(moutcore, mout_apll);
+
+ do {
+ cpu_relax();
+ tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU);
+ tmp &= (0x7 << 16);
+ } while (tmp != (0x1 << 16));
+}
+
+static void exynos5250_set_frequency(unsigned int old_index,
+ unsigned int new_index)
+{
+ if (old_index > new_index) {
+ set_clkdiv(new_index);
+ set_apll(new_index);
+ } else if (old_index < new_index) {
+ set_apll(new_index);
+ set_clkdiv(new_index);
+ }
+}
+
+int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ struct device_node *np;
+ unsigned long rate;
+
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * controller registers directly and remove static mappings and
+ * dependencies on platform headers. It is necessary to enable
+ * Exynos multi-platform support and will be removed together with
+ * this whole driver as soon as Exynos gets migrated to use
+ * cpufreq-dt driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ info->cmu_regs = of_iomap(np, 0);
+ if (!info->cmu_regs) {
+ pr_err("%s: failed to map CMU registers\n", __func__);
+ return -EFAULT;
+ }
+
+ cpu_clk = clk_get(NULL, "armclk");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ moutcore = clk_get(NULL, "mout_cpu");
+ if (IS_ERR(moutcore))
+ goto err_moutcore;
+
+ mout_mpll = clk_get(NULL, "mout_mpll");
+ if (IS_ERR(mout_mpll))
+ goto err_mout_mpll;
+
+ rate = clk_get_rate(mout_mpll) / 1000;
+
+ mout_apll = clk_get(NULL, "mout_apll");
+ if (IS_ERR(mout_apll))
+ goto err_mout_apll;
+
+ info->mpll_freq_khz = rate;
+ /* 800 MHz */
+ info->pll_safe_idx = L9;
+ info->cpu_clk = cpu_clk;
+ info->volt_table = exynos5250_volt_table;
+ info->freq_table = exynos5250_freq_table;
+ info->set_freq = exynos5250_set_frequency;
+
+ cpufreq = info;
+
+ return 0;
+
+err_mout_apll:
+ clk_put(mout_mpll);
+err_mout_mpll:
+ clk_put(moutcore);
+err_moutcore:
+ clk_put(cpu_clk);
+
+ pr_err("%s: failed initialization\n", __func__);
+ return -EINVAL;
+}
diff --git a/kernel/drivers/cpufreq/exynos5440-cpufreq.c b/kernel/drivers/cpufreq/exynos5440-cpufreq.c
new file mode 100644
index 000000000..21a90ed7f
--- /dev/null
+++ b/kernel/drivers/cpufreq/exynos5440-cpufreq.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * EXYNOS5440 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Register definitions */
+#define XMU_DVFS_CTRL 0x0060
+#define XMU_PMU_P0_7 0x0064
+#define XMU_C0_3_PSTATE 0x0090
+#define XMU_P_LIMIT 0x00a0
+#define XMU_P_STATUS 0x00a4
+#define XMU_PMUEVTEN 0x00d0
+#define XMU_PMUIRQEN 0x00d4
+#define XMU_PMUIRQ 0x00d8
+
+/* PMU mask and shift definitions */
+#define P_VALUE_MASK 0x7
+
+#define XMU_DVFS_CTRL_EN_SHIFT 0
+
+#define P0_7_CPUCLKDEV_SHIFT 21
+#define P0_7_CPUCLKDEV_MASK 0x7
+#define P0_7_ATBCLKDEV_SHIFT 18
+#define P0_7_ATBCLKDEV_MASK 0x7
+#define P0_7_CSCLKDEV_SHIFT 15
+#define P0_7_CSCLKDEV_MASK 0x7
+#define P0_7_CPUEMA_SHIFT 28
+#define P0_7_CPUEMA_MASK 0xf
+#define P0_7_L2EMA_SHIFT 24
+#define P0_7_L2EMA_MASK 0xf
+#define P0_7_VDD_SHIFT 8
+#define P0_7_VDD_MASK 0x7f
+#define P0_7_FREQ_SHIFT 0
+#define P0_7_FREQ_MASK 0xff
+
+#define C0_3_PSTATE_VALID_SHIFT 8
+#define C0_3_PSTATE_CURR_SHIFT 4
+#define C0_3_PSTATE_NEW_SHIFT 0
+
+#define PSTATE_CHANGED_EVTEN_SHIFT 0
+
+#define PSTATE_CHANGED_IRQEN_SHIFT 0
+
+#define PSTATE_CHANGED_SHIFT 0
+
+/* some constant values for clock divider calculation */
+#define CPU_DIV_FREQ_MAX 500
+#define CPU_DBG_FREQ_MAX 375
+#define CPU_ATB_FREQ_MAX 500
+
+#define PMIC_LOW_VOLT 0x30
+#define PMIC_HIGH_VOLT 0x28
+
+#define CPUEMA_HIGH 0x2
+#define CPUEMA_MID 0x4
+#define CPUEMA_LOW 0x7
+
+#define L2EMA_HIGH 0x1
+#define L2EMA_MID 0x3
+#define L2EMA_LOW 0x4
+
+#define DIV_TAB_MAX 2
+/* frequency unit is 20 MHz */
+#define FREQ_UNIT 20
+#define MAX_VOLTAGE 1550000 /* In microvolt */
+#define VOLTAGE_STEP 12500 /* In microvolt */
+
+#define CPUFREQ_NAME "exynos5440_dvfs"
+#define DEF_TRANS_LATENCY 100000
+
+enum cpufreq_level_index {
+ L0, L1, L2, L3, L4,
+ L5, L6, L7, L8, L9,
+};
+#define CPUFREQ_LEVEL_END (L7 + 1)
+
+struct exynos_dvfs_data {
+ void __iomem *base;
+ struct resource *mem;
+ int irq;
+ struct clk *cpu_clk;
+ unsigned int latency;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_count;
+ struct device *dev;
+ bool dvfs_enabled;
+ struct work_struct irq_work;
+};
+
+static struct exynos_dvfs_data *dvfs_info;
+static DEFINE_MUTEX(cpufreq_lock);
+static struct cpufreq_freqs freqs;
+
+static int init_div_table(void)
+{
+ struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
+ unsigned int tmp, clk_div, ema_div, freq, volt_id;
+ struct dev_pm_opp *opp;
+
+ rcu_read_lock();
+ cpufreq_for_each_entry(pos, freq_tbl) {
+ opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
+ pos->frequency * 1000, true);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(dvfs_info->dev,
+ "failed to find valid OPP for %u KHZ\n",
+ pos->frequency);
+ return PTR_ERR(opp);
+ }
+
+ freq = pos->frequency / 1000; /* In MHZ */
+ clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
+ << P0_7_CPUCLKDEV_SHIFT;
+ clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
+ << P0_7_ATBCLKDEV_SHIFT;
+ clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
+ << P0_7_CSCLKDEV_SHIFT;
+
+ /* Calculate EMA */
+ volt_id = dev_pm_opp_get_voltage(opp);
+ volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
+ if (volt_id < PMIC_HIGH_VOLT) {
+ ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
+ (L2EMA_HIGH << P0_7_L2EMA_SHIFT);
+ } else if (volt_id > PMIC_LOW_VOLT) {
+ ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
+ (L2EMA_LOW << P0_7_L2EMA_SHIFT);
+ } else {
+ ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
+ (L2EMA_MID << P0_7_L2EMA_SHIFT);
+ }
+
+ tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
+ | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
+
+ __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 *
+ (pos - freq_tbl));
+ }
+
+ rcu_read_unlock();
+ return 0;
+}
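+
+/*
+ * Worked example for the EMA selection above: the PMIC code is
+ * (MAX_VOLTAGE - volt) / VOLTAGE_STEP, so an OPP at 1.1 V yields
+ * (1550000 - 1100000) / 12500 = 36, which is below PMIC_HIGH_VOLT
+ * (0x28 = 40) and selects the high EMA pair, while 0.9 V yields 52,
+ * above PMIC_LOW_VOLT (0x30 = 48), and selects the low pair.
+ */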
+
+static void exynos_enable_dvfs(unsigned int cur_frequency)
+{
+ unsigned int tmp, cpu;
+ struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+ struct cpufreq_frequency_table *pos;
+ /* Disable DVFS */
+ __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
+
+ /* Enable PSTATE Change Event */
+ tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
+ tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
+ __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
+
+ /* Enable PSTATE Change IRQ */
+ tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
+ tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
+ __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
+
+ /* Set initial performance index */
+ cpufreq_for_each_entry(pos, freq_table)
+ if (pos->frequency == cur_frequency)
+ break;
+
+ if (pos->frequency == CPUFREQ_TABLE_END) {
+ dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
+ /* Assign the highest frequency */
+ pos = freq_table;
+ cur_frequency = pos->frequency;
+ }
+
+ dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
+ cur_frequency);
+
+ for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
+ tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
+ tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
+ tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT);
+ __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
+ }
+
+ /* Enable DVFS */
+ __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
+ dvfs_info->base + XMU_DVFS_CTRL);
+}
+
+static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int tmp;
+ int i;
+ struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+
+ mutex_lock(&cpufreq_lock);
+
+ freqs.old = policy->cur;
+ freqs.new = freq_table[index].frequency;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+ /* Set the target frequency in all C0_3_PSTATE register */
+ for_each_cpu(i, policy->cpus) {
+ tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
+ tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
+ tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
+
+ __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
+ }
+ mutex_unlock(&cpufreq_lock);
+ return 0;
+}
+
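+/*
+ * The driver is flagged CPUFREQ_ASYNC_NOTIFICATION, so exynos_target()
+ * above only requests the new P-state: the hardware raises a
+ * PSTATE_CHANGED interrupt once the switch completes, and the work
+ * item below reads XMU_P_STATUS and calls
+ * cpufreq_freq_transition_end() to finish the transition.
+ */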
+static void exynos_cpufreq_work(struct work_struct *work)
+{
+ unsigned int cur_pstate, index;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+ struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+
+ /* Ensure we can access cpufreq structures */
+ if (unlikely(dvfs_info->dvfs_enabled == false))
+ goto skip_work;
+
+ mutex_lock(&cpufreq_lock);
+ freqs.old = policy->cur;
+
+ cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
+ if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
+ index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
+ else
+ index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
+
+ if (likely(index < dvfs_info->freq_count)) {
+ freqs.new = freq_table[index].frequency;
+ } else {
+ dev_crit(dvfs_info->dev, "New frequency out of range\n");
+ freqs.new = freqs.old;
+ }
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+
+ mutex_unlock(&cpufreq_lock);
+skip_work:
+ cpufreq_cpu_put(policy);
+ enable_irq(dvfs_info->irq);
+}
+
+static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
+ if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
+ __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
+ disable_irq_nosync(irq);
+ schedule_work(&dvfs_info->irq_work);
+ }
+ return IRQ_HANDLED;
+}
+
+static void exynos_sort_descend_freq_table(void)
+{
+ struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
+ int i = 0, index;
+ unsigned int tmp_freq;
+ /*
+ * The Exynos5440 clock controller state logic expects the cpufreq
+ * table to be in descending order, but the OPP library constructs it
+ * in ascending order. So to make the table descending we just need
+ * to swap entry i with entry N - i - 1.
+ */
+ for (i = 0; i < dvfs_info->freq_count / 2; i++) {
+ index = dvfs_info->freq_count - i - 1;
+ tmp_freq = freq_tbl[i].frequency;
+ freq_tbl[i].frequency = freq_tbl[index].frequency;
+ freq_tbl[index].frequency = tmp_freq;
+ }
+}
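+
+/* For example, with freq_count = 5 the loop above swaps entries (0, 4)
+ * and (1, 3), leaving the middle entry in place. */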
+
+static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->clk = dvfs_info->cpu_clk;
+ return cpufreq_generic_init(policy, dvfs_info->freq_table,
+ dvfs_info->latency);
+}
+
+static struct cpufreq_driver exynos_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = exynos_target,
+ .get = cpufreq_generic_get,
+ .init = exynos_cpufreq_cpu_init,
+ .name = CPUFREQ_NAME,
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct of_device_id exynos_cpufreq_match[] = {
+ {
+ .compatible = "samsung,exynos5440-cpufreq",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
+
+static int exynos_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ struct device_node *np;
+ struct resource res;
+ unsigned int cur_frequency;
+
+ np = pdev->dev.of_node;
+ if (!np)
+ return -ENODEV;
+
+ dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
+ if (!dvfs_info) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ dvfs_info->dev = &pdev->dev;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ goto err_put_node;
+
+ dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
+ if (IS_ERR(dvfs_info->base)) {
+ ret = PTR_ERR(dvfs_info->base);
+ goto err_put_node;
+ }
+
+ dvfs_info->irq = irq_of_parse_and_map(np, 0);
+ if (!dvfs_info->irq) {
+ dev_err(dvfs_info->dev, "No cpufreq irq found\n");
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ ret = of_init_opp_table(dvfs_info->dev);
+ if (ret) {
+ dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
+ goto err_put_node;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
+ &dvfs_info->freq_table);
+ if (ret) {
+ dev_err(dvfs_info->dev,
+ "failed to init cpufreq table: %d\n", ret);
+ goto err_free_opp;
+ }
+ dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
+ exynos_sort_descend_freq_table();
+
+ if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
+ dvfs_info->latency = DEF_TRANS_LATENCY;
+
+ dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
+ if (IS_ERR(dvfs_info->cpu_clk)) {
+ dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
+ ret = PTR_ERR(dvfs_info->cpu_clk);
+ goto err_free_table;
+ }
+
+ cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
+ if (!cur_frequency) {
+ dev_err(dvfs_info->dev, "Failed to get clock rate\n");
+ ret = -EINVAL;
+ goto err_free_table;
+ }
+ cur_frequency /= 1000;
+
+ INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
+ ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
+ exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
+ CPUFREQ_NAME, dvfs_info);
+ if (ret) {
+ dev_err(dvfs_info->dev, "Failed to register IRQ\n");
+ goto err_free_table;
+ }
+
+ ret = init_div_table();
+ if (ret) {
+ dev_err(dvfs_info->dev, "Failed to initialise div table\n");
+ goto err_free_table;
+ }
+
+ exynos_enable_dvfs(cur_frequency);
+ ret = cpufreq_register_driver(&exynos_driver);
+ if (ret) {
+ dev_err(dvfs_info->dev,
+ "%s: failed to register cpufreq driver\n", __func__);
+ goto err_free_table;
+ }
+
+ of_node_put(np);
+ dvfs_info->dvfs_enabled = true;
+ return 0;
+
+err_free_table:
+ dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+err_free_opp:
+ of_free_opp_table(dvfs_info->dev);
+err_put_node:
+ of_node_put(np);
+ dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
+ return ret;
+}
+
+static int exynos_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&exynos_driver);
+ dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+ of_free_opp_table(dvfs_info->dev);
+ return 0;
+}
+
+static struct platform_driver exynos_cpufreq_platdrv = {
+ .driver = {
+ .name = "exynos5440-cpufreq",
+ .of_match_table = exynos_cpufreq_match,
+ },
+ .probe = exynos_cpufreq_probe,
+ .remove = exynos_cpufreq_remove,
+};
+module_platform_driver(exynos_cpufreq_platdrv);
+
+MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
+MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/freq_table.c b/kernel/drivers/cpufreq/freq_table.c
new file mode 100644
index 000000000..df14766a8
--- /dev/null
+++ b/kernel/drivers/cpufreq/freq_table.c
@@ -0,0 +1,311 @@
+/*
+ * linux/drivers/cpufreq/freq_table.c
+ *
+ * Copyright (C) 2002 - 2003 Dominik Brodowski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+
+/*********************************************************************
+ * FREQUENCY TABLE HELPERS *
+ *********************************************************************/
+
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int min_freq = ~0;
+ unsigned int max_freq = 0;
+ unsigned int freq;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
+
+ if (!cpufreq_boost_enabled()
+ && (pos->flags & CPUFREQ_BOOST_FREQ))
+ continue;
+
+ pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ if (freq < min_freq)
+ min_freq = freq;
+ if (freq > max_freq)
+ max_freq = freq;
+ }
+
+ policy->min = policy->cpuinfo.min_freq = min_freq;
+ policy->max = policy->cpuinfo.max_freq = max_freq;
+
+ if (policy->min == ~0)
+ return -EINVAL;
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
+
+
+int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int freq, next_larger = ~0;
+ bool found = false;
+
+ pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
+ policy->min, policy->max, policy->cpu);
+
+ cpufreq_verify_within_cpu_limits(policy);
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
+
+ if ((freq >= policy->min) && (freq <= policy->max)) {
+ found = true;
+ break;
+ }
+
+ if ((next_larger > freq) && (freq > policy->max))
+ next_larger = freq;
+ }
+
+ if (!found) {
+ policy->max = next_larger;
+ cpufreq_verify_within_cpu_limits(policy);
+ }
+
+ pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
+ policy->min, policy->max, policy->cpu);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
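+
+/*
+ * Example of the fallback above: with a table of 200, 500 and 800 kHz
+ * and a requested policy of 300..400 kHz, no entry lies in the range,
+ * so policy->max is raised to the next larger table frequency (500)
+ * before the limits are re-checked.
+ */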
+
+/*
+ * Generic routine to verify policy & frequency table, requires driver to set
+ * policy->freq_table prior to it.
+ */
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table =
+ cpufreq_frequency_get_table(policy->cpu);
+ if (!table)
+ return -ENODEV;
+
+ return cpufreq_frequency_table_verify(policy, table);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
+
+int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int target_freq,
+ unsigned int relation,
+ unsigned int *index)
+{
+ struct cpufreq_frequency_table optimal = {
+ .driver_data = ~0,
+ .frequency = 0,
+ };
+ struct cpufreq_frequency_table suboptimal = {
+ .driver_data = ~0,
+ .frequency = 0,
+ };
+ struct cpufreq_frequency_table *pos;
+ unsigned int freq, diff, i = 0;
+
+ pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
+ target_freq, relation, policy->cpu);
+
+ switch (relation) {
+ case CPUFREQ_RELATION_H:
+ suboptimal.frequency = ~0;
+ break;
+ case CPUFREQ_RELATION_L:
+ case CPUFREQ_RELATION_C:
+ optimal.frequency = ~0;
+ break;
+ }
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
+
+ i = pos - table;
+ if ((freq < policy->min) || (freq > policy->max))
+ continue;
+ if (freq == target_freq) {
+ optimal.driver_data = i;
+ break;
+ }
+ switch (relation) {
+ case CPUFREQ_RELATION_H:
+ if (freq < target_freq) {
+ if (freq >= optimal.frequency) {
+ optimal.frequency = freq;
+ optimal.driver_data = i;
+ }
+ } else {
+ if (freq <= suboptimal.frequency) {
+ suboptimal.frequency = freq;
+ suboptimal.driver_data = i;
+ }
+ }
+ break;
+ case CPUFREQ_RELATION_L:
+ if (freq > target_freq) {
+ if (freq <= optimal.frequency) {
+ optimal.frequency = freq;
+ optimal.driver_data = i;
+ }
+ } else {
+ if (freq >= suboptimal.frequency) {
+ suboptimal.frequency = freq;
+ suboptimal.driver_data = i;
+ }
+ }
+ break;
+ case CPUFREQ_RELATION_C:
+ diff = abs(freq - target_freq);
+ if (diff < optimal.frequency ||
+ (diff == optimal.frequency &&
+ freq > table[optimal.driver_data].frequency)) {
+ optimal.frequency = diff;
+ optimal.driver_data = i;
+ }
+ break;
+ }
+ }
+ if (optimal.driver_data > i) {
+ if (suboptimal.driver_data > i)
+ return -EINVAL;
+ *index = suboptimal.driver_data;
+ } else
+ *index = optimal.driver_data;
+
+ pr_debug("target index is %u, freq is:%u kHz\n", *index,
+ table[*index].frequency);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
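+
+/*
+ * Example for the three relations above: with a table of 200, 500 and
+ * 800 kHz, a policy spanning the whole table and target_freq = 600,
+ * CPUFREQ_RELATION_H picks 500 (highest at or below the target),
+ * CPUFREQ_RELATION_L picks 800 (lowest at or above), and
+ * CPUFREQ_RELATION_C picks 500, the closest entry.
+ */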
+
+int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
+ unsigned int freq)
+{
+ struct cpufreq_frequency_table *pos, *table;
+
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!table)) {
+ pr_debug("%s: Unable to find frequency table\n", __func__);
+ return -ENOENT;
+ }
+
+ cpufreq_for_each_valid_entry(pos, table)
+ if (pos->frequency == freq)
+ return pos - table;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
+
+/**
+ * show_available_freqs - show available frequencies for the specified CPU
+ */
+static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
+ bool show_boost)
+{
+ ssize_t count = 0;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+
+ if (!table)
+ return -ENODEV;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ /*
+ * show_boost = true and driver_data = BOOST freq
+ * display BOOST freqs
+ *
+ * show_boost = false and driver_data = BOOST freq
+ * show_boost = true and driver_data != BOOST freq
+ * continue - do not display anything
+ *
+ * show_boost = false and driver_data != BOOST freq
+ * display NON BOOST freqs
+ */
+ if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ))
+ continue;
+
+ count += sprintf(&buf[count], "%d ", pos->frequency);
+ }
+ count += sprintf(&buf[count], "\n");
+
+ return count;
+
+}
+
+#define cpufreq_attr_available_freq(_name) \
+struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
+__ATTR_RO(_name##_frequencies)
+
+/**
+ * scaling_available_frequencies_show - show available normal frequencies for
+ * the specified CPU
+ */
+static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return show_available_freqs(policy, buf, false);
+}
+cpufreq_attr_available_freq(scaling_available);
+EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
+
+/**
+ * scaling_boost_frequencies_show - show available boost frequencies for
+ * the specified CPU
+ */
+static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return show_available_freqs(policy, buf, true);
+}
+cpufreq_attr_available_freq(scaling_boost);
+EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
+
+struct freq_attr *cpufreq_generic_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+#ifdef CONFIG_CPU_FREQ_BOOST_SW
+ &cpufreq_freq_attr_scaling_boost_freqs,
+#endif
+ NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table)
+{
+ int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+
+ if (!ret)
+ policy->freq_table = table;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
+
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
+
+struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ return policy ? policy->freq_table : NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("CPUfreq frequency table helpers");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/gx-suspmod.c b/kernel/drivers/cpufreq/gx-suspmod.c
new file mode 100644
index 000000000..1d723dc88
--- /dev/null
+++ b/kernel/drivers/cpufreq/gx-suspmod.c
@@ -0,0 +1,502 @@
+/*
+ * Cyrix MediaGX and NatSemi Geode Suspend Modulation
+ * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ * (C) 2002 Hiroshi Miura <miura@da-cha.org>
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation
+ *
+ * The author(s) of this software shall not be held liable for damages
+ * of any nature resulting due to the use of this software. This
+ * software is provided AS-IS with no warranties.
+ *
+ * Theoretical note:
+ *
+ * (see Geode(tm) CS5530 manual (rev.4.1) page.56)
+ *
+ * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0
+ * are based on Suspend Modulation.
+ *
+ * Suspend Modulation works by asserting and de-asserting the SUSP# pin
+ * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
+ * the CPU enters an idle state. GX1 stops its core clock when SUSP# is
+ * asserted then power consumption is reduced.
+ *
+ * Suspend Modulation's OFF/ON durations are configurable
+ * with the 'Suspend Modulation OFF Count Register'
+ * and the 'Suspend Modulation ON Count Register'.
+ * These registers are 8-bit counters that represent the number of
+ * 32us intervals during which the SUSP# pin is asserted (ON)/
+ * de-asserted (OFF) to the processor.
+ *
+ * These counters define a ratio which is the effective frequency
+ * of operation of the system.
+ *
+ * OFF Count
+ * F_eff = Fgx * ----------------------
+ * OFF Count + ON Count
+ *
+ * 0 <= On Count, Off Count <= 255
+ *
+ * From these limits, we can get register values
+ *
+ * off_duration + on_duration <= MAX_DURATION
+ * on_duration = off_duration * (stock_freq - freq) / freq
+ *
+ * off_duration = (freq * DURATION) / stock_freq
+ * on_duration = DURATION - off_duration
+ *
+ *
+ *---------------------------------------------------------------------------
+ *
+ * ChangeLog:
+ * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
+ * - fix on/off register mistake
+ * - fix cpu_khz calc when it stops cpu modulation.
+ *
+ * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
+ * - rewrite for Cyrix MediaGX Cx5510/5520 and
+ * NatSemi Geode Cs5530(A).
+ *
+ * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ * - cs5530_mod patch for 2.4.19-rc1.
+ *
+ *---------------------------------------------------------------------------
+ *
+ * Todo
+ * Test on machines with 5510, 5530, 5530A
+ */
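+
+/*
+ * Worked example of the relations above: with stock_freq = 200 MHz, a
+ * target of 100 MHz and a duration of 255, off_duration =
+ * (100 * 255) / 200 = 127 and on_duration = 255 - 127 = 128, giving
+ * F_eff = 200 * 127 / 255 ~= 99.6 MHz.
+ */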
+
+/************************************************************************
+ * Suspend Modulation - Definitions *
+ ************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/processor-cyrix.h>
+
+/* PCI config registers, all at F0 */
+#define PCI_PMER1 0x80 /* power management enable register 1 */
+#define PCI_PMER2 0x81 /* power management enable register 2 */
+#define PCI_PMER3 0x82 /* power management enable register 3 */
+#define PCI_IRQTC 0x8c /* irq speedup timer counter register: typical 2 to 4 ms */
+#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100 ms */
+#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
+#define PCI_MODON 0x95 /* suspend modulation ON counter register */
+#define PCI_SUSCFG 0x96 /* suspend configuration register */
+
+/* PMER1 bits */
+#define GPM (1<<0) /* global power management */
+#define GIT (1<<1) /* globally enable PM device idle timers */
+#define GTR (1<<2) /* globally enable IO traps */
+#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
+#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
+
+/* SUSCFG bits */
+#define SUSMOD (1<<0) /* enable/disable suspend modulation */
+/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */
+#define SMISPDUP (1<<1) /* selects how SMI re-enables suspend modulation: */
+ /* IRQTC timer or read of SMI speedup disable reg. (F1BAR[08-09h]) */
+#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
+/* the below is supported only with cs5530A */
+#define PWRSVE_ISA (1<<3) /* stop ISA clock */
+#define PWRSVE (1<<4) /* active idle */
+
+struct gxfreq_params {
+ u8 on_duration;
+ u8 off_duration;
+ u8 pci_suscfg;
+ u8 pci_pmer1;
+ u8 pci_pmer2;
+ struct pci_dev *cs55x0;
+};
+
+static struct gxfreq_params *gx_params;
+static int stock_freq;
+
+/* PCI bus clock in kHz - defaults to 30000 (i.e. 30 MHz) if cpu_khz is not available */
+static int pci_busclk;
+module_param(pci_busclk, int, 0444);
+
+/* maximum duration for which the cpu may be suspended
+ * (32us * MAX_DURATION). If no parameter is given, this defaults
+ * to 255.
+ * Note that this leads to a maximum of 8 ms(!) where the CPU clock
+ * is suspended -- processing power is just 0.39% of what it used to be,
+ * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
+static int max_duration = 255;
+module_param(max_duration, int, 0444);
+
+/* For the default policy, we want at least some processing power
+ * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV)
+ */
+#define POLICY_MIN_DIV 20
+
+
+/**
+ * we can detect the core multiplier from dir0_lsb
+ * from GX1 datasheet p.56,
+ * MULT[3:0]:
+ * 0000 = SYSCLK multiplied by 4 (test only)
+ * 0001 = SYSCLK multiplied by 10
+ * 0010 = SYSCLK multiplied by 4
+ * 0011 = SYSCLK multiplied by 6
+ * 0100 = SYSCLK multiplied by 9
+ * 0101 = SYSCLK multiplied by 5
+ * 0110 = SYSCLK multiplied by 7
+ * 0111 = SYSCLK multiplied by 8
+ * where SYSCLK = 33.3MHz
+ **/
+static int gx_freq_mult[16] = {
+ 4, 10, 4, 6, 9, 5, 7, 8,
+ 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+/****************************************************************
+ * Low Level chipset interface *
+ ****************************************************************/
+static struct pci_device_id gx_chipset_tbl[] __initdata = {
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY), },
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
+ { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, gx_chipset_tbl);
+
+static void gx_write_byte(int reg, int value)
+{
+ pci_write_config_byte(gx_params->cs55x0, reg, value);
+}
+
+/**
+ * gx_detect_chipset:
+ *
+ * Detect which supported CS55x0 companion chip is present, if any.
+ **/
+static struct pci_dev * __init gx_detect_chipset(void)
+{
+ struct pci_dev *gx_pci = NULL;
+
+ /* detect which companion chip is used */
+ for_each_pci_dev(gx_pci) {
+ if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
+ return gx_pci;
+ }
+
+ pr_debug("error: no supported chipset found!\n");
+ return NULL;
+}
+
+/**
+ * gx_get_cpuspeed:
+ *
+ * Finds out at which effective frequency the Cyrix MediaGX/NatSemi
+ * Geode CPU runs.
+ */
+static unsigned int gx_get_cpuspeed(unsigned int cpu)
+{
+ if ((gx_params->pci_suscfg & SUSMOD) == 0)
+ return stock_freq;
+
+ return (stock_freq * gx_params->off_duration)
+ / (gx_params->on_duration + gx_params->off_duration);
+}
+
+/**
+ * gx_validate_speed:
+ * find the closest achievable speed to @khz and the matching
+ * on/off durations
+ **/
+
+static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
+ u8 *off_duration)
+{
+ unsigned int i;
+ u8 tmp_on, tmp_off;
+ int old_tmp_freq = stock_freq;
+ int tmp_freq;
+
+ *off_duration = 1;
+ *on_duration = 0;
+
+ for (i = max_duration; i > 0; i--) {
+ tmp_off = ((khz * i) / stock_freq) & 0xff;
+ tmp_on = i - tmp_off;
+ tmp_freq = (stock_freq * tmp_off) / i;
+ /* if this ratio is closer to the requested khz, use it. On a
+ * tie, prefer the later (shorter) candidate, too - lower latency */
+ if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) {
+ *on_duration = tmp_on;
+ *off_duration = tmp_off;
+ old_tmp_freq = tmp_freq;
+ }
+ }
+
+ return old_tmp_freq;
+}
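+
+/*
+ * Illustrative iteration (hypothetical numbers, not from the driver):
+ * with stock_freq = 200000 kHz and a request of khz = 150000, the
+ * i = 255 pass computes tmp_off = (150000 * 255) / 200000 = 191,
+ * tmp_on = 64 and tmp_freq = (200000 * 191) / 255 = 149803 kHz; shorter
+ * total durations are then tried and the candidate closest to the
+ * request (preferring lower latency on ties) is kept.
+ */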
+
+
+/**
+ * gx_set_cpuspeed:
+ * set cpu speed in khz.
+ **/
+
+static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
+{
+ u8 suscfg, pmer1;
+ unsigned int new_khz;
+ unsigned long flags;
+ struct cpufreq_freqs freqs;
+
+ freqs.old = gx_get_cpuspeed(0);
+
+ new_khz = gx_validate_speed(khz, &gx_params->on_duration,
+ &gx_params->off_duration);
+
+ freqs.new = new_khz;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ local_irq_save(flags);
+
+ if (new_khz != stock_freq) {
+ /* the new_khz == stock_freq (100% CPU speed) special case is
+ * handled in the else branch below */
+ switch (gx_params->cs55x0->device) {
+ case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
+ pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
+ /* FIXME: need to test other values -- Zwane,Miura */
+ /* typical 2 to 4ms */
+ gx_write_byte(PCI_IRQTC, 4);
+ /* typical 50 to 100ms */
+ gx_write_byte(PCI_VIDTC, 100);
+ gx_write_byte(PCI_PMER1, pmer1);
+
+ if (gx_params->cs55x0->revision < 0x10) {
+ /* CS5530(rev 1.2, 1.3) */
+ suscfg = gx_params->pci_suscfg|SUSMOD;
+ } else {
+ /* CS5530A,B.. */
+ suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE;
+ }
+ break;
+ case PCI_DEVICE_ID_CYRIX_5520:
+ case PCI_DEVICE_ID_CYRIX_5510:
+ suscfg = gx_params->pci_suscfg | SUSMOD;
+ break;
+ default:
+ local_irq_restore(flags);
+ pr_debug("fatal: try to set unknown chipset.\n");
+ return;
+ }
+ } else {
+ suscfg = gx_params->pci_suscfg & ~(SUSMOD);
+ gx_params->off_duration = 0;
+ gx_params->on_duration = 0;
+ pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n");
+ }
+
+ gx_write_byte(PCI_MODOFF, gx_params->off_duration);
+ gx_write_byte(PCI_MODON, gx_params->on_duration);
+
+ gx_write_byte(PCI_SUSCFG, suscfg);
+ pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
+
+ local_irq_restore(flags);
+
+ gx_params->pci_suscfg = suscfg;
+
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+
+ pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
+ gx_params->on_duration * 32, gx_params->off_duration * 32);
+ pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
+}
+
+/****************************************************************
+ * High level functions *
+ ****************************************************************/
+
+/*
+ * cpufreq_gx_verify: test if frequency range is valid
+ *
+ * This function checks if a given frequency range in kHz is valid
+ * for the hardware supported by the driver.
+ */
+
+static int cpufreq_gx_verify(struct cpufreq_policy *policy)
+{
+ unsigned int tmp_freq = 0;
+ u8 tmp1, tmp2;
+
+ if (!stock_freq || !policy)
+ return -EINVAL;
+
+ policy->cpu = 0;
+ cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
+ stock_freq);
+
+ /* it needs to be assured that at least one supported frequency is
+ * within policy->min and policy->max. If it is not, policy->max
+ * needs to be increased until one frequency is supported.
+ * policy->min may not be decreased, though. This way we guarantee a
+ * specific processing capacity.
+ */
+ tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
+ if (tmp_freq < policy->min)
+ tmp_freq += stock_freq / max_duration;
+ policy->min = tmp_freq;
+ if (policy->min > policy->max)
+ policy->max = tmp_freq;
+ tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
+ if (tmp_freq > policy->max)
+ tmp_freq -= stock_freq / max_duration;
+ policy->max = tmp_freq;
+ if (policy->max < policy->min)
+ policy->max = policy->min;
+ cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
+ stock_freq);
+
+ return 0;
+}
+
+/*
+ * cpufreq_gx_target:
+ *
+ */
+static int cpufreq_gx_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ u8 tmp1, tmp2;
+ unsigned int tmp_freq;
+
+ if (!stock_freq || !policy)
+ return -EINVAL;
+
+ policy->cpu = 0;
+
+ tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2);
+ while (tmp_freq < policy->min) {
+ tmp_freq += stock_freq / max_duration;
+ tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
+ }
+ while (tmp_freq > policy->max) {
+ tmp_freq -= stock_freq / max_duration;
+ tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
+ }
+
+ gx_set_cpuspeed(policy, tmp_freq);
+
+ return 0;
+}
+
+static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int maxfreq;
+
+ if (!policy || policy->cpu != 0)
+ return -ENODEV;
+
+ /* determine maximum frequency */
+ if (pci_busclk)
+ maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
+ else if (cpu_khz)
+ maxfreq = cpu_khz;
+ else
+ maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
+
+ stock_freq = maxfreq;
+
+ pr_debug("cpu max frequency is %d.\n", maxfreq);
+
+ /* setup basic struct for cpufreq API */
+ policy->cpu = 0;
+
+ if (max_duration < POLICY_MIN_DIV)
+ policy->min = maxfreq / max_duration;
+ else
+ policy->min = maxfreq / POLICY_MIN_DIV;
+ policy->max = maxfreq;
+ policy->cpuinfo.min_freq = maxfreq / max_duration;
+ policy->cpuinfo.max_freq = maxfreq;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ return 0;
+}
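+
+/*
+ * Worked example (illustrative): with the fallback 30000 kHz bus clock
+ * (neither pci_busclk nor cpu_khz available) and a DIR1 multiplier of
+ * 10, maxfreq = 300000 kHz; with max_duration = 255 the policy then
+ * spans cpuinfo.min_freq = 300000 / 255 = 1176 kHz up to
+ * cpuinfo.max_freq = 300000 kHz.
+ */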
+
+/*
+ * cpufreq_gx_init:
+ * MediaGX/Geode GX initialize cpufreq driver
+ */
+static struct cpufreq_driver gx_suspmod_driver = {
+ .get = gx_get_cpuspeed,
+ .verify = cpufreq_gx_verify,
+ .target = cpufreq_gx_target,
+ .init = cpufreq_gx_cpu_init,
+ .name = "gx-suspmod",
+};
+
+static int __init cpufreq_gx_init(void)
+{
+ int ret;
+ struct gxfreq_params *params;
+ struct pci_dev *gx_pci;
+
+ /* Test if we have the right hardware */
+ gx_pci = gx_detect_chipset();
+ if (gx_pci == NULL)
+ return -ENODEV;
+
+ /* check whether module parameters are sane */
+ if (max_duration > 0xff)
+ max_duration = 0xff;
+
+ pr_debug("geode suspend modulation available.\n");
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (params == NULL)
+ return -ENOMEM;
+
+ params->cs55x0 = gx_pci;
+ gx_params = params;
+
+ /* keep cs55x0 configurations */
+ pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg));
+ pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1));
+ pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
+ pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
+ pci_read_config_byte(params->cs55x0, PCI_MODOFF,
+ &(params->off_duration));
+
+ ret = cpufreq_register_driver(&gx_suspmod_driver);
+ if (ret) {
+ kfree(params);
+ return ret; /* register error! */
+ }
+
+ return 0;
+}
+
+static void __exit cpufreq_gx_exit(void)
+{
+ cpufreq_unregister_driver(&gx_suspmod_driver);
+ pci_dev_put(gx_params->cs55x0);
+ kfree(gx_params);
+}
+
+MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>");
+MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode");
+MODULE_LICENSE("GPL");
+
+module_init(cpufreq_gx_init);
+module_exit(cpufreq_gx_exit);
+
diff --git a/kernel/drivers/cpufreq/highbank-cpufreq.c b/kernel/drivers/cpufreq/highbank-cpufreq.c
new file mode 100644
index 000000000..1608f7105
--- /dev/null
+++ b/kernel/drivers/cpufreq/highbank-cpufreq.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver provides the clk notifier callbacks that are used when
+ * the cpufreq-dt driver changes the frequency, to alert the highbank
+ * EnergyCore Management Engine (ECME) about the need to change
+ * voltage. The ECME interfaces with the actual voltage regulators.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/pl320-ipc.h>
+#include <linux/platform_device.h>
+
+#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
+#define HB_CPUFREQ_IPC_LEN 7
+#define HB_CPUFREQ_VOLT_RETRIES 15
+
+static int hb_voltage_change(unsigned int freq)
+{
+ u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
+
+ return pl320_ipc_transmit(msg);
+}
+
+static int hb_cpufreq_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *hclk)
+{
+ struct clk_notifier_data *clk_data = hclk;
+ int i = 0;
+
+ if (action == PRE_RATE_CHANGE) {
+ if (clk_data->new_rate > clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ } else if (action == POST_RATE_CHANGE) {
+ if (clk_data->new_rate < clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
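+
+/*
+ * Editorial note: the ordering above follows the usual DVFS rule --
+ * raise the voltage before a frequency increase (PRE_RATE_CHANGE) and
+ * lower it only after a frequency decrease (POST_RATE_CHANGE), so the
+ * core is never clocked faster than its supply voltage supports.
+ */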
+
+static struct notifier_block hb_cpufreq_clk_nb = {
+ .notifier_call = hb_cpufreq_clk_notify,
+};
+
+static int hb_cpufreq_driver_init(void)
+{
+ struct platform_device_info devinfo = { .name = "cpufreq-dt", };
+ struct device *cpu_dev;
+ struct clk *cpu_clk;
+ struct device_node *np;
+ int ret;
+
+ if ((!of_machine_is_compatible("calxeda,highbank")) &&
+ (!of_machine_is_compatible("calxeda,ecx-2000")))
+ return -ENODEV;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get highbank cpufreq device\n");
+ return -ENODEV;
+ }
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ pr_err("failed to find highbank cpufreq node\n");
+ return -ENOENT;
+ }
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ pr_err("failed to get cpu0 clock: %d\n", ret);
+ goto out_put_node;
+ }
+
+ ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb);
+ if (ret) {
+ pr_err("failed to register clk notifier: %d\n", ret);
+ goto out_put_node;
+ }
+
+ /* Instantiate cpufreq-dt */
+ platform_device_register_full(&devinfo);
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+module_init(hb_cpufreq_driver_init);
+
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
+MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/hisi-acpu-cpufreq.c b/kernel/drivers/cpufreq/hisi-acpu-cpufreq.c
new file mode 100644
index 000000000..026d5b222
--- /dev/null
+++ b/kernel/drivers/cpufreq/hisi-acpu-cpufreq.c
@@ -0,0 +1,42 @@
+/*
+ * Hisilicon Platforms Using ACPU CPUFreq Support
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static int __init hisi_acpu_cpufreq_driver_init(void)
+{
+ struct platform_device *pdev;
+
+ if (!of_machine_is_compatible("hisilicon,hi6220"))
+ return -ENODEV;
+
+ pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ return PTR_ERR_OR_ZERO(pdev);
+}
+module_init(hisi_acpu_cpufreq_driver_init);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/drivers/cpufreq/ia64-acpi-cpufreq.c b/kernel/drivers/cpufreq/ia64-acpi-cpufreq.c
new file mode 100644
index 000000000..c30aaa6a5
--- /dev/null
+++ b/kernel/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -0,0 +1,376 @@
+/*
+ * This file provides the ACPI based P-state support. This
+ * module works with generic cpufreq infrastructure. Most of
+ * the code is based on i386 version
+ * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c)
+ *
+ * Copyright (C) 2005 Intel Corp
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/pal.h>
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+MODULE_AUTHOR("Venkatesh Pallipadi");
+MODULE_DESCRIPTION("ACPI Processor P-States Driver");
+MODULE_LICENSE("GPL");
+
+
+struct cpufreq_acpi_io {
+ struct acpi_processor_performance acpi_data;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int resume;
+};
+
+static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+
+static struct cpufreq_driver acpi_cpufreq_driver;
+
+
+static int
+processor_set_pstate (
+ u32 value)
+{
+ s64 retval;
+
+ pr_debug("processor_set_pstate\n");
+
+ retval = ia64_pal_set_pstate((u64)value);
+
+ if (retval) {
+ pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
+ value, retval);
+ return -ENODEV;
+ }
+ return (int)retval;
+}
+
+
+static int
+processor_get_pstate (
+ u32 *value)
+{
+ u64 pstate_index = 0;
+ s64 retval;
+
+ pr_debug("processor_get_pstate\n");
+
+ retval = ia64_pal_get_pstate(&pstate_index,
+ PAL_GET_PSTATE_TYPE_INSTANT);
+ *value = (u32) pstate_index;
+
+ if (retval)
+ pr_debug("Failed to get current freq with "
+ "error 0x%lx, idx 0x%x\n", retval, *value);
+
+ return (int)retval;
+}
+
+
+/* To be used only after data->acpi_data is initialized */
+static unsigned
+extract_clock (
+ struct cpufreq_acpi_io *data,
+ unsigned value,
+ unsigned int cpu)
+{
+ unsigned long i;
+
+ pr_debug("extract_clock\n");
+
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ if (value == data->acpi_data.states[i].status)
+ return data->acpi_data.states[i].core_frequency;
+ }
+ return data->acpi_data.states[i-1].core_frequency;
+}
+
+
+static unsigned int
+processor_get_freq (
+ struct cpufreq_acpi_io *data,
+ unsigned int cpu)
+{
+ int ret = 0;
+ u32 value = 0;
+ cpumask_t saved_mask;
+ unsigned long clock_freq;
+
+ pr_debug("processor_get_freq\n");
+
+ saved_mask = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ if (smp_processor_id() != cpu)
+ goto migrate_end;
+
+ /* processor_get_pstate gets the instantaneous frequency */
+ ret = processor_get_pstate(&value);
+
+ if (ret) {
+ set_cpus_allowed_ptr(current, &saved_mask);
+ printk(KERN_WARNING "get performance failed with error %d\n",
+ ret);
+ ret = 0;
+ goto migrate_end;
+ }
+ clock_freq = extract_clock(data, value, cpu);
+ ret = clock_freq * 1000;
+
+migrate_end:
+ set_cpus_allowed_ptr(current, &saved_mask);
+ return ret;
+}
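+
+/*
+ * Editorial note: PAL calls operate on the CPU that executes them, so
+ * processor_get_freq() and processor_set_freq() temporarily pin the
+ * calling thread to the target CPU via set_cpus_allowed_ptr() and
+ * restore the saved affinity mask before returning.
+ */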
+
+
+static int
+processor_set_freq (
+ struct cpufreq_acpi_io *data,
+ struct cpufreq_policy *policy,
+ int state)
+{
+ int ret = 0;
+ u32 value = 0;
+ cpumask_t saved_mask;
+ int retval;
+
+ pr_debug("processor_set_freq\n");
+
+ saved_mask = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
+ if (smp_processor_id() != policy->cpu) {
+ retval = -EAGAIN;
+ goto migrate_end;
+ }
+
+ if (state == data->acpi_data.state) {
+ if (unlikely(data->resume)) {
+ pr_debug("Called after resume, resetting to P%d\n", state);
+ data->resume = 0;
+ } else {
+ pr_debug("Already at target state (P%d)\n", state);
+ retval = 0;
+ goto migrate_end;
+ }
+ }
+
+ pr_debug("Transitioning from P%d to P%d\n",
+ data->acpi_data.state, state);
+
+ /*
+ * First we write the target state's 'control' value to the
+ * control_register.
+ */
+
+ value = (u32) data->acpi_data.states[state].control;
+
+ pr_debug("Transitioning to state: 0x%08x\n", value);
+
+ ret = processor_set_pstate(value);
+ if (ret) {
+ printk(KERN_WARNING "Transition failed with error %d\n", ret);
+ retval = -ENODEV;
+ goto migrate_end;
+ }
+
+ data->acpi_data.state = state;
+
+ retval = 0;
+
+migrate_end:
+ set_cpus_allowed_ptr(current, &saved_mask);
+ return (retval);
+}
+
+
+static unsigned int
+acpi_cpufreq_get (
+ unsigned int cpu)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
+
+ pr_debug("acpi_cpufreq_get\n");
+
+ return processor_get_freq(data, cpu);
+}
+
+
+static int
+acpi_cpufreq_target (
+ struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
+}
+
+static int
+acpi_cpufreq_cpu_init (
+ struct cpufreq_policy *policy)
+{
+ unsigned int i;
+ unsigned int cpu = policy->cpu;
+ struct cpufreq_acpi_io *data;
+ unsigned int result = 0;
+
+ pr_debug("acpi_cpufreq_cpu_init\n");
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return (-ENOMEM);
+
+ acpi_io_data[cpu] = data;
+
+ result = acpi_processor_register_performance(&data->acpi_data, cpu);
+
+ if (result)
+ goto err_free;
+
+ /* capability check */
+ if (data->acpi_data.state_count <= 1) {
+ pr_debug("No P-States\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ if ((data->acpi_data.control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (data->acpi_data.status_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ pr_debug("Unsupported address space [%d, %d]\n",
+ (u32) (data->acpi_data.control_register.space_id),
+ (u32) (data->acpi_data.status_register.space_id));
+ result = -ENODEV;
+ goto err_unreg;
+ }
+
+ /* alloc freq_table */
+ data->freq_table = kzalloc(sizeof(*data->freq_table) *
+ (data->acpi_data.state_count + 1),
+ GFP_KERNEL);
+ if (!data->freq_table) {
+ result = -ENOMEM;
+ goto err_unreg;
+ }
+
+ /* detect transition latency */
+ policy->cpuinfo.transition_latency = 0;
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ if ((data->acpi_data.states[i].transition_latency * 1000) >
+ policy->cpuinfo.transition_latency) {
+ policy->cpuinfo.transition_latency =
+ data->acpi_data.states[i].transition_latency * 1000;
+ }
+ }
+
+ /* table init */
+ for (i = 0; i <= data->acpi_data.state_count; i++) {
+ if (i < data->acpi_data.state_count) {
+ data->freq_table[i].frequency =
+ data->acpi_data.states[i].core_frequency * 1000;
+ } else {
+ data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+ }
+ }
+
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
+ if (result) {
+ goto err_freqfree;
+ }
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
+ "activated.\n", cpu);
+
+ for (i = 0; i < data->acpi_data.state_count; i++)
+ pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
+ (i == data->acpi_data.state?'*':' '), i,
+ (u32) data->acpi_data.states[i].core_frequency,
+ (u32) data->acpi_data.states[i].power,
+ (u32) data->acpi_data.states[i].transition_latency,
+ (u32) data->acpi_data.states[i].bus_master_latency,
+ (u32) data->acpi_data.states[i].status,
+ (u32) data->acpi_data.states[i].control);
+
+ /* the first call to ->target() should result in us actually
+ * writing something to the appropriate registers. */
+ data->resume = 1;
+
+ return (result);
+
+ err_freqfree:
+ kfree(data->freq_table);
+ err_unreg:
+ acpi_processor_unregister_performance(&data->acpi_data, cpu);
+ err_free:
+ kfree(data);
+ acpi_io_data[cpu] = NULL;
+
+ return (result);
+}
+
+
+static int
+acpi_cpufreq_cpu_exit (
+ struct cpufreq_policy *policy)
+{
+ struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+
+ pr_debug("acpi_cpufreq_cpu_exit\n");
+
+ if (data) {
+ acpi_io_data[policy->cpu] = NULL;
+ acpi_processor_unregister_performance(&data->acpi_data,
+ policy->cpu);
+ kfree(data);
+ }
+
+ return (0);
+}
+
+
+static struct cpufreq_driver acpi_cpufreq_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = acpi_cpufreq_target,
+ .get = acpi_cpufreq_get,
+ .init = acpi_cpufreq_cpu_init,
+ .exit = acpi_cpufreq_cpu_exit,
+ .name = "acpi-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+
+static int __init
+acpi_cpufreq_init (void)
+{
+ pr_debug("acpi_cpufreq_init\n");
+
+ return cpufreq_register_driver(&acpi_cpufreq_driver);
+}
+
+
+static void __exit
+acpi_cpufreq_exit (void)
+{
+ pr_debug("acpi_cpufreq_exit\n");
+
+ cpufreq_unregister_driver(&acpi_cpufreq_driver);
+ return;
+}
+
+
+late_initcall(acpi_cpufreq_init);
+module_exit(acpi_cpufreq_exit);
+
diff --git a/kernel/drivers/cpufreq/imx6q-cpufreq.c b/kernel/drivers/cpufreq/imx6q-cpufreq.c
new file mode 100644
index 000000000..380a90d3c
--- /dev/null
+++ b/kernel/drivers/cpufreq/imx6q-cpufreq.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define PU_SOC_VOLTAGE_NORMAL 1250000
+#define PU_SOC_VOLTAGE_HIGH 1275000
+#define FREQ_1P2_GHZ 1200000000
+
+static struct regulator *arm_reg;
+static struct regulator *pu_reg;
+static struct regulator *soc_reg;
+
+static struct clk *arm_clk;
+static struct clk *pll1_sys_clk;
+static struct clk *pll1_sw_clk;
+static struct clk *step_clk;
+static struct clk *pll2_pfd2_396m_clk;
+
+static struct device *cpu_dev;
+static bool free_opp;
+static struct cpufreq_frequency_table *freq_table;
+static unsigned int transition_latency;
+
+static u32 *imx6_soc_volt;
+static u32 soc_opp_count;
+
+static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct dev_pm_opp *opp;
+ unsigned long freq_hz, volt, volt_old;
+ unsigned int old_freq, new_freq;
+ int ret;
+
+ new_freq = freq_table[index].frequency;
+ freq_hz = new_freq * 1000;
+ old_freq = clk_get_rate(arm_clk) / 1000;
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
+ return PTR_ERR(opp);
+ }
+
+ volt = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+ volt_old = regulator_get_voltage(arm_reg);
+
+ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+ old_freq / 1000, volt_old / 1000,
+ new_freq / 1000, volt / 1000);
+
+ /* scaling up? scale voltage before frequency */
+ if (new_freq > old_freq) {
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+ return ret;
+ }
+ }
+ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret) {
+ dev_err(cpu_dev,
+ "failed to scale vddarm up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /*
+ * The setpoints are selected per PLL/PFD frequencies, so we need to
+ * reprogram PLL for frequency scaling. The procedure of reprogramming
+ * PLL1 is as below.
+ *
+ * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
+ * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
+ * - Disable pll2_pfd2_396m_clk
+ */
+ clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+ clk_set_parent(pll1_sw_clk, step_clk);
+ if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+ clk_set_rate(pll1_sys_clk, new_freq * 1000);
+ clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ }
+
+ /* Ensure the arm clock divider is what we expect */
+ ret = clk_set_rate(arm_clk, new_freq * 1000);
+ if (ret) {
+ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+ regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ return ret;
+ }
+
+ /* scaling down? scale voltage after frequency */
+ if (new_freq < old_freq) {
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret) {
+ dev_warn(cpu_dev,
+ "failed to scale vddarm down: %d\n", ret);
+ ret = 0;
+ }
+ ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
+ ret = 0;
+ }
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+ ret = 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
+{
+ policy->clk = arm_clk;
+ return cpufreq_generic_init(policy, freq_table, transition_latency);
+}
+
+static struct cpufreq_driver imx6q_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = imx6q_set_target,
+ .get = cpufreq_generic_get,
+ .init = imx6q_cpufreq_init,
+ .name = "imx6q-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int imx6q_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct dev_pm_opp *opp;
+ unsigned long min_volt, max_volt;
+ int num, ret;
+ const struct property *prop;
+ const __be32 *val;
+ u32 nr, i, j;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu0 device\n");
+ return -ENODEV;
+ }
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ dev_err(cpu_dev, "failed to find cpu0 node\n");
+ return -ENOENT;
+ }
+
+ arm_clk = clk_get(cpu_dev, "arm");
+ pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
+ pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
+ step_clk = clk_get(cpu_dev, "step");
+ pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
+ if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
+ IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
+ dev_err(cpu_dev, "failed to get clocks\n");
+ ret = -ENOENT;
+ goto put_clk;
+ }
+
+ arm_reg = regulator_get(cpu_dev, "arm");
+ pu_reg = regulator_get_optional(cpu_dev, "pu");
+ soc_reg = regulator_get(cpu_dev, "soc");
+ if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
+ dev_err(cpu_dev, "failed to get regulators\n");
+ ret = -ENOENT;
+ goto put_reg;
+ }
+
+ /*
+ * We expect an OPP table supplied by the platform. Just in
+ * case the platform did not supply one, try to initialise it
+ * from the device tree.
+ */
+ num = dev_pm_opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = of_init_opp_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
+ goto put_reg;
+ }
+
+ /* Because we have added the OPPs here, we must free them */
+ free_opp = true;
+
+ num = dev_pm_opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto out_free_opp;
+ }
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto put_reg;
+ }
+
+ /* Size the imx6_soc_volt array to match the number of ARM OPPs */
+ imx6_soc_volt = devm_kzalloc(cpu_dev, sizeof(*imx6_soc_volt) * num, GFP_KERNEL);
+ if (imx6_soc_volt == NULL) {
+ ret = -ENOMEM;
+ goto free_freq_table;
+ }
+
+ prop = of_find_property(np, "fsl,soc-operating-points", NULL);
+ if (!prop || !prop->value)
+ goto soc_opp_out;
+
+ /*
+ * Each OPP is a tuple consisting of a frequency and a voltage,
+ * in the form <freq-kHz volt-uV>.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2 || (nr / 2) < num)
+ goto soc_opp_out;
+
+ for (j = 0; j < num; j++) {
+ val = prop->value;
+ for (i = 0; i < nr / 2; i++) {
+ unsigned long freq = be32_to_cpup(val++);
+ unsigned long volt = be32_to_cpup(val++);
+ if (freq_table[j].frequency == freq) {
+ imx6_soc_volt[soc_opp_count++] = volt;
+ break;
+ }
+ }
+ }
+
+soc_opp_out:
+ /* use fixed soc opp volt if no valid soc opp info found in dtb */
+ if (soc_opp_count != num) {
+ dev_warn(cpu_dev, "can NOT find valid fsl,soc-operating-points property in dtb, use default value!\n");
+ for (j = 0; j < num; j++)
+ imx6_soc_volt[j] = PU_SOC_VOLTAGE_NORMAL;
+ if (freq_table[num - 1].frequency * 1000 == FREQ_1P2_GHZ)
+ imx6_soc_volt[num - 1] = PU_SOC_VOLTAGE_HIGH;
+ }
+
+ if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ transition_latency = CPUFREQ_ETERNAL;
+
+ /*
+ * Calculate the ramp time for max voltage change in the
+ * VDDSOC and VDDPU regulators.
+ */
+ ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
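+
+ /*
+ * Editorial note: regulator_set_voltage_time() reports the ramp
+ * time in microseconds while cpuinfo.transition_latency is kept
+ * in nanoseconds, hence the "* 1000" conversions above.
+ */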
+
+ /*
+ * OPP is maintained in order of increasing frequency, and
+ * freq_table initialised from OPP is therefore sorted in the
+ * same order.
+ */
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
+ freq_table[0].frequency * 1000, true);
+ min_volt = dev_pm_opp_get_voltage(opp);
+ opp = dev_pm_opp_find_freq_exact(cpu_dev,
+ freq_table[--num].frequency * 1000, true);
+ max_volt = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+ ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+
+ ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
+ if (ret) {
+ dev_err(cpu_dev, "failed register driver: %d\n", ret);
+ goto free_freq_table;
+ }
+
+ of_node_put(np);
+ return 0;
+
+free_freq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_opp:
+ if (free_opp)
+ of_free_opp_table(cpu_dev);
+put_reg:
+ if (!IS_ERR(arm_reg))
+ regulator_put(arm_reg);
+ if (!IS_ERR(pu_reg))
+ regulator_put(pu_reg);
+ if (!IS_ERR(soc_reg))
+ regulator_put(soc_reg);
+put_clk:
+ if (!IS_ERR(arm_clk))
+ clk_put(arm_clk);
+ if (!IS_ERR(pll1_sys_clk))
+ clk_put(pll1_sys_clk);
+ if (!IS_ERR(pll1_sw_clk))
+ clk_put(pll1_sw_clk);
+ if (!IS_ERR(step_clk))
+ clk_put(step_clk);
+ if (!IS_ERR(pll2_pfd2_396m_clk))
+ clk_put(pll2_pfd2_396m_clk);
+ of_node_put(np);
+ return ret;
+}
+
+static int imx6q_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&imx6q_cpufreq_driver);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+ if (free_opp)
+ of_free_opp_table(cpu_dev);
+ regulator_put(arm_reg);
+ if (!IS_ERR(pu_reg))
+ regulator_put(pu_reg);
+ regulator_put(soc_reg);
+ clk_put(arm_clk);
+ clk_put(pll1_sys_clk);
+ clk_put(pll1_sw_clk);
+ clk_put(step_clk);
+ clk_put(pll2_pfd2_396m_clk);
+
+ return 0;
+}
+
+static struct platform_driver imx6q_cpufreq_platdrv = {
+ .driver = {
+ .name = "imx6q-cpufreq",
+ },
+ .probe = imx6q_cpufreq_probe,
+ .remove = imx6q_cpufreq_remove,
+};
+module_platform_driver(imx6q_cpufreq_platdrv);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/integrator-cpufreq.c b/kernel/drivers/cpufreq/integrator-cpufreq.c
new file mode 100644
index 000000000..129e266f7
--- /dev/null
+++ b/kernel/drivers/cpufreq/integrator-cpufreq.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * CPU support functions
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/mach-types.h>
+#include <asm/hardware/icst.h>
+
+static void __iomem *cm_base;
+/* The cpufreq driver only uses the OSC register */
+#define INTEGRATOR_HDR_OSC_OFFSET 0x08
+#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
+
+static struct cpufreq_driver integrator_driver;
+
+static const struct icst_params lclk_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
+ .vd_min = 8,
+ .vd_max = 132,
+ .rd_min = 24,
+ .rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+static const struct icst_params cclk_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
+ .vd_min = 12,
+ .vd_max = 160,
+ .rd_min = 24,
+ .rd_max = 24,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+/*
+ * Validate the speed policy.
+ */
+static int integrator_verify_policy(struct cpufreq_policy *policy)
+{
+ struct icst_vco vco;
+
+ cpufreq_verify_within_cpu_limits(policy);
+
+ vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
+ policy->max = icst_hz(&cclk_params, vco) / 1000;
+
+ vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
+ policy->min = icst_hz(&cclk_params, vco) / 1000;
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+
+static int integrator_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ cpumask_t cpus_allowed;
+ int cpu = policy->cpu;
+ struct icst_vco vco;
+ struct cpufreq_freqs freqs;
+ u_int cm_osc;
+
+ /*
+ * Save this threads cpus_allowed mask.
+ */
+ cpus_allowed = current->cpus_allowed;
+
+ /*
+ * Bind to the specified CPU. When this call returns,
+ * we should be running on the right CPU.
+ */
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ BUG_ON(cpu != smp_processor_id());
+
+ /* get current setting */
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
+
+ if (machine_is_integrator()) {
+ vco.s = (cm_osc >> 8) & 7;
+ } else if (machine_is_cintegrator()) {
+ vco.s = 1;
+ }
+ vco.v = cm_osc & 255;
+ vco.r = 22;
+ freqs.old = icst_hz(&cclk_params, vco) / 1000;
+
+ /* icst_hz_to_vco rounds down -- so we need the next
+ * larger freq in case of CPUFREQ_RELATION_L.
+ */
+ if (relation == CPUFREQ_RELATION_L)
+ target_freq += 999;
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
+ freqs.new = icst_hz(&cclk_params, vco) / 1000;
+
+ if (freqs.old == freqs.new) {
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+ return 0;
+ }
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
+
+ if (machine_is_integrator()) {
+ cm_osc &= 0xfffff800;
+ cm_osc |= vco.s << 8;
+ } else if (machine_is_cintegrator()) {
+ cm_osc &= 0xffffff00;
+ }
+ cm_osc |= vco.v;
+
+ __raw_writel(0xa05f, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
+ __raw_writel(cm_osc, cm_base + INTEGRATOR_HDR_OSC_OFFSET);
+ __raw_writel(0, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
+
+ /*
+ * Restore the CPUs allowed mask.
+ */
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+
+ return 0;
+}
+
+static unsigned int integrator_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned int current_freq;
+ u_int cm_osc;
+ struct icst_vco vco;
+
+ cpus_allowed = current->cpus_allowed;
+
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ BUG_ON(cpu != smp_processor_id());
+
+ /* detect memory etc. */
+ cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
+
+ if (machine_is_integrator()) {
+ vco.s = (cm_osc >> 8) & 7;
+ } else {
+ vco.s = 1;
+ }
+ vco.v = cm_osc & 255;
+ vco.r = 22;
+
+ current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ return current_freq;
+}
+
+static int integrator_cpufreq_init(struct cpufreq_policy *policy)
+{
+
+ /* set default policy and cpuinfo */
+ policy->max = policy->cpuinfo.max_freq = 160000;
+ policy->min = policy->cpuinfo.min_freq = 12000;
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
+
+ return 0;
+}
+
+static struct cpufreq_driver integrator_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = integrator_verify_policy,
+ .target = integrator_set_target,
+ .get = integrator_get,
+ .init = integrator_cpufreq_init,
+ .name = "integrator",
+};
+
+static int __init integrator_cpufreq_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!cm_base)
+ return -ENODEV;
+
+ return cpufreq_register_driver(&integrator_driver);
+}
+
+static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&integrator_driver);
+}
+
+static const struct of_device_id integrator_cpufreq_match[] = {
+ { .compatible = "arm,core-module-integrator"},
+ { },
+};
+
+static struct platform_driver integrator_cpufreq_driver = {
+ .driver = {
+ .name = "integrator-cpufreq",
+ .of_match_table = integrator_cpufreq_match,
+ },
+ .remove = __exit_p(integrator_cpufreq_remove),
+};
+
+module_platform_driver_probe(integrator_cpufreq_driver,
+ integrator_cpufreq_probe);
+
+MODULE_AUTHOR("Russell M. King");
+MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/intel_pstate.c b/kernel/drivers/cpufreq/intel_pstate.c
new file mode 100644
index 000000000..c45d274a7
--- /dev/null
+++ b/kernel/drivers/cpufreq/intel_pstate.c
@@ -0,0 +1,1282 @@
+/*
+ * intel_pstate.c: Native P state management for Intel processors
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/acpi.h>
+#include <trace/events/power.h>
+
+#include <asm/div64.h>
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+#include <asm/cpufeature.h>
+
+#define BYT_RATIOS 0x66a
+#define BYT_VIDS 0x66b
+#define BYT_TURBO_RATIOS 0x66c
+#define BYT_TURBO_VIDS 0x66d
+
+#define FRAC_BITS 8
+#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
+#define fp_toint(X) ((X) >> FRAC_BITS)
+
+
+static inline int32_t mul_fp(int32_t x, int32_t y)
+{
+ return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
+}
+
+static inline int32_t div_fp(int32_t x, int32_t y)
+{
+ return div_s64((int64_t)x << FRAC_BITS, y);
+}
+
+static inline int ceiling_fp(int32_t x)
+{
+ int mask, ret;
+
+ ret = fp_toint(x);
+ mask = (1 << FRAC_BITS) - 1;
+ if (x & mask)
+ ret += 1;
+ return ret;
+}
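+
+/*
+ * Worked example (editorial): with FRAC_BITS = 8, int_tofp(3) = 768 and
+ * mul_fp(int_tofp(3), int_tofp(2)) = (768 * 512) >> 8 = 1536, so
+ * fp_toint() of the product is 6; ceiling_fp(int_tofp(3) + 1) = 4
+ * because the fractional bits are non-zero.
+ */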
+
+struct sample {
+ int32_t core_pct_busy;
+ u64 aperf;
+ u64 mperf;
+ int freq;
+ ktime_t time;
+};
+
+struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
+ int scaling;
+ int turbo_pstate;
+};
+
+struct vid_data {
+ int min;
+ int max;
+ int turbo;
+ int32_t ratio;
+};
+
+struct _pid {
+ int setpoint;
+ int32_t integral;
+ int32_t p_gain;
+ int32_t i_gain;
+ int32_t d_gain;
+ int deadband;
+ int32_t last_err;
+};
+
+struct cpudata {
+ int cpu;
+
+ struct timer_list timer;
+
+ struct pstate_data pstate;
+ struct vid_data vid;
+ struct _pid pid;
+
+ ktime_t last_sample_time;
+ u64 prev_aperf;
+ u64 prev_mperf;
+ struct sample sample;
+};
+
+static struct cpudata **all_cpu_data;
+struct pstate_adjust_policy {
+ int sample_rate_ms;
+ int deadband;
+ int setpoint;
+ int p_gain_pct;
+ int d_gain_pct;
+ int i_gain_pct;
+};
+
+struct pstate_funcs {
+ int (*get_max)(void);
+ int (*get_min)(void);
+ int (*get_turbo)(void);
+ int (*get_scaling)(void);
+ void (*set)(struct cpudata*, int pstate);
+ void (*get_vid)(struct cpudata *);
+};
+
+struct cpu_defaults {
+ struct pstate_adjust_policy pid_policy;
+ struct pstate_funcs funcs;
+};
+
+static struct pstate_adjust_policy pid_params;
+static struct pstate_funcs pstate_funcs;
+static int hwp_active;
+
+struct perf_limits {
+ int no_turbo;
+ int turbo_disabled;
+ int max_perf_pct;
+ int min_perf_pct;
+ int32_t max_perf;
+ int32_t min_perf;
+ int max_policy_pct;
+ int max_sysfs_pct;
+ int min_policy_pct;
+ int min_sysfs_pct;
+};
+
+static struct perf_limits limits = {
+ .no_turbo = 0,
+ .turbo_disabled = 0,
+ .max_perf_pct = 100,
+ .max_perf = int_tofp(1),
+ .min_perf_pct = 0,
+ .min_perf = 0,
+ .max_policy_pct = 100,
+ .max_sysfs_pct = 100,
+ .min_policy_pct = 0,
+ .min_sysfs_pct = 0,
+};
+
+static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
+ int deadband, int integral)
+{
+ pid->setpoint = setpoint;
+ pid->deadband = deadband;
+ pid->integral = int_tofp(integral);
+ pid->last_err = int_tofp(setpoint) - int_tofp(busy);
+}
+
+static inline void pid_p_gain_set(struct _pid *pid, int percent)
+{
+ pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static inline void pid_i_gain_set(struct _pid *pid, int percent)
+{
+ pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static inline void pid_d_gain_set(struct _pid *pid, int percent)
+{
+ pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static signed int pid_calc(struct _pid *pid, int32_t busy)
+{
+ signed int result;
+ int32_t pterm, dterm, fp_error;
+ int32_t integral_limit;
+
+ fp_error = int_tofp(pid->setpoint) - busy;
+
+ if (abs(fp_error) <= int_tofp(pid->deadband))
+ return 0;
+
+ pterm = mul_fp(pid->p_gain, fp_error);
+
+ pid->integral += fp_error;
+
+ /*
+ * We limit the integral here so that it will never
+ * get higher than 30. This prevents it from becoming
+ * too large an input over long periods of time and allows
+ * it to get factored out sooner.
+ *
+ * The value of 30 was chosen through experimentation.
+ */
+ integral_limit = int_tofp(30);
+ if (pid->integral > integral_limit)
+ pid->integral = integral_limit;
+ if (pid->integral < -integral_limit)
+ pid->integral = -integral_limit;
+
+ dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+ pid->last_err = fp_error;
+
+ result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
+ result = result + (1 << (FRAC_BITS-1));
+ return (signed int)fp_toint(result);
+}
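+
+/*
+ * Illustrative run (hypothetical values): with the core defaults of
+ * setpoint = 97, deadband = 0, p_gain_pct = 20 and zero i/d gains, a
+ * freshly reset PID fed busy = int_tofp(90) sees fp_error = int_tofp(7);
+ * the proportional term is roughly 0.2 * 7 = 1.4, so pid_calc() returns
+ * 1 after rounding -- a positive output that lowers the P-state (see
+ * the caller: negative values increase the pstate and vice versa).
+ */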
+
+static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
+{
+ pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
+ pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
+ pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
+
+ pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
+}
+
+static inline void intel_pstate_reset_all_pid(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu])
+ intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
+ }
+}
+
+static inline void update_turbo_state(void)
+{
+ u64 misc_en;
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[0];
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+ limits.turbo_disabled =
+ (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+ cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
+}
+
+#define PCT_TO_HWP(x) (x * 255 / 100)
+static void intel_pstate_hwp_set(void)
+{
+ int min, max, cpu;
+ u64 value, freq;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ min = PCT_TO_HWP(limits.min_perf_pct);
+ value &= ~HWP_MIN_PERF(~0L);
+ value |= HWP_MIN_PERF(min);
+
+ max = PCT_TO_HWP(limits.max_perf_pct);
+ if (limits.no_turbo) {
+ rdmsrl(MSR_HWP_CAPABILITIES, freq);
+ max = HWP_GUARANTEED_PERF(freq);
+ }
+
+ value &= ~HWP_MAX_PERF(~0L);
+ value |= HWP_MAX_PERF(max);
+ wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+ }
+
+ put_online_cpus();
+}
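+
+/*
+ * Worked example (editorial): PCT_TO_HWP(50) = 50 * 255 / 100 = 127, so
+ * a 50% limit maps to HWP performance level 127 of 255.
+ */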
+
+/************************** debugfs begin ************************/
+static int pid_param_set(void *data, u64 val)
+{
+ *(u32 *)data = val;
+ intel_pstate_reset_all_pid();
+ return 0;
+}
+
+static int pid_param_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
+
+struct pid_param {
+ char *name;
+ void *value;
+};
+
+static struct pid_param pid_files[] = {
+ {"sample_rate_ms", &pid_params.sample_rate_ms},
+ {"d_gain_pct", &pid_params.d_gain_pct},
+ {"i_gain_pct", &pid_params.i_gain_pct},
+ {"deadband", &pid_params.deadband},
+ {"setpoint", &pid_params.setpoint},
+ {"p_gain_pct", &pid_params.p_gain_pct},
+ {NULL, NULL}
+};
+
+static void __init intel_pstate_debug_expose_params(void)
+{
+ struct dentry *debugfs_parent;
+ int i = 0;
+
+ if (hwp_active)
+ return;
+ debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
+ if (IS_ERR_OR_NULL(debugfs_parent))
+ return;
+ while (pid_files[i].name) {
+ debugfs_create_file(pid_files[i].name, 0660,
+ debugfs_parent, pid_files[i].value,
+ &fops_pid_param);
+ i++;
+ }
+}
+
+/************************** debugfs end ************************/
+
+/************************** sysfs begin ************************/
+#define show_one(file_name, object) \
+ static ssize_t show_##file_name \
+ (struct kobject *kobj, struct attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%u\n", limits.object); \
+ }
+
+static ssize_t show_turbo_pct(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct cpudata *cpu;
+ int total, no_turbo, turbo_pct;
+ uint32_t turbo_fp;
+
+ cpu = all_cpu_data[0];
+
+ total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+ no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
+ turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+ turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
+ return sprintf(buf, "%u\n", turbo_pct);
+}
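+
+/*
+ * Worked example (hypothetical pstates): with min_pstate = 9,
+ * max_pstate = 23 and turbo_pstate = 28, total = 20 and no_turbo = 15,
+ * so turbo_pct = 100 - 75 = 25 -- a quarter of the pstates are turbo.
+ */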
+
+static ssize_t show_num_pstates(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct cpudata *cpu;
+ int total;
+
+ cpu = all_cpu_data[0];
+ total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+ return sprintf(buf, "%u\n", total);
+}
+
+static ssize_t show_no_turbo(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ ssize_t ret;
+
+ update_turbo_state();
+ if (limits.turbo_disabled)
+ ret = sprintf(buf, "%u\n", limits.turbo_disabled);
+ else
+ ret = sprintf(buf, "%u\n", limits.no_turbo);
+
+ return ret;
+}
+
+static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ update_turbo_state();
+ if (limits.turbo_disabled) {
+ pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+ return -EPERM;
+ }
+
+ limits.no_turbo = clamp_t(int, input, 0, 1);
+
+ if (hwp_active)
+ intel_pstate_hwp_set();
+
+ return count;
+}
+
+static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
+ limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
+ if (hwp_active)
+ intel_pstate_hwp_set();
+ return count;
+}
+
+static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ limits.min_sysfs_pct = clamp_t(int, input, 0, 100);
+ limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ if (hwp_active)
+ intel_pstate_hwp_set();
+ return count;
+}
+
+show_one(max_perf_pct, max_perf_pct);
+show_one(min_perf_pct, min_perf_pct);
+
+define_one_global_rw(no_turbo);
+define_one_global_rw(max_perf_pct);
+define_one_global_rw(min_perf_pct);
+define_one_global_ro(turbo_pct);
+define_one_global_ro(num_pstates);
+
+static struct attribute *intel_pstate_attributes[] = {
+ &no_turbo.attr,
+ &max_perf_pct.attr,
+ &min_perf_pct.attr,
+ &turbo_pct.attr,
+ &num_pstates.attr,
+ NULL
+};
+
+static struct attribute_group intel_pstate_attr_group = {
+ .attrs = intel_pstate_attributes,
+};
+
+static void __init intel_pstate_sysfs_expose_params(void)
+{
+ struct kobject *intel_pstate_kobject;
+ int rc;
+
+ intel_pstate_kobject = kobject_create_and_add("intel_pstate",
+ &cpu_subsys.dev_root->kobj);
+ BUG_ON(!intel_pstate_kobject);
+ rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
+ BUG_ON(rc);
+}
+/************************** sysfs end ************************/
+
+static void intel_pstate_hwp_enable(void)
+{
+ hwp_active++;
+ pr_info("intel_pstate HWP enabled\n");
+
+ wrmsrl(MSR_PM_ENABLE, 0x1);
+}
+
+static int byt_get_min_pstate(void)
+{
+ u64 value;
+
+ rdmsrl(BYT_RATIOS, value);
+ return (value >> 8) & 0x7F;
+}
+
+static int byt_get_max_pstate(void)
+{
+ u64 value;
+
+ rdmsrl(BYT_RATIOS, value);
+ return (value >> 16) & 0x7F;
+}
+
+static int byt_get_turbo_pstate(void)
+{
+ u64 value;
+
+ rdmsrl(BYT_TURBO_RATIOS, value);
+ return value & 0x7F;
+}
+
+static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+{
+ u64 val;
+ int32_t vid_fp;
+ u32 vid;
+
+ val = pstate << 8;
+ if (limits.no_turbo && !limits.turbo_disabled)
+ val |= (u64)1 << 32;
+
+ vid_fp = cpudata->vid.min + mul_fp(
+ int_tofp(pstate - cpudata->pstate.min_pstate),
+ cpudata->vid.ratio);
+
+ vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
+ vid = ceiling_fp(vid_fp);
+
+ if (pstate > cpudata->pstate.max_pstate)
+ vid = cpudata->vid.turbo;
+
+ val |= vid;
+
+ wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+}
+
+#define BYT_BCLK_FREQS 5
+static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
+
+static int byt_get_scaling(void)
+{
+ u64 value;
+ int i;
+
+ rdmsrl(MSR_FSB_FREQ, value);
+ i = value & 0x3;
+
+ BUG_ON(i >= BYT_BCLK_FREQS);
+
+ return byt_freq_table[i] * 100;
+}
+
+static void byt_get_vid(struct cpudata *cpudata)
+{
+ u64 value;
+
+ rdmsrl(BYT_VIDS, value);
+ cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+ cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+ cpudata->vid.ratio = div_fp(
+ cpudata->vid.max - cpudata->vid.min,
+ int_tofp(cpudata->pstate.max_pstate -
+ cpudata->pstate.min_pstate));
+
+ rdmsrl(BYT_TURBO_VIDS, value);
+ cpudata->vid.turbo = value & 0x7f;
+}
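+
+/*
+ * Illustrative VID interpolation (hypothetical register values): with
+ * vid.min = 30, vid.max = 70, min_pstate = 10 and max_pstate = 30 the
+ * ratio is (70 - 30) / (30 - 10) = 2, so a request for pstate 15 gives
+ * vid = 30 + (15 - 10) * 2 = 40 (before clamping and the turbo
+ * override in byt_set_pstate()).
+ */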
+
+static int core_get_min_pstate(void)
+{
+ u64 value;
+
+ rdmsrl(MSR_PLATFORM_INFO, value);
+ return (value >> 40) & 0xFF;
+}
+
+static int core_get_max_pstate(void)
+{
+ u64 value;
+
+ rdmsrl(MSR_PLATFORM_INFO, value);
+ return (value >> 8) & 0xFF;
+}
+
+static int core_get_turbo_pstate(void)
+{
+ u64 value;
+ int nont, ret;
+
+ rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+ nont = core_get_max_pstate();
+ ret = (value) & 255;
+ if (ret <= nont)
+ ret = nont;
+ return ret;
+}
+
+static inline int core_get_scaling(void)
+{
+ return 100000;
+}
+
+static void core_set_pstate(struct cpudata *cpudata, int pstate)
+{
+ u64 val;
+
+ val = pstate << 8;
+ if (limits.no_turbo && !limits.turbo_disabled)
+ val |= (u64)1 << 32;
+
+ wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+}
+
+static int knl_get_turbo_pstate(void)
+{
+ u64 value;
+ int nont, ret;
+
+ rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
+ nont = core_get_max_pstate();
+ ret = (((value) >> 8) & 0xFF);
+ if (ret <= nont)
+ ret = nont;
+ return ret;
+}
+
+static struct cpu_defaults core_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 20,
+ .d_gain_pct = 0,
+ .i_gain_pct = 0,
+ },
+ .funcs = {
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
+ .get_scaling = core_get_scaling,
+ .set = core_set_pstate,
+ },
+};
+
+static struct cpu_defaults byt_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 60,
+ .p_gain_pct = 14,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+ },
+ .funcs = {
+ .get_max = byt_get_max_pstate,
+ .get_min = byt_get_min_pstate,
+ .get_turbo = byt_get_turbo_pstate,
+ .set = byt_set_pstate,
+ .get_scaling = byt_get_scaling,
+ .get_vid = byt_get_vid,
+ },
+};
+
+static struct cpu_defaults knl_params = {
+ .pid_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 97,
+ .p_gain_pct = 20,
+ .d_gain_pct = 0,
+ .i_gain_pct = 0,
+ },
+ .funcs = {
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = knl_get_turbo_pstate,
+ .set = core_set_pstate,
+ },
+};
+
+static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
+{
+ int max_perf = cpu->pstate.turbo_pstate;
+ int max_perf_adj;
+ int min_perf;
+
+ if (limits.no_turbo || limits.turbo_disabled)
+ max_perf = cpu->pstate.max_pstate;
+
+ /*
+ * performance can be limited by user through sysfs, by cpufreq
+ * policy, or by cpu specific default values determined through
+ * experimentation.
+ */
+ max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+ *max = clamp_t(int, max_perf_adj,
+ cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
+
+ min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
+ *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
+}
+
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+{
+ int max_perf, min_perf;
+
+ update_turbo_state();
+
+ intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+
+ pstate = clamp_t(int, pstate, min_perf, max_perf);
+
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+
+ cpu->pstate.current_pstate = pstate;
+
+ pstate_funcs.set(cpu, pstate);
+}
+
+static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+{
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+ cpu->pstate.scaling = pstate_funcs.get_scaling();
+
+ if (pstate_funcs.get_vid)
+ pstate_funcs.get_vid(cpu);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static inline void intel_pstate_calc_busy(struct cpudata *cpu)
+{
+ struct sample *sample = &cpu->sample;
+ int64_t core_pct;
+
+ core_pct = int_tofp(sample->aperf) * int_tofp(100);
+ core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
+
+ sample->freq = fp_toint(
+ mul_fp(int_tofp(
+ cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
+ core_pct));
+
+ sample->core_pct_busy = (int32_t)core_pct;
+}
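+
+/*
+ * For illustration (hypothetical counter deltas): with an APERF delta
+ * of 800000 and an MPERF delta of 1000000, core_pct is
+ * 800000 * 100 / 1000000 = 80 (in fixed point). With max_pstate = 24
+ * and scaling = 100000 kHz, the estimated frequency is
+ * (24 * 100000 / 100) * 80 = 1920000 kHz, i.e. roughly 1.9 GHz.
+ */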
+
+static inline void intel_pstate_sample(struct cpudata *cpu)
+{
+ u64 aperf, mperf;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+ local_irq_restore(flags);
+
+ cpu->last_sample_time = cpu->sample.time;
+ cpu->sample.time = ktime_get();
+ cpu->sample.aperf = aperf;
+ cpu->sample.mperf = mperf;
+ cpu->sample.aperf -= cpu->prev_aperf;
+ cpu->sample.mperf -= cpu->prev_mperf;
+
+ intel_pstate_calc_busy(cpu);
+
+ cpu->prev_aperf = aperf;
+ cpu->prev_mperf = mperf;
+}
+
+static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
+{
+ int delay;
+
+ delay = msecs_to_jiffies(50);
+ mod_timer_pinned(&cpu->timer, jiffies + delay);
+}
+
+static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
+{
+ int delay;
+
+ delay = msecs_to_jiffies(pid_params.sample_rate_ms);
+ mod_timer_pinned(&cpu->timer, jiffies + delay);
+}
+
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
+{
+ int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+ u32 duration_us;
+ u32 sample_time;
+
+ /*
+ * core_busy is the ratio of actual performance to max.
+ * max_pstate is the max non-turbo pstate available.
+ * current_pstate was the pstate that was requested during
+ * the last sample period.
+ *
+ * We normalize core_busy, which was our actual percent
+ * performance, to what we requested during the last sample
+ * period. The result will be a percentage of busy at a
+ * specified pstate.
+ */
+ core_busy = cpu->sample.core_pct_busy;
+ max_pstate = int_tofp(cpu->pstate.max_pstate);
+ current_pstate = int_tofp(cpu->pstate.current_pstate);
+ core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+
+ /*
+ * Since we have a deferred timer, it will not fire unless
+ * we are in C0. So, determine if the actual elapsed time
+ * is significantly greater (3x) than our sample interval. If it
+ * is, then we were idle for a long enough period of time
+ * to adjust our busyness.
+ */
+ sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
+ duration_us = (u32) ktime_us_delta(cpu->sample.time,
+ cpu->last_sample_time);
+ if (duration_us > sample_time * 3) {
+ sample_ratio = div_fp(int_tofp(sample_time),
+ int_tofp(duration_us));
+ core_busy = mul_fp(core_busy, sample_ratio);
+ }
+
+ return core_busy;
+}
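+
+/*
+ * For illustration (hypothetical timing): with a 10 ms sample rate,
+ * sample_time is 10000 us. If the deferred timer actually fired after
+ * 50000 us because the CPU was idle, sample_ratio is 10000/50000 = 0.2,
+ * so a measured core_busy of 80 is rescaled to 16, pulling the next
+ * pstate request down after the long idle period.
+ */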
+
+static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+{
+ int32_t busy_scaled;
+ struct _pid *pid;
+ signed int ctl;
+
+ pid = &cpu->pid;
+ busy_scaled = intel_pstate_get_scaled_busy(cpu);
+
+ ctl = pid_calc(pid, busy_scaled);
+
+ /* Negative values of ctl increase the pstate and vice versa */
+ intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
+}
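+
+/*
+ * For illustration (hypothetical controller state): with the core
+ * setpoint of 97 and busy_scaled = 107, the error is -10; a purely
+ * proportional response with p_gain_pct = 20 would yield a ctl of
+ * about -2, so the pstate request rises by two steps. The exact value
+ * also depends on pid_calc()'s integral and derivative terms.
+ */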
+
+static void intel_hwp_timer_func(unsigned long __data)
+{
+ struct cpudata *cpu = (struct cpudata *) __data;
+
+ intel_pstate_sample(cpu);
+ intel_hwp_set_sample_time(cpu);
+}
+
+static void intel_pstate_timer_func(unsigned long __data)
+{
+ struct cpudata *cpu = (struct cpudata *) __data;
+ struct sample *sample;
+
+ intel_pstate_sample(cpu);
+
+ sample = &cpu->sample;
+
+ intel_pstate_adjust_busy_pstate(cpu);
+
+ trace_pstate_sample(fp_toint(sample->core_pct_busy),
+ fp_toint(intel_pstate_get_scaled_busy(cpu)),
+ cpu->pstate.current_pstate,
+ sample->mperf,
+ sample->aperf,
+ sample->freq);
+
+ intel_pstate_set_sample_time(cpu);
+}
+
+#define ICPU(model, policy) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
+ (unsigned long)&policy }
+
+static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ ICPU(0x2a, core_params),
+ ICPU(0x2d, core_params),
+ ICPU(0x37, byt_params),
+ ICPU(0x3a, core_params),
+ ICPU(0x3c, core_params),
+ ICPU(0x3d, core_params),
+ ICPU(0x3e, core_params),
+ ICPU(0x3f, core_params),
+ ICPU(0x45, core_params),
+ ICPU(0x46, core_params),
+ ICPU(0x47, core_params),
+ ICPU(0x4c, byt_params),
+ ICPU(0x4e, core_params),
+ ICPU(0x4f, core_params),
+ ICPU(0x56, core_params),
+ ICPU(0x57, knl_params),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+
+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
+ ICPU(0x56, core_params),
+ {}
+};
+
+static int intel_pstate_init_cpu(unsigned int cpunum)
+{
+ struct cpudata *cpu;
+
+ if (!all_cpu_data[cpunum])
+ all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
+ GFP_KERNEL);
+ if (!all_cpu_data[cpunum])
+ return -ENOMEM;
+
+ cpu = all_cpu_data[cpunum];
+
+ cpu->cpu = cpunum;
+ intel_pstate_get_cpu_pstates(cpu);
+
+ init_timer_deferrable(&cpu->timer);
+ cpu->timer.data = (unsigned long)cpu;
+ cpu->timer.expires = jiffies + HZ/100;
+
+ if (!hwp_active)
+ cpu->timer.function = intel_pstate_timer_func;
+ else
+ cpu->timer.function = intel_hwp_timer_func;
+
+ intel_pstate_busy_pid_reset(cpu);
+ intel_pstate_sample(cpu);
+
+ add_timer_on(&cpu->timer, cpunum);
+
+ pr_debug("Intel pstate controlling: cpu %d\n", cpunum);
+
+ return 0;
+}
+
+static unsigned int intel_pstate_get(unsigned int cpu_num)
+{
+ struct sample *sample;
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[cpu_num];
+ if (!cpu)
+ return 0;
+ sample = &cpu->sample;
+ return sample->freq;
+}
+
+static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+{
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
+ policy->max >= policy->cpuinfo.max_freq) {
+ limits.min_policy_pct = 100;
+ limits.min_perf_pct = 100;
+ limits.min_perf = int_tofp(1);
+ limits.max_policy_pct = 100;
+ limits.max_perf_pct = 100;
+ limits.max_perf = int_tofp(1);
+ limits.no_turbo = 0;
+ return 0;
+ }
+
+ limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+ limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0, 100);
+ limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
+ limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
+ limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
+ if (hwp_active)
+ intel_pstate_hwp_set();
+
+ return 0;
+}
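+
+/*
+ * For illustration (hypothetical policy): with cpuinfo.max_freq =
+ * 3200000 kHz and policy->min = 1600000 kHz, min_policy_pct is
+ * (1600000 * 100) / 3200000 = 50, and limits.min_perf becomes the
+ * fixed-point fraction 50/100. The sysfs limits can only tighten the
+ * policy-derived percentages, never widen them.
+ */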
+
+static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+
+ if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
+ policy->policy != CPUFREQ_POLICY_PERFORMANCE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
+{
+ int cpu_num = policy->cpu;
+ struct cpudata *cpu = all_cpu_data[cpu_num];
+
+ pr_info("intel_pstate CPU %d exiting\n", cpu_num);
+
+ del_timer_sync(&all_cpu_data[cpu_num]->timer);
+ if (hwp_active)
+ return;
+
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+ int rc;
+
+ rc = intel_pstate_init_cpu(policy->cpu);
+ if (rc)
+ return rc;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
+ policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
+ policy->cpuinfo.max_freq =
+ cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+
+ return 0;
+}
+
+static struct cpufreq_driver intel_pstate_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = intel_pstate_verify_policy,
+ .setpolicy = intel_pstate_set_policy,
+ .get = intel_pstate_get,
+ .init = intel_pstate_cpu_init,
+ .stop_cpu = intel_pstate_stop_cpu,
+ .name = "intel_pstate",
+};
+
+static int __initdata no_load;
+static int __initdata no_hwp;
+static int __initdata hwp_only;
+static unsigned int force_load;
+
+static int intel_pstate_msrs_not_valid(void)
+{
+ if (!pstate_funcs.get_max() ||
+ !pstate_funcs.get_min() ||
+ !pstate_funcs.get_turbo())
+ return -ENODEV;
+
+ return 0;
+}
+
+static void copy_pid_params(struct pstate_adjust_policy *policy)
+{
+ pid_params.sample_rate_ms = policy->sample_rate_ms;
+ pid_params.p_gain_pct = policy->p_gain_pct;
+ pid_params.i_gain_pct = policy->i_gain_pct;
+ pid_params.d_gain_pct = policy->d_gain_pct;
+ pid_params.deadband = policy->deadband;
+ pid_params.setpoint = policy->setpoint;
+}
+
+static void copy_cpu_funcs(struct pstate_funcs *funcs)
+{
+ pstate_funcs.get_max = funcs->get_max;
+ pstate_funcs.get_min = funcs->get_min;
+ pstate_funcs.get_turbo = funcs->get_turbo;
+ pstate_funcs.get_scaling = funcs->get_scaling;
+ pstate_funcs.set = funcs->set;
+ pstate_funcs.get_vid = funcs->get_vid;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
+
+static bool intel_pstate_no_acpi_pss(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ acpi_status status;
+ union acpi_object *pss;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_processor *pr = per_cpu(processors, i);
+
+ if (!pr)
+ continue;
+
+ status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ pss = buffer.pointer;
+ if (pss && pss->type == ACPI_TYPE_PACKAGE) {
+ kfree(pss);
+ return false;
+ }
+
+ kfree(pss);
+ }
+
+ return true;
+}
+
+static bool intel_pstate_has_acpi_ppc(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct acpi_processor *pr = per_cpu(processors, i);
+
+ if (!pr)
+ continue;
+ if (acpi_has_method(pr->handle, "_PPC"))
+ return true;
+ }
+ return false;
+}
+
+enum {
+ PSS,
+ PPC,
+};
+
+struct hw_vendor_info {
+ u16 valid;
+ char oem_id[ACPI_OEM_ID_SIZE];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+ int oem_pwr_table;
+};
+
+/* Hardware vendors whose platforms have their own power management modes */
+static struct hw_vendor_info vendor_info[] = {
+ {1, "HP ", "ProLiant", PSS},
+ {1, "ORACLE", "X4-2 ", PPC},
+ {1, "ORACLE", "X4-2L ", PPC},
+ {1, "ORACLE", "X4-2B ", PPC},
+ {1, "ORACLE", "X3-2 ", PPC},
+ {1, "ORACLE", "X3-2L ", PPC},
+ {1, "ORACLE", "X3-2B ", PPC},
+ {1, "ORACLE", "X4470M2 ", PPC},
+ {1, "ORACLE", "X4270M3 ", PPC},
+ {1, "ORACLE", "X4270M2 ", PPC},
+ {1, "ORACLE", "X4170M2 ", PPC},
+ {0, "", ""},
+};
+
+static bool intel_pstate_platform_pwr_mgmt_exists(void)
+{
+ struct acpi_table_header hdr;
+ struct hw_vendor_info *v_info;
+ const struct x86_cpu_id *id;
+ u64 misc_pwr;
+
+ id = x86_match_cpu(intel_pstate_cpu_oob_ids);
+ if (id) {
+ rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+ if (misc_pwr & (1 << 8))
+ return true;
+ }
+
+ if (acpi_disabled ||
+ ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+ return false;
+
+ for (v_info = vendor_info; v_info->valid; v_info++) {
+ if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
+ !strncmp(hdr.oem_table_id, v_info->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE))
+ switch (v_info->oem_pwr_table) {
+ case PSS:
+ return intel_pstate_no_acpi_pss();
+ case PPC:
+ return intel_pstate_has_acpi_ppc() &&
+ (!force_load);
+ }
+ }
+
+ return false;
+}
+#else /* CONFIG_ACPI not enabled */
+static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
+static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
+#endif /* CONFIG_ACPI */
+
+static int __init intel_pstate_init(void)
+{
+ int cpu, rc = 0;
+ const struct x86_cpu_id *id;
+ struct cpu_defaults *cpu_def;
+
+ if (no_load)
+ return -ENODEV;
+
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists())
+ return -ENODEV;
+
+ cpu_def = (struct cpu_defaults *)id->driver_data;
+
+ copy_pid_params(&cpu_def->pid_policy);
+ copy_cpu_funcs(&cpu_def->funcs);
+
+ if (intel_pstate_msrs_not_valid())
+ return -ENODEV;
+
+ pr_info("Intel P-state driver initializing.\n");
+
+ all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
+ if (!all_cpu_data)
+ return -ENOMEM;
+
+ if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
+ intel_pstate_hwp_enable();
+
+ if (!hwp_active && hwp_only)
+ goto out;
+
+ rc = cpufreq_register_driver(&intel_pstate_driver);
+ if (rc)
+ goto out;
+
+ intel_pstate_debug_expose_params();
+ intel_pstate_sysfs_expose_params();
+
+ return rc;
+out:
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ del_timer_sync(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ }
+ }
+
+ put_online_cpus();
+ vfree(all_cpu_data);
+ return -ENODEV;
+}
+device_initcall(intel_pstate_init);
+
+static int __init intel_pstate_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "disable"))
+ no_load = 1;
+ if (!strcmp(str, "no_hwp"))
+ no_hwp = 1;
+ if (!strcmp(str, "force"))
+ force_load = 1;
+ if (!strcmp(str, "hwp_only"))
+ hwp_only = 1;
+ return 0;
+}
+early_param("intel_pstate", intel_pstate_setup);
+
+MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
+MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/kirkwood-cpufreq.c b/kernel/drivers/cpufreq/kirkwood-cpufreq.c
new file mode 100644
index 000000000..be42f103d
--- /dev/null
+++ b/kernel/drivers/cpufreq/kirkwood-cpufreq.c
@@ -0,0 +1,194 @@
+/*
+ * kirkwood-cpufreq.c: cpufreq driver for the Marvell Kirkwood
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <asm/proc-fns.h>
+
+#define CPU_SW_INT_BLK BIT(28)
+
+static struct priv
+{
+ struct clk *cpu_clk;
+ struct clk *ddr_clk;
+ struct clk *powersave_clk;
+ struct device *dev;
+ void __iomem *base;
+} priv;
+
+#define STATE_CPU_FREQ 0x01
+#define STATE_DDR_FREQ 0x02
+
+/*
+ * Kirkwood can swap the clock to the CPU between two clocks:
+ *
+ * - cpu clk
+ * - ddr clk
+ *
+ * The frequencies are set at runtime before registering this table.
+ */
+static struct cpufreq_frequency_table kirkwood_freq_table[] = {
+ {0, STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
+ {0, STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
+{
+ return clk_get_rate(priv.powersave_clk) / 1000;
+}
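+
+/*
+ * For illustration: clk_get_rate() returns Hz, so a hypothetical
+ * 1.2 GHz powersave parent reads back as 1200000000 / 1000 =
+ * 1200000 kHz, matching the kHz units cpufreq expects.
+ */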
+
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int state = kirkwood_freq_table[index].driver_data;
+ unsigned long reg;
+
+ local_irq_disable();
+
+ /* Disable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg |= CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ switch (state) {
+ case STATE_CPU_FREQ:
+ clk_set_parent(priv.powersave_clk, priv.cpu_clk);
+ break;
+ case STATE_DDR_FREQ:
+ clk_set_parent(priv.powersave_clk, priv.ddr_clk);
+ break;
+ }
+
+ /* Wait-for-Interrupt, while the hardware changes frequency */
+ cpu_do_idle();
+
+ /* Enable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg &= ~CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ local_irq_enable();
+
+ return 0;
+}
+
+/* Module init and exit code */
+static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ return cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
+}
+
+static struct cpufreq_driver kirkwood_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .get = kirkwood_cpufreq_get_cpu_frequency,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = kirkwood_cpufreq_target,
+ .init = kirkwood_cpufreq_cpu_init,
+ .name = "kirkwood-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int kirkwood_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct resource *res;
+ int err;
+
+ priv.dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv.base))
+ return PTR_ERR(priv.base);
+
+ np = of_cpu_device_node_get(0);
+ if (!np) {
+ dev_err(&pdev->dev, "failed to get cpu device node\n");
+ return -ENODEV;
+ }
+
+ priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
+ if (IS_ERR(priv.cpu_clk)) {
+ dev_err(priv.dev, "Unable to get cpuclk");
+ return PTR_ERR(priv.cpu_clk);
+ }
+
+ clk_prepare_enable(priv.cpu_clk);
+ kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
+
+ priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
+ if (IS_ERR(priv.ddr_clk)) {
+ dev_err(priv.dev, "Unable to get ddrclk");
+ err = PTR_ERR(priv.ddr_clk);
+ goto out_cpu;
+ }
+
+ clk_prepare_enable(priv.ddr_clk);
+ kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
+
+ priv.powersave_clk = of_clk_get_by_name(np, "powersave");
+ if (IS_ERR(priv.powersave_clk)) {
+ dev_err(priv.dev, "Unable to get powersave");
+ err = PTR_ERR(priv.powersave_clk);
+ goto out_ddr;
+ }
+ clk_prepare_enable(priv.powersave_clk);
+
+ of_node_put(np);
+ np = NULL;
+
+ err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
+ if (!err)
+ return 0;
+
+ dev_err(priv.dev, "Failed to register cpufreq driver");
+
+ clk_disable_unprepare(priv.powersave_clk);
+out_ddr:
+ clk_disable_unprepare(priv.ddr_clk);
+out_cpu:
+ clk_disable_unprepare(priv.cpu_clk);
+ of_node_put(np);
+
+ return err;
+}
+
+static int kirkwood_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
+
+ clk_disable_unprepare(priv.powersave_clk);
+ clk_disable_unprepare(priv.ddr_clk);
+ clk_disable_unprepare(priv.cpu_clk);
+
+ return 0;
+}
+
+static struct platform_driver kirkwood_cpufreq_platform_driver = {
+ .probe = kirkwood_cpufreq_probe,
+ .remove = kirkwood_cpufreq_remove,
+ .driver = {
+ .name = "kirkwood-cpufreq",
+ },
+};
+
+module_platform_driver(kirkwood_cpufreq_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
+MODULE_DESCRIPTION("cpufreq driver for Marvell's kirkwood CPU");
+MODULE_ALIAS("platform:kirkwood-cpufreq");
diff --git a/kernel/drivers/cpufreq/longhaul.c b/kernel/drivers/cpufreq/longhaul.c
new file mode 100644
index 000000000..0f6b229af
--- /dev/null
+++ b/kernel/drivers/cpufreq/longhaul.c
@@ -0,0 +1,1016 @@
+/*
+ * (C) 2001-2004 Dave Jones.
+ * (C) 2002 Padraig Brady. <padraig@antefacto.com>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ * Based upon datasheets & sample CPUs kindly provided by VIA.
+ *
+ * VIA currently have 3 different versions of Longhaul.
+ * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
+ * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
+ * Version 2 of longhaul is backward compatible with v1, but adds
+ * LONGHAUL MSR for purpose of both frequency and voltage scaling.
+ * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C).
+ * Version 3 of longhaul got renamed to Powersaver and redesigned
+ * to use only the POWERSAVER MSR at 0x110a.
+ * It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
+ * It's pretty much the same, feature-wise, as longhaul v2, though
+ * there is provision for scaling the FSB too; this doesn't work
+ * well in practice, so we don't even try to use it.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+#include <acpi/processor.h>
+
+#include "longhaul.h"
+
+#define PFX "longhaul: "
+
+#define TYPE_LONGHAUL_V1 1
+#define TYPE_LONGHAUL_V2 2
+#define TYPE_POWERSAVER 3
+
+#define CPU_SAMUEL 1
+#define CPU_SAMUEL2 2
+#define CPU_EZRA 3
+#define CPU_EZRA_T 4
+#define CPU_NEHEMIAH 5
+#define CPU_NEHEMIAH_C 6
+
+/* Flags */
+#define USE_ACPI_C3 (1 << 1)
+#define USE_NORTHBRIDGE (1 << 2)
+
+static int cpu_model;
+static unsigned int numscales = 16;
+static unsigned int fsb;
+
+static const struct mV_pos *vrm_mV_table;
+static const unsigned char *mV_vrm_table;
+
+static unsigned int highest_speed, lowest_speed; /* kHz */
+static unsigned int minmult, maxmult;
+static int can_scale_voltage;
+static struct acpi_processor *pr;
+static struct acpi_processor_cx *cx;
+static u32 acpi_regs_addr;
+static u8 longhaul_flags;
+static unsigned int longhaul_index;
+
+/* Module parameters */
+static int scale_voltage;
+static int disable_acpi_c3;
+static int revid_errata;
+static int enable;
+
+/* Clock ratios multiplied by 10 */
+static int mults[32];
+static int eblcr[32];
+static int longhaul_version;
+static struct cpufreq_frequency_table *longhaul_table;
+
+static char speedbuffer[8];
+
+static char *print_speed(int speed)
+{
+ if (speed < 1000) {
+ snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed);
+ return speedbuffer;
+ }
+
+ if (speed%1000 == 0)
+ snprintf(speedbuffer, sizeof(speedbuffer),
+ "%dGHz", speed/1000);
+ else
+ snprintf(speedbuffer, sizeof(speedbuffer),
+ "%d.%dGHz", speed/1000, (speed%1000)/100);
+
+ return speedbuffer;
+}
+
+
+static unsigned int calc_speed(int mult)
+{
+ int khz;
+ khz = (mult/10)*fsb;
+ if (mult%10)
+ khz += fsb/2;
+ khz *= 1000;
+ return khz;
+}
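+
+/*
+ * For illustration (hypothetical values): with mult = 45 (a 4.5x
+ * ratio) and fsb = 133, khz = 4 * 133 + 133/2 = 598, then * 1000 =
+ * 598000 kHz, i.e. roughly 4.5 x 133 MHz with integer rounding.
+ */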
+
+
+static int longhaul_get_cpu_mult(void)
+{
+ unsigned long invalue = 0, lo, hi;
+
+ rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi);
+ invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22;
+ if (longhaul_version == TYPE_LONGHAUL_V2 ||
+ longhaul_version == TYPE_POWERSAVER) {
+ if (lo & (1<<27))
+ invalue += 16;
+ }
+ return eblcr[invalue];
+}
+
+/* For processor with BCR2 MSR */
+
+static void do_longhaul1(unsigned int mults_index)
+{
+ union msr_bcr2 bcr2;
+
+ rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ /* Enable software clock multiplier */
+ bcr2.bits.ESOFTBF = 1;
+ bcr2.bits.CLOCKMUL = mults_index & 0xff;
+
+ /* Sync to timer tick */
+ safe_halt();
+ /* Change frequency on next halt or sleep */
+ wrmsrl(MSR_VIA_BCR2, bcr2.val);
+ /* Invoke transition */
+ ACPI_FLUSH_CPU_CACHE();
+ halt();
+
+ /* Disable software clock multiplier */
+ local_irq_disable();
+ rdmsrl(MSR_VIA_BCR2, bcr2.val);
+ bcr2.bits.ESOFTBF = 0;
+ wrmsrl(MSR_VIA_BCR2, bcr2.val);
+}
+
+/* For processor with Longhaul MSR */
+
+static void do_powersaver(int cx_address, unsigned int mults_index,
+ unsigned int dir)
+{
+ union msr_longhaul longhaul;
+ u32 t;
+
+ rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ /* Setup new frequency */
+ if (!revid_errata)
+ longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ else
+ longhaul.bits.RevisionKey = 0;
+ longhaul.bits.SoftBusRatio = mults_index & 0xf;
+ longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4;
+ /* Setup new voltage */
+ if (can_scale_voltage)
+ longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f;
+ /* Sync to timer tick */
+ safe_halt();
+ /* Raise voltage if necessary */
+ if (can_scale_voltage && dir) {
+ longhaul.bits.EnableSoftVID = 1;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ /* Change voltage */
+ if (!cx_address) {
+ ACPI_FLUSH_CPU_CACHE();
+ halt();
+ } else {
+ ACPI_FLUSH_CPU_CACHE();
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3
+ * read */
+ t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ }
+ longhaul.bits.EnableSoftVID = 0;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ }
+
+ /* Change frequency on next halt or sleep */
+ longhaul.bits.EnableSoftBusRatio = 1;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ if (!cx_address) {
+ ACPI_FLUSH_CPU_CACHE();
+ halt();
+ } else {
+ ACPI_FLUSH_CPU_CACHE();
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3 read */
+ t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ }
+ /* Disable bus ratio bit */
+ longhaul.bits.EnableSoftBusRatio = 0;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+
+ /* Reduce voltage if necessary */
+ if (can_scale_voltage && !dir) {
+ longhaul.bits.EnableSoftVID = 1;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ /* Change voltage */
+ if (!cx_address) {
+ ACPI_FLUSH_CPU_CACHE();
+ halt();
+ } else {
+ ACPI_FLUSH_CPU_CACHE();
+ /* Invoke C3 */
+ inb(cx_address);
+ /* Dummy op - must do something useless after P_LVL3
+ * read */
+ t = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ }
+ longhaul.bits.EnableSoftVID = 0;
+ wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ }
+}
+
+/**
+ * longhaul_setstate()
+ * @policy: cpufreq policy for the CPU being changed.
+ * @table_index: index of the target entry in longhaul_table.
+ *
+ * Sets a new clock ratio.
+ */
+
+static int longhaul_setstate(struct cpufreq_policy *policy,
+ unsigned int table_index)
+{
+ unsigned int mults_index;
+ int speed, mult;
+ struct cpufreq_freqs freqs;
+ unsigned long flags;
+ unsigned int pic1_mask, pic2_mask;
+ u16 bm_status = 0;
+ u32 bm_timeout = 1000;
+ unsigned int dir = 0;
+
+ mults_index = longhaul_table[table_index].driver_data;
+ /* Safety precautions */
+ mult = mults[mults_index & 0x1f];
+ if (mult == -1)
+ return -EINVAL;
+
+ speed = calc_speed(mult);
+ if ((speed > highest_speed) || (speed < lowest_speed))
+ return -EINVAL;
+
+ /* Voltage transition before frequency transition? */
+ if (can_scale_voltage && longhaul_index < table_index)
+ dir = 1;
+
+ freqs.old = calc_speed(longhaul_get_cpu_mult());
+ freqs.new = speed;
+
+ pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
+ fsb, mult/10, mult%10, print_speed(speed/1000));
+retry_loop:
+ preempt_disable();
+ local_irq_save(flags);
+
+ pic2_mask = inb(0xA1);
+ pic1_mask = inb(0x21); /* works on C3. save mask. */
+ outb(0xFF, 0xA1); /* Overkill */
+ outb(0xFE, 0x21); /* TMR0 only */
+
+ /* Wait while PCI bus is busy. */
+ if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE
+ || ((pr != NULL) && pr->flags.bm_control))) {
+ bm_status = inw(acpi_regs_addr);
+ bm_status &= 1 << 4;
+ while (bm_status && bm_timeout) {
+ outw(1 << 4, acpi_regs_addr);
+ bm_timeout--;
+ bm_status = inw(acpi_regs_addr);
+ bm_status &= 1 << 4;
+ }
+ }
+
+ if (longhaul_flags & USE_NORTHBRIDGE) {
+ /* Disable AGP and PCI arbiters */
+ outb(3, 0x22);
+ } else if ((pr != NULL) && pr->flags.bm_control) {
+ /* Disable bus master arbitration */
+ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
+ }
+ switch (longhaul_version) {
+
+ /*
+ * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
+ * Software controlled multipliers only.
+ */
+ case TYPE_LONGHAUL_V1:
+ do_longhaul1(mults_index);
+ break;
+
+ /*
+ * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C]
+ *
+ * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N])
+ * Nehemiah can do FSB scaling too, but this has never been proven
+ * to work in practice.
+ */
+ case TYPE_LONGHAUL_V2:
+ case TYPE_POWERSAVER:
+ if (longhaul_flags & USE_ACPI_C3) {
+ /* Don't allow wakeup */
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
+ do_powersaver(cx->address, mults_index, dir);
+ } else {
+ do_powersaver(0, mults_index, dir);
+ }
+ break;
+ }
+
+ if (longhaul_flags & USE_NORTHBRIDGE) {
+ /* Enable arbiters */
+ outb(0, 0x22);
+ } else if ((pr != NULL) && pr->flags.bm_control) {
+ /* Enable bus master arbitration */
+ acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
+ }
+ outb(pic2_mask, 0xA1); /* restore mask */
+ outb(pic1_mask, 0x21);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
+ freqs.new = calc_speed(longhaul_get_cpu_mult());
+ /* Check if requested frequency is set. */
+ if (unlikely(freqs.new != speed)) {
+ printk(KERN_INFO PFX "Failed to set requested frequency!\n");
+ /* Revision ID = 1 but processor is expecting revision key
+ * equal to 0. Jumpers at the bottom of processor will change
+ * multiplier and FSB, but will not change bits in Longhaul
+ * MSR nor enable voltage scaling. */
+ if (!revid_errata) {
+ printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
+ "option.\n");
+ revid_errata = 1;
+ msleep(200);
+ goto retry_loop;
+ }
+ /* Why ACPI C3 sometimes doesn't work is a mystery to me.
+ * But it does happen. Processor is entering ACPI C3 state,
+ * but it doesn't change frequency. I tried poking various
+ * bits in northbridge registers, but without success. */
+ if (longhaul_flags & USE_ACPI_C3) {
+ printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
+ longhaul_flags &= ~USE_ACPI_C3;
+ if (revid_errata) {
+ printk(KERN_INFO PFX "Disabling \"Ignore "
+ "Revision ID\" option.\n");
+ revid_errata = 0;
+ }
+ msleep(200);
+ goto retry_loop;
+ }
+ /* This shouldn't happen. Longhaul ver. 2 was reported not
+ * working on processors without voltage scaling, but with
+ * RevID = 1. The RevID errata will make things right; try
+ * it just to be 100% sure. */
+ if (longhaul_version == TYPE_LONGHAUL_V2) {
+ printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
+ longhaul_version = TYPE_LONGHAUL_V1;
+ msleep(200);
+ goto retry_loop;
+ }
+ }
+
+ if (!bm_timeout) {
+ printk(KERN_INFO PFX "Warning: Timeout while waiting for "
+ "idle PCI bus.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * Centaur decided to make life a little more tricky.
+ * Only longhaul v1 is allowed to read EBLCR BSEL[0:1].
+ * Samuel2 and above have to try and guess what the FSB is.
+ * We do this by assuming we booted at the maximum multiplier, and
+ * interpolating between that value multiplied by possible FSBs and
+ * cpu_mhz, which was calculated at boot time. Really ugly, but there
+ * is no other way to do this.
+ */
+
+#define ROUNDING 0xf
+
+static int guess_fsb(int mult)
+{
+ int speed = cpu_khz / 1000;
+ int i;
+ int speeds[] = { 666, 1000, 1333, 2000 };
+ int f_max, f_min;
+
+ for (i = 0; i < 4; i++) {
+ f_max = ((speeds[i] * mult) + 50) / 100;
+ f_max += (ROUNDING / 2);
+ f_min = f_max - ROUNDING;
+ if ((speed <= f_max) && (speed >= f_min))
+ return speeds[i] / 10;
+ }
+ return 0;
+}
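+
+/*
+ * For illustration (hypothetical values): booted at mult = 45 (4.5x)
+ * with cpu_khz ~= 600000, speed is 600. For the 133 MHz candidate,
+ * f_max = (1333 * 45 + 50) / 100 + 7 = 607 and f_min = 592; 600 falls
+ * inside that window, so guess_fsb() returns 1333 / 10 = 133.
+ */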
+
+
+static int longhaul_get_ranges(void)
+{
+ unsigned int i, j, k = 0;
+ unsigned int ratio;
+ int mult;
+
+ /* Get current frequency */
+ mult = longhaul_get_cpu_mult();
+ if (mult == -1) {
+ printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
+ return -EINVAL;
+ }
+ fsb = guess_fsb(mult);
+ if (fsb == 0) {
+ printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
+ return -EINVAL;
+ }
+ /* Get max multiplier - as we always did.
+ * The Longhaul MSR is useful only when voltage scaling is
+ * enabled; the C3 boots at the max multiplier anyway. */
+ maxmult = mult;
+ /* Get min multiplier */
+ switch (cpu_model) {
+ case CPU_NEHEMIAH:
+ minmult = 50;
+ break;
+ case CPU_NEHEMIAH_C:
+ minmult = 40;
+ break;
+ default:
+ minmult = 30;
+ break;
+ }
+
+ pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n",
+ minmult/10, minmult%10, maxmult/10, maxmult%10);
+
+ highest_speed = calc_speed(maxmult);
+ lowest_speed = calc_speed(minmult);
+ pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb,
+ print_speed(lowest_speed/1000),
+ print_speed(highest_speed/1000));
+
+ if (lowest_speed == highest_speed) {
+ printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n");
+ return -EINVAL;
+ }
+ if (lowest_speed > highest_speed) {
+ printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
+ lowest_speed, highest_speed);
+ return -EINVAL;
+ }
+
+ longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table),
+ GFP_KERNEL);
+ if (!longhaul_table)
+ return -ENOMEM;
+
+ for (j = 0; j < numscales; j++) {
+ ratio = mults[j];
+ if (ratio == -1)
+ continue;
+ if (ratio > maxmult || ratio < minmult)
+ continue;
+ longhaul_table[k].frequency = calc_speed(ratio);
+ longhaul_table[k].driver_data = j;
+ k++;
+ }
+ if (k <= 1) {
+ kfree(longhaul_table);
+ return -ENODEV;
+ }
+ /* Sort */
+ for (j = 0; j < k - 1; j++) {
+ unsigned int min_f, min_i;
+ min_f = longhaul_table[j].frequency;
+ min_i = j;
+ for (i = j + 1; i < k; i++) {
+ if (longhaul_table[i].frequency < min_f) {
+ min_f = longhaul_table[i].frequency;
+ min_i = i;
+ }
+ }
+ if (min_i != j) {
+ swap(longhaul_table[j].frequency,
+ longhaul_table[min_i].frequency);
+ swap(longhaul_table[j].driver_data,
+ longhaul_table[min_i].driver_data);
+ }
+ }
+
+ longhaul_table[k].frequency = CPUFREQ_TABLE_END;
+
+ /* Find index we are running on */
+ for (j = 0; j < k; j++) {
+ if (mults[longhaul_table[j].driver_data & 0x1f] == mult) {
+ longhaul_index = j;
+ break;
+ }
+ }
+ return 0;
+}
+
+
+static void longhaul_setup_voltagescaling(void)
+{
+ struct cpufreq_frequency_table *freq_pos;
+ union msr_longhaul longhaul;
+ struct mV_pos minvid, maxvid, vid;
+ unsigned int j, speed, pos, kHz_step, numvscales;
+ int min_vid_speed;
+
+ rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+ if (!(longhaul.bits.RevisionID & 1)) {
+ printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n");
+ return;
+ }
+
+ if (!longhaul.bits.VRMRev) {
+ printk(KERN_INFO PFX "VRM 8.5\n");
+ vrm_mV_table = &vrm85_mV[0];
+ mV_vrm_table = &mV_vrm85[0];
+ } else {
+ printk(KERN_INFO PFX "Mobile VRM\n");
+ if (cpu_model < CPU_NEHEMIAH)
+ return;
+ vrm_mV_table = &mobilevrm_mV[0];
+ mV_vrm_table = &mV_mobilevrm[0];
+ }
+
+ minvid = vrm_mV_table[longhaul.bits.MinimumVID];
+ maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
+
+ if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
+ printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
+ "Voltage scaling disabled.\n",
+ minvid.mV/1000, minvid.mV%1000,
+ maxvid.mV/1000, maxvid.mV%1000);
+ return;
+ }
+
+ if (minvid.mV == maxvid.mV) {
+ printk(KERN_INFO PFX "Claims to support voltage scaling but "
+ "min & max are both %d.%03d. "
+ "Voltage scaling disabled\n",
+ maxvid.mV/1000, maxvid.mV%1000);
+ return;
+ }
+
+ /* How many voltage steps */
+ numvscales = maxvid.pos - minvid.pos + 1;
+ printk(KERN_INFO PFX
+ "Max VID=%d.%03d "
+ "Min VID=%d.%03d, "
+ "%d possible voltage scales\n",
+ maxvid.mV/1000, maxvid.mV%1000,
+ minvid.mV/1000, minvid.mV%1000,
+ numvscales);
+
+ /* Calculate max frequency at min voltage */
+ j = longhaul.bits.MinMHzBR;
+ if (longhaul.bits.MinMHzBR4)
+ j += 16;
+ min_vid_speed = eblcr[j];
+ if (min_vid_speed == -1)
+ return;
+ switch (longhaul.bits.MinMHzFSB) {
+ case 0:
+ min_vid_speed *= 13333;
+ break;
+ case 1:
+ min_vid_speed *= 10000;
+ break;
+ case 3:
+ min_vid_speed *= 6666;
+ break;
+ default:
+ return;
+ }
+ if (min_vid_speed >= highest_speed)
+ return;
+ /* Calculate kHz for one voltage step */
+ kHz_step = (highest_speed - min_vid_speed) / numvscales;
+
+ cpufreq_for_each_entry(freq_pos, longhaul_table) {
+ speed = freq_pos->frequency;
+ if (speed > min_vid_speed)
+ pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
+ else
+ pos = minvid.pos;
+ freq_pos->driver_data |= mV_vrm_table[pos] << 8;
+ vid = vrm_mV_table[mV_vrm_table[pos]];
+ printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
+ speed, (int)(freq_pos - longhaul_table), vid.mV);
+ }
+
+ can_scale_voltage = 1;
+ printk(KERN_INFO PFX "Voltage scaling enabled.\n");
+}
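+
+/*
+ * For illustration (hypothetical values): with highest_speed =
+ * 1200000 kHz, min_vid_speed = 600000 kHz and numvscales = 16,
+ * kHz_step is 37500. A 900000 kHz entry then gets
+ * pos = 300000 / 37500 + minvid.pos = 8 + minvid.pos, i.e. a VID
+ * roughly halfway between the minimum and maximum voltage.
+ */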
+
+
+static int longhaul_target(struct cpufreq_policy *policy,
+ unsigned int table_index)
+{
+ unsigned int i;
+ unsigned int dir = 0;
+ u8 vid, current_vid;
+ int retval = 0;
+
+ if (!can_scale_voltage)
+ retval = longhaul_setstate(policy, table_index);
+ else {
+ /* On the test system, voltage transitions exceeding a
+ * single step up or down were turning the motherboard
+ * off. Both "ondemand" and "userspace" are unsafe. The
+ * C7 does this stepping in hardware; the C3 is old, so
+ * we need to do it in software. */
+ i = longhaul_index;
+ current_vid = (longhaul_table[longhaul_index].driver_data >> 8);
+ current_vid &= 0x1f;
+ if (table_index > longhaul_index)
+ dir = 1;
+ while (i != table_index) {
+ vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
+ if (vid != current_vid) {
+ retval = longhaul_setstate(policy, i);
+ current_vid = vid;
+ msleep(200);
+ }
+ if (dir)
+ i++;
+ else
+ i--;
+ }
+ retval = longhaul_setstate(policy, table_index);
+ }
+
+ longhaul_index = table_index;
+ return retval;
+}
+
+
+static unsigned int longhaul_get(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
+ return calc_speed(longhaul_get_cpu_mult());
+}
+
+static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
+ u32 nesting_level,
+ void *context, void **return_value)
+{
+ struct acpi_device *d;
+
+ if (acpi_bus_get_device(obj_handle, &d))
+ return 0;
+
+ *return_value = acpi_driver_data(d);
+ return 1;
+}
+
+/* VIA don't support the PM2 register, but have something similar */
+static int enable_arbiter_disable(void)
+{
+ struct pci_dev *dev;
+ int status = 1;
+ int reg;
+ u8 pci_cmd;
+
+ /* Find PLE133 host bridge */
+ reg = 0x78;
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
+ NULL);
+ /* Find PM133/VT8605 host bridge */
+ if (dev == NULL)
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8605_0, NULL);
+ /* Find CLE266 host bridge */
+ if (dev == NULL) {
+ reg = 0x76;
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_862X_0, NULL);
+ /* Find CN400 V-Link host bridge */
+ if (dev == NULL)
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
+ }
+ if (dev != NULL) {
+ /* Enable access to port 0x22 */
+ pci_read_config_byte(dev, reg, &pci_cmd);
+ if (!(pci_cmd & 1<<7)) {
+ pci_cmd |= 1<<7;
+ pci_write_config_byte(dev, reg, pci_cmd);
+ pci_read_config_byte(dev, reg, &pci_cmd);
+ if (!(pci_cmd & 1<<7)) {
+ printk(KERN_ERR PFX
+ "Can't enable access to port 0x22.\n");
+ status = 0;
+ }
+ }
+ pci_dev_put(dev);
+ return status;
+ }
+ return 0;
+}
+
+static int longhaul_setup_southbridge(void)
+{
+ struct pci_dev *dev;
+ u8 pci_cmd;
+
+ /* Find VT8235 southbridge */
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
+ if (dev == NULL)
+ /* Find VT8237 southbridge */
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8237, NULL);
+ if (dev != NULL) {
+ /* Set transition time to max */
+ pci_read_config_byte(dev, 0xec, &pci_cmd);
+ pci_cmd &= ~(1 << 2);
+ pci_write_config_byte(dev, 0xec, pci_cmd);
+ pci_read_config_byte(dev, 0xe4, &pci_cmd);
+ pci_cmd &= ~(1 << 7);
+ pci_write_config_byte(dev, 0xe4, pci_cmd);
+ pci_read_config_byte(dev, 0xe5, &pci_cmd);
+ pci_cmd |= 1 << 7;
+ pci_write_config_byte(dev, 0xe5, pci_cmd);
+ /* Get address of ACPI registers block */
+ pci_read_config_byte(dev, 0x81, &pci_cmd);
+ if (pci_cmd & 1 << 7) {
+ pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
+ acpi_regs_addr &= 0xff00;
+ printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
+ acpi_regs_addr);
+ }
+
+ pci_dev_put(dev);
+ return 1;
+ }
+ return 0;
+}
+
+static int longhaul_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ char *cpuname = NULL;
+ int ret;
+ u32 lo, hi;
+
+ /* Check what we have on this motherboard */
+ switch (c->x86_model) {
+ case 6:
+ cpu_model = CPU_SAMUEL;
+ cpuname = "C3 'Samuel' [C5A]";
+ longhaul_version = TYPE_LONGHAUL_V1;
+ memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
+ memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr));
+ break;
+
+ case 7:
+ switch (c->x86_mask) {
+ case 0:
+ longhaul_version = TYPE_LONGHAUL_V1;
+ cpu_model = CPU_SAMUEL2;
+ cpuname = "C3 'Samuel 2' [C5B]";
+ /* Note, this is not a typo, early Samuel2's had
+ * Samuel1 ratios. */
+ memcpy(mults, samuel1_mults, sizeof(samuel1_mults));
+ memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
+ break;
+ case 1 ... 15:
+ longhaul_version = TYPE_LONGHAUL_V2;
+ if (c->x86_mask < 8) {
+ cpu_model = CPU_SAMUEL2;
+ cpuname = "C3 'Samuel 2' [C5B]";
+ } else {
+ cpu_model = CPU_EZRA;
+ cpuname = "C3 'Ezra' [C5C]";
+ }
+ memcpy(mults, ezra_mults, sizeof(ezra_mults));
+ memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr));
+ break;
+ }
+ break;
+
+ case 8:
+ cpu_model = CPU_EZRA_T;
+ cpuname = "C3 'Ezra-T' [C5M]";
+ longhaul_version = TYPE_POWERSAVER;
+ numscales = 32;
+ memcpy(mults, ezrat_mults, sizeof(ezrat_mults));
+ memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr));
+ break;
+
+ case 9:
+ longhaul_version = TYPE_POWERSAVER;
+ numscales = 32;
+ memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
+ memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
+ switch (c->x86_mask) {
+ case 0 ... 1:
+ cpu_model = CPU_NEHEMIAH;
+ cpuname = "C3 'Nehemiah A' [C5XLOE]";
+ break;
+ case 2 ... 4:
+ cpu_model = CPU_NEHEMIAH;
+ cpuname = "C3 'Nehemiah B' [C5XLOH]";
+ break;
+ case 5 ... 15:
+ cpu_model = CPU_NEHEMIAH_C;
+ cpuname = "C3 'Nehemiah C' [C5P]";
+ break;
+ }
+ break;
+
+ default:
+ cpuname = "Unknown";
+ break;
+ }
+ /* Check Longhaul ver. 2 */
+ if (longhaul_version == TYPE_LONGHAUL_V2) {
+ rdmsr(MSR_VIA_LONGHAUL, lo, hi);
+ if (lo == 0 && hi == 0)
+ /* Looks like MSR isn't present */
+ longhaul_version = TYPE_LONGHAUL_V1;
+ }
+
+ printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
+ switch (longhaul_version) {
+ case TYPE_LONGHAUL_V1:
+ case TYPE_LONGHAUL_V2:
+ printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
+ break;
+ case TYPE_POWERSAVER:
+ printk(KERN_CONT "Powersaver supported.\n");
+ break;
+ }
+
+ /* Doesn't hurt */
+ longhaul_setup_southbridge();
+
+ /* Find ACPI data for processor */
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, &longhaul_walk_callback, NULL,
+ NULL, (void *)&pr);
+
+ /* Check ACPI support for C3 state */
+ if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
+ cx = &pr->power.states[ACPI_STATE_C3];
+ if (cx->address > 0 && cx->latency <= 1000)
+ longhaul_flags |= USE_ACPI_C3;
+ }
+ /* Disable if it isn't working */
+ if (disable_acpi_c3)
+ longhaul_flags &= ~USE_ACPI_C3;
+ /* Check if northbridge is friendly */
+ if (enable_arbiter_disable())
+ longhaul_flags |= USE_NORTHBRIDGE;
+
+ /* Check ACPI support for bus master arbiter disable */
+ if (!(longhaul_flags & USE_ACPI_C3
+ || longhaul_flags & USE_NORTHBRIDGE)
+ && ((pr == NULL) || !(pr->flags.bm_control))) {
+ printk(KERN_ERR PFX
+ "No ACPI support. Unsupported northbridge.\n");
+ return -ENODEV;
+ }
+
+ if (longhaul_flags & USE_NORTHBRIDGE)
+ printk(KERN_INFO PFX "Using northbridge support.\n");
+ if (longhaul_flags & USE_ACPI_C3)
+ printk(KERN_INFO PFX "Using ACPI support.\n");
+
+ ret = longhaul_get_ranges();
+ if (ret != 0)
+ return ret;
+
+ if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
+ longhaul_setup_voltagescaling();
+
+ policy->cpuinfo.transition_latency = 200000; /* nsec */
+
+ return cpufreq_table_validate_and_show(policy, longhaul_table);
+}
+
+static struct cpufreq_driver longhaul_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = longhaul_target,
+ .get = longhaul_get,
+ .init = longhaul_cpu_init,
+ .name = "longhaul",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id longhaul_id[] = {
+ { X86_VENDOR_CENTAUR, 6 },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, longhaul_id);
+
+static int __init longhaul_init(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!x86_match_cpu(longhaul_id))
+ return -ENODEV;
+
+ if (!enable) {
+ printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+ return -ENODEV;
+ }
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) {
+ printk(KERN_ERR PFX "More than 1 CPU detected, "
+ "longhaul disabled.\n");
+ return -ENODEV;
+ }
+#endif
+#ifdef CONFIG_X86_IO_APIC
+ if (cpu_has_apic) {
+ printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
+ "broken in this configuration.\n");
+ return -ENODEV;
+ }
+#endif
+ switch (c->x86_model) {
+ case 6 ... 9:
+ return cpufreq_register_driver(&longhaul_driver);
+ case 10:
+ printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
+ default:
+ ;
+ }
+
+ return -ENODEV;
+}
+
+
+static void __exit longhaul_exit(void)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+ int i;
+
+ for (i = 0; i < numscales; i++) {
+ if (mults[i] == maxmult) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = longhaul_table[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ longhaul_setstate(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ break;
+ }
+ }
+
+ cpufreq_cpu_put(policy);
+ cpufreq_unregister_driver(&longhaul_driver);
+ kfree(longhaul_table);
+}
+
+/* Even if the BIOS exports an ACPI C3 state, and it is used
+ * successfully when the CPU is idle, this state doesn't
+ * trigger a frequency transition in some cases. */
+module_param(disable_acpi_c3, int, 0644);
+MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
+/* Change CPU voltage with frequency. Very useful to save
+ * power, but most VIA C3 processors don't support it. */
+module_param(scale_voltage, int, 0644);
+MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
+/* Force the revision key to 0 for processors which don't
+ * support voltage scaling but introduce themselves as if
+ * they did. */
+module_param(revid_errata, int, 0644);
+MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
+/* By default the driver is disabled, to prevent freezes on
+ * incompatible systems. */
+module_param(enable, int, 0644);
+MODULE_PARM_DESC(enable, "Enable driver");
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
+MODULE_LICENSE("GPL");
+
+late_initcall(longhaul_init);
+module_exit(longhaul_exit);
diff --git a/kernel/drivers/cpufreq/longhaul.h b/kernel/drivers/cpufreq/longhaul.h
new file mode 100644
index 000000000..1928b923a
--- /dev/null
+++ b/kernel/drivers/cpufreq/longhaul.h
@@ -0,0 +1,353 @@
+/*
+ * longhaul.h
+ * (C) 2003 Dave Jones.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * VIA-specific information
+ */
+
+union msr_bcr2 {
+ struct {
+ unsigned Reserved:19, // 18:0
+ ESOFTBF:1, // 19
+ Reserved2:3, // 22:20
+ CLOCKMUL:4, // 26:23
+ Reserved3:5; // 31:27
+ } bits;
+ unsigned long val;
+};
+
+union msr_longhaul {
+ struct {
+ unsigned RevisionID:4, // 3:0
+ RevisionKey:4, // 7:4
+ EnableSoftBusRatio:1, // 8
+ EnableSoftVID:1, // 9
+ EnableSoftBSEL:1, // 10
+ Reserved:3, // 13:11
+ SoftBusRatio4:1, // 14
+ VRMRev:1, // 15
+ SoftBusRatio:4, // 19:16
+ SoftVID:5, // 24:20
+ Reserved2:3, // 27:25
+ SoftBSEL:2, // 29:28
+ Reserved3:2, // 31:30
+ MaxMHzBR:4, // 35:32
+ MaximumVID:5, // 40:36
+ MaxMHzFSB:2, // 42:41
+ MaxMHzBR4:1, // 43
+ Reserved4:4, // 47:44
+ MinMHzBR:4, // 51:48
+ MinimumVID:5, // 56:52
+ MinMHzFSB:2, // 58:57
+ MinMHzBR4:1, // 59
+ Reserved5:4; // 63:60
+ } bits;
+ unsigned long long val;
+};
+
+/*
+ * Clock ratio tables. Div/Mod by 10 to get ratio.
+ * The eblcr values specify the ratio read from the CPU.
+ * The mults values specify what to write to the CPU.
+ */
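+
+/*
+ * For illustration: a table value of 45 stands for a 4.5x ratio
+ * (45 / 10 = 4, 45 % 10 = 5). eblcr[] decodes the bit pattern read
+ * back from the CPU; mults[] encodes what to program, and -1 marks
+ * reserved patterns that must not be used.
+ */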
+
+/*
+ * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
+ */
+static const int samuel1_mults[16] = {
+ -1, /* 0000 -> RESERVED */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ -1, /* 0011 -> RESERVED */
+ -1, /* 0100 -> RESERVED */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 55, /* 0111 -> 5.5x */
+ 60, /* 1000 -> 6.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 50, /* 1011 -> 5.0x */
+ 65, /* 1100 -> 6.5x */
+ 75, /* 1101 -> 7.5x */
+ -1, /* 1110 -> RESERVED */
+ -1, /* 1111 -> RESERVED */
+};
+
+static const int samuel1_eblcr[16] = {
+ 50, /* 0000 -> RESERVED */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ -1, /* 0011 -> RESERVED */
+ 55, /* 0100 -> 5.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ -1, /* 0111 -> RESERVED */
+ -1, /* 1000 -> RESERVED */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ -1, /* 1100 -> RESERVED */
+ 75, /* 1101 -> 7.5x */
+ -1, /* 1110 -> RESERVED */
+ 65, /* 1111 -> 6.5x */
+};
+
+/*
+ * VIA C3 Samuel2 Stepping 1->15
+ */
+static const int samuel2_eblcr[16] = {
+ 50, /* 0000 -> 5.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 100, /* 0011 -> 10.0x */
+ 55, /* 0100 -> 5.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 110, /* 0111 -> 11.0x */
+ 90, /* 1000 -> 9.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ 120, /* 1100 -> 12.0x */
+ 75, /* 1101 -> 7.5x */
+ 130, /* 1110 -> 13.0x */
+ 65, /* 1111 -> 6.5x */
+};
+
+/*
+ * VIA C3 Ezra
+ */
+static const int ezra_mults[16] = {
+ 100, /* 0000 -> 10.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 90, /* 0011 -> 9.0x */
+ 95, /* 0100 -> 9.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 55, /* 0111 -> 5.5x */
+ 60, /* 1000 -> 6.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 50, /* 1011 -> 5.0x */
+ 65, /* 1100 -> 6.5x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 120, /* 1111 -> 12.0x */
+};
+
+static const int ezra_eblcr[16] = {
+ 50, /* 0000 -> 5.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 100, /* 0011 -> 10.0x */
+ 55, /* 0100 -> 5.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 95, /* 0111 -> 9.5x */
+ 90, /* 1000 -> 9.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ 120, /* 1100 -> 12.0x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 65, /* 1111 -> 6.5x */
+};
+
+/*
+ * VIA C3 (Ezra-T) [C5M].
+ */
+static const int ezrat_mults[32] = {
+ 100, /* 0000 -> 10.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 90, /* 0011 -> 9.0x */
+ 95, /* 0100 -> 9.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 55, /* 0111 -> 5.5x */
+ 60, /* 1000 -> 6.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 50, /* 1011 -> 5.0x */
+ 65, /* 1100 -> 6.5x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 120, /* 1111 -> 12.0x */
+
+ -1, /* 0000 -> RESERVED (10.0x) */
+ 110, /* 0001 -> 11.0x */
+ -1, /* 0010 -> 12.0x */
+ -1, /* 0011 -> RESERVED (9.0x)*/
+ 105, /* 0100 -> 10.5x */
+ 115, /* 0101 -> 11.5x */
+ 125, /* 0110 -> 12.5x */
+ 135, /* 0111 -> 13.5x */
+ 140, /* 1000 -> 14.0x */
+ 150, /* 1001 -> 15.0x */
+ 160, /* 1010 -> 16.0x */
+ 130, /* 1011 -> 13.0x */
+ 145, /* 1100 -> 14.5x */
+ 155, /* 1101 -> 15.5x */
+ -1, /* 1110 -> RESERVED (13.0x) */
+ -1, /* 1111 -> RESERVED (12.0x) */
+};
+
+static const int ezrat_eblcr[32] = {
+ 50, /* 0000 -> 5.0x */
+ 30, /* 0001 -> 3.0x */
+ 40, /* 0010 -> 4.0x */
+ 100, /* 0011 -> 10.0x */
+ 55, /* 0100 -> 5.5x */
+ 35, /* 0101 -> 3.5x */
+ 45, /* 0110 -> 4.5x */
+ 95, /* 0111 -> 9.5x */
+ 90, /* 1000 -> 9.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ 120, /* 1100 -> 12.0x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 65, /* 1111 -> 6.5x */
+
+ -1, /* 0000 -> RESERVED (9.0x) */
+ 110, /* 0001 -> 11.0x */
+ 120, /* 0010 -> 12.0x */
+ -1, /* 0011 -> RESERVED (10.0x)*/
+ 135, /* 0100 -> 13.5x */
+ 115, /* 0101 -> 11.5x */
+ 125, /* 0110 -> 12.5x */
+ 105, /* 0111 -> 10.5x */
+ 130, /* 1000 -> 13.0x */
+ 150, /* 1001 -> 15.0x */
+ 160, /* 1010 -> 16.0x */
+ 140, /* 1011 -> 14.0x */
+ -1, /* 1100 -> RESERVED (12.0x) */
+ 155, /* 1101 -> 15.5x */
+ -1, /* 1110 -> RESERVED (13.0x) */
+ 145, /* 1111 -> 14.5x */
+};
+
+/*
+ * VIA C3 Nehemiah
+ */
+
+static const int nehemiah_mults[32] = {
+ 100, /* 0000 -> 10.0x */
+ -1, /* 0001 -> 16.0x */
+ 40, /* 0010 -> 4.0x */
+ 90, /* 0011 -> 9.0x */
+ 95, /* 0100 -> 9.5x */
+ -1, /* 0101 -> RESERVED */
+ 45, /* 0110 -> 4.5x */
+ 55, /* 0111 -> 5.5x */
+ 60, /* 1000 -> 6.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 50, /* 1011 -> 5.0x */
+ 65, /* 1100 -> 6.5x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 120, /* 1111 -> 12.0x */
+ -1, /* 0000 -> 10.0x */
+ 110, /* 0001 -> 11.0x */
+ -1, /* 0010 -> 12.0x */
+ -1, /* 0011 -> 9.0x */
+ 105, /* 0100 -> 10.5x */
+ 115, /* 0101 -> 11.5x */
+ 125, /* 0110 -> 12.5x */
+ 135, /* 0111 -> 13.5x */
+ 140, /* 1000 -> 14.0x */
+ 150, /* 1001 -> 15.0x */
+ 160, /* 1010 -> 16.0x */
+ 130, /* 1011 -> 13.0x */
+ 145, /* 1100 -> 14.5x */
+ 155, /* 1101 -> 15.5x */
+ -1, /* 1110 -> RESERVED (13.0x) */
+ -1, /* 1111 -> 12.0x */
+};
+
+static const int nehemiah_eblcr[32] = {
+ 50, /* 0000 -> 5.0x */
+ 160, /* 0001 -> 16.0x */
+ 40, /* 0010 -> 4.0x */
+ 100, /* 0011 -> 10.0x */
+ 55, /* 0100 -> 5.5x */
+ -1, /* 0101 -> RESERVED */
+ 45, /* 0110 -> 4.5x */
+ 95, /* 0111 -> 9.5x */
+ 90, /* 1000 -> 9.0x */
+ 70, /* 1001 -> 7.0x */
+ 80, /* 1010 -> 8.0x */
+ 60, /* 1011 -> 6.0x */
+ 120, /* 1100 -> 12.0x */
+ 75, /* 1101 -> 7.5x */
+ 85, /* 1110 -> 8.5x */
+ 65, /* 1111 -> 6.5x */
+ 90, /* 0000 -> 9.0x */
+ 110, /* 0001 -> 11.0x */
+ 120, /* 0010 -> 12.0x */
+ 100, /* 0011 -> 10.0x */
+ 135, /* 0100 -> 13.5x */
+ 115, /* 0101 -> 11.5x */
+ 125, /* 0110 -> 12.5x */
+ 105, /* 0111 -> 10.5x */
+ 130, /* 1000 -> 13.0x */
+ 150, /* 1001 -> 15.0x */
+ 160, /* 1010 -> 16.0x */
+ 140, /* 1011 -> 14.0x */
+ 120, /* 1100 -> 12.0x */
+ 155, /* 1101 -> 15.5x */
+ -1, /* 1110 -> RESERVED (13.0x) */
+ 145 /* 1111 -> 14.5x */
+};
+
+/*
+ * Voltage scales. Div/Mod by 1000 to get actual voltage.
+ * Which scale to use depends on the VRM type in use.
+ */
+
+struct mV_pos {
+ unsigned short mV;
+ unsigned short pos;
+};
+
+static const struct mV_pos vrm85_mV[32] = {
+ {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
+ {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
+ {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
+ {1450, 16}, {1400, 14}, {1350, 12}, {1300, 10},
+ {1275, 9}, {1225, 7}, {1175, 5}, {1125, 3},
+ {1075, 1}, {1825, 31}, {1775, 29}, {1725, 27},
+ {1675, 25}, {1625, 23}, {1575, 21}, {1525, 19},
+ {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
+};
+
+static const unsigned char mV_vrm85[32] = {
+ 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
+ 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
+ 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
+ 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
+};
+
+static const struct mV_pos mobilevrm_mV[32] = {
+ {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
+ {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
+ {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
+ {1150, 19}, {1100, 18}, {1050, 17}, {1000, 16},
+ {975, 15}, {950, 14}, {925, 13}, {900, 12},
+ {875, 11}, {850, 10}, {825, 9}, {800, 8},
+ {775, 7}, {750, 6}, {725, 5}, {700, 4},
+ {675, 3}, {650, 2}, {625, 1}, {600, 0}
+};
+
+static const unsigned char mV_mobilevrm[32] = {
+ 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
+ 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
+ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
+ 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
+};
+
diff --git a/kernel/drivers/cpufreq/longrun.c b/kernel/drivers/cpufreq/longrun.c
new file mode 100644
index 000000000..074971b12
--- /dev/null
+++ b/kernel/drivers/cpufreq/longrun.c
@@ -0,0 +1,324 @@
+/*
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/timex.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+
+static struct cpufreq_driver longrun_driver;
+
+/**
+ * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
+ * values into per cent values. In TMTA microcode, the following is valid:
+ * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
+ */
+static unsigned int longrun_low_freq, longrun_high_freq;
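+
+/*
+ * For illustration (hypothetical frequencies): with longrun_low_freq =
+ * 300000 kHz and longrun_high_freq = 933000 kHz, a current frequency
+ * of 616500 kHz corresponds to performance_pctg =
+ * (616500 - 300000) / (933000 - 300000) = 50 per cent.
+ */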
+
+
+/**
+ * longrun_get_policy - get the current LongRun policy
+ * @policy: struct cpufreq_policy where current policy is written into
+ *
+ * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
+ * and MSR_TMTA_LONGRUN_CTRL
+ */
+static void longrun_get_policy(struct cpufreq_policy *policy)
+{
+ u32 msr_lo, msr_hi;
+
+ rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+ pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
+ if (msr_lo & 0x01)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+ pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
+ msr_lo &= 0x0000007F;
+ msr_hi &= 0x0000007F;
+
+ if (longrun_high_freq <= longrun_low_freq) {
+ /* Assume degenerate Longrun table */
+ policy->min = policy->max = longrun_high_freq;
+ } else {
+ policy->min = longrun_low_freq + msr_lo *
+ ((longrun_high_freq - longrun_low_freq) / 100);
+ policy->max = longrun_low_freq + msr_hi *
+ ((longrun_high_freq - longrun_low_freq) / 100);
+ }
+ policy->cpu = 0;
+}
+
+
+/**
+ * longrun_set_policy - sets a new CPUFreq policy
+ * @policy: new policy
+ *
+ * Sets a new CPUFreq policy on LongRun-capable processors. This function
+ * has to be called with cpufreq_driver locked.
+ */
+static int longrun_set_policy(struct cpufreq_policy *policy)
+{
+ u32 msr_lo, msr_hi;
+ u32 pctg_lo, pctg_hi;
+
+ if (!policy)
+ return -EINVAL;
+
+ if (longrun_high_freq <= longrun_low_freq) {
+ /* Assume degenerate Longrun table */
+ pctg_lo = pctg_hi = 100;
+ } else {
+ pctg_lo = (policy->min - longrun_low_freq) /
+ ((longrun_high_freq - longrun_low_freq) / 100);
+ pctg_hi = (policy->max - longrun_low_freq) /
+ ((longrun_high_freq - longrun_low_freq) / 100);
+ }
+
+ if (pctg_hi > 100)
+ pctg_hi = 100;
+ if (pctg_lo > pctg_hi)
+ pctg_lo = pctg_hi;
+
+ /* performance or economy mode */
+ rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+ msr_lo &= 0xFFFFFFFE;
+ switch (policy->policy) {
+ case CPUFREQ_POLICY_PERFORMANCE:
+ msr_lo |= 0x00000001;
+ break;
+ case CPUFREQ_POLICY_POWERSAVE:
+ break;
+ }
+ wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+
+ /* lower and upper boundary */
+ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+ msr_lo &= 0xFFFFFF80;
+ msr_hi &= 0xFFFFFF80;
+ msr_lo |= pctg_lo;
+ msr_hi |= pctg_hi;
+ wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+
+ return 0;
+}
+
+
+/**
+ * longrun_verify_policy - verifies a new CPUFreq policy
+ * @policy: the policy to verify
+ *
+ * Validates a new CPUFreq policy. This function has to be called with
+ * cpufreq_driver locked.
+ */
+static int longrun_verify_policy(struct cpufreq_policy *policy)
+{
+ if (!policy)
+ return -EINVAL;
+
+ policy->cpu = 0;
+ cpufreq_verify_within_cpu_limits(policy);
+
+ if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
+ (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int longrun_get(unsigned int cpu)
+{
+ u32 eax, ebx, ecx, edx;
+
+ if (cpu)
+ return 0;
+
+ cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+ pr_debug("cpuid eax is %u\n", eax);
+
+ return eax * 1000;
+}
+
+/**
+ * longrun_determine_freqs - determines the lowest and highest possible core frequencies
+ * @low_freq: an int to put the lowest frequency into
+ * @high_freq: an int to put the highest frequency into
+ *
+ * Determines the lowest and highest possible core frequencies on this CPU.
+ * This is necessary to calculate the performance percentage according to
+ * TMTA rules:
+ * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
+ */
+static int longrun_determine_freqs(unsigned int *low_freq,
+ unsigned int *high_freq)
+{
+ u32 msr_lo, msr_hi;
+ u32 save_lo, save_hi;
+ u32 eax, ebx, ecx, edx;
+ u32 try_hi;
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ if (!low_freq || !high_freq)
+ return -EINVAL;
+
+ if (cpu_has(c, X86_FEATURE_LRTI)) {
+ /* if the LongRun Table Interface is present, the
+ * detection is a bit easier:
+ * For minimum frequency, read out the maximum
+ * level (msr_hi), write that into "currently
+ * selected level", and read out the frequency.
+ * For maximum frequency, read out level zero.
+ */
+ /* minimum */
+ rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
+ wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
+ rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
+ *low_freq = msr_lo * 1000; /* to kHz */
+
+ /* maximum */
+ wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
+ rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
+ *high_freq = msr_lo * 1000; /* to kHz */
+
+ pr_debug("longrun table interface told %u - %u kHz\n",
+ *low_freq, *high_freq);
+
+ if (*low_freq > *high_freq)
+ *low_freq = *high_freq;
+ return 0;
+ }
+
+ /* set the upper border to the value determined during TSC init */
+ *high_freq = (cpu_khz / 1000);
+ *high_freq = *high_freq * 1000;
+ pr_debug("high frequency is %u kHz\n", *high_freq);
+
+ /* get current borders */
+ rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+ save_lo = msr_lo & 0x0000007F;
+ save_hi = msr_hi & 0x0000007F;
+
+ /* if current perf_pctg is larger than 90%, we need to decrease the
+ * upper limit to make the calculation more accurate.
+ */
+ cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+ /* try decreasing in 10% steps, some processors react only
+ * to certain barrier values */
+ for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
+ /* set to 0 to try_hi perf_pctg */
+ msr_lo &= 0xFFFFFF80;
+ msr_hi &= 0xFFFFFF80;
+ msr_hi |= try_hi;
+ wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+
+ /* read out current core MHz and current perf_pctg */
+ cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+
+ /* restore values */
+ wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
+ }
+ pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax);
+
+ /* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
+ * equals
+ * low_freq * (1 - perf_pctg) = (cur_freq - high_freq * perf_pctg)
+ *
+ * high_freq * perf_pctg is stored temporarily into "ebx".
+ */
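+ /*
+ * Numeric sketch (assumed values, for illustration only): with
+ * cpu_khz = 1000000 (a high_freq of 1000 MHz), ecx = 50 and eax = 700,
+ * ebx = (1000000 / 1000) * 50 / 100 = 500 MHz, so
+ * low_freq = (700 - 500) * 100 / (100 - 50) = 400 MHz = 400000 kHz.
+ */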
+ ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */
+
+ if ((ecx > 95) || (ecx == 0) || (eax < ebx))
+ return -EIO;
+
+ edx = ((eax - ebx) * 100) / (100 - ecx);
+ *low_freq = edx * 1000; /* back to kHz */
+
+ pr_debug("low frequency is %u kHz\n", *low_freq);
+
+ if (*low_freq > *high_freq)
+ *low_freq = *high_freq;
+
+ return 0;
+}
+
+
+static int longrun_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+
+ /* capability check */
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ /* detect low and high frequency */
+ result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
+ if (result)
+ return result;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.min_freq = longrun_low_freq;
+ policy->cpuinfo.max_freq = longrun_high_freq;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ longrun_get_policy(policy);
+
+ return 0;
+}
+
+
+static struct cpufreq_driver longrun_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = longrun_verify_policy,
+ .setpolicy = longrun_set_policy,
+ .get = longrun_get,
+ .init = longrun_cpu_init,
+ .name = "longrun",
+};
+
+static const struct x86_cpu_id longrun_ids[] = {
+ { X86_VENDOR_TRANSMETA, X86_FAMILY_ANY, X86_MODEL_ANY,
+ X86_FEATURE_LONGRUN },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, longrun_ids);
+
+/**
+ * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
+ *
+ * Initializes the LongRun support.
+ */
+static int __init longrun_init(void)
+{
+ if (!x86_match_cpu(longrun_ids))
+ return -ENODEV;
+ return cpufreq_register_driver(&longrun_driver);
+}
+
+
+/**
+ * longrun_exit - unregisters LongRun support
+ */
+static void __exit longrun_exit(void)
+{
+ cpufreq_unregister_driver(&longrun_driver);
+}
+
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and "
+ "Efficeon processors.");
+MODULE_LICENSE("GPL");
+
+module_init(longrun_init);
+module_exit(longrun_exit);
diff --git a/kernel/drivers/cpufreq/loongson2_cpufreq.c b/kernel/drivers/cpufreq/loongson2_cpufreq.c
new file mode 100644
index 000000000..fc897baba
--- /dev/null
+++ b/kernel/drivers/cpufreq/loongson2_cpufreq.c
@@ -0,0 +1,200 @@
+/*
+ * Cpufreq driver for the loongson-2 processors
+ *
+ * The 2E revision of the Loongson processor does not support this feature.
+ *
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
+ * Author: Yanhua, yanh@lemote.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/sched.h> /* set_cpus_allowed() */
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <asm/clock.h>
+#include <asm/idle.h>
+
+#include <asm/mach-loongson/loongson.h>
+
+static uint nowait;
+
+static void (*saved_cpu_wait) (void);
+
+static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data);
+
+static struct notifier_block loongson2_cpufreq_notifier_block = {
+ .notifier_call = loongson2_cpu_freq_notifier
+};
+
+static int loongson2_cpu_freq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ current_cpu_data.udelay_val = loops_per_jiffy;
+
+ return 0;
+}
+
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int cpu = policy->cpu;
+ cpumask_t cpus_allowed;
+ unsigned int freq;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ freq =
+ ((cpu_clock_freq / 1000) *
+ loongson2_clockmod_table[index].driver_data) / 8;
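+
+ /*
+ * Illustrative arithmetic (assumed values): with cpu_clock_freq =
+ * 800000000 Hz and driver_data = 4, freq = (800000 * 4) / 8 =
+ * 400000 kHz, i.e. the CPU runs at 4/8 of the full clock.
+ */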
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ /* setting the cpu frequency */
+ clk_set_rate(policy->clk, freq * 1000);
+
+ return 0;
+}
+
+static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct clk *cpuclk;
+ int i;
+ unsigned long rate;
+ int ret;
+
+ cpuclk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(cpuclk)) {
+ printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
+ return PTR_ERR(cpuclk);
+ }
+
+ rate = cpu_clock_freq / 1000;
+ if (!rate) {
+ clk_put(cpuclk);
+ return -EINVAL;
+ }
+
+ /* clock table init */
+ for (i = 2;
+ (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
+ i++)
+ loongson2_clockmod_table[i].frequency = (rate * i) / 8;
+
+ ret = clk_set_rate(cpuclk, rate * 1000);
+ if (ret) {
+ clk_put(cpuclk);
+ return ret;
+ }
+
+ policy->clk = cpuclk;
+ return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
+}
+
+static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ clk_put(policy->clk);
+ return 0;
+}
+
+static struct cpufreq_driver loongson2_cpufreq_driver = {
+ .name = "loongson2",
+ .init = loongson2_cpufreq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = loongson2_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .exit = loongson2_cpufreq_exit,
+ .attr = cpufreq_generic_attr,
+};
+
+static struct platform_device_id platform_device_ids[] = {
+ {
+ .name = "loongson2_cpufreq",
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(platform, platform_device_ids);
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "loongson2_cpufreq",
+ },
+ .id_table = platform_device_ids,
+};
+
+/*
+ * This is the simple version of the Loongson-2 wait. Maybe we need to do
+ * this in an interrupt-disabled context.
+ */
+
+static DEFINE_SPINLOCK(loongson2_wait_lock);
+
+static void loongson2_cpu_wait(void)
+{
+ unsigned long flags;
+ u32 cpu_freq;
+
+ spin_lock_irqsave(&loongson2_wait_lock, flags);
+ cpu_freq = LOONGSON_CHIPCFG(0);
+ LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */
+ LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */
+ spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+ local_irq_enable();
+}
+
+static int __init cpufreq_init(void)
+{
+ int ret;
+
+ /* Register platform stuff */
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ return ret;
+
+ pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
+
+ cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ ret = cpufreq_register_driver(&loongson2_cpufreq_driver);
+
+ if (!ret && !nowait) {
+ saved_cpu_wait = cpu_wait;
+ cpu_wait = loongson2_cpu_wait;
+ }
+
+ return ret;
+}
+
+static void __exit cpufreq_exit(void)
+{
+ if (!nowait && saved_cpu_wait)
+ cpu_wait = saved_cpu_wait;
+ cpufreq_unregister_driver(&loongson2_cpufreq_driver);
+ cpufreq_unregister_notifier(&loongson2_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ platform_driver_unregister(&platform_driver);
+}
+
+module_init(cpufreq_init);
+module_exit(cpufreq_exit);
+
+module_param(nowait, uint, 0644);
+MODULE_PARM_DESC(nowait, "Disable Loongson-2F specific wait");
+
+MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
+MODULE_DESCRIPTION("cpufreq driver for Loongson2F");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/ls1x-cpufreq.c b/kernel/drivers/cpufreq/ls1x-cpufreq.c
new file mode 100644
index 000000000..f0913eee2
--- /dev/null
+++ b/kernel/drivers/cpufreq/ls1x-cpufreq.c
@@ -0,0 +1,222 @@
+/*
+ * CPU Frequency Scaling for Loongson 1 SoC
+ *
+ * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/mach-loongson1/cpufreq.h>
+#include <asm/mach-loongson1/loongson1.h>
+
+static struct {
+ struct device *dev;
+ struct clk *clk; /* CPU clk */
+ struct clk *mux_clk; /* MUX of CPU clk */
+ struct clk *pll_clk; /* PLL clk */
+ struct clk *osc_clk; /* OSC clk */
+ unsigned int max_freq;
+ unsigned int min_freq;
+} ls1x_cpufreq;
+
+static int ls1x_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ current_cpu_data.udelay_val = loops_per_jiffy;
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ls1x_cpufreq_notifier_block = {
+ .notifier_call = ls1x_cpufreq_notifier
+};
+
+static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int old_freq, new_freq;
+
+ old_freq = policy->cur;
+ new_freq = policy->freq_table[index].frequency;
+
+ /*
+ * The procedure of reconfiguring CPU clk is as below.
+ *
+ * - Reparent CPU clk to OSC clk
+ * - Reset CPU clock (very important)
+ * - Reconfigure CPU DIV
+ * - Reparent CPU clk back to CPU DIV clk
+ */
+
+ dev_dbg(ls1x_cpufreq.dev, "%u kHz --> %u kHz\n", old_freq, new_freq);
+ clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
+ __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
+ LS1X_CLK_PLL_DIV);
+ __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
+ LS1X_CLK_PLL_DIV);
+ clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
+ clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
+
+ return 0;
+}
+
+static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_tbl;
+ unsigned int pll_freq, freq;
+ int steps, i, ret;
+
+ pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
+
+ steps = 1 << DIV_CPU_WIDTH;
+ freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
+ if (!freq_tbl) {
+ dev_err(ls1x_cpufreq.dev,
+ "failed to alloc cpufreq_frequency_table\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < (steps - 1); i++) {
+ freq = pll_freq / (i + 1);
+ if ((freq < ls1x_cpufreq.min_freq) ||
+ (freq > ls1x_cpufreq.max_freq))
+ freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ freq_tbl[i].frequency = freq;
+ dev_dbg(ls1x_cpufreq.dev,
+ "cpufreq table: index %d: frequency %d\n", i,
+ freq_tbl[i].frequency);
+ }
+ freq_tbl[i].frequency = CPUFREQ_TABLE_END;
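+
+ /*
+ * Example of the resulting table (assumed pll_freq = 600000 kHz,
+ * min_freq = 150000 kHz, max_freq = 600000 kHz): index 0 -> 600000,
+ * index 1 -> 300000, index 2 -> 200000, index 3 -> 150000; any
+ * divided frequency outside those limits is marked
+ * CPUFREQ_ENTRY_INVALID.
+ */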
+
+ policy->clk = ls1x_cpufreq.clk;
+ ret = cpufreq_generic_init(policy, freq_tbl, 0);
+ if (ret)
+ kfree(freq_tbl);
+out:
+ return ret;
+}
+
+static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ kfree(policy->freq_table);
+ return 0;
+}
+
+static struct cpufreq_driver ls1x_cpufreq_driver = {
+ .name = "cpufreq-ls1x",
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = ls1x_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .init = ls1x_cpufreq_init,
+ .exit = ls1x_cpufreq_exit,
+ .attr = cpufreq_generic_attr,
+};
+
+static int ls1x_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+
+ return 0;
+}
+
+static int ls1x_cpufreq_probe(struct platform_device *pdev)
+{
+ struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
+ struct clk *clk;
+ int ret;
+
+ if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
+ return -EINVAL;
+
+ ls1x_cpufreq.dev = &pdev->dev;
+
+ clk = devm_clk_get(&pdev->dev, pdata->clk_name);
+ if (IS_ERR(clk)) {
+ dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+ pdata->clk_name);
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+ ls1x_cpufreq.clk = clk;
+
+ clk = clk_get_parent(clk);
+ if (IS_ERR(clk)) {
+ dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
+ __clk_get_name(ls1x_cpufreq.clk));
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+ ls1x_cpufreq.mux_clk = clk;
+
+ clk = clk_get_parent(clk);
+ if (IS_ERR(clk)) {
+ dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
+ __clk_get_name(ls1x_cpufreq.mux_clk));
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+ ls1x_cpufreq.pll_clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
+ if (IS_ERR(clk)) {
+ dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+ pdata->osc_clk_name);
+ ret = PTR_ERR(clk);
+ goto out;
+ }
+ ls1x_cpufreq.osc_clk = clk;
+
+ ls1x_cpufreq.max_freq = pdata->max_freq;
+ ls1x_cpufreq.min_freq = pdata->min_freq;
+
+ ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
+ if (ret) {
+ dev_err(ls1x_cpufreq.dev,
+ "failed to register cpufreq driver: %d\n", ret);
+ goto out;
+ }
+
+ ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ if (!ret)
+ goto out;
+
+ dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
+ ret);
+
+ cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+out:
+ return ret;
+}
+
+static struct platform_driver ls1x_cpufreq_platdrv = {
+ .driver = {
+ .name = "ls1x-cpufreq",
+ },
+ .probe = ls1x_cpufreq_probe,
+ .remove = ls1x_cpufreq_remove,
+};
+
+module_platform_driver(ls1x_cpufreq_platdrv);
+
+MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/maple-cpufreq.c b/kernel/drivers/cpufreq/maple-cpufreq.c
new file mode 100644
index 000000000..cc3408fc0
--- /dev/null
+++ b/kernel/drivers/cpufreq/maple-cpufreq.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2011 Dmitry Eremin-Solenikov
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
+ * that is, the iMac G5 and the latest single-CPU desktop.
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+#include <linux/of_device.h>
+
+#define DBG(fmt...) pr_debug(fmt)
+
+/* see 970FX user manual */
+
+#define SCOM_PCR 0x0aa001 /* PCR scom addr */
+
+#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
+#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
+#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
+#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
+#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
+#define PCR_SPEED_SHIFT 17
+#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
+#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
+#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
+#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
+#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
+#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
+
+#define SCOM_PSR 0x408001 /* PSR scom addr */
+/* warning: PSR is a 64-bit register */
+#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
+#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
+#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
+#define PSR_CUR_SPEED_SHIFT (56)
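+
+/*
+ * Illustrative decode (not in the original source): the current speed
+ * is held in bits 57:56 of the 64-bit PSR, so
+ * cur_speed = (psr >> PSR_CUR_SPEED_SHIFT) & 0x3
+ * yields 0 for full speed and 1 for half speed on this hardware.
+ */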
+
+/*
+ * The G5 only supports two frequencies (Quarter speed is not supported)
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table maple_cpu_freqs[] = {
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+/* Power mode data is an array of the 32-bit PCR values to use for
+ * the various frequencies, retrieved from the device-tree
+ */
+static int maple_pmode_cur;
+
+static const u32 *maple_pmode_data;
+static int maple_pmode_max;
+
+/*
+ * SCOM based frequency switching for 970FX rev3
+ */
+static int maple_scom_switch_freq(int speed_mode)
+{
+ unsigned long flags;
+ int to;
+
+ local_irq_save(flags);
+
+ /* Clear PCR high */
+ scom970_write(SCOM_PCR, 0);
+ /* Clear PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
+ /* Set PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT |
+ maple_pmode_data[speed_mode]);
+
+ /* Wait for completion */
+ for (to = 0; to < 10; to++) {
+ unsigned long psr = scom970_read(SCOM_PSR);
+
+ if ((psr & PSR_CMD_RECEIVED) == 0 &&
+ (((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
+ == 0)
+ break;
+ if (psr & PSR_CMD_COMPLETED)
+ break;
+ udelay(100);
+ }
+
+ local_irq_restore(flags);
+
+ maple_pmode_cur = speed_mode;
+ ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int maple_scom_query_freq(void)
+{
+ unsigned long psr = scom970_read(SCOM_PSR);
+ int i;
+
+ for (i = 0; i <= maple_pmode_max; i++)
+ if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
+ break;
+ return i;
+}
+
+/*
+ * Common interface to the cpufreq core
+ */
+
+static int maple_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return maple_scom_switch_freq(index);
+}
+
+static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
+{
+ return maple_cpu_freqs[maple_pmode_cur].frequency;
+}
+
+static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ return cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
+}
+
+static struct cpufreq_driver maple_cpufreq_driver = {
+ .name = "maple",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = maple_cpufreq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = maple_cpufreq_target,
+ .get = maple_cpufreq_get_speed,
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init maple_cpufreq_init(void)
+{
+ struct device_node *cpunode;
+ unsigned int psize;
+ unsigned long max_freq;
+ const u32 *valp;
+ u32 pvr_hi;
+ int rc = -ENODEV;
+
+ /*
+ * Behave here like the powermac driver, which checks machine
+ * compatibility, to ease merging of the two drivers in the future.
+ */
+ if (!of_machine_is_compatible("Momentum,Maple") &&
+ !of_machine_is_compatible("Momentum,Apache"))
+ return 0;
+
+ /* Get first CPU node */
+ cpunode = of_cpu_device_node_get(0);
+ if (cpunode == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
+ goto bail_noprops;
+ }
+
+ /* Check 970FX for now */
+ /* we actually don't care on which CPU to access PVR */
+ pvr_hi = PVR_VER(mfspr(SPRN_PVR));
+ if (pvr_hi != 0x3c && pvr_hi != 0x44) {
+ printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
+ pvr_hi);
+ goto bail_noprops;
+ }
+
+ /* Look for the powertune data in the device-tree */
+ /*
+ * On Maple this property is provided by PIBS in dual-processor config,
+ * not provided by PIBS in CPU0 config and also not provided by SLOF,
+ * so YMMV
+ */
+ maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
+ if (!maple_pmode_data) {
+ DBG("No power-mode-data !\n");
+ goto bail_noprops;
+ }
+ maple_pmode_max = psize / sizeof(u32) - 1;
+
+ /*
+ * From what I see, clock-frequency is always the maximal frequency.
+ * The current driver cannot slew sysclk yet, so we really only deal
+ * with powertune steps for now. We also only implement full freq and
+ * half freq in this version. So far, I haven't yet seen a machine
+ * supporting anything else.
+ */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp)
+ return -ENODEV;
+ max_freq = (*valp)/1000;
+ maple_cpu_freqs[0].frequency = max_freq;
+ maple_cpu_freqs[1].frequency = max_freq/2;
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ msleep(10);
+ maple_pmode_cur = -1;
+ maple_scom_switch_freq(maple_scom_query_freq());
+
+ printk(KERN_INFO "Registering Maple CPU frequency driver\n");
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ maple_cpu_freqs[1].frequency/1000,
+ maple_cpu_freqs[0].frequency/1000,
+ maple_cpu_freqs[maple_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&maple_cpufreq_driver);
+
+ of_node_put(cpunode);
+
+ return rc;
+
+bail_noprops:
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+module_init(maple_cpufreq_init);
+
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/omap-cpufreq.c b/kernel/drivers/cpufreq/omap-cpufreq.c
new file mode 100644
index 000000000..e3866e0d5
--- /dev/null
+++ b/kernel/drivers/cpufreq/omap-cpufreq.c
@@ -0,0 +1,205 @@
+/*
+ * CPU frequency scaling for OMAP using OPP information
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pm_opp.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cpu.h>
+
+/* OPP tolerance in percentage */
+#define OPP_TOLERANCE 4
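+
+/*
+ * Worked example (illustrative): for an OPP voltage of 1200000 uV,
+ * tol = 1200000 * 4 / 100 = 48000 uV, so regulator_set_voltage() is
+ * given the window [1152000, 1248000] uV.
+ */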
+
+static struct cpufreq_frequency_table *freq_table;
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+static struct device *mpu_dev;
+static struct regulator *mpu_reg;
+
+static int omap_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ int r, ret;
+ struct dev_pm_opp *opp;
+ unsigned long freq, volt = 0, volt_old = 0, tol = 0;
+ unsigned int old_freq, new_freq;
+
+ old_freq = policy->cur;
+ new_freq = freq_table[index].frequency;
+
+ freq = new_freq * 1000;
+ ret = clk_round_rate(policy->clk, freq);
+ if (IS_ERR_VALUE(ret)) {
+ dev_warn(mpu_dev,
+ "CPUfreq: Cannot find matching frequency for %lu\n",
+ freq);
+ return ret;
+ }
+ freq = ret;
+
+ if (mpu_reg) {
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
+ __func__, new_freq);
+ return -EINVAL;
+ }
+ volt = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+ tol = volt * OPP_TOLERANCE / 100;
+ volt_old = regulator_get_voltage(mpu_reg);
+ }
+
+ dev_dbg(mpu_dev, "cpufreq-omap: %u MHz, %ld mV --> %u MHz, %ld mV\n",
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
+
+ /* scaling up? scale voltage before frequency */
+ if (mpu_reg && (new_freq > old_freq)) {
+ r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
+ if (r < 0) {
+ dev_warn(mpu_dev, "%s: unable to scale voltage up.\n",
+ __func__);
+ return r;
+ }
+ }
+
+ ret = clk_set_rate(policy->clk, new_freq * 1000);
+
+ /* scaling down? scale voltage after frequency */
+ if (mpu_reg && (new_freq < old_freq)) {
+ r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
+ if (r < 0) {
+ dev_warn(mpu_dev, "%s: unable to scale voltage down.\n",
+ __func__);
+ clk_set_rate(policy->clk, old_freq * 1000);
+ return r;
+ }
+ }
+
+ return ret;
+}
+
+static inline void freq_table_free(void)
+{
+ if (atomic_dec_and_test(&freq_table_users))
+ dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
+}
+
+static int omap_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+
+ policy->clk = clk_get(NULL, "cpufreq_ck");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
+
+ if (!freq_table) {
+ result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
+ if (result) {
+ dev_err(mpu_dev,
+ "%s: cpu%d: failed creating freq table[%d]\n",
+ __func__, policy->cpu, result);
+ goto fail;
+ }
+ }
+
+ atomic_inc_return(&freq_table_users);
+
+ /* FIXME: what's the actual transition time? */
+ result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+ if (!result)
+ return 0;
+
+ freq_table_free();
+fail:
+ clk_put(policy->clk);
+ return result;
+}
+
+static int omap_cpu_exit(struct cpufreq_policy *policy)
+{
+ freq_table_free();
+ clk_put(policy->clk);
+ return 0;
+}
+
+static struct cpufreq_driver omap_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = omap_target,
+ .get = cpufreq_generic_get,
+ .init = omap_cpu_init,
+ .exit = omap_cpu_exit,
+ .name = "omap",
+ .attr = cpufreq_generic_attr,
+};
+
+static int omap_cpufreq_probe(struct platform_device *pdev)
+{
+ mpu_dev = get_cpu_device(0);
+ if (!mpu_dev) {
+ pr_warning("%s: unable to get the mpu device\n", __func__);
+ return -EINVAL;
+ }
+
+ mpu_reg = regulator_get(mpu_dev, "vcc");
+ if (IS_ERR(mpu_reg)) {
+ pr_warning("%s: unable to get MPU regulator\n", __func__);
+ mpu_reg = NULL;
+ } else {
+ /*
+ * Ensure physical regulator is present.
+ * (e.g. could be dummy regulator.)
+ */
+ if (regulator_get_voltage(mpu_reg) < 0) {
+ pr_warn("%s: physical regulator not present for MPU\n",
+ __func__);
+ regulator_put(mpu_reg);
+ mpu_reg = NULL;
+ }
+ }
+
+ return cpufreq_register_driver(&omap_driver);
+}
+
+static int omap_cpufreq_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&omap_driver);
+}
+
+static struct platform_driver omap_cpufreq_platdrv = {
+ .driver = {
+ .name = "omap-cpufreq",
+ },
+ .probe = omap_cpufreq_probe,
+ .remove = omap_cpufreq_remove,
+};
+module_platform_driver(omap_cpufreq_platdrv);
+
+MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/p4-clockmod.c b/kernel/drivers/cpufreq/p4-clockmod.c
new file mode 100644
index 000000000..529cfd921
--- /dev/null
+++ b/kernel/drivers/cpufreq/p4-clockmod.c
@@ -0,0 +1,286 @@
+/*
+ * Pentium 4/Xeon CPU on demand clock modulation/speed scaling
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ * (C) 2002 Arjan van de Ven <arjanv@redhat.com>
+ * (C) 2002 Tora T. Engstad
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * The author(s) of this software shall not be held liable for damages
+ * of any nature resulting due to the use of this software. This
+ * software is provided AS-IS with no warranties.
+ *
+ * Date Errata Description
+ * 20020525 N44, O17 12.5% or 25% DC causes lockup
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/timex.h>
+
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/timer.h>
+#include <asm/cpu_device_id.h>
+
+#include "speedstep-lib.h"
+
+#define PFX "p4-clockmod: "
+
+/*
+ * Duty Cycle (3 bits); note DC_DISABLE is not specified in the
+ * Intel docs, I just use it to mean disable
+ */
+enum {
+ DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
+ DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
+};
+
+#define DC_ENTRIES 8
+
+
+static int has_N44_O17_errata[NR_CPUS];
+static unsigned int stock_freq;
+static struct cpufreq_driver p4clockmod_driver;
+static unsigned int cpufreq_p4_get(unsigned int cpu);
+
+static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
+{
+ u32 l, h;
+
+ if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
+ return -EINVAL;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
+
+ if (l & 0x01)
+ pr_debug("CPU#%d currently thermal throttled\n", cpu);
+
+ if (has_N44_O17_errata[cpu] &&
+ (newstate == DC_25PT || newstate == DC_DFLT))
+ newstate = DC_38PT;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
+ if (newstate == DC_DISABLE) {
+ pr_debug("CPU#%d disabling modulation\n", cpu);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
+ } else {
+ pr_debug("CPU#%d setting duty cycle to %d%%\n",
+ cpu, ((125 * newstate) / 10));
+ /* bits 63 - 5 : reserved
+ * bit 4 : enable/disable
+ * bits 3-1 : duty cycle
+ * bit 0 : reserved
+ */
+ l = (l & ~14);
+ l = l | (1<<4) | ((newstate & 0x7)<<1);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
+ }
+
+ return 0;
+}
+
+
+static struct cpufreq_frequency_table p4clockmod_table[] = {
+ {0, DC_RESV, CPUFREQ_ENTRY_INVALID},
+ {0, DC_DFLT, 0},
+ {0, DC_25PT, 0},
+ {0, DC_38PT, 0},
+ {0, DC_50PT, 0},
+ {0, DC_64PT, 0},
+ {0, DC_75PT, 0},
+ {0, DC_88PT, 0},
+ {0, DC_DISABLE, 0},
+ {0, DC_RESV, CPUFREQ_TABLE_END},
+};
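+
+/*
+ * Illustrative mapping (assumed stock_freq = 2400000 kHz): once the
+ * table is filled in at init time as (stock_freq * i) / 8, DC_50PT
+ * (index 4) yields 1200000 kHz and DC_88PT (index 7) yields
+ * 2100000 kHz, while DC_DISABLE runs at stock_freq.
+ */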
+
+
+static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ int i;
+
+ /* run on each logical CPU,
+ * see section 13.15.3 of IA32 Intel Architecture Software
+ * Developer's Manual, Volume 3
+ */
+ for_each_cpu(i, policy->cpus)
+ cpufreq_p4_setdc(i, p4clockmod_table[index].driver_data);
+
+ return 0;
+}
+
+
+static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
+{
+ if (c->x86 == 0x06) {
+ if (cpu_has(c, X86_FEATURE_EST))
+ printk_once(KERN_WARNING PFX "Warning: EST-capable "
+ "CPU detected. The acpi-cpufreq module offers "
+ "voltage scaling in addition to frequency "
+ "scaling. You should use that instead of "
+ "p4-clockmod, if possible.\n");
+ switch (c->x86_model) {
+ case 0x0E: /* Core */
+ case 0x0F: /* Core Duo */
+ case 0x16: /* Celeron Core */
+ case 0x1C: /* Atom */
+ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+ return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
+ case 0x0D: /* Pentium M (Dothan) */
+ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+ /* fall through */
+ case 0x09: /* Pentium M (Banias) */
+ return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
+ }
+ }
+
+ if (c->x86 != 0xF)
+ return 0;
+
+ /* on P-4s, the TSC runs with constant frequency independently of
+ * whether throttling is active or not. */
+ p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+ if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
+ printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
+ "The speedstep-ich or acpi cpufreq modules offer "
+ "voltage scaling in addition of frequency scaling. "
+ "You should use either one instead of p4-clockmod, "
+ "if possible.\n");
+ return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
+ }
+
+ return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
+}
+
+
+
+static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
+ int cpuid = 0;
+ unsigned int i;
+
+#ifdef CONFIG_SMP
+ cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+#endif
+
+ /* Errata workaround */
+ cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
+ switch (cpuid) {
+ case 0x0f07:
+ case 0x0f0a:
+ case 0x0f11:
+ case 0x0f12:
+ has_N44_O17_errata[policy->cpu] = 1;
+ pr_debug("has errata -- disabling low frequencies\n");
+ }
+
+ if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
+ c->x86_model < 2) {
+ /* switch to maximum frequency and measure result */
+ cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
+ recalibrate_cpu_khz();
+ }
+ /* get max frequency */
+ stock_freq = cpufreq_p4_get_frequency(c);
+ if (!stock_freq)
+ return -EINVAL;
+
+ /* table init */
+ for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
+ p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ p4clockmod_table[i].frequency = (stock_freq * i)/8;
+ }
+
+ /* cpuinfo and default policy values */
+
+ /* the transition latency is set to be 1 higher than the maximum
+ * transition latency of the ondemand governor */
+ policy->cpuinfo.transition_latency = 10000001;
+
+ return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
+}
+
+
+static unsigned int cpufreq_p4_get(unsigned int cpu)
+{
+ u32 l, h;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
+
+ if (l & 0x10) {
+ l = l >> 1;
+ l &= 0x7;
+ } else
+ l = DC_DISABLE;
+
+ if (l != DC_DISABLE)
+ return stock_freq * l / 8;
+
+ return stock_freq;
+}
+
+static struct cpufreq_driver p4clockmod_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cpufreq_p4_target,
+ .init = cpufreq_p4_cpu_init,
+ .get = cpufreq_p4_get,
+ .name = "p4-clockmod",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id cpufreq_p4_id[] = {
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ACC },
+ {}
+};
+
+/*
+ * Intentionally no MODULE_DEVICE_TABLE here: this driver should not
+ * be auto loaded. Please don't add one.
+ */
+
+static int __init cpufreq_p4_init(void)
+{
+ int ret;
+
+ /*
+ * THERM_CONTROL is architectural for IA32 now, so
+ * we can rely on the capability checks
+ */
+ if (!x86_match_cpu(cpufreq_p4_id) || !boot_cpu_has(X86_FEATURE_ACPI))
+ return -ENODEV;
+
+ ret = cpufreq_register_driver(&p4clockmod_driver);
+ if (!ret)
+ printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
+ "Modulation available\n");
+
+ return ret;
+}
+
+
+static void __exit cpufreq_p4_exit(void)
+{
+ cpufreq_unregister_driver(&p4clockmod_driver);
+}
+
+
+MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
+MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
+MODULE_LICENSE("GPL");
+
+late_initcall(cpufreq_p4_init);
+module_exit(cpufreq_p4_exit);
diff --git a/kernel/drivers/cpufreq/pasemi-cpufreq.c b/kernel/drivers/cpufreq/pasemi-cpufreq.c
new file mode 100644
index 000000000..35dd4d7ff
--- /dev/null
+++ b/kernel/drivers/cpufreq/pasemi-cpufreq.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2007 PA Semi, Inc
+ *
+ * Authors: Egor Martovetsky <egor@pasemi.com>
+ * Olof Johansson <olof@lixom.net>
+ *
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Based on arch/powerpc/platforms/cell/cbe_cpufreq.c:
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/smp.h>
+
+#define SDCASR_REG 0x0100
+#define SDCASR_REG_STRIDE 0x1000
+#define SDCPWR_CFGA0_REG 0x0100
+#define SDCPWR_PWST0_REG 0x0000
+#define SDCPWR_GIZTIME_REG 0x0440
+
+/* SDCPWR_GIZTIME_REG fields */
+#define SDCPWR_GIZTIME_GR 0x80000000
+#define SDCPWR_GIZTIME_LONGLOCK 0x000000ff
+
+/* Offset of ASR registers from SDC base */
+#define SDCASR_OFFSET 0x120000
+
+static void __iomem *sdcpwr_mapbase;
+static void __iomem *sdcasr_mapbase;
+
+/* Current astate; used when waking up from power savings on
+ * one core, in case the other core has switched states during
+ * the idle time.
+ */
+static int current_astate;
+
+/* We support 5 (A0-A4) power states, excluding turbo (A5-A6) modes */
+static struct cpufreq_frequency_table pas_freqs[] = {
+ {0, 0, 0},
+ {0, 1, 0},
+ {0, 2, 0},
+ {0, 3, 0},
+ {0, 4, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+/*
+ * hardware specific functions
+ */
+
+static int get_astate_freq(int astate)
+{
+ u32 ret;
+ ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10));
+
+ return ret & 0x3f;
+}
+
+static int get_cur_astate(int cpu)
+{
+ u32 ret;
+
+ ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG);
+ ret = (ret >> (cpu * 4)) & 0x7;
+
+ return ret;
+}
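+
+/*
+ * Illustrative decode (assumed register value): if SDCPWR_PWST0_REG
+ * reads 0x42, core 0 is in astate 0x2 and core 1 in astate 0x4, since
+ * each core occupies a 4-bit field of which the low 3 bits carry the
+ * astate.
+ */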
+
+static int get_gizmo_latency(void)
+{
+ u32 giztime, ret;
+
+ giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG);
+
+ /* just provide the upper bound */
+ if (giztime & SDCPWR_GIZTIME_GR)
+ ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000;
+ else
+ ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000;
+
+ return ret;
+}
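+
+/*
+ * Example (illustrative): with SDCPWR_GIZTIME_GR set and a LONGLOCK
+ * field of 0x10, the reported upper-bound latency is 16 * 128000 =
+ * 2048000 ns.
+ */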
+
+static void set_astate(int cpu, unsigned int astate)
+{
+ unsigned long flags;
+
+ /* Return if called before init has run */
+ if (unlikely(!sdcasr_mapbase))
+ return;
+
+ local_irq_save(flags);
+
+ out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate);
+
+ local_irq_restore(flags);
+}
+
+int check_astate(void)
+{
+ return get_cur_astate(hard_smp_processor_id());
+}
+
+void restore_astate(int cpu)
+{
+ set_astate(cpu, current_astate);
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos;
+ const u32 *max_freqp;
+ u32 max_freq;
+ int cur_astate;
+ struct resource res;
+ struct device_node *cpu, *dn;
+ int err = -ENODEV;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+
+ if (!cpu)
+ goto out;
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
+ if (!dn)
+ dn = of_find_compatible_node(NULL, NULL,
+ "pasemi,pwrficient-sdc");
+ if (!dn)
+ goto out;
+ err = of_address_to_resource(dn, 0, &res);
+ of_node_put(dn);
+ if (err)
+ goto out;
+ sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000);
+ if (!sdcasr_mapbase) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo");
+ if (!dn)
+ dn = of_find_compatible_node(NULL, NULL,
+ "pasemi,pwrficient-gizmo");
+ if (!dn) {
+ err = -ENODEV;
+ goto out_unmap_sdcasr;
+ }
+ err = of_address_to_resource(dn, 0, &res);
+ of_node_put(dn);
+ if (err)
+ goto out_unmap_sdcasr;
+ sdcpwr_mapbase = ioremap(res.start, 0x1000);
+ if (!sdcpwr_mapbase) {
+ err = -EINVAL;
+ goto out_unmap_sdcasr;
+ }
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+ if (!max_freqp) {
+ err = -EINVAL;
+ goto out_unmap_sdcpwr;
+ }
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+ /* initialize frequency table */
+ cpufreq_for_each_entry(pos, pas_freqs) {
+ pos->frequency = get_astate_freq(pos->driver_data) * 100000;
+ pr_debug("%d: %d\n", (int)(pos - pas_freqs), pos->frequency);
+ }
+
+ cur_astate = get_cur_astate(policy->cpu);
+ pr_debug("current astate is at %d\n",cur_astate);
+
+ policy->cur = pas_freqs[cur_astate].frequency;
+ ppc_proc_freq = policy->cur * 1000ul;
+
+ return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
+
+out_unmap_sdcpwr:
+ iounmap(sdcpwr_mapbase);
+
+out_unmap_sdcasr:
+ iounmap(sdcasr_mapbase);
+out:
+ return err;
+}
+
+static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ /*
+ * We don't support CPU hotplug. Don't unmap after the system
+ * has already made it to a running state.
+ */
+ if (system_state != SYSTEM_BOOTING)
+ return 0;
+
+ if (sdcasr_mapbase)
+ iounmap(sdcasr_mapbase);
+ if (sdcpwr_mapbase)
+ iounmap(sdcpwr_mapbase);
+
+ return 0;
+}
+
+static int pas_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int pas_astate_new)
+{
+ int i;
+
+ pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+ policy->cpu,
+ pas_freqs[pas_astate_new].frequency,
+ pas_freqs[pas_astate_new].driver_data);
+
+ current_astate = pas_astate_new;
+
+ for_each_online_cpu(i)
+ set_astate(i, pas_astate_new);
+
+ ppc_proc_freq = pas_freqs[pas_astate_new].frequency * 1000ul;
+ return 0;
+}
+
+static struct cpufreq_driver pas_cpufreq_driver = {
+ .name = "pas-cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = pas_cpufreq_cpu_init,
+ .exit = pas_cpufreq_cpu_exit,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pas_cpufreq_target,
+ .attr = cpufreq_generic_attr,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init pas_cpufreq_init(void)
+{
+ if (!of_machine_is_compatible("PA6T-1682M") &&
+ !of_machine_is_compatible("pasemi,pwrficient"))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&pas_cpufreq_driver);
+}
+
+static void __exit pas_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pas_cpufreq_driver);
+}
+
+module_init(pas_cpufreq_init);
+module_exit(pas_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/kernel/drivers/cpufreq/pcc-cpufreq.c b/kernel/drivers/cpufreq/pcc-cpufreq.c
new file mode 100644
index 000000000..2a0d58959
--- /dev/null
+++ b/kernel/drivers/cpufreq/pcc-cpufreq.c
@@ -0,0 +1,619 @@
+/*
+ * pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface
+ *
+ * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com>
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON
+ * INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <acpi/processor.h>
+
+#define PCC_VERSION "1.10.00"
+#define POLL_LOOPS 300
+
+#define CMD_COMPLETE 0x1
+#define CMD_GET_FREQ 0x0
+#define CMD_SET_FREQ 0x1
+
+#define BUF_SZ 4
+
+struct pcc_register_resource {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_size;
+ u64 address;
+} __attribute__ ((packed));
+
+struct pcc_memory_resource {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 resource_usage;
+ u8 type_specific;
+ u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+} __attribute__ ((packed));
+
+static struct cpufreq_driver pcc_cpufreq_driver;
+
+struct pcc_header {
+ u32 signature;
+ u16 length;
+ u8 major;
+ u8 minor;
+ u32 features;
+ u16 command;
+ u16 status;
+ u32 latency;
+ u32 minimum_time;
+ u32 maximum_time;
+ u32 nominal;
+ u32 throttled_frequency;
+ u32 minimum_frequency;
+};
+
+static void __iomem *pcch_virt_addr;
+static struct pcc_header __iomem *pcch_hdr;
+
+static DEFINE_SPINLOCK(pcc_lock);
+
+static struct acpi_generic_address doorbell;
+
+static u64 doorbell_preserve;
+static u64 doorbell_write;
+
+static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49,
+ 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
+
+struct pcc_cpu {
+ u32 input_offset;
+ u32 output_offset;
+};
+
+static struct pcc_cpu __percpu *pcc_cpu_info;
+
+static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static inline void pcc_cmd(void)
+{
+ u64 doorbell_value;
+ int i;
+
+ acpi_read(&doorbell_value, &doorbell);
+ acpi_write((doorbell_value & doorbell_preserve) | doorbell_write,
+ &doorbell);
+
+ for (i = 0; i < POLL_LOOPS; i++) {
+ if (ioread16(&pcch_hdr->status) & CMD_COMPLETE)
+ break;
+ }
+}
+
+static inline void pcc_clear_mapping(void)
+{
+ if (pcch_virt_addr)
+ iounmap(pcch_virt_addr);
+ pcch_virt_addr = NULL;
+}
+
+static unsigned int pcc_get_freq(unsigned int cpu)
+{
+ struct pcc_cpu *pcc_cpu_data;
+ unsigned int curr_freq;
+ unsigned int freq_limit;
+ u16 status;
+ u32 input_buffer;
+ u32 output_buffer;
+
+ spin_lock(&pcc_lock);
+
+ pr_debug("get: get_freq for CPU %d\n", cpu);
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ input_buffer = 0x1;
+ iowrite32(input_buffer,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+ iowrite16(CMD_GET_FREQ, &pcch_hdr->command);
+
+ pcc_cmd();
+
+ output_buffer =
+ ioread32(pcch_virt_addr + pcc_cpu_data->output_offset);
+
+ /* Clear the input buffer - we are done with the current command */
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+
+ status = ioread16(&pcch_hdr->status);
+ if (status != CMD_COMPLETE) {
+ pr_debug("get: FAILED: for CPU %d, status is %d\n",
+ cpu, status);
+ goto cmd_incomplete;
+ }
+ iowrite16(0, &pcch_hdr->status);
+ curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff))
+ / 100) * 1000);
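+
+ /*
+ * Example (assumed values): with a nominal frequency of 2000 MHz and
+ * the low byte of output_buffer equal to 50, curr_freq =
+ * ((2000 * 50) / 100) * 1000 = 1000000 kHz, i.e. 50% of nominal.
+ */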
+
+ pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is "
+ "0x%p, contains a value of: 0x%x. Speed is: %d MHz\n",
+ cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
+ output_buffer, curr_freq);
+
+ freq_limit = (output_buffer >> 8) & 0xff;
+ if (freq_limit != 0xff) {
+ pr_debug("get: frequency for cpu %d is being temporarily"
+ " capped at %d\n", cpu, curr_freq);
+ }
+
+ spin_unlock(&pcc_lock);
+ return curr_freq;
+
+cmd_incomplete:
+ iowrite16(0, &pcch_hdr->status);
+ spin_unlock(&pcc_lock);
+ return 0;
+}
+
+static int pcc_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct pcc_cpu *pcc_cpu_data;
+ struct cpufreq_freqs freqs;
+ u16 status;
+ u32 input_buffer;
+ int cpu;
+
+ cpu = policy->cpu;
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ pr_debug("target: CPU %d should go to target freq: %d "
+ "(virtual) input_offset is 0x%p\n",
+ cpu, target_freq,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+ cpufreq_freq_transition_begin(policy, &freqs);
+ spin_lock(&pcc_lock);
+
+ input_buffer = 0x1 | (((target_freq * 100)
+ / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
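+
+ /*
+ * Illustrative encoding (assumed values): for target_freq = 1000000 kHz
+ * and a nominal frequency of 2000 MHz, the percentage placed in bits
+ * 15:8 above is 1000000 * 100 / (2000 * 1000) = 50, with bit 0 set as
+ * the command flag.
+ */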
+ iowrite32(input_buffer,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+ iowrite16(CMD_SET_FREQ, &pcch_hdr->command);
+
+ pcc_cmd();
+
+ /* Clear the input buffer - we are done with the current command */
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+
+ status = ioread16(&pcch_hdr->status);
+ iowrite16(0, &pcch_hdr->status);
+
+ cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
+ spin_unlock(&pcc_lock);
+
+ if (status != CMD_COMPLETE) {
+ pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
+ cpu, status);
+ return -EINVAL;
+ }
+
+ pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
+
+ return 0;
+}
+
+static int pcc_get_offset(int cpu)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *pccp, *offset;
+ struct pcc_cpu *pcc_cpu_data;
+ struct acpi_processor *pr;
+ int ret = 0;
+
+ pr = per_cpu(processors, cpu);
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ if (!pr)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ pccp = buffer.pointer;
+ if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ offset = &(pccp->package.elements[0]);
+ if (!offset || offset->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcc_cpu_data->input_offset = offset->integer.value;
+
+ offset = &(pccp->package.elements[1]);
+ if (!offset || offset->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcc_cpu_data->output_offset = offset->integer.value;
+
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+ memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ);
+
+ pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data "
+ "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n",
+ cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
+out_free:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
+{
+ acpi_status status;
+ struct acpi_object_list input;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object in_params[4];
+ union acpi_object *out_obj;
+ u32 capabilities[2];
+ u32 errors;
+ u32 supported;
+ int ret = 0;
+
+ input.count = 4;
+ input.pointer = in_params;
+ in_params[0].type = ACPI_TYPE_BUFFER;
+ in_params[0].buffer.length = 16;
+ in_params[0].buffer.pointer = OSC_UUID;
+ in_params[1].type = ACPI_TYPE_INTEGER;
+ in_params[1].integer.value = 1;
+ in_params[2].type = ACPI_TYPE_INTEGER;
+ in_params[2].integer.value = 2;
+ in_params[3].type = ACPI_TYPE_BUFFER;
+ in_params[3].buffer.length = 8;
+ in_params[3].buffer.pointer = (u8 *)&capabilities;
+
+ capabilities[0] = OSC_QUERY_ENABLE;
+ capabilities[1] = 0x1;
+
+ status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!output.length)
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
+ if (errors) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ supported = *((u32 *)(out_obj->buffer.pointer + 4));
+ if (!(supported & 0x1)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ kfree(output.pointer);
+ capabilities[0] = 0x0;
+ capabilities[1] = 0x1;
+
+ status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!output.length)
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
+ if (errors) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ supported = *((u32 *)(out_obj->buffer.pointer + 4));
+ if (!(supported & 0x1)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+out_free:
+ kfree(output.pointer);
+ return ret;
+}
+
+static int __init pcc_cpufreq_probe(void)
+{
+ acpi_status status;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct pcc_memory_resource *mem_resource;
+ struct pcc_register_resource *reg_resource;
+ union acpi_object *out_obj, *member;
+ acpi_handle handle, osc_handle;
+ int ret = 0;
+
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!acpi_has_method(handle, "PCCH"))
+ return -ENODEV;
+
+ status = acpi_get_handle(handle, "_OSC", &osc_handle);
+ if (ACPI_SUCCESS(status)) {
+ ret = pcc_cpufreq_do_osc(&osc_handle);
+ if (ret)
+ pr_debug("probe: _OSC evaluation did not succeed\n");
+ /* Firmware's use of _OSC is optional */
+ ret = 0;
+ }
+
+ status = acpi_evaluate_object(handle, "PCCH", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_PACKAGE) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ member = &out_obj->package.elements[0];
+ if (member->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ mem_resource = (struct pcc_memory_resource *)member->buffer.pointer;
+
+ pr_debug("probe: mem_resource descriptor: 0x%x,"
+ " length: %d, space_id: %d, resource_usage: %d,"
+ " type_specific: %d, granularity: 0x%llx,"
+ " minimum: 0x%llx, maximum: 0x%llx,"
+ " translation_offset: 0x%llx, address_length: 0x%llx\n",
+ mem_resource->descriptor, mem_resource->length,
+ mem_resource->space_id, mem_resource->resource_usage,
+ mem_resource->type_specific, mem_resource->granularity,
+ mem_resource->minimum, mem_resource->maximum,
+ mem_resource->translation_offset,
+ mem_resource->address_length);
+
+ if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
+ mem_resource->address_length);
+ if (pcch_virt_addr == NULL) {
+ pr_debug("probe: could not map shared mem region\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ pcch_hdr = pcch_virt_addr;
+
+ pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr);
+ pr_debug("probe: PCCH header is at physical address: 0x%llx,"
+ " signature: 0x%x, length: %d bytes, major: %d, minor: %d,"
+ " supported features: 0x%x, command field: 0x%x,"
+ " status field: 0x%x, nominal latency: %d us\n",
+ mem_resource->minimum, ioread32(&pcch_hdr->signature),
+ ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major),
+ ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features),
+ ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status),
+ ioread32(&pcch_hdr->latency));
+
+ pr_debug("probe: min time between commands: %d us,"
+ " max time between commands: %d us,"
+ " nominal CPU frequency: %d MHz,"
+ " minimum CPU frequency: %d MHz,"
+ " minimum CPU frequency without throttling: %d MHz\n",
+ ioread32(&pcch_hdr->minimum_time),
+ ioread32(&pcch_hdr->maximum_time),
+ ioread32(&pcch_hdr->nominal),
+ ioread32(&pcch_hdr->throttled_frequency),
+ ioread32(&pcch_hdr->minimum_frequency));
+
+ member = &out_obj->package.elements[1];
+ if (member->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ reg_resource = (struct pcc_register_resource *)member->buffer.pointer;
+
+ doorbell.space_id = reg_resource->space_id;
+ doorbell.bit_width = reg_resource->bit_width;
+ doorbell.bit_offset = reg_resource->bit_offset;
+ doorbell.access_width = 64;
+ doorbell.address = reg_resource->address;
+
+ pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
+ "bit_offset is %d, access_width is %d, address is 0x%llx\n",
+ doorbell.space_id, doorbell.bit_width, doorbell.bit_offset,
+ doorbell.access_width, reg_resource->address);
+
+ member = &out_obj->package.elements[2];
+ if (member->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ doorbell_preserve = member->integer.value;
+
+ member = &out_obj->package.elements[3];
+ if (member->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ doorbell_write = member->integer.value;
+
+ pr_debug("probe: doorbell_preserve: 0x%llx,"
+ " doorbell_write: 0x%llx\n",
+ doorbell_preserve, doorbell_write);
+
+ pcc_cpu_info = alloc_percpu(struct pcc_cpu);
+ if (!pcc_cpu_info) {
+ ret = -ENOMEM;
+ goto pcch_free;
+ }
+
+ printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency"
+ " limits: %d MHz, %d MHz\n", PCC_VERSION,
+ ioread32(&pcch_hdr->minimum_frequency),
+ ioread32(&pcch_hdr->nominal));
+ kfree(output.pointer);
+ return ret;
+pcch_free:
+ pcc_clear_mapping();
+out_free:
+ kfree(output.pointer);
+ return ret;
+}
+
+static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned int result = 0;
+
+ if (!pcch_virt_addr) {
+ result = -1;
+ goto out;
+ }
+
+ result = pcc_get_offset(cpu);
+ if (result) {
+ pr_debug("init: PCCP evaluation failed\n");
+ goto out;
+ }
+
+ policy->max = policy->cpuinfo.max_freq =
+ ioread32(&pcch_hdr->nominal) * 1000;
+ policy->min = policy->cpuinfo.min_freq =
+ ioread32(&pcch_hdr->minimum_frequency) * 1000;
+
+ pr_debug("init: policy->max is %d, policy->min is %d\n",
+ policy->max, policy->min);
+out:
+ return result;
+}
+
+static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver pcc_cpufreq_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .get = pcc_get_freq,
+ .verify = pcc_cpufreq_verify,
+ .target = pcc_cpufreq_target,
+ .init = pcc_cpufreq_cpu_init,
+ .exit = pcc_cpufreq_cpu_exit,
+ .name = "pcc-cpufreq",
+};
+
+static int __init pcc_cpufreq_init(void)
+{
+ int ret;
+
+ if (acpi_disabled)
+ return 0;
+
+ ret = pcc_cpufreq_probe();
+ if (ret) {
+ pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n");
+ return ret;
+ }
+
+ ret = cpufreq_register_driver(&pcc_cpufreq_driver);
+
+ return ret;
+}
+
+static void __exit pcc_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pcc_cpufreq_driver);
+
+ pcc_clear_mapping();
+
+ free_percpu(pcc_cpu_info);
+}
+
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, },
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
+MODULE_VERSION(PCC_VERSION);
+MODULE_DESCRIPTION("Processor Clocking Control interface driver");
+MODULE_LICENSE("GPL");
+
+late_initcall(pcc_cpufreq_init);
+module_exit(pcc_cpufreq_exit);
diff --git a/kernel/drivers/cpufreq/pmac32-cpufreq.c b/kernel/drivers/cpufreq/pmac32-cpufreq.c
new file mode 100644
index 000000000..1f49d97a7
--- /dev/null
+++ b/kernel/drivers/cpufreq/pmac32-cpufreq.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO: Need a big cleanup here. Basically, we need to have different
+ * cpufreq_driver structures for the different types of HW instead of the
+ * current mess. We also need to better deal with the detection of the
+ * type of machine.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/hardirq.h>
+#include <linux/of_device.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/pmac_feature.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/mpic.h>
+#include <asm/keylargo.h>
+#include <asm/switch_to.h>
+
+/* WARNING !!! This will cause calibrate_delay() to be called,
+ * but calibrate_delay() is an __init function! So you MUST go edit
+ * init/main.c to make it non-init before enabling DEBUG_FREQ
+ */
+#undef DEBUG_FREQ
+
+extern void low_choose_7447a_dfs(int dfs);
+extern void low_choose_750fx_pll(int pll);
+extern void low_sleep_handler(void);
+
+/*
+ * Currently, PowerMac cpufreq supports only high & low frequencies
+ * that are set by the firmware
+ */
+static unsigned int low_freq;
+static unsigned int hi_freq;
+static unsigned int cur_freq;
+static unsigned int sleep_freq;
+static unsigned long transition_latency;
+
+/*
+ * Different models use different mechanisms to switch the frequency
+ */
+static int (*set_speed_proc)(int low_speed);
+static unsigned int (*get_speed_proc)(void);
+
+/*
+ * Some definitions used by the various speedprocs
+ */
+static u32 voltage_gpio;
+static u32 frequency_gpio;
+static u32 slew_done_gpio;
+static int no_schedule;
+static int has_cpu_l2lve;
+static int is_pmu_based;
+
+/* There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
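+
+/* The frequency fields are filled in by pmac_cpufreq_setup() once
+ * low_freq and hi_freq have been determined for the machine; only the
+ * CPUFREQ_HIGH/CPUFREQ_LOW indices are fixed here.
+ */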
+
+static inline void local_delay(unsigned long ms)
+{
+ if (no_schedule)
+ mdelay(ms);
+ else
+ msleep(ms);
+}
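+
+/* no_schedule is set across suspend/resume (see pmac_cpufreq_suspend()
+ * and pmac_cpufreq_resume() below), where we must not schedule, so
+ * local_delay() falls back to a busy-waiting mdelay() there.
+ */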
+
+#ifdef DEBUG_FREQ
+static inline void debug_calc_bogomips(void)
+{
+ /* This will cause a recalc of bogomips and display the
+ * result. We backup/restore the value to avoid affecting the
+ * core cpufreq framework's own calculation.
+ */
+ unsigned long save_lpj = loops_per_jiffy;
+ calibrate_delay();
+ loops_per_jiffy = save_lpj;
+}
+#endif /* DEBUG_FREQ */
+
+/* Switch CPU speed under 750FX CPU control
+ */
+static int cpu_750fx_cpu_speed(int low_speed)
+{
+ u32 hid2;
+
+ if (low_speed == 0) {
+ /* ramping up, set voltage first */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Make sure we sleep for at least 1ms */
+ local_delay(10);
+
+ /* tweak L2 for high voltage */
+ if (has_cpu_l2lve) {
+ hid2 = mfspr(SPRN_HID2);
+ hid2 &= ~0x2000;
+ mtspr(SPRN_HID2, hid2);
+ }
+ }
+#ifdef CONFIG_6xx
+ low_choose_750fx_pll(low_speed);
+#endif
+ if (low_speed == 1) {
+ /* tweak L2 for low voltage */
+ if (has_cpu_l2lve) {
+ hid2 = mfspr(SPRN_HID2);
+ hid2 |= 0x2000;
+ mtspr(SPRN_HID2, hid2);
+ }
+
+ /* ramping down, set voltage last */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ local_delay(10);
+ }
+
+ return 0;
+}
+
+static unsigned int cpu_750fx_get_cpu_speed(void)
+{
+ if (mfspr(SPRN_HID1) & HID1_PS)
+ return low_freq;
+ else
+ return hi_freq;
+}
+
+/* Switch CPU speed using DFS */
+static int dfs_set_cpu_speed(int low_speed)
+{
+ if (low_speed == 0) {
+ /* ramping up, set voltage first */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Make sure we sleep for at least 1ms */
+ local_delay(1);
+ }
+
+ /* set frequency */
+#ifdef CONFIG_6xx
+ low_choose_7447a_dfs(low_speed);
+#endif
+ udelay(100);
+
+ if (low_speed == 1) {
+ /* ramping down, set voltage last */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ local_delay(1);
+ }
+
+ return 0;
+}
+
+static unsigned int dfs_get_cpu_speed(void)
+{
+ if (mfspr(SPRN_HID1) & HID1_DFS)
+ return low_freq;
+ else
+ return hi_freq;
+}
+
+
+/* Switch CPU speed using slewing GPIOs
+ */
+static int gpios_set_cpu_speed(int low_speed)
+{
+ int gpio, timeout = 0;
+
+ /* If ramping up, set voltage first */
+ if (low_speed == 0) {
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Delay is way too big but that's OK; we schedule */
+ local_delay(10);
+ }
+
+ /* Set frequency */
+ gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+ if (low_speed == ((gpio & 0x01) == 0))
+ goto skip;
+
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
+ low_speed ? 0x04 : 0x05);
+ udelay(200);
+ do {
+ if (++timeout > 100)
+ break;
+ local_delay(1);
+ gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
+ } while ((gpio & 0x02) == 0);
+ skip:
+ /* If ramping down, set voltage last */
+ if (low_speed == 1) {
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ /* Delay is way too big but that's OK; we schedule */
+ local_delay(10);
+ }
+
+#ifdef DEBUG_FREQ
+ debug_calc_bogomips();
+#endif
+
+ return 0;
+}
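+
+/* Note the ordering shared by all the speedprocs above: when ramping
+ * up, the voltage GPIO is raised (0x05) before the frequency changes;
+ * when ramping down, it is lowered (0x04) only afterwards, so the core
+ * is never run at high speed on the low voltage setting.
+ */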
+
+/* Switch CPU speed under PMU control
+ */
+static int pmu_set_cpu_speed(int low_speed)
+{
+ struct adb_request req;
+ unsigned long save_l2cr;
+ unsigned long save_l3cr;
+ unsigned int pic_prio;
+ unsigned long flags;
+
+ preempt_disable();
+
+#ifdef DEBUG_FREQ
+ printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
+#endif
+ pmu_suspend();
+
+ /* Disable all interrupt sources on openpic */
+ pic_prio = mpic_cpu_get_priority();
+ mpic_cpu_set_priority(0xf);
+
+ /* Make sure the decrementer won't interrupt us */
+ asm volatile("mtdec %0" : : "r" (0x7fffffff));
+ /* Make sure any pending DEC interrupt occurring while we did
+ * the above didn't re-enable the DEC */
+ mb();
+ asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
+ /* We can now disable MSR_EE */
+ local_irq_save(flags);
+
+ /* Give up the FPU & vec */
+ enable_kernel_fp();
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ enable_kernel_altivec();
+#endif /* CONFIG_ALTIVEC */
+
+ /* Save & disable L2 and L3 caches */
+ save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
+ save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
+
+ /* Send the new speed command. My assumption is that this command
+ * will cause PLL_CFG[0..3] to be changed the next time the CPU goes to sleep
+ */
+ pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
+ while (!req.complete)
+ pmu_poll();
+
+ /* Prepare the northbridge for the speed transition */
+ pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 1);
+
+ /* Call low level code to backup CPU state and recover from
+ * hardware reset
+ */
+ low_sleep_handler();
+
+ /* Restore the northbridge */
+ pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 1, 0);
+
+ /* Restore L2 cache */
+ if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
+ _set_L2CR(save_l2cr);
+ /* Restore L3 cache */
+ if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
+ _set_L3CR(save_l3cr);
+
+ /* Restore userland MMU context */
+ switch_mmu_context(NULL, current->active_mm);
+
+#ifdef DEBUG_FREQ
+ printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
+#endif
+
+ /* Restore low level PMU operations */
+ pmu_unlock();
+
+ /*
+ * Restore decrementer; we'll take a decrementer interrupt
+ * as soon as interrupts are re-enabled and the generic
+ * clockevents code will reprogram it with the right value.
+ */
+ set_dec(1);
+
+ /* Restore interrupts */
+ mpic_cpu_set_priority(pic_prio);
+
+ /* Let interrupts flow again ... */
+ local_irq_restore(flags);
+
+#ifdef DEBUG_FREQ
+ debug_calc_bogomips();
+#endif
+
+ pmu_resume();
+
+ preempt_enable();
+
+ return 0;
+}
+
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode)
+{
+ unsigned long l3cr;
+ static unsigned long prev_l3cr;
+
+ if (speed_mode == CPUFREQ_LOW &&
+ cpu_has_feature(CPU_FTR_L3CR)) {
+ l3cr = _get_L3CR();
+ if (l3cr & L3CR_L3E) {
+ prev_l3cr = l3cr;
+ _set_L3CR(0);
+ }
+ }
+ set_speed_proc(speed_mode == CPUFREQ_LOW);
+ if (speed_mode == CPUFREQ_HIGH &&
+ cpu_has_feature(CPU_FTR_L3CR)) {
+ l3cr = _get_L3CR();
+ if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
+ _set_L3CR(prev_l3cr);
+ }
+ cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
+
+ return 0;
+}
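+
+/* On CPUs with an L3 cache (CPU_FTR_L3CR), do_set_cpu_speed() above
+ * saves L3CR and disables the L3 before dropping to low speed, and
+ * restores the saved value once back at high speed, so the L3 stays
+ * off for the whole time spent at the low frequency.
+ */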
+
+static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
+{
+ return cur_freq;
+}
+
+static int pmac_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ int rc;
+
+ rc = do_set_cpu_speed(policy, index);
+
+ ppc_proc_freq = cur_freq * 1000ul;
+ return rc;
+}
+
+static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
+}
+
+static u32 read_gpio(struct device_node *np)
+{
+ const u32 *reg = of_get_property(np, "reg", NULL);
+ u32 offset;
+
+ if (reg == NULL)
+ return 0;
+ /* This works for all KeyLargos but should be fixed properly
+ * some day... The problem is that we can't seem to rely
+ * on the "reg" property of the GPIO nodes: it is either
+ * relative to the base of KeyLargo or to the base of the
+ * GPIO space, and the device-tree doesn't help.
+ */
+ offset = *reg;
+ if (offset < KEYLARGO_GPIO_LEVELS0)
+ offset += KEYLARGO_GPIO_LEVELS0;
+ return offset;
+}
+
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ /* OK, this could be made a bit smarter, but let's be robust for now. We
+ * always force a speed change to high speed before sleep, to make sure
+ * we have appropriate voltage and/or bus speed for the wakeup process,
+ * and to make sure our loops_per_jiffy is "good enough", that is, it
+ * will not cause too short delays if we sleep at low speed and wake at
+ * high speed.
+ */
+ no_schedule = 1;
+ sleep_freq = cur_freq;
+ if (cur_freq == low_freq && !is_pmu_based)
+ do_set_cpu_speed(policy, CPUFREQ_HIGH);
+ return 0;
+}
+
+static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ /* If we resume, first check if we have a get() function */
+ if (get_speed_proc)
+ cur_freq = get_speed_proc();
+ else
+ cur_freq = 0;
+
+ /* If we don't, hrm... we don't really know our speed here; the best
+ * we can do is force a switch to whatever it was, which is
+ * probably high speed due to our suspend() routine
+ */
+ do_set_cpu_speed(policy, sleep_freq == low_freq ?
+ CPUFREQ_LOW : CPUFREQ_HIGH);
+
+ ppc_proc_freq = cur_freq * 1000ul;
+
+ no_schedule = 0;
+ return 0;
+}
+
+static struct cpufreq_driver pmac_cpufreq_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pmac_cpufreq_target,
+ .get = pmac_cpufreq_get_speed,
+ .init = pmac_cpufreq_cpu_init,
+ .suspend = pmac_cpufreq_suspend,
+ .resume = pmac_cpufreq_resume,
+ .flags = CPUFREQ_PM_NO_WARN,
+ .attr = cpufreq_generic_attr,
+ .name = "powermac",
+};
+
+
+static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
+ "voltage-gpio");
+ struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
+ "frequency-gpio");
+ struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
+ "slewing-done");
+ const u32 *value;
+
+ /*
+ * Check to see if it's GPIO driven or PMU only
+ *
+ * The way we extract the GPIO address is slightly hackish, but it
+ * works well enough for now. We need to abstract the whole GPIO
+ * stuff sooner or later anyway
+ */
+
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+ if (freq_gpio_np)
+ frequency_gpio = read_gpio(freq_gpio_np);
+ if (slew_done_gpio_np)
+ slew_done_gpio = read_gpio(slew_done_gpio_np);
+
+ /* If we use the frequency GPIOs, calculate the min/max speeds based
+ * on the bus frequencies
+ */
+ if (frequency_gpio && slew_done_gpio) {
+ int lenp, rc;
+ const u32 *freqs, *ratio;
+
+ freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
+ lenp /= sizeof(u32);
+ if (freqs == NULL || lenp != 2) {
+ printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+ return 1;
+ }
+ ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
+ NULL);
+ if (ratio == NULL) {
+ printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+ return 1;
+ }
+
+ /* Get the min/max bus frequencies */
+ low_freq = min(freqs[0], freqs[1]);
+ hi_freq = max(freqs[0], freqs[1]);
+
+ /* Grrrr.. It _seems_ that the device-tree is lying about the low bus
+ * frequency: it claims it to be around 84MHz on some models while
+ * it appears to be approx. 101MHz on all. Let's hack around here...
+ * Fortunately, we don't need to be too precise.
+ */
+ if (low_freq < 98000000)
+ low_freq = 101000000;
+
+ /* Convert those to CPU core clocks */
+ low_freq = (low_freq * (*ratio)) / 2000;
+ hi_freq = (hi_freq * (*ratio)) / 2000;
+
+ /* Now that we have the frequencies, we read the GPIO to see what our
+ * current speed is
+ */
+ rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+ cur_freq = (rc & 0x01) ? hi_freq : low_freq;
+
+ set_speed_proc = gpios_set_cpu_speed;
+ return 1;
+ }
+
+ /* If we use the PMU, look for the min & max frequencies in the
+ * device-tree
+ */
+ value = of_get_property(cpunode, "min-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ low_freq = (*value) / 1000;
+ /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
+ * here */
+ if (low_freq < 100000)
+ low_freq *= 10;
+
+ value = of_get_property(cpunode, "max-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ hi_freq = (*value) / 1000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+
+ return 0;
+}
+
+static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np;
+
+ if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+ return 1;
+
+ volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+ if (!voltage_gpio) {
+ printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
+ return 1;
+ }
+
+ /* OF only reports the high frequency */
+ hi_freq = cur_freq;
+ low_freq = cur_freq/2;
+
+ /* Read actual frequency from CPU */
+ cur_freq = dfs_get_cpu_speed();
+ set_speed_proc = dfs_set_cpu_speed;
+ get_speed_proc = dfs_get_cpu_speed;
+
+ return 0;
+}
+
+static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np;
+ u32 pvr;
+ const u32 *value;
+
+ if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+ return 1;
+
+ hi_freq = cur_freq;
+ value = of_get_property(cpunode, "reduced-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ low_freq = (*value) / 1000;
+
+ volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+
+ pvr = mfspr(SPRN_PVR);
+ has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
+
+ set_speed_proc = cpu_750fx_cpu_speed;
+ get_speed_proc = cpu_750fx_get_cpu_speed;
+ cur_freq = cpu_750fx_get_cpu_speed();
+
+ return 0;
+}
+
+/* Currently, we support the following machines:
+ *
+ * - Titanium PowerBook 1GHz (PMU based, 667MHz & 1GHz)
+ * - Titanium PowerBook 800 (PMU based, 667MHz & 800MHz)
+ * - Titanium PowerBook 400 (PMU based, 300MHz & 400MHz)
+ * - Titanium PowerBook 500 (PMU based, 300MHz & 500MHz)
+ * - iBook2 500/600 (PMU based, 400MHz & 500/600MHz)
+ * - iBook2 700 (CPU based, 400MHz & 700MHz, supports low voltage)
+ * - Recent MacRISC3 laptops
+ * - All new machines with 7447A CPUs
+ */
+static int __init pmac_cpufreq_setup(void)
+{
+ struct device_node *cpunode;
+ const u32 *value;
+
+ if (strstr(boot_command_line, "nocpufreq"))
+ return 0;
+
+ /* Get first CPU node */
+ cpunode = of_cpu_device_node_get(0);
+ if (!cpunode)
+ goto out;
+
+ /* Get current cpu clock freq */
+ value = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!value)
+ goto out;
+ cur_freq = (*value) / 1000;
+ transition_latency = CPUFREQ_ETERNAL;
+
+ /* Check for 7447A based MacRISC3 */
+ if (of_machine_is_compatible("MacRISC3") &&
+ of_get_property(cpunode, "dynamic-power-step", NULL) &&
+ PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
+ pmac_cpufreq_init_7447A(cpunode);
+ transition_latency = 8000000;
+ /* Check for other MacRISC3 machines */
+ } else if (of_machine_is_compatible("PowerBook3,4") ||
+ of_machine_is_compatible("PowerBook3,5") ||
+ of_machine_is_compatible("MacRISC3")) {
+ pmac_cpufreq_init_MacRISC3(cpunode);
+ /* Else check for iBook2 500/600 */
+ } else if (of_machine_is_compatible("PowerBook4,1")) {
+ hi_freq = cur_freq;
+ low_freq = 400000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for TiPb 550 */
+ else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
+ hi_freq = cur_freq;
+ low_freq = 500000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for TiPb 400 & 500 */
+ else if (of_machine_is_compatible("PowerBook3,2")) {
+ /* We only know about the 400 MHz and the 500 MHz models;
+ * they both have 300 MHz as the low frequency
+ */
+ if (cur_freq < 350000 || cur_freq > 550000)
+ goto out;
+ hi_freq = cur_freq;
+ low_freq = 300000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for 750FX */
+ else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
+ pmac_cpufreq_init_750FX(cpunode);
+out:
+ of_node_put(cpunode);
+ if (set_speed_proc == NULL)
+ return -ENODEV;
+
+ pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
+ pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
+ ppc_proc_freq = cur_freq * 1000ul;
+
+ printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+ low_freq/1000, hi_freq/1000, cur_freq/1000);
+
+ return cpufreq_register_driver(&pmac_cpufreq_driver);
+}
+
+module_init(pmac_cpufreq_setup);
+
diff --git a/kernel/drivers/cpufreq/pmac64-cpufreq.c b/kernel/drivers/cpufreq/pmac64-cpufreq.c
new file mode 100644
index 000000000..4ff868787
--- /dev/null
+++ b/kernel/drivers/cpufreq/pmac64-cpufreq.c
@@ -0,0 +1,676 @@
+/*
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
+ * that is, the iMac G5 and the latest single-CPU desktops.
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/smu.h>
+#include <asm/pmac_pfunc.h>
+
+#define DBG(fmt...) pr_debug(fmt)
+
+/* see 970FX user manual */
+
+#define SCOM_PCR 0x0aa001 /* PCR scom addr */
+
+#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
+#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
+#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
+#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
+#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
+#define PCR_SPEED_SHIFT 17
+#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
+#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
+#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
+#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
+#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
+#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
+
+#define SCOM_PSR 0x408001 /* PSR scom addr */
+/* warning: PSR is a 64-bit register */
+#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
+#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
+#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
+#define PSR_CUR_SPEED_SHIFT (56)
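+
+/* A worked example of the encoding above: requesting half speed means
+ * writing PCR_HILO_SELECT | PCR_SPEED_HALF (plus the latency fields)
+ * to SCOM_PCR, and completion is detected by comparing the two speed
+ * bits echoed in PSR with the ones requested, roughly:
+ *
+ *	cur  = (scom970_read(SCOM_PSR) >> PSR_CUR_SPEED_SHIFT) & 0x3;
+ *	want = (pcr_val >> PCR_SPEED_SHIFT) & 0x3;
+ *	done = (cur == want);
+ *
+ * which is the test g5_scom_switch_freq() below performs (pcr_val
+ * being the value taken from g5_pmode_data[]).
+ */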
+
+/*
+ * The G5 only supports two frequencies (Quarter speed is not supported)
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table g5_cpu_freqs[] = {
+ {0, CPUFREQ_HIGH, 0},
+ {0, CPUFREQ_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+/* Power mode data is an array of the 32-bit PCR values to use for
+ * the various frequencies, retrieved from the device-tree
+ */
+static int g5_pmode_cur;
+
+static void (*g5_switch_volt)(int speed_mode);
+static int (*g5_switch_freq)(int speed_mode);
+static int (*g5_query_freq)(void);
+
+static unsigned long transition_latency;
+
+#ifdef CONFIG_PMAC_SMU
+
+static const u32 *g5_pmode_data;
+static int g5_pmode_max;
+
+static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
+static int g5_fvt_count; /* number of op. points */
+static int g5_fvt_cur; /* current op. point */
+
+/*
+ * SMU based voltage switching for Neo2 platforms
+ */
+
+static void g5_smu_switch_volt(int speed_mode)
+{
+ struct smu_simple_cmd cmd;
+
+ DECLARE_COMPLETION_ONSTACK(comp);
+ smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete,
+ &comp, 'V', 'S', 'L', 'E', 'W',
+ 0xff, g5_fvt_cur+1, speed_mode);
+ wait_for_completion(&comp);
+}
+
+/*
+ * Platform function based voltage/vdnap switching for Neo2
+ */
+
+static struct pmf_function *pfunc_set_vdnap0;
+static struct pmf_function *pfunc_vdnap0_complete;
+
+static void g5_vdnap_switch_volt(int speed_mode)
+{
+ struct pmf_args args;
+ u32 slew, done = 0;
+ unsigned long timeout;
+
+ slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0;
+ args.count = 1;
+ args.u[0].p = &slew;
+
+ pmf_call_one(pfunc_set_vdnap0, &args);
+
+ /* It's an IRQ GPIO so we should be able to just block here;
+ * I'll do that later after I've properly tested the IRQ code for
+ * platform functions
+ */
+ timeout = jiffies + HZ/10;
+ while (!time_after(jiffies, timeout)) {
+ args.count = 1;
+ args.u[0].p = &done;
+ pmf_call_one(pfunc_vdnap0_complete, &args);
+ if (done)
+ break;
+ usleep_range(1000, 1000);
+ }
+ if (done == 0)
+ printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+}
+
+
+/*
+ * SCOM based frequency switching for 970FX rev3
+ */
+static int g5_scom_switch_freq(int speed_mode)
+{
+ unsigned long flags;
+ int to;
+
+ /* If frequency is going up, first ramp up the voltage */
+ if (speed_mode < g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ local_irq_save(flags);
+
+ /* Clear PCR high */
+ scom970_write(SCOM_PCR, 0);
+ /* Clear PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
+ /* Set PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT |
+ g5_pmode_data[speed_mode]);
+
+ /* Wait for completion */
+ for (to = 0; to < 10; to++) {
+ unsigned long psr = scom970_read(SCOM_PSR);
+
+ if ((psr & PSR_CMD_RECEIVED) == 0 &&
+ (((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
+ == 0)
+ break;
+ if (psr & PSR_CMD_COMPLETED)
+ break;
+ udelay(100);
+ }
+
+ local_irq_restore(flags);
+
+ /* If frequency is going down, last ramp the voltage */
+ if (speed_mode > g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ g5_pmode_cur = speed_mode;
+ ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int g5_scom_query_freq(void)
+{
+ unsigned long psr = scom970_read(SCOM_PSR);
+ int i;
+
+ for (i = 0; i <= g5_pmode_max; i++)
+ if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
+ break;
+ return i;
+}
+
+/*
+ * Fake voltage switching for platforms with missing support
+ */
+
+static void g5_dummy_switch_volt(int speed_mode)
+{
+}
+
+#endif /* CONFIG_PMAC_SMU */
+
+/*
+ * Platform function based voltage switching for PowerMac7,2 & 7,3
+ */
+
+static struct pmf_function *pfunc_cpu0_volt_high;
+static struct pmf_function *pfunc_cpu0_volt_low;
+static struct pmf_function *pfunc_cpu1_volt_high;
+static struct pmf_function *pfunc_cpu1_volt_low;
+
+static void g5_pfunc_switch_volt(int speed_mode)
+{
+ if (speed_mode == CPUFREQ_HIGH) {
+ if (pfunc_cpu0_volt_high)
+ pmf_call_one(pfunc_cpu0_volt_high, NULL);
+ if (pfunc_cpu1_volt_high)
+ pmf_call_one(pfunc_cpu1_volt_high, NULL);
+ } else {
+ if (pfunc_cpu0_volt_low)
+ pmf_call_one(pfunc_cpu0_volt_low, NULL);
+ if (pfunc_cpu1_volt_low)
+ pmf_call_one(pfunc_cpu1_volt_low, NULL);
+ }
+ usleep_range(10000, 10000); /* should be faster, to fix */
+}
+
+/*
+ * Platform function based frequency switching for PowerMac7,2 & 7,3
+ */
+
+static struct pmf_function *pfunc_cpu_setfreq_high;
+static struct pmf_function *pfunc_cpu_setfreq_low;
+static struct pmf_function *pfunc_cpu_getfreq;
+static struct pmf_function *pfunc_slewing_done;
+
+static int g5_pfunc_switch_freq(int speed_mode)
+{
+ struct pmf_args args;
+ u32 done = 0;
+ unsigned long timeout;
+ int rc;
+
+ DBG("g5_pfunc_switch_freq(%d)\n", speed_mode);
+
+ /* If frequency is going up, first ramp up the voltage */
+ if (speed_mode < g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ /* Do it */
+ if (speed_mode == CPUFREQ_HIGH)
+ rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL);
+ else
+ rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
+
+ if (rc)
+ printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
+
+ /* It's an IRQ GPIO so we should be able to just block here;
+ * I'll do that later after I've properly tested the IRQ code for
+ * platform functions
+ */
+ timeout = jiffies + HZ/10;
+ while (!time_after(jiffies, timeout)) {
+ args.count = 1;
+ args.u[0].p = &done;
+ pmf_call_one(pfunc_slewing_done, &args);
+ if (done)
+ break;
+ usleep_range(500, 500);
+ }
+ if (done == 0)
+ printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+
+ /* If frequency is going down, last ramp the voltage */
+ if (speed_mode > g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ g5_pmode_cur = speed_mode;
+ ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int g5_pfunc_query_freq(void)
+{
+ struct pmf_args args;
+ u32 val = 0;
+
+ args.count = 1;
+ args.u[0].p = &val;
+ pmf_call_one(pfunc_cpu_getfreq, &args);
+ return val ? CPUFREQ_HIGH : CPUFREQ_LOW;
+}
+
+
+/*
+ * Common interface to the cpufreq core
+ */
+
+static int g5_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ return g5_switch_freq(index);
+}
+
+static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
+{
+ return g5_cpu_freqs[g5_pmode_cur].frequency;
+}
+
+static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
+}
+
+static struct cpufreq_driver g5_cpufreq_driver = {
+ .name = "powermac",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = g5_cpufreq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = g5_cpufreq_target,
+ .get = g5_cpufreq_get_speed,
+ .attr = cpufreq_generic_attr,
+};
+
+
+#ifdef CONFIG_PMAC_SMU
+
+static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
+{
+ unsigned int psize, ssize;
+ unsigned long max_freq;
+ char *freq_method, *volt_method;
+ const u32 *valp;
+ u32 pvr_hi;
+ int use_volts_vdnap = 0;
+ int use_volts_smu = 0;
+ int rc = -ENODEV;
+
+ /* Check supported platforms */
+ if (of_machine_is_compatible("PowerMac8,1") ||
+ of_machine_is_compatible("PowerMac8,2") ||
+ of_machine_is_compatible("PowerMac9,1") ||
+ of_machine_is_compatible("PowerMac12,1"))
+ use_volts_smu = 1;
+ else if (of_machine_is_compatible("PowerMac11,2"))
+ use_volts_vdnap = 1;
+ else
+ return -ENODEV;
+
+ /* Check 970FX for now */
+ valp = of_get_property(cpunode, "cpu-version", NULL);
+ if (!valp) {
+ DBG("No cpu-version property !\n");
+ goto bail_noprops;
+ }
+ pvr_hi = (*valp) >> 16;
+ if (pvr_hi != 0x3c && pvr_hi != 0x44) {
+ printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
+ goto bail_noprops;
+ }
+
+ /* Look for the powertune data in the device-tree */
+ g5_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
+ if (!g5_pmode_data) {
+ DBG("No power-mode-data !\n");
+ goto bail_noprops;
+ }
+ g5_pmode_max = psize / sizeof(u32) - 1;
+
+ if (use_volts_smu) {
+ const struct smu_sdbp_header *shdr;
+
+ /* Look for the FVT table */
+ shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
+ if (!shdr)
+ goto bail_noprops;
+ g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
+ ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr);
+ g5_fvt_count = ssize / sizeof(*g5_fvt_table);
+ g5_fvt_cur = 0;
+
+ /* Sanity checking */
+ if (g5_fvt_count < 1 || g5_pmode_max < 1)
+ goto bail_noprops;
+
+ g5_switch_volt = g5_smu_switch_volt;
+ volt_method = "SMU";
+ } else if (use_volts_vdnap) {
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (root == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find root of "
+ "device tree\n");
+ goto bail_noprops;
+ }
+ pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
+ pfunc_vdnap0_complete =
+ pmf_find_function(root, "slewing-done");
+ if (pfunc_set_vdnap0 == NULL ||
+ pfunc_vdnap0_complete == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find required "
+ "platform function\n");
+ goto bail_noprops;
+ }
+
+ g5_switch_volt = g5_vdnap_switch_volt;
+ volt_method = "GPIO";
+ } else {
+ g5_switch_volt = g5_dummy_switch_volt;
+ volt_method = "none";
+ }
+
+ /*
+ * From what I see, clock-frequency is always the maximal frequency.
+ * The current driver cannot slew sysclk yet, so we really only deal
+ * with powertune steps for now. We also only implement full freq and
+ * half freq in this version. So far, I haven't yet seen a machine
+ * supporting anything else.
+ */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp)
+ return -ENODEV;
+ max_freq = (*valp)/1000;
+ g5_cpu_freqs[0].frequency = max_freq;
+ g5_cpu_freqs[1].frequency = max_freq/2;
+
+ /* Set callbacks */
+ transition_latency = 12000;
+ g5_switch_freq = g5_scom_switch_freq;
+ g5_query_freq = g5_scom_query_freq;
+ freq_method = "SCOM";
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ g5_switch_volt(CPUFREQ_HIGH);
+ msleep(10);
+ g5_pmode_cur = -1;
+ g5_switch_freq(g5_query_freq());
+
+ printk(KERN_INFO "Registering G5 CPU frequency driver\n");
+ printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
+ freq_method, volt_method);
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ g5_cpu_freqs[1].frequency/1000,
+ g5_cpu_freqs[0].frequency/1000,
+ g5_cpu_freqs[g5_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&g5_cpufreq_driver);
+
+ /* We keep the CPU node on hold... hopefully, Apple G5s don't have
+ * hotplug CPUs with a dynamic device-tree ...
+ */
+ return rc;
+
+ bail_noprops:
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+#endif /* CONFIG_PMAC_SMU */
+
+
+static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
+{
+ struct device_node *cpuid = NULL, *hwclock = NULL;
+ const u8 *eeprom = NULL;
+ const u32 *valp;
+ u64 max_freq, min_freq, ih, il;
+ int has_volt = 1, rc = 0;
+
+ DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
+ " RackMac3,1...\n");
+
+ /* Lookup the cpuid eeprom node */
+ cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
+ if (cpuid != NULL)
+ eeprom = of_get_property(cpuid, "cpuid", NULL);
+ if (eeprom == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Lookup the i2c hwclock */
+ for_each_node_by_name(hwclock, "i2c-hwclock") {
+ const char *loc = of_get_property(hwclock,
+ "hwctrl-location", NULL);
+ if (loc == NULL)
+ continue;
+ if (strcmp(loc, "CPU CLOCK"))
+ continue;
+ if (!of_get_property(hwclock, "platform-get-frequency", NULL))
+ continue;
+ break;
+ }
+ if (hwclock == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name);
+
+ /* Now get all the platform functions */
+ pfunc_cpu_getfreq =
+ pmf_find_function(hwclock, "get-frequency");
+ pfunc_cpu_setfreq_high =
+ pmf_find_function(hwclock, "set-frequency-high");
+ pfunc_cpu_setfreq_low =
+ pmf_find_function(hwclock, "set-frequency-low");
+ pfunc_slewing_done =
+ pmf_find_function(hwclock, "slewing-done");
+ pfunc_cpu0_volt_high =
+ pmf_find_function(hwclock, "set-voltage-high-0");
+ pfunc_cpu0_volt_low =
+ pmf_find_function(hwclock, "set-voltage-low-0");
+ pfunc_cpu1_volt_high =
+ pmf_find_function(hwclock, "set-voltage-high-1");
+ pfunc_cpu1_volt_low =
+ pmf_find_function(hwclock, "set-voltage-low-1");
+
+ /* Check we have minimum requirements */
+ if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
+ pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Check that we have complete sets */
+ if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) {
+ pmf_put_function(pfunc_cpu0_volt_high);
+ pmf_put_function(pfunc_cpu0_volt_low);
+ pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL;
+ has_volt = 0;
+ }
+ if (!has_volt ||
+ pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) {
+ pmf_put_function(pfunc_cpu1_volt_high);
+ pmf_put_function(pfunc_cpu1_volt_low);
+ pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL;
+ }
+
+ /* Note: The device tree also contains a "platform-set-values"
+ * function for which I haven't quite figured out the usage. It
+ * might have to be called on init and/or wakeup; I'm not too sure,
+ * but things seem to work fine without it so far ...
+ */
+
+ /* Get max frequency from device-tree */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp) {
+ printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ max_freq = (*valp)/1000;
+
+ /* Now calculate the reduced frequency by using the cpuid input freq
+ * ratio. This requires 64-bit math unless we are willing to lose
+ * some precision
+ */
+ ih = *((u32 *)(eeprom + 0x10));
+ il = *((u32 *)(eeprom + 0x20));
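+
+ /* Example with hypothetical EEPROM contents: if ih = 0x5c and
+ * il = 0x2e, the ratio is exactly 1/2, so a 2.7GHz machine gets
+ * min_freq = (2700000 * 0x2e) / 0x5c = 1350000 kHz.
+ */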
+
+ /* Check for machines with no useful settings */
+ if (il == ih) {
+ printk(KERN_WARNING "cpufreq: No low frequency mode available"
+ " on this model !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ min_freq = 0;
+ if (ih != 0 && il != 0)
+ min_freq = (max_freq * il) / ih;
+
+ /* Sanity check */
+ if (min_freq >= max_freq || min_freq < 1000) {
+ printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
+ rc = -ENXIO;
+ goto bail;
+ }
+ g5_cpu_freqs[0].frequency = max_freq;
+ g5_cpu_freqs[1].frequency = min_freq;
+
+ /* Based on a measurement on Xserve G5, rounded up. */
+ transition_latency = 10 * NSEC_PER_MSEC;
+
+ /* Set callbacks */
+ g5_switch_volt = g5_pfunc_switch_volt;
+ g5_switch_freq = g5_pfunc_switch_freq;
+ g5_query_freq = g5_pfunc_query_freq;
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ g5_switch_volt(CPUFREQ_HIGH);
+ msleep(10);
+ g5_pmode_cur = -1;
+ g5_switch_freq(g5_query_freq());
+
+ printk(KERN_INFO "Registering G5 CPU frequency driver\n");
+ printk(KERN_INFO "Frequency method: i2c/pfunc, "
+ "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ g5_cpu_freqs[1].frequency/1000,
+ g5_cpu_freqs[0].frequency/1000,
+ g5_cpu_freqs[g5_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&g5_cpufreq_driver);
+ bail:
+ if (rc != 0) {
+ pmf_put_function(pfunc_cpu_getfreq);
+ pmf_put_function(pfunc_cpu_setfreq_high);
+ pmf_put_function(pfunc_cpu_setfreq_low);
+ pmf_put_function(pfunc_slewing_done);
+ pmf_put_function(pfunc_cpu0_volt_high);
+ pmf_put_function(pfunc_cpu0_volt_low);
+ pmf_put_function(pfunc_cpu1_volt_high);
+ pmf_put_function(pfunc_cpu1_volt_low);
+ }
+ of_node_put(hwclock);
+ of_node_put(cpuid);
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+static int __init g5_cpufreq_init(void)
+{
+ struct device_node *cpunode;
+ int rc = 0;
+
+ /* Get first CPU node */
+ cpunode = of_cpu_device_node_get(0);
+ if (cpunode == NULL) {
+ pr_err("cpufreq: Can't find any CPU node\n");
+ return -ENODEV;
+ }
+
+ if (of_machine_is_compatible("PowerMac7,2") ||
+ of_machine_is_compatible("PowerMac7,3") ||
+ of_machine_is_compatible("RackMac3,1"))
+ rc = g5_pm72_cpufreq_init(cpunode);
+#ifdef CONFIG_PMAC_SMU
+ else
+ rc = g5_neo2_cpufreq_init(cpunode);
+#endif /* CONFIG_PMAC_SMU */
+
+ return rc;
+}
+
+module_init(g5_cpufreq_init);
+
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/powernow-k6.c b/kernel/drivers/cpufreq/powernow-k6.c
new file mode 100644
index 000000000..e6f24b281
--- /dev/null
+++ b/kernel/drivers/cpufreq/powernow-k6.c
@@ -0,0 +1,309 @@
+/*
+ * This file was based upon code in Powertweak Linux (http://powertweak.sf.net)
+ * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä,
+ * Dominik Brodowski.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/ioport.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+
+#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
+ as it is unused */
+
+#define PFX "powernow-k6: "
+static unsigned int busfreq; /* FSB, in 10 kHz */
+static unsigned int max_multiplier;
+
+static unsigned int param_busfreq = 0;
+static unsigned int param_max_multiplier = 0;
+
+module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO);
+MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)");
+
+module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO);
+MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
+
+/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
+static struct cpufreq_frequency_table clock_ratio[] = {
+ {0, 60, /* 110 -> 6.0x */ 0},
+ {0, 55, /* 011 -> 5.5x */ 0},
+ {0, 50, /* 001 -> 5.0x */ 0},
+ {0, 45, /* 000 -> 4.5x */ 0},
+ {0, 40, /* 010 -> 4.0x */ 0},
+ {0, 35, /* 111 -> 3.5x */ 0},
+ {0, 30, /* 101 -> 3.0x */ 0},
+ {0, 20, /* 100 -> 2.0x */ 0},
+ {0, 0, CPUFREQ_TABLE_END}
+};
+
+static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
+static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
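+
+/* index_to_register[] and register_to_index[] are inverse permutations
+ * between a clock_ratio[] index and the 3-bit BVC multiplier encoding
+ * from AMD#23446. For example, clock_ratio[0] is the 6.0x entry and is
+ * encoded as 110b = 6 on the wire, so index_to_register[0] == 6 and,
+ * conversely, register_to_index[6] == 0.
+ */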
+
+static const struct {
+ unsigned freq;
+ unsigned mult;
+} usual_frequency_table[] = {
+ { 350000, 35 }, // 100 * 3.5
+ { 400000, 40 }, // 100 * 4
+ { 450000, 45 }, // 100 * 4.5
+ { 475000, 50 }, // 95 * 5
+ { 500000, 50 }, // 100 * 5
+ { 506250, 45 }, // 112.5 * 4.5
+ { 533500, 55 }, // 97 * 5.5
+ { 550000, 55 }, // 100 * 5.5
+ { 562500, 50 }, // 112.5 * 5
+ { 570000, 60 }, // 95 * 6
+ { 600000, 60 }, // 100 * 6
+ { 618750, 55 }, // 112.5 * 5.5
+ { 660000, 55 }, // 120 * 5.5
+ { 675000, 60 }, // 112.5 * 6
+ { 720000, 60 }, // 120 * 6
+};
+
+#define FREQ_RANGE 3000
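+
+/* powernow_k6_cpu_init() below auto-detects the maximum multiplier by
+ * matching the measured cpu_khz against usual_frequency_table[] within
+ * +/- FREQ_RANGE kHz. E.g. a CPU measured at 551200 kHz matches the
+ * { 550000, 55 } entry, giving max_multiplier = 55 (5.5x) and
+ * busfreq = 550000 / 55 = 10000, i.e. a 100 MHz FSB in 10 kHz units.
+ */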
+
+/**
+ * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier
+ *
+ * Returns the current setting of the frequency multiplier. Core clock
+ * speed is the frequency of the Front-Side Bus multiplied by this value.
+ */
+static int powernow_k6_get_cpu_multiplier(void)
+{
+ unsigned long invalue = 0;
+ u32 msrval;
+
+ local_irq_disable();
+
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+ local_irq_enable();
+
+ return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data;
+}
+
+static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
+{
+ unsigned long outvalue, invalue;
+ unsigned long msrval;
+ unsigned long cr0;
+
+ /* we now need to transform best_i to the BVC format, see AMD#23446 */
+
+ /*
+ * The processor doesn't respond to inquiry cycles while changing the
+ * frequency, so we must disable cache.
+ */
+ local_irq_disable();
+ cr0 = read_cr0();
+ write_cr0(cr0 | X86_CR0_CD);
+ wbinvd();
+
+ outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5);
+
+ msrval = POWERNOW_IOPORT + 0x1;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
+ invalue = inl(POWERNOW_IOPORT + 0x8);
+ invalue = invalue & 0x1f;
+ outvalue = outvalue | invalue;
+ outl(outvalue, (POWERNOW_IOPORT + 0x8));
+ msrval = POWERNOW_IOPORT + 0x0;
+ wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
+
+ write_cr0(cr0);
+ local_irq_enable();
+}
+
+/**
+ * powernow_k6_target - set the PowerNow! multiplier
+ * @policy: the cpufreq policy to act on
+ * @best_i: clock_ratio[best_i] is the target multiplier
+ *
+ * Tries to change the PowerNow! multiplier
+ */
+static int powernow_k6_target(struct cpufreq_policy *policy,
+ unsigned int best_i)
+{
+
+ if (clock_ratio[best_i].driver_data > max_multiplier) {
+ printk(KERN_ERR PFX "invalid target frequency\n");
+ return -EINVAL;
+ }
+
+ powernow_k6_set_cpu_multiplier(best_i);
+
+ return 0;
+}
+
+static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int i, f;
+ unsigned khz;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ max_multiplier = 0;
+ khz = cpu_khz;
+ for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) {
+ if (khz >= usual_frequency_table[i].freq - FREQ_RANGE &&
+ khz <= usual_frequency_table[i].freq + FREQ_RANGE) {
+ khz = usual_frequency_table[i].freq;
+ max_multiplier = usual_frequency_table[i].mult;
+ break;
+ }
+ }
+ if (param_max_multiplier) {
+ cpufreq_for_each_entry(pos, clock_ratio)
+ if (pos->driver_data == param_max_multiplier) {
+ max_multiplier = param_max_multiplier;
+ goto have_max_multiplier;
+ }
+ printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+ return -EINVAL;
+ }
+
+ if (!max_multiplier) {
+ printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
+ printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
+ return -EOPNOTSUPP;
+ }
+
+have_max_multiplier:
+ param_max_multiplier = max_multiplier;
+
+ if (param_busfreq) {
+ if (param_busfreq >= 50000 && param_busfreq <= 150000) {
+ busfreq = param_busfreq / 10;
+ goto have_busfreq;
+ }
+ printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+ return -EINVAL;
+ }
+
+ busfreq = khz / max_multiplier;
+have_busfreq:
+ param_busfreq = busfreq * 10;
+
+ /* table init */
+ cpufreq_for_each_entry(pos, clock_ratio) {
+ f = pos->driver_data;
+ if (f > max_multiplier)
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ pos->frequency = busfreq * f;
+ }
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 500000;
+
+ return cpufreq_table_validate_and_show(policy, clock_ratio);
+}
+
+
+static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int i;
+
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (clock_ratio[i].driver_data == max_multiplier) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = clock_ratio[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ powernow_k6_target(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ break;
+ }
+ }
+ return 0;
+}
+
+static unsigned int powernow_k6_get(unsigned int cpu)
+{
+ unsigned int ret;
+ ret = (busfreq * powernow_k6_get_cpu_multiplier());
+ return ret;
+}
+
+static struct cpufreq_driver powernow_k6_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_k6_target,
+ .init = powernow_k6_cpu_init,
+ .exit = powernow_k6_cpu_exit,
+ .get = powernow_k6_get,
+ .name = "powernow-k6",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id powernow_k6_ids[] = {
+ { X86_VENDOR_AMD, 5, 12 },
+ { X86_VENDOR_AMD, 5, 13 },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, powernow_k6_ids);
+
+/**
+ * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver
+ *
+ * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported
+ * devices, -EINVAL or -ENOMEM on problems during initiatization, and zero
+ * on success.
+ */
+static int __init powernow_k6_init(void)
+{
+ if (!x86_match_cpu(powernow_k6_ids))
+ return -ENODEV;
+
+ if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
+ printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
+ return -EIO;
+ }
+
+ if (cpufreq_register_driver(&powernow_k6_driver)) {
+ release_region(POWERNOW_IOPORT, 16);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/**
+ * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support
+ *
+ * Unregisters AMD K6-2+ / K6-3+ PowerNow! support.
+ */
+static void __exit powernow_k6_exit(void)
+{
+ cpufreq_unregister_driver(&powernow_k6_driver);
+ release_region(POWERNOW_IOPORT, 16);
+}
+
+
+MODULE_AUTHOR("Arjan van de Ven, Dave Jones, "
+ "Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
+MODULE_LICENSE("GPL");
+
+module_init(powernow_k6_init);
+module_exit(powernow_k6_exit);
diff --git a/kernel/drivers/cpufreq/powernow-k7.c b/kernel/drivers/cpufreq/powernow-k7.c
new file mode 100644
index 000000000..37c574248
--- /dev/null
+++ b/kernel/drivers/cpufreq/powernow-k7.c
@@ -0,0 +1,709 @@
+/*
+ * AMD K7 Powernow driver.
+ * (C) 2003 Dave Jones on behalf of SuSE Labs.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ * Based upon datasheets & sample CPUs kindly provided by AMD.
+ *
+ * Errata 5:
+ * The CPU may fail to execute a FID/VID change in the presence of an interrupt.
+ * - We cli/sti on stepping A0 CPUs around the FID/VID transition.
+ * Errata 15:
+ * CPUs with half frequency multipliers may hang upon wakeup from disconnect.
+ * - We disable half multipliers if ACPI is used on A0 stepping CPUs.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dmi.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <asm/timer.h> /* Needed for recalibrate_cpu_khz() */
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#endif
+
+#include "powernow-k7.h"
+
+#define PFX "powernow: "
+
+
+struct psb_s {
+ u8 signature[10];
+ u8 tableversion;
+ u8 flags;
+ u16 settlingtime;
+ u8 reserved1;
+ u8 numpst;
+};
+
+struct pst_s {
+ u32 cpuid;
+ u8 fsbspeed;
+ u8 maxfid;
+ u8 startvid;
+ u8 numpstates;
+};
+
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+union powernow_acpi_control_t {
+ struct {
+ unsigned long fid:5,
+ vid:5,
+ sgtc:20,
+ res1:2;
+ } bits;
+ unsigned long val;
+};
+#endif
+
+/* divide by 1000 to get VCore voltage in V. */
+static const int mobile_vid_table[32] = {
+ 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650,
+ 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0,
+ 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100,
+ 1075, 1050, 1025, 1000, 975, 950, 925, 0,
+};
+
+/* divide by 10 to get FID. */
+static const int fid_codes[32] = {
+ 110, 115, 120, 125, 50, 55, 60, 65,
+ 70, 75, 80, 85, 90, 95, 100, 105,
+ 30, 190, 40, 200, 130, 135, 140, 210,
+ 150, 225, 160, 165, 170, 180, -1, -1,
+};
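+
+/* Both tables are indexed by the raw 5-bit codes from the FID/VID MSR.
+ * Throughout this driver a state's pair is packed into driver_data
+ * with the FID in the low byte and the VID in the high byte:
+ *
+ *	fid = driver_data & 0xFF;
+ *	vid = (driver_data & 0xFF00) >> 8;
+ *	freq = fsb * fid_codes[fid] / 10;	(kHz)
+ *	vcore = mobile_vid_table[vid];		(mV)
+ */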
+
+/* This parameter is used to force ACPI instead of the legacy method for
+ * configuration purposes.
+ */
+
+static int acpi_force;
+
+static struct cpufreq_frequency_table *powernow_table;
+
+static unsigned int can_scale_bus;
+static unsigned int can_scale_vid;
+static unsigned int minimum_speed = -1;
+static unsigned int maximum_speed;
+static unsigned int number_scales;
+static unsigned int fsb;
+static unsigned int latency;
+static char have_a0;
+
+static int check_fsb(unsigned int fsbspeed)
+{
+ int delta;
+ unsigned int f = fsb / 1000;
+
+ delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed;
+ return delta < 5;
+}
+
+static const struct x86_cpu_id powernow_k7_cpuids[] = {
+ { X86_VENDOR_AMD, 6, },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, powernow_k7_cpuids);
+
+static int check_powernow(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ unsigned int maxei, eax, ebx, ecx, edx;
+
+ if (!x86_match_cpu(powernow_k7_cpuids))
+ return 0;
+
+ /* Get maximum capabilities */
+ maxei = cpuid_eax(0x80000000);
+ if (maxei < 0x80000007) { /* Any powernow info ? */
+#ifdef MODULE
+ printk(KERN_INFO PFX "No powernow capabilities detected\n");
+#endif
+ return 0;
+ }
+
+ if ((c->x86_model == 6) && (c->x86_mask == 0)) {
+ printk(KERN_INFO PFX "K7 660[A0] core detected, "
+ "enabling errata workarounds\n");
+ have_a0 = 1;
+ }
+
+ cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+
+ /* Check we can actually do something before we say anything. */
+ if (!(edx & (1 << 1 | 1 << 2)))
+ return 0;
+
+ printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
+
+ if (edx & 1 << 1) {
+ printk("frequency");
+ can_scale_bus = 1;
+ }
+
+ if ((edx & (1 << 1 | 1 << 2)) == 0x6)
+ printk(" and ");
+
+ if (edx & 1 << 2) {
+ printk("voltage");
+ can_scale_vid = 1;
+ }
+
+ printk(".\n");
+ return 1;
+}
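+
+/* CPUID leaf 0x80000007 EDX advertises the PowerNow! capabilities
+ * decoded above: bit 1 set means the bus ratio (frequency) can be
+ * scaled, bit 2 set means the core voltage can be.
+ */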
+
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+static void invalidate_entry(unsigned int entry)
+{
+ powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+}
+#endif
+
+static int get_ranges(unsigned char *pst)
+{
+ unsigned int j;
+ unsigned int speed;
+ u8 fid, vid;
+
+ powernow_table = kzalloc((sizeof(*powernow_table) *
+ (number_scales + 1)), GFP_KERNEL);
+ if (!powernow_table)
+ return -ENOMEM;
+
+ for (j = 0 ; j < number_scales; j++) {
+ fid = *pst++;
+
+ powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
+ powernow_table[j].driver_data = fid; /* lower 8 bits */
+
+ speed = powernow_table[j].frequency;
+
+ if ((fid_codes[fid] % 10) == 5) {
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+ if (have_a0 == 1)
+ invalidate_entry(j);
+#endif
+ }
+
+ if (speed < minimum_speed)
+ minimum_speed = speed;
+ if (speed > maximum_speed)
+ maximum_speed = speed;
+
+ vid = *pst++;
+ powernow_table[j].driver_data |= (vid << 8); /* upper 8 bits */
+
+ pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
+ "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
+ fid_codes[fid] % 10, speed/1000, vid,
+ mobile_vid_table[vid]/1000,
+ mobile_vid_table[vid]%1000);
+ }
+ powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
+ powernow_table[number_scales].driver_data = 0;
+
+ return 0;
+}
+
+
+static void change_FID(int fid)
+{
+ union msr_fidvidctl fidvidctl;
+
+ rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ if (fidvidctl.bits.FID != fid) {
+ fidvidctl.bits.SGTC = latency;
+ fidvidctl.bits.FID = fid;
+ fidvidctl.bits.VIDC = 0;
+ fidvidctl.bits.FIDC = 1;
+ wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ }
+}
+
+
+static void change_VID(int vid)
+{
+ union msr_fidvidctl fidvidctl;
+
+ rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ if (fidvidctl.bits.VID != vid) {
+ fidvidctl.bits.SGTC = latency;
+ fidvidctl.bits.VID = vid;
+ fidvidctl.bits.FIDC = 0;
+ fidvidctl.bits.VIDC = 1;
+ wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val);
+ }
+}
+
+
+static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ u8 fid, vid;
+ struct cpufreq_freqs freqs;
+ union msr_fidvidstatus fidvidstatus;
+ int cfid;
+
+ /* The fid is in the lower 8 bits of the index we stored into
+ * the cpufreq frequency table in powernow_decode_bios;
+ * the vid is in the upper 8 bits.
+ */
+
+ fid = powernow_table[index].driver_data & 0xFF;
+ vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
+
+ rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ cfid = fidvidstatus.bits.CFID;
+ freqs.old = fsb * fid_codes[cfid] / 10;
+
+ freqs.new = powernow_table[index].frequency;
+
+ /* Now do the magic poking into the MSRs. */
+
+ if (have_a0 == 1) /* A0 errata 5 */
+ local_irq_disable();
+
+ if (freqs.old > freqs.new) {
+ /* Going down, so change FID first */
+ change_FID(fid);
+ change_VID(vid);
+ } else {
+ /* Going up, so change VID first */
+ change_VID(vid);
+ change_FID(fid);
+ }
+
+
+ if (have_a0 == 1)
+ local_irq_enable();
+
+ return 0;
+}
+
+
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+
+static struct acpi_processor_performance *acpi_processor_perf;
+
+static int powernow_acpi_init(void)
+{
+ int i;
+ int retval = 0;
+ union powernow_acpi_control_t pc;
+
+ if (acpi_processor_perf != NULL && powernow_table != NULL) {
+ retval = -EINVAL;
+ goto err0;
+ }
+
+ acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL);
+ if (!acpi_processor_perf) {
+ retval = -ENOMEM;
+ goto err0;
+ }
+
+ if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+ GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto err05;
+ }
+
+ if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
+ retval = -EIO;
+ goto err1;
+ }
+
+ if (acpi_processor_perf->control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ retval = -ENODEV;
+ goto err2;
+ }
+
+ if (acpi_processor_perf->status_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ retval = -ENODEV;
+ goto err2;
+ }
+
+ number_scales = acpi_processor_perf->state_count;
+
+ if (number_scales < 2) {
+ retval = -ENODEV;
+ goto err2;
+ }
+
+ powernow_table = kzalloc((sizeof(*powernow_table) *
+ (number_scales + 1)), GFP_KERNEL);
+ if (!powernow_table) {
+ retval = -ENOMEM;
+ goto err2;
+ }
+
+ pc.val = (unsigned long) acpi_processor_perf->states[0].control;
+ for (i = 0; i < number_scales; i++) {
+ u8 fid, vid;
+ struct acpi_processor_px *state =
+ &acpi_processor_perf->states[i];
+ unsigned int speed, speed_mhz;
+
+ pc.val = (unsigned long) state->control;
+ pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
+ i,
+ (u32) state->core_frequency,
+ (u32) state->power,
+ (u32) state->transition_latency,
+ (u32) state->control,
+ pc.bits.sgtc);
+
+ vid = pc.bits.vid;
+ fid = pc.bits.fid;
+
+ powernow_table[i].frequency = fsb * fid_codes[fid] / 10;
+ powernow_table[i].driver_data = fid; /* lower 8 bits */
+ powernow_table[i].driver_data |= (vid << 8); /* upper 8 bits */
+
+ speed = powernow_table[i].frequency;
+ speed_mhz = speed / 1000;
+
+ /* processor_perflib will multiply the MHz value by 1000 to
+ * get a kHz value (e.g. 1266000). However, powernow-k7 works
+ * with true kHz values (e.g. 1266768). To keep all powernow
+ * frequencies available, ACPI must not restrict them, so we
+ * round up the MHz value so that perflib's computed kHz value
+ * is greater than or equal to powernow's kHz value.
+ */
+ if (speed % 1000 > 0)
+ speed_mhz++;
+
+ if ((fid_codes[fid] % 10) == 5) {
+ if (have_a0 == 1)
+ invalidate_entry(i);
+ }
+
+ pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
+ "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
+ fid_codes[fid] % 10, speed_mhz, vid,
+ mobile_vid_table[vid]/1000,
+ mobile_vid_table[vid]%1000);
+
+ if (state->core_frequency != speed_mhz) {
+ state->core_frequency = speed_mhz;
+ pr_debug(" Corrected ACPI frequency to %d\n",
+ speed_mhz);
+ }
+
+ if (latency < pc.bits.sgtc)
+ latency = pc.bits.sgtc;
+
+ if (speed < minimum_speed)
+ minimum_speed = speed;
+ if (speed > maximum_speed)
+ maximum_speed = speed;
+ }
+
+ powernow_table[i].frequency = CPUFREQ_TABLE_END;
+ powernow_table[i].driver_data = 0;
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ return 0;
+
+err2:
+ acpi_processor_unregister_performance(acpi_processor_perf, 0);
+err1:
+ free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+err05:
+ kfree(acpi_processor_perf);
+err0:
+ printk(KERN_WARNING PFX "ACPI perflib can not be used on "
+ "this platform\n");
+ acpi_processor_perf = NULL;
+ return retval;
+}
+#else
+static int powernow_acpi_init(void)
+{
+ printk(KERN_INFO PFX "no support for ACPI processor found."
+ " Please recompile your kernel with ACPI processor\n");
+ return -EINVAL;
+}
+#endif
+
+static void print_pst_entry(struct pst_s *pst, unsigned int j)
+{
+ pr_debug("PST:%d (@%p)\n", j, pst);
+ pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
+ pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
+}
+
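+/*
+ * Scan the BIOS ROM area (0xC0000 - 0xFFFF0 physical, in 16-byte steps)
+ * for the "AMDK7PNOW!" PSB signature, then walk its PST tables looking
+ * for one matching this CPU's extended cpuid, FSB speed, maxfid and
+ * startvid.
+ */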
+static int powernow_decode_bios(int maxfid, int startvid)
+{
+ struct psb_s *psb;
+ struct pst_s *pst;
+ unsigned int i, j;
+ unsigned char *p;
+ unsigned int etuple;
+ unsigned int ret;
+
+ etuple = cpuid_eax(0x80000001);
+
+ for (i = 0xC0000; i < 0xffff0; i += 16) {
+
+ p = phys_to_virt(i);
+
+ if (memcmp(p, "AMDK7PNOW!", 10) == 0) {
+ pr_debug("Found PSB header at %p\n", p);
+ psb = (struct psb_s *) p;
+ pr_debug("Table version: 0x%x\n", psb->tableversion);
+ if (psb->tableversion != 0x12) {
+ printk(KERN_INFO PFX "Sorry, only v1.2 tables"
+ " supported right now\n");
+ return -ENODEV;
+ }
+
+ pr_debug("Flags: 0x%x\n", psb->flags);
+ if ((psb->flags & 1) == 0)
+ pr_debug("Mobile voltage regulator\n");
+ else
+ pr_debug("Desktop voltage regulator\n");
+
+ latency = psb->settlingtime;
+ if (latency < 100) {
+ printk(KERN_INFO PFX "BIOS set settling time "
+ "to %d microseconds. "
+ "Should be at least 100. "
+ "Correcting.\n", latency);
+ latency = 100;
+ }
+ pr_debug("Settling Time: %d microseconds.\n",
+ psb->settlingtime);
+ pr_debug("Has %d PST tables. (Only dumping ones "
+ "relevant to this CPU).\n",
+ psb->numpst);
+
+ p += sizeof(*psb);
+
+ pst = (struct pst_s *) p;
+
+ for (j = 0; j < psb->numpst; j++) {
+ pst = (struct pst_s *) p;
+ number_scales = pst->numpstates;
+
+ if ((etuple == pst->cpuid) &&
+ check_fsb(pst->fsbspeed) &&
+ (maxfid == pst->maxfid) &&
+ (startvid == pst->startvid)) {
+ print_pst_entry(pst, j);
+ p = (char *)pst + sizeof(*pst);
+ ret = get_ranges(p);
+ return ret;
+ } else {
+ unsigned int k;
+ p = (char *)pst + sizeof(*pst);
+ for (k = 0; k < number_scales; k++)
+ p += 2;
+ }
+ }
+ printk(KERN_INFO PFX "No PST tables match this cpuid "
+ "(0x%x)\n", etuple);
+ printk(KERN_INFO PFX "This is indicative of a broken "
+ "BIOS.\n");
+
+ return -EINVAL;
+ }
+ p++;
+ }
+
+ return -ENODEV;
+}
+
+
+/*
+ * We use the fact that the bus frequency is somehow a multiple of
+ * 100000/3 kHz, and compute SGTC according to this multiple.
+ * That way we match more closely how AMD intends this to work, and we
+ * get the same kind of behaviour already tested under the "well-known"
+ * other OS.
+ */
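+/*
+ * Illustrative numbers: with fsb = 100000 kHz, m = 100000 / 3333 = 30,
+ * which rounds to 3, so sgtc = 100 * 3 * latency / 3 = 100 * latency:
+ * the BIOS settling time in microseconds times the ~100 bus clocks per
+ * microsecond of a 100 MHz bus.
+ */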
+static int fixup_sgtc(void)
+{
+ unsigned int sgtc;
+ unsigned int m;
+
+ m = fsb / 3333;
+ if ((m % 10) >= 5)
+ m += 5;
+
+ m /= 10;
+
+ sgtc = 100 * m * latency;
+ sgtc = sgtc / 3;
+ if (sgtc > 0xfffff) {
+ printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
+ sgtc = 0xfffff;
+ }
+ return sgtc;
+}
+
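+/*
+ * Report the current frequency by reading CFID from the status MSR.
+ * Illustrative numbers: with fsb = 100000 kHz and fid_codes[cfid] = 110,
+ * this returns 100000 * 110 / 10 = 1100000 kHz (1.1 GHz).
+ */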
+static unsigned int powernow_get(unsigned int cpu)
+{
+ union msr_fidvidstatus fidvidstatus;
+ unsigned int cfid;
+
+ if (cpu)
+ return 0;
+ rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+ cfid = fidvidstatus.bits.CFID;
+
+ return fsb * fid_codes[cfid] / 10;
+}
+
+
+static int acer_cpufreq_pst(const struct dmi_system_id *d)
+{
+ printk(KERN_WARNING PFX
+ "%s laptop with broken PST tables in BIOS detected.\n",
+ d->ident);
+ printk(KERN_WARNING PFX
+ "You need to downgrade to 3A21 (09/09/2002), or try a newer "
+ "BIOS than 3A71 (01/20/2003)\n");
+ printk(KERN_WARNING PFX
+ "cpufreq scaling has been disabled as a result of this.\n");
+ return 0;
+}
+
+/*
+ * Some Athlon laptops have severely broken PST tables.
+ * A BIOS update is all that can save them.
+ * Mention this, and disable cpufreq.
+ */
+static struct dmi_system_id powernow_dmi_table[] = {
+ {
+ .callback = acer_cpufreq_pst,
+ .ident = "Acer Aspire",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"),
+ DMI_MATCH(DMI_BIOS_VERSION, "3A71"),
+ },
+ },
+ { }
+};
+
+static int powernow_cpu_init(struct cpufreq_policy *policy)
+{
+ union msr_fidvidstatus fidvidstatus;
+ int result;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
+
+ recalibrate_cpu_khz();
+
+ fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
+ if (!fsb) {
+ printk(KERN_WARNING PFX "can not determine bus frequency\n");
+ return -EINVAL;
+ }
+ pr_debug("FSB: %3dMHz\n", fsb/1000);
+
+ if (dmi_check_system(powernow_dmi_table) || acpi_force) {
+ printk(KERN_INFO PFX "PSB/PST known to be broken. "
+ "Trying ACPI instead\n");
+ result = powernow_acpi_init();
+ } else {
+ result = powernow_decode_bios(fidvidstatus.bits.MFID,
+ fidvidstatus.bits.SVID);
+ if (result) {
+ printk(KERN_INFO PFX "Trying ACPI perflib\n");
+ maximum_speed = 0;
+ minimum_speed = -1;
+ latency = 0;
+ result = powernow_acpi_init();
+ if (result) {
+ printk(KERN_INFO PFX
+ "ACPI and legacy methods failed\n");
+ }
+ } else {
+ /* SGTC use the bus clock as timer */
+ latency = fixup_sgtc();
+ printk(KERN_INFO PFX "SGTC: %d\n", latency);
+ }
+ }
+
+ if (result)
+ return result;
+
+ printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
+ minimum_speed/1000, maximum_speed/1000);
+
+ policy->cpuinfo.transition_latency =
+ cpufreq_scale(2000000UL, fsb, latency);
+
+ return cpufreq_table_validate_and_show(policy, powernow_table);
+}
+
+static int powernow_cpu_exit(struct cpufreq_policy *policy)
+{
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+ if (acpi_processor_perf) {
+ acpi_processor_unregister_performance(acpi_processor_perf, 0);
+ free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+ kfree(acpi_processor_perf);
+ }
+#endif
+
+ kfree(powernow_table);
+ return 0;
+}
+
+static struct cpufreq_driver powernow_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernow_target,
+ .get = powernow_get,
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
+ .bios_limit = acpi_processor_get_bios_limit,
+#endif
+ .init = powernow_cpu_init,
+ .exit = powernow_cpu_exit,
+ .name = "powernow-k7",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init powernow_init(void)
+{
+ if (check_powernow() == 0)
+ return -ENODEV;
+ return cpufreq_register_driver(&powernow_driver);
+}
+
+
+static void __exit powernow_exit(void)
+{
+ cpufreq_unregister_driver(&powernow_driver);
+}
+
+module_param(acpi_force, int, 0444);
+MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
+
+MODULE_AUTHOR("Dave Jones");
+MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
+MODULE_LICENSE("GPL");
+
+late_initcall(powernow_init);
+module_exit(powernow_exit);
+
diff --git a/kernel/drivers/cpufreq/powernow-k7.h b/kernel/drivers/cpufreq/powernow-k7.h
new file mode 100644
index 000000000..35fb4eaf6
--- /dev/null
+++ b/kernel/drivers/cpufreq/powernow-k7.h
@@ -0,0 +1,43 @@
+/*
+ * (C) 2003 Dave Jones.
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * AMD-specific information
+ *
+ */
+
+union msr_fidvidctl {
+ struct {
+ unsigned FID:5, // 4:0
+ reserved1:3, // 7:5
+ VID:5, // 12:8
+ reserved2:3, // 15:13
+ FIDC:1, // 16
+ VIDC:1, // 17
+ reserved3:2, // 19:18
+ FIDCHGRATIO:1, // 20
+ reserved4:11, // 31:21
+ SGTC:20, // 51:32
+ reserved5:12; // 63:52
+ } bits;
+ unsigned long long val;
+};
+
+union msr_fidvidstatus {
+ struct {
+ unsigned CFID:5, // 4:0
+ reserved1:3, // 7:5
+ SFID:5, // 12:8
+ reserved2:3, // 15:13
+ MFID:5, // 20:16
+ reserved3:11, // 31:21
+ CVID:5, // 36:32
+ reserved4:3, // 39:37
+ SVID:5, // 44:40
+ reserved5:3, // 47:45
+ MVID:5, // 52:48
+ reserved6:11; // 63:53
+ } bits;
+ unsigned long long val;
+};
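+
+/*
+ * Sketch of how the driver uses these unions (illustrative; it mirrors
+ * the calls in powernow-k7.c): read the 64-bit MSR once, then pick out
+ * fields by name instead of open-coding shifts and masks:
+ *
+ * union msr_fidvidstatus st;
+ *
+ * rdmsrl(MSR_K7_FID_VID_STATUS, st.val);
+ * freq_khz = fsb * fid_codes[st.bits.CFID] / 10;
+ */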
diff --git a/kernel/drivers/cpufreq/powernow-k8.c b/kernel/drivers/cpufreq/powernow-k8.c
new file mode 100644
index 000000000..f9ce7e4bf
--- /dev/null
+++ b/kernel/drivers/cpufreq/powernow-k8.c
@@ -0,0 +1,1249 @@
+/*
+ * (c) 2003-2012 Advanced Micro Devices, Inc.
+ * Your use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+ *
+ * Maintainer:
+ * Andreas Herrmann <herrmann.der.user@googlemail.com>
+ *
+ * Based on the powernow-k7.c module written by Dave Jones.
+ * (C) 2003 Dave Jones on behalf of SuSE Labs
+ * (C) 2004 Dominik Brodowski <linux@brodo.de>
+ * (C) 2004 Pavel Machek <pavel@ucw.cz>
+ * Licensed under the terms of the GNU GPL License version 2.
+ * Based upon datasheets & sample CPUs kindly provided by AMD.
+ *
+ * Valuable input gratefully received from Dave Jones, Pavel Machek,
+ * Dominik Brodowski, Jacob Shin, and others.
+ * Originally developed by Paul Devriendt.
+ *
+ * Processor information obtained from Chapter 9 (Power and Thermal
+ * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
+ * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
+ * Power Management" in BKDGs for newer AMD CPU families.
+ *
+ * Tables for specific CPUs can be inferred from AMD's processor
+ * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+
+#include <linux/acpi.h>
+#include <linux/mutex.h>
+#include <acpi/processor.h>
+
+#define VERSION "version 2.20.00"
+#include "powernow-k8.h"
+
+/* serialize freq changes */
+static DEFINE_MUTEX(fidvid_mutex);
+
+static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
+
+static struct cpufreq_driver cpufreq_amd64_driver;
+
+#ifndef CONFIG_SMP
+static inline const struct cpumask *cpu_core_mask(int cpu)
+{
+ return cpumask_of(0);
+}
+#endif
+
+/* Return a frequency in MHz, given an input fid */
+static u32 find_freq_from_fid(u32 fid)
+{
+ return 800 + (fid * 100);
+}
+
+/* Return a frequency in kHz, given an input fid */
+static u32 find_khz_freq_from_fid(u32 fid)
+{
+ return 1000 * find_freq_from_fid(fid);
+}
+
+/* Return the vco fid for an input fid
+ *
+ * Each "low" fid has a corresponding "high" fid, and you can get to "low"
+ * fids only from the corresponding high fids. This returns the "high" fid
+ * corresponding to a "low" one.
+ */
+static u32 convert_fid_to_vco_fid(u32 fid)
+{
+ if (fid < HI_FID_TABLE_BOTTOM)
+ return 8 + (2 * fid);
+ else
+ return fid;
+}
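+
+/*
+ * Worked example (illustrative): fid 0x4 encodes 800 + 4 * 100 = 1200 MHz;
+ * since 4 < HI_FID_TABLE_BOTTOM its vco fid is 8 + 2 * 4 = 0x10, whereas
+ * fid 0xa is already in the high table and maps to itself.
+ */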
+
+/*
+ * Return 1 if the pending bit is set. Unless we just instructed the processor
+ * to transition to a new state, seeing this bit set is really bad news.
+ */
+static int pending_bit_stuck(void)
+{
+ u32 lo, hi;
+
+ rdmsr(MSR_FIDVID_STATUS, lo, hi);
+ return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
+}
+
+/*
+ * Update the global current fid / vid values from the status msr.
+ * Returns 1 on error.
+ */
+static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
+{
+ u32 lo, hi;
+ u32 i = 0;
+
+ do {
+ if (i++ > 10000) {
+ pr_debug("detected change pending stuck\n");
+ return 1;
+ }
+ rdmsr(MSR_FIDVID_STATUS, lo, hi);
+ } while (lo & MSR_S_LO_CHANGE_PENDING);
+
+ data->currvid = hi & MSR_S_HI_CURRENT_VID;
+ data->currfid = lo & MSR_S_LO_CURRENT_FID;
+
+ return 0;
+}
+
+/* the isochronous relief time */
+static void count_off_irt(struct powernow_k8_data *data)
+{
+ udelay((1 << data->irt) * 10);
+ return;
+}
+
+/* the voltage stabilization time */
+static void count_off_vst(struct powernow_k8_data *data)
+{
+ udelay(data->vstable * VST_UNITS_20US);
+ return;
+}
+
+/* need to init the control msr to a safe value (for each cpu) */
+static void fidvid_msr_init(void)
+{
+ u32 lo, hi;
+ u8 fid, vid;
+
+ rdmsr(MSR_FIDVID_STATUS, lo, hi);
+ vid = hi & MSR_S_HI_CURRENT_VID;
+ fid = lo & MSR_S_LO_CURRENT_FID;
+ lo = fid | (vid << MSR_C_LO_VID_SHIFT);
+ hi = MSR_C_HI_STP_GNT_BENIGN;
+ pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
+ wrmsr(MSR_FIDVID_CTL, lo, hi);
+}
+
+/* write the new fid value along with the other control fields to the msr */
+static int write_new_fid(struct powernow_k8_data *data, u32 fid)
+{
+ u32 lo;
+ u32 savevid = data->currvid;
+ u32 i = 0;
+
+ if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
+ pr_err("internal error - overflow on fid write\n");
+ return 1;
+ }
+
+ lo = fid;
+ lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
+ lo |= MSR_C_LO_INIT_FID_VID;
+
+ pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
+ fid, lo, data->plllock * PLL_LOCK_CONVERSION);
+
+ do {
+ wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
+ if (i++ > 100) {
+ pr_err("Hardware error - pending bit very stuck - no further pstate changes possible\n");
+ return 1;
+ }
+ } while (query_current_values_with_pending_wait(data));
+
+ count_off_irt(data);
+
+ if (savevid != data->currvid) {
+ pr_err("vid change on fid trans, old 0x%x, new 0x%x\n",
+ savevid, data->currvid);
+ return 1;
+ }
+
+ if (fid != data->currfid) {
+ pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid,
+ data->currfid);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Write a new vid to the hardware */
+static int write_new_vid(struct powernow_k8_data *data, u32 vid)
+{
+ u32 lo;
+ u32 savefid = data->currfid;
+ int i = 0;
+
+ if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
+ pr_err("internal error - overflow on vid write\n");
+ return 1;
+ }
+
+ lo = data->currfid;
+ lo |= (vid << MSR_C_LO_VID_SHIFT);
+ lo |= MSR_C_LO_INIT_FID_VID;
+
+ pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
+ vid, lo, STOP_GRANT_5NS);
+
+ do {
+ wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
+ if (i++ > 100) {
+ pr_err("internal error - pending bit very stuck - no further pstate changes possible\n");
+ return 1;
+ }
+ } while (query_current_values_with_pending_wait(data));
+
+ if (savefid != data->currfid) {
+ pr_err("fid changed on vid trans, old 0x%x new 0x%x\n",
+ savefid, data->currfid);
+ return 1;
+ }
+
+ if (vid != data->currvid) {
+ pr_err("vid trans failed, vid 0x%x, curr 0x%x\n",
+ vid, data->currvid);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Step the vid toward reqvid, decreasing it by at most 'step' per call.
+ * Decreasing vid codes represent increasing voltages:
+ * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
+ */
+static int decrease_vid_code_by_step(struct powernow_k8_data *data,
+ u32 reqvid, u32 step)
+{
+ if ((data->currvid - reqvid) > step)
+ reqvid = data->currvid - step;
+
+ if (write_new_vid(data, reqvid))
+ return 1;
+
+ count_off_vst(data);
+
+ return 0;
+}
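+
+/*
+ * Example (illustrative): with currvid 0x12, reqvid 0x08 and step 0x02,
+ * the difference (0x0a) exceeds the step, so one call writes vid 0x10;
+ * repeated calls walk the vid down (and the voltage up) step by step.
+ */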
+
+/* Change Opteron/Athlon64 fid and vid through the three phases below. */
+static int transition_fid_vid(struct powernow_k8_data *data,
+ u32 reqfid, u32 reqvid)
+{
+ if (core_voltage_pre_transition(data, reqvid, reqfid))
+ return 1;
+
+ if (core_frequency_transition(data, reqfid))
+ return 1;
+
+ if (core_voltage_post_transition(data, reqvid))
+ return 1;
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
+ pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n",
+ smp_processor_id(),
+ reqfid, reqvid, data->currfid, data->currvid);
+ return 1;
+ }
+
+ pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
+ smp_processor_id(), data->currfid, data->currvid);
+
+ return 0;
+}
+
+/* Phase 1 - core voltage transition ... setup voltage */
+static int core_voltage_pre_transition(struct powernow_k8_data *data,
+ u32 reqvid, u32 reqfid)
+{
+ u32 rvosteps = data->rvo;
+ u32 savefid = data->currfid;
+ u32 maxvid, lo, rvomult = 1;
+
+ pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, reqvid 0x%x, rvo 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid, reqvid, data->rvo);
+
+ if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
+ rvomult = 2;
+ rvosteps *= rvomult;
+ rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
+ maxvid = 0x1f & (maxvid >> 16);
+ pr_debug("ph1 maxvid=0x%x\n", maxvid);
+ if (reqvid < maxvid) /* lower numbers are higher voltages */
+ reqvid = maxvid;
+
+ while (data->currvid > reqvid) {
+ pr_debug("ph1: curr 0x%x, req vid 0x%x\n",
+ data->currvid, reqvid);
+ if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))
+ return 1;
+ }
+
+ while ((rvosteps > 0) &&
+ ((rvomult * data->rvo + data->currvid) > reqvid)) {
+ if (data->currvid == maxvid) {
+ rvosteps = 0;
+ } else {
+ pr_debug("ph1: changing vid for rvo, req 0x%x\n",
+ data->currvid - 1);
+ if (decrease_vid_code_by_step(data, data->currvid-1, 1))
+ return 1;
+ rvosteps--;
+ }
+ }
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if (savefid != data->currfid) {
+ pr_err("ph1 err, currfid changed 0x%x\n", data->currfid);
+ return 1;
+ }
+
+ pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n",
+ data->currfid, data->currvid);
+
+ return 0;
+}
+
+/* Phase 2 - core frequency transition */
+static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
+{
+ u32 vcoreqfid, vcocurrfid, vcofiddiff;
+ u32 fid_interval, savevid = data->currvid;
+
+ if (data->currfid == reqfid) {
+ pr_err("ph2 null fid transition 0x%x\n", data->currfid);
+ return 0;
+ }
+
+ pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, reqfid 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid, reqfid);
+
+ vcoreqfid = convert_fid_to_vco_fid(reqfid);
+ vcocurrfid = convert_fid_to_vco_fid(data->currfid);
+ vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
+ : vcoreqfid - vcocurrfid;
+
+ if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
+ vcofiddiff = 0;
+
+ while (vcofiddiff > 2) {
+ (data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);
+
+ if (reqfid > data->currfid) {
+ if (data->currfid > LO_FID_TABLE_TOP) {
+ if (write_new_fid(data,
+ data->currfid + fid_interval))
+ return 1;
+ } else {
+ if (write_new_fid
+ (data,
+ 2 + convert_fid_to_vco_fid(data->currfid)))
+ return 1;
+ }
+ } else {
+ if (write_new_fid(data, data->currfid - fid_interval))
+ return 1;
+ }
+
+ vcocurrfid = convert_fid_to_vco_fid(data->currfid);
+ vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
+ : vcoreqfid - vcocurrfid;
+ }
+
+ if (write_new_fid(data, reqfid))
+ return 1;
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if (data->currfid != reqfid) {
+ pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n",
+ data->currfid, reqfid);
+ return 1;
+ }
+
+ if (savevid != data->currvid) {
+ pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n",
+ savevid, data->currvid);
+ return 1;
+ }
+
+ pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n",
+ data->currfid, data->currvid);
+
+ return 0;
+}
+
+/* Phase 3 - core voltage transition flow ... jump to the final vid. */
+static int core_voltage_post_transition(struct powernow_k8_data *data,
+ u32 reqvid)
+{
+ u32 savefid = data->currfid;
+ u32 savereqvid = reqvid;
+
+ pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
+ smp_processor_id(),
+ data->currfid, data->currvid);
+
+ if (reqvid != data->currvid) {
+ if (write_new_vid(data, reqvid))
+ return 1;
+
+ if (savefid != data->currfid) {
+ pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n",
+ savefid, data->currfid);
+ return 1;
+ }
+
+ if (data->currvid != reqvid) {
+ pr_err("ph3: failed vid transition\n, req 0x%x, curr 0x%x",
+ reqvid, data->currvid);
+ return 1;
+ }
+ }
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if (savereqvid != data->currvid) {
+ pr_debug("ph3 failed, currvid 0x%x\n", data->currvid);
+ return 1;
+ }
+
+ if (savefid != data->currfid) {
+ pr_debug("ph3 failed, currfid changed 0x%x\n",
+ data->currfid);
+ return 1;
+ }
+
+ pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n",
+ data->currfid, data->currvid);
+
+ return 0;
+}
+
+static const struct x86_cpu_id powernow_k8_ids[] = {
+ /* IO based frequency switching */
+ { X86_VENDOR_AMD, 0xf },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
+
+static void check_supported_cpu(void *_rc)
+{
+ u32 eax, ebx, ecx, edx;
+ int *rc = _rc;
+
+ *rc = -ENODEV;
+
+ eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+
+ if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
+ if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
+ ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
+ pr_info("Processor cpuid %x not supported\n", eax);
+ return;
+ }
+
+ eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
+ if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
+ pr_info("No frequency change capabilities detected\n");
+ return;
+ }
+
+ cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
+ if ((edx & P_STATE_TRANSITION_CAPABLE)
+ != P_STATE_TRANSITION_CAPABLE) {
+ pr_info("Power state transitions not supported\n");
+ return;
+ }
+ *rc = 0;
+ }
+}
+
+static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
+ u8 maxvid)
+{
+ unsigned int j;
+ u8 lastfid = 0xff;
+
+ for (j = 0; j < data->numps; j++) {
+ if (pst[j].vid > LEAST_VID) {
+ pr_err(FW_BUG "vid %d invalid : 0x%x\n", j,
+ pst[j].vid);
+ return -EINVAL;
+ }
+ if (pst[j].vid < data->rvo) {
+ /* vid + rvo >= 0 */
+ pr_err(FW_BUG "0 vid exceeded with pstate %d\n", j);
+ return -ENODEV;
+ }
+ if (pst[j].vid < maxvid + data->rvo) {
+ /* vid + rvo >= maxvid */
+ pr_err(FW_BUG "maxvid exceeded with pstate %d\n", j);
+ return -ENODEV;
+ }
+ if (pst[j].fid > MAX_FID) {
+ pr_err(FW_BUG "maxfid exceeded with pstate %d\n", j);
+ return -ENODEV;
+ }
+ if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
+ /* Only first fid is allowed to be in "low" range */
+ pr_err(FW_BUG "two low fids - %d : 0x%x\n", j,
+ pst[j].fid);
+ return -EINVAL;
+ }
+ if (pst[j].fid < lastfid)
+ lastfid = pst[j].fid;
+ }
+ if (lastfid & 1) {
+ pr_err(FW_BUG "lastfid invalid\n");
+ return -EINVAL;
+ }
+ if (lastfid > LO_FID_TABLE_TOP)
+ pr_info(FW_BUG "first fid not from lo freq table\n");
+
+ return 0;
+}
+
+static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
+ unsigned int entry)
+{
+ powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+}
+
+static void print_basics(struct powernow_k8_data *data)
+{
+ int j;
+ for (j = 0; j < data->numps; j++) {
+ if (data->powernow_table[j].frequency !=
+ CPUFREQ_ENTRY_INVALID) {
+ pr_info("fid 0x%x (%d MHz), vid 0x%x\n",
+ data->powernow_table[j].driver_data & 0xff,
+ data->powernow_table[j].frequency/1000,
+ data->powernow_table[j].driver_data >> 8);
+ }
+ }
+ if (data->batps)
+ pr_info("Only %d pstates on battery\n", data->batps);
+}
+
+static int fill_powernow_table(struct powernow_k8_data *data,
+ struct pst_s *pst, u8 maxvid)
+{
+ struct cpufreq_frequency_table *powernow_table;
+ unsigned int j;
+
+ if (data->batps) {
+ /* use ACPI support to get full speed on mains power */
+ pr_warn("Only %d pstates usable (use ACPI driver for full range\n",
+ data->batps);
+ data->numps = data->batps;
+ }
+
+ for (j = 1; j < data->numps; j++) {
+ if (pst[j-1].fid >= pst[j].fid) {
+ pr_err("PST out of sequence\n");
+ return -EINVAL;
+ }
+ }
+
+ if (data->numps < 2) {
+ pr_err("no p states to transition\n");
+ return -ENODEV;
+ }
+
+ if (check_pst_table(data, pst, maxvid))
+ return -EINVAL;
+
+ powernow_table = kzalloc((sizeof(*powernow_table)
+ * (data->numps + 1)), GFP_KERNEL);
+ if (!powernow_table) {
+ pr_err("powernow_table memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < data->numps; j++) {
+ int freq;
+ powernow_table[j].driver_data = pst[j].fid; /* lower 8 bits */
+ powernow_table[j].driver_data |= (pst[j].vid << 8); /* upper 8 bits */
+ freq = find_khz_freq_from_fid(pst[j].fid);
+ powernow_table[j].frequency = freq;
+ }
+ powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
+ powernow_table[data->numps].driver_data = 0;
+
+ if (query_current_values_with_pending_wait(data)) {
+ kfree(powernow_table);
+ return -EIO;
+ }
+
+ pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
+ data->powernow_table = powernow_table;
+ if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
+ print_basics(data);
+
+ for (j = 0; j < data->numps; j++)
+ if ((pst[j].fid == data->currfid) &&
+ (pst[j].vid == data->currvid))
+ return 0;
+
+ pr_debug("currfid/vid do not match PST, ignoring\n");
+ return 0;
+}
+
+/* Find and validate the PSB/PST table in BIOS. */
+static int find_psb_table(struct powernow_k8_data *data)
+{
+ struct psb_s *psb;
+ unsigned int i;
+ u32 mvs;
+ u8 maxvid;
+ u32 cpst = 0;
+ u32 thiscpuid;
+
+ for (i = 0xc0000; i < 0xffff0; i += 0x10) {
+ /* Scan BIOS looking for the signature. */
+ /* It can not be at ffff0 - it is too big. */
+
+ psb = phys_to_virt(i);
+ if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)
+ continue;
+
+ pr_debug("found PSB header at 0x%p\n", psb);
+
+ pr_debug("table vers: 0x%x\n", psb->tableversion);
+ if (psb->tableversion != PSB_VERSION_1_4) {
+ pr_err(FW_BUG "PSB table is not v1.4\n");
+ return -ENODEV;
+ }
+
+ pr_debug("flags: 0x%x\n", psb->flags1);
+ if (psb->flags1) {
+ pr_err(FW_BUG "unknown flags\n");
+ return -ENODEV;
+ }
+
+ data->vstable = psb->vstable;
+ pr_debug("voltage stabilization time: %d(*20us)\n",
+ data->vstable);
+
+ pr_debug("flags2: 0x%x\n", psb->flags2);
+ data->rvo = psb->flags2 & 3;
+ data->irt = ((psb->flags2) >> 2) & 3;
+ mvs = ((psb->flags2) >> 4) & 3;
+ data->vidmvs = 1 << mvs;
+ data->batps = ((psb->flags2) >> 6) & 3;
+
+ pr_debug("ramp voltage offset: %d\n", data->rvo);
+ pr_debug("isochronous relief time: %d\n", data->irt);
+ pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);
+
+ pr_debug("numpst: 0x%x\n", psb->num_tables);
+ cpst = psb->num_tables;
+ if ((psb->cpuid == 0x00000fc0) ||
+ (psb->cpuid == 0x00000fe0)) {
+ thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+ if ((thiscpuid == 0x00000fc0) ||
+ (thiscpuid == 0x00000fe0))
+ cpst = 1;
+ }
+ if (cpst != 1) {
+ pr_err(FW_BUG "numpst must be 1\n");
+ return -ENODEV;
+ }
+
+ data->plllock = psb->plllocktime;
+ pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
+ pr_debug("maxfid: 0x%x\n", psb->maxfid);
+ pr_debug("maxvid: 0x%x\n", psb->maxvid);
+ maxvid = psb->maxvid;
+
+ data->numps = psb->numps;
+ pr_debug("numpstates: 0x%x\n", data->numps);
+ return fill_powernow_table(data,
+ (struct pst_s *)(psb+1), maxvid);
+ }
+ /*
+ * If you see this message, complain to BIOS manufacturer. If
+ * he tells you "we do not support Linux" or some similar
+ * nonsense, remember that Windows 2000 uses the same legacy
+ * mechanism that the old Linux PSB driver uses. Tell them it
+ * is broken with Windows 2000.
+ *
+ * The reference to the AMD documentation is chapter 9 in the
+ * BIOS and Kernel Developer's Guide, which is available on
+ * www.amd.com
+ */
+ pr_err(FW_BUG "No PSB or ACPI _PSS objects\n");
+ pr_err("Make sure that your BIOS is up to date and Cool'N'Quiet support is enabled in BIOS setup\n");
+ return -ENODEV;
+}
+
+static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
+ unsigned int index)
+{
+ u64 control;
+
+ if (!data->acpi_data.state_count)
+ return;
+
+ control = data->acpi_data.states[index].control;
+ data->irt = (control >> IRT_SHIFT) & IRT_MASK;
+ data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
+ data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+ data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
+ data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
+ data->vstable = (control >> VST_SHIFT) & VST_MASK;
+}
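+
+/*
+ * Worked example (illustrative): a control value of 0x4280950c decodes
+ * to irt = 1, rvo = 0, exttype = 0, plllock = 0x28, mvs = 0 (vidmvs = 1)
+ * and vstable = 0x12, with vid 0x14 and fid 0x0c (2000 MHz) recoverable
+ * via the VID/FID shifts and masks defined in powernow-k8.h.
+ */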
+
+static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
+{
+ struct cpufreq_frequency_table *powernow_table;
+ int ret_val = -ENODEV;
+ u64 control, status;
+
+ if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
+ pr_debug("register performance failed: bad ACPI data\n");
+ return -EIO;
+ }
+
+ /* verify the data contained in the ACPI structures */
+ if (data->acpi_data.state_count <= 1) {
+ pr_debug("No ACPI P-States\n");
+ goto err_out;
+ }
+
+ control = data->acpi_data.control_register.space_id;
+ status = data->acpi_data.status_register.space_id;
+
+ if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+ (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ pr_debug("Invalid control/status registers (%llx - %llx)\n",
+ control, status);
+ goto err_out;
+ }
+
+ /* fill in data->powernow_table */
+ powernow_table = kzalloc((sizeof(*powernow_table)
+ * (data->acpi_data.state_count + 1)), GFP_KERNEL);
+ if (!powernow_table) {
+ pr_debug("powernow_table memory alloc failure\n");
+ goto err_out;
+ }
+
+ /* fill in data */
+ data->numps = data->acpi_data.state_count;
+ powernow_k8_acpi_pst_values(data, 0);
+
+ ret_val = fill_powernow_table_fidvid(data, powernow_table);
+ if (ret_val)
+ goto err_out_mem;
+
+ powernow_table[data->acpi_data.state_count].frequency =
+ CPUFREQ_TABLE_END;
+ data->powernow_table = powernow_table;
+
+ if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
+ print_basics(data);
+
+ /* notify BIOS that we exist */
+ acpi_processor_notify_smm(THIS_MODULE);
+
+ if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+ pr_err("unable to alloc powernow_k8_data cpumask\n");
+ ret_val = -ENOMEM;
+ goto err_out_mem;
+ }
+
+ return 0;
+
+err_out_mem:
+ kfree(powernow_table);
+
+err_out:
+ acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+
+ /* data->acpi_data.state_count informs us at ->exit()
+ * whether ACPI was used */
+ data->acpi_data.state_count = 0;
+
+ return ret_val;
+}
+
+static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
+ struct cpufreq_frequency_table *powernow_table)
+{
+ int i;
+
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ u32 fid;
+ u32 vid;
+ u32 freq, index;
+ u64 status, control;
+
+ if (data->exttype) {
+ status = data->acpi_data.states[i].status;
+ fid = status & EXT_FID_MASK;
+ vid = (status >> VID_SHIFT) & EXT_VID_MASK;
+ } else {
+ control = data->acpi_data.states[i].control;
+ fid = control & FID_MASK;
+ vid = (control >> VID_SHIFT) & VID_MASK;
+ }
+
+ pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
+
+ index = fid | (vid<<8);
+ powernow_table[i].driver_data = index;
+
+ freq = find_khz_freq_from_fid(fid);
+ powernow_table[i].frequency = freq;
+
+ /* verify frequency is OK */
+ if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
+ pr_debug("invalid freq %u kHz, ignoring\n", freq);
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+
+ /* verify voltage is OK -
+ * BIOSs are using "off" to indicate invalid */
+ if (vid == VID_OFF) {
+ pr_debug("invalid vid %u, ignoring\n", vid);
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+
+ if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
+ pr_info("invalid freq entries %u kHz vs. %u kHz\n",
+ freq, (unsigned int)
+ (data->acpi_data.states[i].core_frequency
+ * 1000));
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+ }
+ return 0;
+}
+
+static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
+{
+ if (data->acpi_data.state_count)
+ acpi_processor_unregister_performance(&data->acpi_data,
+ data->cpu);
+ free_cpumask_var(data->acpi_data.shared_cpu_map);
+}
+
+static int get_transition_latency(struct powernow_k8_data *data)
+{
+ int max_latency = 0;
+ int i;
+ for (i = 0; i < data->acpi_data.state_count; i++) {
+ int cur_latency = data->acpi_data.states[i].transition_latency
+ + data->acpi_data.states[i].bus_master_latency;
+ if (cur_latency > max_latency)
+ max_latency = cur_latency;
+ }
+ if (max_latency == 0) {
+ pr_err(FW_WARN "Invalid zero transition latency\n");
+ max_latency = 1;
+ }
+ /* value in usecs, needs to be in nanoseconds */
+ return 1000 * max_latency;
+}
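+
+/*
+ * Example (illustrative): _PSS states with transition + bus-master
+ * latencies of 10 + 2 us and 8 + 1 us give a maximum of 12 us, which is
+ * reported to cpufreq as 12000 ns.
+ */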
+
+/* Take a frequency, and issue the fid/vid transition command */
+static int transition_frequency_fidvid(struct powernow_k8_data *data,
+ unsigned int index)
+{
+ struct cpufreq_policy *policy;
+ u32 fid = 0;
+ u32 vid = 0;
+ int res;
+ struct cpufreq_freqs freqs;
+
+ pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
+
+ /* fid/vid correctness check for k8 */
+ /* The fid is in the lower 8 bits of the index we stored into
+ * the cpufreq frequency table in find_psb_table; the vid is
+ * in the upper 8 bits.
+ */
+ fid = data->powernow_table[index].driver_data & 0xFF;
+ vid = (data->powernow_table[index].driver_data & 0xFF00) >> 8;
+
+ pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);
+
+ if (query_current_values_with_pending_wait(data))
+ return 1;
+
+ if ((data->currvid == vid) && (data->currfid == fid)) {
+ pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n",
+ fid, vid);
+ return 0;
+ }
+
+ pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n",
+ smp_processor_id(), fid, vid);
+ freqs.old = find_khz_freq_from_fid(data->currfid);
+ freqs.new = find_khz_freq_from_fid(fid);
+
+ policy = cpufreq_cpu_get(smp_processor_id());
+ cpufreq_cpu_put(policy);
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ res = transition_fid_vid(data, fid, vid);
+ cpufreq_freq_transition_end(policy, &freqs, res);
+
+ return res;
+}
+
+struct powernowk8_target_arg {
+ struct cpufreq_policy *pol;
+ unsigned newstate;
+};
+
+static long powernowk8_target_fn(void *arg)
+{
+ struct powernowk8_target_arg *pta = arg;
+ struct cpufreq_policy *pol = pta->pol;
+ unsigned newstate = pta->newstate;
+ struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ u32 checkfid;
+ u32 checkvid;
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+
+ checkfid = data->currfid;
+ checkvid = data->currvid;
+
+ if (pending_bit_stuck()) {
+ pr_err("failing targ, change pending bit set\n");
+ return -EIO;
+ }
+
+ pr_debug("targ: cpu %d, %d kHz, min %d, max %d\n",
+ pol->cpu, data->powernow_table[newstate].frequency, pol->min,
+ pol->max);
+
+ if (query_current_values_with_pending_wait(data))
+ return -EIO;
+
+ pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+
+ if ((checkvid != data->currvid) ||
+ (checkfid != data->currfid)) {
+ pr_info("error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+ checkfid, data->currfid,
+ checkvid, data->currvid);
+ }
+
+ mutex_lock(&fidvid_mutex);
+
+ powernow_k8_acpi_pst_values(data, newstate);
+
+ ret = transition_frequency_fidvid(data, newstate);
+
+ if (ret) {
+ pr_err("transition frequency failed\n");
+ mutex_unlock(&fidvid_mutex);
+ return 1;
+ }
+ mutex_unlock(&fidvid_mutex);
+
+ pol->cur = find_khz_freq_from_fid(data->currfid);
+
+ return 0;
+}
+
+/*
+ * Driver entry point to switch to the target frequency. Runs on pol->cpu
+ * via work_on_cpu(), since the FID/VID MSRs must be accessed on the CPU
+ * being reprogrammed.
+ */
+static int powernowk8_target(struct cpufreq_policy *pol, unsigned index)
+{
+ struct powernowk8_target_arg pta = { .pol = pol, .newstate = index };
+
+ return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
+}
+
+struct init_on_cpu {
+ struct powernow_k8_data *data;
+ int rc;
+};
+
+static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
+{
+ struct init_on_cpu *init_on_cpu = _init_on_cpu;
+
+ if (pending_bit_stuck()) {
+ pr_err("failing init, change pending bit set\n");
+ init_on_cpu->rc = -ENODEV;
+ return;
+ }
+
+ if (query_current_values_with_pending_wait(init_on_cpu->data)) {
+ init_on_cpu->rc = -ENODEV;
+ return;
+ }
+
+ fidvid_msr_init();
+
+ init_on_cpu->rc = 0;
+}
+
+#define MISSING_PSS_MSG \
+ FW_BUG "No compatible ACPI _PSS objects found.\n" \
+ FW_BUG "First, make sure Cool'N'Quiet is enabled in the BIOS.\n" \
+ FW_BUG "If that doesn't help, try upgrading your BIOS.\n"
+
+/* per CPU init entry point to the driver */
+static int powernowk8_cpu_init(struct cpufreq_policy *pol)
+{
+ struct powernow_k8_data *data;
+ struct init_on_cpu init_on_cpu;
+ int rc, cpu;
+
+ smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
+ if (rc)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ pr_err("unable to alloc powernow_k8_data");
+ return -ENOMEM;
+ }
+
+ data->cpu = pol->cpu;
+
+ if (powernow_k8_cpu_init_acpi(data)) {
+ /*
+ * Use the PSB BIOS structure. This is only available on
+ * an UP version, and is deprecated by AMD.
+ */
+ if (num_online_cpus() != 1) {
+ pr_err_once(MISSING_PSS_MSG);
+ goto err_out;
+ }
+ if (pol->cpu != 0) {
+ pr_err(FW_BUG "No ACPI _PSS objects for CPU other than CPU0. Complain to your BIOS vendor.\n");
+ goto err_out;
+ }
+ rc = find_psb_table(data);
+ if (rc)
+ goto err_out;
+
+ /* Take a crude guess here. The guess is in microseconds,
+ * so multiply by 1000 to get the nanoseconds cpufreq expects. */
+ pol->cpuinfo.transition_latency = (
+ ((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
+ ((1 << data->irt) * 30)) * 1000;
+ } else /* ACPI _PSS objects available */
+ pol->cpuinfo.transition_latency = get_transition_latency(data);
+
+ /* only run on specific CPU from here on */
+ init_on_cpu.data = data;
+ smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
+ &init_on_cpu, 1);
+ rc = init_on_cpu.rc;
+ if (rc != 0)
+ goto err_out_exit_acpi;
+
+ cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+ data->available_cores = pol->cpus;
+
+ /* min/max the cpu is capable of */
+ if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
+ pr_err(FW_BUG "invalid powernow_table\n");
+ powernow_k8_cpu_exit_acpi(data);
+ kfree(data->powernow_table);
+ kfree(data);
+ return -EINVAL;
+ }
+
+ pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+ data->currfid, data->currvid);
+
+ /* Point all the CPUs in this policy to the same data */
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = data;
+
+ return 0;
+
+err_out_exit_acpi:
+ powernow_k8_cpu_exit_acpi(data);
+
+err_out:
+ kfree(data);
+ return -ENODEV;
+}
+
+static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
+{
+ struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ int cpu;
+
+ if (!data)
+ return -EINVAL;
+
+ powernow_k8_cpu_exit_acpi(data);
+
+ kfree(data->powernow_table);
+ kfree(data);
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = NULL;
+
+ return 0;
+}
+
+static void query_values_on_cpu(void *_err)
+{
+ int *err = _err;
+ struct powernow_k8_data *data = __this_cpu_read(powernow_data);
+
+ *err = query_current_values_with_pending_wait(data);
+}
+
+static unsigned int powernowk8_get(unsigned int cpu)
+{
+ struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
+ unsigned int khz = 0;
+ int err;
+
+ if (!data)
+ return 0;
+
+ smp_call_function_single(cpu, query_values_on_cpu, &err, true);
+ if (err)
+ goto out;
+
+ khz = find_khz_freq_from_fid(data->currfid);
+
+out:
+ return khz;
+}
+
+static struct cpufreq_driver cpufreq_amd64_driver = {
+ .flags = CPUFREQ_ASYNC_NOTIFICATION,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernowk8_target,
+ .bios_limit = acpi_processor_get_bios_limit,
+ .init = powernowk8_cpu_init,
+ .exit = powernowk8_cpu_exit,
+ .get = powernowk8_get,
+ .name = "powernow-k8",
+ .attr = cpufreq_generic_attr,
+};
+
+static void __request_acpi_cpufreq(void)
+{
+ const char *cur_drv, *drv = "acpi-cpufreq";
+
+ cur_drv = cpufreq_get_current_driver();
+ if (!cur_drv)
+ goto request;
+
+ if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
+ pr_warn("WTF driver: %s\n", cur_drv);
+
+ return;
+
+ request:
+ pr_warn("This CPU is not supported anymore, using acpi-cpufreq instead.\n");
+ request_module(drv);
+}
+
+/* driver entry point for init */
+static int powernowk8_init(void)
+{
+ unsigned int i, supported_cpus = 0;
+ int ret;
+
+ if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+ __request_acpi_cpufreq();
+ return -ENODEV;
+ }
+
+ if (!x86_match_cpu(powernow_k8_ids))
+ return -ENODEV;
+
+ get_online_cpus();
+ for_each_online_cpu(i) {
+ smp_call_function_single(i, check_supported_cpu, &ret, 1);
+ if (!ret)
+ supported_cpus++;
+ }
+
+ if (supported_cpus != num_online_cpus()) {
+ put_online_cpus();
+ return -ENODEV;
+ }
+ put_online_cpus();
+
+ ret = cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (ret)
+ return ret;
+
+ pr_info("Found %d %s (%d cpu cores) (" VERSION ")\n",
+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
+
+ return ret;
+}
+
+/* driver entry point for term */
+static void __exit powernowk8_exit(void)
+{
+ pr_debug("exit\n");
+
+ cpufreq_unregister_driver(&cpufreq_amd64_driver);
+}
+
+MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
+MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
+MODULE_LICENSE("GPL");
+
+late_initcall(powernowk8_init);
+module_exit(powernowk8_exit);
diff --git a/kernel/drivers/cpufreq/powernow-k8.h b/kernel/drivers/cpufreq/powernow-k8.h
new file mode 100644
index 000000000..45ce11e86
--- /dev/null
+++ b/kernel/drivers/cpufreq/powernow-k8.h
@@ -0,0 +1,190 @@
+/*
+ * (c) 2003-2006 Advanced Micro Devices, Inc.
+ * Your use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+ */
+
+struct powernow_k8_data {
+ unsigned int cpu;
+
+ u32 numps; /* number of p-states */
+ u32 batps; /* number of p-states supported on battery */
+
+ /* these values are constant when the PSB is used to determine
+ * vid/fid pairings, but are modified during the ->target() call
+ * when ACPI is used */
+ u32 rvo; /* ramp voltage offset */
+ u32 irt; /* isochronous relief time */
+ u32 vidmvs; /* usable value calculated from mvs */
+ u32 vstable; /* voltage stabilization time, units 20 us */
+ u32 plllock; /* pll lock time, units 1 us */
+ u32 exttype; /* extended interface = 1 */
+
+ /* keep track of the current fid / vid or pstate */
+ u32 currvid;
+ u32 currfid;
+
+ /* the powernow_table includes all frequency and vid/fid pairings:
+ * fid are the lower 8 bits of the index, vid are the upper 8 bits.
+ * frequency is in kHz */
+ struct cpufreq_frequency_table *powernow_table;
+
+ /* the acpi table needs to be kept. it's only available if ACPI was
+ * used to determine valid frequency/vid/fid states */
+ struct acpi_processor_performance acpi_data;
+
+ /* we need to keep track of associated cores, but let cpufreq
+ * handle hotplug events - so just point at cpufreq pol->cpus
+ * structure */
+ struct cpumask *available_cores;
+};
+
+/* processor's cpuid instruction support */
+#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */
+#define CPUID_XFAM 0x0ff00000 /* extended family */
+#define CPUID_XFAM_K8 0
+#define CPUID_XMOD 0x000f0000 /* extended model */
+#define CPUID_XMOD_REV_MASK 0x000c0000
+#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
+#define CPUID_USE_XFAM_XMOD 0x00000f00
+#define CPUID_GET_MAX_CAPABILITIES 0x80000000
+#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
+#define P_STATE_TRANSITION_CAPABLE 6
+
+/* Model Specific Registers for p-state transitions. MSRs are 64-bit. For */
+/* writes (wrmsr - opcode 0f 30), the register number is placed in ecx, and */
+/* the value to write is placed in edx:eax. For reads (rdmsr - opcode 0f 32), */
+/* the register number is placed in ecx, and the data is returned in edx:eax. */
+
+#define MSR_FIDVID_CTL 0xc0010041
+#define MSR_FIDVID_STATUS 0xc0010042
+
+/* Field definitions within the FID VID Low Control MSR : */
+#define MSR_C_LO_INIT_FID_VID 0x00010000
+#define MSR_C_LO_NEW_VID 0x00003f00
+#define MSR_C_LO_NEW_FID 0x0000003f
+#define MSR_C_LO_VID_SHIFT 8
+
+/* Field definitions within the FID VID High Control MSR : */
+#define MSR_C_HI_STP_GNT_TO 0x000fffff
+
+/* Field definitions within the FID VID Low Status MSR : */
+#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
+#define MSR_S_LO_MAX_RAMP_VID 0x3f000000
+#define MSR_S_LO_MAX_FID 0x003f0000
+#define MSR_S_LO_START_FID 0x00003f00
+#define MSR_S_LO_CURRENT_FID 0x0000003f
+
+/* Field definitions within the FID VID High Status MSR : */
+#define MSR_S_HI_MIN_WORKING_VID 0x3f000000
+#define MSR_S_HI_MAX_WORKING_VID 0x003f0000
+#define MSR_S_HI_START_VID 0x00003f00
+#define MSR_S_HI_CURRENT_VID 0x0000003f
+#define MSR_C_HI_STP_GNT_BENIGN 0x00000001
+
+/*
+ * There are restrictions that frequencies have to follow:
+ * - only 1 entry in the low fid table ( <=1.4GHz )
+ * - lowest entry in the high fid table must be >= 2 * the entry in the
+ * low fid table
+ * - lowest entry in the high fid table must be <= 200MHz + 2 * the entry
+ * in the low fid table
+ * - the parts can only step at <= 200 MHz intervals, odd fid values are
+ * supported in revision G and later revisions.
+ * - lowest frequency must be >= interprocessor hypertransport link speed
+ * (only applies to MP systems obviously)
+ */
+
+/* fids (frequency identifiers) are arranged in 2 tables - lo and hi */
+#define LO_FID_TABLE_TOP 7 /* fid values marking the boundary */
+#define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */
+
+#define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */
+#define HI_VCOFREQ_TABLE_BOTTOM 1600
+
+#define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */
+
+#define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */
+#define LEAST_VID 0x3e /* Lowest (numerically highest) useful vid value */
+
+#define MIN_FREQ 800 /* Min and max freqs, per spec */
+#define MAX_FREQ 5000
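+
+/*
+ * fids encode the frequency as 800 MHz + fid * 100 MHz (see
+ * find_freq_from_fid() in powernow-k8.c): fid 0x00 is MIN_FREQ (800)
+ * and fid 0x2a (MAX_FID) is MAX_FREQ (5000).
+ */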
+
+#define INVALID_FID_MASK 0xffffffc0 /* not a valid fid if these bits are set */
+#define INVALID_VID_MASK 0xffffffc0 /* not a valid vid if these bits are set */
+
+#define VID_OFF 0x3f
+
+#define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */
+
+#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */
+
+#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */
+#define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */
+
+/*
+ * Most values of interest are encoded in a single field of the _PSS
+ * entries: the "control" value.
+ */
+
+#define IRT_SHIFT 30
+#define RVO_SHIFT 28
+#define EXT_TYPE_SHIFT 27
+#define PLL_L_SHIFT 20
+#define MVS_SHIFT 18
+#define VST_SHIFT 11
+#define VID_SHIFT 6
+#define IRT_MASK 3
+#define RVO_MASK 3
+#define EXT_TYPE_MASK 1
+#define PLL_L_MASK 0x7f
+#define MVS_MASK 3
+#define VST_MASK 0x7f
+#define VID_MASK 0x1f
+#define FID_MASK 0x1f
+#define EXT_VID_MASK 0x3f
+#define EXT_FID_MASK 0x3f
+
+
+/*
+ * Version 1.4 of the PSB table. This table is constructed by BIOS and is
+ * to tell the OS's power management driver which VIDs and FIDs are
+ * supported by this particular processor.
+ * If the data in the PSB / PST is wrong, then this driver will program the
+ * wrong values into hardware, which is very likely to lead to a crash.
+ */
+
+#define PSB_ID_STRING "AMDK7PNOW!"
+#define PSB_ID_STRING_LEN 10
+
+#define PSB_VERSION_1_4 0x14
+
+struct psb_s {
+ u8 signature[10];
+ u8 tableversion;
+ u8 flags1;
+ u16 vstable;
+ u8 flags2;
+ u8 num_tables;
+ u32 cpuid;
+ u8 plllocktime;
+ u8 maxfid;
+ u8 maxvid;
+ u8 numps;
+};
+
+/* Pairs of fid/vid values are appended to the version 1.4 PSB table. */
+struct pst_s {
+ u8 fid;
+ u8 vid;
+};
+
+static int core_voltage_pre_transition(struct powernow_k8_data *data,
+ u32 reqvid, u32 regfid);
+static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
+static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
+
+static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
+
+static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
diff --git a/kernel/drivers/cpufreq/powernv-cpufreq.c b/kernel/drivers/cpufreq/powernv-cpufreq.c
new file mode 100644
index 000000000..ebef0d827
--- /dev/null
+++ b/kernel/drivers/cpufreq/powernv-cpufreq.c
@@ -0,0 +1,445 @@
+/*
+ * POWERNV cpufreq driver for the IBM POWER processors
+ *
+ * (C) Copyright IBM 2014
+ *
+ * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "powernv-cpufreq: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+
+#include <asm/cputhreads.h>
+#include <asm/firmware.h>
+#include <asm/reg.h>
+#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
+
+#define POWERNV_MAX_PSTATES 256
+#define PMSR_PSAFE_ENABLE (1UL << 30)
+#define PMSR_SPR_EM_DISABLE (1UL << 31)
+#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define PMSR_LP(x) ((x >> 48) & 0xFF)
+
+static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+static bool rebooting, throttled;
+
+/*
+ * Note: The set of pstates consists of contiguous integers, the
+ * smallest of which is indicated by powernv_pstate_info.min, the
+ * largest of which is indicated by powernv_pstate_info.max.
+ *
+ * The nominal pstate is the highest non-turbo pstate in this
+ * platform. This is indicated by powernv_pstate_info.nominal.
+ */
+static struct powernv_pstate_info {
+ int min;
+ int max;
+ int nominal;
+ int nr_pstates;
+} powernv_pstate_info;
+
+/*
+ * Initialize the freq table based on data obtained
+ * from the firmware passed via device-tree
+ */
+static int init_powernv_pstates(void)
+{
+ struct device_node *power_mgt;
+ int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
+ const __be32 *pstate_ids, *pstate_freqs;
+ u32 len_ids, len_freqs;
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ pr_warn("power-mgt node not found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
+ pr_warn("ibm,pstate-min node not found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
+ pr_warn("ibm,pstate-max node not found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
+ &pstate_nominal)) {
+ pr_warn("ibm,pstate-nominal not found\n");
+ return -ENODEV;
+ }
+ pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+ pstate_nominal, pstate_max);
+
+ pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
+ if (!pstate_ids) {
+ pr_warn("ibm,pstate-ids not found\n");
+ return -ENODEV;
+ }
+
+ pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
+ &len_freqs);
+ if (!pstate_freqs) {
+ pr_warn("ibm,pstate-frequencies-mhz not found\n");
+ return -ENODEV;
+ }
+
+ if (len_ids != len_freqs) {
+ pr_warn("Entries in ibm,pstate-ids and "
+ "ibm,pstate-frequencies-mhz does not match\n");
+ }
+
+ nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
+ if (!nr_pstates) {
+ pr_warn("No PStates found\n");
+ return -ENODEV;
+ }
+
+ pr_debug("NR PStates %d\n", nr_pstates);
+ for (i = 0; i < nr_pstates; i++) {
+ u32 id = be32_to_cpu(pstate_ids[i]);
+ u32 freq = be32_to_cpu(pstate_freqs[i]);
+
+ pr_debug("PState id %d freq %d MHz\n", id, freq);
+ powernv_freqs[i].frequency = freq * 1000; /* kHz */
+ powernv_freqs[i].driver_data = id;
+ }
+ /* End of list marker entry */
+ powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
+
+ powernv_pstate_info.min = pstate_min;
+ powernv_pstate_info.max = pstate_max;
+ powernv_pstate_info.nominal = pstate_nominal;
+ powernv_pstate_info.nr_pstates = nr_pstates;
+
+ return 0;
+}
+
+/* Returns the CPU frequency corresponding to the pstate_id. */
+static unsigned int pstate_id_to_freq(int pstate_id)
+{
+ int i;
+
+ i = powernv_pstate_info.max - pstate_id;
+ if (i >= powernv_pstate_info.nr_pstates || i < 0) {
+ pr_warn("PState id %d outside of PState table, "
+ "reporting nominal id %d instead\n",
+ pstate_id, powernv_pstate_info.nominal);
+ i = powernv_pstate_info.max - powernv_pstate_info.nominal;
+ }
+
+ return powernv_freqs[i].frequency;
+}
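+
+/*
+ * Example (illustrative): with powernv_pstate_info.max = 0 and min = -7,
+ * pstate id -2 maps to table index 0 - (-2) = 2, while an out-of-range id
+ * falls back to the nominal pstate's frequency.
+ */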
+
+/*
+ * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
+ * the firmware
+ */
+static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
+ char *buf)
+{
+ return sprintf(buf, "%u\n",
+ pstate_id_to_freq(powernv_pstate_info.nominal));
+}
+
+struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
+ __ATTR_RO(cpuinfo_nominal_freq);
+
+static struct freq_attr *powernv_cpu_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &cpufreq_freq_attr_cpuinfo_nominal_freq,
+ NULL,
+};
+
+/* Helper routines */
+
+/* Access helpers to power mgt SPR */
+
+static inline unsigned long get_pmspr(unsigned long sprn)
+{
+ switch (sprn) {
+ case SPRN_PMCR:
+ return mfspr(SPRN_PMCR);
+
+ case SPRN_PMICR:
+ return mfspr(SPRN_PMICR);
+
+ case SPRN_PMSR:
+ return mfspr(SPRN_PMSR);
+ }
+ BUG();
+}
+
+static inline void set_pmspr(unsigned long sprn, unsigned long val)
+{
+ switch (sprn) {
+ case SPRN_PMCR:
+ mtspr(SPRN_PMCR, val);
+ return;
+
+ case SPRN_PMICR:
+ mtspr(SPRN_PMICR, val);
+ return;
+ }
+ BUG();
+}
+
+/*
+ * Use objects of this type to query/update
+ * pstates on a remote CPU via smp_call_function.
+ */
+struct powernv_smp_call_data {
+ unsigned int freq;
+ int pstate_id;
+};
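+
+/*
+ * Minimal usage sketch (illustrative only; powernv_read_cpu_freq is
+ * defined below):
+ *
+ *	struct powernv_smp_call_data data;
+ *
+ *	smp_call_function_single(cpu, powernv_read_cpu_freq, &data, 1);
+ *	pr_info("cpu %d runs at %u kHz\n", cpu, data.freq);
+ */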
+
+/*
+ * powernv_read_cpu_freq: Reads the current frequency on this CPU.
+ *
+ * Called via smp_call_function.
+ *
+ * Note: The caller of the smp_call_function should pass an argument of
+ * the type 'struct powernv_smp_call_data *' along with this function.
+ *
+ * The current frequency on this CPU will be returned via
+ * ((struct powernv_smp_call_data *)arg)->freq;
+ */
+static void powernv_read_cpu_freq(void *arg)
+{
+ unsigned long pmspr_val;
+ s8 local_pstate_id;
+ struct powernv_smp_call_data *freq_data = arg;
+
+ pmspr_val = get_pmspr(SPRN_PMSR);
+
+ /*
+	 * The local pstate id corresponds to bits 48..55 in the PMSR.
+ * Note: Watch out for the sign!
+ */
+ local_pstate_id = (pmspr_val >> 48) & 0xFF;
+ freq_data->pstate_id = local_pstate_id;
+ freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
+
+ pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
+ raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+ freq_data->freq);
+}
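+
+/*
+ * The s8 type of local_pstate_id above is what makes the sign work out:
+ * assuming PMSR bits 48..55 read 0xFD, keeping the raw value would give
+ * 253, while the s8 conversion yields the intended negative pstate
+ * id -3 (example value assumed for illustration).
+ */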
+
+/*
+ * powernv_cpufreq_get: Returns the CPU frequency as reported by the
+ * firmware for CPU 'cpu'. This value is reported through the sysfs
+ * file cpuinfo_cur_freq.
+ */
+static unsigned int powernv_cpufreq_get(unsigned int cpu)
+{
+ struct powernv_smp_call_data freq_data;
+
+ smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
+ &freq_data, 1);
+
+ return freq_data.freq;
+}
+
+/*
+ * set_pstate: Sets the pstate on this CPU.
+ *
+ * This is called via an smp_call_function.
+ *
+ * The caller must ensure that freq_data is of the type
+ * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
+ * on this CPU should be present in freq_data->pstate_id.
+ */
+static void set_pstate(void *freq_data)
+{
+ unsigned long val;
+ unsigned long pstate_ul =
+ ((struct powernv_smp_call_data *) freq_data)->pstate_id;
+
+ val = get_pmspr(SPRN_PMCR);
+ val = val & 0x0000FFFFFFFFFFFFULL;
+
+ pstate_ul = pstate_ul & 0xFF;
+
+	/* Set both the global (bits 56..63) and local (bits 48..55) PStates */
+ val = val | (pstate_ul << 56) | (pstate_ul << 48);
+
+ pr_debug("Setting cpu %d pmcr to %016lX\n",
+ raw_smp_processor_id(), val);
+ set_pmspr(SPRN_PMCR, val);
+}
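+
+/*
+ * Illustration with an assumed pstate id of -3 (0xFD as a byte): after
+ * the masking above, pstate_ul = 0xFD, so PMCR bits 63..56 and 55..48
+ * both become 0xFD while bits 47..0 keep their previous contents,
+ * i.e. the register is written as 0xFDFDxxxxxxxxxxxx.
+ */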
+
+/*
+ * get_nominal_index: Returns the index corresponding to the nominal
+ * pstate in the cpufreq table
+ */
+static inline unsigned int get_nominal_index(void)
+{
+ return powernv_pstate_info.max - powernv_pstate_info.nominal;
+}
+
+static void powernv_cpufreq_throttle_check(unsigned int cpu)
+{
+ unsigned long pmsr;
+ int pmsr_pmax, pmsr_lp;
+
+ pmsr = get_pmspr(SPRN_PMSR);
+
+ /* Check for Pmax Capping */
+ pmsr_pmax = (s8)PMSR_MAX(pmsr);
+ if (pmsr_pmax != powernv_pstate_info.max) {
+ throttled = true;
+ pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax);
+ pr_info("Max allowed Pstate is capped\n");
+ }
+
+ /*
+ * Check for Psafe by reading LocalPstate
+ * or check if Psafe_mode_active is set in PMSR.
+ */
+ pmsr_lp = (s8)PMSR_LP(pmsr);
+ if ((pmsr_lp < powernv_pstate_info.min) ||
+ (pmsr & PMSR_PSAFE_ENABLE)) {
+ throttled = true;
+ pr_info("Pstate set to safe frequency\n");
+ }
+
+ /* Check if SPR_EM_DISABLE is set in PMSR */
+ if (pmsr & PMSR_SPR_EM_DISABLE) {
+ throttled = true;
+ pr_info("Frequency Control disabled from OS\n");
+ }
+
+ if (throttled) {
+ pr_info("PMSR = %16lx\n", pmsr);
+ pr_crit("CPU Frequency could be throttled\n");
+ }
+}
+
+/*
+ * powernv_cpufreq_target_index: Sets the frequency corresponding to
+ * the cpufreq table entry indexed by new_index on the cpus in the
+ * mask policy->cpus
+ */
+static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
+ unsigned int new_index)
+{
+ struct powernv_smp_call_data freq_data;
+
+ if (unlikely(rebooting) && new_index != get_nominal_index())
+ return 0;
+
+ if (!throttled)
+ powernv_cpufreq_throttle_check(smp_processor_id());
+
+ freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+
+ /*
+ * Use smp_call_function to send IPI and execute the
+ * mtspr on target CPU. We could do that without IPI
+ * if current CPU is within policy->cpus (core)
+ */
+ smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+
+ return 0;
+}
+
+static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int base, i;
+
+ base = cpu_first_thread_sibling(policy->cpu);
+
+ for (i = 0; i < threads_per_core; i++)
+ cpumask_set_cpu(base + i, policy->cpus);
+
+ return cpufreq_table_validate_and_show(policy, powernv_freqs);
+}
+
+static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ int cpu;
+ struct cpufreq_policy cpu_policy;
+
+ rebooting = true;
+ for_each_online_cpu(cpu) {
+ cpufreq_get_policy(&cpu_policy, cpu);
+ powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block powernv_cpufreq_reboot_nb = {
+ .notifier_call = powernv_cpufreq_reboot_notifier,
+};
+
+static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+{
+ struct powernv_smp_call_data freq_data;
+
+ freq_data.pstate_id = powernv_pstate_info.min;
+ smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+}
+
+static struct cpufreq_driver powernv_cpufreq_driver = {
+ .name = "powernv-cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = powernv_cpufreq_cpu_init,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = powernv_cpufreq_target_index,
+ .get = powernv_cpufreq_get,
+ .stop_cpu = powernv_cpufreq_stop_cpu,
+ .attr = powernv_cpu_freq_attr,
+};
+
+static int __init powernv_cpufreq_init(void)
+{
+ int rc = 0;
+
+ /* Don't probe on pseries (guest) platforms */
+ if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ return -ENODEV;
+
+ /* Discover pstates from device tree and init */
+ rc = init_powernv_pstates();
+ if (rc) {
+ pr_info("powernv-cpufreq disabled. System does not support PState control\n");
+ return rc;
+ }
+
+ register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+ return cpufreq_register_driver(&powernv_cpufreq_driver);
+}
+module_init(powernv_cpufreq_init);
+
+static void __exit powernv_cpufreq_exit(void)
+{
+ unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+ cpufreq_unregister_driver(&powernv_cpufreq_driver);
+}
+module_exit(powernv_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
diff --git a/kernel/drivers/cpufreq/ppc_cbe_cpufreq.c b/kernel/drivers/cpufreq/ppc_cbe_cpufreq.c
new file mode 100644
index 000000000..5a4c5a639
--- /dev/null
+++ b/kernel/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -0,0 +1,170 @@
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/cell-regs.h>
+
+#include "ppc_cbe_cpufreq.h"
+
+/* the CBE supports an 8 step frequency scaling */
+static struct cpufreq_frequency_table cbe_freqs[] = {
+ {0, 1, 0},
+ {0, 2, 0},
+ {0, 3, 0},
+ {0, 4, 0},
+ {0, 5, 0},
+ {0, 6, 0},
+ {0, 8, 0},
+ {0, 10, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
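+
+/*
+ * The driver_data column holds the divisor applied to the maximum
+ * clock. Assuming a hypothetical 3.2 GHz part (max_freq = 3200000 kHz),
+ * cbe_cpufreq_cpu_init() below fills the table with 3200000, 1600000,
+ * 1066666, 800000, 640000, 533333, 400000 and 320000 kHz for the
+ * divisors 1, 2, 3, 4, 5, 6, 8 and 10.
+ */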
+
+/*
+ * hardware specific functions
+ */
+
+static int set_pmode(unsigned int cpu, unsigned int slow_mode)
+{
+ int rc;
+
+ if (cbe_cpufreq_has_pmi)
+ rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
+ else
+ rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
+
+ pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
+
+ return rc;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *pos;
+ const u32 *max_freqp;
+ u32 max_freq;
+ int cur_pmode;
+ struct device_node *cpu;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+
+ if (!cpu)
+ return -ENODEV;
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+ /*
+ * Let's check we can actually get to the CELL regs
+ */
+ if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
+ !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
+ pr_info("invalid CBE regs pointers for cpufreq\n");
+ return -EINVAL;
+ }
+
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+
+ of_node_put(cpu);
+
+ if (!max_freqp)
+ return -EINVAL;
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+ /* initialize frequency table */
+ cpufreq_for_each_entry(pos, cbe_freqs) {
+ pos->frequency = max_freq / pos->driver_data;
+ pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
+ }
+
+ /* if DEBUG is enabled set_pmode() measures the latency
+ * of a transition */
+ policy->cpuinfo.transition_latency = 25000;
+
+ cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
+	pr_debug("current pmode is at %d\n", cur_pmode);
+
+ policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+ cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+#endif
+
+	/* this ensures that policy->cpuinfo.min_freq
+	 * and policy->cpuinfo.max_freq are set correctly */
+ return cpufreq_table_validate_and_show(policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int cbe_pmode_new)
+{
+	pr_debug("setting frequency for cpu %d to %d kHz, "
+		 "1/%d of max frequency\n",
+ policy->cpu,
+ cbe_freqs[cbe_pmode_new].frequency,
+ cbe_freqs[cbe_pmode_new].driver_data);
+
+ return set_pmode(policy->cpu, cbe_pmode_new);
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = cbe_cpufreq_target,
+ .init = cbe_cpufreq_cpu_init,
+ .name = "cbe-cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+ if (!machine_is(cell))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&cbe_cpufreq_driver);
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&cbe_cpufreq_driver);
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/kernel/drivers/cpufreq/ppc_cbe_cpufreq.h b/kernel/drivers/cpufreq/ppc_cbe_cpufreq.h
new file mode 100644
index 000000000..b4c00a5a6
--- /dev/null
+++ b/kernel/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -0,0 +1,24 @@
+/*
+ * ppc_cbe_cpufreq.h
+ *
+ * This file contains the definitions used by the cbe_cpufreq driver.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/types.h>
+
+int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
+int cbe_cpufreq_get_pmode(int cpu);
+
+int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
+
+#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
+extern bool cbe_cpufreq_has_pmi;
+#else
+#define cbe_cpufreq_has_pmi (0)
+#endif
diff --git a/kernel/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/kernel/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
new file mode 100644
index 000000000..84d2f2cf5
--- /dev/null
+++ b/kernel/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
@@ -0,0 +1,115 @@
+/*
+ * pervasive backend for the cbe_cpufreq driver
+ *
+ * This driver makes use of the pervasive unit to
+ * engage the desired frequency.
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <asm/machdep.h>
+#include <asm/hw_irq.h>
+#include <asm/cell-regs.h>
+
+#include "ppc_cbe_cpufreq.h"
+
+/* to write to MIC register */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+ [0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* more values for the MIC */
+static u64 MIC_Slow_Next_Timer_table[] = {
+ 0x0000240000000000ull,
+ 0x0000268000000000ull,
+ 0x000029C000000000ull,
+ 0x00002D0000000000ull,
+ 0x0000300000000000ull,
+ 0x0000334000000000ull,
+ 0x000039C000000000ull,
+ 0x00003FC000000000ull,
+};
+
+int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
+{
+ struct cbe_pmd_regs __iomem *pmd_regs;
+ struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+ unsigned long flags;
+ u64 value;
+#ifdef DEBUG
+ long time;
+#endif
+
+ local_irq_save(flags);
+
+ mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+#ifdef DEBUG
+ time = jiffies;
+#endif
+
+ out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+ out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+ out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+ value = in_be64(&pmd_regs->pmcr);
+ /* set bits to zero */
+ value &= 0xFFFFFFFFFFFFFFF8ull;
+ /* set bits to next pmode */
+ value |= pmode;
+
+ out_be64(&pmd_regs->pmcr, value);
+
+#ifdef DEBUG
+ /* wait until new pmode appears in status register */
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ while (value != pmode) {
+ cpu_relax();
+ value = in_be64(&pmd_regs->pmsr) & 0x07;
+ }
+
+ time = jiffies - time;
+ time = jiffies_to_msecs(time);
+	pr_debug("had to wait %lu ms for a transition using "
+		 "pervasive unit\n", time);
+#endif
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+int cbe_cpufreq_get_pmode(int cpu)
+{
+ int ret;
+ struct cbe_pmd_regs __iomem *pmd_regs;
+
+ pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+ ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+ return ret;
+}
diff --git a/kernel/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/kernel/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
new file mode 100644
index 000000000..d29e8da39
--- /dev/null
+++ b/kernel/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -0,0 +1,156 @@
+/*
+ * pmi backend for the cbe_cpufreq driver
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/pmi.h>
+#include <asm/cell-regs.h>
+
+#ifdef DEBUG
+#include <asm/time.h>
+#endif
+
+#include "ppc_cbe_cpufreq.h"
+
+static u8 pmi_slow_mode_limit[MAX_CBE];
+
+bool cbe_cpufreq_has_pmi = false;
+EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
+
+/*
+ * hardware specific functions
+ */
+
+int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
+{
+ int ret;
+ pmi_message_t pmi_msg;
+#ifdef DEBUG
+ long time;
+#endif
+ pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
+ pmi_msg.data1 = cbe_cpu_to_node(cpu);
+ pmi_msg.data2 = pmode;
+
+#ifdef DEBUG
+ time = jiffies;
+#endif
+ pmi_send_message(pmi_msg);
+
+#ifdef DEBUG
+ time = jiffies - time;
+ time = jiffies_to_msecs(time);
+	pr_debug("had to wait %lu ms for a transition using "
+		 "PMI\n", time);
+#endif
+ ret = pmi_msg.data2;
+ pr_debug("PMI returned slow mode %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
+
+static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
+{
+ u8 node, slow_mode;
+
+ BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
+
+ node = pmi_msg.data1;
+ slow_mode = pmi_msg.data2;
+
+ pmi_slow_mode_limit[node] = slow_mode;
+
+ pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
+}
+
+static int pmi_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ struct cpufreq_frequency_table *cbe_freqs;
+ u8 node;
+
+ /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
+	 * and CPUFREQ_NOTIFY policy events?
+ */
+ if (event == CPUFREQ_START)
+ return 0;
+
+ cbe_freqs = cpufreq_frequency_get_table(policy->cpu);
+ node = cbe_cpu_to_node(policy->cpu);
+
+ pr_debug("got notified, event=%lu, node=%u\n", event, node);
+
+ if (pmi_slow_mode_limit[node] != 0) {
+ pr_debug("limiting node %d to slow mode %d\n",
+ node, pmi_slow_mode_limit[node]);
+
+		cpufreq_verify_within_limits(policy, 0,
+			cbe_freqs[pmi_slow_mode_limit[node]].frequency);
+ }
+
+ return 0;
+}
+
+static struct notifier_block pmi_notifier_block = {
+ .notifier_call = pmi_notifier,
+};
+
+static struct pmi_handler cbe_pmi_handler = {
+ .type = PMI_TYPE_FREQ_CHANGE,
+ .handle_pmi_message = cbe_cpufreq_handle_pmi,
+};
+
+static int __init cbe_cpufreq_pmi_init(void)
+{
+ cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0;
+
+ if (!cbe_cpufreq_has_pmi)
+ return -ENODEV;
+
+ cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+
+ return 0;
+}
+
+static void __exit cbe_cpufreq_pmi_exit(void)
+{
+ cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
+ pmi_unregister_handler(&cbe_pmi_handler);
+}
+
+module_init(cbe_cpufreq_pmi_init);
+module_exit(cbe_cpufreq_pmi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/kernel/drivers/cpufreq/pxa2xx-cpufreq.c b/kernel/drivers/cpufreq/pxa2xx-cpufreq.c
new file mode 100644
index 000000000..e24269ab4
--- /dev/null
+++ b/kernel/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2002,2003 Intrinsyc Software
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * History:
+ * 31-Jul-2002 : Initial version [FB]
+ * 29-Jan-2003 : added PXA255 support [FB]
+ * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.)
+ *
+ * Note:
+ * This driver may change the memory bus clock rate, but will not do any
+ * platform specific access timing changes... for example if you have flash
+ * memory connected to CS0, you will need to register a platform specific
+ * notifier which will adjust the memory access strobes to maintain a
+ * minimum strobe width.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+
+#include <mach/pxa2xx-regs.h>
+#include <mach/smemc.h>
+
+#ifdef DEBUG
+static unsigned int freq_debug;
+module_param(freq_debug, uint, 0);
+MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0");
+#else
+#define freq_debug 0
+#endif
+
+static struct regulator *vcc_core;
+
+static unsigned int pxa27x_maxfreq;
+module_param(pxa27x_maxfreq, uint, 0);
+MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz "
+	"(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
+
+typedef struct {
+ unsigned int khz;
+ unsigned int membus;
+ unsigned int cccr;
+ unsigned int div2;
+ unsigned int cclkcfg;
+ int vmin;
+ int vmax;
+} pxa_freqs_t;
+
+/* Define the refresh period in ms for the SDRAM and the number of rows */
+#define SDRAM_TREF 64 /* standard 64ms SDRAM */
+static unsigned int sdram_rows;
+
+#define CCLKCFG_TURBO 0x1
+#define CCLKCFG_FCS 0x2
+#define CCLKCFG_HALFTURBO 0x4
+#define CCLKCFG_FASTBUS 0x8
+#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
+#define MDREFR_DRI_MASK 0xFFF
+
+#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
+#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
+
+/*
+ * PXA255 definitions
+ */
+/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
+#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
+
+static pxa_freqs_t pxa255_run_freqs[] =
+{
+ /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
+ { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
+ {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
+ {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
+ {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
+ {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
+ {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
+};
+
+/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
+static pxa_freqs_t pxa255_turbo_freqs[] =
+{
+ /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
+ { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
+ {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
+ {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
+ {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
+ {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
+};
+
+#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
+#define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs)
+
+static struct cpufreq_frequency_table
+ pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1];
+static struct cpufreq_frequency_table
+ pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1];
+
+static unsigned int pxa255_turbo_table;
+module_param(pxa255_turbo_table, uint, 0);
+MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
+
+/*
+ * PXA270 definitions
+ *
+ * For the PXA27x:
+ * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
+ *
+ * A = 0 => memory controller clock from table 3-7,
+ * A = 1 => memory controller clock = system bus clock
+ * Run mode frequency = 13 MHz * L
+ * Turbo mode frequency = 13 MHz * L * N
+ * System bus frequency = 13 MHz * L / (B + 1)
+ *
+ * In CCCR:
+ * A = 1
+ * L = 16 oscillator to run mode ratio
+ * 2N = 6 2 * (turbo mode to run mode ratio)
+ *
+ * In CCLKCFG:
+ * B = 1 Fast bus mode
+ * HT = 0 Half-Turbo mode
+ * T = 1 Turbo mode
+ *
+ * For now, just support some of the combinations in table 3-7 of
+ * PXA27x Processor Family Developer's Manual to simplify frequency
+ * change sequences.
+ */
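+
+/*
+ * Worked example using the formulas above, for the 624 MHz entry of
+ * pxa27x_freqs[] below: its CCCR is built with A = 1, L = 16 and
+ * 2N = 6 (N = 3), giving a run mode frequency of 13 MHz * 16 = 208 MHz
+ * and a turbo mode frequency of 13 MHz * 16 * 3 = 624 MHz.
+ */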
+#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
+#define CCLKCFG2(B, HT, T) \
+ (CCLKCFG_FCS | \
+ ((B) ? CCLKCFG_FASTBUS : 0) | \
+ ((HT) ? CCLKCFG_HALFTURBO : 0) | \
+ ((T) ? CCLKCFG_TURBO : 0))
+
+static pxa_freqs_t pxa27x_freqs[] = {
+ {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
+ {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
+ {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
+ {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
+ {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
+ {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
+ {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
+};
+
+#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
+static struct cpufreq_frequency_table
+ pxa27x_freq_table[NUM_PXA27x_FREQS+1];
+
+extern unsigned get_clk_frequency_khz(int info);
+
+#ifdef CONFIG_REGULATOR
+
+static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
+{
+ int ret = 0;
+ int vmin, vmax;
+
+ if (!cpu_is_pxa27x())
+ return 0;
+
+ vmin = pxa_freq->vmin;
+ vmax = pxa_freq->vmax;
+ if ((vmin == -1) || (vmax == -1))
+ return 0;
+
+ ret = regulator_set_voltage(vcc_core, vmin, vmax);
+ if (ret)
+ pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
+ vmin, vmax);
+ return ret;
+}
+
+static void __init pxa_cpufreq_init_voltages(void)
+{
+ vcc_core = regulator_get(NULL, "vcc_core");
+ if (IS_ERR(vcc_core)) {
+ pr_info("cpufreq: Didn't find vcc_core regulator\n");
+ vcc_core = NULL;
+ } else {
+ pr_info("cpufreq: Found vcc_core regulator\n");
+ }
+}
+#else
+static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
+{
+ return 0;
+}
+
+static void __init pxa_cpufreq_init_voltages(void) { }
+#endif
+
+static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
+ pxa_freqs_t **pxa_freqs)
+{
+ if (cpu_is_pxa25x()) {
+ if (!pxa255_turbo_table) {
+ *pxa_freqs = pxa255_run_freqs;
+ *freq_table = pxa255_run_freq_table;
+ } else {
+ *pxa_freqs = pxa255_turbo_freqs;
+ *freq_table = pxa255_turbo_freq_table;
+ }
+ } else if (cpu_is_pxa27x()) {
+ *pxa_freqs = pxa27x_freqs;
+ *freq_table = pxa27x_freq_table;
+ } else {
+ BUG();
+ }
+}
+
+static void pxa27x_guess_max_freq(void)
+{
+ if (!pxa27x_maxfreq) {
+ pxa27x_maxfreq = 416000;
+		printk(KERN_INFO "PXA CPU 27x max frequency not defined "
+		       "(pxa27x_maxfreq), assuming pxa271 with %d kHz maxfreq\n",
+		       pxa27x_maxfreq);
+ } else {
+ pxa27x_maxfreq *= 1000;
+ }
+}
+
+static void init_sdram_rows(void)
+{
+ uint32_t mdcnfg = __raw_readl(MDCNFG);
+ unsigned int drac2 = 0, drac0 = 0;
+
+ if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
+ drac2 = MDCNFG_DRAC2(mdcnfg);
+
+ if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
+ drac0 = MDCNFG_DRAC0(mdcnfg);
+
+ sdram_rows = 1 << (11 + max(drac0, drac2));
+}
+
+static u32 mdrefr_dri(unsigned int freq)
+{
+ u32 interval = freq * SDRAM_TREF / sdram_rows;
+
+ return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
+}
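+
+/*
+ * Worked example (assuming sdram_rows = 4096): for a 99500 kHz memory
+ * bus, interval = 99500 * 64 / 4096 = 1554, giving a DRI of
+ * 1554 / 32 = 48 on pxa25x and (1554 - 31) / 32 = 47 on pxa27x.
+ */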
+
+static unsigned int pxa_cpufreq_get(unsigned int cpu)
+{
+ return get_clk_frequency_khz(0);
+}
+
+static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
+{
+ struct cpufreq_frequency_table *pxa_freqs_table;
+ pxa_freqs_t *pxa_freq_settings;
+ unsigned long flags;
+ unsigned int new_freq_cpu, new_freq_mem;
+ unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
+ int ret = 0;
+
+	/* Get the frequency table and settings for the current cpu */
+ find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
+
+ new_freq_cpu = pxa_freq_settings[idx].khz;
+ new_freq_mem = pxa_freq_settings[idx].membus;
+
+ if (freq_debug)
+		pr_debug("Changing CPU frequency to %d MHz (SDRAM %d MHz)\n",
+ new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
+ (new_freq_mem / 2000) : (new_freq_mem / 1000));
+
+ if (vcc_core && new_freq_cpu > policy->cur) {
+ ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
+ if (ret)
+ return ret;
+ }
+
+ /* Calculate the next MDREFR. If we're slowing down the SDRAM clock
+ * we need to preset the smaller DRI before the change. If we're
+ * speeding up we need to set the larger DRI value after the change.
+ */
+ preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
+ if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
+ preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
+ preset_mdrefr |= mdrefr_dri(new_freq_mem);
+ }
+ postset_mdrefr =
+ (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);
+
+ /* If we're dividing the memory clock by two for the SDRAM clock, this
+ * must be set prior to the change. Clearing the divide must be done
+ * after the change.
+ */
+ if (pxa_freq_settings[idx].div2) {
+ preset_mdrefr |= MDREFR_DB2_MASK;
+ postset_mdrefr |= MDREFR_DB2_MASK;
+ } else {
+ postset_mdrefr &= ~MDREFR_DB2_MASK;
+ }
+
+ local_irq_save(flags);
+
+	/* Set the new CCCR and prepare CCLKCFG */
+ CCCR = pxa_freq_settings[idx].cccr;
+ cclkcfg = pxa_freq_settings[idx].cclkcfg;
+
+ asm volatile(" \n\
+ ldr r4, [%1] /* load MDREFR */ \n\
+ b 2f \n\
+ .align 5 \n\
+1: \n\
+ str %3, [%1] /* preset the MDREFR */ \n\
+ mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\
+ str %4, [%1] /* postset the MDREFR */ \n\
+ \n\
+ b 3f \n\
+2: b 1b \n\
+3: nop \n\
+ "
+ : "=&r" (unused)
+ : "r" (MDREFR), "r" (cclkcfg),
+ "r" (preset_mdrefr), "r" (postset_mdrefr)
+ : "r4", "r5");
+ local_irq_restore(flags);
+
+ /*
+	 * Even if the voltage setting fails, we don't report it, as the
+	 * frequency change itself succeeded. The voltage reduction is not a
+	 * critical failure; only the power savings will suffer.
+	 *
+	 * Note: if the voltage change fails and its error code is returned
+	 * here, a bug (apparently a deadlock) is triggered. Should anybody
+	 * find out where, the "return 0" below should become "return ret".
+ */
+ if (vcc_core && new_freq_cpu < policy->cur)
+ ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
+
+ return 0;
+}
+
+static int pxa_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int i;
+ unsigned int freq;
+ struct cpufreq_frequency_table *pxa255_freq_table;
+ pxa_freqs_t *pxa255_freqs;
+
+ /* try to guess pxa27x cpu */
+ if (cpu_is_pxa27x())
+ pxa27x_guess_max_freq();
+
+ pxa_cpufreq_init_voltages();
+
+ init_sdram_rows();
+
+ /* set default policy and cpuinfo */
+ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
+
+	/* Generate the pxa25x run-mode cpufreq_frequency_table */
+ for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
+ pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
+ pxa255_run_freq_table[i].driver_data = i;
+ }
+ pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+	/* Generate the pxa25x turbo-mode cpufreq_frequency_table */
+ for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
+ pxa255_turbo_freq_table[i].frequency =
+ pxa255_turbo_freqs[i].khz;
+ pxa255_turbo_freq_table[i].driver_data = i;
+ }
+ pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ pxa255_turbo_table = !!pxa255_turbo_table;
+
+ /* Generate the pxa27x cpufreq_frequency_table struct */
+ for (i = 0; i < NUM_PXA27x_FREQS; i++) {
+ freq = pxa27x_freqs[i].khz;
+ if (freq > pxa27x_maxfreq)
+ break;
+ pxa27x_freq_table[i].frequency = freq;
+ pxa27x_freq_table[i].driver_data = i;
+ }
+ pxa27x_freq_table[i].driver_data = i;
+ pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ /*
+ * Set the policy's minimum and maximum frequencies from the tables
+	 * just constructed. This sets cpuinfo.min_freq and cpuinfo.max_freq
+	 * as well as the policy's min and max.
+ */
+ if (cpu_is_pxa25x()) {
+ find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
+ pr_info("PXA255 cpufreq using %s frequency table\n",
+ pxa255_turbo_table ? "turbo" : "run");
+
+ cpufreq_table_validate_and_show(policy, pxa255_freq_table);
+	} else if (cpu_is_pxa27x()) {
+ cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
+ }
+
+ printk(KERN_INFO "PXA CPU frequency change support initialized\n");
+
+ return 0;
+}
+
+static struct cpufreq_driver pxa_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa_set_target,
+ .init = pxa_cpufreq_init,
+ .get = pxa_cpufreq_get,
+ .name = "PXA2xx",
+};
+
+static int __init pxa_cpu_init(void)
+{
+ int ret = -ENODEV;
+ if (cpu_is_pxa25x() || cpu_is_pxa27x())
+ ret = cpufreq_register_driver(&pxa_cpufreq_driver);
+ return ret;
+}
+
+static void __exit pxa_cpu_exit(void)
+{
+ cpufreq_unregister_driver(&pxa_cpufreq_driver);
+}
+
+MODULE_AUTHOR("Intrinsyc Software Inc.");
+MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture");
+MODULE_LICENSE("GPL");
+module_init(pxa_cpu_init);
+module_exit(pxa_cpu_exit);
diff --git a/kernel/drivers/cpufreq/pxa3xx-cpufreq.c b/kernel/drivers/cpufreq/pxa3xx-cpufreq.c
new file mode 100644
index 000000000..a01275900
--- /dev/null
+++ b/kernel/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2008 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <mach/generic.h>
+#include <mach/pxa3xx-regs.h>
+
+#define HSS_104M (0)
+#define HSS_156M (1)
+#define HSS_208M (2)
+#define HSS_312M (3)
+
+#define SMCFS_78M (0)
+#define SMCFS_104M (2)
+#define SMCFS_208M (5)
+
+#define SFLFS_104M (0)
+#define SFLFS_156M (1)
+#define SFLFS_208M (2)
+#define SFLFS_312M (3)
+
+#define XSPCLK_156M (0)
+#define XSPCLK_NONE (3)
+
+#define DMCFS_26M (0)
+#define DMCFS_260M (3)
+
+struct pxa3xx_freq_info {
+ unsigned int cpufreq_mhz;
+ unsigned int core_xl : 5;
+ unsigned int core_xn : 3;
+ unsigned int hss : 2;
+ unsigned int dmcfs : 2;
+ unsigned int smcfs : 3;
+ unsigned int sflfs : 2;
+ unsigned int df_clkdiv : 3;
+
+ int vcc_core; /* in mV */
+ int vcc_sram; /* in mV */
+};
+
+#define OP(cpufreq, _xl, _xn, _hss, _dmc, _smc, _sfl, _dfi, vcore, vsram) \
+{ \
+ .cpufreq_mhz = cpufreq, \
+ .core_xl = _xl, \
+ .core_xn = _xn, \
+ .hss = HSS_##_hss##M, \
+ .dmcfs = DMCFS_##_dmc##M, \
+ .smcfs = SMCFS_##_smc##M, \
+ .sflfs = SFLFS_##_sfl##M, \
+ .df_clkdiv = _dfi, \
+ .vcc_core = vcore, \
+ .vcc_sram = vsram, \
+}
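+
+/*
+ * For reference, the first pxa300 entry below,
+ * OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), expands to
+ * { .cpufreq_mhz = 104, .core_xl = 8, .core_xn = 1, .hss = HSS_104M,
+ *   .dmcfs = DMCFS_260M, .smcfs = SMCFS_78M, .sflfs = SFLFS_104M,
+ *   .df_clkdiv = 3, .vcc_core = 1000, .vcc_sram = 1100 }.
+ */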
+
+static struct pxa3xx_freq_info pxa300_freqs[] = {
+ /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
+ OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
+ OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
+ OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
+ OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
+};
+
+static struct pxa3xx_freq_info pxa320_freqs[] = {
+ /* CPU XL XN HSS DMEM SMEM SRAM DFI VCC_CORE VCC_SRAM */
+ OP(104, 8, 1, 104, 260, 78, 104, 3, 1000, 1100), /* 104MHz */
+ OP(208, 16, 1, 104, 260, 104, 156, 2, 1000, 1100), /* 208MHz */
+ OP(416, 16, 2, 156, 260, 104, 208, 2, 1100, 1200), /* 416MHz */
+ OP(624, 24, 2, 208, 260, 208, 312, 3, 1375, 1400), /* 624MHz */
+ OP(806, 31, 2, 208, 260, 208, 312, 3, 1400, 1400), /* 806MHz */
+};
+
+static unsigned int pxa3xx_freqs_num;
+static struct pxa3xx_freq_info *pxa3xx_freqs;
+static struct cpufreq_frequency_table *pxa3xx_freqs_table;
+
+static int setup_freqs_table(struct cpufreq_policy *policy,
+ struct pxa3xx_freq_info *freqs, int num)
+{
+ struct cpufreq_frequency_table *table;
+ int i;
+
+ table = kzalloc((num + 1) * sizeof(*table), GFP_KERNEL);
+ if (table == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ table[i].driver_data = i;
+ table[i].frequency = freqs[i].cpufreq_mhz * 1000;
+ }
+ table[num].driver_data = i;
+ table[num].frequency = CPUFREQ_TABLE_END;
+
+ pxa3xx_freqs = freqs;
+ pxa3xx_freqs_num = num;
+ pxa3xx_freqs_table = table;
+
+ return cpufreq_table_validate_and_show(policy, table);
+}
+
+static void __update_core_freq(struct pxa3xx_freq_info *info)
+{
+ uint32_t mask = ACCR_XN_MASK | ACCR_XL_MASK;
+ uint32_t accr = ACCR;
+ uint32_t xclkcfg;
+
+ accr &= ~(ACCR_XN_MASK | ACCR_XL_MASK | ACCR_XSPCLK_MASK);
+ accr |= ACCR_XN(info->core_xn) | ACCR_XL(info->core_xl);
+
+ /* No clock until core PLL is re-locked */
+ accr |= ACCR_XSPCLK(XSPCLK_NONE);
+
+ xclkcfg = (info->core_xn == 2) ? 0x3 : 0x2; /* turbo bit */
+
+ ACCR = accr;
+ __asm__("mcr p14, 0, %0, c6, c0, 0\n" : : "r"(xclkcfg));
+
+ while ((ACSR & mask) != (accr & mask))
+ cpu_relax();
+}
+
+static void __update_bus_freq(struct pxa3xx_freq_info *info)
+{
+ uint32_t mask;
+ uint32_t accr = ACCR;
+
+ mask = ACCR_SMCFS_MASK | ACCR_SFLFS_MASK | ACCR_HSS_MASK |
+ ACCR_DMCFS_MASK;
+
+ accr &= ~mask;
+ accr |= ACCR_SMCFS(info->smcfs) | ACCR_SFLFS(info->sflfs) |
+ ACCR_HSS(info->hss) | ACCR_DMCFS(info->dmcfs);
+
+ ACCR = accr;
+
+ while ((ACSR & mask) != (accr & mask))
+ cpu_relax();
+}
+
+static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
+{
+ return pxa3xx_get_clk_frequency_khz(0);
+}
+
+static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct pxa3xx_freq_info *next;
+ unsigned long flags;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ next = &pxa3xx_freqs[index];
+
+ local_irq_save(flags);
+ __update_core_freq(next);
+ __update_bus_freq(next);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret = -EINVAL;
+
+ /* set default policy and cpuinfo */
+ policy->min = policy->cpuinfo.min_freq = 104000;
+ policy->max = policy->cpuinfo.max_freq =
+ (cpu_is_pxa320()) ? 806000 : 624000;
+ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
+
+ if (cpu_is_pxa300() || cpu_is_pxa310())
+ ret = setup_freqs_table(policy, pxa300_freqs,
+ ARRAY_SIZE(pxa300_freqs));
+
+ if (cpu_is_pxa320())
+ ret = setup_freqs_table(policy, pxa320_freqs,
+ ARRAY_SIZE(pxa320_freqs));
+
+ if (ret) {
+ pr_err("failed to setup frequency table\n");
+ return ret;
+ }
+
+ pr_info("CPUFREQ support for PXA3xx initialized\n");
+ return 0;
+}
+
+static struct cpufreq_driver pxa3xx_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = pxa3xx_cpufreq_set,
+ .init = pxa3xx_cpufreq_init,
+ .get = pxa3xx_cpufreq_get,
+ .name = "pxa3xx-cpufreq",
+};
+
+static int __init cpufreq_init(void)
+{
+ if (cpu_is_pxa3xx())
+ return cpufreq_register_driver(&pxa3xx_cpufreq_driver);
+
+ return 0;
+}
+module_init(cpufreq_init);
+
+static void __exit cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pxa3xx_cpufreq_driver);
+}
+module_exit(cpufreq_exit);
+
+MODULE_DESCRIPTION("CPU frequency scaling driver for PXA3xx");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/qoriq-cpufreq.c b/kernel/drivers/cpufreq/qoriq-cpufreq.c
new file mode 100644
index 000000000..88b21ae0d
--- /dev/null
+++ b/kernel/drivers/cpufreq/qoriq-cpufreq.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * CPU Frequency Scaling driver for Freescale QorIQ SoCs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#if !defined(CONFIG_ARM)
+#include <asm/smp.h> /* for get_hard_smp_processor_id() in UP configs */
+#endif
+
+/**
+ * struct cpu_data - per-cpu clock information
+ * @parent: the parent node of cpu clock
+ * @table: frequency table
+ */
+struct cpu_data {
+ struct device_node *parent;
+ struct cpufreq_frequency_table *table;
+};
+
+/**
+ * struct soc_data - SoC specific data
+ * @freq_mask: mask the disallowed frequencies
+ * @flag: unique flags
+ */
+struct soc_data {
+ u32 freq_mask[4];
+ u32 flag;
+};
+
+#define FREQ_MASK 1
+/* see hardware specification for the allowed frequencies */
+static const struct soc_data sdata[] = {
+ { /* used by p2041 and p3041 */
+ .freq_mask = {0x8, 0x8, 0x2, 0x2},
+ .flag = FREQ_MASK,
+ },
+ { /* used by p5020 */
+ .freq_mask = {0x8, 0x2},
+ .flag = FREQ_MASK,
+ },
+ { /* used by p4080, p5040 */
+ .freq_mask = {0},
+ .flag = 0,
+ },
+};
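+
+/*
+ * The mask is indexed by the CPU's physical id, and bit i, when set,
+ * marks input clock i as disallowed. For the p5020 entry above, CPU 0
+ * gets mask 0x8 (clock 3 disallowed) and CPU 1 gets mask 0x2 (clock 1
+ * disallowed); see the (mask & (1 << i)) test in
+ * qoriq_cpufreq_cpu_init() below.
+ */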
+
+/*
+ * the minimum allowed core frequency, in Hz
+ * for chassis v1.0, >= platform frequency
+ * for chassis v2.0, >= platform frequency / 2
+ */
+static u32 min_cpufreq;
+static const u32 *fmask;
+
+#if defined(CONFIG_ARM)
+static int get_cpu_physical_id(int cpu)
+{
+ return topology_core_id(cpu);
+}
+#else
+static int get_cpu_physical_id(int cpu)
+{
+ return get_hard_smp_processor_id(cpu);
+}
+#endif
+
+static u32 get_bus_freq(void)
+{
+ struct device_node *soc;
+ u32 sysfreq;
+
+ soc = of_find_node_by_type(NULL, "soc");
+ if (!soc)
+ return 0;
+
+ if (of_property_read_u32(soc, "bus-frequency", &sysfreq))
+ sysfreq = 0;
+
+ of_node_put(soc);
+
+ return sysfreq;
+}
+
+static struct device_node *cpu_to_clk_node(int cpu)
+{
+ struct device_node *np, *clk_np;
+
+ if (!cpu_present(cpu))
+ return NULL;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+ return NULL;
+
+ clk_np = of_parse_phandle(np, "clocks", 0);
+ if (!clk_np)
+ return NULL;
+
+ of_node_put(np);
+
+ return clk_np;
+}
+
+/* traverse the cpu nodes to build the mask of cpus sharing a clock wire */
+static void set_affected_cpus(struct cpufreq_policy *policy)
+{
+ struct device_node *np, *clk_np;
+ struct cpumask *dstp = policy->cpus;
+ int i;
+
+ np = cpu_to_clk_node(policy->cpu);
+ if (!np)
+ return;
+
+ for_each_present_cpu(i) {
+ clk_np = cpu_to_clk_node(i);
+ if (!clk_np)
+ continue;
+
+ if (clk_np == np)
+ cpumask_set_cpu(i, dstp);
+
+ of_node_put(clk_np);
+ }
+ of_node_put(np);
+}
+
+/* mark duplicate frequencies in the frequency table as invalid */
+static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
+ int count)
+{
+ int i, j;
+
+ for (i = 1; i < count; i++) {
+ for (j = 0; j < i; j++) {
+ if (freq_table[j].frequency == CPUFREQ_ENTRY_INVALID ||
+ freq_table[j].frequency !=
+ freq_table[i].frequency)
+ continue;
+
+ freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ break;
+ }
+ }
+}
+
+/* sort the frequencies in the frequency table in descending order */
+static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
+ int count)
+{
+ int i, j, ind;
+ unsigned int freq, max_freq;
+ struct cpufreq_frequency_table table;
+
+ for (i = 0; i < count - 1; i++) {
+ max_freq = freq_table[i].frequency;
+ ind = i;
+ for (j = i + 1; j < count; j++) {
+ freq = freq_table[j].frequency;
+ if (freq == CPUFREQ_ENTRY_INVALID ||
+ freq <= max_freq)
+ continue;
+ ind = j;
+ max_freq = freq;
+ }
+
+ if (ind != i) {
+ /* exchange the frequencies */
+ table.driver_data = freq_table[i].driver_data;
+ table.frequency = freq_table[i].frequency;
+ freq_table[i].driver_data = freq_table[ind].driver_data;
+ freq_table[i].frequency = freq_table[ind].frequency;
+ freq_table[ind].driver_data = table.driver_data;
+ freq_table[ind].frequency = table.frequency;
+ }
+ }
+}
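+
+/*
+ * Example pass over an assumed raw table of {1200000, 600000, 1200000,
+ * 800000} kHz: freq_table_redup() marks the second 1200000 entry
+ * CPUFREQ_ENTRY_INVALID, and freq_table_sort() then leaves the valid
+ * entries in the order 1200000, 800000, 600000, with the invalidated
+ * slot skipped in place.
+ */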
+
+static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct device_node *np;
+ int i, count, ret;
+ u32 freq, mask;
+ struct clk *clk;
+ struct cpufreq_frequency_table *table;
+ struct cpu_data *data;
+ unsigned int cpu = policy->cpu;
+ u64 u64temp;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto err_np;
+
+ policy->clk = of_clk_get(np, 0);
+ if (IS_ERR(policy->clk)) {
+ pr_err("%s: no clock information\n", __func__);
+ goto err_nomem2;
+ }
+
+ data->parent = of_parse_phandle(np, "clocks", 0);
+ if (!data->parent) {
+ pr_err("%s: could not get clock information\n", __func__);
+ goto err_nomem2;
+ }
+
+ count = of_property_count_strings(data->parent, "clock-names");
+ table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
+ if (!table) {
+ pr_err("%s: no memory\n", __func__);
+ goto err_node;
+ }
+
+ if (fmask)
+ mask = fmask[get_cpu_physical_id(cpu)];
+ else
+ mask = 0x0;
+
+ for (i = 0; i < count; i++) {
+ clk = of_clk_get(data->parent, i);
+ freq = clk_get_rate(clk);
+ /*
+		 * the clock is valid if its frequency is not masked out
+		 * and is no lower than the minimum allowed frequency.
+ */
+ if (freq < min_cpufreq || (mask & (1 << i)))
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ table[i].frequency = freq / 1000;
+ table[i].driver_data = i;
+ }
+ freq_table_redup(table, count);
+ freq_table_sort(table, count);
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ /* set the min and max frequency properly */
+ ret = cpufreq_table_validate_and_show(policy, table);
+ if (ret) {
+ pr_err("invalid frequency table: %d\n", ret);
+ goto err_nomem1;
+ }
+
+ data->table = table;
+
+	/* update ->cpus if we have a cluster; no harm if not */
+ set_affected_cpus(policy);
+ policy->driver_data = data;
+
+ /* Minimum transition latency is 12 platform clocks */
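+	/*
+	 * E.g. with an assumed 600 MHz platform clock this works out to
+	 * 12 * NSEC_PER_SEC / 600000000 + 1 = 21 ns.
+	 */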
+ u64temp = 12ULL * NSEC_PER_SEC;
+ do_div(u64temp, get_bus_freq());
+ policy->cpuinfo.transition_latency = u64temp + 1;
+
+ of_node_put(np);
+
+ return 0;
+
+err_nomem1:
+ kfree(table);
+err_node:
+ of_node_put(data->parent);
+err_nomem2:
+ policy->driver_data = NULL;
+ kfree(data);
+err_np:
+ of_node_put(np);
+
+ return -ENODEV;
+}
+
+static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct cpu_data *data = policy->driver_data;
+
+ of_node_put(data->parent);
+ kfree(data->table);
+ kfree(data);
+ policy->driver_data = NULL;
+
+ return 0;
+}
+
+static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct clk *parent;
+ struct cpu_data *data = policy->driver_data;
+
+ parent = of_clk_get(data->parent, data->table[index].driver_data);
+ return clk_set_parent(policy->clk, parent);
+}
+
+static struct cpufreq_driver qoriq_cpufreq_driver = {
+ .name = "qoriq_cpufreq",
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = qoriq_cpufreq_cpu_init,
+ .exit = __exit_p(qoriq_cpufreq_cpu_exit),
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = qoriq_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct of_device_id node_matches[] __initconst = {
+ { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], },
+ { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], },
+ { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], },
+ { .compatible = "fsl,p4080-clockgen", .data = &sdata[2], },
+ { .compatible = "fsl,p5040-clockgen", .data = &sdata[2], },
+ { .compatible = "fsl,qoriq-clockgen-2.0", },
+ {}
+};
+
+static int __init qoriq_cpufreq_init(void)
+{
+ int ret;
+ struct device_node *np;
+ const struct of_device_id *match;
+ const struct soc_data *data;
+
+ np = of_find_matching_node(NULL, node_matches);
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(node_matches, np);
+ data = match->data;
+ if (data) {
+ if (data->flag)
+ fmask = data->freq_mask;
+ min_cpufreq = get_bus_freq();
+ } else {
+ min_cpufreq = get_bus_freq() / 2;
+ }
+
+ of_node_put(np);
+
+ ret = cpufreq_register_driver(&qoriq_cpufreq_driver);
+ if (!ret)
+ pr_info("Freescale QorIQ CPU frequency scaling driver\n");
+
+ return ret;
+}
+module_init(qoriq_cpufreq_init);
+
+static void __exit qoriq_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&qoriq_cpufreq_driver);
+}
+module_exit(qoriq_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
+MODULE_DESCRIPTION("cpufreq driver for Freescale QorIQ series SoCs");
diff --git a/kernel/drivers/cpufreq/s3c2410-cpufreq.c b/kernel/drivers/cpufreq/s3c2410-cpufreq.c
new file mode 100644
index 000000000..b8e5da8e1
--- /dev/null
+++ b/kernel/drivers/cpufreq/s3c2410-cpufreq.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2006-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 CPU Frequency scaling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+
+#include <plat/cpu.h>
+#include <plat/cpu-freq-core.h>
+
+/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
+
+static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ u32 clkdiv = 0;
+
+ if (cfg->divs.h_divisor == 2)
+ clkdiv |= S3C2410_CLKDIVN_HDIVN;
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2410_CLKDIVN_PDIVN;
+
+ __raw_writel(clkdiv, S3C2410_CLKDIVN);
+}
+
+static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long hclk, fclk, pclk;
+ unsigned int hdiv, pdiv;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ hclk_max = cfg->max.hclk;
+
+ cfg->freq.armclk = fclk;
+
+ s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n",
+ __func__, fclk, hclk_max);
+
+ hdiv = (fclk > cfg->max.hclk) ? 2 : 1;
+ hclk = fclk / hdiv;
+
+ if (hclk > cfg->max.hclk) {
+ s3c_freq_dbg("%s: hclk too big\n", __func__);
+ return -EINVAL;
+ }
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+ pclk = hclk / pdiv;
+
+ if (pclk > cfg->max.pclk) {
+ s3c_freq_dbg("%s: pclk too big\n", __func__);
+ return -EINVAL;
+ }
+
+ pdiv *= hdiv;
+
+ /* record the result */
+ cfg->divs.p_divisor = pdiv;
+ cfg->divs.h_divisor = hdiv;
+
+ return 0;
+}
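+
+/*
+ * Worked example with the s3c2410 limits below (fclk 200 MHz, hclk
+ * capped at 100 MHz, pclk capped at 50 MHz): hdiv = 2 gives
+ * hclk = 100 MHz, pdiv = 2 gives pclk = 50 MHz, and pdiv *= hdiv
+ * records a p_divisor of 4 relative to fclk.
+ */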
+
+static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
+ .max = {
+ .fclk = 200000000,
+ .hclk = 100000000,
+ .pclk = 50000000,
+ },
+
+ /* transition latency is about 5ms worst-case, so
+ * set 10ms to be sure */
+ .latency = 10000000,
+
+ .locktime_m = 150,
+ .locktime_u = 150,
+ .locktime_bits = 12,
+
+ .need_pll = 1,
+
+ .name = "s3c2410",
+ .calc_iotiming = s3c2410_iotiming_calc,
+ .set_iotiming = s3c2410_iotiming_set,
+ .get_iotiming = s3c2410_iotiming_get,
+
+ .set_fvco = s3c2410_set_fvco,
+ .set_refresh = s3c2410_cpufreq_setrefresh,
+ .set_divs = s3c2410_cpufreq_setdivs,
+ .calc_divs = s3c2410_cpufreq_calcdivs,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
+};
+
+static int s3c2410_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ return s3c_cpufreq_register(&s3c2410_cpufreq_info);
+}
+
+static struct subsys_interface s3c2410_cpufreq_interface = {
+ .name = "s3c2410_cpufreq",
+ .subsys = &s3c2410_subsys,
+ .add_dev = s3c2410_cpufreq_add,
+};
+
+static int __init s3c2410_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2410_cpufreq_interface);
+}
+arch_initcall(s3c2410_cpufreq_init);
+
+static int s3c2410a_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ /* alter the maximum freq settings for S3C2410A. If a board knows
+ * it only has a maximum of 200, then it should register its own
+ * limits. */
+
+ s3c2410_cpufreq_info.max.fclk = 266000000;
+ s3c2410_cpufreq_info.max.hclk = 133000000;
+ s3c2410_cpufreq_info.max.pclk = 66500000;
+ s3c2410_cpufreq_info.name = "s3c2410a";
+
+ return s3c2410_cpufreq_add(dev, sif);
+}
+
+static struct subsys_interface s3c2410a_cpufreq_interface = {
+ .name = "s3c2410a_cpufreq",
+ .subsys = &s3c2410a_subsys,
+ .add_dev = s3c2410a_cpufreq_add,
+};
+
+static int __init s3c2410a_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2410a_cpufreq_interface);
+}
+arch_initcall(s3c2410a_cpufreq_init);
diff --git a/kernel/drivers/cpufreq/s3c2412-cpufreq.c b/kernel/drivers/cpufreq/s3c2412-cpufreq.c
new file mode 100644
index 000000000..eb262133f
--- /dev/null
+++ b/kernel/drivers/cpufreq/s3c2412-cpufreq.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2412 CPU Frequency scaling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+#include <mach/s3c2412.h>
+
+#include <plat/cpu.h>
+#include <plat/cpu-freq-core.h>
+
+/* our clock resources. */
+static struct clk *xtal;
+static struct clk *fclk;
+static struct clk *hclk;
+static struct clk *armclk;
+
+/* HDIV: 1, 2, 3, 4, 6, 8 */
+
+static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned int hdiv, pdiv, armdiv, dvs;
+ unsigned long hclk, fclk, armclk, armdiv_clk;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ armclk = cfg->freq.armclk;
+ hclk_max = cfg->max.hclk;
+
+	/* We can't run hclk above armclk: at best, armclk and hclk are
+	 * equal, with the core running in dvs mode. */
+
+ if (hclk_max > armclk)
+ hclk_max = armclk;
+
+ s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n",
+ __func__, fclk, armclk, hclk_max);
+ s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n",
+ __func__, cfg->freq.fclk, cfg->freq.armclk,
+ cfg->freq.hclk, cfg->freq.pclk);
+
+ armdiv = fclk / armclk;
+
+ if (armdiv < 1)
+ armdiv = 1;
+ if (armdiv > 2)
+ armdiv = 2;
+
+ cfg->divs.arm_divisor = armdiv;
+ armdiv_clk = fclk / armdiv;
+
+ hdiv = armdiv_clk / hclk_max;
+ if (hdiv < 1)
+ hdiv = 1;
+
+ cfg->freq.hclk = hclk = armdiv_clk / hdiv;
+
+ /* set dvs depending on whether we reached armclk or not. */
+ cfg->divs.dvs = dvs = armclk < armdiv_clk;
+
+ /* update the actual armclk we achieved. */
+ cfg->freq.armclk = dvs ? hclk : armdiv_clk;
+
+ s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n",
+ __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs);
+
+ if (hdiv > 4)
+ goto invalid;
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+
+ if ((hclk / pdiv) > cfg->max.pclk)
+ pdiv++;
+
+ cfg->freq.pclk = hclk / pdiv;
+
+ s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
+
+ if (pdiv > 2)
+ goto invalid;
+
+ pdiv *= hdiv;
+
+ /* store the result, and then return */
+
+ cfg->divs.h_divisor = hdiv * armdiv;
+ cfg->divs.p_divisor = pdiv * armdiv;
+
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
+static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long clkdiv;
+ unsigned long olddiv;
+
+ olddiv = clkdiv = __raw_readl(S3C2410_CLKDIVN);
+
+ /* clear off current clock info */
+
+ clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN;
+ clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK;
+ clkdiv &= ~S3C2412_CLKDIVN_PDIVN;
+
+ if (cfg->divs.arm_divisor == 2)
+ clkdiv |= S3C2412_CLKDIVN_ARMDIVN;
+
+ clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1);
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2412_CLKDIVN_PDIVN;
+
+ s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
+ __raw_writel(clkdiv, S3C2410_CLKDIVN);
+
+ clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
+}
+
+static void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
+{
+ struct s3c_cpufreq_board *board = cfg->board;
+ unsigned long refresh;
+
+ s3c_freq_dbg("%s: refresh %u ns, hclk %lu\n", __func__,
+ board->refresh, cfg->freq.hclk);
+
+ /* Reduce both the refresh time (in ns) and the frequency (in MHz)
+ * by 10 each to ensure that we do not overflow 32 bit numbers. This
+ * should work for HCLK up to 133MHz and refresh period up to 30usec.
+ */
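+	/* Worked example (editor's illustration, values assumed): a 7.8us
+	 * (7800ns) refresh period with hclk at 133MHz gives
+	 * (7800/10) * (133000000/100) / 10^6 = 780 * 1330000 / 10^6 = 1037,
+	 * i.e. the refresh period expressed in hclk cycles.
+	 */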
+
+ refresh = (board->refresh / 10);
+ refresh *= (cfg->freq.hclk / 100);
+ refresh /= (1 * 1000 * 1000); /* 10^6 */
+
+ s3c_freq_dbg("%s: setting refresh 0x%08lx\n", __func__, refresh);
+ __raw_writel(refresh, S3C2412_REFRESH);
+}
+
+/* set the default cpu frequency information, based on a 200MHz part
+ * as we have no other way of detecting the speed rating in software.
+ */
+
+static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
+ .max = {
+ .fclk = 200000000,
+ .hclk = 100000000,
+ .pclk = 50000000,
+ },
+
+ .latency = 5000000, /* 5ms */
+
+ .locktime_m = 150,
+ .locktime_u = 150,
+ .locktime_bits = 16,
+
+ .name = "s3c2412",
+ .set_refresh = s3c2412_cpufreq_setrefresh,
+ .set_divs = s3c2412_cpufreq_setdivs,
+ .calc_divs = s3c2412_cpufreq_calcdivs,
+
+ .calc_iotiming = s3c2412_iotiming_calc,
+ .set_iotiming = s3c2412_iotiming_set,
+ .get_iotiming = s3c2412_iotiming_get,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
+};
+
+static int s3c2412_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ unsigned long fclk_rate;
+
+ hclk = clk_get(NULL, "hclk");
+ if (IS_ERR(hclk)) {
+ printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
+ return -ENOENT;
+ }
+
+ fclk = clk_get(NULL, "fclk");
+ if (IS_ERR(fclk)) {
+ printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
+ goto err_fclk;
+ }
+
+ fclk_rate = clk_get_rate(fclk);
+ if (fclk_rate > 200000000) {
+ printk(KERN_INFO
+		       "%s: fclk %lu MHz, assuming 266MHz capable part\n",
+ __func__, fclk_rate / 1000000);
+ s3c2412_cpufreq_info.max.fclk = 266000000;
+ s3c2412_cpufreq_info.max.hclk = 133000000;
+ s3c2412_cpufreq_info.max.pclk = 66000000;
+ }
+
+ armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(armclk)) {
+ printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
+ goto err_armclk;
+ }
+
+ xtal = clk_get(NULL, "xtal");
+ if (IS_ERR(xtal)) {
+ printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
+ goto err_xtal;
+ }
+
+ return s3c_cpufreq_register(&s3c2412_cpufreq_info);
+
+err_xtal:
+ clk_put(armclk);
+err_armclk:
+ clk_put(fclk);
+err_fclk:
+ clk_put(hclk);
+
+ return -ENOENT;
+}
+
+static struct subsys_interface s3c2412_cpufreq_interface = {
+ .name = "s3c2412_cpufreq",
+ .subsys = &s3c2412_subsys,
+ .add_dev = s3c2412_cpufreq_add,
+};
+
+static int s3c2412_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2412_cpufreq_interface);
+}
+arch_initcall(s3c2412_cpufreq_init);
diff --git a/kernel/drivers/cpufreq/s3c2416-cpufreq.c b/kernel/drivers/cpufreq/s3c2416-cpufreq.c
new file mode 100644
index 000000000..d6d425773
--- /dev/null
+++ b/kernel/drivers/cpufreq/s3c2416-cpufreq.c
@@ -0,0 +1,491 @@
+/*
+ * S3C2416/2450 CPUfreq Support
+ *
+ * Copyright 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on s3c64xx_cpufreq.c
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+
+static DEFINE_MUTEX(cpufreq_lock);
+
+struct s3c2416_data {
+ struct clk *armdiv;
+ struct clk *armclk;
+ struct clk *hclk;
+
+ unsigned long regulator_latency;
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ struct regulator *vddarm;
+#endif
+
+ struct cpufreq_frequency_table *freq_table;
+
+ bool is_dvs;
+ bool disable_dvs;
+};
+
+static struct s3c2416_data s3c2416_cpufreq;
+
+struct s3c2416_dvfs {
+ unsigned int vddarm_min;
+ unsigned int vddarm_max;
+};
+
+/* pseudo-frequency for dvs mode */
+#define FREQ_DVS 132333
+
+/* frequency at which to sleep and reboot;
+ * it's essential to leave dvs, as some boards do not reconfigure the
+ * regulator on reboot
+ */
+#define FREQ_SLEEP 133333
+
+/* Sources for the ARMCLK */
+#define SOURCE_HCLK 0
+#define SOURCE_ARMDIV 1
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+/* S3C2416 only supports changing the voltage in the dvs-mode.
+ * Voltages down to 1.0V seem to work, so we take what the regulator
+ * can get us.
+ */
+static struct s3c2416_dvfs s3c2416_dvfs_table[] = {
+ [SOURCE_HCLK] = { 950000, 1250000 },
+ [SOURCE_ARMDIV] = { 1250000, 1350000 },
+};
+#endif
+
+static struct cpufreq_frequency_table s3c2416_freq_table[] = {
+ { 0, SOURCE_HCLK, FREQ_DVS },
+ { 0, SOURCE_ARMDIV, 133333 },
+ { 0, SOURCE_ARMDIV, 266666 },
+ { 0, SOURCE_ARMDIV, 400000 },
+ { 0, 0, CPUFREQ_TABLE_END },
+};
+
+static struct cpufreq_frequency_table s3c2450_freq_table[] = {
+ { 0, SOURCE_HCLK, FREQ_DVS },
+ { 0, SOURCE_ARMDIV, 133500 },
+ { 0, SOURCE_ARMDIV, 267000 },
+ { 0, SOURCE_ARMDIV, 534000 },
+ { 0, 0, CPUFREQ_TABLE_END },
+};
+
+static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
+{
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+
+ if (cpu != 0)
+ return 0;
+
+ /* return our pseudo-frequency when in dvs mode */
+ if (s3c_freq->is_dvs)
+ return FREQ_DVS;
+
+ return clk_get_rate(s3c_freq->armclk) / 1000;
+}
+
+static int s3c2416_cpufreq_set_armdiv(struct s3c2416_data *s3c_freq,
+ unsigned int freq)
+{
+ int ret;
+
+ if (clk_get_rate(s3c_freq->armdiv) / 1000 != freq) {
+ ret = clk_set_rate(s3c_freq->armdiv, freq * 1000);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to set armdiv rate %dkHz: %d\n",
+ freq, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int s3c2416_cpufreq_enter_dvs(struct s3c2416_data *s3c_freq, int idx)
+{
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ struct s3c2416_dvfs *dvfs;
+#endif
+ int ret;
+
+ if (s3c_freq->is_dvs) {
+ pr_debug("cpufreq: already in dvs mode, nothing to do\n");
+ return 0;
+ }
+
+ pr_debug("cpufreq: switching armclk to hclk (%lukHz)\n",
+ clk_get_rate(s3c_freq->hclk) / 1000);
+ ret = clk_set_parent(s3c_freq->armclk, s3c_freq->hclk);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to switch armclk to hclk: %d\n", ret);
+ return ret;
+ }
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ /* changing the core voltage is only allowed when in dvs mode */
+ if (s3c_freq->vddarm) {
+ dvfs = &s3c2416_dvfs_table[idx];
+
+ pr_debug("cpufreq: setting regulator to %d-%d\n",
+ dvfs->vddarm_min, dvfs->vddarm_max);
+ ret = regulator_set_voltage(s3c_freq->vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+
+		/* if lowering the voltage fails, there is nothing more to do */
+ if (ret != 0)
+ pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
+ }
+#endif
+
+ s3c_freq->is_dvs = 1;
+
+ return 0;
+}
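+
+/* Ordering note (editor's summary): entering dvs reparents armclk to hclk
+ * before lowering VDDARM, while leaving dvs (below) raises VDDARM before
+ * reparenting, so the core never runs at a frequency that its current
+ * voltage cannot support.
+ */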
+
+static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
+{
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ struct s3c2416_dvfs *dvfs;
+#endif
+ int ret;
+
+ if (!s3c_freq->is_dvs) {
+ pr_debug("cpufreq: not in dvs mode, so can't leave\n");
+ return 0;
+ }
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ if (s3c_freq->vddarm) {
+ dvfs = &s3c2416_dvfs_table[idx];
+
+ pr_debug("cpufreq: setting regulator to %d-%d\n",
+ dvfs->vddarm_min, dvfs->vddarm_max);
+ ret = regulator_set_voltage(s3c_freq->vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
+ return ret;
+ }
+ }
+#endif
+
+	/* force armdiv to hclk frequency for transition from dvs */
+ if (clk_get_rate(s3c_freq->armdiv) > clk_get_rate(s3c_freq->hclk)) {
+ pr_debug("cpufreq: force armdiv to hclk frequency (%lukHz)\n",
+ clk_get_rate(s3c_freq->hclk) / 1000);
+ ret = s3c2416_cpufreq_set_armdiv(s3c_freq,
+ clk_get_rate(s3c_freq->hclk) / 1000);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to set the armdiv to %lukHz: %d\n",
+ clk_get_rate(s3c_freq->hclk) / 1000, ret);
+ return ret;
+ }
+ }
+
+ pr_debug("cpufreq: switching armclk parent to armdiv (%lukHz)\n",
+ clk_get_rate(s3c_freq->armdiv) / 1000);
+
+ ret = clk_set_parent(s3c_freq->armclk, s3c_freq->armdiv);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to switch armclk clock parent to armdiv: %d\n",
+ ret);
+ return ret;
+ }
+
+ s3c_freq->is_dvs = 0;
+
+ return 0;
+}
+
+static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ unsigned int new_freq;
+ int idx, ret, to_dvs = 0;
+
+ mutex_lock(&cpufreq_lock);
+
+ idx = s3c_freq->freq_table[index].driver_data;
+
+ if (idx == SOURCE_HCLK)
+ to_dvs = 1;
+
+ /* switching to dvs when it's not allowed */
+ if (to_dvs && s3c_freq->disable_dvs) {
+ pr_debug("cpufreq: entering dvs mode not allowed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* When leaving dvs mode, always switch the armdiv to the hclk rate
+	 * first; the S3C2416 has stability issues when switching directly to
+ * higher frequencies.
+ */
+ new_freq = (s3c_freq->is_dvs && !to_dvs)
+ ? clk_get_rate(s3c_freq->hclk) / 1000
+ : s3c_freq->freq_table[index].frequency;
+
+ if (to_dvs) {
+ pr_debug("cpufreq: enter dvs\n");
+ ret = s3c2416_cpufreq_enter_dvs(s3c_freq, idx);
+ } else if (s3c_freq->is_dvs) {
+ pr_debug("cpufreq: leave dvs\n");
+ ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
+ } else {
+ pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
+ ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
+ }
+
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+{
+ int count, v, i, found;
+ struct cpufreq_frequency_table *pos;
+ struct s3c2416_dvfs *dvfs;
+
+ count = regulator_count_voltages(s3c_freq->vddarm);
+ if (count < 0) {
+ pr_err("cpufreq: Unable to check supported voltages\n");
+ return;
+ }
+
+ if (!count)
+ goto out;
+
+ cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) {
+ dvfs = &s3c2416_dvfs_table[pos->driver_data];
+ found = 0;
+
+ /* Check only the min-voltage, more is always ok on S3C2416 */
+ for (i = 0; i < count; i++) {
+ v = regulator_list_voltage(s3c_freq->vddarm, i);
+ if (v >= dvfs->vddarm_min)
+ found = 1;
+ }
+
+ if (!found) {
+ pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+ pos->frequency);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+ }
+
+out:
+ /* Guessed */
+ s3c_freq->regulator_latency = 1 * 1000 * 1000;
+}
+#endif
+
+static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ int ret;
+
+ mutex_lock(&cpufreq_lock);
+
+ /* disable further changes */
+ s3c_freq->disable_dvs = 1;
+
+ mutex_unlock(&cpufreq_lock);
+
+ /* some boards don't reconfigure the regulator on reboot, which
+ * could lead to undervolting the cpu when the clock is reset.
+ * Therefore we always leave the DVS mode on reboot.
+ */
+ if (s3c_freq->is_dvs) {
+ pr_debug("cpufreq: leave dvs on reboot\n");
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), FREQ_SLEEP, 0);
+ if (ret < 0)
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
+ .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
+};
+
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
+ struct cpufreq_frequency_table *pos;
+ struct clk *msysclk;
+ unsigned long rate;
+ int ret;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ msysclk = clk_get(NULL, "msysclk");
+ if (IS_ERR(msysclk)) {
+ ret = PTR_ERR(msysclk);
+ pr_err("cpufreq: Unable to obtain msysclk: %d\n", ret);
+ return ret;
+ }
+
+ /*
+	 * S3C2416 and S3C2450 share the same processor-ID and provide no
+	 * means of distinguishing them other than the rate of msysclk. On
+	 * the S3C2416 msysclk runs at 800MHz and on the S3C2450 at 534MHz.
+ */
+ rate = clk_get_rate(msysclk);
+ if (rate == 800 * 1000 * 1000) {
+ pr_info("cpufreq: msysclk running at %lukHz, using S3C2416 frequency table\n",
+ rate / 1000);
+ s3c_freq->freq_table = s3c2416_freq_table;
+ policy->cpuinfo.max_freq = 400000;
+ } else if (rate / 1000 == 534000) {
+ pr_info("cpufreq: msysclk running at %lukHz, using S3C2450 frequency table\n",
+ rate / 1000);
+ s3c_freq->freq_table = s3c2450_freq_table;
+ policy->cpuinfo.max_freq = 534000;
+ }
+
+ /* not needed anymore */
+ clk_put(msysclk);
+
+ if (s3c_freq->freq_table == NULL) {
+ pr_err("cpufreq: No frequency information for this CPU, msysclk at %lukHz\n",
+ rate / 1000);
+ return -ENODEV;
+ }
+
+ s3c_freq->is_dvs = 0;
+
+ s3c_freq->armdiv = clk_get(NULL, "armdiv");
+ if (IS_ERR(s3c_freq->armdiv)) {
+ ret = PTR_ERR(s3c_freq->armdiv);
+ pr_err("cpufreq: Unable to obtain ARMDIV: %d\n", ret);
+ return ret;
+ }
+
+ s3c_freq->hclk = clk_get(NULL, "hclk");
+ if (IS_ERR(s3c_freq->hclk)) {
+ ret = PTR_ERR(s3c_freq->hclk);
+ pr_err("cpufreq: Unable to obtain HCLK: %d\n", ret);
+ goto err_hclk;
+ }
+
+	/* check the hclk rate; we only support the common 133MHz for now.
+	 * hclk could also run at 66MHz, but this is not often used.
+ */
+ rate = clk_get_rate(s3c_freq->hclk);
+ if (rate < 133 * 1000 * 1000) {
+ pr_err("cpufreq: HCLK not at 133MHz\n");
+ clk_put(s3c_freq->hclk);
+ ret = -EINVAL;
+ goto err_armclk;
+ }
+
+ s3c_freq->armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(s3c_freq->armclk)) {
+ ret = PTR_ERR(s3c_freq->armclk);
+ pr_err("cpufreq: Unable to obtain ARMCLK: %d\n", ret);
+ goto err_armclk;
+ }
+
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ s3c_freq->vddarm = regulator_get(NULL, "vddarm");
+ if (IS_ERR(s3c_freq->vddarm)) {
+ ret = PTR_ERR(s3c_freq->vddarm);
+ pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
+ goto err_vddarm;
+ }
+
+ s3c2416_cpufreq_cfg_regulator(s3c_freq);
+#else
+ s3c_freq->regulator_latency = 0;
+#endif
+
+ cpufreq_for_each_entry(pos, s3c_freq->freq_table) {
+ /* special handling for dvs mode */
+ if (pos->driver_data == 0) {
+ if (!s3c_freq->hclk) {
+ pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n",
+ pos->frequency);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+ } else {
+ continue;
+ }
+ }
+
+ /* Check for frequencies we can generate */
+ rate = clk_round_rate(s3c_freq->armdiv,
+ pos->frequency * 1000);
+ rate /= 1000;
+ if (rate != pos->frequency) {
+			pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate returned %lu)\n",
+ pos->frequency, rate);
+ pos->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+ }
+
+	/* Datasheet says PLL stabilisation time must be at least 300us,
+	 * so add some fudge (reference in the LOCKCON0 register description).
+ */
+ ret = cpufreq_generic_init(policy, s3c_freq->freq_table,
+ (500 * 1000) + s3c_freq->regulator_latency);
+ if (ret)
+ goto err_freq_table;
+
+ register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
+
+ return 0;
+
+err_freq_table:
+#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
+ regulator_put(s3c_freq->vddarm);
+err_vddarm:
+#endif
+ clk_put(s3c_freq->armclk);
+err_armclk:
+ clk_put(s3c_freq->hclk);
+err_hclk:
+ clk_put(s3c_freq->armdiv);
+
+ return ret;
+}
+
+static struct cpufreq_driver s3c2416_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c2416_cpufreq_set_target,
+ .get = s3c2416_cpufreq_get_speed,
+ .init = s3c2416_cpufreq_driver_init,
+ .name = "s3c2416",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init s3c2416_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s3c2416_cpufreq_driver);
+}
+module_init(s3c2416_cpufreq_init);
diff --git a/kernel/drivers/cpufreq/s3c2440-cpufreq.c b/kernel/drivers/cpufreq/s3c2440-cpufreq.c
new file mode 100644
index 000000000..0129f5c70
--- /dev/null
+++ b/kernel/drivers/cpufreq/s3c2440-cpufreq.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2006-2009 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ * Vincent Sanders <vince@simtec.co.uk>
+ *
+ * S3C2440/S3C2442 CPU Frequency scaling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+
+#include <plat/cpu.h>
+#include <plat/cpu-freq-core.h>
+
+static struct clk *xtal;
+static struct clk *fclk;
+static struct clk *hclk;
+static struct clk *armclk;
+
+/* HDIV: 1, 2, 3, 4, 6, 8 */
+
+static inline int within_khz(unsigned long a, unsigned long b)
+{
+ long diff = a - b;
+
+ return (diff >= -1000 && diff <= 1000);
+}
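+
+/* e.g. within_khz(133333333, 133333000) is true (333Hz apart) while
+ * within_khz(133333333, 133000000) is false; the arguments are in Hz,
+ * so the tolerance is +/-1kHz. (Editor's note.)
+ */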
+
+/**
+ * s3c2440_cpufreq_calcdivs - calculate divider settings
+ * @cfg: The cpu frequency settings.
+ *
+ * Calculate the divider values for the given frequency settings
+ * specified in @cfg. The values are stored in @cfg for later use
+ * by the relevant set routine if the requested settings can be reached.
+ */
+static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned int hdiv, pdiv;
+ unsigned long hclk, fclk, armclk;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ armclk = cfg->freq.armclk;
+ hclk_max = cfg->max.hclk;
+
+ s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n",
+ __func__, fclk, armclk, hclk_max);
+
+ if (armclk > fclk) {
+ printk(KERN_WARNING "%s: armclk > fclk\n", __func__);
+ armclk = fclk;
+ }
+
+ /* if we are in DVS, we need HCLK to be <= ARMCLK */
+ if (armclk < fclk && armclk < hclk_max)
+ hclk_max = armclk;
+
+ for (hdiv = 1; hdiv < 9; hdiv++) {
+ if (hdiv == 5 || hdiv == 7)
+ hdiv++;
+
+ hclk = (fclk / hdiv);
+ if (hclk <= hclk_max || within_khz(hclk, hclk_max))
+ break;
+ }
+
+ s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv);
+
+ if (hdiv > 8)
+ goto invalid;
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+
+ if ((hclk / pdiv) > cfg->max.pclk)
+ pdiv++;
+
+ s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
+
+ if (pdiv > 2)
+ goto invalid;
+
+ pdiv *= hdiv;
+
+ /* calculate a valid armclk */
+
+ if (armclk < hclk)
+ armclk = hclk;
+
+ /* if we're running armclk lower than fclk, this really means
+ * that the system should go into dvs mode, which means that
+ * armclk is connected to hclk. */
+ if (armclk < fclk) {
+ cfg->divs.dvs = 1;
+ armclk = hclk;
+ } else
+ cfg->divs.dvs = 0;
+
+ cfg->freq.armclk = armclk;
+
+ /* store the result, and then return */
+
+ cfg->divs.h_divisor = hdiv;
+ cfg->divs.p_divisor = pdiv;
+
+ return 0;
+
+ invalid:
+ return -EINVAL;
+}
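+
+/* Worked example (editor's illustration): for fclk=400MHz, a requested
+ * armclk of 400MHz and a 133.33MHz hclk limit, the loop settles on
+ * hdiv=3 (hclk=133.33MHz); pclk must be <= 66.67MHz so pdiv=2, folded
+ * into hdiv to give p_divisor=6; armclk equals fclk so dvs stays off.
+ */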
+
+#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \
+ S3C2440_CAMDIVN_HCLK4_HALF)
+
+/**
+ * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings
+ * @cfg: The cpu frequency settings.
+ *
+ * Set the divisors from the settings in @cfg, which were generated
+ * during the calculation phase by s3c2440_cpufreq_calcdivs().
+ */
+static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long clkdiv, camdiv;
+
+	s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__,
+ cfg->divs.h_divisor, cfg->divs.p_divisor);
+
+ clkdiv = __raw_readl(S3C2410_CLKDIVN);
+ camdiv = __raw_readl(S3C2440_CAMDIVN);
+
+ clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN);
+ camdiv &= ~CAMDIVN_HCLK_HALF;
+
+ switch (cfg->divs.h_divisor) {
+ case 1:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_1;
+ break;
+
+ case 2:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_2;
+ break;
+
+ case 6:
+ camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
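+		/* fall through: /6 is /3 with the CAMDIVN halving bit */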
+ case 3:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
+ break;
+
+ case 8:
+ camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
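+		/* fall through: /8 is /4 with the CAMDIVN halving bit */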
+ case 4:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
+ break;
+
+ default:
+ BUG(); /* we don't expect to get here. */
+ }
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2440_CLKDIVN_PDIVN;
+
+ /* todo - set pclk. */
+
+	/* Write the divisors first with hclk intentionally halved so that
+	 * when we write clkdiv we momentarily run under frequency rather
+	 * than over. We then make a short delay and remove the hclk
+	 * halving if necessary.
+ */
+
+ __raw_writel(camdiv | CAMDIVN_HCLK_HALF, S3C2440_CAMDIVN);
+ __raw_writel(clkdiv, S3C2410_CLKDIVN);
+
+ ndelay(20);
+ __raw_writel(camdiv, S3C2440_CAMDIVN);
+
+ clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
+}
+
+static int run_freq_for(unsigned long max_hclk, unsigned long fclk,
+ int *divs,
+ struct cpufreq_frequency_table *table,
+ size_t table_size)
+{
+ unsigned long freq;
+ int index = 0;
+ int div;
+
+	/* note: pre-increment so that each divisor is used exactly once */
+	for (div = *divs; div > 0; div = *++divs) {
+ freq = fclk / div;
+
+ if (freq > max_hclk && div != 1)
+ continue;
+
+ freq /= 1000; /* table is in kHz */
+ index = s3c_cpufreq_addfreq(table, index, table_size, freq);
+ if (index < 0)
+ break;
+ }
+
+ return index;
+}
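+
+/* e.g. with fclk=400MHz, max_hclk=133.33MHz and divisors {1,2,3,4,6,8}
+ * this builds the candidate table 400000, 133333, 100000, 66666 and
+ * 50000 kHz: div=1 is always kept, while 200MHz (div=2) exceeds max_hclk
+ * and is skipped. (Editor's illustration.)
+ */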
+
+static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 };
+
+static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
+ struct cpufreq_frequency_table *table,
+ size_t table_size)
+{
+ int ret;
+
+ WARN_ON(cfg->info == NULL);
+ WARN_ON(cfg->board == NULL);
+
+ ret = run_freq_for(cfg->info->max.hclk,
+ cfg->info->max.fclk,
+ hclk_divs,
+ table, table_size);
+
+ s3c_freq_dbg("%s: returning %d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
+ .max = {
+ .fclk = 400000000,
+ .hclk = 133333333,
+ .pclk = 66666666,
+ },
+
+ .locktime_m = 300,
+ .locktime_u = 300,
+ .locktime_bits = 16,
+
+ .name = "s3c244x",
+ .calc_iotiming = s3c2410_iotiming_calc,
+ .set_iotiming = s3c2410_iotiming_set,
+ .get_iotiming = s3c2410_iotiming_get,
+ .set_fvco = s3c2410_set_fvco,
+
+ .set_refresh = s3c2410_cpufreq_setrefresh,
+ .set_divs = s3c2440_cpufreq_setdivs,
+ .calc_divs = s3c2440_cpufreq_calcdivs,
+ .calc_freqtable = s3c2440_cpufreq_calctable,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
+};
+
+static int s3c2440_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ xtal = s3c_cpufreq_clk_get(NULL, "xtal");
+ hclk = s3c_cpufreq_clk_get(NULL, "hclk");
+ fclk = s3c_cpufreq_clk_get(NULL, "fclk");
+ armclk = s3c_cpufreq_clk_get(NULL, "armclk");
+
+ if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
+ printk(KERN_ERR "%s: failed to get clocks\n", __func__);
+ return -ENOENT;
+ }
+
+ return s3c_cpufreq_register(&s3c2440_cpufreq_info);
+}
+
+static struct subsys_interface s3c2440_cpufreq_interface = {
+ .name = "s3c2440_cpufreq",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_cpufreq_add,
+};
+
+static int s3c2440_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2440_cpufreq_interface);
+}
+
+/* arch_initcall adds the clocks we need, so use subsys_initcall. */
+subsys_initcall(s3c2440_cpufreq_init);
+
+static struct subsys_interface s3c2442_cpufreq_interface = {
+ .name = "s3c2442_cpufreq",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2440_cpufreq_add,
+};
+
+static int s3c2442_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2442_cpufreq_interface);
+}
+subsys_initcall(s3c2442_cpufreq_init);
diff --git a/kernel/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/kernel/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
new file mode 100644
index 000000000..9b7b4289d
--- /dev/null
+++ b/kernel/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2009 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX CPU Frequency scaling - debugfs status support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+
+#include <plat/cpu-freq-core.h>
+
+static struct dentry *dbgfs_root;
+static struct dentry *dbgfs_file_io;
+static struct dentry *dbgfs_file_info;
+static struct dentry *dbgfs_file_board;
+
+#define print_ns(x) ((x) / 10), ((x) % 10)
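+/* e.g. print_ns(75) expands to "7, 5" and prints as "7.5" (editor's note) */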
+
+static void show_max(struct seq_file *seq, struct s3c_freq *f)
+{
+ seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n",
+ f->fclk, f->hclk, f->pclk, f->armclk);
+}
+
+static int board_show(struct seq_file *seq, void *p)
+{
+ struct s3c_cpufreq_config *cfg;
+ struct s3c_cpufreq_board *brd;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ brd = cfg->board;
+ if (!brd) {
+ seq_printf(seq, "no board definition set?\n");
+ return 0;
+ }
+
+ seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh);
+ seq_printf(seq, "auto_io=%u\n", brd->auto_io);
+ seq_printf(seq, "need_io=%u\n", brd->need_io);
+
+ show_max(seq, &brd->max);
+
+ return 0;
+}
+
+static int fops_board_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, board_show, NULL);
+}
+
+static const struct file_operations fops_board = {
+ .open = fops_board_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int info_show(struct seq_file *seq, void *p)
+{
+ struct s3c_cpufreq_config *cfg;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk);
+ seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n",
+ cfg->freq.hclk, print_ns(cfg->freq.hclk_tns));
+	seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.pclk);
+ seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk);
+ seq_printf(seq, "\n");
+
+ show_max(seq, &cfg->max);
+
+ seq_printf(seq, "Divisors: P=%d, H=%d, A=%d, dvs=%s\n",
+ cfg->divs.h_divisor, cfg->divs.p_divisor,
+ cfg->divs.arm_divisor, cfg->divs.dvs ? "on" : "off");
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll);
+
+ return 0;
+}
+
+static int fops_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, info_show, NULL);
+}
+
+static const struct file_operations fops_info = {
+ .open = fops_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int io_show(struct seq_file *seq, void *p)
+{
+ void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *);
+ struct s3c_cpufreq_config *cfg;
+ struct s3c_iotimings *iot;
+ union s3c_iobank *iob;
+ int bank;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ show_bank = cfg->info->debug_io_show;
+ if (!show_bank) {
+ seq_printf(seq, "no code to show bank timing\n");
+ return 0;
+ }
+
+ iot = s3c_cpufreq_getiotimings();
+ if (!iot) {
+ seq_printf(seq, "no io timings registered\n");
+ return 0;
+ }
+
+ seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns));
+
+ for (bank = 0; bank < MAX_BANKS; bank++) {
+ iob = &iot->bank[bank];
+
+ seq_printf(seq, "bank %d: ", bank);
+
+ if (!iob->io_2410) {
+ seq_printf(seq, "nothing set\n");
+ continue;
+ }
+
+ show_bank(seq, cfg, iob);
+ }
+
+ return 0;
+}
+
+static int fops_io_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, io_show, NULL);
+}
+
+static const struct file_operations fops_io = {
+ .open = fops_io_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int __init s3c_freq_debugfs_init(void)
+{
+ dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
+ if (IS_ERR(dbgfs_root)) {
+ printk(KERN_ERR "%s: error creating debugfs root\n", __func__);
+ return PTR_ERR(dbgfs_root);
+ }
+
+ dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root,
+ NULL, &fops_io);
+
+ dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root,
+ NULL, &fops_info);
+
+ dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root,
+ NULL, &fops_board);
+
+ return 0;
+}
+
+late_initcall(s3c_freq_debugfs_init);
+
diff --git a/kernel/drivers/cpufreq/s3c24xx-cpufreq.c b/kernel/drivers/cpufreq/s3c24xx-cpufreq.c
new file mode 100644
index 000000000..733aa5153
--- /dev/null
+++ b/kernel/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -0,0 +1,674 @@
+/*
+ * Copyright (c) 2006-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX CPU Frequency scaling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <plat/cpu.h>
+#include <plat/cpu-freq-core.h>
+
+#include <mach/regs-clock.h>
+
+/* note, cpufreq support deals in kHz, not Hz */
+
+static struct cpufreq_driver s3c24xx_driver;
+static struct s3c_cpufreq_config cpu_cur;
+static struct s3c_iotimings s3c24xx_iotiming;
+static struct cpufreq_frequency_table *pll_reg;
+static unsigned int last_target = ~0;
+static unsigned int ftab_size;
+static struct cpufreq_frequency_table *ftab;
+
+static struct clk *_clk_mpll;
+static struct clk *_clk_xtal;
+static struct clk *clk_fclk;
+static struct clk *clk_hclk;
+static struct clk *clk_pclk;
+static struct clk *clk_arm;
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS
+struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
+{
+ return &cpu_cur;
+}
+
+struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
+{
+ return &s3c24xx_iotiming;
+}
+#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS */
+
+static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long fclk, pclk, hclk, armclk;
+
+ cfg->freq.fclk = fclk = clk_get_rate(clk_fclk);
+ cfg->freq.hclk = hclk = clk_get_rate(clk_hclk);
+ cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
+ cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
+
+ cfg->pll.driver_data = __raw_readl(S3C2410_MPLLCON);
+ cfg->pll.frequency = fclk;
+
+ cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
+
+ cfg->divs.h_divisor = fclk / hclk;
+ cfg->divs.p_divisor = fclk / pclk;
+}
+
+static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long pll = cfg->pll.frequency;
+
+ cfg->freq.fclk = pll;
+ cfg->freq.hclk = pll / cfg->divs.h_divisor;
+ cfg->freq.pclk = pll / cfg->divs.p_divisor;
+
+ /* convert hclk into 10ths of nanoseconds for io calcs */
+ cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
+}
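+
+/* e.g. hclk=133MHz gives hclk_tns = 10^9 / 13300000 = 75, i.e. a 7.5ns
+ * bus cycle, which print_ns() in the debugfs code renders as "7.5".
+ * (Editor's illustration.)
+ */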
+
+static inline int closer(unsigned int target, unsigned int n, unsigned int c)
+{
+ int diff_cur = abs(target - c);
+ int diff_new = abs(target - n);
+
+ return (diff_new < diff_cur);
+}
+
+static void s3c_cpufreq_show(const char *pfx,
+ struct s3c_cpufreq_config *cfg)
+{
+ s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n",
+ pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk,
+ cfg->freq.hclk, cfg->divs.h_divisor,
+ cfg->freq.pclk, cfg->divs.p_divisor);
+}
+
+/* functions to wrap the driver info calls to do the cpu specific work */
+
+static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg)
+{
+ if (cfg->info->set_iotiming)
+ (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming);
+}
+
+static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg)
+{
+ if (cfg->info->calc_iotiming)
+ return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming);
+
+ return 0;
+}
+
+static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
+{
+ (cfg->info->set_refresh)(cfg);
+}
+
+static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ (cfg->info->set_divs)(cfg);
+}
+
+static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ return (cfg->info->calc_divs)(cfg);
+}
+
+static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
+{
+ cfg->mpll = _clk_mpll;
+ (cfg->info->set_fvco)(cfg);
+}
+
+static inline void s3c_cpufreq_updateclk(struct clk *clk,
+ unsigned int freq)
+{
+ clk_set_rate(clk, freq);
+}
+
+static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ struct cpufreq_frequency_table *pll)
+{
+ struct s3c_cpufreq_freqs freqs;
+ struct s3c_cpufreq_config cpu_new;
+ unsigned long flags;
+
+ cpu_new = cpu_cur; /* copy new from current */
+
+ s3c_cpufreq_show("cur", &cpu_cur);
+
+ /* TODO - check for DMA currently outstanding */
+
+ cpu_new.pll = pll ? *pll : cpu_cur.pll;
+
+ if (pll)
+ freqs.pll_changing = 1;
+
+ /* update our frequencies */
+
+ cpu_new.freq.armclk = target_freq;
+ cpu_new.freq.fclk = cpu_new.pll.frequency;
+
+ if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
+ printk(KERN_ERR "no divisors for %d\n", target_freq);
+ goto err_notpossible;
+ }
+
+ s3c_freq_dbg("%s: got divs\n", __func__);
+
+ s3c_cpufreq_calc(&cpu_new);
+
+ s3c_freq_dbg("%s: calculated frequencies for new\n", __func__);
+
+ if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
+ if (s3c_cpufreq_calcio(&cpu_new) < 0) {
+ printk(KERN_ERR "%s: no IO timings\n", __func__);
+ goto err_notpossible;
+ }
+ }
+
+ s3c_cpufreq_show("new", &cpu_new);
+
+ /* setup our cpufreq parameters */
+
+ freqs.old = cpu_cur.freq;
+ freqs.new = cpu_new.freq;
+
+ freqs.freqs.old = cpu_cur.freq.armclk / 1000;
+ freqs.freqs.new = cpu_new.freq.armclk / 1000;
+
+ /* update f/h/p clock settings before we issue the change
+ * notification, so that drivers do not need to do anything
+ * special if they want to recalculate on CPUFREQ_PRECHANGE. */
+
+ s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency);
+ s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk);
+ s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk);
+ s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
+
+ /* start the frequency change */
+ cpufreq_freq_transition_begin(policy, &freqs.freqs);
+
+ /* If hclk is staying the same, then we do not need to
+ * re-write the IO or the refresh timings whilst we are changing
+ * speed. */
+
+ local_irq_save(flags);
+
+ /* is our memory clock slowing down? */
+ if (cpu_new.freq.hclk < cpu_cur.freq.hclk) {
+ s3c_cpufreq_setrefresh(&cpu_new);
+ s3c_cpufreq_setio(&cpu_new);
+ }
+
+ if (cpu_new.freq.fclk == cpu_cur.freq.fclk) {
+ /* not changing PLL, just set the divisors */
+
+ s3c_cpufreq_setdivs(&cpu_new);
+ } else {
+ if (cpu_new.freq.fclk < cpu_cur.freq.fclk) {
+ /* slow the cpu down, then set divisors */
+
+ s3c_cpufreq_setfvco(&cpu_new);
+ s3c_cpufreq_setdivs(&cpu_new);
+ } else {
+ /* set the divisors, then speed up */
+
+ s3c_cpufreq_setdivs(&cpu_new);
+ s3c_cpufreq_setfvco(&cpu_new);
+ }
+ }
+
+	/* did our memory clock speed up? */
+ if (cpu_new.freq.hclk > cpu_cur.freq.hclk) {
+ s3c_cpufreq_setrefresh(&cpu_new);
+ s3c_cpufreq_setio(&cpu_new);
+ }
+
+ /* update our current settings */
+ cpu_cur = cpu_new;
+
+ local_irq_restore(flags);
+
+ /* notify everyone we've done this */
+ cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
+
+ s3c_freq_dbg("%s: finished\n", __func__);
+ return 0;
+
+ err_notpossible:
+ printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+ return -EINVAL;
+}
+
+/* s3c_cpufreq_target
+ *
+ * called by the cpufreq core to adjust the frequency that the CPU
+ * is currently running at.
+ */
+
+static int s3c_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_frequency_table *pll;
+ unsigned int index;
+
+	/* avoid repeated calls which cause a needless amount of duplicated
+ * logging output (and CPU time as the calculation process is
+ * done) */
+ if (target_freq == last_target)
+ return 0;
+
+ last_target = target_freq;
+
+ s3c_freq_dbg("%s: policy %p, target %u, relation %u\n",
+ __func__, policy, target_freq, relation);
+
+ if (ftab) {
+ if (cpufreq_frequency_table_target(policy, ftab,
+ target_freq, relation,
+ &index)) {
+ s3c_freq_dbg("%s: table failed\n", __func__);
+ return -EINVAL;
+ }
+
+ s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
+ target_freq, index, ftab[index].frequency);
+ target_freq = ftab[index].frequency;
+ }
+
+ target_freq *= 1000; /* convert target to Hz */
+
+ /* find the settings for our new frequency */
+
+ if (!pll_reg || cpu_cur.lock_pll) {
+ /* either we've not got any PLL values, or we've locked
+ * to the current one. */
+ pll = NULL;
+ } else {
+ struct cpufreq_policy tmp_policy;
+ int ret;
+
+ /* we keep the cpu pll table in Hz, to ensure we get an
+ * accurate value for the PLL output. */
+
+ tmp_policy.min = policy->min * 1000;
+ tmp_policy.max = policy->max * 1000;
+ tmp_policy.cpu = policy->cpu;
+
+ /* cpufreq_frequency_table_target uses a pointer to 'index'
+ * which is the number of the table entry, not the value of
+ * the table entry's index field. */
+
+ ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg,
+ target_freq, relation,
+ &index);
+
+ if (ret < 0) {
+ printk(KERN_ERR "%s: no PLL available\n", __func__);
+ goto err_notpossible;
+ }
+
+ pll = pll_reg + index;
+
+ s3c_freq_dbg("%s: target %u => %u\n",
+ __func__, target_freq, pll->frequency);
+
+ target_freq = pll->frequency;
+ }
+
+ return s3c_cpufreq_settarget(policy, target_freq, pll);
+
+ err_notpossible:
+ printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+ return -EINVAL;
+}
+
+struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, name);
+ if (IS_ERR(clk))
+ printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
+
+ return clk;
+}
+
+static int s3c_cpufreq_init(struct cpufreq_policy *policy)
+{
+ policy->clk = clk_arm;
+ return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+}
+
+static int __init s3c_cpufreq_initclks(void)
+{
+ _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
+ _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
+ clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk");
+ clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk");
+ clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk");
+ clk_arm = s3c_cpufreq_clk_get(NULL, "armclk");
+
+ if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
+ IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
+ printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
+ return -ENOENT;
+ }
+
+ printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
+ clk_get_rate(clk_fclk) / 1000,
+ clk_get_rate(clk_hclk) / 1000,
+ clk_get_rate(clk_pclk) / 1000,
+ clk_get_rate(clk_arm) / 1000);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static struct cpufreq_frequency_table suspend_pll;
+static unsigned int suspend_freq;
+
+static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ suspend_pll.frequency = clk_get_rate(_clk_mpll);
+ suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON);
+ suspend_freq = clk_get_rate(clk_arm);
+
+ return 0;
+}
+
+static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy);
+
+ last_target = ~0; /* invalidate last_target setting */
+
+ /* whilst we will be called later on, we try and re-set the
+ * cpu frequencies as soon as possible so that we do not end
+ * up resuming devices and then immediately having to re-set
+ * a number of settings once these devices have restarted.
+ *
+ * as a note, it is expected devices are not used until they
+ * have been un-suspended and at that time they should have
+ * used the updated clock settings.
+ */
+
+ ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+#else
+#define s3c_cpufreq_resume NULL
+#define s3c_cpufreq_suspend NULL
+#endif
+
+static struct cpufreq_driver s3c24xx_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .target = s3c_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .init = s3c_cpufreq_init,
+ .suspend = s3c_cpufreq_suspend,
+ .resume = s3c_cpufreq_resume,
+ .name = "s3c24xx",
+};
+
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+{
+ if (!info || !info->name) {
+ printk(KERN_ERR "%s: failed to pass valid information\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
+ info->name);
+
+ /* check our driver info has valid data */
+
+ BUG_ON(info->set_refresh == NULL);
+ BUG_ON(info->set_divs == NULL);
+ BUG_ON(info->calc_divs == NULL);
+
+ /* info->set_fvco is optional, depending on whether there
+ * is a need to set the clock code. */
+
+ cpu_cur.info = info;
+
+ /* Note, driver registering should probably update locktime */
+
+ return 0;
+}
+
+int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
+{
+ struct s3c_cpufreq_board *ours;
+
+ if (!board) {
+ printk(KERN_INFO "%s: no board data\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Copy the board information so that each board can mark its
+	 * original as __initdata. */
+
+ ours = kzalloc(sizeof(*ours), GFP_KERNEL);
+ if (ours == NULL) {
+ printk(KERN_ERR "%s: no memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ *ours = *board;
+ cpu_cur.board = ours;
+
+ return 0;
+}
+
+static int __init s3c_cpufreq_auto_io(void)
+{
+ int ret;
+
+ if (!cpu_cur.info->get_iotiming) {
+ printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
+ return -ENOENT;
+ }
+
+ printk(KERN_INFO "%s: working out IO settings\n", __func__);
+
+ ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
+ if (ret)
+ printk(KERN_ERR "%s: failed to get timings\n", __func__);
+
+ return ret;
+}
+
+/* if one or the other is zero, return the non-zero one, otherwise the min */
+#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b))
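+/* e.g. do_min(0, 133) == 133 and do_min(100, 133) == 100 (editor's note) */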
+
+/**
+ * s3c_cpufreq_freq_min - find the minimum settings for the given freq.
+ * @dst: The destination structure
+ * @a: One argument.
+ * @b: The other argument.
+ *
+ * Take the minimum of each frequency entry in the 'struct s3c_freq';
+ * a zero entry is ignored and the non-zero argument is used instead.
+ */
+static void s3c_cpufreq_freq_min(struct s3c_freq *dst,
+ struct s3c_freq *a, struct s3c_freq *b)
+{
+ dst->fclk = do_min(a->fclk, b->fclk);
+ dst->hclk = do_min(a->hclk, b->hclk);
+ dst->pclk = do_min(a->pclk, b->pclk);
+ dst->armclk = do_min(a->armclk, b->armclk);
+}
+
+static inline u32 calc_locktime(u32 freq, u32 time_us)
+{
+ u32 result;
+
+ result = freq * time_us;
+ result = DIV_ROUND_UP(result, 1000 * 1000);
+
+ return result;
+}
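+
+/* e.g. with a 12MHz xtal (assumed here purely for illustration) and the
+ * s3c2412's 150us locktime, 12000000 * 150 / 10^6 = 1800 lock cycles.
+ */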
+
+static void s3c_cpufreq_update_loctkime(void)
+{
+ unsigned int bits = cpu_cur.info->locktime_bits;
+ u32 rate = (u32)clk_get_rate(_clk_xtal);
+ u32 val;
+
+ if (bits == 0) {
+ WARN_ON(1);
+ return;
+ }
+
+ val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
+ val |= calc_locktime(rate, cpu_cur.info->locktime_m);
+
+ printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
+ __raw_writel(val, S3C2410_LOCKTIME);
+}
+
+static int s3c_cpufreq_build_freq(void)
+{
+ int size, ret;
+
+ if (!cpu_cur.info->calc_freqtable)
+ return -EINVAL;
+
+ kfree(ftab);
+ ftab = NULL;
+
+ size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
+ size++;
+
+ ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
+ if (!ftab) {
+ printk(KERN_ERR "%s: no memory for tables\n", __func__);
+ return -ENOMEM;
+ }
+
+ ftab_size = size;
+
+ ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size);
+ s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END);
+
+ return 0;
+}
+
+static int __init s3c_cpufreq_initcall(void)
+{
+ int ret = 0;
+
+ if (cpu_cur.info && cpu_cur.board) {
+ ret = s3c_cpufreq_initclks();
+ if (ret)
+ goto out;
+
+ /* get current settings */
+ s3c_cpufreq_getcur(&cpu_cur);
+ s3c_cpufreq_show("cur", &cpu_cur);
+
+ if (cpu_cur.board->auto_io) {
+ ret = s3c_cpufreq_auto_io();
+ if (ret) {
+ printk(KERN_ERR "%s: failed to get io timing\n",
+ __func__);
+ goto out;
+ }
+ }
+
+ if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
+ printk(KERN_ERR "%s: no IO support registered\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!cpu_cur.info->need_pll)
+ cpu_cur.lock_pll = 1;
+
+ s3c_cpufreq_update_loctkime();
+
+ s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max,
+ &cpu_cur.info->max);
+
+ if (cpu_cur.info->calc_freqtable)
+ s3c_cpufreq_build_freq();
+
+ ret = cpufreq_register_driver(&s3c24xx_driver);
+ }
+
+ out:
+ return ret;
+}
+
+late_initcall(s3c_cpufreq_initcall);
+
+/**
+ * s3c_plltab_register - register CPU PLL table.
+ * @plls: The list of PLL entries.
+ * @plls_no: The size of the PLL entries @plls.
+ *
+ * Register the given set of PLLs with the system.
+ */
+int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
+ unsigned int plls_no)
+{
+ struct cpufreq_frequency_table *vals;
+ unsigned int size;
+
+ size = sizeof(*vals) * (plls_no + 1);
+
+ vals = kzalloc(size, GFP_KERNEL);
+ if (vals) {
+		memcpy(vals, plls, sizeof(*vals) * plls_no);
+ pll_reg = vals;
+
+		/* write a terminating entry; the caller's table does not
+		 * need to contain one */
+ vals += plls_no;
+ vals->frequency = CPUFREQ_TABLE_END;
+
+ printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
+ } else
+ printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
+
+ return vals ? 0 : -ENOMEM;
+}
diff --git a/kernel/drivers/cpufreq/s3c64xx-cpufreq.c b/kernel/drivers/cpufreq/s3c64xx-cpufreq.c
new file mode 100644
index 000000000..176e84cc3
--- /dev/null
+++ b/kernel/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * S3C64xx CPUfreq Support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "cpufreq: " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+static struct regulator *vddarm;
+static unsigned long regulator_latency;
+
+#ifdef CONFIG_CPU_S3C6410
+struct s3c64xx_dvfs {
+ unsigned int vddarm_min;
+ unsigned int vddarm_max;
+};
+
+static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ [0] = { 1000000, 1150000 },
+ [1] = { 1050000, 1150000 },
+ [2] = { 1100000, 1150000 },
+ [3] = { 1200000, 1350000 },
+ [4] = { 1300000, 1350000 },
+};
+
+static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ { 0, 0, 66000 },
+ { 0, 0, 100000 },
+ { 0, 0, 133000 },
+ { 0, 1, 200000 },
+ { 0, 1, 222000 },
+ { 0, 1, 266000 },
+ { 0, 2, 333000 },
+ { 0, 2, 400000 },
+ { 0, 2, 532000 },
+ { 0, 2, 533000 },
+ { 0, 3, 667000 },
+ { 0, 4, 800000 },
+ { 0, 0, CPUFREQ_TABLE_END },
+};
+#endif
+
+static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct s3c64xx_dvfs *dvfs;
+ unsigned int old_freq, new_freq;
+ int ret;
+
+ old_freq = clk_get_rate(policy->clk) / 1000;
+ new_freq = s3c64xx_freq_table[index].frequency;
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data];
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && new_freq > old_freq) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("Failed to set VDDARM for %dkHz: %d\n",
+ new_freq, ret);
+ return ret;
+ }
+ }
+#endif
+
+ ret = clk_set_rate(policy->clk, new_freq * 1000);
+ if (ret < 0) {
+ pr_err("Failed to set rate %dkHz: %d\n",
+ new_freq, ret);
+ return ret;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && new_freq < old_freq) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("Failed to set VDDARM for %dkHz: %d\n",
+ new_freq, ret);
+ if (clk_set_rate(policy->clk, old_freq * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+
+ return ret;
+ }
+ }
+#endif
+
+ pr_debug("Set actual frequency %lukHz\n",
+ clk_get_rate(policy->clk) / 1000);
+
+ return 0;
+}
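+
+/* Editor's summary of the ordering above: VDDARM is raised before a
+ * frequency increase and lowered only after a successful decrease, so
+ * the voltage always satisfies the higher of the two frequencies during
+ * the transition.
+ */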
+
+#ifdef CONFIG_REGULATOR
+static void __init s3c64xx_cpufreq_config_regulator(void)
+{
+ int count, v, i, found;
+ struct cpufreq_frequency_table *freq;
+ struct s3c64xx_dvfs *dvfs;
+
+ count = regulator_count_voltages(vddarm);
+	if (count < 0) {
+		pr_err("Unable to check supported voltages\n");
+		/* cannot enumerate voltages; keep the table and guess */
+		goto out;
+	}
+
+ if (!count)
+ goto out;
+
+ cpufreq_for_each_valid_entry(freq, s3c64xx_freq_table) {
+ dvfs = &s3c64xx_dvfs_table[freq->driver_data];
+ found = 0;
+
+ for (i = 0; i < count; i++) {
+ v = regulator_list_voltage(vddarm, i);
+ if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
+ found = 1;
+ }
+
+ if (!found) {
+ pr_debug("%dkHz unsupported by regulator\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+ }
+
+out:
+ /* Guess based on having to do an I2C/SPI write; in future we
+ * will be able to query the regulator performance here. */
+ regulator_latency = 1 * 1000 * 1000;
+}
+#endif
+
+static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ int ret;
+ struct cpufreq_frequency_table *freq;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ if (s3c64xx_freq_table == NULL) {
+ pr_err("No frequency information for this CPU\n");
+ return -ENODEV;
+ }
+
+ policy->clk = clk_get(NULL, "armclk");
+ if (IS_ERR(policy->clk)) {
+ pr_err("Unable to obtain ARMCLK: %ld\n",
+ PTR_ERR(policy->clk));
+ return PTR_ERR(policy->clk);
+ }
+
+#ifdef CONFIG_REGULATOR
+ vddarm = regulator_get(NULL, "vddarm");
+ if (IS_ERR(vddarm)) {
+ ret = PTR_ERR(vddarm);
+ pr_err("Failed to obtain VDDARM: %d\n", ret);
+ pr_err("Only frequency scaling available\n");
+ vddarm = NULL;
+ } else {
+ s3c64xx_cpufreq_config_regulator();
+ }
+#endif
+
+ cpufreq_for_each_entry(freq, s3c64xx_freq_table) {
+ unsigned long r;
+
+ /* Check for frequencies we can generate */
+ r = clk_round_rate(policy->clk, freq->frequency * 1000);
+ r /= 1000;
+ if (r != freq->frequency) {
+ pr_debug("%dkHz unsupported by clock\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ /* If we have no regulator then assume startup
+ * frequency is the maximum we can support. */
+ if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+	/* Datasheet says PLL stabilisation time (if we were to use
+ * the PLLs, which we don't currently) is ~300us worst case,
+ * but add some fudge.
+ */
+ ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
+ (500 * 1000) + regulator_latency);
+ if (ret != 0) {
+ pr_err("Failed to configure frequency table: %d\n",
+ ret);
+ regulator_put(vddarm);
+ clk_put(policy->clk);
+ }
+
+ return ret;
+}
+
+static struct cpufreq_driver s3c64xx_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s3c64xx_cpufreq_set_target,
+ .get = cpufreq_generic_get,
+ .init = s3c64xx_cpufreq_driver_init,
+ .name = "s3c",
+};
+
+static int __init s3c64xx_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
+}
+module_init(s3c64xx_cpufreq_init);
diff --git a/kernel/drivers/cpufreq/s5pv210-cpufreq.c b/kernel/drivers/cpufreq/s5pv210-cpufreq.c
new file mode 100644
index 000000000..b0dac7d6b
--- /dev/null
+++ b/kernel/drivers/cpufreq/s5pv210-cpufreq.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * CPU frequency scaling for S5PC110/S5PV210
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regulator/consumer.h>
+
+static void __iomem *clk_base;
+static void __iomem *dmc_base[2];
+
+#define S5P_CLKREG(x) (clk_base + (x))
+
+#define S5P_APLL_LOCK S5P_CLKREG(0x00)
+#define S5P_APLL_CON S5P_CLKREG(0x100)
+#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
+#define S5P_CLK_SRC2 S5P_CLKREG(0x208)
+#define S5P_CLK_DIV0 S5P_CLKREG(0x300)
+#define S5P_CLK_DIV2 S5P_CLKREG(0x308)
+#define S5P_CLK_DIV6 S5P_CLKREG(0x318)
+#define S5P_CLKDIV_STAT0 S5P_CLKREG(0x1000)
+#define S5P_CLKDIV_STAT1 S5P_CLKREG(0x1004)
+#define S5P_CLKMUX_STAT0 S5P_CLKREG(0x1100)
+#define S5P_CLKMUX_STAT1 S5P_CLKREG(0x1104)
+
+#define S5P_ARM_MCS_CON S5P_CLKREG(0x6100)
+
+/* CLKSRC0 */
+#define S5P_CLKSRC0_MUX200_SHIFT (16)
+#define S5P_CLKSRC0_MUX200_MASK (0x1 << S5P_CLKSRC0_MUX200_SHIFT)
+#define S5P_CLKSRC0_MUX166_MASK (0x1<<20)
+#define S5P_CLKSRC0_MUX133_MASK (0x1<<24)
+
+/* CLKSRC2 */
+#define S5P_CLKSRC2_G3D_SHIFT (0)
+#define S5P_CLKSRC2_G3D_MASK (0x3 << S5P_CLKSRC2_G3D_SHIFT)
+#define S5P_CLKSRC2_MFC_SHIFT (4)
+#define S5P_CLKSRC2_MFC_MASK (0x3 << S5P_CLKSRC2_MFC_SHIFT)
+
+/* CLKDIV0 */
+#define S5P_CLKDIV0_APLL_SHIFT (0)
+#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT)
+#define S5P_CLKDIV0_A2M_SHIFT (4)
+#define S5P_CLKDIV0_A2M_MASK (0x7 << S5P_CLKDIV0_A2M_SHIFT)
+#define S5P_CLKDIV0_HCLK200_SHIFT (8)
+#define S5P_CLKDIV0_HCLK200_MASK (0x7 << S5P_CLKDIV0_HCLK200_SHIFT)
+#define S5P_CLKDIV0_PCLK100_SHIFT (12)
+#define S5P_CLKDIV0_PCLK100_MASK (0x7 << S5P_CLKDIV0_PCLK100_SHIFT)
+#define S5P_CLKDIV0_HCLK166_SHIFT (16)
+#define S5P_CLKDIV0_HCLK166_MASK (0xF << S5P_CLKDIV0_HCLK166_SHIFT)
+#define S5P_CLKDIV0_PCLK83_SHIFT (20)
+#define S5P_CLKDIV0_PCLK83_MASK (0x7 << S5P_CLKDIV0_PCLK83_SHIFT)
+#define S5P_CLKDIV0_HCLK133_SHIFT (24)
+#define S5P_CLKDIV0_HCLK133_MASK (0xF << S5P_CLKDIV0_HCLK133_SHIFT)
+#define S5P_CLKDIV0_PCLK66_SHIFT (28)
+#define S5P_CLKDIV0_PCLK66_MASK (0x7 << S5P_CLKDIV0_PCLK66_SHIFT)
+
+/* CLKDIV2 */
+#define S5P_CLKDIV2_G3D_SHIFT (0)
+#define S5P_CLKDIV2_G3D_MASK (0xF << S5P_CLKDIV2_G3D_SHIFT)
+#define S5P_CLKDIV2_MFC_SHIFT (4)
+#define S5P_CLKDIV2_MFC_MASK (0xF << S5P_CLKDIV2_MFC_SHIFT)
+
+/* CLKDIV6 */
+#define S5P_CLKDIV6_ONEDRAM_SHIFT (28)
+#define S5P_CLKDIV6_ONEDRAM_MASK (0xF << S5P_CLKDIV6_ONEDRAM_SHIFT)
+
+static struct clk *dmc0_clk;
+static struct clk *dmc1_clk;
+static DEFINE_MUTEX(set_freq_lock);
+
+/* APLL M,P,S values for 1GHz/800MHz */
+#define APLL_VAL_1000 ((1 << 31) | (125 << 16) | (3 << 8) | 1)
+#define APLL_VAL_800 ((1 << 31) | (100 << 16) | (3 << 8) | 1)
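+/* With a 24MHz FIN these give FOUT = MDIV * FIN / (PDIV * 2^(SDIV-1)),
+ * i.e. 125*24/3 = 1000MHz and 100*24/3 = 800MHz (editor's note; the
+ * formula and reference clock are assumed from the usual S5PV210 APLL
+ * convention).
+ */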
+
+/* Use 800MHz when entering sleep mode */
+#define SLEEP_FREQ (800 * 1000)
+
+/* Tracks if the cpu frequency can be updated anymore */
+static bool no_cpufreq_access;
+
+/*
+ * DRAM configurations to calculate refresh counter for changing
+ * frequency of memory.
+ */
+struct dram_conf {
+ unsigned long freq; /* HZ */
+ unsigned long refresh; /* DRAM refresh counter * 1000 */
+};
+
+/* DRAM configuration (DMC0 and DMC1) */
+static struct dram_conf s5pv210_dram_conf[2];
+
+enum perf_level {
+ L0, L1, L2, L3, L4,
+};
+
+enum s5pv210_mem_type {
+ LPDDR = 0x1,
+ LPDDR2 = 0x2,
+ DDR2 = 0x4,
+};
+
+enum s5pv210_dmc_port {
+ DMC0 = 0,
+ DMC1,
+};
+
+static struct cpufreq_frequency_table s5pv210_freq_table[] = {
+ {0, L0, 1000*1000},
+ {0, L1, 800*1000},
+ {0, L2, 400*1000},
+ {0, L3, 200*1000},
+ {0, L4, 100*1000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static struct regulator *arm_regulator;
+static struct regulator *int_regulator;
+
+struct s5pv210_dvs_conf {
+ int arm_volt; /* uV */
+ int int_volt; /* uV */
+};
+
+static const int arm_volt_max = 1350000;
+static const int int_volt_max = 1250000;
+
+static struct s5pv210_dvs_conf dvs_conf[] = {
+ [L0] = {
+ .arm_volt = 1250000,
+ .int_volt = 1100000,
+ },
+ [L1] = {
+ .arm_volt = 1200000,
+ .int_volt = 1100000,
+ },
+ [L2] = {
+ .arm_volt = 1050000,
+ .int_volt = 1100000,
+ },
+ [L3] = {
+ .arm_volt = 950000,
+ .int_volt = 1100000,
+ },
+ [L4] = {
+ .arm_volt = 950000,
+ .int_volt = 1000000,
+ },
+};
+
+static u32 clkdiv_val[5][11] = {
+ /*
+ * Clock divider value for following
+ * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
+ * HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
+ * ONEDRAM, MFC, G3D }
+ */
+
+ /* L0 : [1000/200/100][166/83][133/66][200/200] */
+ {0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L1 : [800/200/100][166/83][133/66][200/200] */
+ {0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L2 : [400/200/100][166/83][133/66][200/200] */
+ {1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L3 : [200/200/100][166/83][133/66][200/200] */
+ {3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L4 : [100/100/100][83/83][66/66][100/100] */
+ {7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
+};
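+
+/*
+ * How to read a row (worked example for L0): the annotations above are
+ * consistent with a divider field value N meaning divide-by-(N+1), so
+ * with fout_apll = 1000MHz the L0 row {0, 4, 4, 1, ...} gives
+ * ARMCLK = 1000/(0+1) = 1000MHz, HCLK_MSYS = 1000/(4+1) = 200MHz and
+ * PCLK_MSYS = 200/(1+1) = 100MHz, matching the "[1000/200/100]"
+ * annotation on the L0 row.
+ */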
+
+/*
+ * This function sets the DRAM refresh counter according to the
+ * operating frequency of the DRAM.
+ * ch: DMC port number (0 or 1)
+ * freq: operating frequency of the DRAM (kHz)
+ */
+static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
+{
+ unsigned long tmp, tmp1;
+ void __iomem *reg = NULL;
+
+ if (ch == DMC0) {
+ reg = (dmc_base[0] + 0x30);
+ } else if (ch == DMC1) {
+ reg = (dmc_base[1] + 0x30);
+ } else {
+ printk(KERN_ERR "Cannot find DMC port\n");
+ return;
+ }
+
+ /* Find current DRAM frequency */
+ tmp = s5pv210_dram_conf[ch].freq;
+
+ do_div(tmp, freq);
+
+ tmp1 = s5pv210_dram_conf[ch].refresh;
+
+ do_div(tmp1, tmp);
+
+ __raw_writel(tmp1, reg);
+}
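+
+/*
+ * Worked example with illustrative numbers: assume DMC0 was probed
+ * running at clk_get_rate() = 166000000 (Hz) with a refresh counter of
+ * 0x618, so .refresh was stored as 0x618 * 1000. A call
+ * s5pv210_set_refresh(DMC0, 83000) then computes tmp = 166000000 /
+ * 83000 = 2000 and writes (0x618 * 1000) / 2000 = 0x30c: halving the
+ * DRAM clock halves the counter. The Hz/kHz mismatch between .freq and
+ * the freq argument is exactly compensated by the "* 1000" applied to
+ * the refresh value at probe time.
+ */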
+
+static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned long reg;
+ unsigned int priv_index;
+ unsigned int pll_changing = 0;
+ unsigned int bus_speed_changing = 0;
+ unsigned int old_freq, new_freq;
+ int arm_volt, int_volt;
+ int ret = 0;
+
+ mutex_lock(&set_freq_lock);
+
+ if (no_cpufreq_access) {
+ pr_err("Denied access to %s as it is disabled temporarily\n",
+ __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ old_freq = policy->cur;
+ new_freq = s5pv210_freq_table[index].frequency;
+
+ /* Find the index of the currently running level */
+ if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
+ old_freq, CPUFREQ_RELATION_H,
+ &priv_index)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arm_volt = dvs_conf[index].arm_volt;
+ int_volt = dvs_conf[index].int_volt;
+
+ if (new_freq > old_freq) {
+ ret = regulator_set_voltage(arm_regulator,
+ arm_volt, arm_volt_max);
+ if (ret)
+ goto exit;
+
+ ret = regulator_set_voltage(int_regulator,
+ int_volt, int_volt_max);
+ if (ret)
+ goto exit;
+ }
+
+ /* Check whether the PLL needs to change */
+ if ((index == L0) || (priv_index == L0))
+ pll_changing = 1;
+
+ /* Check whether the system bus clock needs to change */
+ if ((index == L4) || (priv_index == L4))
+ bus_speed_changing = 1;
+
+ if (bus_speed_changing) {
+ /*
+ * Reconfigure the DRAM refresh counter value for the minimum
+ * temporary clock while changing the divider.
+ * The expected clock is 83MHz: 7.8us / (1/83MHz) = 0x287.
+ */
+ if (pll_changing)
+ s5pv210_set_refresh(DMC1, 83000);
+ else
+ s5pv210_set_refresh(DMC1, 100000);
+
+ s5pv210_set_refresh(DMC0, 83000);
+ }
+
+ /*
+ * The APLL must be changed at this level:
+ * APLL -> MPLL (for a stable transition) -> APLL
+ * Some clock sources' clock APIs are not ready yet, so do not
+ * use the clock API in the code below.
+ */
+ if (pll_changing) {
+ /*
+ * 1. Temporarily change the divider for MFC and G3D:
+ * SCLKA2M (200/1=200MHz) -> (200/4=50MHz)
+ */
+ reg = __raw_readl(S5P_CLK_DIV2);
+ reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
+ reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
+ (3 << S5P_CLKDIV2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV2);
+
+ /* Wait until the MFC and G3D dividers settle */
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & ((1 << 16) | (1 << 17)));
+
+ /*
+ * 2. Change SCLKA2M (200MHz) to SCLKMPLL in MFC_MUX and G3D_MUX:
+ * (200/4=50MHz) -> (667/4=166MHz)
+ */
+ reg = __raw_readl(S5P_CLK_SRC2);
+ reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
+ reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
+ (1 << S5P_CLKSRC2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC2);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT1);
+ } while (reg & ((1 << 7) | (1 << 3)));
+
+ /*
+ * 3. Set the DMC1 refresh count for 133MHz. If (index == L4),
+ * the refresh counter has already been programmed above
+ * (0x287 @ 83MHz).
+ */
+ if (!bus_speed_changing)
+ s5pv210_set_refresh(DMC1, 133000);
+
+ /* 4. SCLKAPLL -> SCLKMPLL */
+ reg = __raw_readl(S5P_CLK_SRC0);
+ reg &= ~(S5P_CLKSRC0_MUX200_MASK);
+ reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC0);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT0);
+ } while (reg & (0x1 << 18));
+
+ }
+
+ /* Change divider */
+ reg = __raw_readl(S5P_CLK_DIV0);
+
+ reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
+ S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
+ S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
+ S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);
+
+ reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
+ (clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
+ (clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
+ (clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
+ (clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
+ (clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
+ (clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
+ (clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
+
+ __raw_writel(reg, S5P_CLK_DIV0);
+
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & 0xff);
+
+ /* Adjust the ARM MCS value */
+ reg = __raw_readl(S5P_ARM_MCS_CON);
+ reg &= ~0x3;
+ if (index >= L3)
+ reg |= 0x3;
+ else
+ reg |= 0x1;
+
+ __raw_writel(reg, S5P_ARM_MCS_CON);
+
+ if (pll_changing) {
+ /* 5. Set lock time = 30us * 24MHz = 0x2cf */
+ __raw_writel(0x2cf, S5P_APLL_LOCK);
+
+ /*
+ * 6. Turn on the APLL
+ * 6-1. Set the PMS values
+ * 6-2. Wait until the PLL is locked
+ */
+ if (index == L0)
+ __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
+ else
+ __raw_writel(APLL_VAL_800, S5P_APLL_CON);
+
+ do {
+ reg = __raw_readl(S5P_APLL_CON);
+ } while (!(reg & (0x1 << 29)));
+
+ /*
+ * 7. Change the source clock from SCLKMPLL (667MHz) to
+ * SCLKA2M (200MHz) in MFC_MUX and G3D_MUX:
+ * (667/4=166MHz) -> (200/4=50MHz)
+ */
+ reg = __raw_readl(S5P_CLK_SRC2);
+ reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
+ reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
+ (0 << S5P_CLKSRC2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC2);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT1);
+ } while (reg & ((1 << 7) | (1 << 3)));
+
+ /*
+ * 8. Change the divider for MFC and G3D:
+ * (200/4=50MHz) -> (200/1=200MHz)
+ */
+ reg = __raw_readl(S5P_CLK_DIV2);
+ reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
+ reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
+ (clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV2);
+
+ /* Wait until the MFC and G3D dividers settle */
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & ((1 << 16) | (1 << 17)));
+
+ /* 9. Change MPLL to APLL in MSYS_MUX */
+ reg = __raw_readl(S5P_CLK_SRC0);
+ reg &= ~(S5P_CLKSRC0_MUX200_MASK);
+ reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC0);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT0);
+ } while (reg & (0x1 << 18));
+
+ /*
+ * 10. DMC1 refresh counter:
+ * L4 : DMC1 = 100MHz, 7.8us/(1/100MHz) = 0x30c
+ * Others : DMC1 = 200MHz, 7.8us/(1/200MHz) = 0x618
+ */
+ if (!bus_speed_changing)
+ s5pv210_set_refresh(DMC1, 200000);
+ }
+
+ /*
+ * The L4 level needs to change the memory bus speed, hence the
+ * OneDRAM clock divider and the memory refresh parameters must
+ * be changed as well.
+ */
+ if (bus_speed_changing) {
+ reg = __raw_readl(S5P_CLK_DIV6);
+ reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
+ reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV6);
+
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT1);
+ } while (reg & (1 << 15));
+
+ /* Reconfigure DRAM refresh counter value */
+ if (index != L4) {
+ /*
+ * DMC0 : 166Mhz
+ * DMC1 : 200Mhz
+ */
+ s5pv210_set_refresh(DMC0, 166000);
+ s5pv210_set_refresh(DMC1, 200000);
+ } else {
+ /*
+ * DMC0 : 83Mhz
+ * DMC1 : 100Mhz
+ */
+ s5pv210_set_refresh(DMC0, 83000);
+ s5pv210_set_refresh(DMC1, 100000);
+ }
+ }
+
+ if (new_freq < old_freq) {
+ regulator_set_voltage(int_regulator,
+ int_volt, int_volt_max);
+
+ regulator_set_voltage(arm_regulator,
+ arm_volt, arm_volt_max);
+ }
+
+ printk(KERN_DEBUG "Perf changed[L%d]\n", index);
+
+exit:
+ mutex_unlock(&set_freq_lock);
+ return ret;
+}
+
+static int check_mem_type(void __iomem *dmc_reg)
+{
+ unsigned long val;
+
+ val = __raw_readl(dmc_reg + 0x4);
+ val = (val & (0xf << 8));
+
+ return val >> 8;
+}
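+
+/*
+ * check_mem_type() reads the DMC register at offset 0x4 and returns
+ * bits [11:8], which hold the attached memory type encoded as in
+ * enum s5pv210_mem_type (LPDDR = 0x1, LPDDR2 = 0x2, DDR2 = 0x4).
+ */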
+
+static int s5pv210_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned long mem_type;
+ int ret;
+
+ policy->clk = clk_get(NULL, "armclk");
+ if (IS_ERR(policy->clk))
+ return PTR_ERR(policy->clk);
+
+ dmc0_clk = clk_get(NULL, "sclk_dmc0");
+ if (IS_ERR(dmc0_clk)) {
+ ret = PTR_ERR(dmc0_clk);
+ goto out_dmc0;
+ }
+
+ dmc1_clk = clk_get(NULL, "hclk_msys");
+ if (IS_ERR(dmc1_clk)) {
+ ret = PTR_ERR(dmc1_clk);
+ goto out_dmc1;
+ }
+
+ if (policy->cpu != 0) {
+ ret = -EINVAL;
+ goto out_dmc1;
+ }
+
+ /*
+ * check_mem_type: this driver only supports LPDDR and LPDDR2;
+ * other memory types are not supported.
+ */
+ mem_type = check_mem_type(dmc_base[0]);
+
+ if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
+ printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
+ ret = -EINVAL;
+ goto out_dmc1;
+ }
+
+ /* Find the current refresh counter and frequency for each DMC */
+ s5pv210_dram_conf[0].refresh = (__raw_readl(dmc_base[0] + 0x30) * 1000);
+ s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
+
+ s5pv210_dram_conf[1].refresh = (__raw_readl(dmc_base[1] + 0x30) * 1000);
+ s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
+
+ policy->suspend_freq = SLEEP_FREQ;
+ return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
+
+out_dmc1:
+ clk_put(dmc0_clk);
+out_dmc0:
+ clk_put(policy->clk);
+ return ret;
+}
+
+static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+ int ret;
+
+ if (!policy)
+ return NOTIFY_BAD;
+
+ ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
+ /* Drop the reference taken by cpufreq_cpu_get() */
+ cpufreq_cpu_put(policy);
+ if (ret < 0)
+ return NOTIFY_BAD;
+
+ no_cpufreq_access = true;
+ return NOTIFY_DONE;
+}
+
+static struct cpufreq_driver s5pv210_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = s5pv210_target,
+ .get = cpufreq_generic_get,
+ .init = s5pv210_cpu_init,
+ .name = "s5pv210",
+#ifdef CONFIG_PM
+ .suspend = cpufreq_generic_suspend,
+ .resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
+#endif
+};
+
+static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
+ .notifier_call = s5pv210_cpufreq_reboot_notifier_event,
+};
+
+static int s5pv210_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ int id;
+
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * and DMC controller registers directly and remove static mappings
+ * and dependencies on platform headers. It is necessary to enable
+ * S5PV210 multi-platform support and will be removed together with
+ * this whole driver as soon as S5PV210 gets migrated to use
+ * cpufreq-dt driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
+ return -EFAULT;
+ }
+
+ for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
+ id = of_alias_get_id(np, "dmc");
+ if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
+ pr_err("%s: failed to get alias of dmc node '%s'\n",
+ __func__, np->name);
+ /* Drop the reference held by the iterator */
+ of_node_put(np);
+ return id < 0 ? id : -EINVAL;
+ }
+
+ dmc_base[id] = of_iomap(np, 0);
+ if (!dmc_base[id]) {
+ pr_err("%s: failed to map dmc%d registers\n",
+ __func__, id);
+ of_node_put(np);
+ return -EFAULT;
+ }
+ }
+
+ for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
+ if (!dmc_base[id]) {
+ pr_err("%s: failed to find dmc%d node\n", __func__, id);
+ return -ENODEV;
+ }
+ }
+
+ arm_regulator = regulator_get(NULL, "vddarm");
+ if (IS_ERR(arm_regulator)) {
+ pr_err("failed to get regulator vddarm");
+ return PTR_ERR(arm_regulator);
+ }
+
+ int_regulator = regulator_get(NULL, "vddint");
+ if (IS_ERR(int_regulator)) {
+ pr_err("failed to get regulator vddint");
+ regulator_put(arm_regulator);
+ return PTR_ERR(int_regulator);
+ }
+
+ register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
+
+ return cpufreq_register_driver(&s5pv210_driver);
+}
+
+static struct platform_driver s5pv210_cpufreq_platdrv = {
+ .driver = {
+ .name = "s5pv210-cpufreq",
+ },
+ .probe = s5pv210_cpufreq_probe,
+};
+module_platform_driver(s5pv210_cpufreq_platdrv);
diff --git a/kernel/drivers/cpufreq/sa1100-cpufreq.c b/kernel/drivers/cpufreq/sa1100-cpufreq.c
new file mode 100644
index 000000000..728eab77e
--- /dev/null
+++ b/kernel/drivers/cpufreq/sa1100-cpufreq.c
@@ -0,0 +1,220 @@
+/*
+ * cpu-sa1100.c: clock scaling for the SA1100
+ *
+ * Copyright (C) 2000 2001, The Delft University of Technology
+ *
+ * Authors:
+ * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version
+ * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
+ * - major rewrite for linux-2.3.99
+ * - rewritten for the more generic power management scheme in
+ * linux-2.4.5-rmk1
+ *
+ * This software has been developed while working on the LART
+ * computing board (http://www.lartmaker.nl/), which is
+ * sponsored by the Mobile Multi-media Communications
+ * (http://www.mobimedia.org/) and Ubiquitous Communications
+ * (http://www.ubicom.tudelft.nl/) projects.
+ *
+ * The authors can be reached at:
+ *
+ * Erik Mouw
+ * Information and Communication Theory Group
+ * Faculty of Information Technology and Systems
+ * Delft University of Technology
+ * P.O. Box 5031
+ * 2600 GA Delft
+ * The Netherlands
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Theory of operations
+ * ====================
+ *
+ * Clock scaling can be used to lower the power consumption of the CPU
+ * core. This will give you a somewhat longer running time.
+ *
+ * The SA-1100 has a single register to change the core clock speed:
+ *
+ * PPCR 0x90020014 PLL config
+ *
+ * However, the DRAM timings are closely related to the core clock
+ * speed, so we need to change these, too. The used registers are:
+ *
+ * MDCNFG 0xA0000000 DRAM config
+ * MDCAS0 0xA0000004 Access waveform
+ * MDCAS1 0xA0000008 Access waveform
+ * MDCAS2 0xA000000C Access waveform
+ *
+ * Care must be taken to change the DRAM parameters the correct way,
+ * because otherwise the DRAM becomes unusable and the kernel will
+ * crash.
+ *
+ * The simple solution to avoid a kernel crash is to put the actual
+ * clock change in ROM and jump to that code from the kernel. The main
+ * disadvantage is that the ROM has to be modified, which is not
+ * possible on all SA-1100 platforms. Another disadvantage is that
+ * jumping to ROM makes clock switching unnecessarily complicated.
+ *
+ * The idea behind this driver is that the memory configuration can be
+ * changed while running from DRAM (even with interrupts turned on!)
+ * as long as all re-configuration steps yield a valid DRAM
+ * configuration. The advantages are clear: it will run on all SA-1100
+ * platforms, and the code is very simple.
+ *
+ * If you really want to understand what is going on in
+ * sa1100_update_dram_timings(), you'll have to read sections 8.2,
+ * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
+ * Developers Manual" (available for free from Intel).
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+
+#include <mach/generic.h>
+#include <mach/hardware.h>
+
+struct sa1100_dram_regs {
+ int speed;
+ u32 mdcnfg;
+ u32 mdcas0;
+ u32 mdcas1;
+ u32 mdcas2;
+};
+
+
+static struct cpufreq_driver sa1100_driver;
+
+static struct sa1100_dram_regs sa1100_dram_settings[] = {
+ /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
+ { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
+ { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
+ { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
+ {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
+ {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
+ {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
+ {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
+ {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
+ {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
+ {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
+ {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
+ {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
+ {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
+ {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
+ {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
+ {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
+ { 0, 0, 0, 0, 0 } /* last entry */
+};
+
+static void sa1100_update_dram_timings(int current_speed, int new_speed)
+{
+ struct sa1100_dram_regs *settings = sa1100_dram_settings;
+
+ /* find speed */
+ while (settings->speed != 0) {
+ if (new_speed == settings->speed)
+ break;
+
+ settings++;
+ }
+
+ if (settings->speed == 0) {
+ panic("%s: couldn't find dram setting for speed %d\n",
+ __func__, new_speed);
+ }
+
+ /* No risk, no fun: run with interrupts on! */
+ if (new_speed > current_speed) {
+ /* We're going FASTER, so first relax the memory
+ * timings before changing the core frequency
+ */
+
+ /* Half the memory access clock */
+ MDCNFG |= MDCNFG_CDB2;
+
+ /* The order of these statements IS important, keep 8
+ * pulses!!
+ */
+ MDCAS2 = settings->mdcas2;
+ MDCAS1 = settings->mdcas1;
+ MDCAS0 = settings->mdcas0;
+ MDCNFG = settings->mdcnfg;
+ } else {
+ /* We're going SLOWER: first decrease the core
+ * frequency and then tighten the memory settings.
+ */
+
+ /* Half the memory access clock */
+ MDCNFG |= MDCNFG_CDB2;
+
+ /* The order of these statements IS important, keep 8
+ * pulses!!
+ */
+ MDCAS0 = settings->mdcas0;
+ MDCAS1 = settings->mdcas1;
+ MDCAS2 = settings->mdcas2;
+ MDCNFG = settings->mdcnfg;
+ }
+}
+
+static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
+{
+ unsigned int cur = sa11x0_getspeed(0);
+ unsigned int new_freq;
+
+ new_freq = sa11x0_freq_table[ppcr].frequency;
+
+ if (new_freq > cur)
+ sa1100_update_dram_timings(cur, new_freq);
+
+ PPCR = ppcr;
+
+ if (new_freq < cur)
+ sa1100_update_dram_timings(cur, new_freq);
+
+ return 0;
+}
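+
+/*
+ * Note the ordering enforced above (illustrative frequencies): going
+ * e.g. 147500 -> 206400, the DRAM timings are relaxed first, while
+ * still running at the slower core clock, and only then is PPCR
+ * written; going 206400 -> 147500, PPCR is written first and the
+ * timings are tightened afterwards. Either way the DRAM settings are
+ * always safe for the core clock currently in effect.
+ */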
+
+static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
+{
+ return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
+}
+
+static struct cpufreq_driver sa1100_driver __refdata = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1100_target,
+ .get = sa11x0_getspeed,
+ .init = sa1100_cpu_init,
+ .name = "sa1100",
+};
+
+static int __init sa1100_dram_init(void)
+{
+ if (cpu_is_sa1100())
+ return cpufreq_register_driver(&sa1100_driver);
+ else
+ return -ENODEV;
+}
+
+arch_initcall(sa1100_dram_init);
diff --git a/kernel/drivers/cpufreq/sa1110-cpufreq.c b/kernel/drivers/cpufreq/sa1110-cpufreq.c
new file mode 100644
index 000000000..b5befc211
--- /dev/null
+++ b/kernel/drivers/cpufreq/sa1110-cpufreq.c
@@ -0,0 +1,374 @@
+/*
+ * linux/arch/arm/mach-sa1100/cpu-sa1110.c
+ *
+ * Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Note: there are two errata that apply to the SA1110 here:
+ * 7 - SDRAM auto-power-up failure (rev A0)
+ * 13 - Corruption of internal register reads/writes following
+ * SDRAM reads (rev A0, B0, B1)
+ *
+ * We ignore rev. A0 and B0 devices; I don't think they're worth supporting.
+ *
+ * The SDRAM type can be passed on the command line as cpu_sa1110.sdram=type
+ */
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#include <asm/cputype.h>
+#include <asm/mach-types.h>
+
+#include <mach/generic.h>
+#include <mach/hardware.h>
+
+#undef DEBUG
+
+struct sdram_params {
+ const char name[20];
+ u_char rows; /* bits */
+ u_char cas_latency; /* cycles */
+ u_char tck; /* clock cycle time (ns) */
+ u_char trcd; /* activate to r/w (ns) */
+ u_char trp; /* precharge to activate (ns) */
+ u_char twr; /* write recovery time (ns) */
+ u_short refresh; /* refresh time for array (us) */
+};
+
+struct sdram_info {
+ u_int mdcnfg;
+ u_int mdrefr;
+ u_int mdcas[3];
+};
+
+static struct sdram_params sdram_tbl[] __initdata = {
+ { /* Toshiba TC59SM716 CL2 */
+ .name = "TC59SM716-CL2",
+ .rows = 12,
+ .tck = 10,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 10,
+ .refresh = 64000,
+ .cas_latency = 2,
+ }, { /* Toshiba TC59SM716 CL3 */
+ .name = "TC59SM716-CL3",
+ .rows = 12,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung K4S641632D TC75 */
+ .name = "K4S641632D",
+ .rows = 14,
+ .tck = 9,
+ .trcd = 27,
+ .trp = 20,
+ .twr = 9,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung K4S281632B-1H */
+ .name = "K4S281632B-1H",
+ .rows = 12,
+ .tck = 10,
+ .trp = 20,
+ .twr = 10,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Samsung KM416S4030CT */
+ .name = "KM416S4030CT",
+ .rows = 13,
+ .tck = 8,
+ .trcd = 24, /* 3 CLKs */
+ .trp = 24, /* 3 CLKs */
+ .twr = 16, /* Trdl: 2 CLKs */
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Winbond W982516AH75L CL3 */
+ .name = "W982516AH75L",
+ .rows = 16,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ }, { /* Micron MT48LC8M16A2TG-75 */
+ .name = "MT48LC8M16A2TG-75",
+ .rows = 12,
+ .tck = 8,
+ .trcd = 20,
+ .trp = 20,
+ .twr = 8,
+ .refresh = 64000,
+ .cas_latency = 3,
+ },
+};
+
+static struct sdram_params sdram_params;
+
+/*
+ * Given a period in ns and frequency in khz, calculate the number of
+ * cycles of frequency in period. Note that we round up to the next
+ * cycle, even if we are only slightly over.
+ */
+static inline u_int ns_to_cycles(u_int ns, u_int khz)
+{
+ return (ns * khz + 999999) / 1000000;
+}
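+
+/*
+ * Worked example: ns_to_cycles(20, 103200) = (20 * 103200 + 999999) /
+ * 1000000 = 3063999 / 1000000 = 3, i.e. a 20ns precharge at a 103.2MHz
+ * memory clock costs 2.064 cycles and is rounded up to 3.
+ */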
+
+/*
+ * Create the MDCAS register bit pattern.
+ */
+static inline void set_mdcas(u_int *mdcas, int delayed, u_int rcd)
+{
+ u_int shift;
+
+ rcd = 2 * rcd - 1;
+ shift = delayed + 1 + rcd;
+
+ mdcas[0] = (1 << rcd) - 1;
+ mdcas[0] |= 0x55555555 << shift;
+ mdcas[1] = mdcas[2] = 0x55555555 << (shift & 1);
+}
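+
+/*
+ * Sanity check of the bit pattern: set_mdcas(mdcas, 1, 3) gives
+ * rcd = 2*3 - 1 = 5 and shift = 1 + 1 + 5 = 7, hence
+ * mdcas[0] = 0x1f | (0x55555555 << 7) = 0xaaaaaa9f and
+ * mdcas[1] = mdcas[2] = 0x55555555 << 1 = 0xaaaaaaaa -- the same
+ * values that appear in the "#if 0" block in sa1110_target() below.
+ */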
+
+static void
+sdram_calculate_timing(struct sdram_info *sd, u_int cpu_khz,
+ struct sdram_params *sdram)
+{
+ u_int mem_khz, sd_khz, trp, twr;
+
+ mem_khz = cpu_khz / 2;
+ sd_khz = mem_khz;
+
+ /*
+ * If SDCLK would invalidate the SDRAM timings,
+ * run SDCLK at half speed.
+ *
+ * CPU steppings prior to B2 must either run the memory at
+ * half speed or use delayed read latching (errata 13).
+ */
+ if ((ns_to_cycles(sdram->tck, sd_khz) > 1) ||
+ (CPU_REVISION < CPU_SA1110_B2 && sd_khz < 62000))
+ sd_khz /= 2;
+
+ sd->mdcnfg = MDCNFG & 0x007f007f;
+
+ twr = ns_to_cycles(sdram->twr, mem_khz);
+
+ /* trp should always be >= 1 */
+ trp = ns_to_cycles(sdram->trp, mem_khz) - 1;
+ if (trp < 1)
+ trp = 1;
+
+ sd->mdcnfg |= trp << 8;
+ sd->mdcnfg |= trp << 24;
+ sd->mdcnfg |= sdram->cas_latency << 12;
+ sd->mdcnfg |= sdram->cas_latency << 28;
+ sd->mdcnfg |= twr << 14;
+ sd->mdcnfg |= twr << 30;
+
+ sd->mdrefr = MDREFR & 0xffbffff0;
+ sd->mdrefr |= 7;
+
+ if (sd_khz != mem_khz)
+ sd->mdrefr |= MDREFR_K1DB2;
+
+ /* initial number of '1's in MDCAS + 1 */
+ set_mdcas(sd->mdcas, sd_khz >= 62000,
+ ns_to_cycles(sdram->trcd, mem_khz));
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "MDCNFG: %08x MDREFR: %08x MDCAS0: %08x MDCAS1: %08x MDCAS2: %08x\n",
+ sd->mdcnfg, sd->mdrefr, sd->mdcas[0], sd->mdcas[1],
+ sd->mdcas[2]);
+#endif
+}
+
+/*
+ * Set the SDRAM refresh rate.
+ */
+static inline void sdram_set_refresh(u_int dri)
+{
+ MDREFR = (MDREFR & 0xffff000f) | (dri << 4);
+ (void) MDREFR;
+}
+
+/*
+ * Update the refresh period. We do this such that we always refresh
+ * the SDRAMs within their permissible period. The refresh period is
+ * always a multiple of the memory clock (fixed at cpu_clock / 2).
+ *
+ * FIXME: we don't currently take account of burst accesses here,
+ * but neither do Intel's DM nor Angel.
+ */
+static void
+sdram_update_refresh(u_int cpu_khz, struct sdram_params *sdram)
+{
+ u_int ns_row = (sdram->refresh * 1000) >> sdram->rows;
+ u_int dri = ns_to_cycles(ns_row, cpu_khz / 2) / 32;
+
+#ifdef DEBUG
+ mdelay(250);
+ printk(KERN_DEBUG "new dri value = %d\n", dri);
+#endif
+
+ sdram_set_refresh(dri);
+}
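+
+/*
+ * Worked example (TC59SM716-CL2 values, cpu at 206.4MHz): ns_row =
+ * (64000 * 1000) >> 12 = 15625ns per row, and dri =
+ * ns_to_cycles(15625, 103200) / 32 = 1613 / 32 = 50, i.e. roughly one
+ * refresh request every 50 * 32 memory clock cycles.
+ */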
+
+/*
+ * Ok, set the CPU frequency.
+ */
+static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
+{
+ struct sdram_params *sdram = &sdram_params;
+ struct sdram_info sd;
+ unsigned long flags;
+ unsigned int unused;
+
+ sdram_calculate_timing(&sd, sa11x0_freq_table[ppcr].frequency, sdram);
+
+#if 0
+ /*
+ * These values are wrong according to the SA1110 documentation
+ * and errata, but they seem to work. Need to get a storage
+ * scope on to the SDRAM signals to work out why.
+ */
+ if (policy->max < 147500) {
+ sd.mdrefr |= MDREFR_K1DB2;
+ sd.mdcas[0] = 0xaaaaaa7f;
+ } else {
+ sd.mdrefr &= ~MDREFR_K1DB2;
+ sd.mdcas[0] = 0xaaaaaa9f;
+ }
+ sd.mdcas[1] = 0xaaaaaaaa;
+ sd.mdcas[2] = 0xaaaaaaaa;
+#endif
+
+ /*
+ * The clock could be going away for some time. Set the SDRAMs
+ * to refresh rapidly (every 64 memory clock cycles). To get
+ * through the whole array, we need to wait 262144 mclk cycles.
+ * We wait 20ms to be safe.
+ */
+ sdram_set_refresh(2);
+ if (!irqs_disabled())
+ msleep(20);
+ else
+ mdelay(20);
+
+ /*
+ * Reprogram the DRAM timings with interrupts disabled, and
+ * ensure that we are doing this within a complete cache line.
+ * This means that we won't access SDRAM for the duration of
+ * the programming.
+ */
+ local_irq_save(flags);
+ asm("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
+ udelay(10);
+ __asm__ __volatile__("\n\
+ b 2f \n\
+ .align 5 \n\
+1: str %3, [%1, #0] @ MDCNFG \n\
+ str %4, [%1, #28] @ MDREFR \n\
+ str %5, [%1, #4] @ MDCAS0 \n\
+ str %6, [%1, #8] @ MDCAS1 \n\
+ str %7, [%1, #12] @ MDCAS2 \n\
+ str %8, [%2, #0] @ PPCR \n\
+ ldr %0, [%1, #0] \n\
+ b 3f \n\
+2: b 1b \n\
+3: nop \n\
+ nop"
+ : "=&r" (unused)
+ : "r" (&MDCNFG), "r" (&PPCR), "0" (sd.mdcnfg),
+ "r" (sd.mdrefr), "r" (sd.mdcas[0]),
+ "r" (sd.mdcas[1]), "r" (sd.mdcas[2]), "r" (ppcr));
+ local_irq_restore(flags);
+
+ /*
+ * Now, return the SDRAM refresh back to normal.
+ */
+ sdram_update_refresh(sa11x0_freq_table[ppcr].frequency, sdram);
+
+ return 0;
+}
+
+static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
+{
+ return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
+}
+
+/* sa1110_driver needs __refdata because it must remain after init registers
+ * it with cpufreq_register_driver() */
+static struct cpufreq_driver sa1110_driver __refdata = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sa1110_target,
+ .get = sa11x0_getspeed,
+ .init = sa1110_cpu_init,
+ .name = "sa1110",
+};
+
+static struct sdram_params *sa1110_find_sdram(const char *name)
+{
+ struct sdram_params *sdram;
+
+ for (sdram = sdram_tbl; sdram < sdram_tbl + ARRAY_SIZE(sdram_tbl);
+ sdram++)
+ if (strcmp(name, sdram->name) == 0)
+ return sdram;
+
+ return NULL;
+}
+
+static char sdram_name[16];
+
+static int __init sa1110_clk_init(void)
+{
+ struct sdram_params *sdram;
+ const char *name = sdram_name;
+
+ if (!cpu_is_sa1110())
+ return -ENODEV;
+
+ if (!name[0]) {
+ if (machine_is_assabet())
+ name = "TC59SM716-CL3";
+ if (machine_is_pt_system3())
+ name = "K4S641632D";
+ if (machine_is_h3100())
+ name = "KM416S4030CT";
+ if (machine_is_jornada720() || machine_is_h3600())
+ name = "K4S281632B-1H";
+ if (machine_is_nanoengine())
+ name = "MT48LC8M16A2TG-75";
+ }
+
+ sdram = sa1110_find_sdram(name);
+ if (sdram) {
+ printk(KERN_DEBUG "SDRAM: tck: %d trcd: %d trp: %d"
+ " twr: %d refresh: %d cas_latency: %d\n",
+ sdram->tck, sdram->trcd, sdram->trp,
+ sdram->twr, sdram->refresh, sdram->cas_latency);
+
+ memcpy(&sdram_params, sdram, sizeof(sdram_params));
+
+ return cpufreq_register_driver(&sa1110_driver);
+ }
+
+ return 0;
+}
+
+module_param_string(sdram, sdram_name, sizeof(sdram_name), 0);
+arch_initcall(sa1110_clk_init);
diff --git a/kernel/drivers/cpufreq/sc520_freq.c b/kernel/drivers/cpufreq/sc520_freq.c
new file mode 100644
index 000000000..ac84e4818
--- /dev/null
+++ b/kernel/drivers/cpufreq/sc520_freq.c
@@ -0,0 +1,140 @@
+/*
+ * sc520_freq.c: cpufreq driver for the AMD Elan sc520
+ *
+ * Copyright (C) 2005 Sean Young <sean@mess.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Based on elanfreq.c
+ *
+ * 2005-03-30: - initial revision
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/delay.h>
+#include <linux/cpufreq.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+
+#define MMCR_BASE 0xfffef000 /* The default base address */
+#define OFFS_CPUCTL 0x2 /* CPU Control Register */
+
+static __u8 __iomem *cpuctl;
+
+#define PFX "sc520_freq: "
+
+static struct cpufreq_frequency_table sc520_freq_table[] = {
+ {0, 0x01, 100000},
+ {0, 0x02, 133000},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
+{
+ u8 clockspeed_reg = *cpuctl;
+
+ switch (clockspeed_reg & 0x03) {
+ default:
+ printk(KERN_ERR PFX "error: cpuctl register has unexpected "
+ "value %02x\n", clockspeed_reg);
+ /* fall through - report the 100MHz default */
+ case 0x01:
+ return 100000;
+ case 0x02:
+ return 133000;
+ }
+}
+
+static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state)
+{
+
+ u8 clockspeed_reg;
+
+ local_irq_disable();
+
+ clockspeed_reg = *cpuctl & ~0x03;
+ *cpuctl = clockspeed_reg | sc520_freq_table[state].driver_data;
+
+ local_irq_enable();
+
+ return 0;
+}
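+
+/*
+ * The low two bits of the CPUCTL MMCR register select the core clock
+ * (0x01 = 100MHz, 0x02 = 133MHz, per sc520_freq_table); interrupts are
+ * disabled around the read-modify-write so the register is never
+ * observed mid-update.
+ */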
+
+/*
+ * Module init and exit code
+ */
+
+static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ /* capability check */
+ if (c->x86_vendor != X86_VENDOR_AMD ||
+ c->x86 != 4 || c->x86_model != 9)
+ return -ENODEV;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 1000000; /* 1ms */
+
+ return cpufreq_table_validate_and_show(policy, sc520_freq_table);
+}
+
+
+static struct cpufreq_driver sc520_freq_driver = {
+ .get = sc520_freq_get_cpu_frequency,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sc520_freq_target,
+ .init = sc520_freq_cpu_init,
+ .name = "sc520_freq",
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id sc520_ids[] = {
+ { X86_VENDOR_AMD, 4, 9 },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, sc520_ids);
+
+static int __init sc520_freq_init(void)
+{
+ int err;
+
+ if (!x86_match_cpu(sc520_ids))
+ return -ENODEV;
+
+ cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
+ if (!cpuctl) {
+ printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
+ return -ENOMEM;
+ }
+
+ err = cpufreq_register_driver(&sc520_freq_driver);
+ if (err)
+ iounmap(cpuctl);
+
+ return err;
+}
+
+
+static void __exit sc520_freq_exit(void)
+{
+ cpufreq_unregister_driver(&sc520_freq_driver);
+ iounmap(cpuctl);
+}
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU");
+
+module_init(sc520_freq_init);
+module_exit(sc520_freq_exit);
+
diff --git a/kernel/drivers/cpufreq/sfi-cpufreq.c b/kernel/drivers/cpufreq/sfi-cpufreq.c
new file mode 100644
index 000000000..ffa3389e5
--- /dev/null
+++ b/kernel/drivers/cpufreq/sfi-cpufreq.c
@@ -0,0 +1,136 @@
+/*
+ * SFI Performance States Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
+ * Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com>
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sfi.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <asm/msr.h>
+
+static struct cpufreq_frequency_table *freq_table;
+static struct sfi_freq_table_entry *sfi_cpufreq_array;
+static int num_freq_table_entries;
+
+static int sfi_parse_freq(struct sfi_table_header *table)
+{
+ struct sfi_table_simple *sb;
+ struct sfi_freq_table_entry *pentry;
+ int totallen;
+
+ sb = (struct sfi_table_simple *)table;
+ num_freq_table_entries = SFI_GET_NUM_ENTRIES(sb,
+ struct sfi_freq_table_entry);
+ if (num_freq_table_entries <= 1) {
+ pr_err("No p-states discovered\n");
+ return -ENODEV;
+ }
+
+ pentry = (struct sfi_freq_table_entry *)sb->pentry;
+ totallen = num_freq_table_entries * sizeof(*pentry);
+
+ sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
+ if (!sfi_cpufreq_array)
+ return -ENOMEM;
+
+ memcpy(sfi_cpufreq_array, pentry, totallen);
+
+ return 0;
+}
+
+static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int next_perf_state = 0; /* Index into perf table */
+ u32 lo, hi;
+
+ next_perf_state = policy->freq_table[index].driver_data;
+
+ rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
+ lo = (lo & ~INTEL_PERF_CTL_MASK) |
+ ((u32) sfi_cpufreq_array[next_perf_state].ctrl_val &
+ INTEL_PERF_CTL_MASK);
+ wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
+
+ return 0;
+}
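+
+/*
+ * Only the bits covered by INTEL_PERF_CTL_MASK are replaced in the
+ * read-modify-write above; the remaining PERF_CTL bits (and the whole
+ * high word) are preserved from the value read back from the MSR.
+ */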
+
+static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+ policy->cpuinfo.transition_latency = 100000; /* 100us */
+
+ return cpufreq_table_validate_and_show(policy, freq_table);
+}
+
+static struct cpufreq_driver sfi_cpufreq_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = sfi_cpufreq_target,
+ .init = sfi_cpufreq_cpu_init,
+ .name = "sfi-cpufreq",
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init sfi_cpufreq_init(void)
+{
+ int ret, i;
+
+ /* parse the freq table from SFI */
+ ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
+ if (ret)
+ return ret;
+
+ freq_table = kzalloc(sizeof(*freq_table) *
+ (num_freq_table_entries + 1), GFP_KERNEL);
+ if (!freq_table) {
+ ret = -ENOMEM;
+ goto err_free_array;
+ }
+
+ for (i = 0; i < num_freq_table_entries; i++) {
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000;
+ }
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ ret = cpufreq_register_driver(&sfi_cpufreq_driver);
+ if (ret)
+ goto err_free_tbl;
+
+ return ret;
+
+err_free_tbl:
+ kfree(freq_table);
+err_free_array:
+ kfree(sfi_cpufreq_array);
+ return ret;
+}
+late_initcall(sfi_cpufreq_init);
+
+static void __exit sfi_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&sfi_cpufreq_driver);
+ kfree(freq_table);
+ kfree(sfi_cpufreq_array);
+}
+module_exit(sfi_cpufreq_exit);
+
+MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>");
+MODULE_DESCRIPTION("SFI Performance-States Driver");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/sh-cpufreq.c b/kernel/drivers/cpufreq/sh-cpufreq.c
new file mode 100644
index 000000000..86628e22b
--- /dev/null
+++ b/kernel/drivers/cpufreq/sh-cpufreq.c
@@ -0,0 +1,177 @@
+/*
+ * cpufreq driver for the SuperH processors.
+ *
+ * Copyright (C) 2002 - 2012 Paul Mundt
+ * Copyright (C) 2002 M. R. Brown
+ *
+ * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
+ *
+ * Copyright (C) 2004-2007 Atmel Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "cpufreq: " fmt
+
+#include <linux/types.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/sched.h> /* set_cpus_allowed() */
+#include <linux/clk.h>
+#include <linux/percpu.h>
+#include <linux/sh_clk.h>
+
+static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+
+static unsigned int sh_cpufreq_get(unsigned int cpu)
+{
+ return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
+}
+
+/*
+ * Here we notify other drivers of the proposed change and the final change.
+ */
+static int sh_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ cpumask_t cpus_allowed;
+ struct cpufreq_freqs freqs;
+ struct device *dev;
+ long freq;
+
+ cpus_allowed = current->cpus_allowed;
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ BUG_ON(smp_processor_id() != cpu);
+
+ dev = get_cpu_device(cpu);
+
+ /* Convert target_freq from kHz to Hz */
+ freq = clk_round_rate(cpuclk, target_freq * 1000);
+
+ if (freq < (policy->min * 1000) || freq > (policy->max * 1000)) {
+ /* Restore the saved cpu affinity before bailing out */
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
+
+ freqs.old = sh_cpufreq_get(cpu);
+ freqs.new = (freq + 500) / 1000;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+ clk_set_rate(cpuclk, freq);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+
+ dev_dbg(dev, "set frequency %lu Hz\n", freq);
+
+ return 0;
+}
+
+static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
+ struct cpufreq_frequency_table *freq_table;
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table)
+ return cpufreq_frequency_table_verify(policy, freq_table);
+
+ cpufreq_verify_within_cpu_limits(policy);
+
+ policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
+static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+ struct cpufreq_frequency_table *freq_table;
+ struct device *dev;
+
+ dev = get_cpu_device(cpu);
+
+ cpuclk = clk_get(dev, "cpu_clk");
+ if (IS_ERR(cpuclk)) {
+ dev_err(dev, "couldn't get CPU clk\n");
+ return PTR_ERR(cpuclk);
+ }
+
+ freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
+ if (freq_table) {
+ int result;
+
+ result = cpufreq_table_validate_and_show(policy, freq_table);
+ if (result)
+ return result;
+ } else {
+ dev_notice(dev, "no frequency table found, falling back "
+ "to rate rounding.\n");
+
+ policy->min = policy->cpuinfo.min_freq =
+ (clk_round_rate(cpuclk, 1) + 500) / 1000;
+ policy->max = policy->cpuinfo.max_freq =
+ (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+ }
+
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+ dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
+ "Maximum %u.%03u MHz.\n",
+ policy->min / 1000, policy->min % 1000,
+ policy->max / 1000, policy->max % 1000);
+
+ return 0;
+}
+
+static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+
+ clk_put(cpuclk);
+
+ return 0;
+}
+
+static struct cpufreq_driver sh_cpufreq_driver = {
+ .name = "sh",
+ .get = sh_cpufreq_get,
+ .target = sh_cpufreq_target,
+ .verify = sh_cpufreq_verify,
+ .init = sh_cpufreq_cpu_init,
+ .exit = sh_cpufreq_cpu_exit,
+ .attr = cpufreq_generic_attr,
+};
+
+static int __init sh_cpufreq_module_init(void)
+{
+ pr_notice("SuperH CPU frequency driver.\n");
+ return cpufreq_register_driver(&sh_cpufreq_driver);
+}
+
+static void __exit sh_cpufreq_module_exit(void)
+{
+ cpufreq_unregister_driver(&sh_cpufreq_driver);
+}
+
+module_init(sh_cpufreq_module_init);
+module_exit(sh_cpufreq_module_exit);
+
+MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
+MODULE_DESCRIPTION("cpufreq driver for SuperH");
+MODULE_LICENSE("GPL");
diff --git a/kernel/drivers/cpufreq/sparc-us2e-cpufreq.c b/kernel/drivers/cpufreq/sparc-us2e-cpufreq.c
new file mode 100644
index 000000000..b73feeb66
--- /dev/null
+++ b/kernel/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -0,0 +1,378 @@
+/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/asi.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us2e_driver;
+
+struct us2e_freq_percpu_info {
+ struct cpufreq_frequency_table table[6];
+};
+
+/* Indexed by cpu number. */
+static struct us2e_freq_percpu_info *us2e_freq_table;
+
+#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
+#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
+
+/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
+ * in the ESTAR mode control register.
+ */
+#define ESTAR_MODE_DIV_1 0x0000000000000000UL
+#define ESTAR_MODE_DIV_2 0x0000000000000001UL
+#define ESTAR_MODE_DIV_4 0x0000000000000003UL
+#define ESTAR_MODE_DIV_6 0x0000000000000002UL
+#define ESTAR_MODE_DIV_8 0x0000000000000004UL
+#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
+
+#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
+#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
+#define MCTRL0_REFR_COUNT_SHIFT 8
+#define MCTRL0_REFR_INTERVAL 7800
+#define MCTRL0_REFR_CLKS_P_CNT 64
+
+static unsigned long read_hbreg(unsigned long addr)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=&r" (ret)
+ : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+ return ret;
+}
+
+static void write_hbreg(unsigned long addr, unsigned long val)
+{
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
+ : "memory");
+ if (addr == HBIRD_ESTAR_MODE_ADDR) {
+ /* Need to wait 16 clock cycles for the PLL to lock. */
+ udelay(1);
+ }
+}
+
+static void self_refresh_ctl(int enable)
+{
+ unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+ if (enable)
+ mctrl |= MCTRL0_SREFRESH_ENAB;
+ else
+ mctrl &= ~MCTRL0_SREFRESH_ENAB;
+ write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+ (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+}
+
+static void frob_mem_refresh(int cpu_slowing_down,
+ unsigned long clock_tick,
+ unsigned long old_divisor, unsigned long divisor)
+{
+ unsigned long old_refr_count, refr_count, mctrl;
+
+ refr_count = (clock_tick * MCTRL0_REFR_INTERVAL);
+ refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
+
+ mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+ old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
+ >> MCTRL0_REFR_COUNT_SHIFT;
+
+ mctrl &= ~MCTRL0_REFR_COUNT_MASK;
+ mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
+ write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+ mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+ if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
+ unsigned long usecs;
+
+ /* We have to wait for both refresh counts (old
+ * and new) to go to zero.
+ */
+ usecs = (MCTRL0_REFR_CLKS_P_CNT *
+ (refr_count + old_refr_count) *
+ 1000000UL *
+ old_divisor) / clock_tick;
+ udelay(usecs + 1UL);
+ }
+}
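+
+/*
+ * Worked example with illustrative numbers: for a 500MHz part
+ * (clock_tick = 500000000 here, in Hz) at divisor 1, refr_count =
+ * 500000000 * 7800 / (64 * 1 * 1000000000) = 60, i.e. roughly sixty
+ * 64-clock units fit in one 7.8us (7800ns) refresh interval.
+ */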
+
+static void us2e_transition(unsigned long estar, unsigned long new_bits,
+ unsigned long clock_tick,
+ unsigned long old_divisor, unsigned long divisor)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ estar &= ~ESTAR_MODE_DIV_MASK;
+
+ /* This is based upon the state transition diagram in the IIe manual. */
+ if (old_divisor == 2 && divisor == 1) {
+ self_refresh_ctl(0);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+ } else if (old_divisor == 1 && divisor == 2) {
+ frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ self_refresh_ctl(1);
+ } else if (old_divisor == 1 && divisor > 2) {
+ us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+ 1, 2);
+ us2e_transition(estar, new_bits, clock_tick,
+ 2, divisor);
+ } else if (old_divisor > 2 && divisor == 1) {
+ us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+ old_divisor, 2);
+ us2e_transition(estar, new_bits, clock_tick,
+ 2, divisor);
+ } else if (old_divisor < divisor) {
+ frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ } else if (old_divisor > divisor) {
+ write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+ frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+ } else {
+ BUG();
+ }
+
+ local_irq_restore(flags);
+}
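+
+/*
+ * Transitions to or from a deep divisor always pass through divisor 2,
+ * e.g. 1 -> 6 recurses as 1 -> 2 (reprogram the refresh counter, write
+ * ESTAR, enable memory self-refresh) followed by 2 -> 6 (reprogram the
+ * refresh counter, then write ESTAR), mirroring the state transition
+ * diagram in the IIe manual.
+ */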
+
+static unsigned long index_to_estar_mode(unsigned int index)
+{
+ switch (index) {
+ case 0:
+ return ESTAR_MODE_DIV_1;
+
+ case 1:
+ return ESTAR_MODE_DIV_2;
+
+ case 2:
+ return ESTAR_MODE_DIV_4;
+
+ case 3:
+ return ESTAR_MODE_DIV_6;
+
+ case 4:
+ return ESTAR_MODE_DIV_8;
+
+ default:
+ BUG();
+ }
+}
+
+static unsigned long index_to_divisor(unsigned int index)
+{
+ switch (index) {
+ case 0:
+ return 1;
+
+ case 1:
+ return 2;
+
+ case 2:
+ return 4;
+
+ case 3:
+ return 6;
+
+ case 4:
+ return 8;
+
+ default:
+ BUG();
+ }
+}
+
+static unsigned long estar_to_divisor(unsigned long estar)
+{
+ unsigned long ret;
+
+ switch (estar & ESTAR_MODE_DIV_MASK) {
+ case ESTAR_MODE_DIV_1:
+ ret = 1;
+ break;
+ case ESTAR_MODE_DIV_2:
+ ret = 2;
+ break;
+ case ESTAR_MODE_DIV_4:
+ ret = 4;
+ break;
+ case ESTAR_MODE_DIV_6:
+ ret = 6;
+ break;
+ case ESTAR_MODE_DIV_8:
+ ret = 8;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static unsigned int us2e_freq_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned long clock_tick, estar;
+
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ return clock_tick / estar_to_divisor(estar);
+}
+
+static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long new_bits, new_freq;
+ unsigned long clock_tick, divisor, old_divisor, estar;
+ cpumask_t cpus_allowed;
+
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ new_bits = index_to_estar_mode(index);
+ divisor = index_to_divisor(index);
+ new_freq /= divisor;
+
+ estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+ old_divisor = estar_to_divisor(estar);
+
+ if (old_divisor != divisor)
+ us2e_transition(estar, new_bits, clock_tick * 1000,
+ old_divisor, divisor);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ return 0;
+}
+
+static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ struct cpufreq_frequency_table *table =
+ &us2e_freq_table[cpu].table[0];
+
+ /* Six-entry table: dividers 1, 2, 4, 6, 8, then the terminator */
+ table[0].driver_data = 0;
+ table[0].frequency = clock_tick / 1;
+ table[1].driver_data = 1;
+ table[1].frequency = clock_tick / 2;
+ table[2].driver_data = 2;
+ table[2].frequency = clock_tick / 4;
+ table[3].driver_data = 3;
+ table[3].frequency = clock_tick / 6;
+ table[4].driver_data = 4;
+ table[4].frequency = clock_tick / 8;
+ table[5].driver_data = 5;
+ table[5].frequency = CPUFREQ_TABLE_END;
+
+ policy->cpuinfo.transition_latency = 0;
+ policy->cur = clock_tick;
+
+ return cpufreq_table_validate_and_show(policy, table);
+}
+
+static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ if (cpufreq_us2e_driver)
+ us2e_freq_target(policy, 0);
+
+ return 0;
+}
+
+static int __init us2e_freq_init(void)
+{
+ unsigned long manuf, impl, ver;
+ int ret;
+
+ if (tlb_type != spitfire)
+ return -ENODEV;
+
+ __asm__("rdpr %%ver, %0" : "=r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
+
+ if (manuf == 0x17 && impl == 0x13) {
+ struct cpufreq_driver *driver;
+
+ ret = -ENOMEM;
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ goto err_out;
+
+ us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
+ GFP_KERNEL);
+ if (!us2e_freq_table)
+ goto err_out;
+
+ driver->init = us2e_freq_cpu_init;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us2e_freq_target;
+ driver->get = us2e_freq_get;
+ driver->exit = us2e_freq_cpu_exit;
+ strcpy(driver->name, "UltraSPARC-IIe");
+
+ cpufreq_us2e_driver = driver;
+ ret = cpufreq_register_driver(driver);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ if (driver) {
+ kfree(driver);
+ cpufreq_us2e_driver = NULL;
+ }
+ kfree(us2e_freq_table);
+ us2e_freq_table = NULL;
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static void __exit us2e_freq_exit(void)
+{
+ if (cpufreq_us2e_driver) {
+ cpufreq_unregister_driver(cpufreq_us2e_driver);
+ kfree(cpufreq_us2e_driver);
+ cpufreq_us2e_driver = NULL;
+ kfree(us2e_freq_table);
+ us2e_freq_table = NULL;
+ }
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
+MODULE_LICENSE("GPL");
+
+module_init(us2e_freq_init);
+module_exit(us2e_freq_exit);
diff --git a/kernel/drivers/cpufreq/sparc-us3-cpufreq.c b/kernel/drivers/cpufreq/sparc-us3-cpufreq.c
new file mode 100644
index 000000000..9bb42ba50
--- /dev/null
+++ b/kernel/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -0,0 +1,237 @@
+/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <asm/head.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us3_driver;
+
+struct us3_freq_percpu_info {
+ struct cpufreq_frequency_table table[4];
+};
+
+/* Indexed by cpu number. */
+static struct us3_freq_percpu_info *us3_freq_table;
+
+/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
+ * in the Safari config register.
+ */
+#define SAFARI_CFG_DIV_1 0x0000000000000000UL
+#define SAFARI_CFG_DIV_2 0x0000000040000000UL
+#define SAFARI_CFG_DIV_32 0x0000000080000000UL
+#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
+
+static unsigned long read_safari_cfg(void)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("ldxa [%%g0] %1, %0"
+ : "=&r" (ret)
+ : "i" (ASI_SAFARI_CONFIG));
+ return ret;
+}
+
+static void write_safari_cfg(unsigned long val)
+{
+ __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (val), "i" (ASI_SAFARI_CONFIG)
+ : "memory");
+}
+
+static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
+{
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ unsigned long ret;
+
+ switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
+ case SAFARI_CFG_DIV_1:
+ ret = clock_tick / 1;
+ break;
+ case SAFARI_CFG_DIV_2:
+ ret = clock_tick / 2;
+ break;
+ case SAFARI_CFG_DIV_32:
+ ret = clock_tick / 32;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static unsigned int us3_freq_get(unsigned int cpu)
+{
+ cpumask_t cpus_allowed;
+ unsigned long reg;
+ unsigned int ret;
+
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ reg = read_safari_cfg();
+ ret = get_current_freq(cpu, reg);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ return ret;
+}
+
+static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long new_bits, new_freq, reg;
+ cpumask_t cpus_allowed;
+
+ cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ new_freq = sparc64_get_clock_tick(cpu) / 1000;
+ switch (index) {
+ case 0:
+ new_bits = SAFARI_CFG_DIV_1;
+ new_freq /= 1;
+ break;
+ case 1:
+ new_bits = SAFARI_CFG_DIV_2;
+ new_freq /= 2;
+ break;
+ case 2:
+ new_bits = SAFARI_CFG_DIV_32;
+ new_freq /= 32;
+ break;
+
+ default:
+ BUG();
+ }
+
+ reg = read_safari_cfg();
+
+ reg &= ~SAFARI_CFG_DIV_MASK;
+ reg |= new_bits;
+ write_safari_cfg(reg);
+
+ set_cpus_allowed_ptr(current, &cpus_allowed);
+
+ return 0;
+}
+
+static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+ struct cpufreq_frequency_table *table =
+ &us3_freq_table[cpu].table[0];
+
+ table[0].driver_data = 0;
+ table[0].frequency = clock_tick / 1;
+ table[1].driver_data = 1;
+ table[1].frequency = clock_tick / 2;
+ table[2].driver_data = 2;
+ table[2].frequency = clock_tick / 32;
+ table[3].driver_data = 0;
+ table[3].frequency = CPUFREQ_TABLE_END;
+
+ policy->cpuinfo.transition_latency = 0;
+ policy->cur = clock_tick;
+
+ return cpufreq_table_validate_and_show(policy, table);
+}
+
+static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+ if (cpufreq_us3_driver)
+ us3_freq_target(policy, 0);
+
+ return 0;
+}
+
+static int __init us3_freq_init(void)
+{
+ unsigned long manuf, impl, ver;
+ int ret;
+
+ if (tlb_type != cheetah && tlb_type != cheetah_plus)
+ return -ENODEV;
+
+ __asm__("rdpr %%ver, %0" : "=r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
+
+ if (manuf == CHEETAH_MANUF &&
+ (impl == CHEETAH_IMPL ||
+ impl == CHEETAH_PLUS_IMPL ||
+ impl == JAGUAR_IMPL ||
+ impl == PANTHER_IMPL)) {
+ struct cpufreq_driver *driver;
+
+ ret = -ENOMEM;
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ goto err_out;
+
+ us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
+ GFP_KERNEL);
+ if (!us3_freq_table)
+ goto err_out;
+
+ driver->init = us3_freq_cpu_init;
+ driver->verify = cpufreq_generic_frequency_table_verify;
+ driver->target_index = us3_freq_target;
+ driver->get = us3_freq_get;
+ driver->exit = us3_freq_cpu_exit;
+ strcpy(driver->name, "UltraSPARC-III");
+
+ cpufreq_us3_driver = driver;
+ ret = cpufreq_register_driver(driver);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ if (driver) {
+ kfree(driver);
+ cpufreq_us3_driver = NULL;
+ }
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
+ return ret;
+ }
+
+ return -ENODEV;
+}
+
+static void __exit us3_freq_exit(void)
+{
+ if (cpufreq_us3_driver) {
+ cpufreq_unregister_driver(cpufreq_us3_driver);
+ kfree(cpufreq_us3_driver);
+ cpufreq_us3_driver = NULL;
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
+ }
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
+MODULE_LICENSE("GPL");
+
+module_init(us3_freq_init);
+module_exit(us3_freq_exit);
diff --git a/kernel/drivers/cpufreq/spear-cpufreq.c b/kernel/drivers/cpufreq/spear-cpufreq.c
new file mode 100644
index 000000000..4894924a3
--- /dev/null
+++ b/kernel/drivers/cpufreq/spear-cpufreq.c
@@ -0,0 +1,246 @@
+/*
+ * drivers/cpufreq/spear-cpufreq.c
+ *
+ * CPU Frequency Scaling for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* SPEAr CPUFreq driver data structure */
+static struct {
+ struct clk *clk;
+ unsigned int transition_latency;
+ struct cpufreq_frequency_table *freq_tbl;
+ u32 cnt;
+} spear_cpufreq;
+
+static struct clk *spear1340_cpu_get_possible_parent(unsigned long newfreq)
+{
+ struct clk *sys_pclk;
+ int pclk;
+ /*
+ * In SPEAr1340, the cpu clk's parent sys clk can take its input
+ * from the following sources:
+ */
+ const char *sys_clk_src[] = {
+ "sys_syn_clk",
+ "pll1_clk",
+ "pll2_clk",
+ "pll3_clk",
+ };
+
+ /*
+ * sys clk can be fed from several sources, each with its own
+ * frequency range, so pick a source that can supply the
+ * requested rate.
+ */
+ if (newfreq <= 300000000)
+ pclk = 0; /* src is sys_syn_clk */
+ else if (newfreq > 300000000 && newfreq <= 500000000)
+ pclk = 3; /* src is pll3_clk */
+ else if (newfreq == 600000000)
+ pclk = 1; /* src is pll1_clk */
+ else
+ return ERR_PTR(-EINVAL);
+
+ /* Get parent to sys clock */
+ sys_pclk = clk_get(NULL, sys_clk_src[pclk]);
+ if (IS_ERR(sys_pclk))
+ pr_err("Failed to get %s clock\n", sys_clk_src[pclk]);
+
+ return sys_pclk;
+}
+
+/*
+ * In SPEAr1340 we cannot program the cpu clock with newfreq directly: the
+ * source clock that must be changed may not currently be an ancestor of the
+ * cpu clock. We therefore set the rate on the source clock first and only
+ * then reparent the cpu clock to it.
+ */
+static int spear1340_set_cpu_rate(struct clk *sys_pclk, unsigned long newfreq)
+{
+ struct clk *sys_clk;
+ int ret = 0;
+
+ sys_clk = clk_get_parent(spear_cpufreq.clk);
+ if (IS_ERR(sys_clk)) {
+ pr_err("failed to get cpu's parent (sys) clock\n");
+ return PTR_ERR(sys_clk);
+ }
+
+ /* Set the rate of the source clock before changing the parent */
+ ret = clk_set_rate(sys_pclk, newfreq);
+ if (ret) {
+ pr_err("Failed to set sys clk rate to %lu\n", newfreq);
+ return ret;
+ }
+
+ ret = clk_set_parent(sys_clk, sys_pclk);
+ if (ret) {
+ pr_err("Failed to set sys clk parent\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spear_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ long newfreq;
+ struct clk *srcclk;
+ int ret, mult = 1;
+
+ newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
+
+ if (of_machine_is_compatible("st,spear1340")) {
+ /*
+ * SPEAr1340 is special: the cpu clk's parent can be fed from
+ * several clock sources, and different cpu frequencies may
+ * require different sources. Choose the one that suits the
+ * requested frequency.
+ */
+ srcclk = spear1340_cpu_get_possible_parent(newfreq);
+ if (IS_ERR(srcclk)) {
+ pr_err("Failed to get src clk\n");
+ return PTR_ERR(srcclk);
+ }
+
+ /* SPEAr1340: src clk is always 2 * intended cpu clk */
+ mult = 2;
+ } else {
+ /*
+ * The source clock to be altered is already an ancestor of
+ * the cpu clock, so we can work on the cpu clk directly.
+ */
+ srcclk = spear_cpufreq.clk;
+ }
+
+ newfreq = clk_round_rate(srcclk, newfreq * mult);
+ if (newfreq <= 0) {
+ pr_err("clk_round_rate failed for cpu src clock\n");
+ return newfreq;
+ }
+
+ if (mult == 2)
+ ret = spear1340_set_cpu_rate(srcclk, newfreq);
+ else
+ ret = clk_set_rate(spear_cpufreq.clk, newfreq);
+
+ if (ret)
+ pr_err("CPU Freq: cpu clk_set_rate failed: %d\n", ret);
+
+ return ret;
+}
+
+static int spear_cpufreq_init(struct cpufreq_policy *policy)
+{
+ policy->clk = spear_cpufreq.clk;
+ return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+ spear_cpufreq.transition_latency);
+}
+
+static struct cpufreq_driver spear_cpufreq_driver = {
+ .name = "cpufreq-spear",
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = spear_cpufreq_target,
+ .get = cpufreq_generic_get,
+ .init = spear_cpufreq_init,
+ .attr = cpufreq_generic_attr,
+};
+
+static int spear_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ const struct property *prop;
+ struct cpufreq_frequency_table *freq_tbl;
+ const __be32 *val;
+ int cnt, i, ret;
+
+ np = of_cpu_device_node_get(0);
+ if (!np) {
+ pr_err("No cpu node found");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(np, "clock-latency",
+ &spear_cpufreq.transition_latency))
+ spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+
+ prop = of_find_property(np, "cpufreq_tbl", NULL);
+ if (!prop || !prop->value) {
+ pr_err("Invalid cpufreq_tbl");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ cnt = prop->length / sizeof(u32);
+ val = prop->value;
+
+ freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+ if (!freq_tbl) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < cnt; i++)
+ freq_tbl[i].frequency = be32_to_cpup(val++);
+
+ freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+ spear_cpufreq.freq_tbl = freq_tbl;
+
+ of_node_put(np);
+
+ spear_cpufreq.clk = clk_get(NULL, "cpu_clk");
+ if (IS_ERR(spear_cpufreq.clk)) {
+ pr_err("Unable to get CPU clock\n");
+ ret = PTR_ERR(spear_cpufreq.clk);
+ goto out_put_mem;
+ }
+
+ ret = cpufreq_register_driver(&spear_cpufreq_driver);
+ if (!ret)
+ return 0;
+
+ pr_err("failed register driver: %d\n", ret);
+ clk_put(spear_cpufreq.clk);
+
+out_put_mem:
+ kfree(freq_tbl);
+ return ret;
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static struct platform_driver spear_cpufreq_platdrv = {
+ .driver = {
+ .name = "spear-cpufreq",
+ },
+ .probe = spear_cpufreq_probe,
+};
+module_platform_driver(spear_cpufreq_platdrv);
+
+MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
+MODULE_DESCRIPTION("SPEAr CPUFreq driver");
+MODULE_LICENSE("GPL");
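
The SPEAr1340 source-selection rule in spear1340_cpu_get_possible_parent() reduces to a small range check on the requested rate. A standalone sketch of just that rule (frequencies in Hz; the index values match the sys_clk_src[] table above, and 550 MHz is an example of an unsupported rate):

	#include <stdio.h>

	/* Mirror of the SPEAr1340 rule: pick a parent for sys clk based on
	 * the requested cpu frequency. Returns an index into sys_clk_src[],
	 * or -1 for unsupported rates. */
	static int pick_parent(unsigned long newfreq)
	{
		if (newfreq <= 300000000)
			return 0;	/* sys_syn_clk */
		if (newfreq <= 500000000)
			return 3;	/* pll3_clk */
		if (newfreq == 600000000)
			return 1;	/* pll1_clk */
		return -1;
	}

	int main(void)
	{
		printf("%d %d %d %d\n", pick_parent(200000000),
		       pick_parent(400000000), pick_parent(600000000),
		       pick_parent(550000000));	/* prints: 0 3 1 -1 */
		return 0;
	}
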
diff --git a/kernel/drivers/cpufreq/speedstep-centrino.c b/kernel/drivers/cpufreq/speedstep-centrino.c
new file mode 100644
index 000000000..7d4a31571
--- /dev/null
+++ b/kernel/drivers/cpufreq/speedstep-centrino.c
@@ -0,0 +1,566 @@
+/*
+ * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
+ * M (part of the Centrino chipset).
+ *
+ * Since the original Pentium M, most new Intel CPUs support Enhanced
+ * SpeedStep.
+ *
+ * Despite the "SpeedStep" in the name, this is almost entirely unlike
+ * traditional SpeedStep.
+ *
+ * Modelled on speedstep.c
+ *
+ * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h> /* current */
+#include <linux/delay.h>
+#include <linux/compiler.h>
+#include <linux/gfp.h>
+
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
+
+#define PFX "speedstep-centrino: "
+#define MAINTAINER "linux-pm@vger.kernel.org"
+
+#define INTEL_MSR_RANGE (0xffff)
+
+struct cpu_id
+{
+ __u8 x86; /* CPU family */
+ __u8 x86_model; /* model */
+ __u8 x86_mask; /* stepping */
+};
+
+enum {
+ CPU_BANIAS,
+ CPU_DOTHAN_A1,
+ CPU_DOTHAN_A2,
+ CPU_DOTHAN_B0,
+ CPU_MP4HT_D0,
+ CPU_MP4HT_E0,
+};
+
+static const struct cpu_id cpu_ids[] = {
+ [CPU_BANIAS] = { 6, 9, 5 },
+ [CPU_DOTHAN_A1] = { 6, 13, 1 },
+ [CPU_DOTHAN_A2] = { 6, 13, 2 },
+ [CPU_DOTHAN_B0] = { 6, 13, 6 },
+ [CPU_MP4HT_D0] = {15, 3, 4 },
+ [CPU_MP4HT_E0] = {15, 4, 1 },
+};
+#define N_IDS ARRAY_SIZE(cpu_ids)
+
+struct cpu_model
+{
+ const struct cpu_id *cpu_id;
+ const char *model_name;
+ unsigned max_freq; /* max clock in kHz */
+
+ struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */
+};
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+ const struct cpu_id *x);
+
+/* Operating points for current CPU */
+static DEFINE_PER_CPU(struct cpu_model *, centrino_model);
+static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu);
+
+static struct cpufreq_driver centrino_driver;
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE
+
+/* Computes the correct form for IA32_PERF_CTL MSR for a particular
+ frequency/voltage operating point; frequency in MHz, volts in mV.
+ This is stored as "driver_data" in the structure. */
+#define OP(mhz, mv) \
+ { \
+ .frequency = (mhz) * 1000, \
+ .driver_data = (((mhz)/100) << 8) | ((mv - 700) / 16) \
+ }
+
+/*
+ * These voltage tables were derived from the Intel Pentium M
+ * datasheet, document 25261202.pdf, Table 5. I have verified they
+ * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium
+ * M.
+ */
+
+/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */
+static struct cpufreq_frequency_table banias_900[] =
+{
+ OP(600, 844),
+ OP(800, 988),
+ OP(900, 1004),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
+static struct cpufreq_frequency_table banias_1000[] =
+{
+ OP(600, 844),
+ OP(800, 972),
+ OP(900, 988),
+ OP(1000, 1004),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */
+static struct cpufreq_frequency_table banias_1100[] =
+{
+ OP( 600, 956),
+ OP( 800, 1020),
+ OP( 900, 1100),
+ OP(1000, 1164),
+ OP(1100, 1180),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+
+/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */
+static struct cpufreq_frequency_table banias_1200[] =
+{
+ OP( 600, 956),
+ OP( 800, 1004),
+ OP( 900, 1020),
+ OP(1000, 1100),
+ OP(1100, 1164),
+ OP(1200, 1180),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.30GHz (Banias) */
+static struct cpufreq_frequency_table banias_1300[] =
+{
+ OP( 600, 956),
+ OP( 800, 1260),
+ OP(1000, 1292),
+ OP(1200, 1356),
+ OP(1300, 1388),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.40GHz (Banias) */
+static struct cpufreq_frequency_table banias_1400[] =
+{
+ OP( 600, 956),
+ OP( 800, 1180),
+ OP(1000, 1308),
+ OP(1200, 1436),
+ OP(1400, 1484),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.50GHz (Banias) */
+static struct cpufreq_frequency_table banias_1500[] =
+{
+ OP( 600, 956),
+ OP( 800, 1116),
+ OP(1000, 1228),
+ OP(1200, 1356),
+ OP(1400, 1452),
+ OP(1500, 1484),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.60GHz (Banias) */
+static struct cpufreq_frequency_table banias_1600[] =
+{
+ OP( 600, 956),
+ OP( 800, 1036),
+ OP(1000, 1164),
+ OP(1200, 1276),
+ OP(1400, 1420),
+ OP(1600, 1484),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+
+/* Intel Pentium M processor 1.70GHz (Banias) */
+static struct cpufreq_frequency_table banias_1700[] =
+{
+ OP( 600, 956),
+ OP( 800, 1004),
+ OP(1000, 1116),
+ OP(1200, 1228),
+ OP(1400, 1308),
+ OP(1700, 1484),
+ { .frequency = CPUFREQ_TABLE_END }
+};
+#undef OP
+
+#define _BANIAS(cpuid, max, name) \
+{ .cpu_id = cpuid, \
+ .model_name = "Intel(R) Pentium(R) M processor " name "MHz", \
+ .max_freq = (max)*1000, \
+ .op_points = banias_##max, \
+}
+#define BANIAS(max) _BANIAS(&cpu_ids[CPU_BANIAS], max, #max)
+
+/* CPU models, their operating frequency range, and freq/voltage
+ operating points */
+static struct cpu_model models[] =
+{
+ _BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"),
+ BANIAS(1000),
+ BANIAS(1100),
+ BANIAS(1200),
+ BANIAS(1300),
+ BANIAS(1400),
+ BANIAS(1500),
+ BANIAS(1600),
+ BANIAS(1700),
+
+ /* NULL model_name is a wildcard */
+ { &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL },
+ { &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL },
+ { &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
+ { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
+
+ { NULL, }
+};
+#undef _BANIAS
+#undef BANIAS
+
+static int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
+ struct cpu_model *model;
+
+ for(model = models; model->cpu_id != NULL; model++)
+ if (centrino_verify_cpu_id(cpu, model->cpu_id) &&
+ (model->model_name == NULL ||
+ strcmp(cpu->x86_model_id, model->model_name) == 0))
+ break;
+
+ if (model->cpu_id == NULL) {
+ /* No match at all */
+ pr_debug("no support for CPU model \"%s\": "
+ "send /proc/cpuinfo to " MAINTAINER "\n",
+ cpu->x86_model_id);
+ return -ENOENT;
+ }
+
+ if (model->op_points == NULL) {
+ /* Matched a non-match */
+ pr_debug("no table support for CPU model \"%s\"\n",
+ cpu->x86_model_id);
+ pr_debug("try using the acpi-cpufreq driver\n");
+ return -ENOENT;
+ }
+
+ per_cpu(centrino_model, policy->cpu) = model;
+
+ pr_debug("found \"%s\": max frequency: %dkHz\n",
+ model->model_name, model->max_freq);
+
+ return 0;
+}
+
+#else
+static inline int centrino_cpu_init_table(struct cpufreq_policy *policy)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
+
+static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
+ const struct cpu_id *x)
+{
+ if ((c->x86 == x->x86) &&
+ (c->x86_model == x->x86_model) &&
+ (c->x86_mask == x->x86_mask))
+ return 1;
+ return 0;
+}
+
+/* To be called only after centrino_model is initialized */
+static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
+{
+ int i;
+
+ /*
+ * Extract clock in kHz from PERF_CTL value
+ * for centrino, as some DSDTs are buggy.
+ * Ideally, this can be done using the acpi_data structure.
+ */
+ if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||
+ (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) ||
+ (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) {
+ msr = (msr >> 8) & 0xff;
+ return msr * 100000;
+ }
+
+ if ((!per_cpu(centrino_model, cpu)) ||
+ (!per_cpu(centrino_model, cpu)->op_points))
+ return 0;
+
+ msr &= 0xffff;
+ for (i = 0;
+ per_cpu(centrino_model, cpu)->op_points[i].frequency
+ != CPUFREQ_TABLE_END;
+ i++) {
+ if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
+ return per_cpu(centrino_model, cpu)->
+ op_points[i].frequency;
+ }
+ if (failsafe)
+ return per_cpu(centrino_model, cpu)->op_points[i-1].frequency;
+ else
+ return 0;
+}
+
+/* Return the current CPU frequency in kHz */
+static unsigned int get_cur_freq(unsigned int cpu)
+{
+ unsigned l, h;
+ unsigned clock_freq;
+
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
+ clock_freq = extract_clock(l, cpu, 0);
+
+ if (unlikely(clock_freq == 0)) {
+ /*
+ * On some CPUs, we can see transient MSR values (which are
+ * not present in _PSS), while CPU is doing some automatic
+ * P-state transition (like TM2). Get the last freq set
+ * in PERF_CTL.
+ */
+ rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
+ clock_freq = extract_clock(l, cpu, 1);
+ }
+ return clock_freq;
+}
+
+
+static int centrino_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
+ unsigned l, h;
+ int i;
+
+ /* Only Intel makes Enhanced Speedstep-capable CPUs */
+ if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+ !cpu_has(cpu, X86_FEATURE_EST))
+ return -ENODEV;
+
+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
+ centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ for (i = 0; i < N_IDS; i++)
+ if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
+ break;
+
+ if (i != N_IDS)
+ per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];
+
+ if (!per_cpu(centrino_cpu, policy->cpu)) {
+ pr_debug("found unsupported CPU with "
+ "Enhanced SpeedStep: send /proc/cpuinfo to "
+ MAINTAINER "\n");
+ return -ENODEV;
+ }
+
+ if (centrino_cpu_init_table(policy))
+ return -ENODEV;
+
+ /* Check to see if Enhanced SpeedStep is enabled, and try to
+ enable it if not. */
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+ if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+ l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
+ pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l);
+ wrmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+ /* check to see if it stuck */
+ rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+ if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+ printk(KERN_INFO PFX
+ "couldn't enable Enhanced SpeedStep\n");
+ return -ENODEV;
+ }
+ }
+
+ policy->cpuinfo.transition_latency = 10000;
+ /* 10uS transition latency */
+
+ return cpufreq_table_validate_and_show(policy,
+ per_cpu(centrino_model, policy->cpu)->op_points);
+}
+
+static int centrino_cpu_exit(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+
+ if (!per_cpu(centrino_model, cpu))
+ return -ENODEV;
+
+ per_cpu(centrino_model, cpu) = NULL;
+
+ return 0;
+}
+
+/**
+ * centrino_target - set a new target frequency
+ * @policy: policy to change
+ * @index: index of the target frequency
+ *
+ * Switches to the frequency at @index in the CPU's operating-point table.
+ */
+static int centrino_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
+ int retval = 0;
+ unsigned int j, first_cpu;
+ struct cpufreq_frequency_table *op_points;
+ cpumask_var_t covered_cpus;
+
+ if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
+ return -ENOMEM;
+
+ if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ first_cpu = 1;
+ op_points = &per_cpu(centrino_model, cpu)->op_points[index];
+ for_each_cpu(j, policy->cpus) {
+ int good_cpu;
+
+ /*
+ * Support for SMP systems.
+ * Make sure we are running on a CPU that wants its frequency changed.
+ */
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ good_cpu = cpumask_any_and(policy->cpus,
+ cpu_online_mask);
+ else
+ good_cpu = j;
+
+ if (good_cpu >= nr_cpu_ids) {
+ pr_debug("couldn't limit to CPUs in this domain\n");
+ retval = -EAGAIN;
+ if (first_cpu) {
+ /* We haven't started the transition yet. */
+ goto out;
+ }
+ break;
+ }
+
+ msr = op_points->driver_data;
+
+ if (first_cpu) {
+ rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
+ if (msr == (oldmsr & 0xffff)) {
+ pr_debug("no change needed - msr was and needs "
+ "to be %x\n", oldmsr);
+ retval = 0;
+ goto out;
+ }
+
+ first_cpu = 0;
+ /* all but 16 LSB are reserved, treat them with care */
+ oldmsr &= ~0xffff;
+ msr &= 0xffff;
+ oldmsr |= msr;
+ }
+
+ wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ break;
+
+ cpumask_set_cpu(j, covered_cpus);
+ }
+
+ if (unlikely(retval)) {
+ /*
+ * We have failed halfway through the frequency change.
+ * We have sent callbacks to policy->cpus and
+ * MSRs have already been written on covered_cpus.
+ * Best-effort undo.
+ */
+
+ for_each_cpu(j, covered_cpus)
+ wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);
+ }
+ retval = 0;
+
+out:
+ free_cpumask_var(covered_cpus);
+ return retval;
+}
+
+static struct cpufreq_driver centrino_driver = {
+ .name = "centrino", /* should be speedstep-centrino,
+ but there's a 16 char limit */
+ .init = centrino_cpu_init,
+ .exit = centrino_cpu_exit,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = centrino_target,
+ .get = get_cur_freq,
+ .attr = cpufreq_generic_attr,
+};
+
+/*
+ * This doesn't replace the detailed checks above because
+ * the generic CPU IDs don't have a way to match for steppings
+ * or ASCII model IDs.
+ */
+static const struct x86_cpu_id centrino_ids[] = {
+ { X86_VENDOR_INTEL, 6, 9, X86_FEATURE_EST },
+ { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
+ { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
+ { X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
+ { X86_VENDOR_INTEL, 15, 3, X86_FEATURE_EST },
+ { X86_VENDOR_INTEL, 15, 4, X86_FEATURE_EST },
+ {}
+};
+#if 0
+/* Autoload or not? Do not for now. */
+MODULE_DEVICE_TABLE(x86cpu, centrino_ids);
+#endif
+
+/**
+ * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver
+ *
+ * Initializes the Enhanced SpeedStep support. Returns -ENODEV on
+ * unsupported devices, -ENOENT if there's no voltage table for this
+ * particular CPU model, -EINVAL on problems during initialization,
+ * and zero on success.
+ *
+ * This is quite picky. Not only does the CPU have to advertise the
+ * "est" flag in the cpuid capability flags, we look for a specific
+ * CPU model and stepping, and we need to have the exact model name in
+ * our voltage tables. That is, be paranoid about not releasing
+ * someone's valuable magic smoke.
+ */
+static int __init centrino_init(void)
+{
+ if (!x86_match_cpu(centrino_ids))
+ return -ENODEV;
+ return cpufreq_register_driver(&centrino_driver);
+}
+
+static void __exit centrino_exit(void)
+{
+ cpufreq_unregister_driver(&centrino_driver);
+}
+
+MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
+MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors.");
+MODULE_LICENSE ("GPL");
+
+late_initcall(centrino_init);
+module_exit(centrino_exit);
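
The OP(mhz, mv) macro above packs an operating point into the 16 low bits of IA32_PERF_CTL: the high byte is the bus ratio (frequency / 100 MHz), the low byte encodes the voltage as (mV - 700) / 16. A small sketch that packs one entry from the banias_900 table and decodes it back, useful for sanity-checking the encoding:

	#include <stdio.h>

	/* Decode a 16-bit PERF_CTL operating point as packed by OP(mhz, mv):
	 * high byte = mhz / 100, low byte = (mv - 700) / 16. */
	static void decode_op(unsigned int msr)
	{
		unsigned int mhz = ((msr >> 8) & 0xff) * 100;
		unsigned int mv = (msr & 0xff) * 16 + 700;

		printf("0x%04x -> %u MHz @ %u mV\n", msr & 0xffff, mhz, mv);
	}

	int main(void)
	{
		/* OP(600, 844), the lowest entry of banias_900. */
		decode_op(((600 / 100) << 8) | ((844 - 700) / 16));
		return 0;
	}
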
diff --git a/kernel/drivers/cpufreq/speedstep-ich.c b/kernel/drivers/cpufreq/speedstep-ich.c
new file mode 100644
index 000000000..e56d632a8
--- /dev/null
+++ b/kernel/drivers/cpufreq/speedstep-ich.c
@@ -0,0 +1,387 @@
+/*
+ * (C) 2001 Dave Jones, Arjan van de ven.
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ * Based upon reverse engineered information, and on Intel documentation
+ * for chipsets ICH2-M and ICH3-M.
+ *
+ * Many thanks to Ducrot Bruno for finding and fixing the last
+ * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler
+ * for extensive testing.
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+
+/*********************************************************************
+ * SPEEDSTEP - DEFINITIONS *
+ *********************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/cpu_device_id.h>
+
+#include "speedstep-lib.h"
+
+
+/* speedstep_chipset:
+ * It is necessary to know which chipset is used. As accesses to
+ * this device occur at various places in this module, we need a
+ * static struct pci_dev * pointing to that device.
+ */
+static struct pci_dev *speedstep_chipset_dev;
+
+
+/* speedstep_processor
+ */
+static enum speedstep_processor speedstep_processor;
+
+static u32 pmbase;
+
+/*
+ * There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+static struct cpufreq_frequency_table speedstep_freqs[] = {
+ {0, SPEEDSTEP_HIGH, 0},
+ {0, SPEEDSTEP_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+
+/**
+ * speedstep_find_register - read the PMBASE address
+ *
+ * Returns: -ENODEV if no register could be found
+ */
+static int speedstep_find_register(void)
+{
+ if (!speedstep_chipset_dev)
+ return -ENODEV;
+
+ /* get PMBASE */
+ pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
+ if (!(pmbase & 0x01)) {
+ printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+ return -ENODEV;
+ }
+
+ pmbase &= 0xFFFFFFFE;
+ if (!pmbase) {
+ printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+ return -ENODEV;
+ }
+
+ pr_debug("pmbase is 0x%x\n", pmbase);
+ return 0;
+}
+
+/**
+ * speedstep_set_state - set the SpeedStep state
+ * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ *
+ * Tries to change the SpeedStep state. Can be called from
+ * smp_call_function_single.
+ */
+static void speedstep_set_state(unsigned int state)
+{
+ u8 pm2_blk;
+ u8 value;
+ unsigned long flags;
+
+ if (state > 0x1)
+ return;
+
+ /* Disable IRQs */
+ local_irq_save(flags);
+
+ /* read state */
+ value = inb(pmbase + 0x50);
+
+ pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
+
+ /* write new state */
+ value &= 0xFE;
+ value |= state;
+
+ pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase);
+
+ /* Disable bus master arbitration */
+ pm2_blk = inb(pmbase + 0x20);
+ pm2_blk |= 0x01;
+ outb(pm2_blk, (pmbase + 0x20));
+
+ /* Actual transition */
+ outb(value, (pmbase + 0x50));
+
+ /* Restore bus master arbitration */
+ pm2_blk &= 0xfe;
+ outb(pm2_blk, (pmbase + 0x20));
+
+ /* check if transition was successful */
+ value = inb(pmbase + 0x50);
+
+ /* Enable IRQs */
+ local_irq_restore(flags);
+
+ pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
+
+ if (state == (value & 0x1))
+ pr_debug("change to %u MHz succeeded\n",
+ speedstep_get_frequency(speedstep_processor) / 1000);
+ else
+ printk(KERN_ERR "cpufreq: change failed - I/O error\n");
+
+ return;
+}
+
+/* Wrapper for smp_call_function_single. */
+static void _speedstep_set_state(void *_state)
+{
+ speedstep_set_state(*(unsigned int *)_state);
+}
+
+/**
+ * speedstep_activate - activate SpeedStep control in the chipset
+ *
+ * Tries to activate the SpeedStep status and control registers.
+ * Returns -EINVAL on an unsupported chipset, and zero on success.
+ */
+static int speedstep_activate(void)
+{
+ u16 value = 0;
+
+ if (!speedstep_chipset_dev)
+ return -EINVAL;
+
+ pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value);
+ if (!(value & 0x08)) {
+ value |= 0x08;
+ pr_debug("activating SpeedStep (TM) registers\n");
+ pci_write_config_word(speedstep_chipset_dev, 0x00A0, value);
+ }
+
+ return 0;
+}
+
+
+/**
+ * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic
+ *
+ * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to
+ * the LPC bridge / PM module which contains all power-management
+ * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected
+ * chipset, or zero on failure.
+ */
+static unsigned int speedstep_detect_chipset(void)
+{
+ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801DB_12,
+ PCI_ANY_ID, PCI_ANY_ID,
+ NULL);
+ if (speedstep_chipset_dev)
+ return 4; /* 4-M */
+
+ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801CA_12,
+ PCI_ANY_ID, PCI_ANY_ID,
+ NULL);
+ if (speedstep_chipset_dev)
+ return 3; /* 3-M */
+
+
+ speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82801BA_10,
+ PCI_ANY_ID, PCI_ANY_ID,
+ NULL);
+ if (speedstep_chipset_dev) {
+ /* speedstep.c causes lockups on Dell Inspirons 8000 and
+ * 8100 which use a pretty old revision of the 82815
+ * host bridge. Abort on these systems.
+ */
+ static struct pci_dev *hostbridge;
+
+ hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82815_MC,
+ PCI_ANY_ID, PCI_ANY_ID,
+ NULL);
+
+ if (!hostbridge)
+ return 2; /* 2-M */
+
+ if (hostbridge->revision < 5) {
+ pr_debug("hostbridge does not support speedstep\n");
+ speedstep_chipset_dev = NULL;
+ pci_dev_put(hostbridge);
+ return 0;
+ }
+
+ pci_dev_put(hostbridge);
+ return 2; /* 2-M */
+ }
+
+ return 0;
+}
+
+static void get_freq_data(void *_speed)
+{
+ unsigned int *speed = _speed;
+
+ *speed = speedstep_get_frequency(speedstep_processor);
+}
+
+static unsigned int speedstep_get(unsigned int cpu)
+{
+ unsigned int speed;
+
+ /* You're supposed to ensure CPU is online. */
+ if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
+ BUG();
+
+ pr_debug("detected %u kHz as current frequency\n", speed);
+ return speed;
+}
+
+/**
+ * speedstep_target - set a new target frequency
+ * @policy: policy to change
+ * @index: index of the target frequency
+ *
+ * Switches the SpeedStep state to the frequency at @index.
+ */
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned int policy_cpu;
+
+ policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
+
+ smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
+ true);
+
+ return 0;
+}
+
+
+struct get_freqs {
+ struct cpufreq_policy *policy;
+ int ret;
+};
+
+static void get_freqs_on_cpu(void *_get_freqs)
+{
+ struct get_freqs *get_freqs = _get_freqs;
+
+ get_freqs->ret =
+ speedstep_get_freqs(speedstep_processor,
+ &speedstep_freqs[SPEEDSTEP_LOW].frequency,
+ &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
+ &get_freqs->policy->cpuinfo.transition_latency,
+ &speedstep_set_state);
+}
+
+static int speedstep_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int policy_cpu;
+ struct get_freqs gf;
+
+ /* only run on CPU to be set, or on its sibling */
+#ifdef CONFIG_SMP
+ cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+#endif
+ policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
+
+ /* detect low and high frequency and transition latency */
+ gf.policy = policy;
+ smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
+ if (gf.ret)
+ return gf.ret;
+
+ return cpufreq_table_validate_and_show(policy, speedstep_freqs);
+}
+
+
+static struct cpufreq_driver speedstep_driver = {
+ .name = "speedstep-ich",
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
+ .init = speedstep_cpu_init,
+ .get = speedstep_get,
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id ss_smi_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0xb, },
+ { X86_VENDOR_INTEL, 6, 0x8, },
+ { X86_VENDOR_INTEL, 15, 2 },
+ {}
+};
+#if 0
+/* Autoload or not? Do not for now. */
+MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids);
+#endif
+
+/**
+ * speedstep_init - initializes the SpeedStep CPUFreq driver
+ *
+ * Initializes the SpeedStep support. Returns -ENODEV on unsupported
+ * devices, -EINVAL on problems during initialization, and zero on
+ * success.
+ */
+static int __init speedstep_init(void)
+{
+ if (!x86_match_cpu(ss_smi_ids))
+ return -ENODEV;
+
+ /* detect processor */
+ speedstep_processor = speedstep_detect_processor();
+ if (!speedstep_processor) {
+ pr_debug("Intel(R) SpeedStep(TM) capable processor "
+ "not found\n");
+ return -ENODEV;
+ }
+
+ /* detect chipset */
+ if (!speedstep_detect_chipset()) {
+ pr_debug("Intel(R) SpeedStep(TM) for this chipset not "
+ "(yet) available.\n");
+ return -ENODEV;
+ }
+
+ /* activate speedstep support */
+ if (speedstep_activate()) {
+ pci_dev_put(speedstep_chipset_dev);
+ return -EINVAL;
+ }
+
+ if (speedstep_find_register())
+ return -ENODEV;
+
+ return cpufreq_register_driver(&speedstep_driver);
+}
+
+
+/**
+ * speedstep_exit - unregisters SpeedStep support
+ *
+ * Unregisters SpeedStep support.
+ */
+static void __exit speedstep_exit(void)
+{
+ pci_dev_put(speedstep_chipset_dev);
+ cpufreq_unregister_driver(&speedstep_driver);
+}
+
+
+MODULE_AUTHOR("Dave Jones, Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
+ "with ICH-M southbridges.");
+MODULE_LICENSE("GPL");
+
+module_init(speedstep_init);
+module_exit(speedstep_exit);
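
At its core, speedstep_set_state() above replaces bit 0 of the byte at PMBASE + 0x50 with the requested state, while bus-master arbitration is disabled via bit 0 of PMBASE + 0x20. A sketch of just the register arithmetic, with no real port I/O:

	#include <stdio.h>

	#define SPEEDSTEP_HIGH 0x0
	#define SPEEDSTEP_LOW 0x1

	/* Replace bit 0 of the PMBASE+0x50 byte with the requested state,
	 * leaving all other bits untouched -- as the driver's write does. */
	static unsigned char set_state_bits(unsigned char reg, unsigned int state)
	{
		return (reg & 0xfe) | (state & 0x1);
	}

	int main(void)
	{
		unsigned char reg = 0xa4;	/* arbitrary example value */

		printf("0x%02x -> 0x%02x (LOW)\n", reg,
		       set_state_bits(reg, SPEEDSTEP_LOW));
		printf("0x%02x -> 0x%02x (HIGH)\n", reg,
		       set_state_bits(reg, SPEEDSTEP_HIGH));
		return 0;
	}
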
diff --git a/kernel/drivers/cpufreq/speedstep-lib.c b/kernel/drivers/cpufreq/speedstep-lib.c
new file mode 100644
index 000000000..4ab7a2156
--- /dev/null
+++ b/kernel/drivers/cpufreq/speedstep-lib.c
@@ -0,0 +1,482 @@
+/*
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Library for common functions for Intel SpeedStep v.1 and v.2 support
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+
+#include <asm/msr.h>
+#include <asm/tsc.h>
+#include "speedstep-lib.h"
+
+#define PFX "speedstep-lib: "
+
+#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
+static int relaxed_check;
+#else
+#define relaxed_check 0
+#endif
+
+/*********************************************************************
+ * GET PROCESSOR CORE SPEED IN KHZ *
+ *********************************************************************/
+
+static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
+{
+ /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
+ struct {
+ unsigned int ratio; /* Frequency Multiplier (x10) */
+ u8 bitmap; /* power on configuration bits
+ [27, 25:22] (in MSR 0x2a) */
+ } msr_decode_mult[] = {
+ { 30, 0x01 },
+ { 35, 0x05 },
+ { 40, 0x02 },
+ { 45, 0x06 },
+ { 50, 0x00 },
+ { 55, 0x04 },
+ { 60, 0x0b },
+ { 65, 0x0f },
+ { 70, 0x09 },
+ { 75, 0x0d },
+ { 80, 0x0a },
+ { 85, 0x26 },
+ { 90, 0x20 },
+ { 100, 0x2b },
+ { 0, 0xff } /* error or unknown value */
+ };
+
+ /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
+ struct {
+ unsigned int value; /* Front Side Bus speed in MHz */
+ u8 bitmap; /* power on configuration bits [18: 19]
+ (in MSR 0x2a) */
+ } msr_decode_fsb[] = {
+ { 66, 0x0 },
+ { 100, 0x2 },
+ { 133, 0x1 },
+ { 0, 0xff}
+ };
+
+ u32 msr_lo, msr_tmp;
+ int i = 0, j = 0;
+
+ /* read MSR 0x2a - we only need the low 32 bits */
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
+ pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
+ msr_tmp = msr_lo;
+
+ /* decode the FSB */
+ msr_tmp &= 0x00c0000;
+ msr_tmp >>= 18;
+ while (msr_tmp != msr_decode_fsb[i].bitmap) {
+ if (msr_decode_fsb[i].bitmap == 0xff)
+ return 0;
+ i++;
+ }
+
+ /* decode the multiplier */
+ if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) {
+ pr_debug("workaround for early PIIIs\n");
+ msr_lo &= 0x03c00000;
+ } else
+ msr_lo &= 0x0bc00000;
+ msr_lo >>= 22;
+ while (msr_lo != msr_decode_mult[j].bitmap) {
+ if (msr_decode_mult[j].bitmap == 0xff)
+ return 0;
+ j++;
+ }
+
+ pr_debug("speed is %u\n",
+ (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100));
+
+ return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100;
+}
+
+
+static unsigned int pentiumM_get_frequency(void)
+{
+ u32 msr_lo, msr_tmp;
+
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
+ pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
+
+ /* see table B-2 of 24547212.pdf */
+ if (msr_lo & 0x00040000) {
+ printk(KERN_DEBUG PFX "PM - invalid FSB: 0x%x 0x%x\n",
+ msr_lo, msr_tmp);
+ return 0;
+ }
+
+ msr_tmp = (msr_lo >> 22) & 0x1f;
+ pr_debug("bits 22-26 are 0x%x, speed is %u\n",
+ msr_tmp, (msr_tmp * 100 * 1000));
+
+ return msr_tmp * 100 * 1000;
+}
+
+static unsigned int pentium_core_get_frequency(void)
+{
+ u32 fsb = 0;
+ u32 msr_lo, msr_tmp;
+ int ret;
+
+ rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
+ /* see table B-2 of 25366920.pdf */
+ switch (msr_lo & 0x07) {
+ case 5:
+ fsb = 100000;
+ break;
+ case 1:
+ fsb = 133333;
+ break;
+ case 3:
+ fsb = 166667;
+ break;
+ case 2:
+ fsb = 200000;
+ break;
+ case 0:
+ fsb = 266667;
+ break;
+ case 4:
+ fsb = 333333;
+ break;
+ default:
+ printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
+ }
+
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
+ pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n",
+ msr_lo, msr_tmp);
+
+ msr_tmp = (msr_lo >> 22) & 0x1f;
+ pr_debug("bits 22-26 are 0x%x, speed is %u\n",
+ msr_tmp, (msr_tmp * fsb));
+
+ ret = (msr_tmp * fsb);
+ return ret;
+}
+
+
+static unsigned int pentium4_get_frequency(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ u32 msr_lo, msr_hi, mult;
+ unsigned int fsb = 0;
+ unsigned int ret;
+ u8 fsb_code;
+
+ /* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency
+ * to System Bus Frequency Ratio Field in the Processor Frequency
+ * Configuration Register of the MSR. Therefore the current
+ * frequency cannot be calculated and has to be measured.
+ */
+ if (c->x86_model < 2)
+ return cpu_khz;
+
+ rdmsr(0x2c, msr_lo, msr_hi);
+
+ pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi);
+
+ /* decode the FSB: see IA-32 Intel (C) Architecture Software
+ * Developer's Manual, Volume 3: System Programming Guide,
+ * revision #12 in Table B-1: MSRs in the Pentium 4 and
+ * Intel Xeon Processors, on page B-4 and B-5.
+ */
+ fsb_code = (msr_lo >> 16) & 0x7;
+ switch (fsb_code) {
+ case 0:
+ fsb = 100 * 1000;
+ break;
+ case 1:
+ fsb = 13333 * 10;
+ break;
+ case 2:
+ fsb = 200 * 1000;
+ break;
+ }
+
+ if (!fsb)
+ printk(KERN_DEBUG PFX "couldn't detect FSB speed. "
+ "Please send an e-mail to <linux@brodo.de>\n");
+
+ /* Multiplier. */
+ mult = msr_lo >> 24;
+
+ pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n",
+ fsb, mult, (fsb * mult));
+
+ ret = (fsb * mult);
+ return ret;
+}
+
+
+/* Warning: may get called from smp_call_function_single. */
+unsigned int speedstep_get_frequency(enum speedstep_processor processor)
+{
+ switch (processor) {
+ case SPEEDSTEP_CPU_PCORE:
+ return pentium_core_get_frequency();
+ case SPEEDSTEP_CPU_PM:
+ return pentiumM_get_frequency();
+ case SPEEDSTEP_CPU_P4D:
+ case SPEEDSTEP_CPU_P4M:
+ return pentium4_get_frequency();
+ case SPEEDSTEP_CPU_PIII_T:
+ case SPEEDSTEP_CPU_PIII_C:
+ case SPEEDSTEP_CPU_PIII_C_EARLY:
+ return pentium3_get_frequency(processor);
+ default:
+ return 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(speedstep_get_frequency);
+
+
+/*********************************************************************
+ * DETECT SPEEDSTEP-CAPABLE PROCESSOR *
+ *********************************************************************/
+
+/* Keep in sync with the x86_cpu_id tables in the different modules */
+unsigned int speedstep_detect_processor(void)
+{
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ u32 ebx, msr_lo, msr_hi;
+
+ pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model);
+
+ if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+ ((c->x86 != 6) && (c->x86 != 0xF)))
+ return 0;
+
+ if (c->x86 == 0xF) {
+ /* Intel Mobile Pentium 4-M
+ * or Intel Mobile Pentium 4 with 533 MHz FSB */
+ if (c->x86_model != 2)
+ return 0;
+
+ ebx = cpuid_ebx(0x00000001);
+ ebx &= 0x000000FF;
+
+ pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
+
+ switch (c->x86_mask) {
+ case 4:
+ /*
+ * B-stepping [M-P4-M]
+ * sample has ebx = 0x0f, production has 0x0e.
+ */
+ if ((ebx == 0x0e) || (ebx == 0x0f))
+ return SPEEDSTEP_CPU_P4M;
+ break;
+ case 7:
+ /*
+ * C-stepping [M-P4-M]
+ * needs to have ebx=0x0e, else it's a celeron:
+ * cf. 25130917.pdf / page 7, footnote 5 even
+ * though 25072120.pdf / page 7 doesn't say
+ * samples are only of B-stepping...
+ */
+ if (ebx == 0x0e)
+ return SPEEDSTEP_CPU_P4M;
+ break;
+ case 9:
+ /*
+ * D-stepping [M-P4-M or M-P4/533]
+ *
+ * this is totally strange: CPUID 0x0F29 is
+ * used by M-P4-M, M-P4/533 and(!) Celeron CPUs.
+ * The latter need to be sorted out as they don't
+ * support speedstep.
+ * Celerons with CPUID 0x0F29 may have either
+ * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything
+ * specific.
+ * M-P4-Ms may have either ebx=0xe or 0xf [see above]
+ * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf]
+ * also, M-P4M HTs have ebx=0x8, too
+ * For now, they are distinguished by the model_id
+ * string
+ */
+ if ((ebx == 0x0e) ||
+ (strstr(c->x86_model_id,
+ "Mobile Intel(R) Pentium(R) 4") != NULL))
+ return SPEEDSTEP_CPU_P4M;
+ break;
+ default:
+ break;
+ }
+ return 0;
+ }
+
+ switch (c->x86_model) {
+ case 0x0B: /* Intel PIII [Tualatin] */
+ /* cpuid_ebx(1) is 0x04 for desktop PIII,
+ * 0x06 for mobile PIII-M */
+ ebx = cpuid_ebx(0x00000001);
+ pr_debug("ebx is %x\n", ebx);
+
+ ebx &= 0x000000FF;
+
+ if (ebx != 0x06)
+ return 0;
+
+ /* So far all PIII-M processors support SpeedStep. See
+ * Intel's 24540640.pdf of June 2003
+ */
+ return SPEEDSTEP_CPU_PIII_T;
+
+ case 0x08: /* Intel PIII [Coppermine] */
+
+ /* all mobile PIII Coppermines have FSB 100 MHz
+ * ==> sort out a few desktop PIIIs. */
+ rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi);
+ pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n",
+ msr_lo, msr_hi);
+ msr_lo &= 0x00c0000;
+ if (msr_lo != 0x0080000)
+ return 0;
+
+ /*
+ * If the processor is a mobile version,
+ * platform ID has bit 50 set
+ * it has SpeedStep technology if either
+ * bit 56 or 57 is set
+ */
+ rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi);
+ pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n",
+ msr_lo, msr_hi);
+ if ((msr_hi & (1<<18)) &&
+ (relaxed_check ? 1 : (msr_hi & (3<<24)))) {
+ if (c->x86_mask == 0x01) {
+ pr_debug("early PIII version\n");
+ return SPEEDSTEP_CPU_PIII_C_EARLY;
+ } else
+ return SPEEDSTEP_CPU_PIII_C;
+ }
+
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(speedstep_detect_processor);
+
+
+/*********************************************************************
+ * DETECT SPEEDSTEP SPEEDS *
+ *********************************************************************/
+
+unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state) (unsigned int state))
+{
+ unsigned int prev_speed;
+ unsigned int ret = 0;
+ unsigned long flags;
+ struct timeval tv1, tv2;
+
+ if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
+ return -EINVAL;
+
+ pr_debug("trying to determine both speeds\n");
+
+ /* get current speed */
+ prev_speed = speedstep_get_frequency(processor);
+ if (!prev_speed)
+ return -EIO;
+
+ pr_debug("previous speed is %u\n", prev_speed);
+
+ preempt_disable();
+ local_irq_save(flags);
+
+ /* switch to low state */
+ set_state(SPEEDSTEP_LOW);
+ *low_speed = speedstep_get_frequency(processor);
+ if (!*low_speed) {
+ ret = -EIO;
+ goto out;
+ }
+
+ pr_debug("low speed is %u\n", *low_speed);
+
+ /* start latency measurement */
+ if (transition_latency)
+ do_gettimeofday(&tv1);
+
+ /* switch to high state */
+ set_state(SPEEDSTEP_HIGH);
+
+ /* end latency measurement */
+ if (transition_latency)
+ do_gettimeofday(&tv2);
+
+ *high_speed = speedstep_get_frequency(processor);
+ if (!*high_speed) {
+ ret = -EIO;
+ goto out;
+ }
+
+ pr_debug("high speed is %u\n", *high_speed);
+
+ if (*low_speed == *high_speed) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* switch to previous state, if necessary */
+ if (*high_speed != prev_speed)
+ set_state(SPEEDSTEP_LOW);
+
+ if (transition_latency) {
+ *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
+ tv2.tv_usec - tv1.tv_usec;
+ pr_debug("transition latency is %u uSec\n", *transition_latency);
+
+ /* convert uSec to nSec and add 20% for safety reasons */
+ *transition_latency *= 1200;
+
+ /* check if the latency measurement is too high or too low
+ * and set it to a safe value (500uSec) in that case
+ */
+ if (*transition_latency > 10000000 ||
+ *transition_latency < 50000) {
+ printk(KERN_WARNING PFX "measured frequency "
+ "transition seems out of range (%u nSec), "
+ "falling back to a safe value of %u nSec.\n",
+ *transition_latency, 500000);
+ *transition_latency = 500000;
+ }
+ }
+
+out:
+ local_irq_restore(flags);
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(speedstep_get_freqs);
+
+#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK
+module_param(relaxed_check, int, 0444);
+MODULE_PARM_DESC(relaxed_check,
+ "Don't do all checks for speedstep capability.");
+#endif
+
+MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers.");
+MODULE_LICENSE("GPL");
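
speedstep_get_freqs() measures the low-to-high switch in microseconds and then scales it: x1000 to nanoseconds plus a 20% safety margin (hence x1200), clamped to 500 usec when the measurement is implausible. A standalone sketch of that conversion:

	#include <stdio.h>

	/* Scale a measured transition time (usec) as the library does:
	 * x1200 converts to nsec with a 20% margin; values outside the
	 * plausible window fall back to 500000 nsec (500 usec). */
	static unsigned int scale_latency(unsigned int usec)
	{
		unsigned int nsec = usec * 1200;

		if (nsec > 10000000 || nsec < 50000)
			return 500000;
		return nsec;
	}

	int main(void)
	{
		/* prints: 120000 500000 500000 */
		printf("%u %u %u\n", scale_latency(100), scale_latency(5),
		       scale_latency(90000));
		return 0;
	}
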
diff --git a/kernel/drivers/cpufreq/speedstep-lib.h b/kernel/drivers/cpufreq/speedstep-lib.h
new file mode 100644
index 000000000..70d9cea12
--- /dev/null
+++ b/kernel/drivers/cpufreq/speedstep-lib.h
@@ -0,0 +1,49 @@
+/*
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ * Library for common functions for Intel SpeedStep v.1 and v.2 support
+ *
+ * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
+ */
+
+
+
+/* processors */
+enum speedstep_processor {
+ SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */
+ SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */
+ SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */
+ SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */
+/* the following processors are not speedstep-capable and are not auto-detected
+ * in speedstep_detect_processor(). However, their speed can be detected using
+ * the speedstep_get_frequency() call. */
+ SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */
+ SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */
+ SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */
+};
+
+/* speedstep states -- only two of them */
+
+#define SPEEDSTEP_HIGH 0x00000000
+#define SPEEDSTEP_LOW 0x00000001
+
+
+/* detect a speedstep-capable processor */
+extern enum speedstep_processor speedstep_detect_processor(void);
+
+/* detect the current speed (in khz) of the processor */
+extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
+
+
+/* detect the low and high speeds of the processor. The callback
+ * set_state's argument is either SPEEDSTEP_HIGH or SPEEDSTEP_LOW;
+ * the library itself initiates no cpufreq_notify_transition calls.
+ */
+extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state) (unsigned int state));
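
The set_state callback contract above is simple: the library drives the callback into each state and reads the resulting frequency back. A hypothetical, kernel-free mock of that probing loop (fake_set_state and cur_khz are invented stand-ins for real chipset accesses):

	#include <stdio.h>

	#define SPEEDSTEP_HIGH 0x0
	#define SPEEDSTEP_LOW 0x1

	/* Simulated hardware: the current core speed in kHz. */
	static unsigned int cur_khz = 1000000;

	static void fake_set_state(unsigned int state)
	{
		cur_khz = (state == SPEEDSTEP_LOW) ? 600000 : 1000000;
	}

	/* Probe both speeds the way speedstep_get_freqs() does: switch to
	 * each state in turn and read the resulting frequency back. */
	static void probe_speeds(void (*set_state)(unsigned int),
				 unsigned int *low, unsigned int *high)
	{
		set_state(SPEEDSTEP_LOW);
		*low = cur_khz;
		set_state(SPEEDSTEP_HIGH);
		*high = cur_khz;
	}

	int main(void)
	{
		unsigned int low, high;

		probe_speeds(fake_set_state, &low, &high);
		printf("low %u kHz, high %u kHz\n", low, high);
		return 0;
	}
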
diff --git a/kernel/drivers/cpufreq/speedstep-smi.c b/kernel/drivers/cpufreq/speedstep-smi.c
new file mode 100644
index 000000000..819229e82
--- /dev/null
+++ b/kernel/drivers/cpufreq/speedstep-smi.c
@@ -0,0 +1,396 @@
+/*
+ * Intel SpeedStep SMI driver.
+ *
+ * (C) 2003 Hiroshi Miura <miura@da-cha.org>
+ *
+ * Licensed under the terms of the GNU GPL License version 2.
+ *
+ */
+
+
+/*********************************************************************
+ * SPEEDSTEP - DEFINITIONS *
+ *********************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <asm/ist.h>
+#include <asm/cpu_device_id.h>
+
+#include "speedstep-lib.h"
+
+/* speedstep system management interface port/command.
+ *
+ * These parameters are obtained from the IST-SMI BIOS call.
+ * If the user supplies them as module parameters, those values
+ * are used instead.
+ */
+static int smi_port;
+static int smi_cmd;
+static unsigned int smi_sig;
+
+/* info about the processor */
+static enum speedstep_processor speedstep_processor;
+
+/*
+ * There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+static struct cpufreq_frequency_table speedstep_freqs[] = {
+ {0, SPEEDSTEP_HIGH, 0},
+ {0, SPEEDSTEP_LOW, 0},
+ {0, 0, CPUFREQ_TABLE_END},
+};
+
+#define GET_SPEEDSTEP_OWNER 0
+#define GET_SPEEDSTEP_STATE 1
+#define SET_SPEEDSTEP_STATE 2
+#define GET_SPEEDSTEP_FREQS 4
+
+/* How many times the SMI call is retried if it fails, e.g. because
+ * of DMA activity going on. */
+#define SMI_TRIES 5
+
+/**
+ * speedstep_smi_ownership
+ */
+static int speedstep_smi_ownership(void)
+{
+ u32 command, result, magic, dummy;
+ u32 function = GET_SPEEDSTEP_OWNER;
+ unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation";
+
+ command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+ magic = virt_to_phys(magic_data);
+
+ pr_debug("trying to obtain ownership with command %x at port %x\n",
+ command, smi_port);
+
+ __asm__ __volatile__(
+ "push %%ebp\n"
+ "out %%al, (%%dx)\n"
+ "pop %%ebp\n"
+ : "=D" (result),
+ "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy),
+ "=S" (dummy)
+ : "a" (command), "b" (function), "c" (0), "d" (smi_port),
+ "D" (0), "S" (magic)
+ : "memory"
+ );
+
+ pr_debug("result is %x\n", result);
+
+ return result;
+}
+
+/**
+ * speedstep_smi_get_freqs - get SpeedStep preferred & current freq.
+ * @low: the low frequency value is placed here
+ * @high: the high frequency value is placed here
+ *
+ * Only available on later SpeedStep-enabled systems, returns false results or
+ * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing
+ * shows that the latter occurs if !(ist_info.event & 0xFFFF).
+ */
+static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high)
+{
+ u32 command, result = 0, edi, high_mhz, low_mhz, dummy;
+ u32 state = 0;
+ u32 function = GET_SPEEDSTEP_FREQS;
+
+ if (!(ist_info.event & 0xFFFF)) {
+ pr_debug("bug #1422 -- can't read freqs from BIOS\n");
+ return -ENODEV;
+ }
+
+ command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+
+ pr_debug("trying to determine frequencies with command %x at port %x\n",
+ command, smi_port);
+
+ __asm__ __volatile__(
+ "push %%ebp\n"
+ "out %%al, (%%dx)\n"
+ "pop %%ebp"
+ : "=a" (result),
+ "=b" (high_mhz),
+ "=c" (low_mhz),
+ "=d" (state), "=D" (edi), "=S" (dummy)
+ : "a" (command),
+ "b" (function),
+ "c" (state),
+ "d" (smi_port), "S" (0), "D" (0)
+ );
+
+ pr_debug("result %x, low_freq %u, high_freq %u\n",
+ result, low_mhz, high_mhz);
+
+ /* abort if results are obviously incorrect... */
+ if ((high_mhz + low_mhz) < 600)
+ return -EINVAL;
+
+ *high = high_mhz * 1000;
+ *low = low_mhz * 1000;
+
+ return result;
+}
+
+/**
+ * speedstep_set_state - set the SpeedStep state
+ * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
+ *
+ */
+static void speedstep_set_state(unsigned int state)
+{
+ unsigned int result = 0, command, new_state, dummy;
+ unsigned long flags;
+ unsigned int function = SET_SPEEDSTEP_STATE;
+ unsigned int retry = 0;
+
+ if (state > 0x1)
+ return;
+
+ /* Disable IRQs */
+ preempt_disable();
+ local_irq_save(flags);
+
+ command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
+
+ pr_debug("trying to set frequency to state %u "
+ "with command %x at port %x\n",
+ state, command, smi_port);
+
+ do {
+ if (retry) {
+ /*
+ * We need to enable interrupts, otherwise the blockage
+ * won't resolve.
+ *
+ * We disable preemption so that other processes don't
+ * run. If other processes were running, they could
+ * submit more DMA requests, making the blockage worse.
+ */
+ pr_debug("retry %u, previous result %u, waiting...\n",
+ retry, result);
+ local_irq_enable();
+ mdelay(retry * 50);
+ local_irq_disable();
+ }
+ retry++;
+ __asm__ __volatile__(
+ "push %%ebp\n"
+ "out %%al, (%%dx)\n"
+ "pop %%ebp"
+ : "=b" (new_state), "=D" (result),
+ "=c" (dummy), "=a" (dummy),
+ "=d" (dummy), "=S" (dummy)
+ : "a" (command), "b" (function), "c" (state),
+ "d" (smi_port), "S" (0), "D" (0)
+ );
+ } while ((new_state != state) && (retry <= SMI_TRIES));
+
+ /* enable IRQs */
+ local_irq_restore(flags);
+ preempt_enable();
+
+ if (new_state == state)
+ pr_debug("change to %u MHz succeeded after %u tries "
+ "with result %u\n",
+ (speedstep_freqs[new_state].frequency / 1000),
+ retry, result);
+ else
+ printk(KERN_ERR "cpufreq: change to state %u "
+ "failed with new_state %u and result %u\n",
+ state, new_state, result);
+
+ return;
+}
+
+
+/**
+ * speedstep_target - set a new target frequency
+ * @policy: policy to change
+ * @index: index of the new frequency
+ *
+ * Switches the SpeedStep state to the frequency at @index.
+ */
+static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ speedstep_set_state(index);
+
+ return 0;
+}
+
+
+static int speedstep_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+ unsigned int *low, *high;
+
+ /* capability check */
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ result = speedstep_smi_ownership();
+ if (result) {
+ pr_debug("fails in acquiring ownership of a SMI interface.\n");
+ return -EINVAL;
+ }
+
+ /* detect low and high frequency */
+ low = &speedstep_freqs[SPEEDSTEP_LOW].frequency;
+ high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency;
+
+ result = speedstep_smi_get_freqs(low, high);
+ if (result) {
+ /* fall back to the speedstep-lib.c detection mechanism:
+ * try both states out */
+ pr_debug("could not detect low and high frequencies "
+ "by SMI call.\n");
+ result = speedstep_get_freqs(speedstep_processor,
+ low, high,
+ NULL,
+ &speedstep_set_state);
+
+ if (result) {
+ pr_debug("could not detect two different speeds"
+ " -- aborting.\n");
+ return result;
+ } else
+ pr_debug("workaround worked.\n");
+ }
+
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ return cpufreq_table_validate_and_show(policy, speedstep_freqs);
+}
+
+static unsigned int speedstep_get(unsigned int cpu)
+{
+ if (cpu)
+ return -ENODEV;
+ return speedstep_get_frequency(speedstep_processor);
+}
+
+
+static int speedstep_resume(struct cpufreq_policy *policy)
+{
+ int result = speedstep_smi_ownership();
+
+ if (result)
+ pr_debug("fails in re-acquiring ownership of a SMI interface.\n");
+
+ return result;
+}
+
+static struct cpufreq_driver speedstep_driver = {
+ .name = "speedstep-smi",
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = speedstep_target,
+ .init = speedstep_cpu_init,
+ .get = speedstep_get,
+ .resume = speedstep_resume,
+ .attr = cpufreq_generic_attr,
+};
+
+static const struct x86_cpu_id ss_smi_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0xb, },
+ { X86_VENDOR_INTEL, 6, 0x8, },
+ { X86_VENDOR_INTEL, 15, 2 },
+ {}
+};
+#if 0
+/* Not auto loaded currently */
+MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids);
+#endif
+
+/**
+ * speedstep_init - initializes the SpeedStep CPUFreq driver
+ *
+ * Initializes the SpeedStep support. Returns -ENODEV on unsupported
+ * BIOS, -EINVAL on problems during initialization, and zero on
+ * success.
+ */
+static int __init speedstep_init(void)
+{
+ if (!x86_match_cpu(ss_smi_ids))
+ return -ENODEV;
+
+ speedstep_processor = speedstep_detect_processor();
+
+ switch (speedstep_processor) {
+ case SPEEDSTEP_CPU_PIII_T:
+ case SPEEDSTEP_CPU_PIII_C:
+ case SPEEDSTEP_CPU_PIII_C_EARLY:
+ break;
+ default:
+ speedstep_processor = 0;
+ }
+
+ if (!speedstep_processor) {
+ pr_debug("No supported Intel CPU detected.\n");
+ return -ENODEV;
+ }
+
+ pr_debug("signature:0x%.8x, command:0x%.8x, "
+ "event:0x%.8x, perf_level:0x%.8x.\n",
+ ist_info.signature, ist_info.command,
+ ist_info.event, ist_info.perf_level);
+
+ /* Error if no IST-SMI BIOS or no PARM
+ sig= 'ISGE' aka 'Intel Speedstep Gate E' */
+ if ((ist_info.signature != 0x47534943) && (
+ (smi_port == 0) || (smi_cmd == 0)))
+ return -ENODEV;
+
+ if (smi_sig == 1)
+ smi_sig = 0x47534943;
+ else
+ smi_sig = ist_info.signature;
+
+ /* set up smi_port from the module parameter or the BIOS */
+ if ((smi_port > 0xff) || (smi_port < 0))
+ return -EINVAL;
+ else if (smi_port == 0)
+ smi_port = ist_info.command & 0xff;
+
+ if ((smi_cmd > 0xff) || (smi_cmd < 0))
+ return -EINVAL;
+ else if (smi_cmd == 0)
+ smi_cmd = (ist_info.command >> 16) & 0xff;
+
+ return cpufreq_register_driver(&speedstep_driver);
+}
+
+
+/**
+ * speedstep_exit - unregisters SpeedStep support
+ *
+ * Unregisters SpeedStep support.
+ */
+static void __exit speedstep_exit(void)
+{
+ cpufreq_unregister_driver(&speedstep_driver);
+}
+
+module_param(smi_port, int, 0444);
+module_param(smi_cmd, int, 0444);
+module_param(smi_sig, uint, 0444);
+
+MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value "
+ "-- Intel's default setting is 0xb2");
+MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value "
+ "-- Intel's default setting is 0x82");
+MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the "
+ "SMI interface.");
+
+MODULE_AUTHOR("Hiroshi Miura");
+MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface.");
+MODULE_LICENSE("GPL");
+
+module_init(speedstep_init);
+module_exit(speedstep_exit);
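
Every SMI call above builds its command word the same way: the top three bytes come from the IST signature and the low byte is the command. A sketch of that composition, using the driver's fake-signature value and Intel's default command byte from the module parameter help as example inputs:

	#include <stdio.h>

	/* Compose the SMI command word as the driver does:
	 * (smi_sig & 0xffffff00) | (smi_cmd & 0xff). */
	static unsigned int smi_command(unsigned int sig, unsigned int cmd)
	{
		return (sig & 0xffffff00) | (cmd & 0xff);
	}

	int main(void)
	{
		/* 0x47534943 is the signature substituted when smi_sig=1;
		 * 0x82 is Intel's default IST command byte. */
		printf("command = 0x%08x\n", smi_command(0x47534943, 0x82));
		return 0;
	}
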
diff --git a/kernel/drivers/cpufreq/tegra-cpufreq.c b/kernel/drivers/cpufreq/tegra-cpufreq.c
new file mode 100644
index 000000000..8084c7f7e
--- /dev/null
+++ b/kernel/drivers/cpufreq/tegra-cpufreq.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+static struct cpufreq_frequency_table freq_table[] = {
+ { .frequency = 216000 },
+ { .frequency = 312000 },
+ { .frequency = 456000 },
+ { .frequency = 608000 },
+ { .frequency = 760000 },
+ { .frequency = 816000 },
+ { .frequency = 912000 },
+ { .frequency = 1000000 },
+ { .frequency = CPUFREQ_TABLE_END },
+};
+
+#define NUM_CPUS 2
+
+static struct clk *cpu_clk;
+static struct clk *pll_x_clk;
+static struct clk *pll_p_clk;
+static struct clk *emc_clk;
+static bool pll_x_prepared;
+
+static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
+
+ /*
+ * Don't switch to intermediate freq if:
+ * - we are already at it, i.e. policy->cur == ifreq
+ * - index corresponds to ifreq
+ */
+ if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq))
+ return 0;
+
+ return ifreq;
+}
+
+static int tegra_target_intermediate(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ int ret;
+
+ /*
+ * Take an extra reference to the main pll so it doesn't turn
+ * off when we move the cpu off of it; re-enabling it later from
+ * tegra_target() would cost additional time.
+ *
+ * When the target frequency equals the intermediate frequency we
+ * don't need to switch to an intermediate frequency, so this
+ * routine isn't called. In that case pll_x is no longer used and
+ * must not be given the extra reference, as it can then be
+ * disabled to save some power.
+ */
+ clk_prepare_enable(pll_x_clk);
+
+ ret = clk_set_parent(cpu_clk, pll_p_clk);
+ if (ret)
+ clk_disable_unprepare(pll_x_clk);
+ else
+ pll_x_prepared = true;
+
+ return ret;
+}
+
+static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ unsigned long rate = freq_table[index].frequency;
+ unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
+ int ret = 0;
+
+	/*
+	 * Vote on the memory bus frequency based on the cpu frequency.
+	 * This sets a minimum; the display or AVP may request higher.
+	 */
+	if (rate >= 816000)
+		clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */
+	else if (rate >= 456000)
+		clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150 MHz */
+	else
+		clk_set_rate(emc_clk, 100000000); /* emc 50 MHz */
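+
+	/* Note: freq_table entries are in kHz while the clk API takes Hz,
+	 * hence the "* 1000" scaling when setting pll_x below. */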
+
+	/*
+	 * If the target freq equals the pll_p rate there is no need for
+	 * an extra reference to pll_x_clk, as it won't be used anymore.
+	 */
+ if (rate == ifreq)
+ return clk_set_parent(cpu_clk, pll_p_clk);
+
+	ret = clk_set_rate(pll_x_clk, rate * 1000);
+	/* On error we still switch back to pll_x at its earlier rate */
+	if (ret)
+		pr_err("Failed to change pll_x to %lu kHz\n", rate);
+
+ ret = clk_set_parent(cpu_clk, pll_x_clk);
+ /* This shouldn't fail while changing or restoring */
+ WARN_ON(ret);
+
+ /*
+ * Drop count to pll_x clock only if we switched to intermediate freq
+ * earlier while transitioning to a target frequency.
+ */
+ if (pll_x_prepared) {
+ clk_disable_unprepare(pll_x_clk);
+ pll_x_prepared = false;
+ }
+
+ return ret;
+}
+
+static int tegra_cpu_init(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (policy->cpu >= NUM_CPUS)
+ return -EINVAL;
+
+ clk_prepare_enable(emc_clk);
+ clk_prepare_enable(cpu_clk);
+
+ /* FIXME: what's the actual transition time? */
+ ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+ if (ret) {
+ clk_disable_unprepare(cpu_clk);
+ clk_disable_unprepare(emc_clk);
+ return ret;
+ }
+
+ policy->clk = cpu_clk;
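+	/* cpufreq_generic_suspend() (CONFIG_PM builds) switches the CPU
+	 * to this rate, the lowest table entry (216 MHz), on suspend. */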
+ policy->suspend_freq = freq_table[0].frequency;
+ return 0;
+}
+
+static int tegra_cpu_exit(struct cpufreq_policy *policy)
+{
+ clk_disable_unprepare(cpu_clk);
+ clk_disable_unprepare(emc_clk);
+ return 0;
+}
+
+static struct cpufreq_driver tegra_cpufreq_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .get_intermediate = tegra_get_intermediate,
+ .target_intermediate = tegra_target_intermediate,
+ .target_index = tegra_target,
+ .get = cpufreq_generic_get,
+ .init = tegra_cpu_init,
+ .exit = tegra_cpu_exit,
+ .name = "tegra",
+ .attr = cpufreq_generic_attr,
+#ifdef CONFIG_PM
+ .suspend = cpufreq_generic_suspend,
+#endif
+};
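+
+/* Editor's note: CPUFREQ_NEED_INITIAL_FREQ_CHECK (set above) asks the
+ * core to compare the boot-time frequency with freq_table at init and
+ * to correct it if it doesn't match any table entry. */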
+
+static int __init tegra_cpufreq_init(void)
+{
+	int ret;
+
+	cpu_clk = clk_get_sys(NULL, "cclk");
+	if (IS_ERR(cpu_clk))
+		return PTR_ERR(cpu_clk);
+
+	pll_x_clk = clk_get_sys(NULL, "pll_x");
+	if (IS_ERR(pll_x_clk)) {
+		ret = PTR_ERR(pll_x_clk);
+		goto put_cpu_clk;
+	}
+
+	pll_p_clk = clk_get_sys(NULL, "pll_p");
+	if (IS_ERR(pll_p_clk)) {
+		ret = PTR_ERR(pll_p_clk);
+		goto put_pll_x;
+	}
+
+	emc_clk = clk_get_sys("cpu", "emc");
+	if (IS_ERR(emc_clk)) {
+		ret = PTR_ERR(emc_clk);
+		goto put_pll_p;
+	}
+
+	return cpufreq_register_driver(&tegra_cpufreq_driver);
+
+put_pll_p:
+	clk_put(pll_p_clk);
+put_pll_x:
+	clk_put(pll_x_clk);
+put_cpu_clk:
+	clk_put(cpu_clk);
+	return ret;
+}
+
+static void __exit tegra_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&tegra_cpufreq_driver);
+	clk_put(emc_clk);
+	clk_put(pll_p_clk);
+	clk_put(pll_x_clk);
+	clk_put(cpu_clk);
+}
+
+MODULE_AUTHOR("Colin Cross <ccross@android.com>");
+MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2");
+MODULE_LICENSE("GPL");
+module_init(tegra_cpufreq_init);
+module_exit(tegra_cpufreq_exit);
diff --git a/kernel/drivers/cpufreq/unicore2-cpufreq.c b/kernel/drivers/cpufreq/unicore2-cpufreq.c
new file mode 100644
index 000000000..6f9dfa805
--- /dev/null
+++ b/kernel/drivers/cpufreq/unicore2-cpufreq.c
@@ -0,0 +1,80 @@
+/*
+ * clock scaling for the UniCore-II
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+
+#include <mach/hardware.h>
+
+static struct cpufreq_driver ucv2_driver;
+
+/*
+ * Only the "userspace" governor really makes sense on this platform:
+ * the CPUFREQ_ETERNAL transition latency set in ucv2_cpu_init() makes
+ * dynamic governors unusable in practice.
+ */
+static int ucv2_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu)
+ return -EINVAL;
+
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
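+/*
+ * Editor's note: drivers implementing .target (rather than
+ * .target_index) must issue the frequency-transition notifications
+ * themselves, hence the begin/end bracketing around clk_set_rate()
+ * below; the core does this automatically only for .target_index
+ * drivers.
+ */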
+static int ucv2_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ int ret;
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
+ ret = clk_set_rate(policy->clk, target_freq * 1000);
+ cpufreq_freq_transition_end(policy, &freqs, ret);
+
+ return ret;
+}
+
+static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ policy->min = policy->cpuinfo.min_freq = 250000;
+ policy->max = policy->cpuinfo.max_freq = 1000000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->clk = clk_get(NULL, "MAIN_CLK");
+ return PTR_ERR_OR_ZERO(policy->clk);
+}
+
+static struct cpufreq_driver ucv2_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = ucv2_verify_speed,
+ .target = ucv2_target,
+ .get = cpufreq_generic_get,
+ .init = ucv2_cpu_init,
+ .name = "UniCore-II",
+};
+
+static int __init ucv2_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&ucv2_driver);
+}
+
+arch_initcall(ucv2_cpufreq_init);
diff --git a/kernel/drivers/cpufreq/vexpress-spc-cpufreq.c b/kernel/drivers/cpufreq/vexpress-spc-cpufreq.c
new file mode 100644
index 000000000..433e93fd4
--- /dev/null
+++ b/kernel/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -0,0 +1,69 @@
+/*
+ * Versatile Express SPC CPUFreq Interface driver
+ *
+ * It provides the ops needed by the arm_big_little cpufreq driver.
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+static int ve_spc_init_opp_table(struct device *cpu_dev)
+{
+	/*
+	 * Platform-specific SPC code must have initialised the OPP table
+	 * already, so just check that it is non-empty; a non-zero return
+	 * here reports failure to the arm_big_little core.
+	 */
+ return dev_pm_opp_get_opp_count(cpu_dev) <= 0;
+}
+
+static int ve_spc_get_transition_latency(struct device *cpu_dev)
+{
+	return 1000000; /* 1 ms, expressed in ns */
+}
+
+static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+ .name = "vexpress-spc",
+ .get_transition_latency = ve_spc_get_transition_latency,
+ .init_opp_table = ve_spc_init_opp_table,
+};
+
+static int ve_spc_cpufreq_probe(struct platform_device *pdev)
+{
+ return bL_cpufreq_register(&ve_spc_cpufreq_ops);
+}
+
+static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+{
+ bL_cpufreq_unregister(&ve_spc_cpufreq_ops);
+ return 0;
+}
+
+static struct platform_driver ve_spc_cpufreq_platdrv = {
+ .driver = {
+ .name = "vexpress-spc-cpufreq",
+ },
+ .probe = ve_spc_cpufreq_probe,
+ .remove = ve_spc_cpufreq_remove,
+};
+module_platform_driver(ve_spc_cpufreq_platdrv);
+
+MODULE_LICENSE("GPL");