| author    | Yunhong Jiang <yunhong.jiang@intel.com>                          | 2015-08-04 12:17:53 -0700 |
|-----------|------------------------------------------------------------------|---------------------------|
| committer | Yunhong Jiang <yunhong.jiang@intel.com>                          | 2015-08-04 15:44:42 -0700 |
| commit    | 9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)                 |                           |
| tree      | 1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/drivers/clk/st  |                           |
| parent    | 98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)                  |                           |
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base.
It's from git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git linux-4.1.y-rt and
the base is:
commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Sat Jul 25 12:13:34 2015 +0200
Prepare v4.1.3-rt3
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
We lose all the git history this way, which is not good. We
should move to a separate OPNFV project repo in the future.
Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/drivers/clk/st')
| -rw-r--r-- | kernel/drivers/clk/st/Makefile      |    1 |
| -rw-r--r-- | kernel/drivers/clk/st/clk-flexgen.c |  342 |
| -rw-r--r-- | kernel/drivers/clk/st/clkgen-fsyn.c | 1194 |
| -rw-r--r-- | kernel/drivers/clk/st/clkgen-mux.c  |  830 |
| -rw-r--r-- | kernel/drivers/clk/st/clkgen-pll.c  |  763 |
| -rw-r--r-- | kernel/drivers/clk/st/clkgen.h      |   48 |
6 files changed, 3178 insertions, 0 deletions
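The clk-flexgen and clkgena-divmux drivers in this diff are composite clocks: each embeds the common clock framework's generic clk_mux, clk_gate and clk_divider structures and forwards its own clk_ops callbacks to the corresponding generic ops. A minimal sketch of that delegation pattern, using only <linux/clk-provider.h> types; the struct and function names here (composite_clk, composite_enable, ...) are illustrative and do not appear in the patch:

```c
#include <linux/clk-provider.h>
#include <linux/kernel.h>

/* Illustrative composite clock; names are not from the patch below. */
struct composite_clk {
	struct clk_hw hw;       /* clock exposed to consumers */
	struct clk_mux mux;     /* parent selection */
	struct clk_gate gate;   /* output gate */
	struct clk_divider div; /* output divider */
};

#define to_composite(_hw) container_of(_hw, struct composite_clk, hw)

static int composite_enable(struct clk_hw *hw)
{
	struct composite_clk *c = to_composite(hw);
	struct clk_hw *gate_hw = &c->gate.hw;

	/* Point the embedded gate at the composite's struct clk ... */
	__clk_hw_set_clk(gate_hw, hw);

	/* ... then reuse the generic gate implementation. */
	return clk_gate_ops.enable(gate_hw);
}

static u8 composite_get_parent(struct clk_hw *hw)
{
	struct composite_clk *c = to_composite(hw);
	struct clk_hw *mux_hw = &c->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);
	return clk_mux_ops.get_parent(mux_hw);
}

static const struct clk_ops composite_ops = {
	.enable     = composite_enable,
	.get_parent = composite_get_parent,
	/* rate callbacks would delegate to clk_divider_ops the same way */
};
```

The __clk_hw_set_clk() helper copies the composite's clk/core pointers into the embedded primitive's clk_hw, so the generic mux/gate/divider ops operate on the composite clock without the primitives being registered as separate clocks; clk-flexgen and clkgena-divmux in the diff below both rely on this.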
diff --git a/kernel/drivers/clk/st/Makefile b/kernel/drivers/clk/st/Makefile new file mode 100644 index 000000000..ede7b2f13 --- /dev/null +++ b/kernel/drivers/clk/st/Makefile @@ -0,0 +1 @@ +obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o clk-flexgen.o diff --git a/kernel/drivers/clk/st/clk-flexgen.c b/kernel/drivers/clk/st/clk-flexgen.c new file mode 100644 index 000000000..bf12a25eb --- /dev/null +++ b/kernel/drivers/clk/st/clk-flexgen.c @@ -0,0 +1,342 @@ +/* + * clk-flexgen.c + * + * Copyright (C) ST-Microelectronics SA 2013 + * Author: Maxime Coquelin <maxime.coquelin@st.com> for ST-Microelectronics. + * License terms: GNU General Public License (GPL), version 2 */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/err.h> +#include <linux/string.h> +#include <linux/of.h> +#include <linux/of_address.h> + +struct flexgen { + struct clk_hw hw; + + /* Crossbar */ + struct clk_mux mux; + /* Pre-divisor's gate */ + struct clk_gate pgate; + /* Pre-divisor */ + struct clk_divider pdiv; + /* Final divisor's gate */ + struct clk_gate fgate; + /* Final divisor */ + struct clk_divider fdiv; +}; + +#define to_flexgen(_hw) container_of(_hw, struct flexgen, hw) + +static int flexgen_enable(struct clk_hw *hw) +{ + struct flexgen *flexgen = to_flexgen(hw); + struct clk_hw *pgate_hw = &flexgen->pgate.hw; + struct clk_hw *fgate_hw = &flexgen->fgate.hw; + + __clk_hw_set_clk(pgate_hw, hw); + __clk_hw_set_clk(fgate_hw, hw); + + clk_gate_ops.enable(pgate_hw); + + clk_gate_ops.enable(fgate_hw); + + pr_debug("%s: flexgen output enabled\n", __clk_get_name(hw->clk)); + return 0; +} + +static void flexgen_disable(struct clk_hw *hw) +{ + struct flexgen *flexgen = to_flexgen(hw); + struct clk_hw *fgate_hw = &flexgen->fgate.hw; + + /* disable only the final gate */ + __clk_hw_set_clk(fgate_hw, hw); + + clk_gate_ops.disable(fgate_hw); + + pr_debug("%s: flexgen output disabled\n", __clk_get_name(hw->clk)); +} + +static int flexgen_is_enabled(struct clk_hw *hw) +{ + struct flexgen *flexgen = to_flexgen(hw); + struct clk_hw *fgate_hw = &flexgen->fgate.hw; + + __clk_hw_set_clk(fgate_hw, hw); + + if (!clk_gate_ops.is_enabled(fgate_hw)) + return 0; + + return 1; +} + +static u8 flexgen_get_parent(struct clk_hw *hw) +{ + struct flexgen *flexgen = to_flexgen(hw); + struct clk_hw *mux_hw = &flexgen->mux.hw; + + __clk_hw_set_clk(mux_hw, hw); + + return clk_mux_ops.get_parent(mux_hw); +} + +static int flexgen_set_parent(struct clk_hw *hw, u8 index) +{ + struct flexgen *flexgen = to_flexgen(hw); + struct clk_hw *mux_hw = &flexgen->mux.hw; + + __clk_hw_set_clk(mux_hw, hw); + + return clk_mux_ops.set_parent(mux_hw, index); +} + +static inline unsigned long +clk_best_div(unsigned long parent_rate, unsigned long rate) +{ + return parent_rate / rate + ((rate > (2*(parent_rate % rate))) ? 
0 : 1); +} + +static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + unsigned long div; + + /* Round div according to exact prate and wished rate */ + div = clk_best_div(*prate, rate); + + if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + *prate = rate * div; + return rate; + } + + return *prate / div; +} + +unsigned long flexgen_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct flexgen *flexgen = to_flexgen(hw); + struct clk_hw *pdiv_hw = &flexgen->pdiv.hw; + struct clk_hw *fdiv_hw = &flexgen->fdiv.hw; + unsigned long mid_rate; + + __clk_hw_set_clk(pdiv_hw, hw); + __clk_hw_set_clk(fdiv_hw, hw); + + mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate); + + return clk_divider_ops.recalc_rate(fdiv_hw, mid_rate); +} + +static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct flexgen *flexgen = to_flexgen(hw); + struct clk_hw *pdiv_hw = &flexgen->pdiv.hw; + struct clk_hw *fdiv_hw = &flexgen->fdiv.hw; + unsigned long div = 0; + int ret = 0; + + __clk_hw_set_clk(pdiv_hw, hw); + __clk_hw_set_clk(fdiv_hw, hw); + + div = clk_best_div(parent_rate, rate); + + /* + * pdiv is mainly targeted for low freq results, while fdiv + * should be used for div <= 64. The other way round can + * lead to 'duty cycle' issues. + */ + + if (div <= 64) { + clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate); + ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div); + } else { + clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate); + ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div); + } + + return ret; +} + +static const struct clk_ops flexgen_ops = { + .enable = flexgen_enable, + .disable = flexgen_disable, + .is_enabled = flexgen_is_enabled, + .get_parent = flexgen_get_parent, + .set_parent = flexgen_set_parent, + .round_rate = flexgen_round_rate, + .recalc_rate = flexgen_recalc_rate, + .set_rate = flexgen_set_rate, +}; + +struct clk *clk_register_flexgen(const char *name, + const char **parent_names, u8 num_parents, + void __iomem *reg, spinlock_t *lock, u32 idx, + unsigned long flexgen_flags) { + struct flexgen *fgxbar; + struct clk *clk; + struct clk_init_data init; + u32 xbar_shift; + void __iomem *xbar_reg, *fdiv_reg; + + fgxbar = kzalloc(sizeof(struct flexgen), GFP_KERNEL); + if (!fgxbar) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &flexgen_ops; + init.flags = CLK_IS_BASIC | flexgen_flags; + init.parent_names = parent_names; + init.num_parents = num_parents; + + xbar_reg = reg + 0x18 + (idx & ~0x3); + xbar_shift = (idx % 4) * 0x8; + fdiv_reg = reg + 0x164 + idx * 4; + + /* Crossbar element config */ + fgxbar->mux.lock = lock; + fgxbar->mux.mask = BIT(6) - 1; + fgxbar->mux.reg = xbar_reg; + fgxbar->mux.shift = xbar_shift; + fgxbar->mux.table = NULL; + + + /* Pre-divider's gate config (in xbar register)*/ + fgxbar->pgate.lock = lock; + fgxbar->pgate.reg = xbar_reg; + fgxbar->pgate.bit_idx = xbar_shift + 6; + + /* Pre-divider config */ + fgxbar->pdiv.lock = lock; + fgxbar->pdiv.reg = reg + 0x58 + idx * 4; + fgxbar->pdiv.width = 10; + + /* Final divider's gate config */ + fgxbar->fgate.lock = lock; + fgxbar->fgate.reg = fdiv_reg; + fgxbar->fgate.bit_idx = 6; + + /* Final divider config */ + fgxbar->fdiv.lock = lock; + fgxbar->fdiv.reg = fdiv_reg; + fgxbar->fdiv.width = 6; + + fgxbar->hw.init = &init; + + clk = clk_register(NULL, &fgxbar->hw); + if (IS_ERR(clk)) + kfree(fgxbar); + else + pr_debug("%s: parent %s rate %u\n", + 
__clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + return clk; +} + +static const char ** __init flexgen_get_parents(struct device_node *np, + int *num_parents) +{ + const char **parents; + int nparents, i; + + nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + if (WARN_ON(nparents <= 0)) + return NULL; + + parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL); + if (!parents) + return NULL; + + for (i = 0; i < nparents; i++) + parents[i] = of_clk_get_parent_name(np, i); + + *num_parents = nparents; + return parents; +} + +void __init st_of_flexgen_setup(struct device_node *np) +{ + struct device_node *pnode; + void __iomem *reg; + struct clk_onecell_data *clk_data; + const char **parents; + int num_parents, i; + spinlock_t *rlock = NULL; + unsigned long flex_flags = 0; + + pnode = of_get_parent(np); + if (!pnode) + return; + + reg = of_iomap(pnode, 0); + if (!reg) + return; + + parents = flexgen_get_parents(np, &num_parents); + if (!parents) + return; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + goto err; + + clk_data->clk_num = of_property_count_strings(np , + "clock-output-names"); + if (clk_data->clk_num <= 0) { + pr_err("%s: Failed to get number of output clocks (%d)", + __func__, clk_data->clk_num); + goto err; + } + + clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *), + GFP_KERNEL); + if (!clk_data->clks) + goto err; + + rlock = kzalloc(sizeof(spinlock_t), GFP_KERNEL); + if (!rlock) + goto err; + + for (i = 0; i < clk_data->clk_num; i++) { + struct clk *clk; + const char *clk_name; + + if (of_property_read_string_index(np, "clock-output-names", + i, &clk_name)) { + break; + } + + /* + * If we read an empty clock name then the output is unused + */ + if (*clk_name == '\0') + continue; + + clk = clk_register_flexgen(clk_name, parents, num_parents, + reg, rlock, i, flex_flags); + + if (IS_ERR(clk)) + goto err; + + clk_data->clks[i] = clk; + } + + kfree(parents); + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + + return; + +err: + if (clk_data) + kfree(clk_data->clks); + kfree(clk_data); + kfree(parents); + kfree(rlock); +} +CLK_OF_DECLARE(flexgen, "st,flexgen", st_of_flexgen_setup); diff --git a/kernel/drivers/clk/st/clkgen-fsyn.c b/kernel/drivers/clk/st/clkgen-fsyn.c new file mode 100644 index 000000000..a917c4c7e --- /dev/null +++ b/kernel/drivers/clk/st/clkgen-fsyn.c @@ -0,0 +1,1194 @@ +/* + * Copyright (C) 2014 STMicroelectronics R&D Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/* + * Authors: + * Stephen Gallimore <stephen.gallimore@st.com>, + * Pankaj Dev <pankaj.dev@st.com>. + */ + +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/clk-provider.h> + +#include "clkgen.h" + +/* + * Maximum input clock to the PLL before we divide it down by 2 + * although in reality in actual systems this has never been seen to + * be used. 
+ */ +#define QUADFS_NDIV_THRESHOLD 30000000 + +#define PLL_BW_GOODREF (0L) +#define PLL_BW_VBADREF (1L) +#define PLL_BW_BADREF (2L) +#define PLL_BW_VGOODREF (3L) + +#define QUADFS_MAX_CHAN 4 + +struct stm_fs { + unsigned long ndiv; + unsigned long mdiv; + unsigned long pe; + unsigned long sdiv; + unsigned long nsdiv; +}; + +static const struct stm_fs fs216c65_rtbl[] = { + { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 312.5 Khz */ + { .mdiv = 0x17, .pe = 0x25ed, .sdiv = 0x1, .nsdiv = 0 }, /* 27 MHz */ + { .mdiv = 0x1a, .pe = 0x7b36, .sdiv = 0x2, .nsdiv = 1 }, /* 36.87 MHz */ + { .mdiv = 0x13, .pe = 0x0, .sdiv = 0x2, .nsdiv = 1 }, /* 48 MHz */ + { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x1, .nsdiv = 1 }, /* 108 MHz */ +}; + +static const struct stm_fs fs432c65_rtbl[] = { + { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 625 Khz */ + { .mdiv = 0x13, .pe = 0x777c, .sdiv = 0x4, .nsdiv = 1 }, /* 25.175 MHz */ + { .mdiv = 0x19, .pe = 0x4d35, .sdiv = 0x2, .nsdiv = 0 }, /* 25.200 MHz */ + { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x4, .nsdiv = 1 }, /* 27.000 MHz */ + { .mdiv = 0x17, .pe = 0x28f5, .sdiv = 0x2, .nsdiv = 0 }, /* 27.027 MHz */ + { .mdiv = 0x16, .pe = 0x3359, .sdiv = 0x2, .nsdiv = 0 }, /* 28.320 MHz */ + { .mdiv = 0x1f, .pe = 0x2083, .sdiv = 0x3, .nsdiv = 1 }, /* 30.240 MHz */ + { .mdiv = 0x1e, .pe = 0x430d, .sdiv = 0x3, .nsdiv = 1 }, /* 31.500 MHz */ + { .mdiv = 0x17, .pe = 0x0, .sdiv = 0x3, .nsdiv = 1 }, /* 40.000 MHz */ + { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x1, .nsdiv = 0 }, /* 49.500 MHz */ + { .mdiv = 0x13, .pe = 0x6667, .sdiv = 0x3, .nsdiv = 1 }, /* 50.000 MHz */ + { .mdiv = 0x10, .pe = 0x1ee6, .sdiv = 0x3, .nsdiv = 1 }, /* 57.284 MHz */ + { .mdiv = 0x1d, .pe = 0x3b14, .sdiv = 0x2, .nsdiv = 1 }, /* 65.000 MHz */ + { .mdiv = 0x12, .pe = 0x7c65, .sdiv = 0x1, .nsdiv = 0 }, /* 71.000 MHz */ + { .mdiv = 0x19, .pe = 0xecd, .sdiv = 0x2, .nsdiv = 1 }, /* 74.176 MHz */ + { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x2, .nsdiv = 1 }, /* 74.250 MHz */ + { .mdiv = 0x19, .pe = 0x3334, .sdiv = 0x2, .nsdiv = 1 }, /* 75.000 MHz */ + { .mdiv = 0x18, .pe = 0x5138, .sdiv = 0x2, .nsdiv = 1 }, /* 78.800 MHz */ + { .mdiv = 0x1d, .pe = 0x77d, .sdiv = 0x0, .nsdiv = 0 }, /* 85.500 MHz */ + { .mdiv = 0x1c, .pe = 0x13d5, .sdiv = 0x0, .nsdiv = 0 }, /* 88.750 MHz */ + { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x2, .nsdiv = 1 }, /* 108.000 MHz */ + { .mdiv = 0x17, .pe = 0x28f5, .sdiv = 0x0, .nsdiv = 0 }, /* 108.108 MHz */ + { .mdiv = 0x10, .pe = 0x6e26, .sdiv = 0x2, .nsdiv = 1 }, /* 118.963 MHz */ + { .mdiv = 0x15, .pe = 0x3e63, .sdiv = 0x0, .nsdiv = 0 }, /* 119.000 MHz */ + { .mdiv = 0x1c, .pe = 0x471d, .sdiv = 0x1, .nsdiv = 1 }, /* 135.000 MHz */ + { .mdiv = 0x19, .pe = 0xecd, .sdiv = 0x1, .nsdiv = 1 }, /* 148.352 MHz */ + { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x1, .nsdiv = 1 }, /* 148.500 MHz */ + { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x0, .nsdiv = 1 }, /* 297 MHz */ +}; + +static const struct stm_fs fs660c32_rtbl[] = { + { .mdiv = 0x14, .pe = 0x376b, .sdiv = 0x4, .nsdiv = 1 }, /* 25.175 MHz */ + { .mdiv = 0x14, .pe = 0x30c3, .sdiv = 0x4, .nsdiv = 1 }, /* 25.200 MHz */ + { .mdiv = 0x10, .pe = 0x71c7, .sdiv = 0x4, .nsdiv = 1 }, /* 27.000 MHz */ + { .mdiv = 0x00, .pe = 0x47af, .sdiv = 0x3, .nsdiv = 0 }, /* 27.027 MHz */ + { .mdiv = 0x0e, .pe = 0x4e1a, .sdiv = 0x4, .nsdiv = 1 }, /* 28.320 MHz */ + { .mdiv = 0x0b, .pe = 0x534d, .sdiv = 0x4, .nsdiv = 1 }, /* 30.240 MHz */ + { .mdiv = 0x17, .pe = 0x6fbf, .sdiv = 0x2, .nsdiv = 0 }, /* 31.500 MHz */ + { .mdiv = 0x01, .pe = 0x0, .sdiv = 0x4, .nsdiv = 1 }, /* 
40.000 MHz */ + { .mdiv = 0x15, .pe = 0x2aab, .sdiv = 0x3, .nsdiv = 1 }, /* 49.500 MHz */ + { .mdiv = 0x14, .pe = 0x6666, .sdiv = 0x3, .nsdiv = 1 }, /* 50.000 MHz */ + { .mdiv = 0x1d, .pe = 0x395f, .sdiv = 0x1, .nsdiv = 0 }, /* 57.284 MHz */ + { .mdiv = 0x08, .pe = 0x4ec5, .sdiv = 0x3, .nsdiv = 1 }, /* 65.000 MHz */ + { .mdiv = 0x05, .pe = 0x1770, .sdiv = 0x3, .nsdiv = 1 }, /* 71.000 MHz */ + { .mdiv = 0x03, .pe = 0x4ba7, .sdiv = 0x3, .nsdiv = 1 }, /* 74.176 MHz */ + { .mdiv = 0x0f, .pe = 0x3426, .sdiv = 0x1, .nsdiv = 0 }, /* 74.250 MHz */ + { .mdiv = 0x0e, .pe = 0x7777, .sdiv = 0x1, .nsdiv = 0 }, /* 75.000 MHz */ + { .mdiv = 0x01, .pe = 0x4053, .sdiv = 0x3, .nsdiv = 1 }, /* 78.800 MHz */ + { .mdiv = 0x09, .pe = 0x15b5, .sdiv = 0x1, .nsdiv = 0 }, /* 85.500 MHz */ + { .mdiv = 0x1b, .pe = 0x3f19, .sdiv = 0x2, .nsdiv = 1 }, /* 88.750 MHz */ + { .mdiv = 0x10, .pe = 0x71c7, .sdiv = 0x2, .nsdiv = 1 }, /* 108.000 MHz */ + { .mdiv = 0x00, .pe = 0x47af, .sdiv = 0x1, .nsdiv = 0 }, /* 108.108 MHz */ + { .mdiv = 0x0c, .pe = 0x3118, .sdiv = 0x2, .nsdiv = 1 }, /* 118.963 MHz */ + { .mdiv = 0x0c, .pe = 0x2f54, .sdiv = 0x2, .nsdiv = 1 }, /* 119.000 MHz */ + { .mdiv = 0x07, .pe = 0xe39, .sdiv = 0x2, .nsdiv = 1 }, /* 135.000 MHz */ + { .mdiv = 0x03, .pe = 0x4ba7, .sdiv = 0x2, .nsdiv = 1 }, /* 148.352 MHz */ + { .mdiv = 0x0f, .pe = 0x3426, .sdiv = 0x0, .nsdiv = 0 }, /* 148.500 MHz */ + { .mdiv = 0x03, .pe = 0x4ba7, .sdiv = 0x1, .nsdiv = 1 }, /* 296.704 MHz */ + { .mdiv = 0x03, .pe = 0x471c, .sdiv = 0x1, .nsdiv = 1 }, /* 297.000 MHz */ + { .mdiv = 0x00, .pe = 0x295f, .sdiv = 0x1, .nsdiv = 1 }, /* 326.700 MHz */ + { .mdiv = 0x1f, .pe = 0x3633, .sdiv = 0x0, .nsdiv = 1 }, /* 333.000 MHz */ + { .mdiv = 0x1c, .pe = 0x0, .sdiv = 0x0, .nsdiv = 1 }, /* 352.000 Mhz */ +}; + +struct clkgen_quadfs_data { + bool reset_present; + bool bwfilter_present; + bool lockstatus_present; + bool powerup_polarity; + bool standby_polarity; + bool nsdiv_present; + bool nrst_present; + struct clkgen_field ndiv; + struct clkgen_field ref_bw; + struct clkgen_field nreset; + struct clkgen_field npda; + struct clkgen_field lock_status; + + struct clkgen_field nrst[QUADFS_MAX_CHAN]; + struct clkgen_field nsb[QUADFS_MAX_CHAN]; + struct clkgen_field en[QUADFS_MAX_CHAN]; + struct clkgen_field mdiv[QUADFS_MAX_CHAN]; + struct clkgen_field pe[QUADFS_MAX_CHAN]; + struct clkgen_field sdiv[QUADFS_MAX_CHAN]; + struct clkgen_field nsdiv[QUADFS_MAX_CHAN]; + + const struct clk_ops *pll_ops; + const struct stm_fs *rtbl; + u8 rtbl_cnt; + int (*get_rate)(unsigned long , const struct stm_fs *, + unsigned long *); +}; + +static const struct clk_ops st_quadfs_pll_c65_ops; +static const struct clk_ops st_quadfs_pll_c32_ops; +static const struct clk_ops st_quadfs_fs216c65_ops; +static const struct clk_ops st_quadfs_fs432c65_ops; +static const struct clk_ops st_quadfs_fs660c32_ops; + +static int clk_fs216c65_get_rate(unsigned long, const struct stm_fs *, + unsigned long *); +static int clk_fs432c65_get_rate(unsigned long, const struct stm_fs *, + unsigned long *); +static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *, + unsigned long *); +/* + * Values for all of the standalone instances of this clock + * generator found in STiH415 and STiH416 SYSCFG register banks. Note + * that the individual channel standby control bits (nsb) are in the + * first register along with the PLL control bits. 
+ */ +static const struct clkgen_quadfs_data st_fs216c65_416 = { + /* 416 specific */ + .npda = CLKGEN_FIELD(0x0, 0x1, 14), + .nsb = { CLKGEN_FIELD(0x0, 0x1, 10), + CLKGEN_FIELD(0x0, 0x1, 11), + CLKGEN_FIELD(0x0, 0x1, 12), + CLKGEN_FIELD(0x0, 0x1, 13) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18), + CLKGEN_FIELD(0x0, 0x1, 19), + CLKGEN_FIELD(0x0, 0x1, 20), + CLKGEN_FIELD(0x0, 0x1, 21) }, + .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0), + CLKGEN_FIELD(0x14, 0x1f, 0), + CLKGEN_FIELD(0x24, 0x1f, 0), + CLKGEN_FIELD(0x34, 0x1f, 0) }, + .en = { CLKGEN_FIELD(0x10, 0x1, 0), + CLKGEN_FIELD(0x20, 0x1, 0), + CLKGEN_FIELD(0x30, 0x1, 0), + CLKGEN_FIELD(0x40, 0x1, 0) }, + .ndiv = CLKGEN_FIELD(0x0, 0x1, 15), + .bwfilter_present = true, + .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16), + .pe = { CLKGEN_FIELD(0x8, 0xffff, 0), + CLKGEN_FIELD(0x18, 0xffff, 0), + CLKGEN_FIELD(0x28, 0xffff, 0), + CLKGEN_FIELD(0x38, 0xffff, 0) }, + .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0), + CLKGEN_FIELD(0x1C, 0x7, 0), + CLKGEN_FIELD(0x2C, 0x7, 0), + CLKGEN_FIELD(0x3C, 0x7, 0) }, + .pll_ops = &st_quadfs_pll_c65_ops, + .rtbl = fs216c65_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs216c65_rtbl), + .get_rate = clk_fs216c65_get_rate, +}; + +static const struct clkgen_quadfs_data st_fs432c65_416 = { + .npda = CLKGEN_FIELD(0x0, 0x1, 14), + .nsb = { CLKGEN_FIELD(0x0, 0x1, 10), + CLKGEN_FIELD(0x0, 0x1, 11), + CLKGEN_FIELD(0x0, 0x1, 12), + CLKGEN_FIELD(0x0, 0x1, 13) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18), + CLKGEN_FIELD(0x0, 0x1, 19), + CLKGEN_FIELD(0x0, 0x1, 20), + CLKGEN_FIELD(0x0, 0x1, 21) }, + .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0), + CLKGEN_FIELD(0x14, 0x1f, 0), + CLKGEN_FIELD(0x24, 0x1f, 0), + CLKGEN_FIELD(0x34, 0x1f, 0) }, + .en = { CLKGEN_FIELD(0x10, 0x1, 0), + CLKGEN_FIELD(0x20, 0x1, 0), + CLKGEN_FIELD(0x30, 0x1, 0), + CLKGEN_FIELD(0x40, 0x1, 0) }, + .ndiv = CLKGEN_FIELD(0x0, 0x1, 15), + .bwfilter_present = true, + .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16), + .pe = { CLKGEN_FIELD(0x8, 0xffff, 0), + CLKGEN_FIELD(0x18, 0xffff, 0), + CLKGEN_FIELD(0x28, 0xffff, 0), + CLKGEN_FIELD(0x38, 0xffff, 0) }, + .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0), + CLKGEN_FIELD(0x1C, 0x7, 0), + CLKGEN_FIELD(0x2C, 0x7, 0), + CLKGEN_FIELD(0x3C, 0x7, 0) }, + .pll_ops = &st_quadfs_pll_c65_ops, + .rtbl = fs432c65_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs432c65_rtbl), + .get_rate = clk_fs432c65_get_rate, +}; + +static const struct clkgen_quadfs_data st_fs660c32_E_416 = { + .npda = CLKGEN_FIELD(0x0, 0x1, 14), + .nsb = { CLKGEN_FIELD(0x0, 0x1, 10), + CLKGEN_FIELD(0x0, 0x1, 11), + CLKGEN_FIELD(0x0, 0x1, 12), + CLKGEN_FIELD(0x0, 0x1, 13) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18), + CLKGEN_FIELD(0x0, 0x1, 19), + CLKGEN_FIELD(0x0, 0x1, 20), + CLKGEN_FIELD(0x0, 0x1, 21) }, + .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0), + CLKGEN_FIELD(0x14, 0x1f, 0), + CLKGEN_FIELD(0x24, 0x1f, 0), + CLKGEN_FIELD(0x34, 0x1f, 0) }, + .en = { CLKGEN_FIELD(0x10, 0x1, 0), + CLKGEN_FIELD(0x20, 0x1, 0), + CLKGEN_FIELD(0x30, 0x1, 0), + CLKGEN_FIELD(0x40, 0x1, 0) }, + .ndiv = CLKGEN_FIELD(0x0, 0x7, 15), + .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0), + CLKGEN_FIELD(0x18, 0x7fff, 0), + CLKGEN_FIELD(0x28, 0x7fff, 0), + CLKGEN_FIELD(0x38, 0x7fff, 0) }, + .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0), + CLKGEN_FIELD(0x1C, 0xf, 0), + CLKGEN_FIELD(0x2C, 0xf, 0), + CLKGEN_FIELD(0x3C, 0xf, 0) }, + .lockstatus_present = true, + .lock_status = CLKGEN_FIELD(0xAC, 0x1, 0), + .pll_ops = &st_quadfs_pll_c32_ops, + .rtbl = fs660c32_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl), + .get_rate = 
clk_fs660c32_dig_get_rate, +}; + +static const struct clkgen_quadfs_data st_fs660c32_F_416 = { + .npda = CLKGEN_FIELD(0x0, 0x1, 14), + .nsb = { CLKGEN_FIELD(0x0, 0x1, 10), + CLKGEN_FIELD(0x0, 0x1, 11), + CLKGEN_FIELD(0x0, 0x1, 12), + CLKGEN_FIELD(0x0, 0x1, 13) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18), + CLKGEN_FIELD(0x0, 0x1, 19), + CLKGEN_FIELD(0x0, 0x1, 20), + CLKGEN_FIELD(0x0, 0x1, 21) }, + .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0), + CLKGEN_FIELD(0x14, 0x1f, 0), + CLKGEN_FIELD(0x24, 0x1f, 0), + CLKGEN_FIELD(0x34, 0x1f, 0) }, + .en = { CLKGEN_FIELD(0x10, 0x1, 0), + CLKGEN_FIELD(0x20, 0x1, 0), + CLKGEN_FIELD(0x30, 0x1, 0), + CLKGEN_FIELD(0x40, 0x1, 0) }, + .ndiv = CLKGEN_FIELD(0x0, 0x7, 15), + .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0), + CLKGEN_FIELD(0x18, 0x7fff, 0), + CLKGEN_FIELD(0x28, 0x7fff, 0), + CLKGEN_FIELD(0x38, 0x7fff, 0) }, + .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0), + CLKGEN_FIELD(0x1C, 0xf, 0), + CLKGEN_FIELD(0x2C, 0xf, 0), + CLKGEN_FIELD(0x3C, 0xf, 0) }, + .lockstatus_present = true, + .lock_status = CLKGEN_FIELD(0xEC, 0x1, 0), + .pll_ops = &st_quadfs_pll_c32_ops, + .rtbl = fs660c32_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl), + .get_rate = clk_fs660c32_dig_get_rate, +}; + +static const struct clkgen_quadfs_data st_fs660c32_C_407 = { + .nrst_present = true, + .nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0), + CLKGEN_FIELD(0x2f0, 0x1, 1), + CLKGEN_FIELD(0x2f0, 0x1, 2), + CLKGEN_FIELD(0x2f0, 0x1, 3) }, + .npda = CLKGEN_FIELD(0x2f0, 0x1, 12), + .nsb = { CLKGEN_FIELD(0x2f0, 0x1, 8), + CLKGEN_FIELD(0x2f0, 0x1, 9), + CLKGEN_FIELD(0x2f0, 0x1, 10), + CLKGEN_FIELD(0x2f0, 0x1, 11) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x304, 0x1, 24), + CLKGEN_FIELD(0x308, 0x1, 24), + CLKGEN_FIELD(0x30c, 0x1, 24), + CLKGEN_FIELD(0x310, 0x1, 24) }, + .mdiv = { CLKGEN_FIELD(0x304, 0x1f, 15), + CLKGEN_FIELD(0x308, 0x1f, 15), + CLKGEN_FIELD(0x30c, 0x1f, 15), + CLKGEN_FIELD(0x310, 0x1f, 15) }, + .en = { CLKGEN_FIELD(0x2fc, 0x1, 0), + CLKGEN_FIELD(0x2fc, 0x1, 1), + CLKGEN_FIELD(0x2fc, 0x1, 2), + CLKGEN_FIELD(0x2fc, 0x1, 3) }, + .ndiv = CLKGEN_FIELD(0x2f4, 0x7, 16), + .pe = { CLKGEN_FIELD(0x304, 0x7fff, 0), + CLKGEN_FIELD(0x308, 0x7fff, 0), + CLKGEN_FIELD(0x30c, 0x7fff, 0), + CLKGEN_FIELD(0x310, 0x7fff, 0) }, + .sdiv = { CLKGEN_FIELD(0x304, 0xf, 20), + CLKGEN_FIELD(0x308, 0xf, 20), + CLKGEN_FIELD(0x30c, 0xf, 20), + CLKGEN_FIELD(0x310, 0xf, 20) }, + .lockstatus_present = true, + .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24), + .powerup_polarity = 1, + .standby_polarity = 1, + .pll_ops = &st_quadfs_pll_c32_ops, + .rtbl = fs660c32_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl), + .get_rate = clk_fs660c32_dig_get_rate, +}; + +static const struct clkgen_quadfs_data st_fs660c32_D_407 = { + .nrst_present = true, + .nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0), + CLKGEN_FIELD(0x2a0, 0x1, 1), + CLKGEN_FIELD(0x2a0, 0x1, 2), + CLKGEN_FIELD(0x2a0, 0x1, 3) }, + .ndiv = CLKGEN_FIELD(0x2a4, 0x7, 16), + .pe = { CLKGEN_FIELD(0x2b4, 0x7fff, 0), + CLKGEN_FIELD(0x2b8, 0x7fff, 0), + CLKGEN_FIELD(0x2bc, 0x7fff, 0), + CLKGEN_FIELD(0x2c0, 0x7fff, 0) }, + .sdiv = { CLKGEN_FIELD(0x2b4, 0xf, 20), + CLKGEN_FIELD(0x2b8, 0xf, 20), + CLKGEN_FIELD(0x2bc, 0xf, 20), + CLKGEN_FIELD(0x2c0, 0xf, 20) }, + .npda = CLKGEN_FIELD(0x2a0, 0x1, 12), + .nsb = { CLKGEN_FIELD(0x2a0, 0x1, 8), + CLKGEN_FIELD(0x2a0, 0x1, 9), + CLKGEN_FIELD(0x2a0, 0x1, 10), + CLKGEN_FIELD(0x2a0, 0x1, 11) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x2b4, 0x1, 24), + CLKGEN_FIELD(0x2b8, 0x1, 24), + CLKGEN_FIELD(0x2bc, 0x1, 24), + CLKGEN_FIELD(0x2c0, 0x1, 
24) }, + .mdiv = { CLKGEN_FIELD(0x2b4, 0x1f, 15), + CLKGEN_FIELD(0x2b8, 0x1f, 15), + CLKGEN_FIELD(0x2bc, 0x1f, 15), + CLKGEN_FIELD(0x2c0, 0x1f, 15) }, + .en = { CLKGEN_FIELD(0x2ac, 0x1, 0), + CLKGEN_FIELD(0x2ac, 0x1, 1), + CLKGEN_FIELD(0x2ac, 0x1, 2), + CLKGEN_FIELD(0x2ac, 0x1, 3) }, + .lockstatus_present = true, + .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24), + .powerup_polarity = 1, + .standby_polarity = 1, + .pll_ops = &st_quadfs_pll_c32_ops, + .rtbl = fs660c32_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl), + .get_rate = clk_fs660c32_dig_get_rate,}; + +/** + * DOC: A Frequency Synthesizer that multiples its input clock by a fixed factor + * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional & control the Fsyn + * rate - inherits rate from parent. set_rate/round_rate/recalc_rate + * parent - fixed parent. No clk_set_parent support + */ + +/** + * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of + * its parent clock, found inside a type of + * ST quad channel frequency synthesizer block + * + * @hw: handle between common and hardware-specific interfaces. + * @ndiv: regmap field for the ndiv control. + * @regs_base: base address of the configuration registers. + * @lock: spinlock. + * + */ +struct st_clk_quadfs_pll { + struct clk_hw hw; + void __iomem *regs_base; + spinlock_t *lock; + struct clkgen_quadfs_data *data; + u32 ndiv; +}; + +#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw) + +static int quadfs_pll_enable(struct clk_hw *hw) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10); + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + /* + * Bring block out of reset if we have reset control. + */ + if (pll->data->reset_present) + CLKGEN_WRITE(pll, nreset, 1); + + /* + * Use a fixed input clock noise bandwidth filter for the moment + */ + if (pll->data->bwfilter_present) + CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF); + + + CLKGEN_WRITE(pll, ndiv, pll->ndiv); + + /* + * Power up the PLL + */ + CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity); + + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); + + if (pll->data->lockstatus_present) + while (!CLKGEN_READ(pll, lock_status)) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + cpu_relax(); + } + + return 0; +} + +static void quadfs_pll_disable(struct clk_hw *hw) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + unsigned long flags = 0; + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + /* + * Powerdown the PLL and then put block into soft reset if we have + * reset control. 
+ */ + CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity); + + if (pll->data->reset_present) + CLKGEN_WRITE(pll, nreset, 0); + + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); +} + +static int quadfs_pll_is_enabled(struct clk_hw *hw) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + u32 npda = CLKGEN_READ(pll, npda); + + return !!npda; +} + +int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs, + unsigned long *rate) +{ + unsigned long nd = fs->ndiv + 16; /* ndiv value */ + + *rate = input * nd; + + return 0; +} + +static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + unsigned long rate = 0; + struct stm_fs params; + + params.ndiv = CLKGEN_READ(pll, ndiv); + if (clk_fs660c32_vco_get_rate(parent_rate, ¶ms, &rate)) + pr_err("%s:%s error calculating rate\n", + __clk_get_name(hw->clk), __func__); + + pll->ndiv = params.ndiv; + + return rate; +} + +int clk_fs660c32_vco_get_params(unsigned long input, + unsigned long output, struct stm_fs *fs) +{ +/* Formula + VCO frequency = (fin x ndiv) / pdiv + ndiv = VCOfreq * pdiv / fin + */ + unsigned long pdiv = 1, n; + + /* Output clock range: 384Mhz to 660Mhz */ + if (output < 384000000 || output > 660000000) + return -EINVAL; + + if (input > 40000000) + /* This means that PDIV would be 2 instead of 1. + Not supported today. */ + return -EINVAL; + + input /= 1000; + output /= 1000; + + n = output * pdiv / input; + if (n < 16) + n = 16; + fs->ndiv = n - 16; /* Converting formula value to reg value */ + + return 0; +} + +static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, unsigned long rate + , unsigned long *prate) +{ + struct stm_fs params; + + if (!clk_fs660c32_vco_get_params(*prate, rate, ¶ms)) + clk_fs660c32_vco_get_rate(*prate, ¶ms, &rate); + + pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", + __func__, __clk_get_name(hw->clk), + rate, (unsigned int)params.sdiv, + (unsigned int)params.mdiv, + (unsigned int)params.pe, (unsigned int)params.nsdiv); + + return rate; +} + +static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + struct stm_fs params; + long hwrate = 0; + unsigned long flags = 0; + + if (!rate || !parent_rate) + return -EINVAL; + + if (!clk_fs660c32_vco_get_params(parent_rate, rate, ¶ms)) + clk_fs660c32_vco_get_rate(parent_rate, ¶ms, &hwrate); + + pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n", + __func__, __clk_get_name(hw->clk), + hwrate, (unsigned int)params.ndiv); + + if (!hwrate) + return -EINVAL; + + pll->ndiv = params.ndiv; + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + CLKGEN_WRITE(pll, ndiv, pll->ndiv); + + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); + + return 0; +} + +static const struct clk_ops st_quadfs_pll_c65_ops = { + .enable = quadfs_pll_enable, + .disable = quadfs_pll_disable, + .is_enabled = quadfs_pll_is_enabled, +}; + +static const struct clk_ops st_quadfs_pll_c32_ops = { + .enable = quadfs_pll_enable, + .disable = quadfs_pll_disable, + .is_enabled = quadfs_pll_is_enabled, + .recalc_rate = quadfs_pll_fs660c32_recalc_rate, + .round_rate = quadfs_pll_fs660c32_round_rate, + .set_rate = quadfs_pll_fs660c32_set_rate, +}; + +static struct clk * __init st_clk_register_quadfs_pll( + const char *name, const char *parent_name, + struct clkgen_quadfs_data *quadfs, void __iomem *reg, + spinlock_t *lock) +{ + struct 
st_clk_quadfs_pll *pll; + struct clk *clk; + struct clk_init_data init; + + /* + * Sanity check required pointers. + */ + if (WARN_ON(!name || !parent_name)) + return ERR_PTR(-EINVAL); + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = quadfs->pll_ops; + init.flags = CLK_IS_BASIC; + init.parent_names = &parent_name; + init.num_parents = 1; + + pll->data = quadfs; + pll->regs_base = reg; + pll->lock = lock; + pll->hw.init = &init; + + clk = clk_register(NULL, &pll->hw); + + if (IS_ERR(clk)) + kfree(pll); + + return clk; +} + +/** + * DOC: A digital frequency synthesizer + * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional + * rate - set rate is functional + * parent - fixed parent. No clk_set_parent support + */ + +/** + * struct st_clk_quadfs_fsynth - One clock output from a four channel digital + * frequency synthesizer (fsynth) block. + * + * @hw: handle between common and hardware-specific interfaces + * + * @nsb: regmap field in the output control register for the digital + * standby of this fsynth channel. This control is active low so + * the channel is in standby when the control bit is cleared. + * + * @nsdiv: regmap field in the output control register for + * for the optional divide by 3 of this fsynth channel. This control + * is active low so the divide by 3 is active when the control bit is + * cleared and the divide is bypassed when the bit is set. + */ +struct st_clk_quadfs_fsynth { + struct clk_hw hw; + void __iomem *regs_base; + spinlock_t *lock; + struct clkgen_quadfs_data *data; + + u32 chan; + /* + * Cached hardware values from set_rate so we can program the + * hardware in enable. There are two reasons for this: + * + * 1. The registers may not be writable until the parent has been + * enabled. + * + * 2. It restores the clock rate when a driver does an enable + * on PM restore, after a suspend to RAM has lost the hardware + * setup. + */ + u32 md; + u32 pe; + u32 sdiv; + u32 nsdiv; +}; + +#define to_quadfs_fsynth(_hw) \ + container_of(_hw, struct st_clk_quadfs_fsynth, hw) + +static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs) +{ + /* + * Pulse the program enable register lsb to make the hardware take + * notice of the new md/pe values with a glitchless transition. + */ + CLKGEN_WRITE(fs, en[fs->chan], 1); + CLKGEN_WRITE(fs, en[fs->chan], 0); +} + +static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs) +{ + unsigned long flags = 0; + + /* + * Ensure the md/pe parameters are ignored while we are + * reprogramming them so we can get a glitchless change + * when fine tuning the speed of a running clock. 
+ */ + CLKGEN_WRITE(fs, en[fs->chan], 0); + + CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md); + CLKGEN_WRITE(fs, pe[fs->chan], fs->pe); + CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv); + + if (fs->lock) + spin_lock_irqsave(fs->lock, flags); + + if (fs->data->nsdiv_present) + CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv); + + if (fs->lock) + spin_unlock_irqrestore(fs->lock, flags); +} + +static int quadfs_fsynth_enable(struct clk_hw *hw) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + unsigned long flags = 0; + + pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk)); + + quadfs_fsynth_program_rate(fs); + + if (fs->lock) + spin_lock_irqsave(fs->lock, flags); + + CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity); + + if (fs->data->nrst_present) + CLKGEN_WRITE(fs, nrst[fs->chan], 0); + + if (fs->lock) + spin_unlock_irqrestore(fs->lock, flags); + + quadfs_fsynth_program_enable(fs); + + return 0; +} + +static void quadfs_fsynth_disable(struct clk_hw *hw) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + unsigned long flags = 0; + + pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk)); + + if (fs->lock) + spin_lock_irqsave(fs->lock, flags); + + CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity); + + if (fs->lock) + spin_unlock_irqrestore(fs->lock, flags); +} + +static int quadfs_fsynth_is_enabled(struct clk_hw *hw) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]); + + pr_debug("%s: %s enable bit = 0x%x\n", + __func__, __clk_get_name(hw->clk), nsb); + + return fs->data->standby_polarity ? !nsb : !!nsb; +} + +#define P15 (uint64_t)(1 << 15) + +static int clk_fs216c65_get_rate(unsigned long input, const struct stm_fs *fs, + unsigned long *rate) +{ + uint64_t res; + unsigned long ns; + unsigned long nd = 8; /* ndiv stuck at 0 => val = 8 */ + unsigned long s; + long m; + + m = fs->mdiv - 32; + s = 1 << (fs->sdiv + 1); + ns = (fs->nsdiv ? 1 : 3); + + res = (uint64_t)(s * ns * P15 * (uint64_t)(m + 33)); + res = res - (s * ns * fs->pe); + *rate = div64_u64(P15 * nd * input * 32, res); + + return 0; +} + +static int clk_fs432c65_get_rate(unsigned long input, const struct stm_fs *fs, + unsigned long *rate) +{ + uint64_t res; + unsigned long nd = 16; /* ndiv value; stuck at 0 (30Mhz input) */ + long m; + unsigned long sd; + unsigned long ns; + + m = fs->mdiv - 32; + sd = 1 << (fs->sdiv + 1); + ns = (fs->nsdiv ? 1 : 3); + + res = (uint64_t)(sd * ns * P15 * (uint64_t)(m + 33)); + res = res - (sd * ns * fs->pe); + *rate = div64_u64(P15 * nd * input * 32, res); + + return 0; +} + +#define P20 (uint64_t)(1 << 20) + +static int clk_fs660c32_dig_get_rate(unsigned long input, + const struct stm_fs *fs, unsigned long *rate) +{ + unsigned long s = (1 << fs->sdiv); + unsigned long ns; + uint64_t res; + + /* + * 'nsdiv' is a register value ('BIN') which is translated + * to a decimal value according to following rules. + * + * nsdiv ns.dec + * 0 3 + * 1 1 + */ + ns = (fs->nsdiv == 1) ? 
1 : 3; + + res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns; + *rate = (unsigned long)div64_u64(input * P20 * 32, res); + + return 0; +} + +static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs, + struct stm_fs *params) +{ + /* + * Get the initial hardware values for recalc_rate + */ + params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]); + params->pe = CLKGEN_READ(fs, pe[fs->chan]); + params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]); + + if (fs->data->nsdiv_present) + params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]); + else + params->nsdiv = 1; + + /* + * If All are NULL then assume no clock rate is programmed. + */ + if (!params->mdiv && !params->pe && !params->sdiv) + return 1; + + fs->md = params->mdiv; + fs->pe = params->pe; + fs->sdiv = params->sdiv; + fs->nsdiv = params->nsdiv; + + return 0; +} + +static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate, + unsigned long prate, struct stm_fs *params) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + int (*clk_fs_get_rate)(unsigned long , + const struct stm_fs *, unsigned long *); + struct stm_fs prev_params; + unsigned long prev_rate, rate = 0; + unsigned long diff_rate, prev_diff_rate = ~0; + int index; + + clk_fs_get_rate = fs->data->get_rate; + + for (index = 0; index < fs->data->rtbl_cnt; index++) { + prev_rate = rate; + + *params = fs->data->rtbl[index]; + prev_params = *params; + + clk_fs_get_rate(prate, &fs->data->rtbl[index], &rate); + + diff_rate = abs(drate - rate); + + if (diff_rate > prev_diff_rate) { + rate = prev_rate; + *params = prev_params; + break; + } + + prev_diff_rate = diff_rate; + + if (drate == rate) + return rate; + } + + + if (index == fs->data->rtbl_cnt) + *params = prev_params; + + return rate; +} + +static unsigned long quadfs_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + unsigned long rate = 0; + struct stm_fs params; + int (*clk_fs_get_rate)(unsigned long , + const struct stm_fs *, unsigned long *); + + clk_fs_get_rate = fs->data->get_rate; + + if (quadfs_fsynt_get_hw_value_for_recalc(fs, ¶ms)) + return 0; + + if (clk_fs_get_rate(parent_rate, ¶ms, &rate)) { + pr_err("%s:%s error calculating rate\n", + __clk_get_name(hw->clk), __func__); + } + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; +} + +static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct stm_fs params; + + rate = quadfs_find_best_rate(hw, rate, *prate, ¶ms); + + pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", + __func__, __clk_get_name(hw->clk), + rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv, + (unsigned int)params.pe, (unsigned int)params.nsdiv); + + return rate; +} + + +static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs, + struct stm_fs *params) +{ + fs->md = params->mdiv; + fs->pe = params->pe; + fs->sdiv = params->sdiv; + fs->nsdiv = params->nsdiv; + + /* + * In some integrations you can only change the fsynth programming when + * the parent entity containing it is enabled. 
+ */ + quadfs_fsynth_program_rate(fs); + quadfs_fsynth_program_enable(fs); +} + +static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + struct stm_fs params; + long hwrate; + int uninitialized_var(i); + + if (!rate || !parent_rate) + return -EINVAL; + + memset(¶ms, 0, sizeof(struct stm_fs)); + + hwrate = quadfs_find_best_rate(hw, rate, parent_rate, ¶ms); + if (!hwrate) + return -EINVAL; + + quadfs_program_and_enable(fs, ¶ms); + + return 0; +} + + + +static const struct clk_ops st_quadfs_ops = { + .enable = quadfs_fsynth_enable, + .disable = quadfs_fsynth_disable, + .is_enabled = quadfs_fsynth_is_enabled, + .round_rate = quadfs_round_rate, + .set_rate = quadfs_set_rate, + .recalc_rate = quadfs_recalc_rate, +}; + +static struct clk * __init st_clk_register_quadfs_fsynth( + const char *name, const char *parent_name, + struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan, + spinlock_t *lock) +{ + struct st_clk_quadfs_fsynth *fs; + struct clk *clk; + struct clk_init_data init; + + /* + * Sanity check required pointers, note that nsdiv3 is optional. + */ + if (WARN_ON(!name || !parent_name)) + return ERR_PTR(-EINVAL); + + fs = kzalloc(sizeof(*fs), GFP_KERNEL); + if (!fs) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &st_quadfs_ops; + init.flags = CLK_GET_RATE_NOCACHE | CLK_IS_BASIC; + init.parent_names = &parent_name; + init.num_parents = 1; + + fs->data = quadfs; + fs->regs_base = reg; + fs->chan = chan; + fs->lock = lock; + fs->hw.init = &init; + + clk = clk_register(NULL, &fs->hw); + + if (IS_ERR(clk)) + kfree(fs); + + return clk; +} + +static const struct of_device_id quadfs_of_match[] = { + { + .compatible = "st,stih416-quadfs216", + .data = &st_fs216c65_416 + }, + { + .compatible = "st,stih416-quadfs432", + .data = &st_fs432c65_416 + }, + { + .compatible = "st,stih416-quadfs660-E", + .data = &st_fs660c32_E_416 + }, + { + .compatible = "st,stih416-quadfs660-F", + .data = &st_fs660c32_F_416 + }, + { + .compatible = "st,stih407-quadfs660-C", + .data = &st_fs660c32_C_407 + }, + { + .compatible = "st,stih407-quadfs660-D", + .data = &st_fs660c32_D_407 + }, + { + .compatible = "st,stih407-quadfs660-D", + .data = (void *)&st_fs660c32_D_407 + }, + {} +}; + +static void __init st_of_create_quadfs_fsynths( + struct device_node *np, const char *pll_name, + struct clkgen_quadfs_data *quadfs, void __iomem *reg, + spinlock_t *lock) +{ + struct clk_onecell_data *clk_data; + int fschan; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return; + + clk_data->clk_num = QUADFS_MAX_CHAN; + clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) { + kfree(clk_data); + return; + } + + for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) { + struct clk *clk; + const char *clk_name; + + if (of_property_read_string_index(np, "clock-output-names", + fschan, &clk_name)) { + break; + } + + /* + * If we read an empty clock name then the channel is unused + */ + if (*clk_name == '\0') + continue; + + clk = st_clk_register_quadfs_fsynth(clk_name, pll_name, + quadfs, reg, fschan, lock); + + /* + * If there was an error registering this clock output, clean + * up and move on to the next one. 
+ */ + if (!IS_ERR(clk)) { + clk_data->clks[fschan] = clk; + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + } + } + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); +} + +static void __init st_of_quadfs_setup(struct device_node *np) +{ + const struct of_device_id *match; + struct clk *clk; + const char *pll_name, *clk_parent_name; + void __iomem *reg; + spinlock_t *lock; + + match = of_match_node(quadfs_of_match, np); + if (WARN_ON(!match)) + return; + + reg = of_iomap(np, 0); + if (!reg) + return; + + clk_parent_name = of_clk_get_parent_name(np, 0); + if (!clk_parent_name) + return; + + pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name); + if (!pll_name) + return; + + lock = kzalloc(sizeof(*lock), GFP_KERNEL); + if (!lock) + goto err_exit; + + spin_lock_init(lock); + + clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, + (struct clkgen_quadfs_data *) match->data, reg, lock); + if (IS_ERR(clk)) + goto err_exit; + else + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + + st_of_create_quadfs_fsynths(np, pll_name, + (struct clkgen_quadfs_data *)match->data, + reg, lock); + +err_exit: + kfree(pll_name); /* No longer need local copy of the PLL name */ +} +CLK_OF_DECLARE(quadfs, "st,quadfs", st_of_quadfs_setup); diff --git a/kernel/drivers/clk/st/clkgen-mux.c b/kernel/drivers/clk/st/clkgen-mux.c new file mode 100644 index 000000000..fdcff10f6 --- /dev/null +++ b/kernel/drivers/clk/st/clkgen-mux.c @@ -0,0 +1,830 @@ +/* + * clkgen-mux.c: ST GEN-MUX Clock driver + * + * Copyright (C) 2014 STMicroelectronics (R&D) Limited + * + * Authors: Stephen Gallimore <stephen.gallimore@st.com> + * Pankaj Dev <pankaj.dev@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/clk-provider.h> + +static DEFINE_SPINLOCK(clkgena_divmux_lock); +static DEFINE_SPINLOCK(clkgenf_lock); + +static const char ** __init clkgen_mux_get_parents(struct device_node *np, + int *num_parents) +{ + const char **parents; + int nparents, i; + + nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + if (WARN_ON(nparents <= 0)) + return ERR_PTR(-EINVAL); + + parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL); + if (!parents) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nparents; i++) + parents[i] = of_clk_get_parent_name(np, i); + + *num_parents = nparents; + return parents; +} + +/** + * DOC: Clock mux with a programmable divider on each of its three inputs. + * The mux has an input setting which effectively gates its output. 
+ * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional & control gating + * rate - set rate is supported + * parent - set/get parent + */ + +#define NUM_INPUTS 3 + +struct clkgena_divmux { + struct clk_hw hw; + /* Subclassed mux and divider structures */ + struct clk_mux mux; + struct clk_divider div[NUM_INPUTS]; + /* Enable/running feedback register bits for each input */ + void __iomem *feedback_reg[NUM_INPUTS]; + int feedback_bit_idx; + + u8 muxsel; +}; + +#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw) + +struct clkgena_divmux_data { + int num_outputs; + int mux_offset; + int mux_offset2; + int mux_start_bit; + int div_offsets[NUM_INPUTS]; + int fb_offsets[NUM_INPUTS]; + int fb_start_bit_idx; +}; + +#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3 + +static int clkgena_divmux_is_running(struct clkgena_divmux *mux) +{ + u32 regval = readl(mux->feedback_reg[mux->muxsel]); + u32 running = regval & BIT(mux->feedback_bit_idx); + return !!running; +} + +static int clkgena_divmux_enable(struct clk_hw *hw) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *mux_hw = &genamux->mux.hw; + unsigned long timeout; + int ret = 0; + + __clk_hw_set_clk(mux_hw, hw); + + ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel); + if (ret) + return ret; + + timeout = jiffies + msecs_to_jiffies(10); + + while (!clkgena_divmux_is_running(genamux)) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + cpu_relax(); + } + + return 0; +} + +static void clkgena_divmux_disable(struct clk_hw *hw) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *mux_hw = &genamux->mux.hw; + + __clk_hw_set_clk(mux_hw, hw); + + clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF); +} + +static int clkgena_divmux_is_enabled(struct clk_hw *hw) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *mux_hw = &genamux->mux.hw; + + __clk_hw_set_clk(mux_hw, hw); + + return (s8)clk_mux_ops.get_parent(mux_hw) > 0; +} + +u8 clkgena_divmux_get_parent(struct clk_hw *hw) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *mux_hw = &genamux->mux.hw; + + __clk_hw_set_clk(mux_hw, hw); + + genamux->muxsel = clk_mux_ops.get_parent(mux_hw); + if ((s8)genamux->muxsel < 0) { + pr_debug("%s: %s: Invalid parent, setting to default.\n", + __func__, __clk_get_name(hw->clk)); + genamux->muxsel = 0; + } + + return genamux->muxsel; +} + +static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + + if (index >= CKGAX_CLKOPSRC_SWITCH_OFF) + return -EINVAL; + + genamux->muxsel = index; + + /* + * If the mux is already enabled, call enable directly to set the + * new mux position and wait for it to start running again. Otherwise + * do nothing. 
+ */ + if (clkgena_divmux_is_enabled(hw)) + clkgena_divmux_enable(hw); + + return 0; +} + +unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + + __clk_hw_set_clk(div_hw, hw); + + return clk_divider_ops.recalc_rate(div_hw, parent_rate); +} + +static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + + __clk_hw_set_clk(div_hw, hw); + + return clk_divider_ops.set_rate(div_hw, rate, parent_rate); +} + +static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + + __clk_hw_set_clk(div_hw, hw); + + return clk_divider_ops.round_rate(div_hw, rate, prate); +} + +static const struct clk_ops clkgena_divmux_ops = { + .enable = clkgena_divmux_enable, + .disable = clkgena_divmux_disable, + .is_enabled = clkgena_divmux_is_enabled, + .get_parent = clkgena_divmux_get_parent, + .set_parent = clkgena_divmux_set_parent, + .round_rate = clkgena_divmux_round_rate, + .recalc_rate = clkgena_divmux_recalc_rate, + .set_rate = clkgena_divmux_set_rate, +}; + +/** + * clk_register_genamux - register a genamux clock with the clock framework + */ +struct clk *clk_register_genamux(const char *name, + const char **parent_names, u8 num_parents, + void __iomem *reg, + const struct clkgena_divmux_data *muxdata, + u32 idx) +{ + /* + * Fixed constants across all ClockgenA variants + */ + const int mux_width = 2; + const int divider_width = 5; + struct clkgena_divmux *genamux; + struct clk *clk; + struct clk_init_data init; + int i; + + genamux = kzalloc(sizeof(*genamux), GFP_KERNEL); + if (!genamux) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &clkgena_divmux_ops; + init.flags = CLK_IS_BASIC; + init.parent_names = parent_names; + init.num_parents = num_parents; + + genamux->mux.lock = &clkgena_divmux_lock; + genamux->mux.mask = BIT(mux_width) - 1; + genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width); + if (genamux->mux.shift > 31) { + /* + * We have spilled into the second mux register so + * adjust the register address and the bit shift accordingly + */ + genamux->mux.reg = reg + muxdata->mux_offset2; + genamux->mux.shift -= 32; + } else { + genamux->mux.reg = reg + muxdata->mux_offset; + } + + for (i = 0; i < NUM_INPUTS; i++) { + /* + * Divider config for each input + */ + void __iomem *divbase = reg + muxdata->div_offsets[i]; + genamux->div[i].width = divider_width; + genamux->div[i].reg = divbase + (idx * sizeof(u32)); + + /* + * Mux enabled/running feedback register for each input. 
+ */ + genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i]; + } + + genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx; + genamux->hw.init = &init; + + clk = clk_register(NULL, &genamux->hw); + if (IS_ERR(clk)) { + kfree(genamux); + goto err; + } + + pr_debug("%s: parent %s rate %lu\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + clk_get_rate(clk)); +err: + return clk; +} + +static struct clkgena_divmux_data st_divmux_c65hs = { + .num_outputs = 4, + .mux_offset = 0x14, + .mux_start_bit = 0, + .div_offsets = { 0x800, 0x900, 0xb00 }, + .fb_offsets = { 0x18, 0x1c, 0x20 }, + .fb_start_bit_idx = 0, +}; + +static struct clkgena_divmux_data st_divmux_c65ls = { + .num_outputs = 14, + .mux_offset = 0x14, + .mux_offset2 = 0x24, + .mux_start_bit = 8, + .div_offsets = { 0x810, 0xa10, 0xb10 }, + .fb_offsets = { 0x18, 0x1c, 0x20 }, + .fb_start_bit_idx = 4, +}; + +static struct clkgena_divmux_data st_divmux_c32odf0 = { + .num_outputs = 8, + .mux_offset = 0x1c, + .mux_start_bit = 0, + .div_offsets = { 0x800, 0x900, 0xa60 }, + .fb_offsets = { 0x2c, 0x24, 0x28 }, + .fb_start_bit_idx = 0, +}; + +static struct clkgena_divmux_data st_divmux_c32odf1 = { + .num_outputs = 8, + .mux_offset = 0x1c, + .mux_start_bit = 16, + .div_offsets = { 0x820, 0x980, 0xa80 }, + .fb_offsets = { 0x2c, 0x24, 0x28 }, + .fb_start_bit_idx = 8, +}; + +static struct clkgena_divmux_data st_divmux_c32odf2 = { + .num_outputs = 8, + .mux_offset = 0x20, + .mux_start_bit = 0, + .div_offsets = { 0x840, 0xa20, 0xb10 }, + .fb_offsets = { 0x2c, 0x24, 0x28 }, + .fb_start_bit_idx = 16, +}; + +static struct clkgena_divmux_data st_divmux_c32odf3 = { + .num_outputs = 8, + .mux_offset = 0x20, + .mux_start_bit = 16, + .div_offsets = { 0x860, 0xa40, 0xb30 }, + .fb_offsets = { 0x2c, 0x24, 0x28 }, + .fb_start_bit_idx = 24, +}; + +static const struct of_device_id clkgena_divmux_of_match[] = { + { + .compatible = "st,clkgena-divmux-c65-hs", + .data = &st_divmux_c65hs, + }, + { + .compatible = "st,clkgena-divmux-c65-ls", + .data = &st_divmux_c65ls, + }, + { + .compatible = "st,clkgena-divmux-c32-odf0", + .data = &st_divmux_c32odf0, + }, + { + .compatible = "st,clkgena-divmux-c32-odf1", + .data = &st_divmux_c32odf1, + }, + { + .compatible = "st,clkgena-divmux-c32-odf2", + .data = &st_divmux_c32odf2, + }, + { + .compatible = "st,clkgena-divmux-c32-odf3", + .data = &st_divmux_c32odf3, + }, + {} +}; + +static void __iomem * __init clkgen_get_register_base( + struct device_node *np) +{ + struct device_node *pnode; + void __iomem *reg = NULL; + + pnode = of_get_parent(np); + if (!pnode) + return NULL; + + reg = of_iomap(pnode, 0); + + of_node_put(pnode); + return reg; +} + +void __init st_of_clkgena_divmux_setup(struct device_node *np) +{ + const struct of_device_id *match; + const struct clkgena_divmux_data *data; + struct clk_onecell_data *clk_data; + void __iomem *reg; + const char **parents; + int num_parents = 0, i; + + match = of_match_node(clkgena_divmux_of_match, np); + if (WARN_ON(!match)) + return; + + data = (struct clkgena_divmux_data *)match->data; + + reg = clkgen_get_register_base(np); + if (!reg) + return; + + parents = clkgen_mux_get_parents(np, &num_parents); + if (IS_ERR(parents)) + return; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + goto err; + + clk_data->clk_num = data->num_outputs; + clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) + goto err; + + for (i = 0; i < clk_data->clk_num; i++) { + struct clk *clk; + const char 
*clk_name; + + if (of_property_read_string_index(np, "clock-output-names", + i, &clk_name)) + break; + + /* + * If we read an empty clock name then the output is unused + */ + if (*clk_name == '\0') + continue; + + clk = clk_register_genamux(clk_name, parents, num_parents, + reg, data, i); + + if (IS_ERR(clk)) + goto err; + + clk_data->clks[i] = clk; + } + + kfree(parents); + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + return; +err: + if (clk_data) + kfree(clk_data->clks); + + kfree(clk_data); + kfree(parents); +} +CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup); + +struct clkgena_prediv_data { + u32 offset; + u8 shift; + struct clk_div_table *table; +}; + +static struct clk_div_table prediv_table16[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 16 }, + { .div = 0 }, +}; + +static struct clkgena_prediv_data prediv_c65_data = { + .offset = 0x4c, + .shift = 31, + .table = prediv_table16, +}; + +static struct clkgena_prediv_data prediv_c32_data = { + .offset = 0x50, + .shift = 1, + .table = prediv_table16, +}; + +static const struct of_device_id clkgena_prediv_of_match[] = { + { .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data }, + { .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data }, + {} +}; + +void __init st_of_clkgena_prediv_setup(struct device_node *np) +{ + const struct of_device_id *match; + void __iomem *reg; + const char *parent_name, *clk_name; + struct clk *clk; + struct clkgena_prediv_data *data; + + match = of_match_node(clkgena_prediv_of_match, np); + if (!match) { + pr_err("%s: No matching data\n", __func__); + return; + } + + data = (struct clkgena_prediv_data *)match->data; + + reg = clkgen_get_register_base(np); + if (!reg) + return; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + if (of_property_read_string_index(np, "clock-output-names", + 0, &clk_name)) + return; + + clk = clk_register_divider_table(NULL, clk_name, parent_name, 0, + reg + data->offset, data->shift, 1, + 0, data->table, NULL); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + + return; +} +CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup); + +struct clkgen_mux_data { + u32 offset; + u8 shift; + u8 width; + spinlock_t *lock; + unsigned long clk_flags; + u8 mux_flags; +}; + +static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = { + .offset = 0, + .shift = 0, + .width = 1, +}; + +static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = { + .offset = 0, + .shift = 0, + .width = 1, +}; + +static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = { + .offset = 0, + .shift = 0, + .width = 1, +}; + +static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = { + .offset = 0, + .shift = 16, + .width = 1, + .lock = &clkgenf_lock, +}; + +static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = { + .offset = 0, + .shift = 17, + .width = 1, + .lock = &clkgenf_lock, +}; + +static struct clkgen_mux_data stih415_a9_mux_data = { + .offset = 0, + .shift = 1, + .width = 2, +}; +static struct clkgen_mux_data stih416_a9_mux_data = { + .offset = 0, + .shift = 0, + .width = 2, +}; +static struct clkgen_mux_data stih407_a9_mux_data = { + .offset = 0x1a4, + .shift = 1, + .width = 2, +}; + +static const struct of_device_id mux_of_match[] = { + { + .compatible = "st,stih416-clkgenc-vcc-hd", + .data = 
&clkgen_mux_c_vcc_hd_416, + }, + { + .compatible = "st,stih416-clkgenf-vcc-fvdp", + .data = &clkgen_mux_f_vcc_fvdp_416, + }, + { + .compatible = "st,stih416-clkgenf-vcc-hva", + .data = &clkgen_mux_f_vcc_hva_416, + }, + { + .compatible = "st,stih416-clkgenf-vcc-hd", + .data = &clkgen_mux_f_vcc_hd_416, + }, + { + .compatible = "st,stih416-clkgenf-vcc-sd", + .data = &clkgen_mux_c_vcc_sd_416, + }, + { + .compatible = "st,stih415-clkgen-a9-mux", + .data = &stih415_a9_mux_data, + }, + { + .compatible = "st,stih416-clkgen-a9-mux", + .data = &stih416_a9_mux_data, + }, + { + .compatible = "st,stih407-clkgen-a9-mux", + .data = &stih407_a9_mux_data, + }, + {} +}; + +void __init st_of_clkgen_mux_setup(struct device_node *np) +{ + const struct of_device_id *match; + struct clk *clk; + void __iomem *reg; + const char **parents; + int num_parents; + struct clkgen_mux_data *data; + + match = of_match_node(mux_of_match, np); + if (!match) { + pr_err("%s: No matching data\n", __func__); + return; + } + + data = (struct clkgen_mux_data *)match->data; + + reg = of_iomap(np, 0); + if (!reg) { + pr_err("%s: Failed to get base address\n", __func__); + return; + } + + parents = clkgen_mux_get_parents(np, &num_parents); + if (IS_ERR(parents)) { + pr_err("%s: Failed to get parents (%ld)\n", + __func__, PTR_ERR(parents)); + return; + } + + clk = clk_register_mux(NULL, np->name, parents, num_parents, + data->clk_flags | CLK_SET_RATE_PARENT, + reg + data->offset, + data->shift, data->width, data->mux_flags, + data->lock); + if (IS_ERR(clk)) + goto err; + + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + +err: + kfree(parents); + + return; +} +CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup); + +#define VCC_MAX_CHANNELS 16 + +#define VCC_GATE_OFFSET 0x0 +#define VCC_MUX_OFFSET 0x4 +#define VCC_DIV_OFFSET 0x8 + +struct clkgen_vcc_data { + spinlock_t *lock; + unsigned long clk_flags; +}; + +static struct clkgen_vcc_data st_clkgenc_vcc_416 = { + .clk_flags = CLK_SET_RATE_PARENT, +}; + +static struct clkgen_vcc_data st_clkgenf_vcc_416 = { + .lock = &clkgenf_lock, +}; + +static const struct of_device_id vcc_of_match[] = { + { .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 }, + { .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 }, + {} +}; + +void __init st_of_clkgen_vcc_setup(struct device_node *np) +{ + const struct of_device_id *match; + void __iomem *reg; + const char **parents; + int num_parents, i; + struct clk_onecell_data *clk_data; + struct clkgen_vcc_data *data; + + match = of_match_node(vcc_of_match, np); + if (WARN_ON(!match)) + return; + data = (struct clkgen_vcc_data *)match->data; + + reg = of_iomap(np, 0); + if (!reg) + return; + + parents = clkgen_mux_get_parents(np, &num_parents); + if (IS_ERR(parents)) + return; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + goto err; + + clk_data->clk_num = VCC_MAX_CHANNELS; + clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) + goto err; + + for (i = 0; i < clk_data->clk_num; i++) { + struct clk *clk; + const char *clk_name; + struct clk_gate *gate; + struct clk_divider *div; + struct clk_mux *mux; + + if (of_property_read_string_index(np, "clock-output-names", + i, &clk_name)) + break; + + /* + * If we read an empty clock name then the output is unused + */ + if (*clk_name == '\0') + continue; + 
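+		/*
+		 * Each VCC output is registered below as a composite clock
+		 * built from three basic clocks sharing this register bank:
+		 * a per-channel gate bit at VCC_GATE_OFFSET, a 2-bit
+		 * power-of-two divider at VCC_DIV_OFFSET and a 2-bit
+		 * crossbar mux at VCC_MUX_OFFSET (both at shift 2 * i).
+		 */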
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL); + if (!gate) + break; + + div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL); + if (!div) { + kfree(gate); + break; + } + + mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL); + if (!mux) { + kfree(gate); + kfree(div); + break; + } + + gate->reg = reg + VCC_GATE_OFFSET; + gate->bit_idx = i; + gate->flags = CLK_GATE_SET_TO_DISABLE; + gate->lock = data->lock; + + div->reg = reg + VCC_DIV_OFFSET; + div->shift = 2 * i; + div->width = 2; + div->flags = CLK_DIVIDER_POWER_OF_TWO | + CLK_DIVIDER_ROUND_CLOSEST; + + mux->reg = reg + VCC_MUX_OFFSET; + mux->shift = 2 * i; + mux->mask = 0x3; + + clk = clk_register_composite(NULL, clk_name, parents, + num_parents, + &mux->hw, &clk_mux_ops, + &div->hw, &clk_divider_ops, + &gate->hw, &clk_gate_ops, + data->clk_flags); + if (IS_ERR(clk)) { + kfree(gate); + kfree(div); + kfree(mux); + goto err; + } + + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + + clk_data->clks[i] = clk; + } + + kfree(parents); + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + return; + +err: + for (i = 0; i < clk_data->clk_num; i++) { + struct clk_composite *composite; + + if (!clk_data->clks[i]) + continue; + + composite = container_of(__clk_get_hw(clk_data->clks[i]), + struct clk_composite, hw); + kfree(container_of(composite->gate_hw, struct clk_gate, hw)); + kfree(container_of(composite->rate_hw, struct clk_divider, hw)); + kfree(container_of(composite->mux_hw, struct clk_mux, hw)); + } + + if (clk_data) + kfree(clk_data->clks); + + kfree(clk_data); + kfree(parents); +} +CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup); diff --git a/kernel/drivers/clk/st/clkgen-pll.c b/kernel/drivers/clk/st/clkgen-pll.c new file mode 100644 index 000000000..d204ba85d --- /dev/null +++ b/kernel/drivers/clk/st/clkgen-pll.c @@ -0,0 +1,763 @@ +/* + * Copyright (C) 2014 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +/* + * Authors: + * Stephen Gallimore <stephen.gallimore@st.com>, + * Pankaj Dev <pankaj.dev@st.com>. 
+ */ + +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/clk-provider.h> + +#include "clkgen.h" + +static DEFINE_SPINLOCK(clkgena_c32_odf_lock); + +/* + * Common PLL configuration register bits for PLL800 and PLL1600 C65 + */ +#define C65_MDIV_PLL800_MASK (0xff) +#define C65_MDIV_PLL1600_MASK (0x7) +#define C65_NDIV_MASK (0xff) +#define C65_PDIV_MASK (0x7) + +/* + * PLL configuration register bits for PLL3200 C32 + */ +#define C32_NDIV_MASK (0xff) +#define C32_IDF_MASK (0x7) +#define C32_ODF_MASK (0x3f) +#define C32_LDF_MASK (0x7f) + +#define C32_MAX_ODFS (4) + +struct clkgen_pll_data { + struct clkgen_field pdn_status; + struct clkgen_field locked_status; + struct clkgen_field mdiv; + struct clkgen_field ndiv; + struct clkgen_field pdiv; + struct clkgen_field idf; + struct clkgen_field ldf; + unsigned int num_odfs; + struct clkgen_field odf[C32_MAX_ODFS]; + struct clkgen_field odf_gate[C32_MAX_ODFS]; + const struct clk_ops *ops; +}; + +static const struct clk_ops st_pll1600c65_ops; +static const struct clk_ops st_pll800c65_ops; +static const struct clk_ops stm_pll3200c32_ops; +static const struct clk_ops st_pll1200c32_ops; + +static const struct clkgen_pll_data st_pll1600c65_ax = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19), + .locked_status = CLKGEN_FIELD(0x0, 0x1, 31), + .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK, 0), + .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8), + .ops = &st_pll1600c65_ops +}; + +static const struct clkgen_pll_data st_pll800c65_ax = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19), + .locked_status = CLKGEN_FIELD(0x0, 0x1, 31), + .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL800_MASK, 0), + .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8), + .pdiv = CLKGEN_FIELD(0x0, C65_PDIV_MASK, 16), + .ops = &st_pll800c65_ops +}; + +static const struct clkgen_pll_data st_pll3200c32_a1x_0 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 31), + .locked_status = CLKGEN_FIELD(0x4, 0x1, 31), + .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 0x0), + .idf = CLKGEN_FIELD(0x4, C32_IDF_MASK, 0x0), + .num_odfs = 4, + .odf = { CLKGEN_FIELD(0x54, C32_ODF_MASK, 4), + CLKGEN_FIELD(0x54, C32_ODF_MASK, 10), + CLKGEN_FIELD(0x54, C32_ODF_MASK, 16), + CLKGEN_FIELD(0x54, C32_ODF_MASK, 22) }, + .odf_gate = { CLKGEN_FIELD(0x54, 0x1, 0), + CLKGEN_FIELD(0x54, 0x1, 1), + CLKGEN_FIELD(0x54, 0x1, 2), + CLKGEN_FIELD(0x54, 0x1, 3) }, + .ops = &stm_pll3200c32_ops, +}; + +static const struct clkgen_pll_data st_pll3200c32_a1x_1 = { + .pdn_status = CLKGEN_FIELD(0xC, 0x1, 31), + .locked_status = CLKGEN_FIELD(0x10, 0x1, 31), + .ndiv = CLKGEN_FIELD(0xC, C32_NDIV_MASK, 0x0), + .idf = CLKGEN_FIELD(0x10, C32_IDF_MASK, 0x0), + .num_odfs = 4, + .odf = { CLKGEN_FIELD(0x58, C32_ODF_MASK, 4), + CLKGEN_FIELD(0x58, C32_ODF_MASK, 10), + CLKGEN_FIELD(0x58, C32_ODF_MASK, 16), + CLKGEN_FIELD(0x58, C32_ODF_MASK, 22) }, + .odf_gate = { CLKGEN_FIELD(0x58, 0x1, 0), + CLKGEN_FIELD(0x58, 0x1, 1), + CLKGEN_FIELD(0x58, 0x1, 2), + CLKGEN_FIELD(0x58, 0x1, 3) }, + .ops = &stm_pll3200c32_ops, +}; + +/* 415 specific */ +static const struct clkgen_pll_data st_pll3200c32_a9_415 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0), + .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0), + .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 9), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 22), + .num_odfs = 1, + .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 3) }, + .odf_gate = { CLKGEN_FIELD(0x0, 0x1, 28) }, + .ops = &stm_pll3200c32_ops, +}; + +static const struct clkgen_pll_data st_pll3200c32_ddr_415 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0), + .locked_status = 
CLKGEN_FIELD(0x100, 0x1, 0), + .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25), + .num_odfs = 2, + .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8), + CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) }, + .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28), + CLKGEN_FIELD(0x4, 0x1, 29) }, + .ops = &stm_pll3200c32_ops, +}; + +static const struct clkgen_pll_data st_pll1200c32_gpu_415 = { + .pdn_status = CLKGEN_FIELD(0x144, 0x1, 3), + .locked_status = CLKGEN_FIELD(0x168, 0x1, 0), + .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0), + .num_odfs = 0, + .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) }, + .ops = &st_pll1200c32_ops, +}; + +/* 416 specific */ +static const struct clkgen_pll_data st_pll3200c32_a9_416 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0), + .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0), + .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25), + .num_odfs = 1, + .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8) }, + .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28) }, + .ops = &stm_pll3200c32_ops, +}; + +static const struct clkgen_pll_data st_pll3200c32_ddr_416 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0), + .locked_status = CLKGEN_FIELD(0x10C, 0x1, 0), + .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25), + .num_odfs = 2, + .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8), + CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) }, + .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28), + CLKGEN_FIELD(0x4, 0x1, 29) }, + .ops = &stm_pll3200c32_ops, +}; + +static const struct clkgen_pll_data st_pll1200c32_gpu_416 = { + .pdn_status = CLKGEN_FIELD(0x8E4, 0x1, 3), + .locked_status = CLKGEN_FIELD(0x90C, 0x1, 0), + .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0), + .num_odfs = 0, + .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) }, + .ops = &st_pll1200c32_ops, +}; + +static const struct clkgen_pll_data st_pll3200c32_407_a0 = { + /* 407 A0 */ + .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), + .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24), + .ndiv = CLKGEN_FIELD(0x2a4, C32_NDIV_MASK, 16), + .idf = CLKGEN_FIELD(0x2a4, C32_IDF_MASK, 0x0), + .num_odfs = 1, + .odf = { CLKGEN_FIELD(0x2b4, C32_ODF_MASK, 0) }, + .odf_gate = { CLKGEN_FIELD(0x2b4, 0x1, 6) }, + .ops = &stm_pll3200c32_ops, +}; + +static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = { + /* 407 C0 PLL0 */ + .pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8), + .locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24), + .ndiv = CLKGEN_FIELD(0x2a4, C32_NDIV_MASK, 16), + .idf = CLKGEN_FIELD(0x2a4, C32_IDF_MASK, 0x0), + .num_odfs = 1, + .odf = { CLKGEN_FIELD(0x2b4, C32_ODF_MASK, 0) }, + .odf_gate = { CLKGEN_FIELD(0x2b4, 0x1, 6) }, + .ops = &stm_pll3200c32_ops, +}; + +static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = { + /* 407 C0 PLL1 */ + .pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8), + .locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24), + .ndiv = CLKGEN_FIELD(0x2cc, C32_NDIV_MASK, 16), + .idf = CLKGEN_FIELD(0x2cc, C32_IDF_MASK, 0x0), + .num_odfs = 1, + .odf = { CLKGEN_FIELD(0x2dc, C32_ODF_MASK, 0) }, + .odf_gate = { CLKGEN_FIELD(0x2dc, 0x1, 6) }, + .ops = &stm_pll3200c32_ops, +}; + +static const struct clkgen_pll_data st_pll3200c32_407_a9 = { + /* 407 A9 */ + .pdn_status = CLKGEN_FIELD(0x1a8, 0x1, 0), + .locked_status = CLKGEN_FIELD(0x87c, 0x1, 0), + .ndiv = CLKGEN_FIELD(0x1b0, C32_NDIV_MASK, 0), + .idf = CLKGEN_FIELD(0x1a8, C32_IDF_MASK, 25), + .num_odfs = 1, + .odf = { CLKGEN_FIELD(0x1b0, C32_ODF_MASK, 8) }, + .odf_gate = { 
CLKGEN_FIELD(0x1ac, 0x1, 28) }, + .ops = &stm_pll3200c32_ops, +}; + +/** + * DOC: Clock Generated by PLL, rate set and enabled by bootloader + * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable/disable only ensures parent is enabled + * rate - rate is fixed. No clk_set_rate support + * parent - fixed parent. No clk_set_parent support + */ + +/** + * PLL clock that is integrated in the ClockGenA instances on the STiH415 + * and STiH416. + * + * @hw: handle between common and hardware-specific interfaces. + * @type: PLL instance type. + * @regs_base: base of the PLL configuration register(s). + * + */ +struct clkgen_pll { + struct clk_hw hw; + struct clkgen_pll_data *data; + void __iomem *regs_base; +}; + +#define to_clkgen_pll(_hw) container_of(_hw, struct clkgen_pll, hw) + +static int clkgen_pll_is_locked(struct clk_hw *hw) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + u32 locked = CLKGEN_READ(pll, locked_status); + + return !!locked; +} + +static int clkgen_pll_is_enabled(struct clk_hw *hw) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + u32 poweroff = CLKGEN_READ(pll, pdn_status); + return !poweroff; +} + +unsigned long recalc_stm_pll800c65(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + unsigned long mdiv, ndiv, pdiv; + unsigned long rate; + uint64_t res; + + if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) + return 0; + + pdiv = CLKGEN_READ(pll, pdiv); + mdiv = CLKGEN_READ(pll, mdiv); + ndiv = CLKGEN_READ(pll, ndiv); + + if (!mdiv) + mdiv++; /* mdiv=0 or 1 => MDIV=1 */ + + res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv; + rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv)); + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; + +} + +unsigned long recalc_stm_pll1600c65(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + unsigned long mdiv, ndiv; + unsigned long rate; + + if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) + return 0; + + mdiv = CLKGEN_READ(pll, mdiv); + ndiv = CLKGEN_READ(pll, ndiv); + + if (!mdiv) + mdiv = 1; + + /* Note: input is divided by 1000 to avoid overflow */ + rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000; + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; +} + +unsigned long recalc_stm_pll3200c32(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + unsigned long ndiv, idf; + unsigned long rate = 0; + + if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) + return 0; + + ndiv = CLKGEN_READ(pll, ndiv); + idf = CLKGEN_READ(pll, idf); + + if (idf) + /* Note: input is divided to avoid overflow */ + rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000; + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; +} + +unsigned long recalc_stm_pll1200c32(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + unsigned long odf, ldf, idf; + unsigned long rate; + + if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) + return 0; + + odf = CLKGEN_READ(pll, odf[0]); + ldf = CLKGEN_READ(pll, ldf); + idf = CLKGEN_READ(pll, idf); + + if (!idf) /* idf==0 means 1 */ + idf = 1; + if (!odf) /* odf==0 means 1 */ + odf = 1; + + /* Note: input is divided by 1000 to avoid overflow */ + rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000; + + 
pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; +} + +static const struct clk_ops st_pll1600c65_ops = { + .is_enabled = clkgen_pll_is_enabled, + .recalc_rate = recalc_stm_pll1600c65, +}; + +static const struct clk_ops st_pll800c65_ops = { + .is_enabled = clkgen_pll_is_enabled, + .recalc_rate = recalc_stm_pll800c65, +}; + +static const struct clk_ops stm_pll3200c32_ops = { + .is_enabled = clkgen_pll_is_enabled, + .recalc_rate = recalc_stm_pll3200c32, +}; + +static const struct clk_ops st_pll1200c32_ops = { + .is_enabled = clkgen_pll_is_enabled, + .recalc_rate = recalc_stm_pll1200c32, +}; + +static struct clk * __init clkgen_pll_register(const char *parent_name, + struct clkgen_pll_data *pll_data, + void __iomem *reg, + const char *clk_name) +{ + struct clkgen_pll *pll; + struct clk *clk; + struct clk_init_data init; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + init.name = clk_name; + init.ops = pll_data->ops; + + init.flags = CLK_IS_BASIC; + init.parent_names = &parent_name; + init.num_parents = 1; + + pll->data = pll_data; + pll->regs_base = reg; + pll->hw.init = &init; + + clk = clk_register(NULL, &pll->hw); + if (IS_ERR(clk)) { + kfree(pll); + return clk; + } + + pr_debug("%s: parent %s rate %lu\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + clk_get_rate(clk)); + + return clk; +} + +static struct clk * __init clkgen_c65_lsdiv_register(const char *parent_name, + const char *clk_name) +{ + struct clk *clk; + + clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0, 1, 2); + if (IS_ERR(clk)) + return clk; + + pr_debug("%s: parent %s rate %lu\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + clk_get_rate(clk)); + return clk; +} + +static void __iomem * __init clkgen_get_register_base( + struct device_node *np) +{ + struct device_node *pnode; + void __iomem *reg = NULL; + + pnode = of_get_parent(np); + if (!pnode) + return NULL; + + reg = of_iomap(pnode, 0); + + of_node_put(pnode); + return reg; +} + +#define CLKGENAx_PLL0_OFFSET 0x0 +#define CLKGENAx_PLL1_OFFSET 0x4 + +static void __init clkgena_c65_pll_setup(struct device_node *np) +{ + const int num_pll_outputs = 3; + struct clk_onecell_data *clk_data; + const char *parent_name; + void __iomem *reg; + const char *clk_name; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + reg = clkgen_get_register_base(np); + if (!reg) + return; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return; + + clk_data->clk_num = num_pll_outputs; + clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) + goto err; + + if (of_property_read_string_index(np, "clock-output-names", + 0, &clk_name)) + goto err; + + /* + * PLL0 HS (high speed) output + */ + clk_data->clks[0] = clkgen_pll_register(parent_name, + (struct clkgen_pll_data *) &st_pll1600c65_ax, + reg + CLKGENAx_PLL0_OFFSET, clk_name); + + if (IS_ERR(clk_data->clks[0])) + goto err; + + if (of_property_read_string_index(np, "clock-output-names", + 1, &clk_name)) + goto err; + + /* + * PLL0 LS (low speed) output, which is a fixed divide by 2 of the + * high speed output. 
+ */ + clk_data->clks[1] = clkgen_c65_lsdiv_register(__clk_get_name + (clk_data->clks[0]), + clk_name); + + if (IS_ERR(clk_data->clks[1])) + goto err; + + if (of_property_read_string_index(np, "clock-output-names", + 2, &clk_name)) + goto err; + + /* + * PLL1 output + */ + clk_data->clks[2] = clkgen_pll_register(parent_name, + (struct clkgen_pll_data *) &st_pll800c65_ax, + reg + CLKGENAx_PLL1_OFFSET, clk_name); + + if (IS_ERR(clk_data->clks[2])) + goto err; + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + return; + +err: + kfree(clk_data->clks); + kfree(clk_data); +} +CLK_OF_DECLARE(clkgena_c65_plls, + "st,clkgena-plls-c65", clkgena_c65_pll_setup); + +static struct clk * __init clkgen_odf_register(const char *parent_name, + void * __iomem reg, + struct clkgen_pll_data *pll_data, + int odf, + spinlock_t *odf_lock, + const char *odf_name) +{ + struct clk *clk; + unsigned long flags; + struct clk_gate *gate; + struct clk_divider *div; + + flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_GATE; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + gate->flags = CLK_GATE_SET_TO_DISABLE; + gate->reg = reg + pll_data->odf_gate[odf].offset; + gate->bit_idx = pll_data->odf_gate[odf].shift; + gate->lock = odf_lock; + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) { + kfree(gate); + return ERR_PTR(-ENOMEM); + } + + div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; + div->reg = reg + pll_data->odf[odf].offset; + div->shift = pll_data->odf[odf].shift; + div->width = fls(pll_data->odf[odf].mask); + div->lock = odf_lock; + + clk = clk_register_composite(NULL, odf_name, &parent_name, 1, + NULL, NULL, + &div->hw, &clk_divider_ops, + &gate->hw, &clk_gate_ops, + flags); + if (IS_ERR(clk)) + return clk; + + pr_debug("%s: parent %s rate %lu\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + clk_get_rate(clk)); + return clk; +} + +static const struct of_device_id c32_pll_of_match[] = { + { + .compatible = "st,plls-c32-a1x-0", + .data = &st_pll3200c32_a1x_0, + }, + { + .compatible = "st,plls-c32-a1x-1", + .data = &st_pll3200c32_a1x_1, + }, + { + .compatible = "st,stih415-plls-c32-a9", + .data = &st_pll3200c32_a9_415, + }, + { + .compatible = "st,stih415-plls-c32-ddr", + .data = &st_pll3200c32_ddr_415, + }, + { + .compatible = "st,stih416-plls-c32-a9", + .data = &st_pll3200c32_a9_416, + }, + { + .compatible = "st,stih416-plls-c32-ddr", + .data = &st_pll3200c32_ddr_416, + }, + { + .compatible = "st,stih407-plls-c32-a0", + .data = &st_pll3200c32_407_a0, + }, + { + .compatible = "st,stih407-plls-c32-c0_0", + .data = &st_pll3200c32_407_c0_0, + }, + { + .compatible = "st,stih407-plls-c32-c0_1", + .data = &st_pll3200c32_407_c0_1, + }, + { + .compatible = "st,stih407-plls-c32-a9", + .data = &st_pll3200c32_407_a9, + }, + {} +}; + +static void __init clkgen_c32_pll_setup(struct device_node *np) +{ + const struct of_device_id *match; + struct clk *clk; + const char *parent_name, *pll_name; + void __iomem *pll_base; + int num_odfs, odf; + struct clk_onecell_data *clk_data; + struct clkgen_pll_data *data; + + match = of_match_node(c32_pll_of_match, np); + if (!match) { + pr_err("%s: No matching data\n", __func__); + return; + } + + data = (struct clkgen_pll_data *) match->data; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + pll_base = clkgen_get_register_base(np); + if (!pll_base) + return; + + clk = clkgen_pll_register(parent_name, data, pll_base, np->name); + if (IS_ERR(clk)) + return; + + pll_name = 
__clk_get_name(clk); + + num_odfs = data->num_odfs; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return; + + clk_data->clk_num = num_odfs; + clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) + goto err; + + for (odf = 0; odf < num_odfs; odf++) { + struct clk *clk; + const char *clk_name; + + if (of_property_read_string_index(np, "clock-output-names", + odf, &clk_name)) + return; + + clk = clkgen_odf_register(pll_name, pll_base, data, + odf, &clkgena_c32_odf_lock, clk_name); + if (IS_ERR(clk)) + goto err; + + clk_data->clks[odf] = clk; + } + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + return; + +err: + kfree(pll_name); + kfree(clk_data->clks); + kfree(clk_data); +} +CLK_OF_DECLARE(clkgen_c32_pll, "st,clkgen-plls-c32", clkgen_c32_pll_setup); + +static const struct of_device_id c32_gpu_pll_of_match[] = { + { + .compatible = "st,stih415-gpu-pll-c32", + .data = &st_pll1200c32_gpu_415, + }, + { + .compatible = "st,stih416-gpu-pll-c32", + .data = &st_pll1200c32_gpu_416, + }, + {} +}; + +static void __init clkgengpu_c32_pll_setup(struct device_node *np) +{ + const struct of_device_id *match; + struct clk *clk; + const char *parent_name; + void __iomem *reg; + const char *clk_name; + struct clkgen_pll_data *data; + + match = of_match_node(c32_gpu_pll_of_match, np); + if (!match) { + pr_err("%s: No matching data\n", __func__); + return; + } + + data = (struct clkgen_pll_data *)match->data; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + reg = clkgen_get_register_base(np); + if (!reg) + return; + + if (of_property_read_string_index(np, "clock-output-names", + 0, &clk_name)) + return; + + /* + * PLL 1200MHz output + */ + clk = clkgen_pll_register(parent_name, data, reg, clk_name); + + if (!IS_ERR(clk)) + of_clk_add_provider(np, of_clk_src_simple_get, clk); + + return; +} +CLK_OF_DECLARE(clkgengpu_c32_pll, + "st,clkgengpu-pll-c32", clkgengpu_c32_pll_setup); diff --git a/kernel/drivers/clk/st/clkgen.h b/kernel/drivers/clk/st/clkgen.h new file mode 100644 index 000000000..35c863295 --- /dev/null +++ b/kernel/drivers/clk/st/clkgen.h @@ -0,0 +1,48 @@ +/************************************************************************ +File : Clock H/w specific Information + +Author: Pankaj Dev <pankaj.dev@st.com> + +Copyright (C) 2014 STMicroelectronics +************************************************************************/ + +#ifndef __CLKGEN_INFO_H +#define __CLKGEN_INFO_H + +struct clkgen_field { + unsigned int offset; + unsigned int mask; + unsigned int shift; +}; + +static inline unsigned long clkgen_read(void __iomem *base, + struct clkgen_field *field) +{ + return (readl(base + field->offset) >> field->shift) & field->mask; +} + + +static inline void clkgen_write(void __iomem *base, struct clkgen_field *field, + unsigned long val) +{ + writel((readl(base + field->offset) & + ~(field->mask << field->shift)) | (val << field->shift), + base + field->offset); + + return; +} + +#define CLKGEN_FIELD(_offset, _mask, _shift) { \ + .offset = _offset, \ + .mask = _mask, \ + .shift = _shift, \ + } + +#define CLKGEN_READ(pll, field) clkgen_read(pll->regs_base, \ + &pll->data->field) + +#define CLKGEN_WRITE(pll, field, val) clkgen_write(pll->regs_base, \ + &pll->data->field, val) + +#endif /*__CLKGEN_INFO_H*/ + |
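For reference, a minimal usage sketch of the clkgen.h accessors defined above, assuming a caller that has already ioremap'ed the register bank. The field name, offset, mask and shift are invented for illustration and do not come from any ST datasheet; the in-tree drivers normally reach these helpers through the CLKGEN_READ()/CLKGEN_WRITE() wrappers, which pass pll->regs_base and &pll->data->field for you.

/*
 * Hypothetical example only: a 3-bit divider field at register
 * offset 0x14, occupying bits [6:4].
 */
#include <linux/io.h>

#include "clkgen.h"

static struct clkgen_field example_div = CLKGEN_FIELD(0x14, 0x7, 4);

static unsigned long example_get_div(void __iomem *base)
{
	/* clkgen_read() shifts and masks, returning the raw field value. */
	return clkgen_read(base, &example_div);
}

static void example_set_div(void __iomem *base, unsigned long val)
{
	/* clkgen_write() is a read-modify-write that preserves other bits. */
	clkgen_write(base, &example_div, val);
}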