Diffstat (limited to 'drivers')
140 files changed, 14044 insertions, 2737 deletions
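Editor's orientation note (not part of the patch): the clock changes below introduce a small Common Clock Framework [CCF] core for U-Boot -- clk_register() and clk_generic_get_rate() in drivers/clk/clk.c, plus clk_get_parent(), clk_get_parent_rate() and clk_get_by_id() in clk-uclass.c -- which the i.MX6Q and sandbox drivers then build on. The sketch here shows, under stated assumptions, how those helpers fit together; it mirrors the patterns in clk-imx6q.c and clk_sandbox_ccf.c rather than defining them. "demo_clk_passthrough", DEMO_CLK_SPI and the parent clock name "osc" are hypothetical/illustrative (the real drivers use the clk_dm() helper, whose definition is outside this excerpt, to record the numeric clock ID; the sketch sets clk->id directly instead).

/*
 * Illustrative sketch only -- not part of the patch.
 * Assumes a parent clock udevice named "osc" already exists,
 * as in the i.MX6Q and sandbox probe functions below.
 */
#include <common.h>
#include <dm.h>
#include <clk.h>
#include <clk-uclass.h>

#define DEMO_DRV_NAME	"demo_clk_passthrough"
#define DEMO_CLK_SPI	42	/* hypothetical binding ID, for lookup only */

/* The clock's rate is simply its parent's rate, via clk_generic_get_rate() */
static const struct clk_ops demo_clk_ops = {
	.get_rate = clk_generic_get_rate,
};

U_BOOT_DRIVER(demo_clk_passthrough) = {
	.name	= DEMO_DRV_NAME,
	.id	= UCLASS_CLK,
	.ops	= &demo_clk_ops,
};

static struct clk demo_clk;

/* Registration side: what a SoC clock driver's probe would do per clock */
static int demo_register(void)
{
	int ret;

	/* Bind a struct clk to this driver, parented to the "osc" clock */
	ret = clk_register(&demo_clk, DEMO_DRV_NAME, "spi_demo", "osc");
	if (ret)
		return ret;

	/* Record the numeric ID so clk_get_by_id() can find it later */
	demo_clk.id = DEMO_CLK_SPI;

	return 0;
}

/* Consumer side: forward a request by ID, as imx6q_clk_get_rate() does */
static ulong demo_get_rate_by_id(ulong id)
{
	struct clk *c;
	int ret;

	ret = clk_get_by_id(id, &c);
	if (ret)
		return ret;

	/* clk_get_rate() walks up via clk_get_parent_rate() as needed */
	return clk_get_rate(c);
}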
diff --git a/drivers/Makefile b/drivers/Makefile index 603aa98590..41933605ce 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -4,7 +4,9 @@ obj-$(CONFIG_$(SPL_TPL_)CLK) += clk/ obj-$(CONFIG_$(SPL_TPL_)DM) += core/ obj-$(CONFIG_$(SPL_TPL_)DFU) += dfu/ obj-$(CONFIG_$(SPL_TPL_)GPIO_SUPPORT) += gpio/ -obj-$(CONFIG_$(SPL_TPL_)DRIVERS_MISC_SUPPORT) += misc/ sysreset/ firmware/ +obj-$(CONFIG_$(SPL_TPL_)DRIVERS_MISC_SUPPORT) += misc/ +obj-$(CONFIG_$(SPL_TPL_)SYSRESET) += sysreset/ +obj-$(CONFIG_$(SPL_TPL_)FIRMWARE) +=firmware/ obj-$(CONFIG_$(SPL_TPL_)I2C_SUPPORT) += i2c/ obj-$(CONFIG_$(SPL_TPL_)INPUT) += input/ obj-$(CONFIG_$(SPL_TPL_)LED) += led/ @@ -81,7 +83,6 @@ obj-y += cache/ obj-$(CONFIG_CPU) += cpu/ obj-y += crypto/ obj-$(CONFIG_FASTBOOT) += fastboot/ -obj-y += firmware/ obj-$(CONFIG_FPGA) += fpga/ obj-y += misc/ obj-$(CONFIG_MMC) += mmc/ @@ -96,7 +97,6 @@ obj-y += rtc/ obj-y += scsi/ obj-y += sound/ obj-y += spmi/ -obj-y += sysreset/ obj-y += video/ obj-y += watchdog/ obj-$(CONFIG_QE) += qe/ diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 96969b9e30..5e92446c18 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -46,6 +46,20 @@ config CLK_BOSTON help Enable this to support the clocks +config SPL_CLK_CCF + bool "SPL Common Clock Framework [CCF] support " + depends on SPL_CLK_IMX6Q + help + Enable this option if you want to (re-)use the Linux kernel's Common + Clock Framework [CCF] code in U-Boot's SPL. + +config CLK_CCF + bool "Common Clock Framework [CCF] support " + depends on CLK_IMX6Q || SANDBOX_CLK_CCF + help + Enable this option if you want to (re-)use the Linux kernel's Common + Clock Framework [CCF] code in U-Boot's clock driver. + config CLK_STM32F bool "Enable clock driver support for STM32F family" depends on CLK && (STM32F7 || STM32F4) @@ -98,6 +112,7 @@ config CLK_STM32MP1 Enable the STM32 clock (RCC) driver. Enable support for manipulating STM32MP1's on-SoC clocks. +source "drivers/clk/analogbits/Kconfig" source "drivers/clk/at91/Kconfig" source "drivers/clk/exynos/Kconfig" source "drivers/clk/imx/Kconfig" @@ -124,4 +139,12 @@ config CLK_MPC83XX help Support for the clock driver of the MPC83xx series of SoCs. +config SANDBOX_CLK_CCF + bool "Sandbox Common Clock Framework [CCF] support " + depends on SANDBOX + select CLK_CCF + help + Enable this option if you want to test the Linux kernel's Common + Clock Framework [CCF] code in U-Boot's Sandbox clock driver. 
+ endmenu diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 719b9b8e02..b7fec605c6 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -7,7 +7,10 @@ obj-$(CONFIG_$(SPL_TPL_)CLK) += clk-uclass.o obj-$(CONFIG_$(SPL_TPL_)CLK) += clk_fixed_rate.o obj-$(CONFIG_$(SPL_TPL_)CLK) += clk_fixed_factor.o +obj-$(CONFIG_$(SPL_TPL_)CLK_CCF) += clk.o clk-divider.o clk-mux.o +obj-$(CONFIG_$(SPL_TPL_)CLK_CCF) += clk-fixed-factor.o +obj-y += analogbits/ obj-y += imx/ obj-y += tegra/ obj-$(CONFIG_ARCH_ASPEED) += aspeed/ @@ -36,5 +39,6 @@ obj-$(CONFIG_ICS8N3QV01) += ics8n3qv01.o obj-$(CONFIG_MACH_PIC32) += clk_pic32.o obj-$(CONFIG_SANDBOX) += clk_sandbox.o obj-$(CONFIG_SANDBOX) += clk_sandbox_test.o +obj-$(CONFIG_SANDBOX_CLK_CCF) += clk_sandbox_ccf.o obj-$(CONFIG_STM32H7) += clk_stm32h7.o obj-$(CONFIG_CLK_TI_SCI) += clk-ti-sci.o diff --git a/drivers/clk/analogbits/Kconfig b/drivers/clk/analogbits/Kconfig new file mode 100644 index 0000000000..1d25e6f124 --- /dev/null +++ b/drivers/clk/analogbits/Kconfig @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 + +config CLK_ANALOGBITS_WRPLL_CLN28HPC + bool diff --git a/drivers/clk/analogbits/Makefile b/drivers/clk/analogbits/Makefile new file mode 100644 index 0000000000..ec1bb4092b --- /dev/null +++ b/drivers/clk/analogbits/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0+ + +obj-$(CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC) += wrpll-cln28hpc.o diff --git a/drivers/clk/sifive/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c index d377849693..776ead319a 100644 --- a/drivers/clk/sifive/wrpll-cln28hpc.c +++ b/drivers/clk/analogbits/wrpll-cln28hpc.c @@ -1,20 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2019 Western Digital Corporation or its affiliates. - * - * Copyright (C) 2018 SiFive, Inc. + * Copyright (C) 2018-2019 SiFive, Inc. * Wesley Terpstra * Paul Walmsley * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * This library supports configuration parsing and reprogramming of * the CLN28HPC variant of the Analog Bits Wide Range PLL. The * intention is for this library to be reusable for any device that @@ -29,14 +18,14 @@ * References: * - Analog Bits "Wide Range PLL Datasheet", version 2015.10.01 * - SiFive FU540-C000 Manual v1p0, Chapter 7 "Clocking and Reset" + * https://static.dev.sifive.com/FU540-C000-v1.0.pdf */ #include <linux/bug.h> #include <linux/err.h> #include <linux/log2.h> #include <linux/math64.h> - -#include "analogbits-wrpll-cln28hpc.h" +#include <linux/clk/analogbits-wrpll-cln28hpc.h> /* MIN_INPUT_FREQ: minimum input clock frequency, in Hz (Fref_min) */ #define MIN_INPUT_FREQ 7000000 @@ -85,40 +74,38 @@ * range selection. * * Return: The RANGE value to be presented to the PLL configuration inputs, - * or -1 upon error. + * or a negative return code upon error. 
*/ static int __wrpll_calc_filter_range(unsigned long post_divr_freq) { - u8 range; - if (post_divr_freq < MIN_POST_DIVR_FREQ || post_divr_freq > MAX_POST_DIVR_FREQ) { WARN(1, "%s: post-divider reference freq out of range: %lu", __func__, post_divr_freq); - return -1; + return -ERANGE; } - if (post_divr_freq < 11000000) - range = 1; - else if (post_divr_freq < 18000000) - range = 2; - else if (post_divr_freq < 30000000) - range = 3; - else if (post_divr_freq < 50000000) - range = 4; - else if (post_divr_freq < 80000000) - range = 5; - else if (post_divr_freq < 130000000) - range = 6; - else - range = 7; - - return range; + switch (post_divr_freq) { + case 0 ... 10999999: + return 1; + case 11000000 ... 17999999: + return 2; + case 18000000 ... 29999999: + return 3; + case 30000000 ... 49999999: + return 4; + case 50000000 ... 79999999: + return 5; + case 80000000 ... 129999999: + return 6; + } + + return 7; } /** * __wrpll_calc_fbdiv() - return feedback fixed divide value - * @c: ptr to a struct analogbits_wrpll_cfg record to read from + * @c: ptr to a struct wrpll_cfg record to read from * * The internal feedback path includes a fixed by-two divider; the * external feedback path does not. Return the appropriate divider @@ -133,7 +120,7 @@ static int __wrpll_calc_filter_range(unsigned long post_divr_freq) * Return: 2 if internal feedback is enabled or 1 if external feedback * is enabled. */ -static u8 __wrpll_calc_fbdiv(struct analogbits_wrpll_cfg *c) +static u8 __wrpll_calc_fbdiv(const struct wrpll_cfg *c) { return (c->flags & WRPLL_FLAGS_INT_FEEDBACK_MASK) ? 2 : 1; } @@ -173,7 +160,7 @@ static u8 __wrpll_calc_divq(u32 target_rate, u64 *vco_rate) *vco_rate = MIN_VCO_FREQ; } else { divq = ilog2(s); - *vco_rate = target_rate << divq; + *vco_rate = (u64)target_rate << divq; } wcd_out: @@ -182,7 +169,7 @@ wcd_out: /** * __wrpll_update_parent_rate() - update PLL data when parent rate changes - * @c: ptr to a struct analogbits_wrpll_cfg record to write PLL data to + * @c: ptr to a struct wrpll_cfg record to write PLL data to * @parent_rate: PLL input refclk rate (pre-R-divider) * * Pre-compute some data used by the PLL configuration algorithm when @@ -190,46 +177,40 @@ wcd_out: * computation when the parent rate remains constant - expected to be * the common case. * - * Returns: 0 upon success or -1 if the reference clock rate is out of range. + * Returns: 0 upon success or -ERANGE if the reference clock rate is + * out of range. 
*/ -static int __wrpll_update_parent_rate(struct analogbits_wrpll_cfg *c, +static int __wrpll_update_parent_rate(struct wrpll_cfg *c, unsigned long parent_rate) { u8 max_r_for_parent; if (parent_rate > MAX_INPUT_FREQ || parent_rate < MIN_POST_DIVR_FREQ) - return -1; + return -ERANGE; - c->_parent_rate = parent_rate; + c->parent_rate = parent_rate; max_r_for_parent = div_u64(parent_rate, MIN_POST_DIVR_FREQ); - c->_max_r = min_t(u8, MAX_DIVR_DIVISOR, max_r_for_parent); + c->max_r = min_t(u8, MAX_DIVR_DIVISOR, max_r_for_parent); - /* Round up */ - c->_init_r = div_u64(parent_rate + MAX_POST_DIVR_FREQ - 1, - MAX_POST_DIVR_FREQ); + c->init_r = DIV_ROUND_UP_ULL(parent_rate, MAX_POST_DIVR_FREQ); return 0; } -/* - * Public functions - */ - /** - * analogbits_wrpll_configure() - compute PLL configuration for a target rate - * @c: ptr to a struct analogbits_wrpll_cfg record to write into + * wrpll_configure() - compute PLL configuration for a target rate + * @c: ptr to a struct wrpll_cfg record to write into * @target_rate: target PLL output clock rate (post-Q-divider) * @parent_rate: PLL input refclk rate (pre-R-divider) * - * Given a pointer to a PLL context @c, a desired PLL target output - * rate @target_rate, and a reference clock input rate @parent_rate, - * compute the appropriate PLL signal configuration values. PLL - * reprogramming is not glitchless, so the caller should switch any - * downstream logic to a different clock source or clock-gate it - * before presenting these values to the PLL configuration signals. + * Compute the appropriate PLL signal configuration values and store + * in PLL context @c. PLL reprogramming is not glitchless, so the + * caller should switch any downstream logic to a different clock + * source or clock-gate it before presenting these values to the PLL + * configuration signals. * * The caller must pass this function a pre-initialized struct - * analogbits_wrpll_cfg record: either initialized to zero (with the + * wrpll_cfg record: either initialized to zero (with the * exception of the .name and .flags fields) or read from the PLL. * * Context: Any context. Caller must protect the memory pointed to by @c @@ -237,41 +218,26 @@ static int __wrpll_update_parent_rate(struct analogbits_wrpll_cfg *c, * * Return: 0 upon success; anything else upon failure. 
*/ -int analogbits_wrpll_configure_for_rate(struct analogbits_wrpll_cfg *c, - u32 target_rate, - unsigned long parent_rate) +int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate, + unsigned long parent_rate) { unsigned long ratio; u64 target_vco_rate, delta, best_delta, f_pre_div, vco, vco_pre; - u32 best_f, f, post_divr_freq, fbcfg; + u32 best_f, f, post_divr_freq; u8 fbdiv, divq, best_r, r; - - if (!c) - return -1; + int range; if (c->flags == 0) { WARN(1, "%s called with uninitialized PLL config", __func__); - return -1; - } - - fbcfg = WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK; - if ((c->flags & fbcfg) == fbcfg) { - WARN(1, "%s called with invalid PLL config", __func__); - return -1; - } - - if (c->flags == WRPLL_FLAGS_EXT_FEEDBACK_MASK) { - WARN(1, "%s: external feedback mode not currently supported", - __func__); - return -1; + return -EINVAL; } /* Initialize rounding data if it hasn't been initialized already */ - if (parent_rate != c->_parent_rate) { + if (parent_rate != c->parent_rate) { if (__wrpll_update_parent_rate(c, parent_rate)) { pr_err("%s: PLL input rate is out of range\n", __func__); - return -1; + return -ERANGE; } } @@ -282,11 +248,12 @@ int analogbits_wrpll_configure_for_rate(struct analogbits_wrpll_cfg *c, c->flags |= WRPLL_FLAGS_BYPASS_MASK; return 0; } + c->flags &= ~WRPLL_FLAGS_BYPASS_MASK; /* Calculate the Q shift and target VCO rate */ divq = __wrpll_calc_divq(target_rate, &target_vco_rate); - if (divq == 0) + if (!divq) return -1; c->divq = divq; @@ -302,8 +269,7 @@ int analogbits_wrpll_configure_for_rate(struct analogbits_wrpll_cfg *c, * Consider all values for R which land within * [MIN_POST_DIVR_FREQ, MAX_POST_DIVR_FREQ]; prefer smaller R */ - for (r = c->_init_r; r <= c->_max_r; ++r) { - /* What is the best F we can pick in this case? */ + for (r = c->init_r; r <= c->max_r; ++r) { f_pre_div = ratio * r; f = (f_pre_div + (1 << ROUND_SHIFT)) >> ROUND_SHIFT; f >>= (fbdiv - 1); @@ -335,46 +301,54 @@ int analogbits_wrpll_configure_for_rate(struct analogbits_wrpll_cfg *c, post_divr_freq = div_u64(parent_rate, best_r); /* Pick the best PLL jitter filter */ - c->range = __wrpll_calc_filter_range(post_divr_freq); + range = __wrpll_calc_filter_range(post_divr_freq); + if (range < 0) + return range; + c->range = range; return 0; } /** - * analogbits_wrpll_calc_output_rate() - calculate the PLL's target output rate - * @c: ptr to a struct analogbits_wrpll_cfg record to read from + * wrpll_calc_output_rate() - calculate the PLL's target output rate + * @c: ptr to a struct wrpll_cfg record to read from * @parent_rate: PLL refclk rate * * Given a pointer to the PLL's current input configuration @c and the * PLL's input reference clock rate @parent_rate (before the R * pre-divider), calculate the PLL's output clock rate (after the Q - * post-divider) + * post-divider). * * Context: Any context. Caller must protect the memory pointed to by @c * from simultaneous modification. * - * Return: the PLL's output clock rate, in Hz. + * Return: the PLL's output clock rate, in Hz. The return value from + * this function is intended to be convenient to pass directly + * to the Linux clock framework; thus there is no explicit + * error return value. 
*/ -unsigned long analogbits_wrpll_calc_output_rate(struct analogbits_wrpll_cfg *c, - unsigned long parent_rate) +unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c, + unsigned long parent_rate) { u8 fbdiv; u64 n; - WARN(c->flags & WRPLL_FLAGS_EXT_FEEDBACK_MASK, - "external feedback mode not yet supported"); + if (c->flags & WRPLL_FLAGS_EXT_FEEDBACK_MASK) { + WARN(1, "external feedback mode not yet supported"); + return ULONG_MAX; + } fbdiv = __wrpll_calc_fbdiv(c); n = parent_rate * fbdiv * (c->divf + 1); - n = div_u64(n, (c->divr + 1)); + n = div_u64(n, c->divr + 1); n >>= c->divq; return n; } /** - * analogbits_wrpll_calc_max_lock_us() - return the time for the PLL to lock - * @c: ptr to a struct analogbits_wrpll_cfg record to read from + * wrpll_calc_max_lock_us() - return the time for the PLL to lock + * @c: ptr to a struct wrpll_cfg record to read from * * Return the minimum amount of time (in microseconds) that the caller * must wait after reprogramming the PLL to ensure that it is locked @@ -384,7 +358,7 @@ unsigned long analogbits_wrpll_calc_output_rate(struct analogbits_wrpll_cfg *c, * Return: the minimum amount of time the caller must wait for the PLL * to lock (in microseconds) */ -unsigned int analogbits_wrpll_calc_max_lock_us(struct analogbits_wrpll_cfg *c) +unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c) { return MAX_LOCK_US; } diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c new file mode 100644 index 0000000000..6921c76a48 --- /dev/null +++ b/drivers/clk/clk-divider.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + * + * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> + * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org> + * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> + * + */ + +#include <common.h> +#include <asm/io.h> +#include <malloc.h> +#include <clk-uclass.h> +#include <dm/device.h> +#include <dm/uclass.h> +#include <dm/lists.h> +#include <dm/device-internal.h> +#include <linux/clk-provider.h> +#include <div64.h> +#include <clk.h> +#include "clk.h" + +#define UBOOT_DM_CLK_CCF_DIVIDER "ccf_clk_divider" + +static unsigned int _get_table_div(const struct clk_div_table *table, + unsigned int val) +{ + const struct clk_div_table *clkt; + + for (clkt = table; clkt->div; clkt++) + if (clkt->val == val) + return clkt->div; + return 0; +} + +static unsigned int _get_div(const struct clk_div_table *table, + unsigned int val, unsigned long flags, u8 width) +{ + if (flags & CLK_DIVIDER_ONE_BASED) + return val; + if (flags & CLK_DIVIDER_POWER_OF_TWO) + return 1 << val; + if (flags & CLK_DIVIDER_MAX_AT_ZERO) + return val ? 
val : clk_div_mask(width) + 1; + if (table) + return _get_table_div(table, val); + return val + 1; +} + +unsigned long divider_recalc_rate(struct clk *hw, unsigned long parent_rate, + unsigned int val, + const struct clk_div_table *table, + unsigned long flags, unsigned long width) +{ + unsigned int div; + + div = _get_div(table, val, flags, width); + if (!div) { + WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO), + "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", + clk_hw_get_name(hw)); + return parent_rate; + } + + return DIV_ROUND_UP_ULL((u64)parent_rate, div); +} + +static ulong clk_divider_recalc_rate(struct clk *clk) +{ + struct clk_divider *divider = + to_clk_divider(dev_get_clk_ptr(clk->dev)); + unsigned long parent_rate = clk_get_parent_rate(clk); + unsigned int val; + +#if CONFIG_IS_ENABLED(SANDBOX_CLK_CCF) + val = divider->io_divider_val; +#else + val = readl(divider->reg); +#endif + val >>= divider->shift; + val &= clk_div_mask(divider->width); + + return divider_recalc_rate(clk, parent_rate, val, divider->table, + divider->flags, divider->width); +} + +const struct clk_ops clk_divider_ops = { + .get_rate = clk_divider_recalc_rate, +}; + +static struct clk *_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table) +{ + struct clk_divider *div; + struct clk *clk; + int ret; + + if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) { + if (width + shift > 16) { + pr_warn("divider value exceeds LOWORD field\n"); + return ERR_PTR(-EINVAL); + } + } + + /* allocate the divider */ + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + /* struct clk_divider assignments */ + div->reg = reg; + div->shift = shift; + div->width = width; + div->flags = clk_divider_flags; + div->table = table; +#if CONFIG_IS_ENABLED(SANDBOX_CLK_CCF) + div->io_divider_val = *(u32 *)reg; +#endif + + /* register the clock */ + clk = &div->clk; + + ret = clk_register(clk, UBOOT_DM_CLK_CCF_DIVIDER, name, parent_name); + if (ret) { + kfree(div); + return ERR_PTR(ret); + } + + return clk; +} + +struct clk *clk_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags) +{ + struct clk *clk; + + clk = _register_divider(dev, name, parent_name, flags, reg, shift, + width, clk_divider_flags, NULL); + if (IS_ERR(clk)) + return ERR_CAST(clk); + return clk; +} + +U_BOOT_DRIVER(ccf_clk_divider) = { + .name = UBOOT_DM_CLK_CCF_DIVIDER, + .id = UCLASS_CLK, + .ops = &clk_divider_ops, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c new file mode 100644 index 0000000000..711b0588bc --- /dev/null +++ b/drivers/clk/clk-fixed-factor.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + * + * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> + */ +#include <common.h> +#include <malloc.h> +#include <clk-uclass.h> +#include <dm/device.h> +#include <linux/clk-provider.h> +#include <div64.h> +#include <clk.h> +#include "clk.h" + +#define UBOOT_DM_CLK_IMX_FIXED_FACTOR "ccf_clk_fixed_factor" + +static ulong clk_factor_recalc_rate(struct clk *clk) +{ + struct clk_fixed_factor *fix = + to_clk_fixed_factor(dev_get_clk_ptr(clk->dev)); + unsigned long parent_rate = 
clk_get_parent_rate(clk); + unsigned long long int rate; + + rate = (unsigned long long int)parent_rate * fix->mult; + do_div(rate, fix->div); + return (ulong)rate; +} + +const struct clk_ops ccf_clk_fixed_factor_ops = { + .get_rate = clk_factor_recalc_rate, +}; + +struct clk *clk_hw_register_fixed_factor(struct device *dev, + const char *name, const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div) +{ + struct clk_fixed_factor *fix; + struct clk *clk; + int ret; + + fix = kzalloc(sizeof(*fix), GFP_KERNEL); + if (!fix) + return ERR_PTR(-ENOMEM); + + /* struct clk_fixed_factor assignments */ + fix->mult = mult; + fix->div = div; + clk = &fix->clk; + + ret = clk_register(clk, UBOOT_DM_CLK_IMX_FIXED_FACTOR, name, + parent_name); + if (ret) { + kfree(fix); + return ERR_PTR(ret); + } + + return clk; +} + +struct clk *clk_register_fixed_factor(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div) +{ + struct clk *clk; + + clk = clk_hw_register_fixed_factor(dev, name, parent_name, flags, mult, + div); + if (IS_ERR(clk)) + return ERR_CAST(clk); + return clk; +} + +U_BOOT_DRIVER(imx_clk_fixed_factor) = { + .name = UBOOT_DM_CLK_IMX_FIXED_FACTOR, + .id = UCLASS_CLK, + .ops = &ccf_clk_fixed_factor_ops, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c new file mode 100644 index 0000000000..3c075aa09e --- /dev/null +++ b/drivers/clk/clk-mux.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + * + * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> + * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org> + * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> + * + * Simple multiplexer clock implementation + */ + +/* + * U-Boot CCF porting node: + * + * The Linux kernel - as of tag: 5.0-rc3 is using also the imx_clk_fixup_mux() + * version of CCF mux. It is used on e.g. imx6q to provide fixes (like + * imx_cscmr1_fixup) for broken HW. + * + * At least for IMX6Q (but NOT IMX6QP) it is important when we set the parent + * clock. 
+ */ + +#include <common.h> +#include <asm/io.h> +#include <malloc.h> +#include <clk-uclass.h> +#include <dm/device.h> +#include <linux/clk-provider.h> +#include <clk.h> +#include "clk.h" + +#define UBOOT_DM_CLK_CCF_MUX "ccf_clk_mux" + +int clk_mux_val_to_index(struct clk *clk, u32 *table, unsigned int flags, + unsigned int val) +{ + struct clk_mux *mux = to_clk_mux(clk); + int num_parents = mux->num_parents; + + if (table) { + int i; + + for (i = 0; i < num_parents; i++) + if (table[i] == val) + return i; + return -EINVAL; + } + + if (val && (flags & CLK_MUX_INDEX_BIT)) + val = ffs(val) - 1; + + if (val && (flags & CLK_MUX_INDEX_ONE)) + val--; + + if (val >= num_parents) + return -EINVAL; + + return val; +} + +static u8 clk_mux_get_parent(struct clk *clk) +{ + struct clk_mux *mux = to_clk_mux(clk); + u32 val; + +#if CONFIG_IS_ENABLED(SANDBOX_CLK_CCF) + val = mux->io_mux_val; +#else + val = readl(mux->reg); +#endif + val >>= mux->shift; + val &= mux->mask; + + return clk_mux_val_to_index(clk, mux->table, mux->flags, val); +} + +const struct clk_ops clk_mux_ops = { + .get_rate = clk_generic_get_rate, +}; + +struct clk *clk_hw_register_mux_table(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table) +{ + struct clk_mux *mux; + struct clk *clk; + u8 width = 0; + int ret; + + if (clk_mux_flags & CLK_MUX_HIWORD_MASK) { + width = fls(mask) - ffs(mask) + 1; + if (width + shift > 16) { + pr_err("mux value exceeds LOWORD field\n"); + return ERR_PTR(-EINVAL); + } + } + + /* allocate the mux */ + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + /* U-boot specific assignments */ + mux->parent_names = parent_names; + mux->num_parents = num_parents; + + /* struct clk_mux assignments */ + mux->reg = reg; + mux->shift = shift; + mux->mask = mask; + mux->flags = clk_mux_flags; + mux->table = table; +#if CONFIG_IS_ENABLED(SANDBOX_CLK_CCF) + mux->io_mux_val = *(u32 *)reg; +#endif + + clk = &mux->clk; + + /* + * Read the current mux setup - so we assign correct parent. + * + * Changing parent would require changing internals of udevice struct + * for the corresponding clock (to do that define .set_parent() method. 
+ */ + ret = clk_register(clk, UBOOT_DM_CLK_CCF_MUX, name, + parent_names[clk_mux_get_parent(clk)]); + if (ret) { + kfree(mux); + return ERR_PTR(ret); + } + + return clk; +} + +struct clk *clk_register_mux_table(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table) +{ + struct clk *clk; + + clk = clk_hw_register_mux_table(dev, name, parent_names, num_parents, + flags, reg, shift, mask, clk_mux_flags, + table); + if (IS_ERR(clk)) + return ERR_CAST(clk); + return clk; +} + +struct clk *clk_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags) +{ + u32 mask = BIT(width) - 1; + + return clk_register_mux_table(dev, name, parent_names, num_parents, + flags, reg, shift, mask, clk_mux_flags, + NULL); +} + +U_BOOT_DRIVER(ccf_clk_mux) = { + .name = UBOOT_DM_CLK_CCF_MUX, + .id = UCLASS_CLK, + .ops = &clk_mux_ops, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/clk/clk-uclass.c b/drivers/clk/clk-uclass.c index 79b3b0494c..85dfe712f5 100644 --- a/drivers/clk/clk-uclass.c +++ b/drivers/clk/clk-uclass.c @@ -13,6 +13,7 @@ #include <dm/read.h> #include <dt-structs.h> #include <errno.h> +#include <linux/clk-provider.h> static inline const struct clk_ops *clk_dev_ops(struct udevice *dev) { @@ -51,6 +52,8 @@ static int clk_of_xlate_default(struct clk *clk, else clk->id = 0; + clk->data = 0; + return 0; } @@ -379,6 +382,43 @@ ulong clk_get_rate(struct clk *clk) return ops->get_rate(clk); } +struct clk *clk_get_parent(struct clk *clk) +{ + struct udevice *pdev; + struct clk *pclk; + + debug("%s(clk=%p)\n", __func__, clk); + + pdev = dev_get_parent(clk->dev); + pclk = dev_get_clk_ptr(pdev); + if (!pclk) + return ERR_PTR(-ENODEV); + + return pclk; +} + +long long clk_get_parent_rate(struct clk *clk) +{ + const struct clk_ops *ops; + struct clk *pclk; + + debug("%s(clk=%p)\n", __func__, clk); + + pclk = clk_get_parent(clk); + if (IS_ERR(pclk)) + return -ENODEV; + + ops = clk_dev_ops(pclk->dev); + if (!ops->get_rate) + return -ENOSYS; + + /* Read the 'rate' if not already set or if proper flag set*/ + if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE) + pclk->rate = clk_get_rate(pclk); + + return pclk->rate; +} + ulong clk_set_rate(struct clk *clk, ulong rate) { const struct clk_ops *ops = clk_dev_ops(clk->dev); @@ -453,6 +493,28 @@ int clk_disable_bulk(struct clk_bulk *bulk) return 0; } +int clk_get_by_id(ulong id, struct clk **clkp) +{ + struct udevice *dev; + struct uclass *uc; + int ret; + + ret = uclass_get(UCLASS_CLK, &uc); + if (ret) + return ret; + + uclass_foreach_dev(dev, uc) { + struct clk *clk = dev_get_clk_ptr(dev); + + if (clk && clk->id == id) { + *clkp = clk; + return 0; + } + } + + return -ENOENT; +} + UCLASS_DRIVER(clk) = { .id = UCLASS_CLK, .name = "clk", diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c new file mode 100644 index 0000000000..7d748c9fc7 --- /dev/null +++ b/drivers/clk/clk.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + */ + +#include <common.h> +#include <clk-uclass.h> +#include <dm/device.h> +#include <dm/uclass.h> +#include <dm/lists.h> +#include <dm/device-internal.h> +#include <clk.h> + +int clk_register(struct clk *clk, const char *drv_name, + const char *name, const char 
*parent_name) +{ + struct udevice *parent; + struct driver *drv; + int ret; + + ret = uclass_get_device_by_name(UCLASS_CLK, parent_name, &parent); + if (ret) + printf("%s: UCLASS parent: 0x%p\n", __func__, parent); + + debug("%s: name: %s parent: %s [0x%p]\n", __func__, name, parent->name, + parent); + + drv = lists_driver_lookup_name(drv_name); + if (!drv) { + printf("%s: %s is not a valid driver name\n", + __func__, drv_name); + return -ENOENT; + } + + ret = device_bind(parent, drv, name, NULL, -1, &clk->dev); + if (ret) { + printf("%s: CLK: %s driver bind error [%d]!\n", __func__, name, + ret); + return ret; + } + + /* Store back pointer to clk from udevice */ + clk->dev->uclass_priv = clk; + + return 0; +} + +ulong clk_generic_get_rate(struct clk *clk) +{ + return clk_get_parent_rate(clk); +} + +const char *clk_hw_get_name(const struct clk *hw) +{ + return hw->dev->name; +} diff --git a/drivers/clk/clk_fixed_factor.c b/drivers/clk/clk_fixed_factor.c index 5fa20a84db..dcdb6ddf5c 100644 --- a/drivers/clk/clk_fixed_factor.c +++ b/drivers/clk/clk_fixed_factor.c @@ -24,9 +24,6 @@ static ulong clk_fixed_factor_get_rate(struct clk *clk) uint64_t rate; struct clk_fixed_factor *ff = to_clk_fixed_factor(clk->dev); - if (clk->id != 0) - return -EINVAL; - rate = clk_get_rate(&ff->parent); if (IS_ERR_VALUE(rate)) return rate; diff --git a/drivers/clk/clk_fixed_rate.c b/drivers/clk/clk_fixed_rate.c index d8d9f86c86..1fdf8c4e54 100644 --- a/drivers/clk/clk_fixed_rate.c +++ b/drivers/clk/clk_fixed_rate.c @@ -8,6 +8,7 @@ #include <dm.h> struct clk_fixed_rate { + struct clk clk; unsigned long fixed_rate; }; @@ -15,9 +16,6 @@ struct clk_fixed_rate { static ulong clk_fixed_rate_get_rate(struct clk *clk) { - if (clk->id != 0) - return -EINVAL; - return to_clk_fixed_rate(clk->dev)->fixed_rate; } @@ -27,10 +25,14 @@ const struct clk_ops clk_fixed_rate_ops = { static int clk_fixed_rate_ofdata_to_platdata(struct udevice *dev) { + struct clk *clk = &to_clk_fixed_rate(dev)->clk; #if !CONFIG_IS_ENABLED(OF_PLATDATA) to_clk_fixed_rate(dev)->fixed_rate = dev_read_u32_default(dev, "clock-frequency", 0); #endif + /* Make fixed rate clock accessible from higher level struct clk */ + dev->uclass_priv = clk; + clk->dev = dev; return 0; } diff --git a/drivers/clk/clk_sandbox_ccf.c b/drivers/clk/clk_sandbox_ccf.c new file mode 100644 index 0000000000..edeb0f2cf3 --- /dev/null +++ b/drivers/clk/clk_sandbox_ccf.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + * + * Common Clock Framework [CCF] driver for Sandbox + */ + +#include <common.h> +#include <dm.h> +#include <clk.h> +#include <asm/clk.h> +#include <clk-uclass.h> +#include <linux/clk-provider.h> +#include <sandbox-clk.h> + +/* + * Sandbox implementation of CCF primitives necessary for clk-uclass testing + * + * --- Sandbox PLLv3 --- + */ +struct clk_pllv3 { + struct clk clk; + u32 div_mask; + u32 div_shift; +}; + +static ulong clk_pllv3_get_rate(struct clk *clk) +{ + unsigned long parent_rate = clk_get_parent_rate(clk); + + return parent_rate * 24; +} + +static const struct clk_ops clk_pllv3_generic_ops = { + .get_rate = clk_pllv3_get_rate, +}; + +struct clk *sandbox_clk_pllv3(enum sandbox_pllv3_type type, const char *name, + const char *parent_name, void __iomem *base, + u32 div_mask) +{ + struct clk_pllv3 *pll; + struct clk *clk; + char *drv_name = "sandbox_clk_pllv3"; + int ret; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + 
pll->div_mask = div_mask; + clk = &pll->clk; + + ret = clk_register(clk, drv_name, name, parent_name); + if (ret) { + kfree(pll); + return ERR_PTR(ret); + } + + return clk; +} + +U_BOOT_DRIVER(sandbox_clk_pll_generic) = { + .name = "sandbox_clk_pllv3", + .id = UCLASS_CLK, + .ops = &clk_pllv3_generic_ops, +}; + +/* --- Sandbox PLLv3 --- */ +/* --- Sandbox Gate --- */ +struct clk_gate2 { + struct clk clk; + bool state; +}; + +#define to_clk_gate2(_clk) container_of(_clk, struct clk_gate2, clk) + +static int clk_gate2_enable(struct clk *clk) +{ + struct clk_gate2 *gate = to_clk_gate2(dev_get_clk_ptr(clk->dev)); + + gate->state = 1; + return 0; +} + +static int clk_gate2_disable(struct clk *clk) +{ + struct clk_gate2 *gate = to_clk_gate2(dev_get_clk_ptr(clk->dev)); + + gate->state = 0; + return 0; +} + +static const struct clk_ops clk_gate2_ops = { + .enable = clk_gate2_enable, + .disable = clk_gate2_disable, + .get_rate = clk_generic_get_rate, +}; + +struct clk *sandbox_clk_register_gate2(struct device *dev, const char *name, + const char *parent_name, + unsigned long flags, void __iomem *reg, + u8 bit_idx, u8 cgr_val, + u8 clk_gate2_flags) +{ + struct clk_gate2 *gate; + struct clk *clk; + int ret; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + gate->state = 0; + clk = &gate->clk; + + ret = clk_register(clk, "sandbox_clk_gate2", name, parent_name); + if (ret) { + kfree(gate); + return ERR_PTR(ret); + } + + return clk; +} + +U_BOOT_DRIVER(sandbox_clk_gate2) = { + .name = "sandbox_clk_gate2", + .id = UCLASS_CLK, + .ops = &clk_gate2_ops, +}; + +/* --- Sandbox Gate --- */ +/* The CCF core driver itself */ +static const struct udevice_id sandbox_clk_ccf_test_ids[] = { + { .compatible = "sandbox,clk-ccf" }, + { } +}; + +static const char *const usdhc_sels[] = { "pll3_60m", "pll3_80m", }; + +static int sandbox_clk_ccf_probe(struct udevice *dev) +{ + void *base = NULL; + u32 reg; + + clk_dm(SANDBOX_CLK_PLL3, + sandbox_clk_pllv3(SANDBOX_PLLV3_USB, "pll3_usb_otg", "osc", + base + 0x10, 0x3)); + + clk_dm(SANDBOX_CLK_PLL3_60M, + sandbox_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8)); + + clk_dm(SANDBOX_CLK_PLL3_80M, + sandbox_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6)); + + /* The HW adds +1 to the divider value (2+1) is the divider */ + reg = (2 << 19); + clk_dm(SANDBOX_CLK_ECSPI_ROOT, + sandbox_clk_divider("ecspi_root", "pll3_60m", ®, 19, 6)); + + clk_dm(SANDBOX_CLK_ECSPI1, + sandbox_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0)); + + /* Select 'pll3_60m' */ + reg = 0; + clk_dm(SANDBOX_CLK_USDHC1_SEL, + sandbox_clk_mux("usdhc1_sel", ®, 16, 1, usdhc_sels, + ARRAY_SIZE(usdhc_sels))); + + /* Select 'pll3_80m' */ + reg = BIT(17); + clk_dm(SANDBOX_CLK_USDHC2_SEL, + sandbox_clk_mux("usdhc2_sel", ®, 17, 1, usdhc_sels, + ARRAY_SIZE(usdhc_sels))); + + return 0; +} + +U_BOOT_DRIVER(sandbox_clk_ccf) = { + .name = "sandbox_clk_ccf", + .id = UCLASS_CLK, + .probe = sandbox_clk_ccf_probe, + .of_match = sandbox_clk_ccf_test_ids, +}; diff --git a/drivers/clk/clk_stm32mp1.c b/drivers/clk/clk_stm32mp1.c index 6ffa05b8fd..5806d48696 100644 --- a/drivers/clk/clk_stm32mp1.c +++ b/drivers/clk/clk_stm32mp1.c @@ -300,6 +300,7 @@ enum stm32mp1_parent_sel { _DSI_SEL, _ADC12_SEL, _SPI1_SEL, + _RTC_SEL, _PARENT_SEL_NB, _UNKNOWN_SEL = 0xff, }; @@ -534,6 +535,7 @@ static const struct stm32mp1_clk_gate stm32mp1_clk_gate[] = { STM32MP1_CLK_SET_CLR(RCC_MP_APB4ENSETR, 16, USBPHY_K, _USBPHY_SEL), STM32MP1_CLK_SET_CLR(RCC_MP_APB5ENSETR, 2, I2C4_K, _I2C46_SEL), + 
STM32MP1_CLK_SET_CLR(RCC_MP_APB5ENSETR, 8, RTCAPB, _PCLK5), STM32MP1_CLK_SET_CLR(RCC_MP_APB5ENSETR, 20, STGEN_K, _STGEN_SEL), STM32MP1_CLK_SET_CLR_F(RCC_MP_AHB2ENSETR, 5, ADC12, _HCLK2), @@ -569,6 +571,8 @@ static const struct stm32mp1_clk_gate stm32mp1_clk_gate[] = { STM32MP1_CLK_SET_CLR(RCC_MP_AHB6ENSETR, 24, USBH, _UNKNOWN_SEL), STM32MP1_CLK(RCC_DBGCFGR, 8, CK_DBG, _UNKNOWN_SEL), + + STM32MP1_CLK(RCC_BDCR, 20, RTC, _RTC_SEL), }; static const u8 i2c12_parents[] = {_PCLK1, _PLL4_R, _HSI_KER, _CSI_KER}; @@ -594,6 +598,7 @@ static const u8 dsi_parents[] = {_DSI_PHY, _PLL4_P}; static const u8 adc_parents[] = {_PLL4_R, _CK_PER, _PLL3_Q}; static const u8 spi_parents[] = {_PLL4_P, _PLL3_Q, _I2S_CKIN, _CK_PER, _PLL3_R}; +static const u8 rtc_parents[] = {_UNKNOWN_ID, _LSE, _LSI, _HSE}; static const struct stm32mp1_clk_sel stm32mp1_clk_sel[_PARENT_SEL_NB] = { STM32MP1_CLK_PARENT(_I2C12_SEL, RCC_I2C12CKSELR, 0, 0x7, i2c12_parents), @@ -619,6 +624,9 @@ static const struct stm32mp1_clk_sel stm32mp1_clk_sel[_PARENT_SEL_NB] = { STM32MP1_CLK_PARENT(_DSI_SEL, RCC_DSICKSELR, 0, 0x1, dsi_parents), STM32MP1_CLK_PARENT(_ADC12_SEL, RCC_ADCCKSELR, 0, 0x1, adc_parents), STM32MP1_CLK_PARENT(_SPI1_SEL, RCC_SPI2S1CKSELR, 0, 0x7, spi_parents), + STM32MP1_CLK_PARENT(_RTC_SEL, RCC_BDCR, RCC_BDCR_RTCSRC_SHIFT, + (RCC_BDCR_RTCSRC_MASK >> RCC_BDCR_RTCSRC_SHIFT), + rtc_parents), }; #ifdef STM32MP1_CLOCK_TREE_INIT @@ -734,6 +742,7 @@ char * const stm32mp1_clk_parent_sel_name[_PARENT_SEL_NB] = { [_DSI_SEL] = "DSI", [_ADC12_SEL] = "ADC12", [_SPI1_SEL] = "SPI1", + [_RTC_SEL] = "RTC", }; static const struct stm32mp1_clk_data stm32mp1_data = { diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig index a6fb58d6cf..3e6a980c8c 100644 --- a/drivers/clk/imx/Kconfig +++ b/drivers/clk/imx/Kconfig @@ -1,3 +1,19 @@ +config SPL_CLK_IMX6Q + bool "SPL clock support for i.MX6Q" + depends on ARCH_MX6 && SPL + select SPL_CLK + select SPL_CLK_CCF + help + This enables SPL DM/DTS support for clock driver in i.MX6Q platforms. + +config CLK_IMX6Q + bool "Clock support for i.MX6Q" + depends on ARCH_MX6 + select CLK + select CLK_CCF + help + This enables DM/DTS support for clock driver in i.MX6Q platforms. + config CLK_IMX8 bool "Clock support for i.MX8" depends on ARCH_IMX8 diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile index eb379c188a..105a58ca90 100644 --- a/drivers/clk/imx/Makefile +++ b/drivers/clk/imx/Makefile @@ -2,6 +2,8 @@ # # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_$(SPL_TPL_)CLK_CCF) += clk-gate2.o clk-pllv3.o clk-pfd.o +obj-$(CONFIG_$(SPL_TPL_)CLK_IMX6Q) += clk-imx6q.o obj-$(CONFIG_CLK_IMX8) += clk-imx8.o ifdef CONFIG_CLK_IMX8 diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c new file mode 100644 index 0000000000..571be32088 --- /dev/null +++ b/drivers/clk/imx/clk-gate2.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + * + * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> + * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Gated clock implementation + * + */ + +#include <common.h> +#include <asm/io.h> +#include <malloc.h> +#include <clk-uclass.h> +#include <dm/device.h> +#include <linux/clk-provider.h> +#include <clk.h> +#include "clk.h" + +#define UBOOT_DM_CLK_IMX_GATE2 "imx_clk_gate2" + +struct clk_gate2 { + struct clk clk; + void __iomem *reg; + u8 bit_idx; + u8 cgr_val; + u8 flags; +}; + +#define to_clk_gate2(_clk) container_of(_clk, struct clk_gate2, clk) + +static int clk_gate2_enable(struct clk *clk) +{ + struct clk_gate2 *gate = to_clk_gate2(dev_get_clk_ptr(clk->dev)); + u32 reg; + + reg = readl(gate->reg); + reg &= ~(3 << gate->bit_idx); + reg |= gate->cgr_val << gate->bit_idx; + writel(reg, gate->reg); + + return 0; +} + +static int clk_gate2_disable(struct clk *clk) +{ + struct clk_gate2 *gate = to_clk_gate2(dev_get_clk_ptr(clk->dev)); + u32 reg; + + reg = readl(gate->reg); + reg &= ~(3 << gate->bit_idx); + writel(reg, gate->reg); + + return 0; +} + +static const struct clk_ops clk_gate2_ops = { + .enable = clk_gate2_enable, + .disable = clk_gate2_disable, + .get_rate = clk_generic_get_rate, +}; + +struct clk *clk_register_gate2(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, u8 cgr_val, + u8 clk_gate2_flags) +{ + struct clk_gate2 *gate; + struct clk *clk; + int ret; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + gate->reg = reg; + gate->bit_idx = bit_idx; + gate->cgr_val = cgr_val; + gate->flags = clk_gate2_flags; + + clk = &gate->clk; + + ret = clk_register(clk, UBOOT_DM_CLK_IMX_GATE2, name, parent_name); + if (ret) { + kfree(gate); + return ERR_PTR(ret); + } + + return clk; +} + +U_BOOT_DRIVER(clk_gate2) = { + .name = UBOOT_DM_CLK_IMX_GATE2, + .id = UCLASS_CLK, + .ops = &clk_gate2_ops, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c new file mode 100644 index 0000000000..92e9337d44 --- /dev/null +++ b/drivers/clk/imx/clk-imx6q.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + */ + +#include <common.h> +#include <clk-uclass.h> +#include <dm.h> +#include <asm/arch/clock.h> +#include <asm/arch/imx-regs.h> +#include <dt-bindings/clock/imx6qdl-clock.h> + +#include "clk.h" + +static int imx6q_check_id(ulong id) +{ + if (id < IMX6QDL_CLK_DUMMY || id >= IMX6QDL_CLK_END) { + printf("%s: Invalid clk ID #%lu\n", __func__, id); + return -EINVAL; + } + + return 0; +} + +static ulong imx6q_clk_get_rate(struct clk *clk) +{ + struct clk *c; + int ret; + + debug("%s(#%lu)\n", __func__, clk->id); + + ret = imx6q_check_id(clk->id); + if (ret) + return ret; + + ret = clk_get_by_id(clk->id, &c); + if (ret) + return ret; + + return clk_get_rate(c); +} + +static ulong imx6q_clk_set_rate(struct clk *clk, unsigned long rate) +{ + debug("%s(#%lu), rate: %lu\n", __func__, clk->id, rate); + + return rate; +} + +static int __imx6q_clk_enable(struct clk *clk, bool enable) +{ + struct clk *c; + int ret = 0; + + debug("%s(#%lu) en: %d\n", __func__, clk->id, enable); + + ret = imx6q_check_id(clk->id); + if (ret) + return ret; + + ret = clk_get_by_id(clk->id, &c); + if (ret) + return ret; + + if (enable) + ret = clk_enable(c); + else + ret = clk_disable(c); + + return ret; +} + +static int imx6q_clk_disable(struct clk *clk) +{ + return __imx6q_clk_enable(clk, 0); +} + +static int imx6q_clk_enable(struct clk *clk) +{ + return 
__imx6q_clk_enable(clk, 1); +} + +static struct clk_ops imx6q_clk_ops = { + .set_rate = imx6q_clk_set_rate, + .get_rate = imx6q_clk_get_rate, + .enable = imx6q_clk_enable, + .disable = imx6q_clk_disable, +}; + +static const char *const usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", }; + +static int imx6q_clk_probe(struct udevice *dev) +{ + void *base; + + /* Anatop clocks */ + base = (void *)ANATOP_BASE_ADDR; + + clk_dm(IMX6QDL_CLK_PLL2, + imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", + base + 0x30, 0x1)); + clk_dm(IMX6QDL_CLK_PLL3_USB_OTG, + imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", + base + 0x10, 0x3)); + clk_dm(IMX6QDL_CLK_PLL3_60M, + imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8)); + clk_dm(IMX6QDL_CLK_PLL2_PFD0_352M, + imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0)); + clk_dm(IMX6QDL_CLK_PLL2_PFD2_396M, + imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2)); + + /* CCM clocks */ + base = dev_read_addr_ptr(dev); + if (base == (void *)FDT_ADDR_T_NONE) + return -EINVAL; + + clk_dm(IMX6QDL_CLK_USDHC1_SEL, + imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, + usdhc_sels, ARRAY_SIZE(usdhc_sels))); + clk_dm(IMX6QDL_CLK_USDHC2_SEL, + imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, + usdhc_sels, ARRAY_SIZE(usdhc_sels))); + clk_dm(IMX6QDL_CLK_USDHC3_SEL, + imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, + usdhc_sels, ARRAY_SIZE(usdhc_sels))); + clk_dm(IMX6QDL_CLK_USDHC4_SEL, + imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, + usdhc_sels, ARRAY_SIZE(usdhc_sels))); + + clk_dm(IMX6QDL_CLK_USDHC1_PODF, + imx_clk_divider("usdhc1_podf", "usdhc1_sel", + base + 0x24, 11, 3)); + clk_dm(IMX6QDL_CLK_USDHC2_PODF, + imx_clk_divider("usdhc2_podf", "usdhc2_sel", + base + 0x24, 16, 3)); + clk_dm(IMX6QDL_CLK_USDHC3_PODF, + imx_clk_divider("usdhc3_podf", "usdhc3_sel", + base + 0x24, 19, 3)); + clk_dm(IMX6QDL_CLK_USDHC4_PODF, + imx_clk_divider("usdhc4_podf", "usdhc4_sel", + base + 0x24, 22, 3)); + + clk_dm(IMX6QDL_CLK_ECSPI_ROOT, + imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6)); + + clk_dm(IMX6QDL_CLK_ECSPI1, + imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0)); + clk_dm(IMX6QDL_CLK_ECSPI2, + imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2)); + clk_dm(IMX6QDL_CLK_ECSPI3, + imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4)); + clk_dm(IMX6QDL_CLK_ECSPI4, + imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6)); + clk_dm(IMX6QDL_CLK_USDHC1, + imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2)); + clk_dm(IMX6QDL_CLK_USDHC2, + imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4)); + clk_dm(IMX6QDL_CLK_USDHC3, + imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6)); + clk_dm(IMX6QDL_CLK_USDHC4, + imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8)); + + return 0; +} + +static const struct udevice_id imx6q_clk_ids[] = { + { .compatible = "fsl,imx6q-ccm" }, + { }, +}; + +U_BOOT_DRIVER(imx6q_clk) = { + .name = "clk_imx6q", + .id = UCLASS_CLK, + .of_match = imx6q_clk_ids, + .ops = &imx6q_clk_ops, + .probe = imx6q_clk_probe, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c new file mode 100644 index 0000000000..188b2b3b90 --- /dev/null +++ b/drivers/clk/imx/clk-pfd.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + * + * Copyright 2012 Freescale Semiconductor, Inc. + * Copyright 2012 Linaro Ltd. 
+ * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <common.h> +#include <asm/io.h> +#include <malloc.h> +#include <clk-uclass.h> +#include <dm/device.h> +#include <linux/clk-provider.h> +#include <div64.h> +#include <clk.h> +#include "clk.h" + +#define UBOOT_DM_CLK_IMX_PFD "imx_clk_pfd" + +struct clk_pfd { + struct clk clk; + void __iomem *reg; + u8 idx; +}; + +#define to_clk_pfd(_clk) container_of(_clk, struct clk_pfd, clk) + +#define SET 0x4 +#define CLR 0x8 +#define OTG 0xc + +static unsigned long clk_pfd_recalc_rate(struct clk *clk) +{ + struct clk_pfd *pfd = + to_clk_pfd(dev_get_clk_ptr(clk->dev)); + unsigned long parent_rate = clk_get_parent_rate(clk); + u64 tmp = parent_rate; + u8 frac = (readl(pfd->reg) >> (pfd->idx * 8)) & 0x3f; + + tmp *= 18; + do_div(tmp, frac); + + return tmp; +} + +static const struct clk_ops clk_pfd_ops = { + .get_rate = clk_pfd_recalc_rate, +}; + +struct clk *imx_clk_pfd(const char *name, const char *parent_name, + void __iomem *reg, u8 idx) +{ + struct clk_pfd *pfd; + struct clk *clk; + int ret; + + pfd = kzalloc(sizeof(*pfd), GFP_KERNEL); + if (!pfd) + return ERR_PTR(-ENOMEM); + + pfd->reg = reg; + pfd->idx = idx; + + /* register the clock */ + clk = &pfd->clk; + + ret = clk_register(clk, UBOOT_DM_CLK_IMX_PFD, name, parent_name); + if (ret) { + kfree(pfd); + return ERR_PTR(ret); + } + + return clk; +} + +U_BOOT_DRIVER(clk_pfd) = { + .name = UBOOT_DM_CLK_IMX_PFD, + .id = UCLASS_CLK, + .ops = &clk_pfd_ops, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c new file mode 100644 index 0000000000..fbb7b24d5e --- /dev/null +++ b/drivers/clk/imx/clk-pllv3.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + */ + +#include <common.h> +#include <asm/io.h> +#include <malloc.h> +#include <clk-uclass.h> +#include <dm/device.h> +#include <dm/uclass.h> +#include <clk.h> +#include "clk.h" + +#define UBOOT_DM_CLK_IMX_PLLV3 "imx_clk_pllv3" + +struct clk_pllv3 { + struct clk clk; + void __iomem *base; + u32 div_mask; + u32 div_shift; +}; + +#define to_clk_pllv3(_clk) container_of(_clk, struct clk_pllv3, clk) + +static ulong clk_pllv3_get_rate(struct clk *clk) +{ + struct clk_pllv3 *pll = to_clk_pllv3(dev_get_clk_ptr(clk->dev)); + unsigned long parent_rate = clk_get_parent_rate(clk); + + u32 div = (readl(pll->base) >> pll->div_shift) & pll->div_mask; + + return (div == 1) ? 
parent_rate * 22 : parent_rate * 20; +} + +static const struct clk_ops clk_pllv3_generic_ops = { + .get_rate = clk_pllv3_get_rate, +}; + +struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, + const char *parent_name, void __iomem *base, + u32 div_mask) +{ + struct clk_pllv3 *pll; + struct clk *clk; + char *drv_name; + int ret; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + switch (type) { + case IMX_PLLV3_GENERIC: + case IMX_PLLV3_USB: + drv_name = UBOOT_DM_CLK_IMX_PLLV3; + break; + default: + kfree(pll); + return ERR_PTR(-ENOTSUPP); + } + + pll->base = base; + pll->div_mask = div_mask; + clk = &pll->clk; + + ret = clk_register(clk, drv_name, name, parent_name); + if (ret) { + kfree(pll); + return ERR_PTR(ret); + } + + return clk; +} + +U_BOOT_DRIVER(clk_pllv3_generic) = { + .name = UBOOT_DM_CLK_IMX_PLLV3, + .id = UCLASS_CLK, + .ops = &clk_pllv3_generic_ops, + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h new file mode 100644 index 0000000000..e6d51830e8 --- /dev/null +++ b/drivers/clk/imx/clk.h @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + */ +#ifndef __MACH_IMX_CLK_H +#define __MACH_IMX_CLK_H + +#include <linux/clk-provider.h> + +enum imx_pllv3_type { + IMX_PLLV3_GENERIC, + IMX_PLLV3_SYS, + IMX_PLLV3_USB, + IMX_PLLV3_USB_VF610, + IMX_PLLV3_AV, + IMX_PLLV3_ENET, + IMX_PLLV3_ENET_IMX7, + IMX_PLLV3_SYS_VF610, + IMX_PLLV3_DDR_IMX7, +}; + +struct clk *clk_register_gate2(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, u8 cgr_val, + u8 clk_gate_flags); + +struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, + const char *parent_name, void __iomem *base, + u32 div_mask); + +static inline struct clk *imx_clk_gate2(const char *name, const char *parent, + void __iomem *reg, u8 shift) +{ + return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, + shift, 0x3, 0); +} + +static inline struct clk *imx_clk_fixed_factor(const char *name, + const char *parent, unsigned int mult, unsigned int div) +{ + return clk_register_fixed_factor(NULL, name, parent, + CLK_SET_RATE_PARENT, mult, div); +} + +static inline struct clk *imx_clk_divider(const char *name, const char *parent, + void __iomem *reg, u8 shift, u8 width) +{ + return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT, + reg, shift, width, 0); +} + +struct clk *imx_clk_pfd(const char *name, const char *parent_name, + void __iomem *reg, u8 idx); + +struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, + u8 shift, u8 width, const char * const *parents, + int num_parents, void (*fixup)(u32 *val)); + +static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, + u8 shift, u8 width, const char * const *parents, + int num_parents) +{ + return clk_register_mux(NULL, name, parents, num_parents, + CLK_SET_RATE_NO_REPARENT, reg, shift, + width, 0); +} + +#endif /* __MACH_IMX_CLK_H */ diff --git a/drivers/clk/rockchip/clk_rk3188.c b/drivers/clk/rockchip/clk_rk3188.c index 9bb9959c9d..dda686cfb3 100644 --- a/drivers/clk/rockchip/clk_rk3188.c +++ b/drivers/clk/rockchip/clk_rk3188.c @@ -562,6 +562,9 @@ static int rk3188_clk_probe(struct udevice *dev) #endif rkclk_init(priv->cru, priv->grf, priv->has_bwadj); + + /* Init CPU frequency */ + rkclk_configure_cpu(priv->cru, priv->grf, APLL_HZ, 
priv->has_bwadj); #endif return 0; diff --git a/drivers/clk/rockchip/clk_rk3399.c b/drivers/clk/rockchip/clk_rk3399.c index aa6a8ad1c9..d9950c159b 100644 --- a/drivers/clk/rockchip/clk_rk3399.c +++ b/drivers/clk/rockchip/clk_rk3399.c @@ -38,8 +38,8 @@ struct pll_div { }; #define RATE_TO_DIV(input_rate, output_rate) \ - ((input_rate) / (output_rate) - 1); -#define DIV_TO_RATE(input_rate, div) ((input_rate) / ((div) + 1)) + ((input_rate) / (output_rate) - 1) +#define DIV_TO_RATE(input_rate, div) ((input_rate) / ((div) + 1)) #define PLL_DIVISORS(hz, _refdiv, _postdiv1, _postdiv2) {\ .refdiv = _refdiv,\ @@ -53,15 +53,15 @@ static const struct pll_div cpll_init_cfg = PLL_DIVISORS(CPLL_HZ, 1, 2, 2); static const struct pll_div ppll_init_cfg = PLL_DIVISORS(PPLL_HZ, 2, 2, 1); #endif -static const struct pll_div apll_l_1600_cfg = PLL_DIVISORS(1600*MHz, 3, 1, 1); -static const struct pll_div apll_l_600_cfg = PLL_DIVISORS(600*MHz, 1, 2, 1); +static const struct pll_div apll_l_1600_cfg = PLL_DIVISORS(1600 * MHz, 3, 1, 1); +static const struct pll_div apll_l_600_cfg = PLL_DIVISORS(600 * MHz, 1, 2, 1); static const struct pll_div *apll_l_cfgs[] = { [APLL_L_1600_MHZ] = &apll_l_1600_cfg, [APLL_L_600_MHZ] = &apll_l_600_cfg, }; -static const struct pll_div apll_b_600_cfg = PLL_DIVISORS(600*MHz, 1, 2, 1); +static const struct pll_div apll_b_600_cfg = PLL_DIVISORS(600 * MHz, 1, 2, 1); static const struct pll_div *apll_b_cfgs[] = { [APLL_B_600_MHZ] = &apll_b_600_cfg, }; @@ -393,7 +393,7 @@ static int pll_para_config(u32 freq_hz, struct pll_div *div) fref_khz = ref_khz / refdiv; fbdiv = vco_khz / fref_khz; - if ((fbdiv >= max_fbdiv) || (fbdiv <= min_fbdiv)) + if (fbdiv >= max_fbdiv || fbdiv <= min_fbdiv) continue; diff_khz = vco_khz - fbdiv * fref_khz; if (fbdiv + 1 < max_fbdiv && diff_khz > fref_khz / 2) { @@ -409,7 +409,7 @@ static int pll_para_config(u32 freq_hz, struct pll_div *div) div->fbdiv = fbdiv; } - if (best_diff_khz > 4 * (MHz/KHz)) { + if (best_diff_khz > 4 * (MHz / KHz)) { printf("%s: Failed to match output frequency %u, " "difference is %u Hz,exceed 4MHZ\n", __func__, freq_hz, best_diff_khz * KHz); @@ -489,28 +489,21 @@ void rk3399_configure_cpu_b(struct rk3399_cru *cru, } #define I2C_CLK_REG_MASK(bus) \ - (I2C_DIV_CON_MASK << \ - CLK_I2C ##bus## _DIV_CON_SHIFT | \ - CLK_I2C_PLL_SEL_MASK << \ - CLK_I2C ##bus## _PLL_SEL_SHIFT) + (I2C_DIV_CON_MASK << CLK_I2C ##bus## _DIV_CON_SHIFT | \ + CLK_I2C_PLL_SEL_MASK << CLK_I2C ##bus## _PLL_SEL_SHIFT) #define I2C_CLK_REG_VALUE(bus, clk_div) \ - ((clk_div - 1) << \ - CLK_I2C ##bus## _DIV_CON_SHIFT | \ - CLK_I2C_PLL_SEL_GPLL << \ - CLK_I2C ##bus## _PLL_SEL_SHIFT) + ((clk_div - 1) << CLK_I2C ##bus## _DIV_CON_SHIFT | \ + CLK_I2C_PLL_SEL_GPLL << CLK_I2C ##bus## _PLL_SEL_SHIFT) #define I2C_CLK_DIV_VALUE(con, bus) \ - (con >> CLK_I2C ##bus## _DIV_CON_SHIFT) & \ - I2C_DIV_CON_MASK; + ((con >> CLK_I2C ##bus## _DIV_CON_SHIFT) & I2C_DIV_CON_MASK) #define I2C_PMUCLK_REG_MASK(bus) \ - (I2C_DIV_CON_MASK << \ - CLK_I2C ##bus## _DIV_CON_SHIFT) + (I2C_DIV_CON_MASK << CLK_I2C ##bus## _DIV_CON_SHIFT) #define I2C_PMUCLK_REG_VALUE(bus, clk_div) \ - ((clk_div - 1) << \ - CLK_I2C ##bus## _DIV_CON_SHIFT) + ((clk_div - 1) << CLK_I2C ##bus## _DIV_CON_SHIFT) static ulong rk3399_i2c_get_clk(struct rk3399_cru *cru, ulong clk_id) { @@ -597,9 +590,9 @@ static ulong rk3399_i2c_set_clk(struct rk3399_cru *cru, ulong clk_id, uint hz) */ struct spi_clkreg { - uint8_t reg; /* CLKSEL_CON[reg] register in CRU */ - uint8_t div_shift; - uint8_t sel_shift; + u8 reg; /* CLKSEL_CON[reg] register in 
CRU */ + u8 div_shift; + u8 sel_shift; }; /* @@ -678,7 +671,7 @@ static ulong rk3399_spi_set_clk(struct rk3399_cru *cru, ulong clk_id, uint hz) static ulong rk3399_vop_set_clk(struct rk3399_cru *cru, ulong clk_id, u32 hz) { struct pll_div vpll_config = {0}; - int aclk_vop = 198*MHz; + int aclk_vop = 198 * MHz; void *aclkreg_addr, *dclkreg_addr; u32 div; @@ -710,7 +703,7 @@ static ulong rk3399_vop_set_clk(struct rk3399_cru *cru, ulong clk_id, u32 hz) rkclk_set_pll(&cru->vpll_con[0], &vpll_config); rk_clrsetreg(dclkreg_addr, - DCLK_VOP_DCLK_SEL_MASK | DCLK_VOP_PLL_SEL_MASK| + DCLK_VOP_DCLK_SEL_MASK | DCLK_VOP_PLL_SEL_MASK | DCLK_VOP_DIV_CON_MASK, DCLK_VOP_DCLK_SEL_DIVOUT << DCLK_VOP_DCLK_SEL_SHIFT | DCLK_VOP_PLL_SEL_VPLL << DCLK_VOP_PLL_SEL_SHIFT | @@ -750,7 +743,7 @@ static ulong rk3399_mmc_set_clk(struct rk3399_cru *cru, ulong clk_id, ulong set_rate) { int src_clk_div; - int aclk_emmc = 198*MHz; + int aclk_emmc = 198 * MHz; switch (clk_id) { case HCLK_SDMMC: @@ -776,7 +769,7 @@ static ulong rk3399_mmc_set_clk(struct rk3399_cru *cru, break; case SCLK_EMMC: /* Select aclk_emmc source from GPLL */ - src_clk_div = DIV_ROUND_UP(GPLL_HZ , aclk_emmc); + src_clk_div = DIV_ROUND_UP(GPLL_HZ, aclk_emmc); assert(src_clk_div - 1 < 32); rk_clrsetreg(&cru->clksel_con[21], @@ -834,23 +827,31 @@ static ulong rk3399_ddr_set_clk(struct rk3399_cru *cru, /* clk_ddrc == DPLL = 24MHz / refdiv * fbdiv / postdiv1 / postdiv2 */ switch (set_rate) { - case 200*MHz: + case 50 * MHz: + dpll_cfg = (struct pll_div) + {.refdiv = 1, .fbdiv = 12, .postdiv1 = 3, .postdiv2 = 2}; + break; + case 200 * MHz: dpll_cfg = (struct pll_div) {.refdiv = 1, .fbdiv = 50, .postdiv1 = 6, .postdiv2 = 1}; break; - case 300*MHz: + case 300 * MHz: dpll_cfg = (struct pll_div) {.refdiv = 2, .fbdiv = 100, .postdiv1 = 4, .postdiv2 = 1}; break; - case 666*MHz: + case 400 * MHz: + dpll_cfg = (struct pll_div) + {.refdiv = 1, .fbdiv = 50, .postdiv1 = 3, .postdiv2 = 1}; + break; + case 666 * MHz: dpll_cfg = (struct pll_div) {.refdiv = 2, .fbdiv = 111, .postdiv1 = 2, .postdiv2 = 1}; break; - case 800*MHz: + case 800 * MHz: dpll_cfg = (struct pll_div) {.refdiv = 1, .fbdiv = 100, .postdiv1 = 3, .postdiv2 = 1}; break; - case 933*MHz: + case 933 * MHz: dpll_cfg = (struct pll_div) {.refdiv = 1, .fbdiv = 116, .postdiv1 = 3, .postdiv2 = 1}; break; @@ -916,7 +917,6 @@ static ulong rk3399_clk_get_rate(struct clk *clk) case SCLK_UART2: case SCLK_UART3: return 24000000; - break; case PCLK_HDMI_CTRL: break; case DCLK_VOP0: @@ -1014,7 +1014,8 @@ static ulong rk3399_clk_set_rate(struct clk *clk, ulong rate) return ret; } -static int __maybe_unused rk3399_gmac_set_parent(struct clk *clk, struct clk *parent) +static int __maybe_unused rk3399_gmac_set_parent(struct clk *clk, + struct clk *parent) { struct rk3399_clk_priv *priv = dev_get_priv(clk->dev); const char *clock_output_name; @@ -1024,7 +1025,7 @@ static int __maybe_unused rk3399_gmac_set_parent(struct clk *clk, struct clk *pa * If the requested parent is in the same clock-controller and * the id is SCLK_MAC ("clk_gmac"), switch to the internal clock. 
*/ - if ((parent->dev == clk->dev) && (parent->id == SCLK_MAC)) { + if (parent->dev == clk->dev && parent->id == SCLK_MAC) { debug("%s: switching RGMII to SCLK_MAC\n", __func__); rk_clrreg(&priv->cru->clksel_con[19], BIT(4)); return 0; @@ -1049,7 +1050,8 @@ static int __maybe_unused rk3399_gmac_set_parent(struct clk *clk, struct clk *pa return -EINVAL; } -static int __maybe_unused rk3399_clk_set_parent(struct clk *clk, struct clk *parent) +static int __maybe_unused rk3399_clk_set_parent(struct clk *clk, + struct clk *parent) { switch (clk->id) { case SCLK_RMII_SRC: @@ -1078,6 +1080,18 @@ static int rk3399_clk_enable(struct clk *clk) case PCLK_GMAC: /* Required to successfully probe the Designware GMAC driver */ return 0; + + case SCLK_USB3OTG0_REF: + case SCLK_USB3OTG1_REF: + case SCLK_USB3OTG0_SUSPEND: + case SCLK_USB3OTG1_SUSPEND: + case ACLK_USB3OTG0: + case ACLK_USB3OTG1: + case ACLK_USB3_RKSOC_AXI_PERF: + case ACLK_USB3: + case ACLK_USB3_GRF: + /* Required to successfully probe the Designware USB3 driver */ + return 0; } debug("%s: unsupported clk %ld\n", __func__, clk->id); diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig index 644881b948..c4d0a1f9b1 100644 --- a/drivers/clk/sifive/Kconfig +++ b/drivers/clk/sifive/Kconfig @@ -1,8 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -config CLK_ANALOGBITS_WRPLL_CLN28HPC - bool - config CLK_SIFIVE bool "SiFive SoC driver support" depends on CLK @@ -17,10 +14,3 @@ config CLK_SIFIVE_FU540_PRCI Supports the Power Reset Clock interface (PRCI) IP block found in FU540 SoCs. If this kernel is meant to run on a SiFive FU540 SoC, enable this driver. - -config CLK_SIFIVE_GEMGXL_MGMT - bool "GEMGXL management for SiFive FU540 SoCs" - depends on CLK_SIFIVE - help - Supports the GEMGXL management IP block found in FU540 SoCs to - control GEM TX clock operation mode for 10/100/1000 Mbps. diff --git a/drivers/clk/sifive/Makefile b/drivers/clk/sifive/Makefile index f8263e79b7..b224279afb 100644 --- a/drivers/clk/sifive/Makefile +++ b/drivers/clk/sifive/Makefile @@ -1,7 +1,3 @@ # SPDX-License-Identifier: GPL-2.0+ -obj-$(CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC) += wrpll-cln28hpc.o - obj-$(CONFIG_CLK_SIFIVE_FU540_PRCI) += fu540-prci.o - -obj-$(CONFIG_CLK_SIFIVE_GEMGXL_MGMT) += gemgxl-mgmt.o diff --git a/drivers/clk/sifive/analogbits-wrpll-cln28hpc.h b/drivers/clk/sifive/analogbits-wrpll-cln28hpc.h deleted file mode 100644 index 4432e24749..0000000000 --- a/drivers/clk/sifive/analogbits-wrpll-cln28hpc.h +++ /dev/null @@ -1,101 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) 2019 Western Digital Corporation or its affiliates. - * - * Copyright (C) 2018 SiFive, Inc. - * Wesley Terpstra - * Paul Walmsley - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H -#define __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H - -#include <linux/types.h> - -/* DIVQ_VALUES: number of valid DIVQ values */ -#define DIVQ_VALUES 6 - -/* - * Bit definitions for struct analogbits_wrpll_cfg.flags - * - * WRPLL_FLAGS_BYPASS_FLAG: if set, the PLL is either in bypass, or should be - * programmed to enter bypass - * WRPLL_FLAGS_RESET_FLAG: if set, the PLL is in reset - * WRPLL_FLAGS_INT_FEEDBACK_FLAG: if set, the PLL is configured for internal - * feedback mode - * WRPLL_FLAGS_EXT_FEEDBACK_FLAG: if set, the PLL is configured for external - * feedback mode (not yet supported by this driver) - * - * The flags WRPLL_FLAGS_INT_FEEDBACK_FLAG and WRPLL_FLAGS_EXT_FEEDBACK_FLAG are - * mutually exclusive. If both bits are set, or both are zero, the struct - * analogbits_wrpll_cfg record is uninitialized or corrupt. - */ -#define WRPLL_FLAGS_BYPASS_SHIFT 0 -#define WRPLL_FLAGS_BYPASS_MASK BIT(WRPLL_FLAGS_BYPASS_SHIFT) -#define WRPLL_FLAGS_RESET_SHIFT 1 -#define WRPLL_FLAGS_RESET_MASK BIT(WRPLL_FLAGS_RESET_SHIFT) -#define WRPLL_FLAGS_INT_FEEDBACK_SHIFT 2 -#define WRPLL_FLAGS_INT_FEEDBACK_MASK BIT(WRPLL_FLAGS_INT_FEEDBACK_SHIFT) -#define WRPLL_FLAGS_EXT_FEEDBACK_SHIFT 3 -#define WRPLL_FLAGS_EXT_FEEDBACK_MASK BIT(WRPLL_FLAGS_EXT_FEEDBACK_SHIFT) - -/** - * struct analogbits_wrpll_cfg - WRPLL configuration values - * @divr: reference divider value (6 bits), as presented to the PLL signals. - * @divf: feedback divider value (9 bits), as presented to the PLL signals. - * @divq: output divider value (3 bits), as presented to the PLL signals. - * @flags: PLL configuration flags. See above for more information. - * @range: PLL loop filter range. See below for more information. - * @_output_rate_cache: cached output rates, swept across DIVQ. - * @_parent_rate: PLL refclk rate for which values are valid - * @_max_r: maximum possible R divider value, given @parent_rate - * @_init_r: initial R divider value to start the search from - * - * @divr, @divq, @divq, @range represent what the PLL expects to see - * on its input signals. Thus @divr and @divf are the actual divisors - * minus one. @divq is a power-of-two divider; for example, 1 = - * divide-by-2 and 6 = divide-by-64. 0 is an invalid @divq value. - * - * When initially passing a struct analogbits_wrpll_cfg record, the - * record should be zero-initialized with the exception of the @flags - * field. The only flag bits that need to be set are either - * WRPLL_FLAGS_INT_FEEDBACK or WRPLL_FLAGS_EXT_FEEDBACK. - * - * Field names beginning with an underscore should be considered - * private to the wrpll-cln28hpc.c code. 
- */ -struct analogbits_wrpll_cfg { - u8 divr; - u8 divq; - u8 range; - u8 flags; - u16 divf; - u32 _output_rate_cache[DIVQ_VALUES]; - unsigned long _parent_rate; - u8 _max_r; - u8 _init_r; -}; - -/* - * Function prototypes - */ - -int analogbits_wrpll_configure_for_rate(struct analogbits_wrpll_cfg *c, - u32 target_rate, - unsigned long parent_rate); - -unsigned int analogbits_wrpll_calc_max_lock_us(struct analogbits_wrpll_cfg *c); - -unsigned long analogbits_wrpll_calc_output_rate(struct analogbits_wrpll_cfg *c, - unsigned long parent_rate); - -#endif /* __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H */ diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c index 2d47ebc6b1..ce0769f2d1 100644 --- a/drivers/clk/sifive/fu540-prci.c +++ b/drivers/clk/sifive/fu540-prci.c @@ -37,9 +37,8 @@ #include <errno.h> #include <linux/math64.h> -#include <dt-bindings/clk/sifive-fu540-prci.h> - -#include "analogbits-wrpll-cln28hpc.h" +#include <linux/clk/analogbits-wrpll-cln28hpc.h> +#include <dt-bindings/clock/sifive-fu540-prci.h> /* * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects: @@ -159,30 +158,32 @@ * PRCI per-device instance data */ struct __prci_data { - void *base; - struct clk parent; + void *va; + struct clk parent_hfclk; + struct clk parent_rtcclk; }; /** * struct __prci_wrpll_data - WRPLL configuration and integration data * @c: WRPLL current configuration record - * @bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL) - * @no_bypass: fn ptr to code to not bypass the WRPLL (if applicable; else NULL) + * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL) + * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL) * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address * - * @bypass and @no_bypass are used for WRPLL instances that contain a separate - * external glitchless clock mux downstream from the PLL. The WRPLL internal - * bypass mux is not glitchless. + * @enable_bypass and @disable_bypass are used for WRPLL instances + * that contain a separate external glitchless clock mux downstream + * from the PLL. The WRPLL internal bypass mux is not glitchless. 
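These bypass callbacks are exercised whenever a consumer re-targets a PRCI-provided rate; for orientation, a consumer-side call might look like the sketch below (the clock index 0 and the 150 MHz request are assumptions for the example, not values taken from this patch).

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <linux/err.h>

/*
 * Illustrative consumer-side sketch: re-target a PRCI-provided clock.
 * The clock index and requested rate are example values only.
 */
static int retarget_prci_clock(struct udevice *consumer)
{
	struct clk clk;
	ulong new_rate;
	int ret;

	ret = clk_get_by_index(consumer, 0, &clk);
	if (ret)
		return ret;

	/* Ends up in sifive_fu540_prci_set_rate(), which reprograms the WRPLL */
	new_rate = clk_set_rate(&clk, 150000000UL);
	if (IS_ERR_VALUE(new_rate))
		return (int)new_rate;

	return 0;
}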
*/ struct __prci_wrpll_data { - struct analogbits_wrpll_cfg c; - void (*bypass)(struct __prci_data *pd); - void (*no_bypass)(struct __prci_data *pd); + struct wrpll_cfg c; + void (*enable_bypass)(struct __prci_data *pd); + void (*disable_bypass)(struct __prci_data *pd); u8 cfg0_offs; }; struct __prci_clock; +/* struct __prci_clock_ops - clock operations */ struct __prci_clock_ops { int (*set_rate)(struct __prci_clock *pc, unsigned long rate, @@ -198,8 +199,7 @@ struct __prci_clock_ops { * struct __prci_clock - describes a clock device managed by PRCI * @name: user-readable clock name string - should match the manual * @parent_name: parent name for this clock - * @ops: struct clk_ops for the Linux clock framework to use for control - * @hw: Linux-private clock data + * @ops: struct __prci_clock_ops for control * @pwd: WRPLL-specific data, associated with this clock (if not NULL) * @pd: PRCI-specific data associated with this clock (if not NULL) * @@ -233,19 +233,19 @@ struct __prci_clock { */ static u32 __prci_readl(struct __prci_data *pd, u32 offs) { - return readl(pd->base + offs); + return readl(pd->va + offs); } static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd) { - return writel(v, pd->base + offs); + writel(v, pd->va + offs); } /* WRPLL-related private functions */ /** * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters - * @c: ptr to a struct analogbits_wrpll_cfg record to write config into + * @c: ptr to a struct wrpll_cfg record to write config into * @r: value read from the PRCI PLL configuration register * * Given a value @r read from an FU540 PRCI PLL configuration register, @@ -257,7 +257,7 @@ static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd) * * Context: Any context. */ -static void __prci_wrpll_unpack(struct analogbits_wrpll_cfg *c, u32 r) +static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r) { u32 v; @@ -280,15 +280,13 @@ static void __prci_wrpll_unpack(struct analogbits_wrpll_cfg *c, u32 r) c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK); - if (r & PRCI_COREPLLCFG0_FSE_MASK) - c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK; - else - c->flags |= WRPLL_FLAGS_EXT_FEEDBACK_MASK; + /* external feedback mode not supported */ + c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK; } /** * __prci_wrpll_pack() - pack PLL configuration parameters into a register value - * @c: pointer to a struct analogbits_wrpll_cfg record containing the PLL's cfg + * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg * * Using a set of WRPLL configuration values pointed to by @c, * assemble a PRCI PLL configuration register value, and return it to @@ -301,7 +299,7 @@ static void __prci_wrpll_unpack(struct analogbits_wrpll_cfg *c, u32 r) * Returns: a value suitable for writing into a PRCI PLL configuration * register */ -static u32 __prci_wrpll_pack(struct analogbits_wrpll_cfg *c) +static u32 __prci_wrpll_pack(const struct wrpll_cfg *c) { u32 r = 0; @@ -309,8 +307,9 @@ static u32 __prci_wrpll_pack(struct analogbits_wrpll_cfg *c) r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT; r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT; r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT; - if (c->flags & WRPLL_FLAGS_INT_FEEDBACK_MASK) - r |= PRCI_COREPLLCFG0_FSE_MASK; + + /* external feedback mode not supported */ + r |= PRCI_COREPLLCFG0_FSE_MASK; return r; } @@ -349,11 +348,11 @@ static void __prci_wrpll_read_cfg(struct __prci_data *pd, */ static void __prci_wrpll_write_cfg(struct __prci_data *pd, struct __prci_wrpll_data *pwd, - 
struct analogbits_wrpll_cfg *c) + struct wrpll_cfg *c) { __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd); - memcpy(&pwd->c, c, sizeof(struct analogbits_wrpll_cfg)); + memcpy(&pwd->c, c, sizeof(*c)); } /* Core clock mux control */ @@ -404,7 +403,7 @@ static unsigned long sifive_fu540_prci_wrpll_recalc_rate( { struct __prci_wrpll_data *pwd = pc->pwd; - return analogbits_wrpll_calc_output_rate(&pwd->c, parent_rate); + return wrpll_calc_output_rate(&pwd->c, parent_rate); } static unsigned long sifive_fu540_prci_wrpll_round_rate( @@ -413,13 +412,13 @@ static unsigned long sifive_fu540_prci_wrpll_round_rate( unsigned long *parent_rate) { struct __prci_wrpll_data *pwd = pc->pwd; - struct analogbits_wrpll_cfg c; + struct wrpll_cfg c; memcpy(&c, &pwd->c, sizeof(c)); - analogbits_wrpll_configure_for_rate(&c, rate, *parent_rate); + wrpll_configure_for_rate(&c, rate, *parent_rate); - return analogbits_wrpll_calc_output_rate(&c, *parent_rate); + return wrpll_calc_output_rate(&c, *parent_rate); } static int sifive_fu540_prci_wrpll_set_rate(struct __prci_clock *pc, @@ -430,19 +429,19 @@ static int sifive_fu540_prci_wrpll_set_rate(struct __prci_clock *pc, struct __prci_data *pd = pc->pd; int r; - r = analogbits_wrpll_configure_for_rate(&pwd->c, rate, parent_rate); + r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate); if (r) - return -ERANGE; + return r; - if (pwd->bypass) - pwd->bypass(pd); + if (pwd->enable_bypass) + pwd->enable_bypass(pd); __prci_wrpll_write_cfg(pd, pwd, &pwd->c); - udelay(analogbits_wrpll_calc_max_lock_us(&pwd->c)); + udelay(wrpll_calc_max_lock_us(&pwd->c)); - if (pwd->no_bypass) - pwd->no_bypass(pd); + if (pwd->disable_bypass) + pwd->disable_bypass(pd); return 0; } @@ -484,8 +483,8 @@ static const struct __prci_clock_ops sifive_fu540_prci_tlclksel_clk_ops = { static struct __prci_wrpll_data __prci_corepll_data = { .cfg0_offs = PRCI_COREPLLCFG0_OFFSET, - .bypass = __prci_coreclksel_use_hfclk, - .no_bypass = __prci_coreclksel_use_corepll, + .enable_bypass = __prci_coreclksel_use_hfclk, + .disable_bypass = __prci_coreclksel_use_corepll, }; static struct __prci_wrpll_data __prci_ddrpll_data = { @@ -526,6 +525,27 @@ static struct __prci_clock __prci_init_clocks[] = { }, }; +static ulong sifive_fu540_prci_parent_rate(struct __prci_clock *pc) +{ + ulong parent_rate; + struct __prci_clock *p; + + if (strcmp(pc->parent_name, "corepll") == 0) { + p = &__prci_init_clocks[PRCI_CLK_COREPLL]; + if (!p->pd || !p->ops->recalc_rate) + return -ENXIO; + + return p->ops->recalc_rate(p, sifive_fu540_prci_parent_rate(p)); + } + + if (strcmp(pc->parent_name, "rtcclk") == 0) + parent_rate = clk_get_rate(&pc->pd->parent_rtcclk); + else + parent_rate = clk_get_rate(&pc->pd->parent_hfclk); + + return parent_rate; +} + static ulong sifive_fu540_prci_get_rate(struct clk *clk) { struct __prci_clock *pc; @@ -537,7 +557,7 @@ static ulong sifive_fu540_prci_get_rate(struct clk *clk) if (!pc->pd || !pc->ops->recalc_rate) return -ENXIO; - return pc->ops->recalc_rate(pc, clk_get_rate(&pc->pd->parent)); + return pc->ops->recalc_rate(pc, sifive_fu540_prci_parent_rate(pc)); } static ulong sifive_fu540_prci_set_rate(struct clk *clk, ulong rate) @@ -552,7 +572,7 @@ static ulong sifive_fu540_prci_set_rate(struct clk *clk, ulong rate) if (!pc->pd || !pc->ops->set_rate) return -ENXIO; - err = pc->ops->set_rate(pc, rate, clk_get_rate(&pc->pd->parent)); + err = pc->ops->set_rate(pc, rate, sifive_fu540_prci_parent_rate(pc)); if (err) return err; @@ -565,11 +585,15 @@ static int sifive_fu540_prci_probe(struct udevice 
*dev) struct __prci_clock *pc; struct __prci_data *pd = dev_get_priv(dev); - pd->base = (void *)dev_read_addr(dev); - if (IS_ERR(pd->base)) - return PTR_ERR(pd->base); + pd->va = (void *)dev_read_addr(dev); + if (IS_ERR(pd->va)) + return PTR_ERR(pd->va); + + err = clk_get_by_index(dev, 0, &pd->parent_hfclk); + if (err) + return err; - err = clk_get_by_index(dev, 0, &pd->parent); + err = clk_get_by_index(dev, 1, &pd->parent_rtcclk); if (err) return err; @@ -589,8 +613,7 @@ static struct clk_ops sifive_fu540_prci_ops = { }; static const struct udevice_id sifive_fu540_prci_ids[] = { - { .compatible = "sifive,fu540-c000-prci0" }, - { .compatible = "sifive,aloeprci0" }, + { .compatible = "sifive,fu540-c000-prci" }, { } }; diff --git a/drivers/clk/sifive/gemgxl-mgmt.c b/drivers/clk/sifive/gemgxl-mgmt.c deleted file mode 100644 index eb37416b5e..0000000000 --- a/drivers/clk/sifive/gemgxl-mgmt.c +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2019, Bin Meng <bmeng.cn@gmail.com> - */ - -#include <common.h> -#include <clk-uclass.h> -#include <dm.h> -#include <asm/io.h> - -struct gemgxl_mgmt_regs { - __u32 tx_clk_sel; -}; - -struct gemgxl_mgmt_platdata { - struct gemgxl_mgmt_regs *regs; -}; - -static int gemgxl_mgmt_ofdata_to_platdata(struct udevice *dev) -{ - struct gemgxl_mgmt_platdata *plat = dev_get_platdata(dev); - - plat->regs = (struct gemgxl_mgmt_regs *)dev_read_addr(dev); - - return 0; -} - -static ulong gemgxl_mgmt_set_rate(struct clk *clk, ulong rate) -{ - struct gemgxl_mgmt_platdata *plat = dev_get_platdata(clk->dev); - - /* - * GEMGXL TX clock operation mode: - * - * 0 = GMII mode. Use 125 MHz gemgxlclk from PRCI in TX logic - * and output clock on GMII output signal GTX_CLK - * 1 = MII mode. Use MII input signal TX_CLK in TX logic - */ - writel(rate != 125000000, &plat->regs->tx_clk_sel); - - return 0; -} - -const struct clk_ops gemgxl_mgmt_ops = { - .set_rate = gemgxl_mgmt_set_rate, -}; - -static const struct udevice_id gemgxl_mgmt_match[] = { - { .compatible = "sifive,cadencegemgxlmgmt0", }, - { /* sentinel */ } -}; - -U_BOOT_DRIVER(sifive_gemgxl_mgmt) = { - .name = "sifive-gemgxl-mgmt", - .id = UCLASS_CLK, - .of_match = gemgxl_mgmt_match, - .ofdata_to_platdata = gemgxl_mgmt_ofdata_to_platdata, - .platdata_auto_alloc_size = sizeof(struct gemgxl_mgmt_platdata), - .ops = &gemgxl_mgmt_ops, -}; diff --git a/drivers/core/device.c b/drivers/core/device.c index 0d15e5062b..474c1642ee 100644 --- a/drivers/core/device.c +++ b/drivers/core/device.c @@ -388,7 +388,8 @@ int device_probe(struct udevice *dev) if (dev->parent && device_get_uclass_id(dev) != UCLASS_PINCTRL) pinctrl_select_state(dev, "default"); - if (dev->parent && device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) { + if (CONFIG_IS_ENABLED(POWER_DOMAIN) && dev->parent && + device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) { if (!power_domain_get(dev, &pd)) power_domain_on(&pd); } @@ -409,10 +410,16 @@ int device_probe(struct udevice *dev) goto fail; } - /* Process 'assigned-{clocks/clock-parents/clock-rates}' properties */ - ret = clk_set_defaults(dev); - if (ret) - goto fail; + /* Only handle devices that have a valid ofnode */ + if (dev_of_valid(dev)) { + /* + * Process 'assigned-{clocks/clock-parents/clock-rates}' + * properties + */ + ret = clk_set_defaults(dev); + if (ret) + goto fail; + } if (drv->probe) { ret = drv->probe(dev); diff --git a/drivers/core/of_addr.c b/drivers/core/of_addr.c index 1bfaaeec00..4e256d9926 100644 --- a/drivers/core/of_addr.c +++ b/drivers/core/of_addr.c @@ 
-318,6 +318,10 @@ u64 of_translate_address(const struct device_node *dev, const __be32 *in_addr) return __of_translate_address(dev, in_addr, "ranges"); } +u64 of_translate_dma_address(const struct device_node *dev, const __be32 *in_addr) +{ + return __of_translate_address(dev, in_addr, "dma-ranges"); +} static int __of_address_to_resource(const struct device_node *dev, const __be32 *addrp, u64 size, unsigned int flags, diff --git a/drivers/core/ofnode.c b/drivers/core/ofnode.c index c72c6e2673..2ac73af934 100644 --- a/drivers/core/ofnode.c +++ b/drivers/core/ofnode.c @@ -770,6 +770,14 @@ u64 ofnode_translate_address(ofnode node, const fdt32_t *in_addr) return fdt_translate_address(gd->fdt_blob, ofnode_to_offset(node), in_addr); } +u64 ofnode_translate_dma_address(ofnode node, const fdt32_t *in_addr) +{ + if (ofnode_is_np(node)) + return of_translate_dma_address(ofnode_to_np(node), in_addr); + else + return fdt_translate_dma_address(gd->fdt_blob, ofnode_to_offset(node), in_addr); +} + int ofnode_device_is_compatible(ofnode node, const char *compat) { if (ofnode_is_np(node)) @@ -876,5 +884,5 @@ int ofnode_set_enabled(ofnode node, bool value) if (value) return ofnode_write_string(node, "status", "okay"); else - return ofnode_write_string(node, "status", "disable"); + return ofnode_write_string(node, "status", "disabled"); } diff --git a/drivers/core/read.c b/drivers/core/read.c index 6bda077a34..1a044b05e8 100644 --- a/drivers/core/read.c +++ b/drivers/core/read.c @@ -265,6 +265,11 @@ u64 dev_translate_address(struct udevice *dev, const fdt32_t *in_addr) return ofnode_translate_address(dev_ofnode(dev), in_addr); } +u64 dev_translate_dma_address(struct udevice *dev, const fdt32_t *in_addr) +{ + return ofnode_translate_dma_address(dev_ofnode(dev), in_addr); +} + int dev_read_alias_highest_id(const char *stem) { if (of_live_active()) diff --git a/drivers/ddr/altera/sequencer.c b/drivers/ddr/altera/sequencer.c index 0e4526288e..b85b56efe5 100644 --- a/drivers/ddr/altera/sequencer.c +++ b/drivers/ddr/altera/sequencer.c @@ -9,31 +9,27 @@ #include <errno.h> #include "sequencer.h" -static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs = +static const struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs = (struct socfpga_sdr_rw_load_manager *) (SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800); -static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs = - (struct socfpga_sdr_rw_load_jump_manager *) +static const struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs + = (struct socfpga_sdr_rw_load_jump_manager *) (SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00); -static struct socfpga_sdr_reg_file *sdr_reg_file = +static const struct socfpga_sdr_reg_file *sdr_reg_file = (struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS; -static struct socfpga_sdr_scc_mgr *sdr_scc_mgr = +static const struct socfpga_sdr_scc_mgr *sdr_scc_mgr = (struct socfpga_sdr_scc_mgr *) (SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00); -static struct socfpga_phy_mgr_cmd *phy_mgr_cmd = +static const struct socfpga_phy_mgr_cmd *phy_mgr_cmd = (struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS; -static struct socfpga_phy_mgr_cfg *phy_mgr_cfg = +static const struct socfpga_phy_mgr_cfg *phy_mgr_cfg = (struct socfpga_phy_mgr_cfg *) (SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40); -static struct socfpga_data_mgr *data_mgr = +static const struct socfpga_data_mgr *data_mgr = (struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS; -static struct socfpga_sdr_ctrl *sdr_ctrl = +static const struct socfpga_sdr_ctrl *sdr_ctrl = 
(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS; -const struct socfpga_sdram_rw_mgr_config *rwcfg; -const struct socfpga_sdram_io_config *iocfg; -const struct socfpga_sdram_misc_config *misccfg; - #define DELTA_D 1 /* @@ -55,37 +51,20 @@ const struct socfpga_sdram_misc_config *misccfg; #define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \ STATIC_SKIP_DELAY_LOOPS) -/* calibration steps requested by the rtl */ -static u16 dyn_calib_steps; - -/* - * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option - * instead of static, we use boolean logic to select between - * non-skip and skip values - * - * The mask is set to include all bits when not-skipping, but is - * zero when skipping - */ - -static u16 skip_delay_mask; /* mask off bits when skipping/not-skipping */ - #define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \ - ((non_skip_value) & skip_delay_mask) - -static struct gbl_type *gbl; -static struct param_type *param; + ((non_skip_value) & seq->skip_delay_mask) -static void set_failing_group_stage(u32 group, u32 stage, - u32 substage) +static void set_failing_group_stage(struct socfpga_sdrseq *seq, + u32 group, u32 stage, u32 substage) { /* * Only set the global stage if there was not been any other * failing group */ - if (gbl->error_stage == CAL_STAGE_NIL) { - gbl->error_substage = substage; - gbl->error_stage = stage; - gbl->error_group = group; + if (seq->gbl.error_stage == CAL_STAGE_NIL) { + seq->gbl.error_substage = substage; + seq->gbl.error_stage = stage; + seq->gbl.error_group = group; } } @@ -110,7 +89,7 @@ static void reg_file_set_sub_stage(u8 set_sub_stage) * * Initialize PHY Manager. */ -static void phy_mgr_initialize(void) +static void phy_mgr_initialize(struct socfpga_sdrseq *seq) { u32 ratio; @@ -132,15 +111,17 @@ static void phy_mgr_initialize(void) writel(0, &phy_mgr_cfg->cal_debug_info); /* Init params only if we do NOT skip calibration. */ - if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) + if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) return; - ratio = rwcfg->mem_dq_per_read_dqs / - rwcfg->mem_virtual_groups_per_read_dqs; - param->read_correct_mask_vg = (1 << ratio) - 1; - param->write_correct_mask_vg = (1 << ratio) - 1; - param->read_correct_mask = (1 << rwcfg->mem_dq_per_read_dqs) - 1; - param->write_correct_mask = (1 << rwcfg->mem_dq_per_write_dqs) - 1; + ratio = seq->rwcfg->mem_dq_per_read_dqs / + seq->rwcfg->mem_virtual_groups_per_read_dqs; + seq->param.read_correct_mask_vg = (1 << ratio) - 1; + seq->param.write_correct_mask_vg = (1 << ratio) - 1; + seq->param.read_correct_mask = (1 << seq->rwcfg->mem_dq_per_read_dqs) + - 1; + seq->param.write_correct_mask = (1 << seq->rwcfg->mem_dq_per_write_dqs) + - 1; } /** @@ -150,7 +131,8 @@ static void phy_mgr_initialize(void) * * Set Rank and ODT mask (On-Die Termination). 
*/ -static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode) +static void set_rank_and_odt_mask(struct socfpga_sdrseq *seq, + const u32 rank, const u32 odt_mode) { u32 odt_mask_0 = 0; u32 odt_mask_1 = 0; @@ -160,14 +142,14 @@ static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode) odt_mask_0 = 0x0; odt_mask_1 = 0x0; } else { /* RW_MGR_ODT_MODE_READ_WRITE */ - switch (rwcfg->mem_number_of_ranks) { + switch (seq->rwcfg->mem_number_of_ranks) { case 1: /* 1 Rank */ /* Read: ODT = 0 ; Write: ODT = 1 */ odt_mask_0 = 0x0; odt_mask_1 = 0x1; break; case 2: /* 2 Ranks */ - if (rwcfg->mem_number_of_cs_per_dimm == 1) { + if (seq->rwcfg->mem_number_of_cs_per_dimm == 1) { /* * - Dual-Slot , Single-Rank (1 CS per DIMM) * OR @@ -307,16 +289,18 @@ static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay) scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay); } -static void scc_mgr_set_dqs_io_in_delay(u32 delay) +static void scc_mgr_set_dqs_io_in_delay(struct socfpga_sdrseq *seq, + u32 delay) { - scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs, - delay); + scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, + seq->rwcfg->mem_dq_per_write_dqs, delay); } -static void scc_mgr_set_dm_in_delay(u32 dm, u32 delay) +static void scc_mgr_set_dm_in_delay(struct socfpga_sdrseq *seq, u32 dm, + u32 delay) { scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, - rwcfg->mem_dq_per_write_dqs + 1 + dm, + seq->rwcfg->mem_dq_per_write_dqs + 1 + dm, delay); } @@ -325,16 +309,18 @@ static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay) scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay); } -static void scc_mgr_set_dqs_out1_delay(u32 delay) +static void scc_mgr_set_dqs_out1_delay(struct socfpga_sdrseq *seq, + u32 delay) { - scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs, - delay); + scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, + seq->rwcfg->mem_dq_per_write_dqs, delay); } -static void scc_mgr_set_dm_out1_delay(u32 dm, u32 delay) +static void scc_mgr_set_dm_out1_delay(struct socfpga_sdrseq *seq, u32 dm, + u32 delay) { scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, - rwcfg->mem_dq_per_write_dqs + 1 + dm, + seq->rwcfg->mem_dq_per_write_dqs + 1 + dm, delay); } @@ -372,12 +358,13 @@ static void scc_mgr_load_dm(u32 dm) * This function sets the SCC Manager (Scan Chain Control Manager) register * and optionally triggers the SCC update for all ranks. */ -static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val, +static void scc_mgr_set_all_ranks(struct socfpga_sdrseq *seq, + const u32 off, const u32 grp, const u32 val, const int update) { u32 r; - for (r = 0; r < rwcfg->mem_number_of_ranks; + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r += NUM_RANKS_PER_SHADOW_REG) { scc_mgr_set(off, grp, val); @@ -388,7 +375,8 @@ static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val, } } -static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase) +static void scc_mgr_set_dqs_en_phase_all_ranks(struct socfpga_sdrseq *seq, + u32 read_group, u32 phase) { /* * USER although the h/w doesn't support different phases per @@ -398,12 +386,12 @@ static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase) * for efficiency, the scan chain update should occur only * once to sr0. 
*/ - scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET, + scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase, 0); } -static void scc_mgr_set_dqdqs_output_phase_all_ranks(u32 write_group, - u32 phase) +static void scc_mgr_set_dqdqs_output_phase_all_ranks(struct socfpga_sdrseq *seq, + u32 write_group, u32 phase) { /* * USER although the h/w doesn't support different phases per @@ -413,12 +401,12 @@ static void scc_mgr_set_dqdqs_output_phase_all_ranks(u32 write_group, * for efficiency, the scan chain update should occur only * once to sr0. */ - scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, + scc_mgr_set_all_ranks(seq, SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase, 0); } -static void scc_mgr_set_dqs_en_delay_all_ranks(u32 read_group, - u32 delay) +static void scc_mgr_set_dqs_en_delay_all_ranks(struct socfpga_sdrseq *seq, + u32 read_group, u32 delay) { /* * In shadow register mode, the T11 settings are stored in @@ -428,7 +416,7 @@ static void scc_mgr_set_dqs_en_delay_all_ranks(u32 read_group, * select_shadow_regs_for_update with update_scan_chains * set to 0. */ - scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET, + scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay, 1); } @@ -439,10 +427,11 @@ static void scc_mgr_set_dqs_en_delay_all_ranks(u32 read_group, * * This function sets the OCT output delay in SCC manager. */ -static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay) +static void scc_mgr_set_oct_out1_delay(struct socfpga_sdrseq *seq, + const u32 write_group, const u32 delay) { - const int ratio = rwcfg->mem_if_read_dqs_width / - rwcfg->mem_if_write_dqs_width; + const int ratio = seq->rwcfg->mem_if_read_dqs_width / + seq->rwcfg->mem_if_write_dqs_width; const int base = write_group * ratio; int i; /* @@ -490,7 +479,7 @@ static void scc_mgr_set_hhp_extras(void) * * Zero all DQS config. */ -static void scc_mgr_zero_all(void) +static void scc_mgr_zero_all(struct socfpga_sdrseq *seq) { int i, r; @@ -498,23 +487,26 @@ static void scc_mgr_zero_all(void) * USER Zero all DQS config settings, across all groups and all * shadow registers */ - for (r = 0; r < rwcfg->mem_number_of_ranks; + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r += NUM_RANKS_PER_SHADOW_REG) { - for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) { + for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) { /* * The phases actually don't exist on a per-rank basis, * but there's no harm updating them several times, so * let's keep the code simple. */ - scc_mgr_set_dqs_bus_in_delay(i, iocfg->dqs_in_reserve); + scc_mgr_set_dqs_bus_in_delay(i, + seq->iocfg->dqs_in_reserve + ); scc_mgr_set_dqs_en_phase(i, 0); scc_mgr_set_dqs_en_delay(i, 0); } - for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) { + for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) { scc_mgr_set_dqdqs_output_phase(i, 0); /* Arria V/Cyclone V don't have out2. */ - scc_mgr_set_oct_out1_delay(i, iocfg->dqs_out_reserve); + scc_mgr_set_oct_out1_delay(seq, i, + seq->iocfg->dqs_out_reserve); } } @@ -551,10 +543,11 @@ static void scc_set_bypass_mode(const u32 write_group) * * Load DQS settings for Write Group, do not trigger SCC update. 
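The function below, like scc_mgr_set_oct_out1_delay() above, derives the range of read DQS groups owned by one write group from the read/write width ratio; a small worked example with assumed widths (8 read DQS groups, 4 write groups, so each write group owns two read DQS groups):

#include <stdio.h>

/*
 * Worked example of the write-group to read-DQS-group mapping.  The
 * widths are assumptions for illustration, not values from any
 * particular SDRAM configuration.
 */
int main(void)
{
	const int read_dqs_width = 8;	/* stands in for mem_if_read_dqs_width */
	const int write_dqs_width = 4;	/* stands in for mem_if_write_dqs_width */
	const int ratio = read_dqs_width / write_dqs_width;	/* 2 */
	const int write_group = 3;
	const int base = write_group * ratio;			/* 6 */
	int i;

	/* Write group 3 drives the DQS/OCT settings of read groups 6 and 7 */
	for (i = 0; i < ratio; i++)
		printf("write group %d -> read DQS group %d\n",
		       write_group, base + i);

	return 0;
}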
*/ -static void scc_mgr_load_dqs_for_write_group(const u32 write_group) +static void scc_mgr_load_dqs_for_write_group(struct socfpga_sdrseq *seq, + const u32 write_group) { - const int ratio = rwcfg->mem_if_read_dqs_width / - rwcfg->mem_if_write_dqs_width; + const int ratio = seq->rwcfg->mem_if_read_dqs_width / + seq->rwcfg->mem_if_write_dqs_width; const int base = write_group * ratio; int i; /* @@ -573,14 +566,15 @@ static void scc_mgr_load_dqs_for_write_group(const u32 write_group) * * Zero DQ, DM, DQS and OCT configs for a group. */ -static void scc_mgr_zero_group(const u32 write_group, const int out_only) +static void scc_mgr_zero_group(struct socfpga_sdrseq *seq, + const u32 write_group, const int out_only) { int i, r; - for (r = 0; r < rwcfg->mem_number_of_ranks; + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r += NUM_RANKS_PER_SHADOW_REG) { /* Zero all DQ config settings. */ - for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) { + for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) { scc_mgr_set_dq_out1_delay(i, 0); if (!out_only) scc_mgr_set_dq_in_delay(i, 0); @@ -592,8 +586,8 @@ static void scc_mgr_zero_group(const u32 write_group, const int out_only) /* Zero all DM config settings. */ for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { if (!out_only) - scc_mgr_set_dm_in_delay(i, 0); - scc_mgr_set_dm_out1_delay(i, 0); + scc_mgr_set_dm_in_delay(seq, i, 0); + scc_mgr_set_dm_out1_delay(seq, i, 0); } /* Multicast to all DM enables. */ @@ -601,12 +595,13 @@ static void scc_mgr_zero_group(const u32 write_group, const int out_only) /* Zero all DQS IO settings. */ if (!out_only) - scc_mgr_set_dqs_io_in_delay(0); + scc_mgr_set_dqs_io_in_delay(seq, 0); /* Arria V/Cyclone V don't have out2. */ - scc_mgr_set_dqs_out1_delay(iocfg->dqs_out_reserve); - scc_mgr_set_oct_out1_delay(write_group, iocfg->dqs_out_reserve); - scc_mgr_load_dqs_for_write_group(write_group); + scc_mgr_set_dqs_out1_delay(seq, seq->iocfg->dqs_out_reserve); + scc_mgr_set_oct_out1_delay(seq, write_group, + seq->iocfg->dqs_out_reserve); + scc_mgr_load_dqs_for_write_group(seq, write_group); /* Multicast to all DQS IO enables (only 1 in total). */ writel(0, &sdr_scc_mgr->dqs_io_ena); @@ -620,69 +615,76 @@ static void scc_mgr_zero_group(const u32 write_group, const int out_only) * apply and load a particular input delay for the DQ pins in a group * group_bgn is the index of the first dq pin (in the write group) */ -static void scc_mgr_apply_group_dq_in_delay(u32 group_bgn, u32 delay) +static void scc_mgr_apply_group_dq_in_delay(struct socfpga_sdrseq *seq, + u32 group_bgn, u32 delay) { u32 i, p; - for (i = 0, p = group_bgn; i < rwcfg->mem_dq_per_read_dqs; i++, p++) { + for (i = 0, p = group_bgn; i < seq->rwcfg->mem_dq_per_read_dqs; + i++, p++) { scc_mgr_set_dq_in_delay(p, delay); scc_mgr_load_dq(p); } } /** - * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group + * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the + * DQ pins in a group * @delay: Delay value * * Apply and load a particular output delay for the DQ pins in a group. 
*/ -static void scc_mgr_apply_group_dq_out1_delay(const u32 delay) +static void scc_mgr_apply_group_dq_out1_delay(struct socfpga_sdrseq *seq, + const u32 delay) { int i; - for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) { + for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) { scc_mgr_set_dq_out1_delay(i, delay); scc_mgr_load_dq(i); } } /* apply and load a particular output delay for the DM pins in a group */ -static void scc_mgr_apply_group_dm_out1_delay(u32 delay1) +static void scc_mgr_apply_group_dm_out1_delay(struct socfpga_sdrseq *seq, + u32 delay1) { u32 i; for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { - scc_mgr_set_dm_out1_delay(i, delay1); + scc_mgr_set_dm_out1_delay(seq, i, delay1); scc_mgr_load_dm(i); } } /* apply and load delay on both DQS and OCT out1 */ -static void scc_mgr_apply_group_dqs_io_and_oct_out1(u32 write_group, - u32 delay) +static void scc_mgr_apply_group_dqs_io_and_oct_out1(struct socfpga_sdrseq *seq, + u32 write_group, u32 delay) { - scc_mgr_set_dqs_out1_delay(delay); + scc_mgr_set_dqs_out1_delay(seq, delay); scc_mgr_load_dqs_io(); - scc_mgr_set_oct_out1_delay(write_group, delay); - scc_mgr_load_dqs_for_write_group(write_group); + scc_mgr_set_oct_out1_delay(seq, write_group, delay); + scc_mgr_load_dqs_for_write_group(seq, write_group); } /** - * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT + * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output + * side: DQ, DM, DQS, OCT * @write_group: Write group * @delay: Delay value * * Apply a delay to the entire output side: DQ, DM, DQS, OCT. */ -static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group, +static void scc_mgr_apply_group_all_out_delay_add(struct socfpga_sdrseq *seq, + const u32 write_group, const u32 delay) { u32 i, new_delay; /* DQ shift */ - for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) + for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) scc_mgr_load_dq(i); /* DM shift */ @@ -691,49 +693,51 @@ static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group, /* DQS shift */ new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay; - if (new_delay > iocfg->io_out2_delay_max) { + if (new_delay > seq->iocfg->io_out2_delay_max) { debug_cond(DLEVEL >= 1, "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n", __func__, __LINE__, write_group, delay, new_delay, - iocfg->io_out2_delay_max, - new_delay - iocfg->io_out2_delay_max); - new_delay -= iocfg->io_out2_delay_max; - scc_mgr_set_dqs_out1_delay(new_delay); + seq->iocfg->io_out2_delay_max, + new_delay - seq->iocfg->io_out2_delay_max); + new_delay -= seq->iocfg->io_out2_delay_max; + scc_mgr_set_dqs_out1_delay(seq, new_delay); } scc_mgr_load_dqs_io(); /* OCT shift */ new_delay = READ_SCC_OCT_OUT2_DELAY + delay; - if (new_delay > iocfg->io_out2_delay_max) { + if (new_delay > seq->iocfg->io_out2_delay_max) { debug_cond(DLEVEL >= 1, "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n", __func__, __LINE__, write_group, delay, - new_delay, iocfg->io_out2_delay_max, - new_delay - iocfg->io_out2_delay_max); - new_delay -= iocfg->io_out2_delay_max; - scc_mgr_set_oct_out1_delay(write_group, new_delay); + new_delay, seq->iocfg->io_out2_delay_max, + new_delay - seq->iocfg->io_out2_delay_max); + new_delay -= seq->iocfg->io_out2_delay_max; + scc_mgr_set_oct_out1_delay(seq, write_group, new_delay); } - scc_mgr_load_dqs_for_write_group(write_group); + scc_mgr_load_dqs_for_write_group(seq, write_group); } /** - * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to 
the entire output side to all ranks + * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output + * side to all ranks * @write_group: Write group * @delay: Delay value * * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks. */ static void -scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group, +scc_mgr_apply_group_all_out_delay_add_all_ranks(struct socfpga_sdrseq *seq, + const u32 write_group, const u32 delay) { int r; - for (r = 0; r < rwcfg->mem_number_of_ranks; + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r += NUM_RANKS_PER_SHADOW_REG) { - scc_mgr_apply_group_all_out_delay_add(write_group, delay); + scc_mgr_apply_group_all_out_delay_add(seq, write_group, delay); writel(0, &sdr_scc_mgr->update); } } @@ -744,7 +748,7 @@ scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group, * Optimization used to recover some slots in ddr3 inst_rom could be * applied to other protocols if we wanted to */ -static void set_jump_as_return(void) +static void set_jump_as_return(struct socfpga_sdrseq *seq) { /* * To save space, we replace return with jump to special shared @@ -752,7 +756,7 @@ static void set_jump_as_return(void) * we always jump. */ writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0); - writel(rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0); + writel(seq->rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0); } /** @@ -761,7 +765,8 @@ static void set_jump_as_return(void) * * Delay for N memory clocks. */ -static void delay_for_n_mem_clocks(const u32 clocks) +static void delay_for_n_mem_clocks(struct socfpga_sdrseq *seq, + const u32 clocks) { u32 afi_clocks; u16 c_loop; @@ -771,7 +776,7 @@ static void delay_for_n_mem_clocks(const u32 clocks) debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks); /* Scale (rounding up) to get afi clocks. */ - afi_clocks = DIV_ROUND_UP(clocks, misccfg->afi_rate_ratio); + afi_clocks = DIV_ROUND_UP(clocks, seq->misccfg->afi_rate_ratio); if (afi_clocks) /* Temporary underflow protection */ afi_clocks--; @@ -807,10 +812,10 @@ static void delay_for_n_mem_clocks(const u32 clocks) writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), &sdr_rw_load_mgr_regs->load_cntr1); - writel(rwcfg->idle_loop1, + writel(seq->rwcfg->idle_loop1, &sdr_rw_load_jump_mgr_regs->load_jump_add1); - writel(rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS | + writel(seq->rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET); } else { writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), @@ -819,14 +824,14 @@ static void delay_for_n_mem_clocks(const u32 clocks) writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer), &sdr_rw_load_mgr_regs->load_cntr1); - writel(rwcfg->idle_loop2, + writel(seq->rwcfg->idle_loop2, &sdr_rw_load_jump_mgr_regs->load_jump_add0); - writel(rwcfg->idle_loop2, + writel(seq->rwcfg->idle_loop2, &sdr_rw_load_jump_mgr_regs->load_jump_add1); do { - writel(rwcfg->idle_loop2, + writel(seq->rwcfg->idle_loop2, SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET); } while (c_loop-- != 0); @@ -843,7 +848,8 @@ static void delay_for_n_mem_clocks(const u32 clocks) * * Load instruction registers. 
*/ -static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump) +static void rw_mgr_mem_init_load_regs(struct socfpga_sdrseq *seq, + u32 cntr0, u32 cntr1, u32 cntr2, u32 jump) { u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; @@ -873,58 +879,59 @@ static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump) * * Load user calibration values and optionally precharge the banks. */ -static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2, +static void rw_mgr_mem_load_user(struct socfpga_sdrseq *seq, + const u32 fin1, const u32 fin2, const int precharge) { u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; u32 r; - for (r = 0; r < rwcfg->mem_number_of_ranks; r++) { + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) { /* set rank */ - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF); /* precharge all banks ... */ if (precharge) - writel(rwcfg->precharge_all, grpaddr); + writel(seq->rwcfg->precharge_all, grpaddr); /* * USER Use Mirror-ed commands for odd ranks if address * mirrorring is on */ - if ((rwcfg->mem_address_mirroring >> r) & 0x1) { - set_jump_as_return(); - writel(rwcfg->mrs2_mirr, grpaddr); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - writel(rwcfg->mrs3_mirr, grpaddr); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - writel(rwcfg->mrs1_mirr, grpaddr); - delay_for_n_mem_clocks(4); - set_jump_as_return(); + if ((seq->rwcfg->mem_address_mirroring >> r) & 0x1) { + set_jump_as_return(seq); + writel(seq->rwcfg->mrs2_mirr, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs3_mirr, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs1_mirr, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); writel(fin1, grpaddr); } else { - set_jump_as_return(); - writel(rwcfg->mrs2, grpaddr); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - writel(rwcfg->mrs3, grpaddr); - delay_for_n_mem_clocks(4); - set_jump_as_return(); - writel(rwcfg->mrs1, grpaddr); - set_jump_as_return(); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs2, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs3, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs1, grpaddr); + set_jump_as_return(seq); writel(fin2, grpaddr); } if (precharge) continue; - set_jump_as_return(); - writel(rwcfg->zqcl, grpaddr); + set_jump_as_return(seq); + writel(seq->rwcfg->zqcl, grpaddr); /* tZQinit = tDLLK = 512 ck cycles */ - delay_for_n_mem_clocks(512); + delay_for_n_mem_clocks(seq, 512); } } @@ -933,7 +940,7 @@ static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2, * * Initialize RW Manager. */ -static void rw_mgr_mem_initialize(void) +static void rw_mgr_mem_initialize(struct socfpga_sdrseq *seq) { debug("%s:%d\n", __func__, __LINE__); @@ -964,10 +971,10 @@ static void rw_mgr_mem_initialize(void) * One possible solution is n = 0 , a = 256 , b = 106 => a = FF, * b = 6A */ - rw_mgr_mem_init_load_regs(misccfg->tinit_cntr0_val, - misccfg->tinit_cntr1_val, - misccfg->tinit_cntr2_val, - rwcfg->init_reset_0_cke_0); + rw_mgr_mem_init_load_regs(seq, seq->misccfg->tinit_cntr0_val, + seq->misccfg->tinit_cntr1_val, + seq->misccfg->tinit_cntr2_val, + seq->rwcfg->init_reset_0_cke_0); /* Indicate that memory is stable. 
*/ writel(1, &phy_mgr_cfg->reset_mem_stbl); @@ -986,18 +993,18 @@ static void rw_mgr_mem_initialize(void) * One possible solution is n = 2 , a = 131 , b = 256 => a = 83, * b = FF */ - rw_mgr_mem_init_load_regs(misccfg->treset_cntr0_val, - misccfg->treset_cntr1_val, - misccfg->treset_cntr2_val, - rwcfg->init_reset_1_cke_0); + rw_mgr_mem_init_load_regs(seq, seq->misccfg->treset_cntr0_val, + seq->misccfg->treset_cntr1_val, + seq->misccfg->treset_cntr2_val, + seq->rwcfg->init_reset_1_cke_0); /* Bring up clock enable. */ /* tXRP < 250 ck cycles */ - delay_for_n_mem_clocks(250); + delay_for_n_mem_clocks(seq, 250); - rw_mgr_mem_load_user(rwcfg->mrs0_dll_reset_mirr, rwcfg->mrs0_dll_reset, - 0); + rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_dll_reset_mirr, + seq->rwcfg->mrs0_dll_reset, 0); } /** @@ -1006,9 +1013,10 @@ static void rw_mgr_mem_initialize(void) * At the end of calibration we have to program the user settings in * and hand off the memory to the user. */ -static void rw_mgr_mem_handoff(void) +static void rw_mgr_mem_handoff(struct socfpga_sdrseq *seq) { - rw_mgr_mem_load_user(rwcfg->mrs0_user_mirr, rwcfg->mrs0_user, 1); + rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_user_mirr, + seq->rwcfg->mrs0_user, 1); /* * Need to wait tMOD (12CK or 15ns) time before issuing other * commands, but we will have plenty of NIOS cycles before actual @@ -1024,12 +1032,12 @@ static void rw_mgr_mem_handoff(void) * Issue write test command. Two variants are provided, one that just tests * a write pattern and another that tests datamask functionality. */ -static void rw_mgr_mem_calibrate_write_test_issue(u32 group, - u32 test_dm) +static void rw_mgr_mem_calibrate_write_test_issue(struct socfpga_sdrseq *seq, + u32 group, u32 test_dm) { const u32 quick_write_mode = (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) && - misccfg->enable_super_quick_calibration; + seq->misccfg->enable_super_quick_calibration; u32 mcc_instruction; u32 rw_wl_nop_cycles; @@ -1059,7 +1067,7 @@ static void rw_mgr_mem_calibrate_write_test_issue(u32 group, * one counter left to issue this command in "multiple-group" mode */ - rw_wl_nop_cycles = gbl->rw_wl_nop_cycles; + rw_wl_nop_cycles = seq->gbl.rw_wl_nop_cycles; if (rw_wl_nop_cycles == -1) { /* @@ -1072,16 +1080,16 @@ static void rw_mgr_mem_calibrate_write_test_issue(u32 group, /* CNTR 3 - Not used */ if (test_dm) { - mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0_wl_1; - writel(rwcfg->lfsr_wr_rd_dm_bank_0_data, + mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0_wl_1; + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_data, &sdr_rw_load_jump_mgr_regs->load_jump_add2); - writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop, + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop, &sdr_rw_load_jump_mgr_regs->load_jump_add3); } else { - mcc_instruction = rwcfg->lfsr_wr_rd_bank_0_wl_1; - writel(rwcfg->lfsr_wr_rd_bank_0_data, + mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0_wl_1; + writel(seq->rwcfg->lfsr_wr_rd_bank_0_data, &sdr_rw_load_jump_mgr_regs->load_jump_add2); - writel(rwcfg->lfsr_wr_rd_bank_0_nop, + writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop, &sdr_rw_load_jump_mgr_regs->load_jump_add3); } } else if (rw_wl_nop_cycles == 0) { @@ -1094,12 +1102,12 @@ static void rw_mgr_mem_calibrate_write_test_issue(u32 group, /* CNTR 3 - Not used */ if (test_dm) { - mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0; - writel(rwcfg->lfsr_wr_rd_dm_bank_0_dqs, + mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0; + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_dqs, &sdr_rw_load_jump_mgr_regs->load_jump_add2); } else { - mcc_instruction = 
rwcfg->lfsr_wr_rd_bank_0; - writel(rwcfg->lfsr_wr_rd_bank_0_dqs, + mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0; + writel(seq->rwcfg->lfsr_wr_rd_bank_0_dqs, &sdr_rw_load_jump_mgr_regs->load_jump_add2); } } else { @@ -1117,12 +1125,12 @@ static void rw_mgr_mem_calibrate_write_test_issue(u32 group, */ writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3); if (test_dm) { - mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0; - writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop, + mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0; + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop, &sdr_rw_load_jump_mgr_regs->load_jump_add3); } else { - mcc_instruction = rwcfg->lfsr_wr_rd_bank_0; - writel(rwcfg->lfsr_wr_rd_bank_0_nop, + mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0; + writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop, &sdr_rw_load_jump_mgr_regs->load_jump_add3); } } @@ -1144,10 +1152,10 @@ static void rw_mgr_mem_calibrate_write_test_issue(u32 group, writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1); if (test_dm) { - writel(rwcfg->lfsr_wr_rd_dm_bank_0_wait, + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_wait, &sdr_rw_load_jump_mgr_regs->load_jump_add1); } else { - writel(rwcfg->lfsr_wr_rd_bank_0_wait, + writel(seq->rwcfg->lfsr_wr_rd_bank_0_wait, &sdr_rw_load_jump_mgr_regs->load_jump_add1); } @@ -1157,7 +1165,8 @@ static void rw_mgr_mem_calibrate_write_test_issue(u32 group, } /** - * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass + * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple + * pass * @rank_bgn: Rank number * @write_group: Write Group * @use_dm: Use DM @@ -1168,36 +1177,38 @@ static void rw_mgr_mem_calibrate_write_test_issue(u32 group, * Test writes, can check for a single bit pass or multiple bit pass. */ static int -rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group, +rw_mgr_mem_calibrate_write_test(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const u32 write_group, const u32 use_dm, const u32 all_correct, u32 *bit_chk, const u32 all_ranks) { const u32 rank_end = all_ranks ? - rwcfg->mem_number_of_ranks : + seq->rwcfg->mem_number_of_ranks : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); - const u32 shift_ratio = rwcfg->mem_dq_per_write_dqs / - rwcfg->mem_virtual_groups_per_write_dqs; - const u32 correct_mask_vg = param->write_correct_mask_vg; + const u32 shift_ratio = seq->rwcfg->mem_dq_per_write_dqs / + seq->rwcfg->mem_virtual_groups_per_write_dqs; + const u32 correct_mask_vg = seq->param.write_correct_mask_vg; - u32 tmp_bit_chk, base_rw_mgr; + u32 tmp_bit_chk, base_rw_mgr, group; int vg, r; - *bit_chk = param->write_correct_mask; + *bit_chk = seq->param.write_correct_mask; for (r = rank_bgn; r < rank_end; r++) { /* Set rank */ - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE); tmp_bit_chk = 0; - for (vg = rwcfg->mem_virtual_groups_per_write_dqs - 1; + for (vg = seq->rwcfg->mem_virtual_groups_per_write_dqs - 1; vg >= 0; vg--) { /* Reset the FIFOs to get pointers to known state. 
*/ writel(0, &phy_mgr_cmd->fifo_reset); - rw_mgr_mem_calibrate_write_test_issue( - write_group * - rwcfg->mem_virtual_groups_per_write_dqs + vg, - use_dm); + group = write_group * + seq->rwcfg->mem_virtual_groups_per_write_dqs + + vg; + rw_mgr_mem_calibrate_write_test_issue(seq, group, + use_dm); base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); tmp_bit_chk <<= shift_ratio; @@ -1207,14 +1218,14 @@ rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group, *bit_chk &= tmp_bit_chk; } - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF); if (all_correct) { debug_cond(DLEVEL >= 2, "write_test(%u,%u,ALL) : %u == %u => %i\n", write_group, use_dm, *bit_chk, - param->write_correct_mask, - *bit_chk == param->write_correct_mask); - return *bit_chk == param->write_correct_mask; + seq->param.write_correct_mask, + *bit_chk == seq->param.write_correct_mask); + return *bit_chk == seq->param.write_correct_mask; } else { debug_cond(DLEVEL >= 2, "write_test(%u,%u,ONE) : %u != %i => %i\n", @@ -1233,47 +1244,49 @@ rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group, * read test to ensure memory works. */ static int -rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group, +rw_mgr_mem_calibrate_read_test_patterns(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const u32 group, const u32 all_ranks) { const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; const u32 addr_offset = - (group * rwcfg->mem_virtual_groups_per_read_dqs) << 2; + (group * seq->rwcfg->mem_virtual_groups_per_read_dqs) + << 2; const u32 rank_end = all_ranks ? - rwcfg->mem_number_of_ranks : + seq->rwcfg->mem_number_of_ranks : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); - const u32 shift_ratio = rwcfg->mem_dq_per_read_dqs / - rwcfg->mem_virtual_groups_per_read_dqs; - const u32 correct_mask_vg = param->read_correct_mask_vg; + const u32 shift_ratio = seq->rwcfg->mem_dq_per_read_dqs / + seq->rwcfg->mem_virtual_groups_per_read_dqs; + const u32 correct_mask_vg = seq->param.read_correct_mask_vg; u32 tmp_bit_chk, base_rw_mgr, bit_chk; int vg, r; int ret = 0; - bit_chk = param->read_correct_mask; + bit_chk = seq->param.read_correct_mask; for (r = rank_bgn; r < rank_end; r++) { /* Set rank */ - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE); /* Load up a constant bursts of read commands */ writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); - writel(rwcfg->guaranteed_read, + writel(seq->rwcfg->guaranteed_read, &sdr_rw_load_jump_mgr_regs->load_jump_add0); writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); - writel(rwcfg->guaranteed_read_cont, + writel(seq->rwcfg->guaranteed_read_cont, &sdr_rw_load_jump_mgr_regs->load_jump_add1); tmp_bit_chk = 0; - for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1; + for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1; vg >= 0; vg--) { /* Reset the FIFOs to get pointers to known state. 
*/ writel(0, &phy_mgr_cmd->fifo_reset); writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RESET_READ_DATAPATH_OFFSET); - writel(rwcfg->guaranteed_read, + writel(seq->rwcfg->guaranteed_read, addr + addr_offset + (vg << 2)); base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); @@ -1284,33 +1297,35 @@ rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group, bit_chk &= tmp_bit_chk; } - writel(rwcfg->clear_dqs_enable, addr + (group << 2)); + writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2)); - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF); - if (bit_chk != param->read_correct_mask) + if (bit_chk != seq->param.read_correct_mask) ret = -EIO; debug_cond(DLEVEL >= 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n", __func__, __LINE__, group, bit_chk, - param->read_correct_mask, ret); + seq->param.read_correct_mask, ret); return ret; } /** - * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test + * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read + * test * @rank_bgn: Rank number * @all_ranks: Test all ranks * * Load up the patterns we are going to use during a read test. */ -static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn, +static void rw_mgr_mem_calibrate_read_load_patterns(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const int all_ranks) { const u32 rank_end = all_ranks ? - rwcfg->mem_number_of_ranks : + seq->rwcfg->mem_number_of_ranks : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); u32 r; @@ -1318,34 +1333,35 @@ static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn, for (r = rank_bgn; r < rank_end; r++) { /* set rank */ - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE); /* Load up a constant bursts */ writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); - writel(rwcfg->guaranteed_write_wait0, + writel(seq->rwcfg->guaranteed_write_wait0, &sdr_rw_load_jump_mgr_regs->load_jump_add0); writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); - writel(rwcfg->guaranteed_write_wait1, + writel(seq->rwcfg->guaranteed_write_wait1, &sdr_rw_load_jump_mgr_regs->load_jump_add1); writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2); - writel(rwcfg->guaranteed_write_wait2, + writel(seq->rwcfg->guaranteed_write_wait2, &sdr_rw_load_jump_mgr_regs->load_jump_add2); writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3); - writel(rwcfg->guaranteed_write_wait3, + writel(seq->rwcfg->guaranteed_write_wait3, &sdr_rw_load_jump_mgr_regs->load_jump_add3); - writel(rwcfg->guaranteed_write, SDR_PHYGRP_RWMGRGRP_ADDRESS | - RW_MGR_RUN_SINGLE_GROUP_OFFSET); + writel(seq->rwcfg->guaranteed_write, + SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET); } - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF); } /** @@ -1363,36 +1379,37 @@ static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn, * checks than the regular read test. */ static int -rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group, +rw_mgr_mem_calibrate_read_test(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const u32 group, const u32 num_tries, const u32 all_correct, u32 *bit_chk, const u32 all_groups, const u32 all_ranks) { - const u32 rank_end = all_ranks ? rwcfg->mem_number_of_ranks : + const u32 rank_end = all_ranks ? 
seq->rwcfg->mem_number_of_ranks : (rank_bgn + NUM_RANKS_PER_SHADOW_REG); const u32 quick_read_mode = ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) && - misccfg->enable_super_quick_calibration); - u32 correct_mask_vg = param->read_correct_mask_vg; + seq->misccfg->enable_super_quick_calibration); + u32 correct_mask_vg = seq->param.read_correct_mask_vg; u32 tmp_bit_chk; u32 base_rw_mgr; u32 addr; int r, vg, ret; - *bit_chk = param->read_correct_mask; + *bit_chk = seq->param.read_correct_mask; for (r = rank_bgn; r < rank_end; r++) { /* set rank */ - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE); + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE); writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1); - writel(rwcfg->read_b2b_wait1, + writel(seq->rwcfg->read_b2b_wait1, &sdr_rw_load_jump_mgr_regs->load_jump_add1); writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2); - writel(rwcfg->read_b2b_wait2, + writel(seq->rwcfg->read_b2b_wait2, &sdr_rw_load_jump_mgr_regs->load_jump_add2); if (quick_read_mode) @@ -1403,21 +1420,21 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group, else writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0); - writel(rwcfg->read_b2b, + writel(seq->rwcfg->read_b2b, &sdr_rw_load_jump_mgr_regs->load_jump_add0); if (all_groups) - writel(rwcfg->mem_if_read_dqs_width * - rwcfg->mem_virtual_groups_per_read_dqs - 1, + writel(seq->rwcfg->mem_if_read_dqs_width * + seq->rwcfg->mem_virtual_groups_per_read_dqs - 1, &sdr_rw_load_mgr_regs->load_cntr3); else writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3); - writel(rwcfg->read_b2b, + writel(seq->rwcfg->read_b2b, &sdr_rw_load_jump_mgr_regs->load_jump_add3); tmp_bit_chk = 0; - for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1; vg >= 0; - vg--) { + for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1; + vg >= 0; vg--) { /* Reset the FIFOs to get pointers to known state. */ writel(0, &phy_mgr_cmd->fifo_reset); writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | @@ -1431,14 +1448,15 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group, RW_MGR_RUN_SINGLE_GROUP_OFFSET; } - writel(rwcfg->read_b2b, addr + + writel(seq->rwcfg->read_b2b, addr + ((group * - rwcfg->mem_virtual_groups_per_read_dqs + + seq->rwcfg->mem_virtual_groups_per_read_dqs + vg) << 2)); base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); - tmp_bit_chk <<= rwcfg->mem_dq_per_read_dqs / - rwcfg->mem_virtual_groups_per_read_dqs; + tmp_bit_chk <<= + seq->rwcfg->mem_dq_per_read_dqs / + seq->rwcfg->mem_virtual_groups_per_read_dqs; tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr); } @@ -1446,16 +1464,16 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group, } addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; - writel(rwcfg->clear_dqs_enable, addr + (group << 2)); + writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2)); - set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF); + set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF); if (all_correct) { - ret = (*bit_chk == param->read_correct_mask); + ret = (*bit_chk == seq->param.read_correct_mask); debug_cond(DLEVEL >= 2, "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n", __func__, __LINE__, group, all_groups, *bit_chk, - param->read_correct_mask, ret); + seq->param.read_correct_mask, ret); } else { ret = (*bit_chk != 0x00); debug_cond(DLEVEL >= 2, @@ -1477,13 +1495,15 @@ rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group, * Perform a READ test across all memory ranks. 
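The read test above, like the write test earlier in this section, folds the per-virtual-group results into a single pass/fail bitmask for the group: the accumulator is shifted up by the number of DQ bits per virtual group, then the freshly read RW-manager status is OR-ed in, a set bit in base_rw_mgr marking a failing DQ line. A minimal stand-alone sketch of that accumulation, with assumed widths and made-up status values rather than anything read from hardware:

#include <stdio.h>

int main(void)
{
	const unsigned dq_per_dqs = 8;	/* assumed DQ bits per DQS group */
	const unsigned vgroups = 2;	/* assumed virtual groups per group */
	const unsigned shift_ratio = dq_per_dqs / vgroups;
	const unsigned correct_mask_vg = (1u << shift_ratio) - 1;
	/* Pretend RW manager status per virtual group; set bits = failing DQ. */
	const unsigned base_rw_mgr[2] = { 0x0, 0x2 };
	unsigned tmp_bit_chk = 0;
	int vg;

	for (vg = vgroups - 1; vg >= 0; vg--) {
		tmp_bit_chk <<= shift_ratio;
		tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr[vg];
	}

	/* 0xff here would mean every DQ bit of the group passed. */
	printf("bit_chk = 0x%02x\n", tmp_bit_chk);	/* prints 0xdf */
	return 0;
}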
*/ static int -rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries, +rw_mgr_mem_calibrate_read_test_all_ranks(struct socfpga_sdrseq *seq, + const u32 grp, const u32 num_tries, const u32 all_correct, const u32 all_groups) { u32 bit_chk; - return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct, - &bit_chk, all_groups, 1); + return rw_mgr_mem_calibrate_read_test(seq, 0, grp, num_tries, + all_correct, &bit_chk, all_groups, + 1); } /** @@ -1503,11 +1523,11 @@ static void rw_mgr_incr_vfifo(const u32 grp) * * Decrease VFIFO value. */ -static void rw_mgr_decr_vfifo(const u32 grp) +static void rw_mgr_decr_vfifo(struct socfpga_sdrseq *seq, const u32 grp) { u32 i; - for (i = 0; i < misccfg->read_valid_fifo_size - 1; i++) + for (i = 0; i < seq->misccfg->read_valid_fifo_size - 1; i++) rw_mgr_incr_vfifo(grp); } @@ -1517,15 +1537,16 @@ static void rw_mgr_decr_vfifo(const u32 grp) * * Push VFIFO until a failing read happens. */ -static int find_vfifo_failing_read(const u32 grp) +static int find_vfifo_failing_read(struct socfpga_sdrseq *seq, + const u32 grp) { u32 v, ret, fail_cnt = 0; - for (v = 0; v < misccfg->read_valid_fifo_size; v++) { + for (v = 0; v < seq->misccfg->read_valid_fifo_size; v++) { debug_cond(DLEVEL >= 2, "%s:%d: vfifo %u\n", __func__, __LINE__, v); - ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, - PASS_ONE_BIT, 0); + ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1, + PASS_ONE_BIT, 0); if (!ret) { fail_cnt++; @@ -1553,21 +1574,22 @@ static int find_vfifo_failing_read(const u32 grp) * * Find working or non-working DQS enable phase setting. */ -static int sdr_find_phase_delay(int working, int delay, const u32 grp, - u32 *work, const u32 work_inc, u32 *pd) +static int sdr_find_phase_delay(struct socfpga_sdrseq *seq, int working, + int delay, const u32 grp, u32 *work, + const u32 work_inc, u32 *pd) { - const u32 max = delay ? iocfg->dqs_en_delay_max : - iocfg->dqs_en_phase_max; + const u32 max = delay ? seq->iocfg->dqs_en_delay_max : + seq->iocfg->dqs_en_phase_max; u32 ret; for (; *pd <= max; (*pd)++) { if (delay) - scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd); + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *pd); else - scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd); + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *pd); - ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, - PASS_ONE_BIT, 0); + ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1, + PASS_ONE_BIT, 0); if (!working) ret = !ret; @@ -1590,22 +1612,22 @@ static int sdr_find_phase_delay(int working, int delay, const u32 grp, * * Find working or non-working DQS enable phase setting. */ -static int sdr_find_phase(int working, const u32 grp, u32 *work, - u32 *i, u32 *p) +static int sdr_find_phase(struct socfpga_sdrseq *seq, int working, + const u32 grp, u32 *work, u32 *i, u32 *p) { - const u32 end = misccfg->read_valid_fifo_size + (working ? 0 : 1); + const u32 end = seq->misccfg->read_valid_fifo_size + (working ? 0 : 1); int ret; for (; *i < end; (*i)++) { if (working) *p = 0; - ret = sdr_find_phase_delay(working, 0, grp, work, - iocfg->delay_per_opa_tap, p); + ret = sdr_find_phase_delay(seq, working, 0, grp, work, + seq->iocfg->delay_per_opa_tap, p); if (!ret) return 0; - if (*p > iocfg->dqs_en_phase_max) { + if (*p > seq->iocfg->dqs_en_phase_max) { /* Fiddle with FIFO. */ rw_mgr_incr_vfifo(grp); if (!working) @@ -1626,22 +1648,22 @@ static int sdr_find_phase(int working, const u32 grp, u32 *work, * * Find working DQS enable phase setting. 
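rw_mgr_decr_vfifo() above steps the VFIFO pointer backwards by walking it all the way around forwards, since the hardware only exposes an increment operation (rw_mgr_incr_vfifo()). The same wrap-around trick in a stand-alone sketch, with an assumed FIFO depth:

#include <stdio.h>

int main(void)
{
	const unsigned fifo_size = 8;	/* assumed read_valid_fifo_size */
	unsigned vfifo = 3;
	unsigned i;

	/* "Decrement" by one: increment fifo_size - 1 times, modulo depth. */
	for (i = 0; i < fifo_size - 1; i++)
		vfifo = (vfifo + 1) % fifo_size;

	printf("vfifo pointer: %u\n", vfifo);	/* 3 - 1 == 2 */
	return 0;
}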
*/ -static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d, - u32 *p, u32 *i) +static int sdr_working_phase(struct socfpga_sdrseq *seq, const u32 grp, + u32 *work_bgn, u32 *d, u32 *p, u32 *i) { - const u32 dtaps_per_ptap = iocfg->delay_per_opa_tap / - iocfg->delay_per_dqs_en_dchain_tap; + const u32 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap / + seq->iocfg->delay_per_dqs_en_dchain_tap; int ret; *work_bgn = 0; for (*d = 0; *d <= dtaps_per_ptap; (*d)++) { *i = 0; - scc_mgr_set_dqs_en_delay_all_ranks(grp, *d); - ret = sdr_find_phase(1, grp, work_bgn, i, p); + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *d); + ret = sdr_find_phase(seq, 1, grp, work_bgn, i, p); if (!ret) return 0; - *work_bgn += iocfg->delay_per_dqs_en_dchain_tap; + *work_bgn += seq->iocfg->delay_per_dqs_en_dchain_tap; } /* Cannot find working solution */ @@ -1658,43 +1680,44 @@ static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d, * * Find DQS enable backup phase setting. */ -static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p) +static void sdr_backup_phase(struct socfpga_sdrseq *seq, const u32 grp, + u32 *work_bgn, u32 *p) { u32 tmp_delay, d; int ret; /* Special case code for backing up a phase */ if (*p == 0) { - *p = iocfg->dqs_en_phase_max; - rw_mgr_decr_vfifo(grp); + *p = seq->iocfg->dqs_en_phase_max; + rw_mgr_decr_vfifo(seq, grp); } else { (*p)--; } - tmp_delay = *work_bgn - iocfg->delay_per_opa_tap; - scc_mgr_set_dqs_en_phase_all_ranks(grp, *p); + tmp_delay = *work_bgn - seq->iocfg->delay_per_opa_tap; + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *p); - for (d = 0; d <= iocfg->dqs_en_delay_max && tmp_delay < *work_bgn; + for (d = 0; d <= seq->iocfg->dqs_en_delay_max && tmp_delay < *work_bgn; d++) { - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d); - ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, - PASS_ONE_BIT, 0); + ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1, + PASS_ONE_BIT, 0); if (ret) { *work_bgn = tmp_delay; break; } - tmp_delay += iocfg->delay_per_dqs_en_dchain_tap; + tmp_delay += seq->iocfg->delay_per_dqs_en_dchain_tap; } /* Restore VFIFO to old state before we decremented it (if needed). */ (*p)++; - if (*p > iocfg->dqs_en_phase_max) { + if (*p > seq->iocfg->dqs_en_phase_max) { *p = 0; rw_mgr_incr_vfifo(grp); } - scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0); } /** @@ -1706,19 +1729,20 @@ static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p) * * Find non-working DQS enable phase setting. */ -static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i) +static int sdr_nonworking_phase(struct socfpga_sdrseq *seq, + const u32 grp, u32 *work_end, u32 *p, u32 *i) { int ret; (*p)++; - *work_end += iocfg->delay_per_opa_tap; - if (*p > iocfg->dqs_en_phase_max) { + *work_end += seq->iocfg->delay_per_opa_tap; + if (*p > seq->iocfg->dqs_en_phase_max) { /* Fiddle with FIFO. */ *p = 0; rw_mgr_incr_vfifo(grp); } - ret = sdr_find_phase(0, grp, work_end, i, p); + ret = sdr_find_phase(seq, 0, grp, work_end, i, p); if (ret) { /* Cannot see edge of failing read. */ debug_cond(DLEVEL >= 2, "%s:%d: end: failed\n", @@ -1736,7 +1760,8 @@ static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i) * * Find center of the working DQS enable window. 
*/ -static int sdr_find_window_center(const u32 grp, const u32 work_bgn, +static int sdr_find_window_center(struct socfpga_sdrseq *seq, + const u32 grp, const u32 work_bgn, const u32 work_end) { u32 work_mid; @@ -1748,37 +1773,41 @@ static int sdr_find_window_center(const u32 grp, const u32 work_bgn, debug_cond(DLEVEL >= 2, "work_bgn=%d work_end=%d work_mid=%d\n", work_bgn, work_end, work_mid); /* Get the middle delay to be less than a VFIFO delay */ - tmp_delay = (iocfg->dqs_en_phase_max + 1) * iocfg->delay_per_opa_tap; + tmp_delay = (seq->iocfg->dqs_en_phase_max + 1) + * seq->iocfg->delay_per_opa_tap; debug_cond(DLEVEL >= 2, "vfifo ptap delay %d\n", tmp_delay); work_mid %= tmp_delay; debug_cond(DLEVEL >= 2, "new work_mid %d\n", work_mid); - tmp_delay = rounddown(work_mid, iocfg->delay_per_opa_tap); - if (tmp_delay > iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap) - tmp_delay = iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap; - p = tmp_delay / iocfg->delay_per_opa_tap; + tmp_delay = rounddown(work_mid, seq->iocfg->delay_per_opa_tap); + if (tmp_delay > seq->iocfg->dqs_en_phase_max + * seq->iocfg->delay_per_opa_tap) { + tmp_delay = seq->iocfg->dqs_en_phase_max + * seq->iocfg->delay_per_opa_tap; + } + p = tmp_delay / seq->iocfg->delay_per_opa_tap; debug_cond(DLEVEL >= 2, "new p %d, tmp_delay=%d\n", p, tmp_delay); d = DIV_ROUND_UP(work_mid - tmp_delay, - iocfg->delay_per_dqs_en_dchain_tap); - if (d > iocfg->dqs_en_delay_max) - d = iocfg->dqs_en_delay_max; - tmp_delay += d * iocfg->delay_per_dqs_en_dchain_tap; + seq->iocfg->delay_per_dqs_en_dchain_tap); + if (d > seq->iocfg->dqs_en_delay_max) + d = seq->iocfg->dqs_en_delay_max; + tmp_delay += d * seq->iocfg->delay_per_dqs_en_dchain_tap; debug_cond(DLEVEL >= 2, "new d %d, tmp_delay=%d\n", d, tmp_delay); - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); - scc_mgr_set_dqs_en_delay_all_ranks(grp, d); + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p); + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d); /* * push vfifo until we can successfully calibrate. We can do this * because the largest possible margin in 1 VFIFO cycle. */ - for (i = 0; i < misccfg->read_valid_fifo_size; i++) { + for (i = 0; i < seq->misccfg->read_valid_fifo_size; i++) { debug_cond(DLEVEL >= 2, "find_dqs_en_phase: center\n"); - if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1, + if (rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1, PASS_ONE_BIT, 0)) { debug_cond(DLEVEL >= 2, @@ -1797,12 +1826,15 @@ static int sdr_find_window_center(const u32 grp, const u32 work_bgn, } /** - * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use + * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to + * use * @grp: Read/Write Group * * Find a good DQS enable to use. */ -static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) +static int +rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(struct socfpga_sdrseq *seq, + const u32 grp) { u32 d, p, i; u32 dtaps_per_ptap; @@ -1814,19 +1846,19 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); - scc_mgr_set_dqs_en_delay_all_ranks(grp, 0); - scc_mgr_set_dqs_en_phase_all_ranks(grp, 0); + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0); + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, 0); /* Step 0: Determine number of delay taps for each phase tap. 
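sdr_find_window_center() above converts the midpoint of the working DQS-enable window back into hardware settings: the midpoint is reduced modulo one VFIFO cycle of phase taps, the coarse part becomes whole phase taps (ptaps) and the remainder is rounded up into delay-chain taps (dtaps). A stand-alone sketch of that arithmetic with invented tap delays; the clamping against dqs_en_phase_max/dqs_en_delay_max done by the real code is left out:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define rounddown(x, y)		((x) - ((x) % (y)))

int main(void)
{
	/* Assumed example values; the real ones come from seq->iocfg. */
	const unsigned delay_per_opa_tap = 2500;	/* ps per phase tap */
	const unsigned delay_per_dchain_tap = 250;	/* ps per delay tap */
	const unsigned dqs_en_phase_max = 7;
	const unsigned work_bgn = 1000, work_end = 19300;
	unsigned work_mid, vfifo_period, p, d;

	work_mid = (work_bgn + work_end) / 2;
	/* Keep the midpoint below one VFIFO cycle worth of phase taps. */
	vfifo_period = (dqs_en_phase_max + 1) * delay_per_opa_tap;
	work_mid %= vfifo_period;

	/* Coarse part: whole phase taps. */
	p = rounddown(work_mid, delay_per_opa_tap) / delay_per_opa_tap;
	/* Fine part: leftover delay expressed in delay-chain taps. */
	d = DIV_ROUND_UP(work_mid - p * delay_per_opa_tap,
			 delay_per_dchain_tap);

	printf("mid=%u -> ptap=%u dtap=%u\n", work_mid, p, d);
	return 0;
}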
*/ - dtaps_per_ptap = iocfg->delay_per_opa_tap / - iocfg->delay_per_dqs_en_dchain_tap; + dtaps_per_ptap = seq->iocfg->delay_per_opa_tap / + seq->iocfg->delay_per_dqs_en_dchain_tap; /* Step 1: First push vfifo until we get a failing read. */ - find_vfifo_failing_read(grp); + find_vfifo_failing_read(seq, grp); /* Step 2: Find first working phase, increment in ptaps. */ work_bgn = 0; - ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i); + ret = sdr_working_phase(seq, grp, &work_bgn, &d, &p, &i); if (ret) return ret; @@ -1842,13 +1874,13 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) * Step 3a: If we have room, back off by one and * increment in dtaps. */ - sdr_backup_phase(grp, &work_bgn, &p); + sdr_backup_phase(seq, grp, &work_bgn, &p); /* * Step 4a: go forward from working phase to non working * phase, increment in ptaps. */ - ret = sdr_nonworking_phase(grp, &work_end, &p, &i); + ret = sdr_nonworking_phase(seq, grp, &work_end, &p, &i); if (ret) return ret; @@ -1856,14 +1888,14 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) /* Special case code for backing up a phase */ if (p == 0) { - p = iocfg->dqs_en_phase_max; - rw_mgr_decr_vfifo(grp); + p = seq->iocfg->dqs_en_phase_max; + rw_mgr_decr_vfifo(seq, grp); } else { p = p - 1; } - work_end -= iocfg->delay_per_opa_tap; - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + work_end -= seq->iocfg->delay_per_opa_tap; + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p); d = 0; @@ -1872,12 +1904,12 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) } /* The dtap increment to find the failing edge is done here. */ - sdr_find_phase_delay(0, 1, grp, &work_end, - iocfg->delay_per_dqs_en_dchain_tap, &d); + sdr_find_phase_delay(seq, 0, 1, grp, &work_end, + seq->iocfg->delay_per_dqs_en_dchain_tap, &d); /* Go back to working dtap */ if (d != 0) - work_end -= iocfg->delay_per_dqs_en_dchain_tap; + work_end -= seq->iocfg->delay_per_dqs_en_dchain_tap; debug_cond(DLEVEL >= 2, "%s:%d p/d: ptap=%u dtap=%u end=%u\n", @@ -1903,8 +1935,8 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) /* Special case code for backing up a phase */ if (p == 0) { - p = iocfg->dqs_en_phase_max; - rw_mgr_decr_vfifo(grp); + p = seq->iocfg->dqs_en_phase_max; + rw_mgr_decr_vfifo(seq, grp); debug_cond(DLEVEL >= 2, "%s:%d backedup cycle/phase: p=%u\n", __func__, __LINE__, p); } else { @@ -1913,7 +1945,7 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) __func__, __LINE__, p); } - scc_mgr_set_dqs_en_phase_all_ranks(grp, p); + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p); /* * Increase dtap until we first see a passing read (in case the @@ -1927,14 +1959,14 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) initial_failing_dtap = d; - found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d); + found_passing_read = !sdr_find_phase_delay(seq, 1, 1, grp, NULL, 0, &d); if (found_passing_read) { /* Find a failing read. */ debug_cond(DLEVEL >= 2, "%s:%d find failing read\n", __func__, __LINE__); d++; - found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0, - &d); + found_failing_read = !sdr_find_phase_delay(seq, 0, 1, grp, NULL, + 0, &d); } else { debug_cond(DLEVEL >= 1, "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n", @@ -1944,7 +1976,7 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) /* * The dynamically calculated dtaps_per_ptap is only valid if we * found a passing/failing read. 
If we didn't, it means d hit the max - * (iocfg->dqs_en_delay_max). Otherwise, dtaps_per_ptap retains its + * (seq->iocfg->dqs_en_delay_max). Otherwise, dtaps_per_ptap retains its * statically calculated value. */ if (found_passing_read && found_failing_read) @@ -1955,7 +1987,7 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap); /* Step 6: Find the centre of the window. */ - ret = sdr_find_window_center(grp, work_bgn, work_end); + ret = sdr_find_window_center(seq, grp, work_bgn, work_end); return ret; } @@ -1973,33 +2005,35 @@ static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp) * * Test if the found edge is valid. */ -static u32 search_stop_check(const int write, const int d, const int rank_bgn, +static u32 search_stop_check(struct socfpga_sdrseq *seq, const int write, + const int d, const int rank_bgn, const u32 write_group, const u32 read_group, u32 *bit_chk, u32 *sticky_bit_chk, const u32 use_read_test) { - const u32 ratio = rwcfg->mem_if_read_dqs_width / - rwcfg->mem_if_write_dqs_width; - const u32 correct_mask = write ? param->write_correct_mask : - param->read_correct_mask; - const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs : - rwcfg->mem_dq_per_read_dqs; + const u32 ratio = seq->rwcfg->mem_if_read_dqs_width / + seq->rwcfg->mem_if_write_dqs_width; + const u32 correct_mask = write ? seq->param.write_correct_mask : + seq->param.read_correct_mask; + const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; u32 ret; /* * Stop searching when the read test doesn't pass AND when * we've seen a passing read on every bit. */ if (write) { /* WRITE-ONLY */ - ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, - 0, PASS_ONE_BIT, - bit_chk, 0); + ret = !rw_mgr_mem_calibrate_write_test(seq, rank_bgn, + write_group, 0, + PASS_ONE_BIT, bit_chk, + 0); } else if (use_read_test) { /* READ-ONLY */ - ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group, + ret = !rw_mgr_mem_calibrate_read_test(seq, rank_bgn, read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT, bit_chk, 0, 0); } else { /* READ-ONLY */ - rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0, + rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group, 0, PASS_ONE_BIT, bit_chk, 0); *bit_chk = *bit_chk >> (per_dqs * (read_group - (write_group * ratio))); @@ -2028,29 +2062,30 @@ static u32 search_stop_check(const int write, const int d, const int rank_bgn, * * Find left edge of DQ/DQS working phase. */ -static void search_left_edge(const int write, const int rank_bgn, - const u32 write_group, const u32 read_group, const u32 test_bgn, - u32 *sticky_bit_chk, - int *left_edge, int *right_edge, const u32 use_read_test) -{ - const u32 delay_max = write ? iocfg->io_out1_delay_max : - iocfg->io_in_delay_max; - const u32 dqs_max = write ? iocfg->io_out1_delay_max : - iocfg->dqs_in_delay_max; - const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs : - rwcfg->mem_dq_per_read_dqs; +static void search_left_edge(struct socfpga_sdrseq *seq, const int write, + const int rank_bgn, const u32 write_group, + const u32 read_group, const u32 test_bgn, + u32 *sticky_bit_chk, int *left_edge, + int *right_edge, const u32 use_read_test) +{ + const u32 delay_max = write ? seq->iocfg->io_out1_delay_max : + seq->iocfg->io_in_delay_max; + const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max : + seq->iocfg->dqs_in_delay_max; + const u32 per_dqs = write ? 
seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; u32 stop, bit_chk; int i, d; for (d = 0; d <= dqs_max; d++) { if (write) - scc_mgr_apply_group_dq_out1_delay(d); + scc_mgr_apply_group_dq_out1_delay(seq, d); else - scc_mgr_apply_group_dq_in_delay(test_bgn, d); + scc_mgr_apply_group_dq_in_delay(seq, test_bgn, d); writel(0, &sdr_scc_mgr->update); - stop = search_stop_check(write, d, rank_bgn, write_group, + stop = search_stop_check(seq, write, d, rank_bgn, write_group, read_group, &bit_chk, sticky_bit_chk, use_read_test); if (stop == 1) @@ -2080,9 +2115,9 @@ static void search_left_edge(const int write, const int rank_bgn, /* Reset DQ delay chains to 0 */ if (write) - scc_mgr_apply_group_dq_out1_delay(0); + scc_mgr_apply_group_dq_out1_delay(seq, 0); else - scc_mgr_apply_group_dq_in_delay(test_bgn, 0); + scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0); *sticky_bit_chk = 0; for (i = per_dqs - 1; i >= 0; i--) { @@ -2138,31 +2173,33 @@ static void search_left_edge(const int write, const int rank_bgn, * * Find right edge of DQ/DQS working phase. */ -static int search_right_edge(const int write, const int rank_bgn, - const u32 write_group, const u32 read_group, - const int start_dqs, const int start_dqs_en, - u32 *sticky_bit_chk, - int *left_edge, int *right_edge, const u32 use_read_test) -{ - const u32 delay_max = write ? iocfg->io_out1_delay_max : - iocfg->io_in_delay_max; - const u32 dqs_max = write ? iocfg->io_out1_delay_max : - iocfg->dqs_in_delay_max; - const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs : - rwcfg->mem_dq_per_read_dqs; +static int search_right_edge(struct socfpga_sdrseq *seq, const int write, + const int rank_bgn, const u32 write_group, + const u32 read_group, const int start_dqs, + const int start_dqs_en, u32 *sticky_bit_chk, + int *left_edge, int *right_edge, + const u32 use_read_test) +{ + const u32 delay_max = write ? seq->iocfg->io_out1_delay_max : + seq->iocfg->io_in_delay_max; + const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max : + seq->iocfg->dqs_in_delay_max; + const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; u32 stop, bit_chk; int i, d; for (d = 0; d <= dqs_max - start_dqs; d++) { if (write) { /* WRITE-ONLY */ - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, + scc_mgr_apply_group_dqs_io_and_oct_out1(seq, + write_group, d + start_dqs); } else { /* READ-ONLY */ scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); - if (iocfg->shift_dqs_en_when_shift_dqs) { + if (seq->iocfg->shift_dqs_en_when_shift_dqs) { u32 delay = d + start_dqs_en; - if (delay > iocfg->dqs_en_delay_max) - delay = iocfg->dqs_en_delay_max; + if (delay > seq->iocfg->dqs_en_delay_max) + delay = seq->iocfg->dqs_en_delay_max; scc_mgr_set_dqs_en_delay(read_group, delay); } scc_mgr_load_dqs(read_group); @@ -2170,12 +2207,13 @@ static int search_right_edge(const int write, const int rank_bgn, writel(0, &sdr_scc_mgr->update); - stop = search_stop_check(write, d, rank_bgn, write_group, + stop = search_stop_check(seq, write, d, rank_bgn, write_group, read_group, &bit_chk, sticky_bit_chk, use_read_test); if (stop == 1) { if (write && (d == 0)) { /* WRITE-ONLY */ - for (i = 0; i < rwcfg->mem_dq_per_write_dqs; + for (i = 0; + i < seq->rwcfg->mem_dq_per_write_dqs; i++) { /* * d = 0 failed, but it passed when @@ -2263,11 +2301,12 @@ static int search_right_edge(const int write, const int rank_bgn, * * Find index and value of the middle of the DQ/DQS working phase. 
*/ -static int get_window_mid_index(const int write, int *left_edge, +static int get_window_mid_index(struct socfpga_sdrseq *seq, + const int write, int *left_edge, int *right_edge, int *mid_min) { - const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs : - rwcfg->mem_dq_per_read_dqs; + const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; int i, mid, min_index; /* Find middle of window for each DQ bit */ @@ -2310,15 +2349,16 @@ static int get_window_mid_index(const int write, int *left_edge, * * Align the DQ/DQS windows in each group. */ -static void center_dq_windows(const int write, int *left_edge, int *right_edge, +static void center_dq_windows(struct socfpga_sdrseq *seq, + const int write, int *left_edge, int *right_edge, const int mid_min, const int orig_mid_min, const int min_index, const int test_bgn, int *dq_margin, int *dqs_margin) { - const s32 delay_max = write ? iocfg->io_out1_delay_max : - iocfg->io_in_delay_max; - const s32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs : - rwcfg->mem_dq_per_read_dqs; + const s32 delay_max = write ? seq->iocfg->io_out1_delay_max : + seq->iocfg->io_in_delay_max; + const s32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; const s32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET : SCC_MGR_IO_IN_DELAY_OFFSET; const s32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off; @@ -2385,9 +2425,12 @@ static void center_dq_windows(const int write, int *left_edge, int *right_edge, * * Per-bit deskew DQ and centering. */ -static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, - const u32 rw_group, const u32 test_bgn, - const int use_read_test, const int update_fom) +static int rw_mgr_mem_calibrate_vfifo_center(struct socfpga_sdrseq *seq, + const u32 rank_bgn, + const u32 rw_group, + const u32 test_bgn, + const int use_read_test, + const int update_fom) { const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET + @@ -2397,36 +2440,36 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, * signed numbers. 
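Nearly every hunk in this file follows the same mechanical pattern that is visible in the helpers above: the former file-scope pointers and globals (rwcfg, iocfg, misccfg, param, gbl) become members of struct socfpga_sdrseq, and each function gains a struct socfpga_sdrseq *seq argument through which it reaches that state. A minimal sketch of the pattern with simplified, made-up members (not the real struct layout):

#include <stdio.h>

struct sdrseq_cfg {
	unsigned int mem_number_of_ranks;
};

struct sdrseq {
	const struct sdrseq_cfg *rwcfg;	/* was a file-scope pointer */
	struct {
		unsigned int fom_in, fom_out;
	} gbl;				/* was a file-scope struct */
};

/* Helpers take the context explicitly instead of touching globals. */
static unsigned int count_ranks(struct sdrseq *seq)
{
	return seq->rwcfg->mem_number_of_ranks;
}

int main(void)
{
	static const struct sdrseq_cfg cfg = { .mem_number_of_ranks = 2 };
	struct sdrseq seq = { .rwcfg = &cfg };

	printf("ranks=%u fom_out=%u\n", count_ranks(&seq), seq.gbl.fom_out);
	return 0;
}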
*/ u32 sticky_bit_chk; - int32_t left_edge[rwcfg->mem_dq_per_read_dqs]; - int32_t right_edge[rwcfg->mem_dq_per_read_dqs]; - int32_t orig_mid_min, mid_min; - int32_t new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en; - int32_t dq_margin, dqs_margin; + s32 left_edge[seq->rwcfg->mem_dq_per_read_dqs]; + s32 right_edge[seq->rwcfg->mem_dq_per_read_dqs]; + s32 orig_mid_min, mid_min; + s32 new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en; + s32 dq_margin, dqs_margin; int i, min_index; int ret; debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn); start_dqs = readl(addr); - if (iocfg->shift_dqs_en_when_shift_dqs) - start_dqs_en = readl(addr - iocfg->dqs_en_delay_offset); + if (seq->iocfg->shift_dqs_en_when_shift_dqs) + start_dqs_en = readl(addr - seq->iocfg->dqs_en_delay_offset); /* set the left and right edge of each bit to an illegal value */ - /* use (iocfg->io_in_delay_max + 1) as an illegal value */ + /* use (seq->iocfg->io_in_delay_max + 1) as an illegal value */ sticky_bit_chk = 0; - for (i = 0; i < rwcfg->mem_dq_per_read_dqs; i++) { - left_edge[i] = iocfg->io_in_delay_max + 1; - right_edge[i] = iocfg->io_in_delay_max + 1; + for (i = 0; i < seq->rwcfg->mem_dq_per_read_dqs; i++) { + left_edge[i] = seq->iocfg->io_in_delay_max + 1; + right_edge[i] = seq->iocfg->io_in_delay_max + 1; } /* Search for the left edge of the window for each bit */ - search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn, + search_left_edge(seq, 0, rank_bgn, rw_group, rw_group, test_bgn, &sticky_bit_chk, left_edge, right_edge, use_read_test); /* Search for the right edge of the window for each bit */ - ret = search_right_edge(0, rank_bgn, rw_group, rw_group, + ret = search_right_edge(seq, 0, rank_bgn, rw_group, rw_group, start_dqs, start_dqs_en, &sticky_bit_chk, left_edge, right_edge, use_read_test); @@ -2437,7 +2480,7 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, * dqs/ck relationships. 
*/ scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs); - if (iocfg->shift_dqs_en_when_shift_dqs) + if (seq->iocfg->shift_dqs_en_when_shift_dqs) scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en); scc_mgr_load_dqs(rw_group); @@ -2447,26 +2490,27 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, "%s:%d vfifo_center: failed to find edge [%u]: %d %d", __func__, __LINE__, i, left_edge[i], right_edge[i]); if (use_read_test) { - set_failing_group_stage(rw_group * - rwcfg->mem_dq_per_read_dqs + i, + set_failing_group_stage(seq, rw_group * + seq->rwcfg->mem_dq_per_read_dqs + i, CAL_STAGE_VFIFO, CAL_SUBSTAGE_VFIFO_CENTER); } else { - set_failing_group_stage(rw_group * - rwcfg->mem_dq_per_read_dqs + i, + set_failing_group_stage(seq, rw_group * + seq->rwcfg->mem_dq_per_read_dqs + i, CAL_STAGE_VFIFO_AFTER_WRITES, CAL_SUBSTAGE_VFIFO_CENTER); } return -EIO; } - min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min); + min_index = get_window_mid_index(seq, 0, left_edge, right_edge, + &mid_min); /* Determine the amount we can change DQS (which is -mid_min) */ orig_mid_min = mid_min; new_dqs = start_dqs - mid_min; - if (new_dqs > iocfg->dqs_in_delay_max) - new_dqs = iocfg->dqs_in_delay_max; + if (new_dqs > seq->iocfg->dqs_in_delay_max) + new_dqs = seq->iocfg->dqs_in_delay_max; else if (new_dqs < 0) new_dqs = 0; @@ -2474,10 +2518,10 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, debug_cond(DLEVEL >= 1, "vfifo_center: new mid_min=%d new_dqs=%d\n", mid_min, new_dqs); - if (iocfg->shift_dqs_en_when_shift_dqs) { - if (start_dqs_en - mid_min > iocfg->dqs_en_delay_max) + if (seq->iocfg->shift_dqs_en_when_shift_dqs) { + if (start_dqs_en - mid_min > seq->iocfg->dqs_en_delay_max) mid_min += start_dqs_en - mid_min - - iocfg->dqs_en_delay_max; + seq->iocfg->dqs_en_delay_max; else if (start_dqs_en - mid_min < 0) mid_min += start_dqs_en - mid_min; } @@ -2486,15 +2530,15 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, debug_cond(DLEVEL >= 1, "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n", start_dqs, - iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1, + seq->iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1, new_dqs, mid_min); /* Add delay to bring centre of all DQ windows to the same "level". */ - center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min, + center_dq_windows(seq, 0, left_edge, right_edge, mid_min, orig_mid_min, min_index, test_bgn, &dq_margin, &dqs_margin); /* Move DQS-en */ - if (iocfg->shift_dqs_en_when_shift_dqs) { + if (seq->iocfg->shift_dqs_en_when_shift_dqs) { final_dqs_en = start_dqs_en - mid_min; scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en); scc_mgr_load_dqs(rw_group); @@ -2520,7 +2564,8 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, } /** - * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device + * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the + * device * @rw_group: Read/Write Group * @phase: DQ/DQS phase * @@ -2528,13 +2573,14 @@ static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn, * device, the sequencer uses a guaranteed write mechanism to write data into * the memory device. */ -static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group, +static int rw_mgr_mem_calibrate_guaranteed_write(struct socfpga_sdrseq *seq, + const u32 rw_group, const u32 phase) { int ret; /* Set a particular DQ/DQS phase. 
*/ - scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase); + scc_mgr_set_dqdqs_output_phase_all_ranks(seq, rw_group, phase); debug_cond(DLEVEL >= 1, "%s:%d guaranteed write: g=%u p=%u\n", __func__, __LINE__, rw_group, phase); @@ -2544,16 +2590,16 @@ static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group, * Load up the patterns used by read calibration using the * current DQDQS phase. */ - rw_mgr_mem_calibrate_read_load_patterns(0, 1); + rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1); - if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ) + if (seq->gbl.phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ) return 0; /* * Altera EMI_RM 2015.05.04 :: Figure 1-26 * Back-to-Back reads of the patterns used for calibration. */ - ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1); + ret = rw_mgr_mem_calibrate_read_test_patterns(seq, 0, rw_group, 1); if (ret) debug_cond(DLEVEL >= 1, "%s:%d Guaranteed read test failed: g=%u p=%u\n", @@ -2569,8 +2615,10 @@ static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group, * DQS enable calibration ensures reliable capture of the DQ signal without * glitches on the DQS line. */ -static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group, - const u32 test_bgn) +static int +rw_mgr_mem_calibrate_dqs_enable_calibration(struct socfpga_sdrseq *seq, + const u32 rw_group, + const u32 test_bgn) { /* * Altera EMI_RM 2015.05.04 :: Figure 1-27 @@ -2578,18 +2626,18 @@ static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group, */ /* We start at zero, so have one less dq to devide among */ - const u32 delay_step = iocfg->io_in_delay_max / - (rwcfg->mem_dq_per_read_dqs - 1); + const u32 delay_step = seq->iocfg->io_in_delay_max / + (seq->rwcfg->mem_dq_per_read_dqs - 1); int ret; u32 i, p, d, r; debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn); /* Try different dq_in_delays since the DQ path is shorter than DQS. */ - for (r = 0; r < rwcfg->mem_number_of_ranks; + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r += NUM_RANKS_PER_SHADOW_REG) { for (i = 0, p = test_bgn, d = 0; - i < rwcfg->mem_dq_per_read_dqs; + i < seq->rwcfg->mem_dq_per_read_dqs; i++, p++, d += delay_step) { debug_cond(DLEVEL >= 1, "%s:%d: g=%u r=%u i=%u p=%u d=%u\n", @@ -2606,15 +2654,15 @@ static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group, * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different * dq_in_delay values */ - ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group); + ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(seq, rw_group); debug_cond(DLEVEL >= 1, "%s:%d: g=%u found=%u; Reseting delay chain to zero\n", __func__, __LINE__, rw_group, !ret); - for (r = 0; r < rwcfg->mem_number_of_ranks; + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r += NUM_RANKS_PER_SHADOW_REG) { - scc_mgr_apply_group_dq_in_delay(test_bgn, 0); + scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0); writel(0, &sdr_scc_mgr->update); } @@ -2632,7 +2680,8 @@ static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group, * within a group. 
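rw_mgr_mem_calibrate_dqs_enable_calibration() above spreads the per-bit DQ input delays evenly over the whole input delay chain before sweeping the DQS-enable phase (its comment notes the DQ path is shorter than DQS). The spreading arithmetic in a stand-alone sketch with assumed chain parameters:

#include <stdio.h>

int main(void)
{
	const unsigned io_in_delay_max = 31;	/* assumed delay chain length */
	const unsigned dq_per_read_dqs = 8;	/* assumed DQ bits per DQS */
	/* One less DQ to divide among, since the first bit stays at 0. */
	const unsigned delay_step = io_in_delay_max / (dq_per_read_dqs - 1);
	unsigned i, d;

	for (i = 0, d = 0; i < dq_per_read_dqs; i++, d += delay_step)
		printf("DQ%u -> in_delay %u\n", i, d);
	return 0;
}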
*/ static int -rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn, +rw_mgr_mem_calibrate_dq_dqs_centering(struct socfpga_sdrseq *seq, + const u32 rw_group, const u32 test_bgn, const int use_read_test, const int update_fom) @@ -2646,9 +2695,9 @@ rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn, */ grp_calibrated = 1; for (rank_bgn = 0, sr = 0; - rank_bgn < rwcfg->mem_number_of_ranks; + rank_bgn < seq->rwcfg->mem_number_of_ranks; rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) { - ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group, + ret = rw_mgr_mem_calibrate_vfifo_center(seq, rank_bgn, rw_group, test_bgn, use_read_test, update_fom); @@ -2679,7 +2728,8 @@ rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn, * - DQS input phase and DQS input delay (DQ/DQS Centering) * - we also do a per-bit deskew on the DQ lines. */ -static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn) +static int rw_mgr_mem_calibrate_vfifo(struct socfpga_sdrseq *seq, + const u32 rw_group, const u32 test_bgn) { u32 p, d; u32 dtaps_per_ptap; @@ -2697,8 +2747,9 @@ static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn) failed_substage = CAL_SUBSTAGE_GUARANTEED_READ; /* USER Determine number of delay taps for each phase tap. */ - dtaps_per_ptap = DIV_ROUND_UP(iocfg->delay_per_opa_tap, - iocfg->delay_per_dqs_en_dchain_tap) - 1; + dtaps_per_ptap = DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap, + seq->iocfg->delay_per_dqs_en_dchain_tap) + - 1; for (d = 0; d <= dtaps_per_ptap; d += 2) { /* @@ -2708,18 +2759,22 @@ static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn) * output side yet. */ if (d > 0) { - scc_mgr_apply_group_all_out_delay_add_all_ranks( - rw_group, d); + scc_mgr_apply_group_all_out_delay_add_all_ranks(seq, + rw_group, + d); } - for (p = 0; p <= iocfg->dqdqs_out_phase_max; p++) { + for (p = 0; p <= seq->iocfg->dqdqs_out_phase_max; p++) { /* 1) Guaranteed Write */ - ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p); + ret = rw_mgr_mem_calibrate_guaranteed_write(seq, + rw_group, + p); if (ret) break; /* 2) DQS Enable Calibration */ - ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group, + ret = rw_mgr_mem_calibrate_dqs_enable_calibration(seq, + rw_group, test_bgn); if (ret) { failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE; @@ -2731,8 +2786,10 @@ static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn) * If doing read after write calibration, do not update * FOM now. Do it then. */ - ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, - test_bgn, 1, 0); + ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq, + rw_group, + test_bgn, + 1, 0); if (ret) { failed_substage = CAL_SUBSTAGE_VFIFO_CENTER; continue; @@ -2744,7 +2801,8 @@ static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn) } /* Calibration Stage 1 failed. */ - set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage); + set_failing_group_stage(seq, rw_group, CAL_STAGE_VFIFO, + failed_substage); return 0; /* Calibration Stage 1 completed OK. */ @@ -2755,7 +2813,7 @@ cal_done_ok: * first case). */ if (d > 2) - scc_mgr_zero_group(rw_group, 1); + scc_mgr_zero_group(seq, rw_group, 1); return 1; } @@ -2770,7 +2828,8 @@ cal_done_ok: * This function implements UniPHY calibration Stage 3, as explained in * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages". 
*/ -static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group, +static int rw_mgr_mem_calibrate_vfifo_end(struct socfpga_sdrseq *seq, + const u32 rw_group, const u32 test_bgn) { int ret; @@ -2782,9 +2841,10 @@ static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group, reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES); reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER); - ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1); + ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq, rw_group, test_bgn, 0, + 1); if (ret) - set_failing_group_stage(rw_group, + set_failing_group_stage(seq, rw_group, CAL_STAGE_VFIFO_AFTER_WRITES, CAL_SUBSTAGE_VFIFO_CENTER); return ret; @@ -2799,7 +2859,7 @@ static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group, * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages". * Calibrate LFIFO to find smallest read latency. */ -static u32 rw_mgr_mem_calibrate_lfifo(void) +static u32 rw_mgr_mem_calibrate_lfifo(struct socfpga_sdrseq *seq) { int found_one = 0; @@ -2810,14 +2870,15 @@ static u32 rw_mgr_mem_calibrate_lfifo(void) reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY); /* Load up the patterns used by read calibration for all ranks */ - rw_mgr_mem_calibrate_read_load_patterns(0, 1); + rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1); do { - writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); + writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat); debug_cond(DLEVEL >= 2, "%s:%d lfifo: read_lat=%u", - __func__, __LINE__, gbl->curr_read_lat); + __func__, __LINE__, seq->gbl.curr_read_lat); - if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS, + if (!rw_mgr_mem_calibrate_read_test_all_ranks(seq, 0, + NUM_READ_TESTS, PASS_ALL_BITS, 1)) break; @@ -2826,26 +2887,26 @@ static u32 rw_mgr_mem_calibrate_lfifo(void) * Reduce read latency and see if things are * working correctly. */ - gbl->curr_read_lat--; - } while (gbl->curr_read_lat > 0); + seq->gbl.curr_read_lat--; + } while (seq->gbl.curr_read_lat > 0); /* Reset the fifos to get pointers to known state. */ writel(0, &phy_mgr_cmd->fifo_reset); if (found_one) { /* Add a fudge factor to the read latency that was determined */ - gbl->curr_read_lat += 2; - writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); + seq->gbl.curr_read_lat += 2; + writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat); debug_cond(DLEVEL >= 2, "%s:%d lfifo: success: using read_lat=%u\n", - __func__, __LINE__, gbl->curr_read_lat); + __func__, __LINE__, seq->gbl.curr_read_lat); } else { - set_failing_group_stage(0xff, CAL_STAGE_LFIFO, + set_failing_group_stage(seq, 0xff, CAL_STAGE_LFIFO, CAL_SUBSTAGE_READ_LATENCY); debug_cond(DLEVEL >= 2, "%s:%d lfifo: failed at initial read_lat=%u\n", - __func__, __LINE__, gbl->curr_read_lat); + __func__, __LINE__, seq->gbl.curr_read_lat); } return found_one; @@ -2853,7 +2914,8 @@ static u32 rw_mgr_mem_calibrate_lfifo(void) /** * search_window() - Search for the/part of the window with DM/DQS shift - * @search_dm: If 1, search for the DM shift, if 0, search for DQS shift + * @search_dm: If 1, search for the DM shift, if 0, search for DQS + * shift * @rank_bgn: Rank number * @write_group: Write Group * @bgn_curr: Current window begin @@ -2865,20 +2927,21 @@ static u32 rw_mgr_mem_calibrate_lfifo(void) * * Search for the/part of the window with DM/DQS shift. 
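rw_mgr_mem_calibrate_lfifo() above keeps lowering the read latency while the full read test still passes, then backs off by a fudge factor of two. The control flow in a stand-alone sketch; read_test_passes() is a made-up stand-in for the hardware read test:

#include <stdio.h>

/* Invented stand-in: pretend 7 is the lowest latency that still reads OK. */
static int read_test_passes(unsigned int read_lat)
{
	return read_lat >= 7;
}

int main(void)
{
	unsigned int curr_read_lat = 16;	/* assumed initial latency */
	int found_one = 0;

	do {
		if (!read_test_passes(curr_read_lat))
			break;
		found_one = 1;
		curr_read_lat--;
	} while (curr_read_lat > 0);

	if (found_one)
		curr_read_lat += 2;	/* fudge factor from the code above */

	printf("read latency %u (found_one=%d)\n", curr_read_lat, found_one);
	return 0;
}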
*/ -static void search_window(const int search_dm, - const u32 rank_bgn, const u32 write_group, - int *bgn_curr, int *end_curr, int *bgn_best, - int *end_best, int *win_best, int new_dqs) +static void search_window(struct socfpga_sdrseq *seq, + const int search_dm, const u32 rank_bgn, + const u32 write_group, int *bgn_curr, int *end_curr, + int *bgn_best, int *end_best, int *win_best, + int new_dqs) { u32 bit_chk; - const int max = iocfg->io_out1_delay_max - new_dqs; + const int max = seq->iocfg->io_out1_delay_max - new_dqs; int d, di; /* Search for the/part of the window with DM/DQS shift. */ for (di = max; di >= 0; di -= DELTA_D) { if (search_dm) { d = di; - scc_mgr_apply_group_dm_out1_delay(d); + scc_mgr_apply_group_dm_out1_delay(seq, d); } else { /* For DQS, we go from 0...max */ d = max - di; @@ -2886,14 +2949,15 @@ static void search_window(const int search_dm, * Note: This only shifts DQS, so are we limiting * ourselves to width of DQ unnecessarily. */ - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, + scc_mgr_apply_group_dqs_io_and_oct_out1(seq, + write_group, d + new_dqs); } writel(0, &sdr_scc_mgr->update); - if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1, - PASS_ALL_BITS, &bit_chk, + if (rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group, + 1, PASS_ALL_BITS, &bit_chk, 0)) { /* Set current end of the window. */ *end_curr = search_dm ? -d : d; @@ -2902,7 +2966,7 @@ static void search_window(const int search_dm, * If a starting edge of our window has not been seen * this is our current start of the DM window. */ - if (*bgn_curr == iocfg->io_out1_delay_max + 1) + if (*bgn_curr == seq->iocfg->io_out1_delay_max + 1) *bgn_curr = search_dm ? -d : d; /* @@ -2916,8 +2980,8 @@ static void search_window(const int search_dm, } } else { /* We just saw a failing test. Reset temp edge. */ - *bgn_curr = iocfg->io_out1_delay_max + 1; - *end_curr = iocfg->io_out1_delay_max + 1; + *bgn_curr = seq->iocfg->io_out1_delay_max + 1; + *end_curr = seq->iocfg->io_out1_delay_max + 1; /* Early exit is only applicable to DQS. */ if (search_dm) @@ -2928,7 +2992,8 @@ static void search_window(const int search_dm, * chain space is less than already seen largest * window we can exit. */ - if (*win_best - 1 > iocfg->io_out1_delay_max - new_dqs - d) + if (*win_best - 1 > seq->iocfg->io_out1_delay_max + - new_dqs - d) break; } } @@ -2944,22 +3009,23 @@ static void search_window(const int search_dm, * certain windows. 
*/ static int -rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, +rw_mgr_mem_calibrate_writes_center(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const u32 write_group, const u32 test_bgn) { int i; u32 sticky_bit_chk; u32 min_index; - int left_edge[rwcfg->mem_dq_per_write_dqs]; - int right_edge[rwcfg->mem_dq_per_write_dqs]; + int left_edge[seq->rwcfg->mem_dq_per_write_dqs]; + int right_edge[seq->rwcfg->mem_dq_per_write_dqs]; int mid; int mid_min, orig_mid_min; int new_dqs, start_dqs; int dq_margin, dqs_margin, dm_margin; - int bgn_curr = iocfg->io_out1_delay_max + 1; - int end_curr = iocfg->io_out1_delay_max + 1; - int bgn_best = iocfg->io_out1_delay_max + 1; - int end_best = iocfg->io_out1_delay_max + 1; + int bgn_curr = seq->iocfg->io_out1_delay_max + 1; + int end_curr = seq->iocfg->io_out1_delay_max + 1; + int bgn_best = seq->iocfg->io_out1_delay_max + 1; + int end_best = seq->iocfg->io_out1_delay_max + 1; int win_best = 0; int ret; @@ -2970,37 +3036,39 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET) + - (rwcfg->mem_dq_per_write_dqs << 2)); + (seq->rwcfg->mem_dq_per_write_dqs << 2)); /* Per-bit deskew. */ /* * Set the left and right edge of each bit to an illegal value. - * Use (iocfg->io_out1_delay_max + 1) as an illegal value. + * Use (seq->iocfg->io_out1_delay_max + 1) as an illegal value. */ sticky_bit_chk = 0; - for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) { - left_edge[i] = iocfg->io_out1_delay_max + 1; - right_edge[i] = iocfg->io_out1_delay_max + 1; + for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) { + left_edge[i] = seq->iocfg->io_out1_delay_max + 1; + right_edge[i] = seq->iocfg->io_out1_delay_max + 1; } /* Search for the left edge of the window for each bit. */ - search_left_edge(1, rank_bgn, write_group, 0, test_bgn, + search_left_edge(seq, 1, rank_bgn, write_group, 0, test_bgn, &sticky_bit_chk, left_edge, right_edge, 0); /* Search for the right edge of the window for each bit. */ - ret = search_right_edge(1, rank_bgn, write_group, 0, + ret = search_right_edge(seq, 1, rank_bgn, write_group, 0, start_dqs, 0, &sticky_bit_chk, left_edge, right_edge, 0); if (ret) { - set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES, + set_failing_group_stage(seq, test_bgn + ret - 1, + CAL_STAGE_WRITES, CAL_SUBSTAGE_WRITES_CENTER); return -EINVAL; } - min_index = get_window_mid_index(1, left_edge, right_edge, &mid_min); + min_index = get_window_mid_index(seq, 1, left_edge, right_edge, + &mid_min); /* Determine the amount we can change DQS (which is -mid_min). */ orig_mid_min = mid_min; @@ -3011,11 +3079,11 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, __func__, __LINE__, start_dqs, new_dqs, mid_min); /* Add delay to bring centre of all DQ windows to the same "level". */ - center_dq_windows(1, left_edge, right_edge, mid_min, orig_mid_min, + center_dq_windows(seq, 1, left_edge, right_edge, mid_min, orig_mid_min, min_index, 0, &dq_margin, &dqs_margin); /* Move DQS */ - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); + scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs); writel(0, &sdr_scc_mgr->update); /* Centre DM */ @@ -3023,17 +3091,17 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, /* * Set the left and right edge of each bit to an illegal value. - * Use (iocfg->io_out1_delay_max + 1) as an illegal value. 
+ * Use (seq->iocfg->io_out1_delay_max + 1) as an illegal value. */ - left_edge[0] = iocfg->io_out1_delay_max + 1; - right_edge[0] = iocfg->io_out1_delay_max + 1; + left_edge[0] = seq->iocfg->io_out1_delay_max + 1; + right_edge[0] = seq->iocfg->io_out1_delay_max + 1; /* Search for the/part of the window with DM shift. */ - search_window(1, rank_bgn, write_group, &bgn_curr, &end_curr, + search_window(seq, 1, rank_bgn, write_group, &bgn_curr, &end_curr, &bgn_best, &end_best, &win_best, 0); /* Reset DM delay chains to 0. */ - scc_mgr_apply_group_dm_out1_delay(0); + scc_mgr_apply_group_dm_out1_delay(seq, 0); /* * Check to see if the current window nudges up aganist 0 delay. @@ -3041,12 +3109,12 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, * search begins as a new search. */ if (end_curr != 0) { - bgn_curr = iocfg->io_out1_delay_max + 1; - end_curr = iocfg->io_out1_delay_max + 1; + bgn_curr = seq->iocfg->io_out1_delay_max + 1; + end_curr = seq->iocfg->io_out1_delay_max + 1; } /* Search for the/part of the window with DQS shifts. */ - search_window(0, rank_bgn, write_group, &bgn_curr, &end_curr, + search_window(seq, 0, rank_bgn, write_group, &bgn_curr, &end_curr, &bgn_best, &end_best, &win_best, new_dqs); /* Assign left and right edge for cal and reporting. */ @@ -3057,7 +3125,7 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, __func__, __LINE__, left_edge[0], right_edge[0]); /* Move DQS (back to orig). */ - scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs); + scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs); /* Move DM */ @@ -3074,7 +3142,7 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, else dm_margin = left_edge[0] - mid; - scc_mgr_apply_group_dm_out1_delay(mid); + scc_mgr_apply_group_dm_out1_delay(seq, mid); writel(0, &sdr_scc_mgr->update); debug_cond(DLEVEL >= 2, @@ -3082,7 +3150,7 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, __func__, __LINE__, left_edge[0], right_edge[0], mid, dm_margin); /* Export values. */ - gbl->fom_out += dq_margin + dqs_margin; + seq->gbl.fom_out += dq_margin + dqs_margin; debug_cond(DLEVEL >= 2, "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n", @@ -3111,7 +3179,8 @@ rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group, * This function implements UniPHY calibration Stage 2, as explained in * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages". */ -static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group, +static int rw_mgr_mem_calibrate_writes(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const u32 group, const u32 test_bgn) { int ret; @@ -3123,9 +3192,10 @@ static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group, reg_file_set_stage(CAL_STAGE_WRITES); reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER); - ret = rw_mgr_mem_calibrate_writes_center(rank_bgn, group, test_bgn); + ret = rw_mgr_mem_calibrate_writes_center(seq, rank_bgn, group, + test_bgn); if (ret) - set_failing_group_stage(group, CAL_STAGE_WRITES, + set_failing_group_stage(seq, group, CAL_STAGE_WRITES, CAL_SUBSTAGE_WRITES_CENTER); return ret; @@ -3136,29 +3206,30 @@ static int rw_mgr_mem_calibrate_writes(const u32 rank_bgn, const u32 group, * * Precharge all banks and activate row 0 in bank "000..." and bank "111...". 
*/ -static void mem_precharge_and_activate(void) +static void mem_precharge_and_activate(struct socfpga_sdrseq *seq) { int r; - for (r = 0; r < rwcfg->mem_number_of_ranks; r++) { + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) { /* Set rank. */ - set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF); + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF); /* Precharge all banks. */ - writel(rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS | + writel(seq->rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET); writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0); - writel(rwcfg->activate_0_and_1_wait1, + writel(seq->rwcfg->activate_0_and_1_wait1, &sdr_rw_load_jump_mgr_regs->load_jump_add0); writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1); - writel(rwcfg->activate_0_and_1_wait2, + writel(seq->rwcfg->activate_0_and_1_wait2, &sdr_rw_load_jump_mgr_regs->load_jump_add1); /* Activate rows. */ - writel(rwcfg->activate_0_and_1, SDR_PHYGRP_RWMGRGRP_ADDRESS | - RW_MGR_RUN_SINGLE_GROUP_OFFSET); + writel(seq->rwcfg->activate_0_and_1, + SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET); } } @@ -3167,14 +3238,15 @@ static void mem_precharge_and_activate(void) * * Configure memory RLAT and WLAT parameters. */ -static void mem_init_latency(void) +static void mem_init_latency(struct socfpga_sdrseq *seq) { /* * For AV/CV, LFIFO is hardened and always runs at full rate * so max latency in AFI clocks, used here, is correspondingly * smaller. */ - const u32 max_latency = (1 << misccfg->max_latency_count_width) - 1; + const u32 max_latency = (1 << seq->misccfg->max_latency_count_width) + - 1; u32 rlat, wlat; debug("%s:%d\n", __func__, __LINE__); @@ -3186,17 +3258,17 @@ static void mem_init_latency(void) wlat = readl(&data_mgr->t_wl_add); wlat += readl(&data_mgr->mem_t_add); - gbl->rw_wl_nop_cycles = wlat - 1; + seq->gbl.rw_wl_nop_cycles = wlat - 1; /* Read in readl latency. */ rlat = readl(&data_mgr->t_rl_add); /* Set a pretty high read latency initially. */ - gbl->curr_read_lat = rlat + 16; - if (gbl->curr_read_lat > max_latency) - gbl->curr_read_lat = max_latency; + seq->gbl.curr_read_lat = rlat + 16; + if (seq->gbl.curr_read_lat > max_latency) + seq->gbl.curr_read_lat = max_latency; - writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); + writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat); /* Advertise write latency. */ writel(wlat, &phy_mgr_cfg->afi_wlat); @@ -3207,22 +3279,22 @@ static void mem_init_latency(void) * * Set VFIFO and LFIFO to instant-on settings in skip calibration mode. */ -static void mem_skip_calibrate(void) +static void mem_skip_calibrate(struct socfpga_sdrseq *seq) { u32 vfifo_offset; u32 i, j, r; debug("%s:%d\n", __func__, __LINE__); /* Need to update every shadow register set used by the interface */ - for (r = 0; r < rwcfg->mem_number_of_ranks; + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r += NUM_RANKS_PER_SHADOW_REG) { /* * Set output phase alignment settings appropriate for * skip calibration. 
*/ - for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) { + for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) { scc_mgr_set_dqs_en_phase(i, 0); - if (iocfg->dll_chain_length == 6) + if (seq->iocfg->dll_chain_length == 6) scc_mgr_set_dqdqs_output_phase(i, 6); else scc_mgr_set_dqdqs_output_phase(i, 7); @@ -3245,20 +3317,22 @@ static void mem_skip_calibrate(void) * Hence, to make DQS aligned to CK, we need to delay * DQS by: * (720 - 90 - 180 - 2) * - * (360 / iocfg->dll_chain_length) + * (360 / seq->iocfg->dll_chain_length) * - * Dividing the above by (360 / iocfg->dll_chain_length) + * Dividing the above by + (360 / seq->iocfg->dll_chain_length) * gives us the number of ptaps, which simplies to: * - * (1.25 * iocfg->dll_chain_length - 2) + * (1.25 * seq->iocfg->dll_chain_length - 2) */ scc_mgr_set_dqdqs_output_phase(i, - ((125 * iocfg->dll_chain_length) / 100) - 2); + ((125 * seq->iocfg->dll_chain_length) + / 100) - 2); } writel(0xff, &sdr_scc_mgr->dqs_ena); writel(0xff, &sdr_scc_mgr->dqs_io_ena); - for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) { + for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) { writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_GROUP_COUNTER_OFFSET); } @@ -3268,7 +3342,7 @@ static void mem_skip_calibrate(void) } /* Compensate for simulation model behaviour */ - for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) { + for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) { scc_mgr_set_dqs_bus_in_delay(i, 10); scc_mgr_load_dqs(i); } @@ -3278,7 +3352,7 @@ static void mem_skip_calibrate(void) * ArriaV has hard FIFOs that can only be initialized by incrementing * in sequencer. */ - vfifo_offset = misccfg->calib_vfifo_offset; + vfifo_offset = seq->misccfg->calib_vfifo_offset; for (j = 0; j < vfifo_offset; j++) writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy); writel(0, &phy_mgr_cmd->fifo_reset); @@ -3287,8 +3361,8 @@ static void mem_skip_calibrate(void) * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal * setting from generation-time constant. */ - gbl->curr_read_lat = misccfg->calib_lfifo_offset; - writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat); + seq->gbl.curr_read_lat = seq->misccfg->calib_lfifo_offset; + writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat); } /** @@ -3296,7 +3370,7 @@ static void mem_skip_calibrate(void) * * Perform memory calibration. */ -static u32 mem_calibrate(void) +static u32 mem_calibrate(struct socfpga_sdrseq *seq) { u32 i; u32 rank_bgn, sr; @@ -3306,25 +3380,25 @@ static u32 mem_calibrate(void) u32 failing_groups = 0; u32 group_failed = 0; - const u32 rwdqs_ratio = rwcfg->mem_if_read_dqs_width / - rwcfg->mem_if_write_dqs_width; + const u32 rwdqs_ratio = seq->rwcfg->mem_if_read_dqs_width / + seq->rwcfg->mem_if_write_dqs_width; debug("%s:%d\n", __func__, __LINE__); /* Initialize the data settings */ - gbl->error_substage = CAL_SUBSTAGE_NIL; - gbl->error_stage = CAL_STAGE_NIL; - gbl->error_group = 0xff; - gbl->fom_in = 0; - gbl->fom_out = 0; + seq->gbl.error_substage = CAL_SUBSTAGE_NIL; + seq->gbl.error_stage = CAL_STAGE_NIL; + seq->gbl.error_group = 0xff; + seq->gbl.fom_in = 0; + seq->gbl.fom_out = 0; /* Initialize WLAT and RLAT. */ - mem_init_latency(); + mem_init_latency(seq); /* Initialize bit slips. */ - mem_precharge_and_activate(); + mem_precharge_and_activate(seq); - for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) { + for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) { writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_GROUP_COUNTER_OFFSET); /* Only needed once to set all groups, pins, DQ, DQS, DM. 
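The skip-calibration path above sets the DQDQS output phase straight from the DLL chain length; the derivation in its comment simplifies to (1.25 * dll_chain_length - 2) phase taps, which the code evaluates with integer arithmetic as (125 * dll_chain_length) / 100 - 2. A quick stand-alone check of that expression for a few assumed chain lengths:

#include <stdio.h>

int main(void)
{
	const unsigned int chains[] = { 6, 8, 10, 12 };	/* assumed lengths */
	unsigned int i;

	for (i = 0; i < sizeof(chains) / sizeof(chains[0]); i++)
		printf("dll_chain_length=%2u -> %u ptaps\n",
		       chains[i], (125 * chains[i]) / 100 - 2);
	return 0;
}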
*/ @@ -3335,12 +3409,12 @@ static u32 mem_calibrate(void) } /* Calibration is skipped. */ - if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { + if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { /* * Set VFIFO and LFIFO to instant-on settings in skip * calibration mode. */ - mem_skip_calibrate(); + mem_skip_calibrate(seq); /* * Do not remove this line as it makes sure all of our @@ -3356,13 +3430,13 @@ static u32 mem_calibrate(void) * Zero all delay chain/phase settings for all * groups and all shadow register sets. */ - scc_mgr_zero_all(); + scc_mgr_zero_all(seq); run_groups = ~0; for (write_group = 0, write_test_bgn = 0; write_group - < rwcfg->mem_if_write_dqs_width; write_group++, - write_test_bgn += rwcfg->mem_dq_per_write_dqs) { + < seq->rwcfg->mem_if_write_dqs_width; write_group++, + write_test_bgn += seq->rwcfg->mem_dq_per_write_dqs) { /* Initialize the group failure */ group_failed = 0; @@ -3376,22 +3450,22 @@ static u32 mem_calibrate(void) writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_GROUP_COUNTER_OFFSET); - scc_mgr_zero_group(write_group, 0); + scc_mgr_zero_group(seq, write_group, 0); for (read_group = write_group * rwdqs_ratio, read_test_bgn = 0; read_group < (write_group + 1) * rwdqs_ratio; read_group++, - read_test_bgn += rwcfg->mem_dq_per_read_dqs) { + read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) { if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO) continue; /* Calibrate the VFIFO */ - if (rw_mgr_mem_calibrate_vfifo(read_group, + if (rw_mgr_mem_calibrate_vfifo(seq, read_group, read_test_bgn)) continue; - if (!(gbl->phy_debug_mode_flags & + if (!(seq->gbl.phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS)) return 0; @@ -3401,7 +3475,7 @@ static u32 mem_calibrate(void) /* Calibrate the output side */ for (rank_bgn = 0, sr = 0; - rank_bgn < rwcfg->mem_number_of_ranks; + rank_bgn < seq->rwcfg->mem_number_of_ranks; rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) { if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) continue; @@ -3412,13 +3486,13 @@ static u32 mem_calibrate(void) continue; /* Calibrate WRITEs */ - if (!rw_mgr_mem_calibrate_writes(rank_bgn, + if (!rw_mgr_mem_calibrate_writes(seq, rank_bgn, write_group, write_test_bgn)) continue; group_failed = 1; - if (!(gbl->phy_debug_mode_flags & + if (!(seq->gbl.phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS)) return 0; } @@ -3431,15 +3505,16 @@ static u32 mem_calibrate(void) read_test_bgn = 0; read_group < (write_group + 1) * rwdqs_ratio; read_group++, - read_test_bgn += rwcfg->mem_dq_per_read_dqs) { + read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) { if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) continue; - if (!rw_mgr_mem_calibrate_vfifo_end(read_group, + if (!rw_mgr_mem_calibrate_vfifo_end(seq, + read_group, read_test_bgn)) continue; - if (!(gbl->phy_debug_mode_flags & + if (!(seq->gbl.phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS)) return 0; @@ -3465,7 +3540,7 @@ grp_failed: /* A group failed, increment the counter. */ continue; /* Calibrate the LFIFO */ - if (!rw_mgr_mem_calibrate_lfifo()) + if (!rw_mgr_mem_calibrate_lfifo(seq)) return 0; } @@ -3482,7 +3557,7 @@ grp_failed: /* A group failed, increment the counter. */ * * This function triggers the entire memory calibration procedure. 
*/ -static int run_mem_calibrate(void) +static int run_mem_calibrate(struct socfpga_sdrseq *seq) { int pass; u32 ctrl_cfg; @@ -3497,17 +3572,17 @@ static int run_mem_calibrate(void) writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK, &sdr_ctrl->ctrl_cfg); - phy_mgr_initialize(); - rw_mgr_mem_initialize(); + phy_mgr_initialize(seq); + rw_mgr_mem_initialize(seq); /* Perform the actual memory calibration. */ - pass = mem_calibrate(); + pass = mem_calibrate(seq); - mem_precharge_and_activate(); + mem_precharge_and_activate(seq); writel(0, &phy_mgr_cmd->fifo_reset); /* Handoff. */ - rw_mgr_mem_handoff(); + rw_mgr_mem_handoff(seq); /* * In Hard PHY this is a 2-bit control: * 0: AFI Mux Select @@ -3528,25 +3603,25 @@ static int run_mem_calibrate(void) * This function reports the results of the memory calibration * and writes debug information into the register file. */ -static void debug_mem_calibrate(int pass) +static void debug_mem_calibrate(struct socfpga_sdrseq *seq, int pass) { u32 debug_info; if (pass) { debug("%s: CALIBRATION PASSED\n", __FILE__); - gbl->fom_in /= 2; - gbl->fom_out /= 2; + seq->gbl.fom_in /= 2; + seq->gbl.fom_out /= 2; - if (gbl->fom_in > 0xff) - gbl->fom_in = 0xff; + if (seq->gbl.fom_in > 0xff) + seq->gbl.fom_in = 0xff; - if (gbl->fom_out > 0xff) - gbl->fom_out = 0xff; + if (seq->gbl.fom_out > 0xff) + seq->gbl.fom_out = 0xff; /* Update the FOM in the register file */ - debug_info = gbl->fom_in; - debug_info |= gbl->fom_out << 8; + debug_info = seq->gbl.fom_in; + debug_info |= seq->gbl.fom_out << 8; writel(debug_info, &sdr_reg_file->fom); writel(debug_info, &phy_mgr_cfg->cal_debug_info); @@ -3554,18 +3629,18 @@ static void debug_mem_calibrate(int pass) } else { debug("%s: CALIBRATION FAILED\n", __FILE__); - debug_info = gbl->error_stage; - debug_info |= gbl->error_substage << 8; - debug_info |= gbl->error_group << 16; + debug_info = seq->gbl.error_stage; + debug_info |= seq->gbl.error_substage << 8; + debug_info |= seq->gbl.error_group << 16; writel(debug_info, &sdr_reg_file->failing_stage); writel(debug_info, &phy_mgr_cfg->cal_debug_info); writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status); /* Update the failing group/stage in the register file */ - debug_info = gbl->error_stage; - debug_info |= gbl->error_substage << 8; - debug_info |= gbl->error_group << 16; + debug_info = seq->gbl.error_stage; + debug_info |= seq->gbl.error_substage << 8; + debug_info |= seq->gbl.error_group << 16; writel(debug_info, &sdr_reg_file->failing_stage); } @@ -3599,10 +3674,11 @@ static void hc_initialize_rom_data(void) * * Initialize SDR register file. */ -static void initialize_reg_file(void) +static void initialize_reg_file(struct socfpga_sdrseq *seq) { /* Initialize the register file with the correct data */ - writel(misccfg->reg_file_init_seq_signature, &sdr_reg_file->signature); + writel(seq->misccfg->reg_file_init_seq_signature, + &sdr_reg_file->signature); writel(0, &sdr_reg_file->debug_data_addr); writel(0, &sdr_reg_file->cur_stage); writel(0, &sdr_reg_file->fom); @@ -3666,15 +3742,15 @@ static void initialize_hps_phy(void) * * Initialize the register file with usable initial data. */ -static void initialize_tracking(void) +static void initialize_tracking(struct socfpga_sdrseq *seq) { /* * Initialize the register file with the correct data. * Compute usable version of value in case we skip full * computation later. 
*/ - writel(DIV_ROUND_UP(iocfg->delay_per_opa_tap, - iocfg->delay_per_dchain_tap) - 1, + writel(DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap, + seq->iocfg->delay_per_dchain_tap) - 1, &sdr_reg_file->dtaps_per_ptap); /* trk_sample_count */ @@ -3693,23 +3769,22 @@ static void initialize_tracking(void) &sdr_reg_file->delays); /* mux delay */ - writel((rwcfg->idle << 24) | (rwcfg->activate_1 << 16) | - (rwcfg->sgle_read << 8) | (rwcfg->precharge_all << 0), + writel((seq->rwcfg->idle << 24) | (seq->rwcfg->activate_1 << 16) | + (seq->rwcfg->sgle_read << 8) | (seq->rwcfg->precharge_all << 0), &sdr_reg_file->trk_rw_mgr_addr); - writel(rwcfg->mem_if_read_dqs_width, + writel(seq->rwcfg->mem_if_read_dqs_width, &sdr_reg_file->trk_read_dqs_width); /* trefi [7:0] */ - writel((rwcfg->refresh_all << 24) | (1000 << 0), + writel((seq->rwcfg->refresh_all << 24) | (1000 << 0), &sdr_reg_file->trk_rfsh); } int sdram_calibration_full(struct socfpga_sdr *sdr) { - struct param_type my_param; - struct gbl_type my_gbl; u32 pass; + struct socfpga_sdrseq seq; /* * For size reasons, this file uses hard coded addresses. @@ -3718,60 +3793,61 @@ int sdram_calibration_full(struct socfpga_sdr *sdr) if (sdr != (struct socfpga_sdr *)SOCFPGA_SDR_ADDRESS) return -ENODEV; - memset(&my_param, 0, sizeof(my_param)); - memset(&my_gbl, 0, sizeof(my_gbl)); - - param = &my_param; - gbl = &my_gbl; + memset(&seq, 0, sizeof(seq)); - rwcfg = socfpga_get_sdram_rwmgr_config(); - iocfg = socfpga_get_sdram_io_config(); - misccfg = socfpga_get_sdram_misc_config(); + seq.rwcfg = socfpga_get_sdram_rwmgr_config(); + seq.iocfg = socfpga_get_sdram_io_config(); + seq.misccfg = socfpga_get_sdram_misc_config(); /* Set the calibration enabled by default */ - gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; + seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; /* * Only sweep all groups (regardless of fail state) by default * Set enabled read test by default. 
*/ #if DISABLE_GUARANTEED_READ - gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ; + seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ; #endif /* Initialize the register file */ - initialize_reg_file(); + initialize_reg_file(&seq); /* Initialize any PHY CSR */ initialize_hps_phy(); scc_mgr_initialize(); - initialize_tracking(); + initialize_tracking(&seq); debug("%s: Preparing to start memory calibration\n", __FILE__); debug("%s:%d\n", __func__, __LINE__); debug_cond(DLEVEL >= 1, "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ", - rwcfg->mem_number_of_ranks, rwcfg->mem_number_of_cs_per_dimm, - rwcfg->mem_dq_per_read_dqs, rwcfg->mem_dq_per_write_dqs, - rwcfg->mem_virtual_groups_per_read_dqs, - rwcfg->mem_virtual_groups_per_write_dqs); + seq.rwcfg->mem_number_of_ranks, + seq.rwcfg->mem_number_of_cs_per_dimm, + seq.rwcfg->mem_dq_per_read_dqs, + seq.rwcfg->mem_dq_per_write_dqs, + seq.rwcfg->mem_virtual_groups_per_read_dqs, + seq.rwcfg->mem_virtual_groups_per_write_dqs); debug_cond(DLEVEL >= 1, "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ", - rwcfg->mem_if_read_dqs_width, rwcfg->mem_if_write_dqs_width, - rwcfg->mem_data_width, rwcfg->mem_data_mask_width, - iocfg->delay_per_opa_tap, iocfg->delay_per_dchain_tap); + seq.rwcfg->mem_if_read_dqs_width, + seq.rwcfg->mem_if_write_dqs_width, + seq.rwcfg->mem_data_width, seq.rwcfg->mem_data_mask_width, + seq.iocfg->delay_per_opa_tap, + seq.iocfg->delay_per_dchain_tap); debug_cond(DLEVEL >= 1, "dtap_dqsen_delay=%u, dll=%u", - iocfg->delay_per_dqs_en_dchain_tap, iocfg->dll_chain_length); + seq.iocfg->delay_per_dqs_en_dchain_tap, + seq.iocfg->dll_chain_length); debug_cond(DLEVEL >= 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ", - iocfg->dqs_en_phase_max, iocfg->dqdqs_out_phase_max, - iocfg->dqs_en_delay_max, iocfg->dqs_in_delay_max); + seq.iocfg->dqs_en_phase_max, seq.iocfg->dqdqs_out_phase_max, + seq.iocfg->dqs_en_delay_max, seq.iocfg->dqs_in_delay_max); debug_cond(DLEVEL >= 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ", - iocfg->io_in_delay_max, iocfg->io_out1_delay_max, - iocfg->io_out2_delay_max); + seq.iocfg->io_in_delay_max, seq.iocfg->io_out1_delay_max, + seq.iocfg->io_out2_delay_max); debug_cond(DLEVEL >= 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n", - iocfg->dqs_in_reserve, iocfg->dqs_out_reserve); + seq.iocfg->dqs_in_reserve, seq.iocfg->dqs_out_reserve); hc_initialize_rom_data(); @@ -3783,17 +3859,17 @@ int sdram_calibration_full(struct socfpga_sdr *sdr) * Load global needed for those actions that require * some dynamic calibration support. */ - dyn_calib_steps = STATIC_CALIB_STEPS; + seq.dyn_calib_steps = STATIC_CALIB_STEPS; /* * Load global to allow dynamic selection of delay loop settings * based on calibration mode. 
*/ - if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS)) - skip_delay_mask = 0xff; + if (!(seq.dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS)) + seq.skip_delay_mask = 0xff; else - skip_delay_mask = 0x0; + seq.skip_delay_mask = 0x0; - pass = run_mem_calibrate(); - debug_mem_calibrate(pass); + pass = run_mem_calibrate(&seq); + debug_mem_calibrate(&seq, pass); return pass; } diff --git a/drivers/ddr/altera/sequencer.h b/drivers/ddr/altera/sequencer.h index d7f6935201..4a03c3fdf9 100644 --- a/drivers/ddr/altera/sequencer.h +++ b/drivers/ddr/altera/sequencer.h @@ -6,14 +6,16 @@ #ifndef _SEQUENCER_H_ #define _SEQUENCER_H_ -#define RW_MGR_NUM_DM_PER_WRITE_GROUP (rwcfg->mem_data_mask_width \ - / rwcfg->mem_if_write_dqs_width) -#define RW_MGR_NUM_TRUE_DM_PER_WRITE_GROUP (rwcfg->true_mem_data_mask_width \ - / rwcfg->mem_if_write_dqs_width) +#define RW_MGR_NUM_DM_PER_WRITE_GROUP (seq->rwcfg->mem_data_mask_width \ + / seq->rwcfg->mem_if_write_dqs_width) +#define RW_MGR_NUM_TRUE_DM_PER_WRITE_GROUP ( \ + seq->rwcfg->true_mem_data_mask_width \ + / seq->rwcfg->mem_if_write_dqs_width) -#define RW_MGR_NUM_DQS_PER_WRITE_GROUP (rwcfg->mem_if_read_dqs_width \ - / rwcfg->mem_if_write_dqs_width) -#define NUM_RANKS_PER_SHADOW_REG (rwcfg->mem_number_of_ranks / NUM_SHADOW_REGS) +#define RW_MGR_NUM_DQS_PER_WRITE_GROUP (seq->rwcfg->mem_if_read_dqs_width \ + / seq->rwcfg->mem_if_write_dqs_width) +#define NUM_RANKS_PER_SHADOW_REG (seq->rwcfg->mem_number_of_ranks \ + / NUM_SHADOW_REGS) #define RW_MGR_RUN_SINGLE_GROUP_OFFSET 0x0 #define RW_MGR_RUN_ALL_GROUPS_OFFSET 0x0400 @@ -256,6 +258,26 @@ struct socfpga_sdr { u8 _align9[0xea4]; }; +struct socfpga_sdrseq { + const struct socfpga_sdram_rw_mgr_config *rwcfg; + const struct socfpga_sdram_io_config *iocfg; + const struct socfpga_sdram_misc_config *misccfg; + /* calibration steps requested by the rtl */ + u16 dyn_calib_steps; + /* + * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option + * instead of static, we use boolean logic to select between + * non-skip and skip values + * + * The mask is set to include all bits when not-skipping, but is + * zero when skipping + */ + + u16 skip_delay_mask; /* mask off bits when skipping/not-skipping */ + struct gbl_type gbl; + struct param_type param; +}; + int sdram_calibration_full(struct socfpga_sdr *sdr); #endif /* _SEQUENCER_H_ */ diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 7d8f161b26..873bc8c796 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -1,9 +1,13 @@ config FIRMWARE bool "Enable Firmware driver support" +config SPL_FIRMWARE + bool "Enable Firmware driver support in SPL" + depends on FIRMWARE + config SPL_ARM_PSCI_FW bool - select FIRMWARE + select SPL_FIRMWARE config ARM_PSCI_FW bool @@ -13,6 +17,7 @@ config TI_SCI_PROTOCOL tristate "TI System Control Interface (TISCI) Message Protocol" depends on K3_SEC_PROXY select FIRMWARE + select SPL_FIRMWARE if SPL help TI System Control Interface (TISCI) Message Protocol is used to manage compute systems such as ARM, DSP etc with the system controller in diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 303aa6a631..1fd29f2cdf 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -87,11 +87,18 @@ struct ti_sci_info { struct mbox_chan chan_notify; struct ti_sci_xfer xfer; struct list_head list; + struct list_head dev_list; bool is_secure; u8 host_id; u8 seq; }; +struct ti_sci_exclusive_dev { + u32 id; + u32 count; + struct list_head list; +}; + #define handle_to_ti_sci_info(h) 
container_of(h, struct ti_sci_info, handle) /** @@ -101,7 +108,8 @@ struct ti_sci_info { * @msg_flags: Flag to set for the message * @buf: Buffer to be send to mailbox channel * @tx_message_size: transmit message size - * @rx_message_size: receive message size + * @rx_message_size: receive message size. may be set to zero for send-only + * transactions. * * Helper function which is used by various command functions that are * exposed to clients of this driver for allocating a message traffic event. @@ -121,7 +129,8 @@ static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info, /* Ensure we have sane transfer sizes */ if (rx_message_size > info->desc->max_msg_size || tx_message_size > info->desc->max_msg_size || - rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr)) + (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) || + tx_message_size < sizeof(*hdr)) return ERR_PTR(-ERANGE); info->seq = ~info->seq; @@ -219,7 +228,9 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info, xfer->tx_message.buf = (u32 *)secure_buf; xfer->tx_message.len += sizeof(secure_hdr); - xfer->rx_len += sizeof(secure_hdr); + + if (xfer->rx_len) + xfer->rx_len += sizeof(secure_hdr); } /* Send the message */ @@ -230,7 +241,11 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info, return ret; } - return ti_sci_get_response(info, xfer, &info->chan_rx); + /* Get response if requested */ + if (xfer->rx_len) + ret = ti_sci_get_response(info, xfer, &info->chan_rx); + + return ret; } /** @@ -419,6 +434,47 @@ static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle, addr, size); } +static struct ti_sci_exclusive_dev +*ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id) +{ + struct ti_sci_exclusive_dev *dev; + + list_for_each_entry(dev, dev_list, list) + if (dev->id == id) + return dev; + + return NULL; +} + +static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id) +{ + struct ti_sci_exclusive_dev *dev; + + dev = ti_sci_get_exclusive_dev(&info->dev_list, id); + if (dev) { + dev->count++; + return; + } + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + dev->id = id; + dev->count = 1; + INIT_LIST_HEAD(&dev->list); + list_add_tail(&dev->list, &info->dev_list); +} + +static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id) +{ + struct ti_sci_exclusive_dev *dev; + + dev = ti_sci_get_exclusive_dev(&info->dev_list, id); + if (!dev) + return; + + if (dev->count > 0) + dev->count--; +} + /** * ti_sci_set_device_state() - Set device state helper * @handle: pointer to TI SCI handle @@ -466,6 +522,54 @@ static int ti_sci_set_device_state(const struct ti_sci_handle *handle, if (!ti_sci_is_response_ack(resp)) return -ENODEV; + if (state == MSG_DEVICE_SW_STATE_AUTO_OFF) + ti_sci_delete_exclusive_dev(info, id); + else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE) + ti_sci_add_exclusive_dev(info, id); + + return ret; +} + +/** + * ti_sci_set_device_state_no_wait() - Set device state helper without + * requesting or waiting for a response. + * @handle: pointer to TI SCI handle + * @id: Device identifier + * @flags: flags to setup for the device + * @state: State to move the device to + * + * Return: 0 if all went well, else returns appropriate error value. 
+ */ +static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle, + u32 id, u32 flags, u8 state) +{ + struct ti_sci_msg_req_set_device_state req; + struct ti_sci_info *info; + struct ti_sci_xfer *xfer; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + + xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE, + flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE, + (u32 *)&req, sizeof(req), 0); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(info->dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req.id = id; + req.state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) + dev_err(info->dev, "Mbox send fail %d\n", ret); + return ret; } @@ -547,8 +651,14 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle, */ static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) { - return ti_sci_set_device_state(handle, id, - MSG_FLAG_DEVICE_EXCLUSIVE, + return ti_sci_set_device_state(handle, id, 0, + MSG_DEVICE_SW_STATE_ON); +} + +static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle, + u32 id) +{ + return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE, MSG_DEVICE_SW_STATE_ON); } @@ -566,7 +676,14 @@ static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id) { return ti_sci_set_device_state(handle, id, - MSG_FLAG_DEVICE_EXCLUSIVE, + 0, + MSG_DEVICE_SW_STATE_RETENTION); +} + +static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle, + u32 id) +{ + return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE, MSG_DEVICE_SW_STATE_RETENTION); } @@ -583,8 +700,27 @@ static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id) */ static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id) { - return ti_sci_set_device_state(handle, id, - 0, MSG_DEVICE_SW_STATE_AUTO_OFF); + return ti_sci_set_device_state(handle, id, 0, + MSG_DEVICE_SW_STATE_AUTO_OFF); +} + +static +int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle) +{ + struct ti_sci_exclusive_dev *dev, *tmp; + struct ti_sci_info *info; + int i, cnt; + + info = handle_to_ti_sci_info(handle); + + list_for_each_entry_safe(dev, tmp, &info->dev_list, list) { + cnt = dev->count; + debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt); + for (i = 0; i < cnt; i++) + ti_sci_cmd_put_device(handle, dev->id); + } + + return 0; } /** @@ -2027,6 +2163,137 @@ static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle, } /** + * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a + * processor boot status without requesting or + * waiting for a response. + * @proc_id: Processor ID this request is for + * @num_wait_iterations: Total number of iterations we will check before + * we will timeout and give up + * @num_match_iterations: How many iterations should we have continued + * status to account for status bits glitching. + * This is to make sure that match occurs for + * consecutive checks. This implies that the + * worst case should consider that the stable + * time should at the worst be num_wait_iterations + * num_match_iterations to prevent timeout. + * @delay_per_iteration_us: Specifies how long to wait (in micro seconds) + * between each status checks. 
This is the minimum + * duration, and overhead of register reads and + * checks are on top of this and can vary based on + * varied conditions. + * @delay_before_iterations_us: Specifies how long to wait (in micro seconds) + * before the very first check in the first + * iteration of status check loop. This is the + * minimum duration, and overhead of register + * reads and checks are. + * @status_flags_1_set_all_wait:If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 1. + * @status_flags_1_set_any_wait:If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 1. + * @status_flags_1_clr_all_wait:If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 0. + * @status_flags_1_clr_any_wait:If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 0. + * + * Return: 0 if all goes well, else appropriate error message + */ +static int +ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle, + u8 proc_id, + u8 num_wait_iterations, + u8 num_match_iterations, + u8 delay_per_iteration_us, + u8 delay_before_iterations_us, + u32 status_flags_1_set_all_wait, + u32 status_flags_1_set_any_wait, + u32 status_flags_1_clr_all_wait, + u32 status_flags_1_clr_any_wait) +{ + struct ti_sci_msg_req_wait_proc_boot_status req; + struct ti_sci_info *info; + struct ti_sci_xfer *xfer; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + + xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS, + TI_SCI_FLAG_REQ_GENERIC_NORESPONSE, + (u32 *)&req, sizeof(req), 0); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(info->dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req.processor_id = proc_id; + req.num_wait_iterations = num_wait_iterations; + req.num_match_iterations = num_match_iterations; + req.delay_per_iteration_us = delay_per_iteration_us; + req.delay_before_iterations_us = delay_before_iterations_us; + req.status_flags_1_set_all_wait = status_flags_1_set_all_wait; + req.status_flags_1_set_any_wait = status_flags_1_set_any_wait; + req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait; + req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) + dev_err(info->dev, "Mbox send fail %d\n", ret); + + return ret; +} + +/** + * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without + * requesting or waiting for a response. Note that this API call + * should be followed by placing the respective processor into + * either WFE or WFI mode. + * @handle: Pointer to TI SCI handle + * @proc_id: Processor ID this request is for + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle, + u8 proc_id) +{ + int ret; + + /* + * Send the core boot status wait message waiting for either WFE or + * WFI without requesting or waiting for a TISCI response with the + * maximum wait time to give us the best chance to get to the WFE/WFI + * command that should follow the invocation of this API before the + * DMSC-internal processing of this command times out. Note that + * waiting for the R5 WFE/WFI flags will also work on an ARMV8 type + * core as the related flag bit positions are the same. 
+ */ + ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id, + U8_MAX, 100, U8_MAX, U8_MAX, + 0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI, + 0, 0); + if (ret) { + dev_err(info->dev, "Sending core %u wait message fail %d\n", + proc_id, ret); + return ret; + } + + /* + * Release a processor managed by TISCI without requesting or waiting + * for a response. + */ + ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0, + MSG_DEVICE_SW_STATE_AUTO_OFF); + if (ret) + dev_err(info->dev, "Sending core %u shutdown message fail %d\n", + proc_id, ret); + + return ret; +} + +/** * ti_sci_cmd_ring_config() - configure RA ring * @handle: pointer to TI SCI handle * @valid_params: Bitfield defining validity of ring configuration parameters. @@ -2632,7 +2899,9 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) bops->board_config_pm = ti_sci_cmd_set_board_config_pm; dops->get_device = ti_sci_cmd_get_device; + dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive; dops->idle_device = ti_sci_cmd_idle_device; + dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive; dops->put_device = ti_sci_cmd_put_device; dops->is_valid = ti_sci_cmd_dev_is_valid; dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt; @@ -2642,6 +2911,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) dops->is_transitioning = ti_sci_cmd_dev_is_trans; dops->set_device_resets = ti_sci_cmd_set_device_resets; dops->get_device_resets = ti_sci_cmd_get_device_resets; + dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices; cops->get_clock = ti_sci_cmd_get_clock; cops->idle_clock = ti_sci_cmd_idle_clock; @@ -2672,6 +2942,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info) pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl; pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image; pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status; + pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait; rops->config = ti_sci_cmd_ring_config; rops->get_config = ti_sci_cmd_ring_get_config; @@ -2835,6 +3106,8 @@ static int ti_sci_probe(struct udevice *dev) ret = ti_sci_cmd_get_revision(&info->handle); + INIT_LIST_HEAD(&info->dev_list); + return ret; } diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index a484b1fa40..69ff74d6a9 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h @@ -50,6 +50,7 @@ #define TISCI_MSG_SET_PROC_BOOT_CTRL 0xc101 #define TISCI_MSG_PROC_AUTH_BOOT_IMIAGE 0xc120 #define TISCI_MSG_GET_PROC_BOOT_STATUS 0xc400 +#define TISCI_MSG_WAIT_PROC_BOOT_STATUS 0xc401 /* Resource Management Requests */ #define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500 @@ -773,6 +774,55 @@ struct ti_sci_msg_resp_get_proc_boot_status { } __packed; /** + * struct ti_sci_msg_req_wait_proc_boot_status - Wait for a processor + * boot status + * @hdr: Generic Header + * @processor_id: ID of processor + * @num_wait_iterations: Total number of iterations we will check before + * we will timeout and give up + * @num_match_iterations: How many iterations should we have continued + * status to account for status bits glitching. + * This is to make sure that match occurs for + * consecutive checks. This implies that the + * worst case should consider that the stable + * time should at the worst be num_wait_iterations + * num_match_iterations to prevent timeout. + * @delay_per_iteration_us: Specifies how long to wait (in micro seconds) + * between each status checks. 
This is the minimum + * duration, and overhead of register reads and + * checks are on top of this and can vary based on + * varied conditions. + * @delay_before_iterations_us: Specifies how long to wait (in micro seconds) + * before the very first check in the first + * iteration of status check loop. This is the + * minimum duration, and overhead of register + * reads and checks are. + * @status_flags_1_set_all_wait:If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 1. + * @status_flags_1_set_any_wait:If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 1. + * @status_flags_1_clr_all_wait:If non-zero, Specifies that all bits of the + * status matching this field requested MUST be 0. + * @status_flags_1_clr_any_wait:If non-zero, Specifies that at least one of the + * bits matching this field requested MUST be 0. + * + * Request type is TISCI_MSG_WAIT_PROC_BOOT_STATUS, response is appropriate + * message, or NACK in case of inability to satisfy request. + */ +struct ti_sci_msg_req_wait_proc_boot_status { + struct ti_sci_msg_hdr hdr; + u8 processor_id; + u8 num_wait_iterations; + u8 num_match_iterations; + u8 delay_per_iteration_us; + u8 delay_before_iterations_us; + u32 status_flags_1_set_all_wait; + u32 status_flags_1_set_any_wait; + u32 status_flags_1_clr_all_wait; + u32 status_flags_1_clr_any_wait; +} __packed; + +/** * struct ti_sci_msg_rm_ring_cfg_req - Configure a Navigator Subsystem ring * * Configures the non-real-time registers of a Navigator Subsystem ring. diff --git a/drivers/fpga/socfpga_arria10.c b/drivers/fpga/socfpga_arria10.c index 285280e507..5fb9d6a191 100644 --- a/drivers/fpga/socfpga_arria10.c +++ b/drivers/fpga/socfpga_arria10.c @@ -936,10 +936,11 @@ int socfpga_load(Altera_desc *desc, const void *rbf_data, size_t rbf_size) fpgamgr_program_write(rbf_data, rbf_size); status = fpgamgr_program_finish(); - if (status) { - config_pins(gd->fdt_blob, "fpga"); - puts("FPGA: Enter user mode.\n"); - } + if (status) + return status; + + config_pins(gd->fdt_blob, "fpga"); + puts("FPGA: Enter user mode.\n"); return status; } diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 800584f512..7d9c97f537 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -14,7 +14,7 @@ config DM_GPIO particular GPIOs that they provide. The uclass interface is defined in include/asm-generic/gpio.h. 
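As background for the uclass interface referenced in the DM_GPIO help text above, a minimal driver-model GPIO consumer could look like the sketch below. It is illustrative only and assumes a hypothetical device with a "reset-gpios" devicetree property; none of these names come from this patch.

    #include <common.h>
    #include <dm.h>
    #include <asm/gpio.h>

    /* Hypothetical example: request the first GPIO listed in the
     * "reset-gpios" property of 'dev' and pulse it high briefly.
     */
    static int example_reset_pulse(struct udevice *dev)
    {
            struct gpio_desc reset;
            int ret;

            ret = gpio_request_by_name(dev, "reset-gpios", 0, &reset,
                                       GPIOD_IS_OUT);
            if (ret)
                    return ret;

            dm_gpio_set_value(&reset, 1);
            udelay(1000);
            dm_gpio_set_value(&reset, 0);

            return dm_gpio_free(dev, &reset);
    }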
-config DM_GPIO_HOG +config GPIO_HOG bool "Enable GPIO hog support" depends on DM_GPIO default n diff --git a/drivers/gpio/gpio-uclass.c b/drivers/gpio/gpio-uclass.c index 308d0863ad..01cfa2f788 100644 --- a/drivers/gpio/gpio-uclass.c +++ b/drivers/gpio/gpio-uclass.c @@ -144,7 +144,7 @@ static int gpio_find_and_xlate(struct gpio_desc *desc, return gpio_xlate_offs_flags(desc->dev, desc, args); } -#if defined(CONFIG_DM_GPIO_HOG) +#if defined(CONFIG_GPIO_HOG) struct gpio_hog_priv { struct gpio_desc gpiod; @@ -181,9 +181,8 @@ static int gpio_hog_ofdata_to_platdata(struct udevice *dev) return ret; } nodename = dev_read_string(dev, "line-name"); - if (!nodename) - nodename = dev_read_name(dev); - device_set_name(dev, nodename); + if (nodename) + device_set_name(dev, nodename); return 0; } @@ -202,9 +201,15 @@ static int gpio_hog_probe(struct udevice *dev) dev->name); return ret; } - dm_gpio_set_dir(&priv->gpiod); - if (plat->gpiod_flags == GPIOD_IS_OUT) - dm_gpio_set_value(&priv->gpiod, plat->value); + + if (plat->gpiod_flags == GPIOD_IS_OUT) { + ret = dm_gpio_set_value(&priv->gpiod, plat->value); + if (ret < 0) { + debug("%s: node %s could not set gpio.\n", __func__, + dev->name); + return ret; + } + } return 0; } @@ -213,32 +218,38 @@ int gpio_hog_probe_all(void) { struct udevice *dev; int ret; + int retval = 0; for (uclass_first_device(UCLASS_NOP, &dev); dev; uclass_find_next_device(&dev)) { if (dev->driver == DM_GET_DRIVER(gpio_hog)) { ret = device_probe(dev); - if (ret) - return ret; + if (ret) { + printf("Failed to probe device %s err: %d\n", + dev->name, ret); + retval = ret; + } } } - return 0; + return retval; } -struct gpio_desc *gpio_hog_lookup_name(const char *name) +int gpio_hog_lookup_name(const char *name, struct gpio_desc **desc) { struct udevice *dev; + *desc = NULL; gpio_hog_probe_all(); if (!uclass_get_device_by_name(UCLASS_NOP, name, &dev)) { struct gpio_hog_priv *priv = dev_get_priv(dev); - return &priv->gpiod; + *desc = &priv->gpiod; + return 0; } - return NULL; + return -ENODEV; } U_BOOT_DRIVER(gpio_hog) = { @@ -250,9 +261,9 @@ U_BOOT_DRIVER(gpio_hog) = { .platdata_auto_alloc_size = sizeof(struct gpio_hog_data), }; #else -struct gpio_desc *gpio_hog_lookup_name(const char *name) +int gpio_hog_lookup_name(const char *name, struct gpio_desc **desc) { - return NULL; + return 0; } #endif @@ -755,13 +766,45 @@ int dm_gpio_get_values_as_int(const struct gpio_desc *desc_list, int count) return vector; } +/** + * gpio_request_tail: common work for requesting a gpio. + * + * ret: return value from previous work in function which calls + * this function. + * This seems bogus (why calling this function instead not + * calling it and end caller function instead?). + * Because on error in caller function we want to set some + * default values in gpio desc and have a common error + * debug message, which provides this function. + * nodename: Name of node for which gpio gets requested + * used for gpio label name. + * args: pointer to output arguments structure + * list_name: Name of GPIO list + * used for gpio label name. + * index: gpio index in gpio list + * used for gpio label name. + * desc: pointer to gpio descriptor, filled from this + * function. + * flags: gpio flags to use. + * add_index: should index added to gpio label name + * gpio_dev: pointer to gpio device from which the gpio + * will be requested. 
If NULL try to get the + * gpio device with uclass_get_device_by_ofnode() + * + * return: In error case this function sets default values in + * gpio descriptor, also emmits a debug message. + * On success it returns 0 else the error code from + * function calls, or the error code passed through + * ret to this function. + * + */ static int gpio_request_tail(int ret, const char *nodename, struct ofnode_phandle_args *args, const char *list_name, int index, struct gpio_desc *desc, int flags, - bool add_index, struct udevice *dev) + bool add_index, struct udevice *gpio_dev) { - desc->dev = dev; + desc->dev = gpio_dev; desc->offset = 0; desc->flags = 0; if (ret) @@ -771,7 +814,8 @@ static int gpio_request_tail(int ret, const char *nodename, ret = uclass_get_device_by_ofnode(UCLASS_GPIO, args->node, &desc->dev); if (ret) { - debug("%s: uclass_get_device_by_ofnode failed\n", __func__); + debug("%s: uclass_get_device_by_ofnode failed\n", + __func__); goto err; } } @@ -989,10 +1033,8 @@ int gpio_dev_request_index(struct udevice *dev, const char *nodename, static int gpio_post_bind(struct udevice *dev) { -#if defined(CONFIG_DM_GPIO_HOG) struct udevice *child; ofnode node; -#endif #if defined(CONFIG_NEEDS_MANUAL_RELOC) struct dm_gpio_ops *ops = (struct dm_gpio_ops *)device_get_ops(dev); @@ -1024,16 +1066,21 @@ static int gpio_post_bind(struct udevice *dev) } #endif -#if defined(CONFIG_DM_GPIO_HOG) - dev_for_each_subnode(node, dev) { - if (ofnode_read_bool(node, "gpio-hog")) { - const char *name = ofnode_get_name(node); - - device_bind_driver_to_node(dev, "gpio_hog", name, - node, &child); + if (IS_ENABLED(CONFIG_GPIO_HOG)) { + dev_for_each_subnode(node, dev) { + if (ofnode_read_bool(node, "gpio-hog")) { + const char *name = ofnode_get_name(node); + int ret; + + ret = device_bind_driver_to_node(dev, + "gpio_hog", + name, node, + &child); + if (ret) + return ret; + } } } -#endif return 0; } diff --git a/drivers/gpio/mxc_gpio.c b/drivers/gpio/mxc_gpio.c index 8bd30c75b2..64ab7a303f 100644 --- a/drivers/gpio/mxc_gpio.c +++ b/drivers/gpio/mxc_gpio.c @@ -31,7 +31,7 @@ struct mxc_bank_info { }; #ifndef CONFIG_DM_GPIO -#define GPIO_TO_PORT(n) (n / 32) +#define GPIO_TO_PORT(n) ((n) / 32) /* GPIO port description */ static unsigned long gpio_ports[] = { diff --git a/drivers/gpio/mxs_gpio.c b/drivers/gpio/mxs_gpio.c index c2c8a25886..b2451fdda8 100644 --- a/drivers/gpio/mxs_gpio.c +++ b/drivers/gpio/mxs_gpio.c @@ -51,6 +51,7 @@ void mxs_gpio_init(void) } } +#if !CONFIG_IS_ENABLED(DM_GPIO) int gpio_get_value(unsigned gpio) { uint32_t bank = PAD_BANK(gpio); @@ -127,3 +128,150 @@ int name_to_gpio(const char *name) return (bank << MXS_PAD_BANK_SHIFT) | (pin << MXS_PAD_PIN_SHIFT); } +#else /* CONFIG_DM_GPIO */ +#include <dm.h> +#include <asm/gpio.h> +#include <asm/arch/gpio.h> +#define MXS_MAX_GPIO_PER_BANK 32 + +DECLARE_GLOBAL_DATA_PTR; +/* + * According to i.MX28 Reference Manual: + * 'i.MX28 Applications Processor Reference Manual, Rev. 
1, 2010' + * The i.MX28 has following number of GPIOs available: + * Bank 0: 0-28 -> 29 PINS + * Bank 1: 0-31 -> 32 PINS + * Bank 2: 0-27 -> 28 PINS + * Bank 3: 0-30 -> 31 PINS + * Bank 4: 0-20 -> 21 PINS + */ + +struct mxs_gpio_priv { + unsigned int bank; +}; + +static int mxs_gpio_get_value(struct udevice *dev, unsigned offset) +{ + struct mxs_gpio_priv *priv = dev_get_priv(dev); + struct mxs_register_32 *reg = + (struct mxs_register_32 *)(MXS_PINCTRL_BASE + + PINCTRL_DIN(priv->bank)); + + return (readl(®->reg) >> offset) & 1; +} + +static int mxs_gpio_set_value(struct udevice *dev, unsigned offset, + int value) +{ + struct mxs_gpio_priv *priv = dev_get_priv(dev); + struct mxs_register_32 *reg = + (struct mxs_register_32 *)(MXS_PINCTRL_BASE + + PINCTRL_DOUT(priv->bank)); + if (value) + writel(BIT(offset), ®->reg_set); + else + writel(BIT(offset), ®->reg_clr); + + return 0; +} + +static int mxs_gpio_direction_input(struct udevice *dev, unsigned offset) +{ + struct mxs_gpio_priv *priv = dev_get_priv(dev); + struct mxs_register_32 *reg = + (struct mxs_register_32 *)(MXS_PINCTRL_BASE + + PINCTRL_DOE(priv->bank)); + + writel(BIT(offset), ®->reg_clr); + + return 0; +} + +static int mxs_gpio_direction_output(struct udevice *dev, unsigned offset, + int value) +{ + struct mxs_gpio_priv *priv = dev_get_priv(dev); + struct mxs_register_32 *reg = + (struct mxs_register_32 *)(MXS_PINCTRL_BASE + + PINCTRL_DOE(priv->bank)); + + mxs_gpio_set_value(dev, offset, value); + + writel(BIT(offset), ®->reg_set); + + return 0; +} + +static int mxs_gpio_get_function(struct udevice *dev, unsigned offset) +{ + struct mxs_gpio_priv *priv = dev_get_priv(dev); + struct mxs_register_32 *reg = + (struct mxs_register_32 *)(MXS_PINCTRL_BASE + + PINCTRL_DOE(priv->bank)); + bool is_output = !!(readl(®->reg) >> offset); + + return is_output ? GPIOF_OUTPUT : GPIOF_INPUT; +} + +static const struct dm_gpio_ops gpio_mxs_ops = { + .direction_input = mxs_gpio_direction_input, + .direction_output = mxs_gpio_direction_output, + .get_value = mxs_gpio_get_value, + .set_value = mxs_gpio_set_value, + .get_function = mxs_gpio_get_function, +}; + +static int mxs_gpio_probe(struct udevice *dev) +{ + struct mxs_gpio_priv *priv = dev_get_priv(dev); + struct gpio_dev_priv *uc_priv = dev_get_uclass_priv(dev); + struct fdtdec_phandle_args args; + int node = dev_of_offset(dev); + char name[16], *str; + fdt_addr_t addr; + int ret; + + addr = devfdt_get_addr(dev); + if (addr == FDT_ADDR_T_NONE) { + printf("%s: No 'reg' property defined!\n", __func__); + return -EINVAL; + } + + priv->bank = (unsigned int)addr; + + snprintf(name, sizeof(name), "GPIO%d_", priv->bank); + str = strdup(name); + if (!str) + return -ENOMEM; + + uc_priv->bank_name = str; + + ret = fdtdec_parse_phandle_with_args(gd->fdt_blob, node, "gpio-ranges", + NULL, 3, 0, &args); + if (ret) + printf("%s: 'gpio-ranges' not defined - using default!\n", + __func__); + + uc_priv->gpio_count = ret == 0 ? 
args.args[2] : MXS_MAX_GPIO_PER_BANK; + + debug("%s: %s: %d pins\n", __func__, uc_priv->bank_name, + uc_priv->gpio_count); + + return 0; +} + +static const struct udevice_id mxs_gpio_ids[] = { + { .compatible = "fsl,imx23-gpio" }, + { .compatible = "fsl,imx28-gpio" }, + { } +}; + +U_BOOT_DRIVER(gpio_mxs) = { + .name = "gpio_mxs", + .id = UCLASS_GPIO, + .ops = &gpio_mxs_ops, + .probe = mxs_gpio_probe, + .priv_auto_alloc_size = sizeof(struct mxs_gpio_priv), + .of_match = mxs_gpio_ids, +}; +#endif /* CONFIG_DM_GPIO */ diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 4772db3837..03d2fed341 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -12,18 +12,7 @@ config DM_I2C write and speed, is implemented with the bus drivers operations, which provide methods for bus setting and data transfer. Each chip device (bus child) info is kept as parent platdata. The interface - is defined in include/i2c.h. When i2c bus driver supports the i2c - uclass, but the device drivers not, then DM_I2C_COMPAT config can - be used as compatibility layer. - -config DM_I2C_COMPAT - bool "Enable I2C compatibility layer" - depends on DM - help - Enable old-style I2C functions for compatibility with existing code. - This option can be enabled as a temporary measure to avoid needing - to convert all code for a board in a single commit. It should not - be enabled for any board in an official release. + is defined in include/i2c.h. config I2C_CROS_EC_TUNNEL tristate "Chrome OS EC tunnel I2C bus" diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index dc40055efb..c2f75d8755 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile @@ -3,7 +3,6 @@ # (C) Copyright 2000-2007 # Wolfgang Denk, DENX Software Engineering, wd@denx.de. obj-$(CONFIG_DM_I2C) += i2c-uclass.o -obj-$(CONFIG_DM_I2C_COMPAT) += i2c-uclass-compat.o obj-$(CONFIG_DM_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_$(SPL_)I2C_CROS_EC_TUNNEL) += cros_ec_tunnel.o obj-$(CONFIG_$(SPL_)I2C_CROS_EC_LDO) += cros_ec_ldo.o diff --git a/drivers/i2c/i2c-uclass-compat.c b/drivers/i2c/i2c-uclass-compat.c deleted file mode 100644 index b3ade88113..0000000000 --- a/drivers/i2c/i2c-uclass-compat.c +++ /dev/null @@ -1,128 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (c) 2014 Google, Inc - */ - -#include <common.h> -#include <dm.h> -#include <errno.h> -#include <i2c.h> - -static int cur_busnum __attribute__((section(".data"))); - -static int i2c_compat_get_device(uint chip_addr, int alen, - struct udevice **devp) -{ - struct dm_i2c_chip *chip; - int ret; - - ret = i2c_get_chip_for_busnum(cur_busnum, chip_addr, alen, devp); - if (ret) - return ret; - chip = dev_get_parent_platdata(*devp); - if (chip->offset_len != alen) { - printf("I2C chip %x: requested alen %d does not match chip offset_len %d\n", - chip_addr, alen, chip->offset_len); - return -EADDRNOTAVAIL; - } - - return 0; -} - -int i2c_probe(uint8_t chip_addr) -{ - struct udevice *bus, *dev; - int ret; - - ret = uclass_get_device_by_seq(UCLASS_I2C, cur_busnum, &bus); - if (ret) { - debug("Cannot find I2C bus %d: err=%d\n", cur_busnum, ret); - return ret; - } - - if (!bus) - return -ENOENT; - - return dm_i2c_probe(bus, chip_addr, 0, &dev); -} - -int i2c_read(uint8_t chip_addr, unsigned int addr, int alen, uint8_t *buffer, - int len) -{ - struct udevice *dev; - int ret; - - ret = i2c_compat_get_device(chip_addr, alen, &dev); - if (ret) - return ret; - - return dm_i2c_read(dev, addr, buffer, len); -} - -int i2c_write(uint8_t chip_addr, unsigned int addr, int alen, uint8_t *buffer, - int 
len) -{ - struct udevice *dev; - int ret; - - ret = i2c_compat_get_device(chip_addr, alen, &dev); - if (ret) - return ret; - - return dm_i2c_write(dev, addr, buffer, len); -} - -int i2c_get_bus_num_fdt(int node) -{ - struct udevice *bus; - int ret; - - ret = uclass_get_device_by_of_offset(UCLASS_I2C, node, &bus); - if (ret) - return ret; - - return bus->seq; -} - -unsigned int i2c_get_bus_num(void) -{ - return cur_busnum; -} - -int i2c_set_bus_num(unsigned int bus) -{ - cur_busnum = bus; - - return 0; -} - -void i2c_init(int speed, int slaveaddr) -{ - /* Nothing to do here - the init happens through driver model */ -} - -void board_i2c_init(const void *blob) -{ - /* Nothing to do here - the init happens through driver model */ -} - -uint8_t i2c_reg_read(uint8_t chip_addr, uint8_t offset) -{ - struct udevice *dev; - int ret; - - ret = i2c_compat_get_device(chip_addr, 1, &dev); - if (ret) - return 0xff; - return dm_i2c_reg_read(dev, offset); -} - -void i2c_reg_write(uint8_t chip_addr, uint8_t offset, uint8_t val) -{ - struct udevice *dev; - int ret; - - ret = i2c_compat_get_device(chip_addr, 1, &dev); - if (!ret) - dm_i2c_reg_write(dev, offset, val); -} diff --git a/drivers/misc/imx8/scu_api.c b/drivers/misc/imx8/scu_api.c index d9c4d5d784..031bc0048b 100644 --- a/drivers/misc/imx8/scu_api.c +++ b/drivers/misc/imx8/scu_api.c @@ -273,6 +273,34 @@ int sc_misc_otp_fuse_read(sc_ipc_t ipc, u32 word, u32 *val) return 0; } +int sc_misc_get_temp(sc_ipc_t ipc, sc_rsrc_t resource, sc_misc_temp_t temp, + s16 *celsius, s8 *tenths) +{ + struct udevice *dev = gd->arch.scu_dev; + int size = sizeof(struct sc_rpc_msg_s); + struct sc_rpc_msg_s msg; + int ret; + + RPC_VER(&msg) = SC_RPC_VERSION; + RPC_SVC(&msg) = (u8)SC_RPC_SVC_MISC; + RPC_FUNC(&msg) = (u8)MISC_FUNC_GET_TEMP; + RPC_U16(&msg, 0U) = (u16)resource; + RPC_U8(&msg, 2U) = (u8)temp; + RPC_SIZE(&msg) = 2U; + + ret = misc_call(dev, SC_FALSE, &msg, size, &msg, size); + if (ret < 0) + return ret; + + if (celsius) + *celsius = RPC_I16(&msg, 0U); + + if (tenths) + *tenths = RPC_I8(&msg, 2U); + + return 0; +} + /* RM */ sc_bool_t sc_rm_is_memreg_owned(sc_ipc_t ipc, sc_rm_mr_t mr) { diff --git a/drivers/mmc/am654_sdhci.c b/drivers/mmc/am654_sdhci.c index fb0fb58070..1793a3f99a 100644 --- a/drivers/mmc/am654_sdhci.c +++ b/drivers/mmc/am654_sdhci.c @@ -72,6 +72,8 @@ struct am654_sdhci_plat { u32 otap_del_sel; u32 trm_icp; u32 drv_strength; + u32 flags; +#define DLL_PRESENT (1 << 0) bool dll_on; }; @@ -162,6 +164,10 @@ const struct sdhci_ops am654_sdhci_ops = { .set_control_reg = &am654_sdhci_set_control_reg, }; +const struct sdhci_ops j721e_4bit_sdhci_ops = { + .set_control_reg = &am654_sdhci_set_control_reg, +}; + int am654_sdhci_init(struct am654_sdhci_plat *plat) { u32 ctl_cfg_2 = 0; @@ -172,24 +178,28 @@ int am654_sdhci_init(struct am654_sdhci_plat *plat) mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK; regmap_update_bits(plat->base, PHY_CTRL4, mask, 0x0); - regmap_read(plat->base, PHY_STAT1, &val); - if (~val & CALDONE_MASK) { - /* Calibrate IO lines */ - regmap_update_bits(plat->base, PHY_CTRL1, PDB_MASK, PDB_MASK); - ret = regmap_read_poll_timeout(plat->base, PHY_STAT1, val, - val & CALDONE_MASK, 1, 20); - if (ret) - return ret; - } + if (plat->flags & DLL_PRESENT) { + regmap_read(plat->base, PHY_STAT1, &val); + if (~val & CALDONE_MASK) { + /* Calibrate IO lines */ + regmap_update_bits(plat->base, PHY_CTRL1, PDB_MASK, + PDB_MASK); + ret = regmap_read_poll_timeout(plat->base, PHY_STAT1, + val, val & CALDONE_MASK, + 1, 20); + if (ret) + return ret; + } - /* 
Configure DLL TRIM */ - mask = DLL_TRIM_ICP_MASK; - val = plat->trm_icp << DLL_TRIM_ICP_SHIFT; + /* Configure DLL TRIM */ + mask = DLL_TRIM_ICP_MASK; + val = plat->trm_icp << DLL_TRIM_ICP_SHIFT; - /* Configure DLL driver strength */ - mask |= DR_TY_MASK; - val |= plat->drv_strength << DR_TY_SHIFT; - regmap_update_bits(plat->base, PHY_CTRL1, mask, val); + /* Configure DLL driver strength */ + mask |= DR_TY_MASK; + val |= plat->drv_strength << DR_TY_SHIFT; + regmap_update_bits(plat->base, PHY_CTRL1, mask, val); + } /* Enable pins by setting IO mux to 0 */ regmap_update_bits(plat->base, PHY_CTRL1, IOMUX_ENABLE_MASK, 0); @@ -245,7 +255,7 @@ static int am654_sdhci_probe(struct udevice *dev) AM654_SDHCI_MIN_FREQ); if (ret) return ret; - host->ops = &am654_sdhci_ops; + host->ops = (struct sdhci_ops *)dev_get_driver_data(dev); host->mmc->priv = host; upriv->mmc = host->mmc; @@ -268,37 +278,44 @@ static int am654_sdhci_ofdata_to_platdata(struct udevice *dev) host->ioaddr = (void *)dev_read_addr(dev); plat->non_removable = dev_read_bool(dev, "non-removable"); - ret = dev_read_u32(dev, "ti,trm-icp", &plat->trm_icp); - if (ret) - return ret; + if (device_is_compatible(dev, "ti,am654-sdhci-5.1") || + device_is_compatible(dev, "ti,j721e-sdhci-8bit")) + plat->flags |= DLL_PRESENT; ret = dev_read_u32(dev, "ti,otap-del-sel", &plat->otap_del_sel); if (ret) return ret; - ret = dev_read_u32(dev, "ti,driver-strength-ohm", &drv_strength); - if (ret) - return ret; + if (plat->flags & DLL_PRESENT) { + ret = dev_read_u32(dev, "ti,trm-icp", &plat->trm_icp); + if (ret) + return ret; + + ret = dev_read_u32(dev, "ti,driver-strength-ohm", + &drv_strength); + if (ret) + return ret; - switch (drv_strength) { - case 50: - plat->drv_strength = DRIVER_STRENGTH_50_OHM; - break; - case 33: - plat->drv_strength = DRIVER_STRENGTH_33_OHM; - break; - case 66: - plat->drv_strength = DRIVER_STRENGTH_66_OHM; - break; - case 100: - plat->drv_strength = DRIVER_STRENGTH_100_OHM; - break; - case 40: - plat->drv_strength = DRIVER_STRENGTH_40_OHM; - break; - default: - dev_err(dev, "Invalid driver strength\n"); - return -EINVAL; + switch (drv_strength) { + case 50: + plat->drv_strength = DRIVER_STRENGTH_50_OHM; + break; + case 33: + plat->drv_strength = DRIVER_STRENGTH_33_OHM; + break; + case 66: + plat->drv_strength = DRIVER_STRENGTH_66_OHM; + break; + case 100: + plat->drv_strength = DRIVER_STRENGTH_100_OHM; + break; + case 40: + plat->drv_strength = DRIVER_STRENGTH_40_OHM; + break; + default: + dev_err(dev, "Invalid driver strength\n"); + return -EINVAL; + } } ret = mmc_of_parse(dev, cfg); @@ -316,7 +333,18 @@ static int am654_sdhci_bind(struct udevice *dev) } static const struct udevice_id am654_sdhci_ids[] = { - { .compatible = "ti,am654-sdhci-5.1" }, + { + .compatible = "ti,am654-sdhci-5.1", + .data = (ulong)&am654_sdhci_ops, + }, + { + .compatible = "ti,j721e-sdhci-8bit", + .data = (ulong)&am654_sdhci_ops, + }, + { + .compatible = "ti,j721e-sdhci-4bit", + .data = (ulong)&j721e_4bit_sdhci_ops, + }, { } }; diff --git a/drivers/mmc/mmc_spi.c b/drivers/mmc/mmc_spi.c index f3d687ae80..350812a04b 100644 --- a/drivers/mmc/mmc_spi.c +++ b/drivers/mmc/mmc_spi.c @@ -84,7 +84,7 @@ static int mmc_spi_sendcmd(struct udevice *dev, cmdo[4] = cmdarg >> 8; cmdo[5] = cmdarg; cmdo[6] = (crc7(0, &cmdo[1], 5) << 1) | 0x01; - ret = dm_spi_xfer(dev, sizeof(cmdo) * 8, cmdo, NULL, 0); + ret = dm_spi_xfer(dev, sizeof(cmdo) * 8, cmdo, NULL, SPI_XFER_BEGIN); if (ret) return ret; @@ -360,6 +360,8 @@ static int dm_mmc_spi_request(struct udevice *dev, struct 
mmc_cmd *cmd, } done: + dm_spi_xfer(dev, 0, NULL, NULL, SPI_XFER_END); + dm_spi_release_bus(dev); return ret; diff --git a/drivers/mmc/rockchip_sdhci.c b/drivers/mmc/rockchip_sdhci.c index cb623d589b..dd3d5574db 100644 --- a/drivers/mmc/rockchip_sdhci.c +++ b/drivers/mmc/rockchip_sdhci.c @@ -68,15 +68,15 @@ static int arasan_sdhci_probe(struct udevice *dev) if (host->bus_width == 8) host->host_caps |= MMC_MODE_8BIT; - ret = sdhci_setup_cfg(&plat->cfg, host, 0, EMMC_MIN_FREQ); - host->mmc = &plat->mmc; - if (ret) - return ret; host->mmc->priv = &prv->host; host->mmc->dev = dev; upriv->mmc = host->mmc; + ret = sdhci_setup_cfg(&plat->cfg, host, 0, EMMC_MIN_FREQ); + if (ret) + return ret; + return sdhci_probe(dev); } diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index cfa9b535c8..33c2f16be8 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -31,6 +31,7 @@ #include <common.h> #include <asm/io.h> #include <nand.h> +#include <dm/uclass.h> #include <asm/ti-common/davinci_nand.h> /* Definitions for 4-bit hardware ECC */ @@ -730,7 +731,7 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd) return __raw_readl(&davinci_emif_regs->nandfsr) & 0x1; } -void davinci_nand_init(struct nand_chip *nand) +static void davinci_nand_init(struct nand_chip *nand) { #if defined CONFIG_KEYSTONE_RBL_NAND int i; @@ -785,10 +786,53 @@ void davinci_nand_init(struct nand_chip *nand) nand->dev_ready = nand_davinci_dev_ready; } -int board_nand_init(struct nand_chip *chip) __attribute__((weak)); +#ifdef CONFIG_SYS_NAND_SELF_INIT +static int davinci_nand_probe(struct udevice *dev) +{ + struct nand_chip *nand = dev_get_priv(dev); + struct mtd_info *mtd = nand_to_mtd(nand); + int ret; + + nand->IO_ADDR_R = (void __iomem *)CONFIG_SYS_NAND_BASE; + nand->IO_ADDR_W = (void __iomem *)CONFIG_SYS_NAND_BASE; + + davinci_nand_init(nand); + + ret = nand_scan(mtd, CONFIG_SYS_NAND_MAX_CHIPS); + if (ret) + return ret; + + return nand_register(0, mtd); +} + +static const struct udevice_id davinci_nand_ids[] = { + { .compatible = "ti,davinci-nand" }, + { } +}; + +U_BOOT_DRIVER(davinci_nand) = { + .name = "davinci-nand", + .id = UCLASS_MTD, + .of_match = davinci_nand_ids, + .probe = davinci_nand_probe, + .priv_auto_alloc_size = sizeof(struct nand_chip), +}; + +void board_nand_init(void) +{ + struct udevice *dev; + int ret; + ret = uclass_get_device_by_driver(UCLASS_MTD, + DM_GET_DRIVER(davinci_nand), &dev); + if (ret && ret != -ENODEV) + pr_err("Failed to initialize %s: %d\n", dev->name, ret); +} +#else +int board_nand_init(struct nand_chip *chip) __attribute__((weak)); int board_nand_init(struct nand_chip *chip) { davinci_nand_init(chip); return 0; } +#endif /* CONFIG_SYS_NAND_SELF_INIT */ diff --git a/drivers/mtd/nand/raw/mxs_nand.c b/drivers/mtd/nand/raw/mxs_nand.c index b93d77a395..a41b9620d0 100644 --- a/drivers/mtd/nand/raw/mxs_nand.c +++ b/drivers/mtd/nand/raw/mxs_nand.c @@ -25,7 +25,7 @@ #include <asm/mach-imx/regs-bch.h> #include <asm/mach-imx/regs-gpmi.h> #include <asm/arch/sys_proto.h> -#include "mxs_nand.h" +#include <mxs_nand.h> #define MXS_NAND_DMA_DESCRIPTOR_COUNT 4 diff --git a/drivers/mtd/nand/raw/mxs_nand.h b/drivers/mtd/nand/raw/mxs_nand.h deleted file mode 100644 index 4bd65cded9..0000000000 --- a/drivers/mtd/nand/raw/mxs_nand.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * NXP GPMI NAND flash driver - * - * Copyright (C) 2018 Toradex - * Authors: - * Stefan Agner <stefan.agner@toradex.com> - */ - 
-#include <linux/mtd/mtd.h> -#include <asm/cache.h> -#include <nand.h> -#include <asm/mach-imx/dma.h> - -/** - * @gf_len: The length of Galois Field. (e.g., 13 or 14) - * @ecc_strength: A number that describes the strength of the ECC - * algorithm. - * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note - * the first chunk in the page includes both data and - * metadata, so it's a bit larger than this value. - * @ecc_chunk_count: The number of ECC chunks in the page, - * @block_mark_byte_offset: The byte offset in the ECC-based page view at - * which the underlying physical block mark appears. - * @block_mark_bit_offset: The bit offset into the ECC-based page view at - * which the underlying physical block mark appears. - */ -struct bch_geometry { - unsigned int gf_len; - unsigned int ecc_strength; - unsigned int ecc_chunk_size; - unsigned int ecc_chunk_count; - unsigned int block_mark_byte_offset; - unsigned int block_mark_bit_offset; -}; - -struct mxs_nand_info { - struct nand_chip chip; - struct udevice *dev; - unsigned int max_ecc_strength_supported; - bool use_minimum_ecc; - int cur_chip; - - uint32_t cmd_queue_len; - uint32_t data_buf_size; - struct bch_geometry bch_geometry; - - uint8_t *cmd_buf; - uint8_t *data_buf; - uint8_t *oob_buf; - - uint8_t marking_block_bad; - uint8_t raw_oob_mode; - - struct mxs_gpmi_regs *gpmi_regs; - struct mxs_bch_regs *bch_regs; - - /* Functions with altered behaviour */ - int (*hooked_read_oob)(struct mtd_info *mtd, - loff_t from, struct mtd_oob_ops *ops); - int (*hooked_write_oob)(struct mtd_info *mtd, - loff_t to, struct mtd_oob_ops *ops); - int (*hooked_block_markbad)(struct mtd_info *mtd, - loff_t ofs); - - /* DMA descriptors */ - struct mxs_dma_desc **desc; - uint32_t desc_index; -}; - -int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info); -int mxs_nand_init_spl(struct nand_chip *nand); -int mxs_nand_setup_ecc(struct mtd_info *mtd); diff --git a/drivers/mtd/nand/raw/mxs_nand_dt.c b/drivers/mtd/nand/raw/mxs_nand_dt.c index 44dec5dedf..8ad7d618c6 100644 --- a/drivers/mtd/nand/raw/mxs_nand_dt.c +++ b/drivers/mtd/nand/raw/mxs_nand_dt.c @@ -15,7 +15,7 @@ #include <linux/ioport.h> #include <linux/printk.h> -#include "mxs_nand.h" +#include <mxs_nand.h> struct mxs_nand_dt_data { unsigned int max_ecc_strength_supported; diff --git a/drivers/mtd/nand/raw/mxs_nand_spl.c b/drivers/mtd/nand/raw/mxs_nand_spl.c index ee7d9cb957..975a91a37d 100644 --- a/drivers/mtd/nand/raw/mxs_nand_spl.c +++ b/drivers/mtd/nand/raw/mxs_nand_spl.c @@ -6,7 +6,7 @@ #include <common.h> #include <nand.h> #include <malloc.h> -#include "mxs_nand.h" +#include <mxs_nand.h> static struct mtd_info *mtd; static struct nand_chip nand_chip; diff --git a/drivers/mtd/spi/spi-nor-ids.c b/drivers/mtd/spi/spi-nor-ids.c index ec929760ee..a3920ba520 100644 --- a/drivers/mtd/spi/spi-nor-ids.c +++ b/drivers/mtd/spi/spi-nor-ids.c @@ -163,11 +163,15 @@ const struct flash_info spi_nor_ids[] = { { INFO("n25q128a13", 0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, { INFO("n25q256a", 0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { INFO("n25q256ax1", 0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { INFO6("mt25qu512a", 0x20bb20, 0x104400, 64 * 1024, 1024, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { INFO("n25q512a", 0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { INFO("n25q512ax3", 0x20ba20, 0, 64 * 1024, 1024, SECT_4K | 
USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { INFO("n25q00", 0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, { INFO("n25q00a", 0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, { INFO("mt25qu02g", 0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, + { INFO("mt35xu512aba", 0x2c5b1a, 0, 128 * 1024, 512, USE_FSR | SPI_NOR_4B_OPCODES) }, + { INFO("mt35xu02g", 0x2c5b1c, 0, 128 * 1024, 2048, USE_FSR | SPI_NOR_4B_OPCODES) }, #endif #ifdef CONFIG_SPI_FLASH_SPANSION /* SPANSION */ /* Spansion/Cypress -- single (large) sector size only, at least diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 635f8d72c2..084e095229 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -24,6 +24,18 @@ config DM_MDIO This is currently implemented in net/mdio-uclass.c Look in include/miiphy.h for details. +config DM_MDIO_MUX + bool "Enable Driver Model for MDIO MUX devices" + depends on DM_MDIO + help + Enable driver model for MDIO MUX devices + + Adds UCLASS_MDIO_MUX DM class supporting MDIO MUXes. Useful for + systems that support DM_MDIO and integrate one or multiple muxes on + the MDIO bus. + This is currently implemented in net/mdio-mux-uclass.c + Look in include/miiphy.h for details. + config MDIO_SANDBOX depends on DM_MDIO && SANDBOX default y @@ -34,6 +46,16 @@ config MDIO_SANDBOX This driver is used in for testing in test/dm/mdio.c +config MDIO_MUX_SANDBOX + depends on DM_MDIO_MUX && MDIO_SANDBOX + default y + bool "Sandbox: Mocked MDIO-MUX driver" + help + This driver implements dummy select/deselect ops mimicking a MUX on + the MDIO bus. It uses mdio_sandbox driver as parent MDIO. + + This driver is used for testing in test/dm/mdio.c + menuconfig NETDEVICES bool "Network device support" depends on NET @@ -174,7 +196,7 @@ config ETH_SANDBOX_RAW This driver is a bridge from the bottom of the network stack in U-Boot to the RAW AF_PACKET API in Linux. This allows real network traffic to be tested from within sandbox. See - board/sandbox/README.sandbox for more details. + doc/arch/index.rst for more details. config ETH_DESIGNWARE bool "Synopsys Designware Ethernet MAC" @@ -213,7 +235,7 @@ config FEC_MXC_MDIO_BASE config FEC_MXC bool "FEC Ethernet controller" - depends on MX5 || MX6 || MX7 || IMX8 || VF610 + depends on MX28 || MX5 || MX6 || MX7 || IMX8 || VF610 help This driver supports the 10/100 Fast Ethernet controller for NXP i.MX processors. @@ -566,4 +588,19 @@ config HIGMACV300_ETH This driver supports HIGMACV300 Ethernet controller found on HiSilicon SoCs. +config FSL_ENETC + bool "NXP ENETC Ethernet controller" + depends on DM_PCI && DM_ETH && DM_MDIO + help + This driver supports the NXP ENETC Ethernet controller found on some + of the NXP SoCs. + +config MDIO_MUX_I2CREG + bool "MDIO MUX accessed as a register over I2C" + depends on DM_MDIO_MUX && DM_I2C + help + This driver is used for MDIO muxes driven by writing to a register of + an I2C chip. The board it was developed for uses a mux controlled by + on-board FPGA which in turn is accessed as a chip over I2C.
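The MDIO MUX support these options describe (DM_MDIO_MUX, MDIO_MUX_SANDBOX, MDIO_MUX_I2CREG) comes down to a UCLASS_MDIO_MUX driver providing select/deselect callbacks while the parent MDIO bus performs the actual transfers. Below is a minimal sketch, assuming the mdio_mux_ops layout declared in include/miiphy.h that the help texts reference; the driver name and compatible string are invented for illustration only.

#include <common.h>
#include <dm.h>
#include <miiphy.h>

/* Route the shared MDIO lines to child bus 'sel'; 'cur' is the previously selected bus */
static int example_mdio_mux_select(struct udevice *mux, int cur, int sel)
{
	/* a real driver would poke an I2C register, GPIO or FPGA here */
	return 0;
}

/* Optionally undo the routing once the transaction on 'sel' has finished */
static int example_mdio_mux_deselect(struct udevice *mux, int cur, int sel)
{
	return 0;
}

static const struct mdio_mux_ops example_mdio_mux_ops = {
	.select = example_mdio_mux_select,
	.deselect = example_mdio_mux_deselect,
};

static const struct udevice_id example_mdio_mux_ids[] = {
	{ .compatible = "example,mdio-mux" },
	{ }
};

U_BOOT_DRIVER(example_mdio_mux) = {
	.name = "example_mdio_mux",
	.id = UCLASS_MDIO_MUX,
	.of_match = example_mdio_mux_ids,
	.ops = &example_mdio_mux_ops,
};

The mdio_mux_i2creg and mdio_mux_sandbox drivers added by this series follow this same pattern, differing only in what their select callback touches.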
+ endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 40038427db..71c0889355 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -37,6 +37,8 @@ obj-$(CONFIG_LAN91C96) += lan91c96.o obj-$(CONFIG_LPC32XX_ETH) += lpc32xx_eth.o obj-$(CONFIG_MACB) += macb.o obj-$(CONFIG_MCFFEC) += mcffec.o mcfmii.o +obj-$(CONFIG_MDIO_MUX_I2CREG) += mdio_mux_i2creg.o +obj-$(CONFIG_MDIO_MUX_SANDBOX) += mdio_mux_sandbox.o obj-$(CONFIG_MPC8XX_FEC) += mpc8xx_fec.o obj-$(CONFIG_MT7628_ETH) += mt7628-eth.o obj-$(CONFIG_MVGBE) += mvgbe.o @@ -78,3 +80,4 @@ obj-$(CONFIG_MEDIATEK_ETH) += mtk_eth.o obj-y += mscc_eswitch/ obj-$(CONFIG_HIGMACV300_ETH) += higmacv300.o obj-$(CONFIG_MDIO_SANDBOX) += mdio_sandbox.o +obj-$(CONFIG_FSL_ENETC) += fsl_enetc.o fsl_enetc_mdio.o diff --git a/drivers/net/designware.c b/drivers/net/designware.c index 2c5d9560c5..c4fe40b19a 100644 --- a/drivers/net/designware.c +++ b/drivers/net/designware.c @@ -480,18 +480,16 @@ static int _dw_free_pkt(struct dw_eth_dev *priv) static int dw_phy_init(struct dw_eth_dev *priv, void *dev) { struct phy_device *phydev; - int mask = 0xffffffff, ret; + int phy_addr = -1, ret; #ifdef CONFIG_PHY_ADDR - mask = 1 << CONFIG_PHY_ADDR; + phy_addr = CONFIG_PHY_ADDR; #endif - phydev = phy_find_by_mask(priv->bus, mask, priv->interface); + phydev = phy_connect(priv->bus, phy_addr, dev, priv->interface); if (!phydev) return -ENODEV; - phy_connect_dev(phydev, dev); - phydev->supported &= PHY_GBIT_FEATURES; if (priv->max_speed) { ret = phy_set_supported(phydev, priv->max_speed); @@ -677,10 +675,10 @@ int designware_eth_probe(struct udevice *dev) struct dw_eth_dev *priv = dev_get_priv(dev); u32 iobase = pdata->iobase; ulong ioaddr; - int ret; + int ret, err; struct reset_ctl_bulk reset_bulk; #ifdef CONFIG_CLK - int i, err, clock_nb; + int i, clock_nb; priv->clock_count = 0; clock_nb = dev_count_phandle_with_args(dev, "clocks", "#clock-cells"); @@ -753,13 +751,23 @@ int designware_eth_probe(struct udevice *dev) priv->interface = pdata->phy_interface; priv->max_speed = pdata->max_speed; - dw_mdio_init(dev->name, dev); + ret = dw_mdio_init(dev->name, dev); + if (ret) { + err = ret; + goto mdio_err; + } priv->bus = miiphy_get_dev_by_name(dev->name); ret = dw_phy_init(priv, dev); debug("%s, ret=%d\n", __func__, ret); + if (!ret) + return 0; - return ret; + /* continue here for cleanup if no PHY found */ + err = ret; + mdio_unregister(priv->bus); + mdio_free(priv->bus); +mdio_err: #ifdef CONFIG_CLK clk_err: @@ -767,8 +775,8 @@ clk_err: if (ret) pr_err("failed to disable all clocks\n"); - return err; #endif + return err; } static int designware_eth_remove(struct udevice *dev) diff --git a/drivers/net/fec_mxc.c b/drivers/net/fec_mxc.c index d7c080943a..96e3ad9a1a 100644 --- a/drivers/net/fec_mxc.c +++ b/drivers/net/fec_mxc.c @@ -1485,6 +1485,7 @@ static int fecmxc_ofdata_to_platdata(struct udevice *dev) } static const struct udevice_id fecmxc_ids[] = { + { .compatible = "fsl,imx28-fec" }, { .compatible = "fsl,imx6q-fec" }, { .compatible = "fsl,imx6sl-fec" }, { .compatible = "fsl,imx6sx-fec" }, diff --git a/drivers/net/fsl_enetc.c b/drivers/net/fsl_enetc.c new file mode 100644 index 0000000000..e7c5062c39 --- /dev/null +++ b/drivers/net/fsl_enetc.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ENETC ethernet controller driver + * Copyright 2017-2019 NXP + */ + +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <memalign.h> +#include <asm/io.h> +#include <pci.h> +#include <miiphy.h> + +#include "fsl_enetc.h" + 
+/* + * Bind the device: + * - set a more explicit name on the interface + */ +static int enetc_bind(struct udevice *dev) +{ + char name[16]; + static int eth_num_devices; + + /* + * prefer using PCI function numbers to number interfaces, but these + * are only available if dts nodes are present. For PCI they are + * optional, handle that case too. Just in case some nodes are present + * and some are not, use different naming scheme - enetc-N based on + * PCI function # and enetc#N based on interface count + */ + if (ofnode_valid(dev->node)) + sprintf(name, "enetc-%u", PCI_FUNC(pci_get_devfn(dev))); + else + sprintf(name, "enetc#%u", eth_num_devices++); + device_set_name(dev, name); + + return 0; +} + +/* MDIO wrappers, we're using these to drive internal MDIO to get to serdes */ +static int enetc_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) +{ + struct enetc_mdio_priv priv; + + priv.regs_base = bus->priv; + return enetc_mdio_read_priv(&priv, addr, devad, reg); +} + +static int enetc_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, + u16 val) +{ + struct enetc_mdio_priv priv; + + priv.regs_base = bus->priv; + return enetc_mdio_write_priv(&priv, addr, devad, reg, val); +} + +/* only interfaces that can pin out through serdes have internal MDIO */ +static bool enetc_has_imdio(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + + return !!(priv->imdio.priv); +} + +/* set up serdes for SGMII */ +static int enetc_init_sgmii(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + bool is2500 = false; + u16 reg; + + if (!enetc_has_imdio(dev)) + return 0; + + if (priv->if_type == PHY_INTERFACE_MODE_SGMII_2500) + is2500 = true; + + /* + * Set to SGMII mode, for 1Gbps enable AN, for 2.5Gbps set fixed speed. + * Although fixed speed is 1Gbps, we could be running at 2.5Gbps based + * on PLL configuration. Setting 1G for 2.5G here is counter intuitive + * but intentional. + */ + reg = ENETC_PCS_IF_MODE_SGMII; + reg |= is2500 ? ENETC_PCS_IF_MODE_SPEED_1G : ENETC_PCS_IF_MODE_SGMII_AN; + enetc_mdio_write(&priv->imdio, ENETC_PCS_PHY_ADDR, MDIO_DEVAD_NONE, + ENETC_PCS_IF_MODE, reg); + + /* Dev ability - SGMII */ + enetc_mdio_write(&priv->imdio, ENETC_PCS_PHY_ADDR, MDIO_DEVAD_NONE, + ENETC_PCS_DEV_ABILITY, ENETC_PCS_DEV_ABILITY_SGMII); + + /* Adjust link timer for SGMII */ + enetc_mdio_write(&priv->imdio, ENETC_PCS_PHY_ADDR, MDIO_DEVAD_NONE, + ENETC_PCS_LINK_TIMER1, ENETC_PCS_LINK_TIMER1_VAL); + enetc_mdio_write(&priv->imdio, ENETC_PCS_PHY_ADDR, MDIO_DEVAD_NONE, + ENETC_PCS_LINK_TIMER2, ENETC_PCS_LINK_TIMER2_VAL); + + reg = ENETC_PCS_CR_DEF_VAL; + reg |= is2500 ? 
ENETC_PCS_CR_RST : ENETC_PCS_CR_RESET_AN; + /* restart PCS AN */ + enetc_mdio_write(&priv->imdio, ENETC_PCS_PHY_ADDR, MDIO_DEVAD_NONE, + ENETC_PCS_CR, reg); + + return 0; +} + +/* set up MAC for RGMII */ +static int enetc_init_rgmii(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + u32 if_mode; + + /* enable RGMII AN */ + if_mode = enetc_read_port(priv, ENETC_PM_IF_MODE); + if_mode |= ENETC_PM_IF_MODE_AN_ENA; + enetc_write_port(priv, ENETC_PM_IF_MODE, if_mode); + + return 0; +} + +/* set up MAC and serdes for SXGMII */ +static int enetc_init_sxgmii(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + u32 if_mode; + + /* set ifmode to (US)XGMII */ + if_mode = enetc_read_port(priv, ENETC_PM_IF_MODE); + if_mode &= ~ENETC_PM_IF_IFMODE_MASK; + enetc_write_port(priv, ENETC_PM_IF_MODE, if_mode); + + if (!enetc_has_imdio(dev)) + return 0; + + /* Dev ability - SXGMII */ + enetc_mdio_write(&priv->imdio, ENETC_PCS_PHY_ADDR, ENETC_PCS_DEVAD_REPL, + ENETC_PCS_DEV_ABILITY, ENETC_PCS_DEV_ABILITY_SXGMII); + + /* Restart PCS AN */ + enetc_mdio_write(&priv->imdio, ENETC_PCS_PHY_ADDR, ENETC_PCS_DEVAD_REPL, + ENETC_PCS_CR, + ENETC_PCS_CR_RST | ENETC_PCS_CR_RESET_AN); + + return 0; +} + +/* Apply protocol specific configuration to MAC, serdes as needed */ +static void enetc_start_pcs(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + const char *if_str; + + priv->if_type = PHY_INTERFACE_MODE_NONE; + + /* check internal mdio capability, not all ports need it */ + if (enetc_read_port(priv, ENETC_PCAPR0) & ENETC_PCAPRO_MDIO) { + /* + * set up internal MDIO, this is part of ETH PCI function and is + * used to access serdes / internal SoC PHYs. + * We don't currently register it as a MDIO bus as it goes away + * when the interface is removed, so it can't practically be + * used in the console. 
+ */ + priv->imdio.read = enetc_mdio_read; + priv->imdio.write = enetc_mdio_write; + priv->imdio.priv = priv->port_regs + ENETC_PM_IMDIO_BASE; + strncpy(priv->imdio.name, dev->name, MDIO_NAME_LEN); + } + + if (!ofnode_valid(dev->node)) { + enetc_dbg(dev, "no enetc ofnode found, skipping PCS set-up\n"); + return; + } + + if_str = ofnode_read_string(dev->node, "phy-mode"); + if (if_str) + priv->if_type = phy_get_interface_by_name(if_str); + else + enetc_dbg(dev, + "phy-mode property not found, defaulting to SGMII\n"); + if (priv->if_type < 0) + priv->if_type = PHY_INTERFACE_MODE_NONE; + + switch (priv->if_type) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_SGMII_2500: + enetc_init_sgmii(dev); + break; + case PHY_INTERFACE_MODE_RGMII: + enetc_init_rgmii(dev); + break; + case PHY_INTERFACE_MODE_XGMII: + enetc_init_sxgmii(dev); + break; + }; +} + +/* Configure the actual/external ethernet PHY, if one is found */ +static void enetc_start_phy(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + struct udevice *miidev; + struct phy_device *phy; + u32 phandle, phy_id; + ofnode phy_node; + int supported; + + if (!ofnode_valid(dev->node)) { + enetc_dbg(dev, "no enetc ofnode found, skipping PHY set-up\n"); + return; + } + + if (ofnode_read_u32(dev->node, "phy-handle", &phandle)) { + enetc_dbg(dev, "phy-handle not found, skipping PHY set-up\n"); + return; + } + + phy_node = ofnode_get_by_phandle(phandle); + if (!ofnode_valid(phy_node)) { + enetc_dbg(dev, "invalid phy node, skipping PHY set-up\n"); + return; + } + enetc_dbg(dev, "phy node: %s\n", ofnode_get_name(phy_node)); + + if (ofnode_read_u32(phy_node, "reg", &phy_id)) { + enetc_dbg(dev, + "missing reg in PHY node, skipping PHY set-up\n"); + return; + } + + if (uclass_get_device_by_ofnode(UCLASS_MDIO, + ofnode_get_parent(phy_node), + &miidev)) { + enetc_dbg(dev, "can't find MDIO bus for node %s\n", + ofnode_get_name(ofnode_get_parent(phy_node))); + return; + } + + phy = dm_mdio_phy_connect(miidev, phy_id, dev, priv->if_type); + if (!phy) { + enetc_dbg(dev, "dm_mdio_phy_connect returned null\n"); + return; + } + + supported = GENMASK(6, 0); /* speeds up to 1G & AN */ + phy->advertising = phy->supported & supported; + phy_config(phy); + phy_startup(phy); +} + +/* + * Probe ENETC driver: + * - initialize port and station interface BARs + */ +static int enetc_probe(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + + if (ofnode_valid(dev->node) && !ofnode_is_available(dev->node)) { + enetc_dbg(dev, "interface disabled\n"); + return -ENODEV; + } + + priv->enetc_txbd = memalign(ENETC_BD_ALIGN, + sizeof(struct enetc_tx_bd) * ENETC_BD_CNT); + priv->enetc_rxbd = memalign(ENETC_BD_ALIGN, + sizeof(union enetc_rx_bd) * ENETC_BD_CNT); + + if (!priv->enetc_txbd || !priv->enetc_rxbd) { + /* free should be able to handle NULL, just free all pointers */ + free(priv->enetc_txbd); + free(priv->enetc_rxbd); + + return -ENOMEM; + } + + /* initialize register */ + priv->regs_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0); + if (!priv->regs_base) { + enetc_dbg(dev, "failed to map BAR0\n"); + return -EINVAL; + } + priv->port_regs = priv->regs_base + ENETC_PORT_REGS_OFF; + + dm_pci_clrset_config16(dev, PCI_COMMAND, 0, PCI_COMMAND_MEMORY); + + return 0; +} + +/* + * Remove the driver from an interface: + * - free up allocated memory + */ +static int enetc_remove(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + + free(priv->enetc_txbd); + free(priv->enetc_rxbd); + + return 0; +} + +/* ENETC 
Port MAC address registers, accepts big-endian format */ +static void enetc_set_primary_mac_addr(struct enetc_priv *priv, const u8 *addr) +{ + u16 lower = *(const u16 *)(addr + 4); + u32 upper = *(const u32 *)addr; + + enetc_write_port(priv, ENETC_PSIPMAR0, upper); + enetc_write_port(priv, ENETC_PSIPMAR1, lower); +} + +/* Configure port parameters (# of rings, frame size, enable port) */ +static void enetc_enable_si_port(struct enetc_priv *priv) +{ + u32 val; + + /* set Rx/Tx BDR count */ + val = ENETC_PSICFGR_SET_TXBDR(ENETC_TX_BDR_CNT); + val |= ENETC_PSICFGR_SET_RXBDR(ENETC_RX_BDR_CNT); + enetc_write_port(priv, ENETC_PSICFGR(0), val); + /* set Rx max frame size */ + enetc_write_port(priv, ENETC_PM_MAXFRM, ENETC_RX_MAXFRM_SIZE); + /* enable MAC port */ + enetc_write_port(priv, ENETC_PM_CC, ENETC_PM_CC_RX_TX_EN); + /* enable port */ + enetc_write_port(priv, ENETC_PMR, ENETC_PMR_SI0_EN); + /* set SI cache policy */ + enetc_write(priv, ENETC_SICAR0, + ENETC_SICAR_RD_CFG | ENETC_SICAR_WR_CFG); + /* enable SI */ + enetc_write(priv, ENETC_SIMR, ENETC_SIMR_EN); +} + +/* returns DMA address for a given buffer index */ +static inline u64 enetc_rxb_address(struct udevice *dev, int i) +{ + return cpu_to_le64(dm_pci_virt_to_mem(dev, net_rx_packets[i])); +} + +/* + * Setup a single Tx BD Ring (ID = 0): + * - set Tx buffer descriptor address + * - set the BD count + * - initialize the producer and consumer index + */ +static void enetc_setup_tx_bdr(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + struct bd_ring *tx_bdr = &priv->tx_bdr; + u64 tx_bd_add = (u64)priv->enetc_txbd; + + /* used later to advance to the next Tx BD */ + tx_bdr->bd_count = ENETC_BD_CNT; + tx_bdr->next_prod_idx = 0; + tx_bdr->next_cons_idx = 0; + tx_bdr->cons_idx = priv->regs_base + + ENETC_BDR(TX, ENETC_TX_BDR_ID, ENETC_TBCIR); + tx_bdr->prod_idx = priv->regs_base + + ENETC_BDR(TX, ENETC_TX_BDR_ID, ENETC_TBPIR); + + /* set Tx BD address */ + enetc_bdr_write(priv, TX, ENETC_TX_BDR_ID, ENETC_TBBAR0, + lower_32_bits(tx_bd_add)); + enetc_bdr_write(priv, TX, ENETC_TX_BDR_ID, ENETC_TBBAR1, + upper_32_bits(tx_bd_add)); + /* set Tx 8 BD count */ + enetc_bdr_write(priv, TX, ENETC_TX_BDR_ID, ENETC_TBLENR, + tx_bdr->bd_count); + + /* reset both producer/consumer indexes */ + enetc_write_reg(tx_bdr->cons_idx, tx_bdr->next_cons_idx); + enetc_write_reg(tx_bdr->prod_idx, tx_bdr->next_prod_idx); + + /* enable TX ring */ + enetc_bdr_write(priv, TX, ENETC_TX_BDR_ID, ENETC_TBMR, ENETC_TBMR_EN); +} + +/* + * Setup a single Rx BD Ring (ID = 0): + * - set Rx buffer descriptors address (one descriptor per buffer) + * - set buffer size as max frame size + * - enable Rx ring + * - reset consumer and producer indexes + * - set buffer for each descriptor + */ +static void enetc_setup_rx_bdr(struct udevice *dev) +{ + struct enetc_priv *priv = dev_get_priv(dev); + struct bd_ring *rx_bdr = &priv->rx_bdr; + u64 rx_bd_add = (u64)priv->enetc_rxbd; + int i; + + /* used later to advance to the next BD produced by ENETC HW */ + rx_bdr->bd_count = ENETC_BD_CNT; + rx_bdr->next_prod_idx = 0; + rx_bdr->next_cons_idx = 0; + rx_bdr->cons_idx = priv->regs_base + + ENETC_BDR(RX, ENETC_RX_BDR_ID, ENETC_RBCIR); + rx_bdr->prod_idx = priv->regs_base + + ENETC_BDR(RX, ENETC_RX_BDR_ID, ENETC_RBPIR); + + /* set Rx BD address */ + enetc_bdr_write(priv, RX, ENETC_RX_BDR_ID, ENETC_RBBAR0, + lower_32_bits(rx_bd_add)); + enetc_bdr_write(priv, RX, ENETC_RX_BDR_ID, ENETC_RBBAR1, + upper_32_bits(rx_bd_add)); + /* set Rx BD count (multiple of 8) */ + 
enetc_bdr_write(priv, RX, ENETC_RX_BDR_ID, ENETC_RBLENR, + rx_bdr->bd_count); + /* set Rx buffer size */ + enetc_bdr_write(priv, RX, ENETC_RX_BDR_ID, ENETC_RBBSR, PKTSIZE_ALIGN); + + /* fill Rx BD */ + memset(priv->enetc_rxbd, 0, + rx_bdr->bd_count * sizeof(union enetc_rx_bd)); + for (i = 0; i < rx_bdr->bd_count; i++) { + priv->enetc_rxbd[i].w.addr = enetc_rxb_address(dev, i); + /* each RX buffer must be aligned to 64B */ + WARN_ON(priv->enetc_rxbd[i].w.addr & (ARCH_DMA_MINALIGN - 1)); + } + + /* reset producer (ENETC owned) and consumer (SW owned) index */ + enetc_write_reg(rx_bdr->cons_idx, rx_bdr->next_cons_idx); + enetc_write_reg(rx_bdr->prod_idx, rx_bdr->next_prod_idx); + + /* enable Rx ring */ + enetc_bdr_write(priv, RX, ENETC_RX_BDR_ID, ENETC_RBMR, ENETC_RBMR_EN); +} + +/* + * Start ENETC interface: + * - perform FLR + * - enable access to port and SI registers + * - set mac address + * - setup TX/RX buffer descriptors + * - enable Tx/Rx rings + */ +static int enetc_start(struct udevice *dev) +{ + struct eth_pdata *plat = dev_get_platdata(dev); + struct enetc_priv *priv = dev_get_priv(dev); + + /* reset and enable the PCI device */ + dm_pci_flr(dev); + dm_pci_clrset_config16(dev, PCI_COMMAND, 0, + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + + if (!is_valid_ethaddr(plat->enetaddr)) { + enetc_dbg(dev, "invalid MAC address, generate random ...\n"); + net_random_ethaddr(plat->enetaddr); + } + enetc_set_primary_mac_addr(priv, plat->enetaddr); + + enetc_enable_si_port(priv); + + /* setup Tx/Rx buffer descriptors */ + enetc_setup_tx_bdr(dev); + enetc_setup_rx_bdr(dev); + + enetc_start_pcs(dev); + enetc_start_phy(dev); + + return 0; +} + +/* + * Stop the network interface: + * - just quiesce it, we can wipe all configuration as _start starts from + * scratch each time + */ +static void enetc_stop(struct udevice *dev) +{ + /* FLR is sufficient to quiesce the device */ + dm_pci_flr(dev); +} + +/* + * ENETC transmit packet: + * - check if Tx BD ring is full + * - set buffer/packet address (dma address) + * - set final fragment flag + * - try while producer index equals consumer index or timeout + */ +static int enetc_send(struct udevice *dev, void *packet, int length) +{ + struct enetc_priv *priv = dev_get_priv(dev); + struct bd_ring *txr = &priv->tx_bdr; + void *nv_packet = (void *)packet; + int tries = ENETC_POLL_TRIES; + u32 pi, ci; + + pi = txr->next_prod_idx; + ci = enetc_read_reg(txr->cons_idx) & ENETC_BDR_IDX_MASK; + /* Tx ring is full when */ + if (((pi + 1) % txr->bd_count) == ci) { + enetc_dbg(dev, "Tx BDR full\n"); + return -ETIMEDOUT; + } + enetc_dbg(dev, "TxBD[%d]send: pkt_len=%d, buff @0x%x%08x\n", pi, length, + upper_32_bits((u64)nv_packet), lower_32_bits((u64)nv_packet)); + + /* prepare Tx BD */ + memset(&priv->enetc_txbd[pi], 0x0, sizeof(struct enetc_tx_bd)); + priv->enetc_txbd[pi].addr = + cpu_to_le64(dm_pci_virt_to_mem(dev, nv_packet)); + priv->enetc_txbd[pi].buf_len = cpu_to_le16(length); + priv->enetc_txbd[pi].frm_len = cpu_to_le16(length); + priv->enetc_txbd[pi].flags = cpu_to_le16(ENETC_TXBD_FLAGS_F); + dmb(); + /* send frame: increment producer index */ + pi = (pi + 1) % txr->bd_count; + txr->next_prod_idx = pi; + enetc_write_reg(txr->prod_idx, pi); + while ((--tries >= 0) && + (pi != (enetc_read_reg(txr->cons_idx) & ENETC_BDR_IDX_MASK))) + udelay(10); + + return tries > 0 ? 
0 : -ETIMEDOUT; +} + +/* + * Receive frame: + * - wait for the next BD to get ready bit set + * - clean up the descriptor + * - move on and indicate to HW that the cleaned BD is available for Rx + */ +static int enetc_recv(struct udevice *dev, int flags, uchar **packetp) +{ + struct enetc_priv *priv = dev_get_priv(dev); + struct bd_ring *rxr = &priv->rx_bdr; + int tries = ENETC_POLL_TRIES; + int pi = rxr->next_prod_idx; + int ci = rxr->next_cons_idx; + u32 status; + int len; + u8 rdy; + + do { + dmb(); + status = le32_to_cpu(priv->enetc_rxbd[pi].r.lstatus); + /* check if current BD is ready to be consumed */ + rdy = ENETC_RXBD_STATUS_R(status); + } while (--tries >= 0 && !rdy); + + if (!rdy) + return -EAGAIN; + + dmb(); + len = le16_to_cpu(priv->enetc_rxbd[pi].r.buf_len); + *packetp = (uchar *)enetc_rxb_address(dev, pi); + enetc_dbg(dev, "RxBD[%d]: len=%d err=%d pkt=0x%x%08x\n", pi, len, + ENETC_RXBD_STATUS_ERRORS(status), + upper_32_bits((u64)*packetp), lower_32_bits((u64)*packetp)); + + /* BD clean up and advance to next in ring */ + memset(&priv->enetc_rxbd[pi], 0, sizeof(union enetc_rx_bd)); + priv->enetc_rxbd[pi].w.addr = enetc_rxb_address(dev, pi); + rxr->next_prod_idx = (pi + 1) % rxr->bd_count; + ci = (ci + 1) % rxr->bd_count; + rxr->next_cons_idx = ci; + dmb(); + /* free up the slot in the ring for HW */ + enetc_write_reg(rxr->cons_idx, ci); + + return len; +} + +static const struct eth_ops enetc_ops = { + .start = enetc_start, + .send = enetc_send, + .recv = enetc_recv, + .stop = enetc_stop, +}; + +U_BOOT_DRIVER(eth_enetc) = { + .name = "enetc_eth", + .id = UCLASS_ETH, + .bind = enetc_bind, + .probe = enetc_probe, + .remove = enetc_remove, + .ops = &enetc_ops, + .priv_auto_alloc_size = sizeof(struct enetc_priv), + .platdata_auto_alloc_size = sizeof(struct eth_pdata), +}; + +static struct pci_device_id enetc_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_ENETC_ETH) }, + {} +}; + +U_BOOT_PCI_DEVICE(eth_enetc, enetc_ids); diff --git a/drivers/net/fsl_enetc.h b/drivers/net/fsl_enetc.h new file mode 100644 index 0000000000..0bb4cdff47 --- /dev/null +++ b/drivers/net/fsl_enetc.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ENETC ethernet controller driver + * Copyright 2017-2019 NXP + */ + +#ifndef _ENETC_H +#define _ENETC_H + +#define enetc_dbg(dev, fmt, args...) 
debug("%s:" fmt, dev->name, ##args) + +/* PCI function IDs */ +#define PCI_DEVICE_ID_ENETC_ETH 0xE100 +#define PCI_DEVICE_ID_ENETC_MDIO 0xEE01 + +/* ENETC Ethernet controller registers */ +/* Station interface register offsets */ +#define ENETC_SIMR 0x000 +#define ENETC_SIMR_EN BIT(31) +#define ENETC_SICAR0 0x040 +/* write cache cfg: snoop, no allocate, data & BD coherent */ +#define ENETC_SICAR_WR_CFG 0x6767 +/* read cache cfg: coherent copy, look up, don't alloc in cache */ +#define ENETC_SICAR_RD_CFG 0x27270000 +#define ENETC_SIROCT 0x300 +#define ENETC_SIRFRM 0x308 +#define ENETC_SITOCT 0x320 +#define ENETC_SITFRM 0x328 + +/* Rx/Tx Buffer Descriptor Ring registers */ +enum enetc_bdr_type {TX, RX}; +#define ENETC_BDR(type, n, off) (0x8000 + (type) * 0x100 + (n) * 0x200 + (off)) +#define ENETC_BDR_IDX_MASK 0xffff + +/* Rx BDR reg offsets */ +#define ENETC_RBMR 0x00 +#define ENETC_RBMR_EN BIT(31) +#define ENETC_RBBSR 0x08 +/* initial consumer index for Rx BDR */ +#define ENETC_RBCIR 0x0c +#define ENETC_RBBAR0 0x10 +#define ENETC_RBBAR1 0x14 +#define ENETC_RBPIR 0x18 +#define ENETC_RBLENR 0x20 + +/* Tx BDR reg offsets */ +#define ENETC_TBMR 0x00 +#define ENETC_TBMR_EN BIT(31) +#define ENETC_TBBAR0 0x10 +#define ENETC_TBBAR1 0x14 +#define ENETC_TBPIR 0x18 +#define ENETC_TBCIR 0x1c +#define ENETC_TBLENR 0x20 + +/* Port registers offset */ +#define ENETC_PORT_REGS_OFF 0x10000 + +/* Port registers */ +#define ENETC_PMR 0x0000 +#define ENETC_PMR_SI0_EN BIT(16) +#define ENETC_PSIPMMR 0x0018 +#define ENETC_PSIPMAR0 0x0100 +#define ENETC_PSIPMAR1 0x0104 +#define ENETC_PCAPR0 0x0900 +#define ENETC_PCAPRO_MDIO BIT(11) +#define ENETC_PSICFGR(n) (0x0940 + (n) * 0x10) +#define ENETC_PSICFGR_SET_TXBDR(val) ((val) & 0xff) +#define ENETC_PSICFGR_SET_RXBDR(val) (((val) & 0xff) << 16) +/* MAC configuration */ +#define ENETC_PM_CC 0x8008 +#define ENETC_PM_CC_DEFAULT 0x0810 +#define ENETC_PM_CC_RX_TX_EN 0x8813 +#define ENETC_PM_MAXFRM 0x8014 +#define ENETC_RX_MAXFRM_SIZE PKTSIZE_ALIGN +#define ENETC_PM_IMDIO_BASE 0x8030 +#define ENETC_PM_IF_MODE 0x8300 +#define ENETC_PM_IF_MODE_RG BIT(2) +#define ENETC_PM_IF_MODE_AN_ENA BIT(15) +#define ENETC_PM_IF_IFMODE_MASK GENMASK(1, 0) + +/* buffer descriptors count must be multiple of 8 and aligned to 128 bytes */ +#define ENETC_BD_CNT CONFIG_SYS_RX_ETH_BUFFER +#define ENETC_BD_ALIGN 128 + +/* single pair of Rx/Tx rings */ +#define ENETC_RX_BDR_CNT 1 +#define ENETC_TX_BDR_CNT 1 +#define ENETC_RX_BDR_ID 0 +#define ENETC_TX_BDR_ID 0 + +/* Tx buffer descriptor */ +struct enetc_tx_bd { + __le64 addr; + __le16 buf_len; + __le16 frm_len; + __le16 err_csum; + __le16 flags; +}; + +#define ENETC_TXBD_FLAGS_F BIT(15) +#define ENETC_POLL_TRIES 32000 + +/* Rx buffer descriptor */ +union enetc_rx_bd { + /* SW provided BD format */ + struct { + __le64 addr; + u8 reserved[8]; + } w; + + /* ENETC returned BD format */ + struct { + __le16 inet_csum; + __le16 parse_summary; + __le32 rss_hash; + __le16 buf_len; + __le16 vlan_opt; + union { + struct { + __le16 flags; + __le16 error; + }; + __le32 lstatus; + }; + } r; +}; + +#define ENETC_RXBD_STATUS_R(status) (((status) >> 30) & 0x1) +#define ENETC_RXBD_STATUS_F(status) (((status) >> 31) & 0x1) +#define ENETC_RXBD_STATUS_ERRORS(status) (((status) >> 16) & 0xff) +#define ENETC_RXBD_STATUS(flags) ((flags) << 16) + +/* Tx/Rx ring info */ +struct bd_ring { + void *cons_idx; + void *prod_idx; + /* next BD index to use */ + int next_prod_idx; + int next_cons_idx; + int bd_count; +}; + +/* ENETC private structure */ +struct enetc_priv { + struct 
enetc_tx_bd *enetc_txbd; + union enetc_rx_bd *enetc_rxbd; + + void *regs_base; /* base ENETC registers */ + void *port_regs; /* base ENETC port registers */ + + /* Rx/Tx buffer descriptor rings info */ + struct bd_ring tx_bdr; + struct bd_ring rx_bdr; + + int if_type; + struct mii_dev imdio; +}; + +/* register accessors */ +#define enetc_read_reg(x) readl((x)) +#define enetc_write_reg(x, val) writel((val), (x)) +#define enetc_read(priv, off) enetc_read_reg((priv)->regs_base + (off)) +#define enetc_write(priv, off, v) \ + enetc_write_reg((priv)->regs_base + (off), v) + +/* port register accessors */ +#define enetc_port_regs(priv, off) ((priv)->port_regs + (off)) +#define enetc_read_port(priv, off) \ + enetc_read_reg(enetc_port_regs((priv), (off))) +#define enetc_write_port(priv, off, v) \ + enetc_write_reg(enetc_port_regs((priv), (off)), v) + +/* BDR register accessors, see ENETC_BDR() */ +#define enetc_bdr_read(priv, t, n, off) \ + enetc_read(priv, ENETC_BDR(t, n, off)) +#define enetc_bdr_write(priv, t, n, off, val) \ + enetc_write(priv, ENETC_BDR(t, n, off), val) + +/* PCS / internal SoC PHY ID, it defaults to 0 on all interfaces */ +#define ENETC_PCS_PHY_ADDR 0 + +/* PCS registers */ +#define ENETC_PCS_CR 0x00 +#define ENETC_PCS_CR_RESET_AN 0x1200 +#define ENETC_PCS_CR_DEF_VAL 0x0140 +#define ENETC_PCS_CR_RST BIT(15) +#define ENETC_PCS_DEV_ABILITY 0x04 +#define ENETC_PCS_DEV_ABILITY_SGMII 0x4001 +#define ENETC_PCS_DEV_ABILITY_SXGMII 0x5001 +#define ENETC_PCS_LINK_TIMER1 0x12 +#define ENETC_PCS_LINK_TIMER1_VAL 0x06a0 +#define ENETC_PCS_LINK_TIMER2 0x13 +#define ENETC_PCS_LINK_TIMER2_VAL 0x0003 +#define ENETC_PCS_IF_MODE 0x14 +#define ENETC_PCS_IF_MODE_SGMII BIT(0) +#define ENETC_PCS_IF_MODE_SGMII_AN BIT(1) +#define ENETC_PCS_IF_MODE_SPEED_1G BIT(3) + +/* PCS replicator block for USXGMII */ +#define ENETC_PCS_DEVAD_REPL 0x1f + +/* ENETC external MDIO registers */ +#define ENETC_MDIO_BASE 0x1c00 +#define ENETC_MDIO_CFG 0x00 +#define ENETC_EMDIO_CFG_C22 0x00809508 +#define ENETC_EMDIO_CFG_C45 0x00809548 +#define ENETC_EMDIO_CFG_RD_ER BIT(1) +#define ENETC_EMDIO_CFG_BSY BIT(0) +#define ENETC_MDIO_CTL 0x04 +#define ENETC_MDIO_CTL_READ BIT(15) +#define ENETC_MDIO_DATA 0x08 +#define ENETC_MDIO_STAT 0x0c + +#define ENETC_MDIO_READ_ERR 0xffff + +struct enetc_mdio_priv { + void *regs_base; +}; + +/* + * these functions are implemented by ENETC_MDIO and are re-used by ENETC driver + * to drive serdes / internal SoC PHYs + */ +int enetc_mdio_read_priv(struct enetc_mdio_priv *priv, int addr, int devad, + int reg); +int enetc_mdio_write_priv(struct enetc_mdio_priv *priv, int addr, int devad, + int reg, u16 val); + +#endif /* _ENETC_H */ diff --git a/drivers/net/fsl_enetc_mdio.c b/drivers/net/fsl_enetc_mdio.c new file mode 100644 index 0000000000..60d21537b8 --- /dev/null +++ b/drivers/net/fsl_enetc_mdio.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ENETC ethernet controller driver + * Copyright 2019 NXP + */ + +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <pci.h> +#include <miiphy.h> +#include <asm/io.h> +#include <asm/processor.h> +#include <miiphy.h> + +#include "fsl_enetc.h" + +static void enetc_mdio_wait_bsy(struct enetc_mdio_priv *priv) +{ + while (enetc_read(priv, ENETC_MDIO_CFG) & ENETC_EMDIO_CFG_BSY) + cpu_relax(); +} + +int enetc_mdio_read_priv(struct enetc_mdio_priv *priv, int addr, int devad, + int reg) +{ + if (devad == MDIO_DEVAD_NONE) + enetc_write(priv, ENETC_MDIO_CFG, ENETC_EMDIO_CFG_C22); + else + enetc_write(priv, ENETC_MDIO_CFG, 
ENETC_EMDIO_CFG_C45); + enetc_mdio_wait_bsy(priv); + + if (devad == MDIO_DEVAD_NONE) { + enetc_write(priv, ENETC_MDIO_CTL, ENETC_MDIO_CTL_READ | + (addr << 5) | reg); + } else { + enetc_write(priv, ENETC_MDIO_CTL, (addr << 5) + devad); + enetc_mdio_wait_bsy(priv); + + enetc_write(priv, ENETC_MDIO_STAT, reg); + enetc_mdio_wait_bsy(priv); + + enetc_write(priv, ENETC_MDIO_CTL, ENETC_MDIO_CTL_READ | + (addr << 5) | devad); + } + + enetc_mdio_wait_bsy(priv); + if (enetc_read(priv, ENETC_MDIO_CFG) & ENETC_EMDIO_CFG_RD_ER) + return ENETC_MDIO_READ_ERR; + + return enetc_read(priv, ENETC_MDIO_DATA); +} + +int enetc_mdio_write_priv(struct enetc_mdio_priv *priv, int addr, int devad, + int reg, u16 val) +{ + if (devad == MDIO_DEVAD_NONE) + enetc_write(priv, ENETC_MDIO_CFG, ENETC_EMDIO_CFG_C22); + else + enetc_write(priv, ENETC_MDIO_CFG, ENETC_EMDIO_CFG_C45); + enetc_mdio_wait_bsy(priv); + + if (devad != MDIO_DEVAD_NONE) { + enetc_write(priv, ENETC_MDIO_CTL, (addr << 5) + devad); + enetc_write(priv, ENETC_MDIO_STAT, reg); + } else { + enetc_write(priv, ENETC_MDIO_CTL, (addr << 5) + reg); + } + enetc_mdio_wait_bsy(priv); + + enetc_write(priv, ENETC_MDIO_DATA, val); + enetc_mdio_wait_bsy(priv); + + return 0; +} + +/* DM wrappers */ +static int dm_enetc_mdio_read(struct udevice *dev, int addr, int devad, int reg) +{ + struct enetc_mdio_priv *priv = dev_get_priv(dev); + + return enetc_mdio_read_priv(priv, addr, devad, reg); +} + +static int dm_enetc_mdio_write(struct udevice *dev, int addr, int devad, + int reg, u16 val) +{ + struct enetc_mdio_priv *priv = dev_get_priv(dev); + + return enetc_mdio_write_priv(priv, addr, devad, reg, val); +} + +static const struct mdio_ops enetc_mdio_ops = { + .read = dm_enetc_mdio_read, + .write = dm_enetc_mdio_write, +}; + +static int enetc_mdio_bind(struct udevice *dev) +{ + char name[16]; + static int eth_num_devices; + + /* + * prefer using PCI function numbers to number interfaces, but these + * are only available if dts nodes are present. For PCI they are + * optional, handle that case too. 
Just in case some nodes are present + * and some are not, use different naming scheme - enetc-N based on + * PCI function # and enetc#N based on interface count + */ + if (ofnode_valid(dev->node)) + sprintf(name, "emdio-%u", PCI_FUNC(pci_get_devfn(dev))); + else + sprintf(name, "emdio#%u", eth_num_devices++); + device_set_name(dev, name); + + return 0; +} + +static int enetc_mdio_probe(struct udevice *dev) +{ + struct enetc_mdio_priv *priv = dev_get_priv(dev); + + priv->regs_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0); + if (!priv->regs_base) { + enetc_dbg(dev, "failed to map BAR0\n"); + return -EINVAL; + } + + priv->regs_base += ENETC_MDIO_BASE; + + dm_pci_clrset_config16(dev, PCI_COMMAND, 0, PCI_COMMAND_MEMORY); + + return 0; +} + +U_BOOT_DRIVER(enetc_mdio) = { + .name = "enetc_mdio", + .id = UCLASS_MDIO, + .bind = enetc_mdio_bind, + .probe = enetc_mdio_probe, + .ops = &enetc_mdio_ops, + .priv_auto_alloc_size = sizeof(struct enetc_mdio_priv), +}; + +static struct pci_device_id enetc_mdio_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_ENETC_MDIO) }, +}; + +U_BOOT_PCI_DEVICE(enetc_mdio, enetc_mdio_ids); diff --git a/drivers/net/macb.c b/drivers/net/macb.c index a7eddd647d..c99cf663a4 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -45,9 +45,17 @@ DECLARE_GLOBAL_DATA_PTR; -#define MACB_RX_BUFFER_SIZE 4096 -#define MACB_RX_RING_SIZE (MACB_RX_BUFFER_SIZE / 128) +/* + * These buffer sizes must be power of 2 and divisible + * by RX_BUFFER_MULTIPLE + */ +#define MACB_RX_BUFFER_SIZE 128 +#define GEM_RX_BUFFER_SIZE 2048 +#define RX_BUFFER_MULTIPLE 64 + +#define MACB_RX_RING_SIZE 32 #define MACB_TX_RING_SIZE 16 + #define MACB_TX_TIMEOUT 1000 #define MACB_AUTONEG_TIMEOUT 5000000 @@ -77,31 +85,16 @@ struct macb_dma_desc { #define MACB_RX_DMA_DESC_SIZE (DMA_DESC_BYTES(MACB_RX_RING_SIZE)) #define MACB_TX_DUMMY_DMA_DESC_SIZE (DMA_DESC_BYTES(1)) -#define RXADDR_USED 0x00000001 -#define RXADDR_WRAP 0x00000002 - #define RXBUF_FRMLEN_MASK 0x00000fff -#define RXBUF_FRAME_START 0x00004000 -#define RXBUF_FRAME_END 0x00008000 -#define RXBUF_TYPEID_MATCH 0x00400000 -#define RXBUF_ADDR4_MATCH 0x00800000 -#define RXBUF_ADDR3_MATCH 0x01000000 -#define RXBUF_ADDR2_MATCH 0x02000000 -#define RXBUF_ADDR1_MATCH 0x04000000 -#define RXBUF_BROADCAST 0x80000000 - #define TXBUF_FRMLEN_MASK 0x000007ff -#define TXBUF_FRAME_END 0x00008000 -#define TXBUF_NOCRC 0x00010000 -#define TXBUF_EXHAUSTED 0x08000000 -#define TXBUF_UNDERRUN 0x10000000 -#define TXBUF_MAXRETRY 0x20000000 -#define TXBUF_WRAP 0x40000000 -#define TXBUF_USED 0x80000000 struct macb_device { void *regs; + bool is_big_endian; + + const struct macb_config *config; + unsigned int rx_tail; unsigned int tx_head; unsigned int tx_tail; @@ -112,6 +105,7 @@ struct macb_device { void *tx_buffer; struct macb_dma_desc *rx_ring; struct macb_dma_desc *tx_ring; + size_t rx_buffer_size; unsigned long rx_buffer_dma; unsigned long rx_ring_dma; @@ -137,6 +131,13 @@ struct macb_device { phy_interface_t phy_interface; #endif }; + +struct macb_config { + unsigned int dma_burst_length; + + int (*clk_init)(struct udevice *dev, ulong rate); +}; + #ifndef CONFIG_DM_ETH #define to_macb(_nd) container_of(_nd, struct macb_device, netdev) #endif @@ -316,9 +317,9 @@ static int _macb_send(struct macb_device *macb, const char *name, void *packet, paddr = dma_map_single(packet, length, DMA_TO_DEVICE); ctrl = length & TXBUF_FRMLEN_MASK; - ctrl |= TXBUF_FRAME_END; + ctrl |= MACB_BIT(TX_LAST); if (tx_head == (MACB_TX_RING_SIZE - 1)) { - ctrl |= TXBUF_WRAP; + ctrl |= 
MACB_BIT(TX_WRAP); macb->tx_head = 0; } else { macb->tx_head++; @@ -340,7 +341,7 @@ static int _macb_send(struct macb_device *macb, const char *name, void *packet, barrier(); macb_invalidate_ring_desc(macb, TX); ctrl = macb->tx_ring[tx_head].ctrl; - if (ctrl & TXBUF_USED) + if (ctrl & MACB_BIT(TX_USED)) break; udelay(1); } @@ -348,9 +349,9 @@ static int _macb_send(struct macb_device *macb, const char *name, void *packet, dma_unmap_single(packet, length, paddr); if (i <= MACB_TX_TIMEOUT) { - if (ctrl & TXBUF_UNDERRUN) + if (ctrl & MACB_BIT(TX_UNDERRUN)) printf("%s: TX underrun\n", name); - if (ctrl & TXBUF_EXHAUSTED) + if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) printf("%s: TX buffers exhausted in mid frame\n", name); } else { printf("%s: TX timeout\n", name); @@ -369,14 +370,14 @@ static void reclaim_rx_buffers(struct macb_device *macb, macb_invalidate_ring_desc(macb, RX); while (i > new_tail) { - macb->rx_ring[i].addr &= ~RXADDR_USED; + macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED); i++; if (i > MACB_RX_RING_SIZE) i = 0; } while (i < new_tail) { - macb->rx_ring[i].addr &= ~RXADDR_USED; + macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED); i++; } @@ -396,26 +397,27 @@ static int _macb_recv(struct macb_device *macb, uchar **packetp) for (;;) { macb_invalidate_ring_desc(macb, RX); - if (!(macb->rx_ring[next_rx_tail].addr & RXADDR_USED)) + if (!(macb->rx_ring[next_rx_tail].addr & MACB_BIT(RX_USED))) return -EAGAIN; status = macb->rx_ring[next_rx_tail].ctrl; - if (status & RXBUF_FRAME_START) { + if (status & MACB_BIT(RX_SOF)) { if (next_rx_tail != macb->rx_tail) reclaim_rx_buffers(macb, next_rx_tail); macb->wrapped = false; } - if (status & RXBUF_FRAME_END) { - buffer = macb->rx_buffer + 128 * macb->rx_tail; + if (status & MACB_BIT(RX_EOF)) { + buffer = macb->rx_buffer + + macb->rx_buffer_size * macb->rx_tail; length = status & RXBUF_FRMLEN_MASK; macb_invalidate_rx_buffer(macb); if (macb->wrapped) { unsigned int headlen, taillen; - headlen = 128 * (MACB_RX_RING_SIZE - - macb->rx_tail); + headlen = macb->rx_buffer_size * + (MACB_RX_RING_SIZE - macb->rx_tail); taillen = length - headlen; memcpy((void *)net_rx_packets[0], buffer, headlen); @@ -495,21 +497,38 @@ static int macb_phy_find(struct macb_device *macb, const char *name) * when operation failed. */ #ifdef CONFIG_DM_ETH +static int macb_sifive_clk_init(struct udevice *dev, ulong rate) +{ + fdt_addr_t addr; + void *gemgxl_regs; + + addr = dev_read_addr_index(dev, 1); + if (addr == FDT_ADDR_T_NONE) + return -ENODEV; + + gemgxl_regs = (void __iomem *)addr; + if (!gemgxl_regs) + return -ENODEV; + + /* + * SiFive GEMGXL TX clock operation mode: + * + * 0 = GMII mode. Use 125 MHz gemgxlclk from PRCI in TX logic + * and output clock on GMII output signal GTX_CLK + * 1 = MII mode. Use MII input signal TX_CLK in TX logic + */ + writel(rate != 125000000, gemgxl_regs); + return 0; +} + int __weak macb_linkspd_cb(struct udevice *dev, unsigned int speed) { #ifdef CONFIG_CLK + struct macb_device *macb = dev_get_priv(dev); struct clk tx_clk; ulong rate; int ret; - /* - * "tx_clk" is an optional clock source for MACB. - * Ignore if it does not exist in DT. - */ - ret = clk_get_by_name(dev, "tx_clk", &tx_clk); - if (ret) - return 0; - switch (speed) { case _10BASET: rate = 2500000; /* 2.5 MHz */ @@ -525,6 +544,17 @@ int __weak macb_linkspd_cb(struct udevice *dev, unsigned int speed) return 0; } + if (macb->config->clk_init) + return macb->config->clk_init(dev, rate); + + /* + * "tx_clk" is an optional clock source for MACB. + * Ignore if it does not exist in DT. 
+ */ + ret = clk_get_by_name(dev, "tx_clk", &tx_clk); + if (ret) + return 0; + if (tx_clk.dev) { ret = clk_set_rate(&tx_clk, rate); if (ret) @@ -699,7 +729,7 @@ static int gmac_init_multi_queues(struct macb_device *macb) if (queue_mask & (1 << i)) num_queues++; - macb->dummy_desc->ctrl = TXBUF_USED; + macb->dummy_desc->ctrl = MACB_BIT(TX_USED); macb->dummy_desc->addr = 0; flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma + ALIGN(MACB_TX_DUMMY_DMA_DESC_SIZE, PKTALIGN)); @@ -710,6 +740,31 @@ static int gmac_init_multi_queues(struct macb_device *macb) return 0; } +static void gmac_configure_dma(struct macb_device *macb) +{ + u32 buffer_size; + u32 dmacfg; + + buffer_size = macb->rx_buffer_size / RX_BUFFER_MULTIPLE; + dmacfg = gem_readl(macb, DMACFG) & ~GEM_BF(RXBS, -1L); + dmacfg |= GEM_BF(RXBS, buffer_size); + + if (macb->config->dma_burst_length) + dmacfg = GEM_BFINS(FBLDO, + macb->config->dma_burst_length, dmacfg); + + dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); + dmacfg &= ~GEM_BIT(ENDIA_PKT); + + if (macb->is_big_endian) + dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ + else + dmacfg &= ~GEM_BIT(ENDIA_DESC); + + dmacfg &= ~GEM_BIT(ADDR64); + gem_writel(macb, DMACFG, dmacfg); +} + #ifdef CONFIG_DM_ETH static int _macb_init(struct udevice *dev, const char *name) #else @@ -732,10 +787,10 @@ static int _macb_init(struct macb_device *macb, const char *name) paddr = macb->rx_buffer_dma; for (i = 0; i < MACB_RX_RING_SIZE; i++) { if (i == (MACB_RX_RING_SIZE - 1)) - paddr |= RXADDR_WRAP; + paddr |= MACB_BIT(RX_WRAP); macb->rx_ring[i].addr = paddr; macb->rx_ring[i].ctrl = 0; - paddr += 128; + paddr += macb->rx_buffer_size; } macb_flush_ring_desc(macb, RX); macb_flush_rx_buffer(macb); @@ -743,9 +798,10 @@ static int _macb_init(struct macb_device *macb, const char *name) for (i = 0; i < MACB_TX_RING_SIZE; i++) { macb->tx_ring[i].addr = 0; if (i == (MACB_TX_RING_SIZE - 1)) - macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP; + macb->tx_ring[i].ctrl = MACB_BIT(TX_USED) | + MACB_BIT(TX_WRAP); else - macb->tx_ring[i].ctrl = TXBUF_USED; + macb->tx_ring[i].ctrl = MACB_BIT(TX_USED); } macb_flush_ring_desc(macb, TX); @@ -762,6 +818,8 @@ static int _macb_init(struct macb_device *macb, const char *name) macb_writel(macb, TBQP, macb->tx_ring_dma); if (macb_is_gem(macb)) { + /* Initialize DMA properties */ + gmac_configure_dma(macb); /* Check the multi queue and initialize the queue for tx */ gmac_init_multi_queues(macb); @@ -774,14 +832,21 @@ static int _macb_init(struct macb_device *macb, const char *name) #ifdef CONFIG_DM_ETH if ((macb->phy_interface == PHY_INTERFACE_MODE_RMII) || (macb->phy_interface == PHY_INTERFACE_MODE_RGMII)) - gem_writel(macb, UR, GEM_BIT(RGMII)); + gem_writel(macb, USRIO, GEM_BIT(RGMII)); else - gem_writel(macb, UR, 0); + gem_writel(macb, USRIO, 0); + + if (macb->phy_interface == PHY_INTERFACE_MODE_SGMII) { + unsigned int ncfgr = macb_readl(macb, NCFGR); + + ncfgr |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); + macb_writel(macb, NCFGR, ncfgr); + } #else #if defined(CONFIG_RGMII) || defined(CONFIG_RMII) - gem_writel(macb, UR, GEM_BIT(RGMII)); + gem_writel(macb, USRIO, GEM_BIT(RGMII)); #else - gem_writel(macb, UR, 0); + gem_writel(macb, USRIO, 0); #endif #endif } else { @@ -903,8 +968,12 @@ static u32 gem_mdc_clk_div(int id, struct macb_device *macb) config = GEM_BF(CLK, GEM_CLK_DIV48); else if (macb_hz < 160000000) config = GEM_BF(CLK, GEM_CLK_DIV64); - else + else if (macb_hz < 240000000) config = GEM_BF(CLK, GEM_CLK_DIV96); + else if (macb_hz < 320000000) + config = 
GEM_BF(CLK, GEM_CLK_DIV128); + else + config = GEM_BF(CLK, GEM_CLK_DIV224); return config; } @@ -932,8 +1001,14 @@ static void _macb_eth_initialize(struct macb_device *macb) int id = 0; /* This is not used by functions we call */ u32 ncfgr; + if (macb_is_gem(macb)) + macb->rx_buffer_size = GEM_RX_BUFFER_SIZE; + else + macb->rx_buffer_size = MACB_RX_BUFFER_SIZE; + /* TODO: we need check the rx/tx_ring_dma is dcache line aligned */ - macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE, + macb->rx_buffer = dma_alloc_coherent(macb->rx_buffer_size * + MACB_RX_RING_SIZE, &macb->rx_buffer_dma); macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE, &macb->rx_ring_dma); @@ -1142,12 +1217,17 @@ static int macb_enable_clk(struct udevice *dev) } #endif +static const struct macb_config default_gem_config = { + .dma_burst_length = 16, + .clk_init = NULL, +}; + static int macb_eth_probe(struct udevice *dev) { struct eth_pdata *pdata = dev_get_platdata(dev); struct macb_device *macb = dev_get_priv(dev); const char *phy_mode; - __maybe_unused int ret; + int ret; phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", NULL); @@ -1160,6 +1240,12 @@ static int macb_eth_probe(struct udevice *dev) macb->regs = (void *)pdata->iobase; + macb->is_big_endian = (cpu_to_be32(0x12345678) == 0x12345678); + + macb->config = (struct macb_config *)dev_get_driver_data(dev); + if (!macb->config) + macb->config = &default_gem_config; + #ifdef CONFIG_CLK ret = macb_enable_clk(dev); if (ret) @@ -1220,13 +1306,25 @@ static int macb_eth_ofdata_to_platdata(struct udevice *dev) return macb_late_eth_ofdata_to_platdata(dev); } +static const struct macb_config sama5d4_config = { + .dma_burst_length = 4, + .clk_init = NULL, +}; + +static const struct macb_config sifive_config = { + .dma_burst_length = 16, + .clk_init = macb_sifive_clk_init, +}; + static const struct udevice_id macb_eth_ids[] = { { .compatible = "cdns,macb" }, { .compatible = "cdns,at91sam9260-macb" }, { .compatible = "atmel,sama5d2-gem" }, { .compatible = "atmel,sama5d3-gem" }, - { .compatible = "atmel,sama5d4-gem" }, + { .compatible = "atmel,sama5d4-gem", .data = (ulong)&sama5d4_config }, { .compatible = "cdns,zynq-gem" }, + { .compatible = "sifive,fu540-c000-gem", + .data = (ulong)&sifive_config }, { } }; diff --git a/drivers/net/macb.h b/drivers/net/macb.h index 3cc27f8560..9b16383eba 100644 --- a/drivers/net/macb.h +++ b/drivers/net/macb.h @@ -5,221 +5,410 @@ #ifndef __DRIVERS_MACB_H__ #define __DRIVERS_MACB_H__ +#define MACB_GREGS_NBR 16 +#define MACB_GREGS_VERSION 2 +#define MACB_MAX_QUEUES 8 + /* MACB register offsets */ -#define MACB_NCR 0x0000 -#define MACB_NCFGR 0x0004 -#define MACB_NSR 0x0008 -#define GEM_UR 0x000c -#define MACB_DMACFG 0x0010 -#define MACB_TSR 0x0014 -#define MACB_RBQP 0x0018 -#define MACB_TBQP 0x001c -#define MACB_RSR 0x0020 -#define MACB_ISR 0x0024 -#define MACB_IER 0x0028 -#define MACB_IDR 0x002c -#define MACB_IMR 0x0030 -#define MACB_MAN 0x0034 -#define MACB_PTR 0x0038 -#define MACB_PFR 0x003c -#define MACB_FTO 0x0040 -#define MACB_SCF 0x0044 -#define MACB_MCF 0x0048 -#define MACB_FRO 0x004c -#define MACB_FCSE 0x0050 -#define MACB_ALE 0x0054 -#define MACB_DTF 0x0058 -#define MACB_LCOL 0x005c -#define MACB_EXCOL 0x0060 -#define MACB_TUND 0x0064 -#define MACB_CSE 0x0068 -#define MACB_RRE 0x006c -#define MACB_ROVR 0x0070 -#define MACB_RSE 0x0074 -#define MACB_ELE 0x0078 -#define MACB_RJA 0x007c -#define MACB_USF 0x0080 -#define MACB_STE 0x0084 -#define MACB_RLE 0x0088 -#define MACB_TPF 0x008c -#define MACB_HRB 0x0090 
-#define MACB_HRT 0x0094 -#define MACB_SA1B 0x0098 -#define MACB_SA1T 0x009c -#define MACB_SA2B 0x00a0 -#define MACB_SA2T 0x00a4 -#define MACB_SA3B 0x00a8 -#define MACB_SA3T 0x00ac -#define MACB_SA4B 0x00b0 -#define MACB_SA4T 0x00b4 -#define MACB_TID 0x00b8 -#define MACB_TPQ 0x00bc -#define MACB_USRIO 0x00c0 -#define MACB_WOL 0x00c4 -#define MACB_MID 0x00fc - -/* GEM specific register offsets */ -#define GEM_DCFG1 0x0280 -#define GEM_DCFG6 0x0294 - -#define MACB_MAX_QUEUES 8 - -/* GEM specific multi queues register offset */ -/* hw_q can be 0~7 */ -#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) +#define MACB_NCR 0x0000 /* Network Control */ +#define MACB_NCFGR 0x0004 /* Network Config */ +#define MACB_NSR 0x0008 /* Network Status */ +#define MACB_TAR 0x000c /* AT91RM9200 only */ +#define MACB_TCR 0x0010 /* AT91RM9200 only */ +#define MACB_TSR 0x0014 /* Transmit Status */ +#define MACB_RBQP 0x0018 /* RX Q Base Address */ +#define MACB_TBQP 0x001c /* TX Q Base Address */ +#define MACB_RSR 0x0020 /* Receive Status */ +#define MACB_ISR 0x0024 /* Interrupt Status */ +#define MACB_IER 0x0028 /* Interrupt Enable */ +#define MACB_IDR 0x002c /* Interrupt Disable */ +#define MACB_IMR 0x0030 /* Interrupt Mask */ +#define MACB_MAN 0x0034 /* PHY Maintenance */ +#define MACB_PTR 0x0038 +#define MACB_PFR 0x003c +#define MACB_FTO 0x0040 +#define MACB_SCF 0x0044 +#define MACB_MCF 0x0048 +#define MACB_FRO 0x004c +#define MACB_FCSE 0x0050 +#define MACB_ALE 0x0054 +#define MACB_DTF 0x0058 +#define MACB_LCOL 0x005c +#define MACB_EXCOL 0x0060 +#define MACB_TUND 0x0064 +#define MACB_CSE 0x0068 +#define MACB_RRE 0x006c +#define MACB_ROVR 0x0070 +#define MACB_RSE 0x0074 +#define MACB_ELE 0x0078 +#define MACB_RJA 0x007c +#define MACB_USF 0x0080 +#define MACB_STE 0x0084 +#define MACB_RLE 0x0088 +#define MACB_TPF 0x008c +#define MACB_HRB 0x0090 +#define MACB_HRT 0x0094 +#define MACB_SA1B 0x0098 +#define MACB_SA1T 0x009c +#define MACB_SA2B 0x00a0 +#define MACB_SA2T 0x00a4 +#define MACB_SA3B 0x00a8 +#define MACB_SA3T 0x00ac +#define MACB_SA4B 0x00b0 +#define MACB_SA4T 0x00b4 +#define MACB_TID 0x00b8 +#define MACB_TPQ 0x00bc +#define MACB_USRIO 0x00c0 +#define MACB_WOL 0x00c4 +#define MACB_MID 0x00fc +#define MACB_TBQPH 0x04C8 +#define MACB_RBQPH 0x04D4 + +/* GEM register offsets. 
*/ +#define GEM_NCFGR 0x0004 /* Network Config */ +#define GEM_USRIO 0x000c /* User IO */ +#define GEM_DMACFG 0x0010 /* DMA Configuration */ +#define GEM_JML 0x0048 /* Jumbo Max Length */ +#define GEM_HRB 0x0080 /* Hash Bottom */ +#define GEM_HRT 0x0084 /* Hash Top */ +#define GEM_SA1B 0x0088 /* Specific1 Bottom */ +#define GEM_SA1T 0x008C /* Specific1 Top */ +#define GEM_SA2B 0x0090 /* Specific2 Bottom */ +#define GEM_SA2T 0x0094 /* Specific2 Top */ +#define GEM_SA3B 0x0098 /* Specific3 Bottom */ +#define GEM_SA3T 0x009C /* Specific3 Top */ +#define GEM_SA4B 0x00A0 /* Specific4 Bottom */ +#define GEM_SA4T 0x00A4 /* Specific4 Top */ +#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */ +#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */ +#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */ +#define GEM_PEFRSH 0x00f4 /* PTP Peer Event Frame Received Seconds Register 47:32 */ +#define GEM_OTX 0x0100 /* Octets transmitted */ +#define GEM_OCTTXL 0x0100 /* Octets transmitted [31:0] */ +#define GEM_OCTTXH 0x0104 /* Octets transmitted [47:32] */ +#define GEM_TXCNT 0x0108 /* Frames Transmitted counter */ +#define GEM_TXBCCNT 0x010c /* Broadcast Frames counter */ +#define GEM_TXMCCNT 0x0110 /* Multicast Frames counter */ +#define GEM_TXPAUSECNT 0x0114 /* Pause Frames Transmitted Counter */ +#define GEM_TX64CNT 0x0118 /* 64 byte Frames TX counter */ +#define GEM_TX65CNT 0x011c /* 65-127 byte Frames TX counter */ +#define GEM_TX128CNT 0x0120 /* 128-255 byte Frames TX counter */ +#define GEM_TX256CNT 0x0124 /* 256-511 byte Frames TX counter */ +#define GEM_TX512CNT 0x0128 /* 512-1023 byte Frames TX counter */ +#define GEM_TX1024CNT 0x012c /* 1024-1518 byte Frames TX counter */ +#define GEM_TX1519CNT 0x0130 /* 1519+ byte Frames TX counter */ +#define GEM_TXURUNCNT 0x0134 /* TX under run error counter */ +#define GEM_SNGLCOLLCNT 0x0138 /* Single Collision Frame Counter */ +#define GEM_MULTICOLLCNT 0x013c /* Multiple Collision Frame Counter */ +#define GEM_EXCESSCOLLCNT 0x0140 /* Excessive Collision Frame Counter */ +#define GEM_LATECOLLCNT 0x0144 /* Late Collision Frame Counter */ +#define GEM_TXDEFERCNT 0x0148 /* Deferred Transmission Frame Counter */ +#define GEM_TXCSENSECNT 0x014c /* Carrier Sense Error Counter */ +#define GEM_ORX 0x0150 /* Octets received */ +#define GEM_OCTRXL 0x0150 /* Octets received [31:0] */ +#define GEM_OCTRXH 0x0154 /* Octets received [47:32] */ +#define GEM_RXCNT 0x0158 /* Frames Received Counter */ +#define GEM_RXBROADCNT 0x015c /* Broadcast Frames Received Counter */ +#define GEM_RXMULTICNT 0x0160 /* Multicast Frames Received Counter */ +#define GEM_RXPAUSECNT 0x0164 /* Pause Frames Received Counter */ +#define GEM_RX64CNT 0x0168 /* 64 byte Frames RX Counter */ +#define GEM_RX65CNT 0x016c /* 65-127 byte Frames RX Counter */ +#define GEM_RX128CNT 0x0170 /* 128-255 byte Frames RX Counter */ +#define GEM_RX256CNT 0x0174 /* 256-511 byte Frames RX Counter */ +#define GEM_RX512CNT 0x0178 /* 512-1023 byte Frames RX Counter */ +#define GEM_RX1024CNT 0x017c /* 1024-1518 byte Frames RX Counter */ +#define GEM_RX1519CNT 0x0180 /* 1519+ byte Frames RX Counter */ +#define GEM_RXUNDRCNT 0x0184 /* Undersize Frames Received Counter */ +#define GEM_RXOVRCNT 0x0188 /* Oversize Frames Received Counter */ +#define GEM_RXJABCNT 0x018c /* Jabbers Received Counter */ +#define GEM_RXFCSCNT 0x0190 /* Frame Check Sequence Error Counter */ +#define GEM_RXLENGTHCNT 0x0194 /* Length Field Error Counter */ 
+#define GEM_RXSYMBCNT 0x0198 /* Symbol Error Counter */ +#define GEM_RXALIGNCNT 0x019c /* Alignment Error Counter */ +#define GEM_RXRESERRCNT 0x01a0 /* Receive Resource Error Counter */ +#define GEM_RXORCNT 0x01a4 /* Receive Overrun Counter */ +#define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */ +#define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */ +#define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */ +#define GEM_TISUBN 0x01bc /* 1588 Timer Increment Sub-ns */ +#define GEM_TSH 0x01c0 /* 1588 Timer Seconds High */ +#define GEM_TSL 0x01d0 /* 1588 Timer Seconds Low */ +#define GEM_TN 0x01d4 /* 1588 Timer Nanoseconds */ +#define GEM_TA 0x01d8 /* 1588 Timer Adjust */ +#define GEM_TI 0x01dc /* 1588 Timer Increment */ +#define GEM_EFTSL 0x01e0 /* PTP Event Frame Tx Seconds Low */ +#define GEM_EFTN 0x01e4 /* PTP Event Frame Tx Nanoseconds */ +#define GEM_EFRSL 0x01e8 /* PTP Event Frame Rx Seconds Low */ +#define GEM_EFRN 0x01ec /* PTP Event Frame Rx Nanoseconds */ +#define GEM_PEFTSL 0x01f0 /* PTP Peer Event Frame Tx Secs Low */ +#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */ +#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */ +#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */ +#define GEM_DCFG1 0x0280 /* Design Config 1 */ +#define GEM_DCFG2 0x0284 /* Design Config 2 */ +#define GEM_DCFG3 0x0288 /* Design Config 3 */ +#define GEM_DCFG4 0x028c /* Design Config 4 */ +#define GEM_DCFG5 0x0290 /* Design Config 5 */ +#define GEM_DCFG6 0x0294 /* Design Config 6 */ +#define GEM_DCFG7 0x0298 /* Design Config 7 */ +#define GEM_DCFG8 0x029C /* Design Config 8 */ +#define GEM_DCFG10 0x02A4 /* Design Config 10 */ + +#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ +#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ + +/* Screener Type 2 match registers */ +#define GEM_SCRT2 0x540 + +/* EtherType registers */ +#define GEM_ETHT 0x06E0 + +/* Type 2 compare registers */ +#define GEM_T2CMPW0 0x0700 +#define GEM_T2CMPW1 0x0704 +#define T2CMP_OFST(t2idx) (t2idx * 2) + +/* type 2 compare registers + * each location requires 3 compare regs + */ +#define GEM_IP4SRC_CMP(idx) (idx * 3) +#define GEM_IP4DST_CMP(idx) (idx * 3 + 1) +#define GEM_PORT_CMP(idx) (idx * 3 + 2) + +/* Which screening type 2 EtherType register will be used (0 - 7) */ +#define SCRT2_ETHT 0 + +#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2)) +#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2)) +#define GEM_TBQPH(hw_q) (0x04C8) +#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2)) +#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2)) +#define GEM_RBQPH(hw_q) (0x04D4) +#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2)) +#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2)) +#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2)) /* Bitfields in NCR */ -#define MACB_LB_OFFSET 0 -#define MACB_LB_SIZE 1 -#define MACB_LLB_OFFSET 1 -#define MACB_LLB_SIZE 1 -#define MACB_RE_OFFSET 2 -#define MACB_RE_SIZE 1 -#define MACB_TE_OFFSET 3 -#define MACB_TE_SIZE 1 -#define MACB_MPE_OFFSET 4 -#define MACB_MPE_SIZE 1 -#define MACB_CLRSTAT_OFFSET 5 -#define MACB_CLRSTAT_SIZE 1 -#define MACB_INCSTAT_OFFSET 6 -#define MACB_INCSTAT_SIZE 1 -#define MACB_WESTAT_OFFSET 7 -#define MACB_WESTAT_SIZE 1 -#define MACB_BP_OFFSET 8 -#define MACB_BP_SIZE 1 -#define MACB_TSTART_OFFSET 9 -#define MACB_TSTART_SIZE 1 -#define MACB_THALT_OFFSET 10 -#define MACB_THALT_SIZE 1 -#define MACB_NCR_TPF_OFFSET 11 -#define MACB_NCR_TPF_SIZE 1 -#define MACB_TZQ_OFFSET 12 -#define MACB_TZQ_SIZE 1 +#define MACB_LB_OFFSET 0 /* 
reserved */ +#define MACB_LB_SIZE 1 +#define MACB_LLB_OFFSET 1 /* Loop back local */ +#define MACB_LLB_SIZE 1 +#define MACB_RE_OFFSET 2 /* Receive enable */ +#define MACB_RE_SIZE 1 +#define MACB_TE_OFFSET 3 /* Transmit enable */ +#define MACB_TE_SIZE 1 +#define MACB_MPE_OFFSET 4 /* Management port enable */ +#define MACB_MPE_SIZE 1 +#define MACB_CLRSTAT_OFFSET 5 /* Clear stats regs */ +#define MACB_CLRSTAT_SIZE 1 +#define MACB_INCSTAT_OFFSET 6 /* Incremental stats regs */ +#define MACB_INCSTAT_SIZE 1 +#define MACB_WESTAT_OFFSET 7 /* Write enable stats regs */ +#define MACB_WESTAT_SIZE 1 +#define MACB_BP_OFFSET 8 /* Back pressure */ +#define MACB_BP_SIZE 1 +#define MACB_TSTART_OFFSET 9 /* Start transmission */ +#define MACB_TSTART_SIZE 1 +#define MACB_THALT_OFFSET 10 /* Transmit halt */ +#define MACB_THALT_SIZE 1 +#define MACB_NCR_TPF_OFFSET 11 /* Transmit pause frame */ +#define MACB_NCR_TPF_SIZE 1 +#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */ +#define MACB_TZQ_SIZE 1 +#define MACB_SRTSM_OFFSET 15 +#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */ +#define MACB_OSSMODE_SIZE 1 /* Bitfields in NCFGR */ -#define MACB_SPD_OFFSET 0 -#define MACB_SPD_SIZE 1 -#define MACB_FD_OFFSET 1 -#define MACB_FD_SIZE 1 -#define MACB_BIT_RATE_OFFSET 2 -#define MACB_BIT_RATE_SIZE 1 -#define MACB_JFRAME_OFFSET 3 -#define MACB_JFRAME_SIZE 1 -#define MACB_CAF_OFFSET 4 -#define MACB_CAF_SIZE 1 -#define MACB_NBC_OFFSET 5 -#define MACB_NBC_SIZE 1 -#define MACB_NCFGR_MTI_OFFSET 6 -#define MACB_NCFGR_MTI_SIZE 1 -#define MACB_UNI_OFFSET 7 -#define MACB_UNI_SIZE 1 -#define MACB_BIG_OFFSET 8 -#define MACB_BIG_SIZE 1 -#define MACB_EAE_OFFSET 9 -#define MACB_EAE_SIZE 1 -#define MACB_CLK_OFFSET 10 -#define MACB_CLK_SIZE 2 -#define MACB_RTY_OFFSET 12 -#define MACB_RTY_SIZE 1 -#define MACB_PAE_OFFSET 13 -#define MACB_PAE_SIZE 1 -#define MACB_RBOF_OFFSET 14 -#define MACB_RBOF_SIZE 2 -#define MACB_RLCE_OFFSET 16 -#define MACB_RLCE_SIZE 1 -#define MACB_DRFCS_OFFSET 17 -#define MACB_DRFCS_SIZE 1 -#define MACB_EFRHD_OFFSET 18 -#define MACB_EFRHD_SIZE 1 -#define MACB_IRXFCS_OFFSET 19 -#define MACB_IRXFCS_SIZE 1 - -#define GEM_GBE_OFFSET 10 -#define GEM_GBE_SIZE 1 -#define GEM_CLK_OFFSET 18 -#define GEM_CLK_SIZE 3 -#define GEM_DBW_OFFSET 21 -#define GEM_DBW_SIZE 2 +#define MACB_SPD_OFFSET 0 /* Speed */ +#define MACB_SPD_SIZE 1 +#define MACB_FD_OFFSET 1 /* Full duplex */ +#define MACB_FD_SIZE 1 +#define MACB_BIT_RATE_OFFSET 2 /* Discard non-VLAN frames */ +#define MACB_BIT_RATE_SIZE 1 +#define MACB_JFRAME_OFFSET 3 /* reserved */ +#define MACB_JFRAME_SIZE 1 +#define MACB_CAF_OFFSET 4 /* Copy all frames */ +#define MACB_CAF_SIZE 1 +#define MACB_NBC_OFFSET 5 /* No broadcast */ +#define MACB_NBC_SIZE 1 +#define MACB_NCFGR_MTI_OFFSET 6 /* Multicast hash enable */ +#define MACB_NCFGR_MTI_SIZE 1 +#define MACB_UNI_OFFSET 7 /* Unicast hash enable */ +#define MACB_UNI_SIZE 1 +#define MACB_BIG_OFFSET 8 /* Receive 1536 byte frames */ +#define MACB_BIG_SIZE 1 +#define MACB_EAE_OFFSET 9 /* External address match enable */ +#define MACB_EAE_SIZE 1 +#define MACB_CLK_OFFSET 10 +#define MACB_CLK_SIZE 2 +#define MACB_RTY_OFFSET 12 /* Retry test */ +#define MACB_RTY_SIZE 1 +#define MACB_PAE_OFFSET 13 /* Pause enable */ +#define MACB_PAE_SIZE 1 +#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */ +#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */ +#define MACB_RBOF_OFFSET 14 /* Receive buffer offset */ +#define MACB_RBOF_SIZE 2 +#define MACB_RLCE_OFFSET 16 /* Length field error frame discard */ +#define 
MACB_RLCE_SIZE 1 +#define MACB_DRFCS_OFFSET 17 /* FCS remove */ +#define MACB_DRFCS_SIZE 1 +#define MACB_EFRHD_OFFSET 18 +#define MACB_EFRHD_SIZE 1 +#define MACB_IRXFCS_OFFSET 19 +#define MACB_IRXFCS_SIZE 1 + +/* GEM specific NCFGR bitfields. */ +#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ +#define GEM_GBE_SIZE 1 +#define GEM_PCSSEL_OFFSET 11 +#define GEM_PCSSEL_SIZE 1 +#define GEM_CLK_OFFSET 18 /* MDC clock division */ +#define GEM_CLK_SIZE 3 +#define GEM_DBW_OFFSET 21 /* Data bus width */ +#define GEM_DBW_SIZE 2 +#define GEM_RXCOEN_OFFSET 24 +#define GEM_RXCOEN_SIZE 1 +#define GEM_SGMIIEN_OFFSET 27 +#define GEM_SGMIIEN_SIZE 1 + + +/* Constants for data bus width. */ +#define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ +#define GEM_DBW64 1 /* 64 bit AMBA AHB data bus width */ +#define GEM_DBW128 2 /* 128 bit AMBA AHB data bus width */ + +/* Bitfields in DMACFG. */ +#define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */ +#define GEM_FBLDO_SIZE 5 +#define GEM_ENDIA_DESC_OFFSET 6 /* endian swap mode for management descriptor access */ +#define GEM_ENDIA_DESC_SIZE 1 +#define GEM_ENDIA_PKT_OFFSET 7 /* endian swap mode for packet data access */ +#define GEM_ENDIA_PKT_SIZE 1 +#define GEM_RXBMS_OFFSET 8 /* RX packet buffer memory size select */ +#define GEM_RXBMS_SIZE 2 +#define GEM_TXPBMS_OFFSET 10 /* TX packet buffer memory size select */ +#define GEM_TXPBMS_SIZE 1 +#define GEM_TXCOEN_OFFSET 11 /* TX IP/TCP/UDP checksum gen offload */ +#define GEM_TXCOEN_SIZE 1 +#define GEM_RXBS_OFFSET 16 /* DMA receive buffer size */ +#define GEM_RXBS_SIZE 8 +#define GEM_DDRP_OFFSET 24 /* disc_when_no_ahb */ +#define GEM_DDRP_SIZE 1 +#define GEM_RXEXT_OFFSET 28 /* RX extended Buffer Descriptor mode */ +#define GEM_RXEXT_SIZE 1 +#define GEM_TXEXT_OFFSET 29 /* TX extended Buffer Descriptor mode */ +#define GEM_TXEXT_SIZE 1 +#define GEM_ADDR64_OFFSET 30 /* Address bus width - 64b or 32b */ +#define GEM_ADDR64_SIZE 1 + /* Bitfields in NSR */ -#define MACB_NSR_LINK_OFFSET 0 -#define MACB_NSR_LINK_SIZE 1 -#define MACB_MDIO_OFFSET 1 -#define MACB_MDIO_SIZE 1 -#define MACB_IDLE_OFFSET 2 -#define MACB_IDLE_SIZE 1 - -/* Bitfields in UR */ -#define GEM_RGMII_OFFSET 0 -#define GEM_RGMII_SIZE 1 +#define MACB_NSR_LINK_OFFSET 0 /* pcs_link_state */ +#define MACB_NSR_LINK_SIZE 1 +#define MACB_MDIO_OFFSET 1 /* status of the mdio_in pin */ +#define MACB_MDIO_SIZE 1 +#define MACB_IDLE_OFFSET 2 /* The PHY management logic is idle */ +#define MACB_IDLE_SIZE 1 /* Bitfields in TSR */ -#define MACB_UBR_OFFSET 0 -#define MACB_UBR_SIZE 1 -#define MACB_COL_OFFSET 1 -#define MACB_COL_SIZE 1 -#define MACB_TSR_RLE_OFFSET 2 -#define MACB_TSR_RLE_SIZE 1 -#define MACB_TGO_OFFSET 3 -#define MACB_TGO_SIZE 1 -#define MACB_BEX_OFFSET 4 -#define MACB_BEX_SIZE 1 -#define MACB_COMP_OFFSET 5 -#define MACB_COMP_SIZE 1 -#define MACB_UND_OFFSET 6 -#define MACB_UND_SIZE 1 +#define MACB_UBR_OFFSET 0 /* Used bit read */ +#define MACB_UBR_SIZE 1 +#define MACB_COL_OFFSET 1 /* Collision occurred */ +#define MACB_COL_SIZE 1 +#define MACB_TSR_RLE_OFFSET 2 /* Retry limit exceeded */ +#define MACB_TSR_RLE_SIZE 1 +#define MACB_TGO_OFFSET 3 /* Transmit go */ +#define MACB_TGO_SIZE 1 +#define MACB_BEX_OFFSET 4 /* TX frame corruption due to AHB error */ +#define MACB_BEX_SIZE 1 +#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */ +#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */ +#define MACB_COMP_OFFSET 5 /* Transmit complete */ +#define MACB_COMP_SIZE 1 +#define MACB_UND_OFFSET 6 /* Transmit under run */ +#define MACB_UND_SIZE 1 /* 
Bitfields in RSR */ -#define MACB_BNA_OFFSET 0 -#define MACB_BNA_SIZE 1 -#define MACB_REC_OFFSET 1 -#define MACB_REC_SIZE 1 -#define MACB_OVR_OFFSET 2 -#define MACB_OVR_SIZE 1 +#define MACB_BNA_OFFSET 0 /* Buffer not available */ +#define MACB_BNA_SIZE 1 +#define MACB_REC_OFFSET 1 /* Frame received */ +#define MACB_REC_SIZE 1 +#define MACB_OVR_OFFSET 2 /* Receive overrun */ +#define MACB_OVR_SIZE 1 /* Bitfields in ISR/IER/IDR/IMR */ -#define MACB_MFD_OFFSET 0 -#define MACB_MFD_SIZE 1 -#define MACB_RCOMP_OFFSET 1 -#define MACB_RCOMP_SIZE 1 -#define MACB_RXUBR_OFFSET 2 -#define MACB_RXUBR_SIZE 1 -#define MACB_TXUBR_OFFSET 3 -#define MACB_TXUBR_SIZE 1 -#define MACB_ISR_TUND_OFFSET 4 -#define MACB_ISR_TUND_SIZE 1 -#define MACB_ISR_RLE_OFFSET 5 -#define MACB_ISR_RLE_SIZE 1 -#define MACB_TXERR_OFFSET 6 -#define MACB_TXERR_SIZE 1 -#define MACB_TCOMP_OFFSET 7 -#define MACB_TCOMP_SIZE 1 -#define MACB_ISR_LINK_OFFSET 9 -#define MACB_ISR_LINK_SIZE 1 -#define MACB_ISR_ROVR_OFFSET 10 -#define MACB_ISR_ROVR_SIZE 1 -#define MACB_HRESP_OFFSET 11 -#define MACB_HRESP_SIZE 1 -#define MACB_PFR_OFFSET 12 -#define MACB_PFR_SIZE 1 -#define MACB_PTZ_OFFSET 13 -#define MACB_PTZ_SIZE 1 +#define MACB_MFD_OFFSET 0 /* Management frame sent */ +#define MACB_MFD_SIZE 1 +#define MACB_RCOMP_OFFSET 1 /* Receive complete */ +#define MACB_RCOMP_SIZE 1 +#define MACB_RXUBR_OFFSET 2 /* RX used bit read */ +#define MACB_RXUBR_SIZE 1 +#define MACB_TXUBR_OFFSET 3 /* TX used bit read */ +#define MACB_TXUBR_SIZE 1 +#define MACB_ISR_TUND_OFFSET 4 /* Enable TX buffer under run interrupt */ +#define MACB_ISR_TUND_SIZE 1 +#define MACB_ISR_RLE_OFFSET 5 /* EN retry exceeded/late coll interrupt */ +#define MACB_ISR_RLE_SIZE 1 +#define MACB_TXERR_OFFSET 6 /* EN TX frame corrupt from error interrupt */ +#define MACB_TXERR_SIZE 1 +#define MACB_TCOMP_OFFSET 7 /* Enable transmit complete interrupt */ +#define MACB_TCOMP_SIZE 1 +#define MACB_ISR_LINK_OFFSET 9 /* Enable link change interrupt */ +#define MACB_ISR_LINK_SIZE 1 +#define MACB_ISR_ROVR_OFFSET 10 /* Enable receive overrun interrupt */ +#define MACB_ISR_ROVR_SIZE 1 +#define MACB_HRESP_OFFSET 11 /* Enable hresp not OK interrupt */ +#define MACB_HRESP_SIZE 1 +#define MACB_PFR_OFFSET 12 /* Enable pause frame w/ quantum interrupt */ +#define MACB_PFR_SIZE 1 +#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */ +#define MACB_PTZ_SIZE 1 +#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */ +#define MACB_WOL_SIZE 1 +#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */ +#define MACB_DRQFR_SIZE 1 +#define MACB_SFR_OFFSET 19 /* PTP Sync Frame Received */ +#define MACB_SFR_SIZE 1 +#define MACB_DRQFT_OFFSET 20 /* PTP Delay Request Frame Transmitted */ +#define MACB_DRQFT_SIZE 1 +#define MACB_SFT_OFFSET 21 /* PTP Sync Frame Transmitted */ +#define MACB_SFT_SIZE 1 +#define MACB_PDRQFR_OFFSET 22 /* PDelay Request Frame Received */ +#define MACB_PDRQFR_SIZE 1 +#define MACB_PDRSFR_OFFSET 23 /* PDelay Response Frame Received */ +#define MACB_PDRSFR_SIZE 1 +#define MACB_PDRQFT_OFFSET 24 /* PDelay Request Frame Transmitted */ +#define MACB_PDRQFT_SIZE 1 +#define MACB_PDRSFT_OFFSET 25 /* PDelay Response Frame Transmitted */ +#define MACB_PDRSFT_SIZE 1 +#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */ +#define MACB_SRI_SIZE 1 + +/* Timer increment fields */ +#define MACB_TI_CNS_OFFSET 0 +#define MACB_TI_CNS_SIZE 8 +#define MACB_TI_ACNS_OFFSET 8 +#define MACB_TI_ACNS_SIZE 8 +#define MACB_TI_NIT_OFFSET 16 +#define MACB_TI_NIT_SIZE 8 /* Bitfields in MAN */ 
-#define MACB_DATA_OFFSET 0 -#define MACB_DATA_SIZE 16 -#define MACB_CODE_OFFSET 16 -#define MACB_CODE_SIZE 2 -#define MACB_REGA_OFFSET 18 -#define MACB_REGA_SIZE 5 -#define MACB_PHYA_OFFSET 23 -#define MACB_PHYA_SIZE 5 -#define MACB_RW_OFFSET 28 -#define MACB_RW_SIZE 2 -#define MACB_SOF_OFFSET 30 -#define MACB_SOF_SIZE 2 - -/* Bitfields in USRIO */ +#define MACB_DATA_OFFSET 0 /* data */ +#define MACB_DATA_SIZE 16 +#define MACB_CODE_OFFSET 16 /* Must be written to 10 */ +#define MACB_CODE_SIZE 2 +#define MACB_REGA_OFFSET 18 /* Register address */ +#define MACB_REGA_SIZE 5 +#define MACB_PHYA_OFFSET 23 /* PHY address */ +#define MACB_PHYA_SIZE 5 +#define MACB_RW_OFFSET 28 /* Operation. 10 is read. 01 is write. */ +#define MACB_RW_SIZE 2 +#define MACB_SOF_OFFSET 30 /* Must be written to 1 for Clause 22 */ +#define MACB_SOF_SIZE 2 + +/* Bitfields in USRIO (AVR32) */ #define MACB_MII_OFFSET 0 #define MACB_MII_SIZE 1 #define MACB_EAM_OFFSET 1 @@ -232,6 +421,8 @@ /* Bitfields in USRIO (AT91) */ #define MACB_RMII_OFFSET 0 #define MACB_RMII_SIZE 1 +#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */ +#define GEM_RGMII_SIZE 1 #define MACB_CLKEN_OFFSET 1 #define MACB_CLKEN_SIZE 1 @@ -249,17 +440,166 @@ /* Bitfields in MID */ #define MACB_IDNUM_OFFSET 16 -#define MACB_IDNUM_SIZE 16 +#define MACB_IDNUM_SIZE 12 +#define MACB_REV_OFFSET 0 +#define MACB_REV_SIZE 16 -/* Bitfields in DCFG1 */ +/* Bitfields in DCFG1. */ +#define GEM_IRQCOR_OFFSET 23 +#define GEM_IRQCOR_SIZE 1 #define GEM_DBWDEF_OFFSET 25 #define GEM_DBWDEF_SIZE 3 -/* constants for data bus width */ -#define GEM_DBW32 0 -#define GEM_DBW64 1 -#define GEM_DBW128 2 +/* Bitfields in DCFG2. */ +#define GEM_RX_PKT_BUFF_OFFSET 20 +#define GEM_RX_PKT_BUFF_SIZE 1 +#define GEM_TX_PKT_BUFF_OFFSET 21 +#define GEM_TX_PKT_BUFF_SIZE 1 + + +/* Bitfields in DCFG5. */ +#define GEM_TSU_OFFSET 8 +#define GEM_TSU_SIZE 1 + +/* Bitfields in DCFG6. */ +#define GEM_PBUF_LSO_OFFSET 27 +#define GEM_PBUF_LSO_SIZE 1 +#define GEM_DAW64_OFFSET 23 +#define GEM_DAW64_SIZE 1 + +/* Bitfields in DCFG8. */ +#define GEM_T1SCR_OFFSET 24 +#define GEM_T1SCR_SIZE 8 +#define GEM_T2SCR_OFFSET 16 +#define GEM_T2SCR_SIZE 8 +#define GEM_SCR2ETH_OFFSET 8 +#define GEM_SCR2ETH_SIZE 8 +#define GEM_SCR2CMP_OFFSET 0 +#define GEM_SCR2CMP_SIZE 8 + +/* Bitfields in DCFG10 */ +#define GEM_TXBD_RDBUFF_OFFSET 12 +#define GEM_TXBD_RDBUFF_SIZE 4 +#define GEM_RXBD_RDBUFF_OFFSET 8 +#define GEM_RXBD_RDBUFF_SIZE 4 + +/* Bitfields in TISUBN */ +#define GEM_SUBNSINCR_OFFSET 0 +#define GEM_SUBNSINCR_SIZE 16 + +/* Bitfields in TI */ +#define GEM_NSINCR_OFFSET 0 +#define GEM_NSINCR_SIZE 8 + +/* Bitfields in TSH */ +#define GEM_TSH_OFFSET 0 /* TSU timer value (s). MSB [47:32] of seconds timer count */ +#define GEM_TSH_SIZE 16 + +/* Bitfields in TSL */ +#define GEM_TSL_OFFSET 0 /* TSU timer value (s). 
LSB [31:0] of seconds timer count */ +#define GEM_TSL_SIZE 32 +/* Bitfields in TN */ +#define GEM_TN_OFFSET 0 /* TSU timer value (ns) */ +#define GEM_TN_SIZE 30 + +/* Bitfields in TXBDCTRL */ +#define GEM_TXTSMODE_OFFSET 4 /* TX Descriptor Timestamp Insertion mode */ +#define GEM_TXTSMODE_SIZE 2 + +/* Bitfields in RXBDCTRL */ +#define GEM_RXTSMODE_OFFSET 4 /* RX Descriptor Timestamp Insertion mode */ +#define GEM_RXTSMODE_SIZE 2 + +/* Bitfields in SCRT2 */ +#define GEM_QUEUE_OFFSET 0 /* Queue Number */ +#define GEM_QUEUE_SIZE 4 +#define GEM_VLANPR_OFFSET 4 /* VLAN Priority */ +#define GEM_VLANPR_SIZE 3 +#define GEM_VLANEN_OFFSET 8 /* VLAN Enable */ +#define GEM_VLANEN_SIZE 1 +#define GEM_ETHT2IDX_OFFSET 9 /* Index to screener type 2 EtherType register */ +#define GEM_ETHT2IDX_SIZE 3 +#define GEM_ETHTEN_OFFSET 12 /* EtherType Enable */ +#define GEM_ETHTEN_SIZE 1 +#define GEM_CMPA_OFFSET 13 /* Compare A - Index to screener type 2 Compare register */ +#define GEM_CMPA_SIZE 5 +#define GEM_CMPAEN_OFFSET 18 /* Compare A Enable */ +#define GEM_CMPAEN_SIZE 1 +#define GEM_CMPB_OFFSET 19 /* Compare B - Index to screener type 2 Compare register */ +#define GEM_CMPB_SIZE 5 +#define GEM_CMPBEN_OFFSET 24 /* Compare B Enable */ +#define GEM_CMPBEN_SIZE 1 +#define GEM_CMPC_OFFSET 25 /* Compare C - Index to screener type 2 Compare register */ +#define GEM_CMPC_SIZE 5 +#define GEM_CMPCEN_OFFSET 30 /* Compare C Enable */ +#define GEM_CMPCEN_SIZE 1 + +/* Bitfields in ETHT */ +#define GEM_ETHTCMP_OFFSET 0 /* EtherType compare value */ +#define GEM_ETHTCMP_SIZE 16 + +/* Bitfields in T2CMPW0 */ +#define GEM_T2CMP_OFFSET 16 /* 0xFFFF0000 compare value */ +#define GEM_T2CMP_SIZE 16 +#define GEM_T2MASK_OFFSET 0 /* 0x0000FFFF compare value or mask */ +#define GEM_T2MASK_SIZE 16 + +/* Bitfields in T2CMPW1 */ +#define GEM_T2DISMSK_OFFSET 9 /* disable mask */ +#define GEM_T2DISMSK_SIZE 1 +#define GEM_T2CMPOFST_OFFSET 7 /* compare offset */ +#define GEM_T2CMPOFST_SIZE 2 +#define GEM_T2OFST_OFFSET 0 /* offset value */ +#define GEM_T2OFST_SIZE 7 + +/* Offset for screener type 2 compare values (T2CMPOFST). + * Note the offset is applied after the specified point, + * e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset + * of 12 bytes from this would be the source IP address in an IP header + */ +#define GEM_T2COMPOFST_SOF 0 +#define GEM_T2COMPOFST_ETYPE 1 +#define GEM_T2COMPOFST_IPHDR 2 +#define GEM_T2COMPOFST_TCPUDP 3 + +/* offset from EtherType to IP address */ +#define ETYPE_SRCIP_OFFSET 12 +#define ETYPE_DSTIP_OFFSET 16 + +/* offset from IP header to port */ +#define IPHDR_SRCPORT_OFFSET 0 +#define IPHDR_DSTPORT_OFFSET 2 + +/* Transmit DMA buffer descriptor Word 1 */ +#define GEM_DMA_TXVALID_OFFSET 23 /* timestamp has been captured in the Buffer Descriptor */ +#define GEM_DMA_TXVALID_SIZE 1 + +/* Receive DMA buffer descriptor Word 0 */ +#define GEM_DMA_RXVALID_OFFSET 2 /* indicates a valid timestamp in the Buffer Descriptor */ +#define GEM_DMA_RXVALID_SIZE 1 + +/* DMA buffer descriptor Word 2 (32 bit addressing) or Word 4 (64 bit addressing) */ +#define GEM_DMA_SECL_OFFSET 30 /* Timestamp seconds[1:0] */ +#define GEM_DMA_SECL_SIZE 2 +#define GEM_DMA_NSEC_OFFSET 0 /* Timestamp nanosecs [29:0] */ +#define GEM_DMA_NSEC_SIZE 30 + +/* DMA buffer descriptor Word 3 (32 bit addressing) or Word 5 (64 bit addressing) */ + +/* New hardware supports 12 bit precision of timestamp in DMA buffer descriptor. + * Old hardware supports only 6 bit precision but it is enough for PTP. 
+ * Less accuracy is used always instead of checking hardware version. + */ +#define GEM_DMA_SECH_OFFSET 0 /* Timestamp seconds[5:2] */ +#define GEM_DMA_SECH_SIZE 4 +#define GEM_DMA_SEC_WIDTH (GEM_DMA_SECH_SIZE + GEM_DMA_SECL_SIZE) +#define GEM_DMA_SEC_TOP (1 << GEM_DMA_SEC_WIDTH) +#define GEM_DMA_SEC_MASK (GEM_DMA_SEC_TOP - 1) + +/* Bitfields in ADJ */ +#define GEM_ADDSUB_OFFSET 31 +#define GEM_ADDSUB_SIZE 1 /* Constants for CLK */ #define MACB_CLK_DIV8 0 #define MACB_CLK_DIV16 1 @@ -273,6 +613,8 @@ #define GEM_CLK_DIV48 3 #define GEM_CLK_DIV64 4 #define GEM_CLK_DIV96 5 +#define GEM_CLK_DIV128 6 +#define GEM_CLK_DIV224 7 /* Constants for MAN register */ #define MACB_MAN_SOF 1 @@ -280,19 +622,38 @@ #define MACB_MAN_READ 2 #define MACB_MAN_CODE 2 +/* Capability mask bits */ +#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 +#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 +#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 +#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 +#define MACB_CAPS_USRIO_DISABLED 0x00000010 +#define MACB_CAPS_JUMBO 0x00000020 +#define MACB_CAPS_GEM_HAS_PTP 0x00000040 +#define MACB_CAPS_BD_RD_PREFETCH 0x00000080 +#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 +#define MACB_CAPS_FIFO_MODE 0x10000000 +#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 +#define MACB_CAPS_SG_DISABLED 0x40000000 +#define MACB_CAPS_MACB_IS_GEM 0x80000000 + +/* LSO settings */ +#define MACB_LSO_UFO_ENABLE 0x01 +#define MACB_LSO_TSO_ENABLE 0x02 + /* Bit manipulation macros */ #define MACB_BIT(name) \ (1 << MACB_##name##_OFFSET) -#define MACB_BF(name, value) \ +#define MACB_BF(name,value) \ (((value) & ((1 << MACB_##name##_SIZE) - 1)) \ << MACB_##name##_OFFSET) -#define MACB_BFEXT(name, value)\ +#define MACB_BFEXT(name,value)\ (((value) >> MACB_##name##_OFFSET) \ & ((1 << MACB_##name##_SIZE) - 1)) -#define MACB_BFINS(name, value, old) \ +#define MACB_BFINS(name,value,old) \ (((old) & ~(((1 << MACB_##name##_SIZE) - 1) \ << MACB_##name##_OFFSET)) \ - | MACB_BF(name, value)) + | MACB_BF(name,value)) #define GEM_BIT(name) \ (1 << GEM_##name##_OFFSET) @@ -316,6 +677,95 @@ readl((port)->regs + GEM_##reg) #define gem_writel(port, reg, value) \ writel((value), (port)->regs + GEM_##reg) + +/* DMA descriptor bitfields */ +#define MACB_RX_USED_OFFSET 0 +#define MACB_RX_USED_SIZE 1 +#define MACB_RX_WRAP_OFFSET 1 +#define MACB_RX_WRAP_SIZE 1 +#define MACB_RX_WADDR_OFFSET 2 +#define MACB_RX_WADDR_SIZE 30 + +#define MACB_RX_FRMLEN_OFFSET 0 +#define MACB_RX_FRMLEN_SIZE 12 +#define MACB_RX_OFFSET_OFFSET 12 +#define MACB_RX_OFFSET_SIZE 2 +#define MACB_RX_SOF_OFFSET 14 +#define MACB_RX_SOF_SIZE 1 +#define MACB_RX_EOF_OFFSET 15 +#define MACB_RX_EOF_SIZE 1 +#define MACB_RX_CFI_OFFSET 16 +#define MACB_RX_CFI_SIZE 1 +#define MACB_RX_VLAN_PRI_OFFSET 17 +#define MACB_RX_VLAN_PRI_SIZE 3 +#define MACB_RX_PRI_TAG_OFFSET 20 +#define MACB_RX_PRI_TAG_SIZE 1 +#define MACB_RX_VLAN_TAG_OFFSET 21 +#define MACB_RX_VLAN_TAG_SIZE 1 +#define MACB_RX_TYPEID_MATCH_OFFSET 22 +#define MACB_RX_TYPEID_MATCH_SIZE 1 +#define MACB_RX_SA4_MATCH_OFFSET 23 +#define MACB_RX_SA4_MATCH_SIZE 1 +#define MACB_RX_SA3_MATCH_OFFSET 24 +#define MACB_RX_SA3_MATCH_SIZE 1 +#define MACB_RX_SA2_MATCH_OFFSET 25 +#define MACB_RX_SA2_MATCH_SIZE 1 +#define MACB_RX_SA1_MATCH_OFFSET 26 +#define MACB_RX_SA1_MATCH_SIZE 1 +#define MACB_RX_EXT_MATCH_OFFSET 28 +#define MACB_RX_EXT_MATCH_SIZE 1 +#define MACB_RX_UHASH_MATCH_OFFSET 29 +#define MACB_RX_UHASH_MATCH_SIZE 1 +#define MACB_RX_MHASH_MATCH_OFFSET 30 +#define MACB_RX_MHASH_MATCH_SIZE 1 +#define MACB_RX_BROADCAST_OFFSET 
31 +#define MACB_RX_BROADCAST_SIZE 1 + +#define MACB_RX_FRMLEN_MASK 0xFFF +#define MACB_RX_JFRMLEN_MASK 0x3FFF + +/* RX checksum offload disabled: bit 24 clear in NCFGR */ +#define GEM_RX_TYPEID_MATCH_OFFSET 22 +#define GEM_RX_TYPEID_MATCH_SIZE 2 + +/* RX checksum offload enabled: bit 24 set in NCFGR */ +#define GEM_RX_CSUM_OFFSET 22 +#define GEM_RX_CSUM_SIZE 2 + +#define MACB_TX_FRMLEN_OFFSET 0 +#define MACB_TX_FRMLEN_SIZE 11 +#define MACB_TX_LAST_OFFSET 15 +#define MACB_TX_LAST_SIZE 1 +#define MACB_TX_NOCRC_OFFSET 16 +#define MACB_TX_NOCRC_SIZE 1 +#define MACB_MSS_MFS_OFFSET 16 +#define MACB_MSS_MFS_SIZE 14 +#define MACB_TX_LSO_OFFSET 17 +#define MACB_TX_LSO_SIZE 2 +#define MACB_TX_TCP_SEQ_SRC_OFFSET 19 +#define MACB_TX_TCP_SEQ_SRC_SIZE 1 +#define MACB_TX_BUF_EXHAUSTED_OFFSET 27 +#define MACB_TX_BUF_EXHAUSTED_SIZE 1 +#define MACB_TX_UNDERRUN_OFFSET 28 +#define MACB_TX_UNDERRUN_SIZE 1 +#define MACB_TX_ERROR_OFFSET 29 +#define MACB_TX_ERROR_SIZE 1 +#define MACB_TX_WRAP_OFFSET 30 +#define MACB_TX_WRAP_SIZE 1 +#define MACB_TX_USED_OFFSET 31 +#define MACB_TX_USED_SIZE 1 + +#define GEM_TX_FRMLEN_OFFSET 0 +#define GEM_TX_FRMLEN_SIZE 14 + +/* Buffer descriptor constants */ +#define GEM_RX_CSUM_NONE 0 +#define GEM_RX_CSUM_IP_ONLY 1 +#define GEM_RX_CSUM_IP_TCP 2 +#define GEM_RX_CSUM_IP_UDP 3 + +/* limit RX checksum offload to TCP and UDP packets */ +#define GEM_RX_CSUM_CHECKED_MASK 2 #define gem_writel_queue_TBQP(port, value, queue_num) \ writel((value), (port)->regs + GEM_TBQP(queue_num)) diff --git a/drivers/net/mdio_mux_sandbox.c b/drivers/net/mdio_mux_sandbox.c new file mode 100644 index 0000000000..3dba4d18a1 --- /dev/null +++ b/drivers/net/mdio_mux_sandbox.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * (C) Copyright 2019 + * Alex Marginean, NXP + */ + +#include <dm.h> +#include <errno.h> +#include <miiphy.h> + +/* macros copied over from mdio_sandbox.c */ +#define SANDBOX_PHY_ADDR 5 +#define SANDBOX_PHY_REG_CNT 2 + +struct mdio_mux_sandbox_priv { + int enabled; + int sel; +}; + +static int mdio_mux_sandbox_mark_selection(struct udevice *dev, int sel) +{ + struct udevice *mdio; + struct mdio_ops *ops; + int err; + + /* + * find the sandbox parent mdio and write a register on the PHY there + * so the mux test can verify selection. 
+ */ + err = uclass_get_device_by_name(UCLASS_MDIO, "mdio-test", &mdio); + if (err) + return err; + ops = mdio_get_ops(mdio); + return ops->write(mdio, SANDBOX_PHY_ADDR, MDIO_DEVAD_NONE, + SANDBOX_PHY_REG_CNT - 1, (u16)sel); +} + +static int mdio_mux_sandbox_select(struct udevice *dev, int cur, int sel) +{ + struct mdio_mux_sandbox_priv *priv = dev_get_priv(dev); + + if (!priv->enabled) + return -ENODEV; + + if (cur != priv->sel) + return -EINVAL; + + priv->sel = sel; + mdio_mux_sandbox_mark_selection(dev, priv->sel); + + return 0; +} + +static int mdio_mux_sandbox_deselect(struct udevice *dev, int sel) +{ + struct mdio_mux_sandbox_priv *priv = dev_get_priv(dev); + + if (!priv->enabled) + return -ENODEV; + + if (sel != priv->sel) + return -EINVAL; + + priv->sel = -1; + mdio_mux_sandbox_mark_selection(dev, priv->sel); + + return 0; +} + +static const struct mdio_mux_ops mdio_mux_sandbox_ops = { + .select = mdio_mux_sandbox_select, + .deselect = mdio_mux_sandbox_deselect, +}; + +static int mdio_mux_sandbox_probe(struct udevice *dev) +{ + struct mdio_mux_sandbox_priv *priv = dev_get_priv(dev); + + priv->enabled = 1; + priv->sel = -1; + + return 0; +} + +static const struct udevice_id mdio_mux_sandbox_ids[] = { + { .compatible = "sandbox,mdio-mux" }, + { } +}; + +U_BOOT_DRIVER(mdio_mux_sandbox) = { + .name = "mdio_mux_sandbox", + .id = UCLASS_MDIO_MUX, + .of_match = mdio_mux_sandbox_ids, + .probe = mdio_mux_sandbox_probe, + .ops = &mdio_mux_sandbox_ops, + .priv_auto_alloc_size = sizeof(struct mdio_mux_sandbox_priv), +}; diff --git a/drivers/net/mdio_sandbox.c b/drivers/net/mdio_sandbox.c index 07515e078c..df053f5381 100644 --- a/drivers/net/mdio_sandbox.c +++ b/drivers/net/mdio_sandbox.c @@ -9,11 +9,11 @@ #include <miiphy.h> #define SANDBOX_PHY_ADDR 5 -#define SANDBOX_PHY_REG 0 +#define SANDBOX_PHY_REG_CNT 2 struct mdio_sandbox_priv { int enabled; - u16 reg; + u16 reg[SANDBOX_PHY_REG_CNT]; }; static int mdio_sandbox_read(struct udevice *dev, int addr, int devad, int reg) @@ -27,10 +27,10 @@ static int mdio_sandbox_read(struct udevice *dev, int addr, int devad, int reg) return -ENODEV; if (devad != MDIO_DEVAD_NONE) return -ENODEV; - if (reg != SANDBOX_PHY_REG) + if (reg < 0 || reg > SANDBOX_PHY_REG_CNT) return -ENODEV; - return priv->reg; + return priv->reg[reg]; } static int mdio_sandbox_write(struct udevice *dev, int addr, int devad, int reg, @@ -45,10 +45,10 @@ static int mdio_sandbox_write(struct udevice *dev, int addr, int devad, int reg, return -ENODEV; if (devad != MDIO_DEVAD_NONE) return -ENODEV; - if (reg != SANDBOX_PHY_REG) + if (reg < 0 || reg > SANDBOX_PHY_REG_CNT) return -ENODEV; - priv->reg = val; + priv->reg[reg] = val; return 0; } @@ -56,8 +56,10 @@ static int mdio_sandbox_write(struct udevice *dev, int addr, int devad, int reg, static int mdio_sandbox_reset(struct udevice *dev) { struct mdio_sandbox_priv *priv = dev_get_priv(dev); + int i; - priv->reg = 0; + for (i = 0; i < SANDBOX_PHY_REG_CNT; i++) + priv->reg[i] = 0; return 0; } diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index 5c3298d612..465ec2d342 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -461,6 +461,19 @@ struct phy_driver aqr107_driver = { .shutdown = &gen10g_shutdown, }; +struct phy_driver aqr112_driver = { + .name = "Aquantia AQR112", + .uid = 0x3a1b660, + .mask = 0xfffffff0, + .features = PHY_10G_FEATURES, + .mmds = (MDIO_MMD_PMAPMD | MDIO_MMD_PCS | + MDIO_MMD_PHYXS | MDIO_MMD_AN | + MDIO_MMD_VEND1), + .config = &aquantia_config, + .startup = 
&aquantia_startup, + .shutdown = &gen10g_shutdown, +}; + struct phy_driver aqr405_driver = { .name = "Aquantia AQR405", .uid = 0x3a1b4b2, @@ -474,6 +487,19 @@ struct phy_driver aqr405_driver = { .shutdown = &gen10g_shutdown, }; +struct phy_driver aqr412_driver = { + .name = "Aquantia AQR412", + .uid = 0x3a1b710, + .mask = 0xfffffff0, + .features = PHY_10G_FEATURES, + .mmds = (MDIO_MMD_PMAPMD | MDIO_MMD_PCS | + MDIO_MMD_PHYXS | MDIO_MMD_AN | + MDIO_MMD_VEND1), + .config = &aquantia_config, + .startup = &aquantia_startup, + .shutdown = &gen10g_shutdown, +}; + int phy_aquantia_init(void) { phy_register(&aq1202_driver); @@ -481,7 +507,9 @@ int phy_aquantia_init(void) phy_register(&aqr105_driver); phy_register(&aqr106_driver); phy_register(&aqr107_driver); + phy_register(&aqr112_driver); phy_register(&aqr405_driver); + phy_register(&aqr412_driver); return 0; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c1c1af9abd..ae37dd6c1e 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -727,12 +727,23 @@ static struct phy_device *create_phy_by_mask(struct mii_dev *bus, while (phy_mask) { int addr = ffs(phy_mask) - 1; int r = get_phy_id(bus, addr, devad, &phy_id); + + /* + * If the PHY ID is flat 0 we ignore it. There are C45 PHYs + * that return all 0s for C22 reads (like Aquantia AQR112) and + * there are C22 PHYs that return all 0s for C45 reads (like + * Atheros AR8035). + */ + if (r == 0 && phy_id == 0) + goto next; + /* If the PHY ID is mostly f's, we didn't find anything */ if (r == 0 && (phy_id & 0x1fffffff) != 0x1fffffff) { is_c45 = (devad == MDIO_DEVAD_NONE) ? false : true; return phy_device_create(bus, addr, phy_id, is_c45, interface); } +next: phy_mask &= ~(1 << addr); } return NULL; diff --git a/drivers/net/sun8i_emac.c b/drivers/net/sun8i_emac.c index c0a440886e..0629b16e57 100644 --- a/drivers/net/sun8i_emac.c +++ b/drivers/net/sun8i_emac.c @@ -846,31 +846,44 @@ static const struct eth_ops sun8i_emac_eth_ops = { static int sun8i_get_ephy_nodes(struct emac_eth_dev *priv) { - int node, ret; + int emac_node, ephy_node, ret, ephy_handle; + + emac_node = fdt_path_offset(gd->fdt_blob, + "/soc/ethernet@1c30000"); + if (emac_node < 0) { + debug("failed to get emac node\n"); + return emac_node; + } + ephy_handle = fdtdec_lookup_phandle(gd->fdt_blob, + emac_node, "phy-handle"); /* look for mdio-mux node for internal PHY node */ - node = fdt_path_offset(gd->fdt_blob, - "/soc/ethernet@1c30000/mdio-mux/mdio@1/ethernet-phy@1"); - if (node < 0) { + ephy_node = fdt_path_offset(gd->fdt_blob, + "/soc/ethernet@1c30000/mdio-mux/mdio@1/ethernet-phy@1"); + if (ephy_node < 0) { debug("failed to get mdio-mux with internal PHY\n"); - return node; + return ephy_node; } - ret = fdt_node_check_compatible(gd->fdt_blob, node, + /* This is not the phy we are looking for */ + if (ephy_node != ephy_handle) + return 0; + + ret = fdt_node_check_compatible(gd->fdt_blob, ephy_node, "allwinner,sun8i-h3-mdio-internal"); if (ret < 0) { debug("failed to find mdio-internal node\n"); return ret; } - ret = clk_get_by_index_nodev(offset_to_ofnode(node), 0, + ret = clk_get_by_index_nodev(offset_to_ofnode(ephy_node), 0, &priv->ephy_clk); if (ret) { dev_err(dev, "failed to get EPHY TX clock\n"); return ret; } - ret = reset_get_by_index_nodev(offset_to_ofnode(node), 0, + ret = reset_get_by_index_nodev(offset_to_ofnode(ephy_node), 0, &priv->ephy_rst); if (ret) { dev_err(dev, "failed to get EPHY TX reset\n"); diff --git a/drivers/net/ti/davinci_emac.c b/drivers/net/ti/davinci_emac.c index 
9d53984973..2bd9c51079 100644 --- a/drivers/net/ti/davinci_emac.c +++ b/drivers/net/ti/davinci_emac.c @@ -26,7 +26,6 @@ #include <net.h> #include <miiphy.h> #include <malloc.h> -#include <netdev.h> #include <linux/compiler.h> #include <asm/arch/emac_defs.h> #include <asm/io.h> @@ -107,8 +106,9 @@ static u_int8_t num_phy; phy_t phy[CONFIG_SYS_DAVINCI_EMAC_PHY_COUNT]; -static int davinci_eth_set_mac_addr(struct eth_device *dev) +static int davinci_emac_write_hwaddr(struct udevice *dev) { + struct eth_pdata *pdata = dev_get_platdata(dev); unsigned long mac_hi; unsigned long mac_lo; @@ -118,12 +118,12 @@ static int davinci_eth_set_mac_addr(struct eth_device *dev) * Using channel 0 only - other channels are disabled * */ writel(0, &adap_emac->MACINDEX); - mac_hi = (dev->enetaddr[3] << 24) | - (dev->enetaddr[2] << 16) | - (dev->enetaddr[1] << 8) | - (dev->enetaddr[0]); - mac_lo = (dev->enetaddr[5] << 8) | - (dev->enetaddr[4]); + mac_hi = (pdata->enetaddr[3] << 24) | + (pdata->enetaddr[2] << 16) | + (pdata->enetaddr[1] << 8) | + (pdata->enetaddr[0]); + mac_lo = (pdata->enetaddr[5] << 8) | + (pdata->enetaddr[4]); writel(mac_hi, &adap_emac->MACADDRHI); #if defined(DAVINCI_EMAC_VERSION2) @@ -411,7 +411,7 @@ static void __attribute__((unused)) davinci_eth_gigabit_enable(int phy_addr) } /* Eth device open */ -static int davinci_eth_open(struct eth_device *dev, bd_t *bis) +static int davinci_emac_start(struct udevice *dev) { dv_reg_p addr; u_int32_t clkdiv, cnt, mac_control; @@ -447,7 +447,7 @@ static int davinci_eth_open(struct eth_device *dev, bd_t *bis) writel(1, &adap_emac->TXCONTROL); writel(1, &adap_emac->RXCONTROL); - davinci_eth_set_mac_addr(dev); + davinci_emac_write_hwaddr(dev); /* Set DMA 8 TX / 8 RX Head pointers to 0 */ addr = &adap_emac->TX0HDP; @@ -588,7 +588,7 @@ static void davinci_eth_ch_teardown(int ch) } /* Eth device close */ -static void davinci_eth_close(struct eth_device *dev) +static void davinci_emac_stop(struct udevice *dev) { debug_emac("+ emac_close\n"); @@ -619,8 +619,8 @@ static int tx_send_loop = 0; * This function sends a single packet on the network and returns * positive number (number of bytes transmitted) or negative for error */ -static int davinci_eth_send_packet (struct eth_device *dev, - void *packet, int length) +static int davinci_emac_send(struct udevice *dev, + void *packet, int length) { int ret_status = -1; int index; @@ -672,7 +672,7 @@ static int davinci_eth_send_packet (struct eth_device *dev, /* * This function handles receipt of a packet from the network */ -static int davinci_eth_rcv_packet (struct eth_device *dev) +static int davinci_emac_recv(struct udevice *dev, int flags, uchar **packetp) { volatile emac_desc *rx_curr_desc; volatile emac_desc *curr_desc; @@ -682,6 +682,7 @@ static int davinci_eth_rcv_packet (struct eth_device *dev) rx_curr_desc = emac_rx_active_head; if (!rx_curr_desc) return 0; + *packetp = rx_curr_desc->buffer; status = rx_curr_desc->pkt_flag_len; if ((status & EMAC_CPPI_OWNERSHIP_BIT) == 0) { if (status & EMAC_CPPI_RX_ERROR_FRAME) { @@ -693,7 +694,6 @@ static int davinci_eth_rcv_packet (struct eth_device *dev) rx_curr_desc->buff_off_len & 0xffff; invalidate_dcache_range(tmp, tmp + ALIGN(len, PKTALIGN)); - net_process_received_packet(rx_curr_desc->buffer, len); ret = len; } @@ -742,6 +742,7 @@ static int davinci_eth_rcv_packet (struct eth_device *dev) } return (ret); } + return (0); } @@ -750,30 +751,12 @@ static int davinci_eth_rcv_packet (struct eth_device *dev) * EMAC modules power or pin multiplexors, that is done by 
board_init() * much earlier in bootup process. Returns 1 on success, 0 otherwise. */ -int davinci_emac_initialize(void) +static int davinci_emac_probe(struct udevice *dev) { u_int32_t phy_id; u_int16_t tmp; int i; int ret; - struct eth_device *dev; - - dev = malloc(sizeof *dev); - - if (dev == NULL) - return -1; - - memset(dev, 0, sizeof *dev); - strcpy(dev->name, "DaVinci-EMAC"); - - dev->iobase = 0; - dev->init = davinci_eth_open; - dev->halt = davinci_eth_close; - dev->send = davinci_eth_send_packet; - dev->recv = davinci_eth_rcv_packet; - dev->write_hwaddr = davinci_eth_set_mac_addr; - - eth_register(dev); davinci_eth_mdio_enable(); @@ -854,5 +837,29 @@ int davinci_emac_initialize(void) phy[i].auto_negotiate(i); } #endif - return(1); + return 0; } + +static const struct eth_ops davinci_emac_ops = { + .start = davinci_emac_start, + .send = davinci_emac_send, + .recv = davinci_emac_recv, + .stop = davinci_emac_stop, + .write_hwaddr = davinci_emac_write_hwaddr, +}; + +static const struct udevice_id davinci_emac_ids[] = { + { .compatible = "ti,davinci-dm6467-emac" }, + { .compatible = "ti,am3517-emac", }, + { .compatible = "ti,dm816-emac", }, + { } +}; + +U_BOOT_DRIVER(davinci_emac) = { + .name = "davinci_emac", + .id = UCLASS_ETH, + .of_match = davinci_emac_ids, + .probe = davinci_emac_probe, + .ops = &davinci_emac_ops, + .platdata_auto_alloc_size = sizeof(struct eth_pdata), +}; diff --git a/drivers/net/tsec.c b/drivers/net/tsec.c index 06a9b4fb03..f85cdcb97e 100644 --- a/drivers/net/tsec.c +++ b/drivers/net/tsec.c @@ -259,8 +259,8 @@ static int tsec_send(struct udevice *dev, void *packet, int length) { struct tsec_private *priv = (struct tsec_private *)dev->priv; struct tsec __iomem *regs = priv->regs; - u16 status; int result = 0; + u16 status; int i; /* Find an empty buffer descriptor */ @@ -268,7 +268,7 @@ static int tsec_send(struct udevice *dev, void *packet, int length) in_be16(&priv->txbd[priv->tx_idx].status) & TXBD_READY; i++) { if (i >= TOUT_LOOP) { - debug("%s: tsec: tx buffers full\n", dev->name); + printf("%s: tsec: tx buffers full\n", dev->name); return result; } } @@ -287,7 +287,7 @@ static int tsec_send(struct udevice *dev, void *packet, int length) in_be16(&priv->txbd[priv->tx_idx].status) & TXBD_READY; i++) { if (i >= TOUT_LOOP) { - debug("%s: tsec: tx error\n", dev->name); + printf("%s: tsec: tx error\n", dev->name); return result; } } @@ -560,6 +560,8 @@ static int tsec_init(struct udevice *dev) struct tsec_private *priv = (struct tsec_private *)dev->priv; #ifdef CONFIG_DM_ETH struct eth_pdata *pdata = dev_get_platdata(dev); +#else + struct eth_device *pdata = dev; #endif struct tsec __iomem *regs = priv->regs; u32 tempval; @@ -580,21 +582,12 @@ static int tsec_init(struct udevice *dev) * order (BE), MACnADDR1 is set to 0xCDAB7856 and * MACnADDR2 is set to 0x34120000. 
*/ -#ifndef CONFIG_DM_ETH - tempval = (dev->enetaddr[5] << 24) | (dev->enetaddr[4] << 16) | - (dev->enetaddr[3] << 8) | dev->enetaddr[2]; -#else tempval = (pdata->enetaddr[5] << 24) | (pdata->enetaddr[4] << 16) | (pdata->enetaddr[3] << 8) | pdata->enetaddr[2]; -#endif out_be32(®s->macstnaddr1, tempval); -#ifndef CONFIG_DM_ETH - tempval = (dev->enetaddr[1] << 24) | (dev->enetaddr[0] << 16); -#else tempval = (pdata->enetaddr[1] << 24) | (pdata->enetaddr[0] << 16); -#endif out_be32(®s->macstnaddr2, tempval); @@ -708,9 +701,9 @@ static int init_phy(struct tsec_private *priv) */ static int tsec_initialize(bd_t *bis, struct tsec_info_struct *tsec_info) { + struct tsec_private *priv; struct eth_device *dev; int i; - struct tsec_private *priv; dev = (struct eth_device *)malloc(sizeof(*dev)); @@ -794,12 +787,14 @@ int tsec_standard_init(bd_t *bis) #else /* CONFIG_DM_ETH */ int tsec_probe(struct udevice *dev) { - struct tsec_private *priv = dev_get_priv(dev); struct eth_pdata *pdata = dev_get_platdata(dev); - struct fsl_pq_mdio_info mdio_info; + struct tsec_private *priv = dev_get_priv(dev); struct ofnode_phandle_args phandle_args; - ofnode parent; + u32 tbiaddr = CONFIG_SYS_TBIPA_VALUE; + struct fsl_pq_mdio_info mdio_info; const char *phy_mode; + fdt_addr_t reg; + ofnode parent; int ret; pdata->iobase = (phys_addr_t)dev_read_addr(dev); @@ -807,7 +802,7 @@ int tsec_probe(struct udevice *dev) if (dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, &phandle_args)) { - debug("phy-handle does not exist under tsec %s\n", dev->name); + printf("phy-handle does not exist under tsec %s\n", dev->name); return -ENOENT; } else { int reg = ofnode_read_u32_default(phandle_args.node, "reg", 0); @@ -816,29 +811,27 @@ int tsec_probe(struct udevice *dev) } parent = ofnode_get_parent(phandle_args.node); - if (ofnode_valid(parent)) { - int reg = ofnode_get_addr_index(parent, 0); - - priv->phyregs_sgmii = (struct tsec_mii_mng *)reg; - } else { - debug("No parent node for PHY?\n"); + if (!ofnode_valid(parent)) { + printf("No parent node for PHY?\n"); return -ENOENT; } - if (dev_read_phandle_with_args(dev, "tbi-handle", NULL, 0, 0, - &phandle_args)) { - priv->tbiaddr = CONFIG_SYS_TBIPA_VALUE; - } else { - int reg = ofnode_read_u32_default(phandle_args.node, "reg", - CONFIG_SYS_TBIPA_VALUE); - priv->tbiaddr = reg; - } + reg = ofnode_get_addr_index(parent, 0); + priv->phyregs_sgmii = (struct tsec_mii_mng *) + (reg + TSEC_MDIO_REGS_OFFSET); + + ret = dev_read_phandle_with_args(dev, "tbi-handle", NULL, 0, 0, + &phandle_args); + if (ret == 0) + ofnode_read_u32(phandle_args.node, "reg", &tbiaddr); + + priv->tbiaddr = tbiaddr; phy_mode = dev_read_prop(dev, "phy-connection-type", NULL); if (phy_mode) pdata->phy_interface = phy_get_interface_by_name(phy_mode); if (pdata->phy_interface == -1) { - debug("Invalid PHY interface '%s'\n", phy_mode); + printf("Invalid PHY interface '%s'\n", phy_mode); return -EINVAL; } priv->interface = pdata->phy_interface; @@ -887,7 +880,7 @@ static const struct eth_ops tsec_ops = { }; static const struct udevice_id tsec_ids[] = { - { .compatible = "fsl,tsec" }, + { .compatible = "fsl,etsec2" }, { } }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 938cc75496..3004335c57 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -14,6 +14,7 @@ #include "pinctrl-mtk-common.h" +#if CONFIG_IS_ENABLED(PINCONF) /** * struct mtk_drive_desc - the structure that holds the information * 
of the driving current @@ -39,6 +40,7 @@ static const struct mtk_drive_desc mtk_drive[] = { [DRV_GRP3] = { 2, 8, 2, 2 }, [DRV_GRP4] = { 2, 16, 2, 1 }, }; +#endif static const char *mtk_pinctrl_dummy_name = "_dummy"; diff --git a/drivers/pinctrl/nxp/Kconfig b/drivers/pinctrl/nxp/Kconfig index 61f93be42d..f2e67ca231 100644 --- a/drivers/pinctrl/nxp/Kconfig +++ b/drivers/pinctrl/nxp/Kconfig @@ -89,6 +89,16 @@ config PINCTRL_IMX8M only parses the 'fsl,pins' property and configure related registers. +config PINCTRL_MXS + bool "NXP MXS pinctrl driver" + depends on ARCH_MX28 && PINCTRL_FULL + help + Say Y here to enable the i.MX mxs pinctrl driver. + + This option provides a simple pinctrl driver for the i.MX mxs SoC + family, e.g. i.MX28. This feature depends on device tree + configuration. + config PINCTRL_VYBRID bool "Vybrid (vf610) pinctrl driver" depends on ARCH_VF610 && PINCTRL_FULL diff --git a/drivers/pinctrl/nxp/Makefile b/drivers/pinctrl/nxp/Makefile index b340d9448a..b86448aac9 100644 --- a/drivers/pinctrl/nxp/Makefile +++ b/drivers/pinctrl/nxp/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o obj-$(CONFIG_PINCTRL_IMX_SCU) += pinctrl-scu.o obj-$(CONFIG_PINCTRL_IMX8) += pinctrl-imx8.o obj-$(CONFIG_PINCTRL_IMX8M) += pinctrl-imx8m.o +obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o obj-$(CONFIG_PINCTRL_VYBRID) += pinctrl-vf610.o diff --git a/drivers/pinctrl/nxp/pinctrl-mxs.c b/drivers/pinctrl/nxp/pinctrl-mxs.c new file mode 100644 index 0000000000..6f6ca84674 --- /dev/null +++ b/drivers/pinctrl/nxp/pinctrl-mxs.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + */ + +#include <common.h> +#include <linux/io.h> +#include <linux/err.h> +#include <dm.h> +#include <dm/pinctrl.h> +#include <dm/read.h> +#include "pinctrl-mxs.h" + +DECLARE_GLOBAL_DATA_PTR; + +struct mxs_pinctrl_priv { + void __iomem *base; + const struct mxs_regs *regs; +}; + +static unsigned long mxs_dt_node_to_map(struct udevice *conf) +{ + unsigned long config = 0; + int ret; + u32 val; + + ret = dev_read_u32(conf, "fsl,drive-strength", &val); + if (!ret) + config = val | MA_PRESENT; + + ret = dev_read_u32(conf, "fsl,voltage", &val); + if (!ret) + config |= val << VOL_SHIFT | VOL_PRESENT; + + ret = dev_read_u32(conf, "fsl,pull-up", &val); + if (!ret) + config |= val << PULL_SHIFT | PULL_PRESENT; + + return config; +} + +static int mxs_pinctrl_set_mux(struct udevice *dev, u32 val, int bank, int pin) +{ + struct mxs_pinctrl_priv *iomux = dev_get_priv(dev); + int muxsel = MUXID_TO_MUXSEL(val), shift; + void __iomem *reg; + + reg = iomux->base + iomux->regs->muxsel; + reg += bank * 0x20 + pin / 16 * 0x10; + shift = pin % 16 * 2; + + mxs_pinctrl_rmwl(muxsel, 0x3, shift, reg); + debug(" mux %d,", muxsel); + + return 0; +} + +static int mxs_pinctrl_set_state(struct udevice *dev, struct udevice *conf) +{ + struct mxs_pinctrl_priv *iomux = dev_get_priv(dev); + u32 *pin_data, val, ma, vol, pull; + int npins, size, i, ret; + unsigned long config; + + debug("\n%s: set state: %s\n", __func__, conf->name); + + size = dev_read_size(conf, "fsl,pinmux-ids"); + if (size < 0) + return size; + + if (!size || size % sizeof(int)) { + dev_err(dev, "Invalid fsl,pinmux-ids property in %s\n", + conf->name); + return -EINVAL; + } + + npins = size / sizeof(int); + + pin_data = devm_kzalloc(dev, size, 0); + if (!pin_data) + return -ENOMEM; + + ret = dev_read_u32_array(conf, "fsl,pinmux-ids", pin_data, npins); + if 
(ret) { + dev_err(dev, "Error reading pin data.\n"); + devm_kfree(dev, pin_data); + return -EINVAL; + } + + config = mxs_dt_node_to_map(conf); + + ma = CONFIG_TO_MA(config); + vol = CONFIG_TO_VOL(config); + pull = CONFIG_TO_PULL(config); + + for (i = 0; i < npins; i++) { + int pinid, bank, pin, shift; + void __iomem *reg; + + val = pin_data[i]; + + pinid = MUXID_TO_PINID(val); + bank = PINID_TO_BANK(pinid); + pin = PINID_TO_PIN(pinid); + + debug("(val: 0x%x) pin %d,", val, pinid); + /* Setup pinmux */ + mxs_pinctrl_set_mux(dev, val, bank, pin); + + debug(" ma: %d, vol: %d, pull: %d\n", ma, vol, pull); + + /* drive */ + reg = iomux->base + iomux->regs->drive; + reg += bank * 0x40 + pin / 8 * 0x10; + + /* mA */ + if (config & MA_PRESENT) { + shift = pin % 8 * 4; + mxs_pinctrl_rmwl(ma, 0x3, shift, reg); + } + + /* vol */ + if (config & VOL_PRESENT) { + shift = pin % 8 * 4 + 2; + if (vol) + writel(1 << shift, reg + SET); + else + writel(1 << shift, reg + CLR); + } + + /* pull */ + if (config & PULL_PRESENT) { + reg = iomux->base + iomux->regs->pull; + reg += bank * 0x10; + shift = pin; + if (pull) + writel(1 << shift, reg + SET); + else + writel(1 << shift, reg + CLR); + } + } + + devm_kfree(dev, pin_data); + return 0; +} + +static struct pinctrl_ops mxs_pinctrl_ops = { + .set_state = mxs_pinctrl_set_state, +}; + +static int mxs_pinctrl_probe(struct udevice *dev) +{ + struct mxs_pinctrl_priv *iomux = dev_get_priv(dev); + + iomux->base = dev_read_addr_ptr(dev); + iomux->regs = (struct mxs_regs *)dev_get_driver_data(dev); + + return 0; +} + +static const struct mxs_regs imx23_regs = { + .muxsel = 0x100, + .drive = 0x200, + .pull = 0x400, +}; + +static const struct mxs_regs imx28_regs = { + .muxsel = 0x100, + .drive = 0x300, + .pull = 0x600, +}; + +static const struct udevice_id mxs_pinctrl_match[] = { + { .compatible = "fsl,imx23-pinctrl", .data = (ulong)&imx23_regs }, + { .compatible = "fsl,imx28-pinctrl", .data = (ulong)&imx28_regs }, + { /* sentinel */ } +}; + +U_BOOT_DRIVER(mxs_pinctrl) = { + .name = "mxs-pinctrl", + .id = UCLASS_PINCTRL, + .of_match = of_match_ptr(mxs_pinctrl_match), + .probe = mxs_pinctrl_probe, +#if !CONFIG_IS_ENABLED(OF_PLATDATA) + .bind = dm_scan_fdt_dev, +#endif + .priv_auto_alloc_size = sizeof(struct mxs_pinctrl_priv), + .ops = &mxs_pinctrl_ops, +}; diff --git a/drivers/pinctrl/nxp/pinctrl-mxs.h b/drivers/pinctrl/nxp/pinctrl-mxs.h new file mode 100644 index 0000000000..a398e43cbe --- /dev/null +++ b/drivers/pinctrl/nxp/pinctrl-mxs.h @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2012 Freescale Semiconductor, Inc. + */ + +#ifndef __PINCTRL_MXS_H +#define __PINCTRL_MXS_H + +#include <dm/pinctrl.h> + +#define SET 0x4 +#define CLR 0x8 +#define TOG 0xc + +#define MXS_PINCTRL_PIN(pin) PINCTRL_PIN(pin, #pin) +#define PINID(bank, pin) ((bank) * 32 + (pin)) + +/* + * pinmux-id bit field definitions + * + * bank: 15..12 (4) + * pin: 11..4 (8) + * muxsel: 3..0 (4) + */ +#define MUXID_TO_PINID(m) PINID((m) >> 12 & 0xf, (m) >> 4 & 0xff) +#define MUXID_TO_MUXSEL(m) ((m) & 0xf) + +#define PINID_TO_BANK(p) ((p) >> 5) +#define PINID_TO_PIN(p) ((p) % 32) + +/* + * pin config bit field definitions + * + * pull-up: 6..5 (2) + * voltage: 4..3 (2) + * mA: 2..0 (3) + * + * MSB of each field is presence bit for the config. 
+ */ +#define PULL_PRESENT (1 << 6) +#define PULL_SHIFT 5 +#define VOL_PRESENT (1 << 4) +#define VOL_SHIFT 3 +#define MA_PRESENT (1 << 2) +#define MA_SHIFT 0 +#define CONFIG_TO_PULL(c) ((c) >> PULL_SHIFT & 0x1) +#define CONFIG_TO_VOL(c) ((c) >> VOL_SHIFT & 0x1) +#define CONFIG_TO_MA(c) ((c) >> MA_SHIFT & 0x3) + +struct mxs_regs { + u16 muxsel; + u16 drive; + u16 pull; +}; + +static inline void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, + void __iomem *reg) +{ + clrsetbits_le32(reg, mask << shift, value << shift); +} +#endif /* __PINCTRL_MXS_H */ diff --git a/drivers/power/domain/ti-sci-power-domain.c b/drivers/power/domain/ti-sci-power-domain.c index aafde62cbf..b9cd37b612 100644 --- a/drivers/power/domain/ti-sci-power-domain.c +++ b/drivers/power/domain/ti-sci-power-domain.c @@ -13,6 +13,7 @@ #include <errno.h> #include <power-domain-uclass.h> #include <linux/soc/ti/ti_sci_protocol.h> +#include <dt-bindings/soc/ti,sci_pm_domain.h> /** * struct ti_sci_power_domain_data - pm domain controller information structure @@ -56,11 +57,16 @@ static int ti_sci_power_domain_on(struct power_domain *pd) struct ti_sci_power_domain_data *data = dev_get_priv(pd->dev); const struct ti_sci_handle *sci = data->sci; const struct ti_sci_dev_ops *dops = &sci->ops.dev_ops; + u8 flags = (uintptr_t)pd->priv; int ret; debug("%s(pd=%p)\n", __func__, pd); - ret = dops->get_device(sci, pd->id); + if (flags & TI_SCI_PD_EXCLUSIVE) + ret = dops->get_device_exclusive(sci, pd->id); + else + ret = dops->get_device(sci, pd->id); + if (ret) dev_err(power_domain->dev, "%s: get_device failed (%d)\n", __func__, ret); @@ -85,6 +91,28 @@ static int ti_sci_power_domain_off(struct power_domain *pd) return ret; } +static int ti_sci_power_domain_of_xlate(struct power_domain *pd, + struct ofnode_phandle_args *args) +{ + u8 flags; + + debug("%s(power_domain=%p)\n", __func__, pd); + + if (args->args_count < 1) { + debug("Invalid args_count: %d\n", args->args_count); + return -EINVAL; + } + + pd->id = args->args[0]; + /* By default request for device exclusive */ + flags = TI_SCI_PD_EXCLUSIVE; + if (args->args_count == 2) + flags = args->args[1] & TI_SCI_PD_EXCLUSIVE; + pd->priv = (void *)(uintptr_t)flags; + + return 0; +} + static const struct udevice_id ti_sci_power_domain_of_match[] = { { .compatible = "ti,sci-pm-domain" }, { /* sentinel */ } @@ -95,6 +123,7 @@ static struct power_domain_ops ti_sci_power_domain_ops = { .free = ti_sci_power_domain_free, .on = ti_sci_power_domain_on, .off = ti_sci_power_domain_off, + .of_xlate = ti_sci_power_domain_of_xlate, }; U_BOOT_DRIVER(ti_sci_pm_domains) = { diff --git a/drivers/power/pmic/bd71837.c b/drivers/power/pmic/bd71837.c index 24d9f7fab7..e292d42a8c 100644 --- a/drivers/power/pmic/bd71837.c +++ b/drivers/power/pmic/bd71837.c @@ -3,6 +3,8 @@ * Copyright 2018 NXP */ +#define DEBUG + #include <common.h> #include <errno.h> #include <dm.h> @@ -15,15 +17,15 @@ DECLARE_GLOBAL_DATA_PTR; static const struct pmic_child_info pmic_children_info[] = { /* buck */ - { .prefix = "b", .driver = BD71837_REGULATOR_DRIVER}, + { .prefix = "b", .driver = BD718XX_REGULATOR_DRIVER}, /* ldo */ - { .prefix = "l", .driver = BD71837_REGULATOR_DRIVER}, + { .prefix = "l", .driver = BD718XX_REGULATOR_DRIVER}, { }, }; static int bd71837_reg_count(struct udevice *dev) { - return BD71837_REG_NUM; + return BD718XX_MAX_REGISTER - 1; } static int bd71837_write(struct udevice *dev, uint reg, const uint8_t *buff, @@ -54,7 +56,7 @@ static int bd71837_bind(struct udevice *dev) regulators_node = dev_read_subnode(dev, 
"regulators"); if (!ofnode_valid(regulators_node)) { - debug("%s: %s regulators subnode not found!", __func__, + debug("%s: %s regulators subnode not found!\n", __func__, dev->name); return -ENXIO; } @@ -69,6 +71,24 @@ static int bd71837_bind(struct udevice *dev) return 0; } +static int bd718x7_probe(struct udevice *dev) +{ + int ret; + uint8_t mask = BD718XX_REGLOCK_PWRSEQ | BD718XX_REGLOCK_VREG; + + /* Unlock the PMIC regulator control before probing the children */ + ret = pmic_clrsetbits(dev, BD718XX_REGLOCK, mask, 0); + if (ret) { + debug("%s: %s Failed to unlock regulator control\n", __func__, + dev->name); + return ret; + } + debug("%s: '%s' - BD718x7 PMIC registers unlocked\n", __func__, + dev->name); + + return 0; +} + static struct dm_pmic_ops bd71837_ops = { .reg_count = bd71837_reg_count, .read = bd71837_read, @@ -76,7 +96,8 @@ static struct dm_pmic_ops bd71837_ops = { }; static const struct udevice_id bd71837_ids[] = { - { .compatible = "rohm,bd71837", .data = 0x4b, }, + { .compatible = "rohm,bd71837", .data = ROHM_CHIP_TYPE_BD71837, }, + { .compatible = "rohm,bd71847", .data = ROHM_CHIP_TYPE_BD71847, }, { } }; @@ -85,5 +106,6 @@ U_BOOT_DRIVER(pmic_bd71837) = { .id = UCLASS_PMIC, .of_match = bd71837_ids, .bind = bd71837_bind, + .probe = bd718x7_probe, .ops = &bd71837_ops, }; diff --git a/drivers/power/regulator/Kconfig b/drivers/power/regulator/Kconfig index 337e9e7471..9aa00fad42 100644 --- a/drivers/power/regulator/Kconfig +++ b/drivers/power/regulator/Kconfig @@ -43,6 +43,23 @@ config REGULATOR_AS3722 but does not yet support change voltages. Currently this must be done using direct register writes to the PMIC. +config DM_REGULATOR_BD71837 + bool "Enable Driver Model for ROHM BD71837/BD71847 regulators" + depends on DM_REGULATOR && DM_PMIC_BD71837 + help + This config enables implementation of driver-model regulator uclass + features for regulators on ROHM BD71837 and BD71847 PMICs. + BD71837 contains 8 bucks and 7 LDOS. BD71847 is reduced version + containing 6 bucks and 6 LDOs. The driver implements get/set api for + value and enable. + +config SPL_DM_REGULATOR_BD71837 + bool "Enable Driver Model for ROHM BD71837/BD71847 regulators in SPL" + depends on DM_REGULATOR_BD71837 + help + This config enables implementation of driver-model regulator uclass + features for regulators on ROHM BD71837 and BD71847 in SPL. 
+ config DM_REGULATOR_PFUZE100 bool "Enable Driver Model for REGULATOR PFUZE100" depends on DM_REGULATOR && DM_PMIC_PFUZE100 diff --git a/drivers/power/regulator/Makefile b/drivers/power/regulator/Makefile index e728b73aee..6a3d4bbee4 100644 --- a/drivers/power/regulator/Makefile +++ b/drivers/power/regulator/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_REGULATOR_ACT8846) += act8846.o obj-$(CONFIG_REGULATOR_AS3722) += as3722_regulator.o obj-$(CONFIG_DM_REGULATOR_MAX77686) += max77686.o obj-$(CONFIG_$(SPL_)DM_PMIC_PFUZE100) += pfuze100.o +obj-$(CONFIG_$(SPL_)DM_REGULATOR_BD71837) += bd71837.o obj-$(CONFIG_$(SPL_)REGULATOR_PWM) += pwm_regulator.o obj-$(CONFIG_$(SPL_)DM_REGULATOR_FAN53555) += fan53555.o obj-$(CONFIG_$(SPL_)DM_REGULATOR_COMMON) += regulator_common.o diff --git a/drivers/power/regulator/bd71837.c b/drivers/power/regulator/bd71837.c new file mode 100644 index 0000000000..575429aa2d --- /dev/null +++ b/drivers/power/regulator/bd71837.c @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2019 ROHM Semiconductors + * + * ROHM BD71837 regulator driver + */ + +#include <common.h> +#include <dm.h> +#include <power/bd71837.h> +#include <power/pmic.h> +#include <power/regulator.h> + +#define HW_STATE_CONTROL 0 +#define DEBUG + +/** + * struct bd71837_vrange - describe linear range of voltages + * + * @min_volt: smallest voltage in range + * @step: how much voltage changes at each selector step + * @min_sel: smallest selector in the range + * @max_sel: maximum selector in the range + * @rangeval: register value used to select this range if selectable + * ranges are supported + */ +struct bd71837_vrange { + unsigned int min_volt; + unsigned int step; + u8 min_sel; + u8 max_sel; + u8 rangeval; +}; + +/** + * struct bd71837_platdata - describe regulator control registers + * + * @name: name of the regulator. 
Used for matching the dt-entry + * @enable_reg: register address used to enable/disable regulator + * @enablemask: register mask used to enable/disable regulator + * @volt_reg: register address used to configure regulator voltage + * @volt_mask: register mask used to configure regulator voltage + * @ranges: pointer to ranges of regulator voltages and matching register + * values + * @numranges: number of voltage ranges pointed by ranges + * @rangemask: mask for selecting used ranges if multiple ranges are supported + * @sel_mask: bit to toggle in order to transfer the register control to SW + * @dvs: whether the voltage can be changed when regulator is enabled + */ +struct bd71837_platdata { + const char *name; + u8 enable_reg; + u8 enablemask; + u8 volt_reg; + u8 volt_mask; + struct bd71837_vrange *ranges; + unsigned int numranges; + u8 rangemask; + u8 sel_mask; + bool dvs; +}; + +#define BD_RANGE(_min, _vstep, _sel_low, _sel_hi, _range_sel) \ +{ \ + .min_volt = (_min), .step = (_vstep), .min_sel = (_sel_low), \ + .max_sel = (_sel_hi), .rangeval = (_range_sel) \ +} + +#define BD_DATA(_name, enreg, enmask, vreg, vmask, _range, rmask, _dvs, sel) \ +{ \ + .name = (_name), .enable_reg = (enreg), .enablemask = (enmask), \ + .volt_reg = (vreg), .volt_mask = (vmask), .ranges = (_range), \ + .numranges = ARRAY_SIZE(_range), .rangemask = (rmask), .dvs = (_dvs), \ + .sel_mask = (sel) \ +} + +static struct bd71837_vrange dvs_buck_vranges[] = { + BD_RANGE(700000, 10000, 0, 0x3c, 0), + BD_RANGE(1300000, 0, 0x3d, 0x3f, 0), +}; + +static struct bd71837_vrange bd71847_buck3_vranges[] = { + BD_RANGE(700000, 100000, 0x00, 0x03, 0), + BD_RANGE(1050000, 50000, 0x04, 0x05, 0), + BD_RANGE(1200000, 150000, 0x06, 0x07, 0), + BD_RANGE(550000, 50000, 0x0, 0x7, 0x40), + BD_RANGE(675000, 100000, 0x0, 0x3, 0x80), + BD_RANGE(1025000, 50000, 0x4, 0x5, 0x80), + BD_RANGE(1175000, 150000, 0x6, 0x7, 0x80), +}; + +static struct bd71837_vrange bd71847_buck4_vranges[] = { + BD_RANGE(3000000, 100000, 0x00, 0x03, 0), + BD_RANGE(2600000, 100000, 0x00, 0x03, 40), +}; + +static struct bd71837_vrange bd71837_buck5_vranges[] = { + BD_RANGE(700000, 100000, 0, 0x3, 0), + BD_RANGE(1050000, 50000, 0x04, 0x05, 0), + BD_RANGE(1200000, 150000, 0x06, 0x07, 0), + BD_RANGE(675000, 100000, 0x0, 0x3, 0x80), + BD_RANGE(1025000, 50000, 0x04, 0x05, 0x80), + BD_RANGE(1175000, 150000, 0x06, 0x07, 0x80), +}; + +static struct bd71837_vrange bd71837_buck6_vranges[] = { + BD_RANGE(3000000, 100000, 0x00, 0x03, 0), +}; + +static struct bd71837_vrange nodvs_buck3_vranges[] = { + BD_RANGE(1605000, 90000, 0, 1, 0), + BD_RANGE(1755000, 45000, 2, 4, 0), + BD_RANGE(1905000, 45000, 5, 7, 0), +}; + +static struct bd71837_vrange nodvs_buck4_vranges[] = { + BD_RANGE(800000, 10000, 0x00, 0x3C, 0), +}; + +static struct bd71837_vrange ldo1_vranges[] = { + BD_RANGE(3000000, 100000, 0x00, 0x03, 0), + BD_RANGE(1600000, 100000, 0x00, 0x03, 0x20), +}; + +static struct bd71837_vrange ldo2_vranges[] = { + BD_RANGE(900000, 0, 0, 0, 0), + BD_RANGE(800000, 0, 1, 1, 0), +}; + +static struct bd71837_vrange ldo3_vranges[] = { + BD_RANGE(1800000, 100000, 0x00, 0x0f, 0), +}; + +static struct bd71837_vrange ldo4_vranges[] = { + BD_RANGE(900000, 100000, 0x00, 0x09, 0), +}; + +static struct bd71837_vrange bd71837_ldo5_vranges[] = { + BD_RANGE(1800000, 100000, 0x00, 0x0f, 0), +}; + +static struct bd71837_vrange bd71847_ldo5_vranges[] = { + BD_RANGE(1800000, 100000, 0x00, 0x0f, 0), + BD_RANGE(800000, 100000, 0x00, 0x0f, 0x20), +}; + +static struct bd71837_vrange ldo6_vranges[] = { + 
BD_RANGE(900000, 100000, 0x00, 0x09, 0), +}; + +static struct bd71837_vrange ldo7_vranges[] = { + BD_RANGE(1800000, 100000, 0x00, 0x0f, 0), +}; + +/* + * We use enable mask 'HW_STATE_CONTROL' to indicate that this regulator + * must not be enabled or disabled by SW. The typical use-case for BD71837 + * is powering NXP i.MX8. In this use-case we (for now) only allow control + * for BUCK3 and BUCK4 which are not boot critical. + */ +static struct bd71837_platdata bd71837_reg_data[] = { +/* Bucks 1-4 which support dynamic voltage scaling */ + BD_DATA("BUCK1", BD718XX_BUCK1_CTRL, HW_STATE_CONTROL, + BD718XX_BUCK1_VOLT_RUN, DVS_BUCK_RUN_MASK, dvs_buck_vranges, 0, + true, BD718XX_BUCK_SEL), + BD_DATA("BUCK2", BD718XX_BUCK2_CTRL, HW_STATE_CONTROL, + BD718XX_BUCK2_VOLT_RUN, DVS_BUCK_RUN_MASK, dvs_buck_vranges, 0, + true, BD718XX_BUCK_SEL), + BD_DATA("BUCK3", BD71837_BUCK3_CTRL, BD718XX_BUCK_EN, + BD71837_BUCK3_VOLT_RUN, DVS_BUCK_RUN_MASK, dvs_buck_vranges, 0, + true, BD718XX_BUCK_SEL), + BD_DATA("BUCK4", BD71837_BUCK4_CTRL, BD718XX_BUCK_EN, + BD71837_BUCK4_VOLT_RUN, DVS_BUCK_RUN_MASK, dvs_buck_vranges, 0, + true, BD718XX_BUCK_SEL), +/* Bucks 5-8 which do not support dynamic voltage scaling */ + BD_DATA("BUCK5", BD718XX_1ST_NODVS_BUCK_CTRL, HW_STATE_CONTROL, + BD718XX_1ST_NODVS_BUCK_VOLT, BD718XX_1ST_NODVS_BUCK_MASK, + bd71837_buck5_vranges, 0x80, false, BD718XX_BUCK_SEL), + BD_DATA("BUCK6", BD718XX_2ND_NODVS_BUCK_CTRL, HW_STATE_CONTROL, + BD718XX_2ND_NODVS_BUCK_VOLT, BD71837_BUCK6_MASK, + bd71837_buck6_vranges, 0, false, BD718XX_BUCK_SEL), + BD_DATA("BUCK7", BD718XX_3RD_NODVS_BUCK_CTRL, HW_STATE_CONTROL, + BD718XX_3RD_NODVS_BUCK_VOLT, BD718XX_3RD_NODVS_BUCK_MASK, + nodvs_buck3_vranges, 0, false, BD718XX_BUCK_SEL), + BD_DATA("BUCK8", BD718XX_4TH_NODVS_BUCK_CTRL, HW_STATE_CONTROL, + BD718XX_4TH_NODVS_BUCK_VOLT, BD718XX_4TH_NODVS_BUCK_MASK, + nodvs_buck4_vranges, 0, false, BD718XX_BUCK_SEL), +/* LDOs */ + BD_DATA("LDO1", BD718XX_LDO1_VOLT, HW_STATE_CONTROL, BD718XX_LDO1_VOLT, + BD718XX_LDO1_MASK, ldo1_vranges, 0x20, false, BD718XX_LDO_SEL), + BD_DATA("LDO2", BD718XX_LDO2_VOLT, HW_STATE_CONTROL, BD718XX_LDO2_VOLT, + BD718XX_LDO2_MASK, ldo2_vranges, 0, false, BD718XX_LDO_SEL), + BD_DATA("LDO3", BD718XX_LDO3_VOLT, HW_STATE_CONTROL, BD718XX_LDO3_VOLT, + BD718XX_LDO3_MASK, ldo3_vranges, 0, false, BD718XX_LDO_SEL), + BD_DATA("LDO4", BD718XX_LDO4_VOLT, HW_STATE_CONTROL, BD718XX_LDO4_VOLT, + BD718XX_LDO4_MASK, ldo4_vranges, 0, false, BD718XX_LDO_SEL), + BD_DATA("LDO5", BD718XX_LDO5_VOLT, HW_STATE_CONTROL, BD718XX_LDO5_VOLT, + BD71837_LDO5_MASK, bd71837_ldo5_vranges, 0, false, + BD718XX_LDO_SEL), + BD_DATA("LDO6", BD718XX_LDO6_VOLT, HW_STATE_CONTROL, BD718XX_LDO6_VOLT, + BD718XX_LDO6_MASK, ldo6_vranges, 0, false, BD718XX_LDO_SEL), + BD_DATA("LDO7", BD71837_LDO7_VOLT, HW_STATE_CONTROL, BD71837_LDO7_VOLT, + BD71837_LDO7_MASK, ldo7_vranges, 0, false, BD718XX_LDO_SEL), +}; + +static struct bd71837_platdata bd71847_reg_data[] = { +/* Bucks 1 and 2 which support dynamic voltage scaling */ + BD_DATA("BUCK1", BD718XX_BUCK1_CTRL, HW_STATE_CONTROL, + BD718XX_BUCK1_VOLT_RUN, DVS_BUCK_RUN_MASK, dvs_buck_vranges, 0, + true, BD718XX_BUCK_SEL), + BD_DATA("BUCK2", BD718XX_BUCK2_CTRL, HW_STATE_CONTROL, + BD718XX_BUCK2_VOLT_RUN, DVS_BUCK_RUN_MASK, dvs_buck_vranges, 0, + true, BD718XX_BUCK_SEL), +/* Bucks 3-6 which do not support dynamic voltage scaling */ + BD_DATA("BUCK3", BD718XX_1ST_NODVS_BUCK_CTRL, HW_STATE_CONTROL, + BD718XX_1ST_NODVS_BUCK_VOLT, BD718XX_1ST_NODVS_BUCK_MASK, + bd71847_buck3_vranges, 0xc0, false, 
BD718XX_BUCK_SEL), + BD_DATA("BUCK4", BD718XX_2ND_NODVS_BUCK_CTRL, HW_STATE_CONTROL, + BD718XX_2ND_NODVS_BUCK_VOLT, BD71837_BUCK6_MASK, + bd71847_buck4_vranges, 0x40, false, BD718XX_BUCK_SEL), + BD_DATA("BUCK5", BD718XX_3RD_NODVS_BUCK_CTRL, HW_STATE_CONTROL, + BD718XX_3RD_NODVS_BUCK_VOLT, BD718XX_3RD_NODVS_BUCK_MASK, + nodvs_buck3_vranges, 0, false, BD718XX_BUCK_SEL), + BD_DATA("BUCK6", BD718XX_4TH_NODVS_BUCK_CTRL, HW_STATE_CONTROL, + BD718XX_4TH_NODVS_BUCK_VOLT, BD718XX_4TH_NODVS_BUCK_MASK, + nodvs_buck4_vranges, 0, false, BD718XX_BUCK_SEL), +/* LDOs */ + BD_DATA("LDO1", BD718XX_LDO1_VOLT, HW_STATE_CONTROL, BD718XX_LDO1_VOLT, + BD718XX_LDO1_MASK, ldo1_vranges, 0x20, false, BD718XX_LDO_SEL), + BD_DATA("LDO2", BD718XX_LDO2_VOLT, HW_STATE_CONTROL, BD718XX_LDO2_VOLT, + BD718XX_LDO2_MASK, ldo2_vranges, 0, false, BD718XX_LDO_SEL), + BD_DATA("LDO3", BD718XX_LDO3_VOLT, HW_STATE_CONTROL, BD718XX_LDO3_VOLT, + BD718XX_LDO3_MASK, ldo3_vranges, 0, false, BD718XX_LDO_SEL), + BD_DATA("LDO4", BD718XX_LDO4_VOLT, HW_STATE_CONTROL, BD718XX_LDO4_VOLT, + BD718XX_LDO4_MASK, ldo4_vranges, 0, false, BD718XX_LDO_SEL), + BD_DATA("LDO5", BD718XX_LDO5_VOLT, HW_STATE_CONTROL, BD718XX_LDO5_VOLT, + BD71847_LDO5_MASK, bd71847_ldo5_vranges, 0x20, false, + BD718XX_LDO_SEL), + BD_DATA("LDO6", BD718XX_LDO6_VOLT, HW_STATE_CONTROL, BD718XX_LDO6_VOLT, + BD718XX_LDO6_MASK, ldo6_vranges, 0, false, BD718XX_LDO_SEL), +}; + +static int vrange_find_value(struct bd71837_vrange *r, unsigned int sel, + unsigned int *val) +{ + if (!val || sel < r->min_sel || sel > r->max_sel) + return -EINVAL; + + *val = r->min_volt + r->step * (sel - r->min_sel); + return 0; +} + +static int vrange_find_selector(struct bd71837_vrange *r, int val, + unsigned int *sel) +{ + int ret = -EINVAL; + int num_vals = r->max_sel - r->min_sel + 1; + + if (val >= r->min_volt && + val <= r->min_volt + r->step * (num_vals - 1)) { + if (r->step) { + *sel = r->min_sel + ((val - r->min_volt) / r->step); + ret = 0; + } else { + *sel = r->min_sel; + ret = 0; + } + } + return ret; +} + +static int bd71837_get_enable(struct udevice *dev) +{ + int val; + struct bd71837_platdata *plat = dev_get_platdata(dev); + + /* + * Boot critical regulators on bd71837 must not be controlled by SW + * due to the 'feature' which leaves power rails down if bd71837 is + * reset to SNVS state. Hence we can't get the state here. + * + * If we are alive it means we probably are in RUN state and + * if the regulator can't be controlled we can assume it is + * enabled. + */ + if (plat->enablemask == HW_STATE_CONTROL) + return 1; + + val = pmic_reg_read(dev->parent, plat->enable_reg); + if (val < 0) + return val; + + return (val & plat->enablemask); +} + +static int bd71837_set_enable(struct udevice *dev, bool enable) +{ + int val = 0; + struct bd71837_platdata *plat = dev_get_platdata(dev); + + /* + * Boot critical regulators on bd71837 must not be controlled by SW + * due to the 'feature' which leaves power rails down if bd71837 is + * reset to SNVS state. Hence we can't set the state here. 
+ */ + if (plat->enablemask == HW_STATE_CONTROL) + return -EINVAL; + + if (enable) + val = plat->enablemask; + + return pmic_clrsetbits(dev->parent, plat->enable_reg, plat->enablemask, + val); +} + +static int bd71837_set_value(struct udevice *dev, int uvolt) +{ + unsigned int sel; + unsigned int range; + int i; + int found = 0; + struct bd71837_platdata *plat = dev_get_platdata(dev); + + /* + * An under/overshooting may occur if voltage is changed for other + * regulators but buck 1,2,3 or 4 when regulator is enabled. Prevent + * change to protect the HW + */ + if (!plat->dvs) + if (bd71837_get_enable(dev)) { + pr_err("Only DVS bucks can be changed when enabled\n"); + return -EINVAL; + } + + for (i = 0; i < plat->numranges; i++) { + struct bd71837_vrange *r = &plat->ranges[i]; + + found = !vrange_find_selector(r, uvolt, &sel); + if (found) { + unsigned int tmp; + + /* + * We require exactly the requested value to be + * supported - this can be changed later if needed + */ + range = r->rangeval; + found = !vrange_find_value(r, sel, &tmp); + if (found && tmp == uvolt) + break; + found = 0; + } + } + + if (!found) + return -EINVAL; + + sel <<= ffs(plat->volt_mask) - 1; + + if (plat->rangemask) + sel |= range; + + return pmic_clrsetbits(dev->parent, plat->volt_reg, plat->volt_mask | + plat->rangemask, sel); +} + +static int bd71837_get_value(struct udevice *dev) +{ + unsigned int reg, range; + unsigned int tmp; + struct bd71837_platdata *plat = dev_get_platdata(dev); + int i; + + reg = pmic_reg_read(dev->parent, plat->volt_reg); + if (((int)reg) < 0) + return reg; + + range = reg & plat->rangemask; + + reg &= plat->volt_mask; + reg >>= ffs(plat->volt_mask) - 1; + + for (i = 0; i < plat->numranges; i++) { + struct bd71837_vrange *r = &plat->ranges[i]; + + if (plat->rangemask && ((plat->rangemask & range) != + r->rangeval)) + continue; + + if (!vrange_find_value(r, reg, &tmp)) + return tmp; + } + + pr_err("Unknown voltage value read from pmic\n"); + + return -EINVAL; +} + +static int bd71837_regulator_probe(struct udevice *dev) +{ + struct bd71837_platdata *plat = dev_get_platdata(dev); + int i, ret; + struct dm_regulator_uclass_platdata *uc_pdata; + int type; + struct bd71837_platdata *init_data; + int data_amnt; + + type = dev_get_driver_data(dev_get_parent(dev)); + + switch (type) { + case ROHM_CHIP_TYPE_BD71837: + init_data = bd71837_reg_data; + data_amnt = ARRAY_SIZE(bd71837_reg_data); + break; + case ROHM_CHIP_TYPE_BD71847: + init_data = bd71847_reg_data; + data_amnt = ARRAY_SIZE(bd71847_reg_data); + break; + default: + debug("Unknown PMIC type\n"); + init_data = NULL; + data_amnt = 0; + break; + } + + for (i = 0; i < data_amnt; i++) { + if (!strcmp(dev->name, init_data[i].name)) { + *plat = init_data[i]; + if (plat->enablemask != HW_STATE_CONTROL) { + /* + * Take the regulator under SW control. 
Ensure + * the initial state matches dt flags and then + * write the SEL bit + */ + uc_pdata = dev_get_uclass_platdata(dev); + ret = bd71837_set_enable(dev, + !!(uc_pdata->boot_on || + uc_pdata->always_on)); + if (ret) + return ret; + + return pmic_clrsetbits(dev->parent, + plat->enable_reg, + plat->sel_mask, + plat->sel_mask); + } + return 0; + } + } + + pr_err("Unknown regulator '%s'\n", dev->name); + + return -ENOENT; +} + +static const struct dm_regulator_ops bd71837_regulator_ops = { + .get_value = bd71837_get_value, + .set_value = bd71837_set_value, + .get_enable = bd71837_get_enable, + .set_enable = bd71837_set_enable, +}; + +U_BOOT_DRIVER(bd71837_regulator) = { + .name = BD718XX_REGULATOR_DRIVER, + .id = UCLASS_REGULATOR, + .ops = &bd71837_regulator_ops, + .probe = bd71837_regulator_probe, + .platdata_auto_alloc_size = sizeof(struct bd71837_platdata), +}; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 2984b79766..1f36fc78fa 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -18,6 +18,11 @@ config PWM_EXYNOS used. It provides 5 channels which can be independently programmed. Channel 4 (the last) is normally used as a timer. +config PWM_IMX + bool "Enable support for i.MX27 and later PWM" + help + This PWM is found on i.MX27 and later i.MX SoCs. + config PWM_ROCKCHIP bool "Enable support for the Rockchip PWM" depends on DM_PWM diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c index 83c1bfa820..8d8f3e6f9f 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx.c @@ -8,6 +8,7 @@ #include <common.h> #include <div64.h> +#include <dm.h> #include <pwm.h> #include <asm/arch/imx-regs.h> #include <asm/io.h> @@ -24,18 +25,12 @@ int pwm_init(int pwm_id, int div, int invert) return 0; } -int pwm_config(int pwm_id, int duty_ns, int period_ns) +int pwm_config_internal(struct pwm_regs *pwm, unsigned long period_cycles, + unsigned long duty_cycles, unsigned long prescale) { - struct pwm_regs *pwm = (struct pwm_regs *)pwm_id_to_reg(pwm_id); - unsigned long period_cycles, duty_cycles, prescale; u32 cr; - if (!pwm) - return -1; - - pwm_imx_get_parms(period_ns, duty_ns, &period_cycles, &duty_cycles, - &prescale); - + writel(0, &pwm->ir); cr = PWMCR_PRESCALER(prescale) | PWMCR_DOZEEN | PWMCR_WAITEN | PWMCR_DBGEN | PWMCR_CLKSRC_IPG_HIGH; @@ -48,6 +43,20 @@ int pwm_config(int pwm_id, int duty_ns, int period_ns) return 0; } +int pwm_config(int pwm_id, int duty_ns, int period_ns) +{ + struct pwm_regs *pwm = (struct pwm_regs *)pwm_id_to_reg(pwm_id); + unsigned long period_cycles, duty_cycles, prescale; + + if (!pwm) + return -1; + + pwm_imx_get_parms(period_ns, duty_ns, &period_cycles, &duty_cycles, + &prescale); + + return pwm_config_internal(pwm, period_cycles, duty_cycles, prescale); +} + int pwm_enable(int pwm_id) { struct pwm_regs *pwm = (struct pwm_regs *)pwm_id_to_reg(pwm_id); @@ -68,3 +77,86 @@ void pwm_disable(int pwm_id) clrbits_le32(&pwm->cr, PWMCR_EN); } + +#if defined(CONFIG_DM_PWM) +struct imx_pwm_priv { + struct pwm_regs *regs; + bool invert; +}; + +static int imx_pwm_set_invert(struct udevice *dev, uint channel, + bool polarity) +{ + struct imx_pwm_priv *priv = dev_get_priv(dev); + + debug("%s: polarity=%u\n", __func__, polarity); + priv->invert = polarity; + + return 0; +} + +static int imx_pwm_set_config(struct udevice *dev, uint channel, + uint period_ns, uint duty_ns) +{ + struct imx_pwm_priv *priv = dev_get_priv(dev); + struct pwm_regs *regs = priv->regs; + unsigned long period_cycles, duty_cycles, prescale; + + debug("%s: Config '%s' channel: %d\n", 
__func__, dev->name, channel); + + pwm_imx_get_parms(period_ns, duty_ns, &period_cycles, &duty_cycles, + &prescale); + + return pwm_config_internal(regs, period_cycles, duty_cycles, prescale); +}; + +static int imx_pwm_set_enable(struct udevice *dev, uint channel, bool enable) +{ + struct imx_pwm_priv *priv = dev_get_priv(dev); + struct pwm_regs *regs = priv->regs; + + debug("%s: Enable '%s' state: %d\n", __func__, dev->name, enable); + + if (enable) + setbits_le32(&regs->cr, PWMCR_EN); + else + clrbits_le32(&regs->cr, PWMCR_EN); + + return 0; +}; + +static int imx_pwm_ofdata_to_platdata(struct udevice *dev) +{ + struct imx_pwm_priv *priv = dev_get_priv(dev); + + priv->regs = (struct pwm_regs *)devfdt_get_addr(dev); + + return 0; +} + +static int imx_pwm_probe(struct udevice *dev) +{ + return 0; +} + +static const struct pwm_ops imx_pwm_ops = { + .set_invert = imx_pwm_set_invert, + .set_config = imx_pwm_set_config, + .set_enable = imx_pwm_set_enable, +}; + +static const struct udevice_id imx_pwm_ids[] = { + { .compatible = "fsl,imx27-pwm" }, + { } +}; + +U_BOOT_DRIVER(imx_pwm) = { + .name = "imx_pwm", + .id = UCLASS_PWM, + .of_match = imx_pwm_ids, + .ops = &imx_pwm_ops, + .ofdata_to_platdata = imx_pwm_ofdata_to_platdata, + .probe = imx_pwm_probe, + .priv_auto_alloc_size = sizeof(struct imx_pwm_priv), +}; +#endif diff --git a/drivers/ram/Kconfig b/drivers/ram/Kconfig index fbf7d7b20f..568d8f2c6a 100644 --- a/drivers/ram/Kconfig +++ b/drivers/ram/Kconfig @@ -54,4 +54,5 @@ config K3_AM654_DDRSS config add support for the initialization of the external SDRAM devices connected to DDR subsystem. +source "drivers/ram/rockchip/Kconfig" source "drivers/ram/stm32mp1/Kconfig" diff --git a/drivers/ram/rockchip/Kconfig b/drivers/ram/rockchip/Kconfig new file mode 100644 index 0000000000..4f274e01b3 --- /dev/null +++ b/drivers/ram/rockchip/Kconfig @@ -0,0 +1,33 @@ +config RAM_ROCKCHIP + bool "Ram drivers support for Rockchip SoCs" + depends on RAM && ARCH_ROCKCHIP + default y + help + This enables support for ram drivers on Rockchip SoCs. + +if RAM_ROCKCHIP + +config RAM_ROCKCHIP_DEBUG + bool "Rockchip ram drivers debugging" + help + This enables debugging of ram driver APIs for the platforms + based on Rockchip SoCs. + + This is an option for developers to understand the ram drivers' + initialization, configuration, etc. + +config RAM_RK3399 + bool "Ram driver for Rockchip RK3399" + default ROCKCHIP_RK3399 + help + This enables ram driver support for the platforms based on + the Rockchip RK3399 SoC. + +config RAM_RK3399_LPDDR4 + bool "LPDDR4 support for Rockchip RK3399" + depends on RAM_RK3399 + help + This enables LPDDR4 sdram code support for the platforms based + on the Rockchip RK3399 SoC. 
+ +endif # RAM_ROCKCHIP diff --git a/drivers/ram/rockchip/Makefile b/drivers/ram/rockchip/Makefile index 5df196066d..feb1f82d00 100644 --- a/drivers/ram/rockchip/Makefile +++ b/drivers/ram/rockchip/Makefile @@ -3,10 +3,11 @@ # Copyright (c) 2017 Theobroma Systems Design und Consulting GmbH # +obj-$(CONFIG_RAM_ROCKCHIP_DEBUG) += sdram_debug.o obj-$(CONFIG_ROCKCHIP_RK3368) = dmc-rk3368.o obj-$(CONFIG_ROCKCHIP_RK3128) = sdram_rk3128.o obj-$(CONFIG_ROCKCHIP_RK3188) = sdram_rk3188.o obj-$(CONFIG_ROCKCHIP_RK322X) = sdram_rk322x.o obj-$(CONFIG_ROCKCHIP_RK3288) = sdram_rk3288.o obj-$(CONFIG_ROCKCHIP_RK3328) = sdram_rk3328.o -obj-$(CONFIG_ROCKCHIP_RK3399) = sdram_rk3399.o +obj-$(CONFIG_RAM_RK3399) += sdram_rk3399.o diff --git a/drivers/ram/rockchip/sdram-rk3399-lpddr4-400.inc b/drivers/ram/rockchip/sdram-rk3399-lpddr4-400.inc new file mode 100644 index 0000000000..c50a03d9dd --- /dev/null +++ b/drivers/ram/rockchip/sdram-rk3399-lpddr4-400.inc @@ -0,0 +1,1570 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) Copyright 2019 Rockchip Electronics Co., Ltd. + * (C) Copyright 2019 Amarula Solutions + */ + +{ + { + { + { + .rank = 0x2, + .col = 0xA, + .bk = 0x3, + .bw = 0x2, + .dbw = 0x1, + .row_3_4 = 0x0, + .cs0_row = 0xF, + .cs1_row = 0xF, + .ddrconfig = 1, + }, + { + .ddrtiminga0 = 0x80241d22, + .ddrtimingb0 = 0x15050f08, + .ddrtimingc0 = { + 0x00000602, + }, + .devtodev0 = 0x00002122, + .ddrmode = { + 0x0000004c, + }, + .agingx0 = 0x00000000, + } + }, + { + { + .rank = 0x2, + .col = 0xA, + .bk = 0x3, + .bw = 0x2, + .dbw = 0x1, + .row_3_4 = 0x0, + .cs0_row = 0xF, + .cs1_row = 0xF, + .ddrconfig = 1, + }, + { + .ddrtiminga0 = 0x80241d22, + .ddrtimingb0 = 0x15050f08, + .ddrtimingc0 = { + 0x00000602, + }, + .devtodev0 = 0x00002122, + .ddrmode = { + 0x0000004c, + }, + .agingx0 = 0x00000000, + } + } + }, + { + .ddr_freq = 400 * MHz, + .dramtype = LPDDR4, + .num_channels = 2, + .stride = 13, + .odt = 1, + }, + { + { + 0x00000b00, /* DENALI_CTL_00_DATA */ + 0x00000000, /* DENALI_CTL_01_DATA */ + 0x00000000, /* DENALI_CTL_02_DATA */ + 0x00000000, /* DENALI_CTL_03_DATA */ + 0x00000000, /* DENALI_CTL_04_DATA */ + 0x00013880, /* DENALI_CTL_05_DATA */ + 0x000c3500, /* DENALI_CTL_06_DATA */ + 0x00000005, /* DENALI_CTL_07_DATA */ + 0x00000320, /* DENALI_CTL_08_DATA */ + 0x00027100, /* DENALI_CTL_09_DATA */ + 0x00186a00, /* DENALI_CTL_10_DATA */ + 0x00000005, /* DENALI_CTL_11_DATA */ + 0x00000640, /* DENALI_CTL_12_DATA */ + 0x00002710, /* DENALI_CTL_13_DATA */ + 0x000186a0, /* DENALI_CTL_14_DATA */ + 0x00000005, /* DENALI_CTL_15_DATA */ + 0x01000064, /* DENALI_CTL_16_DATA */ + 0x00000000, /* DENALI_CTL_17_DATA */ + 0x02020101, /* DENALI_CTL_18_DATA */ + 0x00000102, /* DENALI_CTL_19_DATA */ + 0x00000050, /* DENALI_CTL_20_DATA */ + 0x000000c8, /* DENALI_CTL_21_DATA */ + 0x00000000, /* DENALI_CTL_22_DATA */ + 0x06140000, /* DENALI_CTL_23_DATA */ + 0x00081c00, /* DENALI_CTL_24_DATA */ + 0x0400040c, /* DENALI_CTL_25_DATA */ + 0x19042008, /* DENALI_CTL_26_DATA */ + 0x10080a11, /* DENALI_CTL_27_DATA */ + 0x22310800, /* DENALI_CTL_28_DATA */ + 0x00200f0a, /* DENALI_CTL_29_DATA */ + 0x0a030704, /* DENALI_CTL_30_DATA */ + 0x08000204, /* DENALI_CTL_31_DATA */ + 0x00000a0a, /* DENALI_CTL_32_DATA */ + 0x04006db0, /* DENALI_CTL_33_DATA */ + 0x0a0a0804, /* DENALI_CTL_34_DATA */ + 0x0600db60, /* DENALI_CTL_35_DATA */ + 0x0a0a0806, /* DENALI_CTL_36_DATA */ + 0x04000db6, /* DENALI_CTL_37_DATA */ + 0x02030404, /* DENALI_CTL_38_DATA */ + 0x0f0a0800, /* DENALI_CTL_39_DATA */ + 0x08040411, /* DENALI_CTL_40_DATA */ + 0x1400640a, /* 
DENALI_CTL_41_DATA */ + 0x02010a0a, /* DENALI_CTL_42_DATA */ + 0x00010001, /* DENALI_CTL_43_DATA */ + 0x04082012, /* DENALI_CTL_44_DATA */ + 0x00041109, /* DENALI_CTL_45_DATA */ + 0x00000000, /* DENALI_CTL_46_DATA */ + 0x03010000, /* DENALI_CTL_47_DATA */ + 0x06100034, /* DENALI_CTL_48_DATA */ + 0x0c280068, /* DENALI_CTL_49_DATA */ + 0x00bb0007, /* DENALI_CTL_50_DATA */ + 0x00000000, /* DENALI_CTL_51_DATA */ + 0x00060003, /* DENALI_CTL_52_DATA */ + 0x000a0003, /* DENALI_CTL_53_DATA */ + 0x000a0014, /* DENALI_CTL_54_DATA */ + 0x01000000, /* DENALI_CTL_55_DATA */ + 0x030a0000, /* DENALI_CTL_56_DATA */ + 0x0c000002, /* DENALI_CTL_57_DATA */ + 0x00000103, /* DENALI_CTL_58_DATA */ + 0x0003030a, /* DENALI_CTL_59_DATA */ + 0x00060037, /* DENALI_CTL_60_DATA */ + 0x0003006e, /* DENALI_CTL_61_DATA */ + 0x05050007, /* DENALI_CTL_62_DATA */ + 0x03020605, /* DENALI_CTL_63_DATA */ + 0x06050301, /* DENALI_CTL_64_DATA */ + 0x06020c05, /* DENALI_CTL_65_DATA */ + 0x05050302, /* DENALI_CTL_66_DATA */ + 0x03020305, /* DENALI_CTL_67_DATA */ + 0x00000301, /* DENALI_CTL_68_DATA */ + 0x00000301, /* DENALI_CTL_69_DATA */ + 0x00000001, /* DENALI_CTL_70_DATA */ + 0x00000000, /* DENALI_CTL_71_DATA */ + 0x00000000, /* DENALI_CTL_72_DATA */ + 0x01000000, /* DENALI_CTL_73_DATA */ + 0x80104002, /* DENALI_CTL_74_DATA */ + 0x00040003, /* DENALI_CTL_75_DATA */ + 0x00040005, /* DENALI_CTL_76_DATA */ + 0x00030000, /* DENALI_CTL_77_DATA */ + 0x00050004, /* DENALI_CTL_78_DATA */ + 0x00000004, /* DENALI_CTL_79_DATA */ + 0x00040003, /* DENALI_CTL_80_DATA */ + 0x00040005, /* DENALI_CTL_81_DATA */ + 0x18400000, /* DENALI_CTL_82_DATA */ + 0x00000c20, /* DENALI_CTL_83_DATA */ + 0x185030a0, /* DENALI_CTL_84_DATA */ + 0x02ec0000, /* DENALI_CTL_85_DATA */ + 0x00000176, /* DENALI_CTL_86_DATA */ + 0x00000000, /* DENALI_CTL_87_DATA */ + 0x00000000, /* DENALI_CTL_88_DATA */ + 0x00000000, /* DENALI_CTL_89_DATA */ + 0x00000000, /* DENALI_CTL_90_DATA */ + 0x00000000, /* DENALI_CTL_91_DATA */ + 0x06030300, /* DENALI_CTL_92_DATA */ + 0x00030303, /* DENALI_CTL_93_DATA */ + 0x02030200, /* DENALI_CTL_94_DATA */ + 0x00040703, /* DENALI_CTL_95_DATA */ + 0x03020302, /* DENALI_CTL_96_DATA */ + 0x02000407, /* DENALI_CTL_97_DATA */ + 0x07030203, /* DENALI_CTL_98_DATA */ + 0x00030f04, /* DENALI_CTL_99_DATA */ + 0x00070004, /* DENALI_CTL_100_DATA */ + 0x00000000, /* DENALI_CTL_101_DATA */ + 0x00000000, /* DENALI_CTL_102_DATA */ + 0x00000000, /* DENALI_CTL_103_DATA */ + 0x00000000, /* DENALI_CTL_104_DATA */ + 0x00000000, /* DENALI_CTL_105_DATA */ + 0x00000000, /* DENALI_CTL_106_DATA */ + 0x00000000, /* DENALI_CTL_107_DATA */ + 0x00010000, /* DENALI_CTL_108_DATA */ + 0x20040020, /* DENALI_CTL_109_DATA */ + 0x00200400, /* DENALI_CTL_110_DATA */ + 0x01000400, /* DENALI_CTL_111_DATA */ + 0x00000b80, /* DENALI_CTL_112_DATA */ + 0x00000000, /* DENALI_CTL_113_DATA */ + 0x00000001, /* DENALI_CTL_114_DATA */ + 0x00000002, /* DENALI_CTL_115_DATA */ + 0x0000000e, /* DENALI_CTL_116_DATA */ + 0x00000000, /* DENALI_CTL_117_DATA */ + 0x00000000, /* DENALI_CTL_118_DATA */ + 0x00000000, /* DENALI_CTL_119_DATA */ + 0x00000000, /* DENALI_CTL_120_DATA */ + 0x00000000, /* DENALI_CTL_121_DATA */ + 0x00500000, /* DENALI_CTL_122_DATA */ + 0x00640028, /* DENALI_CTL_123_DATA */ + 0x00640404, /* DENALI_CTL_124_DATA */ + 0x005000a0, /* DENALI_CTL_125_DATA */ + 0x060600c8, /* DENALI_CTL_126_DATA */ + 0x000a00c8, /* DENALI_CTL_127_DATA */ + 0x000d0005, /* DENALI_CTL_128_DATA */ + 0x000d0404, /* DENALI_CTL_129_DATA */ + 0x00000000, /* DENALI_CTL_130_DATA */ + 0x00000000, /* 
DENALI_CTL_131_DATA */ + 0x00000000, /* DENALI_CTL_132_DATA */ + 0x001400a3, /* DENALI_CTL_133_DATA */ + 0x00e30009, /* DENALI_CTL_134_DATA */ + 0x00120024, /* DENALI_CTL_135_DATA */ + 0x00040063, /* DENALI_CTL_136_DATA */ + 0x00000000, /* DENALI_CTL_137_DATA */ + 0x00310031, /* DENALI_CTL_138_DATA */ + 0x00000031, /* DENALI_CTL_139_DATA */ + 0x004d0000, /* DENALI_CTL_140_DATA */ + 0x004d004d, /* DENALI_CTL_141_DATA */ + 0x004d0000, /* DENALI_CTL_142_DATA */ + 0x004d004d, /* DENALI_CTL_143_DATA */ + 0x00010101, /* DENALI_CTL_144_DATA */ + 0x00000000, /* DENALI_CTL_145_DATA */ + 0x00000000, /* DENALI_CTL_146_DATA */ + 0x001400a3, /* DENALI_CTL_147_DATA */ + 0x00e30009, /* DENALI_CTL_148_DATA */ + 0x00120024, /* DENALI_CTL_149_DATA */ + 0x00040063, /* DENALI_CTL_150_DATA */ + 0x00000000, /* DENALI_CTL_151_DATA */ + 0x00310031, /* DENALI_CTL_152_DATA */ + 0x00000031, /* DENALI_CTL_153_DATA */ + 0x004d0000, /* DENALI_CTL_154_DATA */ + 0x004d004d, /* DENALI_CTL_155_DATA */ + 0x004d0000, /* DENALI_CTL_156_DATA */ + 0x004d004d, /* DENALI_CTL_157_DATA */ + 0x00010101, /* DENALI_CTL_158_DATA */ + 0x00000000, /* DENALI_CTL_159_DATA */ + 0x00000000, /* DENALI_CTL_160_DATA */ + 0x00000000, /* DENALI_CTL_161_DATA */ + 0x00000001, /* DENALI_CTL_162_DATA */ + 0x00000000, /* DENALI_CTL_163_DATA */ + 0x18151100, /* DENALI_CTL_164_DATA */ + 0x0000000c, /* DENALI_CTL_165_DATA */ + 0x00000000, /* DENALI_CTL_166_DATA */ + 0x00000000, /* DENALI_CTL_167_DATA */ + 0x00000000, /* DENALI_CTL_168_DATA */ + 0x00000000, /* DENALI_CTL_169_DATA */ + 0x00000000, /* DENALI_CTL_170_DATA */ + 0x00000000, /* DENALI_CTL_171_DATA */ + 0x00000000, /* DENALI_CTL_172_DATA */ + 0x00000000, /* DENALI_CTL_173_DATA */ + 0x00000000, /* DENALI_CTL_174_DATA */ + 0x00000000, /* DENALI_CTL_175_DATA */ + 0x00000000, /* DENALI_CTL_176_DATA */ + 0x00000000, /* DENALI_CTL_177_DATA */ + 0x00000000, /* DENALI_CTL_178_DATA */ + 0x00020003, /* DENALI_CTL_179_DATA */ + 0x00400100, /* DENALI_CTL_180_DATA */ + 0x000c0190, /* DENALI_CTL_181_DATA */ + 0x01000200, /* DENALI_CTL_182_DATA */ + 0x03200040, /* DENALI_CTL_183_DATA */ + 0x00020018, /* DENALI_CTL_184_DATA */ + 0x00400100, /* DENALI_CTL_185_DATA */ + 0x00080032, /* DENALI_CTL_186_DATA */ + 0x00140000, /* DENALI_CTL_187_DATA */ + 0x00030028, /* DENALI_CTL_188_DATA */ + 0x01010100, /* DENALI_CTL_189_DATA */ + 0x02000202, /* DENALI_CTL_190_DATA */ + 0x0b000002, /* DENALI_CTL_191_DATA */ + 0x01000f0f, /* DENALI_CTL_192_DATA */ + 0x00000000, /* DENALI_CTL_193_DATA */ + 0x00000000, /* DENALI_CTL_194_DATA */ + 0x00010003, /* DENALI_CTL_195_DATA */ + 0x00000c03, /* DENALI_CTL_196_DATA */ + 0x00040101, /* DENALI_CTL_197_DATA */ + 0x04010100, /* DENALI_CTL_198_DATA */ + 0x01000000, /* DENALI_CTL_199_DATA */ + 0x02010000, /* DENALI_CTL_200_DATA */ + 0x00000001, /* DENALI_CTL_201_DATA */ + 0x00000000, /* DENALI_CTL_202_DATA */ + 0x00000000, /* DENALI_CTL_203_DATA */ + 0x00000000, /* DENALI_CTL_204_DATA */ + 0x00000000, /* DENALI_CTL_205_DATA */ + 0x00000000, /* DENALI_CTL_206_DATA */ + 0x00000000, /* DENALI_CTL_207_DATA */ + 0x00000000, /* DENALI_CTL_208_DATA */ + 0x00000000, /* DENALI_CTL_209_DATA */ + 0x00000000, /* DENALI_CTL_210_DATA */ + 0x00010000, /* DENALI_CTL_211_DATA */ + 0x00000001, /* DENALI_CTL_212_DATA */ + 0x01010001, /* DENALI_CTL_213_DATA */ + 0x05040001, /* DENALI_CTL_214_DATA */ + 0x040a0703, /* DENALI_CTL_215_DATA */ + 0x02080808, /* DENALI_CTL_216_DATA */ + 0x020e000a, /* DENALI_CTL_217_DATA */ + 0x020f010b, /* DENALI_CTL_218_DATA */ + 0x000d0008, /* DENALI_CTL_219_DATA */ + 
0x00080b0a, /* DENALI_CTL_220_DATA */ + 0x03000200, /* DENALI_CTL_221_DATA */ + 0x00000100, /* DENALI_CTL_222_DATA */ + 0x00000000, /* DENALI_CTL_223_DATA */ + 0x00000000, /* DENALI_CTL_224_DATA */ + 0x0d000001, /* DENALI_CTL_225_DATA */ + 0x00000028, /* DENALI_CTL_226_DATA */ + 0x00010000, /* DENALI_CTL_227_DATA */ + 0x00000003, /* DENALI_CTL_228_DATA */ + 0x00000000, /* DENALI_CTL_229_DATA */ + 0x00000000, /* DENALI_CTL_230_DATA */ + 0x00000000, /* DENALI_CTL_231_DATA */ + 0x00000000, /* DENALI_CTL_232_DATA */ + 0x00000000, /* DENALI_CTL_233_DATA */ + 0x00000000, /* DENALI_CTL_234_DATA */ + 0x00000000, /* DENALI_CTL_235_DATA */ + 0x00000000, /* DENALI_CTL_236_DATA */ + 0x00010100, /* DENALI_CTL_237_DATA */ + 0x01000000, /* DENALI_CTL_238_DATA */ + 0x00000001, /* DENALI_CTL_239_DATA */ + 0x00000303, /* DENALI_CTL_240_DATA */ + 0x00000000, /* DENALI_CTL_241_DATA */ + 0x00000000, /* DENALI_CTL_242_DATA */ + 0x00000000, /* DENALI_CTL_243_DATA */ + 0x00000000, /* DENALI_CTL_244_DATA */ + 0x00000000, /* DENALI_CTL_245_DATA */ + 0x00000000, /* DENALI_CTL_246_DATA */ + 0x00000000, /* DENALI_CTL_247_DATA */ + 0x00000000, /* DENALI_CTL_248_DATA */ + 0x00000000, /* DENALI_CTL_249_DATA */ + 0x00000000, /* DENALI_CTL_250_DATA */ + 0x00000000, /* DENALI_CTL_251_DATA */ + 0x00000000, /* DENALI_CTL_252_DATA */ + 0x00000000, /* DENALI_CTL_253_DATA */ + 0x00000000, /* DENALI_CTL_254_DATA */ + 0x00000000, /* DENALI_CTL_255_DATA */ + 0x000556aa, /* DENALI_CTL_256_DATA */ + 0x000aaaaa, /* DENALI_CTL_257_DATA */ + 0x000aa955, /* DENALI_CTL_258_DATA */ + 0x00055555, /* DENALI_CTL_259_DATA */ + 0x000b3133, /* DENALI_CTL_260_DATA */ + 0x0004cd33, /* DENALI_CTL_261_DATA */ + 0x0004cecc, /* DENALI_CTL_262_DATA */ + 0x000b32cc, /* DENALI_CTL_263_DATA */ + 0x00010300, /* DENALI_CTL_264_DATA */ + 0x03000100, /* DENALI_CTL_265_DATA */ + 0x00000000, /* DENALI_CTL_266_DATA */ + 0x00000000, /* DENALI_CTL_267_DATA */ + 0x00000000, /* DENALI_CTL_268_DATA */ + 0x00000000, /* DENALI_CTL_269_DATA */ + 0x00000000, /* DENALI_CTL_270_DATA */ + 0x00000000, /* DENALI_CTL_271_DATA */ + 0x00000000, /* DENALI_CTL_272_DATA */ + 0x00000000, /* DENALI_CTL_273_DATA */ + 0x00ffff00, /* DENALI_CTL_274_DATA */ + 0x1a160000, /* DENALI_CTL_275_DATA */ + 0x08000012, /* DENALI_CTL_276_DATA */ + 0x00000c20, /* DENALI_CTL_277_DATA */ + 0x00000200, /* DENALI_CTL_278_DATA */ + 0x00000200, /* DENALI_CTL_279_DATA */ + 0x00000200, /* DENALI_CTL_280_DATA */ + 0x00000200, /* DENALI_CTL_281_DATA */ + 0x00000c20, /* DENALI_CTL_282_DATA */ + 0x00007940, /* DENALI_CTL_283_DATA */ + 0x18500409, /* DENALI_CTL_284_DATA */ + 0x00000200, /* DENALI_CTL_285_DATA */ + 0x00000200, /* DENALI_CTL_286_DATA */ + 0x00000200, /* DENALI_CTL_287_DATA */ + 0x00000200, /* DENALI_CTL_288_DATA */ + 0x00001850, /* DENALI_CTL_289_DATA */ + 0x0000f320, /* DENALI_CTL_290_DATA */ + 0x0176060c, /* DENALI_CTL_291_DATA */ + 0x00000200, /* DENALI_CTL_292_DATA */ + 0x00000200, /* DENALI_CTL_293_DATA */ + 0x00000200, /* DENALI_CTL_294_DATA */ + 0x00000200, /* DENALI_CTL_295_DATA */ + 0x00000176, /* DENALI_CTL_296_DATA */ + 0x00000e9c, /* DENALI_CTL_297_DATA */ + 0x02020205, /* DENALI_CTL_298_DATA */ + 0x03030202, /* DENALI_CTL_299_DATA */ + 0x00000018, /* DENALI_CTL_300_DATA */ + 0x00000000, /* DENALI_CTL_301_DATA */ + 0x00000000, /* DENALI_CTL_302_DATA */ + 0x00001403, /* DENALI_CTL_303_DATA */ + 0x00000000, /* DENALI_CTL_304_DATA */ + 0x00000000, /* DENALI_CTL_305_DATA */ + 0x00000000, /* DENALI_CTL_306_DATA */ + 0x00030000, /* DENALI_CTL_307_DATA */ + 0x000a001c, /* DENALI_CTL_308_DATA 
*/ + 0x000e0020, /* DENALI_CTL_309_DATA */ + 0x00060018, /* DENALI_CTL_310_DATA */ + 0x00000000, /* DENALI_CTL_311_DATA */ + 0x00000000, /* DENALI_CTL_312_DATA */ + 0x02000000, /* DENALI_CTL_313_DATA */ + 0x00090305, /* DENALI_CTL_314_DATA */ + 0x00050101, /* DENALI_CTL_315_DATA */ + 0x00000000, /* DENALI_CTL_316_DATA */ + 0x00000000, /* DENALI_CTL_317_DATA */ + 0x00000000, /* DENALI_CTL_318_DATA */ + 0x00000000, /* DENALI_CTL_319_DATA */ + 0x00000000, /* DENALI_CTL_320_DATA */ + 0x00000000, /* DENALI_CTL_321_DATA */ + 0x00000000, /* DENALI_CTL_322_DATA */ + 0x00000000, /* DENALI_CTL_323_DATA */ + 0x01000001, /* DENALI_CTL_324_DATA */ + 0x01010101, /* DENALI_CTL_325_DATA */ + 0x01000101, /* DENALI_CTL_326_DATA */ + 0x01000100, /* DENALI_CTL_327_DATA */ + 0x00010001, /* DENALI_CTL_328_DATA */ + 0x00010002, /* DENALI_CTL_329_DATA */ + 0x00020100, /* DENALI_CTL_330_DATA */ + 0x00000002 /* DENALI_CTL_331_DATA */ + } + }, + { + { + 0x00000b00, /* DENALI_PI_00_DATA */ + 0x00000000, /* DENALI_PI_01_DATA */ + 0x000002ec, /* DENALI_PI_02_DATA */ + 0x00000176, /* DENALI_PI_03_DATA */ + 0x000030a0, /* DENALI_PI_04_DATA */ + 0x00001850, /* DENALI_PI_05_DATA */ + 0x00001840, /* DENALI_PI_06_DATA */ + 0x01760c20, /* DENALI_PI_07_DATA */ + 0x00000200, /* DENALI_PI_08_DATA */ + 0x00000200, /* DENALI_PI_09_DATA */ + 0x00000200, /* DENALI_PI_10_DATA */ + 0x00000200, /* DENALI_PI_11_DATA */ + 0x00001850, /* DENALI_PI_12_DATA */ + 0x00000200, /* DENALI_PI_13_DATA */ + 0x00000200, /* DENALI_PI_14_DATA */ + 0x00000200, /* DENALI_PI_15_DATA */ + 0x00000200, /* DENALI_PI_16_DATA */ + 0x00000c20, /* DENALI_PI_17_DATA */ + 0x00000200, /* DENALI_PI_18_DATA */ + 0x00000200, /* DENALI_PI_19_DATA */ + 0x00000200, /* DENALI_PI_20_DATA */ + 0x00000200, /* DENALI_PI_21_DATA */ + 0x00010000, /* DENALI_PI_22_DATA */ + 0x00000007, /* DENALI_PI_23_DATA */ + 0x01000001, /* DENALI_PI_24_DATA */ + 0x00000000, /* DENALI_PI_25_DATA */ + 0x3fffffff, /* DENALI_PI_26_DATA */ + 0x00000000, /* DENALI_PI_27_DATA */ + 0x00000000, /* DENALI_PI_28_DATA */ + 0x00000000, /* DENALI_PI_29_DATA */ + 0x00000000, /* DENALI_PI_30_DATA */ + 0x00000000, /* DENALI_PI_31_DATA */ + 0x00000000, /* DENALI_PI_32_DATA */ + 0x00000000, /* DENALI_PI_33_DATA */ + 0x00000000, /* DENALI_PI_34_DATA */ + 0x00000000, /* DENALI_PI_35_DATA */ + 0x00000000, /* DENALI_PI_36_DATA */ + 0x00000000, /* DENALI_PI_37_DATA */ + 0x00000000, /* DENALI_PI_38_DATA */ + 0x00000000, /* DENALI_PI_39_DATA */ + 0x00000000, /* DENALI_PI_40_DATA */ + 0x0f000101, /* DENALI_PI_41_DATA */ + 0x082b3223, /* DENALI_PI_42_DATA */ + 0x080c0004, /* DENALI_PI_43_DATA */ + 0x00061c00, /* DENALI_PI_44_DATA */ + 0x00000214, /* DENALI_PI_45_DATA */ + 0x00bb0007, /* DENALI_PI_46_DATA */ + 0x0c280068, /* DENALI_PI_47_DATA */ + 0x06100034, /* DENALI_PI_48_DATA */ + 0x00000500, /* DENALI_PI_49_DATA */ + 0x00000000, /* DENALI_PI_50_DATA */ + 0x00000000, /* DENALI_PI_51_DATA */ + 0x00000000, /* DENALI_PI_52_DATA */ + 0x00000000, /* DENALI_PI_53_DATA */ + 0x00000000, /* DENALI_PI_54_DATA */ + 0x00000000, /* DENALI_PI_55_DATA */ + 0x00000000, /* DENALI_PI_56_DATA */ + 0x00000000, /* DENALI_PI_57_DATA */ + 0x04040100, /* DENALI_PI_58_DATA */ + 0x0a000004, /* DENALI_PI_59_DATA */ + 0x00000128, /* DENALI_PI_60_DATA */ + 0x00000000, /* DENALI_PI_61_DATA */ + 0x0003000f, /* DENALI_PI_62_DATA */ + 0x00000018, /* DENALI_PI_63_DATA */ + 0x00000000, /* DENALI_PI_64_DATA */ + 0x00000000, /* DENALI_PI_65_DATA */ + 0x00060002, /* DENALI_PI_66_DATA */ + 0x00010001, /* DENALI_PI_67_DATA */ + 0x00000101, /* 
DENALI_PI_68_DATA */ + 0x00020001, /* DENALI_PI_69_DATA */ + 0x00080004, /* DENALI_PI_70_DATA */ + 0x00000000, /* DENALI_PI_71_DATA */ + 0x05030000, /* DENALI_PI_72_DATA */ + 0x070a0404, /* DENALI_PI_73_DATA */ + 0x00000000, /* DENALI_PI_74_DATA */ + 0x00000000, /* DENALI_PI_75_DATA */ + 0x00000000, /* DENALI_PI_76_DATA */ + 0x000f0f00, /* DENALI_PI_77_DATA */ + 0x0000001e, /* DENALI_PI_78_DATA */ + 0x00000000, /* DENALI_PI_79_DATA */ + 0x01010300, /* DENALI_PI_80_DATA */ + 0x00000000, /* DENALI_PI_81_DATA */ + 0x00000000, /* DENALI_PI_82_DATA */ + 0x01000000, /* DENALI_PI_83_DATA */ + 0x00000101, /* DENALI_PI_84_DATA */ + 0x55555a5a, /* DENALI_PI_85_DATA */ + 0x55555a5a, /* DENALI_PI_86_DATA */ + 0x55555a5a, /* DENALI_PI_87_DATA */ + 0x55555a5a, /* DENALI_PI_88_DATA */ + 0x0c050001, /* DENALI_PI_89_DATA */ + 0x06020009, /* DENALI_PI_90_DATA */ + 0x00010004, /* DENALI_PI_91_DATA */ + 0x00000203, /* DENALI_PI_92_DATA */ + 0x00030000, /* DENALI_PI_93_DATA */ + 0x170f0000, /* DENALI_PI_94_DATA */ + 0x00060018, /* DENALI_PI_95_DATA */ + 0x000e0020, /* DENALI_PI_96_DATA */ + 0x000a001c, /* DENALI_PI_97_DATA */ + 0x00000000, /* DENALI_PI_98_DATA */ + 0x00000000, /* DENALI_PI_99_DATA */ + 0x00000100, /* DENALI_PI_100_DATA */ + 0x140a0000, /* DENALI_PI_101_DATA */ + 0x000d010a, /* DENALI_PI_102_DATA */ + 0x0100c802, /* DENALI_PI_103_DATA */ + 0x010a0064, /* DENALI_PI_104_DATA */ + 0x000e0100, /* DENALI_PI_105_DATA */ + 0x0100000e, /* DENALI_PI_106_DATA */ + 0x00c900c9, /* DENALI_PI_107_DATA */ + 0x00650100, /* DENALI_PI_108_DATA */ + 0x1e1a0065, /* DENALI_PI_109_DATA */ + 0x10010204, /* DENALI_PI_110_DATA */ + 0x06070605, /* DENALI_PI_111_DATA */ + 0x20000202, /* DENALI_PI_112_DATA */ + 0x00201000, /* DENALI_PI_113_DATA */ + 0x00201000, /* DENALI_PI_114_DATA */ + 0x04041000, /* DENALI_PI_115_DATA */ + 0x10020100, /* DENALI_PI_116_DATA */ + 0x0003010c, /* DENALI_PI_117_DATA */ + 0x004b004a, /* DENALI_PI_118_DATA */ + 0x1a0f0000, /* DENALI_PI_119_DATA */ + 0x0102041e, /* DENALI_PI_120_DATA */ + 0x34000000, /* DENALI_PI_121_DATA */ + 0x00000000, /* DENALI_PI_122_DATA */ + 0x00000000, /* DENALI_PI_123_DATA */ + 0x00010000, /* DENALI_PI_124_DATA */ + 0x00000400, /* DENALI_PI_125_DATA */ + 0x00310000, /* DENALI_PI_126_DATA */ + 0x004d4d00, /* DENALI_PI_127_DATA */ + 0x00120024, /* DENALI_PI_128_DATA */ + 0x4d000031, /* DENALI_PI_129_DATA */ + 0x0000144d, /* DENALI_PI_130_DATA */ + 0x00310009, /* DENALI_PI_131_DATA */ + 0x004d4d00, /* DENALI_PI_132_DATA */ + 0x00000004, /* DENALI_PI_133_DATA */ + 0x4d000031, /* DENALI_PI_134_DATA */ + 0x0000244d, /* DENALI_PI_135_DATA */ + 0x00310012, /* DENALI_PI_136_DATA */ + 0x004d4d00, /* DENALI_PI_137_DATA */ + 0x00090014, /* DENALI_PI_138_DATA */ + 0x4d000031, /* DENALI_PI_139_DATA */ + 0x0004004d, /* DENALI_PI_140_DATA */ + 0x00310000, /* DENALI_PI_141_DATA */ + 0x004d4d00, /* DENALI_PI_142_DATA */ + 0x00120024, /* DENALI_PI_143_DATA */ + 0x4d000031, /* DENALI_PI_144_DATA */ + 0x0000144d, /* DENALI_PI_145_DATA */ + 0x00310009, /* DENALI_PI_146_DATA */ + 0x004d4d00, /* DENALI_PI_147_DATA */ + 0x00000004, /* DENALI_PI_148_DATA */ + 0x4d000031, /* DENALI_PI_149_DATA */ + 0x0000244d, /* DENALI_PI_150_DATA */ + 0x00310012, /* DENALI_PI_151_DATA */ + 0x004d4d00, /* DENALI_PI_152_DATA */ + 0x00090014, /* DENALI_PI_153_DATA */ + 0x4d000031, /* DENALI_PI_154_DATA */ + 0x0200004d, /* DENALI_PI_155_DATA */ + 0x00c8000d, /* DENALI_PI_156_DATA */ + 0x08080064, /* DENALI_PI_157_DATA */ + 0x040a0404, /* DENALI_PI_158_DATA */ + 0x03000d92, /* DENALI_PI_159_DATA */ + 0x010a2001, 
/* DENALI_PI_160_DATA */ + 0x0f11080a, /* DENALI_PI_161_DATA */ + 0x0000110a, /* DENALI_PI_162_DATA */ + 0x2200d92e, /* DENALI_PI_163_DATA */ + 0x080c2003, /* DENALI_PI_164_DATA */ + 0x0809080a, /* DENALI_PI_165_DATA */ + 0x00000a0a, /* DENALI_PI_166_DATA */ + 0x11006c97, /* DENALI_PI_167_DATA */ + 0x040a2002, /* DENALI_PI_168_DATA */ + 0x0200020a, /* DENALI_PI_169_DATA */ + 0x02000200, /* DENALI_PI_170_DATA */ + 0x02000200, /* DENALI_PI_171_DATA */ + 0x02000200, /* DENALI_PI_172_DATA */ + 0x02000200, /* DENALI_PI_173_DATA */ + 0x00000000, /* DENALI_PI_174_DATA */ + 0x00000000, /* DENALI_PI_175_DATA */ + 0x00000000, /* DENALI_PI_176_DATA */ + 0x00000000, /* DENALI_PI_177_DATA */ + 0x00000000, /* DENALI_PI_178_DATA */ + 0x00000000, /* DENALI_PI_179_DATA */ + 0x00000000, /* DENALI_PI_180_DATA */ + 0x00000000, /* DENALI_PI_181_DATA */ + 0x00000000, /* DENALI_PI_182_DATA */ + 0x00000000, /* DENALI_PI_183_DATA */ + 0x00000000, /* DENALI_PI_184_DATA */ + 0x00000000, /* DENALI_PI_185_DATA */ + 0x01000400, /* DENALI_PI_186_DATA */ + 0x00017600, /* DENALI_PI_187_DATA */ + 0x00000e9c, /* DENALI_PI_188_DATA */ + 0x00001850, /* DENALI_PI_189_DATA */ + 0x0000f320, /* DENALI_PI_190_DATA */ + 0x00000c20, /* DENALI_PI_191_DATA */ + 0x00007940, /* DENALI_PI_192_DATA */ + 0x08000000, /* DENALI_PI_193_DATA */ + 0x00000100, /* DENALI_PI_194_DATA */ + 0x00000000, /* DENALI_PI_195_DATA */ + 0x00000000, /* DENALI_PI_196_DATA */ + 0x00000000, /* DENALI_PI_197_DATA */ + 0x00000000, /* DENALI_PI_198_DATA */ + 0x00000002 /* DENALI_PI_199_DATA */ + } + }, + { + { + 0x76543210, /* DENALI_PHY_00_DATA */ + 0x0004f008, /* DENALI_PHY_01_DATA */ + 0x00020119, /* DENALI_PHY_02_DATA */ + 0x00000000, /* DENALI_PHY_03_DATA */ + 0x00000000, /* DENALI_PHY_04_DATA */ + 0x00010000, /* DENALI_PHY_05_DATA */ + 0x01665555, /* DENALI_PHY_06_DATA */ + 0x03665555, /* DENALI_PHY_07_DATA */ + 0x00010f00, /* DENALI_PHY_08_DATA */ + 0x04000100, /* DENALI_PHY_09_DATA */ + 0x00000001, /* DENALI_PHY_10_DATA */ + 0x00170180, /* DENALI_PHY_11_DATA */ + 0x00cc0201, /* DENALI_PHY_12_DATA */ + 0x00030066, /* DENALI_PHY_13_DATA */ + 0x00000000, /* DENALI_PHY_14_DATA */ + 0x00000000, /* DENALI_PHY_15_DATA */ + 0x00000000, /* DENALI_PHY_16_DATA */ + 0x00000000, /* DENALI_PHY_17_DATA */ + 0x00000000, /* DENALI_PHY_18_DATA */ + 0x00000000, /* DENALI_PHY_19_DATA */ + 0x00000000, /* DENALI_PHY_20_DATA */ + 0x00000000, /* DENALI_PHY_21_DATA */ + 0x04080000, /* DENALI_PHY_22_DATA */ + 0x04080400, /* DENALI_PHY_23_DATA */ + 0x30000000, /* DENALI_PHY_24_DATA */ + 0x0c00c007, /* DENALI_PHY_25_DATA */ + 0x00000100, /* DENALI_PHY_26_DATA */ + 0x00000000, /* DENALI_PHY_27_DATA */ + 0xfd02fe01, /* DENALI_PHY_28_DATA */ + 0xf708fb04, /* DENALI_PHY_29_DATA */ + 0xdf20ef10, /* DENALI_PHY_30_DATA */ + 0x7f80bf40, /* DENALI_PHY_31_DATA */ + 0x0001aaaa, /* DENALI_PHY_32_DATA */ + 0x00000000, /* DENALI_PHY_33_DATA */ + 0x00000000, /* DENALI_PHY_34_DATA */ + 0x00000000, /* DENALI_PHY_35_DATA */ + 0x00000000, /* DENALI_PHY_36_DATA */ + 0x00000000, /* DENALI_PHY_37_DATA */ + 0x00000000, /* DENALI_PHY_38_DATA */ + 0x00000000, /* DENALI_PHY_39_DATA */ + 0x00000000, /* DENALI_PHY_40_DATA */ + 0x00000000, /* DENALI_PHY_41_DATA */ + 0x00000000, /* DENALI_PHY_42_DATA */ + 0x00000000, /* DENALI_PHY_43_DATA */ + 0x00000000, /* DENALI_PHY_44_DATA */ + 0x00000000, /* DENALI_PHY_45_DATA */ + 0x00000000, /* DENALI_PHY_46_DATA */ + 0x00000000, /* DENALI_PHY_47_DATA */ + 0x00000000, /* DENALI_PHY_48_DATA */ + 0x00000000, /* DENALI_PHY_49_DATA */ + 0x00000000, /* DENALI_PHY_50_DATA */ + 
0x00000000, /* DENALI_PHY_51_DATA */ + 0x00200000, /* DENALI_PHY_52_DATA */ + 0x00000000, /* DENALI_PHY_53_DATA */ + 0x00000000, /* DENALI_PHY_54_DATA */ + 0x00000000, /* DENALI_PHY_55_DATA */ + 0x00000000, /* DENALI_PHY_56_DATA */ + 0x00000000, /* DENALI_PHY_57_DATA */ + 0x00000000, /* DENALI_PHY_58_DATA */ + 0x02800280, /* DENALI_PHY_59_DATA */ + 0x02800280, /* DENALI_PHY_60_DATA */ + 0x02800280, /* DENALI_PHY_61_DATA */ + 0x02800280, /* DENALI_PHY_62_DATA */ + 0x00000280, /* DENALI_PHY_63_DATA */ + 0x00000000, /* DENALI_PHY_64_DATA */ + 0x00000000, /* DENALI_PHY_65_DATA */ + 0x00000000, /* DENALI_PHY_66_DATA */ + 0x00000000, /* DENALI_PHY_67_DATA */ + 0x00800000, /* DENALI_PHY_68_DATA */ + 0x00800080, /* DENALI_PHY_69_DATA */ + 0x00800080, /* DENALI_PHY_70_DATA */ + 0x00800080, /* DENALI_PHY_71_DATA */ + 0x00800080, /* DENALI_PHY_72_DATA */ + 0x00800080, /* DENALI_PHY_73_DATA */ + 0x00800080, /* DENALI_PHY_74_DATA */ + 0x00800080, /* DENALI_PHY_75_DATA */ + 0x00800080, /* DENALI_PHY_76_DATA */ + 0x01190080, /* DENALI_PHY_77_DATA */ + 0x00000001, /* DENALI_PHY_78_DATA */ + 0x00000000, /* DENALI_PHY_79_DATA */ + 0x00000000, /* DENALI_PHY_80_DATA */ + 0x00000200, /* DENALI_PHY_81_DATA */ + 0x00000000, /* DENALI_PHY_82_DATA */ + 0x51315152, /* DENALI_PHY_83_DATA */ + 0xc0003150, /* DENALI_PHY_84_DATA */ + 0x010000c0, /* DENALI_PHY_85_DATA */ + 0x00100000, /* DENALI_PHY_86_DATA */ + 0x07044204, /* DENALI_PHY_87_DATA */ + 0x000f0c18, /* DENALI_PHY_88_DATA */ + 0x01000140, /* DENALI_PHY_89_DATA */ + 0x00000c10, /* DENALI_PHY_90_DATA */ + 0x00000000, /* DENALI_PHY_91_DATA */ + 0x00000000, /* DENALI_PHY_92_DATA */ + 0x00000000, /* DENALI_PHY_93_DATA */ + 0x00000000, /* DENALI_PHY_94_DATA */ + 0x00000000, /* DENALI_PHY_95_DATA */ + 0x00000000, /* DENALI_PHY_96_DATA */ + 0x00000000, /* DENALI_PHY_97_DATA */ + 0x00000000, /* DENALI_PHY_98_DATA */ + 0x00000000, /* DENALI_PHY_99_DATA */ + 0x00000000, /* DENALI_PHY_100_DATA */ + 0x00000000, /* DENALI_PHY_101_DATA */ + 0x00000000, /* DENALI_PHY_102_DATA */ + 0x00000000, /* DENALI_PHY_103_DATA */ + 0x00000000, /* DENALI_PHY_104_DATA */ + 0x00000000, /* DENALI_PHY_105_DATA */ + 0x00000000, /* DENALI_PHY_106_DATA */ + 0x00000000, /* DENALI_PHY_107_DATA */ + 0x00000000, /* DENALI_PHY_108_DATA */ + 0x00000000, /* DENALI_PHY_109_DATA */ + 0x00000000, /* DENALI_PHY_110_DATA */ + 0x00000000, /* DENALI_PHY_111_DATA */ + 0x00000000, /* DENALI_PHY_112_DATA */ + 0x00000000, /* DENALI_PHY_113_DATA */ + 0x00000000, /* DENALI_PHY_114_DATA */ + 0x00000000, /* DENALI_PHY_115_DATA */ + 0x00000000, /* DENALI_PHY_116_DATA */ + 0x00000000, /* DENALI_PHY_117_DATA */ + 0x00000000, /* DENALI_PHY_118_DATA */ + 0x00000000, /* DENALI_PHY_119_DATA */ + 0x00000000, /* DENALI_PHY_120_DATA */ + 0x00000000, /* DENALI_PHY_121_DATA */ + 0x00000000, /* DENALI_PHY_122_DATA */ + 0x00000000, /* DENALI_PHY_123_DATA */ + 0x00000000, /* DENALI_PHY_124_DATA */ + 0x00000000, /* DENALI_PHY_125_DATA */ + 0x00000000, /* DENALI_PHY_126_DATA */ + 0x00000000, /* DENALI_PHY_127_DATA */ + 0x76543210, /* DENALI_PHY_128_DATA */ + 0x0004f008, /* DENALI_PHY_129_DATA */ + 0x00020119, /* DENALI_PHY_130_DATA */ + 0x00000000, /* DENALI_PHY_131_DATA */ + 0x00000000, /* DENALI_PHY_132_DATA */ + 0x00010000, /* DENALI_PHY_133_DATA */ + 0x01665555, /* DENALI_PHY_134_DATA */ + 0x03665555, /* DENALI_PHY_135_DATA */ + 0x00010f00, /* DENALI_PHY_136_DATA */ + 0x04000100, /* DENALI_PHY_137_DATA */ + 0x00000001, /* DENALI_PHY_138_DATA */ + 0x00170180, /* DENALI_PHY_139_DATA */ + 0x00cc0201, /* DENALI_PHY_140_DATA */ + 
0x00030066, /* DENALI_PHY_141_DATA */ + 0x00000000, /* DENALI_PHY_142_DATA */ + 0x00000000, /* DENALI_PHY_143_DATA */ + 0x00000000, /* DENALI_PHY_144_DATA */ + 0x00000000, /* DENALI_PHY_145_DATA */ + 0x00000000, /* DENALI_PHY_146_DATA */ + 0x00000000, /* DENALI_PHY_147_DATA */ + 0x00000000, /* DENALI_PHY_148_DATA */ + 0x00000000, /* DENALI_PHY_149_DATA */ + 0x04080000, /* DENALI_PHY_150_DATA */ + 0x04080400, /* DENALI_PHY_151_DATA */ + 0x30000000, /* DENALI_PHY_152_DATA */ + 0x0c00c007, /* DENALI_PHY_153_DATA */ + 0x00000100, /* DENALI_PHY_154_DATA */ + 0x00000000, /* DENALI_PHY_155_DATA */ + 0xfd02fe01, /* DENALI_PHY_156_DATA */ + 0xf708fb04, /* DENALI_PHY_157_DATA */ + 0xdf20ef10, /* DENALI_PHY_158_DATA */ + 0x7f80bf40, /* DENALI_PHY_159_DATA */ + 0x0000aaaa, /* DENALI_PHY_160_DATA */ + 0x00000000, /* DENALI_PHY_161_DATA */ + 0x00000000, /* DENALI_PHY_162_DATA */ + 0x00000000, /* DENALI_PHY_163_DATA */ + 0x00000000, /* DENALI_PHY_164_DATA */ + 0x00000000, /* DENALI_PHY_165_DATA */ + 0x00000000, /* DENALI_PHY_166_DATA */ + 0x00000000, /* DENALI_PHY_167_DATA */ + 0x00000000, /* DENALI_PHY_168_DATA */ + 0x00000000, /* DENALI_PHY_169_DATA */ + 0x00000000, /* DENALI_PHY_170_DATA */ + 0x00000000, /* DENALI_PHY_171_DATA */ + 0x00000000, /* DENALI_PHY_172_DATA */ + 0x00000000, /* DENALI_PHY_173_DATA */ + 0x00000000, /* DENALI_PHY_174_DATA */ + 0x00000000, /* DENALI_PHY_175_DATA */ + 0x00000000, /* DENALI_PHY_176_DATA */ + 0x00000000, /* DENALI_PHY_177_DATA */ + 0x00000000, /* DENALI_PHY_178_DATA */ + 0x00000000, /* DENALI_PHY_179_DATA */ + 0x00200000, /* DENALI_PHY_180_DATA */ + 0x00000000, /* DENALI_PHY_181_DATA */ + 0x00000000, /* DENALI_PHY_182_DATA */ + 0x00000000, /* DENALI_PHY_183_DATA */ + 0x00000000, /* DENALI_PHY_184_DATA */ + 0x00000000, /* DENALI_PHY_185_DATA */ + 0x00000000, /* DENALI_PHY_186_DATA */ + 0x02800280, /* DENALI_PHY_187_DATA */ + 0x02800280, /* DENALI_PHY_188_DATA */ + 0x02800280, /* DENALI_PHY_189_DATA */ + 0x02800280, /* DENALI_PHY_190_DATA */ + 0x00000280, /* DENALI_PHY_191_DATA */ + 0x00000000, /* DENALI_PHY_192_DATA */ + 0x00000000, /* DENALI_PHY_193_DATA */ + 0x00000000, /* DENALI_PHY_194_DATA */ + 0x00000000, /* DENALI_PHY_195_DATA */ + 0x00800000, /* DENALI_PHY_196_DATA */ + 0x00800080, /* DENALI_PHY_197_DATA */ + 0x00800080, /* DENALI_PHY_198_DATA */ + 0x00800080, /* DENALI_PHY_199_DATA */ + 0x00800080, /* DENALI_PHY_200_DATA */ + 0x00800080, /* DENALI_PHY_201_DATA */ + 0x00800080, /* DENALI_PHY_202_DATA */ + 0x00800080, /* DENALI_PHY_203_DATA */ + 0x00800080, /* DENALI_PHY_204_DATA */ + 0x01190080, /* DENALI_PHY_205_DATA */ + 0x00000001, /* DENALI_PHY_206_DATA */ + 0x00000000, /* DENALI_PHY_207_DATA */ + 0x00000000, /* DENALI_PHY_208_DATA */ + 0x00000200, /* DENALI_PHY_209_DATA */ + 0x00000000, /* DENALI_PHY_210_DATA */ + 0x51315152, /* DENALI_PHY_211_DATA */ + 0xc0003150, /* DENALI_PHY_212_DATA */ + 0x010000c0, /* DENALI_PHY_213_DATA */ + 0x00100000, /* DENALI_PHY_214_DATA */ + 0x07044204, /* DENALI_PHY_215_DATA */ + 0x000f0c18, /* DENALI_PHY_216_DATA */ + 0x01000140, /* DENALI_PHY_217_DATA */ + 0x00000c10, /* DENALI_PHY_218_DATA */ + 0x00000000, /* DENALI_PHY_219_DATA */ + 0x00000000, /* DENALI_PHY_220_DATA */ + 0x00000000, /* DENALI_PHY_221_DATA */ + 0x00000000, /* DENALI_PHY_222_DATA */ + 0x00000000, /* DENALI_PHY_223_DATA */ + 0x00000000, /* DENALI_PHY_224_DATA */ + 0x00000000, /* DENALI_PHY_225_DATA */ + 0x00000000, /* DENALI_PHY_226_DATA */ + 0x00000000, /* DENALI_PHY_227_DATA */ + 0x00000000, /* DENALI_PHY_228_DATA */ + 0x00000000, /* DENALI_PHY_229_DATA 
*/ + 0x00000000, /* DENALI_PHY_230_DATA */ + 0x00000000, /* DENALI_PHY_231_DATA */ + 0x00000000, /* DENALI_PHY_232_DATA */ + 0x00000000, /* DENALI_PHY_233_DATA */ + 0x00000000, /* DENALI_PHY_234_DATA */ + 0x00000000, /* DENALI_PHY_235_DATA */ + 0x00000000, /* DENALI_PHY_236_DATA */ + 0x00000000, /* DENALI_PHY_237_DATA */ + 0x00000000, /* DENALI_PHY_238_DATA */ + 0x00000000, /* DENALI_PHY_239_DATA */ + 0x00000000, /* DENALI_PHY_240_DATA */ + 0x00000000, /* DENALI_PHY_241_DATA */ + 0x00000000, /* DENALI_PHY_242_DATA */ + 0x00000000, /* DENALI_PHY_243_DATA */ + 0x00000000, /* DENALI_PHY_244_DATA */ + 0x00000000, /* DENALI_PHY_245_DATA */ + 0x00000000, /* DENALI_PHY_246_DATA */ + 0x00000000, /* DENALI_PHY_247_DATA */ + 0x00000000, /* DENALI_PHY_248_DATA */ + 0x00000000, /* DENALI_PHY_249_DATA */ + 0x00000000, /* DENALI_PHY_250_DATA */ + 0x00000000, /* DENALI_PHY_251_DATA */ + 0x00000000, /* DENALI_PHY_252_DATA */ + 0x00000000, /* DENALI_PHY_253_DATA */ + 0x00000000, /* DENALI_PHY_254_DATA */ + 0x00000000, /* DENALI_PHY_255_DATA */ + 0x76543210, /* DENALI_PHY_256_DATA */ + 0x0004f008, /* DENALI_PHY_257_DATA */ + 0x00020119, /* DENALI_PHY_258_DATA */ + 0x00000000, /* DENALI_PHY_259_DATA */ + 0x00000000, /* DENALI_PHY_260_DATA */ + 0x00010000, /* DENALI_PHY_261_DATA */ + 0x01665555, /* DENALI_PHY_262_DATA */ + 0x03665555, /* DENALI_PHY_263_DATA */ + 0x00010f00, /* DENALI_PHY_264_DATA */ + 0x04000100, /* DENALI_PHY_265_DATA */ + 0x00000001, /* DENALI_PHY_266_DATA */ + 0x00170180, /* DENALI_PHY_267_DATA */ + 0x00cc0201, /* DENALI_PHY_268_DATA */ + 0x00030066, /* DENALI_PHY_269_DATA */ + 0x00000000, /* DENALI_PHY_270_DATA */ + 0x00000000, /* DENALI_PHY_271_DATA */ + 0x00000000, /* DENALI_PHY_272_DATA */ + 0x00000000, /* DENALI_PHY_273_DATA */ + 0x00000000, /* DENALI_PHY_274_DATA */ + 0x00000000, /* DENALI_PHY_275_DATA */ + 0x00000000, /* DENALI_PHY_276_DATA */ + 0x00000000, /* DENALI_PHY_277_DATA */ + 0x04080000, /* DENALI_PHY_278_DATA */ + 0x04080400, /* DENALI_PHY_279_DATA */ + 0x30000000, /* DENALI_PHY_280_DATA */ + 0x0c00c007, /* DENALI_PHY_281_DATA */ + 0x00000100, /* DENALI_PHY_282_DATA */ + 0x00000000, /* DENALI_PHY_283_DATA */ + 0xfd02fe01, /* DENALI_PHY_284_DATA */ + 0xf708fb04, /* DENALI_PHY_285_DATA */ + 0xdf20ef10, /* DENALI_PHY_286_DATA */ + 0x7f80bf40, /* DENALI_PHY_287_DATA */ + 0x0001aaaa, /* DENALI_PHY_288_DATA */ + 0x00000000, /* DENALI_PHY_289_DATA */ + 0x00000000, /* DENALI_PHY_290_DATA */ + 0x00000000, /* DENALI_PHY_291_DATA */ + 0x00000000, /* DENALI_PHY_292_DATA */ + 0x00000000, /* DENALI_PHY_293_DATA */ + 0x00000000, /* DENALI_PHY_294_DATA */ + 0x00000000, /* DENALI_PHY_295_DATA */ + 0x00000000, /* DENALI_PHY_296_DATA */ + 0x00000000, /* DENALI_PHY_297_DATA */ + 0x00000000, /* DENALI_PHY_298_DATA */ + 0x00000000, /* DENALI_PHY_299_DATA */ + 0x00000000, /* DENALI_PHY_300_DATA */ + 0x00000000, /* DENALI_PHY_301_DATA */ + 0x00000000, /* DENALI_PHY_302_DATA */ + 0x00000000, /* DENALI_PHY_303_DATA */ + 0x00000000, /* DENALI_PHY_304_DATA */ + 0x00000000, /* DENALI_PHY_305_DATA */ + 0x00000000, /* DENALI_PHY_306_DATA */ + 0x00000000, /* DENALI_PHY_307_DATA */ + 0x00200000, /* DENALI_PHY_308_DATA */ + 0x00000000, /* DENALI_PHY_309_DATA */ + 0x00000000, /* DENALI_PHY_310_DATA */ + 0x00000000, /* DENALI_PHY_311_DATA */ + 0x00000000, /* DENALI_PHY_312_DATA */ + 0x00000000, /* DENALI_PHY_313_DATA */ + 0x00000000, /* DENALI_PHY_314_DATA */ + 0x02800280, /* DENALI_PHY_315_DATA */ + 0x02800280, /* DENALI_PHY_316_DATA */ + 0x02800280, /* DENALI_PHY_317_DATA */ + 0x02800280, /* 
DENALI_PHY_318_DATA */ + 0x00000280, /* DENALI_PHY_319_DATA */ + 0x00000000, /* DENALI_PHY_320_DATA */ + 0x00000000, /* DENALI_PHY_321_DATA */ + 0x00000000, /* DENALI_PHY_322_DATA */ + 0x00000000, /* DENALI_PHY_323_DATA */ + 0x00800000, /* DENALI_PHY_324_DATA */ + 0x00800080, /* DENALI_PHY_325_DATA */ + 0x00800080, /* DENALI_PHY_326_DATA */ + 0x00800080, /* DENALI_PHY_327_DATA */ + 0x00800080, /* DENALI_PHY_328_DATA */ + 0x00800080, /* DENALI_PHY_329_DATA */ + 0x00800080, /* DENALI_PHY_330_DATA */ + 0x00800080, /* DENALI_PHY_331_DATA */ + 0x00800080, /* DENALI_PHY_332_DATA */ + 0x01190080, /* DENALI_PHY_333_DATA */ + 0x00000001, /* DENALI_PHY_334_DATA */ + 0x00000000, /* DENALI_PHY_335_DATA */ + 0x00000000, /* DENALI_PHY_336_DATA */ + 0x00000200, /* DENALI_PHY_337_DATA */ + 0x00000000, /* DENALI_PHY_338_DATA */ + 0x51315152, /* DENALI_PHY_339_DATA */ + 0xc0003150, /* DENALI_PHY_340_DATA */ + 0x010000c0, /* DENALI_PHY_341_DATA */ + 0x00100000, /* DENALI_PHY_342_DATA */ + 0x07044204, /* DENALI_PHY_343_DATA */ + 0x000f0c18, /* DENALI_PHY_344_DATA */ + 0x01000140, /* DENALI_PHY_345_DATA */ + 0x00000c10, /* DENALI_PHY_346_DATA */ + 0x00000000, /* DENALI_PHY_347_DATA */ + 0x00000000, /* DENALI_PHY_348_DATA */ + 0x00000000, /* DENALI_PHY_349_DATA */ + 0x00000000, /* DENALI_PHY_350_DATA */ + 0x00000000, /* DENALI_PHY_351_DATA */ + 0x00000000, /* DENALI_PHY_352_DATA */ + 0x00000000, /* DENALI_PHY_353_DATA */ + 0x00000000, /* DENALI_PHY_354_DATA */ + 0x00000000, /* DENALI_PHY_355_DATA */ + 0x00000000, /* DENALI_PHY_356_DATA */ + 0x00000000, /* DENALI_PHY_357_DATA */ + 0x00000000, /* DENALI_PHY_358_DATA */ + 0x00000000, /* DENALI_PHY_359_DATA */ + 0x00000000, /* DENALI_PHY_360_DATA */ + 0x00000000, /* DENALI_PHY_361_DATA */ + 0x00000000, /* DENALI_PHY_362_DATA */ + 0x00000000, /* DENALI_PHY_363_DATA */ + 0x00000000, /* DENALI_PHY_364_DATA */ + 0x00000000, /* DENALI_PHY_365_DATA */ + 0x00000000, /* DENALI_PHY_366_DATA */ + 0x00000000, /* DENALI_PHY_367_DATA */ + 0x00000000, /* DENALI_PHY_368_DATA */ + 0x00000000, /* DENALI_PHY_369_DATA */ + 0x00000000, /* DENALI_PHY_370_DATA */ + 0x00000000, /* DENALI_PHY_371_DATA */ + 0x00000000, /* DENALI_PHY_372_DATA */ + 0x00000000, /* DENALI_PHY_373_DATA */ + 0x00000000, /* DENALI_PHY_374_DATA */ + 0x00000000, /* DENALI_PHY_375_DATA */ + 0x00000000, /* DENALI_PHY_376_DATA */ + 0x00000000, /* DENALI_PHY_377_DATA */ + 0x00000000, /* DENALI_PHY_378_DATA */ + 0x00000000, /* DENALI_PHY_379_DATA */ + 0x00000000, /* DENALI_PHY_380_DATA */ + 0x00000000, /* DENALI_PHY_381_DATA */ + 0x00000000, /* DENALI_PHY_382_DATA */ + 0x00000000, /* DENALI_PHY_383_DATA */ + 0x76543210, /* DENALI_PHY_384_DATA */ + 0x0004f008, /* DENALI_PHY_385_DATA */ + 0x00020119, /* DENALI_PHY_386_DATA */ + 0x00000000, /* DENALI_PHY_387_DATA */ + 0x00000000, /* DENALI_PHY_388_DATA */ + 0x00010000, /* DENALI_PHY_389_DATA */ + 0x01665555, /* DENALI_PHY_390_DATA */ + 0x03665555, /* DENALI_PHY_391_DATA */ + 0x00010f00, /* DENALI_PHY_392_DATA */ + 0x04000100, /* DENALI_PHY_393_DATA */ + 0x00000001, /* DENALI_PHY_394_DATA */ + 0x00170180, /* DENALI_PHY_395_DATA */ + 0x00cc0201, /* DENALI_PHY_396_DATA */ + 0x00030066, /* DENALI_PHY_397_DATA */ + 0x00000000, /* DENALI_PHY_398_DATA */ + 0x00000000, /* DENALI_PHY_399_DATA */ + 0x00000000, /* DENALI_PHY_400_DATA */ + 0x00000000, /* DENALI_PHY_401_DATA */ + 0x00000000, /* DENALI_PHY_402_DATA */ + 0x00000000, /* DENALI_PHY_403_DATA */ + 0x00000000, /* DENALI_PHY_404_DATA */ + 0x00000000, /* DENALI_PHY_405_DATA */ + 0x04080000, /* DENALI_PHY_406_DATA */ + 
0x04080400, /* DENALI_PHY_407_DATA */ + 0x30000000, /* DENALI_PHY_408_DATA */ + 0x0c00c007, /* DENALI_PHY_409_DATA */ + 0x00000100, /* DENALI_PHY_410_DATA */ + 0x00000000, /* DENALI_PHY_411_DATA */ + 0xfd02fe01, /* DENALI_PHY_412_DATA */ + 0xf708fb04, /* DENALI_PHY_413_DATA */ + 0xdf20ef10, /* DENALI_PHY_414_DATA */ + 0x7f80bf40, /* DENALI_PHY_415_DATA */ + 0x0000aaaa, /* DENALI_PHY_416_DATA */ + 0x00000000, /* DENALI_PHY_417_DATA */ + 0x00000000, /* DENALI_PHY_418_DATA */ + 0x00000000, /* DENALI_PHY_419_DATA */ + 0x00000000, /* DENALI_PHY_420_DATA */ + 0x00000000, /* DENALI_PHY_421_DATA */ + 0x00000000, /* DENALI_PHY_422_DATA */ + 0x00000000, /* DENALI_PHY_423_DATA */ + 0x00000000, /* DENALI_PHY_424_DATA */ + 0x00000000, /* DENALI_PHY_425_DATA */ + 0x00000000, /* DENALI_PHY_426_DATA */ + 0x00000000, /* DENALI_PHY_427_DATA */ + 0x00000000, /* DENALI_PHY_428_DATA */ + 0x00000000, /* DENALI_PHY_429_DATA */ + 0x00000000, /* DENALI_PHY_430_DATA */ + 0x00000000, /* DENALI_PHY_431_DATA */ + 0x00000000, /* DENALI_PHY_432_DATA */ + 0x00000000, /* DENALI_PHY_433_DATA */ + 0x00000000, /* DENALI_PHY_434_DATA */ + 0x00000000, /* DENALI_PHY_435_DATA */ + 0x00200000, /* DENALI_PHY_436_DATA */ + 0x00000000, /* DENALI_PHY_437_DATA */ + 0x00000000, /* DENALI_PHY_438_DATA */ + 0x00000000, /* DENALI_PHY_439_DATA */ + 0x00000000, /* DENALI_PHY_440_DATA */ + 0x00000000, /* DENALI_PHY_441_DATA */ + 0x00000000, /* DENALI_PHY_442_DATA */ + 0x02800280, /* DENALI_PHY_443_DATA */ + 0x02800280, /* DENALI_PHY_444_DATA */ + 0x02800280, /* DENALI_PHY_445_DATA */ + 0x02800280, /* DENALI_PHY_446_DATA */ + 0x00000280, /* DENALI_PHY_447_DATA */ + 0x00000000, /* DENALI_PHY_448_DATA */ + 0x00000000, /* DENALI_PHY_449_DATA */ + 0x00000000, /* DENALI_PHY_450_DATA */ + 0x00000000, /* DENALI_PHY_451_DATA */ + 0x00800000, /* DENALI_PHY_452_DATA */ + 0x00800080, /* DENALI_PHY_453_DATA */ + 0x00800080, /* DENALI_PHY_454_DATA */ + 0x00800080, /* DENALI_PHY_455_DATA */ + 0x00800080, /* DENALI_PHY_456_DATA */ + 0x00800080, /* DENALI_PHY_457_DATA */ + 0x00800080, /* DENALI_PHY_458_DATA */ + 0x00800080, /* DENALI_PHY_459_DATA */ + 0x00800080, /* DENALI_PHY_460_DATA */ + 0x01190080, /* DENALI_PHY_461_DATA */ + 0x00000001, /* DENALI_PHY_462_DATA */ + 0x00000000, /* DENALI_PHY_463_DATA */ + 0x00000000, /* DENALI_PHY_464_DATA */ + 0x00000200, /* DENALI_PHY_465_DATA */ + 0x00000000, /* DENALI_PHY_466_DATA */ + 0x51315152, /* DENALI_PHY_467_DATA */ + 0xc0003150, /* DENALI_PHY_468_DATA */ + 0x010000c0, /* DENALI_PHY_469_DATA */ + 0x00100000, /* DENALI_PHY_470_DATA */ + 0x07044204, /* DENALI_PHY_471_DATA */ + 0x000f0c18, /* DENALI_PHY_472_DATA */ + 0x01000140, /* DENALI_PHY_473_DATA */ + 0x00000c10, /* DENALI_PHY_474_DATA */ + 0x00000000, /* DENALI_PHY_475_DATA */ + 0x00000000, /* DENALI_PHY_476_DATA */ + 0x00000000, /* DENALI_PHY_477_DATA */ + 0x00000000, /* DENALI_PHY_478_DATA */ + 0x00000000, /* DENALI_PHY_479_DATA */ + 0x00000000, /* DENALI_PHY_480_DATA */ + 0x00000000, /* DENALI_PHY_481_DATA */ + 0x00000000, /* DENALI_PHY_482_DATA */ + 0x00000000, /* DENALI_PHY_483_DATA */ + 0x00000000, /* DENALI_PHY_484_DATA */ + 0x00000000, /* DENALI_PHY_485_DATA */ + 0x00000000, /* DENALI_PHY_486_DATA */ + 0x00000000, /* DENALI_PHY_487_DATA */ + 0x00000000, /* DENALI_PHY_488_DATA */ + 0x00000000, /* DENALI_PHY_489_DATA */ + 0x00000000, /* DENALI_PHY_490_DATA */ + 0x00000000, /* DENALI_PHY_491_DATA */ + 0x00000000, /* DENALI_PHY_492_DATA */ + 0x00000000, /* DENALI_PHY_493_DATA */ + 0x00000000, /* DENALI_PHY_494_DATA */ + 0x00000000, /* DENALI_PHY_495_DATA 
*/ + 0x00000000, /* DENALI_PHY_496_DATA */ + 0x00000000, /* DENALI_PHY_497_DATA */ + 0x00000000, /* DENALI_PHY_498_DATA */ + 0x00000000, /* DENALI_PHY_499_DATA */ + 0x00000000, /* DENALI_PHY_500_DATA */ + 0x00000000, /* DENALI_PHY_501_DATA */ + 0x00000000, /* DENALI_PHY_502_DATA */ + 0x00000000, /* DENALI_PHY_503_DATA */ + 0x00000000, /* DENALI_PHY_504_DATA */ + 0x00000000, /* DENALI_PHY_505_DATA */ + 0x00000000, /* DENALI_PHY_506_DATA */ + 0x00000000, /* DENALI_PHY_507_DATA */ + 0x00000000, /* DENALI_PHY_508_DATA */ + 0x00000000, /* DENALI_PHY_509_DATA */ + 0x00000000, /* DENALI_PHY_510_DATA */ + 0x00000000, /* DENALI_PHY_511_DATA */ + 0x00000000, /* DENALI_PHY_512_DATA */ + 0x00000000, /* DENALI_PHY_513_DATA */ + 0x00000000, /* DENALI_PHY_514_DATA */ + 0x00000000, /* DENALI_PHY_515_DATA */ + 0x00000000, /* DENALI_PHY_516_DATA */ + 0x00000000, /* DENALI_PHY_517_DATA */ + 0x00000000, /* DENALI_PHY_518_DATA */ + 0x00000002, /* DENALI_PHY_519_DATA */ + 0x00000000, /* DENALI_PHY_520_DATA */ + 0x00000000, /* DENALI_PHY_521_DATA */ + 0x00000000, /* DENALI_PHY_522_DATA */ + 0x00400320, /* DENALI_PHY_523_DATA */ + 0x00000040, /* DENALI_PHY_524_DATA */ + 0x00dcba98, /* DENALI_PHY_525_DATA */ + 0x00000000, /* DENALI_PHY_526_DATA */ + 0x00dcba98, /* DENALI_PHY_527_DATA */ + 0x01000000, /* DENALI_PHY_528_DATA */ + 0x00020003, /* DENALI_PHY_529_DATA */ + 0x00000000, /* DENALI_PHY_530_DATA */ + 0x00000000, /* DENALI_PHY_531_DATA */ + 0x00000000, /* DENALI_PHY_532_DATA */ + 0x0000002a, /* DENALI_PHY_533_DATA */ + 0x00000015, /* DENALI_PHY_534_DATA */ + 0x00000015, /* DENALI_PHY_535_DATA */ + 0x0000002a, /* DENALI_PHY_536_DATA */ + 0x00000033, /* DENALI_PHY_537_DATA */ + 0x0000000c, /* DENALI_PHY_538_DATA */ + 0x0000000c, /* DENALI_PHY_539_DATA */ + 0x00000033, /* DENALI_PHY_540_DATA */ + 0x0a418820, /* DENALI_PHY_541_DATA */ + 0x003f0000, /* DENALI_PHY_542_DATA */ + 0x0000003f, /* DENALI_PHY_543_DATA */ + 0x00030055, /* DENALI_PHY_544_DATA */ + 0x03000300, /* DENALI_PHY_545_DATA */ + 0x03000300, /* DENALI_PHY_546_DATA */ + 0x00000300, /* DENALI_PHY_547_DATA */ + 0x42080010, /* DENALI_PHY_548_DATA */ + 0x00000003, /* DENALI_PHY_549_DATA */ + 0x00000000, /* DENALI_PHY_550_DATA */ + 0x00000000, /* DENALI_PHY_551_DATA */ + 0x00000000, /* DENALI_PHY_552_DATA */ + 0x00000000, /* DENALI_PHY_553_DATA */ + 0x00000000, /* DENALI_PHY_554_DATA */ + 0x00000000, /* DENALI_PHY_555_DATA */ + 0x00000000, /* DENALI_PHY_556_DATA */ + 0x00000000, /* DENALI_PHY_557_DATA */ + 0x00000000, /* DENALI_PHY_558_DATA */ + 0x00000000, /* DENALI_PHY_559_DATA */ + 0x00000000, /* DENALI_PHY_560_DATA */ + 0x00000000, /* DENALI_PHY_561_DATA */ + 0x00000000, /* DENALI_PHY_562_DATA */ + 0x00000000, /* DENALI_PHY_563_DATA */ + 0x00000000, /* DENALI_PHY_564_DATA */ + 0x00000000, /* DENALI_PHY_565_DATA */ + 0x00000000, /* DENALI_PHY_566_DATA */ + 0x00000000, /* DENALI_PHY_567_DATA */ + 0x00000000, /* DENALI_PHY_568_DATA */ + 0x00000000, /* DENALI_PHY_569_DATA */ + 0x00000000, /* DENALI_PHY_570_DATA */ + 0x00000000, /* DENALI_PHY_571_DATA */ + 0x00000000, /* DENALI_PHY_572_DATA */ + 0x00000000, /* DENALI_PHY_573_DATA */ + 0x00000000, /* DENALI_PHY_574_DATA */ + 0x00000000, /* DENALI_PHY_575_DATA */ + 0x00000000, /* DENALI_PHY_576_DATA */ + 0x00000000, /* DENALI_PHY_577_DATA */ + 0x00000000, /* DENALI_PHY_578_DATA */ + 0x00000000, /* DENALI_PHY_579_DATA */ + 0x00000000, /* DENALI_PHY_580_DATA */ + 0x00000000, /* DENALI_PHY_581_DATA */ + 0x00000000, /* DENALI_PHY_582_DATA */ + 0x00000000, /* DENALI_PHY_583_DATA */ + 0x00000000, /* 
DENALI_PHY_584_DATA */ + 0x00000000, /* DENALI_PHY_585_DATA */ + 0x00000000, /* DENALI_PHY_586_DATA */ + 0x00000000, /* DENALI_PHY_587_DATA */ + 0x00000000, /* DENALI_PHY_588_DATA */ + 0x00000000, /* DENALI_PHY_589_DATA */ + 0x00000000, /* DENALI_PHY_590_DATA */ + 0x00000000, /* DENALI_PHY_591_DATA */ + 0x00000000, /* DENALI_PHY_592_DATA */ + 0x00000000, /* DENALI_PHY_593_DATA */ + 0x00000000, /* DENALI_PHY_594_DATA */ + 0x00000000, /* DENALI_PHY_595_DATA */ + 0x00000000, /* DENALI_PHY_596_DATA */ + 0x00000000, /* DENALI_PHY_597_DATA */ + 0x00000000, /* DENALI_PHY_598_DATA */ + 0x00000000, /* DENALI_PHY_599_DATA */ + 0x00000000, /* DENALI_PHY_600_DATA */ + 0x00000000, /* DENALI_PHY_601_DATA */ + 0x00000000, /* DENALI_PHY_602_DATA */ + 0x00000000, /* DENALI_PHY_603_DATA */ + 0x00000000, /* DENALI_PHY_604_DATA */ + 0x00000000, /* DENALI_PHY_605_DATA */ + 0x00000000, /* DENALI_PHY_606_DATA */ + 0x00000000, /* DENALI_PHY_607_DATA */ + 0x00000000, /* DENALI_PHY_608_DATA */ + 0x00000000, /* DENALI_PHY_609_DATA */ + 0x00000000, /* DENALI_PHY_610_DATA */ + 0x00000000, /* DENALI_PHY_611_DATA */ + 0x00000000, /* DENALI_PHY_612_DATA */ + 0x00000000, /* DENALI_PHY_613_DATA */ + 0x00000000, /* DENALI_PHY_614_DATA */ + 0x00000000, /* DENALI_PHY_615_DATA */ + 0x00000000, /* DENALI_PHY_616_DATA */ + 0x00000000, /* DENALI_PHY_617_DATA */ + 0x00000000, /* DENALI_PHY_618_DATA */ + 0x00000000, /* DENALI_PHY_619_DATA */ + 0x00000000, /* DENALI_PHY_620_DATA */ + 0x00000000, /* DENALI_PHY_621_DATA */ + 0x00000000, /* DENALI_PHY_622_DATA */ + 0x00000000, /* DENALI_PHY_623_DATA */ + 0x00000000, /* DENALI_PHY_624_DATA */ + 0x00000000, /* DENALI_PHY_625_DATA */ + 0x00000000, /* DENALI_PHY_626_DATA */ + 0x00000000, /* DENALI_PHY_627_DATA */ + 0x00000000, /* DENALI_PHY_628_DATA */ + 0x00000000, /* DENALI_PHY_629_DATA */ + 0x00000000, /* DENALI_PHY_630_DATA */ + 0x00000000, /* DENALI_PHY_631_DATA */ + 0x00000000, /* DENALI_PHY_632_DATA */ + 0x00000000, /* DENALI_PHY_633_DATA */ + 0x00000000, /* DENALI_PHY_634_DATA */ + 0x00000000, /* DENALI_PHY_635_DATA */ + 0x00000000, /* DENALI_PHY_636_DATA */ + 0x00000000, /* DENALI_PHY_637_DATA */ + 0x00000000, /* DENALI_PHY_638_DATA */ + 0x00000000, /* DENALI_PHY_639_DATA */ + 0x00000000, /* DENALI_PHY_640_DATA */ + 0x00000000, /* DENALI_PHY_641_DATA */ + 0x00000000, /* DENALI_PHY_642_DATA */ + 0x00000000, /* DENALI_PHY_643_DATA */ + 0x00000000, /* DENALI_PHY_644_DATA */ + 0x00000000, /* DENALI_PHY_645_DATA */ + 0x00000000, /* DENALI_PHY_646_DATA */ + 0x00000002, /* DENALI_PHY_647_DATA */ + 0x00000000, /* DENALI_PHY_648_DATA */ + 0x00000000, /* DENALI_PHY_649_DATA */ + 0x00000000, /* DENALI_PHY_650_DATA */ + 0x00400320, /* DENALI_PHY_651_DATA */ + 0x00000040, /* DENALI_PHY_652_DATA */ + 0x00000000, /* DENALI_PHY_653_DATA */ + 0x00000000, /* DENALI_PHY_654_DATA */ + 0x00000000, /* DENALI_PHY_655_DATA */ + 0x01000000, /* DENALI_PHY_656_DATA */ + 0x00020003, /* DENALI_PHY_657_DATA */ + 0x00000000, /* DENALI_PHY_658_DATA */ + 0x00000000, /* DENALI_PHY_659_DATA */ + 0x00000000, /* DENALI_PHY_660_DATA */ + 0x0000002a, /* DENALI_PHY_661_DATA */ + 0x00000015, /* DENALI_PHY_662_DATA */ + 0x00000015, /* DENALI_PHY_663_DATA */ + 0x0000002a, /* DENALI_PHY_664_DATA */ + 0x00000033, /* DENALI_PHY_665_DATA */ + 0x0000000c, /* DENALI_PHY_666_DATA */ + 0x0000000c, /* DENALI_PHY_667_DATA */ + 0x00000033, /* DENALI_PHY_668_DATA */ + 0x00000000, /* DENALI_PHY_669_DATA */ + 0x00000000, /* DENALI_PHY_670_DATA */ + 0x00000000, /* DENALI_PHY_671_DATA */ + 0x00030055, /* DENALI_PHY_672_DATA */ + 
0x03000300, /* DENALI_PHY_673_DATA */ + 0x03000300, /* DENALI_PHY_674_DATA */ + 0x00000300, /* DENALI_PHY_675_DATA */ + 0x42080010, /* DENALI_PHY_676_DATA */ + 0x00000003, /* DENALI_PHY_677_DATA */ + 0x00000000, /* DENALI_PHY_678_DATA */ + 0x00000000, /* DENALI_PHY_679_DATA */ + 0x00000000, /* DENALI_PHY_680_DATA */ + 0x00000000, /* DENALI_PHY_681_DATA */ + 0x00000000, /* DENALI_PHY_682_DATA */ + 0x00000000, /* DENALI_PHY_683_DATA */ + 0x00000000, /* DENALI_PHY_684_DATA */ + 0x00000000, /* DENALI_PHY_685_DATA */ + 0x00000000, /* DENALI_PHY_686_DATA */ + 0x00000000, /* DENALI_PHY_687_DATA */ + 0x00000000, /* DENALI_PHY_688_DATA */ + 0x00000000, /* DENALI_PHY_689_DATA */ + 0x00000000, /* DENALI_PHY_690_DATA */ + 0x00000000, /* DENALI_PHY_691_DATA */ + 0x00000000, /* DENALI_PHY_692_DATA */ + 0x00000000, /* DENALI_PHY_693_DATA */ + 0x00000000, /* DENALI_PHY_694_DATA */ + 0x00000000, /* DENALI_PHY_695_DATA */ + 0x00000000, /* DENALI_PHY_696_DATA */ + 0x00000000, /* DENALI_PHY_697_DATA */ + 0x00000000, /* DENALI_PHY_698_DATA */ + 0x00000000, /* DENALI_PHY_699_DATA */ + 0x00000000, /* DENALI_PHY_700_DATA */ + 0x00000000, /* DENALI_PHY_701_DATA */ + 0x00000000, /* DENALI_PHY_702_DATA */ + 0x00000000, /* DENALI_PHY_703_DATA */ + 0x00000000, /* DENALI_PHY_704_DATA */ + 0x00000000, /* DENALI_PHY_705_DATA */ + 0x00000000, /* DENALI_PHY_706_DATA */ + 0x00000000, /* DENALI_PHY_707_DATA */ + 0x00000000, /* DENALI_PHY_708_DATA */ + 0x00000000, /* DENALI_PHY_709_DATA */ + 0x00000000, /* DENALI_PHY_710_DATA */ + 0x00000000, /* DENALI_PHY_711_DATA */ + 0x00000000, /* DENALI_PHY_712_DATA */ + 0x00000000, /* DENALI_PHY_713_DATA */ + 0x00000000, /* DENALI_PHY_714_DATA */ + 0x00000000, /* DENALI_PHY_715_DATA */ + 0x00000000, /* DENALI_PHY_716_DATA */ + 0x00000000, /* DENALI_PHY_717_DATA */ + 0x00000000, /* DENALI_PHY_718_DATA */ + 0x00000000, /* DENALI_PHY_719_DATA */ + 0x00000000, /* DENALI_PHY_720_DATA */ + 0x00000000, /* DENALI_PHY_721_DATA */ + 0x00000000, /* DENALI_PHY_722_DATA */ + 0x00000000, /* DENALI_PHY_723_DATA */ + 0x00000000, /* DENALI_PHY_724_DATA */ + 0x00000000, /* DENALI_PHY_725_DATA */ + 0x00000000, /* DENALI_PHY_726_DATA */ + 0x00000000, /* DENALI_PHY_727_DATA */ + 0x00000000, /* DENALI_PHY_728_DATA */ + 0x00000000, /* DENALI_PHY_729_DATA */ + 0x00000000, /* DENALI_PHY_730_DATA */ + 0x00000000, /* DENALI_PHY_731_DATA */ + 0x00000000, /* DENALI_PHY_732_DATA */ + 0x00000000, /* DENALI_PHY_733_DATA */ + 0x00000000, /* DENALI_PHY_734_DATA */ + 0x00000000, /* DENALI_PHY_735_DATA */ + 0x00000000, /* DENALI_PHY_736_DATA */ + 0x00000000, /* DENALI_PHY_737_DATA */ + 0x00000000, /* DENALI_PHY_738_DATA */ + 0x00000000, /* DENALI_PHY_739_DATA */ + 0x00000000, /* DENALI_PHY_740_DATA */ + 0x00000000, /* DENALI_PHY_741_DATA */ + 0x00000000, /* DENALI_PHY_742_DATA */ + 0x00000000, /* DENALI_PHY_743_DATA */ + 0x00000000, /* DENALI_PHY_744_DATA */ + 0x00000000, /* DENALI_PHY_745_DATA */ + 0x00000000, /* DENALI_PHY_746_DATA */ + 0x00000000, /* DENALI_PHY_747_DATA */ + 0x00000000, /* DENALI_PHY_748_DATA */ + 0x00000000, /* DENALI_PHY_749_DATA */ + 0x00000000, /* DENALI_PHY_750_DATA */ + 0x00000000, /* DENALI_PHY_751_DATA */ + 0x00000000, /* DENALI_PHY_752_DATA */ + 0x00000000, /* DENALI_PHY_753_DATA */ + 0x00000000, /* DENALI_PHY_754_DATA */ + 0x00000000, /* DENALI_PHY_755_DATA */ + 0x00000000, /* DENALI_PHY_756_DATA */ + 0x00000000, /* DENALI_PHY_757_DATA */ + 0x00000000, /* DENALI_PHY_758_DATA */ + 0x00000000, /* DENALI_PHY_759_DATA */ + 0x00000000, /* DENALI_PHY_760_DATA */ + 0x00000000, /* DENALI_PHY_761_DATA 
*/ + 0x00000000, /* DENALI_PHY_762_DATA */ + 0x00000000, /* DENALI_PHY_763_DATA */ + 0x00000000, /* DENALI_PHY_764_DATA */ + 0x00000000, /* DENALI_PHY_765_DATA */ + 0x00000000, /* DENALI_PHY_766_DATA */ + 0x00000000, /* DENALI_PHY_767_DATA */ + 0x00000000, /* DENALI_PHY_768_DATA */ + 0x00000000, /* DENALI_PHY_769_DATA */ + 0x00000000, /* DENALI_PHY_770_DATA */ + 0x00000000, /* DENALI_PHY_771_DATA */ + 0x00000000, /* DENALI_PHY_772_DATA */ + 0x00000000, /* DENALI_PHY_773_DATA */ + 0x00000000, /* DENALI_PHY_774_DATA */ + 0x00000002, /* DENALI_PHY_775_DATA */ + 0x00000000, /* DENALI_PHY_776_DATA */ + 0x00000000, /* DENALI_PHY_777_DATA */ + 0x00000000, /* DENALI_PHY_778_DATA */ + 0x00400320, /* DENALI_PHY_779_DATA */ + 0x00000040, /* DENALI_PHY_780_DATA */ + 0x00000000, /* DENALI_PHY_781_DATA */ + 0x00000000, /* DENALI_PHY_782_DATA */ + 0x00000000, /* DENALI_PHY_783_DATA */ + 0x01000000, /* DENALI_PHY_784_DATA */ + 0x00020003, /* DENALI_PHY_785_DATA */ + 0x00000000, /* DENALI_PHY_786_DATA */ + 0x00000000, /* DENALI_PHY_787_DATA */ + 0x00000000, /* DENALI_PHY_788_DATA */ + 0x0000002a, /* DENALI_PHY_789_DATA */ + 0x00000015, /* DENALI_PHY_790_DATA */ + 0x00000015, /* DENALI_PHY_791_DATA */ + 0x0000002a, /* DENALI_PHY_792_DATA */ + 0x00000033, /* DENALI_PHY_793_DATA */ + 0x0000000c, /* DENALI_PHY_794_DATA */ + 0x0000000c, /* DENALI_PHY_795_DATA */ + 0x00000033, /* DENALI_PHY_796_DATA */ + 0x1ee6b16a, /* DENALI_PHY_797_DATA */ + 0x10000000, /* DENALI_PHY_798_DATA */ + 0x00000000, /* DENALI_PHY_799_DATA */ + 0x00030055, /* DENALI_PHY_800_DATA */ + 0x03000300, /* DENALI_PHY_801_DATA */ + 0x03000300, /* DENALI_PHY_802_DATA */ + 0x00000300, /* DENALI_PHY_803_DATA */ + 0x42080010, /* DENALI_PHY_804_DATA */ + 0x00000003, /* DENALI_PHY_805_DATA */ + 0x00000000, /* DENALI_PHY_806_DATA */ + 0x00000000, /* DENALI_PHY_807_DATA */ + 0x00000000, /* DENALI_PHY_808_DATA */ + 0x00000000, /* DENALI_PHY_809_DATA */ + 0x00000000, /* DENALI_PHY_810_DATA */ + 0x00000000, /* DENALI_PHY_811_DATA */ + 0x00000000, /* DENALI_PHY_812_DATA */ + 0x00000000, /* DENALI_PHY_813_DATA */ + 0x00000000, /* DENALI_PHY_814_DATA */ + 0x00000000, /* DENALI_PHY_815_DATA */ + 0x00000000, /* DENALI_PHY_816_DATA */ + 0x00000000, /* DENALI_PHY_817_DATA */ + 0x00000000, /* DENALI_PHY_818_DATA */ + 0x00000000, /* DENALI_PHY_819_DATA */ + 0x00000000, /* DENALI_PHY_820_DATA */ + 0x00000000, /* DENALI_PHY_821_DATA */ + 0x00000000, /* DENALI_PHY_822_DATA */ + 0x00000000, /* DENALI_PHY_823_DATA */ + 0x00000000, /* DENALI_PHY_824_DATA */ + 0x00000000, /* DENALI_PHY_825_DATA */ + 0x00000000, /* DENALI_PHY_826_DATA */ + 0x00000000, /* DENALI_PHY_827_DATA */ + 0x00000000, /* DENALI_PHY_828_DATA */ + 0x00000000, /* DENALI_PHY_829_DATA */ + 0x00000000, /* DENALI_PHY_830_DATA */ + 0x00000000, /* DENALI_PHY_831_DATA */ + 0x00000000, /* DENALI_PHY_832_DATA */ + 0x00000000, /* DENALI_PHY_833_DATA */ + 0x00000000, /* DENALI_PHY_834_DATA */ + 0x00000000, /* DENALI_PHY_835_DATA */ + 0x00000000, /* DENALI_PHY_836_DATA */ + 0x00000000, /* DENALI_PHY_837_DATA */ + 0x00000000, /* DENALI_PHY_838_DATA */ + 0x00000000, /* DENALI_PHY_839_DATA */ + 0x00000000, /* DENALI_PHY_840_DATA */ + 0x00000000, /* DENALI_PHY_841_DATA */ + 0x00000000, /* DENALI_PHY_842_DATA */ + 0x00000000, /* DENALI_PHY_843_DATA */ + 0x00000000, /* DENALI_PHY_844_DATA */ + 0x00000000, /* DENALI_PHY_845_DATA */ + 0x00000000, /* DENALI_PHY_846_DATA */ + 0x00000000, /* DENALI_PHY_847_DATA */ + 0x00000000, /* DENALI_PHY_848_DATA */ + 0x00000000, /* DENALI_PHY_849_DATA */ + 0x00000000, /* 
DENALI_PHY_850_DATA */ + 0x00000000, /* DENALI_PHY_851_DATA */ + 0x00000000, /* DENALI_PHY_852_DATA */ + 0x00000000, /* DENALI_PHY_853_DATA */ + 0x00000000, /* DENALI_PHY_854_DATA */ + 0x00000000, /* DENALI_PHY_855_DATA */ + 0x00000000, /* DENALI_PHY_856_DATA */ + 0x00000000, /* DENALI_PHY_857_DATA */ + 0x00000000, /* DENALI_PHY_858_DATA */ + 0x00000000, /* DENALI_PHY_859_DATA */ + 0x00000000, /* DENALI_PHY_860_DATA */ + 0x00000000, /* DENALI_PHY_861_DATA */ + 0x00000000, /* DENALI_PHY_862_DATA */ + 0x00000000, /* DENALI_PHY_863_DATA */ + 0x00000000, /* DENALI_PHY_864_DATA */ + 0x00000000, /* DENALI_PHY_865_DATA */ + 0x00000000, /* DENALI_PHY_866_DATA */ + 0x00000000, /* DENALI_PHY_867_DATA */ + 0x00000000, /* DENALI_PHY_868_DATA */ + 0x00000000, /* DENALI_PHY_869_DATA */ + 0x00000000, /* DENALI_PHY_870_DATA */ + 0x00000000, /* DENALI_PHY_871_DATA */ + 0x00000000, /* DENALI_PHY_872_DATA */ + 0x00000000, /* DENALI_PHY_873_DATA */ + 0x00000000, /* DENALI_PHY_874_DATA */ + 0x00000000, /* DENALI_PHY_875_DATA */ + 0x00000000, /* DENALI_PHY_876_DATA */ + 0x00000000, /* DENALI_PHY_877_DATA */ + 0x00000000, /* DENALI_PHY_878_DATA */ + 0x00000000, /* DENALI_PHY_879_DATA */ + 0x00000000, /* DENALI_PHY_880_DATA */ + 0x00000000, /* DENALI_PHY_881_DATA */ + 0x00000000, /* DENALI_PHY_882_DATA */ + 0x00000000, /* DENALI_PHY_883_DATA */ + 0x00000000, /* DENALI_PHY_884_DATA */ + 0x00000000, /* DENALI_PHY_885_DATA */ + 0x00000000, /* DENALI_PHY_886_DATA */ + 0x00000000, /* DENALI_PHY_887_DATA */ + 0x00000000, /* DENALI_PHY_888_DATA */ + 0x00000000, /* DENALI_PHY_889_DATA */ + 0x00000000, /* DENALI_PHY_890_DATA */ + 0x00000000, /* DENALI_PHY_891_DATA */ + 0x00000000, /* DENALI_PHY_892_DATA */ + 0x00000000, /* DENALI_PHY_893_DATA */ + 0x00000000, /* DENALI_PHY_894_DATA */ + 0x00000000, /* DENALI_PHY_895_DATA */ + 0x00000000, /* DENALI_PHY_896_DATA */ + 0x00000000, /* DENALI_PHY_897_DATA */ + 0x00000005, /* DENALI_PHY_898_DATA */ + 0x04000f01, /* DENALI_PHY_899_DATA */ + 0x00020040, /* DENALI_PHY_900_DATA */ + 0x00020055, /* DENALI_PHY_901_DATA */ + 0x00000000, /* DENALI_PHY_902_DATA */ + 0x00000000, /* DENALI_PHY_903_DATA */ + 0x00000000, /* DENALI_PHY_904_DATA */ + 0x00000050, /* DENALI_PHY_905_DATA */ + 0x00000000, /* DENALI_PHY_906_DATA */ + 0x01010100, /* DENALI_PHY_907_DATA */ + 0x00000600, /* DENALI_PHY_908_DATA */ + 0x00000000, /* DENALI_PHY_909_DATA */ + 0x00006400, /* DENALI_PHY_910_DATA */ + 0x03221302, /* DENALI_PHY_911_DATA */ + 0x00000000, /* DENALI_PHY_912_DATA */ + 0x000d1f01, /* DENALI_PHY_913_DATA */ + 0x0d1f0d1f, /* DENALI_PHY_914_DATA */ + 0x0d1f0d1f, /* DENALI_PHY_915_DATA */ + 0x00030003, /* DENALI_PHY_916_DATA */ + 0x03000300, /* DENALI_PHY_917_DATA */ + 0x00000300, /* DENALI_PHY_918_DATA */ + 0x03221302, /* DENALI_PHY_919_DATA */ + 0x00000000, /* DENALI_PHY_920_DATA */ + 0x00000000, /* DENALI_PHY_921_DATA */ + 0x01020000, /* DENALI_PHY_922_DATA */ + 0x00000001, /* DENALI_PHY_923_DATA */ + 0x00000411, /* DENALI_PHY_924_DATA */ + 0x00000411, /* DENALI_PHY_925_DATA */ + 0x00000040, /* DENALI_PHY_926_DATA */ + 0x00000040, /* DENALI_PHY_927_DATA */ + 0x00000411, /* DENALI_PHY_928_DATA */ + 0x00000411, /* DENALI_PHY_929_DATA */ + 0x00004410, /* DENALI_PHY_930_DATA */ + 0x00004410, /* DENALI_PHY_931_DATA */ + 0x00004410, /* DENALI_PHY_932_DATA */ + 0x00004410, /* DENALI_PHY_933_DATA */ + 0x00004410, /* DENALI_PHY_934_DATA */ + 0x00000411, /* DENALI_PHY_935_DATA */ + 0x00004410, /* DENALI_PHY_936_DATA */ + 0x00000411, /* DENALI_PHY_937_DATA */ + 0x00004410, /* DENALI_PHY_938_DATA */ + 
0x00000411, /* DENALI_PHY_939_DATA */ + 0x00004410, /* DENALI_PHY_940_DATA */ + 0x00000000, /* DENALI_PHY_941_DATA */ + 0x00000000, /* DENALI_PHY_942_DATA */ + 0x00000000, /* DENALI_PHY_943_DATA */ + 0x64000000, /* DENALI_PHY_944_DATA */ + 0x00000000, /* DENALI_PHY_945_DATA */ + 0x00000000, /* DENALI_PHY_946_DATA */ + 0x00000408, /* DENALI_PHY_947_DATA */ + 0x00000000, /* DENALI_PHY_948_DATA */ + 0x00000000, /* DENALI_PHY_949_DATA */ + 0x00000000, /* DENALI_PHY_950_DATA */ + 0x00000000, /* DENALI_PHY_951_DATA */ + 0x00000000, /* DENALI_PHY_952_DATA */ + 0x00000000, /* DENALI_PHY_953_DATA */ + 0xe4000000, /* DENALI_PHY_954_DATA */ + 0x00000000, /* DENALI_PHY_955_DATA */ + 0x00000000, /* DENALI_PHY_956_DATA */ + 0x01010000, /* DENALI_PHY_957_DATA */ + 0x00000000 /* DENALI_PHY_958_DATA */ + } + }, +}, diff --git a/drivers/ram/rockchip/sdram-rk3399-lpddr4-800.inc b/drivers/ram/rockchip/sdram-rk3399-lpddr4-800.inc new file mode 100644 index 0000000000..d8ae3359a3 --- /dev/null +++ b/drivers/ram/rockchip/sdram-rk3399-lpddr4-800.inc @@ -0,0 +1,1570 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) Copyright 2019 Rockchip Electronics Co., Ltd. + * (C) Copyright 2019 Amarula Solutions + */ + +{ + { + { + { + .rank = 0x2, + .col = 0xA, + .bk = 0x3, + .bw = 0x2, + .dbw = 0x1, + .row_3_4 = 0x0, + .cs0_row = 0xF, + .cs1_row = 0xF, + .ddrconfig = 1, + }, + { + .ddrtiminga0 = 0x80241d22, + .ddrtimingb0 = 0x15050f08, + .ddrtimingc0 = { + 0x00000602, + }, + .devtodev0 = 0x00002122, + .ddrmode = { + 0x0000004c, + }, + .agingx0 = 0x00000000, + } + }, + { + { + .rank = 0x2, + .col = 0xA, + .bk = 0x3, + .bw = 0x2, + .dbw = 0x1, + .row_3_4 = 0x0, + .cs0_row = 0xF, + .cs1_row = 0xF, + .ddrconfig = 1, + }, + { + .ddrtiminga0 = 0x80241d22, + .ddrtimingb0 = 0x15050f08, + .ddrtimingc0 = { + 0x00000602, + }, + .devtodev0 = 0x00002122, + .ddrmode = { + 0x0000004c, + }, + .agingx0 = 0x00000000, + } + } + }, + { + .ddr_freq = 800 * MHz, + .dramtype = LPDDR4, + .num_channels = 2, + .stride = 13, + .odt = 1, + }, + { + { + 0x00000b00, /* DENALI_CTL_00_DATA */ + 0x00000000, /* DENALI_CTL_01_DATA */ + 0x00000000, /* DENALI_CTL_02_DATA */ + 0x00000000, /* DENALI_CTL_03_DATA */ + 0x00000000, /* DENALI_CTL_04_DATA */ + 0x00013880, /* DENALI_CTL_05_DATA */ + 0x000c3500, /* DENALI_CTL_06_DATA */ + 0x00000005, /* DENALI_CTL_07_DATA */ + 0x00000320, /* DENALI_CTL_08_DATA */ + 0x00027100, /* DENALI_CTL_09_DATA */ + 0x00186a00, /* DENALI_CTL_10_DATA */ + 0x00000005, /* DENALI_CTL_11_DATA */ + 0x00000640, /* DENALI_CTL_12_DATA */ + 0x00002710, /* DENALI_CTL_13_DATA */ + 0x000186a0, /* DENALI_CTL_14_DATA */ + 0x00000005, /* DENALI_CTL_15_DATA */ + 0x01000064, /* DENALI_CTL_16_DATA */ + 0x00000000, /* DENALI_CTL_17_DATA */ + 0x02020101, /* DENALI_CTL_18_DATA */ + 0x00000102, /* DENALI_CTL_19_DATA */ + 0x00000050, /* DENALI_CTL_20_DATA */ + 0x000000c8, /* DENALI_CTL_21_DATA */ + 0x00000000, /* DENALI_CTL_22_DATA */ + 0x06140000, /* DENALI_CTL_23_DATA */ + 0x00081c00, /* DENALI_CTL_24_DATA */ + 0x0400040c, /* DENALI_CTL_25_DATA */ + 0x19042008, /* DENALI_CTL_26_DATA */ + 0x10080a11, /* DENALI_CTL_27_DATA */ + 0x22310800, /* DENALI_CTL_28_DATA */ + 0x00200f0a, /* DENALI_CTL_29_DATA */ + 0x0a030704, /* DENALI_CTL_30_DATA */ + 0x08000204, /* DENALI_CTL_31_DATA */ + 0x00000a0a, /* DENALI_CTL_32_DATA */ + 0x04006db0, /* DENALI_CTL_33_DATA */ + 0x0a0a0804, /* DENALI_CTL_34_DATA */ + 0x0600db60, /* DENALI_CTL_35_DATA */ + 0x0a0a0806, /* DENALI_CTL_36_DATA */ + 0x04000db6, /* DENALI_CTL_37_DATA */ + 0x02030404, /* DENALI_CTL_38_DATA */ + 
0x0f0a0800, /* DENALI_CTL_39_DATA */ + 0x08040411, /* DENALI_CTL_40_DATA */ + 0x1400640a, /* DENALI_CTL_41_DATA */ + 0x02010a0a, /* DENALI_CTL_42_DATA */ + 0x00010001, /* DENALI_CTL_43_DATA */ + 0x04082012, /* DENALI_CTL_44_DATA */ + 0x00041109, /* DENALI_CTL_45_DATA */ + 0x00000000, /* DENALI_CTL_46_DATA */ + 0x03010000, /* DENALI_CTL_47_DATA */ + 0x06100034, /* DENALI_CTL_48_DATA */ + 0x0c280068, /* DENALI_CTL_49_DATA */ + 0x00bb0007, /* DENALI_CTL_50_DATA */ + 0x00000000, /* DENALI_CTL_51_DATA */ + 0x00060003, /* DENALI_CTL_52_DATA */ + 0x000a0003, /* DENALI_CTL_53_DATA */ + 0x000a0014, /* DENALI_CTL_54_DATA */ + 0x01000000, /* DENALI_CTL_55_DATA */ + 0x030a0000, /* DENALI_CTL_56_DATA */ + 0x0c000002, /* DENALI_CTL_57_DATA */ + 0x00000103, /* DENALI_CTL_58_DATA */ + 0x0003030a, /* DENALI_CTL_59_DATA */ + 0x00060037, /* DENALI_CTL_60_DATA */ + 0x0003006e, /* DENALI_CTL_61_DATA */ + 0x05050007, /* DENALI_CTL_62_DATA */ + 0x03020605, /* DENALI_CTL_63_DATA */ + 0x06050301, /* DENALI_CTL_64_DATA */ + 0x06020c05, /* DENALI_CTL_65_DATA */ + 0x05050302, /* DENALI_CTL_66_DATA */ + 0x03020305, /* DENALI_CTL_67_DATA */ + 0x00000301, /* DENALI_CTL_68_DATA */ + 0x00000301, /* DENALI_CTL_69_DATA */ + 0x00000001, /* DENALI_CTL_70_DATA */ + 0x00000000, /* DENALI_CTL_71_DATA */ + 0x00000000, /* DENALI_CTL_72_DATA */ + 0x01000000, /* DENALI_CTL_73_DATA */ + 0x80104002, /* DENALI_CTL_74_DATA */ + 0x00040003, /* DENALI_CTL_75_DATA */ + 0x00040005, /* DENALI_CTL_76_DATA */ + 0x00030000, /* DENALI_CTL_77_DATA */ + 0x00050004, /* DENALI_CTL_78_DATA */ + 0x00000004, /* DENALI_CTL_79_DATA */ + 0x00040003, /* DENALI_CTL_80_DATA */ + 0x00040005, /* DENALI_CTL_81_DATA */ + 0x18400000, /* DENALI_CTL_82_DATA */ + 0x00000c20, /* DENALI_CTL_83_DATA */ + 0x185030a0, /* DENALI_CTL_84_DATA */ + 0x02ec0000, /* DENALI_CTL_85_DATA */ + 0x00000176, /* DENALI_CTL_86_DATA */ + 0x00000000, /* DENALI_CTL_87_DATA */ + 0x00000000, /* DENALI_CTL_88_DATA */ + 0x00000000, /* DENALI_CTL_89_DATA */ + 0x00000000, /* DENALI_CTL_90_DATA */ + 0x00000000, /* DENALI_CTL_91_DATA */ + 0x06030300, /* DENALI_CTL_92_DATA */ + 0x00030303, /* DENALI_CTL_93_DATA */ + 0x02030200, /* DENALI_CTL_94_DATA */ + 0x00040703, /* DENALI_CTL_95_DATA */ + 0x03020302, /* DENALI_CTL_96_DATA */ + 0x02000407, /* DENALI_CTL_97_DATA */ + 0x07030203, /* DENALI_CTL_98_DATA */ + 0x00030f04, /* DENALI_CTL_99_DATA */ + 0x00070004, /* DENALI_CTL_100_DATA */ + 0x00000000, /* DENALI_CTL_101_DATA */ + 0x00000000, /* DENALI_CTL_102_DATA */ + 0x00000000, /* DENALI_CTL_103_DATA */ + 0x00000000, /* DENALI_CTL_104_DATA */ + 0x00000000, /* DENALI_CTL_105_DATA */ + 0x00000000, /* DENALI_CTL_106_DATA */ + 0x00000000, /* DENALI_CTL_107_DATA */ + 0x00010000, /* DENALI_CTL_108_DATA */ + 0x20040020, /* DENALI_CTL_109_DATA */ + 0x00200400, /* DENALI_CTL_110_DATA */ + 0x01000400, /* DENALI_CTL_111_DATA */ + 0x00000b80, /* DENALI_CTL_112_DATA */ + 0x00000000, /* DENALI_CTL_113_DATA */ + 0x00000001, /* DENALI_CTL_114_DATA */ + 0x00000002, /* DENALI_CTL_115_DATA */ + 0x0000000e, /* DENALI_CTL_116_DATA */ + 0x00000000, /* DENALI_CTL_117_DATA */ + 0x00000000, /* DENALI_CTL_118_DATA */ + 0x00000000, /* DENALI_CTL_119_DATA */ + 0x00000000, /* DENALI_CTL_120_DATA */ + 0x00000000, /* DENALI_CTL_121_DATA */ + 0x00500000, /* DENALI_CTL_122_DATA */ + 0x00640028, /* DENALI_CTL_123_DATA */ + 0x00640404, /* DENALI_CTL_124_DATA */ + 0x005000a0, /* DENALI_CTL_125_DATA */ + 0x060600c8, /* DENALI_CTL_126_DATA */ + 0x000a00c8, /* DENALI_CTL_127_DATA */ + 0x000d0005, /* DENALI_CTL_128_DATA */ + 0x000d0404, /* 
DENALI_CTL_129_DATA */ + 0x00000000, /* DENALI_CTL_130_DATA */ + 0x00000000, /* DENALI_CTL_131_DATA */ + 0x00000000, /* DENALI_CTL_132_DATA */ + 0x001400a3, /* DENALI_CTL_133_DATA */ + 0x00e30009, /* DENALI_CTL_134_DATA */ + 0x00120024, /* DENALI_CTL_135_DATA */ + 0x00040063, /* DENALI_CTL_136_DATA */ + 0x00000000, /* DENALI_CTL_137_DATA */ + 0x00310031, /* DENALI_CTL_138_DATA */ + 0x00000031, /* DENALI_CTL_139_DATA */ + 0x004d0000, /* DENALI_CTL_140_DATA */ + 0x004d004d, /* DENALI_CTL_141_DATA */ + 0x004d0000, /* DENALI_CTL_142_DATA */ + 0x004d004d, /* DENALI_CTL_143_DATA */ + 0x00010101, /* DENALI_CTL_144_DATA */ + 0x00000000, /* DENALI_CTL_145_DATA */ + 0x00000000, /* DENALI_CTL_146_DATA */ + 0x001400a3, /* DENALI_CTL_147_DATA */ + 0x00e30009, /* DENALI_CTL_148_DATA */ + 0x00120024, /* DENALI_CTL_149_DATA */ + 0x00040063, /* DENALI_CTL_150_DATA */ + 0x00000000, /* DENALI_CTL_151_DATA */ + 0x00310031, /* DENALI_CTL_152_DATA */ + 0x00000031, /* DENALI_CTL_153_DATA */ + 0x004d0000, /* DENALI_CTL_154_DATA */ + 0x004d004d, /* DENALI_CTL_155_DATA */ + 0x004d0000, /* DENALI_CTL_156_DATA */ + 0x004d004d, /* DENALI_CTL_157_DATA */ + 0x00010101, /* DENALI_CTL_158_DATA */ + 0x00000000, /* DENALI_CTL_159_DATA */ + 0x00000000, /* DENALI_CTL_160_DATA */ + 0x00000000, /* DENALI_CTL_161_DATA */ + 0x00000001, /* DENALI_CTL_162_DATA */ + 0x00000000, /* DENALI_CTL_163_DATA */ + 0x18151100, /* DENALI_CTL_164_DATA */ + 0x0000000c, /* DENALI_CTL_165_DATA */ + 0x00000000, /* DENALI_CTL_166_DATA */ + 0x00000000, /* DENALI_CTL_167_DATA */ + 0x00000000, /* DENALI_CTL_168_DATA */ + 0x00000000, /* DENALI_CTL_169_DATA */ + 0x00000000, /* DENALI_CTL_170_DATA */ + 0x00000000, /* DENALI_CTL_171_DATA */ + 0x00000000, /* DENALI_CTL_172_DATA */ + 0x00000000, /* DENALI_CTL_173_DATA */ + 0x00000000, /* DENALI_CTL_174_DATA */ + 0x00000000, /* DENALI_CTL_175_DATA */ + 0x00000000, /* DENALI_CTL_176_DATA */ + 0x00000000, /* DENALI_CTL_177_DATA */ + 0x00000000, /* DENALI_CTL_178_DATA */ + 0x00020003, /* DENALI_CTL_179_DATA */ + 0x00400100, /* DENALI_CTL_180_DATA */ + 0x000c0190, /* DENALI_CTL_181_DATA */ + 0x01000200, /* DENALI_CTL_182_DATA */ + 0x03200040, /* DENALI_CTL_183_DATA */ + 0x00020018, /* DENALI_CTL_184_DATA */ + 0x00400100, /* DENALI_CTL_185_DATA */ + 0x00080032, /* DENALI_CTL_186_DATA */ + 0x00140000, /* DENALI_CTL_187_DATA */ + 0x00030028, /* DENALI_CTL_188_DATA */ + 0x01010100, /* DENALI_CTL_189_DATA */ + 0x02000202, /* DENALI_CTL_190_DATA */ + 0x0b000002, /* DENALI_CTL_191_DATA */ + 0x01000f0f, /* DENALI_CTL_192_DATA */ + 0x00000000, /* DENALI_CTL_193_DATA */ + 0x00000000, /* DENALI_CTL_194_DATA */ + 0x00010003, /* DENALI_CTL_195_DATA */ + 0x00000c03, /* DENALI_CTL_196_DATA */ + 0x00040101, /* DENALI_CTL_197_DATA */ + 0x04010100, /* DENALI_CTL_198_DATA */ + 0x01000000, /* DENALI_CTL_199_DATA */ + 0x02010000, /* DENALI_CTL_200_DATA */ + 0x00000001, /* DENALI_CTL_201_DATA */ + 0x00000000, /* DENALI_CTL_202_DATA */ + 0x00000000, /* DENALI_CTL_203_DATA */ + 0x00000000, /* DENALI_CTL_204_DATA */ + 0x00000000, /* DENALI_CTL_205_DATA */ + 0x00000000, /* DENALI_CTL_206_DATA */ + 0x00000000, /* DENALI_CTL_207_DATA */ + 0x00000000, /* DENALI_CTL_208_DATA */ + 0x00000000, /* DENALI_CTL_209_DATA */ + 0x00000000, /* DENALI_CTL_210_DATA */ + 0x00010000, /* DENALI_CTL_211_DATA */ + 0x00000001, /* DENALI_CTL_212_DATA */ + 0x01010001, /* DENALI_CTL_213_DATA */ + 0x05040001, /* DENALI_CTL_214_DATA */ + 0x040a0703, /* DENALI_CTL_215_DATA */ + 0x02080808, /* DENALI_CTL_216_DATA */ + 0x020e000a, /* DENALI_CTL_217_DATA */ + 
0x020f010b, /* DENALI_CTL_218_DATA */ + 0x000d0008, /* DENALI_CTL_219_DATA */ + 0x00080b0a, /* DENALI_CTL_220_DATA */ + 0x03000200, /* DENALI_CTL_221_DATA */ + 0x00000100, /* DENALI_CTL_222_DATA */ + 0x00000000, /* DENALI_CTL_223_DATA */ + 0x00000000, /* DENALI_CTL_224_DATA */ + 0x0d000001, /* DENALI_CTL_225_DATA */ + 0x00000028, /* DENALI_CTL_226_DATA */ + 0x00010000, /* DENALI_CTL_227_DATA */ + 0x00000003, /* DENALI_CTL_228_DATA */ + 0x00000000, /* DENALI_CTL_229_DATA */ + 0x00000000, /* DENALI_CTL_230_DATA */ + 0x00000000, /* DENALI_CTL_231_DATA */ + 0x00000000, /* DENALI_CTL_232_DATA */ + 0x00000000, /* DENALI_CTL_233_DATA */ + 0x00000000, /* DENALI_CTL_234_DATA */ + 0x00000000, /* DENALI_CTL_235_DATA */ + 0x00000000, /* DENALI_CTL_236_DATA */ + 0x00010100, /* DENALI_CTL_237_DATA */ + 0x01000000, /* DENALI_CTL_238_DATA */ + 0x00000001, /* DENALI_CTL_239_DATA */ + 0x00000303, /* DENALI_CTL_240_DATA */ + 0x00000000, /* DENALI_CTL_241_DATA */ + 0x00000000, /* DENALI_CTL_242_DATA */ + 0x00000000, /* DENALI_CTL_243_DATA */ + 0x00000000, /* DENALI_CTL_244_DATA */ + 0x00000000, /* DENALI_CTL_245_DATA */ + 0x00000000, /* DENALI_CTL_246_DATA */ + 0x00000000, /* DENALI_CTL_247_DATA */ + 0x00000000, /* DENALI_CTL_248_DATA */ + 0x00000000, /* DENALI_CTL_249_DATA */ + 0x00000000, /* DENALI_CTL_250_DATA */ + 0x00000000, /* DENALI_CTL_251_DATA */ + 0x00000000, /* DENALI_CTL_252_DATA */ + 0x00000000, /* DENALI_CTL_253_DATA */ + 0x00000000, /* DENALI_CTL_254_DATA */ + 0x00000000, /* DENALI_CTL_255_DATA */ + 0x000556aa, /* DENALI_CTL_256_DATA */ + 0x000aaaaa, /* DENALI_CTL_257_DATA */ + 0x000aa955, /* DENALI_CTL_258_DATA */ + 0x00055555, /* DENALI_CTL_259_DATA */ + 0x000b3133, /* DENALI_CTL_260_DATA */ + 0x0004cd33, /* DENALI_CTL_261_DATA */ + 0x0004cecc, /* DENALI_CTL_262_DATA */ + 0x000b32cc, /* DENALI_CTL_263_DATA */ + 0x00010300, /* DENALI_CTL_264_DATA */ + 0x03000100, /* DENALI_CTL_265_DATA */ + 0x00000000, /* DENALI_CTL_266_DATA */ + 0x00000000, /* DENALI_CTL_267_DATA */ + 0x00000000, /* DENALI_CTL_268_DATA */ + 0x00000000, /* DENALI_CTL_269_DATA */ + 0x00000000, /* DENALI_CTL_270_DATA */ + 0x00000000, /* DENALI_CTL_271_DATA */ + 0x00000000, /* DENALI_CTL_272_DATA */ + 0x00000000, /* DENALI_CTL_273_DATA */ + 0x00ffff00, /* DENALI_CTL_274_DATA */ + 0x1a160000, /* DENALI_CTL_275_DATA */ + 0x08000012, /* DENALI_CTL_276_DATA */ + 0x00000c20, /* DENALI_CTL_277_DATA */ + 0x00000200, /* DENALI_CTL_278_DATA */ + 0x00000200, /* DENALI_CTL_279_DATA */ + 0x00000200, /* DENALI_CTL_280_DATA */ + 0x00000200, /* DENALI_CTL_281_DATA */ + 0x00000c20, /* DENALI_CTL_282_DATA */ + 0x00007940, /* DENALI_CTL_283_DATA */ + 0x18500409, /* DENALI_CTL_284_DATA */ + 0x00000200, /* DENALI_CTL_285_DATA */ + 0x00000200, /* DENALI_CTL_286_DATA */ + 0x00000200, /* DENALI_CTL_287_DATA */ + 0x00000200, /* DENALI_CTL_288_DATA */ + 0x00001850, /* DENALI_CTL_289_DATA */ + 0x0000f320, /* DENALI_CTL_290_DATA */ + 0x0176060c, /* DENALI_CTL_291_DATA */ + 0x00000200, /* DENALI_CTL_292_DATA */ + 0x00000200, /* DENALI_CTL_293_DATA */ + 0x00000200, /* DENALI_CTL_294_DATA */ + 0x00000200, /* DENALI_CTL_295_DATA */ + 0x00000176, /* DENALI_CTL_296_DATA */ + 0x00000e9c, /* DENALI_CTL_297_DATA */ + 0x02020205, /* DENALI_CTL_298_DATA */ + 0x03030202, /* DENALI_CTL_299_DATA */ + 0x00000018, /* DENALI_CTL_300_DATA */ + 0x00000000, /* DENALI_CTL_301_DATA */ + 0x00000000, /* DENALI_CTL_302_DATA */ + 0x00001403, /* DENALI_CTL_303_DATA */ + 0x00000000, /* DENALI_CTL_304_DATA */ + 0x00000000, /* DENALI_CTL_305_DATA */ + 0x00000000, /* DENALI_CTL_306_DATA 
*/ + 0x00030000, /* DENALI_CTL_307_DATA */ + 0x000a001c, /* DENALI_CTL_308_DATA */ + 0x000e0020, /* DENALI_CTL_309_DATA */ + 0x00060018, /* DENALI_CTL_310_DATA */ + 0x00000000, /* DENALI_CTL_311_DATA */ + 0x00000000, /* DENALI_CTL_312_DATA */ + 0x02000000, /* DENALI_CTL_313_DATA */ + 0x00090305, /* DENALI_CTL_314_DATA */ + 0x00050101, /* DENALI_CTL_315_DATA */ + 0x00000000, /* DENALI_CTL_316_DATA */ + 0x00000000, /* DENALI_CTL_317_DATA */ + 0x00000000, /* DENALI_CTL_318_DATA */ + 0x00000000, /* DENALI_CTL_319_DATA */ + 0x00000000, /* DENALI_CTL_320_DATA */ + 0x00000000, /* DENALI_CTL_321_DATA */ + 0x00000000, /* DENALI_CTL_322_DATA */ + 0x00000000, /* DENALI_CTL_323_DATA */ + 0x01000001, /* DENALI_CTL_324_DATA */ + 0x01010101, /* DENALI_CTL_325_DATA */ + 0x01000101, /* DENALI_CTL_326_DATA */ + 0x01000100, /* DENALI_CTL_327_DATA */ + 0x00010001, /* DENALI_CTL_328_DATA */ + 0x00010002, /* DENALI_CTL_329_DATA */ + 0x00020100, /* DENALI_CTL_330_DATA */ + 0x00000002 /* DENALI_CTL_331_DATA */ + } + }, + { + { + 0x00000b00, /* DENALI_PI_00_DATA */ + 0x00000000, /* DENALI_PI_01_DATA */ + 0x000002ec, /* DENALI_PI_02_DATA */ + 0x00000176, /* DENALI_PI_03_DATA */ + 0x000030a0, /* DENALI_PI_04_DATA */ + 0x00001850, /* DENALI_PI_05_DATA */ + 0x00001840, /* DENALI_PI_06_DATA */ + 0x01760c20, /* DENALI_PI_07_DATA */ + 0x00000200, /* DENALI_PI_08_DATA */ + 0x00000200, /* DENALI_PI_09_DATA */ + 0x00000200, /* DENALI_PI_10_DATA */ + 0x00000200, /* DENALI_PI_11_DATA */ + 0x00001850, /* DENALI_PI_12_DATA */ + 0x00000200, /* DENALI_PI_13_DATA */ + 0x00000200, /* DENALI_PI_14_DATA */ + 0x00000200, /* DENALI_PI_15_DATA */ + 0x00000200, /* DENALI_PI_16_DATA */ + 0x00000c20, /* DENALI_PI_17_DATA */ + 0x00000200, /* DENALI_PI_18_DATA */ + 0x00000200, /* DENALI_PI_19_DATA */ + 0x00000200, /* DENALI_PI_20_DATA */ + 0x00000200, /* DENALI_PI_21_DATA */ + 0x00010000, /* DENALI_PI_22_DATA */ + 0x00000007, /* DENALI_PI_23_DATA */ + 0x01000001, /* DENALI_PI_24_DATA */ + 0x00000000, /* DENALI_PI_25_DATA */ + 0x3fffffff, /* DENALI_PI_26_DATA */ + 0x00000000, /* DENALI_PI_27_DATA */ + 0x00000000, /* DENALI_PI_28_DATA */ + 0x00000000, /* DENALI_PI_29_DATA */ + 0x00000000, /* DENALI_PI_30_DATA */ + 0x00000000, /* DENALI_PI_31_DATA */ + 0x00000000, /* DENALI_PI_32_DATA */ + 0x00000000, /* DENALI_PI_33_DATA */ + 0x00000000, /* DENALI_PI_34_DATA */ + 0x00000000, /* DENALI_PI_35_DATA */ + 0x00000000, /* DENALI_PI_36_DATA */ + 0x00000000, /* DENALI_PI_37_DATA */ + 0x00000000, /* DENALI_PI_38_DATA */ + 0x00000000, /* DENALI_PI_39_DATA */ + 0x00000000, /* DENALI_PI_40_DATA */ + 0x0f000101, /* DENALI_PI_41_DATA */ + 0x082b3223, /* DENALI_PI_42_DATA */ + 0x080c0004, /* DENALI_PI_43_DATA */ + 0x00061c00, /* DENALI_PI_44_DATA */ + 0x00000214, /* DENALI_PI_45_DATA */ + 0x00bb0007, /* DENALI_PI_46_DATA */ + 0x0c280068, /* DENALI_PI_47_DATA */ + 0x06100034, /* DENALI_PI_48_DATA */ + 0x00000500, /* DENALI_PI_49_DATA */ + 0x00000000, /* DENALI_PI_50_DATA */ + 0x00000000, /* DENALI_PI_51_DATA */ + 0x00000000, /* DENALI_PI_52_DATA */ + 0x00000000, /* DENALI_PI_53_DATA */ + 0x00000000, /* DENALI_PI_54_DATA */ + 0x00000000, /* DENALI_PI_55_DATA */ + 0x00000000, /* DENALI_PI_56_DATA */ + 0x00000000, /* DENALI_PI_57_DATA */ + 0x04040100, /* DENALI_PI_58_DATA */ + 0x0a000004, /* DENALI_PI_59_DATA */ + 0x00000128, /* DENALI_PI_60_DATA */ + 0x00000000, /* DENALI_PI_61_DATA */ + 0x0003000f, /* DENALI_PI_62_DATA */ + 0x00000018, /* DENALI_PI_63_DATA */ + 0x00000000, /* DENALI_PI_64_DATA */ + 0x00000000, /* DENALI_PI_65_DATA */ + 0x00060002, /* 
DENALI_PI_66_DATA */ + 0x00010001, /* DENALI_PI_67_DATA */ + 0x00000101, /* DENALI_PI_68_DATA */ + 0x00020001, /* DENALI_PI_69_DATA */ + 0x00080004, /* DENALI_PI_70_DATA */ + 0x00000000, /* DENALI_PI_71_DATA */ + 0x05030000, /* DENALI_PI_72_DATA */ + 0x070a0404, /* DENALI_PI_73_DATA */ + 0x00000000, /* DENALI_PI_74_DATA */ + 0x00000000, /* DENALI_PI_75_DATA */ + 0x00000000, /* DENALI_PI_76_DATA */ + 0x000f0f00, /* DENALI_PI_77_DATA */ + 0x0000001e, /* DENALI_PI_78_DATA */ + 0x00000000, /* DENALI_PI_79_DATA */ + 0x01010300, /* DENALI_PI_80_DATA */ + 0x00000000, /* DENALI_PI_81_DATA */ + 0x00000000, /* DENALI_PI_82_DATA */ + 0x01000000, /* DENALI_PI_83_DATA */ + 0x00000101, /* DENALI_PI_84_DATA */ + 0x55555a5a, /* DENALI_PI_85_DATA */ + 0x55555a5a, /* DENALI_PI_86_DATA */ + 0x55555a5a, /* DENALI_PI_87_DATA */ + 0x55555a5a, /* DENALI_PI_88_DATA */ + 0x0c050001, /* DENALI_PI_89_DATA */ + 0x06020009, /* DENALI_PI_90_DATA */ + 0x00010004, /* DENALI_PI_91_DATA */ + 0x00000203, /* DENALI_PI_92_DATA */ + 0x00030000, /* DENALI_PI_93_DATA */ + 0x170f0000, /* DENALI_PI_94_DATA */ + 0x00060018, /* DENALI_PI_95_DATA */ + 0x000e0020, /* DENALI_PI_96_DATA */ + 0x000a001c, /* DENALI_PI_97_DATA */ + 0x00000000, /* DENALI_PI_98_DATA */ + 0x00000000, /* DENALI_PI_99_DATA */ + 0x00000100, /* DENALI_PI_100_DATA */ + 0x140a0000, /* DENALI_PI_101_DATA */ + 0x000d010a, /* DENALI_PI_102_DATA */ + 0x0100c802, /* DENALI_PI_103_DATA */ + 0x010a0064, /* DENALI_PI_104_DATA */ + 0x000e0100, /* DENALI_PI_105_DATA */ + 0x0100000e, /* DENALI_PI_106_DATA */ + 0x00c900c9, /* DENALI_PI_107_DATA */ + 0x00650100, /* DENALI_PI_108_DATA */ + 0x1e1a0065, /* DENALI_PI_109_DATA */ + 0x10010204, /* DENALI_PI_110_DATA */ + 0x06070605, /* DENALI_PI_111_DATA */ + 0x20000202, /* DENALI_PI_112_DATA */ + 0x00201000, /* DENALI_PI_113_DATA */ + 0x00201000, /* DENALI_PI_114_DATA */ + 0x04041000, /* DENALI_PI_115_DATA */ + 0x10020100, /* DENALI_PI_116_DATA */ + 0x0003010c, /* DENALI_PI_117_DATA */ + 0x004b004a, /* DENALI_PI_118_DATA */ + 0x1a0f0000, /* DENALI_PI_119_DATA */ + 0x0102041e, /* DENALI_PI_120_DATA */ + 0x34000000, /* DENALI_PI_121_DATA */ + 0x00000000, /* DENALI_PI_122_DATA */ + 0x00000000, /* DENALI_PI_123_DATA */ + 0x00010000, /* DENALI_PI_124_DATA */ + 0x00000400, /* DENALI_PI_125_DATA */ + 0x00310000, /* DENALI_PI_126_DATA */ + 0x004d4d00, /* DENALI_PI_127_DATA */ + 0x00120024, /* DENALI_PI_128_DATA */ + 0x4d000031, /* DENALI_PI_129_DATA */ + 0x0000144d, /* DENALI_PI_130_DATA */ + 0x00310009, /* DENALI_PI_131_DATA */ + 0x004d4d00, /* DENALI_PI_132_DATA */ + 0x00000004, /* DENALI_PI_133_DATA */ + 0x4d000031, /* DENALI_PI_134_DATA */ + 0x0000244d, /* DENALI_PI_135_DATA */ + 0x00310012, /* DENALI_PI_136_DATA */ + 0x004d4d00, /* DENALI_PI_137_DATA */ + 0x00090014, /* DENALI_PI_138_DATA */ + 0x4d000031, /* DENALI_PI_139_DATA */ + 0x0004004d, /* DENALI_PI_140_DATA */ + 0x00310000, /* DENALI_PI_141_DATA */ + 0x004d4d00, /* DENALI_PI_142_DATA */ + 0x00120024, /* DENALI_PI_143_DATA */ + 0x4d000031, /* DENALI_PI_144_DATA */ + 0x0000144d, /* DENALI_PI_145_DATA */ + 0x00310009, /* DENALI_PI_146_DATA */ + 0x004d4d00, /* DENALI_PI_147_DATA */ + 0x00000004, /* DENALI_PI_148_DATA */ + 0x4d000031, /* DENALI_PI_149_DATA */ + 0x0000244d, /* DENALI_PI_150_DATA */ + 0x00310012, /* DENALI_PI_151_DATA */ + 0x004d4d00, /* DENALI_PI_152_DATA */ + 0x00090014, /* DENALI_PI_153_DATA */ + 0x4d000031, /* DENALI_PI_154_DATA */ + 0x0200004d, /* DENALI_PI_155_DATA */ + 0x00c8000d, /* DENALI_PI_156_DATA */ + 0x08080064, /* DENALI_PI_157_DATA */ + 0x040a0404, /* 
DENALI_PI_158_DATA */ + 0x03000d92, /* DENALI_PI_159_DATA */ + 0x010a2001, /* DENALI_PI_160_DATA */ + 0x0f11080a, /* DENALI_PI_161_DATA */ + 0x0000110a, /* DENALI_PI_162_DATA */ + 0x2200d92e, /* DENALI_PI_163_DATA */ + 0x080c2003, /* DENALI_PI_164_DATA */ + 0x0809080a, /* DENALI_PI_165_DATA */ + 0x00000a0a, /* DENALI_PI_166_DATA */ + 0x11006c97, /* DENALI_PI_167_DATA */ + 0x040a2002, /* DENALI_PI_168_DATA */ + 0x0200020a, /* DENALI_PI_169_DATA */ + 0x02000200, /* DENALI_PI_170_DATA */ + 0x02000200, /* DENALI_PI_171_DATA */ + 0x02000200, /* DENALI_PI_172_DATA */ + 0x02000200, /* DENALI_PI_173_DATA */ + 0x00000000, /* DENALI_PI_174_DATA */ + 0x00000000, /* DENALI_PI_175_DATA */ + 0x00000000, /* DENALI_PI_176_DATA */ + 0x00000000, /* DENALI_PI_177_DATA */ + 0x00000000, /* DENALI_PI_178_DATA */ + 0x00000000, /* DENALI_PI_179_DATA */ + 0x00000000, /* DENALI_PI_180_DATA */ + 0x00000000, /* DENALI_PI_181_DATA */ + 0x00000000, /* DENALI_PI_182_DATA */ + 0x00000000, /* DENALI_PI_183_DATA */ + 0x00000000, /* DENALI_PI_184_DATA */ + 0x00000000, /* DENALI_PI_185_DATA */ + 0x01000400, /* DENALI_PI_186_DATA */ + 0x00017600, /* DENALI_PI_187_DATA */ + 0x00000e9c, /* DENALI_PI_188_DATA */ + 0x00001850, /* DENALI_PI_189_DATA */ + 0x0000f320, /* DENALI_PI_190_DATA */ + 0x00000c20, /* DENALI_PI_191_DATA */ + 0x00007940, /* DENALI_PI_192_DATA */ + 0x08000000, /* DENALI_PI_193_DATA */ + 0x00000100, /* DENALI_PI_194_DATA */ + 0x00000000, /* DENALI_PI_195_DATA */ + 0x00000000, /* DENALI_PI_196_DATA */ + 0x00000000, /* DENALI_PI_197_DATA */ + 0x00000000, /* DENALI_PI_198_DATA */ + 0x00000002 /* DENALI_PI_199_DATA */ + } + }, + { + { + 0x76543210, /* DENALI_PHY_00_DATA */ + 0x0004f008, /* DENALI_PHY_01_DATA */ + 0x00020119, /* DENALI_PHY_02_DATA */ + 0x00000000, /* DENALI_PHY_03_DATA */ + 0x00000000, /* DENALI_PHY_04_DATA */ + 0x00010000, /* DENALI_PHY_05_DATA */ + 0x01665555, /* DENALI_PHY_06_DATA */ + 0x03665555, /* DENALI_PHY_07_DATA */ + 0x00010f00, /* DENALI_PHY_08_DATA */ + 0x05010200, /* DENALI_PHY_09_DATA */ + 0x00000002, /* DENALI_PHY_10_DATA */ + 0x00170180, /* DENALI_PHY_11_DATA */ + 0x00cc0201, /* DENALI_PHY_12_DATA */ + 0x00030066, /* DENALI_PHY_13_DATA */ + 0x00000000, /* DENALI_PHY_14_DATA */ + 0x00000000, /* DENALI_PHY_15_DATA */ + 0x00000000, /* DENALI_PHY_16_DATA */ + 0x00000000, /* DENALI_PHY_17_DATA */ + 0x00000000, /* DENALI_PHY_18_DATA */ + 0x00000000, /* DENALI_PHY_19_DATA */ + 0x00000000, /* DENALI_PHY_20_DATA */ + 0x00000000, /* DENALI_PHY_21_DATA */ + 0x04080000, /* DENALI_PHY_22_DATA */ + 0x04080400, /* DENALI_PHY_23_DATA */ + 0x30000000, /* DENALI_PHY_24_DATA */ + 0x0c00c007, /* DENALI_PHY_25_DATA */ + 0x00000100, /* DENALI_PHY_26_DATA */ + 0x00000000, /* DENALI_PHY_27_DATA */ + 0xfd02fe01, /* DENALI_PHY_28_DATA */ + 0xf708fb04, /* DENALI_PHY_29_DATA */ + 0xdf20ef10, /* DENALI_PHY_30_DATA */ + 0x7f80bf40, /* DENALI_PHY_31_DATA */ + 0x0001aaaa, /* DENALI_PHY_32_DATA */ + 0x00000000, /* DENALI_PHY_33_DATA */ + 0x00000000, /* DENALI_PHY_34_DATA */ + 0x00000000, /* DENALI_PHY_35_DATA */ + 0x00000000, /* DENALI_PHY_36_DATA */ + 0x00000000, /* DENALI_PHY_37_DATA */ + 0x00000000, /* DENALI_PHY_38_DATA */ + 0x00000000, /* DENALI_PHY_39_DATA */ + 0x00000000, /* DENALI_PHY_40_DATA */ + 0x00000000, /* DENALI_PHY_41_DATA */ + 0x00000000, /* DENALI_PHY_42_DATA */ + 0x00000000, /* DENALI_PHY_43_DATA */ + 0x00000000, /* DENALI_PHY_44_DATA */ + 0x00000000, /* DENALI_PHY_45_DATA */ + 0x00000000, /* DENALI_PHY_46_DATA */ + 0x00000000, /* DENALI_PHY_47_DATA */ + 0x00000000, /* DENALI_PHY_48_DATA */ + 
0x00000000, /* DENALI_PHY_49_DATA */ + 0x00000000, /* DENALI_PHY_50_DATA */ + 0x00000000, /* DENALI_PHY_51_DATA */ + 0x00200000, /* DENALI_PHY_52_DATA */ + 0x00000000, /* DENALI_PHY_53_DATA */ + 0x00000000, /* DENALI_PHY_54_DATA */ + 0x00000000, /* DENALI_PHY_55_DATA */ + 0x00000000, /* DENALI_PHY_56_DATA */ + 0x00000000, /* DENALI_PHY_57_DATA */ + 0x00000000, /* DENALI_PHY_58_DATA */ + 0x02800280, /* DENALI_PHY_59_DATA */ + 0x02800280, /* DENALI_PHY_60_DATA */ + 0x02800280, /* DENALI_PHY_61_DATA */ + 0x02800280, /* DENALI_PHY_62_DATA */ + 0x00000280, /* DENALI_PHY_63_DATA */ + 0x00000000, /* DENALI_PHY_64_DATA */ + 0x00000000, /* DENALI_PHY_65_DATA */ + 0x00000000, /* DENALI_PHY_66_DATA */ + 0x00000000, /* DENALI_PHY_67_DATA */ + 0x00800000, /* DENALI_PHY_68_DATA */ + 0x00800080, /* DENALI_PHY_69_DATA */ + 0x00800080, /* DENALI_PHY_70_DATA */ + 0x00800080, /* DENALI_PHY_71_DATA */ + 0x00800080, /* DENALI_PHY_72_DATA */ + 0x00800080, /* DENALI_PHY_73_DATA */ + 0x00800080, /* DENALI_PHY_74_DATA */ + 0x00800080, /* DENALI_PHY_75_DATA */ + 0x00800080, /* DENALI_PHY_76_DATA */ + 0x01190080, /* DENALI_PHY_77_DATA */ + 0x00000002, /* DENALI_PHY_78_DATA */ + 0x00000000, /* DENALI_PHY_79_DATA */ + 0x00000000, /* DENALI_PHY_80_DATA */ + 0x00000200, /* DENALI_PHY_81_DATA */ + 0x00000000, /* DENALI_PHY_82_DATA */ + 0x51315152, /* DENALI_PHY_83_DATA */ + 0xc0013150, /* DENALI_PHY_84_DATA */ + 0x020000c0, /* DENALI_PHY_85_DATA */ + 0x00100001, /* DENALI_PHY_86_DATA */ + 0x07054204, /* DENALI_PHY_87_DATA */ + 0x000f0c18, /* DENALI_PHY_88_DATA */ + 0x01000140, /* DENALI_PHY_89_DATA */ + 0x00000c10, /* DENALI_PHY_90_DATA */ + 0x00000000, /* DENALI_PHY_91_DATA */ + 0x00000000, /* DENALI_PHY_92_DATA */ + 0x00000000, /* DENALI_PHY_93_DATA */ + 0x00000000, /* DENALI_PHY_94_DATA */ + 0x00000000, /* DENALI_PHY_95_DATA */ + 0x00000000, /* DENALI_PHY_96_DATA */ + 0x00000000, /* DENALI_PHY_97_DATA */ + 0x00000000, /* DENALI_PHY_98_DATA */ + 0x00000000, /* DENALI_PHY_99_DATA */ + 0x00000000, /* DENALI_PHY_100_DATA */ + 0x00000000, /* DENALI_PHY_101_DATA */ + 0x00000000, /* DENALI_PHY_102_DATA */ + 0x00000000, /* DENALI_PHY_103_DATA */ + 0x00000000, /* DENALI_PHY_104_DATA */ + 0x00000000, /* DENALI_PHY_105_DATA */ + 0x00000000, /* DENALI_PHY_106_DATA */ + 0x00000000, /* DENALI_PHY_107_DATA */ + 0x00000000, /* DENALI_PHY_108_DATA */ + 0x00000000, /* DENALI_PHY_109_DATA */ + 0x00000000, /* DENALI_PHY_110_DATA */ + 0x00000000, /* DENALI_PHY_111_DATA */ + 0x00000000, /* DENALI_PHY_112_DATA */ + 0x00000000, /* DENALI_PHY_113_DATA */ + 0x00000000, /* DENALI_PHY_114_DATA */ + 0x00000000, /* DENALI_PHY_115_DATA */ + 0x00000000, /* DENALI_PHY_116_DATA */ + 0x00000000, /* DENALI_PHY_117_DATA */ + 0x00000000, /* DENALI_PHY_118_DATA */ + 0x00000000, /* DENALI_PHY_119_DATA */ + 0x00000000, /* DENALI_PHY_120_DATA */ + 0x00000000, /* DENALI_PHY_121_DATA */ + 0x00000000, /* DENALI_PHY_122_DATA */ + 0x00000000, /* DENALI_PHY_123_DATA */ + 0x00000000, /* DENALI_PHY_124_DATA */ + 0x00000000, /* DENALI_PHY_125_DATA */ + 0x00000000, /* DENALI_PHY_126_DATA */ + 0x00000000, /* DENALI_PHY_127_DATA */ + 0x76543210, /* DENALI_PHY_128_DATA */ + 0x0004f008, /* DENALI_PHY_129_DATA */ + 0x00020119, /* DENALI_PHY_130_DATA */ + 0x00000000, /* DENALI_PHY_131_DATA */ + 0x00000000, /* DENALI_PHY_132_DATA */ + 0x00010000, /* DENALI_PHY_133_DATA */ + 0x01665555, /* DENALI_PHY_134_DATA */ + 0x03665555, /* DENALI_PHY_135_DATA */ + 0x00010f00, /* DENALI_PHY_136_DATA */ + 0x05010200, /* DENALI_PHY_137_DATA */ + 0x00000002, /* DENALI_PHY_138_DATA */ + 
0x00170180, /* DENALI_PHY_139_DATA */ + 0x00cc0201, /* DENALI_PHY_140_DATA */ + 0x00030066, /* DENALI_PHY_141_DATA */ + 0x00000000, /* DENALI_PHY_142_DATA */ + 0x00000000, /* DENALI_PHY_143_DATA */ + 0x00000000, /* DENALI_PHY_144_DATA */ + 0x00000000, /* DENALI_PHY_145_DATA */ + 0x00000000, /* DENALI_PHY_146_DATA */ + 0x00000000, /* DENALI_PHY_147_DATA */ + 0x00000000, /* DENALI_PHY_148_DATA */ + 0x00000000, /* DENALI_PHY_149_DATA */ + 0x04080000, /* DENALI_PHY_150_DATA */ + 0x04080400, /* DENALI_PHY_151_DATA */ + 0x30000000, /* DENALI_PHY_152_DATA */ + 0x0c00c007, /* DENALI_PHY_153_DATA */ + 0x00000100, /* DENALI_PHY_154_DATA */ + 0x00000000, /* DENALI_PHY_155_DATA */ + 0xfd02fe01, /* DENALI_PHY_156_DATA */ + 0xf708fb04, /* DENALI_PHY_157_DATA */ + 0xdf20ef10, /* DENALI_PHY_158_DATA */ + 0x7f80bf40, /* DENALI_PHY_159_DATA */ + 0x0000aaaa, /* DENALI_PHY_160_DATA */ + 0x00000000, /* DENALI_PHY_161_DATA */ + 0x00000000, /* DENALI_PHY_162_DATA */ + 0x00000000, /* DENALI_PHY_163_DATA */ + 0x00000000, /* DENALI_PHY_164_DATA */ + 0x00000000, /* DENALI_PHY_165_DATA */ + 0x00000000, /* DENALI_PHY_166_DATA */ + 0x00000000, /* DENALI_PHY_167_DATA */ + 0x00000000, /* DENALI_PHY_168_DATA */ + 0x00000000, /* DENALI_PHY_169_DATA */ + 0x00000000, /* DENALI_PHY_170_DATA */ + 0x00000000, /* DENALI_PHY_171_DATA */ + 0x00000000, /* DENALI_PHY_172_DATA */ + 0x00000000, /* DENALI_PHY_173_DATA */ + 0x00000000, /* DENALI_PHY_174_DATA */ + 0x00000000, /* DENALI_PHY_175_DATA */ + 0x00000000, /* DENALI_PHY_176_DATA */ + 0x00000000, /* DENALI_PHY_177_DATA */ + 0x00000000, /* DENALI_PHY_178_DATA */ + 0x00000000, /* DENALI_PHY_179_DATA */ + 0x00200000, /* DENALI_PHY_180_DATA */ + 0x00000000, /* DENALI_PHY_181_DATA */ + 0x00000000, /* DENALI_PHY_182_DATA */ + 0x00000000, /* DENALI_PHY_183_DATA */ + 0x00000000, /* DENALI_PHY_184_DATA */ + 0x00000000, /* DENALI_PHY_185_DATA */ + 0x00000000, /* DENALI_PHY_186_DATA */ + 0x02800280, /* DENALI_PHY_187_DATA */ + 0x02800280, /* DENALI_PHY_188_DATA */ + 0x02800280, /* DENALI_PHY_189_DATA */ + 0x02800280, /* DENALI_PHY_190_DATA */ + 0x00000280, /* DENALI_PHY_191_DATA */ + 0x00000000, /* DENALI_PHY_192_DATA */ + 0x00000000, /* DENALI_PHY_193_DATA */ + 0x00000000, /* DENALI_PHY_194_DATA */ + 0x00000000, /* DENALI_PHY_195_DATA */ + 0x00800000, /* DENALI_PHY_196_DATA */ + 0x00800080, /* DENALI_PHY_197_DATA */ + 0x00800080, /* DENALI_PHY_198_DATA */ + 0x00800080, /* DENALI_PHY_199_DATA */ + 0x00800080, /* DENALI_PHY_200_DATA */ + 0x00800080, /* DENALI_PHY_201_DATA */ + 0x00800080, /* DENALI_PHY_202_DATA */ + 0x00800080, /* DENALI_PHY_203_DATA */ + 0x00800080, /* DENALI_PHY_204_DATA */ + 0x01190080, /* DENALI_PHY_205_DATA */ + 0x00000002, /* DENALI_PHY_206_DATA */ + 0x00000000, /* DENALI_PHY_207_DATA */ + 0x00000000, /* DENALI_PHY_208_DATA */ + 0x00000200, /* DENALI_PHY_209_DATA */ + 0x00000000, /* DENALI_PHY_210_DATA */ + 0x51315152, /* DENALI_PHY_211_DATA */ + 0xc0013150, /* DENALI_PHY_212_DATA */ + 0x020000c0, /* DENALI_PHY_213_DATA */ + 0x00100001, /* DENALI_PHY_214_DATA */ + 0x07054204, /* DENALI_PHY_215_DATA */ + 0x000f0c18, /* DENALI_PHY_216_DATA */ + 0x01000140, /* DENALI_PHY_217_DATA */ + 0x00000c10, /* DENALI_PHY_218_DATA */ + 0x00000000, /* DENALI_PHY_219_DATA */ + 0x00000000, /* DENALI_PHY_220_DATA */ + 0x00000000, /* DENALI_PHY_221_DATA */ + 0x00000000, /* DENALI_PHY_222_DATA */ + 0x00000000, /* DENALI_PHY_223_DATA */ + 0x00000000, /* DENALI_PHY_224_DATA */ + 0x00000000, /* DENALI_PHY_225_DATA */ + 0x00000000, /* DENALI_PHY_226_DATA */ + 0x00000000, /* DENALI_PHY_227_DATA 
*/ + 0x00000000, /* DENALI_PHY_228_DATA */ + 0x00000000, /* DENALI_PHY_229_DATA */ + 0x00000000, /* DENALI_PHY_230_DATA */ + 0x00000000, /* DENALI_PHY_231_DATA */ + 0x00000000, /* DENALI_PHY_232_DATA */ + 0x00000000, /* DENALI_PHY_233_DATA */ + 0x00000000, /* DENALI_PHY_234_DATA */ + 0x00000000, /* DENALI_PHY_235_DATA */ + 0x00000000, /* DENALI_PHY_236_DATA */ + 0x00000000, /* DENALI_PHY_237_DATA */ + 0x00000000, /* DENALI_PHY_238_DATA */ + 0x00000000, /* DENALI_PHY_239_DATA */ + 0x00000000, /* DENALI_PHY_240_DATA */ + 0x00000000, /* DENALI_PHY_241_DATA */ + 0x00000000, /* DENALI_PHY_242_DATA */ + 0x00000000, /* DENALI_PHY_243_DATA */ + 0x00000000, /* DENALI_PHY_244_DATA */ + 0x00000000, /* DENALI_PHY_245_DATA */ + 0x00000000, /* DENALI_PHY_246_DATA */ + 0x00000000, /* DENALI_PHY_247_DATA */ + 0x00000000, /* DENALI_PHY_248_DATA */ + 0x00000000, /* DENALI_PHY_249_DATA */ + 0x00000000, /* DENALI_PHY_250_DATA */ + 0x00000000, /* DENALI_PHY_251_DATA */ + 0x00000000, /* DENALI_PHY_252_DATA */ + 0x00000000, /* DENALI_PHY_253_DATA */ + 0x00000000, /* DENALI_PHY_254_DATA */ + 0x00000000, /* DENALI_PHY_255_DATA */ + 0x76543210, /* DENALI_PHY_256_DATA */ + 0x0004f008, /* DENALI_PHY_257_DATA */ + 0x00020119, /* DENALI_PHY_258_DATA */ + 0x00000000, /* DENALI_PHY_259_DATA */ + 0x00000000, /* DENALI_PHY_260_DATA */ + 0x00010000, /* DENALI_PHY_261_DATA */ + 0x01665555, /* DENALI_PHY_262_DATA */ + 0x03665555, /* DENALI_PHY_263_DATA */ + 0x00010f00, /* DENALI_PHY_264_DATA */ + 0x05010200, /* DENALI_PHY_265_DATA */ + 0x00000002, /* DENALI_PHY_266_DATA */ + 0x00170180, /* DENALI_PHY_267_DATA */ + 0x00cc0201, /* DENALI_PHY_268_DATA */ + 0x00030066, /* DENALI_PHY_269_DATA */ + 0x00000000, /* DENALI_PHY_270_DATA */ + 0x00000000, /* DENALI_PHY_271_DATA */ + 0x00000000, /* DENALI_PHY_272_DATA */ + 0x00000000, /* DENALI_PHY_273_DATA */ + 0x00000000, /* DENALI_PHY_274_DATA */ + 0x00000000, /* DENALI_PHY_275_DATA */ + 0x00000000, /* DENALI_PHY_276_DATA */ + 0x00000000, /* DENALI_PHY_277_DATA */ + 0x04080000, /* DENALI_PHY_278_DATA */ + 0x04080400, /* DENALI_PHY_279_DATA */ + 0x30000000, /* DENALI_PHY_280_DATA */ + 0x0c00c007, /* DENALI_PHY_281_DATA */ + 0x00000100, /* DENALI_PHY_282_DATA */ + 0x00000000, /* DENALI_PHY_283_DATA */ + 0xfd02fe01, /* DENALI_PHY_284_DATA */ + 0xf708fb04, /* DENALI_PHY_285_DATA */ + 0xdf20ef10, /* DENALI_PHY_286_DATA */ + 0x7f80bf40, /* DENALI_PHY_287_DATA */ + 0x0001aaaa, /* DENALI_PHY_288_DATA */ + 0x00000000, /* DENALI_PHY_289_DATA */ + 0x00000000, /* DENALI_PHY_290_DATA */ + 0x00000000, /* DENALI_PHY_291_DATA */ + 0x00000000, /* DENALI_PHY_292_DATA */ + 0x00000000, /* DENALI_PHY_293_DATA */ + 0x00000000, /* DENALI_PHY_294_DATA */ + 0x00000000, /* DENALI_PHY_295_DATA */ + 0x00000000, /* DENALI_PHY_296_DATA */ + 0x00000000, /* DENALI_PHY_297_DATA */ + 0x00000000, /* DENALI_PHY_298_DATA */ + 0x00000000, /* DENALI_PHY_299_DATA */ + 0x00000000, /* DENALI_PHY_300_DATA */ + 0x00000000, /* DENALI_PHY_301_DATA */ + 0x00000000, /* DENALI_PHY_302_DATA */ + 0x00000000, /* DENALI_PHY_303_DATA */ + 0x00000000, /* DENALI_PHY_304_DATA */ + 0x00000000, /* DENALI_PHY_305_DATA */ + 0x00000000, /* DENALI_PHY_306_DATA */ + 0x00000000, /* DENALI_PHY_307_DATA */ + 0x00200000, /* DENALI_PHY_308_DATA */ + 0x00000000, /* DENALI_PHY_309_DATA */ + 0x00000000, /* DENALI_PHY_310_DATA */ + 0x00000000, /* DENALI_PHY_311_DATA */ + 0x00000000, /* DENALI_PHY_312_DATA */ + 0x00000000, /* DENALI_PHY_313_DATA */ + 0x00000000, /* DENALI_PHY_314_DATA */ + 0x02800280, /* DENALI_PHY_315_DATA */ + 0x02800280, /* 
DENALI_PHY_316_DATA */ + 0x02800280, /* DENALI_PHY_317_DATA */ + 0x02800280, /* DENALI_PHY_318_DATA */ + 0x00000280, /* DENALI_PHY_319_DATA */ + 0x00000000, /* DENALI_PHY_320_DATA */ + 0x00000000, /* DENALI_PHY_321_DATA */ + 0x00000000, /* DENALI_PHY_322_DATA */ + 0x00000000, /* DENALI_PHY_323_DATA */ + 0x00800000, /* DENALI_PHY_324_DATA */ + 0x00800080, /* DENALI_PHY_325_DATA */ + 0x00800080, /* DENALI_PHY_326_DATA */ + 0x00800080, /* DENALI_PHY_327_DATA */ + 0x00800080, /* DENALI_PHY_328_DATA */ + 0x00800080, /* DENALI_PHY_329_DATA */ + 0x00800080, /* DENALI_PHY_330_DATA */ + 0x00800080, /* DENALI_PHY_331_DATA */ + 0x00800080, /* DENALI_PHY_332_DATA */ + 0x01190080, /* DENALI_PHY_333_DATA */ + 0x00000002, /* DENALI_PHY_334_DATA */ + 0x00000000, /* DENALI_PHY_335_DATA */ + 0x00000000, /* DENALI_PHY_336_DATA */ + 0x00000200, /* DENALI_PHY_337_DATA */ + 0x00000000, /* DENALI_PHY_338_DATA */ + 0x51315152, /* DENALI_PHY_339_DATA */ + 0xc0013150, /* DENALI_PHY_340_DATA */ + 0x020000c0, /* DENALI_PHY_341_DATA */ + 0x00100001, /* DENALI_PHY_342_DATA */ + 0x07054204, /* DENALI_PHY_343_DATA */ + 0x000f0c18, /* DENALI_PHY_344_DATA */ + 0x01000140, /* DENALI_PHY_345_DATA */ + 0x00000c10, /* DENALI_PHY_346_DATA */ + 0x00000000, /* DENALI_PHY_347_DATA */ + 0x00000000, /* DENALI_PHY_348_DATA */ + 0x00000000, /* DENALI_PHY_349_DATA */ + 0x00000000, /* DENALI_PHY_350_DATA */ + 0x00000000, /* DENALI_PHY_351_DATA */ + 0x00000000, /* DENALI_PHY_352_DATA */ + 0x00000000, /* DENALI_PHY_353_DATA */ + 0x00000000, /* DENALI_PHY_354_DATA */ + 0x00000000, /* DENALI_PHY_355_DATA */ + 0x00000000, /* DENALI_PHY_356_DATA */ + 0x00000000, /* DENALI_PHY_357_DATA */ + 0x00000000, /* DENALI_PHY_358_DATA */ + 0x00000000, /* DENALI_PHY_359_DATA */ + 0x00000000, /* DENALI_PHY_360_DATA */ + 0x00000000, /* DENALI_PHY_361_DATA */ + 0x00000000, /* DENALI_PHY_362_DATA */ + 0x00000000, /* DENALI_PHY_363_DATA */ + 0x00000000, /* DENALI_PHY_364_DATA */ + 0x00000000, /* DENALI_PHY_365_DATA */ + 0x00000000, /* DENALI_PHY_366_DATA */ + 0x00000000, /* DENALI_PHY_367_DATA */ + 0x00000000, /* DENALI_PHY_368_DATA */ + 0x00000000, /* DENALI_PHY_369_DATA */ + 0x00000000, /* DENALI_PHY_370_DATA */ + 0x00000000, /* DENALI_PHY_371_DATA */ + 0x00000000, /* DENALI_PHY_372_DATA */ + 0x00000000, /* DENALI_PHY_373_DATA */ + 0x00000000, /* DENALI_PHY_374_DATA */ + 0x00000000, /* DENALI_PHY_375_DATA */ + 0x00000000, /* DENALI_PHY_376_DATA */ + 0x00000000, /* DENALI_PHY_377_DATA */ + 0x00000000, /* DENALI_PHY_378_DATA */ + 0x00000000, /* DENALI_PHY_379_DATA */ + 0x00000000, /* DENALI_PHY_380_DATA */ + 0x00000000, /* DENALI_PHY_381_DATA */ + 0x00000000, /* DENALI_PHY_382_DATA */ + 0x00000000, /* DENALI_PHY_383_DATA */ + 0x76543210, /* DENALI_PHY_384_DATA */ + 0x0004f008, /* DENALI_PHY_385_DATA */ + 0x00020119, /* DENALI_PHY_386_DATA */ + 0x00000000, /* DENALI_PHY_387_DATA */ + 0x00000000, /* DENALI_PHY_388_DATA */ + 0x00010000, /* DENALI_PHY_389_DATA */ + 0x01665555, /* DENALI_PHY_390_DATA */ + 0x03665555, /* DENALI_PHY_391_DATA */ + 0x00010f00, /* DENALI_PHY_392_DATA */ + 0x05010200, /* DENALI_PHY_393_DATA */ + 0x00000002, /* DENALI_PHY_394_DATA */ + 0x00170180, /* DENALI_PHY_395_DATA */ + 0x00cc0201, /* DENALI_PHY_396_DATA */ + 0x00030066, /* DENALI_PHY_397_DATA */ + 0x00000000, /* DENALI_PHY_398_DATA */ + 0x00000000, /* DENALI_PHY_399_DATA */ + 0x00000000, /* DENALI_PHY_400_DATA */ + 0x00000000, /* DENALI_PHY_401_DATA */ + 0x00000000, /* DENALI_PHY_402_DATA */ + 0x00000000, /* DENALI_PHY_403_DATA */ + 0x00000000, /* DENALI_PHY_404_DATA */ + 
0x00000000, /* DENALI_PHY_405_DATA */ + 0x04080000, /* DENALI_PHY_406_DATA */ + 0x04080400, /* DENALI_PHY_407_DATA */ + 0x30000000, /* DENALI_PHY_408_DATA */ + 0x0c00c007, /* DENALI_PHY_409_DATA */ + 0x00000100, /* DENALI_PHY_410_DATA */ + 0x00000000, /* DENALI_PHY_411_DATA */ + 0xfd02fe01, /* DENALI_PHY_412_DATA */ + 0xf708fb04, /* DENALI_PHY_413_DATA */ + 0xdf20ef10, /* DENALI_PHY_414_DATA */ + 0x7f80bf40, /* DENALI_PHY_415_DATA */ + 0x0000aaaa, /* DENALI_PHY_416_DATA */ + 0x00000000, /* DENALI_PHY_417_DATA */ + 0x00000000, /* DENALI_PHY_418_DATA */ + 0x00000000, /* DENALI_PHY_419_DATA */ + 0x00000000, /* DENALI_PHY_420_DATA */ + 0x00000000, /* DENALI_PHY_421_DATA */ + 0x00000000, /* DENALI_PHY_422_DATA */ + 0x00000000, /* DENALI_PHY_423_DATA */ + 0x00000000, /* DENALI_PHY_424_DATA */ + 0x00000000, /* DENALI_PHY_425_DATA */ + 0x00000000, /* DENALI_PHY_426_DATA */ + 0x00000000, /* DENALI_PHY_427_DATA */ + 0x00000000, /* DENALI_PHY_428_DATA */ + 0x00000000, /* DENALI_PHY_429_DATA */ + 0x00000000, /* DENALI_PHY_430_DATA */ + 0x00000000, /* DENALI_PHY_431_DATA */ + 0x00000000, /* DENALI_PHY_432_DATA */ + 0x00000000, /* DENALI_PHY_433_DATA */ + 0x00000000, /* DENALI_PHY_434_DATA */ + 0x00000000, /* DENALI_PHY_435_DATA */ + 0x00200000, /* DENALI_PHY_436_DATA */ + 0x00000000, /* DENALI_PHY_437_DATA */ + 0x00000000, /* DENALI_PHY_438_DATA */ + 0x00000000, /* DENALI_PHY_439_DATA */ + 0x00000000, /* DENALI_PHY_440_DATA */ + 0x00000000, /* DENALI_PHY_441_DATA */ + 0x00000000, /* DENALI_PHY_442_DATA */ + 0x02800280, /* DENALI_PHY_443_DATA */ + 0x02800280, /* DENALI_PHY_444_DATA */ + 0x02800280, /* DENALI_PHY_445_DATA */ + 0x02800280, /* DENALI_PHY_446_DATA */ + 0x00000280, /* DENALI_PHY_447_DATA */ + 0x00000000, /* DENALI_PHY_448_DATA */ + 0x00000000, /* DENALI_PHY_449_DATA */ + 0x00000000, /* DENALI_PHY_450_DATA */ + 0x00000000, /* DENALI_PHY_451_DATA */ + 0x00800000, /* DENALI_PHY_452_DATA */ + 0x00800080, /* DENALI_PHY_453_DATA */ + 0x00800080, /* DENALI_PHY_454_DATA */ + 0x00800080, /* DENALI_PHY_455_DATA */ + 0x00800080, /* DENALI_PHY_456_DATA */ + 0x00800080, /* DENALI_PHY_457_DATA */ + 0x00800080, /* DENALI_PHY_458_DATA */ + 0x00800080, /* DENALI_PHY_459_DATA */ + 0x00800080, /* DENALI_PHY_460_DATA */ + 0x01190080, /* DENALI_PHY_461_DATA */ + 0x00000002, /* DENALI_PHY_462_DATA */ + 0x00000000, /* DENALI_PHY_463_DATA */ + 0x00000000, /* DENALI_PHY_464_DATA */ + 0x00000200, /* DENALI_PHY_465_DATA */ + 0x00000000, /* DENALI_PHY_466_DATA */ + 0x51315152, /* DENALI_PHY_467_DATA */ + 0xc0013150, /* DENALI_PHY_468_DATA */ + 0x020000c0, /* DENALI_PHY_469_DATA */ + 0x00100001, /* DENALI_PHY_470_DATA */ + 0x07054204, /* DENALI_PHY_471_DATA */ + 0x000f0c18, /* DENALI_PHY_472_DATA */ + 0x01000140, /* DENALI_PHY_473_DATA */ + 0x00000c10, /* DENALI_PHY_474_DATA */ + 0x00000000, /* DENALI_PHY_475_DATA */ + 0x00000000, /* DENALI_PHY_476_DATA */ + 0x00000000, /* DENALI_PHY_477_DATA */ + 0x00000000, /* DENALI_PHY_478_DATA */ + 0x00000000, /* DENALI_PHY_479_DATA */ + 0x00000000, /* DENALI_PHY_480_DATA */ + 0x00000000, /* DENALI_PHY_481_DATA */ + 0x00000000, /* DENALI_PHY_482_DATA */ + 0x00000000, /* DENALI_PHY_483_DATA */ + 0x00000000, /* DENALI_PHY_484_DATA */ + 0x00000000, /* DENALI_PHY_485_DATA */ + 0x00000000, /* DENALI_PHY_486_DATA */ + 0x00000000, /* DENALI_PHY_487_DATA */ + 0x00000000, /* DENALI_PHY_488_DATA */ + 0x00000000, /* DENALI_PHY_489_DATA */ + 0x00000000, /* DENALI_PHY_490_DATA */ + 0x00000000, /* DENALI_PHY_491_DATA */ + 0x00000000, /* DENALI_PHY_492_DATA */ + 0x00000000, /* DENALI_PHY_493_DATA 
*/ + 0x00000000, /* DENALI_PHY_494_DATA */ + 0x00000000, /* DENALI_PHY_495_DATA */ + 0x00000000, /* DENALI_PHY_496_DATA */ + 0x00000000, /* DENALI_PHY_497_DATA */ + 0x00000000, /* DENALI_PHY_498_DATA */ + 0x00000000, /* DENALI_PHY_499_DATA */ + 0x00000000, /* DENALI_PHY_500_DATA */ + 0x00000000, /* DENALI_PHY_501_DATA */ + 0x00000000, /* DENALI_PHY_502_DATA */ + 0x00000000, /* DENALI_PHY_503_DATA */ + 0x00000000, /* DENALI_PHY_504_DATA */ + 0x00000000, /* DENALI_PHY_505_DATA */ + 0x00000000, /* DENALI_PHY_506_DATA */ + 0x00000000, /* DENALI_PHY_507_DATA */ + 0x00000000, /* DENALI_PHY_508_DATA */ + 0x00000000, /* DENALI_PHY_509_DATA */ + 0x00000000, /* DENALI_PHY_510_DATA */ + 0x00000000, /* DENALI_PHY_511_DATA */ + 0x00000000, /* DENALI_PHY_512_DATA */ + 0x00000000, /* DENALI_PHY_513_DATA */ + 0x00000000, /* DENALI_PHY_514_DATA */ + 0x00000000, /* DENALI_PHY_515_DATA */ + 0x00000000, /* DENALI_PHY_516_DATA */ + 0x00000000, /* DENALI_PHY_517_DATA */ + 0x00000000, /* DENALI_PHY_518_DATA */ + 0x00000002, /* DENALI_PHY_519_DATA */ + 0x00000000, /* DENALI_PHY_520_DATA */ + 0x00000000, /* DENALI_PHY_521_DATA */ + 0x00000000, /* DENALI_PHY_522_DATA */ + 0x00400320, /* DENALI_PHY_523_DATA */ + 0x00000040, /* DENALI_PHY_524_DATA */ + 0x00dcba98, /* DENALI_PHY_525_DATA */ + 0x00000000, /* DENALI_PHY_526_DATA */ + 0x00dcba98, /* DENALI_PHY_527_DATA */ + 0x01000000, /* DENALI_PHY_528_DATA */ + 0x00020003, /* DENALI_PHY_529_DATA */ + 0x00000000, /* DENALI_PHY_530_DATA */ + 0x00000000, /* DENALI_PHY_531_DATA */ + 0x00000000, /* DENALI_PHY_532_DATA */ + 0x0000002a, /* DENALI_PHY_533_DATA */ + 0x00000015, /* DENALI_PHY_534_DATA */ + 0x00000015, /* DENALI_PHY_535_DATA */ + 0x0000002a, /* DENALI_PHY_536_DATA */ + 0x00000033, /* DENALI_PHY_537_DATA */ + 0x0000000c, /* DENALI_PHY_538_DATA */ + 0x0000000c, /* DENALI_PHY_539_DATA */ + 0x00000033, /* DENALI_PHY_540_DATA */ + 0x0a418820, /* DENALI_PHY_541_DATA */ + 0x003f0000, /* DENALI_PHY_542_DATA */ + 0x0000003f, /* DENALI_PHY_543_DATA */ + 0x00030055, /* DENALI_PHY_544_DATA */ + 0x03000300, /* DENALI_PHY_545_DATA */ + 0x03000300, /* DENALI_PHY_546_DATA */ + 0x00000300, /* DENALI_PHY_547_DATA */ + 0x42080010, /* DENALI_PHY_548_DATA */ + 0x00000003, /* DENALI_PHY_549_DATA */ + 0x00000000, /* DENALI_PHY_550_DATA */ + 0x00000000, /* DENALI_PHY_551_DATA */ + 0x00000000, /* DENALI_PHY_552_DATA */ + 0x00000000, /* DENALI_PHY_553_DATA */ + 0x00000000, /* DENALI_PHY_554_DATA */ + 0x00000000, /* DENALI_PHY_555_DATA */ + 0x00000000, /* DENALI_PHY_556_DATA */ + 0x00000000, /* DENALI_PHY_557_DATA */ + 0x00000000, /* DENALI_PHY_558_DATA */ + 0x00000000, /* DENALI_PHY_559_DATA */ + 0x00000000, /* DENALI_PHY_560_DATA */ + 0x00000000, /* DENALI_PHY_561_DATA */ + 0x00000000, /* DENALI_PHY_562_DATA */ + 0x00000000, /* DENALI_PHY_563_DATA */ + 0x00000000, /* DENALI_PHY_564_DATA */ + 0x00000000, /* DENALI_PHY_565_DATA */ + 0x00000000, /* DENALI_PHY_566_DATA */ + 0x00000000, /* DENALI_PHY_567_DATA */ + 0x00000000, /* DENALI_PHY_568_DATA */ + 0x00000000, /* DENALI_PHY_569_DATA */ + 0x00000000, /* DENALI_PHY_570_DATA */ + 0x00000000, /* DENALI_PHY_571_DATA */ + 0x00000000, /* DENALI_PHY_572_DATA */ + 0x00000000, /* DENALI_PHY_573_DATA */ + 0x00000000, /* DENALI_PHY_574_DATA */ + 0x00000000, /* DENALI_PHY_575_DATA */ + 0x00000000, /* DENALI_PHY_576_DATA */ + 0x00000000, /* DENALI_PHY_577_DATA */ + 0x00000000, /* DENALI_PHY_578_DATA */ + 0x00000000, /* DENALI_PHY_579_DATA */ + 0x00000000, /* DENALI_PHY_580_DATA */ + 0x00000000, /* DENALI_PHY_581_DATA */ + 0x00000000, /* 
DENALI_PHY_582_DATA */ + 0x00000000, /* DENALI_PHY_583_DATA */ + 0x00000000, /* DENALI_PHY_584_DATA */ + 0x00000000, /* DENALI_PHY_585_DATA */ + 0x00000000, /* DENALI_PHY_586_DATA */ + 0x00000000, /* DENALI_PHY_587_DATA */ + 0x00000000, /* DENALI_PHY_588_DATA */ + 0x00000000, /* DENALI_PHY_589_DATA */ + 0x00000000, /* DENALI_PHY_590_DATA */ + 0x00000000, /* DENALI_PHY_591_DATA */ + 0x00000000, /* DENALI_PHY_592_DATA */ + 0x00000000, /* DENALI_PHY_593_DATA */ + 0x00000000, /* DENALI_PHY_594_DATA */ + 0x00000000, /* DENALI_PHY_595_DATA */ + 0x00000000, /* DENALI_PHY_596_DATA */ + 0x00000000, /* DENALI_PHY_597_DATA */ + 0x00000000, /* DENALI_PHY_598_DATA */ + 0x00000000, /* DENALI_PHY_599_DATA */ + 0x00000000, /* DENALI_PHY_600_DATA */ + 0x00000000, /* DENALI_PHY_601_DATA */ + 0x00000000, /* DENALI_PHY_602_DATA */ + 0x00000000, /* DENALI_PHY_603_DATA */ + 0x00000000, /* DENALI_PHY_604_DATA */ + 0x00000000, /* DENALI_PHY_605_DATA */ + 0x00000000, /* DENALI_PHY_606_DATA */ + 0x00000000, /* DENALI_PHY_607_DATA */ + 0x00000000, /* DENALI_PHY_608_DATA */ + 0x00000000, /* DENALI_PHY_609_DATA */ + 0x00000000, /* DENALI_PHY_610_DATA */ + 0x00000000, /* DENALI_PHY_611_DATA */ + 0x00000000, /* DENALI_PHY_612_DATA */ + 0x00000000, /* DENALI_PHY_613_DATA */ + 0x00000000, /* DENALI_PHY_614_DATA */ + 0x00000000, /* DENALI_PHY_615_DATA */ + 0x00000000, /* DENALI_PHY_616_DATA */ + 0x00000000, /* DENALI_PHY_617_DATA */ + 0x00000000, /* DENALI_PHY_618_DATA */ + 0x00000000, /* DENALI_PHY_619_DATA */ + 0x00000000, /* DENALI_PHY_620_DATA */ + 0x00000000, /* DENALI_PHY_621_DATA */ + 0x00000000, /* DENALI_PHY_622_DATA */ + 0x00000000, /* DENALI_PHY_623_DATA */ + 0x00000000, /* DENALI_PHY_624_DATA */ + 0x00000000, /* DENALI_PHY_625_DATA */ + 0x00000000, /* DENALI_PHY_626_DATA */ + 0x00000000, /* DENALI_PHY_627_DATA */ + 0x00000000, /* DENALI_PHY_628_DATA */ + 0x00000000, /* DENALI_PHY_629_DATA */ + 0x00000000, /* DENALI_PHY_630_DATA */ + 0x00000000, /* DENALI_PHY_631_DATA */ + 0x00000000, /* DENALI_PHY_632_DATA */ + 0x00000000, /* DENALI_PHY_633_DATA */ + 0x00000000, /* DENALI_PHY_634_DATA */ + 0x00000000, /* DENALI_PHY_635_DATA */ + 0x00000000, /* DENALI_PHY_636_DATA */ + 0x00000000, /* DENALI_PHY_637_DATA */ + 0x00000000, /* DENALI_PHY_638_DATA */ + 0x00000000, /* DENALI_PHY_639_DATA */ + 0x00000000, /* DENALI_PHY_640_DATA */ + 0x00000000, /* DENALI_PHY_641_DATA */ + 0x00000000, /* DENALI_PHY_642_DATA */ + 0x00000000, /* DENALI_PHY_643_DATA */ + 0x00000000, /* DENALI_PHY_644_DATA */ + 0x00000000, /* DENALI_PHY_645_DATA */ + 0x00000000, /* DENALI_PHY_646_DATA */ + 0x00000002, /* DENALI_PHY_647_DATA */ + 0x00000000, /* DENALI_PHY_648_DATA */ + 0x00000000, /* DENALI_PHY_649_DATA */ + 0x00000000, /* DENALI_PHY_650_DATA */ + 0x00400320, /* DENALI_PHY_651_DATA */ + 0x00000040, /* DENALI_PHY_652_DATA */ + 0x00000000, /* DENALI_PHY_653_DATA */ + 0x00000000, /* DENALI_PHY_654_DATA */ + 0x00000000, /* DENALI_PHY_655_DATA */ + 0x01000000, /* DENALI_PHY_656_DATA */ + 0x00020003, /* DENALI_PHY_657_DATA */ + 0x00000000, /* DENALI_PHY_658_DATA */ + 0x00000000, /* DENALI_PHY_659_DATA */ + 0x00000000, /* DENALI_PHY_660_DATA */ + 0x0000002a, /* DENALI_PHY_661_DATA */ + 0x00000015, /* DENALI_PHY_662_DATA */ + 0x00000015, /* DENALI_PHY_663_DATA */ + 0x0000002a, /* DENALI_PHY_664_DATA */ + 0x00000033, /* DENALI_PHY_665_DATA */ + 0x0000000c, /* DENALI_PHY_666_DATA */ + 0x0000000c, /* DENALI_PHY_667_DATA */ + 0x00000033, /* DENALI_PHY_668_DATA */ + 0x00000000, /* DENALI_PHY_669_DATA */ + 0x00000000, /* DENALI_PHY_670_DATA */ + 
0x00000000, /* DENALI_PHY_671_DATA */ + 0x00030055, /* DENALI_PHY_672_DATA */ + 0x03000300, /* DENALI_PHY_673_DATA */ + 0x03000300, /* DENALI_PHY_674_DATA */ + 0x00000300, /* DENALI_PHY_675_DATA */ + 0x42080010, /* DENALI_PHY_676_DATA */ + 0x00000003, /* DENALI_PHY_677_DATA */ + 0x00000000, /* DENALI_PHY_678_DATA */ + 0x00000000, /* DENALI_PHY_679_DATA */ + 0x00000000, /* DENALI_PHY_680_DATA */ + 0x00000000, /* DENALI_PHY_681_DATA */ + 0x00000000, /* DENALI_PHY_682_DATA */ + 0x00000000, /* DENALI_PHY_683_DATA */ + 0x00000000, /* DENALI_PHY_684_DATA */ + 0x00000000, /* DENALI_PHY_685_DATA */ + 0x00000000, /* DENALI_PHY_686_DATA */ + 0x00000000, /* DENALI_PHY_687_DATA */ + 0x00000000, /* DENALI_PHY_688_DATA */ + 0x00000000, /* DENALI_PHY_689_DATA */ + 0x00000000, /* DENALI_PHY_690_DATA */ + 0x00000000, /* DENALI_PHY_691_DATA */ + 0x00000000, /* DENALI_PHY_692_DATA */ + 0x00000000, /* DENALI_PHY_693_DATA */ + 0x00000000, /* DENALI_PHY_694_DATA */ + 0x00000000, /* DENALI_PHY_695_DATA */ + 0x00000000, /* DENALI_PHY_696_DATA */ + 0x00000000, /* DENALI_PHY_697_DATA */ + 0x00000000, /* DENALI_PHY_698_DATA */ + 0x00000000, /* DENALI_PHY_699_DATA */ + 0x00000000, /* DENALI_PHY_700_DATA */ + 0x00000000, /* DENALI_PHY_701_DATA */ + 0x00000000, /* DENALI_PHY_702_DATA */ + 0x00000000, /* DENALI_PHY_703_DATA */ + 0x00000000, /* DENALI_PHY_704_DATA */ + 0x00000000, /* DENALI_PHY_705_DATA */ + 0x00000000, /* DENALI_PHY_706_DATA */ + 0x00000000, /* DENALI_PHY_707_DATA */ + 0x00000000, /* DENALI_PHY_708_DATA */ + 0x00000000, /* DENALI_PHY_709_DATA */ + 0x00000000, /* DENALI_PHY_710_DATA */ + 0x00000000, /* DENALI_PHY_711_DATA */ + 0x00000000, /* DENALI_PHY_712_DATA */ + 0x00000000, /* DENALI_PHY_713_DATA */ + 0x00000000, /* DENALI_PHY_714_DATA */ + 0x00000000, /* DENALI_PHY_715_DATA */ + 0x00000000, /* DENALI_PHY_716_DATA */ + 0x00000000, /* DENALI_PHY_717_DATA */ + 0x00000000, /* DENALI_PHY_718_DATA */ + 0x00000000, /* DENALI_PHY_719_DATA */ + 0x00000000, /* DENALI_PHY_720_DATA */ + 0x00000000, /* DENALI_PHY_721_DATA */ + 0x00000000, /* DENALI_PHY_722_DATA */ + 0x00000000, /* DENALI_PHY_723_DATA */ + 0x00000000, /* DENALI_PHY_724_DATA */ + 0x00000000, /* DENALI_PHY_725_DATA */ + 0x00000000, /* DENALI_PHY_726_DATA */ + 0x00000000, /* DENALI_PHY_727_DATA */ + 0x00000000, /* DENALI_PHY_728_DATA */ + 0x00000000, /* DENALI_PHY_729_DATA */ + 0x00000000, /* DENALI_PHY_730_DATA */ + 0x00000000, /* DENALI_PHY_731_DATA */ + 0x00000000, /* DENALI_PHY_732_DATA */ + 0x00000000, /* DENALI_PHY_733_DATA */ + 0x00000000, /* DENALI_PHY_734_DATA */ + 0x00000000, /* DENALI_PHY_735_DATA */ + 0x00000000, /* DENALI_PHY_736_DATA */ + 0x00000000, /* DENALI_PHY_737_DATA */ + 0x00000000, /* DENALI_PHY_738_DATA */ + 0x00000000, /* DENALI_PHY_739_DATA */ + 0x00000000, /* DENALI_PHY_740_DATA */ + 0x00000000, /* DENALI_PHY_741_DATA */ + 0x00000000, /* DENALI_PHY_742_DATA */ + 0x00000000, /* DENALI_PHY_743_DATA */ + 0x00000000, /* DENALI_PHY_744_DATA */ + 0x00000000, /* DENALI_PHY_745_DATA */ + 0x00000000, /* DENALI_PHY_746_DATA */ + 0x00000000, /* DENALI_PHY_747_DATA */ + 0x00000000, /* DENALI_PHY_748_DATA */ + 0x00000000, /* DENALI_PHY_749_DATA */ + 0x00000000, /* DENALI_PHY_750_DATA */ + 0x00000000, /* DENALI_PHY_751_DATA */ + 0x00000000, /* DENALI_PHY_752_DATA */ + 0x00000000, /* DENALI_PHY_753_DATA */ + 0x00000000, /* DENALI_PHY_754_DATA */ + 0x00000000, /* DENALI_PHY_755_DATA */ + 0x00000000, /* DENALI_PHY_756_DATA */ + 0x00000000, /* DENALI_PHY_757_DATA */ + 0x00000000, /* DENALI_PHY_758_DATA */ + 0x00000000, /* DENALI_PHY_759_DATA 
*/ + 0x00000000, /* DENALI_PHY_760_DATA */ + 0x00000000, /* DENALI_PHY_761_DATA */ + 0x00000000, /* DENALI_PHY_762_DATA */ + 0x00000000, /* DENALI_PHY_763_DATA */ + 0x00000000, /* DENALI_PHY_764_DATA */ + 0x00000000, /* DENALI_PHY_765_DATA */ + 0x00000000, /* DENALI_PHY_766_DATA */ + 0x00000000, /* DENALI_PHY_767_DATA */ + 0x00000000, /* DENALI_PHY_768_DATA */ + 0x00000000, /* DENALI_PHY_769_DATA */ + 0x00000000, /* DENALI_PHY_770_DATA */ + 0x00000000, /* DENALI_PHY_771_DATA */ + 0x00000000, /* DENALI_PHY_772_DATA */ + 0x00000000, /* DENALI_PHY_773_DATA */ + 0x00000000, /* DENALI_PHY_774_DATA */ + 0x00000002, /* DENALI_PHY_775_DATA */ + 0x00000000, /* DENALI_PHY_776_DATA */ + 0x00000000, /* DENALI_PHY_777_DATA */ + 0x00000000, /* DENALI_PHY_778_DATA */ + 0x00400320, /* DENALI_PHY_779_DATA */ + 0x00000040, /* DENALI_PHY_780_DATA */ + 0x00000000, /* DENALI_PHY_781_DATA */ + 0x00000000, /* DENALI_PHY_782_DATA */ + 0x00000000, /* DENALI_PHY_783_DATA */ + 0x01000000, /* DENALI_PHY_784_DATA */ + 0x00020003, /* DENALI_PHY_785_DATA */ + 0x00000000, /* DENALI_PHY_786_DATA */ + 0x00000000, /* DENALI_PHY_787_DATA */ + 0x00000000, /* DENALI_PHY_788_DATA */ + 0x0000002a, /* DENALI_PHY_789_DATA */ + 0x00000015, /* DENALI_PHY_790_DATA */ + 0x00000015, /* DENALI_PHY_791_DATA */ + 0x0000002a, /* DENALI_PHY_792_DATA */ + 0x00000033, /* DENALI_PHY_793_DATA */ + 0x0000000c, /* DENALI_PHY_794_DATA */ + 0x0000000c, /* DENALI_PHY_795_DATA */ + 0x00000033, /* DENALI_PHY_796_DATA */ + 0x1ee6b16a, /* DENALI_PHY_797_DATA */ + 0x10000000, /* DENALI_PHY_798_DATA */ + 0x00000000, /* DENALI_PHY_799_DATA */ + 0x00030055, /* DENALI_PHY_800_DATA */ + 0x03000300, /* DENALI_PHY_801_DATA */ + 0x03000300, /* DENALI_PHY_802_DATA */ + 0x00000300, /* DENALI_PHY_803_DATA */ + 0x42080010, /* DENALI_PHY_804_DATA */ + 0x00000003, /* DENALI_PHY_805_DATA */ + 0x00000000, /* DENALI_PHY_806_DATA */ + 0x00000000, /* DENALI_PHY_807_DATA */ + 0x00000000, /* DENALI_PHY_808_DATA */ + 0x00000000, /* DENALI_PHY_809_DATA */ + 0x00000000, /* DENALI_PHY_810_DATA */ + 0x00000000, /* DENALI_PHY_811_DATA */ + 0x00000000, /* DENALI_PHY_812_DATA */ + 0x00000000, /* DENALI_PHY_813_DATA */ + 0x00000000, /* DENALI_PHY_814_DATA */ + 0x00000000, /* DENALI_PHY_815_DATA */ + 0x00000000, /* DENALI_PHY_816_DATA */ + 0x00000000, /* DENALI_PHY_817_DATA */ + 0x00000000, /* DENALI_PHY_818_DATA */ + 0x00000000, /* DENALI_PHY_819_DATA */ + 0x00000000, /* DENALI_PHY_820_DATA */ + 0x00000000, /* DENALI_PHY_821_DATA */ + 0x00000000, /* DENALI_PHY_822_DATA */ + 0x00000000, /* DENALI_PHY_823_DATA */ + 0x00000000, /* DENALI_PHY_824_DATA */ + 0x00000000, /* DENALI_PHY_825_DATA */ + 0x00000000, /* DENALI_PHY_826_DATA */ + 0x00000000, /* DENALI_PHY_827_DATA */ + 0x00000000, /* DENALI_PHY_828_DATA */ + 0x00000000, /* DENALI_PHY_829_DATA */ + 0x00000000, /* DENALI_PHY_830_DATA */ + 0x00000000, /* DENALI_PHY_831_DATA */ + 0x00000000, /* DENALI_PHY_832_DATA */ + 0x00000000, /* DENALI_PHY_833_DATA */ + 0x00000000, /* DENALI_PHY_834_DATA */ + 0x00000000, /* DENALI_PHY_835_DATA */ + 0x00000000, /* DENALI_PHY_836_DATA */ + 0x00000000, /* DENALI_PHY_837_DATA */ + 0x00000000, /* DENALI_PHY_838_DATA */ + 0x00000000, /* DENALI_PHY_839_DATA */ + 0x00000000, /* DENALI_PHY_840_DATA */ + 0x00000000, /* DENALI_PHY_841_DATA */ + 0x00000000, /* DENALI_PHY_842_DATA */ + 0x00000000, /* DENALI_PHY_843_DATA */ + 0x00000000, /* DENALI_PHY_844_DATA */ + 0x00000000, /* DENALI_PHY_845_DATA */ + 0x00000000, /* DENALI_PHY_846_DATA */ + 0x00000000, /* DENALI_PHY_847_DATA */ + 0x00000000, /* 
DENALI_PHY_848_DATA */ + 0x00000000, /* DENALI_PHY_849_DATA */ + 0x00000000, /* DENALI_PHY_850_DATA */ + 0x00000000, /* DENALI_PHY_851_DATA */ + 0x00000000, /* DENALI_PHY_852_DATA */ + 0x00000000, /* DENALI_PHY_853_DATA */ + 0x00000000, /* DENALI_PHY_854_DATA */ + 0x00000000, /* DENALI_PHY_855_DATA */ + 0x00000000, /* DENALI_PHY_856_DATA */ + 0x00000000, /* DENALI_PHY_857_DATA */ + 0x00000000, /* DENALI_PHY_858_DATA */ + 0x00000000, /* DENALI_PHY_859_DATA */ + 0x00000000, /* DENALI_PHY_860_DATA */ + 0x00000000, /* DENALI_PHY_861_DATA */ + 0x00000000, /* DENALI_PHY_862_DATA */ + 0x00000000, /* DENALI_PHY_863_DATA */ + 0x00000000, /* DENALI_PHY_864_DATA */ + 0x00000000, /* DENALI_PHY_865_DATA */ + 0x00000000, /* DENALI_PHY_866_DATA */ + 0x00000000, /* DENALI_PHY_867_DATA */ + 0x00000000, /* DENALI_PHY_868_DATA */ + 0x00000000, /* DENALI_PHY_869_DATA */ + 0x00000000, /* DENALI_PHY_870_DATA */ + 0x00000000, /* DENALI_PHY_871_DATA */ + 0x00000000, /* DENALI_PHY_872_DATA */ + 0x00000000, /* DENALI_PHY_873_DATA */ + 0x00000000, /* DENALI_PHY_874_DATA */ + 0x00000000, /* DENALI_PHY_875_DATA */ + 0x00000000, /* DENALI_PHY_876_DATA */ + 0x00000000, /* DENALI_PHY_877_DATA */ + 0x00000000, /* DENALI_PHY_878_DATA */ + 0x00000000, /* DENALI_PHY_879_DATA */ + 0x00000000, /* DENALI_PHY_880_DATA */ + 0x00000000, /* DENALI_PHY_881_DATA */ + 0x00000000, /* DENALI_PHY_882_DATA */ + 0x00000000, /* DENALI_PHY_883_DATA */ + 0x00000000, /* DENALI_PHY_884_DATA */ + 0x00000000, /* DENALI_PHY_885_DATA */ + 0x00000000, /* DENALI_PHY_886_DATA */ + 0x00000000, /* DENALI_PHY_887_DATA */ + 0x00000000, /* DENALI_PHY_888_DATA */ + 0x00000000, /* DENALI_PHY_889_DATA */ + 0x00000000, /* DENALI_PHY_890_DATA */ + 0x00000000, /* DENALI_PHY_891_DATA */ + 0x00000000, /* DENALI_PHY_892_DATA */ + 0x00000000, /* DENALI_PHY_893_DATA */ + 0x00000000, /* DENALI_PHY_894_DATA */ + 0x00000000, /* DENALI_PHY_895_DATA */ + 0x00000000, /* DENALI_PHY_896_DATA */ + 0x00000000, /* DENALI_PHY_897_DATA */ + 0x00000005, /* DENALI_PHY_898_DATA */ + 0x04000f01, /* DENALI_PHY_899_DATA */ + 0x00020040, /* DENALI_PHY_900_DATA */ + 0x00020055, /* DENALI_PHY_901_DATA */ + 0x00000000, /* DENALI_PHY_902_DATA */ + 0x00000000, /* DENALI_PHY_903_DATA */ + 0x00000000, /* DENALI_PHY_904_DATA */ + 0x00000050, /* DENALI_PHY_905_DATA */ + 0x00000000, /* DENALI_PHY_906_DATA */ + 0x01010100, /* DENALI_PHY_907_DATA */ + 0x00000600, /* DENALI_PHY_908_DATA */ + 0x00000000, /* DENALI_PHY_909_DATA */ + 0x00006400, /* DENALI_PHY_910_DATA */ + 0x01221102, /* DENALI_PHY_911_DATA */ + 0x00000000, /* DENALI_PHY_912_DATA */ + 0x000d1f00, /* DENALI_PHY_913_DATA */ + 0x0d1f0d1f, /* DENALI_PHY_914_DATA */ + 0x0d1f0d1f, /* DENALI_PHY_915_DATA */ + 0x00030003, /* DENALI_PHY_916_DATA */ + 0x03000300, /* DENALI_PHY_917_DATA */ + 0x00000300, /* DENALI_PHY_918_DATA */ + 0x01221102, /* DENALI_PHY_919_DATA */ + 0x00000000, /* DENALI_PHY_920_DATA */ + 0x00000000, /* DENALI_PHY_921_DATA */ + 0x03020000, /* DENALI_PHY_922_DATA */ + 0x00000001, /* DENALI_PHY_923_DATA */ + 0x00000411, /* DENALI_PHY_924_DATA */ + 0x00000411, /* DENALI_PHY_925_DATA */ + 0x00000040, /* DENALI_PHY_926_DATA */ + 0x00000040, /* DENALI_PHY_927_DATA */ + 0x00000411, /* DENALI_PHY_928_DATA */ + 0x00000411, /* DENALI_PHY_929_DATA */ + 0x00004410, /* DENALI_PHY_930_DATA */ + 0x00004410, /* DENALI_PHY_931_DATA */ + 0x00004410, /* DENALI_PHY_932_DATA */ + 0x00004410, /* DENALI_PHY_933_DATA */ + 0x00004410, /* DENALI_PHY_934_DATA */ + 0x00000411, /* DENALI_PHY_935_DATA */ + 0x00004410, /* DENALI_PHY_936_DATA */ + 
0x00000411, /* DENALI_PHY_937_DATA */ + 0x00004410, /* DENALI_PHY_938_DATA */ + 0x00000411, /* DENALI_PHY_939_DATA */ + 0x00004410, /* DENALI_PHY_940_DATA */ + 0x00000000, /* DENALI_PHY_941_DATA */ + 0x00000000, /* DENALI_PHY_942_DATA */ + 0x00000000, /* DENALI_PHY_943_DATA */ + 0x64000000, /* DENALI_PHY_944_DATA */ + 0x00000000, /* DENALI_PHY_945_DATA */ + 0x00000000, /* DENALI_PHY_946_DATA */ + 0x00000508, /* DENALI_PHY_947_DATA */ + 0x00000000, /* DENALI_PHY_948_DATA */ + 0x00000000, /* DENALI_PHY_949_DATA */ + 0x00000000, /* DENALI_PHY_950_DATA */ + 0x00000000, /* DENALI_PHY_951_DATA */ + 0x00000000, /* DENALI_PHY_952_DATA */ + 0x00000000, /* DENALI_PHY_953_DATA */ + 0xe4000000, /* DENALI_PHY_954_DATA */ + 0x00000000, /* DENALI_PHY_955_DATA */ + 0x00000000, /* DENALI_PHY_956_DATA */ + 0x01010000, /* DENALI_PHY_957_DATA */ + 0x00000000 /* DENALI_PHY_958_DATA */ + } + }, +}, diff --git a/drivers/ram/rockchip/sdram_debug.c b/drivers/ram/rockchip/sdram_debug.c new file mode 100644 index 0000000000..9cf662675b --- /dev/null +++ b/drivers/ram/rockchip/sdram_debug.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * (C) Copyright 2019 Rockchip Electronics Co., Ltd + * (C) Copyright 2019 Amarula Solutions. + * Author: Jagan Teki <jagan@amarulasolutions.com> + */ + +#include <common.h> +#include <debug_uart.h> +#include <asm/arch-rockchip/sdram_common.h> + +void sdram_print_dram_type(unsigned char dramtype) +{ + switch (dramtype) { + case DDR3: + printascii("DDR3"); + break; + case DDR4: + printascii("DDR4"); + break; + case LPDDR2: + printascii("LPDDR2"); + break; + case LPDDR3: + printascii("LPDDR3"); + break; + case LPDDR4: + printascii("LPDDR4"); + break; + default: + printascii("Unknown Device"); + break; + } +} + +/** + * cs = 0, cs0 + * cs = 1, cs1 + * cs => 2, cs0+cs1 + * note: it didn't consider about row_3_4 + */ +u64 sdram_get_cs_cap(struct sdram_cap_info *cap_info, u32 cs, u32 dram_type) +{ + u32 bg; + u64 cap[2]; + + if (dram_type == DDR4) + /* DDR4 8bit dram BG = 2(4bank groups), + * 16bit dram BG = 1 (2 bank groups) + */ + bg = (cap_info->dbw == 0) ? 2 : 1; + else + bg = 0; + + cap[0] = 1llu << (cap_info->bw + cap_info->col + + bg + cap_info->bk + cap_info->cs0_row); + + if (cap_info->rank == 2) + cap[1] = 1llu << (cap_info->bw + cap_info->col + + bg + cap_info->bk + cap_info->cs1_row); + else + cap[1] = 0; + + if (cs == 0) + return cap[0]; + else if (cs == 1) + return cap[1]; + else + return (cap[0] + cap[1]); +} + +void sdram_print_ddr_info(struct sdram_cap_info *cap_info, + struct sdram_base_params *base) +{ + u32 bg, cap; + + bg = (cap_info->dbw == 0) ? 
2 : 1; + + sdram_print_dram_type(base->dramtype); + + printascii(", "); + printdec(base->ddr_freq); + printascii("MHz\n"); + + printascii("BW="); + printdec(8 << cap_info->bw); + + printascii(" Col="); + printdec(cap_info->col); + + printascii(" Bk="); + printdec(0x1 << cap_info->bk); + if (base->dramtype == DDR4) { + printascii(" BG="); + printdec(1 << bg); + } + + printascii(" CS0 Row="); + printdec(cap_info->cs0_row); + if (cap_info->rank > 1) { + printascii(" CS1 Row="); + printdec(cap_info->cs1_row); + } + + printascii(" CS="); + printdec(cap_info->rank); + + printascii(" Die BW="); + printdec(8 << cap_info->dbw); + + cap = sdram_get_cs_cap(cap_info, 3, base->dramtype); + if (cap_info->row_3_4) + cap = cap * 3 / 4; + + printascii(" Size="); + printdec(cap >> 20); + printascii("MB\n"); +} + +void sdram_print_stride(unsigned int stride) +{ + switch (stride) { + case 0xc: + printf("128B stride\n"); + break; + case 5: + case 9: + case 0xd: + case 0x11: + case 0x19: + printf("256B stride\n"); + break; + case 0xa: + case 0xe: + case 0x12: + printf("512B stride\n"); + break; + case 0xf: + printf("4K stride\n"); + break; + case 0x1f: + printf("32MB + 256B stride\n"); + break; + default: + printf("no stride\n"); + } +} diff --git a/drivers/ram/rockchip/sdram_rk322x.c b/drivers/ram/rockchip/sdram_rk322x.c index e96ac54c39..94893e17cf 100644 --- a/drivers/ram/rockchip/sdram_rk322x.c +++ b/drivers/ram/rockchip/sdram_rk322x.c @@ -16,7 +16,6 @@ #include <asm/arch-rockchip/grf_rk322x.h> #include <asm/arch-rockchip/hardware.h> #include <asm/arch-rockchip/sdram_rk322x.h> -#include <asm/arch-rockchip/timer.h> #include <asm/arch-rockchip/uart.h> #include <asm/arch-rockchip/sdram_common.h> #include <asm/types.h> @@ -96,26 +95,26 @@ void phy_pctrl_reset(struct rk322x_cru *cru, 1 << DDRCTRL_PSRST_SHIFT | 1 << DDRCTRL_SRST_SHIFT | 1 << DDRPHY_PSRST_SHIFT | 1 << DDRPHY_SRST_SHIFT); - rockchip_udelay(10); + udelay(10); rk_clrreg(&cru->cru_softrst_con[5], 1 << DDRPHY_PSRST_SHIFT | 1 << DDRPHY_SRST_SHIFT); - rockchip_udelay(10); + udelay(10); rk_clrreg(&cru->cru_softrst_con[5], 1 << DDRCTRL_PSRST_SHIFT | 1 << DDRCTRL_SRST_SHIFT); - rockchip_udelay(10); + udelay(10); clrbits_le32(&ddr_phy->ddrphy_reg[0], SOFT_RESET_MASK << SOFT_RESET_SHIFT); - rockchip_udelay(10); + udelay(10); setbits_le32(&ddr_phy->ddrphy_reg[0], SOFT_DERESET_ANALOG); - rockchip_udelay(5); + udelay(5); setbits_le32(&ddr_phy->ddrphy_reg[0], SOFT_DERESET_DIGITAL); - rockchip_udelay(1); + udelay(1); } void phy_dll_bypass_set(struct rk322x_ddr_phy *ddr_phy, u32 freq) @@ -154,7 +153,7 @@ static void send_command(struct rk322x_ddr_pctl *pctl, u32 rank, u32 cmd, u32 arg) { writel((START_CMD | (rank << 20) | arg | cmd), &pctl->mcmd); - rockchip_udelay(1); + udelay(1); while (readl(&pctl->mcmd) & START_CMD) ; } @@ -167,7 +166,7 @@ static void memory_init(struct chan_info *chan, if (dramtype == DDR3) { send_command(pctl, 3, DESELECT_CMD, 0); - rockchip_udelay(1); + udelay(1); send_command(pctl, 3, PREA_CMD, 0); send_command(pctl, 3, MRS_CMD, (0x02 & BANK_ADDR_MASK) << BANK_ADDR_SHIFT | @@ -196,17 +195,17 @@ static void memory_init(struct chan_info *chan, (0x63 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT | (0 & LPDDR23_OP_MASK) << LPDDR23_OP_SHIFT); - rockchip_udelay(10); + udelay(10); send_command(pctl, 3, MRS_CMD, (0x10 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT | (0xff & LPDDR23_OP_MASK) << LPDDR23_OP_SHIFT); - rockchip_udelay(1); + udelay(1); send_command(pctl, 3, MRS_CMD, (0x10 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT | (0xff & LPDDR23_OP_MASK) << 
LPDDR23_OP_SHIFT); - rockchip_udelay(1); + udelay(1); send_command(pctl, 3, MRS_CMD, (1 & LPDDR23_MA_MASK) << LPDDR23_MA_SHIFT | (sdram_params->phy_timing.mr[1] & @@ -243,7 +242,7 @@ static u32 data_training(struct chan_info *chan) DQS_SQU_CAL_SEL_CS0); setbits_le32(&ddr_phy->ddrphy_reg[2], DQS_SQU_CAL_START); - rockchip_udelay(30); + udelay(30); ret = readl(&ddr_phy->ddrphy_reg[0xff]); clrbits_le32(&ddr_phy->ddrphy_reg[2], @@ -367,9 +366,9 @@ static void phy_softreset(struct dram_info *dram) writel(GRF_DDRPHY_BUFFEREN_CORE_EN, &grf->soc_con[0]); clrbits_le32(&ddr_phy->ddrphy_reg[0], 0x3 << 2); - rockchip_udelay(1); + udelay(1); setbits_le32(&ddr_phy->ddrphy_reg[0], 1 << 2); - rockchip_udelay(5); + udelay(5); setbits_le32(&ddr_phy->ddrphy_reg[0], 1 << 3); writel(GRF_DDRPHY_BUFFEREN_CORE_DIS, &grf->soc_con[0]); } diff --git a/drivers/ram/rockchip/sdram_rk3399.c b/drivers/ram/rockchip/sdram_rk3399.c index 52518656c4..81fc71c051 100644 --- a/drivers/ram/rockchip/sdram_rk3399.c +++ b/drivers/ram/rockchip/sdram_rk3399.c @@ -14,14 +14,40 @@ #include <syscon.h> #include <asm/io.h> #include <asm/arch-rockchip/clock.h> -#include <asm/arch-rockchip/sdram_common.h> -#include <asm/arch-rockchip/sdram_rk3399.h> #include <asm/arch-rockchip/cru_rk3399.h> #include <asm/arch-rockchip/grf_rk3399.h> +#include <asm/arch-rockchip/pmu_rk3399.h> #include <asm/arch-rockchip/hardware.h> +#include <asm/arch-rockchip/sdram_common.h> +#include <asm/arch-rockchip/sdram_rk3399.h> #include <linux/err.h> #include <time.h> +#define PRESET_SGRF_HOLD(n) ((0x1 << (6 + 16)) | ((n) << 6)) +#define PRESET_GPIO0_HOLD(n) ((0x1 << (7 + 16)) | ((n) << 7)) +#define PRESET_GPIO1_HOLD(n) ((0x1 << (8 + 16)) | ((n) << 8)) + +#define PHY_DRV_ODT_HI_Z 0x0 +#define PHY_DRV_ODT_240 0x1 +#define PHY_DRV_ODT_120 0x8 +#define PHY_DRV_ODT_80 0x9 +#define PHY_DRV_ODT_60 0xc +#define PHY_DRV_ODT_48 0xd +#define PHY_DRV_ODT_40 0xe +#define PHY_DRV_ODT_34_3 0xf + +#define PHY_BOOSTP_EN 0x1 +#define PHY_BOOSTN_EN 0x1 +#define PHY_SLEWP_EN 0x1 +#define PHY_SLEWN_EN 0x1 +#define PHY_RX_CM_INPUT 0x1 +#define CS0_MR22_VAL 0 +#define CS1_MR22_VAL 3 + +#define CRU_SFTRST_DDR_CTRL(ch, n) ((0x1 << (8 + 16 + (ch) * 4)) | \ + ((n) << (8 + (ch) * 4))) +#define CRU_SFTRST_DDR_PHY(ch, n) ((0x1 << (9 + 16 + (ch) * 4)) | \ + ((n) << (9 + (ch) * 4))) struct chan_info { struct rk3399_ddr_pctl_regs *pctl; struct rk3399_ddr_pi_regs *pi; @@ -32,29 +58,27 @@ struct chan_info { struct dram_info { #if defined(CONFIG_TPL_BUILD) || \ (!defined(CONFIG_TPL) && defined(CONFIG_SPL_BUILD)) + u32 pwrup_srefresh_exit[2]; struct chan_info chan[2]; struct clk ddr_clk; struct rk3399_cru *cru; + struct rk3399_grf_regs *grf; + struct rk3399_pmu_regs *pmu; struct rk3399_pmucru *pmucru; struct rk3399_pmusgrf_regs *pmusgrf; struct rk3399_ddr_cic_regs *cic; + const struct sdram_rk3399_ops *ops; #endif struct ram_info info; struct rk3399_pmugrf_regs *pmugrf; }; -#define PRESET_SGRF_HOLD(n) ((0x1 << (6 + 16)) | ((n) << 6)) -#define PRESET_GPIO0_HOLD(n) ((0x1 << (7 + 16)) | ((n) << 7)) -#define PRESET_GPIO1_HOLD(n) ((0x1 << (8 + 16)) | ((n) << 8)) - -#define PHY_DRV_ODT_Hi_Z 0x0 -#define PHY_DRV_ODT_240 0x1 -#define PHY_DRV_ODT_120 0x8 -#define PHY_DRV_ODT_80 0x9 -#define PHY_DRV_ODT_60 0xc -#define PHY_DRV_ODT_48 0xd -#define PHY_DRV_ODT_40 0xe -#define PHY_DRV_ODT_34_3 0xf +struct sdram_rk3399_ops { + int (*data_training)(struct dram_info *dram, u32 channel, u8 rank, + struct rk3399_sdram_params *sdram); + int (*set_rate)(struct dram_info *dram, + struct rk3399_sdram_params *params); +}; 
#if defined(CONFIG_TPL_BUILD) || \ (!defined(CONFIG_TPL) && defined(CONFIG_SPL_BUILD)) @@ -68,6 +92,154 @@ struct rockchip_dmc_plat { struct regmap *map; }; +struct io_setting { + u32 mhz; + u32 mr5; + /* dram side */ + u32 dq_odt; + u32 ca_odt; + u32 pdds; + u32 dq_vref; + u32 ca_vref; + /* phy side */ + u32 rd_odt; + u32 wr_dq_drv; + u32 wr_ca_drv; + u32 wr_ckcs_drv; + u32 rd_odt_en; + u32 rd_vref; +} lpddr4_io_setting[] = { + { + 50 * MHz, + 0, + /* dram side */ + 0, /* dq_odt; */ + 0, /* ca_odt; */ + 6, /* pdds; */ + 0x72, /* dq_vref; */ + 0x72, /* ca_vref; */ + /* phy side */ + PHY_DRV_ODT_HI_Z, /* rd_odt; */ + PHY_DRV_ODT_40, /* wr_dq_drv; */ + PHY_DRV_ODT_40, /* wr_ca_drv; */ + PHY_DRV_ODT_40, /* wr_ckcs_drv; */ + 0, /* rd_odt_en;*/ + 41, /* rd_vref; (unit %, range 3.3% - 48.7%) */ + }, + { + 600 * MHz, + 0, + /* dram side */ + 1, /* dq_odt; */ + 0, /* ca_odt; */ + 6, /* pdds; */ + 0x72, /* dq_vref; */ + 0x72, /* ca_vref; */ + /* phy side */ + PHY_DRV_ODT_HI_Z, /* rd_odt; */ + PHY_DRV_ODT_48, /* wr_dq_drv; */ + PHY_DRV_ODT_40, /* wr_ca_drv; */ + PHY_DRV_ODT_40, /* wr_ckcs_drv; */ + 0, /* rd_odt_en; */ + 32, /* rd_vref; (unit %, range 3.3% - 48.7%) */ + }, + { + 800 * MHz, + 0, + /* dram side */ + 1, /* dq_odt; */ + 0, /* ca_odt; */ + 1, /* pdds; */ + 0x72, /* dq_vref; */ + 0x72, /* ca_vref; */ + /* phy side */ + PHY_DRV_ODT_40, /* rd_odt; */ + PHY_DRV_ODT_48, /* wr_dq_drv; */ + PHY_DRV_ODT_40, /* wr_ca_drv; */ + PHY_DRV_ODT_40, /* wr_ckcs_drv; */ + 1, /* rd_odt_en; */ + 17, /* rd_vref; (unit %, range 3.3% - 48.7%) */ + }, + { + 933 * MHz, + 0, + /* dram side */ + 3, /* dq_odt; */ + 0, /* ca_odt; */ + 6, /* pdds; */ + 0x59, /* dq_vref; 32% */ + 0x72, /* ca_vref; */ + /* phy side */ + PHY_DRV_ODT_HI_Z, /* rd_odt; */ + PHY_DRV_ODT_48, /* wr_dq_drv; */ + PHY_DRV_ODT_40, /* wr_ca_drv; */ + PHY_DRV_ODT_40, /* wr_ckcs_drv; */ + 0, /* rd_odt_en; */ + 32, /* rd_vref; (unit %, range 3.3% - 48.7%) */ + }, + { + 1066 * MHz, + 0, + /* dram side */ + 6, /* dq_odt; */ + 0, /* ca_odt; */ + 1, /* pdds; */ + 0x10, /* dq_vref; */ + 0x72, /* ca_vref; */ + /* phy side */ + PHY_DRV_ODT_40, /* rd_odt; */ + PHY_DRV_ODT_60, /* wr_dq_drv; */ + PHY_DRV_ODT_40, /* wr_ca_drv; */ + PHY_DRV_ODT_40, /* wr_ckcs_drv; */ + 1, /* rd_odt_en; */ + 17, /* rd_vref; (unit %, range 3.3% - 48.7%) */ + }, +}; + +/** + * phy = 0, PHY boot freq + * phy = 1, PHY index 0 + * phy = 2, PHY index 1 + */ +static struct io_setting * +lpddr4_get_io_settings(const struct rk3399_sdram_params *params, u32 mr5) +{ + struct io_setting *io = NULL; + u32 n; + + for (n = 0; n < ARRAY_SIZE(lpddr4_io_setting); n++) { + io = &lpddr4_io_setting[n]; + + if (io->mr5 != 0) { + if (io->mhz >= params->base.ddr_freq && + io->mr5 == mr5) + break; + } else { + if (io->mhz >= params->base.ddr_freq) + break; + } + } + + return io; +} + +static void *get_denali_phy(const struct chan_info *chan, + struct rk3399_sdram_params *params, bool reg) +{ + return reg ? &chan->publ->denali_phy : ¶ms->phy_regs.denali_phy; +} + +static void *get_denali_ctl(const struct chan_info *chan, + struct rk3399_sdram_params *params, bool reg) +{ + return reg ? &chan->pctl->denali_ctl : ¶ms->pctl_regs.denali_ctl; +} + +static void *get_ddrc0_con(struct dram_info *dram, u8 channel) +{ + return (channel == 0) ? 
&dram->grf->ddrc0_con0 : &dram->grf->ddrc0_con1; +} + static void copy_to_reg(u32 *dest, const u32 *src, u32 n) { int i; @@ -79,6 +251,29 @@ static void copy_to_reg(u32 *dest, const u32 *src, u32 n) } } +static void rkclk_ddr_reset(struct rk3399_cru *cru, u32 channel, u32 ctl, + u32 phy) +{ + channel &= 0x1; + ctl &= 0x1; + phy &= 0x1; + writel(CRU_SFTRST_DDR_CTRL(channel, ctl) | + CRU_SFTRST_DDR_PHY(channel, phy), + &cru->softrst_con[4]); +} + +static void phy_pctrl_reset(struct rk3399_cru *cru, u32 channel) +{ + rkclk_ddr_reset(cru, channel, 1, 1); + udelay(10); + + rkclk_ddr_reset(cru, channel, 1, 0); + udelay(10); + + rkclk_ddr_reset(cru, channel, 0, 0); + udelay(10); +} + static void phy_dll_bypass_set(struct rk3399_ddr_publ_regs *ddr_publ_regs, u32 freq) { @@ -111,10 +306,9 @@ static void phy_dll_bypass_set(struct rk3399_ddr_publ_regs *ddr_publ_regs, } static void set_memory_map(const struct chan_info *chan, u32 channel, - const struct rk3399_sdram_params *sdram_params) + const struct rk3399_sdram_params *params) { - const struct rk3399_sdram_channel *sdram_ch = - &sdram_params->ch[channel]; + const struct rk3399_sdram_channel *sdram_ch = ¶ms->ch[channel]; u32 *denali_ctl = chan->pctl->denali_ctl; u32 *denali_pi = chan->pi->denali_pi; u32 cs_map; @@ -122,179 +316,51 @@ static void set_memory_map(const struct chan_info *chan, u32 channel, u32 row; /* Get row number from ddrconfig setting */ - if (sdram_ch->ddrconfig < 2 || sdram_ch->ddrconfig == 4) + if (sdram_ch->cap_info.ddrconfig < 2 || + sdram_ch->cap_info.ddrconfig == 4) row = 16; - else if (sdram_ch->ddrconfig == 3) + else if (sdram_ch->cap_info.ddrconfig == 3) row = 14; else row = 15; - cs_map = (sdram_ch->rank > 1) ? 3 : 1; - reduc = (sdram_ch->bw == 2) ? 0 : 1; + cs_map = (sdram_ch->cap_info.rank > 1) ? 3 : 1; + reduc = (sdram_ch->cap_info.bw == 2) ? 
0 : 1; /* Set the dram configuration to ctrl */ - clrsetbits_le32(&denali_ctl[191], 0xF, (12 - sdram_ch->col)); + clrsetbits_le32(&denali_ctl[191], 0xF, (12 - sdram_ch->cap_info.col)); clrsetbits_le32(&denali_ctl[190], (0x3 << 16) | (0x7 << 24), - ((3 - sdram_ch->bk) << 16) | + ((3 - sdram_ch->cap_info.bk) << 16) | ((16 - row) << 24)); clrsetbits_le32(&denali_ctl[196], 0x3 | (1 << 16), cs_map | (reduc << 16)); /* PI_199 PI_COL_DIFF:RW:0:4 */ - clrsetbits_le32(&denali_pi[199], 0xF, (12 - sdram_ch->col)); + clrsetbits_le32(&denali_pi[199], 0xF, (12 - sdram_ch->cap_info.col)); /* PI_155 PI_ROW_DIFF:RW:24:3 PI_BANK_DIFF:RW:16:2 */ clrsetbits_le32(&denali_pi[155], (0x3 << 16) | (0x7 << 24), - ((3 - sdram_ch->bk) << 16) | + ((3 - sdram_ch->cap_info.bk) << 16) | ((16 - row) << 24)); - /* PI_41 PI_CS_MAP:RW:24:4 */ - clrsetbits_le32(&denali_pi[41], 0xf << 24, cs_map << 24); - if ((sdram_ch->rank == 1) && (sdram_params->base.dramtype == DDR3)) - writel(0x2EC7FFFF, &denali_pi[34]); -} -static void set_ds_odt(const struct chan_info *chan, - const struct rk3399_sdram_params *sdram_params) -{ - u32 *denali_phy = chan->publ->denali_phy; - - u32 tsel_idle_en, tsel_wr_en, tsel_rd_en; - u32 tsel_idle_select_p, tsel_wr_select_p, tsel_rd_select_p; - u32 ca_tsel_wr_select_p, ca_tsel_wr_select_n; - u32 tsel_idle_select_n, tsel_wr_select_n, tsel_rd_select_n; - u32 reg_value; - - if (sdram_params->base.dramtype == LPDDR4) { - tsel_rd_select_p = PHY_DRV_ODT_Hi_Z; - tsel_wr_select_p = PHY_DRV_ODT_40; - ca_tsel_wr_select_p = PHY_DRV_ODT_40; - tsel_idle_select_p = PHY_DRV_ODT_Hi_Z; - - tsel_rd_select_n = PHY_DRV_ODT_240; - tsel_wr_select_n = PHY_DRV_ODT_40; - ca_tsel_wr_select_n = PHY_DRV_ODT_40; - tsel_idle_select_n = PHY_DRV_ODT_240; - } else if (sdram_params->base.dramtype == LPDDR3) { - tsel_rd_select_p = PHY_DRV_ODT_240; - tsel_wr_select_p = PHY_DRV_ODT_34_3; - ca_tsel_wr_select_p = PHY_DRV_ODT_48; - tsel_idle_select_p = PHY_DRV_ODT_240; - - tsel_rd_select_n = PHY_DRV_ODT_Hi_Z; - tsel_wr_select_n = PHY_DRV_ODT_34_3; - ca_tsel_wr_select_n = PHY_DRV_ODT_48; - tsel_idle_select_n = PHY_DRV_ODT_Hi_Z; - } else { - tsel_rd_select_p = PHY_DRV_ODT_240; - tsel_wr_select_p = PHY_DRV_ODT_34_3; - ca_tsel_wr_select_p = PHY_DRV_ODT_34_3; - tsel_idle_select_p = PHY_DRV_ODT_240; - - tsel_rd_select_n = PHY_DRV_ODT_240; - tsel_wr_select_n = PHY_DRV_ODT_34_3; - ca_tsel_wr_select_n = PHY_DRV_ODT_34_3; - tsel_idle_select_n = PHY_DRV_ODT_240; + if (IS_ENABLED(CONFIG_RAM_RK3399_LPDDR4)) { + if (cs_map == 1) + cs_map = 0x5; + else if (cs_map == 2) + cs_map = 0xa; + else + cs_map = 0xF; } - if (sdram_params->base.odt == 1) - tsel_rd_en = 1; - else - tsel_rd_en = 0; - - tsel_wr_en = 0; - tsel_idle_en = 0; - - /* - * phy_dq_tsel_select_X 24bits DENALI_PHY_6/134/262/390 offset_0 - * sets termination values for read/idle cycles and drive strength - * for write cycles for DQ/DM - */ - reg_value = tsel_rd_select_n | (tsel_rd_select_p << 0x4) | - (tsel_wr_select_n << 8) | (tsel_wr_select_p << 12) | - (tsel_idle_select_n << 16) | (tsel_idle_select_p << 20); - clrsetbits_le32(&denali_phy[6], 0xffffff, reg_value); - clrsetbits_le32(&denali_phy[134], 0xffffff, reg_value); - clrsetbits_le32(&denali_phy[262], 0xffffff, reg_value); - clrsetbits_le32(&denali_phy[390], 0xffffff, reg_value); - - /* - * phy_dqs_tsel_select_X 24bits DENALI_PHY_7/135/263/391 offset_0 - * sets termination values for read/idle cycles and drive strength - * for write cycles for DQS - */ - clrsetbits_le32(&denali_phy[7], 0xffffff, reg_value); - 
clrsetbits_le32(&denali_phy[135], 0xffffff, reg_value); - clrsetbits_le32(&denali_phy[263], 0xffffff, reg_value); - clrsetbits_le32(&denali_phy[391], 0xffffff, reg_value); - - /* phy_adr_tsel_select_ 8bits DENALI_PHY_544/672/800 offset_0 */ - reg_value = ca_tsel_wr_select_n | (ca_tsel_wr_select_p << 0x4); - clrsetbits_le32(&denali_phy[544], 0xff, reg_value); - clrsetbits_le32(&denali_phy[672], 0xff, reg_value); - clrsetbits_le32(&denali_phy[800], 0xff, reg_value); - - /* phy_pad_addr_drive 8bits DENALI_PHY_928 offset_0 */ - clrsetbits_le32(&denali_phy[928], 0xff, reg_value); - - /* phy_pad_rst_drive 8bits DENALI_PHY_937 offset_0 */ - clrsetbits_le32(&denali_phy[937], 0xff, reg_value); - - /* phy_pad_cke_drive 8bits DENALI_PHY_935 offset_0 */ - clrsetbits_le32(&denali_phy[935], 0xff, reg_value); - - /* phy_pad_cs_drive 8bits DENALI_PHY_939 offset_0 */ - clrsetbits_le32(&denali_phy[939], 0xff, reg_value); - - /* phy_pad_clk_drive 8bits DENALI_PHY_929 offset_0 */ - clrsetbits_le32(&denali_phy[929], 0xff, reg_value); - - /* phy_pad_fdbk_drive 23bit DENALI_PHY_924/925 */ - clrsetbits_le32(&denali_phy[924], 0xff, - tsel_wr_select_n | (tsel_wr_select_p << 4)); - clrsetbits_le32(&denali_phy[925], 0xff, - tsel_rd_select_n | (tsel_rd_select_p << 4)); - - /* phy_dq_tsel_enable_X 3bits DENALI_PHY_5/133/261/389 offset_16 */ - reg_value = (tsel_rd_en | (tsel_wr_en << 1) | (tsel_idle_en << 2)) - << 16; - clrsetbits_le32(&denali_phy[5], 0x7 << 16, reg_value); - clrsetbits_le32(&denali_phy[133], 0x7 << 16, reg_value); - clrsetbits_le32(&denali_phy[261], 0x7 << 16, reg_value); - clrsetbits_le32(&denali_phy[389], 0x7 << 16, reg_value); - - /* phy_dqs_tsel_enable_X 3bits DENALI_PHY_6/134/262/390 offset_24 */ - reg_value = (tsel_rd_en | (tsel_wr_en << 1) | (tsel_idle_en << 2)) - << 24; - clrsetbits_le32(&denali_phy[6], 0x7 << 24, reg_value); - clrsetbits_le32(&denali_phy[134], 0x7 << 24, reg_value); - clrsetbits_le32(&denali_phy[262], 0x7 << 24, reg_value); - clrsetbits_le32(&denali_phy[390], 0x7 << 24, reg_value); - - /* phy_adr_tsel_enable_ 1bit DENALI_PHY_518/646/774 offset_8 */ - reg_value = tsel_wr_en << 8; - clrsetbits_le32(&denali_phy[518], 0x1 << 8, reg_value); - clrsetbits_le32(&denali_phy[646], 0x1 << 8, reg_value); - clrsetbits_le32(&denali_phy[774], 0x1 << 8, reg_value); - - /* phy_pad_addr_term tsel 1bit DENALI_PHY_933 offset_17 */ - reg_value = tsel_wr_en << 17; - clrsetbits_le32(&denali_phy[933], 0x1 << 17, reg_value); - /* - * pad_rst/cke/cs/clk_term tsel 1bits - * DENALI_PHY_938/936/940/934 offset_17 - */ - clrsetbits_le32(&denali_phy[938], 0x1 << 17, reg_value); - clrsetbits_le32(&denali_phy[936], 0x1 << 17, reg_value); - clrsetbits_le32(&denali_phy[940], 0x1 << 17, reg_value); - clrsetbits_le32(&denali_phy[934], 0x1 << 17, reg_value); - - /* phy_pad_fdbk_term 1bit DENALI_PHY_930 offset_17 */ - clrsetbits_le32(&denali_phy[930], 0x1 << 17, reg_value); + /* PI_41 PI_CS_MAP:RW:24:4 */ + clrsetbits_le32(&denali_pi[41], 0xf << 24, cs_map << 24); + if (sdram_ch->cap_info.rank == 1 && params->base.dramtype == DDR3) + writel(0x2EC7FFFF, &denali_pi[34]); } static int phy_io_config(const struct chan_info *chan, - const struct rk3399_sdram_params *sdram_params) + const struct rk3399_sdram_params *params, u32 mr5) { u32 *denali_phy = chan->publ->denali_phy; u32 vref_mode_dq, vref_value_dq, vref_mode_ac, vref_value_ac; @@ -303,15 +369,29 @@ static int phy_io_config(const struct chan_info *chan, u32 drv_value, odt_value; u32 speed; - /* vref setting */ - if (sdram_params->base.dramtype == LPDDR4) { - /* 
LPDDR4 */ - vref_mode_dq = 0x6; - vref_value_dq = 0x1f; + /* vref setting & mode setting */ + if (params->base.dramtype == LPDDR4) { + struct io_setting *io = lpddr4_get_io_settings(params, mr5); + u32 rd_vref = io->rd_vref * 1000; + + if (rd_vref < 36700) { + /* MODE_LV[2:0] = LPDDR4 (Range 2)*/ + vref_mode_dq = 0x7; + /* MODE[2:0]= LPDDR4 Range 2(0.4*VDDQ) */ + mode_sel = 0x5; + vref_value_dq = (rd_vref - 3300) / 521; + } else { + /* MODE_LV[2:0] = LPDDR4 (Range 1)*/ + vref_mode_dq = 0x6; + /* MODE[2:0]= LPDDR4 Range 1(0.33*VDDQ) */ + mode_sel = 0x4; + vref_value_dq = (rd_vref - 15300) / 521; + } vref_mode_ac = 0x6; - vref_value_ac = 0x1f; - } else if (sdram_params->base.dramtype == LPDDR3) { - if (sdram_params->base.odt == 1) { + /* VDDQ/3/2=16.8% */ + vref_value_ac = 0x3; + } else if (params->base.dramtype == LPDDR3) { + if (params->base.odt == 1) { vref_mode_dq = 0x5; /* LPDDR3 ODT */ drv_value = (readl(&denali_phy[6]) >> 12) & 0xf; odt_value = (readl(&denali_phy[6]) >> 4) & 0xf; @@ -370,12 +450,14 @@ static int phy_io_config(const struct chan_info *chan, } vref_mode_ac = 0x2; vref_value_ac = 0x1f; - } else if (sdram_params->base.dramtype == DDR3) { + mode_sel = 0x0; + } else if (params->base.dramtype == DDR3) { /* DDR3L */ vref_mode_dq = 0x1; vref_value_dq = 0x1f; vref_mode_ac = 0x1; vref_value_ac = 0x1f; + mode_sel = 0x1; } else { debug("Unknown DRAM type.\n"); return -EINVAL; @@ -397,15 +479,6 @@ static int phy_io_config(const struct chan_info *chan, /* PHY_915 PHY_PAD_VREF_CTRL_AC 12bits offset_16 */ clrsetbits_le32(&denali_phy[915], 0xfff << 16, reg_value << 16); - if (sdram_params->base.dramtype == LPDDR4) - mode_sel = 0x6; - else if (sdram_params->base.dramtype == LPDDR3) - mode_sel = 0x0; - else if (sdram_params->base.dramtype == DDR3) - mode_sel = 0x1; - else - return -EINVAL; - /* PHY_924 PHY_PAD_FDBK_DRIVE */ clrsetbits_le32(&denali_phy[924], 0x7 << 15, mode_sel << 15); /* PHY_926 PHY_PAD_DATA_DRIVE */ @@ -423,13 +496,52 @@ static int phy_io_config(const struct chan_info *chan, /* PHY_939 PHY_PAD_CS_DRIVE */ clrsetbits_le32(&denali_phy[939], 0x7 << 14, mode_sel << 14); + if (IS_ENABLED(CONFIG_RAM_RK3399_LPDDR4)) { + /* BOOSTP_EN & BOOSTN_EN */ + reg_value = ((PHY_BOOSTP_EN << 4) | PHY_BOOSTN_EN); + /* PHY_925 PHY_PAD_FDBK_DRIVE2 */ + clrsetbits_le32(&denali_phy[925], 0xff << 8, reg_value << 8); + /* PHY_926 PHY_PAD_DATA_DRIVE */ + clrsetbits_le32(&denali_phy[926], 0xff << 12, reg_value << 12); + /* PHY_927 PHY_PAD_DQS_DRIVE */ + clrsetbits_le32(&denali_phy[927], 0xff << 14, reg_value << 14); + /* PHY_928 PHY_PAD_ADDR_DRIVE */ + clrsetbits_le32(&denali_phy[928], 0xff << 20, reg_value << 20); + /* PHY_929 PHY_PAD_CLK_DRIVE */ + clrsetbits_le32(&denali_phy[929], 0xff << 22, reg_value << 22); + /* PHY_935 PHY_PAD_CKE_DRIVE */ + clrsetbits_le32(&denali_phy[935], 0xff << 20, reg_value << 20); + /* PHY_937 PHY_PAD_RST_DRIVE */ + clrsetbits_le32(&denali_phy[937], 0xff << 20, reg_value << 20); + /* PHY_939 PHY_PAD_CS_DRIVE */ + clrsetbits_le32(&denali_phy[939], 0xff << 20, reg_value << 20); + + /* SLEWP_EN & SLEWN_EN */ + reg_value = ((PHY_SLEWP_EN << 3) | PHY_SLEWN_EN); + /* PHY_924 PHY_PAD_FDBK_DRIVE */ + clrsetbits_le32(&denali_phy[924], 0x3f << 8, reg_value << 8); + /* PHY_926 PHY_PAD_DATA_DRIVE */ + clrsetbits_le32(&denali_phy[926], 0x3f, reg_value); + /* PHY_927 PHY_PAD_DQS_DRIVE */ + clrsetbits_le32(&denali_phy[927], 0x3f, reg_value); + /* PHY_928 PHY_PAD_ADDR_DRIVE */ + clrsetbits_le32(&denali_phy[928], 0x3f << 8, reg_value << 8); + /* PHY_929 PHY_PAD_CLK_DRIVE */ + 
clrsetbits_le32(&denali_phy[929], 0x3f << 8, reg_value << 8); + /* PHY_935 PHY_PAD_CKE_DRIVE */ + clrsetbits_le32(&denali_phy[935], 0x3f << 8, reg_value << 8); + /* PHY_937 PHY_PAD_RST_DRIVE */ + clrsetbits_le32(&denali_phy[937], 0x3f << 8, reg_value << 8); + /* PHY_939 PHY_PAD_CS_DRIVE */ + clrsetbits_le32(&denali_phy[939], 0x3f << 8, reg_value << 8); + } /* speed setting */ - if (sdram_params->base.ddr_freq < 400) + if (params->base.ddr_freq < 400) speed = 0x0; - else if (sdram_params->base.ddr_freq < 800) + else if (params->base.ddr_freq < 800) speed = 0x1; - else if (sdram_params->base.ddr_freq < 1200) + else if (params->base.ddr_freq < 1200) speed = 0x2; else speed = 0x3; @@ -451,21 +563,304 @@ static int phy_io_config(const struct chan_info *chan, /* PHY_939 PHY_PAD_CS_DRIVE */ clrsetbits_le32(&denali_phy[939], 0x3 << 17, speed << 17); + if (IS_ENABLED(CONFIG_RAM_RK3399_LPDDR4)) { + /* RX_CM_INPUT */ + reg_value = PHY_RX_CM_INPUT; + /* PHY_924 PHY_PAD_FDBK_DRIVE */ + clrsetbits_le32(&denali_phy[924], 0x1 << 14, reg_value << 14); + /* PHY_926 PHY_PAD_DATA_DRIVE */ + clrsetbits_le32(&denali_phy[926], 0x1 << 11, reg_value << 11); + /* PHY_927 PHY_PAD_DQS_DRIVE */ + clrsetbits_le32(&denali_phy[927], 0x1 << 13, reg_value << 13); + /* PHY_928 PHY_PAD_ADDR_DRIVE */ + clrsetbits_le32(&denali_phy[928], 0x1 << 19, reg_value << 19); + /* PHY_929 PHY_PAD_CLK_DRIVE */ + clrsetbits_le32(&denali_phy[929], 0x1 << 21, reg_value << 21); + /* PHY_935 PHY_PAD_CKE_DRIVE */ + clrsetbits_le32(&denali_phy[935], 0x1 << 19, reg_value << 19); + /* PHY_937 PHY_PAD_RST_DRIVE */ + clrsetbits_le32(&denali_phy[937], 0x1 << 19, reg_value << 19); + /* PHY_939 PHY_PAD_CS_DRIVE */ + clrsetbits_le32(&denali_phy[939], 0x1 << 19, reg_value << 19); + } + return 0; } -static int pctl_cfg(const struct chan_info *chan, u32 channel, - const struct rk3399_sdram_params *sdram_params) +static void set_ds_odt(const struct chan_info *chan, + struct rk3399_sdram_params *params, + bool ctl_phy_reg, u32 mr5) +{ + u32 *denali_phy = get_denali_phy(chan, params, ctl_phy_reg); + u32 *denali_ctl = get_denali_ctl(chan, params, ctl_phy_reg); + u32 tsel_idle_en, tsel_wr_en, tsel_rd_en; + u32 tsel_idle_select_p, tsel_rd_select_p; + u32 tsel_idle_select_n, tsel_rd_select_n; + u32 tsel_wr_select_dq_p, tsel_wr_select_ca_p; + u32 tsel_wr_select_dq_n, tsel_wr_select_ca_n; + u32 tsel_ckcs_select_p, tsel_ckcs_select_n; + struct io_setting *io = NULL; + u32 soc_odt = 0; + u32 reg_value; + + if (params->base.dramtype == LPDDR4) { + io = lpddr4_get_io_settings(params, mr5); + + tsel_rd_select_p = PHY_DRV_ODT_HI_Z; + tsel_rd_select_n = io->rd_odt; + + tsel_idle_select_p = PHY_DRV_ODT_HI_Z; + tsel_idle_select_n = PHY_DRV_ODT_240; + + tsel_wr_select_dq_p = io->wr_dq_drv; + tsel_wr_select_dq_n = PHY_DRV_ODT_40; + + tsel_wr_select_ca_p = io->wr_ca_drv; + tsel_wr_select_ca_n = PHY_DRV_ODT_40; + + tsel_ckcs_select_p = io->wr_ckcs_drv; + tsel_ckcs_select_n = PHY_DRV_ODT_34_3; + switch (tsel_rd_select_n) { + case PHY_DRV_ODT_240: + soc_odt = 1; + break; + case PHY_DRV_ODT_120: + soc_odt = 2; + break; + case PHY_DRV_ODT_80: + soc_odt = 3; + break; + case PHY_DRV_ODT_60: + soc_odt = 4; + break; + case PHY_DRV_ODT_48: + soc_odt = 5; + break; + case PHY_DRV_ODT_40: + soc_odt = 6; + break; + case PHY_DRV_ODT_34_3: + soc_odt = 6; + printf("%s: Unable to support LPDDR4 MR22 Soc ODT\n", + __func__); + break; + case PHY_DRV_ODT_HI_Z: + default: + soc_odt = 0; + break; + } + } else if (params->base.dramtype == LPDDR3) { + tsel_rd_select_p = PHY_DRV_ODT_240; + 
tsel_rd_select_n = PHY_DRV_ODT_HI_Z; + + tsel_idle_select_p = PHY_DRV_ODT_240; + tsel_idle_select_n = PHY_DRV_ODT_HI_Z; + + tsel_wr_select_dq_p = PHY_DRV_ODT_34_3; + tsel_wr_select_dq_n = PHY_DRV_ODT_34_3; + + tsel_wr_select_ca_p = PHY_DRV_ODT_48; + tsel_wr_select_ca_n = PHY_DRV_ODT_48; + + tsel_ckcs_select_p = PHY_DRV_ODT_34_3; + tsel_ckcs_select_n = PHY_DRV_ODT_34_3; + } else { + tsel_rd_select_p = PHY_DRV_ODT_240; + tsel_rd_select_n = PHY_DRV_ODT_240; + + tsel_idle_select_p = PHY_DRV_ODT_240; + tsel_idle_select_n = PHY_DRV_ODT_240; + + tsel_wr_select_dq_p = PHY_DRV_ODT_34_3; + tsel_wr_select_dq_n = PHY_DRV_ODT_34_3; + + tsel_wr_select_ca_p = PHY_DRV_ODT_34_3; + tsel_wr_select_ca_n = PHY_DRV_ODT_34_3; + + tsel_ckcs_select_p = PHY_DRV_ODT_34_3; + tsel_ckcs_select_n = PHY_DRV_ODT_34_3; + } + + if (params->base.odt == 1) { + tsel_rd_en = 1; + + if (params->base.dramtype == LPDDR4) + tsel_rd_en = io->rd_odt_en; + } else { + tsel_rd_en = 0; + } + + tsel_wr_en = 0; + tsel_idle_en = 0; + + /* F0_0 */ + clrsetbits_le32(&denali_ctl[145], 0xFF << 16, + (soc_odt | (CS0_MR22_VAL << 3)) << 16); + /* F2_0, F1_0 */ + clrsetbits_le32(&denali_ctl[146], 0xFF00FF, + ((soc_odt | (CS0_MR22_VAL << 3)) << 16) | + (soc_odt | (CS0_MR22_VAL << 3))); + /* F0_1 */ + clrsetbits_le32(&denali_ctl[159], 0xFF << 16, + (soc_odt | (CS1_MR22_VAL << 3)) << 16); + /* F2_1, F1_1 */ + clrsetbits_le32(&denali_ctl[160], 0xFF00FF, + ((soc_odt | (CS1_MR22_VAL << 3)) << 16) | + (soc_odt | (CS1_MR22_VAL << 3))); + + /* + * phy_dq_tsel_select_X 24bits DENALI_PHY_6/134/262/390 offset_0 + * sets termination values for read/idle cycles and drive strength + * for write cycles for DQ/DM + */ + reg_value = tsel_rd_select_n | (tsel_rd_select_p << 0x4) | + (tsel_wr_select_dq_n << 8) | (tsel_wr_select_dq_p << 12) | + (tsel_idle_select_n << 16) | (tsel_idle_select_p << 20); + clrsetbits_le32(&denali_phy[6], 0xffffff, reg_value); + clrsetbits_le32(&denali_phy[134], 0xffffff, reg_value); + clrsetbits_le32(&denali_phy[262], 0xffffff, reg_value); + clrsetbits_le32(&denali_phy[390], 0xffffff, reg_value); + + /* + * phy_dqs_tsel_select_X 24bits DENALI_PHY_7/135/263/391 offset_0 + * sets termination values for read/idle cycles and drive strength + * for write cycles for DQS + */ + clrsetbits_le32(&denali_phy[7], 0xffffff, reg_value); + clrsetbits_le32(&denali_phy[135], 0xffffff, reg_value); + clrsetbits_le32(&denali_phy[263], 0xffffff, reg_value); + clrsetbits_le32(&denali_phy[391], 0xffffff, reg_value); + + /* phy_adr_tsel_select_ 8bits DENALI_PHY_544/672/800 offset_0 */ + reg_value = tsel_wr_select_ca_n | (tsel_wr_select_ca_p << 0x4); + if (IS_ENABLED(CONFIG_RAM_RK3399_LPDDR4)) { + /* LPDDR4 these register read always return 0, so + * can not use clrsetbits_le32(), need to write32 + */ + writel((0x300 << 8) | reg_value, &denali_phy[544]); + writel((0x300 << 8) | reg_value, &denali_phy[672]); + writel((0x300 << 8) | reg_value, &denali_phy[800]); + } else { + clrsetbits_le32(&denali_phy[544], 0xff, reg_value); + clrsetbits_le32(&denali_phy[672], 0xff, reg_value); + clrsetbits_le32(&denali_phy[800], 0xff, reg_value); + } + + /* phy_pad_addr_drive 8bits DENALI_PHY_928 offset_0 */ + clrsetbits_le32(&denali_phy[928], 0xff, reg_value); + + /* phy_pad_rst_drive 8bits DENALI_PHY_937 offset_0 */ + if (!ctl_phy_reg) + clrsetbits_le32(&denali_phy[937], 0xff, reg_value); + + /* phy_pad_cke_drive 8bits DENALI_PHY_935 offset_0 */ + clrsetbits_le32(&denali_phy[935], 0xff, reg_value); + + /* phy_pad_cs_drive 8bits DENALI_PHY_939 offset_0 */ + 
clrsetbits_le32(&denali_phy[939], 0xff, + tsel_ckcs_select_n | (tsel_ckcs_select_p << 0x4)); + + /* phy_pad_clk_drive 8bits DENALI_PHY_929 offset_0 */ + clrsetbits_le32(&denali_phy[929], 0xff, + tsel_ckcs_select_n | (tsel_ckcs_select_p << 0x4)); + + /* phy_pad_fdbk_drive 23bit DENALI_PHY_924/925 */ + clrsetbits_le32(&denali_phy[924], 0xff, + tsel_wr_select_dq_n | (tsel_wr_select_dq_p << 4)); + clrsetbits_le32(&denali_phy[925], 0xff, + tsel_rd_select_n | (tsel_rd_select_p << 4)); + + /* phy_dq_tsel_enable_X 3bits DENALI_PHY_5/133/261/389 offset_16 */ + reg_value = (tsel_rd_en | (tsel_wr_en << 1) | (tsel_idle_en << 2)) + << 16; + clrsetbits_le32(&denali_phy[5], 0x7 << 16, reg_value); + clrsetbits_le32(&denali_phy[133], 0x7 << 16, reg_value); + clrsetbits_le32(&denali_phy[261], 0x7 << 16, reg_value); + clrsetbits_le32(&denali_phy[389], 0x7 << 16, reg_value); + + /* phy_dqs_tsel_enable_X 3bits DENALI_PHY_6/134/262/390 offset_24 */ + reg_value = (tsel_rd_en | (tsel_wr_en << 1) | (tsel_idle_en << 2)) + << 24; + clrsetbits_le32(&denali_phy[6], 0x7 << 24, reg_value); + clrsetbits_le32(&denali_phy[134], 0x7 << 24, reg_value); + clrsetbits_le32(&denali_phy[262], 0x7 << 24, reg_value); + clrsetbits_le32(&denali_phy[390], 0x7 << 24, reg_value); + + /* phy_adr_tsel_enable_ 1bit DENALI_PHY_518/646/774 offset_8 */ + reg_value = tsel_wr_en << 8; + clrsetbits_le32(&denali_phy[518], 0x1 << 8, reg_value); + clrsetbits_le32(&denali_phy[646], 0x1 << 8, reg_value); + clrsetbits_le32(&denali_phy[774], 0x1 << 8, reg_value); + + /* phy_pad_addr_term tsel 1bit DENALI_PHY_933 offset_17 */ + reg_value = tsel_wr_en << 17; + clrsetbits_le32(&denali_phy[933], 0x1 << 17, reg_value); + /* + * pad_rst/cke/cs/clk_term tsel 1bits + * DENALI_PHY_938/936/940/934 offset_17 + */ + clrsetbits_le32(&denali_phy[938], 0x1 << 17, reg_value); + clrsetbits_le32(&denali_phy[936], 0x1 << 17, reg_value); + clrsetbits_le32(&denali_phy[940], 0x1 << 17, reg_value); + clrsetbits_le32(&denali_phy[934], 0x1 << 17, reg_value); + + /* phy_pad_fdbk_term 1bit DENALI_PHY_930 offset_17 */ + clrsetbits_le32(&denali_phy[930], 0x1 << 17, reg_value); + + phy_io_config(chan, params, mr5); +} + +static void pctl_start(struct dram_info *dram, u8 channel) +{ + const struct chan_info *chan = &dram->chan[channel]; + u32 *denali_ctl = chan->pctl->denali_ctl; + u32 *denali_phy = chan->publ->denali_phy; + u32 *ddrc0_con = get_ddrc0_con(dram, channel); + u32 count = 0; + u32 byte, tmp; + + writel(0x01000000, &ddrc0_con); + + clrsetbits_le32(&denali_phy[957], 0x3 << 24, 0x2 << 24); + + while (!(readl(&denali_ctl[203]) & (1 << 3))) { + if (count > 1000) { + printf("%s: Failed to init pctl for channel %d\n", + __func__, channel); + while (1) + ; + } + + udelay(1); + count++; + } + + writel(0x01000100, &ddrc0_con); + + for (byte = 0; byte < 4; byte++) { + tmp = 0x820; + writel((tmp << 16) | tmp, &denali_phy[53 + (128 * byte)]); + writel((tmp << 16) | tmp, &denali_phy[54 + (128 * byte)]); + writel((tmp << 16) | tmp, &denali_phy[55 + (128 * byte)]); + writel((tmp << 16) | tmp, &denali_phy[56 + (128 * byte)]); + writel((tmp << 16) | tmp, &denali_phy[57 + (128 * byte)]); + + clrsetbits_le32(&denali_phy[58 + (128 * byte)], 0xffff, tmp); + } + + clrsetbits_le32(&denali_ctl[68], PWRUP_SREFRESH_EXIT, + dram->pwrup_srefresh_exit[channel]); +} + +static int pctl_cfg(struct dram_info *dram, const struct chan_info *chan, + u32 channel, struct rk3399_sdram_params *params) { u32 *denali_ctl = chan->pctl->denali_ctl; u32 *denali_pi = chan->pi->denali_pi; u32 *denali_phy = 
chan->publ->denali_phy; - const u32 *params_ctl = sdram_params->pctl_regs.denali_ctl; - const u32 *params_phy = sdram_params->phy_regs.denali_phy; + const u32 *params_ctl = params->pctl_regs.denali_ctl; + const u32 *params_phy = params->phy_regs.denali_phy; u32 tmp, tmp1, tmp2; - u32 pwrup_srefresh_exit; - int ret; - const ulong timeout_ms = 200; /* * work around controller bug: @@ -474,16 +869,38 @@ static int pctl_cfg(const struct chan_info *chan, u32 channel, copy_to_reg(&denali_ctl[1], ¶ms_ctl[1], sizeof(struct rk3399_ddr_pctl_regs) - 4); writel(params_ctl[0], &denali_ctl[0]); - copy_to_reg(denali_pi, &sdram_params->pi_regs.denali_pi[0], + + /* + * two channel init at the same time, then ZQ Cal Start + * at the same time, it will use the same RZQ, but cannot + * start at the same time. + * + * So, increase tINIT3 for channel 1, will avoid two + * channel ZQ Cal Start at the same time + */ + if (params->base.dramtype == LPDDR4 && channel == 1) { + tmp = ((params->base.ddr_freq * MHz + 999) / 1000); + tmp1 = readl(&denali_ctl[14]); + writel(tmp + tmp1, &denali_ctl[14]); + } + + copy_to_reg(denali_pi, ¶ms->pi_regs.denali_pi[0], sizeof(struct rk3399_ddr_pi_regs)); + /* rank count need to set for init */ - set_memory_map(chan, channel, sdram_params); + set_memory_map(chan, channel, params); + + writel(params->phy_regs.denali_phy[910], &denali_phy[910]); + writel(params->phy_regs.denali_phy[911], &denali_phy[911]); + writel(params->phy_regs.denali_phy[912], &denali_phy[912]); - writel(sdram_params->phy_regs.denali_phy[910], &denali_phy[910]); - writel(sdram_params->phy_regs.denali_phy[911], &denali_phy[911]); - writel(sdram_params->phy_regs.denali_phy[912], &denali_phy[912]); + if (IS_ENABLED(CONFIG_RAM_RK3399_LPDDR4)) { + writel(params->phy_regs.denali_phy[898], &denali_phy[898]); + writel(params->phy_regs.denali_phy[919], &denali_phy[919]); + } - pwrup_srefresh_exit = readl(&denali_ctl[68]) & PWRUP_SREFRESH_EXIT; + dram->pwrup_srefresh_exit[channel] = readl(&denali_ctl[68]) & + PWRUP_SREFRESH_EXIT; clrbits_le32(&denali_ctl[68], PWRUP_SREFRESH_EXIT); /* PHY_DLL_RST_EN */ @@ -492,16 +909,22 @@ static int pctl_cfg(const struct chan_info *chan, u32 channel, setbits_le32(&denali_pi[0], START); setbits_le32(&denali_ctl[0], START); - /* Wating for phy DLL lock */ - while (1) { - tmp = readl(&denali_phy[920]); - tmp1 = readl(&denali_phy[921]); - tmp2 = readl(&denali_phy[922]); - if ((((tmp >> 16) & 0x1) == 0x1) && - (((tmp1 >> 16) & 0x1) == 0x1) && - (((tmp1 >> 0) & 0x1) == 0x1) && - (((tmp2 >> 0) & 0x1) == 0x1)) - break; + /** + * LPDDR4 use PLL bypass mode for init + * not need to wait for the PLL to lock + */ + if (params->base.dramtype != LPDDR4) { + /* Waiting for phy DLL lock */ + while (1) { + tmp = readl(&denali_phy[920]); + tmp1 = readl(&denali_phy[921]); + tmp2 = readl(&denali_phy[922]); + if ((((tmp >> 16) & 0x1) == 0x1) && + (((tmp1 >> 16) & 0x1) == 0x1) && + (((tmp1 >> 0) & 0x1) == 0x1) && + (((tmp2 >> 0) & 0x1) == 0x1)) + break; + } } copy_to_reg(&denali_phy[896], ¶ms_phy[896], (958 - 895) * 4); @@ -512,7 +935,7 @@ static int pctl_cfg(const struct chan_info *chan, u32 channel, copy_to_reg(&denali_phy[512], ¶ms_phy[512], (549 - 512 + 1) * 4); copy_to_reg(&denali_phy[640], ¶ms_phy[640], (677 - 640 + 1) * 4); copy_to_reg(&denali_phy[768], ¶ms_phy[768], (805 - 768 + 1) * 4); - set_ds_odt(chan, sdram_params); + set_ds_odt(chan, params, true, 0); /* * phy_dqs_tsel_wr_timing_X 8bits DENALI_PHY_84/212/340/468 offset_8 @@ -540,26 +963,6 @@ static int pctl_cfg(const struct chan_info *chan, u32 
channel, tmp = (readl(&denali_phy[467]) >> 16) & 0xff; clrsetbits_le32(&denali_phy[467], 0xff << 16, (tmp + 0x10) << 16); - ret = phy_io_config(chan, sdram_params); - if (ret) - return ret; - - /* PHY_DLL_RST_EN */ - clrsetbits_le32(&denali_phy[957], 0x3 << 24, 0x2 << 24); - - /* Wating for PHY and DRAM init complete */ - tmp = get_timer(0); - do { - if (get_timer(tmp) > timeout_ms) { - pr_err("DRAM (%s): phy failed to lock within %ld ms\n", - __func__, timeout_ms); - return -ETIME; - } - } while (!(readl(&denali_ctl[203]) & (1 << 3))); - debug("DRAM (%s): phy locked after %ld ms\n", __func__, get_timer(tmp)); - - clrsetbits_le32(&denali_ctl[68], PWRUP_SREFRESH_EXIT, - pwrup_srefresh_exit); return 0; } @@ -569,7 +972,7 @@ static void select_per_cs_training_index(const struct chan_info *chan, u32 *denali_phy = chan->publ->denali_phy; /* PHY_84 PHY_PER_CS_TRAINING_EN_0 1bit offset_16 */ - if ((readl(&denali_phy[84])>>16) & 1) { + if ((readl(&denali_phy[84]) >> 16) & 1) { /* * PHY_8/136/264/392 * phy_per_cs_training_index_X 1bit offset_24 @@ -611,18 +1014,32 @@ static void override_write_leveling_value(const struct chan_info *chan) } static int data_training_ca(const struct chan_info *chan, u32 channel, - const struct rk3399_sdram_params *sdram_params) + const struct rk3399_sdram_params *params) { u32 *denali_pi = chan->pi->denali_pi; u32 *denali_phy = chan->publ->denali_phy; u32 i, tmp; u32 obs_0, obs_1, obs_2, obs_err = 0; - u32 rank = sdram_params->ch[channel].rank; + u32 rank = params->ch[channel].cap_info.rank; + u32 rank_mask; + + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + writel(0x00003f7c, (&denali_pi[175])); + + if (params->base.dramtype == LPDDR4) + rank_mask = (rank == 1) ? 0x5 : 0xf; + else + rank_mask = (rank == 1) ? 0x1 : 0x3; + + for (i = 0; i < 4; i++) { + if (!(rank_mask & (1 << i))) + continue; - for (i = 0; i < rank; i++) { select_per_cs_training_index(chan, i); + /* PI_100 PI_CALVL_EN:RW:8:2 */ clrsetbits_le32(&denali_pi[100], 0x3 << 8, 0x2 << 8); + /* PI_92 PI_CALVL_REQ:WR:16:1,PI_CALVL_CS:RW:24:2 */ clrsetbits_le32(&denali_pi[92], (0x1 << 16) | (0x3 << 24), @@ -646,33 +1063,40 @@ static int data_training_ca(const struct chan_info *chan, u32 channel, if ((((tmp >> 11) & 0x1) == 0x1) && (((tmp >> 13) & 0x1) == 0x1) && (((tmp >> 5) & 0x1) == 0x0) && - (obs_err == 0)) + obs_err == 0) break; else if ((((tmp >> 5) & 0x1) == 0x1) || (obs_err == 1)) return -EIO; } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ writel(0x00003f7c, (&denali_pi[175])); } + clrbits_le32(&denali_pi[100], 0x3 << 8); return 0; } static int data_training_wl(const struct chan_info *chan, u32 channel, - const struct rk3399_sdram_params *sdram_params) + const struct rk3399_sdram_params *params) { u32 *denali_pi = chan->pi->denali_pi; u32 *denali_phy = chan->publ->denali_phy; u32 i, tmp; u32 obs_0, obs_1, obs_2, obs_3, obs_err = 0; - u32 rank = sdram_params->ch[channel].rank; + u32 rank = params->ch[channel].cap_info.rank; + + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + writel(0x00003f7c, (&denali_pi[175])); for (i = 0; i < rank; i++) { select_per_cs_training_index(chan, i); + /* PI_60 PI_WRLVL_EN:RW:8:2 */ clrsetbits_le32(&denali_pi[60], 0x3 << 8, 0x2 << 8); + /* PI_59 PI_WRLVL_REQ:WR:8:1,PI_WRLVL_CS:RW:16:2 */ clrsetbits_le32(&denali_pi[59], (0x1 << 8) | (0x3 << 16), @@ -700,12 +1124,13 @@ static int data_training_wl(const struct chan_info *chan, u32 channel, if ((((tmp >> 10) & 0x1) == 0x1) && (((tmp >> 13) & 0x1) == 0x1) && (((tmp >> 4) & 0x1) == 0x0) && - (obs_err == 0)) + obs_err == 0) 
break; else if ((((tmp >> 4) & 0x1) == 0x1) || (obs_err == 1)) return -EIO; } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ writel(0x00003f7c, (&denali_pi[175])); } @@ -717,18 +1142,23 @@ static int data_training_wl(const struct chan_info *chan, u32 channel, } static int data_training_rg(const struct chan_info *chan, u32 channel, - const struct rk3399_sdram_params *sdram_params) + const struct rk3399_sdram_params *params) { u32 *denali_pi = chan->pi->denali_pi; u32 *denali_phy = chan->publ->denali_phy; u32 i, tmp; u32 obs_0, obs_1, obs_2, obs_3, obs_err = 0; - u32 rank = sdram_params->ch[channel].rank; + u32 rank = params->ch[channel].cap_info.rank; + + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + writel(0x00003f7c, (&denali_pi[175])); for (i = 0; i < rank; i++) { select_per_cs_training_index(chan, i); + /* PI_80 PI_RDLVL_GATE_EN:RW:24:2 */ clrsetbits_le32(&denali_pi[80], 0x3 << 24, 0x2 << 24); + /* * PI_74 PI_RDLVL_GATE_REQ:WR:16:1 * PI_RDLVL_CS:RW:24:2 @@ -759,31 +1189,38 @@ static int data_training_rg(const struct chan_info *chan, u32 channel, if ((((tmp >> 9) & 0x1) == 0x1) && (((tmp >> 13) & 0x1) == 0x1) && (((tmp >> 3) & 0x1) == 0x0) && - (obs_err == 0)) + obs_err == 0) break; else if ((((tmp >> 3) & 0x1) == 0x1) || (obs_err == 1)) return -EIO; } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ writel(0x00003f7c, (&denali_pi[175])); } + clrbits_le32(&denali_pi[80], 0x3 << 24); return 0; } static int data_training_rl(const struct chan_info *chan, u32 channel, - const struct rk3399_sdram_params *sdram_params) + const struct rk3399_sdram_params *params) { u32 *denali_pi = chan->pi->denali_pi; u32 i, tmp; - u32 rank = sdram_params->ch[channel].rank; + u32 rank = params->ch[channel].cap_info.rank; + + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + writel(0x00003f7c, (&denali_pi[175])); for (i = 0; i < rank; i++) { select_per_cs_training_index(chan, i); + /* PI_80 PI_RDLVL_EN:RW:16:2 */ clrsetbits_le32(&denali_pi[80], 0x3 << 16, 0x2 << 16); + /* PI_74 PI_RDLVL_REQ:WR:8:1,PI_RDLVL_CS:RW:24:2 */ clrsetbits_le32(&denali_pi[74], (0x1 << 8) | (0x3 << 24), @@ -806,30 +1243,47 @@ static int data_training_rl(const struct chan_info *chan, u32 channel, else if (((tmp >> 2) & 0x1) == 0x1) return -EIO; } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ writel(0x00003f7c, (&denali_pi[175])); } + clrbits_le32(&denali_pi[80], 0x3 << 16); return 0; } static int data_training_wdql(const struct chan_info *chan, u32 channel, - const struct rk3399_sdram_params *sdram_params) + const struct rk3399_sdram_params *params) { u32 *denali_pi = chan->pi->denali_pi; u32 i, tmp; - u32 rank = sdram_params->ch[channel].rank; + u32 rank = params->ch[channel].cap_info.rank; + u32 rank_mask; + + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ + writel(0x00003f7c, (&denali_pi[175])); + + if (params->base.dramtype == LPDDR4) + rank_mask = (rank == 1) ? 0x5 : 0xf; + else + rank_mask = (rank == 1) ? 0x1 : 0x3; + + for (i = 0; i < 4; i++) { + if (!(rank_mask & (1 << i))) + continue; - for (i = 0; i < rank; i++) { select_per_cs_training_index(chan, i); + /* * disable PI_WDQLVL_VREF_EN before wdq leveling? 
* PI_181 PI_WDQLVL_VREF_EN:RW:8:1 */ clrbits_le32(&denali_pi[181], 0x1 << 8); + /* PI_124 PI_WDQLVL_EN:RW:16:2 */ clrsetbits_le32(&denali_pi[124], 0x3 << 16, 0x2 << 16); + /* PI_121 PI_WDQLVL_REQ:WR:8:1,PI_WDQLVL_CS:RW:16:2 */ clrsetbits_le32(&denali_pi[121], (0x1 << 8) | (0x3 << 16), @@ -846,32 +1300,36 @@ static int data_training_wdql(const struct chan_info *chan, u32 channel, else if (((tmp >> 6) & 0x1) == 0x1) return -EIO; } + /* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */ writel(0x00003f7c, (&denali_pi[175])); } + clrbits_le32(&denali_pi[124], 0x3 << 16); return 0; } -static int data_training(const struct chan_info *chan, u32 channel, - const struct rk3399_sdram_params *sdram_params, +static int data_training(struct dram_info *dram, u32 channel, + const struct rk3399_sdram_params *params, u32 training_flag) { + struct chan_info *chan = &dram->chan[channel]; u32 *denali_phy = chan->publ->denali_phy; + int ret; /* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */ setbits_le32(&denali_phy[927], (1 << 22)); if (training_flag == PI_FULL_TRAINING) { - if (sdram_params->base.dramtype == LPDDR4) { - training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING | + if (params->base.dramtype == LPDDR4) { + training_flag = PI_WRITE_LEVELING | PI_READ_GATE_TRAINING | PI_READ_LEVELING | PI_WDQ_LEVELING; - } else if (sdram_params->base.dramtype == LPDDR3) { + } else if (params->base.dramtype == LPDDR3) { training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING | PI_READ_GATE_TRAINING; - } else if (sdram_params->base.dramtype == DDR3) { + } else if (params->base.dramtype == DDR3) { training_flag = PI_WRITE_LEVELING | PI_READ_GATE_TRAINING | PI_READ_LEVELING; @@ -879,24 +1337,49 @@ static int data_training(const struct chan_info *chan, u32 channel, } /* ca training(LPDDR4,LPDDR3 support) */ - if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) - data_training_ca(chan, channel, sdram_params); + if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) { + ret = data_training_ca(chan, channel, params); + if (ret < 0) { + debug("%s: data training ca failed\n", __func__); + return ret; + } + } /* write leveling(LPDDR4,LPDDR3,DDR3 support) */ - if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) - data_training_wl(chan, channel, sdram_params); + if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) { + ret = data_training_wl(chan, channel, params); + if (ret < 0) { + debug("%s: data training wl failed\n", __func__); + return ret; + } + } /* read gate training(LPDDR4,LPDDR3,DDR3 support) */ - if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) - data_training_rg(chan, channel, sdram_params); + if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) { + ret = data_training_rg(chan, channel, params); + if (ret < 0) { + debug("%s: data training rg failed\n", __func__); + return ret; + } + } /* read leveling(LPDDR4,LPDDR3,DDR3 support) */ - if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) - data_training_rl(chan, channel, sdram_params); + if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) { + ret = data_training_rl(chan, channel, params); + if (ret < 0) { + debug("%s: data training rl failed\n", __func__); + return ret; + } + } /* wdq leveling(LPDDR4 support) */ - if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) - data_training_wdql(chan, channel, sdram_params); + if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) { + ret = data_training_wdql(chan, channel, params); + if (ret < 0) { + debug("%s: data training wdql failed\n", __func__); + return 
ret; + } + } /* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */ clrbits_le32(&denali_phy[927], (1 << 22)); @@ -905,7 +1388,7 @@ static int data_training(const struct chan_info *chan, u32 channel, } static void set_ddrconfig(const struct chan_info *chan, - const struct rk3399_sdram_params *sdram_params, + const struct rk3399_sdram_params *params, unsigned char channel, u32 ddrconfig) { /* only need to set ddrconfig */ @@ -913,14 +1396,14 @@ static void set_ddrconfig(const struct chan_info *chan, unsigned int cs0_cap = 0; unsigned int cs1_cap = 0; - cs0_cap = (1 << (sdram_params->ch[channel].cs0_row - + sdram_params->ch[channel].col - + sdram_params->ch[channel].bk - + sdram_params->ch[channel].bw - 20)); - if (sdram_params->ch[channel].rank > 1) - cs1_cap = cs0_cap >> (sdram_params->ch[channel].cs0_row - - sdram_params->ch[channel].cs1_row); - if (sdram_params->ch[channel].row_3_4) { + cs0_cap = (1 << (params->ch[channel].cap_info.cs0_row + + params->ch[channel].cap_info.col + + params->ch[channel].cap_info.bk + + params->ch[channel].cap_info.bw - 20)); + if (params->ch[channel].cap_info.rank > 1) + cs1_cap = cs0_cap >> (params->ch[channel].cap_info.cs0_row + - params->ch[channel].cap_info.cs1_row); + if (params->ch[channel].cap_info.row_3_4) { cs0_cap = cs0_cap * 3 / 4; cs1_cap = cs1_cap * 3 / 4; } @@ -931,57 +1414,72 @@ static void set_ddrconfig(const struct chan_info *chan, } static void dram_all_config(struct dram_info *dram, - const struct rk3399_sdram_params *sdram_params) + const struct rk3399_sdram_params *params) { - u32 sys_reg = 0; + u32 sys_reg2 = 0; + u32 sys_reg3 = 0; unsigned int channel, idx; - sys_reg |= sdram_params->base.dramtype << SYS_REG_DDRTYPE_SHIFT; - sys_reg |= (sdram_params->base.num_channels - 1) - << SYS_REG_NUM_CH_SHIFT; + sys_reg2 |= SYS_REG_ENC_DDRTYPE(params->base.dramtype); + sys_reg2 |= SYS_REG_ENC_NUM_CH(params->base.num_channels); + for (channel = 0, idx = 0; - (idx < sdram_params->base.num_channels) && (channel < 2); + (idx < params->base.num_channels) && (channel < 2); channel++) { - const struct rk3399_sdram_channel *info = - &sdram_params->ch[channel]; + const struct rk3399_sdram_channel *info = ¶ms->ch[channel]; struct rk3399_msch_regs *ddr_msch_regs; const struct rk3399_msch_timings *noc_timing; - if (sdram_params->ch[channel].col == 0) + if (params->ch[channel].cap_info.col == 0) continue; idx++; - sys_reg |= info->row_3_4 << SYS_REG_ROW_3_4_SHIFT(channel); - sys_reg |= 1 << SYS_REG_CHINFO_SHIFT(channel); - sys_reg |= (info->rank - 1) << SYS_REG_RANK_SHIFT(channel); - sys_reg |= (info->col - 9) << SYS_REG_COL_SHIFT(channel); - sys_reg |= info->bk == 3 ? 
0 : 1 << SYS_REG_BK_SHIFT(channel); - sys_reg |= (info->cs0_row - 13) << SYS_REG_CS0_ROW_SHIFT(channel); - sys_reg |= (info->cs1_row - 13) << SYS_REG_CS1_ROW_SHIFT(channel); - sys_reg |= (2 >> info->bw) << SYS_REG_BW_SHIFT(channel); - sys_reg |= (2 >> info->dbw) << SYS_REG_DBW_SHIFT(channel); + sys_reg2 |= SYS_REG_ENC_ROW_3_4(info->cap_info.row_3_4, channel); + sys_reg2 |= SYS_REG_ENC_CHINFO(channel); + sys_reg2 |= SYS_REG_ENC_RANK(info->cap_info.rank, channel); + sys_reg2 |= SYS_REG_ENC_COL(info->cap_info.col, channel); + sys_reg2 |= SYS_REG_ENC_BK(info->cap_info.bk, channel); + sys_reg2 |= SYS_REG_ENC_BW(info->cap_info.bw, channel); + sys_reg2 |= SYS_REG_ENC_DBW(info->cap_info.dbw, channel); + SYS_REG_ENC_CS0_ROW(info->cap_info.cs0_row, sys_reg2, sys_reg3, channel); + if (info->cap_info.cs1_row) + SYS_REG_ENC_CS1_ROW(info->cap_info.cs1_row, sys_reg2, + sys_reg3, channel); + sys_reg3 |= SYS_REG_ENC_CS1_COL(info->cap_info.col, channel); + sys_reg3 |= SYS_REG_ENC_VERSION(DDR_SYS_REG_VERSION); ddr_msch_regs = dram->chan[channel].msch; - noc_timing = &sdram_params->ch[channel].noc_timings; + noc_timing = ¶ms->ch[channel].noc_timings; writel(noc_timing->ddrtiminga0, &ddr_msch_regs->ddrtiminga0); writel(noc_timing->ddrtimingb0, &ddr_msch_regs->ddrtimingb0); - writel(noc_timing->ddrtimingc0, + writel(noc_timing->ddrtimingc0.d32, &ddr_msch_regs->ddrtimingc0); writel(noc_timing->devtodev0, &ddr_msch_regs->devtodev0); - writel(noc_timing->ddrmode, + writel(noc_timing->ddrmode.d32, &ddr_msch_regs->ddrmode); - /* rank 1 memory clock disable (dfi_dram_clk_disable = 1) */ - if (sdram_params->ch[channel].rank == 1) + /** + * rank 1 memory clock disable (dfi_dram_clk_disable = 1) + * + * The hardware for LPDDR4 with + * - CLK0P/N connect to lower 16-bits + * - CLK1P/N connect to higher 16-bits + * + * dfi dram clk is configured via CLK1P/N, so disabling + * dfi dram clk will disable the CLK1P/N as well for lpddr4. + */ + if (params->ch[channel].cap_info.rank == 1 && + params->base.dramtype != LPDDR4) setbits_le32(&dram->chan[channel].pctl->denali_ctl[276], 1 << 17); } - writel(sys_reg, &dram->pmugrf->os_reg2); + writel(sys_reg2, &dram->pmugrf->os_reg2); + writel(sys_reg3, &dram->pmugrf->os_reg3); rk_clrsetreg(&dram->pmusgrf->soc_con4, 0x1f << 10, - sdram_params->base.stride << 10); + params->base.stride << 10); /* reboot hold register set */ writel(PRESET_SGRF_HOLD(0) | PRESET_GPIO0_HOLD(1) | @@ -990,12 +1488,30 @@ static void dram_all_config(struct dram_info *dram, clrsetbits_le32(&dram->cru->glb_rst_con, 0x3, 0x3); } +#if !defined(CONFIG_RAM_RK3399_LPDDR4) +static int default_data_training(struct dram_info *dram, u32 channel, u8 rank, + struct rk3399_sdram_params *params) +{ + u8 training_flag = PI_READ_GATE_TRAINING; + + /* + * LPDDR3 CA training msut be trigger before + * other training. + * DDR3 is not have CA training. 
+ */ + + if (params->base.dramtype == LPDDR3) + training_flag |= PI_CA_TRAINING; + + return data_training(dram, channel, params, training_flag); +} + static int switch_to_phy_index1(struct dram_info *dram, - const struct rk3399_sdram_params *sdram_params) + struct rk3399_sdram_params *params) { u32 channel; u32 *denali_phy; - u32 ch_count = sdram_params->base.num_channels; + u32 ch_count = params->base.num_channels; int ret; int i = 0; @@ -1025,9 +1541,8 @@ static int switch_to_phy_index1(struct dram_info *dram, for (channel = 0; channel < ch_count; channel++) { denali_phy = dram->chan[channel].publ->denali_phy; clrsetbits_le32(&denali_phy[896], (0x3 << 8) | 1, 1 << 8); - ret = data_training(&dram->chan[channel], channel, - sdram_params, PI_FULL_TRAINING); - if (ret) { + ret = data_training(dram, channel, params, PI_FULL_TRAINING); + if (ret < 0) { debug("index1 training failed\n"); return ret; } @@ -1036,12 +1551,979 @@ static int switch_to_phy_index1(struct dram_info *dram, return 0; } -static int sdram_init(struct dram_info *dram, - const struct rk3399_sdram_params *sdram_params) +#else + +struct rk3399_sdram_params lpddr4_timings[] = { + #include "sdram-rk3399-lpddr4-400.inc" + #include "sdram-rk3399-lpddr4-800.inc" +}; + +static void *get_denali_pi(const struct chan_info *chan, + struct rk3399_sdram_params *params, bool reg) +{ + return reg ? &chan->pi->denali_pi : ¶ms->pi_regs.denali_pi; +} + +static u32 lpddr4_get_phy(struct rk3399_sdram_params *params, u32 ctl) +{ + u32 lpddr4_phy[] = {1, 0, 0xb}; + + return lpddr4_phy[ctl]; +} + +static u32 lpddr4_get_ctl(struct rk3399_sdram_params *params, u32 phy) +{ + u32 lpddr4_ctl[] = {1, 0, 2}; + + return lpddr4_ctl[phy]; +} + +static u32 get_ddr_stride(struct rk3399_pmusgrf_regs *pmusgrf) +{ + return ((readl(&pmusgrf->soc_con4) >> 10) & 0x1F); +} + +static void set_ddr_stride(struct rk3399_pmusgrf_regs *pmusgrf, u32 stride) +{ + rk_clrsetreg(&pmusgrf->soc_con4, 0x1f << 10, stride << 10); +} + +static void set_cap_relate_config(const struct chan_info *chan, + struct rk3399_sdram_params *params, + unsigned int channel) +{ + u32 *denali_ctl = chan->pctl->denali_ctl; + u32 tmp; + struct rk3399_msch_timings *noc_timing; + + if (params->base.dramtype == LPDDR3) { + tmp = (8 << params->ch[channel].cap_info.bw) / + (8 << params->ch[channel].cap_info.dbw); + + /** + * memdata_ratio + * 1 -> 0, 2 -> 1, 4 -> 2 + */ + clrsetbits_le32(&denali_ctl[197], 0x7, + (tmp >> 1)); + clrsetbits_le32(&denali_ctl[198], 0x7 << 8, + (tmp >> 1) << 8); + } + + noc_timing = ¶ms->ch[channel].noc_timings; + + /* + * noc timing bw relate timing is 32 bit, and real bw is 16bit + * actually noc reg is setting at function dram_all_config + */ + if (params->ch[channel].cap_info.bw == 16 && + noc_timing->ddrmode.b.mwrsize == 2) { + if (noc_timing->ddrmode.b.burstsize) + noc_timing->ddrmode.b.burstsize -= 1; + noc_timing->ddrmode.b.mwrsize -= 1; + noc_timing->ddrtimingc0.b.burstpenalty *= 2; + noc_timing->ddrtimingc0.b.wrtomwr *= 2; + } +} + +static u32 calculate_ddrconfig(struct rk3399_sdram_params *params, u32 channel) +{ + unsigned int cs0_row = params->ch[channel].cap_info.cs0_row; + unsigned int col = params->ch[channel].cap_info.col; + unsigned int bw = params->ch[channel].cap_info.bw; + u16 ddr_cfg_2_rbc[] = { + /* + * [6] highest bit col + * [5:3] max row(14+n) + * [2] insertion row + * [1:0] col(9+n),col, data bus 32bit + * + * highbitcol, max_row, insertion_row, col + */ + ((0 << 6) | (2 << 3) | (0 << 2) | 0), /* 0 */ + ((0 << 6) | (2 << 3) | (0 << 2) | 1), /* 1 */ + 
((0 << 6) | (1 << 3) | (0 << 2) | 2), /* 2 */ + ((0 << 6) | (0 << 3) | (0 << 2) | 3), /* 3 */ + ((0 << 6) | (2 << 3) | (1 << 2) | 1), /* 4 */ + ((0 << 6) | (1 << 3) | (1 << 2) | 2), /* 5 */ + ((1 << 6) | (0 << 3) | (0 << 2) | 2), /* 6 */ + ((1 << 6) | (1 << 3) | (0 << 2) | 2), /* 7 */ + }; + u32 i; + + col -= (bw == 2) ? 0 : 1; + col -= 9; + + for (i = 0; i < 4; i++) { + if ((col == (ddr_cfg_2_rbc[i] & 0x3)) && + (cs0_row <= (((ddr_cfg_2_rbc[i] >> 3) & 0x7) + 14))) + break; + } + + if (i >= 4) + i = -EINVAL; + + return i; +} + +/** + * read mr_num mode register + * rank = 1: cs0 + * rank = 2: cs1 + */ +static int read_mr(struct rk3399_ddr_pctl_regs *ddr_pctl_regs, u32 rank, + u32 mr_num, u32 *buf) +{ + s32 timeout = 100; + + writel(((1 << 16) | (((rank == 2) ? 1 : 0) << 8) | mr_num) << 8, + &ddr_pctl_regs->denali_ctl[118]); + + while (0 == (readl(&ddr_pctl_regs->denali_ctl[203]) & + ((1 << 21) | (1 << 12)))) { + udelay(1); + + if (timeout <= 0) { + printf("%s: pctl timeout!\n", __func__); + return -ETIMEDOUT; + } + + timeout--; + } + + if (!(readl(&ddr_pctl_regs->denali_ctl[203]) & (1 << 12))) { + *buf = readl(&ddr_pctl_regs->denali_ctl[119]) & 0xFF; + } else { + printf("%s: read mr failed with 0x%x status\n", __func__, + readl(&ddr_pctl_regs->denali_ctl[17]) & 0x3); + *buf = 0; + } + + setbits_le32(&ddr_pctl_regs->denali_ctl[205], (1 << 21) | (1 << 12)); + + return 0; +} + +static int lpddr4_mr_detect(struct dram_info *dram, u32 channel, u8 rank, + struct rk3399_sdram_params *params) +{ + u64 cs0_cap; + u32 stride; + u32 cs = 0, col = 0, bk = 0, bw = 0, row_3_4 = 0; + u32 cs0_row = 0, cs1_row = 0, ddrconfig = 0; + u32 mr5, mr12, mr14; + struct chan_info *chan = &dram->chan[channel]; + struct rk3399_ddr_pctl_regs *ddr_pctl_regs = chan->pctl; + void __iomem *addr = NULL; + int ret = 0; + u32 val; + + stride = get_ddr_stride(dram->pmusgrf); + + if (params->ch[channel].cap_info.col == 0) { + ret = -EPERM; + goto end; + } + + cs = params->ch[channel].cap_info.rank; + col = params->ch[channel].cap_info.col; + bk = params->ch[channel].cap_info.bk; + bw = params->ch[channel].cap_info.bw; + row_3_4 = params->ch[channel].cap_info.row_3_4; + cs0_row = params->ch[channel].cap_info.cs0_row; + cs1_row = params->ch[channel].cap_info.cs1_row; + ddrconfig = params->ch[channel].cap_info.ddrconfig; + + /* 2GB */ + params->ch[channel].cap_info.rank = 2; + params->ch[channel].cap_info.col = 10; + params->ch[channel].cap_info.bk = 3; + params->ch[channel].cap_info.bw = 2; + params->ch[channel].cap_info.row_3_4 = 0; + params->ch[channel].cap_info.cs0_row = 15; + params->ch[channel].cap_info.cs1_row = 15; + params->ch[channel].cap_info.ddrconfig = 1; + + set_memory_map(chan, channel, params); + params->ch[channel].cap_info.ddrconfig = + calculate_ddrconfig(params, channel); + set_ddrconfig(chan, params, channel, + params->ch[channel].cap_info.ddrconfig); + set_cap_relate_config(chan, params, channel); + + cs0_cap = (1 << (params->ch[channel].cap_info.bw + + params->ch[channel].cap_info.col + + params->ch[channel].cap_info.bk + + params->ch[channel].cap_info.cs0_row)); + + if (params->ch[channel].cap_info.row_3_4) + cs0_cap = cs0_cap * 3 / 4; + + if (channel == 0) + set_ddr_stride(dram->pmusgrf, 0x17); + else + set_ddr_stride(dram->pmusgrf, 0x18); + + /* read and write data to DRAM, avoid be optimized by compiler. 
*/ + if (rank == 1) + addr = (void __iomem *)0x100; + else if (rank == 2) + addr = (void __iomem *)(cs0_cap + 0x100); + + val = readl(addr); + writel(val + 1, addr); + + read_mr(ddr_pctl_regs, rank, 5, &mr5); + read_mr(ddr_pctl_regs, rank, 12, &mr12); + read_mr(ddr_pctl_regs, rank, 14, &mr14); + + if (mr5 == 0 || mr12 != 0x4d || mr14 != 0x4d) { + ret = -EINVAL; + goto end; + } +end: + params->ch[channel].cap_info.rank = cs; + params->ch[channel].cap_info.col = col; + params->ch[channel].cap_info.bk = bk; + params->ch[channel].cap_info.bw = bw; + params->ch[channel].cap_info.row_3_4 = row_3_4; + params->ch[channel].cap_info.cs0_row = cs0_row; + params->ch[channel].cap_info.cs1_row = cs1_row; + params->ch[channel].cap_info.ddrconfig = ddrconfig; + + set_ddr_stride(dram->pmusgrf, stride); + + return ret; +} + +static void set_lpddr4_dq_odt(const struct chan_info *chan, + struct rk3399_sdram_params *params, u32 ctl, + bool en, bool ctl_phy_reg, u32 mr5) +{ + u32 *denali_ctl = get_denali_ctl(chan, params, ctl_phy_reg); + u32 *denali_pi = get_denali_pi(chan, params, ctl_phy_reg); + struct io_setting *io; + u32 reg_value; + + if (!en) + return; + + io = lpddr4_get_io_settings(params, mr5); + + reg_value = io->dq_odt; + + switch (ctl) { + case 0: + clrsetbits_le32(&denali_ctl[139], 0x7 << 24, reg_value << 24); + clrsetbits_le32(&denali_ctl[153], 0x7 << 24, reg_value << 24); + + clrsetbits_le32(&denali_pi[132], 0x7 << 0, (reg_value << 0)); + clrsetbits_le32(&denali_pi[139], 0x7 << 16, (reg_value << 16)); + clrsetbits_le32(&denali_pi[147], 0x7 << 0, (reg_value << 0)); + clrsetbits_le32(&denali_pi[154], 0x7 << 16, (reg_value << 16)); + break; + case 1: + clrsetbits_le32(&denali_ctl[140], 0x7 << 0, reg_value << 0); + clrsetbits_le32(&denali_ctl[154], 0x7 << 0, reg_value << 0); + + clrsetbits_le32(&denali_pi[129], 0x7 << 16, (reg_value << 16)); + clrsetbits_le32(&denali_pi[137], 0x7 << 0, (reg_value << 0)); + clrsetbits_le32(&denali_pi[144], 0x7 << 16, (reg_value << 16)); + clrsetbits_le32(&denali_pi[152], 0x7 << 0, (reg_value << 0)); + break; + case 2: + default: + clrsetbits_le32(&denali_ctl[140], 0x7 << 8, (reg_value << 8)); + clrsetbits_le32(&denali_ctl[154], 0x7 << 8, (reg_value << 8)); + + clrsetbits_le32(&denali_pi[127], 0x7 << 0, (reg_value << 0)); + clrsetbits_le32(&denali_pi[134], 0x7 << 16, (reg_value << 16)); + clrsetbits_le32(&denali_pi[142], 0x7 << 0, (reg_value << 0)); + clrsetbits_le32(&denali_pi[149], 0x7 << 16, (reg_value << 16)); + break; + } +} + +static void set_lpddr4_ca_odt(const struct chan_info *chan, + struct rk3399_sdram_params *params, u32 ctl, + bool en, bool ctl_phy_reg, u32 mr5) +{ + u32 *denali_ctl = get_denali_ctl(chan, params, ctl_phy_reg); + u32 *denali_pi = get_denali_pi(chan, params, ctl_phy_reg); + struct io_setting *io; + u32 reg_value; + + if (!en) + return; + + io = lpddr4_get_io_settings(params, mr5); + + reg_value = io->ca_odt; + + switch (ctl) { + case 0: + clrsetbits_le32(&denali_ctl[139], 0x7 << 28, reg_value << 28); + clrsetbits_le32(&denali_ctl[153], 0x7 << 28, reg_value << 28); + + clrsetbits_le32(&denali_pi[132], 0x7 << 4, reg_value << 4); + clrsetbits_le32(&denali_pi[139], 0x7 << 20, reg_value << 20); + clrsetbits_le32(&denali_pi[147], 0x7 << 4, reg_value << 4); + clrsetbits_le32(&denali_pi[154], 0x7 << 20, reg_value << 20); + break; + case 1: + clrsetbits_le32(&denali_ctl[140], 0x7 << 4, reg_value << 4); + clrsetbits_le32(&denali_ctl[154], 0x7 << 4, reg_value << 4); + + clrsetbits_le32(&denali_pi[129], 0x7 << 20, reg_value << 20); + 
clrsetbits_le32(&denali_pi[137], 0x7 << 4, reg_value << 4); + clrsetbits_le32(&denali_pi[144], 0x7 << 20, reg_value << 20); + clrsetbits_le32(&denali_pi[152], 0x7 << 4, reg_value << 4); + break; + case 2: + default: + clrsetbits_le32(&denali_ctl[140], 0x7 << 12, (reg_value << 12)); + clrsetbits_le32(&denali_ctl[154], 0x7 << 12, (reg_value << 12)); + + clrsetbits_le32(&denali_pi[127], 0x7 << 4, reg_value << 4); + clrsetbits_le32(&denali_pi[134], 0x7 << 20, reg_value << 20); + clrsetbits_le32(&denali_pi[142], 0x7 << 4, reg_value << 4); + clrsetbits_le32(&denali_pi[149], 0x7 << 20, reg_value << 20); + break; + } +} + +static void set_lpddr4_MR3(const struct chan_info *chan, + struct rk3399_sdram_params *params, u32 ctl, + bool ctl_phy_reg, u32 mr5) +{ + u32 *denali_ctl = get_denali_ctl(chan, params, ctl_phy_reg); + u32 *denali_pi = get_denali_pi(chan, params, ctl_phy_reg); + struct io_setting *io; + u32 reg_value; + + io = lpddr4_get_io_settings(params, mr5); + + reg_value = ((io->pdds << 3) | 1); + + switch (ctl) { + case 0: + clrsetbits_le32(&denali_ctl[138], 0xFFFF, reg_value); + clrsetbits_le32(&denali_ctl[152], 0xFFFF, reg_value); + + clrsetbits_le32(&denali_pi[131], 0xFFFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[139], 0xFFFF, reg_value); + clrsetbits_le32(&denali_pi[146], 0xFFFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[154], 0xFFFF, reg_value); + break; + case 1: + clrsetbits_le32(&denali_ctl[138], 0xFFFF << 16, + reg_value << 16); + clrsetbits_le32(&denali_ctl[152], 0xFFFF << 16, + reg_value << 16); + + clrsetbits_le32(&denali_pi[129], 0xFFFF, reg_value); + clrsetbits_le32(&denali_pi[136], 0xFFFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[144], 0xFFFF, reg_value); + clrsetbits_le32(&denali_pi[151], 0xFFFF << 16, reg_value << 16); + break; + case 2: + default: + clrsetbits_le32(&denali_ctl[139], 0xFFFF, reg_value); + clrsetbits_le32(&denali_ctl[153], 0xFFFF, reg_value); + + clrsetbits_le32(&denali_pi[126], 0xFFFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[134], 0xFFFF, reg_value); + clrsetbits_le32(&denali_pi[141], 0xFFFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[149], 0xFFFF, reg_value); + break; + } +} + +static void set_lpddr4_MR12(const struct chan_info *chan, + struct rk3399_sdram_params *params, u32 ctl, + bool ctl_phy_reg, u32 mr5) +{ + u32 *denali_ctl = get_denali_ctl(chan, params, ctl_phy_reg); + u32 *denali_pi = get_denali_pi(chan, params, ctl_phy_reg); + struct io_setting *io; + u32 reg_value; + + io = lpddr4_get_io_settings(params, mr5); + + reg_value = io->ca_vref; + + switch (ctl) { + case 0: + clrsetbits_le32(&denali_ctl[140], 0xFFFF << 16, + reg_value << 16); + clrsetbits_le32(&denali_ctl[154], 0xFFFF << 16, + reg_value << 16); + + clrsetbits_le32(&denali_pi[132], 0xFF << 8, reg_value << 8); + clrsetbits_le32(&denali_pi[139], 0xFF << 24, reg_value << 24); + clrsetbits_le32(&denali_pi[147], 0xFF << 8, reg_value << 8); + clrsetbits_le32(&denali_pi[154], 0xFF << 24, reg_value << 24); + break; + case 1: + clrsetbits_le32(&denali_ctl[141], 0xFFFF, reg_value); + clrsetbits_le32(&denali_ctl[155], 0xFFFF, reg_value); + + clrsetbits_le32(&denali_pi[129], 0xFF << 24, reg_value << 24); + clrsetbits_le32(&denali_pi[137], 0xFF << 8, reg_value << 8); + clrsetbits_le32(&denali_pi[144], 0xFF << 24, reg_value << 24); + clrsetbits_le32(&denali_pi[152], 0xFF << 8, reg_value << 8); + break; + case 2: + default: + clrsetbits_le32(&denali_ctl[141], 0xFFFF << 16, + reg_value << 16); + clrsetbits_le32(&denali_ctl[155], 0xFFFF << 16, + 
reg_value << 16); + + clrsetbits_le32(&denali_pi[127], 0xFF << 8, reg_value << 8); + clrsetbits_le32(&denali_pi[134], 0xFF << 24, reg_value << 24); + clrsetbits_le32(&denali_pi[142], 0xFF << 8, reg_value << 8); + clrsetbits_le32(&denali_pi[149], 0xFF << 24, reg_value << 24); + break; + } +} + +static void set_lpddr4_MR14(const struct chan_info *chan, + struct rk3399_sdram_params *params, u32 ctl, + bool ctl_phy_reg, u32 mr5) +{ + u32 *denali_ctl = get_denali_ctl(chan, params, ctl_phy_reg); + u32 *denali_pi = get_denali_pi(chan, params, ctl_phy_reg); + struct io_setting *io; + u32 reg_value; + + io = lpddr4_get_io_settings(params, mr5); + + reg_value = io->dq_vref; + + switch (ctl) { + case 0: + clrsetbits_le32(&denali_ctl[142], 0xFFFF << 16, + reg_value << 16); + clrsetbits_le32(&denali_ctl[156], 0xFFFF << 16, + reg_value << 16); + + clrsetbits_le32(&denali_pi[132], 0xFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[140], 0xFF << 0, reg_value << 0); + clrsetbits_le32(&denali_pi[147], 0xFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[155], 0xFF << 0, reg_value << 0); + break; + case 1: + clrsetbits_le32(&denali_ctl[143], 0xFFFF, reg_value); + clrsetbits_le32(&denali_ctl[157], 0xFFFF, reg_value); + + clrsetbits_le32(&denali_pi[130], 0xFF << 0, reg_value << 0); + clrsetbits_le32(&denali_pi[137], 0xFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[145], 0xFF << 0, reg_value << 0); + clrsetbits_le32(&denali_pi[152], 0xFF << 16, reg_value << 16); + break; + case 2: + default: + clrsetbits_le32(&denali_ctl[143], 0xFFFF << 16, + reg_value << 16); + clrsetbits_le32(&denali_ctl[157], 0xFFFF << 16, + reg_value << 16); + + clrsetbits_le32(&denali_pi[127], 0xFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[135], 0xFF << 0, reg_value << 0); + clrsetbits_le32(&denali_pi[142], 0xFF << 16, reg_value << 16); + clrsetbits_le32(&denali_pi[150], 0xFF << 0, reg_value << 0); + break; + } +} + +static void lpddr4_copy_phy(struct dram_info *dram, + struct rk3399_sdram_params *params, u32 phy, + struct rk3399_sdram_params *timings, + u32 channel) +{ + u32 *denali_ctl, *denali_phy; + u32 *denali_phy_params; + u32 speed = 0; + u32 ctl, mr5; + + denali_ctl = dram->chan[channel].pctl->denali_ctl; + denali_phy = dram->chan[channel].publ->denali_phy; + denali_phy_params = timings->phy_regs.denali_phy; + + /* switch index */ + clrsetbits_le32(&denali_phy_params[896], 0x3 << 8, phy << 8); + writel(denali_phy_params[896], &denali_phy[896]); + + /* phy_pll_ctrl_ca, phy_pll_ctrl */ + writel(denali_phy_params[911], &denali_phy[911]); + + /* phy_low_freq_sel */ + clrsetbits_le32(&denali_phy[913], 0x1, + denali_phy_params[913] & 0x1); + + /* phy_grp_slave_delay_x, phy_cslvl_dly_step */ + writel(denali_phy_params[916], &denali_phy[916]); + writel(denali_phy_params[917], &denali_phy[917]); + writel(denali_phy_params[918], &denali_phy[918]); + + /* phy_adrz_sw_wraddr_shift_x */ + writel(denali_phy_params[512], &denali_phy[512]); + clrsetbits_le32(&denali_phy[513], 0xffff, + denali_phy_params[513] & 0xffff); + writel(denali_phy_params[640], &denali_phy[640]); + clrsetbits_le32(&denali_phy[641], 0xffff, + denali_phy_params[641] & 0xffff); + writel(denali_phy_params[768], &denali_phy[768]); + clrsetbits_le32(&denali_phy[769], 0xffff, + denali_phy_params[769] & 0xffff); + + writel(denali_phy_params[544], &denali_phy[544]); + writel(denali_phy_params[545], &denali_phy[545]); + writel(denali_phy_params[546], &denali_phy[546]); + writel(denali_phy_params[547], &denali_phy[547]); + + 
writel(denali_phy_params[672], &denali_phy[672]); + writel(denali_phy_params[673], &denali_phy[673]); + writel(denali_phy_params[674], &denali_phy[674]); + writel(denali_phy_params[675], &denali_phy[675]); + + writel(denali_phy_params[800], &denali_phy[800]); + writel(denali_phy_params[801], &denali_phy[801]); + writel(denali_phy_params[802], &denali_phy[802]); + writel(denali_phy_params[803], &denali_phy[803]); + + /* + * phy_adr_master_delay_start_x + * phy_adr_master_delay_step_x + * phy_adr_master_delay_wait_x + */ + writel(denali_phy_params[548], &denali_phy[548]); + writel(denali_phy_params[676], &denali_phy[676]); + writel(denali_phy_params[804], &denali_phy[804]); + + /* phy_adr_calvl_dly_step_x */ + writel(denali_phy_params[549], &denali_phy[549]); + writel(denali_phy_params[677], &denali_phy[677]); + writel(denali_phy_params[805], &denali_phy[805]); + + /* + * phy_clk_wrdm_slave_delay_x + * phy_clk_wrdqz_slave_delay_x + * phy_clk_wrdqs_slave_delay_x + */ + copy_to_reg((u32 *)&denali_phy[59], (u32 *)&denali_phy_params[59], + (63 - 58) * 4); + copy_to_reg((u32 *)&denali_phy[187], (u32 *)&denali_phy_params[187], + (191 - 186) * 4); + copy_to_reg((u32 *)&denali_phy[315], (u32 *)&denali_phy_params[315], + (319 - 314) * 4); + copy_to_reg((u32 *)&denali_phy[443], (u32 *)&denali_phy_params[443], + (447 - 442) * 4); + + /* + * phy_dqs_tsel_wr_timing_x 8bits denali_phy_84/212/340/468 offset_8 + * dqs_tsel_wr_end[7:4] add half cycle + * phy_dq_tsel_wr_timing_x 8bits denali_phy_83/211/339/467 offset_8 + * dq_tsel_wr_end[7:4] add half cycle + */ + writel(denali_phy_params[83] + (0x10 << 16), &denali_phy[83]); + writel(denali_phy_params[84] + (0x10 << 8), &denali_phy[84]); + writel(denali_phy_params[85], &denali_phy[85]); + + writel(denali_phy_params[211] + (0x10 << 16), &denali_phy[211]); + writel(denali_phy_params[212] + (0x10 << 8), &denali_phy[212]); + writel(denali_phy_params[213], &denali_phy[213]); + + writel(denali_phy_params[339] + (0x10 << 16), &denali_phy[339]); + writel(denali_phy_params[340] + (0x10 << 8), &denali_phy[340]); + writel(denali_phy_params[341], &denali_phy[341]); + + writel(denali_phy_params[467] + (0x10 << 16), &denali_phy[467]); + writel(denali_phy_params[468] + (0x10 << 8), &denali_phy[468]); + writel(denali_phy_params[469], &denali_phy[469]); + + /* + * phy_gtlvl_resp_wait_cnt_x + * phy_gtlvl_dly_step_x + * phy_wrlvl_resp_wait_cnt_x + * phy_gtlvl_final_step_x + * phy_gtlvl_back_step_x + * phy_rdlvl_dly_step_x + * + * phy_master_delay_step_x + * phy_master_delay_wait_x + * phy_wrlvl_dly_step_x + * phy_rptr_update_x + * phy_wdqlvl_dly_step_x + */ + writel(denali_phy_params[87], &denali_phy[87]); + writel(denali_phy_params[88], &denali_phy[88]); + writel(denali_phy_params[89], &denali_phy[89]); + writel(denali_phy_params[90], &denali_phy[90]); + + writel(denali_phy_params[215], &denali_phy[215]); + writel(denali_phy_params[216], &denali_phy[216]); + writel(denali_phy_params[217], &denali_phy[217]); + writel(denali_phy_params[218], &denali_phy[218]); + + writel(denali_phy_params[343], &denali_phy[343]); + writel(denali_phy_params[344], &denali_phy[344]); + writel(denali_phy_params[345], &denali_phy[345]); + writel(denali_phy_params[346], &denali_phy[346]); + + writel(denali_phy_params[471], &denali_phy[471]); + writel(denali_phy_params[472], &denali_phy[472]); + writel(denali_phy_params[473], &denali_phy[473]); + writel(denali_phy_params[474], &denali_phy[474]); + + /* + * phy_gtlvl_lat_adj_start_x + * phy_gtlvl_rddqs_slv_dly_start_x + * 
phy_rdlvl_rddqs_dq_slv_dly_start_x + * phy_wdqlvl_dqdm_slv_dly_start_x + */ + writel(denali_phy_params[80], &denali_phy[80]); + writel(denali_phy_params[81], &denali_phy[81]); + + writel(denali_phy_params[208], &denali_phy[208]); + writel(denali_phy_params[209], &denali_phy[209]); + + writel(denali_phy_params[336], &denali_phy[336]); + writel(denali_phy_params[337], &denali_phy[337]); + + writel(denali_phy_params[464], &denali_phy[464]); + writel(denali_phy_params[465], &denali_phy[465]); + + /* + * phy_master_delay_start_x + * phy_sw_master_mode_x + * phy_rddata_en_tsel_dly_x + */ + writel(denali_phy_params[86], &denali_phy[86]); + writel(denali_phy_params[214], &denali_phy[214]); + writel(denali_phy_params[342], &denali_phy[342]); + writel(denali_phy_params[470], &denali_phy[470]); + + /* + * phy_rddqz_slave_delay_x + * phy_rddqs_dqz_fall_slave_delay_x + * phy_rddqs_dqz_rise_slave_delay_x + * phy_rddqs_dm_fall_slave_delay_x + * phy_rddqs_dm_rise_slave_delay_x + * phy_rddqs_gate_slave_delay_x + * phy_wrlvl_delay_early_threshold_x + * phy_write_path_lat_add_x + * phy_rddqs_latency_adjust_x + * phy_wrlvl_delay_period_threshold_x + * phy_wrlvl_early_force_zero_x + */ + copy_to_reg((u32 *)&denali_phy[64], (u32 *)&denali_phy_params[64], + (67 - 63) * 4); + clrsetbits_le32(&denali_phy[68], 0xfffffc00, + denali_phy_params[68] & 0xfffffc00); + copy_to_reg((u32 *)&denali_phy[69], (u32 *)&denali_phy_params[69], + (79 - 68) * 4); + copy_to_reg((u32 *)&denali_phy[192], (u32 *)&denali_phy_params[192], + (195 - 191) * 4); + clrsetbits_le32(&denali_phy[196], 0xfffffc00, + denali_phy_params[196] & 0xfffffc00); + copy_to_reg((u32 *)&denali_phy[197], (u32 *)&denali_phy_params[197], + (207 - 196) * 4); + copy_to_reg((u32 *)&denali_phy[320], (u32 *)&denali_phy_params[320], + (323 - 319) * 4); + clrsetbits_le32(&denali_phy[324], 0xfffffc00, + denali_phy_params[324] & 0xfffffc00); + copy_to_reg((u32 *)&denali_phy[325], (u32 *)&denali_phy_params[325], + (335 - 324) * 4); + + copy_to_reg((u32 *)&denali_phy[448], (u32 *)&denali_phy_params[448], + (451 - 447) * 4); + clrsetbits_le32(&denali_phy[452], 0xfffffc00, + denali_phy_params[452] & 0xfffffc00); + copy_to_reg((u32 *)&denali_phy[453], (u32 *)&denali_phy_params[453], + (463 - 452) * 4); + + /* phy_two_cyc_preamble_x */ + clrsetbits_le32(&denali_phy[7], 0x3 << 24, + denali_phy_params[7] & (0x3 << 24)); + clrsetbits_le32(&denali_phy[135], 0x3 << 24, + denali_phy_params[135] & (0x3 << 24)); + clrsetbits_le32(&denali_phy[263], 0x3 << 24, + denali_phy_params[263] & (0x3 << 24)); + clrsetbits_le32(&denali_phy[391], 0x3 << 24, + denali_phy_params[391] & (0x3 << 24)); + + /* speed */ + if (timings->base.ddr_freq < 400 * MHz) + speed = 0x0; + else if (timings->base.ddr_freq < 800 * MHz) + speed = 0x1; + else if (timings->base.ddr_freq < 1200 * MHz) + speed = 0x2; + + /* phy_924 phy_pad_fdbk_drive */ + clrsetbits_le32(&denali_phy[924], 0x3 << 21, speed << 21); + /* phy_926 phy_pad_data_drive */ + clrsetbits_le32(&denali_phy[926], 0x3 << 9, speed << 9); + /* phy_927 phy_pad_dqs_drive */ + clrsetbits_le32(&denali_phy[927], 0x3 << 9, speed << 9); + /* phy_928 phy_pad_addr_drive */ + clrsetbits_le32(&denali_phy[928], 0x3 << 17, speed << 17); + /* phy_929 phy_pad_clk_drive */ + clrsetbits_le32(&denali_phy[929], 0x3 << 17, speed << 17); + /* phy_935 phy_pad_cke_drive */ + clrsetbits_le32(&denali_phy[935], 0x3 << 17, speed << 17); + /* phy_937 phy_pad_rst_drive */ + clrsetbits_le32(&denali_phy[937], 0x3 << 17, speed << 17); + /* phy_939 phy_pad_cs_drive */ + 
clrsetbits_le32(&denali_phy[939], 0x3 << 17, speed << 17); + + read_mr(dram->chan[channel].pctl, 1, 5, &mr5); + set_ds_odt(&dram->chan[channel], timings, true, mr5); + + ctl = lpddr4_get_ctl(timings, phy); + set_lpddr4_dq_odt(&dram->chan[channel], timings, ctl, true, true, mr5); + set_lpddr4_ca_odt(&dram->chan[channel], timings, ctl, true, true, mr5); + set_lpddr4_MR3(&dram->chan[channel], timings, ctl, true, mr5); + set_lpddr4_MR12(&dram->chan[channel], timings, ctl, true, mr5); + set_lpddr4_MR14(&dram->chan[channel], timings, ctl, true, mr5); + + /* + * if phy_sw_master_mode_x not bypass mode, + * clear phy_slice_pwr_rdc_disable. + * note: need use timings, not ddr_publ_regs + */ + if (!((denali_phy_params[86] >> 8) & (1 << 2))) { + clrbits_le32(&denali_phy[10], 1 << 16); + clrbits_le32(&denali_phy[138], 1 << 16); + clrbits_le32(&denali_phy[266], 1 << 16); + clrbits_le32(&denali_phy[394], 1 << 16); + } + + /* + * when PHY_PER_CS_TRAINING_EN=1, W2W_DIFFCS_DLY_Fx can't + * smaller than 8 + * NOTE: need use timings, not ddr_publ_regs + */ + if ((denali_phy_params[84] >> 16) & 1) { + if (((readl(&denali_ctl[217 + ctl]) >> 16) & 0x1f) < 8) + clrsetbits_le32(&denali_ctl[217 + ctl], + 0x1f << 16, 8 << 16); + } +} + +static void lpddr4_set_phy(struct dram_info *dram, + struct rk3399_sdram_params *params, u32 phy, + struct rk3399_sdram_params *timings) +{ + u32 channel; + + for (channel = 0; channel < 2; channel++) + lpddr4_copy_phy(dram, params, phy, timings, channel); +} + +static int lpddr4_set_ctl(struct dram_info *dram, + struct rk3399_sdram_params *params, u32 ctl, u32 hz) +{ + u32 channel; + int ret_clk, ret; + + /* cci idle req stall */ + writel(0x70007, &dram->grf->soc_con0); + + /* enable all clk */ + setbits_le32(&dram->pmu->pmu_noc_auto_ena, (0x3 << 7)); + + /* idle */ + setbits_le32(&dram->pmu->pmu_bus_idle_req, (0x3 << 18)); + while ((readl(&dram->pmu->pmu_bus_idle_st) & (0x3 << 18)) + != (0x3 << 18)) + ; + + /* change freq */ + writel((((0x3 << 4) | (1 << 2) | 1) << 16) | + (ctl << 4) | (1 << 2) | 1, &dram->cic->cic_ctrl0); + while (!(readl(&dram->cic->cic_status0) & (1 << 2))) + ; + + ret_clk = clk_set_rate(&dram->ddr_clk, hz); + if (ret_clk < 0) { + printf("%s clk set failed %d\n", __func__, ret_clk); + return ret_clk; + } + + writel(0x20002, &dram->cic->cic_ctrl0); + while (!(readl(&dram->cic->cic_status0) & (1 << 0))) + ; + + /* deidle */ + clrbits_le32(&dram->pmu->pmu_bus_idle_req, (0x3 << 18)); + while (readl(&dram->pmu->pmu_bus_idle_st) & (0x3 << 18)) + ; + + /* clear enable all clk */ + clrbits_le32(&dram->pmu->pmu_noc_auto_ena, (0x3 << 7)); + + /* lpddr4 ctl2 can not do training, all training will fail */ + if (!(params->base.dramtype == LPDDR4 && ctl == 2)) { + for (channel = 0; channel < 2; channel++) { + if (!(params->ch[channel].cap_info.col)) + continue; + ret = data_training(dram, channel, params, + PI_FULL_TRAINING); + if (ret) + printf("%s: channel %d training failed!\n", + __func__, channel); + else + debug("%s: channel %d training pass\n", + __func__, channel); + } + } + + return 0; +} + +static int lpddr4_set_rate(struct dram_info *dram, + struct rk3399_sdram_params *params) +{ + u32 ctl; + u32 phy; + + for (ctl = 0; ctl < 2; ctl++) { + phy = lpddr4_get_phy(params, ctl); + + lpddr4_set_phy(dram, params, phy, &lpddr4_timings[ctl]); + lpddr4_set_ctl(dram, params, ctl, + lpddr4_timings[ctl].base.ddr_freq); + + debug("%s: change freq to %d mhz %d, %d\n", __func__, + lpddr4_timings[ctl].base.ddr_freq / MHz, ctl, phy); + } + + return 0; +} +#endif /* 
CONFIG_RAM_RK3399_LPDDR4 */ + +static unsigned char calculate_stride(struct rk3399_sdram_params *params) +{ + unsigned int stride = params->base.stride; + unsigned int channel, chinfo = 0; + unsigned int ch_cap[2] = {0, 0}; + u64 cap; + + for (channel = 0; channel < 2; channel++) { + unsigned int cs0_cap = 0; + unsigned int cs1_cap = 0; + struct sdram_cap_info *cap_info = ¶ms->ch[channel].cap_info; + + if (cap_info->col == 0) + continue; + + cs0_cap = (1 << (cap_info->cs0_row + cap_info->col + + cap_info->bk + cap_info->bw - 20)); + if (cap_info->rank > 1) + cs1_cap = cs0_cap >> (cap_info->cs0_row + - cap_info->cs1_row); + if (cap_info->row_3_4) { + cs0_cap = cs0_cap * 3 / 4; + cs1_cap = cs1_cap * 3 / 4; + } + ch_cap[channel] = cs0_cap + cs1_cap; + chinfo |= 1 << channel; + } + + /* stride calculation for 1 channel */ + if (params->base.num_channels == 1 && chinfo & 1) + return 0x17; /* channel a */ + + /* stride calculation for 2 channels, default gstride type is 256B */ + if (ch_cap[0] == ch_cap[1]) { + cap = ch_cap[0] + ch_cap[1]; + switch (cap) { + /* 512MB */ + case 512: + stride = 0; + break; + /* 1GB */ + case 1024: + stride = 0x5; + break; + /* + * 768MB + 768MB same as total 2GB memory + * useful space: 0-768MB 1GB-1792MB + */ + case 1536: + /* 2GB */ + case 2048: + stride = 0x9; + break; + /* 1536MB + 1536MB */ + case 3072: + stride = 0x11; + break; + /* 4GB */ + case 4096: + stride = 0xD; + break; + default: + printf("%s: Unable to calculate stride for ", __func__); + print_size((cap * (1 << 20)), " capacity\n"); + break; + } + } + + sdram_print_stride(stride); + + return stride; +} + +static void clear_channel_params(struct rk3399_sdram_params *params, u8 channel) +{ + params->ch[channel].cap_info.rank = 0; + params->ch[channel].cap_info.col = 0; + params->ch[channel].cap_info.bk = 0; + params->ch[channel].cap_info.bw = 32; + params->ch[channel].cap_info.dbw = 32; + params->ch[channel].cap_info.row_3_4 = 0; + params->ch[channel].cap_info.cs0_row = 0; + params->ch[channel].cap_info.cs1_row = 0; + params->ch[channel].cap_info.ddrconfig = 0; +} + +static int pctl_init(struct dram_info *dram, struct rk3399_sdram_params *params) { - unsigned char dramtype = sdram_params->base.dramtype; - unsigned int ddr_freq = sdram_params->base.ddr_freq; int channel; + int ret; + + for (channel = 0; channel < 2; channel++) { + const struct chan_info *chan = &dram->chan[channel]; + struct rk3399_cru *cru = dram->cru; + struct rk3399_ddr_publ_regs *publ = chan->publ; + + phy_pctrl_reset(cru, channel); + phy_dll_bypass_set(publ, params->base.ddr_freq); + + ret = pctl_cfg(dram, chan, channel, params); + if (ret < 0) { + printf("%s: pctl config failed\n", __func__); + return ret; + } + + /* start to trigger initialization */ + pctl_start(dram, channel); + } + + return 0; +} + +static int sdram_init(struct dram_info *dram, + struct rk3399_sdram_params *params) +{ + unsigned char dramtype = params->base.dramtype; + unsigned int ddr_freq = params->base.ddr_freq; + int channel, ch, rank; + int ret; debug("Starting SDRAM initialization...\n"); @@ -1052,35 +2534,78 @@ static int sdram_init(struct dram_info *dram, return -E2BIG; } + for (ch = 0; ch < 2; ch++) { + params->ch[ch].cap_info.rank = 2; + for (rank = 2; rank != 0; rank--) { + ret = pctl_init(dram, params); + if (ret < 0) { + printf("%s: pctl init failed\n", __func__); + return ret; + } + + /* LPDDR2/LPDDR3 need to wait DAI complete, max 10us */ + if (dramtype == LPDDR3) + udelay(10); + + params->ch[ch].cap_info.rank = rank; + + ret = 
dram->ops->data_training(dram, ch, rank, params); + if (!ret) { + debug("%s: data trained for rank %d, ch %d\n", + __func__, rank, ch); + break; + } + } + /* Computed rank with associated channel number */ + params->ch[ch].cap_info.rank = rank; + } + + params->base.num_channels = 0; for (channel = 0; channel < 2; channel++) { const struct chan_info *chan = &dram->chan[channel]; - struct rk3399_ddr_publ_regs *publ = chan->publ; - - phy_dll_bypass_set(publ, ddr_freq); + struct sdram_cap_info *cap_info = ¶ms->ch[channel].cap_info; + u8 training_flag = PI_FULL_TRAINING; - if (channel >= sdram_params->base.num_channels) + if (cap_info->rank == 0) { + clear_channel_params(params, channel); continue; - - if (pctl_cfg(chan, channel, sdram_params) != 0) { - printf("pctl_cfg fail, reset\n"); - return -EIO; + } else { + params->base.num_channels++; } - /* LPDDR2/LPDDR3 need to wait DAI complete, max 10us */ - if (dramtype == LPDDR3) - udelay(10); + debug("Channel "); + debug(channel ? "1: " : "0: "); - if (data_training(chan, channel, - sdram_params, PI_FULL_TRAINING)) { - printf("SDRAM initialization failed, reset\n"); - return -EIO; + /* LPDDR3 should have write and read gate training */ + if (params->base.dramtype == LPDDR3) + training_flag = PI_WRITE_LEVELING | + PI_READ_GATE_TRAINING; + + if (params->base.dramtype != LPDDR4) { + ret = data_training(dram, channel, params, + training_flag); + if (!ret) { + debug("%s: data train failed for channel %d\n", + __func__, ret); + continue; + } } - set_ddrconfig(chan, sdram_params, channel, - sdram_params->ch[channel].ddrconfig); + sdram_print_ddr_info(cap_info, ¶ms->base); + + set_ddrconfig(chan, params, channel, cap_info->ddrconfig); } - dram_all_config(dram, sdram_params); - switch_to_phy_index1(dram, sdram_params); + + if (params->base.num_channels == 0) { + printf("%s: ", __func__); + sdram_print_dram_type(params->base.dramtype); + printf(" - %dMHz failed!\n", params->base.ddr_freq); + return -EINVAL; + } + + params->base.stride = calculate_stride(params); + dram_all_config(dram, params); + dram->ops->set_rate(dram, params); debug("Finish SDRAM initialization...\n"); return 0; @@ -1116,8 +2641,8 @@ static int conv_of_platdata(struct udevice *dev) int ret; ret = regmap_init_mem_platdata(dev, dtplat->reg, - ARRAY_SIZE(dtplat->reg) / 2, - &plat->map); + ARRAY_SIZE(dtplat->reg) / 2, + &plat->map); if (ret) return ret; @@ -1125,6 +2650,16 @@ static int conv_of_platdata(struct udevice *dev) } #endif +static const struct sdram_rk3399_ops rk3399_ops = { +#if !defined(CONFIG_RAM_RK3399_LPDDR4) + .data_training = default_data_training, + .set_rate = switch_to_phy_index1, +#else + .data_training = lpddr4_mr_detect, + .set_rate = lpddr4_set_rate, +#endif +}; + static int rk3399_dmc_init(struct udevice *dev) { struct dram_info *priv = dev_get_priv(dev); @@ -1142,7 +2677,10 @@ static int rk3399_dmc_init(struct udevice *dev) return ret; #endif + priv->ops = &rk3399_ops; priv->cic = syscon_get_first_range(ROCKCHIP_SYSCON_CIC); + priv->grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF); + priv->pmu = syscon_get_first_range(ROCKCHIP_SYSCON_PMU); priv->pmugrf = syscon_get_first_range(ROCKCHIP_SYSCON_PMUGRF); priv->pmusgrf = syscon_get_first_range(ROCKCHIP_SYSCON_PMUSGRF); priv->pmucru = rockchip_get_pmucru(); @@ -1161,8 +2699,9 @@ static int rk3399_dmc_init(struct udevice *dev) priv->chan[0].publ, priv->chan[0].msch, priv->chan[1].pctl, priv->chan[1].pi, priv->chan[1].publ, priv->chan[1].msch); - debug("cru %p, cic %p, grf %p, sgrf %p, pmucru %p\n", priv->cru, - 
priv->cic, priv->pmugrf, priv->pmusgrf, priv->pmucru); + debug("cru %p, cic %p, grf %p, sgrf %p, pmucru %p, pmu %p\n", priv->cru, + priv->cic, priv->pmugrf, priv->pmusgrf, priv->pmucru, priv->pmu); + #if CONFIG_IS_ENABLED(OF_PLATDATA) ret = clk_get_by_index_platdata(dev, 0, dtplat->clocks, &priv->ddr_clk); #else @@ -1172,14 +2711,16 @@ static int rk3399_dmc_init(struct udevice *dev) printf("%s clk get failed %d\n", __func__, ret); return ret; } + ret = clk_set_rate(&priv->ddr_clk, params->base.ddr_freq * MHz); if (ret < 0) { printf("%s clk set failed %d\n", __func__, ret); return ret; } + ret = sdram_init(priv, params); if (ret < 0) { - printf("%s DRAM init failed%d\n", __func__, ret); + printf("%s DRAM init failed %d\n", __func__, ret); return ret; } @@ -1197,10 +2738,10 @@ static int rk3399_dmc_probe(struct udevice *dev) struct dram_info *priv = dev_get_priv(dev); priv->pmugrf = syscon_get_first_range(ROCKCHIP_SYSCON_PMUGRF); - debug("%s: pmugrf=%p\n", __func__, priv->pmugrf); + debug("%s: pmugrf = %p\n", __func__, priv->pmugrf); priv->info.base = CONFIG_SYS_SDRAM_BASE; - priv->info.size = rockchip_sdram_size( - (phys_addr_t)&priv->pmugrf->os_reg2); + priv->info.size = + rockchip_sdram_size((phys_addr_t)&priv->pmugrf->os_reg2); #endif return 0; } @@ -1218,7 +2759,6 @@ static struct ram_ops rk3399_dmc_ops = { .get_info = rk3399_dmc_get_info, }; - static const struct udevice_id rk3399_dmc_ids[] = { { .compatible = "rockchip,rk3399-dmc" }, { } diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 9eb532bc7a..f54a245424 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -22,15 +22,6 @@ config K3_SYSTEM_CONTROLLER help Say 'y' here to add support for TI' K3 System Controller. -config REMOTEPROC_K3 - bool "Support for TI's K3 based remoteproc driver" - select REMOTEPROC - depends on DM - depends on ARCH_K3 - depends on OF_CONTROL - help - Say 'y' here to add support for TI' K3 remoteproc driver. - config REMOTEPROC_SANDBOX bool "Support for Test processor for Sandbox" select REMOTEPROC @@ -40,6 +31,27 @@ config REMOTEPROC_SANDBOX Say 'y' here to add support for test processor which does dummy operations for sandbox platform. +config REMOTEPROC_STM32_COPRO + bool "Support for STM32 coprocessor" + select REMOTEPROC + depends on DM + depends on ARCH_STM32MP + depends on OF_CONTROL + help + Say 'y' here to add support for STM32 Cortex-M4 coprocessors via the + remoteproc framework. + +config REMOTEPROC_TI_K3_ARM64 + bool "Support for TI's K3 based ARM64 remoteproc driver" + select REMOTEPROC + depends on DM + depends on ARCH_K3 + depends on OF_CONTROL + help + Say y here to support TI's ARM64 processor subsystems + on various TI K3 family of SoCs through the remote processor + framework. + config REMOTEPROC_TI_POWER bool "Support for TI Power processor" select REMOTEPROC diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 77eb708523..271ba55b09 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -4,10 +4,11 @@ # Texas Instruments Incorporated - http://www.ti.com/ # -obj-$(CONFIG_$(SPL_)REMOTEPROC) += rproc-uclass.o +obj-$(CONFIG_$(SPL_)REMOTEPROC) += rproc-uclass.o rproc-elf-loader.o # Remote proc drivers - Please keep this list alphabetically sorted. 
obj-$(CONFIG_K3_SYSTEM_CONTROLLER) += k3_system_controller.o -obj-$(CONFIG_REMOTEPROC_K3) += k3_rproc.o obj-$(CONFIG_REMOTEPROC_SANDBOX) += sandbox_testproc.o +obj-$(CONFIG_REMOTEPROC_STM32_COPRO) += stm32_copro.o +obj-$(CONFIG_REMOTEPROC_TI_K3_ARM64) += ti_k3_arm64_rproc.o obj-$(CONFIG_REMOTEPROC_TI_POWER) += ti_power_proc.o diff --git a/drivers/remoteproc/rproc-elf-loader.c b/drivers/remoteproc/rproc-elf-loader.c new file mode 100644 index 0000000000..67937a7595 --- /dev/null +++ b/drivers/remoteproc/rproc-elf-loader.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +/* + * Copyright (C) 2019, STMicroelectronics - All Rights Reserved + */ +#include <common.h> +#include <dm.h> +#include <elf.h> +#include <remoteproc.h> + +/* Basic function to verify ELF32 image format */ +int rproc_elf32_sanity_check(ulong addr, ulong size) +{ + Elf32_Ehdr *ehdr; + char class; + + if (!addr) { + pr_debug("Invalid fw address?\n"); + return -EFAULT; + } + + if (size < sizeof(Elf32_Ehdr)) { + pr_debug("Image is too small\n"); + return -ENOSPC; + } + + ehdr = (Elf32_Ehdr *)addr; + class = ehdr->e_ident[EI_CLASS]; + + if (!IS_ELF(*ehdr) || ehdr->e_type != ET_EXEC || class != ELFCLASS32) { + pr_debug("Not an executable ELF32 image\n"); + return -EPROTONOSUPPORT; + } + + /* We assume the firmware has the same endianness as the host */ +# ifdef __LITTLE_ENDIAN + if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) { +# else /* BIG ENDIAN */ + if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { +# endif + pr_debug("Unsupported firmware endianness\n"); + return -EILSEQ; + } + + if (size < ehdr->e_shoff + sizeof(Elf32_Shdr)) { + pr_debug("Image is too small\n"); + return -ENOSPC; + } + + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { + pr_debug("Image is corrupted (bad magic)\n"); + return -EBADF; + } + + if (ehdr->e_phnum == 0) { + pr_debug("No loadable segments\n"); + return -ENOEXEC; + } + + if (ehdr->e_phoff > size) { + pr_debug("Firmware size is too small\n"); + return -ENOSPC; + } + + return 0; +} + +/* A very simple elf loader, assumes the image is valid */ +int rproc_elf32_load_image(struct udevice *dev, unsigned long addr) +{ + Elf32_Ehdr *ehdr; /* Elf header structure pointer */ + Elf32_Phdr *phdr; /* Program header structure pointer */ + const struct dm_rproc_ops *ops; + unsigned int i; + + ehdr = (Elf32_Ehdr *)addr; + phdr = (Elf32_Phdr *)(addr + ehdr->e_phoff); + + ops = rproc_get_ops(dev); + + /* Load each program header */ + for (i = 0; i < ehdr->e_phnum; ++i) { + void *dst = (void *)(uintptr_t)phdr->p_paddr; + void *src = (void *)addr + phdr->p_offset; + + if (phdr->p_type != PT_LOAD) + continue; + + if (ops->device_to_virt) + dst = ops->device_to_virt(dev, (ulong)dst); + + dev_dbg(dev, "Loading phdr %i to 0x%p (%i bytes)\n", + i, dst, phdr->p_filesz); + if (phdr->p_filesz) + memcpy(dst, src, phdr->p_filesz); + if (phdr->p_filesz != phdr->p_memsz) + memset(dst + phdr->p_filesz, 0x00, + phdr->p_memsz - phdr->p_filesz); + flush_cache(rounddown((unsigned long)dst, ARCH_DMA_MINALIGN), + roundup((unsigned long)dst + phdr->p_filesz, + ARCH_DMA_MINALIGN) - + rounddown((unsigned long)dst, ARCH_DMA_MINALIGN)); + ++phdr; + } + + return 0; +} diff --git a/drivers/remoteproc/sandbox_testproc.c b/drivers/remoteproc/sandbox_testproc.c index 51a67e6bf1..5f35119ab7 100644 --- a/drivers/remoteproc/sandbox_testproc.c +++ b/drivers/remoteproc/sandbox_testproc.c @@ -8,6 +8,7 @@ #include <dm.h> #include <errno.h> #include <remoteproc.h> +#include <asm/io.h> /** * enum sandbox_state - different device states @@ 
-300,6 +301,23 @@ static int sandbox_testproc_ping(struct udevice *dev) return ret; } +#define SANDBOX_RPROC_DEV_TO_PHY_OFFSET 0x1000 +/** + * sandbox_testproc_device_to_virt() - Convert device address to virtual address + * @dev: device to operate upon + * @da: device address + * @return converted virtual address + */ +static void *sandbox_testproc_device_to_virt(struct udevice *dev, ulong da) +{ + u64 paddr; + + /* Use a simple offset conversion */ + paddr = da + SANDBOX_RPROC_DEV_TO_PHY_OFFSET; + + return phys_to_virt(paddr); +} + static const struct dm_rproc_ops sandbox_testproc_ops = { .init = sandbox_testproc_init, .reset = sandbox_testproc_reset, @@ -308,6 +326,7 @@ static const struct dm_rproc_ops sandbox_testproc_ops = { .stop = sandbox_testproc_stop, .is_running = sandbox_testproc_is_running, .ping = sandbox_testproc_ping, + .device_to_virt = sandbox_testproc_device_to_virt, }; static const struct udevice_id sandbox_ids[] = { diff --git a/drivers/remoteproc/stm32_copro.c b/drivers/remoteproc/stm32_copro.c new file mode 100644 index 0000000000..de3b9729f3 --- /dev/null +++ b/drivers/remoteproc/stm32_copro.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +/* + * Copyright (C) 2019, STMicroelectronics - All Rights Reserved + */ +#define pr_fmt(fmt) "%s: " fmt, __func__ +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <fdtdec.h> +#include <regmap.h> +#include <remoteproc.h> +#include <reset.h> +#include <syscon.h> +#include <asm/io.h> + +#define RCC_GCR_HOLD_BOOT 0 +#define RCC_GCR_RELEASE_BOOT 1 + +/** + * struct stm32_copro_privdata - power processor private data + * @reset_ctl: reset controller handle + * @hold_boot_regmap: regmap for remote processor reset hold boot + * @hold_boot_offset: offset of the register controlling the hold boot setting + * @hold_boot_mask: bitmask of the register for the hold boot field + * @is_running: is the remote processor running + */ +struct stm32_copro_privdata { + struct reset_ctl reset_ctl; + struct regmap *hold_boot_regmap; + uint hold_boot_offset; + uint hold_boot_mask; + bool is_running; +}; + +/** + * stm32_copro_probe() - Basic probe + * @dev: corresponding STM32 remote processor device + * @return 0 if all went ok, else corresponding -ve error + */ +static int stm32_copro_probe(struct udevice *dev) +{ + struct stm32_copro_privdata *priv; + struct regmap *regmap; + const fdt32_t *cell; + int len, ret; + + priv = dev_get_priv(dev); + + regmap = syscon_regmap_lookup_by_phandle(dev, "st,syscfg-holdboot"); + if (IS_ERR(regmap)) { + dev_err(dev, "unable to find holdboot regmap (%ld)\n", + PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + + cell = dev_read_prop(dev, "st,syscfg-holdboot", &len); + if (len < 3 * sizeof(fdt32_t)) { + dev_err(dev, "holdboot offset and mask not available\n"); + return -EINVAL; + } + + priv->hold_boot_regmap = regmap; + priv->hold_boot_offset = fdtdec_get_number(cell + 1, 1); + priv->hold_boot_mask = fdtdec_get_number(cell + 2, 1); + + ret = reset_get_by_index(dev, 0, &priv->reset_ctl); + if (ret) { + dev_err(dev, "failed to get reset (%d)\n", ret); + return ret; + } + + dev_dbg(dev, "probed\n"); + + return 0; +} + +/** + * stm32_copro_set_hold_boot() - Hold boot bit management + * @dev: corresponding STM32 remote processor device + * @hold: hold boot value + * @return 0 if all went ok, else corresponding -ve error + */ +static int stm32_copro_set_hold_boot(struct udevice *dev, bool hold) +{ + struct stm32_copro_privdata *priv; + uint val; + int ret; + + priv = 
dev_get_priv(dev); + + val = hold ? RCC_GCR_HOLD_BOOT : RCC_GCR_RELEASE_BOOT; + + /* + * Note: shall run an SMC call (STM32_SMC_RCC) if platform is secured. + * To be updated when the code for this SMC service is available which + * is not the case for the time being. + */ + ret = regmap_update_bits(priv->hold_boot_regmap, priv->hold_boot_offset, + priv->hold_boot_mask, val); + if (ret) + dev_err(dev, "failed to set hold boot\n"); + + return ret; +} + +/** + * stm32_copro_device_to_virt() - Convert device address to virtual address + * @dev: corresponding STM32 remote processor device + * @da: device address + * @return converted virtual address + */ +static void *stm32_copro_device_to_virt(struct udevice *dev, ulong da) +{ + fdt32_t in_addr = cpu_to_be32(da); + u64 paddr; + + paddr = dev_translate_dma_address(dev, &in_addr); + if (paddr == OF_BAD_ADDR) { + dev_err(dev, "Unable to convert address %ld\n", da); + return NULL; + } + + return phys_to_virt(paddr); +} + +/** + * stm32_copro_load() - Loadup the STM32 remote processor + * @dev: corresponding STM32 remote processor device + * @addr: Address in memory where image is stored + * @size: Size in bytes of the image + * @return 0 if all went ok, else corresponding -ve error + */ +static int stm32_copro_load(struct udevice *dev, ulong addr, ulong size) +{ + struct stm32_copro_privdata *priv; + int ret; + + priv = dev_get_priv(dev); + + ret = stm32_copro_set_hold_boot(dev, true); + if (ret) + return ret; + + ret = reset_assert(&priv->reset_ctl); + if (ret) { + dev_err(dev, "Unable to assert reset line (ret=%d)\n", ret); + return ret; + } + + /* Support only ELF32 image */ + ret = rproc_elf32_sanity_check(addr, size); + if (ret) { + dev_err(dev, "Invalid ELF32 image (%d)\n", ret); + return ret; + } + + return rproc_elf32_load_image(dev, addr); +} + +/** + * stm32_copro_start() - Start the STM32 remote processor + * @dev: corresponding STM32 remote processor device + * @return 0 if all went ok, else corresponding -ve error + */ +static int stm32_copro_start(struct udevice *dev) +{ + struct stm32_copro_privdata *priv; + int ret; + + priv = dev_get_priv(dev); + + /* move hold boot from true to false start the copro */ + ret = stm32_copro_set_hold_boot(dev, false); + if (ret) + return ret; + + /* + * Once copro running, reset hold boot flag to avoid copro + * rebooting autonomously + */ + ret = stm32_copro_set_hold_boot(dev, true); + priv->is_running = !ret; + return ret; +} + +/** + * stm32_copro_reset() - Reset the STM32 remote processor + * @dev: corresponding STM32 remote processor device + * @return 0 if all went ok, else corresponding -ve error + */ +static int stm32_copro_reset(struct udevice *dev) +{ + struct stm32_copro_privdata *priv; + int ret; + + priv = dev_get_priv(dev); + + ret = stm32_copro_set_hold_boot(dev, true); + if (ret) + return ret; + + ret = reset_assert(&priv->reset_ctl); + if (ret) { + dev_err(dev, "Unable to assert reset line (ret=%d)\n", ret); + return ret; + } + + priv->is_running = false; + + return 0; +} + +/** + * stm32_copro_stop() - Stop the STM32 remote processor + * @dev: corresponding STM32 remote processor device + * @return 0 if all went ok, else corresponding -ve error + */ +static int stm32_copro_stop(struct udevice *dev) +{ + return stm32_copro_reset(dev); +} + +/** + * stm32_copro_is_running() - Is the STM32 remote processor running + * @dev: corresponding STM32 remote processor device + * @return 1 if the remote processor is running, 0 otherwise + */ +static int stm32_copro_is_running(struct udevice 
*dev) +{ + struct stm32_copro_privdata *priv; + + priv = dev_get_priv(dev); + return priv->is_running; +} + +static const struct dm_rproc_ops stm32_copro_ops = { + .load = stm32_copro_load, + .start = stm32_copro_start, + .stop = stm32_copro_stop, + .reset = stm32_copro_reset, + .is_running = stm32_copro_is_running, + .device_to_virt = stm32_copro_device_to_virt, +}; + +static const struct udevice_id stm32_copro_ids[] = { + {.compatible = "st,stm32mp1-rproc"}, + {} +}; + +U_BOOT_DRIVER(stm32_copro) = { + .name = "stm32_m4_proc", + .of_match = stm32_copro_ids, + .id = UCLASS_REMOTEPROC, + .ops = &stm32_copro_ops, + .probe = stm32_copro_probe, + .priv_auto_alloc_size = sizeof(struct stm32_copro_privdata), +}; diff --git a/drivers/remoteproc/k3_rproc.c b/drivers/remoteproc/ti_k3_arm64_rproc.c index 3c29d925ce..9676a96f98 100644 --- a/drivers/remoteproc/k3_rproc.c +++ b/drivers/remoteproc/ti_k3_arm64_rproc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Texas Instruments' K3 Remoteproc driver + * Texas Instruments' K3 ARM64 Remoteproc driver * * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ * Lokesh Vutla <lokeshvutla@ti.com> @@ -16,6 +16,7 @@ #include <asm/io.h> #include <power-domain.h> #include <linux/soc/ti/ti_sci_protocol.h> +#include "ti_sci_proc.h" #define INVALID_ID 0xffff @@ -23,68 +24,53 @@ #define GTC_CNTR_EN 0x3 /** - * struct k3_rproc_privdata - Structure representing Remote processor data. + * struct k3_arm64_privdata - Structure representing Remote processor data. * @rproc_pwrdmn: rproc power domain data * @rproc_rst: rproc reset control data * @sci: Pointer to TISCI handle + * @tsp: TISCI processor control helper structure * @gtc_base: Timer base address. - * @proc_id: TISCI processor ID - * @host_id: TISCI host id to which the processor gets assigned to. */ -struct k3_rproc_privdata { +struct k3_arm64_privdata { struct power_domain rproc_pwrdmn; struct power_domain gtc_pwrdmn; struct reset_ctl rproc_rst; - const struct ti_sci_handle *sci; + struct ti_sci_proc tsp; void *gtc_base; - u16 proc_id; - u16 host_id; }; /** - * k3_rproc_load() - Load up the Remote processor image + * k3_arm64_load() - Load up the Remote processor image * @dev: rproc device pointer * @addr: Address at which image is available * @size: size of the image * * Return: 0 if all goes good, else appropriate error message. 
*/ -static int k3_rproc_load(struct udevice *dev, ulong addr, ulong size) +static int k3_arm64_load(struct udevice *dev, ulong addr, ulong size) { - struct k3_rproc_privdata *rproc = dev_get_priv(dev); - const struct ti_sci_proc_ops *pops = &rproc->sci->ops.proc_ops; + struct k3_arm64_privdata *rproc = dev_get_priv(dev); int ret; dev_dbg(dev, "%s addr = 0x%lx, size = 0x%lx\n", __func__, addr, size); /* request for the processor */ - ret = pops->proc_request(rproc->sci, rproc->proc_id); - if (ret) { - dev_err(dev, "Requesting processor failed %d\n", ret); + ret = ti_sci_proc_request(&rproc->tsp); + if (ret) return ret; - } - - ret = pops->set_proc_boot_cfg(rproc->sci, rproc->proc_id, addr, 0, 0); - if (ret) { - dev_err(dev, "set_proc_boot_cfg failed %d\n", ret); - return ret; - } - - dev_dbg(dev, "%s: rproc successfully loaded\n", __func__); - return 0; + return ti_sci_proc_set_config(&rproc->tsp, addr, 0, 0); } /** - * k3_rproc_start() - Start the remote processor + * k3_arm64_start() - Start the remote processor * @dev: rproc device pointer * * Return: 0 if all went ok, else return appropriate error */ -static int k3_rproc_start(struct udevice *dev) +static int k3_arm64_start(struct udevice *dev) { - struct k3_rproc_privdata *rproc = dev_get_priv(dev); - const struct ti_sci_proc_ops *pops = &rproc->sci->ops.proc_ops; + struct k3_arm64_privdata *rproc = dev_get_priv(dev); int ret; dev_dbg(dev, "%s\n", __func__); @@ -109,33 +95,16 @@ static int k3_rproc_start(struct udevice *dev) return ret; } - if (rproc->host_id != INVALID_ID) { - ret = pops->proc_handover(rproc->sci, rproc->proc_id, - rproc->host_id); - if (ret) { - dev_err(dev, "Handover processor failed %d\n", ret); - return ret; - } - } else { - ret = pops->proc_release(rproc->sci, rproc->proc_id); - if (ret) { - dev_err(dev, "Processor release failed %d\n", ret); - return ret; - } - } - - dev_dbg(dev, "%s: rproc successfully started\n", __func__); - - return 0; + return ti_sci_proc_release(&rproc->tsp); } /** - * k3_rproc_init() - Initialize the remote processor + * k3_arm64_init() - Initialize the remote processor * @dev: rproc device pointer * * Return: 0 if all went ok, else return appropriate error */ -static int k3_rproc_init(struct udevice *dev) +static int k3_arm64_init(struct udevice *dev) { dev_dbg(dev, "%s\n", __func__); @@ -145,12 +114,33 @@ static int k3_rproc_init(struct udevice *dev) return 0; } -static const struct dm_rproc_ops k3_rproc_ops = { - .init = k3_rproc_init, - .load = k3_rproc_load, - .start = k3_rproc_start, +static const struct dm_rproc_ops k3_arm64_ops = { + .init = k3_arm64_init, + .load = k3_arm64_load, + .start = k3_arm64_start, }; +static int ti_sci_proc_of_to_priv(struct udevice *dev, struct ti_sci_proc *tsp) +{ + dev_dbg(dev, "%s\n", __func__); + + tsp->sci = ti_sci_get_by_phandle(dev, "ti,sci"); + if (IS_ERR(tsp->sci)) { + dev_err(dev, "ti_sci get failed: %ld\n", PTR_ERR(tsp->sci)); + return PTR_ERR(tsp->sci); + } + + tsp->proc_id = dev_read_u32_default(dev, "ti,sci-proc-id", INVALID_ID); + if (tsp->proc_id == INVALID_ID) { + dev_err(dev, "proc id not populated\n"); + return -ENOENT; + } + tsp->host_id = dev_read_u32_default(dev, "ti,sci-host-id", INVALID_ID); + tsp->ops = &tsp->sci->ops.proc_ops; + + return 0; +} + /** * k3_of_to_priv() - generate private data from device tree * @dev: corresponding k3 remote processor device @@ -158,8 +148,8 @@ static const struct dm_rproc_ops k3_rproc_ops = { * * Return: 0 if all goes good, else appropriate error message. 
*/ -static int k3_rproc_of_to_priv(struct udevice *dev, - struct k3_rproc_privdata *rproc) +static int k3_arm64_of_to_priv(struct udevice *dev, + struct k3_arm64_privdata *rproc) { int ret; @@ -183,11 +173,9 @@ static int k3_rproc_of_to_priv(struct udevice *dev, return ret; } - rproc->sci = ti_sci_get_by_phandle(dev, "ti,sci"); - if (IS_ERR(rproc->sci)) { - dev_err(dev, "ti_sci get failed: %d\n", ret); - return PTR_ERR(rproc->sci); - } + ret = ti_sci_proc_of_to_priv(dev, &rproc->tsp); + if (ret) + return ret; rproc->gtc_base = dev_read_addr_ptr(dev); if (!rproc->gtc_base) { @@ -195,30 +183,25 @@ static int k3_rproc_of_to_priv(struct udevice *dev, return -ENODEV; } - rproc->proc_id = dev_read_u32_default(dev, "ti,sci-proc-id", - INVALID_ID); - rproc->host_id = dev_read_u32_default(dev, "ti,sci-host-id", - INVALID_ID); - return 0; } /** - * k3_rproc_probe() - Basic probe + * k3_arm64_probe() - Basic probe * @dev: corresponding k3 remote processor device * * Return: 0 if all goes good, else appropriate error message. */ -static int k3_rproc_probe(struct udevice *dev) +static int k3_arm64_probe(struct udevice *dev) { - struct k3_rproc_privdata *priv; + struct k3_arm64_privdata *priv; int ret; dev_dbg(dev, "%s\n", __func__); priv = dev_get_priv(dev); - ret = k3_rproc_of_to_priv(dev, priv); + ret = k3_arm64_of_to_priv(dev, priv); if (ret) { dev_dbg(dev, "%s: Probe failed with error %d\n", __func__, ret); return ret; @@ -229,16 +212,17 @@ static int k3_rproc_probe(struct udevice *dev) return 0; } -static const struct udevice_id k3_rproc_ids[] = { +static const struct udevice_id k3_arm64_ids[] = { + { .compatible = "ti,am654-arm64"}, { .compatible = "ti,am654-rproc"}, {} }; -U_BOOT_DRIVER(k3_rproc) = { - .name = "k3_rproc", - .of_match = k3_rproc_ids, +U_BOOT_DRIVER(k3_arm64) = { + .name = "k3_arm64", + .of_match = k3_arm64_ids, .id = UCLASS_REMOTEPROC, - .ops = &k3_rproc_ops, - .probe = k3_rproc_probe, - .priv_auto_alloc_size = sizeof(struct k3_rproc_privdata), + .ops = &k3_arm64_ops, + .probe = k3_arm64_probe, + .priv_auto_alloc_size = sizeof(struct k3_arm64_privdata), }; diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h new file mode 100644 index 0000000000..ccfc39ec88 --- /dev/null +++ b/drivers/remoteproc/ti_sci_proc.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Texas Instruments TI-SCI Processor Controller Helper Functions + * + * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Lokesh Vutla <lokeshvutla@ti.com> + * Suman Anna <s-anna@ti.com> + */ + +#ifndef REMOTEPROC_TI_SCI_PROC_H +#define REMOTEPROC_TI_SCI_PROC_H + +#define TISCI_INVALID_HOST 0xff + +/** + * struct ti_sci_proc - structure representing a processor control client + * @sci: cached TI-SCI protocol handle + * @ops: cached TI-SCI proc ops + * @proc_id: processor id for the consumer remoteproc device + * @host_id: host id to pass the control over for this consumer remoteproc + * device + */ +struct ti_sci_proc { + const struct ti_sci_handle *sci; + const struct ti_sci_proc_ops *ops; + u8 proc_id; + u8 host_id; +}; + +static inline int ti_sci_proc_request(struct ti_sci_proc *tsp) +{ + int ret; + + debug("%s: proc_id = %d\n", __func__, tsp->proc_id); + + ret = tsp->ops->proc_request(tsp->sci, tsp->proc_id); + if (ret) + pr_err("ti-sci processor request failed: %d\n", ret); + return ret; +} + +static inline int ti_sci_proc_release(struct ti_sci_proc *tsp) +{ + int ret; + + debug("%s: proc_id = %d\n", __func__, tsp->proc_id); + + if (tsp->host_id != 
TISCI_INVALID_HOST) + ret = tsp->ops->proc_handover(tsp->sci, tsp->proc_id, + tsp->host_id); + else + ret = tsp->ops->proc_release(tsp->sci, tsp->proc_id); + + if (ret) + pr_err("ti-sci processor release failed: %d\n", ret); + return ret; +} + +static inline int ti_sci_proc_handover(struct ti_sci_proc *tsp) +{ + int ret; + + debug("%s: proc_id = %d\n", __func__, tsp->proc_id); + + ret = tsp->ops->proc_handover(tsp->sci, tsp->proc_id, tsp->host_id); + if (ret) + pr_err("ti-sci processor handover of %d to %d failed: %d\n", + tsp->proc_id, tsp->host_id, ret); + return ret; +} + +static inline int ti_sci_proc_get_status(struct ti_sci_proc *tsp, + u64 *boot_vector, u32 *cfg_flags, + u32 *ctrl_flags, u32 *status_flags) +{ + int ret; + + ret = tsp->ops->get_proc_boot_status(tsp->sci, tsp->proc_id, + boot_vector, cfg_flags, ctrl_flags, + status_flags); + if (ret) + pr_err("ti-sci processor get_status failed: %d\n", ret); + + debug("%s: proc_id = %d, boot_vector = 0x%llx, cfg_flags = 0x%x, ctrl_flags = 0x%x, sts = 0x%x\n", + __func__, tsp->proc_id, *boot_vector, *cfg_flags, *ctrl_flags, + *status_flags); + return ret; +} + +static inline int ti_sci_proc_set_config(struct ti_sci_proc *tsp, + u64 boot_vector, + u32 cfg_set, u32 cfg_clr) +{ + int ret; + + debug("%s: proc_id = %d, boot_vector = 0x%llx, cfg_set = 0x%x, cfg_clr = 0x%x\n", + __func__, tsp->proc_id, boot_vector, cfg_set, cfg_clr); + + ret = tsp->ops->set_proc_boot_cfg(tsp->sci, tsp->proc_id, boot_vector, + cfg_set, cfg_clr); + if (ret) + pr_err("ti-sci processor set_config failed: %d\n", ret); + return ret; +} + +static inline int ti_sci_proc_set_control(struct ti_sci_proc *tsp, + u32 ctrl_set, u32 ctrl_clr) +{ + int ret; + + debug("%s: proc_id = %d, ctrl_set = 0x%x, ctrl_clr = 0x%x\n", __func__, + tsp->proc_id, ctrl_set, ctrl_clr); + + ret = tsp->ops->set_proc_boot_ctrl(tsp->sci, tsp->proc_id, ctrl_set, + ctrl_clr); + if (ret) + pr_err("ti-sci processor set_control failed: %d\n", ret); + return ret; +} + +#endif /* REMOTEPROC_TI_SCI_PROC_H */ diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index ee4cbcb02f..822a3fe265 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c @@ -14,6 +14,7 @@ #include <common.h> #include <dm.h> +#include <dm/lists.h> #include <dm/of_access.h> #include <reset-uclass.h> #include <linux/bitops.h> @@ -130,6 +131,23 @@ static int socfpga_reset_remove(struct udevice *dev) return 0; } +static int socfpga_reset_bind(struct udevice *dev) +{ + int ret; + struct udevice *sys_child; + + /* + * The sysreset driver does not have a device node, so bind it here. + * Bind it to the node, too, so that it can get its base address. + */ + ret = device_bind_driver_to_node(dev, "socfpga_sysreset", "sysreset", + dev->node, &sys_child); + if (ret) + debug("Warning: No sysreset driver: ret=%d\n", ret); + + return 0; +} + static const struct udevice_id socfpga_reset_match[] = { { .compatible = "altr,rst-mgr" }, { /* sentinel */ }, @@ -139,6 +157,7 @@ U_BOOT_DRIVER(socfpga_reset) = { .name = "socfpga-reset", .id = UCLASS_RESET, .of_match = socfpga_reset_match, + .bind = socfpga_reset_bind, .probe = socfpga_reset_probe, .priv_auto_alloc_size = sizeof(struct socfpga_reset_data), .ops = &socfpga_reset_ops, diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 532e94d337..860b73d369 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -86,6 +86,11 @@ config RTC_RX8010SJ help Support for Epson RX8010SJ Real Time Clock devices. 
+config RTC_RX8025 + bool "Enable RX8025 driver" + help + Support for Epson RX8025 Real Time Clock devices. + config RTC_PL031 bool "Enable ARM AMBA PL031 RTC driver" help @@ -120,4 +125,10 @@ config RTC_M41T62 Enable driver for ST's M41T62 compatible RTC devices (like RV-4162). It is a serial (I2C) real-time clock (RTC) with alarm. +config RTC_STM32 + bool "Enable STM32 RTC driver" + depends on DM_RTC + help + Enable STM32 RTC driver. This driver supports the rtc that is present + on some STM32 SoCs. endmenu diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 915adb87fe..f97a669982 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -51,5 +51,6 @@ obj-$(CONFIG_RTC_RX8025) += rx8025.o obj-$(CONFIG_RTC_RX8010SJ) += rx8010sj.o obj-$(CONFIG_RTC_S3C24X0) += s3c24x0_rtc.o obj-$(CONFIG_RTC_S35392A) += s35392a.o +obj-$(CONFIG_RTC_STM32) += stm32_rtc.o obj-$(CONFIG_SANDBOX) += sandbox_rtc.o obj-$(CONFIG_RTC_X1205) += x1205.o diff --git a/drivers/rtc/rx8025.c b/drivers/rtc/rx8025.c index 7bd9f8b42a..e717dcbbfe 100644 --- a/drivers/rtc/rx8025.c +++ b/drivers/rtc/rx8025.c @@ -10,8 +10,9 @@ #include <common.h> #include <command.h> -#include <rtc.h> +#include <dm.h> #include <i2c.h> +#include <rtc.h> /*---------------------------------------------------------------------*/ #undef DEBUG_RTC @@ -27,6 +28,18 @@ # define CONFIG_SYS_I2C_RTC_ADDR 0x32 #endif +#ifdef CONFIG_DM_RTC +#define DEV_TYPE struct udevice +#else +/* Local udevice */ +struct ludevice { + u8 chip; +}; + +#define DEV_TYPE struct ludevice + +#endif + /* * RTC register addresses */ @@ -68,21 +81,35 @@ */ /* static uchar rtc_read (uchar reg); */ +#ifdef CONFIG_DM_RTC +/* + * on mpc85xx based board with DM and offset len 1 + * accessing rtc works fine. May we can drop this ? + */ +#define rtc_read(reg) buf[(reg) & 0xf] +#else #define rtc_read(reg) buf[((reg) + 1) & 0xf] +#endif -static void rtc_write (uchar reg, uchar val); +static int rtc_write(DEV_TYPE *dev, uchar reg, uchar val); /* * Get the current time from the RTC */ -int rtc_get (struct rtc_time *tmp) +static int rx8025_rtc_get(DEV_TYPE *dev, struct rtc_time *tmp) { int rel = 0; uchar sec, min, hour, mday, wday, mon, year, ctl2; uchar buf[16]; - if (i2c_read(CONFIG_SYS_I2C_RTC_ADDR, 0, 0, buf, 16)) +#ifdef CONFIG_DM_RTC + if (dm_i2c_read(dev, 0, buf, sizeof(buf))) { +#else + if (i2c_read(dev->chip, 0, 0, buf, 16)) { +#endif printf("Error reading from RTC\n"); + return -EIO; + } sec = rtc_read(RTC_SEC_REG_ADDR); min = rtc_read(RTC_MIN_REG_ADDR); @@ -92,9 +119,9 @@ int rtc_get (struct rtc_time *tmp) mon = rtc_read(RTC_MON_REG_ADDR); year = rtc_read(RTC_YR_REG_ADDR); - DEBUGR ("Get RTC year: %02x mon: %02x mday: %02x wday: %02x " - "hr: %02x min: %02x sec: %02x\n", - year, mon, mday, wday, hour, min, sec); + DEBUGR("Get RTC year: %02x mon: %02x mday: %02x wday: %02x " + "hr: %02x min: %02x sec: %02x\n", + year, mon, mday, wday, hour, min, sec); /* dump status */ ctl2 = rtc_read(RTC_CTL2_REG_ADDR); @@ -113,13 +140,14 @@ int rtc_get (struct rtc_time *tmp) rel = -1; } - tmp->tm_sec = bcd2bin (sec & 0x7F); - tmp->tm_min = bcd2bin (min & 0x7F); + tmp->tm_sec = bcd2bin(sec & 0x7F); + tmp->tm_min = bcd2bin(min & 0x7F); if (rtc_read(RTC_CTL1_REG_ADDR) & RTC_CTL1_BIT_2412) - tmp->tm_hour = bcd2bin (hour & 0x3F); + tmp->tm_hour = bcd2bin(hour & 0x3F); else - tmp->tm_hour = bcd2bin (hour & 0x1F) % 12 + + tmp->tm_hour = bcd2bin(hour & 0x1F) % 12 + ((hour & 0x20) ? 
12 : 0); + tmp->tm_mday = bcd2bin (mday & 0x3F); tmp->tm_mon = bcd2bin (mon & 0x1F); tmp->tm_year = bcd2bin (year) + ( bcd2bin (year) >= 70 ? 1900 : 2000); @@ -127,9 +155,9 @@ int rtc_get (struct rtc_time *tmp) tmp->tm_yday = 0; tmp->tm_isdst= 0; - DEBUGR ("Get DATE: %4d-%02d-%02d (wday=%d) TIME: %2d:%02d:%02d\n", - tmp->tm_year, tmp->tm_mon, tmp->tm_mday, tmp->tm_wday, - tmp->tm_hour, tmp->tm_min, tmp->tm_sec); + DEBUGR("Get DATE: %4d-%02d-%02d (wday=%d) TIME: %2d:%02d:%02d\n", + tmp->tm_year, tmp->tm_mon, tmp->tm_mday, tmp->tm_wday, + tmp->tm_hour, tmp->tm_min, tmp->tm_sec); return rel; } @@ -137,54 +165,142 @@ int rtc_get (struct rtc_time *tmp) /* * Set the RTC */ -int rtc_set (struct rtc_time *tmp) +static int rx8025_rtc_set(DEV_TYPE *dev, const struct rtc_time *tmp) { - DEBUGR ("Set DATE: %4d-%02d-%02d (wday=%d) TIME: %2d:%02d:%02d\n", - tmp->tm_year, tmp->tm_mon, tmp->tm_mday, tmp->tm_wday, - tmp->tm_hour, tmp->tm_min, tmp->tm_sec); + DEBUGR("Set DATE: %4d-%02d-%02d (wday=%d) TIME: %2d:%02d:%02d\n", + tmp->tm_year, tmp->tm_mon, tmp->tm_mday, tmp->tm_wday, + tmp->tm_hour, tmp->tm_min, tmp->tm_sec); if (tmp->tm_year < 1970 || tmp->tm_year > 2069) printf("WARNING: year should be between 1970 and 2069!\n"); - rtc_write (RTC_YR_REG_ADDR, bin2bcd (tmp->tm_year % 100)); - rtc_write (RTC_MON_REG_ADDR, bin2bcd (tmp->tm_mon)); - rtc_write (RTC_DAY_REG_ADDR, bin2bcd (tmp->tm_wday)); - rtc_write (RTC_DATE_REG_ADDR, bin2bcd (tmp->tm_mday)); - rtc_write (RTC_HR_REG_ADDR, bin2bcd (tmp->tm_hour)); - rtc_write (RTC_MIN_REG_ADDR, bin2bcd (tmp->tm_min)); - rtc_write (RTC_SEC_REG_ADDR, bin2bcd (tmp->tm_sec)); + if (rtc_write(dev, RTC_YR_REG_ADDR, bin2bcd(tmp->tm_year % 100))) + return -EIO; - rtc_write (RTC_CTL1_REG_ADDR, RTC_CTL1_BIT_2412); + if (rtc_write(dev, RTC_MON_REG_ADDR, bin2bcd(tmp->tm_mon))) + return -EIO; - return 0; + if (rtc_write(dev, RTC_DAY_REG_ADDR, bin2bcd(tmp->tm_wday))) + return -EIO; + + if (rtc_write(dev, RTC_DATE_REG_ADDR, bin2bcd(tmp->tm_mday))) + return -EIO; + + if (rtc_write(dev, RTC_HR_REG_ADDR, bin2bcd(tmp->tm_hour))) + return -EIO; + + if (rtc_write(dev, RTC_MIN_REG_ADDR, bin2bcd(tmp->tm_min))) + return -EIO; + + if (rtc_write(dev, RTC_SEC_REG_ADDR, bin2bcd(tmp->tm_sec))) + return -EIO; + + return rtc_write(dev, RTC_CTL1_REG_ADDR, RTC_CTL1_BIT_2412); } /* * Reset the RTC */ -void rtc_reset (void) +static int rx8025_rtc_reset(DEV_TYPE *dev) { uchar buf[16]; uchar ctl2; - if (i2c_read(CONFIG_SYS_I2C_RTC_ADDR, 0, 0, buf, 16)) +#ifdef CONFIG_DM_RTC + if (dm_i2c_read(dev, 0, buf, sizeof(buf))) { +#else + if (i2c_read(dev->chip, 0, 0, buf, 16)) { +#endif printf("Error reading from RTC\n"); + return -EIO; + } ctl2 = rtc_read(RTC_CTL2_REG_ADDR); ctl2 &= ~(RTC_CTL2_BIT_PON | RTC_CTL2_BIT_VDET); ctl2 |= RTC_CTL2_BIT_XST | RTC_CTL2_BIT_VDSL; - rtc_write (RTC_CTL2_REG_ADDR, ctl2); + + return rtc_write(dev, RTC_CTL2_REG_ADDR, ctl2); } /* * Helper functions */ -static void rtc_write (uchar reg, uchar val) +static int rtc_write(DEV_TYPE *dev, uchar reg, uchar val) { uchar buf[2]; buf[0] = reg << 4; buf[1] = val; - if (i2c_write(CONFIG_SYS_I2C_RTC_ADDR, 0, 0, buf, 2) != 0) + +#ifdef CONFIG_DM_RTC + if (dm_i2c_write(dev, 0, buf, 2)) { +#else + if (i2c_write(dev->chip, 0, 0, buf, 2) != 0) { +#endif printf("Error writing to RTC\n"); + return -EIO; + } + return 0; } + +#ifdef CONFIG_DM_RTC +static int rx8025_probe(struct udevice *dev) +{ + uchar buf[16]; + int ret = 0; + + if (i2c_get_chip_offset_len(dev) != 1) + ret = i2c_set_chip_offset_len(dev, 1); + + if (ret) + return ret; + + return 
dm_i2c_read(dev, 0, buf, sizeof(buf)); +} + +static const struct rtc_ops rx8025_rtc_ops = { + .get = rx8025_rtc_get, + .set = rx8025_rtc_set, + .reset = rx8025_rtc_reset, +}; + +static const struct udevice_id rx8025_rtc_ids[] = { + { .compatible = "epson,rx8025" }, + { } +}; + +U_BOOT_DRIVER(rx8010sj_rtc) = { + .name = "rx8025_rtc", + .id = UCLASS_RTC, + .probe = rx8025_probe, + .of_match = rx8025_rtc_ids, + .ops = &rx8025_rtc_ops, +}; +#else +int rtc_get(struct rtc_time *tm) +{ + struct ludevice dev = { + .chip = CONFIG_SYS_I2C_RTC_ADDR, + }; + + return rx8025_rtc_get(&dev, tm); +} + +int rtc_set(struct rtc_time *tm) +{ + struct ludevice dev = { + .chip = CONFIG_SYS_I2C_RTC_ADDR, + }; + + return rx8025_rtc_set(&dev, tm); +} + +void rtc_reset(void) +{ + struct ludevice dev = { + .chip = CONFIG_SYS_I2C_RTC_ADDR, + }; + + rx8025_rtc_reset(&dev); +} +#endif diff --git a/drivers/rtc/stm32_rtc.c b/drivers/rtc/stm32_rtc.c new file mode 100644 index 0000000000..abd339076a --- /dev/null +++ b/drivers/rtc/stm32_rtc.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause +/* + * Copyright (C) 2019, STMicroelectronics - All Rights Reserved + */ +#include <common.h> +#include <clk.h> +#include <dm.h> +#include <rtc.h> +#include <asm/io.h> +#include <linux/iopoll.h> + +#define STM32_RTC_TR 0x00 +#define STM32_RTC_DR 0x04 +#define STM32_RTC_ISR 0x0C +#define STM32_RTC_PRER 0x10 +#define STM32_RTC_CR 0x18 +#define STM32_RTC_WPR 0x24 + +/* STM32_RTC_TR bit fields */ +#define STM32_RTC_SEC_SHIFT 0 +#define STM32_RTC_SEC GENMASK(6, 0) +#define STM32_RTC_MIN_SHIFT 8 +#define STM32_RTC_MIN GENMASK(14, 8) +#define STM32_RTC_HOUR_SHIFT 16 +#define STM32_RTC_HOUR GENMASK(21, 16) + +/* STM32_RTC_DR bit fields */ +#define STM32_RTC_DATE_SHIFT 0 +#define STM32_RTC_DATE GENMASK(5, 0) +#define STM32_RTC_MONTH_SHIFT 8 +#define STM32_RTC_MONTH GENMASK(12, 8) +#define STM32_RTC_WDAY_SHIFT 13 +#define STM32_RTC_WDAY GENMASK(15, 13) +#define STM32_RTC_YEAR_SHIFT 16 +#define STM32_RTC_YEAR GENMASK(23, 16) + +/* STM32_RTC_CR bit fields */ +#define STM32_RTC_CR_FMT BIT(6) + +/* STM32_RTC_ISR/STM32_RTC_ICSR bit fields */ +#define STM32_RTC_ISR_INITS BIT(4) +#define STM32_RTC_ISR_RSF BIT(5) +#define STM32_RTC_ISR_INITF BIT(6) +#define STM32_RTC_ISR_INIT BIT(7) + +/* STM32_RTC_PRER bit fields */ +#define STM32_RTC_PRER_PRED_S_SHIFT 0 +#define STM32_RTC_PRER_PRED_S GENMASK(14, 0) +#define STM32_RTC_PRER_PRED_A_SHIFT 16 +#define STM32_RTC_PRER_PRED_A GENMASK(22, 16) + +/* STM32_RTC_WPR key constants */ +#define RTC_WPR_1ST_KEY 0xCA +#define RTC_WPR_2ND_KEY 0x53 +#define RTC_WPR_WRONG_KEY 0xFF + +struct stm32_rtc_priv { + fdt_addr_t base; +}; + +static int stm32_rtc_get(struct udevice *dev, struct rtc_time *tm) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + u32 tr, dr; + + tr = readl(priv->base + STM32_RTC_TR); + dr = readl(priv->base + STM32_RTC_DR); + + tm->tm_sec = bcd2bin((tr & STM32_RTC_SEC) >> STM32_RTC_SEC_SHIFT); + tm->tm_min = bcd2bin((tr & STM32_RTC_MIN) >> STM32_RTC_MIN_SHIFT); + tm->tm_hour = bcd2bin((tr & STM32_RTC_HOUR) >> STM32_RTC_HOUR_SHIFT); + + tm->tm_mday = bcd2bin((dr & STM32_RTC_DATE) >> STM32_RTC_DATE_SHIFT); + tm->tm_mon = bcd2bin((dr & STM32_RTC_MONTH) >> STM32_RTC_MONTH_SHIFT); + tm->tm_year = bcd2bin((dr & STM32_RTC_YEAR) >> STM32_RTC_YEAR_SHIFT); + tm->tm_wday = bcd2bin((dr & STM32_RTC_WDAY) >> STM32_RTC_WDAY_SHIFT); + tm->tm_yday = 0; + tm->tm_isdst = 0; + + dev_dbg(dev, "Get DATE: %4d-%02d-%02d (wday=%d) TIME: %2d:%02d:%02d\n", + tm->tm_year, tm->tm_mon, tm->tm_mday, 
tm->tm_wday, + tm->tm_hour, tm->tm_min, tm->tm_sec); + + return 0; +} + +static void stm32_rtc_unlock(struct udevice *dev) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + + writel(RTC_WPR_1ST_KEY, priv->base + STM32_RTC_WPR); + writel(RTC_WPR_2ND_KEY, priv->base + STM32_RTC_WPR); +} + +static void stm32_rtc_lock(struct udevice *dev) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + + writel(RTC_WPR_WRONG_KEY, priv->base + STM32_RTC_WPR); +} + +static int stm32_rtc_enter_init_mode(struct udevice *dev) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + u32 isr = readl(priv->base + STM32_RTC_ISR); + + if (!(isr & STM32_RTC_ISR_INITF)) { + isr |= STM32_RTC_ISR_INIT; + writel(isr, priv->base + STM32_RTC_ISR); + + return readl_poll_timeout(priv->base + STM32_RTC_ISR, + isr, + (isr & STM32_RTC_ISR_INITF), + 100000); + } + + return 0; +} + +static int stm32_rtc_wait_sync(struct udevice *dev) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + u32 isr = readl(priv->base + STM32_RTC_ISR); + + isr &= ~STM32_RTC_ISR_RSF; + writel(isr, priv->base + STM32_RTC_ISR); + + /* + * Wait for RSF to be set to ensure the calendar registers are + * synchronised, it takes around 2 rtc_ck clock cycles + */ + return readl_poll_timeout(priv->base + STM32_RTC_ISR, + isr, (isr & STM32_RTC_ISR_RSF), + 100000); +} + +static void stm32_rtc_exit_init_mode(struct udevice *dev) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + u32 isr = readl(priv->base + STM32_RTC_ISR); + + isr &= ~STM32_RTC_ISR_INIT; + writel(isr, priv->base + STM32_RTC_ISR); +} + +static int stm32_rtc_set_time(struct udevice *dev, u32 time, u32 date) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + int ret; + + stm32_rtc_unlock(dev); + + ret = stm32_rtc_enter_init_mode(dev); + if (ret) + goto lock; + + writel(time, priv->base + STM32_RTC_TR); + writel(date, priv->base + STM32_RTC_DR); + + stm32_rtc_exit_init_mode(dev); + + ret = stm32_rtc_wait_sync(dev); + +lock: + stm32_rtc_lock(dev); + return ret; +} + +static int stm32_rtc_set(struct udevice *dev, const struct rtc_time *tm) +{ + u32 t, d; + + dev_dbg(dev, "Set DATE: %4d-%02d-%02d (wday=%d) TIME: %2d:%02d:%02d\n", + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_wday, + tm->tm_hour, tm->tm_min, tm->tm_sec); + + /* Time in BCD format */ + t = (bin2bcd(tm->tm_sec) << STM32_RTC_SEC_SHIFT) & STM32_RTC_SEC; + t |= (bin2bcd(tm->tm_min) << STM32_RTC_MIN_SHIFT) & STM32_RTC_MIN; + t |= (bin2bcd(tm->tm_hour) << STM32_RTC_HOUR_SHIFT) & STM32_RTC_HOUR; + + /* Date in BCD format */ + d = (bin2bcd(tm->tm_mday) << STM32_RTC_DATE_SHIFT) & STM32_RTC_DATE; + d |= (bin2bcd(tm->tm_mon) << STM32_RTC_MONTH_SHIFT) & STM32_RTC_MONTH; + d |= (bin2bcd(tm->tm_year) << STM32_RTC_YEAR_SHIFT) & STM32_RTC_YEAR; + d |= (bin2bcd(tm->tm_wday) << STM32_RTC_WDAY_SHIFT) & STM32_RTC_WDAY; + + return stm32_rtc_set_time(dev, t, d); +} + +static int stm32_rtc_reset(struct udevice *dev) +{ + dev_dbg(dev, "Reset DATE\n"); + + return stm32_rtc_set_time(dev, 0, 0); +} + +static int stm32_rtc_init(struct udevice *dev) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + unsigned int prer, pred_a, pred_s, pred_a_max, pred_s_max, cr; + unsigned int rate; + struct clk clk; + int ret; + u32 isr = readl(priv->base + STM32_RTC_ISR); + + if (isr & STM32_RTC_ISR_INITS) + return 0; + + ret = clk_get_by_index(dev, 1, &clk); + if (ret) + return ret; + + ret = clk_enable(&clk); + if (ret) { + clk_free(&clk); + return ret; + } + + rate = clk_get_rate(&clk); + + /* Find prediv_a and prediv_s to obtain the 1Hz calendar clock 
*/ + pred_a_max = STM32_RTC_PRER_PRED_A >> STM32_RTC_PRER_PRED_A_SHIFT; + pred_s_max = STM32_RTC_PRER_PRED_S >> STM32_RTC_PRER_PRED_S_SHIFT; + + for (pred_a = pred_a_max; pred_a + 1 > 0; pred_a--) { + pred_s = (rate / (pred_a + 1)) - 1; + + if (((pred_s + 1) * (pred_a + 1)) == rate) + break; + } + + /* + * Can't find a 1Hz, so give priority to RTC power consumption + * by choosing the higher possible value for prediv_a + */ + if (pred_s > pred_s_max || pred_a > pred_a_max) { + pred_a = pred_a_max; + pred_s = (rate / (pred_a + 1)) - 1; + } + + stm32_rtc_unlock(dev); + + ret = stm32_rtc_enter_init_mode(dev); + if (ret) { + dev_err(dev, + "Can't enter in init mode. Prescaler config failed.\n"); + goto unlock; + } + + prer = (pred_s << STM32_RTC_PRER_PRED_S_SHIFT) & STM32_RTC_PRER_PRED_S; + prer |= (pred_a << STM32_RTC_PRER_PRED_A_SHIFT) & STM32_RTC_PRER_PRED_A; + writel(prer, priv->base + STM32_RTC_PRER); + + /* Force 24h time format */ + cr = readl(priv->base + STM32_RTC_CR); + cr &= ~STM32_RTC_CR_FMT; + writel(cr, priv->base + STM32_RTC_CR); + + stm32_rtc_exit_init_mode(dev); + + ret = stm32_rtc_wait_sync(dev); + +unlock: + stm32_rtc_lock(dev); + + if (ret) { + clk_disable(&clk); + clk_free(&clk); + } + + return ret; +} + +static int stm32_rtc_probe(struct udevice *dev) +{ + struct stm32_rtc_priv *priv = dev_get_priv(dev); + struct clk clk; + int ret; + + priv->base = dev_read_addr(dev); + if (priv->base == FDT_ADDR_T_NONE) + return -EINVAL; + + ret = clk_get_by_index(dev, 0, &clk); + if (ret) + return ret; + + ret = clk_enable(&clk); + if (ret) { + clk_free(&clk); + return ret; + } + + ret = stm32_rtc_init(dev); + + if (ret) { + clk_disable(&clk); + clk_free(&clk); + } + + return ret; +} + +static const struct rtc_ops stm32_rtc_ops = { + .get = stm32_rtc_get, + .set = stm32_rtc_set, + .reset = stm32_rtc_reset, +}; + +static const struct udevice_id stm32_rtc_ids[] = { + { .compatible = "st,stm32mp1-rtc" }, + { } +}; + +U_BOOT_DRIVER(rtc_stm32) = { + .name = "rtc-stm32", + .id = UCLASS_RTC, + .probe = stm32_rtc_probe, + .of_match = stm32_rtc_ids, + .ops = &stm32_rtc_ops, + .priv_auto_alloc_size = sizeof(struct stm32_rtc_priv), +}; diff --git a/drivers/serial/serial_mxc.c b/drivers/serial/serial_mxc.c index a435e68005..42abb96a26 100644 --- a/drivers/serial/serial_mxc.c +++ b/drivers/serial/serial_mxc.c @@ -342,6 +342,8 @@ static int mxc_serial_ofdata_to_platdata(struct udevice *dev) } static const struct udevice_id mxc_serial_ids[] = { + { .compatible = "fsl,imx21-uart" }, + { .compatible = "fsl,imx53-uart" }, { .compatible = "fsl,imx6sx-uart" }, { .compatible = "fsl,imx6ul-uart" }, { .compatible = "fsl,imx7d-uart" }, @@ -360,9 +362,7 @@ U_BOOT_DRIVER(serial_mxc) = { #endif .probe = mxc_serial_probe, .ops = &mxc_serial_ops, -#if !CONFIG_IS_ENABLED(OF_CONTROL) .flags = DM_FLAG_PRE_RELOC, -#endif }; #endif diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index cc174dd036..f459c0a411 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -158,13 +158,14 @@ config MT7621_SPI the SPI NOR flash on platforms embedding this Ralink / MediaTek SPI core, like MT7621/7628/7688. -config MTK_QSPI - bool "Mediatek QSPI driver" - imply SPI_FLASH_BAR +config MTK_SNFI_SPI + bool "Mediatek SPI memory controller driver" + depends on SPI_MEM help - Enable the Mediatek QSPI driver. This driver can be - used to access the SPI NOR flash on platforms embedding this - Mediatek QSPI IP core. + Enable the Mediatek SPI memory controller driver. This driver is + originally based on the MediaTek SNFI IP core. 
It can only be + used to access SPI memory devices like SPI-NOR or SPI-NAND on + platforms embedding this IP core, like MT7622/M7629. config MVEBU_A3700_SPI bool "Marvell Armada 3700 SPI driver" @@ -232,6 +233,14 @@ config SANDBOX_SPI }; }; +config SPI_SIFIVE + bool "SiFive SPI driver" + help + This driver supports the SiFive SPI IP. If unsure say N. + Enable the SiFive SPI controller driver. + + The SiFive SPI controller driver is found on various SiFive SoCs. + config SPI_SUNXI bool "Allwinner SoC SPI controllers" help diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index ab84122f08..ae4f2958f8 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -37,7 +37,7 @@ obj-$(CONFIG_LPC32XX_SSP) += lpc32xx_ssp.o obj-$(CONFIG_MESON_SPIFC) += meson_spifc.o obj-$(CONFIG_MPC8XX_SPI) += mpc8xx_spi.o obj-$(CONFIG_MPC8XXX_SPI) += mpc8xxx_spi.o -obj-$(CONFIG_MTK_QSPI) += mtk_qspi.o +obj-$(CONFIG_MTK_SNFI_SPI) += mtk_snfi_spi.o obj-$(CONFIG_MT7621_SPI) += mt7621_spi.o obj-$(CONFIG_MSCC_BB_SPI) += mscc_bb_spi.o obj-$(CONFIG_MVEBU_A3700_SPI) += mvebu_a3700_spi.o @@ -50,6 +50,7 @@ obj-$(CONFIG_PL022_SPI) += pl022_spi.o obj-$(CONFIG_RENESAS_RPC_SPI) += renesas_rpc_spi.o obj-$(CONFIG_ROCKCHIP_SPI) += rk_spi.o obj-$(CONFIG_SANDBOX_SPI) += sandbox_spi.o +obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o obj-$(CONFIG_SPI_SUNXI) += spi-sunxi.o obj-$(CONFIG_SH_SPI) += sh_spi.o obj-$(CONFIG_SH_QSPI) += sh_qspi.o diff --git a/drivers/spi/fsl_qspi.c b/drivers/spi/fsl_qspi.c index 1598c4f698..41abe1996f 100644 --- a/drivers/spi/fsl_qspi.c +++ b/drivers/spi/fsl_qspi.c @@ -10,6 +10,7 @@ #include <spi.h> #include <asm/io.h> #include <linux/sizes.h> +#include <linux/iopoll.h> #include <dm.h> #include <errno.h> #include <watchdog.h> @@ -150,20 +151,13 @@ static void qspi_write32(u32 flags, u32 *addr, u32 val) static inline int is_controller_busy(const struct fsl_qspi_priv *priv) { u32 val; - const u32 mask = QSPI_SR_BUSY_MASK | QSPI_SR_AHB_ACC_MASK | - QSPI_SR_IP_ACC_MASK; - unsigned int retry = 5; + u32 mask = QSPI_SR_BUSY_MASK | QSPI_SR_AHB_ACC_MASK | + QSPI_SR_IP_ACC_MASK; - do { - val = qspi_read32(priv->flags, &priv->regs->sr); + if (priv->flags & QSPI_FLAG_REGMAP_ENDIAN_BIG) + mask = (u32)cpu_to_be32(mask); - if ((~val & mask) == mask) - return 0; - - udelay(1); - } while (--retry); - - return -ETIMEDOUT; + return readl_poll_timeout(&priv->regs->sr, val, !(val & mask), 1000); } /* QSPI support swapping the flash read/write data diff --git a/drivers/spi/mtk_qspi.c b/drivers/spi/mtk_qspi.c deleted file mode 100644 index b510733e92..0000000000 --- a/drivers/spi/mtk_qspi.c +++ /dev/null @@ -1,359 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2018 MediaTek, Inc. 
- * Author : Guochun.Mao@mediatek.com - */ - -#include <common.h> -#include <dm.h> -#include <malloc.h> -#include <spi.h> -#include <asm/io.h> -#include <linux/iopoll.h> -#include <linux/ioport.h> - -/* Register Offset */ -struct mtk_qspi_regs { - u32 cmd; - u32 cnt; - u32 rdsr; - u32 rdata; - u32 radr[3]; - u32 wdata; - u32 prgdata[6]; - u32 shreg[10]; - u32 cfg[2]; - u32 shreg10; - u32 mode_mon; - u32 status[4]; - u32 flash_time; - u32 flash_cfg; - u32 reserved_0[3]; - u32 sf_time; - u32 pp_dw_data; - u32 reserved_1; - u32 delsel_0[2]; - u32 intrstus; - u32 intren; - u32 reserved_2; - u32 cfg3; - u32 reserved_3; - u32 chksum; - u32 aaicmd; - u32 wrprot; - u32 radr3; - u32 dual; - u32 delsel_1[3]; -}; - -struct mtk_qspi_platdata { - fdt_addr_t reg_base; - fdt_addr_t mem_base; -}; - -struct mtk_qspi_priv { - struct mtk_qspi_regs *regs; - unsigned long *mem_base; - u8 op; - u8 tx[3]; /* only record max 3 bytes paras, when it's address. */ - u32 txlen; /* dout buffer length - op code length */ - u8 *rx; - u32 rxlen; -}; - -#define MTK_QSPI_CMD_POLLINGREG_US 500000 -#define MTK_QSPI_WRBUF_SIZE 256 -#define MTK_QSPI_COMMAND_ENABLE 0x30 - -/* NOR flash controller commands */ -#define MTK_QSPI_RD_TRIGGER BIT(0) -#define MTK_QSPI_READSTATUS BIT(1) -#define MTK_QSPI_PRG_CMD BIT(2) -#define MTK_QSPI_WR_TRIGGER BIT(4) -#define MTK_QSPI_WRITESTATUS BIT(5) -#define MTK_QSPI_AUTOINC BIT(7) - -#define MTK_QSPI_MAX_RX_TX_SHIFT 0x6 -#define MTK_QSPI_MAX_SHIFT 0x8 - -#define MTK_QSPI_WR_BUF_ENABLE 0x1 -#define MTK_QSPI_WR_BUF_DISABLE 0x0 - -static int mtk_qspi_execute_cmd(struct mtk_qspi_priv *priv, u8 cmd) -{ - u8 tmp; - u8 val = cmd & ~MTK_QSPI_AUTOINC; - - writeb(cmd, &priv->regs->cmd); - - return readb_poll_timeout(&priv->regs->cmd, tmp, !(val & tmp), - MTK_QSPI_CMD_POLLINGREG_US); -} - -static int mtk_qspi_tx_rx(struct mtk_qspi_priv *priv) -{ - int len = 1 + priv->txlen + priv->rxlen; - int i, ret, idx; - - if (len > MTK_QSPI_MAX_SHIFT) - return -ERR_INVAL; - - writeb(len * 8, &priv->regs->cnt); - - /* start at PRGDATA5, go down to PRGDATA0 */ - idx = MTK_QSPI_MAX_RX_TX_SHIFT - 1; - - /* opcode */ - writeb(priv->op, &priv->regs->prgdata[idx]); - idx--; - - /* program TX data */ - for (i = 0; i < priv->txlen; i++, idx--) - writeb(priv->tx[i], &priv->regs->prgdata[idx]); - - /* clear out rest of TX registers */ - while (idx >= 0) { - writeb(0, &priv->regs->prgdata[idx]); - idx--; - } - - ret = mtk_qspi_execute_cmd(priv, MTK_QSPI_PRG_CMD); - if (ret) - return ret; - - /* restart at first RX byte */ - idx = priv->rxlen - 1; - - /* read out RX data */ - for (i = 0; i < priv->rxlen; i++, idx--) - priv->rx[i] = readb(&priv->regs->shreg[idx]); - - return 0; -} - -static int mtk_qspi_read(struct mtk_qspi_priv *priv, - u32 addr, u8 *buf, u32 len) -{ - memcpy(buf, (u8 *)priv->mem_base + addr, len); - return 0; -} - -static void mtk_qspi_set_addr(struct mtk_qspi_priv *priv, u32 addr) -{ - int i; - - for (i = 0; i < 3; i++) { - writeb(addr & 0xff, &priv->regs->radr[i]); - addr >>= 8; - } -} - -static int mtk_qspi_write_single_byte(struct mtk_qspi_priv *priv, - u32 addr, u32 length, const u8 *data) -{ - int i, ret; - - mtk_qspi_set_addr(priv, addr); - - for (i = 0; i < length; i++) { - writeb(*data++, &priv->regs->wdata); - ret = mtk_qspi_execute_cmd(priv, MTK_QSPI_WR_TRIGGER); - if (ret < 0) - return ret; - } - return 0; -} - -static int mtk_qspi_write_buffer(struct mtk_qspi_priv *priv, u32 addr, - const u8 *buf) -{ - int i, data; - - mtk_qspi_set_addr(priv, addr); - - for (i = 0; i < MTK_QSPI_WRBUF_SIZE; i += 4) 
{ - data = buf[i + 3] << 24 | buf[i + 2] << 16 | - buf[i + 1] << 8 | buf[i]; - writel(data, &priv->regs->pp_dw_data); - } - - return mtk_qspi_execute_cmd(priv, MTK_QSPI_WR_TRIGGER); -} - -static int mtk_qspi_write(struct mtk_qspi_priv *priv, - u32 addr, const u8 *buf, u32 len) -{ - int ret; - - /* setting pre-fetch buffer for page program */ - writel(MTK_QSPI_WR_BUF_ENABLE, &priv->regs->cfg[1]); - while (len >= MTK_QSPI_WRBUF_SIZE) { - ret = mtk_qspi_write_buffer(priv, addr, buf); - if (ret < 0) - return ret; - - len -= MTK_QSPI_WRBUF_SIZE; - addr += MTK_QSPI_WRBUF_SIZE; - buf += MTK_QSPI_WRBUF_SIZE; - } - /* disable pre-fetch buffer for page program */ - writel(MTK_QSPI_WR_BUF_DISABLE, &priv->regs->cfg[1]); - - if (len) - return mtk_qspi_write_single_byte(priv, addr, len, buf); - - return 0; -} - -static int mtk_qspi_claim_bus(struct udevice *dev) -{ - /* nothing to do */ - return 0; -} - -static int mtk_qspi_release_bus(struct udevice *dev) -{ - /* nothing to do */ - return 0; -} - -static int mtk_qspi_transfer(struct mtk_qspi_priv *priv, unsigned int bitlen, - const void *dout, void *din, unsigned long flags) -{ - u32 bytes = DIV_ROUND_UP(bitlen, 8); - u32 addr; - - if (!bytes) - return -ERR_INVAL; - - if (dout) { - if (flags & SPI_XFER_BEGIN) { - /* parse op code and potential paras first */ - priv->op = *(u8 *)dout; - if (bytes > 1) - memcpy(priv->tx, (u8 *)dout + 1, - bytes <= 4 ? bytes - 1 : 3); - priv->txlen = bytes - 1; - } - - if (flags == SPI_XFER_ONCE) { - /* operations without receiving or sending data. - * for example: erase, write flash register or write - * enable... - */ - priv->rx = NULL; - priv->rxlen = 0; - return mtk_qspi_tx_rx(priv); - } - - if (flags & SPI_XFER_END) { - /* here, dout should be data to be written. - * and priv->tx should be filled 3Bytes address. - */ - addr = priv->tx[0] << 16 | priv->tx[1] << 8 | - priv->tx[2]; - return mtk_qspi_write(priv, addr, (u8 *)dout, bytes); - } - } - - if (din) { - if (priv->txlen >= 3) { - /* if run to here, priv->tx[] should be the address - * where read data from, - * and, din is the buf to receive data. 
- */ - addr = priv->tx[0] << 16 | priv->tx[1] << 8 | - priv->tx[2]; - return mtk_qspi_read(priv, addr, (u8 *)din, bytes); - } - - /* should be reading flash's register */ - priv->rx = (u8 *)din; - priv->rxlen = bytes; - return mtk_qspi_tx_rx(priv); - } - - return 0; -} - -static int mtk_qspi_xfer(struct udevice *dev, unsigned int bitlen, - const void *dout, void *din, unsigned long flags) -{ - struct udevice *bus = dev->parent; - struct mtk_qspi_priv *priv = dev_get_priv(bus); - - return mtk_qspi_transfer(priv, bitlen, dout, din, flags); -} - -static int mtk_qspi_set_speed(struct udevice *bus, uint speed) -{ - /* nothing to do */ - return 0; -} - -static int mtk_qspi_set_mode(struct udevice *bus, uint mode) -{ - /* nothing to do */ - return 0; -} - -static int mtk_qspi_ofdata_to_platdata(struct udevice *bus) -{ - struct resource res_reg, res_mem; - struct mtk_qspi_platdata *plat = bus->platdata; - int ret; - - ret = dev_read_resource_byname(bus, "reg_base", &res_reg); - if (ret) { - debug("can't get reg_base resource(ret = %d)\n", ret); - return -ENOMEM; - } - - ret = dev_read_resource_byname(bus, "mem_base", &res_mem); - if (ret) { - debug("can't get map_base resource(ret = %d)\n", ret); - return -ENOMEM; - } - - plat->mem_base = res_mem.start; - plat->reg_base = res_reg.start; - - return 0; -} - -static int mtk_qspi_probe(struct udevice *bus) -{ - struct mtk_qspi_platdata *plat = dev_get_platdata(bus); - struct mtk_qspi_priv *priv = dev_get_priv(bus); - - priv->regs = (struct mtk_qspi_regs *)plat->reg_base; - priv->mem_base = (unsigned long *)plat->mem_base; - - writel(MTK_QSPI_COMMAND_ENABLE, &priv->regs->wrprot); - - return 0; -} - -static const struct dm_spi_ops mtk_qspi_ops = { - .claim_bus = mtk_qspi_claim_bus, - .release_bus = mtk_qspi_release_bus, - .xfer = mtk_qspi_xfer, - .set_speed = mtk_qspi_set_speed, - .set_mode = mtk_qspi_set_mode, -}; - -static const struct udevice_id mtk_qspi_ids[] = { - { .compatible = "mediatek,mt7629-qspi" }, - { } -}; - -U_BOOT_DRIVER(mtk_qspi) = { - .name = "mtk_qspi", - .id = UCLASS_SPI, - .of_match = mtk_qspi_ids, - .ops = &mtk_qspi_ops, - .ofdata_to_platdata = mtk_qspi_ofdata_to_platdata, - .platdata_auto_alloc_size = sizeof(struct mtk_qspi_platdata), - .priv_auto_alloc_size = sizeof(struct mtk_qspi_priv), - .probe = mtk_qspi_probe, -}; diff --git a/drivers/spi/mtk_snfi_spi.c b/drivers/spi/mtk_snfi_spi.c new file mode 100644 index 0000000000..2a89476515 --- /dev/null +++ b/drivers/spi/mtk_snfi_spi.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 MediaTek Inc. All Rights Reserved. 
+ * + * Author: Weijie Gao <weijie.gao@mediatek.com> + */ + +#include <common.h> +#include <clk.h> +#include <dm.h> +#include <errno.h> +#include <spi.h> +#include <spi-mem.h> +#include <stdbool.h> +#include <watchdog.h> +#include <dm/pinctrl.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/iopoll.h> + +#define SNFI_MAC_CTL 0x500 +#define MAC_XIO_SEL BIT(4) +#define SF_MAC_EN BIT(3) +#define SF_TRIG BIT(2) +#define WIP_READY BIT(1) +#define WIP BIT(0) + +#define SNFI_MAC_OUTL 0x504 +#define SNFI_MAC_INL 0x508 + +#define SNFI_MISC_CTL 0x538 +#define SW_RST BIT(28) +#define FIFO_RD_LTC_SHIFT 25 +#define FIFO_RD_LTC GENMASK(26, 25) +#define LATCH_LAT_SHIFT 8 +#define LATCH_LAT GENMASK(9, 8) +#define CS_DESELECT_CYC_SHIFT 0 +#define CS_DESELECT_CYC GENMASK(4, 0) + +#define SNF_STA_CTL1 0x550 +#define SPI_STATE GENMASK(3, 0) + +#define SNFI_GPRAM_OFFSET 0x800 +#define SNFI_GPRAM_SIZE 0x80 + +#define SNFI_POLL_INTERVAL 500000 +#define SNFI_RST_POLL_INTERVAL 1000000 + +struct mtk_snfi_priv { + void __iomem *base; + + struct clk nfi_clk; + struct clk pad_clk; +}; + +static int mtk_snfi_adjust_op_size(struct spi_slave *slave, + struct spi_mem_op *op) +{ + u32 nbytes; + + /* + * When there is input data, it will be appended after the output + * data in the GPRAM. So the total size of either pure output data + * or the output+input data must not exceed the GPRAM size. + */ + + nbytes = sizeof(op->cmd.opcode) + op->addr.nbytes + + op->dummy.nbytes; + + if (nbytes + op->data.nbytes <= SNFI_GPRAM_SIZE) + return 0; + + if (nbytes >= SNFI_GPRAM_SIZE) + return -ENOTSUPP; + + op->data.nbytes = SNFI_GPRAM_SIZE - nbytes; + + return 0; +} + +static bool mtk_snfi_supports_op(struct spi_slave *slave, + const struct spi_mem_op *op) +{ + if (op->cmd.buswidth > 1 || op->addr.buswidth > 1 || + op->dummy.buswidth > 1 || op->data.buswidth > 1) + return false; + + return true; +} + +static int mtk_snfi_mac_trigger(struct mtk_snfi_priv *priv, + struct udevice *bus, u32 outlen, u32 inlen) +{ + int ret; + u32 val; + +#ifdef CONFIG_PINCTRL + pinctrl_select_state(bus, "snfi"); +#endif + + writel(SF_MAC_EN, priv->base + SNFI_MAC_CTL); + writel(outlen, priv->base + SNFI_MAC_OUTL); + writel(inlen, priv->base + SNFI_MAC_INL); + + writel(SF_MAC_EN | SF_TRIG, priv->base + SNFI_MAC_CTL); + + ret = readl_poll_timeout(priv->base + SNFI_MAC_CTL, val, + val & WIP_READY, SNFI_POLL_INTERVAL); + if (ret) { + printf("%s: timed out waiting for WIP_READY\n", __func__); + goto cleanup; + } + + ret = readl_poll_timeout(priv->base + SNFI_MAC_CTL, val, + !(val & WIP), SNFI_POLL_INTERVAL); + if (ret) + printf("%s: timed out waiting for WIP cleared\n", __func__); + + writel(0, priv->base + SNFI_MAC_CTL); + +cleanup: +#ifdef CONFIG_PINCTRL + pinctrl_select_state(bus, "default"); +#endif + + return ret; +} + +static int mtk_snfi_mac_reset(struct mtk_snfi_priv *priv) +{ + int ret; + u32 val; + + setbits_32(priv->base + SNFI_MISC_CTL, SW_RST); + + ret = readl_poll_timeout(priv->base + SNF_STA_CTL1, val, + !(val & SPI_STATE), SNFI_POLL_INTERVAL); + if (ret) + printf("%s: failed to reset snfi mac\n", __func__); + + writel((2 << FIFO_RD_LTC_SHIFT) | + (10 << CS_DESELECT_CYC_SHIFT), + priv->base + SNFI_MISC_CTL); + + return ret; +} + +static void mtk_snfi_copy_to_gpram(struct mtk_snfi_priv *priv, + const void *data, size_t len) +{ + void __iomem *gpram = priv->base + SNFI_GPRAM_OFFSET; + size_t i, n = (len + sizeof(u32) - 1) / sizeof(u32); + const u32 *buff = data; + + /* + * The output data will always be copied to the beginning of 
+ * the GPRAM. Uses word write for better performace. + * + * Trailing bytes in the last word are not cared. + */ + + for (i = 0; i < n; i++) + writel(buff[i], gpram + i * sizeof(u32)); +} + +static void mtk_snfi_copy_from_gpram(struct mtk_snfi_priv *priv, u8 *cache, + void *data, size_t pos, size_t len) +{ + void __iomem *gpram = priv->base + SNFI_GPRAM_OFFSET; + u32 *buff = (u32 *)cache; + size_t i, off, end; + + /* Start position in the buffer */ + off = pos & (sizeof(u32) - 1); + + /* End position for copy */ + end = (len + pos + sizeof(u32) - 1) & (~(sizeof(u32) - 1)); + + /* Start position for copy */ + pos &= ~(sizeof(u32) - 1); + + /* + * Read aligned data from GPRAM to buffer first. + * Uses word read for better performace. + */ + i = 0; + while (pos < end) { + buff[i++] = readl(gpram + pos); + pos += sizeof(u32); + } + + /* Copy rx data */ + memcpy(data, cache + off, len); +} + +static int mtk_snfi_exec_op(struct spi_slave *slave, + const struct spi_mem_op *op) +{ + struct udevice *bus = dev_get_parent(slave->dev); + struct mtk_snfi_priv *priv = dev_get_priv(bus); + u8 gpram_cache[SNFI_GPRAM_SIZE]; + u32 i, len = 0, inlen = 0; + int addr_sh; + int ret; + + WATCHDOG_RESET(); + + ret = mtk_snfi_mac_reset(priv); + if (ret) + return ret; + + /* Put opcode */ + gpram_cache[len++] = op->cmd.opcode; + + /* Put address */ + addr_sh = (op->addr.nbytes - 1) * 8; + while (addr_sh >= 0) { + gpram_cache[len++] = (op->addr.val >> addr_sh) & 0xff; + addr_sh -= 8; + } + + /* Put dummy bytes */ + for (i = 0; i < op->dummy.nbytes; i++) + gpram_cache[len++] = 0; + + /* Put output data */ + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) { + memcpy(gpram_cache + len, op->data.buf.out, op->data.nbytes); + len += op->data.nbytes; + } + + /* Copy final output data to GPRAM */ + mtk_snfi_copy_to_gpram(priv, gpram_cache, len); + + /* Start one SPI transaction */ + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) + inlen = op->data.nbytes; + + ret = mtk_snfi_mac_trigger(priv, bus, len, inlen); + if (ret) + return ret; + + /* Copy input data from GPRAM */ + if (inlen) + mtk_snfi_copy_from_gpram(priv, gpram_cache, op->data.buf.in, + len, inlen); + + return 0; +} + +static int mtk_snfi_spi_probe(struct udevice *bus) +{ + struct mtk_snfi_priv *priv = dev_get_priv(bus); + int ret; + + priv->base = (void __iomem *)devfdt_get_addr(bus); + if (!priv->base) + return -EINVAL; + + ret = clk_get_by_name(bus, "nfi_clk", &priv->nfi_clk); + if (ret < 0) + return ret; + + ret = clk_get_by_name(bus, "pad_clk", &priv->pad_clk); + if (ret < 0) + return ret; + + clk_enable(&priv->nfi_clk); + clk_enable(&priv->pad_clk); + + return 0; +} + +static int mtk_snfi_set_speed(struct udevice *bus, uint speed) +{ + /* + * The SNFI does not have a bus clock divider. + * The bus clock is set in dts (pad_clk, UNIVPLL2_D8 = 50MHz). 
+ */ + + return 0; +} + +static int mtk_snfi_set_mode(struct udevice *bus, uint mode) +{ + /* The SNFI supports only mode 0 */ + + if (mode) + return -EINVAL; + + return 0; +} + +static const struct spi_controller_mem_ops mtk_snfi_mem_ops = { + .adjust_op_size = mtk_snfi_adjust_op_size, + .supports_op = mtk_snfi_supports_op, + .exec_op = mtk_snfi_exec_op, +}; + +static const struct dm_spi_ops mtk_snfi_spi_ops = { + .mem_ops = &mtk_snfi_mem_ops, + .set_speed = mtk_snfi_set_speed, + .set_mode = mtk_snfi_set_mode, +}; + +static const struct udevice_id mtk_snfi_spi_ids[] = { + { .compatible = "mediatek,mtk-snfi-spi" }, + { } +}; + +U_BOOT_DRIVER(mtk_snfi_spi) = { + .name = "mtk_snfi_spi", + .id = UCLASS_SPI, + .of_match = mtk_snfi_spi_ids, + .ops = &mtk_snfi_spi_ops, + .priv_auto_alloc_size = sizeof(struct mtk_snfi_priv), + .probe = mtk_snfi_spi_probe, +}; diff --git a/drivers/spi/mxs_spi.c b/drivers/spi/mxs_spi.c index 5065e407f8..3a9756fbf1 100644 --- a/drivers/spi/mxs_spi.c +++ b/drivers/spi/mxs_spi.c @@ -2,6 +2,9 @@ /* * Freescale i.MX28 SPI driver * + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + * * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com> * on behalf of DENX Software Engineering GmbH * @@ -27,6 +30,19 @@ #define MXSSSP_SMALL_TRANSFER 512 +static void mxs_spi_start_xfer(struct mxs_ssp_regs *ssp_regs) +{ + writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_set); + writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_clr); +} + +static void mxs_spi_end_xfer(struct mxs_ssp_regs *ssp_regs) +{ + writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_clr); + writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_set); +} + +#if !CONFIG_IS_ENABLED(DM_SPI) struct mxs_spi_slave { struct spi_slave slave; uint32_t max_khz; @@ -38,94 +54,38 @@ static inline struct mxs_spi_slave *to_mxs_slave(struct spi_slave *slave) { return container_of(slave, struct mxs_spi_slave, slave); } +#else +#include <dm.h> +#include <errno.h> +struct mxs_spi_platdata { + s32 frequency; /* Default clock frequency, -1 for none */ + fdt_addr_t base; /* SPI IP block base address */ + int num_cs; /* Number of CSes supported */ + int dma_id; /* ID of the DMA channel */ + int clk_id; /* ID of the SSP clock */ +}; -int spi_cs_is_valid(unsigned int bus, unsigned int cs) -{ - /* MXS SPI: 4 ports and 3 chip selects maximum */ - if (!mxs_ssp_bus_id_valid(bus) || cs > 2) - return 0; - else - return 1; -} - -struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs, - unsigned int max_hz, unsigned int mode) -{ - struct mxs_spi_slave *mxs_slave; - - if (!spi_cs_is_valid(bus, cs)) { - printf("mxs_spi: invalid bus %d / chip select %d\n", bus, cs); - return NULL; - } - - mxs_slave = spi_alloc_slave(struct mxs_spi_slave, bus, cs); - if (!mxs_slave) - return NULL; - - if (mxs_dma_init_channel(MXS_DMA_CHANNEL_AHB_APBH_SSP0 + bus)) - goto err_init; - - mxs_slave->max_khz = max_hz / 1000; - mxs_slave->mode = mode; - mxs_slave->regs = mxs_ssp_regs_by_bus(bus); - - return &mxs_slave->slave; - -err_init: - free(mxs_slave); - return NULL; -} - -void spi_free_slave(struct spi_slave *slave) -{ - struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave); - free(mxs_slave); -} - -int spi_claim_bus(struct spi_slave *slave) -{ - struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave); - struct mxs_ssp_regs *ssp_regs = mxs_slave->regs; - uint32_t reg = 0; - - mxs_reset_block(&ssp_regs->hw_ssp_ctrl0_reg); - - writel((slave->cs << MXS_SSP_CHIPSELECT_SHIFT) | - 
SSP_CTRL0_BUS_WIDTH_ONE_BIT, - &ssp_regs->hw_ssp_ctrl0); - - reg = SSP_CTRL1_SSP_MODE_SPI | SSP_CTRL1_WORD_LENGTH_EIGHT_BITS; - reg |= (mxs_slave->mode & SPI_CPOL) ? SSP_CTRL1_POLARITY : 0; - reg |= (mxs_slave->mode & SPI_CPHA) ? SSP_CTRL1_PHASE : 0; - writel(reg, &ssp_regs->hw_ssp_ctrl1); - - writel(0, &ssp_regs->hw_ssp_cmd0); - - mxs_set_ssp_busclock(slave->bus, mxs_slave->max_khz); - - return 0; -} - -void spi_release_bus(struct spi_slave *slave) -{ -} - -static void mxs_spi_start_xfer(struct mxs_ssp_regs *ssp_regs) -{ - writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_set); - writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_clr); -} - -static void mxs_spi_end_xfer(struct mxs_ssp_regs *ssp_regs) -{ - writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_clr); - writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_set); -} +struct mxs_spi_priv { + struct mxs_ssp_regs *regs; + unsigned int dma_channel; + unsigned int max_freq; + unsigned int clk_id; + unsigned int mode; +}; +#endif +#if !CONFIG_IS_ENABLED(DM_SPI) static int mxs_spi_xfer_pio(struct mxs_spi_slave *slave, char *data, int length, int write, unsigned long flags) { struct mxs_ssp_regs *ssp_regs = slave->regs; +#else +static int mxs_spi_xfer_pio(struct mxs_spi_priv *priv, + char *data, int length, int write, + unsigned long flags) +{ + struct mxs_ssp_regs *ssp_regs = priv->regs; +#endif if (flags & SPI_XFER_BEGIN) mxs_spi_start_xfer(ssp_regs); @@ -181,12 +141,19 @@ static int mxs_spi_xfer_pio(struct mxs_spi_slave *slave, return 0; } +#if !CONFIG_IS_ENABLED(DM_SPI) static int mxs_spi_xfer_dma(struct mxs_spi_slave *slave, char *data, int length, int write, unsigned long flags) { + struct mxs_ssp_regs *ssp_regs = slave->regs; +#else +static int mxs_spi_xfer_dma(struct mxs_spi_priv *priv, + char *data, int length, int write, + unsigned long flags) +{ struct mxs_ssp_regs *ssp_regs = priv->regs; +#endif const int xfer_max_sz = 0xff00; const int desc_count = DIV_ROUND_UP(length, xfer_max_sz) + 1; - struct mxs_ssp_regs *ssp_regs = slave->regs; struct mxs_dma_desc *dp; uint32_t ctrl0; uint32_t cache_data_count; @@ -225,7 +192,11 @@ static int mxs_spi_xfer_dma(struct mxs_spi_slave *slave, /* Invalidate the area, so no writeback into the RAM races with DMA */ invalidate_dcache_range(dstart, dstart + cache_data_count); +#if !CONFIG_IS_ENABLED(DM_SPI) dmach = MXS_DMA_CHANNEL_AHB_APBH_SSP0 + slave->slave.bus; +#else + dmach = priv->dma_channel; +#endif dp = desc; while (length) { @@ -302,11 +273,20 @@ static int mxs_spi_xfer_dma(struct mxs_spi_slave *slave, return ret; } +#if !CONFIG_IS_ENABLED(DM_SPI) int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout, void *din, unsigned long flags) { struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave); struct mxs_ssp_regs *ssp_regs = mxs_slave->regs; +#else +int mxs_spi_xfer(struct udevice *dev, unsigned int bitlen, + const void *dout, void *din, unsigned long flags) +{ + struct udevice *bus = dev_get_parent(dev); + struct mxs_spi_priv *priv = dev_get_priv(bus); + struct mxs_ssp_regs *ssp_regs = priv->regs; +#endif int len = bitlen / 8; char dummy; int write = 0; @@ -350,9 +330,263 @@ int spi_xfer(struct spi_slave *slave, unsigned int bitlen, if (!dma || (len < MXSSSP_SMALL_TRANSFER)) { writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_clr); +#if !CONFIG_IS_ENABLED(DM_SPI) return mxs_spi_xfer_pio(mxs_slave, data, len, write, flags); +#else + return mxs_spi_xfer_pio(priv, data, len, write, flags); +#endif } else { writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_set); +#if 
!CONFIG_IS_ENABLED(DM_SPI) return mxs_spi_xfer_dma(mxs_slave, data, len, write, flags); +#else + return mxs_spi_xfer_dma(priv, data, len, write, flags); +#endif } } + +#if !CONFIG_IS_ENABLED(DM_SPI) +int spi_cs_is_valid(unsigned int bus, unsigned int cs) +{ + /* MXS SPI: 4 ports and 3 chip selects maximum */ + if (!mxs_ssp_bus_id_valid(bus) || cs > 2) + return 0; + else + return 1; +} + +struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs, + unsigned int max_hz, unsigned int mode) +{ + struct mxs_spi_slave *mxs_slave; + + if (!spi_cs_is_valid(bus, cs)) { + printf("mxs_spi: invalid bus %d / chip select %d\n", bus, cs); + return NULL; + } + + mxs_slave = spi_alloc_slave(struct mxs_spi_slave, bus, cs); + if (!mxs_slave) + return NULL; + + if (mxs_dma_init_channel(MXS_DMA_CHANNEL_AHB_APBH_SSP0 + bus)) + goto err_init; + + mxs_slave->max_khz = max_hz / 1000; + mxs_slave->mode = mode; + mxs_slave->regs = mxs_ssp_regs_by_bus(bus); + + return &mxs_slave->slave; + +err_init: + free(mxs_slave); + return NULL; +} + +void spi_free_slave(struct spi_slave *slave) +{ + struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave); + + free(mxs_slave); +} + +int spi_claim_bus(struct spi_slave *slave) +{ + struct mxs_spi_slave *mxs_slave = to_mxs_slave(slave); + struct mxs_ssp_regs *ssp_regs = mxs_slave->regs; + u32 reg = 0; + + mxs_reset_block(&ssp_regs->hw_ssp_ctrl0_reg); + + writel((slave->cs << MXS_SSP_CHIPSELECT_SHIFT) | + SSP_CTRL0_BUS_WIDTH_ONE_BIT, + &ssp_regs->hw_ssp_ctrl0); + + reg = SSP_CTRL1_SSP_MODE_SPI | SSP_CTRL1_WORD_LENGTH_EIGHT_BITS; + reg |= (mxs_slave->mode & SPI_CPOL) ? SSP_CTRL1_POLARITY : 0; + reg |= (mxs_slave->mode & SPI_CPHA) ? SSP_CTRL1_PHASE : 0; + writel(reg, &ssp_regs->hw_ssp_ctrl1); + + writel(0, &ssp_regs->hw_ssp_cmd0); + + mxs_set_ssp_busclock(slave->bus, mxs_slave->max_khz); + + return 0; +} + +void spi_release_bus(struct spi_slave *slave) +{ +} + +#else /* CONFIG_DM_SPI */ +/* Base numbers of i.MX2[38] clk for ssp0 IP block */ +#define MXS_SSP_IMX23_CLKID_SSP0 33 +#define MXS_SSP_IMX28_CLKID_SSP0 46 + +static int mxs_spi_probe(struct udevice *bus) +{ + struct mxs_spi_platdata *plat = dev_get_platdata(bus); + struct mxs_spi_priv *priv = dev_get_priv(bus); + int ret; + + debug("%s: probe\n", __func__); + priv->regs = (struct mxs_ssp_regs *)plat->base; + priv->max_freq = plat->frequency; + + priv->dma_channel = plat->dma_id; + priv->clk_id = plat->clk_id; + + ret = mxs_dma_init_channel(priv->dma_channel); + if (ret) { + printf("%s: DMA init channel error %d\n", __func__, ret); + return ret; + } + + return 0; +} + +static int mxs_spi_claim_bus(struct udevice *dev) +{ + struct udevice *bus = dev_get_parent(dev); + struct mxs_spi_priv *priv = dev_get_priv(bus); + struct mxs_ssp_regs *ssp_regs = priv->regs; + int cs = spi_chip_select(dev); + + /* + * i.MX28 supports up to 3 CS (SSn0, SSn1, SSn2) + * To set them it uses following tuple (WAIT_FOR_IRQ,WAIT_FOR_CMD), + * where: + * + * WAIT_FOR_IRQ is bit 21 of HW_SSP_CTRL0 + * WAIT_FOR_CMD is bit 20 (#defined as MXS_SSP_CHIPSELECT_SHIFT here) of + * HW_SSP_CTRL0 + * SSn0 b00 + * SSn1 b01 + * SSn2 b10 (which require setting WAIT_FOR_IRQ) + * + * However, for now i.MX28 SPI driver will support up till 2 CSes + * (SSn0, and SSn1). 
+ */ + + /* Ungate SSP clock and set active CS */ + clrsetbits_le32(&ssp_regs->hw_ssp_ctrl0, + BIT(MXS_SSP_CHIPSELECT_SHIFT) | + SSP_CTRL0_CLKGATE, (cs << MXS_SSP_CHIPSELECT_SHIFT)); + + return 0; +} + +static int mxs_spi_release_bus(struct udevice *dev) +{ + struct udevice *bus = dev_get_parent(dev); + struct mxs_spi_priv *priv = dev_get_priv(bus); + struct mxs_ssp_regs *ssp_regs = priv->regs; + + /* Gate SSP clock */ + setbits_le32(&ssp_regs->hw_ssp_ctrl0, SSP_CTRL0_CLKGATE); + + return 0; +} + +static int mxs_spi_set_speed(struct udevice *bus, uint speed) +{ + struct mxs_spi_priv *priv = dev_get_priv(bus); +#ifdef CONFIG_MX28 + int clkid = priv->clk_id - MXS_SSP_IMX28_CLKID_SSP0; +#else /* CONFIG_MX23 */ + int clkid = priv->clk_id - MXS_SSP_IMX23_CLKID_SSP0; +#endif + if (speed > priv->max_freq) + speed = priv->max_freq; + + debug("%s speed: %u [Hz] clkid: %d\n", __func__, speed, clkid); + mxs_set_ssp_busclock(clkid, speed / 1000); + + return 0; +} + +static int mxs_spi_set_mode(struct udevice *bus, uint mode) +{ + struct mxs_spi_priv *priv = dev_get_priv(bus); + struct mxs_ssp_regs *ssp_regs = priv->regs; + u32 reg; + + priv->mode = mode; + debug("%s: mode 0x%x\n", __func__, mode); + + reg = SSP_CTRL1_SSP_MODE_SPI | SSP_CTRL1_WORD_LENGTH_EIGHT_BITS; + reg |= (priv->mode & SPI_CPOL) ? SSP_CTRL1_POLARITY : 0; + reg |= (priv->mode & SPI_CPHA) ? SSP_CTRL1_PHASE : 0; + writel(reg, &ssp_regs->hw_ssp_ctrl1); + + /* Single bit SPI support */ + writel(SSP_CTRL0_BUS_WIDTH_ONE_BIT, &ssp_regs->hw_ssp_ctrl0); + + return 0; +} + +static const struct dm_spi_ops mxs_spi_ops = { + .claim_bus = mxs_spi_claim_bus, + .release_bus = mxs_spi_release_bus, + .xfer = mxs_spi_xfer, + .set_speed = mxs_spi_set_speed, + .set_mode = mxs_spi_set_mode, + /* + * cs_info is not needed, since we require all chip selects to be + * in the device tree explicitly + */ +}; + +#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA) +static int mxs_ofdata_to_platdata(struct udevice *bus) +{ + struct mxs_spi_platdata *plat = bus->platdata; + u32 prop[2]; + int ret; + + plat->base = dev_read_addr(bus); + plat->frequency = + dev_read_u32_default(bus, "spi-max-frequency", 40000000); + plat->num_cs = dev_read_u32_default(bus, "num-cs", 2); + + ret = dev_read_u32_array(bus, "dmas", prop, ARRAY_SIZE(prop)); + if (ret) { + printf("%s: Reading 'dmas' property failed!\n", __func__); + return ret; + } + plat->dma_id = prop[1]; + + ret = dev_read_u32_array(bus, "clocks", prop, ARRAY_SIZE(prop)); + if (ret) { + printf("%s: Reading 'clocks' property failed!\n", __func__); + return ret; + } + plat->clk_id = prop[1]; + + debug("%s: base=0x%x, max-frequency=%d num-cs=%d dma_id=%d clk_id=%d\n", + __func__, (uint)plat->base, plat->frequency, plat->num_cs, + plat->dma_id, plat->clk_id); + + return 0; +} +#endif + +static const struct udevice_id mxs_spi_ids[] = { + { .compatible = "fsl,imx23-spi" }, + { .compatible = "fsl,imx28-spi" }, + { } +}; + +U_BOOT_DRIVER(mxs_spi) = { + .name = "mxs_spi", + .id = UCLASS_SPI, +#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA) + .of_match = mxs_spi_ids, + .ofdata_to_platdata = mxs_ofdata_to_platdata, +#endif + .priv_auto_alloc_size = sizeof(struct mxs_spi_platdata), + .ops = &mxs_spi_ops, + .priv_auto_alloc_size = sizeof(struct mxs_spi_priv), + .probe = mxs_spi_probe, +}; +#endif diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 7aabebeff5..7788ab9953 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -430,12 +430,14 @@ int 
spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op) if (slave->max_write_size && len > slave->max_write_size) return -EINVAL; - if (op->data.dir == SPI_MEM_DATA_IN && slave->max_read_size) - op->data.nbytes = min(op->data.nbytes, + if (op->data.dir == SPI_MEM_DATA_IN) { + if (slave->max_read_size) + op->data.nbytes = min(op->data.nbytes, slave->max_read_size); - else if (slave->max_write_size) + } else if (slave->max_write_size) { op->data.nbytes = min(op->data.nbytes, slave->max_write_size - len); + } if (!op->data.nbytes) return -EINVAL; diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c new file mode 100644 index 0000000000..969bd4b75c --- /dev/null +++ b/drivers/spi/spi-sifive.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018 SiFive, Inc. + * Copyright 2019 Bhargav Shah <bhargavshah1988@gmail.com> + * + * SiFive SPI controller driver (master mode only) + */ + +#include <common.h> +#include <dm.h> +#include <malloc.h> +#include <spi.h> +#include <asm/io.h> +#include <linux/log2.h> +#include <clk.h> + +#define SIFIVE_SPI_MAX_CS 32 + +#define SIFIVE_SPI_DEFAULT_DEPTH 8 +#define SIFIVE_SPI_DEFAULT_BITS 8 + +/* register offsets */ +#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */ +#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */ +#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */ +#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */ +#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */ +#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */ +#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */ +#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */ +#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */ +#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */ +#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */ +#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */ +#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */ +#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */ +#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */ +#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */ + +/* sckdiv bits */ +#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU + +/* sckmode bits */ +#define SIFIVE_SPI_SCKMODE_PHA BIT(0) +#define SIFIVE_SPI_SCKMODE_POL BIT(1) +#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \ + SIFIVE_SPI_SCKMODE_POL) + +/* csmode bits */ +#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U +#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U +#define SIFIVE_SPI_CSMODE_MODE_OFF 3U + +/* delay0 bits */ +#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x)) +#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU +#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16) +#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16) + +/* delay1 bits */ +#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x)) +#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU +#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16) +#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16) + +/* fmt bits */ +#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U +#define SIFIVE_SPI_FMT_PROTO_DUAL 1U +#define SIFIVE_SPI_FMT_PROTO_QUAD 2U +#define SIFIVE_SPI_FMT_PROTO_MASK 3U +#define SIFIVE_SPI_FMT_ENDIAN BIT(2) +#define SIFIVE_SPI_FMT_DIR BIT(3) +#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16) +#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16) + +/* txdata bits */ +#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU +#define SIFIVE_SPI_TXDATA_FULL BIT(31) + +/* rxdata bits */ +#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU +#define 
SIFIVE_SPI_RXDATA_EMPTY BIT(31) + +/* ie and ip bits */ +#define SIFIVE_SPI_IP_TXWM BIT(0) +#define SIFIVE_SPI_IP_RXWM BIT(1) + +struct sifive_spi { + void *regs; /* base address of the registers */ + u32 fifo_depth; + u32 bits_per_word; + u32 cs_inactive; /* Level of the CS pins when inactive*/ + u32 freq; + u32 num_cs; +}; + +static void sifive_spi_prep_device(struct sifive_spi *spi, + struct dm_spi_slave_platdata *slave) +{ + /* Update the chip select polarity */ + if (slave->mode & SPI_CS_HIGH) + spi->cs_inactive &= ~BIT(slave->cs); + else + spi->cs_inactive |= BIT(slave->cs); + writel(spi->cs_inactive, spi->regs + SIFIVE_SPI_REG_CSDEF); + + /* Select the correct device */ + writel(slave->cs, spi->regs + SIFIVE_SPI_REG_CSID); +} + +static int sifive_spi_set_cs(struct sifive_spi *spi, + struct dm_spi_slave_platdata *slave) +{ + u32 cs_mode = SIFIVE_SPI_CSMODE_MODE_HOLD; + + if (slave->mode & SPI_CS_HIGH) + cs_mode = SIFIVE_SPI_CSMODE_MODE_AUTO; + + writel(cs_mode, spi->regs + SIFIVE_SPI_REG_CSMODE); + + return 0; +} + +static void sifive_spi_clear_cs(struct sifive_spi *spi) +{ + writel(SIFIVE_SPI_CSMODE_MODE_AUTO, spi->regs + SIFIVE_SPI_REG_CSMODE); +} + +static void sifive_spi_prep_transfer(struct sifive_spi *spi, + bool is_rx_xfer, + struct dm_spi_slave_platdata *slave) +{ + u32 cr; + + /* Modify the SPI protocol mode */ + cr = readl(spi->regs + SIFIVE_SPI_REG_FMT); + + /* Bits per word ? */ + cr &= ~SIFIVE_SPI_FMT_LEN_MASK; + cr |= SIFIVE_SPI_FMT_LEN(spi->bits_per_word); + + /* LSB first? */ + cr &= ~SIFIVE_SPI_FMT_ENDIAN; + if (slave->mode & SPI_LSB_FIRST) + cr |= SIFIVE_SPI_FMT_ENDIAN; + + /* Number of wires ? */ + cr &= ~SIFIVE_SPI_FMT_PROTO_MASK; + if ((slave->mode & SPI_TX_QUAD) || (slave->mode & SPI_RX_QUAD)) + cr |= SIFIVE_SPI_FMT_PROTO_QUAD; + else if ((slave->mode & SPI_TX_DUAL) || (slave->mode & SPI_RX_DUAL)) + cr |= SIFIVE_SPI_FMT_PROTO_DUAL; + else + cr |= SIFIVE_SPI_FMT_PROTO_SINGLE; + + /* SPI direction in/out ? */ + cr &= ~SIFIVE_SPI_FMT_DIR; + if (!is_rx_xfer) + cr |= SIFIVE_SPI_FMT_DIR; + + writel(cr, spi->regs + SIFIVE_SPI_REG_FMT); +} + +static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr) +{ + u32 data; + + do { + data = readl(spi->regs + SIFIVE_SPI_REG_RXDATA); + } while (data & SIFIVE_SPI_RXDATA_EMPTY); + + if (rx_ptr) + *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK; +} + +static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr) +{ + u32 data; + u8 tx_data = (tx_ptr) ? 
*tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK : + SIFIVE_SPI_TXDATA_DATA_MASK; + + do { + data = readl(spi->regs + SIFIVE_SPI_REG_TXDATA); + } while (data & SIFIVE_SPI_TXDATA_FULL); + + writel(tx_data, spi->regs + SIFIVE_SPI_REG_TXDATA); +} + +static int sifive_spi_xfer(struct udevice *dev, unsigned int bitlen, + const void *dout, void *din, unsigned long flags) +{ + struct udevice *bus = dev->parent; + struct sifive_spi *spi = dev_get_priv(bus); + struct dm_spi_slave_platdata *slave = dev_get_parent_platdata(dev); + const unsigned char *tx_ptr = dout; + u8 *rx_ptr = din; + u32 remaining_len; + int ret; + + if (flags & SPI_XFER_BEGIN) { + sifive_spi_prep_device(spi, slave); + + ret = sifive_spi_set_cs(spi, slave); + if (ret) + return ret; + } + + sifive_spi_prep_transfer(spi, true, slave); + + remaining_len = bitlen / 8; + + while (remaining_len) { + int n_words, tx_words, rx_words; + + n_words = min(remaining_len, spi->fifo_depth); + + /* Enqueue n_words for transmission */ + if (tx_ptr) { + for (tx_words = 0; tx_words < n_words; ++tx_words) { + sifive_spi_tx(spi, tx_ptr); + sifive_spi_rx(spi, NULL); + tx_ptr++; + } + } + + /* Read out all the data from the RX FIFO */ + if (rx_ptr) { + for (rx_words = 0; rx_words < n_words; ++rx_words) { + sifive_spi_tx(spi, NULL); + sifive_spi_rx(spi, rx_ptr); + rx_ptr++; + } + } + + remaining_len -= n_words; + } + + if (flags & SPI_XFER_END) + sifive_spi_clear_cs(spi); + + return 0; +} + +static int sifive_spi_set_speed(struct udevice *bus, uint speed) +{ + struct sifive_spi *spi = dev_get_priv(bus); + u32 scale; + + if (speed > spi->freq) + speed = spi->freq; + + /* Cofigure max speed */ + scale = (DIV_ROUND_UP(spi->freq >> 1, speed) - 1) + & SIFIVE_SPI_SCKDIV_DIV_MASK; + writel(scale, spi->regs + SIFIVE_SPI_REG_SCKDIV); + + return 0; +} + +static int sifive_spi_set_mode(struct udevice *bus, uint mode) +{ + struct sifive_spi *spi = dev_get_priv(bus); + u32 cr; + + /* Switch clock mode bits */ + cr = readl(spi->regs + SIFIVE_SPI_REG_SCKMODE) & + ~SIFIVE_SPI_SCKMODE_MODE_MASK; + if (mode & SPI_CPHA) + cr |= SIFIVE_SPI_SCKMODE_PHA; + if (mode & SPI_CPOL) + cr |= SIFIVE_SPI_SCKMODE_POL; + + writel(cr, spi->regs + SIFIVE_SPI_REG_SCKMODE); + + return 0; +} + +static int sifive_spi_cs_info(struct udevice *bus, uint cs, + struct spi_cs_info *info) +{ + struct sifive_spi *spi = dev_get_priv(bus); + + if (cs >= spi->num_cs) + return -EINVAL; + + return 0; +} + +static void sifive_spi_init_hw(struct sifive_spi *spi) +{ + u32 cs_bits; + + /* probe the number of CS lines */ + spi->cs_inactive = readl(spi->regs + SIFIVE_SPI_REG_CSDEF); + writel(0xffffffffU, spi->regs + SIFIVE_SPI_REG_CSDEF); + cs_bits = readl(spi->regs + SIFIVE_SPI_REG_CSDEF); + writel(spi->cs_inactive, spi->regs + SIFIVE_SPI_REG_CSDEF); + if (!cs_bits) { + printf("Could not auto probe CS lines\n"); + return; + } + + spi->num_cs = ilog2(cs_bits) + 1; + if (spi->num_cs > SIFIVE_SPI_MAX_CS) { + printf("Invalid number of spi slaves\n"); + return; + } + + /* Watermark interrupts are disabled by default */ + writel(0, spi->regs + SIFIVE_SPI_REG_IE); + + /* Set CS/SCK Delays and Inactive Time to defaults */ + writel(SIFIVE_SPI_DELAY0_CSSCK(1) | SIFIVE_SPI_DELAY0_SCKCS(1), + spi->regs + SIFIVE_SPI_REG_DELAY0); + writel(SIFIVE_SPI_DELAY1_INTERCS(1) | SIFIVE_SPI_DELAY1_INTERXFR(0), + spi->regs + SIFIVE_SPI_REG_DELAY1); + + /* Exit specialized memory-mapped SPI flash mode */ + writel(0, spi->regs + SIFIVE_SPI_REG_FCTRL); +} + +static int sifive_spi_probe(struct udevice *bus) +{ + struct sifive_spi *spi = 
dev_get_priv(bus); + struct clk clkdev; + int ret; + + spi->regs = (void *)(ulong)dev_remap_addr(bus); + if (!spi->regs) + return -ENODEV; + + spi->fifo_depth = dev_read_u32_default(bus, + "sifive,fifo-depth", + SIFIVE_SPI_DEFAULT_DEPTH); + + spi->bits_per_word = dev_read_u32_default(bus, + "sifive,max-bits-per-word", + SIFIVE_SPI_DEFAULT_BITS); + + ret = clk_get_by_index(bus, 0, &clkdev); + if (ret) + return ret; + spi->freq = clk_get_rate(&clkdev); + + /* init the sifive spi hw */ + sifive_spi_init_hw(spi); + + return 0; +} + +static const struct dm_spi_ops sifive_spi_ops = { + .xfer = sifive_spi_xfer, + .set_speed = sifive_spi_set_speed, + .set_mode = sifive_spi_set_mode, + .cs_info = sifive_spi_cs_info, +}; + +static const struct udevice_id sifive_spi_ids[] = { + { .compatible = "sifive,spi0" }, + { } +}; + +U_BOOT_DRIVER(sifive_spi) = { + .name = "sifive_spi", + .id = UCLASS_SPI, + .of_match = sifive_spi_ids, + .ops = &sifive_spi_ops, + .priv_auto_alloc_size = sizeof(struct sifive_spi), + .probe = sifive_spi_probe, +}; diff --git a/drivers/spi/stm32_qspi.c b/drivers/spi/stm32_qspi.c index 8d612f22d6..958c394a1a 100644 --- a/drivers/spi/stm32_qspi.c +++ b/drivers/spi/stm32_qspi.c @@ -526,7 +526,6 @@ static const struct dm_spi_ops stm32_qspi_ops = { }; static const struct udevice_id stm32_qspi_ids[] = { - { .compatible = "st,stm32-qspi" }, { .compatible = "st,stm32f469-qspi" }, { } }; diff --git a/drivers/sysreset/Kconfig b/drivers/sysreset/Kconfig index 30aed2c4c1..90c41ab44d 100644 --- a/drivers/sysreset/Kconfig +++ b/drivers/sysreset/Kconfig @@ -50,10 +50,25 @@ config SYSRESET_MICROBLAZE config SYSRESET_PSCI bool "Enable support for PSCI System Reset" depends on ARM_PSCI_FW + select SPL_ARM_PSCI_FW if SPL help Enable PSCI SYSTEM_RESET function call. To use this, PSCI firmware must be running on your system. +config SYSRESET_SOCFPGA + bool "Enable support for Intel SOCFPGA family" + depends on ARCH_SOCFPGA && (TARGET_SOCFPGA_GEN5 || TARGET_SOCFPGA_ARRIA10) + help + This enables the system reset driver support for Intel SOCFPGA SoCs + (Cyclone 5, Arria 5 and Arria 10). + +config SYSRESET_SOCFPGA_S10 + bool "Enable support for Intel SOCFPGA Stratix 10" + depends on ARCH_SOCFPGA && TARGET_SOCFPGA_STRATIX10 + help + This enables the system reset driver support for Intel SOCFPGA + Stratix SoCs. 
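
Note on the new SiFive SPI driver above: sifive_spi_set_speed() encodes the requested rate as a clock divisor, using the relation Fsck = Fin / (2 * (div + 1)), so it picks div = DIV_ROUND_UP(Fin / 2, speed) - 1 to keep the actual rate at or below the request. A minimal host-side sketch of that arithmetic follows; the 50 MHz input clock and 10 MHz target are assumed example values, while DIV_ROUND_UP and the SCKDIV mask mirror what the driver uses.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))
#define SIFIVE_SPI_SCKDIV_DIV_MASK	0xfffU

/* Same computation as sifive_spi_set_speed(), outside the driver */
static unsigned int sifive_sckdiv(unsigned int fin, unsigned int fsck)
{
	return (DIV_ROUND_UP(fin >> 1, fsck) - 1) & SIFIVE_SPI_SCKDIV_DIV_MASK;
}

int main(void)
{
	unsigned int fin = 50000000;	/* assumed peripheral input clock */
	unsigned int want = 10000000;	/* requested SPI clock */
	unsigned int div = sifive_sckdiv(fin, want);

	/* div = 2 here, so the actual SCK is 50 MHz / (2 * 3) = ~8.33 MHz */
	printf("div=%u actual=%u Hz\n", div, fin / (2 * (div + 1)));
	return 0;
}
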
+ config SYSRESET_TI_SCI bool "TI System Control Interface (TI SCI) system reset driver" depends on TI_SCI_PROTOCOL diff --git a/drivers/sysreset/Makefile b/drivers/sysreset/Makefile index 8e1c845dfe..cf01492295 100644 --- a/drivers/sysreset/Makefile +++ b/drivers/sysreset/Makefile @@ -11,6 +11,8 @@ obj-$(CONFIG_SYSRESET_GPIO) += sysreset_gpio.o obj-$(CONFIG_SYSRESET_MCP83XX) += sysreset_mpc83xx.o obj-$(CONFIG_SYSRESET_MICROBLAZE) += sysreset_microblaze.o obj-$(CONFIG_SYSRESET_PSCI) += sysreset_psci.o +obj-$(CONFIG_SYSRESET_SOCFPGA) += sysreset_socfpga.o +obj-$(CONFIG_SYSRESET_SOCFPGA_S10) += sysreset_socfpga_s10.o obj-$(CONFIG_SYSRESET_TI_SCI) += sysreset-ti-sci.o obj-$(CONFIG_SYSRESET_SYSCON) += sysreset_syscon.o obj-$(CONFIG_SYSRESET_WATCHDOG) += sysreset_watchdog.o diff --git a/drivers/sysreset/sysreset_socfpga.c b/drivers/sysreset/sysreset_socfpga.c new file mode 100644 index 0000000000..d6c26a5b23 --- /dev/null +++ b/drivers/sysreset/sysreset_socfpga.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Pepperl+Fuchs + * Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com> + */ + +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <sysreset.h> +#include <asm/io.h> +#include <asm/arch/reset_manager.h> + +struct socfpga_sysreset_data { + struct socfpga_reset_manager *rstmgr_base; +}; + +static int socfpga_sysreset_request(struct udevice *dev, + enum sysreset_t type) +{ + struct socfpga_sysreset_data *data = dev_get_priv(dev); + + switch (type) { + case SYSRESET_WARM: + writel(BIT(RSTMGR_CTRL_SWWARMRSTREQ_LSB), + &data->rstmgr_base->ctrl); + break; + case SYSRESET_COLD: + writel(BIT(RSTMGR_CTRL_SWCOLDRSTREQ_LSB), + &data->rstmgr_base->ctrl); + break; + default: + return -EPROTONOSUPPORT; + } + return -EINPROGRESS; +} + +static int socfpga_sysreset_probe(struct udevice *dev) +{ + struct socfpga_sysreset_data *data = dev_get_priv(dev); + + data->rstmgr_base = devfdt_get_addr_ptr(dev); + return 0; +} + +static struct sysreset_ops socfpga_sysreset = { + .request = socfpga_sysreset_request, +}; + +U_BOOT_DRIVER(sysreset_socfpga) = { + .id = UCLASS_SYSRESET, + .name = "socfpga_sysreset", + .priv_auto_alloc_size = sizeof(struct socfpga_sysreset_data), + .ops = &socfpga_sysreset, + .probe = socfpga_sysreset_probe, +}; diff --git a/drivers/sysreset/sysreset_socfpga_s10.c b/drivers/sysreset/sysreset_socfpga_s10.c new file mode 100644 index 0000000000..9837aadf64 --- /dev/null +++ b/drivers/sysreset/sysreset_socfpga_s10.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Pepperl+Fuchs + * Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com> + */ + +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <sysreset.h> +#include <asm/arch/mailbox_s10.h> + +static int socfpga_sysreset_request(struct udevice *dev, + enum sysreset_t type) +{ + puts("Mailbox: Issuing mailbox cmd REBOOT_HPS\n"); + mbox_reset_cold(); + return -EINPROGRESS; +} + +static struct sysreset_ops socfpga_sysreset = { + .request = socfpga_sysreset_request, +}; + +U_BOOT_DRIVER(sysreset_socfpga) = { + .id = UCLASS_SYSRESET, + .name = "socfpga_sysreset", + .ops = &socfpga_sysreset, +}; diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index a71b9be5fb..bdf8dc6fef 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -17,6 +17,15 @@ config IMX_THERMAL cpufreq is used as the cooling device to throttle CPUs when the passive trip is crossed. 
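
Both SoCFPGA sysreset drivers above return -EINPROGRESS from their request op: the reset has been latched in the reset manager (or handed to the mailbox) but has not taken effect yet, and it is the sysreset uclass walker that keeps trying devices or spins until the board actually resets. A minimal sketch of how a caller would trigger a cold reset through that uclass; sysreset_walk_halt() is assumed to be the standard uclass helper, while SYSRESET_COLD comes straight from the driver code above.

#include <common.h>
#include <sysreset.h>

/*
 * Illustrative only: request a cold reset through the sysreset uclass.
 * sysreset_walk_halt() iterates over every UCLASS_SYSRESET device, calls
 * its request op (e.g. socfpga_sysreset_request() above) and hangs if no
 * device manages to reset the board.
 */
static void example_cold_reset(void)
{
	sysreset_walk_halt(SYSRESET_COLD);
}
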
+config IMX_SCU_THERMAL + bool "Temperature sensor driver for NXP i.MX8" + depends on ARCH_IMX8 + help + Support for Temperature sensors on NXP i.MX8. + It supports one critical trip point and one passive trip point. The + boot is hold to the cool device to throttle CPUs when the passive + trip is crossed + config TI_DRA7_THERMAL bool "Temperature sensor driver for TI dra7xx SOCs" help diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index cc75e387e4..ef2929d180 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -5,4 +5,5 @@ obj-$(CONFIG_DM_THERMAL) += thermal-uclass.o obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o +obj-$(CONFIG_IMX_SCU_THERMAL) += imx_scu_thermal.o obj-$(CONFIG_TI_DRA7_THERMAL) += ti-bandgap.o diff --git a/drivers/thermal/imx_scu_thermal.c b/drivers/thermal/imx_scu_thermal.c new file mode 100644 index 0000000000..7e17377b69 --- /dev/null +++ b/drivers/thermal/imx_scu_thermal.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2019 NXP + */ + +#include <config.h> +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <thermal.h> +#include <dm/device-internal.h> +#include <dm/device.h> +#include <asm/arch/sci/sci.h> + +DECLARE_GLOBAL_DATA_PTR; + +struct imx_sc_thermal_plat { + int critical; + int alert; + int polling_delay; + int id; + bool zone_node; +}; + +static int read_temperature(struct udevice *dev, int *temp) +{ + s16 celsius; + s8 tenths; + int ret; + + sc_rsrc_t *sensor_rsrc = (sc_rsrc_t *)dev_get_driver_data(dev); + + struct imx_sc_thermal_plat *pdata = dev_get_platdata(dev); + + if (!temp) + return -EINVAL; + + ret = sc_misc_get_temp(-1, sensor_rsrc[pdata->id], SC_C_TEMP, + &celsius, &tenths); + if (ret) { + printf("Error: get temperature failed! (error = %d)\n", ret); + return ret; + } + + *temp = celsius * 1000 + tenths * 100; + + return 0; +} + +int imx_sc_thermal_get_temp(struct udevice *dev, int *temp) +{ + struct imx_sc_thermal_plat *pdata = dev_get_platdata(dev); + int cpu_temp = 0; + int ret; + + ret = read_temperature(dev, &cpu_temp); + if (ret) + return ret; + + while (cpu_temp >= pdata->alert) { + printf("CPU Temperature (%dC) has beyond alert (%dC), close to critical (%dC)", + cpu_temp, pdata->alert, pdata->critical); + puts(" waiting...\n"); + mdelay(pdata->polling_delay); + ret = read_temperature(dev, &cpu_temp); + if (ret) + return ret; + } + + *temp = cpu_temp / 1000; + + return 0; +} + +static const struct dm_thermal_ops imx_sc_thermal_ops = { + .get_temp = imx_sc_thermal_get_temp, +}; + +static int imx_sc_thermal_probe(struct udevice *dev) +{ + debug("%s dev name %s\n", __func__, dev->name); + return 0; +} + +static int imx_sc_thermal_bind(struct udevice *dev) +{ + struct imx_sc_thermal_plat *pdata = dev_get_platdata(dev); + int reg, ret; + int offset; + const char *name; + const void *prop; + + debug("%s dev name %s\n", __func__, dev->name); + + prop = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "compatible", + NULL); + if (!prop) + return 0; + + pdata->zone_node = 1; + + reg = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), "tsens-num", 0); + if (reg == 0) { + printf("%s: no temp sensor number provided!\n", __func__); + return -EINVAL; + } + + offset = fdt_subnode_offset(gd->fdt_blob, 0, "thermal-zones"); + fdt_for_each_subnode(offset, gd->fdt_blob, offset) { + /* Bind the subnode to this driver */ + name = fdt_get_name(gd->fdt_blob, offset, NULL); + + ret = device_bind_with_driver_data(dev, dev->driver, name, + dev->driver_data, + offset_to_ofnode(offset), + NULL); + if 
(ret) + printf("Error binding driver '%s': %d\n", + dev->driver->name, ret); + } + return 0; +} + +static int imx_sc_thermal_ofdata_to_platdata(struct udevice *dev) +{ + struct imx_sc_thermal_plat *pdata = dev_get_platdata(dev); + struct fdtdec_phandle_args args; + const char *type; + int ret; + int trips_np; + + debug("%s dev name %s\n", __func__, dev->name); + + if (pdata->zone_node) + return 0; + + ret = fdtdec_parse_phandle_with_args(gd->fdt_blob, dev_of_offset(dev), + "thermal-sensors", + "#thermal-sensor-cells", + 0, 0, &args); + if (ret) + return ret; + + if (args.node != dev_of_offset(dev->parent)) + return -EFAULT; + + if (args.args_count >= 1) + pdata->id = args.args[0]; + else + pdata->id = 0; + + debug("args.args_count %d, id %d\n", args.args_count, pdata->id); + + pdata->polling_delay = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), + "polling-delay", 1000); + + trips_np = fdt_subnode_offset(gd->fdt_blob, dev_of_offset(dev), + "trips"); + fdt_for_each_subnode(trips_np, gd->fdt_blob, trips_np) { + type = fdt_getprop(gd->fdt_blob, trips_np, "type", NULL); + if (type) { + if (strcmp(type, "critical") == 0) { + pdata->critical = fdtdec_get_int(gd->fdt_blob, + trips_np, + "temperature", + 85); + } else if (strcmp(type, "passive") == 0) { + pdata->alert = fdtdec_get_int(gd->fdt_blob, + trips_np, + "temperature", + 80); + } + } + } + + debug("id %d polling_delay %d, critical %d, alert %d\n", pdata->id, + pdata->polling_delay, pdata->critical, pdata->alert); + + return 0; +} + +static const sc_rsrc_t imx8qxp_sensor_rsrc[] = { + SC_R_SYSTEM, SC_R_DRC_0, SC_R_PMIC_0, + SC_R_PMIC_1, SC_R_PMIC_2, +}; + +static const struct udevice_id imx_sc_thermal_ids[] = { + { .compatible = "nxp,imx8qxp-sc-tsens", .data = + (ulong)&imx8qxp_sensor_rsrc, }, + { } +}; + +U_BOOT_DRIVER(imx_sc_thermal) = { + .name = "imx_sc_thermal", + .id = UCLASS_THERMAL, + .ops = &imx_sc_thermal_ops, + .of_match = imx_sc_thermal_ids, + .bind = imx_sc_thermal_bind, + .probe = imx_sc_thermal_probe, + .ofdata_to_platdata = imx_sc_thermal_ofdata_to_platdata, + .platdata_auto_alloc_size = sizeof(struct imx_sc_thermal_plat), + .flags = DM_FLAG_PRE_RELOC, +}; diff --git a/drivers/timer/timer-uclass.c b/drivers/timer/timer-uclass.c index 12ee6eb804..97a4c74851 100644 --- a/drivers/timer/timer-uclass.c +++ b/drivers/timer/timer-uclass.c @@ -48,6 +48,10 @@ static int timer_pre_probe(struct udevice *dev) int err; ulong ret; + /* It is possible that a timer device has a null ofnode */ + if (!dev_of_valid(dev)) + return 0; + err = clk_get_by_index(dev, 0, &timer_clk); if (!err) { ret = clk_get_rate(&timer_clk); diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index b1188bcbf5..ac68aa2d27 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -24,6 +24,7 @@ config USB_XHCI_DWC3 config USB_XHCI_DWC3_OF_SIMPLE bool "DesignWare USB3 DRD Generic OF Simple Glue Layer" depends on DM_USB + default y if ARCH_ROCKCHIP default y if DRA7XX help Support USB2/3 functionality in simple SoC integrations with diff --git a/drivers/usb/host/dwc3-of-simple.c b/drivers/usb/host/dwc3-of-simple.c index b118997f6e..45df614b09 100644 --- a/drivers/usb/host/dwc3-of-simple.c +++ b/drivers/usb/host/dwc3-of-simple.c @@ -92,6 +92,7 @@ static int dwc3_of_simple_remove(struct udevice *dev) static const struct udevice_id dwc3_of_simple_ids[] = { { .compatible = "amlogic,meson-gxl-dwc3" }, + { .compatible = "rockchip,rk3399-dwc3" }, { .compatible = "ti,dwc3" }, { } }; diff --git a/drivers/usb/host/xhci-dwc3.c 
b/drivers/usb/host/xhci-dwc3.c index 83b9f119e7..9e8cae7ae4 100644 --- a/drivers/usb/host/xhci-dwc3.c +++ b/drivers/usb/host/xhci-dwc3.c @@ -118,6 +118,8 @@ static int xhci_dwc3_probe(struct udevice *dev) struct dwc3 *dwc3_reg; enum usb_dr_mode dr_mode; struct xhci_dwc3_platdata *plat = dev_get_platdata(dev); + const char *phy; + u32 reg; int ret; hccr = (struct xhci_hccr *)((uintptr_t)dev_read_addr(dev)); @@ -132,6 +134,24 @@ static int xhci_dwc3_probe(struct udevice *dev) dwc3_core_init(dwc3_reg); + /* Set dwc3 usb2 phy config */ + reg = readl(&dwc3_reg->g_usb2phycfg[0]); + + phy = dev_read_string(dev, "phy_type"); + if (phy && strcmp(phy, "utmi_wide") == 0) { + reg |= DWC3_GUSB2PHYCFG_PHYIF; + reg &= ~DWC3_GUSB2PHYCFG_USBTRDTIM_MASK; + reg |= DWC3_GUSB2PHYCFG_USBTRDTIM_16BIT; + } + + if (dev_read_bool(dev, "snps,dis_enblslpm-quirk")) + reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; + + if (dev_read_bool(dev, "snps,dis-u2-freeclk-exists-quirk")) + reg &= ~DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS; + + writel(reg, &dwc3_reg->g_usb2phycfg[0]); + dr_mode = usb_get_dr_mode(dev_of_offset(dev)); if (dr_mode == USB_DR_MODE_UNKNOWN) /* by default set dual role mode to HOST */ diff --git a/drivers/usb/host/xhci-rockchip.c b/drivers/usb/host/xhci-rockchip.c index f19bea3a91..e7b0dbcca5 100644 --- a/drivers/usb/host/xhci-rockchip.c +++ b/drivers/usb/host/xhci-rockchip.c @@ -167,7 +167,6 @@ static int xhci_usb_remove(struct udevice *dev) } static const struct udevice_id xhci_usb_ids[] = { - { .compatible = "rockchip,rk3399-xhci" }, { .compatible = "rockchip,rk3328-xhci" }, { } }; @@ -187,7 +186,6 @@ U_BOOT_DRIVER(usb_xhci) = { }; static const struct udevice_id usb_phy_ids[] = { - { .compatible = "rockchip,rk3399-usb3-phy" }, { .compatible = "rockchip,rk3328-usb3-phy" }, { } }; diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index c3781b160d..261fa98517 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -484,7 +484,7 @@ config VIDEO_IVYBRIDGE_IGD config VIDEO_FSL_DCU_FB bool "Enable Freescale Display Control Unit" - depends on VIDEO + depends on VIDEO || DM_VIDEO help This enables support for Freescale Display Control Unit (DCU4) module found on Freescale Vybrid and QorIQ family of SoCs. 
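
The xhci-dwc3 change above is a device-tree-driven read-modify-write of g_usb2phycfg: the multi-bit UTMI turnaround field is updated by clearing its full mask before OR-ing in the new value, while the two quirk properties simply clear single bits. A small sketch of that update pattern follows; the FIELD_MASK, FIELD_VAL and QUIRK_BIT numbers are made-up placeholders for illustration only, not the real DWC3 register layout.

#include <stdint.h>
#include <stdio.h>

#define FIELD_MASK	(0xfu << 10)	/* hypothetical 4-bit field at bit 10 */
#define FIELD_VAL	(0x5u << 10)	/* hypothetical new field value */
#define QUIRK_BIT	(1u << 8)	/* hypothetical single-bit enable */

/* Field update: clear the whole mask first, then insert the new value */
static uint32_t update_phycfg(uint32_t reg, int set_field, int clear_quirk)
{
	if (set_field) {
		reg &= ~FIELD_MASK;	/* drop the old field contents */
		reg |= FIELD_VAL;	/* insert the new field value */
	}
	if (clear_quirk)
		reg &= ~QUIRK_BIT;	/* quirk props only clear a bit */

	return reg;
}

int main(void)
{
	uint32_t reg = 0xffffffffu;	/* arbitrary starting register content */

	printf("0x%08x\n", update_phycfg(reg, 1, 1));
	return 0;
}
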
diff --git a/drivers/video/bcm2835.c b/drivers/video/bcm2835.c index bc41090aed..1d2eda084c 100644 --- a/drivers/video/bcm2835.c +++ b/drivers/video/bcm2835.c @@ -19,13 +19,15 @@ static int bcm2835_video_probe(struct udevice *dev) debug("bcm2835: Query resolution...\n"); ret = bcm2835_get_video_size(&w, &h); - if (ret) + if (ret || w == 0 || h == 0) return -EIO; debug("bcm2835: Setting up display for %d x %d\n", w, h); ret = bcm2835_set_video_params(&w, &h, 32, BCM2835_MBOX_PIXEL_ORDER_RGB, BCM2835_MBOX_ALPHA_MODE_IGNORED, &fb_base, &fb_size, &pitch); + if (ret) + return -EIO; debug("bcm2835: Final resolution is %d x %d\n", w, h); diff --git a/drivers/video/display-uclass.c b/drivers/video/display-uclass.c index 99ef5e76f5..1a29ce5d85 100644 --- a/drivers/video/display-uclass.c +++ b/drivers/video/display-uclass.c @@ -37,6 +37,17 @@ int display_enable(struct udevice *dev, int panel_bpp, return 0; } +static bool display_mode_valid(void *priv, const struct display_timing *timing) +{ + struct udevice *dev = priv; + struct dm_display_ops *ops = display_get_ops(dev); + + if (ops && ops->mode_valid) + return ops->mode_valid(dev, timing); + + return true; +} + int display_read_timing(struct udevice *dev, struct display_timing *timing) { struct dm_display_ops *ops = display_get_ops(dev); @@ -53,7 +64,9 @@ int display_read_timing(struct udevice *dev, struct display_timing *timing) if (ret < 0) return ret; - return edid_get_timing(buf, ret, timing, &panel_bits_per_colour); + return edid_get_timing_validate(buf, ret, timing, + &panel_bits_per_colour, + display_mode_valid, dev); } bool display_in_use(struct udevice *dev) diff --git a/drivers/video/dw_hdmi.c b/drivers/video/dw_hdmi.c index 463436edf3..bf74d6adf2 100644 --- a/drivers/video/dw_hdmi.c +++ b/drivers/video/dw_hdmi.c @@ -8,6 +8,7 @@ #include <common.h> #include <fdtdec.h> #include <asm/io.h> +#include <i2c.h> #include <media_bus_format.h> #include "dw_hdmi.h" @@ -812,6 +813,18 @@ static int hdmi_read_edid(struct dw_hdmi *hdmi, int block, u8 *buff) u32 trytime = 5; u32 n; + if (CONFIG_IS_ENABLED(DM_I2C) && hdmi->ddc_bus) { + struct udevice *chip; + + edid_read_err = i2c_get_chip(hdmi->ddc_bus, + HDMI_I2CM_SLAVE_DDC_ADDR, + 1, &chip); + if (edid_read_err) + return edid_read_err; + + return dm_i2c_read(chip, shift, buff, HDMI_EDID_BLOCK_SIZE); + } + /* set ddc i2c clk which devided from ddc_clk to 100khz */ hdmi_write(hdmi, hdmi->i2c_clk_high, HDMI_I2CM_SS_SCL_HCNT_0_ADDR); hdmi_write(hdmi, hdmi->i2c_clk_low, HDMI_I2CM_SS_SCL_LCNT_0_ADDR); diff --git a/drivers/video/fsl_dcu_fb.c b/drivers/video/fsl_dcu_fb.c index 9f6e7f83b0..add64b85b5 100644 --- a/drivers/video/fsl_dcu_fb.c +++ b/drivers/video/fsl_dcu_fb.c @@ -1,16 +1,19 @@ // SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2014 Freescale Semiconductor, Inc. 
+ * Copyright 2019 Toradex AG * * FSL DCU Framebuffer driver */ #include <asm/io.h> #include <common.h> +#include <dm.h> #include <fdt_support.h> #include <fsl_dcu_fb.h> #include <linux/fb.h> #include <malloc.h> +#include <video.h> #include <video_fb.h> #include "videomodes.h" @@ -218,8 +221,6 @@ struct dcu_reg { u32 ctrldescl[DCU_LAYER_MAX_NUM][16]; }; -static struct fb_info info; - static void reset_total_layers(void) { struct dcu_reg *regs = (struct dcu_reg *)CONFIG_SYS_DCU_ADDR; @@ -240,20 +241,22 @@ static void reset_total_layers(void) } } -static int layer_ctrldesc_init(int index, u32 pixel_format) +static int layer_ctrldesc_init(struct fb_info fbinfo, + int index, u32 pixel_format) { struct dcu_reg *regs = (struct dcu_reg *)CONFIG_SYS_DCU_ADDR; unsigned int bpp = BPP_24_RGB888; dcu_write32(®s->ctrldescl[index][0], - DCU_CTRLDESCLN_1_HEIGHT(info.var.yres) | - DCU_CTRLDESCLN_1_WIDTH(info.var.xres)); + DCU_CTRLDESCLN_1_HEIGHT(fbinfo.var.yres) | + DCU_CTRLDESCLN_1_WIDTH(fbinfo.var.xres)); dcu_write32(®s->ctrldescl[index][1], DCU_CTRLDESCLN_2_POSY(0) | DCU_CTRLDESCLN_2_POSX(0)); - dcu_write32(®s->ctrldescl[index][2], (unsigned int)info.screen_base); + dcu_write32(®s->ctrldescl[index][2], + (unsigned int)fbinfo.screen_base); switch (pixel_format) { case 16: @@ -294,42 +297,46 @@ static int layer_ctrldesc_init(int index, u32 pixel_format) return 0; } -int fsl_dcu_init(unsigned int xres, unsigned int yres, - unsigned int pixel_format) +int fsl_dcu_init(struct fb_info *fbinfo, unsigned int xres, + unsigned int yres, unsigned int pixel_format) { struct dcu_reg *regs = (struct dcu_reg *)CONFIG_SYS_DCU_ADDR; unsigned int div, mode; +/* + * When DM_VIDEO is enabled reservation of framebuffer is done + * in advance during bind() call. + */ +#if !CONFIG_IS_ENABLED(DM_VIDEO) + fbinfo->screen_size = fbinfo->var.xres * fbinfo->var.yres * + (fbinfo->var.bits_per_pixel / 8); - info.screen_size = - info.var.xres * info.var.yres * (info.var.bits_per_pixel / 8); - - if (info.screen_size > CONFIG_VIDEO_FSL_DCU_MAX_FB_SIZE_MB) { - info.screen_size = 0; + if (fbinfo->screen_size > CONFIG_VIDEO_FSL_DCU_MAX_FB_SIZE_MB) { + fbinfo->screen_size = 0; return -ENOMEM; } - /* Reserve framebuffer at the end of memory */ gd->fb_base = gd->bd->bi_dram[0].start + - gd->bd->bi_dram[0].size - info.screen_size; - info.screen_base = (char *)gd->fb_base; + gd->bd->bi_dram[0].size - fbinfo->screen_size; + fbinfo->screen_base = (char *)gd->fb_base; - memset(info.screen_base, 0, info.screen_size); + memset(fbinfo->screen_base, 0, fbinfo->screen_size); +#endif reset_total_layers(); dcu_write32(®s->disp_size, - DCU_DISP_SIZE_DELTA_Y(info.var.yres) | - DCU_DISP_SIZE_DELTA_X(info.var.xres / 16)); + DCU_DISP_SIZE_DELTA_Y(fbinfo->var.yres) | + DCU_DISP_SIZE_DELTA_X(fbinfo->var.xres / 16)); dcu_write32(®s->hsyn_para, - DCU_HSYN_PARA_BP(info.var.left_margin) | - DCU_HSYN_PARA_PW(info.var.hsync_len) | - DCU_HSYN_PARA_FP(info.var.right_margin)); + DCU_HSYN_PARA_BP(fbinfo->var.left_margin) | + DCU_HSYN_PARA_PW(fbinfo->var.hsync_len) | + DCU_HSYN_PARA_FP(fbinfo->var.right_margin)); dcu_write32(®s->vsyn_para, - DCU_VSYN_PARA_BP(info.var.upper_margin) | - DCU_VSYN_PARA_PW(info.var.vsync_len) | - DCU_VSYN_PARA_FP(info.var.lower_margin)); + DCU_VSYN_PARA_BP(fbinfo->var.upper_margin) | + DCU_VSYN_PARA_PW(fbinfo->var.vsync_len) | + DCU_VSYN_PARA_FP(fbinfo->var.lower_margin)); dcu_write32(®s->synpol, DCU_SYN_POL_INV_PXCK_FALL | @@ -352,9 +359,9 @@ int fsl_dcu_init(unsigned int xres, unsigned int yres, mode = dcu_read32(®s->mode); 
dcu_write32(®s->mode, mode | DCU_MODE_NORMAL); - layer_ctrldesc_init(0, pixel_format); + layer_ctrldesc_init(*fbinfo, 0, pixel_format); - div = dcu_set_pixel_clock(info.var.pixclock); + div = dcu_set_pixel_clock(fbinfo->var.pixclock); dcu_write32(®s->div_ratio, (div - 1)); dcu_write32(®s->update_mode, DCU_UPDATE_MODE_READREG); @@ -367,24 +374,26 @@ ulong board_get_usable_ram_top(ulong total_size) return gd->ram_top - CONFIG_VIDEO_FSL_DCU_MAX_FB_SIZE_MB; } -void *video_hw_init(void) +int fsl_probe_common(struct fb_info *fbinfo, unsigned int *win_x, + unsigned int *win_y) { - static GraphicDevice ctfb; const char *options; unsigned int depth = 0, freq = 0; + struct fb_videomode *fsl_dcu_mode_db = &fsl_dcu_mode_480_272; - if (!video_get_video_mode(&ctfb.winSizeX, &ctfb.winSizeY, &depth, &freq, + if (!video_get_video_mode(win_x, win_y, &depth, &freq, &options)) - return NULL; + return -EINVAL; /* Find the monitor port, which is a required option */ if (!options) - return NULL; + return -EINVAL; + if (strncmp(options, "monitor=", 8) != 0) - return NULL; + return -EINVAL; - switch (RESOLUTION(ctfb.winSizeX, ctfb.winSizeY)) { + switch (RESOLUTION(*win_x, *win_y)) { case RESOLUTION(480, 272): fsl_dcu_mode_db = &fsl_dcu_mode_480_272; break; @@ -402,39 +411,31 @@ void *video_hw_init(void) break; default: printf("unsupported resolution %ux%u\n", - ctfb.winSizeX, ctfb.winSizeY); + *win_x, *win_y); } - info.var.xres = fsl_dcu_mode_db->xres; - info.var.yres = fsl_dcu_mode_db->yres; - info.var.bits_per_pixel = 32; - info.var.pixclock = fsl_dcu_mode_db->pixclock; - info.var.left_margin = fsl_dcu_mode_db->left_margin; - info.var.right_margin = fsl_dcu_mode_db->right_margin; - info.var.upper_margin = fsl_dcu_mode_db->upper_margin; - info.var.lower_margin = fsl_dcu_mode_db->lower_margin; - info.var.hsync_len = fsl_dcu_mode_db->hsync_len; - info.var.vsync_len = fsl_dcu_mode_db->vsync_len; - info.var.sync = fsl_dcu_mode_db->sync; - info.var.vmode = fsl_dcu_mode_db->vmode; - info.fix.line_length = info.var.xres * info.var.bits_per_pixel / 8; - - if (platform_dcu_init(ctfb.winSizeX, ctfb.winSizeY, - options + 8, fsl_dcu_mode_db) < 0) - return NULL; - - ctfb.frameAdrs = (unsigned int)info.screen_base; - ctfb.plnSizeX = ctfb.winSizeX; - ctfb.plnSizeY = ctfb.winSizeY; - - ctfb.gdfBytesPP = 4; - ctfb.gdfIndex = GDF_32BIT_X888RGB; - - ctfb.memSize = info.screen_size; - - return &ctfb; + fbinfo->var.xres = fsl_dcu_mode_db->xres; + fbinfo->var.yres = fsl_dcu_mode_db->yres; + fbinfo->var.bits_per_pixel = 32; + fbinfo->var.pixclock = fsl_dcu_mode_db->pixclock; + fbinfo->var.left_margin = fsl_dcu_mode_db->left_margin; + fbinfo->var.right_margin = fsl_dcu_mode_db->right_margin; + fbinfo->var.upper_margin = fsl_dcu_mode_db->upper_margin; + fbinfo->var.lower_margin = fsl_dcu_mode_db->lower_margin; + fbinfo->var.hsync_len = fsl_dcu_mode_db->hsync_len; + fbinfo->var.vsync_len = fsl_dcu_mode_db->vsync_len; + fbinfo->var.sync = fsl_dcu_mode_db->sync; + fbinfo->var.vmode = fsl_dcu_mode_db->vmode; + fbinfo->fix.line_length = fbinfo->var.xres * + fbinfo->var.bits_per_pixel / 8; + + return platform_dcu_init(fbinfo, *win_x, *win_y, + options + 8, fsl_dcu_mode_db); } +#ifndef CONFIG_DM_VIDEO +static struct fb_info info; + #if defined(CONFIG_OF_BOARD_SETUP) int fsl_dcu_fixedfb_setup(void *blob) { @@ -457,3 +458,89 @@ int fsl_dcu_fixedfb_setup(void *blob) return 0; } #endif + +void *video_hw_init(void) +{ + static GraphicDevice ctfb; + + if (fsl_probe_common(&info, &ctfb.winSizeX, &ctfb.winSizeY) < 0) + return NULL; + + ctfb.frameAdrs = 
(unsigned int)info.screen_base; + ctfb.plnSizeX = ctfb.winSizeX; + ctfb.plnSizeY = ctfb.winSizeY; + + ctfb.gdfBytesPP = 4; + ctfb.gdfIndex = GDF_32BIT_X888RGB; + + ctfb.memSize = info.screen_size; + + return &ctfb; +} + +#else /* ifndef CONFIG_DM_VIDEO */ + +static int fsl_dcu_video_probe(struct udevice *dev) +{ + struct video_uc_platdata *plat = dev_get_uclass_platdata(dev); + struct video_priv *uc_priv = dev_get_uclass_priv(dev); + struct fb_info fbinfo = { 0 }; + unsigned int win_x; + unsigned int win_y; + u32 fb_start, fb_end; + int ret = 0; + + fb_start = plat->base & ~(MMU_SECTION_SIZE - 1); + fb_end = plat->base + plat->size; + fb_end = ALIGN(fb_end, 1 << MMU_SECTION_SHIFT); + + fbinfo.screen_base = (char *)fb_start; + fbinfo.screen_size = plat->size; + + ret = fsl_probe_common(&fbinfo, &win_x, &win_y); + if (ret < 0) + return ret; + + uc_priv->bpix = VIDEO_BPP32; + uc_priv->xsize = win_x; + uc_priv->ysize = win_y; + + /* Enable dcache for the frame buffer */ + mmu_set_region_dcache_behaviour(fb_start, fb_end - fb_start, + DCACHE_WRITEBACK); + video_set_flush_dcache(dev, true); + return ret; +} + +static int fsl_dcu_video_bind(struct udevice *dev) +{ + struct video_uc_platdata *plat = dev_get_uclass_platdata(dev); + unsigned int win_x; + unsigned int win_y; + unsigned int depth = 0, freq = 0; + const char *options; + int ret = 0; + + ret = video_get_video_mode(&win_x, &win_y, &depth, &freq, &options); + if (ret < 0) + return ret; + + plat->size = win_x * win_y * 32; + + return 0; +} + +static const struct udevice_id fsl_dcu_video_ids[] = { + { .compatible = "fsl,vf610-dcu" }, + { /* sentinel */ } +}; + +U_BOOT_DRIVER(fsl_dcu_video) = { + .name = "fsl_dcu_video", + .id = UCLASS_VIDEO, + .of_match = fsl_dcu_video_ids, + .bind = fsl_dcu_video_bind, + .probe = fsl_dcu_video_probe, + .flags = DM_FLAG_PRE_RELOC, +}; +#endif /* ifndef CONFIG_DM_VIDEO */ diff --git a/drivers/video/meson/meson_dw_hdmi.c b/drivers/video/meson/meson_dw_hdmi.c index 483c93f6b6..9831d978fc 100644 --- a/drivers/video/meson/meson_dw_hdmi.c +++ b/drivers/video/meson/meson_dw_hdmi.c @@ -375,6 +375,9 @@ static int meson_dw_hdmi_probe(struct udevice *dev) } #endif + uclass_get_device_by_phandle(UCLASS_I2C, dev, "ddc-i2c-bus", + &priv->hdmi.ddc_bus); + ret = reset_get_bulk(dev, &resets); if (ret) return ret; @@ -426,9 +429,16 @@ static int meson_dw_hdmi_probe(struct udevice *dev) return ret; } +static bool meson_dw_hdmi_mode_valid(struct udevice *dev, + const struct display_timing *timing) +{ + return meson_venc_hdmi_supported_mode(timing); +} + static const struct dm_display_ops meson_dw_hdmi_ops = { .read_edid = meson_dw_hdmi_read_edid, .enable = meson_dw_hdmi_enable, + .mode_valid = meson_dw_hdmi_mode_valid, }; static const struct udevice_id meson_dw_hdmi_ids[] = { diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index f02ba20138..6c9a7c05e8 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c @@ -271,6 +271,42 @@ dealloc_fb: } #else /* ifndef CONFIG_DM_VIDEO */ +static int mxs_of_get_timings(struct udevice *dev, + struct display_timing *timings, + u32 *bpp) +{ + int ret = 0; + u32 display_phandle; + ofnode display_node; + + ret = ofnode_read_u32(dev_ofnode(dev), "display", &display_phandle); + if (ret) { + dev_err(dev, "required display property isn't provided\n"); + return -EINVAL; + } + + display_node = ofnode_get_by_phandle(display_phandle); + if (!ofnode_valid(display_node)) { + dev_err(dev, "failed to find display subnode\n"); + return -EINVAL; + } + + ret = ofnode_read_u32(display_node, 
"bits-per-pixel", bpp); + if (ret) { + dev_err(dev, + "required bits-per-pixel property isn't provided\n"); + return -EINVAL; + } + + ret = ofnode_decode_display_timing(display_node, 0, timings); + if (ret) { + dev_err(dev, "failed to get any display timings\n"); + return -EINVAL; + } + + return ret; +} + static int mxs_video_probe(struct udevice *dev) { struct video_uc_platdata *plat = dev_get_uclass_platdata(dev); @@ -278,18 +314,16 @@ static int mxs_video_probe(struct udevice *dev) struct ctfb_res_modes mode; struct display_timing timings; - int bpp = -1; + u32 bpp = 0; u32 fb_start, fb_end; int ret; debug("%s() plat: base 0x%lx, size 0x%x\n", __func__, plat->base, plat->size); - ret = ofnode_decode_display_timing(dev_ofnode(dev), 0, &timings); - if (ret) { - dev_err(dev, "failed to get any display timings\n"); - return -EINVAL; - } + ret = mxs_of_get_timings(dev, &timings, &bpp); + if (ret) + return ret; mode.xres = timings.hactive.typ; mode.yres = timings.vactive.typ; @@ -301,13 +335,12 @@ static int mxs_video_probe(struct udevice *dev) mode.vsync_len = timings.vsync_len.typ; mode.pixclock = HZ2PS(timings.pixelclock.typ); - bpp = BITS_PP; - ret = mxs_probe_common(&mode, bpp, plat->base); if (ret) return ret; switch (bpp) { + case 32: case 24: case 18: uc_priv->bpix = VIDEO_BPP32; @@ -341,15 +374,32 @@ static int mxs_video_bind(struct udevice *dev) { struct video_uc_platdata *plat = dev_get_uclass_platdata(dev); struct display_timing timings; + u32 bpp = 0; + u32 bytes_pp = 0; int ret; - ret = ofnode_decode_display_timing(dev_ofnode(dev), 0, &timings); - if (ret) { - dev_err(dev, "failed to get any display timings\n"); + ret = mxs_of_get_timings(dev, &timings, &bpp); + if (ret) + return ret; + + switch (bpp) { + case 32: + case 24: + case 18: + bytes_pp = 4; + break; + case 16: + bytes_pp = 2; + break; + case 8: + bytes_pp = 1; + break; + default: + dev_err(dev, "invalid bpp specified (bpp = %i)\n", bpp); return -EINVAL; } - plat->size = timings.hactive.typ * timings.vactive.typ * BYTES_PP; + plat->size = timings.hactive.typ * timings.vactive.typ * bytes_pp; return 0; } diff --git a/drivers/video/rockchip/rk3288_hdmi.c b/drivers/video/rockchip/rk3288_hdmi.c index 315d3adf27..3d25ce924c 100644 --- a/drivers/video/rockchip/rk3288_hdmi.c +++ b/drivers/video/rockchip/rk3288_hdmi.c @@ -33,7 +33,7 @@ static int rk3288_hdmi_enable(struct udevice *dev, int panel_bpp, /* hdmi data from vop id */ rk_clrsetreg(&grf->soc_con6, 1 << 4, (vop_id == 1) ? 
(1 << 4) : 0); - return 0; + return dw_hdmi_enable(&priv->hdmi, edid); } static int rk3288_hdmi_ofdata_to_platdata(struct udevice *dev) diff --git a/drivers/video/rockchip/rk_hdmi.c b/drivers/video/rockchip/rk_hdmi.c index 51931ceefa..5b44a7e8c9 100644 --- a/drivers/video/rockchip/rk_hdmi.c +++ b/drivers/video/rockchip/rk_hdmi.c @@ -93,6 +93,9 @@ int rk_hdmi_ofdata_to_platdata(struct udevice *dev) priv->grf = syscon_get_first_range(ROCKCHIP_SYSCON_GRF); + uclass_get_device_by_phandle(UCLASS_I2C, dev, "ddc-i2c-bus", + &hdmi->ddc_bus); + return 0; } diff --git a/drivers/video/simple_panel.c b/drivers/video/simple_panel.c index 7a968e740c..c3c0e84732 100644 --- a/drivers/video/simple_panel.c +++ b/drivers/video/simple_panel.c @@ -105,6 +105,7 @@ static const struct udevice_id simple_panel_ids[] = { { .compatible = "auo,b133xtn01" }, { .compatible = "auo,b116xw03" }, { .compatible = "auo,b133htn01" }, + { .compatible = "lg,lb070wv8" }, { } }; diff --git a/drivers/video/sunxi/sunxi_dw_hdmi.c b/drivers/video/sunxi/sunxi_dw_hdmi.c index 6fe1aa7ee4..cec23295b5 100644 --- a/drivers/video/sunxi/sunxi_dw_hdmi.c +++ b/drivers/video/sunxi/sunxi_dw_hdmi.c @@ -373,6 +373,9 @@ static int sunxi_dw_hdmi_probe(struct udevice *dev) priv->hdmi.phy_set = sunxi_dw_hdmi_phy_cfg; priv->mux = uc_plat->source_id; + uclass_get_device_by_phandle(UCLASS_I2C, dev, "ddc-i2c-bus", + &priv->hdmi.ddc_bus); + dw_hdmi_init(&priv->hdmi); return 0; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index ee0ddffe73..ccda432f49 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -28,7 +28,7 @@ config BCM2835_WDT config IMX_WATCHDOG bool "Enable Watchdog Timer support for IMX and LSCH2 of NXP" - select HW_WATCHDOG + select HW_WATCHDOG if !WDT help Select this to enable the IMX and LSCH2 of Layerscape watchdog driver. @@ -169,4 +169,11 @@ config WDT_TANGIER Intel Tangier SoC. If you're using a board with Intel Tangier SoC, say Y here. +config SPL_WDT + bool "Enable driver model for watchdog timer drivers in SPL" + depends on SPL_DM + help + Enable driver model for watchdog timer in SPL. + This is similar to CONFIG_WDT in U-Boot. 
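
With CONFIG_WDT (or the new CONFIG_SPL_WDT) enabled, IMX_WATCHDOG no longer selects HW_WATCHDOG, and the watchdog is driven through the driver-model wdt uclass rather than the legacy hw_watchdog_*() hooks. A minimal sketch of that calling convention follows; uclass_get_device(), wdt_start() and wdt_reset() are assumed to be the generic uclass helpers, and the 60 s timeout is an illustrative value.

#include <common.h>
#include <dm.h>
#include <wdt.h>

/* Sketch: start and service the first DM watchdog (e.g. imx_wdt below) */
static int start_first_watchdog(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device(UCLASS_WDT, 0, &dev);
	if (ret)
		return ret;

	ret = wdt_start(dev, 60000 /* ms */, 0);
	if (ret)
		return ret;

	return wdt_reset(dev);	/* kick it once */
}
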
+ endmenu diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 68c989aa0b..97aa6a836c 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -15,7 +15,7 @@ obj-$(CONFIG_XILINX_TB_WATCHDOG) += xilinx_tb_wdt.o obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o obj-$(CONFIG_DESIGNWARE_WATCHDOG) += designware_wdt.o obj-$(CONFIG_ULP_WATCHDOG) += ulp_wdog.o -obj-$(CONFIG_WDT) += wdt-uclass.o +obj-$(CONFIG_$(SPL_TPL_)WDT) += wdt-uclass.o obj-$(CONFIG_WDT_SANDBOX) += sandbox_wdt.o obj-$(CONFIG_WDT_ARMADA_37XX) += armada-37xx-wdt.o obj-$(CONFIG_WDT_ASPEED) += ast_wdt.o diff --git a/drivers/watchdog/imx_watchdog.c b/drivers/watchdog/imx_watchdog.c index 14cc618074..53a3e9f5c7 100644 --- a/drivers/watchdog/imx_watchdog.c +++ b/drivers/watchdog/imx_watchdog.c @@ -5,7 +5,9 @@ */ #include <common.h> +#include <dm.h> #include <asm/io.h> +#include <wdt.h> #include <watchdog.h> #include <asm/arch/imx-regs.h> #ifdef CONFIG_FSL_LSCH2 @@ -13,20 +15,40 @@ #endif #include <fsl_wdog.h> -#ifdef CONFIG_IMX_WATCHDOG -void hw_watchdog_reset(void) +static void imx_watchdog_expire_now(struct watchdog_regs *wdog) +{ + clrsetbits_le16(&wdog->wcr, WCR_WT_MSK, WCR_WDE); + + writew(0x5555, &wdog->wsr); + writew(0xaaaa, &wdog->wsr); /* load minimum 1/2 second timeout */ + while (1) { + /* + * spin for .5 seconds before reset + */ + } +} + +#if !defined(CONFIG_IMX_WATCHDOG) || \ + (defined(CONFIG_IMX_WATCHDOG) && !CONFIG_IS_ENABLED(WDT)) +void __attribute__((weak)) reset_cpu(ulong addr) { -#ifndef CONFIG_WATCHDOG_RESET_DISABLE struct watchdog_regs *wdog = (struct watchdog_regs *)WDOG1_BASE_ADDR; + imx_watchdog_expire_now(wdog); +} +#endif + +#if defined(CONFIG_IMX_WATCHDOG) +static void imx_watchdog_reset(struct watchdog_regs *wdog) +{ +#ifndef CONFIG_WATCHDOG_RESET_DISABLE writew(0x5555, &wdog->wsr); writew(0xaaaa, &wdog->wsr); #endif /* CONFIG_WATCHDOG_RESET_DISABLE*/ } -void hw_watchdog_init(void) +static void imx_watchdog_init(struct watchdog_regs *wdog) { - struct watchdog_regs *wdog = (struct watchdog_regs *)WDOG1_BASE_ADDR; u16 timeout; /* @@ -44,21 +66,86 @@ void hw_watchdog_init(void) writew(WCR_WDZST | WCR_WDBG | WCR_WDE | WCR_WDT | WCR_SRS | WCR_WDA | SET_WCR_WT(timeout), &wdog->wcr); #endif /* CONFIG_FSL_LSCH2*/ - hw_watchdog_reset(); + imx_watchdog_reset(wdog); } -#endif -void __attribute__((weak)) reset_cpu(ulong addr) +#if !CONFIG_IS_ENABLED(WDT) +void hw_watchdog_reset(void) { struct watchdog_regs *wdog = (struct watchdog_regs *)WDOG1_BASE_ADDR; - clrsetbits_le16(&wdog->wcr, WCR_WT_MSK, WCR_WDE); + imx_watchdog_reset(wdog); +} - writew(0x5555, &wdog->wsr); - writew(0xaaaa, &wdog->wsr); /* load minimum 1/2 second timeout */ - while (1) { - /* - * spin for .5 seconds before reset - */ - } +void hw_watchdog_init(void) +{ + struct watchdog_regs *wdog = (struct watchdog_regs *)WDOG1_BASE_ADDR; + + imx_watchdog_init(wdog); +} +#else +struct imx_wdt_priv { + void __iomem *base; +}; + +static int imx_wdt_reset(struct udevice *dev) +{ + struct imx_wdt_priv *priv = dev_get_priv(dev); + + imx_watchdog_reset(priv->base); + + return 0; +} + +static int imx_wdt_expire_now(struct udevice *dev, ulong flags) +{ + struct imx_wdt_priv *priv = dev_get_priv(dev); + + imx_watchdog_expire_now(priv->base); + hang(); + + return 0; +} + +static int imx_wdt_start(struct udevice *dev, u64 timeout, ulong flags) +{ + struct imx_wdt_priv *priv = dev_get_priv(dev); + + imx_watchdog_init(priv->base); + + return 0; +} + +static int imx_wdt_probe(struct udevice *dev) +{ + struct imx_wdt_priv *priv = 
dev_get_priv(dev); + + priv->base = dev_read_addr_ptr(dev); + if (!priv->base) + return -ENOENT; + + return 0; } + +static const struct wdt_ops imx_wdt_ops = { + .start = imx_wdt_start, + .reset = imx_wdt_reset, + .expire_now = imx_wdt_expire_now, +}; + +static const struct udevice_id imx_wdt_ids[] = { + { .compatible = "fsl,imx21-wdt" }, + {} +}; + +U_BOOT_DRIVER(imx_wdt) = { + .name = "imx_wdt", + .id = UCLASS_WDT, + .of_match = imx_wdt_ids, + .probe = imx_wdt_probe, + .ops = &imx_wdt_ops, + .priv_auto_alloc_size = sizeof(struct imx_wdt_priv), + .flags = DM_FLAG_PRE_RELOC, +}; +#endif +#endif |
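
Whether it is reached through the legacy hw_watchdog_reset() hook or the new imx_wdt_reset() op, servicing the i.MX watchdog comes down to the same two-word handshake on the WSR register that imx_watchdog_reset() above performs. A standalone sketch of that reload sequence follows; the 0x5555/0xaaaa keys are taken from the driver, while fake_wsr and the local writew() are stand-ins for the memory-mapped register and the I/O accessor.

#include <stdint.h>
#include <stdio.h>

static volatile uint16_t fake_wsr;	/* stand-in for wdog->wsr */

static void writew(uint16_t val, volatile uint16_t *addr)
{
	*addr = val;			/* real driver uses the I/O accessor */
}

/* The i.MX watchdog reload ("service") handshake */
static void imx_watchdog_service(volatile uint16_t *wsr)
{
	writew(0x5555, wsr);		/* first key of the reload sequence */
	writew(0xaaaa, wsr);		/* second key; the counter restarts here */
}

int main(void)
{
	imx_watchdog_service(&fake_wsr);
	printf("last WSR write: 0x%04x\n", fake_wsr);
	return 0;
}
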