Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                          |    2
-rw-r--r--  drivers/Makefile                         |    1
-rw-r--r--  drivers/clk/uniphier/clk-uniphier-sys.c  |   14
-rw-r--r--  drivers/gpio/pca953x_gpio.c              |    1
-rw-r--r--  drivers/pci_endpoint/Kconfig             |   34
-rw-r--r--  drivers/pci_endpoint/Makefile            |    8
-rw-r--r--  drivers/pci_endpoint/pci_ep-uclass.c     |  211
-rw-r--r--  drivers/pci_endpoint/pcie-cadence-ep.c   |  177
-rw-r--r--  drivers/pci_endpoint/pcie-cadence.h      |  309
-rw-r--r--  drivers/pci_endpoint/sandbox-pci_ep.c    |  182
-rw-r--r--  drivers/spi/Kconfig                      |    8
-rw-r--r--  drivers/spi/Makefile                     |    1
-rw-r--r--  drivers/spi/kirkwood_spi.c               |   52
-rw-r--r--  drivers/spi/uniphier_spi.c               |  413
-rw-r--r--  drivers/tpm/tpm2_tis_spi.c               |   12
15 files changed, 1381 insertions(+), 44 deletions(-)
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 96ff4f566a..5a9d01b508 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -66,6 +66,8 @@ source "drivers/nvme/Kconfig"
source "drivers/pci/Kconfig"
+source "drivers/pci_endpoint/Kconfig"
+
source "drivers/pch/Kconfig"
source "drivers/pcmcia/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 6635dabd2c..603aa98590 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_FPGA) += fpga/
obj-y += misc/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_NVME) += nvme/
+obj-$(CONFIG_PCI_ENDPOINT) += pci_endpoint/
obj-y += pcmcia/
obj-y += dfu/
obj-$(CONFIG_PCH) += pch/
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 487b43ebda..c627a4bf85 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -18,8 +18,8 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
#if defined(CONFIG_ARCH_UNIPHIER_LD4) || defined(CONFIG_ARCH_UNIPHIER_SLD8) ||\
defined(CONFIG_ARCH_UNIPHIER_PRO4) || defined(CONFIG_ARCH_UNIPHIER_PRO5) ||\
defined(CONFIG_ARCH_UNIPHIER_PXS2) || defined(CONFIG_ARCH_UNIPHIER_LD6B)
- UNIPHIER_LD4_SYS_CLK_NAND(2),
- UNIPHIER_CLK_RATE(3, 200000000),
+ UNIPHIER_LD4_SYS_CLK_NAND(2), /* nand */
+ UNIPHIER_CLK_RATE(3, 200000000), /* nand-4x */
UNIPHIER_CLK_GATE_SIMPLE(6, 0x2104, 12), /* ether (Pro4, PXs2) */
UNIPHIER_CLK_GATE_SIMPLE(7, 0x2104, 5), /* ether-gb (Pro4) */
UNIPHIER_CLK_GATE_SIMPLE(8, 0x2104, 10), /* stdmac */
@@ -35,8 +35,9 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
#if defined(CONFIG_ARCH_UNIPHIER_LD11) || defined(CONFIG_ARCH_UNIPHIER_LD20)
- UNIPHIER_LD11_SYS_CLK_NAND(2),
- UNIPHIER_CLK_RATE(3, 200000000),
+ UNIPHIER_LD11_SYS_CLK_NAND(2), /* nand */
+ UNIPHIER_CLK_RATE(3, 200000000), /* nand-4x */
+ UNIPHIER_CLK_GATE_SIMPLE(4, 0x210c, 2), /* emmc */
UNIPHIER_CLK_GATE_SIMPLE(6, 0x210c, 6), /* ether */
UNIPHIER_CLK_GATE_SIMPLE(8, 0x210c, 8), /* stdmac */
UNIPHIER_CLK_GATE_SIMPLE(14, 0x210c, 14), /* usb30 (LD20) */
@@ -48,8 +49,9 @@ const struct uniphier_clk_data uniphier_ld20_sys_clk_data[] = {
const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
#if defined(CONFIG_ARCH_UNIPHIER_PXS3)
- UNIPHIER_LD11_SYS_CLK_NAND(2),
- UNIPHIER_CLK_RATE(3, 200000000),
+ UNIPHIER_LD11_SYS_CLK_NAND(2), /* nand */
+ UNIPHIER_CLK_RATE(3, 200000000), /* nand-4x */
+ UNIPHIER_CLK_GATE_SIMPLE(4, 0x210c, 2), /* emmc */
UNIPHIER_CLK_GATE_SIMPLE(6, 0x210c, 9), /* ether0 */
UNIPHIER_CLK_GATE_SIMPLE(7, 0x210c, 10), /* ether1 */
UNIPHIER_CLK_GATE_SIMPLE(12, 0x210c, 4), /* usb30 (gio0) */
diff --git a/drivers/gpio/pca953x_gpio.c b/drivers/gpio/pca953x_gpio.c
index 341527acc5..07a3356b3c 100644
--- a/drivers/gpio/pca953x_gpio.c
+++ b/drivers/gpio/pca953x_gpio.c
@@ -363,6 +363,7 @@ static const struct udevice_id pca953x_ids[] = {
{ .compatible = "ti,tca6408", .data = OF_953X(8, PCA_INT), },
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
+ { .compatible = "ti,tca9539", .data = OF_953X(16, PCA_INT), },
{ .compatible = "onsemi,pca9654", .data = OF_953X(8, PCA_INT), },
diff --git a/drivers/pci_endpoint/Kconfig b/drivers/pci_endpoint/Kconfig
new file mode 100644
index 0000000000..19cfa0aafb
--- /dev/null
+++ b/drivers/pci_endpoint/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# PCI Endpoint Support
+#
+
+menu "PCI Endpoint"
+
+config PCI_ENDPOINT
+ bool "PCI Endpoint Support"
+ depends on DM
+ help
+ Enable this configuration option to support configurable PCI
+ endpoints. This should be enabled if the platform has a PCI
+ controller that can operate in endpoint mode (as a device
+ connected to a PCI host or bridge).
+
+config PCIE_CADENCE_EP
+ bool "Cadence PCIe endpoint controller"
+ depends on PCI_ENDPOINT
+ help
+ Say Y here if you want to support the Cadence PCIe controller in
+ endpoint mode. This PCIe controller may be embedded into many
+ different vendors' SoCs.
+
+config PCI_SANDBOX_EP
+ bool "Sandbox PCIe endpoint controller"
+ depends on PCI_ENDPOINT
+ help
+ Say Y here if you want to support the Sandbox PCIe controller in
+ endpoint mode.
+ The sandbox driver acts as a dummy driver which stores and
+ retrieves PCIe endpoint configuration as is.
+
+endmenu
diff --git a/drivers/pci_endpoint/Makefile b/drivers/pci_endpoint/Makefile
new file mode 100644
index 0000000000..3cd987259d
--- /dev/null
+++ b/drivers/pci_endpoint/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2019
+# Ramon Fried <ramon.fried@gmail.com>
+
+obj-y += pci_ep-uclass.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCI_SANDBOX_EP) += sandbox-pci_ep.o
diff --git a/drivers/pci_endpoint/pci_ep-uclass.c b/drivers/pci_endpoint/pci_ep-uclass.c
new file mode 100644
index 0000000000..2f9c70398d
--- /dev/null
+++ b/drivers/pci_endpoint/pci_ep-uclass.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCI Endpoint uclass
+ *
+ * Based on Linux PCI-EP driver written by
+ * Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * Copyright (c) 2019
+ * Written by Ramon Fried <ramon.fried@gmail.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <linux/log2.h>
+#include <pci_ep.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int pci_ep_write_header(struct udevice *dev, uint fn, struct pci_ep_header *hdr)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (!ops->write_header)
+ return -ENOSYS;
+
+ return ops->write_header(dev, fn, hdr);
+}
+
+int pci_ep_read_header(struct udevice *dev, uint fn, struct pci_ep_header *hdr)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (!ops->read_header)
+ return -ENOSYS;
+
+ return ops->read_header(dev, fn, hdr);
+}
+
+int pci_ep_set_bar(struct udevice *dev, uint func_no, struct pci_bar *ep_bar)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+ int flags = ep_bar->flags;
+
+ /* Some basic bar validity checks */
+ if (ep_bar->barno > BAR_5 || ep_bar->barno < BAR_0)
+ return -EINVAL;
+
+ if ((ep_bar->barno == BAR_5 &&
+ (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) ||
+ ((flags & PCI_BASE_ADDRESS_SPACE_IO) &&
+ (flags & PCI_BASE_ADDRESS_IO_MASK)) ||
+ (upper_32_bits(ep_bar->size) &&
+ !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
+ return -EINVAL;
+
+ if (!ops->set_bar)
+ return -ENOSYS;
+
+ return ops->set_bar(dev, func_no, ep_bar);
+}
+
+int pci_ep_read_bar(struct udevice *dev, uint func_no, struct pci_bar *ep_bar,
+ enum pci_barno barno)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ /* Some basic bar validity checks */
+ if (barno > BAR_5 || barno < BAR_0)
+ return -EINVAL;
+
+ if (!ops->read_bar)
+ return -ENOSYS;
+
+ return ops->read_bar(dev, func_no, ep_bar, barno);
+}
+
+int pci_ep_clear_bar(struct udevice *dev, uint func_num, enum pci_barno bar)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (!ops->clear_bar)
+ return -ENOSYS;
+
+ return ops->clear_bar(dev, func_num, bar);
+}
+
+int pci_ep_map_addr(struct udevice *dev, uint func_no, phys_addr_t addr,
+ u64 pci_addr, size_t size)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (!ops->map_addr)
+ return -ENOSYS;
+
+ return ops->map_addr(dev, func_no, addr, pci_addr, size);
+}
+
+int pci_ep_unmap_addr(struct udevice *dev, uint func_no, phys_addr_t addr)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (!ops->unmap_addr)
+ return -ENOSYS;
+
+ return ops->unmap_addr(dev, func_no, addr);
+}
+
+int pci_ep_set_msi(struct udevice *dev, uint func_no, uint interrupts)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+ uint encode_int;
+
+ if (interrupts > 32)
+ return -EINVAL;
+
+ if (!ops->set_msi)
+ return -ENOSYS;
+
+ /*
+ * The MSI spec permits allocation of
+ * only 1, 2, 4, 8, 16 or 32 interrupts.
+ */
+ encode_int = order_base_2(interrupts);
+
+ return ops->set_msi(dev, func_no, encode_int);
+}
+
+int pci_ep_get_msi(struct udevice *dev, uint func_no)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+ int interrupt;
+
+ if (!ops->get_msi)
+ return -ENOSYS;
+
+ interrupt = ops->get_msi(dev, func_no);
+
+ if (interrupt < 0)
+ return 0;
+
+ /* Translate back from order base 2 */
+ interrupt = 1 << interrupt;
+
+ return interrupt;
+}
+
+int pci_ep_set_msix(struct udevice *dev, uint func_no, uint interrupts)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (interrupts < 1 || interrupts > 2048)
+ return -EINVAL;
+
+ if (!ops->set_msix)
+ return -ENOSYS;
+
+ return ops->set_msix(dev, func_no, interrupts - 1);
+}
+
+int pci_ep_get_msix(struct udevice *dev, uint func_no)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+ int interrupt;
+
+ if (!ops->get_msix)
+ return -ENOSYS;
+
+ interrupt = ops->get_msix(dev, func_no);
+
+ if (interrupt < 0)
+ return 0;
+
+ return interrupt + 1;
+}
+
+int pci_ep_raise_irq(struct udevice *dev, uint func_no,
+ enum pci_ep_irq_type type, uint interrupt_num)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (!ops->raise_irq)
+ return -ENOSYS;
+
+ return ops->raise_irq(dev, func_no, type, interrupt_num);
+}
+
+int pci_ep_start(struct udevice *dev)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (!ops->start)
+ return -ENOSYS;
+
+ return ops->start(dev);
+}
+
+int pci_ep_stop(struct udevice *dev)
+{
+ struct pci_ep_ops *ops = pci_ep_get_ops(dev);
+
+ if (!ops->stop)
+ return -ENOSYS;
+
+ return ops->stop(dev);
+}
+
+UCLASS_DRIVER(pci_ep) = {
+ .id = UCLASS_PCI_EP,
+ .name = "pci_ep",
+ .flags = DM_UC_FLAG_SEQ_ALIAS,
+};
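
The uclass above is the consumer-facing API: look up a UCLASS_PCI_EP device, write the configuration-space header, program the BARs, then start the controller. A minimal usage sketch of that flow (not part of this patch; the IDs, BAR address and wrapper function are hypothetical):

#include <common.h>
#include <dm.h>
#include <pci.h>
#include <pci_ep.h>
#include <linux/sizes.h>

/* Sketch: configure function 0 of the first endpoint controller found. */
static int example_pci_ep_setup(void)
{
	struct pci_ep_header hdr = {
		.vendorid = 0x1234,		/* hypothetical IDs */
		.deviceid = 0x5678,
		.baseclass_code = 0x05,		/* memory controller */
	};
	struct pci_bar bar = {
		.barno = BAR_0,
		.phys_addr = 0x80000000,	/* hypothetical local buffer */
		.size = SZ_1M,
		.flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	};
	struct udevice *dev;
	int ret;

	ret = uclass_first_device_err(UCLASS_PCI_EP, &dev);
	if (ret)
		return ret;

	ret = pci_ep_write_header(dev, 0, &hdr);
	if (ret)
		return ret;

	ret = pci_ep_set_bar(dev, 0, &bar);
	if (ret)
		return ret;

	return pci_ep_start(dev);
}

Note that pci_ep_start() simply returns -ENOSYS for controllers whose driver does not implement the hook, as is the case for the Cadence driver below.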
diff --git a/drivers/pci_endpoint/pcie-cadence-ep.c b/drivers/pci_endpoint/pcie-cadence-ep.c
new file mode 100644
index 0000000000..59231d340a
--- /dev/null
+++ b/drivers/pci_endpoint/pcie-cadence-ep.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2019
+ * Written by Ramon Fried <ramon.fried@gmail.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <pci_ep.h>
+#include <linux/sizes.h>
+#include <linux/log2.h>
+#include "pcie-cadence.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static int cdns_write_header(struct udevice *dev, uint fn,
+ struct pci_ep_header *hdr)
+{
+ struct cdns_pcie *pcie = dev_get_priv(dev);
+
+ cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
+ cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
+ cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG,
+ hdr->progif_code);
+ cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
+ hdr->subclass_code |
+ hdr->baseclass_code << 8);
+ cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID,
+ hdr->subsys_id);
+ cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
+
+ /*
+ * Vendor ID can only be modified from function 0, all other functions
+ * use the same vendor ID as function 0.
+ */
+ if (fn == 0) {
+ /* Update the vendor IDs. */
+ u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
+ CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
+ }
+
+ return 0;
+}
+
+static int cdns_set_bar(struct udevice *dev, uint fn, struct pci_bar *ep_bar)
+{
+ struct cdns_pcie *pcie = dev_get_priv(dev);
+ dma_addr_t bar_phys = ep_bar->phys_addr;
+ enum pci_barno bar = ep_bar->barno;
+ int flags = ep_bar->flags;
+ u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
+ u64 sz;
+
+ /* BAR size is 2^(aperture + 7) */
+ sz = max_t(size_t, ep_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
+ /*
+ * roundup_pow_of_two() returns an unsigned long, which is not suited
+ * for 64bit values.
+ */
+ sz = 1ULL << fls64(sz - 1);
+ aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
+ } else {
+ bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
+ bool is_64bits = (sz > SZ_2G) |
+ !!(ep_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
+
+ if (is_64bits && (bar & 1))
+ return -EINVAL;
+
+ if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+ ep_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+ if (is_64bits && is_prefetch)
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
+ else if (is_prefetch)
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
+ else if (is_64bits)
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
+ else
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
+ }
+
+ addr0 = lower_32_bits(bar_phys);
+ addr1 = upper_32_bits(bar_phys);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
+ addr0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
+ addr1);
+
+ if (bar < BAR_4) {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
+ b = bar;
+ } else {
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
+ b = bar - BAR_4;
+ }
+
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+ cdns_pcie_writel(pcie, reg, cfg);
+
+ return 0;
+}
+
+static int cdns_set_msi(struct udevice *dev, uint fn, uint mmc)
+{
+ struct cdns_pcie *pcie = dev_get_priv(dev);
+ u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+
+ /*
+ * Set the Multiple Message Capable bitfield into the Message Control
+ * register.
+ */
+ u16 flags;
+
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
+ flags |= PCI_MSI_FLAGS_64BIT;
+ flags &= ~PCI_MSI_FLAGS_MASKBIT;
+ cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);
+
+ return 0;
+}
+
+static struct pci_ep_ops cdns_pci_ep_ops = {
+ .write_header = cdns_write_header,
+ .set_bar = cdns_set_bar,
+ .set_msi = cdns_set_msi,
+};
+
+static int cdns_pci_ep_probe(struct udevice *dev)
+{
+ struct cdns_pcie *pdata = dev_get_priv(dev);
+
+ pdata->reg_base = (void __iomem *)devfdt_get_addr(dev);
+ if ((fdt_addr_t)pdata->reg_base == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ pdata->max_functions = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
+ "max-functions", 1);
+ pdata->max_regions = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
+ "cdns,max-outbound-regions", 8);
+
+ return 0;
+}
+
+static int cdns_pci_ep_remove(struct udevice *dev)
+{
+ return 0;
+}
+
+const struct udevice_id cadence_pci_ep_of_match[] = {
+ { .compatible = "cdns,cdns-pcie-ep" },
+ { }
+};
+
+U_BOOT_DRIVER(cdns_pcie) = {
+ .name = "cdns,pcie-ep",
+ .id = UCLASS_PCI_EP,
+ .of_match = cadence_pci_ep_of_match,
+ .ops = &cdns_pci_ep_ops,
+ .probe = cdns_pci_ep_probe,
+ .remove = cdns_pci_ep_remove,
+ .priv_auto_alloc_size = sizeof(struct cdns_pcie),
+};
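
cdns_set_bar() encodes the BAR size into the aperture field as log2 of the size minus 7, after clamping to the 128-byte minimum and rounding up to a power of two. A standalone sketch of that arithmetic, for illustration only (the helper name is made up):

#include <common.h>
#include <linux/kernel.h>
#include <linux/log2.h>

/*
 * Aperture encoding cdns_set_bar() computes for a requested BAR size in
 * bytes, e.g. 100 B -> 0 (rounded up to 128 B), 64 KiB -> 9, 1 MiB -> 13.
 */
static unsigned int example_cdns_bar_aperture(u64 size)
{
	u64 sz = max_t(u64, size, 128);	/* CDNS_PCIE_EP_MIN_APERTURE */

	sz = 1ULL << fls64(sz - 1);	/* round up to a power of two */

	return ilog2(sz) - 7;		/* 128B -> 0, 256B -> 1, ... */
}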
diff --git a/drivers/pci_endpoint/pcie-cadence.h b/drivers/pci_endpoint/pcie-cadence.h
new file mode 100644
index 0000000000..91630d35c3
--- /dev/null
+++ b/drivers/pci_endpoint/pcie-cadence.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Cadence PCIe controller definitions
+ * Adapted from the Linux kernel driver.
+ * Copyright (c) 2017 Cadence
+ *
+ * Copyright (c) 2019
+ * Written by Ramon Fried <ramon.fried@gmail.com>
+ */
+
+#ifndef PCIE_CADENCE_H
+#define PCIE_CADENCE_H
+
+#include <common.h>
+#include <pci_ep.h>
+#include <asm/io.h>
+
+/*
+ * Local Management Registers
+ */
+#define CDNS_PCIE_LM_BASE 0x00100000
+
+/* Vendor ID Register */
+#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
+#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
+#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
+#define CDNS_PCIE_LM_ID_VENDOR(vid) \
+ (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
+#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
+#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
+#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
+ (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
+
+/* Root Port Requestor ID Register */
+#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
+#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
+#define CDNS_PCIE_LM_RP_RID_SHIFT 0
+#define CDNS_PCIE_LM_RP_RID_(rid) \
+ (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
+
+/* Endpoint Bus and Device Number Register */
+#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
+#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
+#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
+#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
+#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
+
+/* Endpoint Function f BAR b Configuration Registers */
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+ (GENMASK(4, 0) << ((b) * 8))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+ (GENMASK(7, 5) << ((b) * 8))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+/* Endpoint Function Configuration Register */
+#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)
+
+/* Root Complex BAR Configuration Register */
+#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
+ (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
+ (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
+ (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
+ (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
+#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
+
+/* BAR control values applicable to both Endpoint Function and Root Complex */
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
+
+/*
+ * Endpoint Function Registers (PCI configuration space for endpoint functions)
+ */
+#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
+
+#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
+
+/*
+ * Root Port Registers (PCI configuration space for the root port function)
+ */
+#define CDNS_PCIE_RP_BASE 0x00200000
+
+/*
+ * Address Translation Registers
+ */
+#define CDNS_PCIE_AT_BASE 0x00400000
+
+/* Region r Outbound AXI to PCIe Address Translation Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+
+/* Region r Outbound AXI to PCIe Address Translation Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+ (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
+
+/* Region r Outbound PCIe Descriptor Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
+/* Bit 23 MUST be set in RC mode. */
+#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+
+/* Region r Outbound PCIe Descriptor Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
+ (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
+#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
+ ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
+
+/* Region r AXI Region Base Address Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
+
+/* Region r AXI Region Base Address Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+ (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
+
+/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
+ (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
+ (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
+
+/* AXI link down register */
+#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+
+enum cdns_pcie_rp_bar {
+ RP_BAR0,
+ RP_BAR1,
+ RP_NO_BAR
+};
+
+/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+ (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+
+/* Normal/Vendor specific message access: offset inside some outbound region */
+#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
+#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
+ (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
+#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
+#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
+ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
+#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+
+#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
+
+enum cdns_pcie_msg_code {
+ MSG_CODE_ASSERT_INTA = 0x20,
+ MSG_CODE_ASSERT_INTB = 0x21,
+ MSG_CODE_ASSERT_INTC = 0x22,
+ MSG_CODE_ASSERT_INTD = 0x23,
+ MSG_CODE_DEASSERT_INTA = 0x24,
+ MSG_CODE_DEASSERT_INTB = 0x25,
+ MSG_CODE_DEASSERT_INTC = 0x26,
+ MSG_CODE_DEASSERT_INTD = 0x27,
+};
+
+enum cdns_pcie_msg_routing {
+ /* Route to Root Complex */
+ MSG_ROUTING_TO_RC,
+
+ /* Use Address Routing */
+ MSG_ROUTING_BY_ADDR,
+
+ /* Use ID Routing */
+ MSG_ROUTING_BY_ID,
+
+ /* Route as Broadcast Message from Root Complex */
+ MSG_ROUTING_BCAST,
+
+ /* Local message; terminate at receiver (INTx messages) */
+ MSG_ROUTING_LOCAL,
+
+ /* Gather & route to Root Complex (PME_TO_Ack message) */
+ MSG_ROUTING_GATHER,
+};
+
+struct cdns_pcie {
+ void __iomem *reg_base;
+ u32 max_functions;
+ u32 max_regions;
+};
+
+/* Register access */
+static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
+{
+ writeb(value, pcie->reg_base + reg);
+}
+
+static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value)
+{
+ writew(value, pcie->reg_base + reg);
+}
+
+static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
+{
+ writel(value, pcie->reg_base + reg);
+}
+
+static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
+{
+ return readl(pcie->reg_base + reg);
+}
+
+/* Root Port register access */
+static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
+ u32 reg, u8 value)
+{
+ writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+}
+
+static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
+ u32 reg, u16 value)
+{
+ writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+}
+
+static inline void cdns_pcie_rp_writel(struct cdns_pcie *pcie,
+ u32 reg, u32 value)
+{
+ writel(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+}
+
+/* Endpoint Function register access */
+static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
+ u32 reg, u8 value)
+{
+ writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
+ u32 reg, u16 value)
+{
+ writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
+ u32 reg, u32 value)
+{
+ writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg)
+{
+ return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
+{
+ return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
+{
+ return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+}
+
+#endif /* end of include guard: PCIE_CADENCE_H */
diff --git a/drivers/pci_endpoint/sandbox-pci_ep.c b/drivers/pci_endpoint/sandbox-pci_ep.c
new file mode 100644
index 0000000000..0258433d8f
--- /dev/null
+++ b/drivers/pci_endpoint/sandbox-pci_ep.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2019 Ramon Fried <ramon.fried@gmail.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <pci.h>
+#include <pci_ep.h>
+#include <asm/test.h>
+
+/**
+ * struct sandbox_pci_ep_priv - private data for driver
+ * @hdr: Stores the EP device header
+ * @bars: Stores the EP BAR configuration for each BAR
+ * @msix: required MSI-X count
+ * @msi: required MSI count
+ * @irq_count: number of IRQs raised, exported for testing
+ */
+struct sandbox_pci_ep_priv {
+ struct pci_ep_header hdr;
+ struct pci_bar bars[6];
+ int msix;
+ int msi;
+ int irq_count;
+};
+
+/* Method exported for testing purposes */
+int sandbox_get_pci_ep_irq_count(struct udevice *dev)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ return priv->irq_count;
+}
+
+static const struct udevice_id sandbox_pci_ep_ids[] = {
+ { .compatible = "sandbox,pci_ep" },
+ { }
+};
+
+static int sandbox_write_header(struct udevice *dev, uint fn,
+ struct pci_ep_header *hdr)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ if (fn > 0)
+ return -ENODEV;
+
+ memcpy(&priv->hdr, hdr, sizeof(*hdr));
+
+ return 0;
+}
+
+static int sandbox_read_header(struct udevice *dev, uint fn,
+ struct pci_ep_header *hdr)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ if (fn > 0)
+ return -ENODEV;
+
+ memcpy(hdr, &priv->hdr, sizeof(*hdr));
+
+ return 0;
+}
+
+static int sandbox_set_bar(struct udevice *dev, uint fn,
+ struct pci_bar *ep_bar)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+ int bar_idx;
+
+ if (fn > 0)
+ return -ENODEV;
+
+ bar_idx = ep_bar->barno;
+
+ memcpy(&priv->bars[bar_idx], ep_bar, sizeof(*ep_bar));
+
+ return 0;
+}
+
+static int sandbox_read_bar(struct udevice *dev, uint fn,
+ struct pci_bar *ep_bar, enum pci_barno barno)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+ int bar_idx;
+
+ if (fn > 0)
+ return -ENODEV;
+
+ bar_idx = barno;
+
+ memcpy(ep_bar, &priv->bars[bar_idx], sizeof(*ep_bar));
+
+ return 0;
+}
+
+static int sandbox_set_msi(struct udevice *dev, uint fn, uint interrupts)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ if (fn > 0)
+ return -ENODEV;
+
+ priv->msi = interrupts;
+
+ return 0;
+}
+
+static int sandbox_get_msi(struct udevice *dev, uint fn)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ if (fn > 0)
+ return -ENODEV;
+
+ return priv->msi;
+}
+
+static int sandbox_set_msix(struct udevice *dev, uint fn, uint interrupts)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ if (fn > 0)
+ return -ENODEV;
+
+ priv->msix = interrupts;
+
+ return 0;
+}
+
+static int sandbox_get_msix(struct udevice *dev, uint fn)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ if (fn > 0)
+ return -ENODEV;
+
+ return priv->msix;
+}
+
+static int sandbox_raise_irq(struct udevice *dev, uint fn,
+ enum pci_ep_irq_type type, uint interrupt_num)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ if (fn > 0)
+ return -ENODEV;
+
+ priv->irq_count++;
+
+ return 0;
+}
+
+static int sandbox_pci_ep_probe(struct udevice *dev)
+{
+ struct sandbox_pci_ep_priv *priv = dev_get_priv(dev);
+
+ memset(priv, 0, sizeof(*priv));
+ return 0;
+}
+
+static struct pci_ep_ops sandbox_pci_ep_ops = {
+ .write_header = sandbox_write_header,
+ .read_header = sandbox_read_header,
+ .set_bar = sandbox_set_bar,
+ .read_bar = sandbox_read_bar,
+ .set_msi = sandbox_set_msi,
+ .get_msi = sandbox_get_msi,
+ .set_msix = sandbox_set_msix,
+ .get_msix = sandbox_get_msix,
+ .raise_irq = sandbox_raise_irq,
+};
+
+U_BOOT_DRIVER(pci_ep_sandbox) = {
+ .name = "pci_ep_sandbox",
+ .id = UCLASS_PCI_EP,
+ .of_match = sandbox_pci_ep_ids,
+ .probe = sandbox_pci_ep_probe,
+ .ops = &sandbox_pci_ep_ops,
+ .priv_auto_alloc_size = sizeof(struct sandbox_pci_ep_priv),
+};
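
Because the sandbox driver only stores what it is given, it is easy to exercise from a driver-model unit test. A hedged sketch of such a test, assuming the sandbox device tree provides a "sandbox,pci_ep" node and following the usual U-Boot DM test conventions (the test name and flags are assumptions, not part of this patch):

#include <common.h>
#include <dm.h>
#include <pci_ep.h>
#include <dm/test.h>
#include <test/ut.h>

/* Round-trip a config header and an MSI count through the sandbox EP. */
static int dm_test_pci_ep_sketch(struct unit_test_state *uts)
{
	struct pci_ep_header hdr = { .vendorid = 0x1234, .deviceid = 0x5678 };
	struct pci_ep_header out;
	struct udevice *dev;

	ut_assertok(uclass_first_device_err(UCLASS_PCI_EP, &dev));
	ut_assertok(pci_ep_write_header(dev, 0, &hdr));
	ut_assertok(pci_ep_read_header(dev, 0, &out));
	ut_asserteq(0x1234, out.vendorid);

	/* 4 requested MSIs are stored as order 2 and decoded back to 4 */
	ut_assertok(pci_ep_set_msi(dev, 0, 4));
	ut_asserteq(4, pci_ep_get_msi(dev, 0));

	return 0;
}
DM_TEST(dm_test_pci_ep_sketch, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);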
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8e1424ba42..cc174dd036 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -293,6 +293,14 @@ config TI_QSPI
Enable the TI Quad-SPI (QSPI) driver for DRA7xx and AM43xx evms.
This driver supports SPI flash single, quad and memory reads.
+config UNIPHIER_SPI
+ bool "Socionext UniPhier SPI driver"
+ depends on ARCH_UNIPHIER
+ help
+ Enable the Socionext UniPhier SPI driver. This driver can
+ be used to access SPI chips on platforms embedding this
+ UniPhier IP core.
+
config XILINX_SPI
bool "Xilinx SPI driver"
help
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 64c407e2ed..ab84122f08 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_TEGRA114_SPI) += tegra114_spi.o
obj-$(CONFIG_TEGRA20_SFLASH) += tegra20_sflash.o
obj-$(CONFIG_TEGRA20_SLINK) += tegra20_slink.o
obj-$(CONFIG_TEGRA210_QSPI) += tegra210_qspi.o
+obj-$(CONFIG_UNIPHIER_SPI) += uniphier_spi.o
obj-$(CONFIG_XILINX_SPI) += xilinx_spi.o
obj-$(CONFIG_ZYNQ_SPI) += zynq_spi.o
obj-$(CONFIG_ZYNQ_QSPI) += zynq_qspi.o
diff --git a/drivers/spi/kirkwood_spi.c b/drivers/spi/kirkwood_spi.c
index 5dd1ad67cf..c725625146 100644
--- a/drivers/spi/kirkwood_spi.c
+++ b/drivers/spi/kirkwood_spi.c
@@ -151,10 +151,6 @@ void spi_free_slave(struct spi_slave *slave)
free(slave);
}
-#if defined(CONFIG_SYS_KW_SPI_MPP)
-u32 spi_mpp_backup[4];
-#endif
-
__attribute__((weak)) int board_spi_claim_bus(struct spi_slave *slave)
{
return 0;
@@ -162,34 +158,6 @@ __attribute__((weak)) int board_spi_claim_bus(struct spi_slave *slave)
int spi_claim_bus(struct spi_slave *slave)
{
-#if defined(CONFIG_SYS_KW_SPI_MPP)
- u32 config;
- u32 spi_mpp_config[4];
-
- config = CONFIG_SYS_KW_SPI_MPP;
-
- if (config & MOSI_MPP6)
- spi_mpp_config[0] = MPP6_SPI_MOSI;
- else
- spi_mpp_config[0] = MPP1_SPI_MOSI;
-
- if (config & SCK_MPP10)
- spi_mpp_config[1] = MPP10_SPI_SCK;
- else
- spi_mpp_config[1] = MPP2_SPI_SCK;
-
- if (config & MISO_MPP11)
- spi_mpp_config[2] = MPP11_SPI_MISO;
- else
- spi_mpp_config[2] = MPP3_SPI_MISO;
-
- spi_mpp_config[3] = 0;
- spi_mpp_backup[3] = 0;
-
- /* set new spi mpp and save current mpp config */
- kirkwood_mpp_conf(spi_mpp_config, spi_mpp_backup);
-#endif
-
return board_spi_claim_bus(slave);
}
@@ -199,10 +167,6 @@ __attribute__((weak)) void board_spi_release_bus(struct spi_slave *slave)
void spi_release_bus(struct spi_slave *slave)
{
-#if defined(CONFIG_SYS_KW_SPI_MPP)
- kirkwood_mpp_conf(spi_mpp_backup, NULL);
-#endif
-
board_spi_release_bus(slave);
}
@@ -338,6 +302,11 @@ static int mvebu_spi_xfer(struct udevice *dev, unsigned int bitlen,
return _spi_xfer(plat->spireg, bitlen, dout, din, flags);
}
+__attribute__((weak)) int mvebu_board_spi_claim_bus(struct udevice *dev)
+{
+ return 0;
+}
+
static int mvebu_spi_claim_bus(struct udevice *dev)
{
struct udevice *bus = dev->parent;
@@ -348,9 +317,19 @@ static int mvebu_spi_claim_bus(struct udevice *dev)
KWSPI_CS_MASK << KWSPI_CS_SHIFT,
spi_chip_select(dev) << KWSPI_CS_SHIFT);
+ return mvebu_board_spi_claim_bus(dev);
+}
+
+__attribute__((weak)) int mvebu_board_spi_release_bus(struct udevice *dev)
+{
return 0;
}
+static int mvebu_spi_release_bus(struct udevice *dev)
+{
+ return mvebu_board_spi_release_bus(dev);
+}
+
static int mvebu_spi_probe(struct udevice *bus)
{
struct mvebu_spi_platdata *plat = dev_get_platdata(bus);
@@ -377,6 +356,7 @@ static int mvebu_spi_ofdata_to_platdata(struct udevice *bus)
static const struct dm_spi_ops mvebu_spi_ops = {
.claim_bus = mvebu_spi_claim_bus,
+ .release_bus = mvebu_spi_release_bus,
.xfer = mvebu_spi_xfer,
.set_speed = mvebu_spi_set_speed,
.set_mode = mvebu_spi_set_mode,
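
The new weak mvebu_board_spi_claim_bus()/mvebu_board_spi_release_bus() hooks replace the removed compile-time CONFIG_SYS_KW_SPI_MPP pin-mux switching; a board that still needs special handling overrides them. A minimal sketch of such an override (the board file and its body are hypothetical):

/* In a hypothetical board file, e.g. board/<vendor>/<board>/<board>.c */
#include <common.h>
#include <dm.h>

int mvebu_board_spi_claim_bus(struct udevice *dev)
{
	/* e.g. re-mux the SPI pins or enable an external buffer here */
	return 0;
}

int mvebu_board_spi_release_bus(struct udevice *dev)
{
	/* undo whatever claim_bus changed */
	return 0;
}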
diff --git a/drivers/spi/uniphier_spi.c b/drivers/spi/uniphier_spi.c
new file mode 100644
index 0000000000..ef02d07aa4
--- /dev/null
+++ b/drivers/spi/uniphier_spi.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * uniphier_spi.c - Socionext UniPhier SPI driver
+ * Copyright 2019 Socionext, Inc.
+ */
+
+#include <clk.h>
+#include <common.h>
+#include <dm.h>
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <spi.h>
+#include <wait_bit.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define SSI_CTL 0x00
+#define SSI_CTL_EN BIT(0)
+
+#define SSI_CKS 0x04
+#define SSI_CKS_CKRAT_MASK GENMASK(7, 0)
+#define SSI_CKS_CKPHS BIT(14)
+#define SSI_CKS_CKINIT BIT(13)
+#define SSI_CKS_CKDLY BIT(12)
+
+#define SSI_TXWDS 0x08
+#define SSI_TXWDS_WDLEN_MASK GENMASK(13, 8)
+#define SSI_TXWDS_TDTF_MASK GENMASK(7, 6)
+#define SSI_TXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_RXWDS 0x0c
+#define SSI_RXWDS_RDTF_MASK GENMASK(7, 6)
+#define SSI_RXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_FPS 0x10
+#define SSI_FPS_FSPOL BIT(15)
+#define SSI_FPS_FSTRT BIT(14)
+
+#define SSI_SR 0x14
+#define SSI_SR_BUSY BIT(7)
+#define SSI_SR_TNF BIT(5)
+#define SSI_SR_RNE BIT(0)
+
+#define SSI_IE 0x18
+
+#define SSI_IC 0x1c
+#define SSI_IC_TCIC BIT(4)
+#define SSI_IC_RCIC BIT(3)
+#define SSI_IC_RORIC BIT(0)
+
+#define SSI_FC 0x20
+#define SSI_FC_TXFFL BIT(12)
+#define SSI_FC_TXFTH_MASK GENMASK(11, 8)
+#define SSI_FC_RXFFL BIT(4)
+#define SSI_FC_RXFTH_MASK GENMASK(3, 0)
+
+#define SSI_XDR 0x24 /* TXDR for write, RXDR for read */
+
+#define SSI_FIFO_DEPTH 8U
+
+#define SSI_REG_TIMEOUT (CONFIG_SYS_HZ / 100) /* 10 ms */
+#define SSI_XFER_TIMEOUT (CONFIG_SYS_HZ) /* 1 sec */
+
+#define SSI_CLK 50000000 /* internal I/O clock: 50MHz */
+
+struct uniphier_spi_platdata {
+ void __iomem *base;
+ u32 frequency; /* input frequency */
+ u32 speed_hz;
+ uint deactivate_delay_us; /* Delay to wait after deactivate */
+ uint activate_delay_us; /* Delay to wait after activate */
+};
+
+struct uniphier_spi_priv {
+ void __iomem *base;
+ u8 mode;
+ u8 fifo_depth;
+ u8 bits_per_word;
+ ulong last_transaction_us; /* Time of last transaction end */
+};
+
+static void uniphier_spi_enable(struct uniphier_spi_priv *priv, int enable)
+{
+ u32 val;
+
+ val = readl(priv->base + SSI_CTL);
+ if (enable)
+ val |= SSI_CTL_EN;
+ else
+ val &= ~SSI_CTL_EN;
+ writel(val, priv->base + SSI_CTL);
+}
+
+static void uniphier_spi_regdump(struct uniphier_spi_priv *priv)
+{
+ pr_debug("CTL %08x\n", readl(priv->base + SSI_CTL));
+ pr_debug("CKS %08x\n", readl(priv->base + SSI_CKS));
+ pr_debug("TXWDS %08x\n", readl(priv->base + SSI_TXWDS));
+ pr_debug("RXWDS %08x\n", readl(priv->base + SSI_RXWDS));
+ pr_debug("FPS %08x\n", readl(priv->base + SSI_FPS));
+ pr_debug("SR %08x\n", readl(priv->base + SSI_SR));
+ pr_debug("IE %08x\n", readl(priv->base + SSI_IE));
+ pr_debug("IC %08x\n", readl(priv->base + SSI_IC));
+ pr_debug("FC %08x\n", readl(priv->base + SSI_FC));
+ pr_debug("XDR %08x\n", readl(priv->base + SSI_XDR));
+}
+
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_platdata *plat = bus->platdata;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ ulong delay_us; /* The delay completed so far */
+ u32 val;
+
+ /* If it's too soon to do another transaction, wait */
+ if (plat->deactivate_delay_us && priv->last_transaction_us) {
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < plat->deactivate_delay_us)
+ udelay(plat->deactivate_delay_us - delay_us);
+ }
+
+ val = readl(priv->base + SSI_FPS);
+ if (priv->mode & SPI_CS_HIGH)
+ val |= SSI_FPS_FSPOL;
+ else
+ val &= ~SSI_FPS_FSPOL;
+ writel(val, priv->base + SSI_FPS);
+
+ if (plat->activate_delay_us)
+ udelay(plat->activate_delay_us);
+}
+
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_platdata *plat = bus->platdata;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ u32 val;
+
+ val = readl(priv->base + SSI_FPS);
+ if (priv->mode & SPI_CS_HIGH)
+ val &= ~SSI_FPS_FSPOL;
+ else
+ val |= SSI_FPS_FSPOL;
+ writel(val, priv->base + SSI_FPS);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (plat->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+}
+
+static int uniphier_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ u32 val, size;
+
+ uniphier_spi_enable(priv, false);
+
+ /* disable interrupts */
+ writel(0, priv->base + SSI_IE);
+
+ /* bits_per_word */
+ size = priv->bits_per_word;
+ val = readl(priv->base + SSI_TXWDS);
+ val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
+ val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
+ val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_TXWDS);
+
+ val = readl(priv->base + SSI_RXWDS);
+ val &= ~SSI_RXWDS_DTLEN_MASK;
+ val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_RXWDS);
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+
+ /* FIFO threshold */
+ val = readl(priv->base + SSI_FC);
+ val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, priv->fifo_depth);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, priv->fifo_depth);
+ writel(val, priv->base + SSI_FC);
+
+ /* clear interrupts */
+ writel(SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC,
+ priv->base + SSI_IC);
+
+ uniphier_spi_enable(priv, true);
+
+ return 0;
+}
+
+static int uniphier_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+
+ uniphier_spi_enable(priv, false);
+
+ return 0;
+}
+
+static int uniphier_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ const u8 *tx_buf = dout;
+ u8 *rx_buf = din, buf;
+ u32 len = bitlen / 8;
+ u32 tx_len, rx_len;
+ u32 ts, status;
+ int ret = 0;
+
+ if (bitlen % 8) {
+ dev_err(dev, "Non byte aligned SPI transfer\n");
+ return -EINVAL;
+ }
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev);
+
+ uniphier_spi_enable(priv, true);
+
+ ts = get_timer(0);
+ tx_len = len;
+ rx_len = len;
+
+ uniphier_spi_regdump(priv);
+
+ while (tx_len || rx_len) {
+ ret = wait_for_bit_le32(priv->base + SSI_SR, SSI_SR_BUSY, false,
+ SSI_REG_TIMEOUT * 1000, false);
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "access timeout\n");
+ break;
+ }
+
+ status = readl(priv->base + SSI_SR);
+ /* write the data into TX */
+ if (tx_len && (status & SSI_SR_TNF)) {
+ buf = tx_buf ? *tx_buf++ : 0;
+ writel(buf, priv->base + SSI_XDR);
+ tx_len--;
+ }
+
+ /* read the data from RX */
+ if (rx_len && (status & SSI_SR_RNE)) {
+ buf = readl(priv->base + SSI_XDR);
+ if (rx_buf)
+ *rx_buf++ = buf;
+ rx_len--;
+ }
+
+ if (get_timer(ts) >= SSI_XFER_TIMEOUT) {
+ dev_err(dev, "transfer timeout\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ uniphier_spi_enable(priv, false);
+
+ return ret;
+}
+
+static int uniphier_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct uniphier_spi_platdata *plat = bus->platdata;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ u32 val, ckdiv;
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+
+ /* baudrate */
+ ckdiv = DIV_ROUND_UP(SSI_CLK, speed);
+ ckdiv = round_up(ckdiv, 2);
+
+ val = readl(priv->base + SSI_CKS);
+ val &= ~SSI_CKS_CKRAT_MASK;
+ val |= ckdiv & SSI_CKS_CKRAT_MASK;
+ writel(val, priv->base + SSI_CKS);
+
+ return 0;
+}
+
+static int uniphier_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ u32 val1, val2;
+
+ /*
+ * clock setting
+ * CKPHS capture timing. 0:rising edge, 1:falling edge
+ * CKINIT clock initial level. 0:low, 1:high
+ * CKDLY clock delay. 0:no delay, 1:delay depending on FSTRT
+ * (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
+ *
+ * frame setting
+ * FSPOL frame signal polarity. 0: low, 1: high
+ * FSTRT start frame timing
+ * 0: rising edge of clock, 1: falling edge of clock
+ */
+ val1 = readl(priv->base + SSI_CKS);
+ val2 = readl(priv->base + SSI_FPS);
+
+ switch (mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ /* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
+ val1 |= SSI_CKS_CKPHS | SSI_CKS_CKDLY;
+ val1 &= ~SSI_CKS_CKINIT;
+ val2 &= ~SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_1:
+ /* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
+ val1 &= ~(SSI_CKS_CKPHS | SSI_CKS_CKINIT | SSI_CKS_CKDLY);
+ val2 |= SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_2:
+ /* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
+ val1 |= SSI_CKS_CKINIT | SSI_CKS_CKDLY;
+ val1 &= ~SSI_CKS_CKPHS;
+ val2 |= SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_3:
+ /* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
+ val1 |= SSI_CKS_CKPHS | SSI_CKS_CKINIT;
+ val1 &= ~SSI_CKS_CKDLY;
+ val2 &= ~SSI_FPS_FSTRT;
+ break;
+ }
+
+ writel(val1, priv->base + SSI_CKS);
+ writel(val2, priv->base + SSI_FPS);
+
+ /* format */
+ val1 = readl(priv->base + SSI_TXWDS);
+ val2 = readl(priv->base + SSI_RXWDS);
+ if (mode & SPI_LSB_FIRST) {
+ val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
+ val2 |= FIELD_PREP(SSI_RXWDS_RDTF_MASK, 1);
+ }
+ writel(val1, priv->base + SSI_TXWDS);
+ writel(val2, priv->base + SSI_RXWDS);
+
+ priv->mode = mode;
+
+ return 0;
+}
+
+static int uniphier_spi_ofdata_to_platdata(struct udevice *bus)
+{
+ struct uniphier_spi_platdata *plat = bus->platdata;
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ plat->base = devfdt_get_addr_ptr(bus);
+
+ plat->frequency =
+ fdtdec_get_int(blob, node, "spi-max-frequency", 12500000);
+ plat->deactivate_delay_us =
+ fdtdec_get_int(blob, node, "spi-deactivate-delay", 0);
+ plat->activate_delay_us =
+ fdtdec_get_int(blob, node, "spi-activate-delay", 0);
+ plat->speed_hz = plat->frequency / 2;
+
+ return 0;
+}
+
+static int uniphier_spi_probe(struct udevice *bus)
+{
+ struct uniphier_spi_platdata *plat = dev_get_platdata(bus);
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+
+ priv->base = plat->base;
+ priv->fifo_depth = SSI_FIFO_DEPTH;
+ priv->bits_per_word = 8;
+
+ return 0;
+}
+
+static const struct dm_spi_ops uniphier_spi_ops = {
+ .claim_bus = uniphier_spi_claim_bus,
+ .release_bus = uniphier_spi_release_bus,
+ .xfer = uniphier_spi_xfer,
+ .set_speed = uniphier_spi_set_speed,
+ .set_mode = uniphier_spi_set_mode,
+};
+
+static const struct udevice_id uniphier_spi_ids[] = {
+ { .compatible = "socionext,uniphier-scssi" },
+ { /* Sentinel */ }
+};
+
+U_BOOT_DRIVER(uniphier_spi) = {
+ .name = "uniphier_spi",
+ .id = UCLASS_SPI,
+ .of_match = uniphier_spi_ids,
+ .ops = &uniphier_spi_ops,
+ .ofdata_to_platdata = uniphier_spi_ofdata_to_platdata,
+ .platdata_auto_alloc_size = sizeof(struct uniphier_spi_platdata),
+ .priv_auto_alloc_size = sizeof(struct uniphier_spi_priv),
+ .probe = uniphier_spi_probe,
+};
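
uniphier_spi_set_speed() derives the CKRAT divider from the fixed 50 MHz internal clock and rounds it up to an even value. A standalone sketch of that calculation, for illustration only (the helper name is made up):

#include <common.h>
#include <linux/kernel.h>

#define EXAMPLE_SSI_CLK	50000000	/* internal I/O clock, as in the driver */

/* Divider programmed for a requested SCLK rate, e.g. 12.5 MHz -> 4, 10 MHz -> 6 */
static unsigned int example_uniphier_ckdiv(unsigned int speed_hz)
{
	unsigned int ckdiv = DIV_ROUND_UP(EXAMPLE_SSI_CLK, speed_hz);

	return round_up(ckdiv, 2);	/* the driver rounds to an even divider */
}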
diff --git a/drivers/tpm/tpm2_tis_spi.c b/drivers/tpm/tpm2_tis_spi.c
index 8878130bd7..7186c179d1 100644
--- a/drivers/tpm/tpm2_tis_spi.c
+++ b/drivers/tpm/tpm2_tis_spi.c
@@ -295,6 +295,14 @@ static int tpm_tis_spi_wait_for_stat(struct udevice *dev, u8 mask,
return -ETIMEDOUT;
}
+static int tpm_tis_spi_valid_status(struct udevice *dev, u8 *status)
+{
+ struct tpm_chip *chip = dev_get_priv(dev);
+
+ return tpm_tis_spi_wait_for_stat(dev, TPM_STS_VALID,
+ chip->timeout_c, status);
+}
+
static int tpm_tis_spi_get_burstcount(struct udevice *dev)
{
struct tpm_chip *chip = dev_get_priv(dev);
@@ -455,7 +463,7 @@ static int tpm_tis_spi_send(struct udevice *dev, const u8 *buf, size_t len)
i += size;
}
- ret = tpm_tis_spi_status(dev, &status);
+ ret = tpm_tis_spi_valid_status(dev, &status);
if (ret)
goto out_err;
@@ -469,7 +477,7 @@ static int tpm_tis_spi_send(struct udevice *dev, const u8 *buf, size_t len)
if (ret)
goto out_err;
- ret = tpm_tis_spi_status(dev, &status);
+ ret = tpm_tis_spi_valid_status(dev, &status);
if (ret)
goto out_err;