Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig                        8
-rw-r--r--  drivers/pci/Makefile                       2
-rw-r--r--  drivers/pci/pcie_layerscape_gen4.c       572
-rw-r--r--  drivers/pci/pcie_layerscape_gen4.h       264
-rw-r--r--  drivers/pci/pcie_layerscape_gen4_fixup.c 249
5 files changed, 1095 insertions, 0 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index bfc43fe58e..429bb836a8 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -113,6 +113,14 @@ config PCIE_LAYERSCAPE
PCIe controllers. The PCIe may work in RC or EP mode according to
RCW[HOST_AGT_PEX] setting.
+config PCIE_LAYERSCAPE_GEN4
+ bool "Layerscape Gen4 PCIe support"
+ depends on DM_PCI
+ help
+ Support PCIe Gen4 on NXP Layerscape SoCs, which may have one or
+ several PCIe controllers. The PCIe controller can work in RC or
+ EP mode according to RCW[HOST_AGT_PEX] setting.
+
config PCIE_INTEL_FPGA
bool "Intel FPGA PCIe support"
depends on DM_PCI
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index e551f203d0..bd392edba1 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -33,5 +33,7 @@ obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
obj-$(CONFIG_PCIE_DW_MVEBU) += pcie_dw_mvebu.o
obj-$(CONFIG_PCIE_LAYERSCAPE) += pcie_layerscape.o
obj-$(CONFIG_PCIE_LAYERSCAPE) += pcie_layerscape_fixup.o
+obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie_layerscape_gen4.o \
+ pcie_layerscape_gen4_fixup.o
obj-$(CONFIG_PCI_XILINX) += pcie_xilinx.o
obj-$(CONFIG_PCIE_INTEL_FPGA) += pcie_intel_fpga.o
diff --git a/drivers/pci/pcie_layerscape_gen4.c b/drivers/pci/pcie_layerscape_gen4.c
new file mode 100644
index 0000000000..1fd8761bbc
--- /dev/null
+++ b/drivers/pci/pcie_layerscape_gen4.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0+ OR X11
+/*
+ * Copyright 2018-2019 NXP
+ *
+ * PCIe Gen4 driver for NXP Layerscape SoCs
+ * Author: Hou Zhiqiang <Minder.Hou@gmail.com>
+ */
+
+#include <common.h>
+#include <asm/arch/fsl_serdes.h>
+#include <pci.h>
+#include <asm/io.h>
+#include <errno.h>
+#include <malloc.h>
+#include <dm.h>
+#include <linux/sizes.h>
+
+#include "pcie_layerscape_gen4.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+LIST_HEAD(ls_pcie_g4_list);
+
+static u64 bar_size[4] = {
+ PCIE_BAR0_SIZE,
+ PCIE_BAR1_SIZE,
+ PCIE_BAR2_SIZE,
+ PCIE_BAR4_SIZE
+};
+
+static int ls_pcie_g4_ltssm(struct ls_pcie_g4 *pcie)
+{
+ u32 state;
+
+ state = pf_ctrl_readl(pcie, PCIE_LTSSM_STA) & LTSSM_STATE_MASK;
+
+ return state;
+}
+
+static int ls_pcie_g4_link_up(struct ls_pcie_g4 *pcie)
+{
+ int ltssm;
+
+ ltssm = ls_pcie_g4_ltssm(pcie);
+ if (ltssm != LTSSM_PCIE_L0)
+ return 0;
+
+ return 1;
+}
+
+static void ls_pcie_g4_ep_enable_cfg(struct ls_pcie_g4 *pcie)
+{
+ ccsr_writel(pcie, GPEX_CFG_READY, PCIE_CONFIG_READY);
+}
+
+static void ls_pcie_g4_cfg_set_target(struct ls_pcie_g4 *pcie, u32 target)
+{
+ ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(0), target);
+ ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(0), 0);
+}
+
+static int ls_pcie_g4_outbound_win_set(struct ls_pcie_g4 *pcie, int idx,
+ int type, u64 phys, u64 bus_addr,
+ pci_size_t size)
+{
+ u32 val;
+ u32 size_h, size_l;
+
+ if (idx >= PAB_WINS_NUM)
+ return -EINVAL;
+
+ size_h = upper_32_bits(~(size - 1));
+ size_l = lower_32_bits(~(size - 1));
+
+ val = ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(idx));
+ val &= ~((AXI_AMAP_CTRL_TYPE_MASK << AXI_AMAP_CTRL_TYPE_SHIFT) |
+ (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT) |
+ AXI_AMAP_CTRL_EN);
+ val |= ((type & AXI_AMAP_CTRL_TYPE_MASK) << AXI_AMAP_CTRL_TYPE_SHIFT) |
+ ((size_l >> AXI_AMAP_CTRL_SIZE_SHIFT) <<
+ AXI_AMAP_CTRL_SIZE_SHIFT) | AXI_AMAP_CTRL_EN;
+
+ ccsr_writel(pcie, PAB_AXI_AMAP_CTRL(idx), val);
+
+ ccsr_writel(pcie, PAB_AXI_AMAP_AXI_WIN(idx), lower_32_bits(phys));
+ ccsr_writel(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(idx), upper_32_bits(phys));
+ ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr));
+ ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr));
+ ccsr_writel(pcie, PAB_EXT_AXI_AMAP_SIZE(idx), size_h);
+
+ return 0;
+}
+
+static int ls_pcie_g4_rc_inbound_win_set(struct ls_pcie_g4 *pcie, int idx,
+ int type, u64 phys, u64 bus_addr,
+ pci_size_t size)
+{
+ u32 val;
+ pci_size_t win_size = ~(size - 1);
+
+ val = ccsr_readl(pcie, PAB_PEX_AMAP_CTRL(idx));
+
+ val &= ~(PEX_AMAP_CTRL_TYPE_MASK << PEX_AMAP_CTRL_TYPE_SHIFT);
+ val &= ~(PEX_AMAP_CTRL_EN_MASK << PEX_AMAP_CTRL_EN_SHIFT);
+ val = (val | (type << PEX_AMAP_CTRL_TYPE_SHIFT));
+ val = (val | (1 << PEX_AMAP_CTRL_EN_SHIFT));
+
+ ccsr_writel(pcie, PAB_PEX_AMAP_CTRL(idx),
+ val | lower_32_bits(win_size));
+
+ ccsr_writel(pcie, PAB_EXT_PEX_AMAP_SIZE(idx), upper_32_bits(win_size));
+ ccsr_writel(pcie, PAB_PEX_AMAP_AXI_WIN(idx), lower_32_bits(phys));
+ ccsr_writel(pcie, PAB_EXT_PEX_AMAP_AXI_WIN(idx), upper_32_bits(phys));
+ ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr));
+ ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr));
+
+ return 0;
+}
+
+static void ls_pcie_g4_dump_wins(struct ls_pcie_g4 *pcie, int wins)
+{
+ int i;
+
+ for (i = 0; i < wins; i++) {
+ debug("APIO Win%d:\n", i);
+ debug("\tLOWER PHYS: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(i)));
+ debug("\tUPPER PHYS: 0x%08x\n",
+ ccsr_readl(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(i)));
+ debug("\tLOWER BUS: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_L(i)));
+ debug("\tUPPER BUS: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(i)));
+ debug("\tSIZE: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)) &
+ (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT));
+ debug("\tEXT_SIZE: 0x%08x\n",
+ ccsr_readl(pcie, PAB_EXT_AXI_AMAP_SIZE(i)));
+ debug("\tPARAM: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(i)));
+ debug("\tCTRL: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)));
+ }
+}
+
+static void ls_pcie_g4_setup_wins(struct ls_pcie_g4 *pcie)
+{
+ struct pci_region *io, *mem, *pref;
+ int idx = 1;
+
+ /* INBOUND WIN */
+ ls_pcie_g4_rc_inbound_win_set(pcie, 0, IB_TYPE_MEM_F, 0, 0, SIZE_1T);
+
+ /* OUTBOUND WIN 0: CFG */
+ ls_pcie_g4_outbound_win_set(pcie, 0, PAB_AXI_TYPE_CFG,
+ pcie->cfg_res.start, 0,
+ fdt_resource_size(&pcie->cfg_res));
+
+ pci_get_regions(pcie->bus, &io, &mem, &pref);
+
+ if (io)
+ /* OUTBOUND WIN: IO */
+ ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_IO,
+ io->phys_start, io->bus_start,
+ io->size);
+
+ if (mem)
+ /* OUTBOUND WIN: MEM */
+ ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM,
+ mem->phys_start, mem->bus_start,
+ mem->size);
+
+ if (pref)
+ /* OUTBOUND WIN: pref MEM */
+ ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM,
+ pref->phys_start, pref->bus_start,
+ pref->size);
+
+ ls_pcie_g4_dump_wins(pcie, idx);
+}
+
+/* Return 0 if the address is valid, -errno if not valid */
+static int ls_pcie_g4_addr_valid(struct ls_pcie_g4 *pcie, pci_dev_t bdf)
+{
+ struct udevice *bus = pcie->bus;
+
+ if (pcie->mode == PCI_HEADER_TYPE_NORMAL)
+ return -ENODEV;
+
+ if (!pcie->enabled)
+ return -ENXIO;
+
+ if (PCI_BUS(bdf) < bus->seq)
+ return -EINVAL;
+
+ if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_g4_link_up(pcie)))
+ return -EINVAL;
+
+ if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+void *ls_pcie_g4_conf_address(struct ls_pcie_g4 *pcie, pci_dev_t bdf,
+ int offset)
+{
+ struct udevice *bus = pcie->bus;
+ u32 target;
+
+ if (PCI_BUS(bdf) == bus->seq) {
+ if (offset < INDIRECT_ADDR_BNDRY) {
+ ccsr_set_page(pcie, 0);
+ return pcie->ccsr + offset;
+ }
+
+ ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset));
+ return pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset);
+ }
+
+ target = PAB_TARGET_BUS(PCI_BUS(bdf) - bus->seq) |
+ PAB_TARGET_DEV(PCI_DEV(bdf)) |
+ PAB_TARGET_FUNC(PCI_FUNC(bdf));
+
+ ls_pcie_g4_cfg_set_target(pcie, target);
+
+ return pcie->cfg + offset;
+}
+
+static int ls_pcie_g4_read_config(struct udevice *bus, pci_dev_t bdf,
+ uint offset, ulong *valuep,
+ enum pci_size_t size)
+{
+ struct ls_pcie_g4 *pcie = dev_get_priv(bus);
+ void *address;
+ int ret = 0;
+
+ if (ls_pcie_g4_addr_valid(pcie, bdf)) {
+ *valuep = pci_get_ff(size);
+ return 0;
+ }
+
+ address = ls_pcie_g4_conf_address(pcie, bdf, offset);
+
+ switch (size) {
+ case PCI_SIZE_8:
+ *valuep = readb(address);
+ break;
+ case PCI_SIZE_16:
+ *valuep = readw(address);
+ break;
+ case PCI_SIZE_32:
+ *valuep = readl(address);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ls_pcie_g4_write_config(struct udevice *bus, pci_dev_t bdf,
+ uint offset, ulong value,
+ enum pci_size_t size)
+{
+ struct ls_pcie_g4 *pcie = dev_get_priv(bus);
+ void *address;
+
+ if (ls_pcie_g4_addr_valid(pcie, bdf))
+ return 0;
+
+ address = ls_pcie_g4_conf_address(pcie, bdf, offset);
+
+ switch (size) {
+ case PCI_SIZE_8:
+ writeb(value, address);
+ return 0;
+ case PCI_SIZE_16:
+ writew(value, address);
+ return 0;
+ case PCI_SIZE_32:
+ writel(value, address);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ls_pcie_g4_setup_ctrl(struct ls_pcie_g4 *pcie)
+{
+ u32 val;
+
+ /* Fix class code */
+ val = ccsr_readl(pcie, GPEX_CLASSCODE);
+ val &= ~(GPEX_CLASSCODE_MASK << GPEX_CLASSCODE_SHIFT);
+ val |= PCI_CLASS_BRIDGE_PCI << GPEX_CLASSCODE_SHIFT;
+ ccsr_writel(pcie, GPEX_CLASSCODE, val);
+
+ /* Enable APIO and Memory/IO/CFG Wins */
+ val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0));
+ val |= APIO_EN | MEM_WIN_EN | IO_WIN_EN | CFG_WIN_EN;
+ ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val);
+
+ ls_pcie_g4_setup_wins(pcie);
+
+ pcie->stream_id_cur = 0;
+}
+
+static void ls_pcie_g4_ep_inbound_win_set(struct ls_pcie_g4 *pcie, int pf,
+ int bar, u64 phys)
+{
+ u32 val;
+
+ /* PF BAR1 is reserved for MSI-X and only needs to be enabled */
+ if (bar == 1) {
+ ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), BAR_AMAP_EN);
+ return;
+ }
+
+ val = upper_32_bits(phys);
+ ccsr_writel(pcie, PAB_EXT_PEX_BAR_AMAP(pf, bar), val);
+ val = lower_32_bits(phys) | BAR_AMAP_EN;
+ ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), val);
+}
+
+static void ls_pcie_g4_ep_setup_wins(struct ls_pcie_g4 *pcie, int pf)
+{
+ u64 phys;
+ int bar;
+ u32 val;
+
+ if ((!pcie->sriov_support && pf > LS_G4_PF0) || pf > LS_G4_PF1)
+ return;
+
+ phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR_SIZE * 4 * pf;
+ for (bar = 0; bar < PF_BAR_NUM; bar++) {
+ ls_pcie_g4_ep_inbound_win_set(pcie, pf, bar, phys);
+ phys += PCIE_BAR_SIZE;
+ }
+
+ /* OUTBOUND: map MEM */
+ ls_pcie_g4_outbound_win_set(pcie, pf, PAB_AXI_TYPE_MEM,
+ pcie->cfg_res.start +
+ CONFIG_SYS_PCI_MEMORY_SIZE * pf, 0x0,
+ CONFIG_SYS_PCI_MEMORY_SIZE);
+
+ val = ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf));
+ val &= ~FUNC_NUM_PCIE_MASK;
+ val |= pf;
+ ccsr_writel(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf), val);
+}
+
+static void ls_pcie_g4_ep_enable_bar(struct ls_pcie_g4 *pcie, int pf,
+ int bar, bool vf_bar, bool enable)
+{
+ u32 val;
+ u32 bar_pos = BAR_POS(bar, pf, vf_bar);
+
+ val = ccsr_readl(pcie, GPEX_BAR_ENABLE);
+ if (enable)
+ val |= 1 << bar_pos;
+ else
+ val &= ~(1 << bar_pos);
+ ccsr_writel(pcie, GPEX_BAR_ENABLE, val);
+}
+
+static void ls_pcie_g4_ep_set_bar_size(struct ls_pcie_g4 *pcie, int pf,
+ int bar, bool vf_bar, u64 size)
+{
+ u32 bar_pos = BAR_POS(bar, pf, vf_bar);
+ u32 mask_l = lower_32_bits(~(size - 1));
+ u32 mask_h = upper_32_bits(~(size - 1));
+
+ ccsr_writel(pcie, GPEX_BAR_SELECT, bar_pos);
+ ccsr_writel(pcie, GPEX_BAR_SIZE_LDW, mask_l);
+ ccsr_writel(pcie, GPEX_BAR_SIZE_UDW, mask_h);
+}
+
+static void ls_pcie_g4_ep_setup_bar(struct ls_pcie_g4 *pcie, int pf,
+ int bar, bool vf_bar, u64 size)
+{
+ bool en = size ? true : false;
+
+ ls_pcie_g4_ep_enable_bar(pcie, pf, bar, vf_bar, en);
+ ls_pcie_g4_ep_set_bar_size(pcie, pf, bar, vf_bar, size);
+}
+
+static void ls_pcie_g4_ep_setup_bars(struct ls_pcie_g4 *pcie, int pf)
+{
+ int bar;
+
+ /* Setup PF BARs */
+ for (bar = 0; bar < PF_BAR_NUM; bar++)
+ ls_pcie_g4_ep_setup_bar(pcie, pf, bar, false, bar_size[bar]);
+
+ if (!pcie->sriov_support)
+ return;
+
+ /* Setup VF BARs */
+ for (bar = 0; bar < VF_BAR_NUM; bar++)
+ ls_pcie_g4_ep_setup_bar(pcie, pf, bar, true, bar_size[bar]);
+}
+
+static void ls_pcie_g4_set_sriov(struct ls_pcie_g4 *pcie, int pf)
+{
+ unsigned int val;
+
+ val = ccsr_readl(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf));
+ val &= ~(TTL_VF_MASK << TTL_VF_SHIFT);
+ val |= PCIE_VF_NUM << TTL_VF_SHIFT;
+ val &= ~(INI_VF_MASK << INI_VF_SHIFT);
+ val |= PCIE_VF_NUM << INI_VF_SHIFT;
+ ccsr_writel(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf), val);
+
+ val = ccsr_readl(pcie, PCIE_SRIOV_VF_OFFSET_STRIDE);
+ val += PCIE_VF_NUM * pf - pf;
+ ccsr_writel(pcie, GPEX_SRIOV_VF_OFFSET_STRIDE(pf), val);
+}
+
+static void ls_pcie_g4_setup_ep(struct ls_pcie_g4 *pcie)
+{
+ u32 pf, sriov;
+ u32 val;
+ int i;
+
+ /* Enable APIO and Memory Win */
+ val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0));
+ val |= APIO_EN | MEM_WIN_EN;
+ ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val);
+
+ sriov = ccsr_readl(pcie, PCIE_SRIOV_CAPABILITY);
+ if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
+ pcie->sriov_support = 1;
+
+ pf = pcie->sriov_support ? PCIE_PF_NUM : 1;
+
+ for (i = 0; i < pf; i++) {
+ ls_pcie_g4_ep_setup_bars(pcie, i);
+ ls_pcie_g4_ep_setup_wins(pcie, i);
+ if (pcie->sriov_support)
+ ls_pcie_g4_set_sriov(pcie, i);
+ }
+
+ ls_pcie_g4_ep_enable_cfg(pcie);
+ ls_pcie_g4_dump_wins(pcie, pf);
+}
+
+static int ls_pcie_g4_probe(struct udevice *dev)
+{
+ struct ls_pcie_g4 *pcie = dev_get_priv(dev);
+ const void *fdt = gd->fdt_blob;
+ int node = dev_of_offset(dev);
+ u32 link_ctrl_sta;
+ u32 val;
+ int ret;
+
+ pcie->bus = dev;
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ "ccsr", &pcie->ccsr_res);
+ if (ret) {
+ printf("ls-pcie-g4: resource \"ccsr\" not found\n");
+ return ret;
+ }
+
+ pcie->idx = (pcie->ccsr_res.start - PCIE_SYS_BASE_ADDR) /
+ PCIE_CCSR_SIZE;
+
+ list_add(&pcie->list, &ls_pcie_g4_list);
+
+ pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
+ if (!pcie->enabled) {
+ printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
+ return 0;
+ }
+
+ pcie->ccsr = map_physmem(pcie->ccsr_res.start,
+ fdt_resource_size(&pcie->ccsr_res),
+ MAP_NOCACHE);
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ "config", &pcie->cfg_res);
+ if (ret) {
+ printf("%s: resource \"config\" not found\n", dev->name);
+ return ret;
+ }
+
+ pcie->cfg = map_physmem(pcie->cfg_res.start,
+ fdt_resource_size(&pcie->cfg_res),
+ MAP_NOCACHE);
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ "lut", &pcie->lut_res);
+ if (ret) {
+ printf("ls-pcie-g4: resource \"lut\" not found\n");
+ return ret;
+ }
+
+ pcie->lut = map_physmem(pcie->lut_res.start,
+ fdt_resource_size(&pcie->lut_res),
+ MAP_NOCACHE);
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ "pf_ctrl", &pcie->pf_ctrl_res);
+ if (ret) {
+ printf("ls-pcie-g4: resource \"pf_ctrl\" not found\n");
+ return ret;
+ }
+
+ pcie->pf_ctrl = map_physmem(pcie->pf_ctrl_res.start,
+ fdt_resource_size(&pcie->pf_ctrl_res),
+ MAP_NOCACHE);
+
+ pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");
+
+ debug("%s ccsr:%lx, cfg:0x%lx, big-endian:%d\n",
+ dev->name, (unsigned long)pcie->ccsr, (unsigned long)pcie->cfg,
+ pcie->big_endian);
+
+ pcie->mode = readb(pcie->ccsr + PCI_HEADER_TYPE) & 0x7f;
+
+ if (pcie->mode == PCI_HEADER_TYPE_NORMAL) {
+ printf("PCIe%u: %s %s", pcie->idx, dev->name, "Endpoint");
+ ls_pcie_g4_setup_ep(pcie);
+ } else {
+ printf("PCIe%u: %s %s", pcie->idx, dev->name, "Root Complex");
+ ls_pcie_g4_setup_ctrl(pcie);
+ }
+
+ /* Enable AMBA & PEX PIO */
+ val = ccsr_readl(pcie, PAB_CTRL);
+ val |= PAB_CTRL_APIO_EN | PAB_CTRL_PPIO_EN;
+ ccsr_writel(pcie, PAB_CTRL, val);
+
+ val = ccsr_readl(pcie, PAB_PEX_PIO_CTRL(0));
+ val |= PPIO_EN;
+ ccsr_writel(pcie, PAB_PEX_PIO_CTRL(0), val);
+
+ if (!ls_pcie_g4_link_up(pcie)) {
+ /* Let the user know there's no PCIe link */
+ printf(": no link\n");
+ return 0;
+ }
+
+ /* Print the negotiated PCIe link width */
+ link_ctrl_sta = ccsr_readl(pcie, PCIE_LINK_CTRL_STA);
+ printf(": x%d gen%d\n",
+ (link_ctrl_sta >> PCIE_LINK_WIDTH_SHIFT & PCIE_LINK_WIDTH_MASK),
+ (link_ctrl_sta >> PCIE_LINK_SPEED_SHIFT) & PCIE_LINK_SPEED_MASK);
+
+ return 0;
+}
+
+static const struct dm_pci_ops ls_pcie_g4_ops = {
+ .read_config = ls_pcie_g4_read_config,
+ .write_config = ls_pcie_g4_write_config,
+};
+
+static const struct udevice_id ls_pcie_g4_ids[] = {
+ { .compatible = "fsl,lx2160a-pcie" },
+ { }
+};
+
+U_BOOT_DRIVER(pcie_layerscape_gen4) = {
+ .name = "pcie_layerscape_gen4",
+ .id = UCLASS_PCI,
+ .of_match = ls_pcie_g4_ids,
+ .ops = &ls_pcie_g4_ops,
+ .probe = ls_pcie_g4_probe,
+ .priv_auto_alloc_size = sizeof(struct ls_pcie_g4),
+};
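
For reference (not part of the patch): ls_pcie_g4_outbound_win_set() above encodes the window size as the mask ~(size - 1), folding the low word into bits [31:10] of PAB_AXI_AMAP_CTRL and writing the high word to PAB_EXT_AXI_AMAP_SIZE. The standalone sketch below recomputes that encoding for a hypothetical 1 GiB window; the shift constant and the lower_32_bits()/upper_32_bits() semantics are taken from the code above.

/* Illustrative only -- not part of the patch. Recomputes the window-size
 * encoding used by ls_pcie_g4_outbound_win_set() for a 1 GiB window.
 */
#include <stdio.h>
#include <stdint.h>

#define AXI_AMAP_CTRL_SIZE_SHIFT 10

int main(void)
{
	uint64_t size = 0x40000000ULL;            /* hypothetical 1 GiB window */
	uint64_t mask = ~(size - 1);              /* 0xffffffffc0000000 */
	uint32_t size_l = (uint32_t)mask;         /* lower_32_bits() */
	uint32_t size_h = (uint32_t)(mask >> 32); /* upper_32_bits() */
	uint32_t ctrl_bits = (size_l >> AXI_AMAP_CTRL_SIZE_SHIFT)
			     << AXI_AMAP_CTRL_SIZE_SHIFT;

	/* Prints: size_l 0xc0000000 size_h 0xffffffff ctrl size field 0xc0000000 */
	printf("size_l 0x%x size_h 0x%x ctrl size field 0x%x\n",
	       size_l, size_h, ctrl_bits);
	return 0;
}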
diff --git a/drivers/pci/pcie_layerscape_gen4.h b/drivers/pci/pcie_layerscape_gen4.h
new file mode 100644
index 0000000000..27c2d09332
--- /dev/null
+++ b/drivers/pci/pcie_layerscape_gen4.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018-2019 NXP
+ *
+ * PCIe Gen4 driver for NXP Layerscape SoCs
+ * Author: Hou Zhiqiang <Minder.Hou@gmail.com>
+ */
+
+#ifndef _PCIE_LAYERSCAPE_GEN4_H_
+#define _PCIE_LAYERSCAPE_GEN4_H_
+#include <pci.h>
+#include <dm.h>
+
+#ifndef CONFIG_SYS_PCI_MEMORY_SIZE
+#define CONFIG_SYS_PCI_MEMORY_SIZE (4 * 1024 * 1024 * 1024ULL)
+#endif
+
+#ifndef CONFIG_SYS_PCI_EP_MEMORY_BASE
+#define CONFIG_SYS_PCI_EP_MEMORY_BASE CONFIG_SYS_LOAD_ADDR
+#endif
+
+#define PCIE_PF_NUM 2
+#define PCIE_VF_NUM 32
+
+#define LS_G4_PF0 0
+#define LS_G4_PF1 1
+#define PF_BAR_NUM 4
+#define VF_BAR_NUM 4
+#define PCIE_BAR_SIZE (8 * 1024) /* 8K */
+#define PCIE_BAR0_SIZE PCIE_BAR_SIZE
+#define PCIE_BAR1_SIZE PCIE_BAR_SIZE
+#define PCIE_BAR2_SIZE PCIE_BAR_SIZE
+#define PCIE_BAR4_SIZE PCIE_BAR_SIZE
+#define SIZE_1T (1024 * 1024 * 1024 * 1024ULL)
+
+/* GPEX CSR */
+#define GPEX_CLASSCODE 0x474
+#define GPEX_CLASSCODE_SHIFT 16
+#define GPEX_CLASSCODE_MASK 0xffff
+
+#define GPEX_CFG_READY 0x4b0
+#define PCIE_CONFIG_READY BIT(0)
+
+#define GPEX_BAR_ENABLE 0x4d4
+#define GPEX_BAR_SIZE_LDW 0x4d8
+#define GPEX_BAR_SIZE_UDW 0x4dC
+#define GPEX_BAR_SELECT 0x4e0
+
+#define BAR_POS(bar, pf, vf_bar) \
+ ((bar) + (pf) * PF_BAR_NUM + (vf_bar) * PCIE_PF_NUM * PF_BAR_NUM)
+
+#define GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf) (0x644 + (pf) * 4)
+#define TTL_VF_MASK 0xffff
+#define TTL_VF_SHIFT 16
+#define INI_VF_MASK 0xffff
+#define INI_VF_SHIFT 0
+#define GPEX_SRIOV_VF_OFFSET_STRIDE(pf) (0x704 + (pf) * 4)
+
+/* PAB CSR */
+#define PAB_CTRL 0x808
+#define PAB_CTRL_APIO_EN BIT(0)
+#define PAB_CTRL_PPIO_EN BIT(1)
+#define PAB_CTRL_MAX_BRST_LEN_SHIFT 4
+#define PAB_CTRL_MAX_BRST_LEN_MASK 0x3
+#define PAB_CTRL_PAGE_SEL_SHIFT 13
+#define PAB_CTRL_PAGE_SEL_MASK 0x3f
+#define PAB_CTRL_FUNC_SEL_SHIFT 19
+#define PAB_CTRL_FUNC_SEL_MASK 0x1ff
+
+#define PAB_RST_CTRL 0x820
+#define PAB_BR_STAT 0x80c
+
+/* AXI PIO Engines */
+#define PAB_AXI_PIO_CTRL(idx) (0x840 + 0x10 * (idx))
+#define APIO_EN BIT(0)
+#define MEM_WIN_EN BIT(1)
+#define IO_WIN_EN BIT(2)
+#define CFG_WIN_EN BIT(3)
+#define PAB_AXI_PIO_STAT(idx) (0x844 + 0x10 * (idx))
+#define PAB_AXI_PIO_SL_CMD_STAT(idx) (0x848 + 0x10 * (idx))
+#define PAB_AXI_PIO_SL_ADDR_STAT(idx) (0x84c + 0x10 * (idx))
+#define PAB_AXI_PIO_SL_EXT_ADDR_STAT(idx) (0xb8a0 + 0x4 * (idx))
+
+/* PEX PIO Engines */
+#define PAB_PEX_PIO_CTRL(idx) (0x8c0 + 0x10 * (idx))
+#define PPIO_EN BIT(0)
+#define PAB_PEX_PIO_STAT(idx) (0x8c4 + 0x10 * (idx))
+#define PAB_PEX_PIO_MT_STAT(idx) (0x8c8 + 0x10 * (idx))
+
+#define INDIRECT_ADDR_BNDRY 0xc00
+#define PAGE_IDX_SHIFT 10
+#define PAGE_ADDR_MASK 0x3ff
+
+#define OFFSET_TO_PAGE_IDX(off) \
+ (((off) >> PAGE_IDX_SHIFT) & PAB_CTRL_PAGE_SEL_MASK)
+
+#define OFFSET_TO_PAGE_ADDR(off) \
+ (((off) & PAGE_ADDR_MASK) | INDIRECT_ADDR_BNDRY)
+
+/* APIO WINs */
+#define PAB_AXI_AMAP_CTRL(idx) (0xba0 + 0x10 * (idx))
+#define PAB_EXT_AXI_AMAP_SIZE(idx) (0xbaf0 + 0x4 * (idx))
+#define PAB_AXI_AMAP_AXI_WIN(idx) (0xba4 + 0x10 * (idx))
+#define PAB_EXT_AXI_AMAP_AXI_WIN(idx) (0x80a0 + 0x4 * (idx))
+#define PAB_AXI_AMAP_PEX_WIN_L(idx) (0xba8 + 0x10 * (idx))
+#define PAB_AXI_AMAP_PEX_WIN_H(idx) (0xbac + 0x10 * (idx))
+#define PAB_AXI_AMAP_PCI_HDR_PARAM(idx) (0x5ba0 + 0x4 * (idx))
+#define FUNC_NUM_PCIE_MASK GENMASK(7, 0)
+
+#define AXI_AMAP_CTRL_EN BIT(0)
+#define AXI_AMAP_CTRL_TYPE_SHIFT 1
+#define AXI_AMAP_CTRL_TYPE_MASK 0x3
+#define AXI_AMAP_CTRL_SIZE_SHIFT 10
+#define AXI_AMAP_CTRL_SIZE_MASK 0x3fffff
+
+#define PAB_TARGET_BUS(x) (((x) & 0xff) << 24)
+#define PAB_TARGET_DEV(x) (((x) & 0x1f) << 19)
+#define PAB_TARGET_FUNC(x) (((x) & 0x7) << 16)
+
+#define PAB_AXI_TYPE_CFG 0x00
+#define PAB_AXI_TYPE_IO 0x01
+#define PAB_AXI_TYPE_MEM 0x02
+#define PAB_AXI_TYPE_ATOM 0x03
+
+#define PAB_WINS_NUM 256
+
+/* PPIO WINs RC mode */
+#define PAB_PEX_AMAP_CTRL(idx) (0x4ba0 + 0x10 * (idx))
+#define PAB_EXT_PEX_AMAP_SIZE(idx) (0xbef0 + 0x04 * (idx))
+#define PAB_PEX_AMAP_AXI_WIN(idx) (0x4ba4 + 0x10 * (idx))
+#define PAB_EXT_PEX_AMAP_AXI_WIN(idx) (0xb4a0 + 0x04 * (idx))
+#define PAB_PEX_AMAP_PEX_WIN_L(idx) (0x4ba8 + 0x10 * (idx))
+#define PAB_PEX_AMAP_PEX_WIN_H(idx) (0x4bac + 0x10 * (idx))
+
+#define IB_TYPE_MEM_F 0x2
+#define IB_TYPE_MEM_NF 0x3
+
+#define PEX_AMAP_CTRL_TYPE_SHIFT 0x1
+#define PEX_AMAP_CTRL_EN_SHIFT 0x0
+#define PEX_AMAP_CTRL_TYPE_MASK 0x3
+#define PEX_AMAP_CTRL_EN_MASK 0x1
+
+/* PPIO WINs EP mode */
+#define PAB_PEX_BAR_AMAP(pf, bar) \
+ (0x1ba0 + 0x20 * (pf) + 4 * (bar))
+#define BAR_AMAP_EN BIT(0)
+#define PAB_EXT_PEX_BAR_AMAP(pf, bar) \
+ (0x84a0 + 0x20 * (pf) + 4 * (bar))
+
+/* CCSR registers */
+#define PCIE_LINK_CTRL_STA 0x5c
+#define PCIE_LINK_SPEED_SHIFT 16
+#define PCIE_LINK_SPEED_MASK 0x0f
+#define PCIE_LINK_WIDTH_SHIFT 20
+#define PCIE_LINK_WIDTH_MASK 0x3f
+#define PCIE_SRIOV_CAPABILITY 0x2a0
+#define PCIE_SRIOV_VF_OFFSET_STRIDE 0x2b4
+
+/* LUT registers */
+#define PCIE_LUT_UDR(n) (0x800 + (n) * 8)
+#define PCIE_LUT_LDR(n) (0x804 + (n) * 8)
+#define PCIE_LUT_ENABLE BIT(31)
+#define PCIE_LUT_ENTRY_COUNT 32
+
+/* PF control registers */
+#define PCIE_LTSSM_STA 0x7fc
+#define LTSSM_STATE_MASK 0x7f
+#define LTSSM_PCIE_L0 0x2d /* L0 state */
+
+#define PCIE_SRDS_PRTCL(idx) (PCIE1 + (idx))
+#define PCIE_SYS_BASE_ADDR 0x3400000
+#define PCIE_CCSR_SIZE 0x0100000
+
+struct ls_pcie_g4 {
+ int idx;
+ struct list_head list;
+ struct udevice *bus;
+ struct fdt_resource ccsr_res;
+ struct fdt_resource cfg_res;
+ struct fdt_resource lut_res;
+ struct fdt_resource pf_ctrl_res;
+ void __iomem *ccsr;
+ void __iomem *cfg;
+ void __iomem *lut;
+ void __iomem *pf_ctrl;
+ bool big_endian;
+ bool enabled;
+ int next_lut_index;
+ struct pci_controller hose;
+ int stream_id_cur;
+ int mode;
+ int sriov_support;
+};
+
+extern struct list_head ls_pcie_g4_list;
+
+static inline void lut_writel(struct ls_pcie_g4 *pcie, unsigned int value,
+ unsigned int offset)
+{
+ if (pcie->big_endian)
+ out_be32(pcie->lut + offset, value);
+ else
+ out_le32(pcie->lut + offset, value);
+}
+
+static inline u32 lut_readl(struct ls_pcie_g4 *pcie, unsigned int offset)
+{
+ if (pcie->big_endian)
+ return in_be32(pcie->lut + offset);
+ else
+ return in_le32(pcie->lut + offset);
+}
+
+static inline void ccsr_set_page(struct ls_pcie_g4 *pcie, u8 pg_idx)
+{
+ u32 val;
+
+ val = in_le32(pcie->ccsr + PAB_CTRL);
+ val &= ~(PAB_CTRL_PAGE_SEL_MASK << PAB_CTRL_PAGE_SEL_SHIFT);
+ val |= (pg_idx & PAB_CTRL_PAGE_SEL_MASK) << PAB_CTRL_PAGE_SEL_SHIFT;
+
+ out_le32(pcie->ccsr + PAB_CTRL, val);
+}
+
+static inline unsigned int ccsr_readl(struct ls_pcie_g4 *pcie, u32 offset)
+{
+ if (offset < INDIRECT_ADDR_BNDRY) {
+ ccsr_set_page(pcie, 0);
+ return in_le32(pcie->ccsr + offset);
+ }
+
+ ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset));
+ return in_le32(pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset));
+}
+
+static inline void ccsr_writel(struct ls_pcie_g4 *pcie, u32 offset, u32 value)
+{
+ if (offset < INDIRECT_ADDR_BNDRY) {
+ ccsr_set_page(pcie, 0);
+ out_le32(pcie->ccsr + offset, value);
+ } else {
+ ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset));
+ out_le32(pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset), value);
+ }
+}
+
+static inline unsigned int pf_ctrl_readl(struct ls_pcie_g4 *pcie, u32 offset)
+{
+ if (pcie->big_endian)
+ return in_be32(pcie->pf_ctrl + offset);
+ else
+ return in_le32(pcie->pf_ctrl + offset);
+}
+
+static inline void pf_ctrl_writel(struct ls_pcie_g4 *pcie, u32 offset,
+ u32 value)
+{
+ if (pcie->big_endian)
+ out_be32(pcie->pf_ctrl + offset, value);
+ else
+ out_le32(pcie->pf_ctrl + offset, value);
+}
+
+#endif /* _PCIE_LAYERSCAPE_GEN4_H_ */
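
For reference (not part of the patch): the ccsr_readl()/ccsr_writel() helpers above reach registers at offsets of 0xc00 and beyond indirectly, by writing a page index into the PAGE_SEL field of PAB_CTRL and then accessing the fixed window at 0xc00-0xfff. A minimal sketch of that offset translation, reusing the constants defined in this header, for the sample offset PAB_PEX_AMAP_CTRL(0) = 0x4ba0:

/* Illustrative only -- not part of the patch. Recomputes the paged CCSR
 * translation done by ccsr_readl()/ccsr_writel() for one sample offset.
 */
#include <stdio.h>

#define INDIRECT_ADDR_BNDRY	0xc00
#define PAGE_IDX_SHIFT		10
#define PAGE_ADDR_MASK		0x3ff
#define PAB_CTRL_PAGE_SEL_MASK	0x3f

int main(void)
{
	unsigned int off = 0x4ba0;	/* PAB_PEX_AMAP_CTRL(0) */
	unsigned int page = (off >> PAGE_IDX_SHIFT) & PAB_CTRL_PAGE_SEL_MASK;
	unsigned int addr = (off & PAGE_ADDR_MASK) | INDIRECT_ADDR_BNDRY;

	/* Prints: offset 0x4ba0 -> page 18, window address 0xfa0 */
	printf("offset 0x%x -> page %u, window address 0x%x\n",
	       off, page, addr);
	return 0;
}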
diff --git a/drivers/pci/pcie_layerscape_gen4_fixup.c b/drivers/pci/pcie_layerscape_gen4_fixup.c
new file mode 100644
index 0000000000..1c9e5750bd
--- /dev/null
+++ b/drivers/pci/pcie_layerscape_gen4_fixup.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0+ OR X11
+/*
+ * Copyright 2018-2019 NXP
+ *
+ * PCIe Gen4 driver for NXP Layerscape SoCs
+ * Author: Hou Zhiqiang <Minder.Hou@gmail.com>
+ *
+ */
+
+#include <common.h>
+#include <pci.h>
+#include <asm/arch/fsl_serdes.h>
+#include <asm/io.h>
+#include <errno.h>
+#ifdef CONFIG_OF_BOARD_SETUP
+#include <linux/libfdt.h>
+#include <fdt_support.h>
+#ifdef CONFIG_ARM
+#include <asm/arch/clock.h>
+#endif
+#include "pcie_layerscape_gen4.h"
+
+#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2)
+/*
+ * Return next available LUT index.
+ */
+static int ls_pcie_g4_next_lut_index(struct ls_pcie_g4 *pcie)
+{
+ if (pcie->next_lut_index < PCIE_LUT_ENTRY_COUNT)
+ return pcie->next_lut_index++;
+
+ return -ENOSPC; /* LUT is full */
+}
+
+/* returns the next available streamid for pcie, -errno if failed */
+static int ls_pcie_g4_next_streamid(struct ls_pcie_g4 *pcie)
+{
+ int stream_id = pcie->stream_id_cur;
+
+ if (stream_id > FSL_PEX_STREAM_ID_NUM)
+ return -EINVAL;
+
+ pcie->stream_id_cur++;
+
+ return stream_id | ((pcie->idx + 1) << 11);
+}
+
+/*
+ * Program a single LUT entry
+ */
+static void ls_pcie_g4_lut_set_mapping(struct ls_pcie_g4 *pcie, int index,
+ u32 devid, u32 streamid)
+{
+ /* leave mask as all zeroes, want to match all bits */
+ lut_writel(pcie, devid << 16, PCIE_LUT_UDR(index));
+ lut_writel(pcie, streamid | PCIE_LUT_ENABLE, PCIE_LUT_LDR(index));
+}
+
+/*
+ * An msi-map is a property to be added to the pci controller
+ * node. It is a table, where each entry consists of 4 fields
+ * e.g.:
+ *
+ * msi-map = <[devid] [phandle-to-msi-ctrl] [stream-id] [count]
+ * [devid] [phandle-to-msi-ctrl] [stream-id] [count]>;
+ */
+static void fdt_pcie_set_msi_map_entry(void *blob, struct ls_pcie_g4 *pcie,
+ u32 devid, u32 streamid)
+{
+ u32 *prop;
+ u32 phandle;
+ int nodeoff;
+
+#ifdef CONFIG_FSL_PCIE_COMPAT
+ nodeoff = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_COMPAT,
+ pcie->ccsr_res.start);
+#else
+#error "No CONFIG_FSL_PCIE_COMPAT defined"
+#endif
+ if (nodeoff < 0) {
+ debug("%s: ERROR: failed to find pcie compatiable\n", __func__);
+ return;
+ }
+
+ /* get phandle to MSI controller */
+ prop = (u32 *)fdt_getprop(blob, nodeoff, "msi-parent", 0);
+ if (!prop) {
+ debug("\n%s: ERROR: missing msi-parent: PCIe%d\n",
+ __func__, pcie->idx);
+ return;
+ }
+ phandle = fdt32_to_cpu(*prop);
+
+ /* set one msi-map row */
+ fdt_appendprop_u32(blob, nodeoff, "msi-map", devid);
+ fdt_appendprop_u32(blob, nodeoff, "msi-map", phandle);
+ fdt_appendprop_u32(blob, nodeoff, "msi-map", streamid);
+ fdt_appendprop_u32(blob, nodeoff, "msi-map", 1);
+}
+
+/*
+ * An iommu-map is a property to be added to the pci controller
+ * node. It is a table, where each entry consists of 4 fields
+ * e.g.:
+ *
+ * iommu-map = <[devid] [phandle-to-iommu-ctrl] [stream-id] [count]
+ * [devid] [phandle-to-iommu-ctrl] [stream-id] [count]>;
+ */
+static void fdt_pcie_set_iommu_map_entry(void *blob, struct ls_pcie_g4 *pcie,
+ u32 devid, u32 streamid)
+{
+ u32 *prop;
+ u32 iommu_map[4];
+ int nodeoff;
+ int lenp;
+
+#ifdef CONFIG_FSL_PCIE_COMPAT
+ nodeoff = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_COMPAT,
+ pcie->ccsr_res.start);
+#else
+#error "No CONFIG_FSL_PCIE_COMPAT defined"
+#endif
+ if (nodeoff < 0) {
+ debug("%s: ERROR: failed to find pcie compatiable\n", __func__);
+ return;
+ }
+
+ /* get phandle to iommu controller */
+ prop = fdt_getprop_w(blob, nodeoff, "iommu-map", &lenp);
+ if (!prop) {
+ debug("\n%s: ERROR: missing iommu-map: PCIe%d\n",
+ __func__, pcie->idx);
+ return;
+ }
+
+ /* set iommu-map row */
+ iommu_map[0] = cpu_to_fdt32(devid);
+ iommu_map[1] = *++prop;
+ iommu_map[2] = cpu_to_fdt32(streamid);
+ iommu_map[3] = cpu_to_fdt32(1);
+
+ if (devid == 0)
+ fdt_setprop_inplace(blob, nodeoff, "iommu-map", iommu_map, 16);
+ else
+ fdt_appendprop(blob, nodeoff, "iommu-map", iommu_map, 16);
+}
+
+static void fdt_fixup_pcie(void *blob)
+{
+ struct udevice *dev, *bus;
+ struct ls_pcie_g4 *pcie;
+ int streamid;
+ int index;
+ pci_dev_t bdf;
+
+ /* Scan all known buses */
+ for (pci_find_first_device(&dev); dev; pci_find_next_device(&dev)) {
+ for (bus = dev; device_is_on_pci_bus(bus);)
+ bus = bus->parent;
+ pcie = dev_get_priv(bus);
+
+ streamid = ls_pcie_g4_next_streamid(pcie);
+ if (streamid < 0) {
+ debug("ERROR: no stream ids free\n");
+ continue;
+ }
+
+ index = ls_pcie_g4_next_lut_index(pcie);
+ if (index < 0) {
+ debug("ERROR: no LUT indexes free\n");
+ continue;
+ }
+
+ /* the DT fixup must be relative to the hose first_busno */
+ bdf = dm_pci_get_bdf(dev) - PCI_BDF(bus->seq, 0, 0);
+ /* map PCI b.d.f to streamID in LUT */
+ ls_pcie_g4_lut_set_mapping(pcie, index, bdf >> 8, streamid);
+ /* update msi-map in device tree */
+ fdt_pcie_set_msi_map_entry(blob, pcie, bdf >> 8, streamid);
+ /* update iommu-map in device tree */
+ fdt_pcie_set_iommu_map_entry(blob, pcie, bdf >> 8, streamid);
+ }
+}
+#endif
+
+static void ft_pcie_ep_layerscape_gen4_fix(void *blob, struct ls_pcie_g4 *pcie)
+{
+ int off;
+
+ off = fdt_node_offset_by_compat_reg(blob, "fsl,lx2160a-pcie-ep",
+ pcie->ccsr_res.start);
+
+ if (off < 0) {
+ debug("%s: ERROR: failed to find pcie compatiable\n",
+ __func__);
+ return;
+ }
+
+ if (pcie->enabled && pcie->mode == PCI_HEADER_TYPE_NORMAL)
+ fdt_set_node_status(blob, off, FDT_STATUS_OKAY, 0);
+ else
+ fdt_set_node_status(blob, off, FDT_STATUS_DISABLED, 0);
+}
+
+static void ft_pcie_rc_layerscape_gen4_fix(void *blob, struct ls_pcie_g4 *pcie)
+{
+ int off;
+
+#ifdef CONFIG_FSL_PCIE_COMPAT
+ off = fdt_node_offset_by_compat_reg(blob, CONFIG_FSL_PCIE_COMPAT,
+ pcie->ccsr_res.start);
+#else
+#error "No CONFIG_FSL_PCIE_COMPAT defined"
+#endif
+ if (off < 0) {
+ debug("%s: ERROR: failed to find pcie compatiable\n", __func__);
+ return;
+ }
+
+ if (pcie->enabled && pcie->mode == PCI_HEADER_TYPE_BRIDGE)
+ fdt_set_node_status(blob, off, FDT_STATUS_OKAY, 0);
+ else
+ fdt_set_node_status(blob, off, FDT_STATUS_DISABLED, 0);
+}
+
+static void ft_pcie_layerscape_gen4_setup(void *blob, struct ls_pcie_g4 *pcie)
+{
+ ft_pcie_rc_layerscape_gen4_fix(blob, pcie);
+ ft_pcie_ep_layerscape_gen4_fix(blob, pcie);
+}
+
+/* Fixup Kernel DT for PCIe */
+void ft_pci_setup(void *blob, bd_t *bd)
+{
+ struct ls_pcie_g4 *pcie;
+
+ list_for_each_entry(pcie, &ls_pcie_g4_list, list)
+ ft_pcie_layerscape_gen4_setup(blob, pcie);
+
+#if defined(CONFIG_FSL_LSCH3) || defined(CONFIG_FSL_LSCH2)
+ fdt_fixup_pcie(blob);
+#endif
+}
+
+#else /* !CONFIG_OF_BOARD_SETUP */
+void ft_pci_setup(void *blob, bd_t *bd)
+{
+}
+#endif
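
For reference (not part of the patch): fdt_fixup_pcie() above derives a 16-bit requester ID from the device's BDF relative to the controller's root bus (bdf >> 8) and pairs it with a stream ID of the form stream_id | ((pcie->idx + 1) << 11), which is then programmed into the LUT and emitted as one msi-map/iommu-map row. The sketch below walks that arithmetic for a hypothetical first endpoint at bus 1, device 0, function 0 behind controller 0; the BDF encoding is assumed to follow U-Boot's PCI_BDF()/PCI_DEVFN() macros.

/* Illustrative only -- not part of the patch. Shows the requester-ID and
 * stream-ID arithmetic performed by fdt_fixup_pcie() for a hypothetical
 * endpoint 01:00.0 behind controller 0 whose root bus sequence number is 0.
 */
#include <stdio.h>

/* Assumed U-Boot encoding: b<<16 | d<<11 | f<<8 */
#define PCI_DEVFN(d, f)	(((d) << 11) | ((f) << 8))
#define PCI_BDF(b, d, f)	(((b) << 16) | PCI_DEVFN(d, f))

int main(void)
{
	int controller_idx = 0;	/* pcie->idx */
	int first_busno = 0;	/* bus->seq of the root bus */
	int stream_id_cur = 0;	/* pcie->stream_id_cur before the fixup */

	unsigned int bdf = PCI_BDF(1, 0, 0) - PCI_BDF(first_busno, 0, 0);
	unsigned int devid = bdf >> 8;	/* 16-bit requester ID: b<<8 | d<<3 | f */
	unsigned int streamid = stream_id_cur | ((controller_idx + 1) << 11);

	/* Prints: devid 0x100, streamid 0x800 -> one msi-map row
	 * <0x100 <msi-parent phandle> 0x800 1>
	 */
	printf("devid 0x%x, streamid 0x%x\n", devid, streamid);
	return 0;
}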