Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ddr/fsl/main.c                    |   8
-rw-r--r--  drivers/misc/fsl_debug_server.c           |   5
-rw-r--r--  drivers/net/fsl-mc/dpio/qbman_portal.c    |  66
-rw-r--r--  drivers/net/fsl-mc/dpio/qbman_portal.h    |  22
-rw-r--r--  drivers/net/fsl-mc/dpio/qbman_private.h   |   2
-rw-r--r--  drivers/net/fsl-mc/dpni.c                 |   2
-rw-r--r--  drivers/net/fsl-mc/mc.c                   | 121
-rw-r--r--  drivers/net/ldpaa_eth/ldpaa_eth.c         | 162
-rw-r--r--  drivers/net/ldpaa_eth/ldpaa_eth.h         |   1
-rw-r--r--  drivers/pci/pcie_layerscape.c             |  62
10 files changed, 266 insertions(+), 185 deletions(-)
diff --git a/drivers/ddr/fsl/main.c b/drivers/ddr/fsl/main.c
index fa223834f2..14ecf1219c 100644
--- a/drivers/ddr/fsl/main.c
+++ b/drivers/ddr/fsl/main.c
@@ -135,6 +135,13 @@ static void __get_spd(generic_spd_eeprom_t *spd, u8 i2c_address)
__attribute__((weak, alias("__get_spd")))
void get_spd(generic_spd_eeprom_t *spd, u8 i2c_address);
+/* This function allows boards to update the SPD address */
+__weak void update_spd_address(unsigned int ctrl_num,
+ unsigned int slot,
+ unsigned int *addr)
+{
+}
+
void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd,
unsigned int ctrl_num, unsigned int dimm_slots_per_ctrl)
{
@@ -148,6 +155,7 @@ void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd,
for (i = 0; i < dimm_slots_per_ctrl; i++) {
i2c_address = spd_i2c_addr[ctrl_num][i];
+ update_spd_address(ctrl_num, i, &i2c_address);
get_spd(&(ctrl_dimms_spd[i]), i2c_address);
}
}
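
For reference, a board that needs this hook can simply provide a non-weak definition of update_spd_address(). The sketch below is illustrative only; the controller, slot and address values are hypothetical and not part of this change:

/* Hypothetical board-file override of the new weak hook (sketch only):
 * redirect DIMM slot 1 of controller 0 to a different SPD EEPROM address,
 * e.g. when that slot sits behind an I2C mux.
 */
void update_spd_address(unsigned int ctrl_num,
			unsigned int slot,
			unsigned int *addr)
{
	if (ctrl_num == 0 && slot == 1)
		*addr = 0x53;
}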
diff --git a/drivers/misc/fsl_debug_server.c b/drivers/misc/fsl_debug_server.c
index e080fe6132..44cd9b9fd4 100644
--- a/drivers/misc/fsl_debug_server.c
+++ b/drivers/misc/fsl_debug_server.c
@@ -10,6 +10,7 @@
#include <asm/system.h>
#include <asm/arch-fsl-lsch3/immap_lsch3.h>
+#include <fsl-mc/fsl_mc.h>
#include <fsl_debug_server.h>
DECLARE_GLOBAL_DATA_PTR;
@@ -151,6 +152,10 @@ int debug_server_init(void)
debug_server_ram_addr =
gd->bd->bi_dram[0].start + gd->bd->bi_dram[0].size;
+#ifdef CONFIG_FSL_MC_ENET
+ debug_server_ram_addr += mc_get_dram_block_size();
+#endif
+
error = debug_server_parse_firmware_fit_image(&raw_image_addr,
&raw_image_size);
if (error != 0)
diff --git a/drivers/net/fsl-mc/dpio/qbman_portal.c b/drivers/net/fsl-mc/dpio/qbman_portal.c
index dd2a7deee5..5fa8d953e5 100644
--- a/drivers/net/fsl-mc/dpio/qbman_portal.c
+++ b/drivers/net/fsl-mc/dpio/qbman_portal.c
@@ -64,7 +64,7 @@ enum qbman_sdqcr_fc {
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
int ret;
- struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ struct qbman_swp *p = malloc(sizeof(struct qbman_swp));
if (!p)
return NULL;
@@ -77,7 +77,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
- p->vdq.busy = 0; /* TODO: convert to atomic_t */
+ atomic_set(&p->vdq.busy, 1);
p->vdq.valid_bit = QB_VALID_BIT;
p->dqrr.next_idx = 0;
p->dqrr.valid_bit = QB_VALID_BIT;
@@ -165,7 +165,6 @@ static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
-static struct qb_attr_code code_eq_rsp_hi = QB_CODE(7, 0, 32);
enum qbman_eq_cmd_e {
/* No enqueue, primarily for plugging ORP gaps for dropped frames */
@@ -197,8 +196,7 @@ void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
{
uint32_t *cl = qb_cl(d);
- qb_attr_code_encode(&code_eq_rsp_lo, cl, lower32(storage_phys));
- qb_attr_code_encode(&code_eq_rsp_hi, cl, upper32(storage_phys));
+ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
}
@@ -253,7 +251,6 @@ static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
-static struct qb_attr_code code_pull_rsp_hi = QB_CODE(3, 0, 32);
enum qb_pull_dt_e {
qb_pull_dt_channel,
@@ -282,8 +279,7 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
}
qb_attr_code_encode(&code_pull_rls, cl, 1);
qb_attr_code_encode(&code_pull_stash, cl, !!stash);
- qb_attr_code_encode(&code_pull_rsp_lo, cl, lower32(storage_phys));
- qb_attr_code_encode(&code_pull_rsp_hi, cl, upper32(storage_phys));
+ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
}
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
@@ -316,10 +312,10 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
uint32_t *p;
uint32_t *cl = qb_cl(d);
- /* TODO: convert to atomic_t */
- if (s->vdq.busy)
+ if (!atomic_dec_and_test(&s->vdq.busy)) {
+ atomic_inc(&s->vdq.busy);
return -EBUSY;
- s->vdq.busy = 1;
+ }
s->vdq.storage = *(void **)&cl[4];
s->vdq.token = qb_attr_code_decode(&code_pull_token, cl);
p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR);
@@ -359,36 +355,44 @@ const struct ldpaa_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
uint32_t verb;
uint32_t response_verb;
- const struct ldpaa_dq *dq = qbman_cena_read(&s->sys,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
- const uint32_t *p = qb_cl(dq);
+ uint32_t flags;
+ const struct ldpaa_dq *dq;
+ const uint32_t *p;
+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ p = qb_cl(dq);
verb = qb_attr_code_decode(&code_dqrr_verb, p);
- /* If the valid-bit isn't of the expected polarity, nothing there */
+
+ /* If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip this
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
qbman_cena_invalidate_prefetch(&s->sys,
- QBMAN_CENA_SWP_DQRR(
- s->dqrr.next_idx));
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
return NULL;
}
/* There's something there. Move "next_idx" attention to the next ring
* entry (and prefetch it) before returning what we found. */
s->dqrr.next_idx++;
- s->dqrr.next_idx &= 3; /* Wrap around at 4 */
+ s->dqrr.next_idx &= QBMAN_DQRR_SIZE - 1; /* Wrap around at QBMAN_DQRR_SIZE */
/* TODO: it's possible to do all this without conditionals, optimise it
* later. */
if (!s->dqrr.next_idx)
s->dqrr.valid_bit ^= QB_VALID_BIT;
- /* VDQCR "no longer busy" hook - if VDQCR shows "busy" and this is a
- * VDQCR result, mark it as non-busy. */
- if (s->vdq.busy) {
- uint32_t flags = ldpaa_dq_flags(dq);
-
- response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
- if ((response_verb == QBMAN_DQRR_RESPONSE_DQ) &&
- (flags & LDPAA_DQ_STAT_VOLATILE))
- s->vdq.busy = 0;
- }
+
+ /* If this is the final response to a volatile dequeue command,
+ * indicate that the vdq is no longer busy.
+ */
+ flags = ldpaa_dq_flags(dq);
+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
+ if ((response_verb == QBMAN_DQRR_RESPONSE_DQ) &&
+ (flags & LDPAA_DQ_STAT_VOLATILE) &&
+ (flags & LDPAA_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.busy);
+
qbman_cena_invalidate_prefetch(&s->sys,
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
return dq;
@@ -448,8 +452,10 @@ int qbman_dq_entry_has_newtoken(struct qbman_swp *s,
* reset "busy". We instead base the decision on whether the current
* result is sitting at the first 'storage' location of the busy
* command. */
- if (s->vdq.busy && (s->vdq.storage == dq))
- s->vdq.busy = 0;
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.busy);
+ }
return 1;
}
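
The converted vdq.busy field acts as a one-entry token rather than a lock: atomic_dec_and_test() claims it (1 becomes 0) and atomic_inc() hands it back, which is why qbman_swp_init() seeds it with 1. A minimal sketch of the same claim/release pattern, using only the asm/atomic.h helpers this patch relies on (the wrapper names here are illustrative):

static atomic_t token;		/* seeded with atomic_set(&token, 1) == "free" */

static int token_claim(void)
{
	/* 1 -> 0 means we won the token; anything else means it was busy */
	if (!atomic_dec_and_test(&token)) {
		atomic_inc(&token);	/* undo our decrement */
		return -EBUSY;
	}
	return 0;
}

static void token_release(void)
{
	atomic_inc(&token);		/* 0 -> 1, token available again */
}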
diff --git a/drivers/net/fsl-mc/dpio/qbman_portal.h b/drivers/net/fsl-mc/dpio/qbman_portal.h
index bb67c3bd06..86e2c3aac4 100644
--- a/drivers/net/fsl-mc/dpio/qbman_portal.h
+++ b/drivers/net/fsl-mc/dpio/qbman_portal.h
@@ -14,6 +14,10 @@
/* Management command result codes */
#define QBMAN_MC_RSLT_OK 0xf0
+/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */
+#define QBMAN_DQRR_SIZE 4
+
+
/* --------------------- */
/* portal data structure */
/* --------------------- */
@@ -48,14 +52,13 @@ struct qbman_swp {
* to whether or not a command can be submitted, not whether or
* not a previously-submitted command is still executing. In
* other words, once proof is seen that the previously-submitted
- * command is executing, "vdq" is no longer "busy". TODO:
- * convert this to "atomic_t" so that it is thread-safe (without
- * locking). */
- int busy;
+ * command is executing, "vdq" is no longer "busy".
+ */
+ atomic_t busy;
uint32_t valid_bit; /* 0x00 or 0x80 */
/* We need to determine when vdq is no longer busy. This depends
* on whether the "busy" (last-submitted) dequeue command is
- * targetting DQRR or main-memory, and detected is based on the
+ * targeting DQRR or main-memory, and detected is based on the
* presence of the dequeue command's "token" showing up in
* dequeue entries in DQRR or main-memory (respectively). Debug
* builds will, when submitting vdq commands, verify that the
@@ -127,6 +130,7 @@ static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,
return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);
}
+
/* encode a field to a cacheline */
static inline void qb_attr_code_encode(const struct qb_attr_code *code,
uint32_t *cacheline, uint32_t val)
@@ -136,6 +140,12 @@ static inline void qb_attr_code_encode(const struct qb_attr_code *code,
| e32_uint32_t(code->lsoffset, code->width, val);
}
+static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
+ uint64_t *cacheline, uint64_t val)
+{
+ cacheline[code->word / 2] = val;
+}
+
/* ---------------------- */
/* Descriptors/cachelines */
/* ---------------------- */
@@ -144,7 +154,7 @@ static inline void qb_attr_code_encode(const struct qb_attr_code *code,
* a "descriptor" type that the caller can instantiate however they like.
* Ultimately though, it is just a cacheline of binary storage (or something
* smaller when it is known that the descriptor doesn't need all 64 bytes) for
- * holding pre-formatted pieces of harware commands. The performance-critical
+ * holding pre-formatted pieces of hardware commands. The performance-critical
* code can then copy these descriptors directly into hardware command
* registers more efficiently than trying to construct/format commands
* on-the-fly. The API user sees the descriptor as an array of 32-bit words in
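
For readers unfamiliar with the attribute-code helpers used throughout this file: QB_CODE(word, lsoffset, width) names a bit-field inside a cacheline of 32-bit words, encoding is a masked shift into that word, and the new qb_attr_code_encode_64() simply stores a whole aligned 64-bit value at index word/2. A simplified, self-contained sketch of the 32-bit encode (written from scratch here, not the driver's e32_uint32_t helper):

#include <stdint.h>

struct attr_code { unsigned int word, lsoffset, width; };

/* write 'val' into the named bit-field of a cacheline of 32-bit words */
static void encode32(const struct attr_code *c, uint32_t *cacheline, uint32_t val)
{
	uint32_t mask = (c->width == 32) ? 0xffffffffu
					 : (((1u << c->width) - 1) << c->lsoffset);

	cacheline[c->word] = (cacheline[c->word] & ~mask) |
			     ((val << c->lsoffset) & mask);
}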
diff --git a/drivers/net/fsl-mc/dpio/qbman_private.h b/drivers/net/fsl-mc/dpio/qbman_private.h
index 2d2556b755..f1f16b828b 100644
--- a/drivers/net/fsl-mc/dpio/qbman_private.h
+++ b/drivers/net/fsl-mc/dpio/qbman_private.h
@@ -9,7 +9,7 @@
#include <errno.h>
#include <asm/io.h>
#include <linux/types.h>
-#include <linux/compat.h>
+#include <asm/atomic.h>
#include <malloc.h>
#include <fsl-mc/fsl_qbman_base.h>
diff --git a/drivers/net/fsl-mc/dpni.c b/drivers/net/fsl-mc/dpni.c
index b384401295..7bc2504f8c 100644
--- a/drivers/net/fsl-mc/dpni.c
+++ b/drivers/net/fsl-mc/dpni.c
@@ -313,7 +313,7 @@ int dpni_set_counter(struct fsl_mc_io *mc_io,
int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
uint16_t token,
- struct dpni_link_cfg *cfg)
+ const struct dpni_link_cfg *cfg)
{
struct mc_command cmd = { 0 };
diff --git a/drivers/net/fsl-mc/mc.c b/drivers/net/fsl-mc/mc.c
index c5c44bcab0..62a68c2744 100644
--- a/drivers/net/fsl-mc/mc.c
+++ b/drivers/net/fsl-mc/mc.c
@@ -3,13 +3,15 @@
*
* SPDX-License-Identifier: GPL-2.0+
*/
+#include <common.h>
#include <errno.h>
#include <asm/io.h>
+#include <libfdt.h>
+#include <fdt_support.h>
#include <fsl-mc/fsl_mc.h>
#include <fsl-mc/fsl_mc_sys.h>
#include <fsl-mc/fsl_mc_private.h>
#include <fsl-mc/fsl_dpmng.h>
-#include <fsl_debug_server.h>
#include <fsl-mc/fsl_dprc.h>
#include <fsl-mc/fsl_dpio.h>
#include <fsl-mc/fsl_qbman_portal.h>
@@ -186,6 +188,36 @@ static int calculate_mc_private_ram_params(u64 mc_private_ram_start_addr,
return 0;
}
+static int mc_fixup_dpc(u64 dpc_addr)
+{
+ void *blob = (void *)dpc_addr;
+ int nodeoffset;
+
+ /* delete any existing ICID pools */
+ nodeoffset = fdt_path_offset(blob, "/resources/icid_pools");
+ if (fdt_del_node(blob, nodeoffset) < 0)
+ printf("\nfsl-mc: WARNING: could not delete ICID pool\n");
+
+ /* add a new pool */
+ nodeoffset = fdt_path_offset(blob, "/resources");
+ if (nodeoffset < 0) {
+ printf("\nfsl-mc: ERROR: DPC is missing /resources\n");
+ return -EINVAL;
+ }
+ nodeoffset = fdt_add_subnode(blob, nodeoffset, "icid_pools");
+ nodeoffset = fdt_add_subnode(blob, nodeoffset, "icid_pool@0");
+ do_fixup_by_path_u32(blob, "/resources/icid_pools/icid_pool@0",
+ "base_icid", FSL_DPAA2_STREAM_ID_START, 1);
+ do_fixup_by_path_u32(blob, "/resources/icid_pools/icid_pool@0",
+ "num",
+ FSL_DPAA2_STREAM_ID_END -
+ FSL_DPAA2_STREAM_ID_START + 1, 1);
+
+ flush_dcache_range(dpc_addr, dpc_addr + fdt_totalsize(blob));
+
+ return 0;
+}
+
static int load_mc_dpc(u64 mc_ram_addr, size_t mc_ram_size)
{
u64 mc_dpc_offset;
@@ -225,13 +257,13 @@ static int load_mc_dpc(u64 mc_ram_addr, size_t mc_ram_size)
* Don't return with error here, since the MC firmware can
* still boot without a DPC
*/
- printf("fsl-mc: WARNING: No DPC image found\n");
+ printf("\nfsl-mc: WARNING: No DPC image found");
return 0;
}
dpc_size = fdt_totalsize(dpc_fdt_hdr);
if (dpc_size > CONFIG_SYS_LS_MC_DPC_MAX_LENGTH) {
- printf("fsl-mc: ERROR: Bad DPC image (too large: %d)\n",
+ printf("\nfsl-mc: ERROR: Bad DPC image (too large: %d)\n",
dpc_size);
return -EINVAL;
}
@@ -240,6 +272,9 @@ static int load_mc_dpc(u64 mc_ram_addr, size_t mc_ram_size)
(u64)dpc_fdt_hdr, dpc_size, mc_ram_addr + mc_dpc_offset);
#endif /* not defined CONFIG_SYS_LS_MC_DPC_IN_DDR */
+ if (mc_fixup_dpc(mc_ram_addr + mc_dpc_offset))
+ return -EINVAL;
+
dump_ram_words("DPC", (void *)(mc_ram_addr + mc_dpc_offset));
return 0;
}
@@ -279,13 +314,13 @@ static int load_mc_dpl(u64 mc_ram_addr, size_t mc_ram_size)
error = fdt_check_header(dpl_fdt_hdr);
if (error != 0) {
- printf("fsl-mc: ERROR: Bad DPL image (bad header)\n");
+ printf("\nfsl-mc: ERROR: Bad DPL image (bad header)\n");
return error;
}
dpl_size = fdt_totalsize(dpl_fdt_hdr);
if (dpl_size > CONFIG_SYS_LS_MC_DPL_MAX_LENGTH) {
- printf("fsl-mc: ERROR: Bad DPL image (too large: %d)\n",
+ printf("\nfsl-mc: ERROR: Bad DPL image (too large: %d)\n",
dpl_size);
return -EINVAL;
}
@@ -322,6 +357,23 @@ static unsigned long get_mc_boot_timeout_ms(void)
return timeout_ms;
}
+#ifdef CONFIG_SYS_LS_MC_AIOP_IMG_IN_NOR
+static int load_mc_aiop_img(u64 mc_ram_addr, size_t mc_ram_size)
+{
+ void *aiop_img;
+
+ /*
+ * Load the MC AIOP image in the MC private DRAM block:
+ */
+
+ aiop_img = (void *)CONFIG_SYS_LS_MC_AIOP_IMG_ADDR;
+ mc_copy_image("MC AIOP image",
+ (u64)aiop_img, CONFIG_SYS_LS_MC_AIOP_IMG_MAX_LENGTH,
+ mc_ram_addr + CONFIG_SYS_LS_MC_DRAM_AIOP_IMG_OFFSET);
+
+ return 0;
+}
+#endif
static int wait_for_mc(bool booting_mc, u32 *final_reg_gsr)
{
u32 reg_gsr;
@@ -330,7 +382,6 @@ static int wait_for_mc(bool booting_mc, u32 *final_reg_gsr)
struct mc_ccsr_registers __iomem *mc_ccsr_regs = MC_CCSR_BASE_ADDR;
dmb();
- debug("Polling mc_ccsr_regs->reg_gsr ...\n");
assert(timeout_ms > 0);
for (;;) {
udelay(1000); /* throttle polling */
@@ -345,10 +396,7 @@ static int wait_for_mc(bool booting_mc, u32 *final_reg_gsr)
}
if (timeout_ms == 0) {
- if (booting_mc)
- printf("fsl-mc: timeout booting management complex firmware\n");
- else
- printf("fsl-mc: timeout deploying data path layout\n");
+ printf("ERROR: timeout\n");
/* TODO: Get an error status from an MC CCSR register */
return -ETIMEDOUT;
@@ -361,15 +409,13 @@ static int wait_for_mc(bool booting_mc, u32 *final_reg_gsr)
* appropriate errno, so that the status property is set to
* failure in the fsl,dprc device tree node.
*/
- if (booting_mc) {
- printf("fsl-mc: WARNING: Firmware booted with error (GSR: %#x)\n",
- reg_gsr);
- } else {
- printf("fsl-mc: WARNING: Data path layout deployed with error (GSR: %#x)\n",
- reg_gsr);
- }
+ printf("WARNING: Firmware returned an error (GSR: %#x)\n",
+ reg_gsr);
+ } else {
+ printf("SUCCESS\n");
}
+
*final_reg_gsr = reg_gsr;
return 0;
}
@@ -403,13 +449,6 @@ int mc_init(void)
gd->bd->bi_dram[0].start + gd->bd->bi_dram[0].size;
}
-#ifdef CONFIG_FSL_DEBUG_SERVER
- /*
- * FIXME: I don't think this is right. See get_dram_size_to_hide()
- */
- mc_ram_addr -= debug_server_get_dram_block_size();
-#endif
-
error = calculate_mc_private_ram_params(mc_ram_addr,
mc_ram_size,
&mc_ram_aligned_base_addr,
@@ -454,6 +493,12 @@ int mc_init(void)
if (error != 0)
goto out;
+#ifdef CONFIG_SYS_LS_MC_AIOP_IMG_IN_NOR
+ error = load_mc_aiop_img(mc_ram_addr, mc_ram_size);
+ if (error != 0)
+ goto out;
+#endif
+
debug("mc_ccsr_regs %p\n", mc_ccsr_regs);
dump_mc_ccsr_regs(mc_ccsr_regs);
@@ -465,14 +510,14 @@ int mc_init(void)
out_le32(&mc_ccsr_regs->reg_mcfbalr, reg_mcfbalr);
out_le32(&mc_ccsr_regs->reg_mcfbahr,
(u32)(mc_ram_aligned_base_addr >> 32));
- out_le32(&mc_ccsr_regs->reg_mcfapr, MCFAPR_BYPASS_ICID_MASK);
+ out_le32(&mc_ccsr_regs->reg_mcfapr, FSL_BYPASS_AMQ);
/*
* Tell the MC that we want delayed DPL deployment.
*/
out_le32(&mc_ccsr_regs->reg_gsr, 0xDD00);
- printf("\nfsl-mc: Booting Management Complex ...\n");
+ printf("\nfsl-mc: Booting Management Complex ... ");
/*
* Deassert reset and release MC core 0 to run
@@ -509,9 +554,14 @@ int mc_init(void)
goto out;
}
- if (MC_VER_MAJOR != mc_ver_info.major)
+ if (MC_VER_MAJOR != mc_ver_info.major) {
printf("fsl-mc: ERROR: Firmware major version mismatch (found: %d, expected: %d)\n",
mc_ver_info.major, MC_VER_MAJOR);
+ printf("fsl-mc: Update the Management Complex firmware\n");
+
+ error = -ENODEV;
+ goto out;
+ }
if (MC_VER_MINOR != mc_ver_info.minor)
printf("fsl-mc: WARNING: Firmware minor version mismatch (found: %d, expected: %d)\n",
@@ -525,13 +575,14 @@ int mc_init(void)
* Tell the MC to deploy the DPL:
*/
out_le32(&mc_ccsr_regs->reg_gsr, 0x0);
- printf("\nfsl-mc: Deploying data path layout ...\n");
+ printf("fsl-mc: Deploying data path layout ... ");
error = wait_for_mc(false, &reg_gsr);
if (error != 0)
goto out;
+
out:
if (error != 0)
- mc_boot_status = -error;
+ mc_boot_status = error;
else
mc_boot_status = 0;
@@ -600,14 +651,16 @@ int dpio_init(struct dprc_obj_desc obj_desc)
printf("dpio_enable() failed %d\n", err);
goto err_get_enable;
}
- debug("ce_paddr=0x%llx, ci_paddr=0x%llx, portalid=%d, prios=%d\n",
- attr.qbman_portal_ce_paddr,
- attr.qbman_portal_ci_paddr,
+ debug("ce_offset=0x%llx, ci_offset=0x%llx, portalid=%d, prios=%d\n",
+ attr.qbman_portal_ce_offset,
+ attr.qbman_portal_ci_offset,
attr.qbman_portal_id,
attr.num_priorities);
- p_des.cena_bar = (void *)attr.qbman_portal_ce_paddr;
- p_des.cinh_bar = (void *)attr.qbman_portal_ci_paddr;
+ p_des.cena_bar = (void *)(SOC_QBMAN_PORTALS_BASE_ADDR
+ + attr.qbman_portal_ce_offset);
+ p_des.cinh_bar = (void *)(SOC_QBMAN_PORTALS_BASE_ADDR
+ + attr.qbman_portal_ci_offset);
dflt_dpio->sw_portal = qbman_swp_init(&p_des);
if (dflt_dpio->sw_portal == NULL) {
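
One caveat in mc_fixup_dpc() above: fdt_add_subnode() returns a negative libfdt error (for instance -FDT_ERR_NOSPACE when the DPC blob has no spare room), and the patch feeds its result straight into the next call. A more defensive version of the pool-creation step might look like this sketch; the helper name and added error handling are illustrative, not part of this change:

#include <libfdt.h>

/* Hypothetical defensive variant of the icid_pool creation (sketch only) */
static int mc_dpc_add_icid_pool(void *blob)
{
	int off = fdt_path_offset(blob, "/resources");

	if (off < 0)
		return -EINVAL;

	off = fdt_add_subnode(blob, off, "icid_pools");
	if (off < 0)
		return off;

	off = fdt_add_subnode(blob, off, "icid_pool@0");
	if (off < 0)
		return off;

	return 0;
}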
diff --git a/drivers/net/ldpaa_eth/ldpaa_eth.c b/drivers/net/ldpaa_eth/ldpaa_eth.c
index d4be1bada9..50ca6e45fb 100644
--- a/drivers/net/ldpaa_eth/ldpaa_eth.c
+++ b/drivers/net/ldpaa_eth/ldpaa_eth.c
@@ -31,6 +31,8 @@ static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
uint32_t fd_length;
struct ldpaa_fas *fas;
uint32_t status, err;
+ u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
+ u32 time_start;
struct qbman_release_desc releasedesc;
struct qbman_swp *swp = dflt_dpio->sw_portal;
@@ -65,10 +67,15 @@ error:
flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
qbman_release_desc_clear(&releasedesc);
qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
+ time_start = get_timer(0);
do {
/* Release buffer into the QBMAN */
err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
- } while (err == -EBUSY);
+ } while (get_timer(time_start) < timeo && err == -EBUSY);
+
+ if (err == -EBUSY)
+ printf("Rx frame: QBMAN buffer release fails\n");
+
return;
}
@@ -77,7 +84,9 @@ static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
const struct ldpaa_dq *dq;
const struct dpaa_fd *fd;
- int i = 5, err = 0, status, loop = 20;
+ int i = 5, err = 0, status;
+ u32 timeo = (CONFIG_SYS_HZ * 2) / 1000;
+ u32 time_start;
static struct qbman_pull_desc pulldesc;
struct qbman_swp *swp = dflt_dpio->sw_portal;
@@ -92,13 +101,11 @@ static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
continue;
}
- do {
- loop--;
- dq = qbman_swp_dqrr_next(swp);
+ time_start = get_timer(0);
- if (!loop)
- break;
- } while (!dq);
+ do {
+ dq = qbman_swp_dqrr_next(swp);
+ } while (get_timer(time_start) < timeo && !dq);
if (dq) {
/* Check for valid frame. If not sent a consume
@@ -112,7 +119,7 @@ static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
debug("No frame delivered\n");
qbman_swp_dqrr_consume(swp, dq);
- break;
+ continue;
}
fd = ldpaa_dq_fd(dq);
@@ -121,92 +128,9 @@ static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
ldpaa_eth_rx(priv, fd);
qbman_swp_dqrr_consume(swp, dq);
break;
- }
- }
-
- return err;
-}
-
-static void ldpaa_eth_tx_conf(struct ldpaa_eth_priv *priv,
- const struct dpaa_fd *fd)
-{
- uint64_t fd_addr;
- struct ldpaa_fas *fas;
- uint32_t status, err;
- struct qbman_release_desc releasedesc;
- struct qbman_swp *swp = dflt_dpio->sw_portal;
-
- fd_addr = ldpaa_fd_get_addr(fd);
-
-
- debug("TX Conf frame:data addr=0x%p\n", (u64 *)fd_addr);
-
- /* Check the status from the Frame Annotation */
- if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
- fas = (struct ldpaa_fas *)
- ((uint8_t *)(fd_addr) +
- priv->buf_layout.private_data_size);
- status = le32_to_cpu(fas->status);
- if (status & LDPAA_ETH_TXCONF_ERR_MASK) {
- printf("TxConf frame error(s): 0x%08x\n",
- status & LDPAA_ETH_TXCONF_ERR_MASK);
- }
- }
-
- qbman_release_desc_clear(&releasedesc);
- qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
- do {
- /* Release buffer into the QBMAN */
- err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
- } while (err == -EBUSY);
-}
-
-static int ldpaa_eth_pull_dequeue_tx_conf(struct ldpaa_eth_priv *priv)
-{
- const struct ldpaa_dq *dq;
- const struct dpaa_fd *fd;
- int err = 0;
- int i = 5, status, loop = 20;
- static struct qbman_pull_desc pulldesc;
- struct qbman_swp *swp = dflt_dpio->sw_portal;
-
- while (--i) {
- qbman_pull_desc_clear(&pulldesc);
- qbman_pull_desc_set_numframes(&pulldesc, 1);
- qbman_pull_desc_set_fq(&pulldesc, priv->tx_conf_fqid);
-
- err = qbman_swp_pull(swp, &pulldesc);
- if (err < 0) {
- printf("Dequeue TX conf frames error:0x%08x\n", err);
- continue;
- }
-
- do {
- loop--;
- dq = qbman_swp_dqrr_next(swp);
-
- if (!loop)
- break;
- } while (!dq);
-
- if (dq) {
- /* Check for valid frame. If not sent a consume
- * confirmation to QBMAN otherwise give it to NADK
- * application and then send consume confirmation to
- * QBMAN.
- */
- status = (uint8_t)ldpaa_dq_flags(dq);
- if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
- debug("Dequeue TX conf frames:");
- debug("No frame is delivered\n");
-
- qbman_swp_dqrr_consume(swp, dq);
- break;
- }
- fd = ldpaa_dq_fd(dq);
-
- ldpaa_eth_tx_conf(priv, fd);
- qbman_swp_dqrr_consume(swp, dq);
+ } else {
+ err = -ENODATA;
+ debug("No DQRR entries\n");
break;
}
}
@@ -220,8 +144,11 @@ static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
struct dpaa_fd fd;
u64 buffer_start;
int data_offset, err;
+ u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
+ u32 time_start;
struct qbman_swp *swp = dflt_dpio->sw_portal;
struct qbman_eq_desc ed;
+ struct qbman_release_desc releasedesc;
/* Setup the FD fields */
memset(&fd, 0, sizeof(fd));
@@ -257,15 +184,34 @@ static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
qbman_eq_desc_clear(&ed);
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);
- err = qbman_swp_enqueue(swp, &ed, (const struct qbman_fd *)(&fd));
- if (err < 0)
+
+ time_start = get_timer(0);
+
+ while (get_timer(time_start) < timeo) {
+ err = qbman_swp_enqueue(swp, &ed,
+ (const struct qbman_fd *)(&fd));
+ if (err != -EBUSY)
+ break;
+ }
+
+ if (err < 0) {
printf("error enqueueing Tx frame\n");
+ goto error;
+ }
+
+ return err;
- mdelay(1);
+error:
+ qbman_release_desc_clear(&releasedesc);
+ qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
+ time_start = get_timer(0);
+ do {
+ /* Release buffer into the QBMAN */
+ err = qbman_swp_release(swp, &releasedesc, &buffer_start, 1);
+ } while (get_timer(time_start) < timeo && err == -EBUSY);
- err = ldpaa_eth_pull_dequeue_tx_conf(priv);
- if (err < 0)
- printf("error Tx Conf frame\n");
+ if (err == -EBUSY)
+ printf("TX data: QBMAN buffer release fails\n");
return err;
}
@@ -274,7 +220,6 @@ static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
{
struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
struct dpni_queue_attr rx_queue_attr;
- struct dpni_tx_flow_attr tx_flow_attr;
uint8_t mac_addr[6];
int err;
@@ -345,21 +290,11 @@ static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
goto err_qdid;
}
- err = dpni_get_tx_flow(dflt_mc_io, priv->dpni_handle, priv->tx_flow_id,
- &tx_flow_attr);
- if (err) {
- printf("dpni_get_tx_flow() failed\n");
- goto err_tx_flow;
- }
-
- priv->tx_conf_fqid = tx_flow_attr.conf_err_attr.queue_attr.fqid;
-
if (!priv->phydev->link)
printf("%s: No link.\n", priv->phydev->dev->name);
return priv->phydev->link ? 0 : -1;
-err_tx_flow:
err_qdid:
err_rx_flow:
dpni_disable(dflt_mc_io, priv->dpni_handle);
@@ -626,6 +561,9 @@ static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
priv->tx_flow_id = DPNI_NEW_FLOW_ID;
memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));
+ dflt_tx_flow.options = DPNI_TX_FLOW_OPT_ONLY_TX_ERROR;
+ dflt_tx_flow.conf_err_cfg.use_default_queue = 0;
+ dflt_tx_flow.conf_err_cfg.errors_only = 1;
err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
&priv->tx_flow_id, &dflt_tx_flow);
if (err) {
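
All of the new retry loops in this file follow the same U-Boot idiom: take a millisecond budget derived from CONFIG_SYS_HZ, record get_timer(0) once, and retry only while the elapsed time stays under the budget and the operation keeps reporting -EBUSY. Stripped of the driver specifics, the pattern looks like the sketch below; retry_while_busy() and try_once() are placeholders, not functions added by this patch:

#include <common.h>

/* Generic form of the retry loops above (sketch; try_once() stands in for
 * calls such as qbman_swp_release() or qbman_swp_enqueue()).
 */
static int retry_while_busy(int (*try_once)(void), u32 budget_ms)
{
	u32 start = get_timer(0);
	int err;

	do {
		err = try_once();
	} while (get_timer(start) < budget_ms && err == -EBUSY);

	return err;
}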
diff --git a/drivers/net/ldpaa_eth/ldpaa_eth.h b/drivers/net/ldpaa_eth/ldpaa_eth.h
index 3107ab6cff..b4ef700cb0 100644
--- a/drivers/net/ldpaa_eth/ldpaa_eth.h
+++ b/drivers/net/ldpaa_eth/ldpaa_eth.h
@@ -128,7 +128,6 @@ struct ldpaa_eth_priv {
uint32_t rx_dflt_fqid;
uint16_t tx_qdid;
- uint32_t tx_conf_fqid;
uint16_t tx_flow_id;
enum ldpaa_eth_type type; /* 1G or 10G ethernet */
diff --git a/drivers/pci/pcie_layerscape.c b/drivers/pci/pcie_layerscape.c
index 402c5193e0..3ef4975556 100644
--- a/drivers/pci/pcie_layerscape.c
+++ b/drivers/pci/pcie_layerscape.c
@@ -11,6 +11,7 @@
#include <asm/io.h>
#include <errno.h>
#include <malloc.h>
+#include <asm/arch-fsl-lsch3/fdt.h>
#ifndef CONFIG_SYS_PCI_MEMORY_BUS
#define CONFIG_SYS_PCI_MEMORY_BUS CONFIG_SYS_SDRAM_BASE
@@ -528,3 +529,64 @@ void ft_pci_setup(void *blob, bd_t *bd)
{
}
#endif
+
+#ifdef CONFIG_LS2085A
+
+void pcie_set_available_streamids(void *blob, const char *pcie_path,
+ u32 *stream_ids, int count)
+{
+ int nodeoffset;
+ int i;
+
+ nodeoffset = fdt_path_offset(blob, pcie_path);
+ if (nodeoffset < 0) {
+ printf("\n%s: ERROR: unable to update PCIe node\n", __func__);
+ return;
+ }
+
+ /* for each stream ID, append to the available-stream-ids property */
+ for (i = 0; i < count; i++) {
+ fdt_appendprop_u32(blob, nodeoffset, "available-stream-ids",
+ stream_ids[i]);
+ }
+}
+
+#define MAX_STREAM_IDS 4
+void fdt_fixup_smmu_pcie(void *blob)
+{
+ int count;
+ u32 stream_ids[MAX_STREAM_IDS];
+
+ #ifdef CONFIG_PCIE1
+ /* PEX1 stream ID fixup */
+ count = FSL_PEX1_STREAM_ID_END - FSL_PEX1_STREAM_ID_START + 1;
+ alloc_stream_ids(FSL_PEX1_STREAM_ID_START, count, stream_ids,
+ MAX_STREAM_IDS);
+ pcie_set_available_streamids(blob, "/pcie@3400000", stream_ids, count);
+ #endif
+
+ #ifdef CONFIG_PCIE2
+ /* PEX2 stream ID fixup */
+ count = FSL_PEX2_STREAM_ID_END - FSL_PEX2_STREAM_ID_START + 1;
+ alloc_stream_ids(FSL_PEX2_STREAM_ID_START, count, stream_ids,
+ MAX_STREAM_IDS);
+ pcie_set_available_streamids(blob, "/pcie@3500000", stream_ids, count);
+ #endif
+
+ #ifdef CONFIG_PCIE3
+ /* PEX3 stream ID fixup */
+ count = FSL_PEX3_STREAM_ID_END - FSL_PEX3_STREAM_ID_START + 1;
+ alloc_stream_ids(FSL_PEX3_STREAM_ID_START, count, stream_ids,
+ MAX_STREAM_IDS);
+ pcie_set_available_streamids(blob, "/pcie@3600000", stream_ids, count);
+ #endif
+
+ #ifdef CONFIG_PCIE4
+ /* PEX4 stream ID fixup */
+ count = FSL_PEX4_STREAM_ID_END - FSL_PEX4_STREAM_ID_START + 1;
+ alloc_stream_ids(FSL_PEX4_STREAM_ID_START, count, stream_ids,
+ MAX_STREAM_IDS);
+ pcie_set_available_streamids(blob, "/pcie@3700000", stream_ids, count);
+ #endif
+}
+#endif
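
fdt_fixup_smmu_pcie() is meant to be invoked from the platform's device-tree fixup path before the OS boots, with its prototype presumably coming from the asm/arch-fsl-lsch3/fdt.h header included above. A hypothetical call site, for illustration only:

#include <asm/arch-fsl-lsch3/fdt.h>

/* Hypothetical hook in the SoC/board FDT fixup path (sketch only) */
void platform_fixup_pcie_stream_ids(void *blob)
{
	fdt_fixup_smmu_pcie(blob);	/* appends available-stream-ids per PEX */
}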