Diffstat (limited to 'include')
-rw-r--r--  include/dm/uclass-id.h          |   1
-rw-r--r--  include/dma-uclass.h            | 128
-rw-r--r--  include/dma.h                   | 282
-rw-r--r--  include/dt-bindings/clock/am3.h | 227
-rw-r--r--  include/hwspinlock.h            | 140
-rw-r--r--  include/spl.h                   |   4
6 files changed, 756 insertions, 26 deletions
diff --git a/include/dm/uclass-id.h b/include/dm/uclass-id.h index 62d9e2f404..037af0460c 100644 --- a/include/dm/uclass-id.h +++ b/include/dm/uclass-id.h @@ -42,6 +42,7 @@ enum uclass_id { UCLASS_FIRMWARE, /* Firmware */ UCLASS_FS_FIRMWARE_LOADER, /* Generic loader */ UCLASS_GPIO, /* Bank of general-purpose I/O pins */ + UCLASS_HWSPINLOCK, /* Hardware semaphores */ UCLASS_I2C, /* I2C bus */ UCLASS_I2C_EEPROM, /* I2C EEPROM device */ UCLASS_I2C_GENERIC, /* Generic I2C device */ diff --git a/include/dma-uclass.h b/include/dma-uclass.h new file mode 100644 index 0000000000..31b43fb4b9 --- /dev/null +++ b/include/dma-uclass.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com> + * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com> + * Written by Mugunthan V N <mugunthanvnm@ti.com> + * + */ + +#ifndef _DMA_UCLASS_H +#define _DMA_UCLASS_H + +/* See dma.h for background documentation. */ + +#include <dma.h> + +struct ofnode_phandle_args; + +/* + * struct dma_ops - Driver model DMA operations + * + * The uclass interface is implemented by all DMA devices which use + * driver model. + */ +struct dma_ops { +#ifdef CONFIG_DMA_CHANNELS + /** + * of_xlate - Translate a client's device-tree (OF) DMA specifier. + * + * The DMA core calls this function as the first step in implementing + * a client's dma_get_by_*() call. + * + * If this function pointer is set to NULL, the DMA core will use a + * default implementation, which assumes #dma-cells = <1>, and that + * the DT cell contains a simple integer DMA Channel. + * + * At present, the DMA API solely supports device-tree. If this + * changes, other xxx_xlate() functions may be added to support those + * other mechanisms. + * + * @dma: The dma struct to hold the translation result. + * @args: The dma specifier values from device tree. + * @return 0 if OK, or a negative error code. + */ + int (*of_xlate)(struct dma *dma, + struct ofnode_phandle_args *args); + /** + * request - Request a translated DMA. + * + * The DMA core calls this function as the second step in + * implementing a client's dma_get_by_*() call, following a successful + * xxx_xlate() call, or as the only step in implementing a client's + * dma_request() call. + * + * @dma: The DMA struct to request; this has been filled in by + * a previoux xxx_xlate() function call, or by the caller of + * dma_request(). + * @return 0 if OK, or a negative error code. + */ + int (*request)(struct dma *dma); + /** + * free - Free a previously requested dma. + * + * This is the implementation of the client dma_free() API. + * + * @dma: The DMA to free. + * @return 0 if OK, or a negative error code. + */ + int (*free)(struct dma *dma); + /** + * enable() - Enable a DMA Channel. + * + * @dma: The DMA Channel to manipulate. + * @return zero on success, or -ve error code. + */ + int (*enable)(struct dma *dma); + /** + * disable() - Disable a DMA Channel. + * + * @dma: The DMA Channel to manipulate. + * @return zero on success, or -ve error code. + */ + int (*disable)(struct dma *dma); + /** + * prepare_rcv_buf() - Prepare/Add receive DMA buffer. + * + * @dma: The DMA Channel to manipulate. + * @dst: The receive buffer pointer. + * @size: The receive buffer size + * @return zero on success, or -ve error code. + */ + int (*prepare_rcv_buf)(struct dma *dma, void *dst, size_t size); + /** + * receive() - Receive a DMA transfer. + * + * @dma: The DMA Channel to manipulate. + * @dst: The destination pointer. 
+ * @metadata: DMA driver's specific data + * @return zero on success, or -ve error code. + */ + int (*receive)(struct dma *dma, void **dst, void *metadata); + /** + * send() - Send a DMA transfer. + * + * @dma: The DMA Channel to manipulate. + * @src: The source pointer. + * @len: Length of the data to be sent (number of bytes). + * @metadata: DMA driver's specific data + * @return zero on success, or -ve error code. + */ + int (*send)(struct dma *dma, void *src, size_t len, void *metadata); +#endif /* CONFIG_DMA_CHANNELS */ + /** + * transfer() - Issue a DMA transfer. The implementation must + * wait until the transfer is done. + * + * @dev: The DMA device + * @direction: direction of data transfer (should be one from + * enum dma_direction) + * @dst: The destination pointer. + * @src: The source pointer. + * @len: Length of the data to be copied (number of bytes). + * @return zero on success, or -ve error code. + */ + int (*transfer)(struct udevice *dev, int direction, void *dst, + void *src, size_t len); +}; + +#endif /* _DMA_UCLASS_H */ diff --git a/include/dma.h b/include/dma.h index 50e965241c..d1c3d0df7d 100644 --- a/include/dma.h +++ b/include/dma.h @@ -1,12 +1,17 @@ /* SPDX-License-Identifier: GPL-2.0+ */ /* - * (C) Copyright 2015 - * Texas Instruments Incorporated, <www.ti.com> + * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com> + * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com> + * Written by Mugunthan V N <mugunthanvnm@ti.com> + * */ #ifndef _DMA_H_ #define _DMA_H_ +#include <linux/errno.h> +#include <linux/types.h> + /* * enum dma_direction - dma transfer direction indicator * @DMA_MEM_TO_MEM: Memcpy mode @@ -27,28 +32,6 @@ enum dma_direction { #define DMA_SUPPORTS_DEV_TO_DEV BIT(3) /* - * struct dma_ops - Driver model DMA operations - * - * The uclass interface is implemented by all DMA devices which use - * driver model. - */ -struct dma_ops { - /* - * Get the current timer count - * - * @dev: The DMA device - * @direction: direction of data transfer should be one from - enum dma_direction - * @dst: Destination pointer - * @src: Source pointer - * @len: Length of the data to be copied. - * @return: 0 if OK, -ve on error - */ - int (*transfer)(struct udevice *dev, int direction, void *dst, - void *src, size_t len); -}; - -/* * struct dma_dev_priv - information about a device used by the uclass * * @supported: mode of transfers that DMA can support, should be @@ -58,6 +41,257 @@ struct dma_dev_priv { u32 supported; }; +#ifdef CONFIG_DMA_CHANNELS +/** + * A DMA is a feature of computer systems that allows certain hardware + * subsystems to access main system memory, independent of the CPU. + * DMA channels are typically generated externally to the HW module + * consuming them, by an entity this API calls a DMA provider. This API + * provides a standard means for drivers to enable and disable DMAs, and to + * copy, send and receive data using DMA. + * + * A driver that implements UCLASS_DMA is a DMA provider. A provider will + * often implement multiple separate DMAs, since the hardware it manages + * often has this capability. dma_uclass.h describes the interface which + * DMA providers must implement. + * + * DMA consumers/clients are the HW modules driven by the DMA channels. This + * header file describes the API used by drivers for those HW modules. + * + * DMA consumer DMA_MEM_TO_DEV (transmit) usage example (based on networking). + * Note. 
dma_send() is always a synchronous operation - it will start the transfer and + * poll for it to complete: + * - get/request dma channel + * struct dma dma_tx; + * ret = dma_get_by_name(common->dev, "tx0", &dma_tx); + * if (ret) ... + * + * - enable dma channel + * ret = dma_enable(&dma_tx); + * if (ret) ... + * + * - dma transmit DMA_MEM_TO_DEV. + * struct ti_drv_packet_data packet_data; + * + * packet_data.opt1 = val1; + * packet_data.opt2 = val2; + * ret = dma_send(&dma_tx, packet, length, &packet_data); + * if (ret) ... + * + * DMA consumer DMA_DEV_TO_MEM (receive) usage example (based on networking). + * Note. dma_receive() is always a synchronous operation - it will start the + * transfer (if required) and poll for it to complete (or for any previously + * configured dev2mem transfer to complete): + * - get/request dma channel + * struct dma dma_rx; + * ret = dma_get_by_name(common->dev, "rx0", &dma_rx); + * if (ret) ... + * + * - enable dma channel + * ret = dma_enable(&dma_rx); + * if (ret) ... + * + * - dma receive DMA_DEV_TO_MEM. + * struct ti_drv_packet_data packet_data; + * + * len = dma_receive(&dma_rx, (void **)packet, &packet_data); + * if (len < 0) ... + * + * DMA consumer DMA_DEV_TO_MEM (receive) zero-copy usage example (based on + * networking). The networking subsystem allows configuring and using several + * receive buffers (dev2mem), as networking RX DMA channels are usually + * implemented as a streaming interface: + * - get/request dma channel + * struct dma dma_rx; + * ret = dma_get_by_name(common->dev, "rx0", &dma_rx); + * if (ret) ... + * + * for (i = 0; i < RX_DESC_NUM; i++) { + * ret = dma_prepare_rcv_buf(&dma_rx, + * net_rx_packets[i], + * RX_BUF_SIZE); + * if (ret) ... + * } + * + * - enable dma channel + * ret = dma_enable(&dma_rx); + * if (ret) ... + * + * - dma receive DMA_DEV_TO_MEM. + * struct ti_drv_packet_data packet_data; + * + * len = dma_receive(&dma_rx, (void **)packet, &packet_data); + * if (len < 0) ... + * + * -- process packet -- + * + * - return the buffer back to the DMA channel + * ret = dma_prepare_rcv_buf(&dma_rx, + * net_rx_packets[rx_next], + * RX_BUF_SIZE); + */ + +struct udevice; + +/** + * struct dma - A handle to (allowing control of) a single DMA. + * + * Clients provide storage for DMA handles. The content of the structure is + * managed solely by the DMA API and DMA drivers. A DMA struct is + * initialized by "get"ing the DMA struct. The DMA struct is passed to all + * other DMA APIs to identify which DMA channel to operate upon. + * + * @dev: The device which implements the DMA channel. + * @id: The DMA channel ID within the provider. + * + * Currently, the DMA API assumes that a single integer ID is enough to + * identify and configure any DMA channel for any DMA provider. If this + * assumption becomes invalid in the future, the struct could be expanded to + * either (a) add more fields to allow DMA providers to store additional + * information, or (b) replace the id field with an opaque pointer, which the + * provider would dynamically allocate during its .of_xlate op, and process + * during its .request op. This may require the addition of an extra op to clean + * up the allocation. + */ +struct dma { + struct udevice *dev; + /* + * Written by of_xlate. We assume a single id is enough for now. In the + * future, we might add more fields here. + */ + unsigned long id; +}; + +# if CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(DMA) +/** + * dma_get_by_index - Get/request a DMA by integer index. + * + * This looks up and requests a DMA.
The index is relative to the client + * device; each device is assumed to have n DMAs associated with it somehow, + * and this function finds and requests one of them. The mapping of client + * device DMA indices to provider DMAs may be via device-tree properties, + * board-provided mapping tables, or some other mechanism. + * + * @dev: The client device. + * @index: The index of the DMA to request, within the client's list of + * DMA channels. + * @dma: A pointer to a DMA struct to initialize. + * @return 0 if OK, or a negative error code. + */ +int dma_get_by_index(struct udevice *dev, int index, struct dma *dma); + +/** + * dma_get_by_name - Get/request a DMA by name. + * + * This looks up and requests a DMA. The name is relative to the client + * device; each device is assumed to have n DMAs associated with it somehow, + * and this function finds and requests one of them. The mapping of client + * device DMA names to provider DMAs may be via device-tree properties, + * board-provided mapping tables, or some other mechanism. + * + * @dev: The client device. + * @name: The name of the DMA to request, within the client's list of + * DMA channels. + * @dma: A pointer to a DMA struct to initialize. + * @return 0 if OK, or a negative error code. + */ +int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma); +# else +static inline int dma_get_by_index(struct udevice *dev, int index, + struct dma *dma) +{ + return -ENOSYS; +} + +static inline int dma_get_by_name(struct udevice *dev, const char *name, + struct dma *dma) +{ + return -ENOSYS; +} +# endif + +/** + * dma_request - Request a DMA by provider-specific ID. + * + * This requests a DMA using a provider-specific ID. Generally, this function + * should not be used, since dma_get_by_index/name() provide an interface that + * better separates clients from intimate knowledge of DMA providers. + * However, this function may be useful in core SoC-specific code. + * + * @dev: The DMA provider device. + * @dma: A pointer to a DMA struct to initialize. The caller must + * have already initialized any field in this struct which the + * DMA provider uses to identify the DMA channel. + * @return 0 if OK, or a negative error code. + */ +int dma_request(struct udevice *dev, struct dma *dma); + +/** + * dma_free - Free a previously requested DMA. + * + * @dma: A DMA struct that was previously successfully requested by + * dma_request/get_by_*(). + * @return 0 if OK, or a negative error code. + */ +int dma_free(struct dma *dma); + +/** + * dma_enable() - Enable (turn on) a DMA channel. + * + * @dma: A DMA struct that was previously successfully requested by + * dma_request/get_by_*(). + * @return zero on success, or -ve error code. + */ +int dma_enable(struct dma *dma); + +/** + * dma_disable() - Disable (turn off) a DMA channel. + * + * @dma: A DMA struct that was previously successfully requested by + * dma_request/get_by_*(). + * @return zero on success, or -ve error code. + */ +int dma_disable(struct dma *dma); + +/** + * dma_prepare_rcv_buf() - Prepare/add receive DMA buffer. + * + * It allows to implement zero-copy async DMA_DEV_TO_MEM (receive) transactions + * if supported by DMA providers. + * + * @dma: A DMA struct that was previously successfully requested by + * dma_request/get_by_*(). + * @dst: The receive buffer pointer. + * @size: The receive buffer size + * @return zero on success, or -ve error code. 
+ */ +int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size); + +/** + * dma_receive() - Receive a DMA transfer. + * + * @dma: A DMA struct that was previously successfully requested by + * dma_request/get_by_*(). + * @dst: The destination pointer. + * @metadata: DMA driver's channel specific data + * @return length of received data on success, or zero - no data, + * or -ve error code. + */ +int dma_receive(struct dma *dma, void **dst, void *metadata); + +/** + * dma_send() - Send a DMA transfer. + * + * @dma: A DMA struct that was previously successfully requested by + * dma_request/get_by_*(). + * @src: The source pointer. + * @len: Length of the data to be sent (number of bytes). + * @metadata: DMA driver's channel specific data + * @return zero on success, or -ve error code. + */ +int dma_send(struct dma *dma, void *src, size_t len, void *metadata); +#endif /* CONFIG_DMA_CHANNELS */ + /* * dma_get_device - get a DMA device which supports transfer * type of transfer_type diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h new file mode 100644 index 0000000000..86a8806e21 --- /dev/null +++ b/include/dt-bindings/clock/am3.h @@ -0,0 +1,227 @@ +/* + * Copyright 2017 Texas Instruments, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __DT_BINDINGS_CLK_AM3_H +#define __DT_BINDINGS_CLK_AM3_H + +#define AM3_CLKCTRL_OFFSET 0x0 +#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET) + +/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */ + +/* l4_per clocks */ +#define AM3_L4_PER_CLKCTRL_OFFSET 0x14 +#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET) +#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14) +#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18) +#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c) +#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24) +#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28) +#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c) +#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30) +#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34) +#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38) +#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c) +#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40) +#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44) +#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48) +#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c) +#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50) +#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60) +#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68) +#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c) +#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70) +#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74) +#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78) +#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c) +#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80) +#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84) +#define AM3_TIMER4_CLKCTRL 
AM3_L4_PER_CLKCTRL_INDEX(0x88) +#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90) +#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94) +#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0) +#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac) +#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0) +#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4) +#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc) +#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0) +#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4) +#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc) +#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4) +#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8) +#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc) +#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0) +#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8) +#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec) +#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0) +#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4) +#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8) +#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc) +#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100) +#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c) +#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110) +#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120) +#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130) +#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c) + +/* l4_wkup clocks */ +#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4 +#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET) +#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4) +#define AM3_GPIO1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x8) +#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc) +#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14) +#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0) +#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4) +#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8) +#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc) +#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0) +#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4) +#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8) +#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4) + +/* mpu clocks */ +#define AM3_MPU_CLKCTRL_OFFSET 0x4 +#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET) +#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4) + +/* l4_rtc clocks */ +#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0) + +/* gfx_l3 clocks */ +#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4 +#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET) +#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4) + +/* l4_cefuse clocks */ +#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20 +#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET) +#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20) + +/* XXX: Compatibility part end */ + +/* l4ls clocks */ +#define AM3_L4LS_CLKCTRL_OFFSET 0x38 +#define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET) +#define AM3_L4LS_UART6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x38) +#define AM3_L4LS_MMC1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x3c) +#define AM3_L4LS_ELM_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x40) +#define AM3_L4LS_I2C3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x44) +#define AM3_L4LS_I2C2_CLKCTRL 
AM3_L4LS_CLKCTRL_INDEX(0x48) +#define AM3_L4LS_SPI0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x4c) +#define AM3_L4LS_SPI1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x50) +#define AM3_L4LS_L4_LS_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x60) +#define AM3_L4LS_UART2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x6c) +#define AM3_L4LS_UART3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x70) +#define AM3_L4LS_UART4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x74) +#define AM3_L4LS_UART5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x78) +#define AM3_L4LS_TIMER7_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x7c) +#define AM3_L4LS_TIMER2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x80) +#define AM3_L4LS_TIMER3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x84) +#define AM3_L4LS_TIMER4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x88) +#define AM3_L4LS_RNG_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x90) +#define AM3_L4LS_GPIO2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xac) +#define AM3_L4LS_GPIO3_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb0) +#define AM3_L4LS_GPIO4_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xb4) +#define AM3_L4LS_D_CAN0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc0) +#define AM3_L4LS_D_CAN1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xc4) +#define AM3_L4LS_EPWMSS1_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xcc) +#define AM3_L4LS_EPWMSS0_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd4) +#define AM3_L4LS_EPWMSS2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xd8) +#define AM3_L4LS_TIMER5_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xec) +#define AM3_L4LS_TIMER6_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf0) +#define AM3_L4LS_MMC2_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0xf4) +#define AM3_L4LS_SPINLOCK_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x10c) +#define AM3_L4LS_MAILBOX_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x110) +#define AM3_L4LS_OCPWP_CLKCTRL AM3_L4LS_CLKCTRL_INDEX(0x130) + +/* l3s clocks */ +#define AM3_L3S_CLKCTRL_OFFSET 0x1c +#define AM3_L3S_CLKCTRL_INDEX(offset) ((offset) - AM3_L3S_CLKCTRL_OFFSET) +#define AM3_L3S_USB_OTG_HS_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x1c) +#define AM3_L3S_GPMC_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x30) +#define AM3_L3S_MCASP0_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x34) +#define AM3_L3S_MCASP1_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0x68) +#define AM3_L3S_MMC3_CLKCTRL AM3_L3S_CLKCTRL_INDEX(0xf8) + +/* l3 clocks */ +#define AM3_L3_CLKCTRL_OFFSET 0x24 +#define AM3_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_CLKCTRL_OFFSET) +#define AM3_L3_TPTC0_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x24) +#define AM3_L3_EMIF_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x28) +#define AM3_L3_OCMCRAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x2c) +#define AM3_L3_AES_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x94) +#define AM3_L3_SHAM_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xa0) +#define AM3_L3_TPCC_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xbc) +#define AM3_L3_L3_INSTR_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xdc) +#define AM3_L3_L3_MAIN_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xe0) +#define AM3_L3_TPTC1_CLKCTRL AM3_L3_CLKCTRL_INDEX(0xfc) +#define AM3_L3_TPTC2_CLKCTRL AM3_L3_CLKCTRL_INDEX(0x100) + +/* l4hs clocks */ +#define AM3_L4HS_CLKCTRL_OFFSET 0x120 +#define AM3_L4HS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4HS_CLKCTRL_OFFSET) +#define AM3_L4HS_L4_HS_CLKCTRL AM3_L4HS_CLKCTRL_INDEX(0x120) + +/* pruss_ocp clocks */ +#define AM3_PRUSS_OCP_CLKCTRL_OFFSET 0xe8 +#define AM3_PRUSS_OCP_CLKCTRL_INDEX(offset) ((offset) - AM3_PRUSS_OCP_CLKCTRL_OFFSET) +#define AM3_PRUSS_OCP_PRUSS_CLKCTRL AM3_PRUSS_OCP_CLKCTRL_INDEX(0xe8) + +/* cpsw_125mhz clocks */ +#define AM3_CPSW_125MHZ_CPGMAC0_CLKCTRL AM3_CLKCTRL_INDEX(0x14) + +/* lcdc clocks */ +#define AM3_LCDC_CLKCTRL_OFFSET 0x18 +#define AM3_LCDC_CLKCTRL_INDEX(offset) ((offset) - AM3_LCDC_CLKCTRL_OFFSET) +#define AM3_LCDC_LCDC_CLKCTRL AM3_LCDC_CLKCTRL_INDEX(0x18) + +/* clk_24mhz clocks */ +#define AM3_CLK_24MHZ_CLKCTRL_OFFSET 0x14c 
+#define AM3_CLK_24MHZ_CLKCTRL_INDEX(offset) ((offset) - AM3_CLK_24MHZ_CLKCTRL_OFFSET) +#define AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL AM3_CLK_24MHZ_CLKCTRL_INDEX(0x14c) + +/* l4_wkup clocks */ +#define AM3_L4_WKUP_CONTROL_CLKCTRL AM3_CLKCTRL_INDEX(0x4) +#define AM3_L4_WKUP_GPIO1_CLKCTRL AM3_CLKCTRL_INDEX(0x8) +#define AM3_L4_WKUP_L4_WKUP_CLKCTRL AM3_CLKCTRL_INDEX(0xc) +#define AM3_L4_WKUP_UART1_CLKCTRL AM3_CLKCTRL_INDEX(0xb4) +#define AM3_L4_WKUP_I2C1_CLKCTRL AM3_CLKCTRL_INDEX(0xb8) +#define AM3_L4_WKUP_ADC_TSC_CLKCTRL AM3_CLKCTRL_INDEX(0xbc) +#define AM3_L4_WKUP_SMARTREFLEX0_CLKCTRL AM3_CLKCTRL_INDEX(0xc0) +#define AM3_L4_WKUP_TIMER1_CLKCTRL AM3_CLKCTRL_INDEX(0xc4) +#define AM3_L4_WKUP_SMARTREFLEX1_CLKCTRL AM3_CLKCTRL_INDEX(0xc8) +#define AM3_L4_WKUP_WD_TIMER2_CLKCTRL AM3_CLKCTRL_INDEX(0xd4) + +/* l3_aon clocks */ +#define AM3_L3_AON_CLKCTRL_OFFSET 0x14 +#define AM3_L3_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L3_AON_CLKCTRL_OFFSET) +#define AM3_L3_AON_DEBUGSS_CLKCTRL AM3_L3_AON_CLKCTRL_INDEX(0x14) + +/* l4_wkup_aon clocks */ +#define AM3_L4_WKUP_AON_CLKCTRL_OFFSET 0xb0 +#define AM3_L4_WKUP_AON_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_AON_CLKCTRL_OFFSET) +#define AM3_L4_WKUP_AON_WKUP_M3_CLKCTRL AM3_L4_WKUP_AON_CLKCTRL_INDEX(0xb0) + +/* mpu clocks */ +#define AM3_MPU_MPU_CLKCTRL AM3_CLKCTRL_INDEX(0x4) + +/* l4_rtc clocks */ +#define AM3_L4_RTC_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0) + +/* gfx_l3 clocks */ +#define AM3_GFX_L3_GFX_CLKCTRL AM3_CLKCTRL_INDEX(0x4) + +/* l4_cefuse clocks */ +#define AM3_L4_CEFUSE_CEFUSE_CLKCTRL AM3_CLKCTRL_INDEX(0x20) + +#endif diff --git a/include/hwspinlock.h b/include/hwspinlock.h new file mode 100644 index 0000000000..99389c13c2 --- /dev/null +++ b/include/hwspinlock.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (C) 2018, STMicroelectronics - All Rights Reserved + */ + +#ifndef _HWSPINLOCK_H_ +#define _HWSPINLOCK_H_ + +/** + * Implement a hwspinlock uclass. + * Hardware spinlocks are used to perform hardware protection of + * critical sections and synchronisation between multiprocessors. + */ + +struct udevice; + +/** + * struct hwspinlock - A handle to (allowing control of) a single hardware + * spinlock. + * + * @dev: The device which implements the hardware spinlock. + * @id: The hardware spinlock ID within the provider. + */ +struct hwspinlock { + struct udevice *dev; + unsigned long id; +}; + +#if CONFIG_IS_ENABLED(DM_HWSPINLOCK) + +/** + * hwspinlock_get_by_index - Get a hardware spinlock by integer index + * + * This looks up and requests a hardware spinlock. The index is relative to the + * client device; each device is assumed to have n hardware spinlocks associated + * with it somehow, and this function finds and requests one of them. + * + * @dev: The client device. + * @index: The index of the hardware spinlock to request, within the + * client's list of hardware spinlocks. + * @hws: A pointer to a hardware spinlock struct to initialize. + * @return 0 if OK, or a negative error code.
+ */ +int hwspinlock_get_by_index(struct udevice *dev, + int index, struct hwspinlock *hws); + +/** + * Lock the hardware spinlock + * + * @hws: A hardware spinlock struct that was previously requested by + * hwspinlock_get_by_index + * @timeout: Timeout value in msecs + * @return: 0 if OK, -ETIMEDOUT if timeout, -ve on other errors + */ +int hwspinlock_lock_timeout(struct hwspinlock *hws, unsigned int timeout); + +/** + * Unlock the hardware spinlock + * + * @hws: A hardware spinlock struct that was previously requested by + * hwspinlock_get_by_index + * @return: 0 if OK, -ve on error + */ +int hwspinlock_unlock(struct hwspinlock *hws); + +#else + +static inline int hwspinlock_get_by_index(struct udevice *dev, + int index, + struct hwspinlock *hws) +{ + return -ENOSYS; +} + +static inline int hwspinlock_lock_timeout(struct hwspinlock *hws, + unsigned int timeout) +{ + return -ENOSYS; +} + +static inline int hwspinlock_unlock(struct hwspinlock *hws) +{ + return -ENOSYS; +} + +#endif /* CONFIG_DM_HWSPINLOCK */ + +struct ofnode_phandle_args; + +/** + * struct hwspinlock_ops - Driver model hwspinlock operations + * + * The uclass interface is implemented by all hwspinlock devices which use + * driver model. + */ +struct hwspinlock_ops { + /** + * of_xlate - Translate a client's device-tree (OF) hardware specifier. + * + * The hardware core calls this function as the first step in + * implementing a client's hwspinlock_get_by_*() call. + * + * @hws: The hardware spinlock struct to hold the translation + * result. + * @args: The hardware spinlock specifier values from device tree. + * @return 0 if OK, or a negative error code. + */ + int (*of_xlate)(struct hwspinlock *hws, + struct ofnode_phandle_args *args); + + /** + * Lock the hardware spinlock + * + * @dev: hwspinlock Device + * @index: index of the lock to be used + * @return 0 if OK, -ve on error + */ + int (*lock)(struct udevice *dev, int index); + + /** + * Unlock the hardware spinlock + * + * @dev: hwspinlock Device + * @index: index of the lock to be unlocked + * @return 0 if OK, -ve on error + */ + int (*unlock)(struct udevice *dev, int index); + + /** + * Relax - optional + * Platform-specific relax method, called by hwspinlock core + * while spinning on a lock, between two successive calls to + * lock + * + * @dev: hwspinlock Device + */ + void (*relax)(struct udevice *dev); +}; + +#endif /* _HWSPINLOCK_H_ */ diff --git a/include/spl.h b/include/spl.h index ee92832f0a..ff4e6277d3 100644 --- a/include/spl.h +++ b/include/spl.h @@ -52,9 +52,9 @@ static inline bool u_boot_first_phase(void) /* A string name for SPL or TPL */ #ifdef CONFIG_SPL_BUILD # ifdef CONFIG_TPL_BUILD -# define SPL_TPL_NAME "tpl" +# define SPL_TPL_NAME "TPL" # else -# define SPL_TPL_NAME "spl" +# define SPL_TPL_NAME "SPL" # endif # define SPL_TPL_PROMPT SPL_TPL_NAME ": " #else
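
The dma.h comment block above already walks through the transmit flow in prose. As a compact illustration, the fragment below strings the same client calls together into one function. It is a minimal sketch only: the channel name "tx0" follows the header's own example, while example_dma_send(), the packet buffer and the choice to pass a NULL metadata pointer are assumptions of this sketch, not part of the commit.

#include <common.h>
#include <dm.h>
#include <dma.h>

/* Transmit one buffer over the DMA channel named "tx0" of this device */
static int example_dma_send(struct udevice *dev, void *packet, size_t length)
{
	struct dma dma_tx;
	int ret;

	/* Look up the channel, typically via the device's dmas/dma-names */
	ret = dma_get_by_name(dev, "tx0", &dma_tx);
	if (ret)
		return ret;

	/* Turn the channel on before issuing any transfer */
	ret = dma_enable(&dma_tx);
	if (ret)
		goto out_free;

	/*
	 * Synchronous MEM_TO_DEV transfer; dma_send() polls until the
	 * transfer has completed. Metadata is provider specific; NULL is
	 * used here purely for the sketch.
	 */
	ret = dma_send(&dma_tx, packet, length, NULL);

	dma_disable(&dma_tx);
out_free:
	dma_free(&dma_tx);
	return ret;
}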
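
On the provider side, dma-uclass.h expects a driver bound to UCLASS_DMA to supply a struct dma_ops. The skeleton below only shows the shape of such a driver when CONFIG_DMA_CHANNELS is enabled; every demo_* name is hypothetical and the bodies merely mark where the hardware programming would go.

#include <dm.h>
#include <dma-uclass.h>

static int demo_dma_request(struct dma *dma)
{
	/* dma->id was filled in by of_xlate (default: one DT cell = channel id) */
	return 0;
}

static int demo_dma_enable(struct dma *dma)
{
	/* start servicing channel dma->id in the controller */
	return 0;
}

static int demo_dma_disable(struct dma *dma)
{
	/* stop the channel */
	return 0;
}

static int demo_dma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	/* queue the buffer and poll until the hardware reports completion */
	return 0;
}

static const struct dma_ops demo_dma_ops = {
	.request	= demo_dma_request,
	.enable		= demo_dma_enable,
	.disable	= demo_dma_disable,
	.send		= demo_dma_send,
};

U_BOOT_DRIVER(demo_dma) = {
	.name	= "demo_dma",
	.id	= UCLASS_DMA,
	.ops	= &demo_dma_ops,
};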
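
For the new include/dt-bindings/clock/am3.h constants, each AM3_*_CLKCTRL value is simply the module's CLKCTRL register offset minus the base offset of its clock domain, so the result acts as a per-domain index. Worked example, using the definitions above:

	AM3_MMC1_CLKCTRL = AM3_L4_PER_CLKCTRL_INDEX(0x3c)
	                 = 0x3c - AM3_L4_PER_CLKCTRL_OFFSET	/* 0x14 */
	                 = 0x28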
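
The hwspinlock.h client API follows the same get/use pattern as the DMA API. A minimal consumer sketch, assuming the client device lists at least one hardware spinlock at index 0 and using an arbitrary 10 ms timeout; example_protected_update() is an invented name:

#include <dm.h>
#include <hwspinlock.h>

static int example_protected_update(struct udevice *dev)
{
	struct hwspinlock hws;
	int ret;

	/* Index 0 of this device's hardware spinlocks */
	ret = hwspinlock_get_by_index(dev, 0, &hws);
	if (ret)
		return ret;

	/* Take the lock, giving up after 10 ms */
	ret = hwspinlock_lock_timeout(&hws, 10);
	if (ret)
		return ret;

	/* ... critical section shared with another processor ... */

	return hwspinlock_unlock(&hws);
}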
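
A provider registers against UCLASS_HWSPINLOCK with a struct hwspinlock_ops; of_xlate and relax are optional. Again, every demo_* identifier below is hypothetical and the register accesses are elided:

#include <dm.h>
#include <hwspinlock.h>

static int demo_hsem_lock(struct udevice *dev, int index)
{
	/*
	 * Try to take hardware semaphore 'index'. Return 0 when acquired,
	 * or a -ve value (e.g. -EBUSY) while another master holds it, so
	 * the caller can retry until its timeout expires.
	 */
	return 0;
}

static int demo_hsem_unlock(struct udevice *dev, int index)
{
	/* Release hardware semaphore 'index' */
	return 0;
}

static const struct hwspinlock_ops demo_hsem_ops = {
	.lock	= demo_hsem_lock,
	.unlock	= demo_hsem_unlock,
};

U_BOOT_DRIVER(demo_hsem) = {
	.name	= "demo_hsem",
	.id	= UCLASS_HWSPINLOCK,
	.ops	= &demo_hsem_ops,
};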