Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r-- | arch/riscv/include/asm/barrier.h     | 67 |
-rw-r--r-- | arch/riscv/include/asm/cache.h       |  3 |
-rw-r--r-- | arch/riscv/include/asm/io.h          | 48 |
-rw-r--r-- | arch/riscv/include/asm/posix_types.h |  6 |
-rw-r--r-- | arch/riscv/include/asm/types.h       |  4 |
5 files changed, 87 insertions, 41 deletions
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
new file mode 100644
index 0000000000..a3f60a8458
--- /dev/null
+++ b/arch/riscv/include/asm/barrier.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2013 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * Taken from Linux arch/riscv/include/asm/barrier.h, which is based on
+ * arch/arm/include/asm/barrier.h
+ */
+
+#ifndef _ASM_RISCV_BARRIER_H
+#define _ASM_RISCV_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop()		__asm__ __volatile__ ("nop")
+
+#define RISCV_FENCE(p, s) \
+	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
+
+/* These barriers need to enforce ordering on both devices or memory. */
+#define mb()		RISCV_FENCE(iorw,iorw)
+#define rmb()		RISCV_FENCE(ir,ir)
+#define wmb()		RISCV_FENCE(ow,ow)
+
+/* These barriers do not need to enforce ordering on devices, just memory. */
+#define __smp_mb()	RISCV_FENCE(rw,rw)
+#define __smp_rmb()	RISCV_FENCE(r,r)
+#define __smp_wmb()	RISCV_FENCE(w,w)
+
+#define __smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	RISCV_FENCE(rw,w);						\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	RISCV_FENCE(r,rw);						\
+	___p1;								\
+})
+
+/*
+ * This is a very specific barrier: it's currently only used in two places in
+ * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
+ * orderings it guarantees, but the "critical section is RCsc" guarantee
+ * mandates a barrier on RISC-V. The sequence looks like:
+ *
+ *    lr.aq lock
+ *    sc    lock <= LOCKED
+ *    smp_mb__after_spinlock()
+ *    // critical section
+ *    lr    lock
+ *    sc.rl lock <= UNLOCKED
+ *
+ * The AQ/RL pair provides a RCpc critical section, but there's not really any
+ * way we can take advantage of that here because the ordering is only enforced
+ * on that one lock. Thus, we're just doing a full fence.
+ */
+#define smp_mb__after_spinlock()	RISCV_FENCE(rw,rw)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_BARRIER_H */
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
index ca83dd67c2..ec8fe201d3 100644
--- a/arch/riscv/include/asm/cache.h
+++ b/arch/riscv/include/asm/cache.h
@@ -7,6 +7,9 @@
 #ifndef _ASM_RISCV_CACHE_H
 #define _ASM_RISCV_CACHE_H
 
+/* cache */
+void cache_flush(void);
+
 /*
  * The current upper bound for RISCV L1 data cache line sizes is 32 bytes.
  * We use that value for aligning DMA buffers unless the board config has
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index f4a76d8720..acf5a96449 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -10,22 +10,13 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/byteorder.h>
 
 static inline void sync(void)
 {
 }
 
-/*
- * Given a physical address and a length, return a virtual address
- * that can be used to access the memory range with the caching
- * properties specified by "flags".
- */
-#define MAP_NOCACHE	(0)
-#define MAP_WRCOMBINE	(0)
-#define MAP_WRBACK	(0)
-#define MAP_WRTHROUGH	(0)
-
 #ifdef CONFIG_ARCH_MAP_SYSMEM
 static inline void *map_sysmem(phys_addr_t paddr, unsigned long len)
 {
@@ -48,24 +39,6 @@ static inline phys_addr_t map_to_sysmem(const void *ptr)
 }
 #endif
 
-static inline void *
-map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
-{
-	return (void *)paddr;
-}
-
-/*
- * Take down a mapping set up by map_physmem().
- */
-static inline void unmap_physmem(void *vaddr, unsigned long flags)
-{
-}
-
-static inline phys_addr_t virt_to_phys(void *vaddr)
-{
-	return (phys_addr_t)(vaddr);
-}
-
 /*
  * Generic virtual read/write. Note that we don't support half-word
  * read/writes. We define __arch_*[bl] here, and leave __arch_*w
@@ -74,12 +47,12 @@ static inline phys_addr_t virt_to_phys(void *vaddr)
 #define __arch_getb(a)			(*(unsigned char *)(a))
 #define __arch_getw(a)			(*(unsigned short *)(a))
 #define __arch_getl(a)			(*(unsigned int *)(a))
-#define __arch_getq(a)			(*(unsigned long *)(a))
+#define __arch_getq(a)			(*(unsigned long long *)(a))
 
 #define __arch_putb(v, a)		(*(unsigned char *)(a) = (v))
 #define __arch_putw(v, a)		(*(unsigned short *)(a) = (v))
 #define __arch_putl(v, a)		(*(unsigned int *)(a) = (v))
-#define __arch_putq(v, a)		(*(unsigned long *)(a) = (v))
+#define __arch_putq(v, a)		(*(unsigned long long *)(a) = (v))
 
 #define __raw_writeb(v, a)		__arch_putb(v, a)
 #define __raw_writew(v, a)		__arch_putw(v, a)
@@ -91,13 +64,9 @@ static inline phys_addr_t virt_to_phys(void *vaddr)
 #define __raw_readl(a)			__arch_getl(a)
 #define __raw_readq(a)			__arch_getq(a)
 
-/*
- * TODO: The kernel offers some more advanced versions of barriers, it might
- * have some advantages to use them instead of the simple one here.
- */
-#define dmb()		__asm__ __volatile__ ("" : : : "memory")
-#define __iormb()	dmb()
-#define __iowmb()	dmb()
+#define dmb()		mb()
+#define __iormb()	rmb()
+#define __iowmb()	wmb()
 
 static inline void writeb(u8 val, volatile void __iomem *addr)
 {
@@ -152,7 +121,7 @@ static inline u32 readl(const volatile void __iomem *addr)
 
 static inline u64 readq(const volatile void __iomem *addr)
 {
-	u32	val;
+	u64	val;
 
 	val = __arch_getq(addr);
 	__iormb();
@@ -487,4 +456,7 @@ out:
 #endif	/* __mem_isa */
 
 #endif	/* __KERNEL__ */
+
+#include <asm-generic/io.h>
+
 #endif	/* __ASM_RISCV_IO_H */
diff --git a/arch/riscv/include/asm/posix_types.h b/arch/riscv/include/asm/posix_types.h
index 7438dbeb03..0fc052082a 100644
--- a/arch/riscv/include/asm/posix_types.h
+++ b/arch/riscv/include/asm/posix_types.h
@@ -37,10 +37,10 @@ typedef unsigned short	__kernel_gid_t;
 #ifdef	__GNUC__
 typedef	__SIZE_TYPE__	__kernel_size_t;
 #else
-typedef	unsigned int	__kernel_size_t;
+typedef	unsigned long	__kernel_size_t;
 #endif
-typedef	int		__kernel_ssize_t;
-typedef	int		__kernel_ptrdiff_t;
+typedef	long		__kernel_ssize_t;
+typedef	long		__kernel_ptrdiff_t;
 typedef	long		__kernel_time_t;
 typedef	long		__kernel_suseconds_t;
 typedef	long		__kernel_clock_t;
diff --git a/arch/riscv/include/asm/types.h b/arch/riscv/include/asm/types.h
index bd8627196d..403cf9a48f 100644
--- a/arch/riscv/include/asm/types.h
+++ b/arch/riscv/include/asm/types.h
@@ -21,7 +21,11 @@ typedef unsigned short umode_t;
  */
 #ifdef __KERNEL__
 
+#ifdef CONFIG_ARCH_RV64I
+#define BITS_PER_LONG 64
+#else
 #define BITS_PER_LONG 32
+#endif
 
 #include <stddef.h>
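Note on the io.h change above: dmb()/__iormb()/__iowmb() are no longer compiler-only barriers; they now map onto the real RISC-V fence macros from the new barrier.h, so readl() issues "fence ir,ir" after the MMIO load and writel() issues "fence ow,ow" before the MMIO store. The sketch below is not part of the patch; it only illustrates the ordering a caller now gets from these accessors. The UART base address, register offsets, and status bit are hypothetical.

/*
 * Illustrative only -- hypothetical device registers, not from the patch.
 * Ordering provided by the patched accessors:
 *   readl():  MMIO load, then __iormb()  -> "fence ir,ir"
 *   writel(): __iowmb() -> "fence ow,ow", then MMIO store
 */
#include <asm/io.h>

#define UART_BASE    0x10013000UL   /* hypothetical MMIO base address  */
#define UART_TXDATA  0x00           /* hypothetical TX data register   */
#define UART_STATUS  0x04           /* hypothetical status register    */
#define UART_TX_FULL 0x1u           /* hypothetical "TX FIFO full" bit */

static void uart_putc_sketch(char c)
{
	void __iomem *base = (void __iomem *)UART_BASE;

	/*
	 * readl() performs the MMIO load and then executes "fence ir,ir",
	 * so the polled status is ordered before the dependent store below.
	 */
	while (readl(base + UART_STATUS) & UART_TX_FULL)
		;

	/*
	 * writel() executes "fence ow,ow" before the MMIO store, so any
	 * earlier normal-memory writes (e.g. a buffer the device will read)
	 * are ordered before this register write reaches the device.
	 */
	writel(c, base + UART_TXDATA);
}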