author    Michael J. Chudobiak <mjc@avtechpulse.com>    2016-04-25 10:00:44 -0400
committer Michael J. Chudobiak <mjc@avtechpulse.com>    2016-04-25 10:00:44 -0400
commit    a1df417e74aa6dae7352dc8cbb0ad471af5b7c69 (patch)
tree      c34b2311e37ea31db153c90cb8f4570374d05e78 /linux/arch/alpha/include/asm/barrier.h
initial Olimex linux tree from Daniel, originally Feb 3, 2016
Diffstat (limited to 'linux/arch/alpha/include/asm/barrier.h')
-rw-r--r--  linux/arch/alpha/include/asm/barrier.h  71
1 file changed, 71 insertions, 0 deletions
diff --git a/linux/arch/alpha/include/asm/barrier.h b/linux/arch/alpha/include/asm/barrier.h
new file mode 100644
index 00000000..77516c87
--- /dev/null
+++ b/linux/arch/alpha/include/asm/barrier.h
@@ -0,0 +1,71 @@
+#ifndef __BARRIER_H
+#define __BARRIER_H
+
+#include <asm/compiler.h>
+
+#define mb() __asm__ __volatile__("mb": : :"memory")	/* full memory barrier */
+#define rmb() __asm__ __volatile__("mb": : :"memory")	/* read barrier: Alpha has only the full "mb" insn */
+#define wmb() __asm__ __volatile__("wmb": : :"memory")	/* write memory barrier */
+
+/**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     b = 2;
+ *     memory_barrier();
+ *     p = &b;                         q = p;
+ *                                     read_barrier_depends();
+ *                                     d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     a = 2;
+ *     memory_barrier();
+ *     b = 3;                          y = b;
+ *                                     read_barrier_depends();
+ *                                     x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
+#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
+
+#ifdef CONFIG_SMP
+#define __ASM_SMP_MB "\tmb\n"	/* string fragment spliced into inline asm so SMP kernels emit a barrier */
+#else
+#define __ASM_SMP_MB		/* uniprocessor build: no barrier needed, fragment is empty */
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* __BARRIER_H */
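
The pointer-publication pattern that the comment above describes looks roughly like this in C. The sketch below is illustrative only, not part of the commit: it assumes an Alpha build where the macros from this header are in scope, and the names (struct msg, publish, consume) are hypothetical. In-tree code would normally go through the smp_wmb()/smp_read_barrier_depends() wrappers pulled in via <asm-generic/barrier.h> rather than the raw primitives.

/* Illustrative sketch -- not part of the commit above.  Assumes the
 * barrier macros from this header are in scope (i.e. an Alpha build);
 * all type and function names here are hypothetical. */

struct msg {
	int a;
	int b;
};

static struct msg m;
static struct msg *shared_p;	/* NULL until the writer publishes &m */

/* Writer (CPU 0): fill in the message, then publish the pointer.
 * wmb() orders the stores to m.a/m.b before the store to shared_p,
 * so a reader that sees the pointer also sees the data behind it. */
void publish(void)
{
	m.a = 1;
	m.b = 2;
	wmb();
	shared_p = &m;
}

/* Reader (CPU 1): load the pointer, then dereference it.
 * read_barrier_depends() sits between the load of shared_p and the
 * dependent loads through q; on Alpha it expands to a full "mb",
 * which is exactly what makes the "d = *q" case above safe. */
int consume(void)
{
	struct msg *q = shared_p;

	if (!q)
		return -1;
	read_barrier_depends();
	return q->a + q->b;
}

Likewise, __ASM_SMP_MB is a string fragment meant to be spliced into inline assembly so that the barrier instruction is emitted only on CONFIG_SMP kernels. A hypothetical load-locked/store-conditional sequence, again not taken from this tree, might splice it like this:

/* Hypothetical splice of __ASM_SMP_MB into a load-locked/store-
 * conditional increment; not taken from this tree. */
static inline void counter_inc(int *counter)
{
	unsigned long tmp;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"	/* load-locked: start atomic sequence */
	"	addl	%0,1,%0\n"	/* increment in a register */
	"	stl_c	%0,%1\n"	/* store-conditional: %0 = 0 on failure */
	"	beq	%0,1b\n"	/* lost the reservation: retry */
	__ASM_SMP_MB			/* expands to "\tmb\n" only if CONFIG_SMP */
	: "=&r" (tmp), "=m" (*counter)
	: "m" (*counter));
}

Later kernels eventually folded the dependent-load barrier into READ_ONCE() and removed read_barrier_depends() altogether, but in a tree of this vintage the explicit call is still the documented idiom.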