/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 Google, Inc
 *
 * Taken from coreboot file of the same name
 */

#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <asm/atomic.h>
#include <asm/cache.h>

enum {
	/* Indicates that the function should run on all CPUs */
	MP_SELECT_ALL	= -1,

	/* Run on the boot CPU (BSP) only */
	MP_SELECT_BSP	= -2,

	/* Run on the non-boot CPUs (APs) only */
	MP_SELECT_APS	= -3,
};

typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);

/*
 * A mp_flight_record details one step in a sequence of calls that the APs
 * perform, with the BSP coordinating the sequencing. Each flight record
 * either provides a barrier that the APs must wait on before calling the
 * callback, or lets the APs perform the callback without waiting. In either
 * case, each AP increments the cpus_entered field of the record it enters.
 * When the BSP observes that cpus_entered matches the number of APs, it
 * calls bsp_call with bsp_arg and, on return, releases the barrier so the
 * APs can make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL, in which case the
 * corresponding callback is simply not called.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	void *ap_arg;
	mp_callback_t bsp_call;
	void *bsp_arg;
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{							\
		.barrier = ATOMIC_INIT(barrier_),		\
		.cpus_entered = ATOMIC_INIT(0),			\
		.ap_call = ap_func_,				\
		.ap_arg = ap_arg_,				\
		.bsp_call = bsp_func_,				\
		.bsp_arg = bsp_arg_,				\
	}

#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)
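
/*
 * Illustrative sketch (not part of this header's API): one way a flight plan
 * might be declared with the macros above. The callbacks ap_do_setup() and
 * bsp_do_setup() are hypothetical names that match mp_callback_t; the real
 * flight plan lives in the MP init implementation.
 *
 *	static int ap_do_setup(struct udevice *cpu, void *arg)
 *	{
 *		// per-AP work goes here
 *		return 0;
 *	}
 *
 *	static int bsp_do_setup(struct udevice *cpu, void *arg)
 *	{
 *		// BSP work, run once all APs have entered this record
 *		return 0;
 *	}
 *
 *	static struct mp_flight_record mp_steps[] = {
 *		// APs wait at the barrier until bsp_do_setup() has returned
 *		MP_FR_BLOCK_APS(ap_do_setup, NULL, bsp_do_setup, NULL),
 *		// APs run ap_do_setup() immediately, without waiting
 *		MP_FR_NOBLOCK_APS(ap_do_setup, NULL, NULL, NULL),
 *	};
 */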

/*
 * mp_init() will set up the SIPI vector and bring up the APs. Each flight
 * record will be executed according to the plan. Note that the MP
 * infrastructure uses the SMM default area without saving it. It is up to
 * the chipset or mainboard either to reserve this area in the e820 map or
 * to save the region before calling mp_init() and restore it after
 * mp_init() returns.
 *
 * When mp_init() is called the MTRR MSRs are mirrored into the APs and
 * caching is enabled before the flight plan is run.
 *
 * The MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The mapping between CPU numbers and APIC IDs is not deterministic.
 *    Therefore, one cannot rely on this ordering, or on the order of devices
 *    in the device tree, unless the chipset or mainboard knows the APIC IDs
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
int mp_init(void);
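
/*
 * Illustrative sketch (not part of this header): how a platform's CPU init
 * path might invoke mp_init(), assuming the SMM default area has already
 * been reserved or saved as described above. The function name
 * board_bring_up_aps() is hypothetical.
 *
 *	static int board_bring_up_aps(void)
 *	{
 *		int ret;
 *
 *		ret = mp_init();
 *		if (ret)
 *			printf("MP init failed: %d\n", ret);
 *
 *		return ret;
 *	}
 */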

/* Set up additional CPUs */
int x86_mp_init(void);

/**
 * mp_run_func() - Function to call on the AP
 *
 * @arg: Argument to pass
 */
typedef void (*mp_run_func)(void *arg);

#if defined(CONFIG_SMP) && !CONFIG_IS_ENABLED(X86_64)
/**
 * mp_run_on_cpus() - Run a function on one or all CPUs
 *
 * This does not return until all CPUs have completed the work
 *
 * Running on anything other than the boot CPU is only supported if
 * CONFIG_SMP_AP_WORK is enabled
 *
 * @cpu_select: CPU to run on (its dev->req_seq value), or MP_SELECT_ALL for
 *	all, or MP_SELECT_BSP for BSP
 * @func: Function to run
 * @arg: Argument to pass to the function
 * @return 0 on success, -ve on error
 */
int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg);
#else
static inline int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
{
	/* There is only one CPU, so just call the function here */
	func(arg);

	return 0;
}
#endif
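
/*
 * Illustrative sketch (not part of this header): running a per-CPU function
 * on every CPU and then on the BSP only, assuming CONFIG_SMP_AP_WORK is
 * enabled so work can run on non-boot CPUs. do_per_cpu_work() and
 * run_work_everywhere() are hypothetical names; the callback matches
 * mp_run_func.
 *
 *	static void do_per_cpu_work(void *arg)
 *	{
 *		// per-CPU work goes here, e.g. programming a per-core MSR
 *	}
 *
 *	static int run_work_everywhere(void)
 *	{
 *		int ret;
 *
 *		// run on every CPU, including the BSP
 *		ret = mp_run_on_cpus(MP_SELECT_ALL, do_per_cpu_work, NULL);
 *		if (ret)
 *			return ret;
 *
 *		// run on the boot CPU only
 *		return mp_run_on_cpus(MP_SELECT_BSP, do_per_cpu_work, NULL);
 *	}
 */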

#endif /* _X86_MP_H_ */