1 /*
2  * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <arch.h>
8 #include <arch_helpers.h>
9 #include <assert.h>
10 #include <bl_common.h>
11 #include <context.h>
12 #include <context_mgmt.h>
13 #include <errata_report.h>
14 #include <platform.h>
15 #include <stddef.h>
16 #include "psci_private.h"
17 
18 /*******************************************************************************
 * Per-cpu non-secure contexts used to program the architectural state prior
 * to return to the normal world.
21  * TODO: Use the memory allocator to set aside memory for the contexts instead
22  * of relying on platform defined constants.
23  ******************************************************************************/
24 static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
25 
26 /******************************************************************************
27  * Define the psci capability variable.
28  *****************************************************************************/
29 unsigned int psci_caps;
30 
31 /*******************************************************************************
32  * Function which initializes the 'psci_non_cpu_pd_nodes' or the
33  * 'psci_cpu_pd_nodes' corresponding to the power level.
34  ******************************************************************************/
psci_init_pwr_domain_node(unsigned int node_idx,unsigned int parent_idx,unsigned int level)35 static void psci_init_pwr_domain_node(unsigned int node_idx,
36 					unsigned int parent_idx,
37 					unsigned int level)
38 {
39 	if (level > PSCI_CPU_PWR_LVL) {
40 		psci_non_cpu_pd_nodes[node_idx].level = level;
41 		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
42 		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
43 		psci_non_cpu_pd_nodes[node_idx].local_state =
44 							 PLAT_MAX_OFF_STATE;
45 	} else {
46 		psci_cpu_data_t *svc_cpu_data;
47 
48 		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
49 
50 		/* Initialize with an invalid mpidr */
51 		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
52 
53 		svc_cpu_data =
54 			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
55 
56 		/* Set the Affinity Info for the cores as OFF */
57 		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
58 
59 		/* Invalidate the suspend level for the cpu */
60 		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
61 
62 		/* Set the power state to OFF state */
63 		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
64 
65 		psci_flush_dcache_range((uintptr_t)svc_cpu_data,
66 						 sizeof(*svc_cpu_data));
67 
68 		cm_set_context_by_index(node_idx,
69 					(void *) &psci_ns_context[node_idx],
70 					NON_SECURE);
71 	}
72 }
73 
74 /*******************************************************************************
 * This function updates the cpu_start_idx and ncpus fields for each node in
 * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
 * the CPUs and checking whether they match the parent of the previous
 * CPU. The basic assumption for this to work is that children of the same
 * parent are allocated adjacent indices. The platform should ensure this
 * through proper mapping of the CPUs to indices via plat_core_pos_by_mpidr()
 * and plat_my_core_pos() APIs.
82  *******************************************************************************/
static void psci_update_pwrlvl_limits(void)
{
	int j;
	/*
	 * Per-level index of the ancestor node seen for the previous CPU.
	 * A mismatch against the current CPU's ancestor marks the start of
	 * a new parent's run of (adjacent) CPU indices.
	 */
	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		/* Collect this CPU's ancestor node index at every level */
		psci_get_parent_pwr_domain_nodes(cpu_idx,
						 PLAT_MAX_PWR_LVL,
						 temp_index);
		/* Walk the ancestors from the highest level downwards */
		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
			if (temp_index[j] != nodes_idx[j]) {
				/*
				 * First CPU under a new ancestor node:
				 * record it as that node's starting CPU.
				 * Correct only if siblings have adjacent
				 * indices, as required of the platform port.
				 */
				nodes_idx[j] = temp_index[j];
				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
					= cpu_idx;
			}
			/* Account this CPU to its ancestor at this level */
			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
		}
	}
}
103 
104 /*******************************************************************************
105  * Core routine to populate the power domain tree. The tree descriptor passed by
106  * the platform is populated breadth-first and the first entry in the map
107  * informs the number of root power domains. The parent nodes of the root nodes
108  * will point to an invalid entry(-1).
109  ******************************************************************************/
static void populate_power_domain_tree(const unsigned char *topology)
{
	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
	unsigned int node_index = 0, parent_node_index = 0, num_children;
	int level = PLAT_MAX_PWR_LVL;

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_level
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
	while (level >= PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0;
		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0; i < num_nodes_at_lvl; i++) {
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

			/*
			 * Initialize one node per child. Note the parent of
			 * each root node is parent_node_index - 1, i.e. an
			 * invalid index for the first (root) iteration.
			 */
			for (j = node_index;
				j < node_index + num_children; j++)
				psci_init_pwr_domain_node(j,
							  parent_node_index - 1,
							  level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;

		/* Reset the index for the cpu power domain array */
		if (level == PSCI_CPU_PWR_LVL)
			node_index = 0;
	}

	/*
	 * Validate the sanity of array exported by the platform: after the
	 * final (CPU-level) pass, j is one past the last CPU node created,
	 * so it must equal the core count.
	 */
	assert(j == PLATFORM_CORE_COUNT);
}
162 
163 /*******************************************************************************
164  * This function does the architectural setup and takes the warm boot
165  * entry-point `mailbox_ep` as an argument. The function also initializes the
166  * power domain topology tree by querying the platform. The power domain nodes
167  * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
168  * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
169  * exports its static topology map through the
170  * populate_power_domain_topology_tree() API. The algorithm populates the
171  * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
172  * topology map.  On a platform that implements two clusters of 2 cpus each,
173  * and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would
174  * look like this:
175  *
176  * ---------------------------------------------------
177  * | system node | cluster 0 node  | cluster 1 node  |
178  * ---------------------------------------------------
179  *
180  * And populated psci_cpu_pd_nodes would look like this :
181  * <-    cpus cluster0   -><-   cpus cluster1   ->
182  * ------------------------------------------------
183  * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
184  * ------------------------------------------------
185  ******************************************************************************/
psci_setup(const psci_lib_args_t * lib_args)186 int psci_setup(const psci_lib_args_t *lib_args)
187 {
188 	const unsigned char *topology_tree;
189 
190 	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));
191 
192 	/* Do the Architectural initialization */
193 	psci_arch_setup();
194 
195 	/* Query the topology map from the platform */
196 	topology_tree = plat_get_power_domain_tree_desc();
197 
198 	/* Populate the power domain arrays using the platform topology map */
199 	populate_power_domain_tree(topology_tree);
200 
201 	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
202 	psci_update_pwrlvl_limits();
203 
204 	/* Populate the mpidr field of cpu node for this CPU */
205 	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
206 		read_mpidr() & MPIDR_AFFINITY_MASK;
207 
208 	psci_init_req_local_pwr_states();
209 
210 	/*
211 	 * Set the requested and target state of this CPU and all the higher
212 	 * power domain levels for this CPU to run.
213 	 */
214 	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
215 
216 	plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep, &psci_plat_pm_ops);
217 	assert(psci_plat_pm_ops);
218 
219 	/*
220 	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
221 	 * during warm boot, possibly before data cache is enabled.
222 	 */
223 	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
224 					sizeof(psci_plat_pm_ops));
225 
226 	/* Initialize the psci capability */
227 	psci_caps = PSCI_GENERIC_CAP;
228 
229 	if (psci_plat_pm_ops->pwr_domain_off)
230 		psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
231 	if (psci_plat_pm_ops->pwr_domain_on &&
232 			psci_plat_pm_ops->pwr_domain_on_finish)
233 		psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
234 	if (psci_plat_pm_ops->pwr_domain_suspend &&
235 			psci_plat_pm_ops->pwr_domain_suspend_finish) {
236 		psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
237 		if (psci_plat_pm_ops->get_sys_suspend_power_state)
238 			psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
239 	}
240 	if (psci_plat_pm_ops->system_off)
241 		psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
242 	if (psci_plat_pm_ops->system_reset)
243 		psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);
244 	if (psci_plat_pm_ops->get_node_hw_state)
245 		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
246 	if (psci_plat_pm_ops->read_mem_protect &&
247 			psci_plat_pm_ops->write_mem_protect)
248 		psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
249 	if (psci_plat_pm_ops->mem_protect_chk)
250 		psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
251 	if (psci_plat_pm_ops->system_reset2)
252 		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);
253 
254 #if ENABLE_PSCI_STAT
255 	psci_caps |=  define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
256 	psci_caps |=  define_psci_cap(PSCI_STAT_COUNT_AARCH64);
257 #endif
258 
259 	return 0;
260 }
261 
262 /*******************************************************************************
263  * This duplicates what the primary cpu did after a cold boot in BL1. The same
264  * needs to be done when a cpu is hotplugged in. This function could also over-
265  * ride any EL3 setup done by BL1 as this code resides in rw memory.
266  ******************************************************************************/
void psci_arch_setup(void)
{
	/* Expose the system counter frequency through CNTFRQ_EL0 */
	write_cntfrq_el0(plat_get_syscnt_freq2());

	/* Detect the CPU type and hook up its cpu_ops */
	init_cpu_ops();

	/* cpu_ops is now available, so errata status can be reported */
	print_errata_status();
}
278 
279 /******************************************************************************
280  * PSCI Library interface to initialize the cpu context for the next non
281  * secure image during cold boot. The relevant registers in the cpu context
282  * need to be retrieved and programmed on return from this interface.
283  *****************************************************************************/
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
	/* Only a non-secure image may be prepared via this interface */
	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);

	/* Populate this CPU's context and ready the EL3 exit state */
	cm_init_my_context(next_image_info);
	cm_prepare_el3_exit(NON_SECURE);
}
290