/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*******************************************************************************
 * Grand array that holds the platform's topology information for state
 * management of affinity instances. Each node (aff_map_node) in the array
 * corresponds to an affinity instance e.g. cluster, cpu within an mpidr
 ******************************************************************************/
aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]
#if USE_COHERENT_MEM
__attribute__ ((section("tzfw_coherent_mem")))
#endif
;

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_pm_ops_t *psci_plat_pm_ops;

/*******************************************************************************
 * This function is passed an array of pointers to affinity level nodes in the
 * topology tree for an mpidr. It iterates through the nodes to find the highest
 * affinity level which is marked as physically powered off.
 ******************************************************************************/
uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
				       uint32_t end_afflvl,
				       aff_map_node_t *mpidr_nodes[])
{
	uint32_t max_afflvl = PSCI_INVALID_DATA;

	for (; start_afflvl <= end_afflvl; start_afflvl++) {
		if (mpidr_nodes[start_afflvl] == NULL)
			continue;

		if (psci_get_phys_state(mpidr_nodes[start_afflvl]) ==
		    PSCI_STATE_OFF)
			max_afflvl = start_afflvl;
	}

	return max_afflvl;
}

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
	unsigned int i;

	for (i = psci_aff_limits[MPIDR_AFFLVL0].min;
			i <= psci_aff_limits[MPIDR_AFFLVL0].max; i++) {

		assert(psci_aff_map[i].level == MPIDR_AFFLVL0);

		if (!(psci_aff_map[i].state & PSCI_AFF_PRESENT))
			continue;

		if (psci_aff_map[i].mpidr == mpidr) {
			assert(psci_get_state(&psci_aff_map[i])
					== PSCI_STATE_ON);
			continue;
		}

		if (psci_get_state(&psci_aff_map[i]) != PSCI_STATE_OFF)
			return 0;
	}

	return 1;
}

/*******************************************************************************
 * This function saves the highest affinity level which is in OFF state. The
 * affinity instance with which the level is associated is determined by the
 * caller.
 ******************************************************************************/
void psci_set_max_phys_off_afflvl(uint32_t afflvl)
{
	set_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl, afflvl);

	/*
	 * Ensure that the saved value is flushed to main memory and any
	 * speculatively pre-fetched stale copies are invalidated from the
	 * caches of other cpus in the same coherency domain. This ensures that
	 * the value can be safely read irrespective of the state of the data
	 * cache.
	 */
	flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
}

/*******************************************************************************
 * This function reads the saved highest affinity level which is in OFF
 * state. The affinity instance with which the level is associated is determined
 * by the caller.
 ******************************************************************************/
uint32_t psci_get_max_phys_off_afflvl(void)
{
	/*
	 * Ensure that the last update of this value in this cpu's cache is
	 * flushed to main memory and any speculatively pre-fetched stale copies
	 * are invalidated from the caches of other cpus in the same coherency
	 * domain. This ensures that the correct value is read even if it was
	 * last written while the data cache was disabled.
	 */
	flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
	return get_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
}

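/*
 * Illustrative sketch: the two accessors above are intended to bracket a
 * power up/down sequence. A power down path would typically stash the deepest
 * affinity level being turned off before invoking the level-specific
 * handlers, and invalidate it again afterwards, e.g.:
 *
 *	psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
 *	... call the affinity level OFF/SUSPEND handlers ...
 *	psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
 *
 * with each handler calling psci_get_max_phys_off_afflvl() to find out how
 * deep the power down goes. psci_afflvl_power_on_finish() below follows the
 * same pattern on the way up.
 */
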
/*******************************************************************************
 * Routine to return the maximum affinity level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
int get_power_on_target_afflvl(void)
{
	int afflvl;

#if DEBUG
	unsigned int state;
	aff_map_node_t *node;

	/* Retrieve our node from the topology tree */
	node = psci_get_aff_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				     MPIDR_AFFLVL0);
	assert(node);

	/*
	 * Sanity check the state of the cpu. It should be either suspended or
	 * "on pending"
	 */
	state = psci_get_state(node);
	assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING);
#endif

	/*
	 * Assume that this cpu was suspended and retrieve its target affinity
	 * level. If it is invalid then it could only have been turned off
	 * earlier. get_max_afflvl() will return the highest affinity level a
	 * cpu can be turned off to.
	 */
	afflvl = psci_get_suspend_afflvl();
	if (afflvl == PSCI_INVALID_DATA)
		afflvl = get_max_afflvl();
	return afflvl;
}

/*******************************************************************************
 * Simple routine to retrieve the maximum affinity level supported by the
 * platform and check that it makes sense.
 ******************************************************************************/
int get_max_afflvl(void)
{
	int aff_lvl;

	aff_lvl = plat_get_max_afflvl();
	assert(aff_lvl <= MPIDR_MAX_AFFLVL && aff_lvl >= MPIDR_AFFLVL0);

	return aff_lvl;
}

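/*
 * Illustrative sketch (assumed platform code): a simple single-cluster
 * platform might implement plat_get_max_afflvl() as:
 *
 *	int plat_get_max_afflvl(void)
 *	{
 *		return MPIDR_AFFLVL1;
 *	}
 *
 * i.e. the deepest affinity level the platform can power down is the cluster.
 */
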
/*******************************************************************************
 * Simple routine to set the id of an affinity instance at a given level in the
 * mpidr.
 ******************************************************************************/
unsigned long mpidr_set_aff_inst(unsigned long mpidr,
				 unsigned char aff_inst,
				 int aff_lvl)
{
	unsigned long aff_shift;

	assert(aff_lvl <= MPIDR_AFFLVL3);

	/*
	 * Decide the number of bits to shift by depending upon
	 * the affinity level
	 */
	aff_shift = get_afflvl_shift(aff_lvl);

	/*
	 * Clear the existing affinity instance & set the new one. Shift as
	 * unsigned long so that affinity level 3 (bits [39:32]) is handled
	 * without overflowing an int.
	 */
	mpidr &= ~((unsigned long)MPIDR_AFFLVL_MASK << aff_shift);
	mpidr |= (unsigned long)aff_inst << aff_shift;

	return mpidr;
}

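/*
 * Worked example: building the mpidr of cpu 1 in cluster 2 from scratch.
 * Aff0 occupies bits [7:0] and Aff1 bits [15:8], so the result is 0x201:
 *
 *	unsigned long mpidr = 0;
 *
 *	mpidr = mpidr_set_aff_inst(mpidr, 1, MPIDR_AFFLVL0);
 *	mpidr = mpidr_set_aff_inst(mpidr, 2, MPIDR_AFFLVL1);
 */
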
/*******************************************************************************
 * This function sanity checks a range of affinity levels.
 ******************************************************************************/
int psci_check_afflvl_range(int start_afflvl, int end_afflvl)
{
	/* Sanity check the parameters passed */
	if (end_afflvl > get_max_afflvl())
		return PSCI_E_INVALID_PARAMS;

	if (start_afflvl < MPIDR_AFFLVL0)
		return PSCI_E_INVALID_PARAMS;

	if (end_afflvl < start_afflvl)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

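/*
 * Illustrative usage: PSCI call implementations are expected to validate the
 * requested range before walking the topology, e.g.:
 *
 *	rc = psci_check_afflvl_range(MPIDR_AFFLVL0, end_afflvl);
 *	if (rc != PSCI_E_SUCCESS)
 *		return rc;
 */
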
/*******************************************************************************
 * This function is passed an array of pointers to affinity level nodes in the
 * topology tree for an mpidr and the state which each node should transition
 * to. It updates the state of each node between the specified affinity levels.
 ******************************************************************************/
void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
			       uint32_t end_afflvl,
			       aff_map_node_t *mpidr_nodes[],
			       uint32_t state)
{
	uint32_t level;

	for (level = start_afflvl; level <= end_afflvl; level++) {
		if (mpidr_nodes[level] == NULL)
			continue;
		psci_set_state(mpidr_nodes[level], state);
	}
}

/*******************************************************************************
 * This function is passed an array of pointers to affinity level nodes in the
 * topology tree for an mpidr. It picks up locks for each affinity level bottom
 * up in the range specified.
 ******************************************************************************/
void psci_acquire_afflvl_locks(int start_afflvl,
			       int end_afflvl,
			       aff_map_node_t *mpidr_nodes[])
{
	int level;

	for (level = start_afflvl; level <= end_afflvl; level++) {
		if (mpidr_nodes[level] == NULL)
			continue;

		psci_lock_get(mpidr_nodes[level]);
	}
}

/*******************************************************************************
 * This function is passed an array of pointers to affinity level nodes in the
 * topology tree for an mpidr. It releases the lock for each affinity level top
 * down in the range specified.
 ******************************************************************************/
void psci_release_afflvl_locks(int start_afflvl,
			       int end_afflvl,
			       aff_map_node_t *mpidr_nodes[])
{
	int level;

	for (level = end_afflvl; level >= start_afflvl; level--) {
		if (mpidr_nodes[level] == NULL)
			continue;

		psci_lock_release(mpidr_nodes[level]);
	}
}

/*******************************************************************************
 * Simple routine to determine whether an affinity instance at a given level
 * in an mpidr exists or not.
 ******************************************************************************/
int psci_validate_mpidr(unsigned long mpidr, int level)
{
	aff_map_node_t *node;

	node = psci_get_aff_map_node(mpidr, level);
	if (node && (node->state & PSCI_AFF_PRESENT))
		return PSCI_E_SUCCESS;
	else
		return PSCI_E_INVALID_PARAMS;
}

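/*
 * Illustrative usage: a CPU_ON implementation would typically reject a target
 * cpu that is not present in the topology before doing any further work, e.g.:
 *
 *	if (psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0) != PSCI_E_SUCCESS)
 *		return PSCI_E_INVALID_PARAMS;
 */
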
/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
int psci_get_ns_ep_info(entry_point_info_t *ep,
			uint64_t entrypoint, uint64_t context_id)
{
	uint32_t ep_attr, mode, sctlr, daif, ee;
	uint32_t ns_scr_el3 = read_scr_el3();
	uint32_t ns_sctlr_el1 = read_sctlr_el1();

	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	memset(&ep->args, 0, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if (ns_scr_el3 & SCR_RW_BIT) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_PARAMS;

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	} else {

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
	}

	return PSCI_E_SUCCESS;
}

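/*
 * Worked example of the logic above: for a little-endian 64-bit normal world
 * with EL2 present (SCR_EL3.RW and SCR_EL3.HCE both set), the returned entry
 * point ends up as:
 *
 *	ep->pc        = entrypoint;
 *	ep->args.arg0 = context_id;
 *	ep->spsr      = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 */
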
/*******************************************************************************
 * This function takes a pointer to an affinity node in the topology tree and
 * returns its state. State of a non-leaf node needs to be calculated.
 ******************************************************************************/
unsigned short psci_get_state(aff_map_node_t *node)
{
#if !USE_COHERENT_MEM
	flush_dcache_range((uint64_t) node, sizeof(*node));
#endif

	assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);

	/* A cpu node just contains the state which can be directly returned */
	if (node->level == MPIDR_AFFLVL0)
		return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;

	/*
	 * For an affinity level higher than a cpu, the state has to be
	 * calculated. It depends upon the value of the reference count
	 * which is managed by each node at the next lower affinity level
	 * e.g. for a cluster, each cpu increments/decrements the reference
	 * count. If the reference count is 0 then the affinity level is
	 * OFF else ON.
	 */
	if (node->ref_count)
		return PSCI_STATE_ON;
	else
		return PSCI_STATE_OFF;
}

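/*
 * Worked example: for a cluster node with two cpus, the cluster's ref_count
 * is incremented once for each cpu that is turned ON. With both cpus ON the
 * count is 2 and psci_get_state() returns PSCI_STATE_ON for the cluster; once
 * the last cpu has been turned OFF the count drops to 0 and the cluster reads
 * back as PSCI_STATE_OFF.
 */
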
/*******************************************************************************
 * This function takes a pointer to an affinity node in the topology tree and
 * a target state. State of a non-leaf node needs to be converted to a reference
 * count. State of a leaf node can be set directly.
 ******************************************************************************/
void psci_set_state(aff_map_node_t *node, unsigned short state)
{
	assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);

	/*
	 * For an affinity level higher than a cpu, the state is used
	 * to decide whether the reference count is incremented or
	 * decremented. Entry into the ON_PENDING state has no effect.
	 */
	if (node->level > MPIDR_AFFLVL0) {
		switch (state) {
		case PSCI_STATE_ON:
			node->ref_count++;
			break;
		case PSCI_STATE_OFF:
		case PSCI_STATE_SUSPEND:
			node->ref_count--;
			break;
		case PSCI_STATE_ON_PENDING:
			/*
			 * An affinity level higher than a cpu will not undergo
			 * a state change when it is about to be turned on
			 */
			return;
		default:
			assert(0);
		}
	} else {
		node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT);
		node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT;
	}

#if !USE_COHERENT_MEM
	flush_dcache_range((uint64_t) node, sizeof(*node));
#endif
}

/*******************************************************************************
 * An affinity level can be in one of four logical states: on, on_pending,
 * suspended or off. Physically, it is either off or on. When it is in the
 * on_pending state it is about to be turned on, but it is not possible to tell
 * whether that has actually happened yet. So we err on the side of caution and
 * treat the affinity level as being turned off.
 ******************************************************************************/
unsigned short psci_get_phys_state(aff_map_node_t *node)
{
	unsigned int state;

	state = psci_get_state(node);
	return get_phys_state(state);
}

/*******************************************************************************
 * This function takes an array of pointers to affinity instance nodes in the
 * topology tree and calls the physical power on handler for the corresponding
 * affinity levels
 ******************************************************************************/
static void psci_call_power_on_handlers(aff_map_node_t *mpidr_nodes[],
				       int start_afflvl,
				       int end_afflvl,
				       afflvl_power_on_finisher_t *pon_handlers)
{
	int level;
	aff_map_node_t *node;

	for (level = end_afflvl; level >= start_afflvl; level--) {
		node = mpidr_nodes[level];
		if (node == NULL)
			continue;

		/*
		 * There is no recovery path if anything goes wrong while
		 * powering up an affinity instance, so each level-specific
		 * handler is expected to deal with any failure itself; no
		 * error is propagated back from here.
		 */
		pon_handlers[level](node);
	}
}

/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses through all the affinity levels performing generic, architectural,
 * platform setup and state management e.g. for a cluster that's been powered
 * on, it will call the platform specific code which will enable coherency at
 * the interconnect level. For a cpu it could mean turning on the MMU etc.
 *
 * The state of all the relevant affinity levels is changed after calling the
 * affinity level specific handlers as their actions would depend upon the state
 * the affinity level is exiting from.
 *
 * The affinity level specific handlers are called in descending order i.e. from
 * the highest to the lowest affinity level implemented by the platform because
 * to turn on affinity level X it is necessary to turn on affinity level X + 1
 * first.
 ******************************************************************************/
void psci_afflvl_power_on_finish(int start_afflvl,
				 int end_afflvl,
				 afflvl_power_on_finisher_t *pon_handlers)
{
	mpidr_aff_map_nodes_t mpidr_nodes;
	int rc;
	unsigned int max_phys_off_afflvl;

	/*
	 * Collect the pointers to the nodes in the topology tree for
	 * each affinity instance in the mpidr. If this function does
	 * not return successfully then either the mpidr or the affinity
	 * levels are incorrect. Either case is an irrecoverable error.
	 */
	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				    start_afflvl,
				    end_afflvl,
				    mpidr_nodes);
	if (rc != PSCI_E_SUCCESS)
		panic();

	/*
	 * This function acquires the lock corresponding to each affinity
	 * level so that by the time all locks are taken, a consistent snapshot
	 * of the system topology is available and state management can be done
	 * safely.
	 */
	psci_acquire_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);

	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
							    end_afflvl,
							    mpidr_nodes);
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);

	/*
	 * Stash the highest affinity level that will come out of the OFF or
	 * SUSPEND states.
	 */
	psci_set_max_phys_off_afflvl(max_phys_off_afflvl);

	/* Perform generic, architecture and platform specific handling */
	psci_call_power_on_handlers(mpidr_nodes,
					 start_afflvl,
					 end_afflvl,
					 pon_handlers);

	/*
	 * This function updates the state of each affinity instance
	 * corresponding to the mpidr in the range of affinity levels
	 * specified.
	 */
	psci_do_afflvl_state_mgmt(start_afflvl,
				  end_afflvl,
				  mpidr_nodes,
				  PSCI_STATE_ON);

	/*
	 * Invalidate the entry for the highest affinity level stashed earlier.
	 * This ensures that any reads of this variable outside the power
	 * up/down sequences return PSCI_INVALID_DATA.
	 */
	psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);

	/*
	 * This loop releases the lock corresponding to each affinity level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_afflvl_locks(start_afflvl,
				  end_afflvl,
				  mpidr_nodes);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of power
 * management operations. The power management hooks are expected to be provided
 * by the SPD, after it finishes all its initialization.
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm);
	psci_spd_pm = pm;

	if (pm->svc_migrate)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
				| define_psci_cap(PSCI_MIG_INFO_TYPE);
}

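/*
 * Illustrative sketch (handler names assumed): an SPD registers its hooks
 * from its setup code once the Secure Payload has initialized, e.g.:
 *
 *	static const spd_pm_ops_t spd_pm = {
 *		.svc_migrate_info = spd_cpu_migrate_info,
 *		... remaining handlers ...
 *	};
 *
 *	psci_register_spd_pm_hook(&spd_pm);
 */
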
/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure Payload
 * is resident through the mpidr parameter. Otherwise the value of the parameter
 * on return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(uint64_t *mpidr)
{
	int rc;

	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP ||
	       rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);

	return rc;
}

/*******************************************************************************
 * This function prints the state of all affinity instances present in the
 * system.
 ******************************************************************************/
void psci_print_affinity_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	aff_map_node_t *node;
	unsigned int idx;
	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char *psci_state_str[] = {
		"ON",
		"OFF",
		"ON_PENDING",
		"SUSPEND"
	};

	INFO("PSCI Affinity Map:\n");
	for (idx = 0; idx < PSCI_NUM_AFFS ; idx++) {
		node = &psci_aff_map[idx];
		if (!(node->state & PSCI_AFF_PRESENT)) {
			continue;
		}
		INFO("  AffInst: Level %u, MPID 0x%lx, State %s\n",
				node->level, node->mpidr,
				psci_state_str[psci_get_state(node)]);
	}
#endif
}
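
/*
 * Illustrative output (assuming one cluster with two cpus, the second cpu
 * OFF; the prefix added by the INFO() logging macro is omitted):
 *
 *	PSCI Affinity Map:
 *	  AffInst: Level 1, MPID 0x0, State ON
 *	  AffInst: Level 0, MPID 0x0, State ON
 *	  AffInst: Level 0, MPID 0x1, State OFF
 */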