/*
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * SPDX-License-Identifier:     BSD-3-Clause
 * https://spdx.org/licenses
 */

#include <assert.h>

#include <common/debug.h>
#include <drivers/arm/gicv2.h>
#include <drivers/console.h>
#include <drivers/delay_timer.h>
#include <drivers/marvell/cache_llc.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <armada_common.h>
#include <marvell_pm.h>
#include <mss_pm_ipc.h>
#include <plat_marvell.h>
#include <plat_pm_trace.h>

#define MVEBU_PRIVATE_UID_REG		0x30
#define MVEBU_RFU_GLOBL_SW_RST		0x84
#define MVEBU_CCU_RVBAR(cpu)		(MVEBU_REGS_BASE + 0x640 + (cpu * 4))
#define MVEBU_CCU_CPU_UN_RESET(cpu)	(MVEBU_REGS_BASE + 0x650 + (cpu * 4))

#define MPIDR_CPU_GET(mpidr)		((mpidr) & MPIDR_CPU_MASK)
#define MPIDR_CLUSTER_GET(mpidr)	MPIDR_AFFLVL1_VAL((mpidr))

#define MVEBU_GPIO_MASK(index)		(1 << (index % 32))
#define MVEBU_MPP_MASK(index)		(0xF << (4 * (index % 8)))
#define MVEBU_GPIO_VALUE(index, value)	(value << (index % 32))

#define MVEBU_USER_CMD_0_REG		(MVEBU_DRAM_MAC_BASE + 0x20)
#define MVEBU_USER_CMD_CH0_OFFSET	28
#define MVEBU_USER_CMD_CH0_MASK		(1 << MVEBU_USER_CMD_CH0_OFFSET)
#define MVEBU_USER_CMD_CH0_EN		(1 << MVEBU_USER_CMD_CH0_OFFSET)
#define MVEBU_USER_CMD_CS_OFFSET	24
#define MVEBU_USER_CMD_CS_MASK		(0xF << MVEBU_USER_CMD_CS_OFFSET)
#define MVEBU_USER_CMD_CS_ALL		(0xF << MVEBU_USER_CMD_CS_OFFSET)
#define MVEBU_USER_CMD_SR_OFFSET	6
#define MVEBU_USER_CMD_SR_MASK		(0x3 << MVEBU_USER_CMD_SR_OFFSET)
#define MVEBU_USER_CMD_SR_ENTER		(0x1 << MVEBU_USER_CMD_SR_OFFSET)
#define MVEBU_MC_PWR_CTRL_REG		(MVEBU_DRAM_MAC_BASE + 0x54)
#define MVEBU_MC_AC_ON_DLY_OFFSET	8
#define MVEBU_MC_AC_ON_DLY_MASK		(0xF << MVEBU_MC_AC_ON_DLY_OFFSET)
#define MVEBU_MC_AC_ON_DLY_DEF_VAR	(8 << MVEBU_MC_AC_ON_DLY_OFFSET)
#define MVEBU_MC_AC_OFF_DLY_OFFSET	4
#define MVEBU_MC_AC_OFF_DLY_MASK	(0xF << MVEBU_MC_AC_OFF_DLY_OFFSET)
#define MVEBU_MC_AC_OFF_DLY_DEF_VAR	(0xC << MVEBU_MC_AC_OFF_DLY_OFFSET)
#define MVEBU_MC_PHY_AUTO_OFF_OFFSET	0
#define MVEBU_MC_PHY_AUTO_OFF_MASK	(1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)
#define MVEBU_MC_PHY_AUTO_OFF_EN	(1 << MVEBU_MC_PHY_AUTO_OFF_OFFSET)

/* This lock synchronizes the execution of multiple AP cores with the MSS */
DEFINE_BAKERY_LOCK(pm_sys_lock);

/* Weak definitions may be overridden by a specific board */
#pragma weak plat_marvell_get_pm_cfg

/* AP806 CPU power down/power up definitions */
enum CPU_ID {
	CPU0,
	CPU1,
	CPU2,
	CPU3
};

#define REG_WR_VALIDATE_TIMEOUT		(2000)
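/*
 * Note: REG_WR_VALIDATE_TIMEOUT bounds the register polling loops below as an
 * iteration count (one MMIO read per iteration), not as a time in cycles or
 * micro-seconds.
 */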

#define FEATURE_DISABLE_STATUS_REG			\
			(MVEBU_REGS_BASE + 0x6F8230)
#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET	4
#define FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK		\
			(0x1 << FEATURE_DISABLE_STATUS_CPU_CLUSTER_OFFSET)

#ifdef MVEBU_SOC_AP807
	#define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET		1
	#define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET	0
#else
	#define PWRC_CPUN_CR_PWR_DN_RQ_OFFSET		0
	#define PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET	31
#endif

#define PWRC_CPUN_CR_REG(cpu_id)		\
			(MVEBU_REGS_BASE + 0x680000 + (cpu_id * 0x10))
#define PWRC_CPUN_CR_PWR_DN_RQ_MASK		\
			(0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET)
#define PWRC_CPUN_CR_ISO_ENABLE_OFFSET		16
#define PWRC_CPUN_CR_ISO_ENABLE_MASK		\
			(0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)
#define PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK	\
			(0x1U << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)

#define CCU_B_PRCRN_REG(cpu_id)			\
			(MVEBU_REGS_BASE + 0x1A50 + \
			((cpu_id / 2) * (0x400)) + ((cpu_id % 2) * 4))
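/*
 * One CCU_B_PRCRN register per CPU: each cluster has a 0x400-byte register
 * block, and the two CPUs within a cluster use consecutive 32-bit registers.
 */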
#define CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET	0
#define CCU_B_PRCRN_CPUPORESET_STATIC_MASK	\
			(0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)

/* power switch fingers */
#define AP807_PWRC_LDO_CR0_REG			\
			(MVEBU_REGS_BASE + 0x680000 + 0x100)
#define AP807_PWRC_LDO_CR0_OFFSET		16
#define AP807_PWRC_LDO_CR0_MASK			\
			(0xff << AP807_PWRC_LDO_CR0_OFFSET)
#define AP807_PWRC_LDO_CR0_VAL			0xfc
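/*
 * AP807_PWRC_LDO_CR0_VAL is written into the 8-bit LDO control field during
 * AP807 CPU power-up to activate two power switch fingers (see
 * plat_marvell_cpu_powerup() below).
 */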

/*
 * Power down CPU:
 * Used to reduce power consumption and to avoid an unnecessary rise in SoC
 * temperature.
 */
static int plat_marvell_cpu_powerdown(int cpu_id)
{
	uint32_t	reg_val;
	int		exit_loop = REG_WR_VALIDATE_TIMEOUT;

	INFO("Powering down CPU%d\n", cpu_id);

	/* 1. Isolation enable */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val |= 0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 2. Read and check Isolation enabled - verify bit set to 1 */
	do {
		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
		exit_loop--;
	} while (!(reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
		 exit_loop > 0);

	/* 3. Switch off CPU power */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val &= ~PWRC_CPUN_CR_PWR_DN_RQ_MASK;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 4. Read and check Switch Off - verify bit set to 0 */
	exit_loop = REG_WR_VALIDATE_TIMEOUT;
	do {
		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
		exit_loop--;
	} while (reg_val & PWRC_CPUN_CR_PWR_DN_RQ_MASK && exit_loop > 0);

	if (exit_loop <= 0)
		goto cpu_poweroff_error;

	/* 5. De-Assert power ready */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val &= ~PWRC_CPUN_CR_LDO_BYPASS_RDY_MASK;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 6. Assert CPU POR reset */
	reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
	reg_val &= ~CCU_B_PRCRN_CPUPORESET_STATIC_MASK;
	mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);

	/* 7. Read and poll to validate that the CPU POR reset is asserted */
	exit_loop = REG_WR_VALIDATE_TIMEOUT;
	do {
		reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
		exit_loop--;
	} while (reg_val & CCU_B_PRCRN_CPUPORESET_STATIC_MASK && exit_loop > 0);

	if (exit_loop <= 0)
		goto cpu_poweroff_error;

	INFO("Successfully powered down CPU%d\n", cpu_id);

	return 0;

cpu_poweroff_error:
	ERROR("ERROR: Can't power down CPU%d\n", cpu_id);
	return -1;
}

/*
 * Power down CPUs 1-3 at the early boot stage
 * to reduce power consumption and SoC temperature.
 * This is triggered by BLE prior to DDR initialization.
 *
 * Note:
 * All CPUs are powered up again by plat_marvell_cpu_powerup at the Linux boot
 * stage, which is triggered by the PSCI ops (pwr_domain_on).
 */
int plat_marvell_early_cpu_powerdown(void)
{
	uint32_t cpu_cluster_status =
		mmio_read_32(FEATURE_DISABLE_STATUS_REG) &
			     FEATURE_DISABLE_STATUS_CPU_CLUSTER_MASK;
	/* If the cpu_cluster_status bit is set,
	 * only a single cluster is present
	 */
	int cluster_count = cpu_cluster_status ? 1 : 2;

	INFO("Powering off unused CPUs\n");

	/* CPU1 is in AP806 cluster-0, which always exists, so power it down */
	if (plat_marvell_cpu_powerdown(CPU1) == -1)
		return -1;

	/*
	 * CPU2-3 are in the AP806 2nd cluster (cluster-1),
	 * which doesn't exist in dual-core systems,
	 * so check whether this is a dual-core (single cluster)
	 * or quad-core (2 clusters) device
	 */
	if (cluster_count == 2) {
		/* CPU2-3 are part of the 2nd cluster */
		if (plat_marvell_cpu_powerdown(CPU2) == -1)
			return -1;
		if (plat_marvell_cpu_powerdown(CPU3) == -1)
			return -1;
	}

	return 0;
}

/*
 * Power up CPU - part of Linux boot stage
 */
static int plat_marvell_cpu_powerup(u_register_t mpidr)
{
	uint32_t	reg_val;
	int	cpu_id = MPIDR_CPU_GET(mpidr),
		cluster = MPIDR_CLUSTER_GET(mpidr);
	int	exit_loop = REG_WR_VALIDATE_TIMEOUT;

	/* calculate the absolute CPU ID */
	cpu_id = cluster * PLAT_MARVELL_CLUSTER_CORE_COUNT + cpu_id;
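	/* e.g. with 2 cores per cluster (A8K), cluster 1 / core 0 maps to CPU2 */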

	INFO("Powering on CPU%d\n", cpu_id);

#ifdef MVEBU_SOC_AP807
	/* Activate 2 power switch fingers */
	reg_val = mmio_read_32(AP807_PWRC_LDO_CR0_REG);
	reg_val &= ~(AP807_PWRC_LDO_CR0_MASK);
	reg_val |= (AP807_PWRC_LDO_CR0_VAL << AP807_PWRC_LDO_CR0_OFFSET);
	mmio_write_32(AP807_PWRC_LDO_CR0_REG, reg_val);
	udelay(100);
#endif

	/* 1. Switch CPU power ON */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val |= 0x1 << PWRC_CPUN_CR_PWR_DN_RQ_OFFSET;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 2. Wait for CPU on, up to 100 uSec */
	udelay(100);

	/* 3. Assert power ready */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val |= 0x1U << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 4. Read & validate power ready;
	 * the read loop is used in order to generate 16 Host CPU cycles
	 */
	do {
		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
		exit_loop--;
	} while (!(reg_val & (0x1U << PWRC_CPUN_CR_LDO_BYPASS_RDY_OFFSET)) &&
		 exit_loop > 0);

	if (exit_loop <= 0)
		goto cpu_poweron_error;

	/* 5. Isolation disable */
	reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
	reg_val &= ~PWRC_CPUN_CR_ISO_ENABLE_MASK;
	mmio_write_32(PWRC_CPUN_CR_REG(cpu_id), reg_val);

	/* 6. Read and check Isolation disabled - verify bit set to 0 */
	exit_loop = REG_WR_VALIDATE_TIMEOUT;
	do {
		reg_val = mmio_read_32(PWRC_CPUN_CR_REG(cpu_id));
		exit_loop--;
	} while ((reg_val & (0x1 << PWRC_CPUN_CR_ISO_ENABLE_OFFSET)) &&
		 exit_loop > 0);

	/* 7. De-assert CPU POR reset & Core reset */
	reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
	reg_val |= 0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET;
	mmio_write_32(CCU_B_PRCRN_REG(cpu_id), reg_val);

	/* 8. Read & Validate CPU POR reset */
	exit_loop = REG_WR_VALIDATE_TIMEOUT;
	do {
		reg_val = mmio_read_32(CCU_B_PRCRN_REG(cpu_id));
		exit_loop--;
	} while (!(reg_val & (0x1 << CCU_B_PRCRN_CPUPORESET_STATIC_OFFSET)) &&
		 exit_loop > 0);

	if (exit_loop <= 0)
		goto cpu_poweron_error;

	INFO("Successfully powered on CPU%d\n", cpu_id);

	return 0;

cpu_poweron_error:
	ERROR("ERROR: Can't power up CPU%d\n", cpu_id);
	return -1;
}

static int plat_marvell_cpu_on(u_register_t mpidr)
{
	int cpu_id;
	int cluster;

	/* Set barrier */
	dsbsy();

	/* Get cpu number - use CPU ID */
	cpu_id = MPIDR_CPU_GET(mpidr);

	/* Get cluster number - use affinity level 1 */
	cluster = MPIDR_CLUSTER_GET(mpidr);

	/* Set CPU private UID */
	mmio_write_32(MVEBU_REGS_BASE + MVEBU_PRIVATE_UID_REG, cluster + 0x4);

	/* Set the cpu start address to BL1 entry point (align to 0x10000) */
	mmio_write_32(MVEBU_CCU_RVBAR(cpu_id),
		      PLAT_MARVELL_CPU_ENTRY_ADDR >> 16);
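	/* The RVBAR register takes the address in 64KB units, hence the shift */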

	/* Get the cpu out of reset */
	mmio_write_32(MVEBU_CCU_CPU_UN_RESET(cpu_id), 0x10001);

	return 0;
}

/*****************************************************************************
 * A8K handler called to check the validity of the power state
 * parameter.
 *****************************************************************************
 */
static int a8k_validate_power_state(unsigned int power_state,
			    psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Sanity check the requested state */
	if (pstate == PSTATE_TYPE_STANDBY) {
		/*
		 * It's possible to enter standby only at power level 0;
		 * ignore any other power level.
		 */
		if (pwr_lvl != MARVELL_PWR_LVL0)
			return PSCI_E_INVALID_PARAMS;

		req_state->pwr_domain_state[MARVELL_PWR_LVL0] =
					MARVELL_LOCAL_STATE_RET;
	} else {
		for (i = MARVELL_PWR_LVL0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] =
					MARVELL_LOCAL_STATE_OFF;
	}

	/*
	 * We expect the 'state id' to be zero.
	 */
	if (psci_get_pstate_id(power_state))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*****************************************************************************
 * A8K handler called when a CPU is about to enter standby.
 *****************************************************************************
 */
static void a8k_cpu_standby(plat_local_state_t cpu_state)
{
	if (!is_pm_fw_running()) {
		ERROR("%s: needs to be implemented\n", __func__);
		panic();
	}
}

/*****************************************************************************
 * A8K handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 *****************************************************************************
 */
static int a8k_pwr_domain_on(u_register_t mpidr)
{
	/* Power up CPU (CPUs 1-3 are powered off at the start of BLE) */
	plat_marvell_cpu_powerup(mpidr);

	if (is_pm_fw_running()) {
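		/*
		 * Linear core index for the MSS IPC channel:
		 * core ID plus cluster ID times two cores per cluster.
		 */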
		unsigned int target =
				((mpidr & 0xFF) + (((mpidr >> 8) & 0xFF) * 2));

		/*
		 * pm system synchronization - used to synchronize
		 * multiple core access to MSS
		 */
		bakery_lock_get(&pm_sys_lock);

		/* send CPU ON IPC Message to MSS */
		mss_pm_ipc_msg_send(target, PM_IPC_MSG_CPU_ON, 0);

		/* trigger IPC message to MSS */
		mss_pm_ipc_msg_trigger();

		/* pm system synchronization */
		bakery_lock_release(&pm_sys_lock);

		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_ON | target);
	} else {
		/* proprietary CPU ON execution flow */
		plat_marvell_cpu_on(mpidr);
	}

	return 0;
}

/*****************************************************************************
 * A8K handler called to validate the entry point.
 *****************************************************************************
 */
static int a8k_validate_ns_entrypoint(uintptr_t entrypoint)
{
	return PSCI_E_SUCCESS;
}

/*****************************************************************************
 * A8K handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 *****************************************************************************
 */
static void a8k_pwr_domain_off(const psci_power_state_t *target_state)
{
	if (is_pm_fw_running()) {
		unsigned int idx = plat_my_core_pos();

		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();

		/* pm system synchronization - used to synchronize multiple
		 * core access to MSS
		 */
		bakery_lock_get(&pm_sys_lock);

		/* send CPU OFF IPC Message to MSS */
		mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_OFF, target_state);

		/* trigger IPC message to MSS */
		mss_pm_ipc_msg_trigger();

		/* pm system synchronization */
		bakery_lock_release(&pm_sys_lock);

		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_OFF);
	} else {
		INFO("%s: is not supported without SCP\n", __func__);
	}
}

/* Get PM config to power off the SoC */
void *plat_marvell_get_pm_cfg(void)
{
	return NULL;
}

/*
 * This function should be called on restore from the
 * "suspend to RAM" state, when the execution flow
 * has to bypass the BootROM image-to-RAM copy and thus speed up
 * the system recovery
 *
 */
static void plat_marvell_exit_bootrom(void)
{
	marvell_exit_bootrom(PLAT_MARVELL_TRUSTED_ROM_BASE);
}

/*
 * Prepare for the power off of the system via GPIO
 */
static void plat_marvell_power_off_gpio(struct power_off_method *pm_cfg,
					register_t *gpio_addr,
					register_t *gpio_data)
{
	unsigned int gpio;
	unsigned int idx;
	unsigned int shift;
	unsigned int reg;
	unsigned int addr;
	gpio_info_t *info;
	unsigned int tog_bits;

	assert((pm_cfg->cfg.gpio.pin_count < PMIC_GPIO_MAX_NUMBER) &&
	       (pm_cfg->cfg.gpio.step_count < PMIC_GPIO_MAX_TOGGLE_STEP));

	/* Prepare GPIOs for PMIC */
	for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
		info = &pm_cfg->cfg.gpio.info[gpio];
		/* Set PMIC GPIO to output mode */
		reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT_EN(
				   info->cp_index, info->gpio_index));
		mmio_write_32(MVEBU_CP_GPIO_DATA_OUT_EN(
			      info->cp_index, info->gpio_index),
			      reg & ~MVEBU_GPIO_MASK(info->gpio_index));

		/* Set the appropriate MPP to GPIO mode */
		reg = mmio_read_32(MVEBU_PM_MPP_REGS(info->cp_index,
						     info->gpio_index));
		mmio_write_32(MVEBU_PM_MPP_REGS(info->cp_index,
						info->gpio_index),
			reg & ~MVEBU_MPP_MASK(info->gpio_index));
	}

	/* Wait for the MPP & GPIO pre-configuration to take effect */
	mdelay(pm_cfg->cfg.gpio.delay_ms);

	/* Toggle the GPIO values, and leave the final step to be triggered
	 * after DDR self-refresh is enabled
	 */
	for (idx = 0; idx < pm_cfg->cfg.gpio.step_count; idx++) {
		tog_bits = pm_cfg->cfg.gpio.seq[idx];

		/* The GPIOs must be within the same GPIO register,
		 * so the original value can be read via the first GPIO
		 */
		info = &pm_cfg->cfg.gpio.info[0];
		reg = mmio_read_32(MVEBU_CP_GPIO_DATA_OUT(
				   info->cp_index, info->gpio_index));
		addr = MVEBU_CP_GPIO_DATA_OUT(info->cp_index, info->gpio_index);

		for (gpio = 0; gpio < pm_cfg->cfg.gpio.pin_count; gpio++) {
			shift = pm_cfg->cfg.gpio.info[gpio].gpio_index % 32;
			if (GPIO_LOW == (tog_bits & (1 << gpio)))
				reg &= ~(1 << shift);
			else
				reg |= (1 << shift);
		}

		/* Set the GPIO register; for the last step, just store
		 * the register address and value to be applied later
		 */
		if (idx < pm_cfg->cfg.gpio.step_count - 1) {
			mmio_write_32(MVEBU_CP_GPIO_DATA_OUT(
				      info->cp_index, info->gpio_index), reg);
			mdelay(pm_cfg->cfg.gpio.delay_ms);
		} else {
			/* Save the GPIO register address and value for
			 * finishing the power down operation later
			 */
			*gpio_addr = addr;
			*gpio_data = reg;
		}
	}
}

/*
 * Prepare for the power off of the system
 */
static void plat_marvell_power_off_prepare(struct power_off_method *pm_cfg,
					   register_t *addr, register_t *data)
{
	switch (pm_cfg->type) {
	case PMIC_GPIO:
		plat_marvell_power_off_gpio(pm_cfg, addr, data);
		break;
	default:
		break;
	}
}

/*****************************************************************************
 * A8K handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 *****************************************************************************
 */
static void a8k_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	if (is_pm_fw_running()) {
		unsigned int idx;

		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();

		idx = plat_my_core_pos();

		/* pm system synchronization - used to synchronize multiple
		 * core access to MSS
		 */
		bakery_lock_get(&pm_sys_lock);

		/* send CPU Suspend IPC Message to MSS */
		mss_pm_ipc_msg_send(idx, PM_IPC_MSG_CPU_SUSPEND, target_state);

		/* trigger IPC message to MSS */
		mss_pm_ipc_msg_trigger();

		/* pm system synchronization */
		bakery_lock_release(&pm_sys_lock);

		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND);
	} else {
		uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;

		INFO("Suspending to RAM\n");

		marvell_console_runtime_end();

		/* Prevent interrupts from spuriously waking up this cpu */
		gicv2_cpuif_disable();

		mailbox[MBOX_IDX_SUSPEND_MAGIC] = MVEBU_MAILBOX_SUSPEND_STATE;
		mailbox[MBOX_IDX_ROM_EXIT_ADDR] = (uintptr_t)&plat_marvell_exit_bootrom;

#if PLAT_MARVELL_SHARED_RAM_CACHED
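		/*
		 * Flush both mailbox entries (suspend magic and BootROM exit
		 * address) so that they reach memory before the LLC is
		 * disabled below.
		 */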
		flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
		MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
		2 * sizeof(uintptr_t));
#endif
		/* Flush and disable LLC before going off-power */
		llc_disable(0);

		isb();
		/*
		 * Do not halt here!
		 * The function must return to allow the caller function
		 * psci_power_up_finish() to do the proper context saving and
		 * to release the CPU lock.
		 */
	}
}

/*****************************************************************************
 * A8K handler called when a power domain has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from.
 *****************************************************************************
 */
static void a8k_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* arch specific configuration */
	marvell_psci_arch_init(0);

	/* Interrupt initialization */
	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();

	if (is_pm_fw_running()) {
		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_ON_FINISH);
	}
}

/*****************************************************************************
 * A8K handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 *****************************************************************************
 */
static void a8k_pwr_domain_suspend_finish(
					const psci_power_state_t *target_state)
{
	if (is_pm_fw_running()) {
		/* arch specific configuration */
		marvell_psci_arch_init(0);

		/* Interrupt initialization */
		gicv2_cpuif_enable();

		/* trace message */
		PM_TRACE(TRACE_PWR_DOMAIN_SUSPEND_FINISH);
	} else {
		uintptr_t *mailbox = (void *)PLAT_MARVELL_MAILBOX_BASE;

		/* Only the primary CPU requires platform init */
		if (!plat_my_core_pos()) {
			/* Initialize the console to provide
			 * early debug support
			 */
			marvell_console_runtime_init();

			bl31_plat_arch_setup();
			marvell_bl31_platform_setup();
			/*
			 * Remove the suspend to RAM marker from the mailbox
			 * so that a regular reset is treated as a cold boot
			 */
			mailbox[MBOX_IDX_SUSPEND_MAGIC] = 0;
			mailbox[MBOX_IDX_ROM_EXIT_ADDR] = 0;
#if PLAT_MARVELL_SHARED_RAM_CACHED
			flush_dcache_range(PLAT_MARVELL_MAILBOX_BASE +
			MBOX_IDX_SUSPEND_MAGIC * sizeof(uintptr_t),
			2 * sizeof(uintptr_t));
#endif
		}
	}
}

/*****************************************************************************
 * This handler is called by the PSCI implementation during the `SYSTEM_SUSPEND`
 * call to get the `power_state` parameter. This allows the platform to encode
 * the appropriate State-ID field within the `power_state` parameter which can
 * be utilized in `pwr_domain_suspend()` to suspend to system affinity level.
 *****************************************************************************
 */
static void a8k_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	/* lower affinities use PLAT_MAX_OFF_STATE */
	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
}

static void
__dead2 a8k_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
{
	struct power_off_method *pm_cfg;
	unsigned int srcmd;
	unsigned int sdram_reg;
	register_t gpio_data = 0, gpio_addr = 0;

	if (is_pm_fw_running()) {
		psci_power_down_wfi();
		panic();
	}

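	/*
	 * Note: pm_cfg comes from plat_marvell_get_pm_cfg(), a weak stub that
	 * returns NULL unless the board provides its own configuration.
	 */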
	pm_cfg = (struct power_off_method *)plat_marvell_get_pm_cfg();

	/* Prepare for power off */
	plat_marvell_power_off_prepare(pm_cfg, &gpio_addr, &gpio_data);

	/* First step to enable DDR self-refresh
	 * to keep the data during suspend
	 */
	mmio_write_32(MVEBU_MC_PWR_CTRL_REG, 0x8C1);
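	/*
	 * 0x8C1 corresponds to MVEBU_MC_AC_ON_DLY_DEF_VAR |
	 * MVEBU_MC_AC_OFF_DLY_DEF_VAR | MVEBU_MC_PHY_AUTO_OFF_EN, i.e. the
	 * default AC on/off delays with automatic PHY power-off enabled.
	 */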

	/* Save DDR self-refresh second step register
	 * and value to be issued later
	 */
	sdram_reg = MVEBU_USER_CMD_0_REG;
	srcmd = mmio_read_32(sdram_reg);
	srcmd &= ~(MVEBU_USER_CMD_CH0_MASK | MVEBU_USER_CMD_CS_MASK |
		 MVEBU_USER_CMD_SR_MASK);
	srcmd |= (MVEBU_USER_CMD_CH0_EN | MVEBU_USER_CMD_CS_ALL |
		 MVEBU_USER_CMD_SR_ENTER);
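	/*
	 * The composed user command selects DRAM channel 0 and all chip
	 * selects, and requests self-refresh entry; the actual write is
	 * deferred to the assembly sequence below so that no further DRAM
	 * access happens once it is issued.
	 */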

	/*
	 * The wait for DRAM is done using register accesses only.
	 * At this stage any access to DRAM (e.g. a procedure call) would
	 * release it from the self-refresh mode
	 */
	__asm__ volatile (
		/* Align to a cache line */
		"	.balign 64\n\t"

		/* Enter self refresh */
		"	str %[srcmd], [%[sdram_reg]]\n\t"

		/*
		 * Wait 100 cycles for DDR to enter self refresh, by
		 * doing 50 times two instructions.
		 */
		"	mov x1, #50\n\t"
		"1:	subs x1, x1, #1\n\t"
		"	bne 1b\n\t"

		/* Issue the command to trigger the SoC power off */
		"	str	%[gpio_data], [%[gpio_addr]]\n\t"

		/* Trap the processor */
		"	b .\n\t"
		: : [srcmd] "r" (srcmd), [sdram_reg] "r" (sdram_reg),
		    [gpio_addr] "r" (gpio_addr),  [gpio_data] "r" (gpio_data)
		: "x1");

	panic();
}

/*****************************************************************************
 * A8K handlers to shutdown/reboot the system
 *****************************************************************************
 */

/* Set a weak stub for platforms that don't configure system power off */
#pragma weak system_power_off
int system_power_off(void)
{
	return 0;
}

static void __dead2 a8k_system_off(void)
{
	/* Call the platform specific system power off function */
	system_power_off();

	/* board doesn't have a system off implementation */
	ERROR("%s:  needs to be implemented\n", __func__);
	panic();
}

void plat_marvell_system_reset(void)
{
	mmio_write_32(MVEBU_RFU_BASE + MVEBU_RFU_GLOBL_SW_RST, 0x0);
}

static void __dead2 a8k_system_reset(void)
{
	plat_marvell_system_reset();

	/* we shouldn't get to this point */
	panic();
}

/*****************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform layer will take care of registering the handlers with PSCI.
 *****************************************************************************
 */
const plat_psci_ops_t plat_arm_psci_pm_ops = {
	.cpu_standby = a8k_cpu_standby,
	.pwr_domain_on = a8k_pwr_domain_on,
	.pwr_domain_off = a8k_pwr_domain_off,
	.pwr_domain_suspend = a8k_pwr_domain_suspend,
	.pwr_domain_on_finish = a8k_pwr_domain_on_finish,
	.get_sys_suspend_power_state = a8k_get_sys_suspend_power_state,
	.pwr_domain_suspend_finish = a8k_pwr_domain_suspend_finish,
	.pwr_domain_pwr_down_wfi = a8k_pwr_domain_pwr_down_wfi,
	.system_off = a8k_system_off,
	.system_reset = a8k_system_reset,
	.validate_power_state = a8k_validate_power_state,
	.validate_ns_entrypoint = a8k_validate_ns_entrypoint
};