/*
 * Copyright (c) 2014-2015, Linaro Ltd and Contributors. All rights reserved.
 * Copyright (c) 2014-2015, Hisilicon Ltd and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <console.h>
#include <debug.h>
#include <partitions.h>
#include <platform.h>
#include <platform_def.h>
#include <string.h>
#include <mmio.h>
#include <hi6220.h>
#include <hi6553.h>

#define ACPU_FREQ_MAX_NUM		5
#define ACPU_OPP_NUM			7

#define ACPU_VALID_VOLTAGE_MAGIC	(0x5A5AC5C5)

#define ACPU_WAIT_TIMEOUT		(200)
#define ACPU_WAIT_FOR_WFI_TIMOUT	(2000)
#define ACPU_DFS_STATE_CNT		(0x10000)

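/*
 * Layout of the frequency/voltage table shared through SRAM at
 * MEMORY_AXI_ACPU_FREQ_VOL_ADDR. A table is marked valid by writing
 * ACPU_VALID_VOLTAGE_MAGIC to 'magic' once the DVFS switch has completed
 * (see acpu_dvfs_target() below).
 */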
struct acpu_dvfs_sram_stru {
	unsigned int magic;
	unsigned int support_freq_num;
	unsigned int support_freq_max;
	unsigned int start_prof;
	unsigned int vol[ACPU_OPP_NUM];
};

struct acpu_volt_cal_para {
	unsigned int freq;
	unsigned int ul_vol;
	unsigned int dl_vol;
	unsigned int core_ref_hpm;
};

struct ddr_volt_cal_para {
	unsigned int freq;
	unsigned int ul_vol;
	unsigned int dl_vol;
	unsigned int ddr_ref_hpm;
};

struct acpu_dvfs_opp_para {
	unsigned int freq;
	unsigned int acpu_clk_profile0;
	unsigned int acpu_clk_profile1;
	unsigned int acpu_vol_profile;
	unsigned int acpu_pll_freq;
	unsigned int acpu_pll_frac;
};

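/*
 * Candidate maximum ACPU frequencies in kHz. init_acpu_dvfs() currently
 * publishes entry 8 (1.6GHz) via the ACPU_CHIP_MAX_FREQ register;
 * presumably the index would normally come from eFuse data.
 */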
unsigned int efuse_acpu_freq[] = {
	1200000, 1250000, 1300000, 1350000,
	1400000, 1450000, 1500000, 1550000,
	1600000, 1650000, 1700000, 1750000,
	1800000, 1850000, 1900000, 1950000,
};

struct acpu_dvfs_opp_para hi6220_acpu_profile[] = {
	{ 208000,  0x61E5, 0x022, 0x3A, 0x5220102B, 0x05555555 },
	{ 432000,  0x10A6, 0x121, 0x3A, 0x5120102D, 0x10000005 },
	{ 729000,  0x2283, 0x100, 0x4A, 0x51101026, 0x10000005 },
	{ 960000,  0x1211, 0x100, 0x5B, 0x51101032, 0x10000005 },
	{ 1200000, 0x1211, 0x100, 0x6B, 0x5110207D, 0x10000005 },
	{ 1400000, 0x1211, 0x100, 0x6B, 0x51101049, 0x10000005 },
	{ 1500000, 0x1211, 0x100, 0x6B, 0x51101049, 0x10000005 },
};

struct acpu_dvfs_opp_para *acpu_dvfs_profile = hi6220_acpu_profile;
struct acpu_dvfs_sram_stru *acpu_dvfs_sram_buf =
	(struct acpu_dvfs_sram_stru *)MEMORY_AXI_ACPU_FREQ_VOL_ADDR;

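/*
 * Small register helpers: write_reg_mask() performs a read-modify-write
 * that clears the bits in 'mask' and ORs in 'val' (already shifted into
 * position); read_reg_mask() returns the field selected by 'mask' at bit
 * position 'offset'.
 */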
static inline void write_reg_mask(uintptr_t addr, uint32_t val, uint32_t mask)
{
	uint32_t reg;

	reg = mmio_read_32(addr);
	reg = (reg & ~(mask)) | val;
	mmio_write_32(addr, reg);
}

static inline uint32_t read_reg_mask(uintptr_t addr, uint32_t mask,
				     uint32_t offset)
{
	uint32_t reg;

	reg = mmio_read_32(addr);
	reg &= (mask << offset);
	return (reg >> offset);
}

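/*
 * Steps 1-3 of the frequency change sequence: route the ACPU cluster clock
 * to the (divided) system PLL so that the ACPU PLL can be reprogrammed.
 * Returns 0 on success, -1 on a polling timeout.
 */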
static int acpu_dvfs_syspll_cfg(unsigned int prof_id)
{
	uint32_t reg0 = 0;
	uint32_t count = 0;
	uint32_t clk_div_status = 0;

	/*
	 * step 1:
	 * - ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x3;
	 * - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x1;
	 */
	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x3 << 12, 0x3 << 12);
	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x1 << 4, 0x1 << 4);

	/*
	 * step 2:
	 * - ACPUSYSPLLCFG.acpu_syspll_div_cfg:
	 *   208MHz, set to 0x5;
	 *   500MHz, set to 0x2;
	 *   other OPPs, set to 0x1
	 */
	if (prof_id == 0)
		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x5 << 0, 0x7 << 0);
	else if (prof_id == 1)
		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x2 << 0, 0x7 << 0);
	else
		write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x1 << 0, 0x7 << 0);

	/*
	 * step 3:
	 * - Polling ACPU_SC_CPU_STAT.clk_div_status_vd == 0x3;
	 * - ACPU_SC_VD_CTRL.tune_en_dif = 0
	 * - ACPU_SC_VD_CTRL.tune_en_int = 0
	 * - PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg = 0x1
	 * - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x1
	 */
	clk_div_status = 0x3;
	do {
		reg0 = read_reg_mask(ACPU_SC_CPU_STAT, 0x3, 20);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: clk div status timeout!\n", __func__);
			return -1;
		}
	} while (clk_div_status != reg0);

	write_reg_mask(ACPU_SC_VD_CTRL, 0x0, (0x1 << 0) | (0x1 << 11));
	write_reg_mask(PMCTRL_ACPUCLKDIV, 0x1 << 8, 0x3 << 8);
	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x1 << 0, 0x1 << 0);

	return 0;
}

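/*
 * Program the CPU external bus and ACPU/DDR clock dividers for the target
 * profile, and report the divider values back to the caller so it can poll
 * the corresponding status fields.
 */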
static void acpu_dvfs_clk_div_cfg(unsigned int prof_id,
				  unsigned int *cpuext_cfg,
				  unsigned int *acpu_ddr_cfg)
{
	if (prof_id == 0) {
		write_reg_mask(PMCTRL_ACPUCLKDIV,
			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
			(0x1 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
		*cpuext_cfg = 0x1;
		*acpu_ddr_cfg = 0x1;
	} else if (prof_id == 1) {
		write_reg_mask(PMCTRL_ACPUCLKDIV,
			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
			(0x1 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
		*cpuext_cfg = 0x1;
		*acpu_ddr_cfg = 0x1;
	} else {
		/* DDR has not been initialized yet */
		write_reg_mask(PMCTRL_ACPUCLKDIV,
			(0x1 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
			(0x0 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START),
			(0x3 << SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_cfg_START) |
			(0x3 << SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_cfg_START));
		*cpuext_cfg = 0x1;
		*acpu_ddr_cfg = 0x0;
	}
}

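/*
 * Raise the ACPU frequency from 'cur_prof' to 'tar_prof': park the cluster
 * on the system PLL, reprogram and re-enable the ACPU PLL, raise the
 * voltage first, then reconfigure the dividers and switch back to the ACPU
 * PLL. Returns 0 on success, -1 on any polling timeout.
 */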
static int acpu_dvfs_freq_ascend(unsigned int cur_prof, unsigned int tar_prof)
{
	unsigned int reg0 = 0;
	unsigned int reg1 = 0;
	unsigned int reg2 = 0;
	unsigned int count = 0;
	unsigned int cpuext_cfg_val = 0;
	unsigned int acpu_ddr_cfg_val = 0;
	int ret = 0;

	/*
	 * step 1:
	 * - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x3;
	 * - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x1;
	 *
	 * step 2:
	 * - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x5 (208MHz)
	 * - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x2 (500MHz)
	 * - PMCTRL_ACPUSYSPLLCFG.acpu_syspll_div_cfg = 0x1 (Other OPPs)
	 *
	 * step 3:
	 * - ACPU_SC_CPU_STAT.clk_div_status_vd = 0x3;
	 * - ACPU_SC_VD_CTRL.tune_en_dif = 0x0;
	 * - ACPU_SC_VD_CTRL.tune_en_int = 0x0;
	 * - PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg = 0x1;
	 * - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x1
	 */
	ret = acpu_dvfs_syspll_cfg(cur_prof);
	if (ret)
		return -1;

	/*
	 * step 4:
	 * - Polling PMCTRL_ACPUPLLSEL.syspll_sw_stat == 0x1
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
				     SOC_PMCTRL_ACPUPLLSEL_syspll_sw_stat_START);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: syspll sw status timeout\n", __func__);
			return -1;
		}
	} while (0x1 != reg0);

	/* Enable VD functionality if > 800MHz */
	if (acpu_dvfs_profile[tar_prof].freq > 800000) {

		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
			       HPM_OSC_DIV_VAL, HPM_OSC_DIV_MASK);

		/*
		 * step 5:
		 * - ACPU_SC_VD_HPM_CTRL.hpm_dly_exp = 0xC7A;
		 * - ACPU_SC_VD_MASK_PATTERN_CTRL[12:0] = 0xCCB;
		 */
		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
			       HPM_DLY_EXP_VAL, HPM_DLY_EXP_MASK);
		write_reg_mask(ACPU_SC_VD_MASK_PATTERN_CTRL,
			       ACPU_SC_VD_MASK_PATTERN_VAL,
			       ACPU_SC_VD_MASK_PATTERN_MASK);

		/*
		 * step 6:
		 * - ACPU_SC_VD_DLY_TABLE0_CTRL = 0x1FFF;
		 * - ACPU_SC_VD_DLY_TABLE1_CTRL = 0x1FFFFFF;
		 * - ACPU_SC_VD_DLY_TABLE2_CTRL = 0x7FFFFFFF;
		 * - ACPU_SC_VD_DLY_FIXED_CTRL = 0x1;
		 */
		mmio_write_32(ACPU_SC_VD_DLY_TABLE0_CTRL, 0x1FFF);
		mmio_write_32(ACPU_SC_VD_DLY_TABLE1_CTRL, 0x1FFFFFF);
		mmio_write_32(ACPU_SC_VD_DLY_TABLE2_CTRL, 0x7FFFFFFF);
		mmio_write_32(ACPU_SC_VD_DLY_FIXED_CTRL, 0x1);

		/*
		 * step 7:
		 * - ACPU_SC_VD_CTRL.shift_table0 = 0x1;
		 * - ACPU_SC_VD_CTRL.shift_table1 = 0x3;
		 * - ACPU_SC_VD_CTRL.shift_table2 = 0x5;
		 * - ACPU_SC_VD_CTRL.shift_table3 = 0x6;
		 *
		 * step 8:
		 * - ACPU_SC_VD_CTRL.tune = 0x7;
		 */
		write_reg_mask(ACPU_SC_VD_CTRL,
			       ACPU_SC_VD_SHIFT_TABLE_TUNE_VAL,
			       ACPU_SC_VD_SHIFT_TABLE_TUNE_MASK);
	}

	/* step 9: ACPUPLLCTRL.acpupll_en_cfg = 0x0 */
	write_reg_mask(PMCTRL_ACPUPLLCTRL, 0x0,
		       0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);

	/* step 10: set PMCTRL_ACPUPLLFREQ and PMCTRL_ACPUPLLFRAC */
	mmio_write_32(PMCTRL_ACPUPLLFREQ,
		      acpu_dvfs_profile[tar_prof].acpu_pll_freq);
	mmio_write_32(PMCTRL_ACPUPLLFRAC,
		      acpu_dvfs_profile[tar_prof].acpu_pll_frac);

	/*
	 * step 11:
	 * - wait for 1us;
	 * - PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x1
	 */
	count = 0;
	while (count < ACPU_WAIT_TIMEOUT) {
		count++;
	}
	write_reg_mask(PMCTRL_ACPUPLLCTRL,
		       0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START,
		       0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);

	/* step 12: PMCTRL_ACPUVOLPMUADDR = 0x100da */
	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0x100da);

	/*
	 * step 13:
	 * - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x13 (208MHz);
	 * - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x13 (500MHz);
	 * - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x20 (798MHz);
	 * - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x3A (1300MHz);
	 * - PMCTRL_ACPUDESTVOL.acpu_dest_vol = 0x3A (1500MHz);
	 */
	write_reg_mask(PMCTRL_ACPUDESTVOL,
		       acpu_dvfs_profile[tar_prof].acpu_vol_profile,
		       ((0x1 << (SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_END + 1)) - 1));

	/*
	 * step 14:
	 * - Polling PMCTRL_ACPUDESTVOL.acpu_vol_using == ACPUDESTVOL.acpu_dest_vol
	 * - Polling ACPUVOLTIMEOUT.acpu_vol_timeout == 0x1
	 * - Config PMCTRL_ACPUCLKDIV.acpu_ddr_clk_div_cfg
	 * - Config ACPUCLKDIV.cpuext_clk_div_cfg;
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
				     SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_START);
		reg1 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
				     SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START);
		reg2 = read_reg_mask(PMCTRL_ACPUVOLTTIMEOUT, 0x1,
				     SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_START);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: acpu destvol cfg timeout.\n", __func__);
			return -1;
		}
	} while ((reg0 != reg1) || (0x1 != reg2));

	acpu_dvfs_clk_div_cfg(tar_prof, &cpuext_cfg_val, &acpu_ddr_cfg_val);

	/*
	 * step 15:
	 * - Polling PMCTRL_ACPUCLKDIV.cpuext_clk_div_stat;
	 * - Polling ACPUCLKDIV.acpu_ddr_clk_div_stat;
	 * - ACPUPLLCTRL.acpupll_timeout = 0x1;
	 * - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x0;
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
				     SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_START);
		reg1 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
				     SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_START);
		reg2 = read_reg_mask(PMCTRL_ACPUPLLCTRL, 0x1,
				     SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_START);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: acpu clk div cfg timeout.\n", __func__);
			return -1;
		}
	} while ((cpuext_cfg_val != reg1) ||
		 (acpu_ddr_cfg_val != reg0) ||
		 (0x1 != reg2));

	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x0,
		       0x1 << SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_START);

	/*
	 * step 16:
	 * - Polling PMCTRL_ACPUPLLSEL.acpupll_sw_stat == 0x1;
	 * - ACPU_SC_VD_CTRL.force_clk_en = 0x0;
	 * - ACPU_SC_VD_CTRL.clk_dis_cnt_en = 0x0;
	 * - ACPU_SC_VD_CTRL.calibrate_en_ini = 0x0;
	 * - ACPU_SC_VD_CTRL.calibrate_en_dif = 0x0;
	 * - ACPU_SC_VD_CTRL.div_en_dif = 0x1;
	 * - ACPU_SC_VD_CTRL.tune_en_int = 0x1;
	 * - ACPU_SC_VD_CTRL.tune_en_dif = 0x1;
	 * - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x0;
	 * - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x0;
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
				     SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_START);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: acpu pll sw status timeout.\n", __func__);
			return -1;
		}
	} while (0x1 != reg0);

	if (acpu_dvfs_profile[tar_prof].freq > 800000)
		write_reg_mask(ACPU_SC_VD_CTRL,
			       ACPU_SC_VD_EN_ASIC_VAL, ACPU_SC_VD_EN_MASK);

	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x0,
		       (0x3 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_sw_START) |
		       (0x1 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_START));

	return 0;
}

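/*
 * Lower the ACPU frequency from 'cur_prof' to 'tar_prof'. Mirrors
 * acpu_dvfs_freq_ascend(), except that the clock switch and divider update
 * happen before the voltage is dropped. Returns 0 on success, -1 on any
 * polling timeout.
 */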
static int acpu_dvfs_freq_descend(unsigned int cur_prof, unsigned int tar_prof)
{
	unsigned int reg0 = 0;
	unsigned int reg1 = 0;
	unsigned int reg2 = 0;
	unsigned int count = 0;
	unsigned int cpuext_cfg_val = 0;
	unsigned int acpu_ddr_cfg_val = 0;
	int ret = 0;

	ret = acpu_dvfs_syspll_cfg(tar_prof);
	if (ret)
		return -1;

	/*
	 * step 4:
	 * - Polling PMCTRL_ACPUPLLSEL.syspll_sw_stat == 0x1
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1, 2);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: syspll sw status timeout.\n", __func__);
			return -1;
		}
	} while (0x1 != reg0);

	/*
	 * step 5:
	 * - PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x0
	 */
	write_reg_mask(PMCTRL_ACPUPLLCTRL, 0x0, 0x1 << 0);

	/*
	 * step 6:
	 * - Config PMCTRL_ACPUPLLFREQ and ACPUPLLFRAC
	 */
	mmio_write_32(PMCTRL_ACPUPLLFREQ, acpu_dvfs_profile[tar_prof].acpu_pll_freq);
	mmio_write_32(PMCTRL_ACPUPLLFRAC, acpu_dvfs_profile[tar_prof].acpu_pll_frac);

	/*
	 * step 7:
	 * - Wait 1us;
	 * - Config PMCTRL_ACPUPLLCTRL.acpupll_en_cfg = 0x1
	 */
	count = 0;
	while (count < ACPU_WAIT_TIMEOUT) {
		count++;
	}

	write_reg_mask(PMCTRL_ACPUPLLCTRL,
		       0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START,
		       0x1 << SOC_PMCTRL_ACPUPLLCTRL_acpupll_en_cfg_START);

	/* Enable VD functionality if > 800MHz */
	if (acpu_dvfs_profile[tar_prof].freq > 800000) {

		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
			       HPM_OSC_DIV_VAL, HPM_OSC_DIV_MASK);

		/*
		 * step 9:
		 * - ACPU_SC_VD_HPM_CTRL.hpm_dly_exp = 0xC7A;
		 * - ACPU_SC_VD_MASK_PATTERN_CTRL[12:0] = 0xCCB;
		 */
		write_reg_mask(ACPU_SC_VD_HPM_CTRL,
			       HPM_DLY_EXP_VAL, HPM_DLY_EXP_MASK);
		write_reg_mask(ACPU_SC_VD_MASK_PATTERN_CTRL,
			       ACPU_SC_VD_MASK_PATTERN_VAL,
			       ACPU_SC_VD_MASK_PATTERN_MASK);

		/*
		 * step 10:
		 * - ACPU_SC_VD_DLY_TABLE0_CTRL = 0x1FFF;
		 * - ACPU_SC_VD_DLY_TABLE1_CTRL = 0x1FFFFFF;
		 * - ACPU_SC_VD_DLY_TABLE2_CTRL = 0x7FFFFFFF;
		 * - ACPU_SC_VD_DLY_FIXED_CTRL = 0x1;
		 */
		mmio_write_32(ACPU_SC_VD_DLY_TABLE0_CTRL, 0x1FFF);
		mmio_write_32(ACPU_SC_VD_DLY_TABLE1_CTRL, 0x1FFFFFF);
		mmio_write_32(ACPU_SC_VD_DLY_TABLE2_CTRL, 0x7FFFFFFF);
		mmio_write_32(ACPU_SC_VD_DLY_FIXED_CTRL, 0x1);

		/*
		 * step 11:
		 * - ACPU_SC_VD_CTRL.shift_table0 = 0x1;
		 * - ACPU_SC_VD_CTRL.shift_table1 = 0x3;
		 * - ACPU_SC_VD_CTRL.shift_table2 = 0x5;
		 * - ACPU_SC_VD_CTRL.shift_table3 = 0x6;
		 *
		 * step 12:
		 * - ACPU_SC_VD_CTRL.tune = 0x7;
		 */
		write_reg_mask(ACPU_SC_VD_CTRL,
			       ACPU_SC_VD_SHIFT_TABLE_TUNE_VAL,
			       ACPU_SC_VD_SHIFT_TABLE_TUNE_MASK);
	}

	/*
	 * step 13:
	 * - Polling PMCTRL_ACPUPLLCTRL.acpupll_timeout == 0x1;
	 * - PMCTRL_ACPUPLLSEL.acpu_pllsw_cfg = 0x0;
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUPLLCTRL, 0x1,
				     SOC_PMCTRL_ACPUPLLCTRL_acpupll_timeout_START);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: acpupll timeout.\n", __func__);
			return -1;
		}
	} while (0x1 != reg0);

	write_reg_mask(PMCTRL_ACPUPLLSEL, 0x0,
		       0x1 << SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_cfg_START);

	/*
	 * step 14:
	 * - Polling PMCTRL_ACPUPLLSEL.acpupll_sw_stat == 0x1;
	 * - ACPU_SC_VD_CTRL.force_clk_en = 0x0;
	 * - ACPU_SC_VD_CTRL.clk_dis_cnt_en = 0x0;
	 * - ACPU_SC_VD_CTRL.calibrate_en_ini = 0x0;
	 * - ACPU_SC_VD_CTRL.calibrate_en_dif = 0x0;
	 * - ACPU_SC_VD_CTRL.div_en_dif = 0x1;
	 * - ACPU_SC_VD_CTRL.tune_en_int = 0x1;
	 * - ACPU_SC_VD_CTRL.tune_en_dif = 0x1;
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUPLLSEL, 0x1,
				     SOC_PMCTRL_ACPUPLLSEL_acpu_pllsw_stat_START);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: acpupll sw status timeout.\n", __func__);
			return -1;
		}
	} while (0x1 != reg0);

	if (acpu_dvfs_profile[tar_prof].freq > 800000)
		write_reg_mask(ACPU_SC_VD_CTRL,
			       ACPU_SC_VD_EN_ASIC_VAL, ACPU_SC_VD_EN_MASK);

	/*
	 * step 15:
	 * - PMCTRL_ACPUSYSPLLCFG.acpu_subsys_clk_div_sw = 0x0;
	 * - ACPUSYSPLLCFG.acpu_syspll_clken_cfg = 0x0;
	 */
	write_reg_mask(PMCTRL_ACPUSYSPLLCFG, 0x0,
		       (0x3 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_subsys_clk_div_sw_START) |
		       (0x1 << SOC_PMCTRL_ACPUSYSPLLCFG_acpu_syspll_clken_cfg_START));

	/*
	 * step 16:
	 * - Polling ACPU_SC_CPU_STAT.clk_div_status_vd == 0x0;
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(ACPU_SC_CPU_STAT, 0x3,
				     ACPU_SC_CPU_STAT_CLK_DIV_STATUS_VD_SHIFT);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: clk div status timeout.\n", __func__);
			return -1;
		}
	} while (0x0 != reg0);

	acpu_dvfs_clk_div_cfg(tar_prof, &cpuext_cfg_val, &acpu_ddr_cfg_val);

	/*
	 * step 17:
	 * - Polling PMCTRL_ACPUCLKDIV.cpuext_clk_div_stat;
	 * - Polling ACPUCLKDIV.acpu_ddr_clk_div_stat;
	 * - PMCTRL_ACPUVOLPMUADDR = 0x1006C;
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
				     SOC_PMCTRL_ACPUCLKDIV_cpuext_clk_div_stat_START);
		reg1 = read_reg_mask(PMCTRL_ACPUCLKDIV, 0x3,
				     SOC_PMCTRL_ACPUCLKDIV_acpu_ddr_clk_div_stat_START);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: acpu clk div cfg timeout.\n", __func__);
			return -1;
		}
	} while ((cpuext_cfg_val != reg0) || (acpu_ddr_cfg_val != reg1));

	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0x100da);

	/*
	 * step 18:
	 * - Config PMCTRL_ACPUDESTVOL.acpu_dest_vol for the target profile;
	 */
	write_reg_mask(PMCTRL_ACPUDESTVOL,
		       acpu_dvfs_profile[tar_prof].acpu_vol_profile,
		       ((0x1 << (SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_END + 1)) - 1));

	/*
	 * step 19:
	 * - Polling PMCTRL_ACPUDESTVOL.acpu_vol_using == ACPUDESTVOL.acpu_dest_vol
	 * - ACPUVOLTIMEOUT.acpu_vol_timeout = 0x1;
	 */
	count = 0;
	do {
		reg0 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
				     SOC_PMCTRL_ACPUDESTVOL_acpu_dest_vol_START);
		reg1 = read_reg_mask(PMCTRL_ACPUDESTVOL, 0x7F,
				     SOC_PMCTRL_ACPUDESTVOL_acpu_vol_using_START);
		reg2 = read_reg_mask(PMCTRL_ACPUVOLTTIMEOUT, 0x1,
				     SOC_PMCTRL_ACPUVOLTIMEOUT_acpu_vol_timeout_START);
		if ((count++) > ACPU_DFS_STATE_CNT) {
			ERROR("%s: acpu destvol cfg timeout.\n", __func__);
			return -1;
		}
	} while ((reg0 != reg1) || (0x1 != reg2));

	return 0;
}

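/*
 * Switch the ACPU from 'curr_prof' to 'target_prof' using the ascend or
 * descend sequence as appropriate, then publish the result in the shared
 * SRAM buffer. The write to DDR_DFS_FREQ_ADDR apparently records the DDR
 * frequency for the DDR DFS code.
 */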
int acpu_dvfs_target(unsigned int curr_prof, unsigned int target_prof)
{
	int ret = 0;

	if (curr_prof == target_prof) {
		INFO("%s: target_prof equals curr_prof (%d), nothing to do\n",
		     __func__, curr_prof);
		return 0;
	}

	if ((curr_prof >= ACPU_FREQ_MAX_NUM) ||
	    (target_prof >= ACPU_FREQ_MAX_NUM)) {
		INFO("%s: invalid parameter %d %d\n",
		     __func__, curr_prof, target_prof);
		return -1;
	}

	if (target_prof > acpu_dvfs_sram_buf->support_freq_num)
		target_prof = acpu_dvfs_sram_buf->support_freq_num;

	if (target_prof < curr_prof)
		ret = acpu_dvfs_freq_descend(curr_prof, target_prof);
	else if (target_prof > curr_prof)
		ret = acpu_dvfs_freq_ascend(curr_prof, target_prof);

	if (ret) {
		ERROR("%s: acpu_dvfs_target failed!\n", __func__);
		return -1;
	}

	/* Complete acpu dvfs setting and set magic number */
	acpu_dvfs_sram_buf->start_prof = target_prof;
	acpu_dvfs_sram_buf->magic = ACPU_VALID_VOLTAGE_MAGIC;

	mmio_write_32(DDR_DFS_FREQ_ADDR, 800000);
	return 0;
}

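/*
 * Move from the boot profile (208MHz, profile 0) straight to the profile
 * matching 'support_freq_max' advertised in the shared SRAM buffer.
 */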
static int acpu_dvfs_set_freq(void)
{
	unsigned int i;
	unsigned int curr_prof;
	unsigned int target_prof;
	unsigned int max_freq = 0;

	max_freq = acpu_dvfs_sram_buf->support_freq_max;

	for (i = 0; i < acpu_dvfs_sram_buf->support_freq_num; i++) {

		if (max_freq == hi6220_acpu_profile[i].freq) {
			target_prof = i;
			break;
		}
	}

	if (i == acpu_dvfs_sram_buf->support_freq_num) {
		ERROR("%s: cannot find max freq profile\n", __func__);
		return -1;
	}

	curr_prof = 0;
	target_prof = i;

	/* if max freq is 208MHz, do nothing */
	if (curr_prof == target_prof)
		return 0;

	if (acpu_dvfs_target(curr_prof, target_prof)) {
		ERROR("%s: set acpu freq failed!\n", __func__);
		return -1;
	}

	INFO("%s: support freq num is %d\n",
	     __func__, acpu_dvfs_sram_buf->support_freq_num);
	INFO("%s: start prof is 0x%x\n",
	     __func__, acpu_dvfs_sram_buf->start_prof);
	INFO("%s: magic is 0x%x\n",
	     __func__, acpu_dvfs_sram_buf->magic);
	INFO("%s: voltage:\n", __func__);
	for (i = 0; i < acpu_dvfs_sram_buf->support_freq_num; i++)
		INFO(" - %d: 0x%x\n", i, acpu_dvfs_sram_buf->vol[i]);

	NOTICE("%s: set acpu freq success!\n", __func__);
	return 0;
}

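/*
 * Extended view of the shared SRAM table: the first four fields mirror
 * struct acpu_dvfs_sram_stru, with per-profile voltages and HPM delay
 * thresholds appended (presumably consumed by later boot stages or the OS).
 */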
struct acpu_dvfs_volt_setting {
	unsigned int magic;
	unsigned int support_freq_num;
	unsigned int support_freq_max;
	unsigned int start_prof;
	unsigned int vol[7];
	unsigned int hmp_dly_threshold[7];
};

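/*
 * Program the PMU voltage control defaults (default voltage, PMU register
 * address, step sizes and settle times) and fill the shared voltage table
 * with fixed values for the 1.2GHz maximum-frequency configuration.
 */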
static void acpu_dvfs_volt_init(void)
{
	struct acpu_dvfs_volt_setting *volt;

	/*
	 * - set default voltage;
	 * - set pmu address;
	 * - set voltage up and down step;
	 * - set voltage stable time;
	 */
	mmio_write_32(PMCTRL_ACPUDFTVOL, 0x4a);
	mmio_write_32(PMCTRL_ACPUVOLPMUADDR, 0xda);
	mmio_write_32(PMCTRL_ACPUVOLUPSTEP, 0x1);
	mmio_write_32(PMCTRL_ACPUVOLDNSTEP, 0x1);
	mmio_write_32(PMCTRL_ACPUPMUVOLUPTIME, 0x60);
	mmio_write_32(PMCTRL_ACPUPMUVOLDNTIME, 0x60);
	mmio_write_32(PMCTRL_ACPUCLKOFFCFG, 0x1000);

	volt = (void *)MEMORY_AXI_ACPU_FREQ_VOL_ADDR;
	volt->magic = 0x5a5ac5c5;
	volt->support_freq_num = 5;
	volt->support_freq_max = 1200000;
	volt->start_prof = 4;
	volt->vol[0] = 0x49;
	volt->vol[1] = 0x49;
	volt->vol[2] = 0x50;
	volt->vol[3] = 0x60;
	volt->vol[4] = 0x78;
	volt->vol[5] = 0x78;
	volt->vol[6] = 0x78;

	volt->hmp_dly_threshold[0] = 0x0;
	volt->hmp_dly_threshold[1] = 0x0;
	volt->hmp_dly_threshold[2] = 0x0;
	volt->hmp_dly_threshold[3] = 0x0e8b0e45;
	volt->hmp_dly_threshold[4] = 0x10691023;
	volt->hmp_dly_threshold[5] = 0x10691023;
	volt->hmp_dly_threshold[6] = 0x10691023;

	INFO("%s: success!\n", __func__);
}

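/*
 * Top-level ACPU DVFS setup: record the chip's maximum frequency, populate
 * the shared SRAM table, program the voltage defaults and finally switch
 * the ACPU to the highest supported profile.
 */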
void init_acpu_dvfs(void)
{
	unsigned int i = 0;

	INFO("%s: pmic version %d\n", __func__, hi6553_read_8(VERSION_REG));

	/* init parameters */
	mmio_write_32(ACPU_CHIP_MAX_FREQ, efuse_acpu_freq[8]);
	INFO("%s: ACPU_CHIP_MAX_FREQ=0x%x.\n",
	     __func__, mmio_read_32(ACPU_CHIP_MAX_FREQ));

	/* set maximum supported frequency to 1.2GHz */
	for (i = 0; i < ACPU_FREQ_MAX_NUM; i++)
		acpu_dvfs_sram_buf->vol[i] = hi6220_acpu_profile[i].acpu_vol_profile;

	acpu_dvfs_sram_buf->support_freq_num = ACPU_FREQ_MAX_NUM;
	acpu_dvfs_sram_buf->support_freq_max = 1200000;

	/* init acpu dvfs */
	acpu_dvfs_volt_init();
	acpu_dvfs_set_freq();
}