blob: 1fea69db80d5fdf6d25877855b2eed6b34a09cca [file] [log] [blame]
Jacky Baic71793c2019-11-25 14:43:26 +08001/*
Jacky Bai5277c092020-04-13 17:44:50 +08002 * Copyright 2019-2023 NXP
Jacky Baic71793c2019-11-25 14:43:26 +08003 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
Jacky Bai9c336f62019-11-25 13:19:37 +08007#include <bl31/interrupt_mgmt.h>
8#include <common/runtime_svc.h>
Jacky Baic71793c2019-11-25 14:43:26 +08009#include <lib/mmio.h>
Jacky Bai9c336f62019-11-25 13:19:37 +080010#include <lib/spinlock.h>
11#include <plat/common/platform.h>
Jacky Baic71793c2019-11-25 14:43:26 +080012
13#include <dram.h>
14
Jacky Bai9c336f62019-11-25 13:19:37 +080015#define IMX_SIP_DDR_DVFS_GET_FREQ_COUNT 0x10
16#define IMX_SIP_DDR_DVFS_GET_FREQ_INFO 0x11
17
Jacky Baic71793c2019-11-25 14:43:26 +080018struct dram_info dram_info;
19
Jacky Bai9c336f62019-11-25 13:19:37 +080020/* lock used for DDR DVFS */
21spinlock_t dfs_lock;
22
23static volatile uint32_t wfe_done;
24static volatile bool wait_ddrc_hwffc_done = true;
25static unsigned int dev_fsp = 0x1;
26
27static uint32_t fsp_init_reg[3][4] = {
28 { DDRC_INIT3(0), DDRC_INIT4(0), DDRC_INIT6(0), DDRC_INIT7(0) },
29 { DDRC_FREQ1_INIT3(0), DDRC_FREQ1_INIT4(0), DDRC_FREQ1_INIT6(0), DDRC_FREQ1_INIT7(0) },
30 { DDRC_FREQ2_INIT3(0), DDRC_FREQ2_INIT4(0), DDRC_FREQ2_INIT6(0), DDRC_FREQ2_INIT7(0) },
31};
32
33static void get_mr_values(uint32_t (*mr_value)[8])
34{
35 uint32_t init_val;
36 unsigned int i, fsp_index;
37
38 for (fsp_index = 0U; fsp_index < 3U; fsp_index++) {
39 for (i = 0U; i < 4U; i++) {
40 init_val = mmio_read_32(fsp_init_reg[fsp_index][i]);
41 mr_value[fsp_index][2*i] = init_val >> 16;
42 mr_value[fsp_index][2*i + 1] = init_val & 0xFFFF;
43 }
44 }
45}
46
Jacky Bai33300842020-05-08 17:37:24 +080047static void save_rank_setting(void)
48{
49 uint32_t i, offset;
50 uint32_t pstate_num = dram_info.num_fsp;
51
52 for (i = 0U; i < pstate_num; i++) {
53 offset = i ? (i + 1) * 0x1000 : 0U;
54 dram_info.rank_setting[i][0] = mmio_read_32(DDRC_DRAMTMG2(0) + offset);
55 if (dram_info.dram_type != DDRC_LPDDR4) {
56 dram_info.rank_setting[i][1] = mmio_read_32(DDRC_DRAMTMG9(0) + offset);
57 }
58#if !defined(PLAT_imx8mq)
59 dram_info.rank_setting[i][2] = mmio_read_32(DDRC_RANKCTL(0) + offset);
60#endif
61 }
62#if defined(PLAT_imx8mq)
63 dram_info.rank_setting[0][2] = mmio_read_32(DDRC_RANKCTL(0));
64#endif
65}
Jacky Baic71793c2019-11-25 14:43:26 +080066/* Restore the ddrc configs */
67void dram_umctl2_init(struct dram_timing_info *timing)
68{
69 struct dram_cfg_param *ddrc_cfg = timing->ddrc_cfg;
70 unsigned int i;
71
72 for (i = 0U; i < timing->ddrc_cfg_num; i++) {
73 mmio_write_32(ddrc_cfg->reg, ddrc_cfg->val);
74 ddrc_cfg++;
75 }
76
77 /* set the default fsp to P0 */
78 mmio_write_32(DDRC_MSTR2(0), 0x0);
79}
80
81/* Restore the dram PHY config */
82void dram_phy_init(struct dram_timing_info *timing)
83{
84 struct dram_cfg_param *cfg = timing->ddrphy_cfg;
85 unsigned int i;
86
87 /* Restore the PHY init config */
88 cfg = timing->ddrphy_cfg;
89 for (i = 0U; i < timing->ddrphy_cfg_num; i++) {
90 dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
91 cfg++;
92 }
93
94 /* Restore the DDR PHY CSRs */
95 cfg = timing->ddrphy_trained_csr;
96 for (i = 0U; i < timing->ddrphy_trained_csr_num; i++) {
97 dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
98 cfg++;
99 }
100
101 /* Load the PIE image */
102 cfg = timing->ddrphy_pie;
103 for (i = 0U; i < timing->ddrphy_pie_num; i++) {
104 dwc_ddrphy_apb_wr(cfg->reg, cfg->val);
105 cfg++;
106 }
107}
108
/*
 * EL3 SGI-8 IPI handler for DDR Dynamic frequency scaling.
 *
 * Runs on every secondary core when the core driving the frequency
 * change raises SGI 8 from dram_dvfs_handler().  Marks this core as
 * parked, then sits in WFE until the switch is complete.
 */
static uint64_t waiting_dvfs(uint32_t id, uint32_t flags,
		void *handle, void *cookie)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	uint32_t irq;

	/* Acknowledge and complete the SGI; IDs >= 1022 are special/spurious */
	irq = plat_ic_acknowledge_interrupt();
	if (irq < 1022U) {
		plat_ic_end_of_interrupt(irq);
	}

	/* set the WFE done status; each core owns bit (cpu_id * 8) */
	spin_lock(&dfs_lock);
	wfe_done |= (1 << cpu_id * 8);
	dsb();
	spin_unlock(&dfs_lock);

	/*
	 * Park here until the driving core clears wait_ddrc_hwffc_done
	 * and issues SEV (see dram_dvfs_handler()).
	 */
	while (1) {
		/* ddr frequency change done */
		if (!wait_ddrc_hwffc_done)
			break;

		wfe();
	}

	return 0;
}
138
/*
 * Probe the DRAM configuration left by the boot loader, record it in
 * the global dram_info, and register the EL3 SGI handler used for the
 * DDR DVFS core rendezvous.  If the boot stage left the DDRC on a
 * non-zero frequency set point, switch back to fsp 0 so the runtime
 * starts from a known state.
 */
void dram_info_init(unsigned long dram_timing_base)
{
	uint32_t ddrc_mstr, current_fsp;
	unsigned int idx = 0;
	uint32_t flags = 0;
	uint32_t rc;
	unsigned int i;

	/* Get the dram type & rank */
	ddrc_mstr = mmio_read_32(DDRC_MSTR(0));

	dram_info.dram_type = ddrc_mstr & DDR_TYPE_MASK;
	/* MSTR bits [25:24] == 0x3 means both ranks are active */
	dram_info.num_rank = ((ddrc_mstr >> 24) & ACTIVE_RANK_MASK) == 0x3 ?
		DDRC_ACTIVE_TWO_RANK : DDRC_ACTIVE_ONE_RANK;

	/* Get current fsp info */
	current_fsp = mmio_read_32(DDRC_DFIMISC(0)) & 0xf;
	dram_info.boot_fsp = current_fsp;
	dram_info.current_fsp = current_fsp;

	/* Snapshot the mode-register values of all frequency set points */
	get_mr_values(dram_info.mr_table);

	dram_info.timing_info = (struct dram_timing_info *)dram_timing_base;

	/* get the num of supported fsp (table is terminated by a zero entry) */
	for (i = 0U; i < 4U; ++i) {
		if (!dram_info.timing_info->fsp_table[i]) {
			break;
		}
		/* idx ends up as the index of the last valid entry */
		idx = i;
	}
	dram_info.num_fsp = i;

	/* save the DRAMTMG2/9 for rank to rank workaround */
	save_rank_setting();

	/*
	 * check if has bypass mode support: bypass is used when the
	 * highest supported rate is below 666 (presumably MTS — confirm
	 * against the fsp_table producer).
	 */
	if (dram_info.timing_info->fsp_table[idx] < 666) {
		dram_info.bypass_mode = true;
	} else {
		dram_info.bypass_mode = false;
	}

	/* Register the EL3 handler for DDR DVFS */
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_EL3, waiting_dvfs, flags);
	if (rc != 0) {
		panic();
	}

	/* If boot firmware left the DDRC on a non-zero fsp, go back to fsp 0 */
	if (dram_info.dram_type == DDRC_LPDDR4 && current_fsp != 0x0) {
		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);
		lpddr4_swffc(&dram_info, dev_fsp, 0x0);
		/* the device-side fsp toggles on every LPDDR4 switch */
		dev_fsp = (~dev_fsp) & 0x1;
	} else if (current_fsp != 0x0) {
		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);
		ddr4_swffc(&dram_info, 0x0);
	}
}
Jacky Bai9c336f62019-11-25 13:19:37 +0800200
201/*
202 * For each freq return the following info:
203 *
204 * r1: data rate
205 * r2: 1 + dram_core parent
206 * r3: 1 + dram_alt parent index
207 * r4: 1 + dram_apb parent index
208 *
209 * The parent indices can be used by an OS who manages source clocks to enabled
210 * them ahead of the switch.
211 *
212 * A parent value of "0" means "don't care".
213 *
214 * Current implementation of freq switch is hardcoded in
215 * plat/imx/common/imx8m/clock.c but in theory this can be enhanced to support
216 * a wide variety of rates.
217 */
218int dram_dvfs_get_freq_info(void *handle, u_register_t index)
219{
220 switch (index) {
221 case 0:
222 SMC_RET4(handle, dram_info.timing_info->fsp_table[0],
223 1, 0, 5);
224 case 1:
225 if (!dram_info.bypass_mode) {
226 SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
227 1, 0, 0);
228 }
229 SMC_RET4(handle, dram_info.timing_info->fsp_table[1],
230 2, 2, 4);
231 case 2:
232 if (!dram_info.bypass_mode) {
233 SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
234 1, 0, 0);
235 }
236 SMC_RET4(handle, dram_info.timing_info->fsp_table[2],
237 2, 3, 3);
238 case 3:
239 SMC_RET4(handle, dram_info.timing_info->fsp_table[3],
240 1, 0, 0);
241 default:
242 SMC_RET1(handle, -3);
243 }
244}
245
/*
 * SiP SMC handler for DDR DVFS.
 *
 * x1 selects the operation:
 *   IMX_SIP_DDR_DVFS_GET_FREQ_COUNT (0x10): return number of fsps.
 *   IMX_SIP_DDR_DVFS_GET_FREQ_INFO  (0x11): return clock info for fsp x2.
 *   0..3: switch the DDR to frequency set point x1.  x2 is a bitmap of
 *         online cores (bit i*8 set for core i) that must all park in
 *         WFE (via waiting_dvfs()) before the switch is performed.
 */
int dram_dvfs_handler(uint32_t smc_fid, void *handle,
		u_register_t x1, u_register_t x2, u_register_t x3)
{
	uint64_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = MPIDR_AFFLVL0_VAL(mpidr);
	unsigned int fsp_index = x1;
	uint32_t online_cores = x2;

	if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_COUNT) {
		SMC_RET1(handle, dram_info.num_fsp);
	} else if (x1 == IMX_SIP_DDR_DVFS_GET_FREQ_INFO) {
		return dram_dvfs_get_freq_info(handle, x2);
	} else if (x1 < 4) {
		/* Arm the rendezvous flag before waking the other cores */
		wait_ddrc_hwffc_done = true;
		dsb();

		/* trigger the SGI IPI to info other cores */
		for (int i = 0; i < PLATFORM_CORE_COUNT; i++) {
			if (cpu_id != i && (online_cores & (0x1 << (i * 8)))) {
				plat_ic_raise_el3_sgi(0x8, i);
			}
		}

		/* make sure all the core in WFE; each sets its own bit in wfe_done */
		online_cores &= ~(0x1 << (cpu_id * 8));
		while (1) {
			if (online_cores == wfe_done) {
				break;
			}
		}

		/* flush the L1/L2 cache */
		dcsw_op_all(DCCSW);

		if (dram_info.dram_type == DDRC_LPDDR4) {
			lpddr4_swffc(&dram_info, dev_fsp, fsp_index);
			/* the device-side fsp toggles on every LPDDR4 switch */
			dev_fsp = (~dev_fsp) & 0x1;
		} else {
			ddr4_swffc(&dram_info, fsp_index);
		}

		dram_info.current_fsp = fsp_index;
		/* Release the secondaries spinning in waiting_dvfs() */
		wait_ddrc_hwffc_done = false;
		wfe_done = 0;
		dsb();
		sev();
		isb();
	}

	SMC_RET1(handle, 0);
}