/*
 * Copyright (c) 2022, Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>

#include "ddr.h"
#include "socfpga_handoff.h"

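/*
 * ddr_calibration_check() below probes each IO96B instance by reading its
 * register base, as a post-calibration sanity check that the CSR interface
 * is reachable.
 */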
int ddr_calibration_check(void)
{
	// DDR calibration check
	int status = 0;
	uint32_t u32data_read = 0;

	NOTICE("DDR: Access address 0x%x: ...\n", IO96B_0_REG_BASE);
	u32data_read = mmio_read_32(IO96B_0_REG_BASE);
	NOTICE("DDR: Access address 0x%x: read 0x%04x\n", IO96B_0_REG_BASE, u32data_read);

	if (u32data_read == -EPERM) {
		status = -EPERM;
		assert(u32data_read);
	}

	u32data_read = 0x0;
	NOTICE("DDR: Access address 0x%x: ...\n", IO96B_1_REG_BASE);
	u32data_read = mmio_read_32(IO96B_1_REG_BASE);
	NOTICE("DDR: Access address 0x%x: read 0x%04x\n", IO96B_1_REG_BASE, u32data_read);

	if (u32data_read == -EPERM) {
		status = -EPERM;
		assert(u32data_read);
	}

	return status;
}

int iossm_mb_init(void)
{
	/* TODO: Initialize the mailbox according to the IOSSM mailbox spec. */
	return 0;
}

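/*
 * wait_respond() polls CMD_RESPONSE_STATUS until the response-ready field
 * is set; it returns 0 on success, -ETIMEDOUT if the response never becomes
 * ready, or the non-zero general/command-response error field otherwise.
 */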
int wait_respond(uint16_t timeout)
{
	uint32_t status = 0;
	uint32_t count = 0;
	uint32_t data = 0;

	/* Wait for the command response status to be ready */
	do {
		data = mmio_read_32(IO96B_CSR_REG(CMD_RESPONSE_STATUS));
		count++;
		if (count >= timeout) {
			return -ETIMEDOUT;
		}
	} while (STATUS_COMMAND_RESPONSE(data) != STATUS_COMMAND_RESPONSE_READY);

	status = (data & STATUS_GENERAL_ERROR_MASK) >> STATUS_GENERAL_ERROR_OFFSET;
	if (status != 0) {
		return status;
	}

	status = (data & STATUS_CMD_RESPONSE_ERROR_MASK) >> STATUS_CMD_RESPONSE_ERROR_OFFSET;
	if (status != 0) {
		return status;
	}

	return status;
}

int iossm_mb_read_response(void)
{
	uint32_t status = 0;
	unsigned int i;
	uint32_t resp_data[IOSSM_RESP_MAX_WORD_SIZE];
	uint32_t resp_param_reg;

	// Check STATUS_CMD_RESPONSE_DATA_PTR_VALID in
	// STATUS_COMMAND_RESPONSE to ensure a data pointer response

	/* Read CMD_RESPONSE_STATUS and CMD_RESPONSE_DATA_* */
	resp_data[0] = mmio_read_32(IO96B_CSR_REG(CMD_RESPONSE_STATUS));
	resp_data[0] = (resp_data[0] & CMD_RESPONSE_DATA_SHORT_MASK) >>
			CMD_RESPONSE_DATA_SHORT_OFFSET;
	resp_param_reg = CMD_RESPONSE_STATUS;
	for (i = 1; i < IOSSM_RESP_MAX_WORD_SIZE; i++) {
		resp_param_reg = resp_param_reg - CMD_RESPONSE_OFFSET;
		resp_data[i] = mmio_read_32(IO96B_CSR_REG(resp_param_reg));
	}

	/* Wait for STATUS_COMMAND_RESPONSE_READY */
	status = wait_respond(1000);

	/* Acknowledge the response by clearing the ready status */
	mmio_setbits_32(STATUS_COMMAND_RESPONSE(IO96B_CSR_REG(
			CMD_RESPONSE_STATUS)),
			STATUS_COMMAND_RESPONSE_READY_CLEAR);

	return status;
}

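/*
 * iossm_mb_send() packs the target IP type/instance, command type and
 * opcode into CMD_REQ, writes up to `len` parameter words, then reads back
 * the response. A minimal usage sketch (mirroring the calls in
 * ddr_iossm_mailbox_cmd() below):
 *
 *	uint32_t payload[IOSSM_CMD_MAX_WORD_SIZE] = {0U};
 *
 *	int ret = iossm_mb_send(0, 0, MBOX_CMD_GET_SYS_INFO,
 *				OPCODE_GET_MEM_INTF_INFO, payload, 0);
 */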
int iossm_mb_send(uint32_t cmd_target_ip_type, uint32_t cmd_target_ip_instance_id,
			uint32_t cmd_type, uint32_t cmd_opcode, uint32_t *args,
			unsigned int len)
{
	unsigned int i;
	uint32_t status = 0;
	uint32_t cmd_req;
	uint32_t cmd_param_reg;

	cmd_target_ip_type = (cmd_target_ip_type & CMD_TARGET_IP_TYPE_MASK) <<
				CMD_TARGET_IP_TYPE_OFFSET;
	cmd_target_ip_instance_id = (cmd_target_ip_instance_id &
				CMD_TARGET_IP_INSTANCE_ID_MASK) <<
				CMD_TARGET_IP_INSTANCE_ID_OFFSET;
	cmd_type = (cmd_type & CMD_TYPE_MASK) << CMD_TYPE_OFFSET;
	cmd_opcode = (cmd_opcode & CMD_OPCODE_MASK) << CMD_OPCODE_OFFSET;
	cmd_req = cmd_target_ip_type | cmd_target_ip_instance_id | cmd_type |
			cmd_opcode;

	/* Send the mailbox request */
	IOSSM_MB_WRITE(IO96B_CSR_REG(CMD_REQ), cmd_req);
	if (len != 0) {
		cmd_param_reg = CMD_REQ;
		for (i = 0; i < len; i++) {
			cmd_param_reg = cmd_param_reg - CMD_PARAM_OFFSET;
			IOSSM_MB_WRITE(IO96B_CSR_REG(cmd_param_reg), args[i]);
		}
	}

	/* Read and acknowledge the mailbox response */
	status = iossm_mb_read_response();

	return status;
}

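/*
 * Payload-packing sketch for the ECC scrub commands handled below, following
 * the CMD_PARAM layout noted in the case bodies (the exact field encoding is
 * an assumption; only the low bits of each word are consumed):
 *
 *	uint32_t payload[IOSSM_CMD_MAX_WORD_SIZE] = {0U};
 *	unsigned int n = 0;
 *
 *	payload[n++] = scrub_interval;               // CMD_PARAM_0 [15:0]
 *	payload[n++] = scrub_len;                    // CMD_PARAM_1 [11:0]
 *	payload[n++] = full_mem;                     // CMD_PARAM_2 [0:0]
 *	payload[n++] = (uint32_t)start_addr;         // CMD_PARAM_3 [31:0]
 *	payload[n++] = (uint32_t)(start_addr >> 32); // CMD_PARAM_4 [5:0]
 *	payload[n++] = (uint32_t)end_addr;           // CMD_PARAM_5 [31:0]
 *	payload[n++] = (uint32_t)(end_addr >> 32);   // CMD_PARAM_6 [5:0]
 *
 *	iossm_mb_send(0, 0, MBOX_CMD_TRIG_CONTROLLER_OP,
 *		      OPCODE_ECC_SCRUB_MODE_0_START, payload, n);
 */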
int ddr_iossm_mailbox_cmd(uint32_t cmd_opcode)
{
	// IOSSM
	uint32_t status = 0;
	unsigned int i = 0;
	uint32_t payload[IOSSM_CMD_MAX_WORD_SIZE] = {0U};

	switch (cmd_opcode) {
	case CMD_INIT:
		status = iossm_mb_init();
		break;

	case OPCODE_GET_MEM_INTF_INFO:
		status = iossm_mb_send(0, 0, MBOX_CMD_GET_SYS_INFO,
				       OPCODE_GET_MEM_INTF_INFO, payload, i);
		break;

	case OPCODE_GET_MEM_TECHNOLOGY:
		status = iossm_mb_send(0, 0, MBOX_CMD_GET_MEM_INFO,
				       OPCODE_GET_MEM_TECHNOLOGY, payload, i);
		break;

	case OPCODE_GET_MEM_WIDTH_INFO:
		status = iossm_mb_send(0, 0, MBOX_CMD_GET_MEM_INFO,
				       OPCODE_GET_MEM_WIDTH_INFO, payload, i);
		break;

	case OPCODE_ECC_ENABLE_STATUS:
		status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_CONTROLLER_OP,
				       OPCODE_ECC_ENABLE_STATUS, payload, i);
		break;

	case OPCODE_ECC_INTERRUPT_MASK:
		// payload[i] = CMD_PARAM_0 [16:0]: ECC_INTERRUPT_MASK
		status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_CONTROLLER_OP,
				       OPCODE_ECC_INTERRUPT_MASK, payload, i);
		break;

	case OPCODE_ECC_SCRUB_MODE_0_START:
		// payload[i] = CMD_PARAM_0 [15:0]: ECC_SCRUB_INTERVAL
		// i++;
		// payload[i] = CMD_PARAM_1 [11:0]: ECC_SCRUB_LEN
		// i++;
		// payload[i] = CMD_PARAM_2 [0:0]: ECC_SCRUB_FULL_MEM
		// i++;
		// payload[i] = CMD_PARAM_3 [31:0]: ECC_SCRUB_START_ADDR [31:0]
		// i++;
		// payload[i] = CMD_PARAM_4 [5:0]: ECC_SCRUB_START_ADDR [36:32]
		// i++;
		// payload[i] = CMD_PARAM_5 [31:0]: ECC_SCRUB_END_ADDR [31:0]
		// i++;
		// payload[i] = CMD_PARAM_6 [5:0]: ECC_SCRUB_END_ADDR [36:32]
		// i++;
		status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_CONTROLLER_OP,
				       OPCODE_ECC_SCRUB_MODE_0_START, payload, i);
		break;

	case OPCODE_ECC_SCRUB_MODE_1_START:
		// payload[i] = CMD_PARAM_0 [15:0]: ECC_SCRUB_IDLE_CNT
		// i++;
		// payload[i] = CMD_PARAM_1 [11:0]: ECC_SCRUB_LEN
		// i++;
		// payload[i] = CMD_PARAM_2 [0:0]: ECC_SCRUB_FULL_MEM
		// i++;
		// payload[i] = CMD_PARAM_3 [31:0]: ECC_SCRUB_START_ADDR [31:0]
		// i++;
		// payload[i] = CMD_PARAM_4 [5:0]: ECC_SCRUB_START_ADDR [36:32]
		// i++;
		// payload[i] = CMD_PARAM_5 [31:0]: ECC_SCRUB_END_ADDR [31:0]
		// i++;
		// payload[i] = CMD_PARAM_6 [5:0]: ECC_SCRUB_END_ADDR [36:32]
		// i++;
		status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_CONTROLLER_OP,
				       OPCODE_ECC_SCRUB_MODE_1_START, payload, i);
		break;

	case OPCODE_BIST_RESULTS_STATUS:
		status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_CONTROLLER_OP,
				       OPCODE_BIST_RESULTS_STATUS, payload, i);
		break;

	case OPCODE_BIST_MEM_INIT_START:
		status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_CONTROLLER_OP,
				       OPCODE_BIST_MEM_INIT_START, payload, i);
		break;

	case OPCODE_TRIG_MEM_CAL:
		status = iossm_mb_send(0, 0, MBOX_CMD_TRIG_MEM_CAL_OP,
				       OPCODE_TRIG_MEM_CAL, payload, i);
		break;

	default:
		break;
	}

	if (status == -EPERM) {
		assert(status);
	}

	return status;
}

int ddr_config_handoff(handoff *hoff_ptr)
{
	/* Populate DDR handoff data */
	/* TODO: Add the DDR handoff configuration once available */
	return 0;
}

// DDR firewall and non-secure access
void ddr_enable_ns_access(void)
{
	/* Set the DDR non-secure access registers accordingly */

	mmio_setbits_32(CCU_REG(DMI0_DMIUSMCTCR),
			CCU_DMI_ALLOCEN | CCU_DMI_LOOKUPEN);
	mmio_setbits_32(CCU_REG(DMI1_DMIUSMCTCR),
			CCU_DMI_ALLOCEN | CCU_DMI_LOOKUPEN);

	/* TODO: Add the CCU NCORE OCRAM bypass mask for the non-secure registers */
	NOTICE("DDR non-secure access configured\n");
}

void ddr_enable_firewall(void)
{
	/* Set the DDR firewall registers accordingly */
	/* TODO: Add the CCU NCORE OCRAM bypass mask for the firewall registers */
	NOTICE("DDR firewall enabled\n");
}

bool is_ddr_init_in_progress(void)
{
	uint32_t reg = mmio_read_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0));

	return ((reg & SOCFPGA_SYSMGR_BOOT_SCRATCH_POR_0_MASK) != 0U);
}

int ddr_init(void)
{
	// DDR driver initialization
	int status = -EPERM;
	uint32_t cmd_opcode = 0;

	// Bail out if DDR init is already in progress, then flag it in the
	// boot scratch register
	if (is_ddr_init_in_progress()) {
		return status;
	}
	mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0), 0x01);

	// Populate DDR handoff data
	handoff reverse_handoff_ptr;

	if (!socfpga_get_handoff(&reverse_handoff_ptr)) {
		assert(status);
	}
	status = ddr_config_handoff(&reverse_handoff_ptr);
	if (status == -EPERM) {
		assert(status);
	}

	// CCU and firewall setup
	ddr_enable_ns_access();
	ddr_enable_firewall();

	// DDR calibration check
	status = ddr_calibration_check();
	if (status == -EPERM) {
		assert(status);
	}

	// DDR mailbox command
	status = ddr_iossm_mailbox_cmd(cmd_opcode);
	if (status != 0) {
		assert(status);
	}

	// Clear the boot scratch register to mark DDR init as done
	mmio_write_32(SOCFPGA_SYSMGR(BOOT_SCRATCH_POR_0), 0x00);

	NOTICE("DDR initialized successfully\n");
	return status;
}

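/*
 * ddr_config_scrubber() backs up the user scrubber/ECC configuration,
 * zero-fills DDR through the scrubber, then restores the configuration and
 * re-enables ECC scrubbing. A minimal usage sketch (the controller base
 * comes from the platform memory map; `umctl2_base` here is illustrative):
 *
 *	if (ddr_config_scrubber(umctl2_base, DDR_TYPE_DDR4) != 0) {
 *		ERROR("DDR scrubber configuration failed\n");
 *	}
 */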
int ddr_config_scrubber(phys_addr_t umctl2_base, enum ddr_type umctl2_type)
{
	uint32_t temp[9] = {0};
	int ret = 0;

	/* Write the default value to prevent a scrubber stop due to low power */
	mmio_write_32(umctl2_base + DDR4_PWRCTL_OFFSET, 0);

	/* Back up the user configuration in the temp array */
	temp[0] = mmio_read_32(umctl2_base + DDR4_SBRCTL_OFFSET);
	temp[1] = mmio_read_32(umctl2_base + DDR4_SBRWDATA0_OFFSET);
	temp[2] = mmio_read_32(umctl2_base + DDR4_SBRSTART0_OFFSET);
	if (umctl2_type == DDR_TYPE_DDR4) {
		temp[3] = mmio_read_32(umctl2_base + DDR4_SBRWDATA1_OFFSET);
		temp[4] = mmio_read_32(umctl2_base + DDR4_SBRSTART1_OFFSET);
	}
	temp[5] = mmio_read_32(umctl2_base + DDR4_SBRRANGE0_OFFSET);
	temp[6] = mmio_read_32(umctl2_base + DDR4_SBRRANGE1_OFFSET);
	temp[7] = mmio_read_32(umctl2_base + DDR4_ECCCFG0_OFFSET);
	temp[8] = mmio_read_32(umctl2_base + DDR4_ECCCFG1_OFFSET);

	if (umctl2_type != DDR_TYPE_DDR4) {
		/* Lock the ECC region to ensure it is not being accessed */
		mmio_setbits_32(umctl2_base + DDR4_ECCCFG1_OFFSET,
				LPDDR4_ECCCFG1_ECC_REGIONS_PARITY_LOCK);
	}
	/* Disable input traffic per port */
	mmio_clrbits_32(umctl2_base + DDR4_PCTRL0_OFFSET, DDR4_PCTRL0_PORT_EN);
	/* Disable the scrubber */
	mmio_clrbits_32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
	/* Poll until all scrub write data has been sent */
	ret = poll_idle_status((umctl2_base + DDR4_SBRSTAT_OFFSET),
			       DDR4_SBRSTAT_SCRUB_BUSY, true, 5000);
	if (ret) {
		INFO("%s: Timeout while waiting for sending all scrub data\n",
		     __func__);
		return ret;
	}

	/* LPDDR4 supports inline ECC only */
	if (umctl2_type != DDR_TYPE_DDR4) {
		/*
		 * Set all regions as protected; this is required for the
		 * scrubber to initialize the whole LPDDR4 space except the
		 * ECC region
		 */
		mmio_write_32(umctl2_base + DDR4_ECCCFG0_OFFSET,
			      (ONE_EIGHT <<
			       LPDDR4_ECCCFG0_ECC_REGION_MAP_GRANU_SHIFT) |
			      (ALL_PROTECTED << LPDDR4_ECCCFG0_ECC_REGION_MAP_SHIFT));
	}

	/* Scrub_burst = 1, scrub_mode = 1 (performs writes) */
	mmio_write_32(umctl2_base + DDR4_SBRCTL_OFFSET,
		      DDR4_SBRCTL_SCRUB_BURST_1 | DDR4_SBRCTL_SCRUB_WRITE);

	/* Wipe the DDR content after calibration */
	ret = ddr_zerofill_scrubber(umctl2_base, umctl2_type);
	if (ret) {
		ERROR("Failed to clear DDR content\n");
	}

	/* Poll until all scrub write data has been sent */
	ret = poll_idle_status((umctl2_base + DDR4_SBRSTAT_OFFSET),
			       DDR4_SBRSTAT_SCRUB_BUSY, true, 5000);
	if (ret) {
		INFO("%s: Timeout while waiting for sending all scrub data\n",
		     __func__);
		return ret;
	}

	/* Disable the scrubber */
	mmio_clrbits_32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);

	/* Restore the user configuration */
	mmio_write_32(umctl2_base + DDR4_SBRCTL_OFFSET, temp[0]);
	mmio_write_32(umctl2_base + DDR4_SBRWDATA0_OFFSET, temp[1]);
	mmio_write_32(umctl2_base + DDR4_SBRSTART0_OFFSET, temp[2]);
	if (umctl2_type == DDR_TYPE_DDR4) {
		mmio_write_32(umctl2_base + DDR4_SBRWDATA1_OFFSET, temp[3]);
		mmio_write_32(umctl2_base + DDR4_SBRSTART1_OFFSET, temp[4]);
	}
	mmio_write_32(umctl2_base + DDR4_SBRRANGE0_OFFSET, temp[5]);
	mmio_write_32(umctl2_base + DDR4_SBRRANGE1_OFFSET, temp[6]);
	mmio_write_32(umctl2_base + DDR4_ECCCFG0_OFFSET, temp[7]);
	mmio_write_32(umctl2_base + DDR4_ECCCFG1_OFFSET, temp[8]);

	/* Re-enable ECC scrubbing if the restored mode is not write mode */
	if (!(mmio_read_32(umctl2_base + DDR4_SBRCTL_OFFSET) & DDR4_SBRCTL_SCRUB_WRITE)) {
		/* Enable the scrubber */
		mmio_setbits_32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
	}

	return 0;
}

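/*
 * ddr_zerofill_scrubber() zeroes the whole DDR by programming the scrub
 * write data, start and range registers to 0, then letting the scrubber
 * run to completion.
 */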
int ddr_zerofill_scrubber(phys_addr_t umctl2_base, enum ddr_type umctl2_type)
{
	int ret = 0;

	/* Zero the whole DDR */
	mmio_write_32(umctl2_base + DDR4_SBRWDATA0_OFFSET, 0);
	mmio_write_32(umctl2_base + DDR4_SBRSTART0_OFFSET, 0);
	if (umctl2_type == DDR_TYPE_DDR4) {
		mmio_write_32(umctl2_base + DDR4_SBRWDATA1_OFFSET, 0);
		mmio_write_32(umctl2_base + DDR4_SBRSTART1_OFFSET, 0);
	}
	mmio_write_32(umctl2_base + DDR4_SBRRANGE0_OFFSET, 0);
	mmio_write_32(umctl2_base + DDR4_SBRRANGE1_OFFSET, 0);

	NOTICE("Enabling scrubber (zeroing whole DDR) ...\n");

	/* Enable the scrubber */
	mmio_setbits_32(umctl2_base + DDR4_SBRCTL_OFFSET, DDR4_SBRCTL_SCRUB_EN);
	/* Poll until all scrub write commands have been sent */
	ret = poll_idle_status((umctl2_base + DDR4_SBRSTAT_OFFSET),
			       DDR4_SBRSTAT_SCRUB_DONE, true, 5000);
	if (ret) {
		INFO("%s: Timeout while waiting for sending all scrub commands\n",
		     __func__);
		return ret;
	}

	return 0;
}

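/*
 * poll_idle_status() busy-waits in 1 ms steps until (reg & mask) == match,
 * returning 0 on success and -ETIMEDOUT once delay_ms milliseconds elapse.
 */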
int poll_idle_status(uint32_t addr, uint32_t mask, uint32_t match, uint32_t delay_ms)
{
	int time_out = delay_ms;

	while (time_out-- > 0) {
		if ((mmio_read_32(addr) & mask) == match) {
			return 0;
		}
		udelay(1000);
	}

	return -ETIMEDOUT;
}