/*
 * Copyright 2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <common/debug.h>
#include <ddr.h>
#ifndef CONFIG_DDR_NODIMM
#include <i2c.h>
#endif
#include <nxp_timer.h>

struct dynamic_odt {
	unsigned int odt_rd_cfg;	/* controller ODT assertion on reads */
	unsigned int odt_wr_cfg;	/* controller ODT assertion on writes */
	unsigned int odt_rtt_norm;	/* DDR4 RTT_NOM termination (MR1) */
	unsigned int odt_rtt_wr;	/* DDR4 dynamic RTT_WR termination (MR2) */
};

#ifndef CONFIG_STATIC_DDR
#if defined(PHY_GEN2_FW_IMAGE_BUFFER) && !defined(NXP_DDR_PHY_GEN2)
#error Missing NXP_DDR_PHY_GEN2
#endif
#ifdef NXP_DDR_PHY_GEN2
static const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_ALL,
		DDR4_RTT_80_OHM,
		DDR4_RTT_WR_OFF
	},
	{	/* cs1 */
		DDR_ODT_NEVER,
		DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_WR_OFF
	},
	{},
	{}
};

static const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_ALL,
		DDR4_RTT_80_OHM,
		DDR4_RTT_WR_OFF
	},
	{},
	{},
	{},
};

static const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		DDR_ODT_OTHER_DIMM,
		DDR_ODT_ALL,
		DDR4_RTT_60_OHM,
		DDR4_RTT_WR_240_OHM
	},
	{	/* cs1 */
		DDR_ODT_OTHER_DIMM,
		DDR_ODT_ALL,
		DDR4_RTT_60_OHM,
		DDR4_RTT_WR_240_OHM
	},
	{	/* cs2 */
		DDR_ODT_OTHER_DIMM,
		DDR_ODT_ALL,
		DDR4_RTT_60_OHM,
		DDR4_RTT_WR_240_OHM
	},
	{	/* cs3 */
		DDR_ODT_OTHER_DIMM,
		DDR_ODT_ALL,
		DDR4_RTT_60_OHM,
		DDR4_RTT_WR_240_OHM
	}
};

static const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_ALL,
		DDR4_RTT_80_OHM,
		DDR4_RTT_WR_OFF
	},
	{},
	{	/* cs2 */
		DDR_ODT_NEVER,
		DDR_ODT_ALL,
		DDR4_RTT_80_OHM,
		DDR4_RTT_WR_OFF
	},
	{}
};

static const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_SAME_DIMM,
		DDR4_RTT_80_OHM,
		DDR4_RTT_WR_OFF
	},
	{	/* cs1 */
		DDR_ODT_NEVER,
		DDR_ODT_NEVER,
		DDR4_RTT_80_OHM,
		DDR4_RTT_WR_OFF
	},
	{},
	{}
};

static const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_CS,
		DDR4_RTT_80_OHM,
		DDR4_RTT_WR_OFF
	},
	{},
	{},
	{}
};
#else
static const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_ALL,
		DDR4_RTT_40_OHM,
		DDR4_RTT_WR_OFF
	},
	{	/* cs1 */
		DDR_ODT_NEVER,
		DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_WR_OFF
	},
	{},
	{}
};

static const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_ALL,
		DDR4_RTT_40_OHM,
		DDR4_RTT_WR_OFF
	},
	{},
	{},
	{},
};

static const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_WR_OFF
	},
	{	/* cs1 */
		DDR_ODT_OTHER_DIMM,
		DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_WR_OFF
	},
	{	/* cs2 */
		DDR_ODT_NEVER,
		DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_WR_OFF
	},
	{	/* cs3 */
		DDR_ODT_OTHER_DIMM,
		DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_WR_OFF
	}
};

static const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		DDR_ODT_OTHER_DIMM,
		DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_WR_120_OHM
	},
	{},
	{	/* cs2 */
		DDR_ODT_OTHER_DIMM,
		DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_WR_120_OHM
	},
	{}
};

static const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_SAME_DIMM,
		DDR4_RTT_40_OHM,
		DDR4_RTT_WR_OFF
	},
	{	/* cs1 */
		DDR_ODT_NEVER,
		DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_WR_OFF
	},
	{},
	{}
};

static const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		DDR_ODT_NEVER,
		DDR_ODT_CS,
		DDR4_RTT_40_OHM,
		DDR4_RTT_WR_OFF
	},
	{},
	{},
	{}
};
#endif /* NXP_DDR_PHY_GEN2 */

/*
 * Automatically select bank interleaving mode based on DIMMs
 * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
 * This function only deals with one or two slots per controller.
 */
static inline unsigned int auto_bank_intlv(const int cs_in_use,
					   const struct dimm_params *pdimm)
{
	switch (cs_in_use) {
	case 0xf:
		return DDR_BA_INTLV_CS0123;
	case 0x3:
		return DDR_BA_INTLV_CS01;
	case 0x1:
		return DDR_BA_NONE;
	case 0x5:
		return DDR_BA_NONE;
	default:
		break;
	}

	return 0U;
}
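
/*
 * Reading the masks above: each cs_in_use bit marks one chip select, so a
 * single dual-rank DIMM (CS0+CS1) gives 0x3 and selects CS0/CS1 bank
 * interleaving, while two single-rank DIMMs (CS0 and CS2) give 0x5 and run
 * without bank interleaving.
 */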

static int cal_odt(const unsigned int clk,
		   struct memctl_opt *popts,
		   struct ddr_conf *conf,
		   struct dimm_params *pdimm,
		   const int dimm_slot_per_ctrl)
{
	unsigned int i;
	const struct dynamic_odt *pdodt = NULL;

	/* Indexed by [DIMM slots per controller - 1][ranks per DIMM - 1]. */
	static const struct dynamic_odt *table[2][5] = {
		{single_S, single_D, NULL, NULL},
		{dual_SS, dual_DD, NULL, NULL},
	};

	if (dimm_slot_per_ctrl != 1 && dimm_slot_per_ctrl != 2) {
		ERROR("Unsupported number of DIMMs\n");
		return -EINVAL;
	}

	pdodt = table[dimm_slot_per_ctrl - 1][pdimm->n_ranks - 1];
	if (pdodt == dual_SS) {
		pdodt = (conf->cs_in_use == 0x5) ? dual_SS :
			((conf->cs_in_use == 0x1) ? dual_S0 : NULL);
	} else if (pdodt == dual_DD) {
		pdodt = (conf->cs_in_use == 0xf) ? dual_DD :
			((conf->cs_in_use == 0x3) ? dual_D0 : NULL);
	}
	if (pdodt == dual_DD && pdimm->package_3ds) {
		ERROR("Too many 3DS DIMMs.\n");
		return -EINVAL;
	}

	if (pdodt == NULL) {
		ERROR("Error determining ODT.\n");
		return -EINVAL;
	}

	/* Pick chip-select local options. */
	for (i = 0U; i < DDRC_NUM_CS; i++) {
		debug("cs %d\n", i);
		popts->cs_odt[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
		debug("     odt_rd_cfg 0x%x\n",
			  popts->cs_odt[i].odt_rd_cfg);
		popts->cs_odt[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
		debug("     odt_wr_cfg 0x%x\n",
			  popts->cs_odt[i].odt_wr_cfg);
		popts->cs_odt[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
		debug("     odt_rtt_norm 0x%x\n",
			  popts->cs_odt[i].odt_rtt_norm);
		popts->cs_odt[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
		debug("     odt_rtt_wr 0x%x\n",
			  popts->cs_odt[i].odt_rtt_wr);
		popts->cs_odt[i].auto_precharge = 0;
		debug("     auto_precharge %d\n",
			  popts->cs_odt[i].auto_precharge);
	}

	return 0;
}
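
/*
 * Worked example of the selection above: a two-slot board with one
 * dual-rank DIMM leaves cs_in_use = 0x3, so table[][] first picks dual_DD
 * and the cs_in_use check then narrows it to dual_D0.
 */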

static int cal_opts(const unsigned int clk,
		    struct memctl_opt *popts,
		    struct ddr_conf *conf,
		    struct dimm_params *pdimm,
		    const int dimm_slot_per_ctrl,
		    const unsigned int ip_rev)
{
	popts->rdimm = pdimm->rdimm;
	popts->mirrored_dimm = pdimm->mirrored_dimm;
#ifdef CONFIG_DDR_ECC_EN
	popts->ecc_mode = pdimm->edc_config == 0x02 ? 1 : 0;
#endif
	popts->ctlr_init_ecc = popts->ecc_mode;
	debug("ctlr_init_ecc %d\n", popts->ctlr_init_ecc);
	popts->self_refresh_in_sleep = 1;
	popts->dynamic_power = 0;

	/*
	 * check sdram width, allow platform override
	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
	 */
	if (pdimm->primary_sdram_width == 64) {
		popts->data_bus_dimm = DDR_DBUS_64;
		popts->otf_burst_chop_en = 1;
	} else if (pdimm->primary_sdram_width == 32) {
		popts->data_bus_dimm = DDR_DBUS_32;
		popts->otf_burst_chop_en = 0;
	} else if (pdimm->primary_sdram_width == 16) {
		popts->data_bus_dimm = DDR_DBUS_16;
		popts->otf_burst_chop_en = 0;
	} else {
		ERROR("primary sdram width invalid!\n");
		return -EINVAL;
	}
	popts->data_bus_used = popts->data_bus_dimm;
	popts->x4_en = (pdimm->device_width == 4) ? 1 : 0;
	debug("x4_en %d\n", popts->x4_en);

	/* Address parity: enable for RDIMM; off by default for DDR4 UDIMM/discrete. */
	if (popts->rdimm != 0) {
		popts->ap_en = 1; /* 0 = disable, 1 = enable */
	} else {
		popts->ap_en = 0; /* default for DDR4 UDIMM/discrete memory */
	}

	if (ip_rev == 0x50500) {
		popts->ap_en = 0;
	}

	debug("ap_en %d\n", popts->ap_en);

	/* BSTTOPRE precharge interval uses 1/4 of refint value. */
	popts->bstopre = picos_to_mclk(clk, pdimm->refresh_rate_ps) >> 2;
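	/*
	 * Worked example (values assumed for illustration): a 7.8 us
	 * refresh interval (refresh_rate_ps = 7800000) at a 1250 ps clock
	 * period gives picos_to_mclk() = 7800000 / 1250 = 6240 clocks, so
	 * bstopre = 6240 >> 2 = 1560.
	 */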
	popts->tfaw_ps = pdimm->tfaw_ps;

	return 0;
}

static void cal_intlv(const int num_ctlrs,
		      struct memctl_opt *popts,
		      struct ddr_conf *conf,
		      struct dimm_params *pdimm)
{
#ifdef NXP_DDR_INTLV_256B
	if (num_ctlrs == 2) {
		popts->ctlr_intlv = 1;
		popts->ctlr_intlv_mode = DDR_256B_INTLV;
	}
#endif
	debug("ctlr_intlv %d\n", popts->ctlr_intlv);
	debug("ctlr_intlv_mode %d\n", popts->ctlr_intlv_mode);

	popts->ba_intlv = auto_bank_intlv(conf->cs_in_use, pdimm);
	debug("ba_intlv 0x%x\n", popts->ba_intlv);
}
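
/*
 * With DDR_256B_INTLV, the two controllers alternate ownership of the
 * address space at 256-byte granularity, spreading sequential traffic
 * evenly across both.
 */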

static int update_burst_length(struct memctl_opt *popts)
{
402 	if ((popts->data_bus_used == DDR_DBUS_32) ||
403 	    (popts->data_bus_used == DDR_DBUS_16)) {
404 		/* 32-bit or 16-bit bus */
405 		popts->otf_burst_chop_en = 0;
406 		popts->burst_length = DDR_BL8;
407 	} else if (popts->otf_burst_chop_en != 0) { /* on-the-fly burst chop */
408 		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
409 	} else {
410 		popts->burst_length = DDR_BL8;
411 	}
412 	debug("data_bus_used %d\n", popts->data_bus_used);
413 	debug("otf_burst_chop_en %d\n", popts->otf_burst_chop_en);
414 	debug("burst_length 0x%x\n", popts->burst_length);
415 	/*
416 	 * If a reduced data width is requested, but the SPD
417 	 * specifies a physically wider device, adjust the
418 	 * computed dimm capacities accordingly before
419 	 * assigning addresses.
420 	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
421 	 */
422 	if (popts->data_bus_dimm > popts->data_bus_used) {
423 		ERROR("Data bus configuration error\n");
424 		return -EINVAL;
425 	}
426 	popts->dbw_cap_shift = popts->data_bus_used - popts->data_bus_dimm;
427 	debug("dbw_cap_shift %d\n", popts->dbw_cap_shift);
428 
429 	return 0;
430 }
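
/*
 * Example of the capacity adjustment above, using the 0/1/2 width encoding
 * from the comment in the function: a 64-bit DIMM (data_bus_dimm = 0)
 * driven as a 32-bit bus (data_bus_used = 1) yields dbw_cap_shift = 1,
 * which halves the usable rank density during address assignment.
 */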

int cal_board_params(struct ddr_info *priv,
		     const struct board_timing *dimm,
		     int len)
{
	const unsigned long speed = priv->clk / 1000000;
	const struct dimm_params *pdimm = &priv->dimm;
	struct memctl_opt *popts = &priv->opt;
	const struct rc_timing *prt = NULL;
	const struct rc_timing *chosen = NULL;
	int i;

	for (i = 0; i < len; i++) {
		if (pdimm->rc == dimm[i].rc) {
			prt = dimm[i].p;
			break;
		}
	}
	if (prt == NULL) {
		ERROR("Board parameters do not match.\n");
		return -EINVAL;
	}
	while (prt->speed_bin != 0) {
		if (speed <= prt->speed_bin) {
			chosen = prt;
			break;
		}
		prt++;
	}
	if (chosen == NULL) {
		ERROR("No timing match for speed %lu\n", speed);
		return -EINVAL;
	}
	popts->clk_adj = prt->clk_adj;
	popts->wrlvl_start = prt->wrlvl;
	popts->wrlvl_ctl_2 = (prt->wrlvl * 0x01010101 + dimm[i].add1) &
			     0xFFFFFFFF;
	popts->wrlvl_ctl_3 = (prt->wrlvl * 0x01010101 + dimm[i].add2) &
			     0xFFFFFFFF;

	return 0;
}
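
/*
 * Note on the write-leveling math above: multiplying the 8-bit start value
 * by 0x01010101 broadcasts it into all four byte lanes (e.g. 0x0a becomes
 * 0x0a0a0a0a); the board table's add1/add2 then nudge individual lanes.
 */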

static int synthesize_ctlr(struct ddr_info *priv)
{
	int ret;

	ret = cal_odt(priv->clk,
		      &priv->opt,
		      &priv->conf,
		      &priv->dimm,
		      priv->dimm_on_ctlr);
	if (ret != 0) {
		return ret;
	}

	ret = cal_opts(priv->clk,
		       &priv->opt,
		       &priv->conf,
		       &priv->dimm,
		       priv->dimm_on_ctlr,
		       priv->ip_rev);

	if (ret != 0) {
		return ret;
	}

	cal_intlv(priv->num_ctlrs, &priv->opt, &priv->conf, &priv->dimm);
	ret = ddr_board_options(priv);
	if (ret != 0) {
		ERROR("Failed matching board timing.\n");
	}

	ret = update_burst_length(&priv->opt);

	return ret;
}

/* Return the bit mask of valid DIMMs found */
static int parse_spd(struct ddr_info *priv)
{
	struct ddr_conf *conf = &priv->conf;
	struct dimm_params *dimm = &priv->dimm;
	int j, valid_mask = 0;

#ifdef CONFIG_DDR_NODIMM
	valid_mask = ddr_get_ddr_params(dimm, conf);
	if (valid_mask < 0) {
		ERROR("DDR params error\n");
		return valid_mask;
	}
#else
	const int *spd_addr = priv->spd_addr;
	const int num_ctlrs = priv->num_ctlrs;
	const int num_dimm = priv->dimm_on_ctlr;
	struct ddr4_spd spd[2];
	unsigned int spd_checksum[2];
	int addr_idx = 0;
	int spd_idx = 0;
	int ret, addr, i;

	/* Scan all DIMMs */
	for (i = 0; i < num_ctlrs; i++) {
		debug("Controller %d\n", i);
		for (j = 0; j < num_dimm; j++, addr_idx++) {
			debug("DIMM %d\n", j);
			addr = spd_addr[addr_idx];
			if (addr == 0) {
				if (j == 0) {
					ERROR("First SPD addr wrong.\n");
					return -EINVAL;
				}
				continue;
			}
			debug("addr 0x%x\n", addr);
			ret = read_spd(addr, &spd[spd_idx],
				       sizeof(struct ddr4_spd));
			if (ret != 0) {	/* invalid */
				debug("Invalid SPD at address 0x%x\n", addr);
				continue;
			}

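			/*
			 * Per the DDR4 SPD layout, crc[0]/crc[1] hold the
			 * base-section CRC (bytes 126/127) and
			 * mod_section.uc[126]/uc[127] the module-section
			 * CRC (bytes 254/255). Packing both into one word
			 * lets a zero test catch a blank EEPROM and a
			 * single compare catch mismatched DIMMs.
			 */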
			spd_checksum[spd_idx] =
				(spd[spd_idx].crc[1] << 24) |
				(spd[spd_idx].crc[0] << 16) |
				(spd[spd_idx].mod_section.uc[127] << 8) |
				(spd[spd_idx].mod_section.uc[126] << 0);
			debug("checksum 0x%x\n", spd_checksum[spd_idx]);
			if (spd_checksum[spd_idx] == 0) {
				debug("Bad checksum, ignored.\n");
				continue;
			}
			if (spd_idx == 0) {
				/* first valid SPD */
				ret = cal_dimm_params(&spd[0], dimm);
				if (ret != 0) {
					ERROR("SPD calculation error\n");
					return -EINVAL;
				}
			}

			if (spd_idx != 0 && spd_checksum[0] !=
			    spd_checksum[spd_idx]) {
				ERROR("DIMMs are not identical.\n");
				return -EINVAL;
			}
			conf->dimm_in_use[j] = 1;
			valid_mask |= 1 << addr_idx;
			spd_idx = 1;
		}
		debug("done with controller %d\n", i);
	}
	switch (num_ctlrs) {
	case 1:
		if ((valid_mask & 0x1) == 0) {
			ERROR("First slot cannot be empty.\n");
			return -EINVAL;
		}
		break;
	case 2:
		switch (num_dimm) {
		case 1:
			if (valid_mask == 0) {
				ERROR("Both slots empty\n");
				return -EINVAL;
			}
			break;
		case 2:
			if (valid_mask != 0x5 &&
			    valid_mask != 0xf &&
			    (valid_mask & 0x7) != 0x4 &&
			    (valid_mask & 0xd) != 0x1) {
				ERROR("Invalid DIMM combination.\n");
				return -EINVAL;
			}
			break;
		default:
			ERROR("Invalid number of DIMMs.\n");
			return -EINVAL;
		}
		break;
	default:
		ERROR("Invalid number of controllers.\n");
		return -EINVAL;
	}
	/* now we have valid and identical DIMMs on controllers */
#endif	/* CONFIG_DDR_NODIMM */

	debug("cal cs\n");
	conf->cs_in_use = 0;
	for (j = 0; j < DDRC_NUM_DIMM; j++) {
		if (conf->dimm_in_use[j] == 0) {
			continue;
		}
		switch (dimm->n_ranks) {
		case 4:
			ERROR("Quad-rank DIMM not supported\n");
			return -EINVAL;
		case 2:
			conf->cs_on_dimm[j] = 0x3 << (j * CONFIG_CS_PER_SLOT);
			conf->cs_in_use |= conf->cs_on_dimm[j];
			break;
		case 1:
			conf->cs_on_dimm[j] = 0x1 << (j * CONFIG_CS_PER_SLOT);
			conf->cs_in_use |= conf->cs_on_dimm[j];
			break;
		default:
			ERROR("SPD error with n_ranks\n");
			return -EINVAL;
		}
		debug("cs_in_use = %x\n", conf->cs_in_use);
		debug("cs_on_dimm[%d] = %x\n", j, conf->cs_on_dimm[j]);
	}
#ifndef CONFIG_DDR_NODIMM
	if (priv->dimm.rdimm != 0) {
		NOTICE("RDIMM %s\n", priv->dimm.mpart);
	} else {
		NOTICE("UDIMM %s\n", priv->dimm.mpart);
	}
#else
	NOTICE("%s\n", priv->dimm.mpart);
#endif

	return valid_mask;
}
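
/*
 * Reading the returned mask: bit N corresponds to spd_addr[N], so with two
 * controllers of two slots each, 0xf means every slot is populated and 0x5
 * means only the first slot of each controller is.
 */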

static unsigned long long assign_intlv_addr(
	const struct dimm_params *pdimm,
	const struct memctl_opt *opt,
	struct ddr_conf *conf,
	const unsigned long long current_mem_base)
{
	int i;
	int ctlr_density_mul = 0;
	const unsigned long long rank_density = pdimm->rank_density >>
						opt->dbw_cap_shift;
	unsigned long long total_ctlr_mem;

	debug("rank density 0x%llx\n", rank_density);
	switch (opt->ba_intlv & DDR_BA_INTLV_CS0123) {
	case DDR_BA_INTLV_CS0123:
		ctlr_density_mul = 4;
		break;
	case DDR_BA_INTLV_CS01:
		ctlr_density_mul = 2;
		break;
	default:
		ctlr_density_mul = 1;
		break;
	}
	debug("ctlr density mul %d\n", ctlr_density_mul);
	switch (opt->ctlr_intlv_mode) {
	case DDR_256B_INTLV:
		total_ctlr_mem = 2 * ctlr_density_mul * rank_density;
		break;
	default:
		ERROR("Unknown interleaving mode\n");
		return 0;
	}
	conf->base_addr = current_mem_base;
	conf->total_mem = total_ctlr_mem;

	/* overwrite cs_in_use bitmask with controller interleaving */
	conf->cs_in_use = (1 << ctlr_density_mul) - 1;
	debug("Overwrite cs_in_use as %x\n", conf->cs_in_use);

	/* Fill addr with each cs in use */
	for (i = 0; i < ctlr_density_mul; i++) {
		conf->cs_base_addr[i] = current_mem_base;
		conf->cs_size[i] = total_ctlr_mem;
		debug("CS %d\n", i);
		debug("    base_addr 0x%llx\n", conf->cs_base_addr[i]);
		debug("    size 0x%llx\n", conf->cs_size[i]);
	}

	return total_ctlr_mem;
}
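
/*
 * Worked example (sizes assumed for illustration): two controllers in 256B
 * interleave, each DIMM bank-interleaved across CS0+CS1 with an 8 GB rank
 * density, give ctlr_density_mul = 2 and total_ctlr_mem = 2 * 2 * 8 GB =
 * 32 GB presented as a single region.
 */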

static unsigned long long assign_non_intlv_addr(
	const struct dimm_params *pdimm,
	const struct memctl_opt *opt,
	struct ddr_conf *conf,
	unsigned long long current_mem_base)
{
	int i;
	const unsigned long long rank_density = pdimm->rank_density >>
						opt->dbw_cap_shift;
	unsigned long long total_ctlr_mem = 0ULL;

	debug("rank density 0x%llx\n", rank_density);
	conf->base_addr = current_mem_base;

	/* assign each cs */
	switch (opt->ba_intlv & DDR_BA_INTLV_CS0123) {
	case DDR_BA_INTLV_CS0123:
		for (i = 0; i < DDRC_NUM_CS; i++) {
			conf->cs_base_addr[i] = current_mem_base;
			conf->cs_size[i] = rank_density << 2;
			total_ctlr_mem += rank_density;
		}
		break;
	case DDR_BA_INTLV_CS01:
		for (i = 0; ((conf->cs_in_use & (1 << i)) != 0) && i < 2; i++) {
			conf->cs_base_addr[i] = current_mem_base;
			conf->cs_size[i] = rank_density << 1;
			total_ctlr_mem += rank_density;
		}
		current_mem_base += total_ctlr_mem;
		for (; ((conf->cs_in_use & (1 << i)) != 0) && i < DDRC_NUM_CS;
		     i++) {
			conf->cs_base_addr[i] = current_mem_base;
			conf->cs_size[i] = rank_density;
			total_ctlr_mem += rank_density;
			current_mem_base += rank_density;
		}
		break;
	case DDR_BA_NONE:
		for (i = 0; ((conf->cs_in_use & (1 << i)) != 0) &&
			     (i < DDRC_NUM_CS); i++) {
			conf->cs_base_addr[i] = current_mem_base;
			conf->cs_size[i] = rank_density;
			current_mem_base += rank_density;
			total_ctlr_mem += rank_density;
		}
		break;
	default:
		ERROR("Unsupported bank interleaving\n");
		return 0;
	}
	for (i = 0; ((conf->cs_in_use & (1 << i)) != 0) &&
		     (i < DDRC_NUM_CS); i++) {
		debug("CS %d\n", i);
		debug("    base_addr 0x%llx\n", conf->cs_base_addr[i]);
		debug("    size 0x%llx\n", conf->cs_size[i]);
	}

	return total_ctlr_mem;
}
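
/*
 * Layout example for DDR_BA_INTLV_CS01 (sizes assumed for illustration):
 * with four 4 GB ranks in use, CS0 and CS1 share one 8 GB interleaved
 * window at the base address, then CS2 and CS3 each take a private 4 GB
 * window above it.
 */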

unsigned long long assign_addresses(struct ddr_info *priv)
		   __attribute__ ((weak));

unsigned long long assign_addresses(struct ddr_info *priv)
{
	struct memctl_opt *opt = &priv->opt;
	const struct dimm_params *dimm = &priv->dimm;
	struct ddr_conf *conf = &priv->conf;
	unsigned long long current_mem_base = priv->mem_base;
	unsigned long long total_mem;

	total_mem = 0ULL;
	debug("ctlr_intlv %d\n", opt->ctlr_intlv);
	if (opt->ctlr_intlv != 0) {
		total_mem = assign_intlv_addr(dimm, opt, conf,
					      current_mem_base);
	} else {
		/*
		 * Simple linear assignment if memory controllers are not
		 * interleaved. This is only valid for SoCs with a single
		 * DDRC.
		 */
		total_mem = assign_non_intlv_addr(dimm, opt, conf,
						  current_mem_base);
	}
	conf->total_mem = total_mem;
	debug("base 0x%llx\n", current_mem_base);
	debug("Total mem by assignment is 0x%llx\n", total_mem);

	return total_mem;
}

static int cal_ddrc_regs(struct ddr_info *priv)
{
	int ret;

	ret = compute_ddrc(priv->clk,
			   &priv->opt,
			   &priv->conf,
			   &priv->ddr_reg,
			   &priv->dimm,
			   priv->ip_rev);
	if (ret != 0) {
		ERROR("Calculating DDR registers failed\n");
	}

	return ret;
}

#endif /* CONFIG_STATIC_DDR */

static int write_ddrc_regs(struct ddr_info *priv)
{
	int i;
	int ret;

	for (i = 0; i < priv->num_ctlrs; i++) {
		ret = ddrc_set_regs(priv->clk, &priv->ddr_reg, priv->ddr[i], 0);
		if (ret != 0) {
			ERROR("Writing DDR register(s) failed\n");
			return ret;
		}
	}

	return 0;
}

long long dram_init(struct ddr_info *priv
#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
		    , uintptr_t nxp_ccn_hn_f0_addr
#endif
		)
{
	uint64_t time __unused;
	long long dram_size;
	int ret;
	const uint64_t time_base = get_timer_val(0);
	unsigned int ip_rev = get_ddrc_version(priv->ddr[0]);

	int valid_spd_mask __unused;
	int scratch = 0x0;

	priv->ip_rev = ip_rev;

#ifndef CONFIG_STATIC_DDR
	INFO("time base %" PRIu64 " ms\n", time_base);
	debug("Parse DIMM SPD(s)\n");
	valid_spd_mask = parse_spd(priv);

	if (valid_spd_mask < 0) {
		ERROR("Parsing DIMM error\n");
		return valid_spd_mask;
	}

#if defined(NXP_HAS_CCN504) || defined(NXP_HAS_CCN508)
	if (priv->num_ctlrs == 2 || priv->num_ctlrs == 1) {
		ret = disable_unused_ddrc(priv, valid_spd_mask,
					  nxp_ccn_hn_f0_addr);
		if (ret != 0) {
			return ret;
		}
	}
#endif

	time = get_timer_val(time_base);
	INFO("Time after parsing SPD %" PRIu64 " ms\n", time);
	debug("Synthesize configurations\n");
	ret = synthesize_ctlr(priv);
	if (ret != 0) {
		ERROR("Synthesize config error\n");
		return ret;
	}

	debug("Assign binding addresses\n");
	dram_size = assign_addresses(priv);
	if (dram_size == 0) {
		ERROR("Assigning address error\n");
		return -EINVAL;
	}

	debug("Calculate controller registers\n");
	ret = cal_ddrc_regs(priv);
	if (ret != 0) {
		ERROR("Calculate register error\n");
		return ret;
	}

	ret = compute_ddr_phy(priv);
	if (ret != 0) {
		ERROR("Calculating DDR PHY registers failed.\n");
	}

#else
	dram_size = board_static_ddr(priv);
	if (dram_size == 0) {
		ERROR("Error getting static DDR settings.\n");
		return -EINVAL;
	}
#endif

	if (priv->warm_boot_flag == DDR_WARM_BOOT) {
		scratch = (priv->ddr_reg).sdram_cfg[1];
		scratch = scratch & ~(SDRAM_CFG2_D_INIT);
		priv->ddr_reg.sdram_cfg[1] = scratch;
	}
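	/*
	 * Clearing SDRAM_CFG2_D_INIT above skips controller-driven data
	 * initialization on warm boot, so DRAM contents preserved across
	 * the reset are not overwritten.
	 */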

	time = get_timer_val(time_base);
	INFO("Time before programming controller %" PRIu64 " ms\n", time);
	debug("Program controller registers\n");
	ret = write_ddrc_regs(priv);
	if (ret != 0) {
		ERROR("Programming DDRC error\n");
		return ret;
	}

	puts("");
	NOTICE("%lld GB ", dram_size >> 30);
	print_ddr_info(priv->ddr[0]);

	time = get_timer_val(time_base);
	INFO("Time used by DDR driver %" PRIu64 " ms\n", time);

	return dram_size;
}
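
#if 0
/*
 * Illustrative only, never compiled: a minimal sketch of how a platform
 * might call dram_init(). The field values are assumptions; a real
 * platform fills struct ddr_info from SoC- and board-specific data.
 */
static long long plat_dram_init_example(void)
{
	static struct ddr_info info = {
		.num_ctlrs = 1,
		.dimm_on_ctlr = 1,
		/* .mem_base, .clk, .spd_addr, .ddr[0], ... set by platform */
	};

	return dram_init(&info);
}
#endif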