1 /* SPDX-License-Identifier: GPL-2.0-only */
2
3 #include <device/mmio.h>
4 #include <commonlib/helpers.h>
5 #include <console/console.h>
6 #include <fast_spi_def.h>
7 #include <intelblocks/fast_spi.h>
8 #include <soc/pci_devs.h>
9 #include <spi_flash.h>
10 #include <string.h>
11 #include <timer.h>
12
/*
 * Helper to create a FAST_SPI context on API entry: declares a context
 * on the caller's stack and fills in the controller's MMIO base address.
 */
#define BOILERPLATE_CREATE_CTX(ctx)			\
	struct fast_spi_flash_ctx real_ctx;		\
	struct fast_spi_flash_ctx *ctx = &real_ctx;	\
	_fast_spi_flash_get_ctx(ctx)
18
/*
 * Anything that's not success is <0. Provided solely for readability, as these
 * constants are not used outside this file.
 */
enum errors {
	SUCCESS = 0,
	E_TIMEOUT = -1,		/* hardware sequencer did not finish in time */
	E_HW_ERROR = -2,	/* controller flagged a cycle error (FCERR) */
	E_ARGUMENT = -3,	/* caller passed an invalid argument */
};
29
/* Reduce data-passing burden by grouping transaction data in a context. */
struct fast_spi_flash_ctx {
	uintptr_t mmio_base;	/* base of the controller's MMIO registers (SPIBAR) */
};
34
/* Populate @ctx with the FAST_SPI controller's MMIO base address. */
static void _fast_spi_flash_get_ctx(struct fast_spi_flash_ctx *ctx)
{
	ctx->mmio_base = (uintptr_t)fast_spi_get_bar();
}
39
40 /* Read register from the FAST_SPI flash controller. */
fast_spi_flash_ctrlr_reg_read(struct fast_spi_flash_ctx * ctx,uint16_t reg)41 static uint32_t fast_spi_flash_ctrlr_reg_read(struct fast_spi_flash_ctx *ctx,
42 uint16_t reg)
43 {
44 uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
45 return read32p(addr);
46 }
47
48 /* Write to register in FAST_SPI flash controller. */
fast_spi_flash_ctrlr_reg_write(struct fast_spi_flash_ctx * ctx,uint16_t reg,uint32_t val)49 static void fast_spi_flash_ctrlr_reg_write(struct fast_spi_flash_ctx *ctx,
50 uint16_t reg, uint32_t val)
51 {
52 uintptr_t addr = ALIGN_DOWN(ctx->mmio_base + reg, sizeof(uint32_t));
53 write32p(addr, val);
54 }
55
/*
 * Via component field (bits 15-14) we can select either 1st or 2nd flash
 * (in dual flash setups).
 * Via HORD - Header or Data (bits 13-12) - we can select either:
 * - SFDP Header
 * - Param Table Header
 * - Data (JEDEC params, including density)
 *
 * It's okay to rely on SFDP, since the SPI flash controller requires an SFDP
 * 1.5 or newer compliant FAST_SPI flash chip.
 * NOTE: Due to the register layout of the hardware, all accesses will be
 * aligned to a 4 byte boundary.
 */
static uint32_t fast_spi_flash_read_sfdp(struct fast_spi_flash_ctx *ctx,
	uint32_t ptinx_reg)
{
	/* Select the SFDP dword via the index register, then read it back
	   through the data register. */
	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_PTINX, ptinx_reg);
	return fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_PTDATA);
}
75
76 /* Fill FDATAn FIFO in preparation for a write transaction. */
fill_xfer_fifo(struct fast_spi_flash_ctx * ctx,const void * data,size_t len)77 static void fill_xfer_fifo(struct fast_spi_flash_ctx *ctx, const void *data,
78 size_t len)
79 {
80 const uint32_t *data32 = (const uint32_t *)data;
81 for (size_t i = 0; i < len / sizeof(uint32_t); i++)
82 write32p(ctx->mmio_base + SPIBAR_FDATA(i), *data32++);
83
84 const uint8_t *data8 = (const uint8_t *)data32;
85 for (size_t i = 0; i < len % sizeof(uint32_t); i++)
86 write8p(ctx->mmio_base + SPIBAR_FDATA(len / sizeof(uint32_t)) + i, *data8++);
87 }
88
89 /* Drain FDATAn FIFO after a read transaction populates data. */
drain_xfer_fifo(struct fast_spi_flash_ctx * ctx,void * data,size_t len)90 static void drain_xfer_fifo(struct fast_spi_flash_ctx *ctx, void *data,
91 size_t len)
92 {
93 uint32_t *data32 = (uint32_t *)data;
94 for (size_t i = 0; i < len / sizeof(uint32_t); i++)
95 *data32++ = read32p(ctx->mmio_base + SPIBAR_FDATA(i));
96
97 uint8_t *data8 = (uint8_t *)data32;
98 for (size_t i = 0; i < len % sizeof(uint32_t); i++)
99 *data8++ = read8p(ctx->mmio_base + SPIBAR_FDATA(len / sizeof(uint32_t)) + i);
100 }
101
/* Fire up a transfer using the hardware sequencer. */
static void start_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
		uint32_t hsfsts_cycle, uint32_t flash_addr, size_t len)
{
	/* Make sure all W1C status bits get cleared. */
	uint32_t hsfsts = SPIBAR_HSFSTS_W1C_BITS;
	/* Set up transaction parameters. */
	hsfsts |= hsfsts_cycle & SPIBAR_HSFSTS_FCYCLE_MASK;
	/* FDBC encodes the byte count minus one. NOTE(review): erase calls
	 * pass len == 0, so (len - 1) wraps — presumably the FDBC() macro
	 * masks this to a don't-care value for erase cycles; confirm. */
	hsfsts |= SPIBAR_HSFSTS_FDBC(len - 1);

	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_FADDR, flash_addr);
	/* Setting FGO in the same write kicks off the cycle. */
	fast_spi_flash_ctrlr_reg_write(ctx, SPIBAR_HSFSTS_CTL,
				hsfsts | SPIBAR_HSFSTS_FGO);
}
116
wait_for_hwseq_xfer(struct fast_spi_flash_ctx * ctx,uint32_t flash_addr)117 static int wait_for_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
118 uint32_t flash_addr)
119 {
120 struct stopwatch sw;
121 uint32_t hsfsts;
122
123 stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
124 do {
125 hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);
126
127 if (hsfsts & SPIBAR_HSFSTS_FCERR) {
128 printk(BIOS_ERR, "SPI Transaction Error at Flash Offset %x HSFSTS = 0x%08x\n",
129 flash_addr, hsfsts);
130 return E_HW_ERROR;
131 }
132
133 if (hsfsts & SPIBAR_HSFSTS_FDONE)
134 return SUCCESS;
135 } while (!(stopwatch_expired(&sw)));
136
137 printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) at Flash Offset %x HSFSTS = 0x%08x\n",
138 SPIBAR_HWSEQ_XFER_TIMEOUT_MS, flash_addr, hsfsts);
139 return E_TIMEOUT;
140 }
141
wait_for_hwseq_spi_cycle_complete(struct fast_spi_flash_ctx * ctx)142 static int wait_for_hwseq_spi_cycle_complete(struct fast_spi_flash_ctx *ctx)
143 {
144 struct stopwatch sw;
145 uint32_t hsfsts;
146
147 stopwatch_init_msecs_expire(&sw, SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
148 do {
149 hsfsts = fast_spi_flash_ctrlr_reg_read(ctx, SPIBAR_HSFSTS_CTL);
150
151 if (!(hsfsts & SPIBAR_HSFSTS_SCIP))
152 return SUCCESS;
153 } while (!(stopwatch_expired(&sw)));
154
155 return E_TIMEOUT;
156 }
157
/* Execute FAST_SPI flash transfer. This is a blocking call.
 * Waits out any in-flight cycle first, then starts the requested cycle and
 * blocks until it finishes. Returns SUCCESS, E_HW_ERROR or E_TIMEOUT. */
static int exec_sync_hwseq_xfer(struct fast_spi_flash_ctx *ctx,
				uint32_t hsfsts_cycle, uint32_t flash_addr,
				size_t len)
{
	/* Refuse to start while a previous cycle is still in progress. */
	if (wait_for_hwseq_spi_cycle_complete(ctx) != SUCCESS) {
		printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) due to prior"
			" operation at Flash Offset %x\n",
			SPIBAR_HWSEQ_XFER_TIMEOUT_MS, flash_addr);
		return E_TIMEOUT;
	}

	start_hwseq_xfer(ctx, hsfsts_cycle, flash_addr, len);
	return wait_for_hwseq_xfer(ctx, flash_addr);
}
173
fast_spi_cycle_in_progress(void)174 int fast_spi_cycle_in_progress(void)
175 {
176 BOILERPLATE_CREATE_CTX(ctx);
177
178 int ret = wait_for_hwseq_spi_cycle_complete(ctx);
179 if (ret != SUCCESS)
180 printk(BIOS_ERR, "SPI Transaction Timeout (Exceeded %d ms) due to prior"
181 " operation is pending\n", SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
182
183 return ret;
184 }
185
186 /*
187 * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
188 * that the operation does not cross page boundary.
189 */
get_xfer_len(const struct spi_flash * flash,uint32_t addr,size_t len)190 static size_t get_xfer_len(const struct spi_flash *flash, uint32_t addr,
191 size_t len)
192 {
193 size_t xfer_len = MIN(len, SPIBAR_FDATA_FIFO_SIZE);
194 size_t bytes_left = ALIGN_UP(addr, flash->page_size) - addr;
195
196 if (bytes_left)
197 xfer_len = MIN(xfer_len, bytes_left);
198
199 return xfer_len;
200 }
201
fast_spi_flash_erase(const struct spi_flash * flash,uint32_t offset,size_t len)202 static int fast_spi_flash_erase(const struct spi_flash *flash,
203 uint32_t offset, size_t len)
204 {
205 int ret;
206 size_t erase_size;
207 uint32_t erase_cycle;
208
209 BOILERPLATE_CREATE_CTX(ctx);
210
211 if (!IS_ALIGNED(offset, 4 * KiB) || !IS_ALIGNED(len, 4 * KiB)) {
212 printk(BIOS_ERR, "BUG! SPI erase region not sector aligned\n");
213 return E_ARGUMENT;
214 }
215
216 while (len) {
217 if (IS_ALIGNED(offset, 64 * KiB) && (len >= 64 * KiB)) {
218 erase_size = 64 * KiB;
219 erase_cycle = SPIBAR_HSFSTS_CYCLE_64K_ERASE;
220 } else {
221 erase_size = 4 * KiB;
222 erase_cycle = SPIBAR_HSFSTS_CYCLE_4K_ERASE;
223 }
224 printk(BIOS_SPEW, "Erasing flash addr %x + %zu KiB\n",
225 offset, erase_size / KiB);
226
227 ret = exec_sync_hwseq_xfer(ctx, erase_cycle, offset, 0);
228 if (ret != SUCCESS)
229 return ret;
230
231 offset += erase_size;
232 len -= erase_size;
233 }
234
235 return SUCCESS;
236 }
237
fast_spi_flash_read(const struct spi_flash * flash,uint32_t addr,size_t len,void * buf)238 static int fast_spi_flash_read(const struct spi_flash *flash,
239 uint32_t addr, size_t len, void *buf)
240 {
241 int ret;
242 size_t xfer_len;
243 uint8_t *data = buf;
244
245 BOILERPLATE_CREATE_CTX(ctx);
246
247 while (len) {
248 xfer_len = get_xfer_len(flash, addr, len);
249
250 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_READ,
251 addr, xfer_len);
252 if (ret != SUCCESS)
253 return ret;
254
255 drain_xfer_fifo(ctx, data, xfer_len);
256
257 addr += xfer_len;
258 data += xfer_len;
259 len -= xfer_len;
260 }
261
262 return SUCCESS;
263 }
264
fast_spi_flash_write(const struct spi_flash * flash,uint32_t addr,size_t len,const void * buf)265 static int fast_spi_flash_write(const struct spi_flash *flash,
266 uint32_t addr, size_t len, const void *buf)
267 {
268 int ret;
269 size_t xfer_len;
270 const uint8_t *data = buf;
271
272 BOILERPLATE_CREATE_CTX(ctx);
273
274 while (len) {
275 xfer_len = get_xfer_len(flash, addr, len);
276 fill_xfer_fifo(ctx, data, xfer_len);
277
278 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_WRITE,
279 addr, xfer_len);
280 if (ret != SUCCESS)
281 return ret;
282
283 addr += xfer_len;
284 data += xfer_len;
285 len -= xfer_len;
286 }
287
288 return SUCCESS;
289 }
290
fast_spi_flash_status(const struct spi_flash * flash,uint8_t * reg)291 static int fast_spi_flash_status(const struct spi_flash *flash,
292 uint8_t *reg)
293 {
294 int ret;
295 BOILERPLATE_CREATE_CTX(ctx);
296
297 ret = exec_sync_hwseq_xfer(ctx, SPIBAR_HSFSTS_CYCLE_RD_STATUS, 0,
298 sizeof(*reg));
299 if (ret != SUCCESS)
300 return ret;
301
302 drain_xfer_fifo(ctx, reg, sizeof(*reg));
303 return ret;
304 }
305
/* Flash operations backed by the FAST_SPI hardware sequencer. */
const struct spi_flash_ops fast_spi_flash_ops = {
	.read = fast_spi_flash_read,
	.write = fast_spi_flash_write,
	.erase = fast_spi_flash_erase,
	.status = fast_spi_flash_status,
};
312
313 /*
314 * We can't use FDOC and FDOD to read FLCOMP, as previous platforms did.
315 * For details see:
316 * Ch 31, SPI: p. 194
317 * The size of the flash component is always taken from density field in the
318 * SFDP table. FLCOMP.C0DEN is no longer used by the Flash Controller.
319 */
fast_spi_flash_probe(const struct spi_slave * dev,struct spi_flash * flash)320 static int fast_spi_flash_probe(const struct spi_slave *dev,
321 struct spi_flash *flash)
322 {
323 BOILERPLATE_CREATE_CTX(ctx);
324 uint32_t flash_bits;
325 uint32_t ptinx_reg;
326
327 /*
328 * bytes = (bits + 1) / 8;
329 * But we need to do the addition in a way which doesn't overflow for
330 * 4 Gbit devices (flash_bits == 0xffffffff).
331 */
332 ptinx_reg = SPIBAR_PTINX_COMP_0 | SPIBAR_PTINX_HORD_JEDEC | SFDP_PARAM_DENSITY;
333 flash_bits = fast_spi_flash_read_sfdp(ctx, ptinx_reg);
334 flash->size = (flash_bits >> 3) + 1;
335
336 /*
337 * Now check if we have a second flash component.
338 * Check SFDP header for the SFDP signature. If valid, then 2nd component is present.
339 * Increase the flash size if 2nd component is found, analogically like the 1st
340 * component.
341 */
342 ptinx_reg = SPIBAR_PTINX_COMP_1 | SPIBAR_PTINX_HORD_SFDP | SFDP_HDR_SIG;
343 if (fast_spi_flash_read_sfdp(ctx, ptinx_reg) == SFDP_SIGNATURE) {
344 ptinx_reg = SPIBAR_PTINX_COMP_1 | SPIBAR_PTINX_HORD_JEDEC | SFDP_PARAM_DENSITY;
345 flash_bits = fast_spi_flash_read_sfdp(ctx, ptinx_reg);
346 flash->size += ((flash_bits >> 3) + 1);
347 }
348
349 memcpy(&flash->spi, dev, sizeof(*dev));
350
351 /* Can erase both 4 KiB and 64 KiB chunks. Declare the smaller size. */
352 flash->sector_size = 4 * KiB;
353 flash->page_size = 256;
354 /*
355 * FIXME: Get erase+cmd, and status_cmd from SFDP.
356 *
357 * flash->erase_cmd = ???
358 * flash->status_cmd = ???
359 */
360
361 flash->ops = &fast_spi_flash_ops;
362 return 0;
363 }
364
fast_spi_flash_ctrlr_setup(const struct spi_slave * dev)365 static int fast_spi_flash_ctrlr_setup(const struct spi_slave *dev)
366 {
367 if (dev->cs != 0) {
368 printk(BIOS_ERR, "%s: Invalid CS for fast SPI bus=0x%x,cs=0x%x!\n",
369 __func__, dev->bus, dev->cs);
370 return -1;
371 }
372
373 return 0;
374 }
375
/* Flash Protected Range (FPR) register layout. */
#define SPI_FPR_SHIFT		12	/* base/limit fields are in 4 KiB units */
#define SPI_FPR_MASK		0x7fff
#define SPI_FPR_BASE_SHIFT	0
#define SPI_FPR_LIMIT_SHIFT	16
#define SPI_FPR_RPE		(1 << 15) /* Read Protect */
#define SPI_FPR_WPE		(1 << 31) /* Write Protect */
/* Pack byte addresses [base, limit] into the FPR base/limit fields. */
#define SPI_FPR(base, limit)	\
	(((((limit) >> SPI_FPR_SHIFT) & SPI_FPR_MASK) << SPI_FPR_LIMIT_SHIFT) |\
	 ((((base) >> SPI_FPR_SHIFT) & SPI_FPR_MASK) << SPI_FPR_BASE_SHIFT))
385
386 /*
387 * Protect range of SPI flash defined by [start, start+size-1] using Flash
388 * Protected Range (FPR) register if available.
389 */
fast_spi_flash_protect(const struct spi_flash * flash,const struct region * region,const enum ctrlr_prot_type type)390 static int fast_spi_flash_protect(const struct spi_flash *flash,
391 const struct region *region,
392 const enum ctrlr_prot_type type)
393 {
394 u32 start = region_offset(region);
395 u32 end = start + region_sz(region) - 1;
396 u32 reg;
397 u32 protect_mask = 0;
398 int fpr;
399 uintptr_t fpr_base;
400 BOILERPLATE_CREATE_CTX(ctx);
401
402 fpr_base = ctx->mmio_base + SPIBAR_FPR_BASE;
403
404 /* Find first empty FPR */
405 for (fpr = 0; fpr < SPIBAR_FPR_MAX; fpr++) {
406 reg = read32p(fpr_base);
407 if (reg == 0)
408 break;
409 fpr_base += sizeof(uint32_t);
410 }
411
412 if (fpr >= SPIBAR_FPR_MAX) {
413 printk(BIOS_ERR, "No SPI FPR free!\n");
414 return -1;
415 }
416
417 switch (type) {
418 case WRITE_PROTECT:
419 protect_mask |= SPI_FPR_WPE;
420 break;
421 case READ_PROTECT:
422 protect_mask |= SPI_FPR_RPE;
423 break;
424 case READ_WRITE_PROTECT:
425 protect_mask |= (SPI_FPR_RPE | SPI_FPR_WPE);
426 break;
427 default:
428 printk(BIOS_ERR, "Seeking invalid protection!\n");
429 return -1;
430 }
431
432 /* Set protected range base and limit */
433 reg = SPI_FPR(start, end) | protect_mask;
434
435 /* Set the FPR register and verify it is protected */
436 write32p(fpr_base, reg);
437 reg = read32p(fpr_base);
438 if (!(reg & protect_mask)) {
439 printk(BIOS_ERR, "Unable to set SPI FPR %d\n", fpr);
440 return -1;
441 }
442
443 printk(BIOS_INFO, "%s: FPR %d is enabled for range 0x%08x-0x%08x\n",
444 __func__, fpr, start, end);
445 return 0;
446 }
447
/* Controller descriptor registered with the SPI core. */
const struct spi_ctrlr fast_spi_flash_ctrlr = {
	.setup = fast_spi_flash_ctrlr_setup,
	.max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
	.flash_probe = fast_spi_flash_probe,
	.flash_protect = fast_spi_flash_protect,
};
454