// SPDX-License-Identifier: BSD-3-Clause
/*
 * Libpayload NVMe device driver
 * Copyright (C) 2019 secunet Security Networks AG
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <pci.h>
#include <pci/pci.h>
#include <libpayload.h>
#include <storage/storage.h>
#include <storage/nvme.h>

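/*
 * Fields of the Controller Configuration (CC) register at offset 0x14:
 * enable the controller, select the NVM command set, 4 KiB memory pages,
 * round-robin arbitration, no shutdown notification, and 64-byte (2^6)
 * submission / 16-byte (2^4) completion queue entries.
 */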
#define NVME_CC_EN	(1 <<  0)
#define NVME_CC_CSS	(0 <<  4)
#define NVME_CC_MPS	(0 <<  7)
#define NVME_CC_AMS	(0 << 11)
#define NVME_CC_SHN	(0 << 14)
#define NVME_CC_IOSQES	(6 << 16)
#define NVME_CC_IOCQES	(4 << 20)

#define NVME_QUEUE_SIZE 2
#define NVME_SQ_ENTRY_SIZE 64
#define NVME_CQ_ENTRY_SIZE 16

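/*
 * NVME_QUEUE_SIZE must stay a power of two: the ring indices below wrap
 * around with `& (NVME_QUEUE_SIZE - 1)`.
 */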
struct nvme_dev {
	storage_dev_t storage_dev;

	pcidev_t pci_dev;
	void *config;
	struct {
		void *base;
		uint32_t *bell;
		uint16_t idx;	// current slot in the ring, 0 or 1
		uint16_t round;	// phase tag of the previous pass, toggles on wrap
	} queue[4];

	uint64_t *prp_list;
};

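/*
 * Command formats per the NVMe specification: submission queue entries are
 * 16 dwords (64 bytes), completion queue entries 4 dwords (16 bytes).
 */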
struct nvme_s_queue_entry {
	uint32_t dw[16];
};

struct nvme_c_queue_entry {
	uint32_t dw[4];
};

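/*
 * Index into nvme_dev.queue[]. Submission and completion queues alternate;
 * dividing an index by two yields the NVMe queue ID (0 = admin, 1 = I/O).
 */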
enum nvme_queue {
	NVME_ADMIN_QUEUE = 0,
	ads = 0,
	adc = 1,
	NVME_IO_QUEUE = 2,
	ios = 2,
	ioc = 3,
};

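/* NVMe namespaces are treated as fixed media: always report a medium. */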
static storage_poll_t nvme_poll(struct storage_dev *dev)
{
	return POLL_MEDIUM_PRESENT;
}

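/*
 * Issue a single command synchronously: copy it into the next submission
 * queue slot, ring the SQ tail doorbell, then spin until the controller
 * posts a completion whose phase tag (CQ entry dword 3, bit 16) differs
 * from the phase of the previous pass. Returns the completion status
 * field (dword 3, bits 31:17); 0 means success.
 */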
static int nvme_cmd(
		struct nvme_dev *nvme, enum nvme_queue q, const struct nvme_s_queue_entry *cmd)
{
	int sq = q, cq = q + 1;

	void *s_entry = nvme->queue[sq].base + (nvme->queue[sq].idx * NVME_SQ_ENTRY_SIZE);
	memcpy(s_entry, cmd, NVME_SQ_ENTRY_SIZE);
	nvme->queue[sq].idx = (nvme->queue[sq].idx + 1) & (NVME_QUEUE_SIZE - 1);
	write32(nvme->queue[sq].bell, nvme->queue[sq].idx);

	struct nvme_c_queue_entry *c_entry = nvme->queue[cq].base +
		(nvme->queue[cq].idx * NVME_CQ_ENTRY_SIZE);
	while (((read32(&c_entry->dw[3]) >> 16) & 0x1) == nvme->queue[cq].round)
		;
	nvme->queue[cq].idx = (nvme->queue[cq].idx + 1) & (NVME_QUEUE_SIZE - 1);
	write32(nvme->queue[cq].bell, nvme->queue[cq].idx);
	if (nvme->queue[cq].idx == 0)
		nvme->queue[cq].round = (nvme->queue[cq].round + 1) & 1;
	return c_entry->dw[3] >> 17;
}

static int delete_io_submission_queue(struct nvme_dev *nvme)
{
	const struct nvme_s_queue_entry e = {
		.dw[0]  = 0x00,		/* opcode: Delete I/O Submission Queue */
		.dw[10] = ios >> 1,	/* QID 1, as created in create_io_submission_queue() */
	};

	int res = nvme_cmd(nvme, NVME_ADMIN_QUEUE, &e);

	free(nvme->queue[ios].base);
	nvme->queue[ios].base = NULL;
	nvme->queue[ios].bell = NULL;
	nvme->queue[ios].idx  = 0;
	return res;
}

static int delete_io_completion_queue(struct nvme_dev *nvme)
{
	const struct nvme_s_queue_entry e = {
		.dw[0]  = 0x04,		/* opcode: Delete I/O Completion Queue */
		.dw[10] = ioc >> 1,	/* QID 1, as created in create_io_completion_queue() */
	};

	int res = nvme_cmd(nvme, NVME_ADMIN_QUEUE, &e);
	free(nvme->queue[ioc].base);

	nvme->queue[ioc].base  = NULL;
	nvme->queue[ioc].bell  = NULL;
	nvme->queue[ioc].idx   = 0;
	nvme->queue[ioc].round = 0;
	return res;
}

static int delete_admin_queues(struct nvme_dev *nvme)
{
	if (nvme->queue[ios].base || nvme->queue[ioc].base)
		printf("NVMe ERROR: IO queues still active.\n");

	free(nvme->queue[ads].base);
	nvme->queue[ads].base = NULL;
	nvme->queue[ads].bell = NULL;
	nvme->queue[ads].idx  = 0;

	free(nvme->queue[adc].base);
	nvme->queue[adc].base = NULL;
	nvme->queue[adc].bell = NULL;
	nvme->queue[adc].idx  = 0;
	nvme->queue[adc].round = 0;

	return 0;
}

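/*
 * Shut the controller down in reverse order: delete the I/O queues, clear
 * CC.EN (offset 0x14) and wait for CSTS.RDY (offset 0x1c, bit 0) to clear
 * within CAP.TO (CAP[31:24], in 500 ms units), then drop PCI bus mastering.
 */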
static void nvme_detach_device(struct storage_dev *dev)
{
	struct nvme_dev *nvme = (struct nvme_dev *)dev;

	if (delete_io_submission_queue(nvme))
		printf("NVMe ERROR: Failed to delete io submission queue\n");
	if (delete_io_completion_queue(nvme))
		printf("NVMe ERROR: Failed to delete io completion queue\n");
	if (delete_admin_queues(nvme))
		printf("NVMe ERROR: Failed to delete admin queues\n");

	write32(nvme->config + 0x14, 0);

	int status, timeout = (read64(nvme->config) >> 24 & 0xff) * 500;
	do {
		status = read32(nvme->config + 0x1c) & 0x3;
		if (status == 0x2) {
			printf("NVMe ERROR: Failed to disable controller. FATAL ERROR\n");
			break;
		}
		if (timeout < 0) {
			printf("NVMe ERROR: Failed to disable controller. Timeout.\n");
			break;
		}
		timeout -= 10;
		mdelay(10);
	} while (status != 0x0);

	uint16_t command = pci_read_config16(nvme->pci_dev, PCI_COMMAND);
	pci_write_config16(nvme->pci_dev, PCI_COMMAND, command & ~PCI_COMMAND_MASTER);

	free(nvme->prp_list);
}

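/*
 * Read `count` 512-byte blocks starting at LBA `base` into `buffer` with a
 * single NVM Read command (opcode 0x02, namespace 1, SLBA in dwords 10-11,
 * 0-based block count in dword 12). PRP1 covers the first page; a transfer
 * crossing one page boundary uses PRP2 directly, larger ones point PRP2 at
 * a one-page PRP list. The driver caps one command at 512 blocks (256 KiB).
 */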
static int nvme_read(struct nvme_dev *nvme, unsigned char *buffer, uint64_t base, uint16_t count)
{
	if (count == 0 || count > 512)
		return -1;

	struct nvme_s_queue_entry e = {
		.dw[0] = 0x02,
		.dw[1] = 0x1,
		.dw[6] = virt_to_phys(buffer),
		.dw[10] = base,
		.dw[11] = base >> 32,
		.dw[12] = count - 1,
	};

	const unsigned int start_page = (uintptr_t)buffer >> 12;
	const unsigned int end_page = ((uintptr_t)buffer + count * 512 - 1) >> 12;
	if (end_page == start_page) {
		/* No page crossing, PRP2 is reserved */
	} else if (end_page == start_page + 1) {
		/* Crossing exactly one page boundary, PRP2 is second page */
		e.dw[8] = virt_to_phys(buffer + 0x1000) & ~0xfff;
	} else {
		/* Use a single page as PRP list, PRP2 points to the list */
		unsigned int i;
		for (i = 0; i < end_page - start_page; ++i) {
			buffer += 0x1000;
			nvme->prp_list[i] = virt_to_phys(buffer) & ~0xfff;
		}
		e.dw[8] = virt_to_phys(nvme->prp_list);
	}

	return nvme_cmd(nvme, ios, &e);
}

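/* Storage-API wrapper: split arbitrarily large reads into 512-block chunks. */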
static ssize_t nvme_read_blocks512(
		struct storage_dev *const dev,
		const lba_t start, const size_t count, unsigned char *const buf)
{
	unsigned int off = 0;
	while (off < count) {
		const unsigned int blocks = MIN(count - off, 512);
		if (nvme_read((struct nvme_dev *)dev, buf + (off * 512), start + off, blocks))
			return off;
		off += blocks;
	}
	return count;
}

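/*
 * Create I/O Submission Queue (opcode 0x01): CDW10 holds the 0-based queue
 * size and the queue ID, CDW11 binds the queue to completion queue 1 and
 * marks it physically contiguous.
 */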
static int create_io_submission_queue(struct nvme_dev *nvme)
{
	void *sq_buffer = memalign(0x1000, NVME_SQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
	if (!sq_buffer) {
		printf("NVMe ERROR: Failed to allocate memory for io submission queue.\n");
		return -1;
	}
	memset(sq_buffer, 0, NVME_SQ_ENTRY_SIZE * NVME_QUEUE_SIZE);

	struct nvme_s_queue_entry e = {
		.dw[0]  = 0x01,
		.dw[6]  = virt_to_phys(sq_buffer),
		.dw[10] = ((NVME_QUEUE_SIZE - 1) << 16) | (ios >> 1),
		.dw[11] = (1 << 16) | 1,
	};

	int res = nvme_cmd(nvme, NVME_ADMIN_QUEUE, &e);
	if (res) {
		printf("NVMe ERROR: nvme_cmd returned with %i.\n", res);
		free(sq_buffer);
		return res;
	}

	uint8_t cap_dstrd = (read64(nvme->config) >> 32) & 0xf;
	nvme->queue[ios].base = sq_buffer;
	nvme->queue[ios].bell = nvme->config + 0x1000 + (ios * (4 << cap_dstrd));
	nvme->queue[ios].idx = 0;
	return 0;
}

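/*
 * Create I/O Completion Queue (opcode 0x05): CDW10 holds the 0-based queue
 * size and the queue ID, CDW11 marks the queue physically contiguous with
 * interrupts disabled.
 */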
static int create_io_completion_queue(struct nvme_dev *nvme)
{
	void *const cq_buffer = memalign(0x1000, NVME_CQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
	if (!cq_buffer) {
		printf("NVMe ERROR: Failed to allocate memory for io completion queue.\n");
		return -1;
	}
	memset(cq_buffer, 0, NVME_CQ_ENTRY_SIZE * NVME_QUEUE_SIZE);

	const struct nvme_s_queue_entry e = {
		.dw[0]  = 0x05,
		.dw[6]  = virt_to_phys(cq_buffer),
		.dw[10] = ((NVME_QUEUE_SIZE - 1) << 16) | (ioc >> 1),
		.dw[11] = 1,
	};

	int res = nvme_cmd(nvme, NVME_ADMIN_QUEUE, &e);
	if (res) {
		printf("NVMe ERROR: nvme_cmd returned with %i.\n", res);
		free(cq_buffer);
		return res;
	}

	uint8_t cap_dstrd = (read64(nvme->config) >> 32) & 0xf;
	nvme->queue[ioc].base  = cq_buffer;
	nvme->queue[ioc].bell  = nvme->config + 0x1000 + (ioc * (4 << cap_dstrd));
	nvme->queue[ioc].idx   = 0;
	nvme->queue[ioc].round = 0;

	return 0;
}

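/*
 * Admin queues are not created by command: their sizes go into AQA (offset
 * 0x24) and their base addresses into ASQ (0x28) and ACQ (0x30). Doorbells
 * live at offset 0x1000, spaced 4 << CAP.DSTRD bytes apart.
 */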
static int create_admin_queues(struct nvme_dev *nvme)
{
	uint8_t cap_dstrd = (read64(nvme->config) >> 32) & 0xf;
	write32(nvme->config + 0x24, (NVME_QUEUE_SIZE - 1) << 16 | (NVME_QUEUE_SIZE - 1));

	void *sq_buffer = memalign(0x1000, NVME_SQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
	if (!sq_buffer) {
		printf("NVMe ERROR: Failed to allocate memory for admin submission queue\n");
		return -1;
	}
	memset(sq_buffer, 0, NVME_SQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
	write64(nvme->config + 0x28, virt_to_phys(sq_buffer));

	nvme->queue[ads].base = sq_buffer;
	nvme->queue[ads].bell = nvme->config + 0x1000 + (ads * (4 << cap_dstrd));
	nvme->queue[ads].idx = 0;

	void *cq_buffer = memalign(0x1000, NVME_CQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
	if (!cq_buffer) {
		printf("NVMe ERROR: Failed to allocate memory for admin completion queue\n");
		free(sq_buffer);	/* don't leak the submission queue buffer */
		nvme->queue[ads].base = NULL;
		return -1;
	}
	memset(cq_buffer, 0, NVME_CQ_ENTRY_SIZE * NVME_QUEUE_SIZE);
	write64(nvme->config + 0x30, virt_to_phys(cq_buffer));

	nvme->queue[adc].base = cq_buffer;
	nvme->queue[adc].bell = nvme->config + 0x1000 + (adc * (4 << cap_dstrd));
	nvme->queue[adc].idx = 0;
	nvme->queue[adc].round = 0;

	return 0;
}

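/*
 * Bring the controller up following the initialization sequence from the
 * NVMe specification: check CAP.CSS (bit 37) for NVM command set support,
 * disable the controller and wait for CSTS.RDY to clear (CAP.TO gives the
 * worst case in 500 ms units), program the admin queues, re-enable via CC,
 * wait for CSTS.RDY, then create the I/O queue pair.
 */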
static void nvme_init(pcidev_t dev)
{
	printf("NVMe init (Device %02x:%02x.%02x)\n",
			PCI_BUS(dev), PCI_SLOT(dev), PCI_FUNC(dev));

	void *pci_bar0 = phys_to_virt(pci_read_config32(dev, 0x10) & ~0x3ff);

	if (!(read64(pci_bar0) >> 37 & 0x01)) {
		printf("NVMe ERROR: PCIe device does not support the NVMe command set\n");
		return;
	}
	struct nvme_dev *nvme = malloc(sizeof(*nvme));
	if (!nvme) {
		printf("NVMe ERROR: Failed to allocate buffer for nvme driver struct\n");
		return;
	}
	nvme->storage_dev.port_type		= PORT_TYPE_NVME;
	nvme->storage_dev.poll			= nvme_poll;
	nvme->storage_dev.read_blocks512	= nvme_read_blocks512;
	nvme->storage_dev.write_blocks512	= NULL;
	nvme->storage_dev.detach_device		= nvme_detach_device;
	nvme->pci_dev				= dev;
	nvme->config				= pci_bar0;
	nvme->prp_list				= memalign(0x1000, 0x1000);

	if (!nvme->prp_list) {
		printf("NVMe ERROR: Failed to allocate buffer for PRP list\n");
		goto _free_abort;
	}

	const uint32_t cc = NVME_CC_EN | NVME_CC_CSS | NVME_CC_MPS | NVME_CC_AMS | NVME_CC_SHN
			| NVME_CC_IOSQES | NVME_CC_IOCQES;

	write32(nvme->config + 0x14, 0);

	int status, timeout = (read64(nvme->config) >> 24 & 0xff) * 500;
	do {
		status = read32(nvme->config + 0x1c) & 0x3;
		if (status == 0x2) {
			printf("NVMe ERROR: Failed to disable controller. FATAL ERROR\n");
			goto _free_abort;
		}
		if (timeout < 0) {
			printf("NVMe ERROR: Failed to disable controller. Timeout.\n");
			goto _free_abort;
		}
		timeout -= 10;
		mdelay(10);
	} while (status != 0x0);
	if (create_admin_queues(nvme))
		goto _free_abort;
	write32(nvme->config + 0x14, cc);

	timeout = (read64(nvme->config) >> 24 & 0xff) * 500;
	do {
		status = read32(nvme->config + 0x1c) & 0x3;
		if (status == 0x2)
			goto _delete_admin_abort;
		if (timeout < 0)
			goto _delete_admin_abort;
		timeout -= 10;
		mdelay(10);
	} while (status != 0x1);

	uint16_t command = pci_read_config16(dev, PCI_COMMAND);
	pci_write_config16(dev, PCI_COMMAND, command | PCI_COMMAND_MASTER);
	if (create_io_completion_queue(nvme))
		goto _delete_admin_abort;
	if (create_io_submission_queue(nvme))
		goto _delete_completion_abort;
	storage_attach_device((storage_dev_t *)nvme);
	printf("NVMe init done.\n");
	return;

_delete_completion_abort:
	delete_io_completion_queue(nvme);
_delete_admin_abort:
	delete_admin_queues(nvme);
_free_abort:
	free(nvme->prp_list);
	free(nvme);
	printf("NVMe init failed.\n");
}

void nvme_initialize(struct pci_dev *dev)
{
	nvme_init(PCI_DEV(dev->bus, dev->dev, dev->func));
}
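
/*
 * Usage sketch (illustrative only, not part of the driver): payloads
 * normally reach nvme_initialize() from libpayload's storage/PCI setup
 * code. A minimal hand-rolled probe of bus 0 could look like the comment
 * below; nvme_probe_bus0 is a hypothetical name, and 0x0108 is the PCI
 * class/subclass pair (mass storage / NVM controller).
 *
 *	static void nvme_probe_bus0(void)
 *	{
 *		for (int slot = 0; slot < 32; ++slot) {
 *			const pcidev_t dev = PCI_DEV(0, slot, 0);
 *			if (pci_read_config16(dev, 0x00) == 0xffff)
 *				continue;	// vendor ID all-ones: empty slot
 *			if (pci_read_config16(dev, 0x0a) == 0x0108)
 *				nvme_init(dev);
 *		}
 *	}
 */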
405