1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Functions for working with the Flattened Device Tree data format
4 *
5 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
6 * benh@kernel.crashing.org
7 */
8
9 #define pr_fmt(fmt) "OF: fdt: " fmt
10
11 #include <linux/crash_dump.h>
12 #include <linux/crc32.h>
13 #include <linux/kernel.h>
14 #include <linux/initrd.h>
15 #include <linux/memblock.h>
16 #include <linux/mutex.h>
17 #include <linux/of.h>
18 #include <linux/of_fdt.h>
19 #include <linux/sizes.h>
20 #include <linux/string.h>
21 #include <linux/errno.h>
22 #include <linux/slab.h>
23 #include <linux/libfdt.h>
24 #include <linux/debugfs.h>
25 #include <linux/serial_core.h>
26 #include <linux/sysfs.h>
27 #include <linux/random.h>
28
29 #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
30 #include <asm/page.h>
31
32 #include "of_private.h"
33
34 /*
35 * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
36 * cmd_wrap_S_dtb in scripts/Makefile.dtbs
37 */
38 extern uint8_t __dtb_empty_root_begin[];
39 extern uint8_t __dtb_empty_root_end[];
40
41 /*
42 * of_fdt_limit_memory - limit the number of regions in the /memory node
43 * @limit: maximum entries
44 *
45 * Adjust the flattened device tree to have at most 'limit' number of
46 * memory entries in the /memory node. This function may be called
47 * any time after initial_boot_params is set.
48 */
49 void __init of_fdt_limit_memory(int limit)
50 {
51 int memory;
52 int len;
53 const void *val;
54 int cell_size = sizeof(uint32_t)*(dt_root_addr_cells + dt_root_size_cells);
55
56 memory = fdt_path_offset(initial_boot_params, "/memory");
57 if (memory > 0) {
58 val = fdt_getprop(initial_boot_params, memory, "reg", &len);
59 if (len > limit*cell_size) {
60 len = limit*cell_size;
61 pr_debug("Limiting number of entries to %d\n", limit);
62 fdt_setprop(initial_boot_params, memory, "reg", val,
63 len);
64 }
65 }
66 }
67
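/*
 * of_fdt_device_is_available - check whether the FDT node at @node is enabled,
 * i.e. it has no "status" property or the property is "ok"/"okay".
 */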
68 bool of_fdt_device_is_available(const void *blob, unsigned long node)
69 {
70 const char *status = fdt_getprop(blob, node, "status", NULL);
71
72 if (!status)
73 return true;
74
75 if (!strcmp(status, "ok") || !strcmp(status, "okay"))
76 return true;
77
78 return false;
79 }
80
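/*
 * unflatten_dt_alloc - carve @size bytes, aligned to @align, out of the
 * memory chunk at *@mem and advance *@mem past the allocation.
 */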
81 static void *unflatten_dt_alloc(void **mem, unsigned long size,
82 unsigned long align)
83 {
84 void *res;
85
86 *mem = PTR_ALIGN(*mem, align);
87 res = *mem;
88 *mem += size;
89
90 return res;
91 }
92
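/*
 * populate_properties - unflatten the properties of the FDT node at @offset
 * into @np, carving struct property entries out of *@mem. Phandle properties
 * are recorded in np->phandle, and a "name" property is synthesized from the
 * unit name when the blob does not provide one. In dryrun mode only the
 * required size is accounted for.
 */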
93 static void populate_properties(const void *blob,
94 int offset,
95 void **mem,
96 struct device_node *np,
97 const char *nodename,
98 bool dryrun)
99 {
100 struct property *pp, **pprev = NULL;
101 int cur;
102 bool has_name = false;
103
104 pprev = &np->properties;
105 for (cur = fdt_first_property_offset(blob, offset);
106 cur >= 0;
107 cur = fdt_next_property_offset(blob, cur)) {
108 const __be32 *val;
109 const char *pname;
110 u32 sz;
111
112 val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
113 if (!val) {
114 pr_warn("Cannot locate property at 0x%x\n", cur);
115 continue;
116 }
117
118 if (!pname) {
119 pr_warn("Cannot find property name at 0x%x\n", cur);
120 continue;
121 }
122
123 if (!strcmp(pname, "name"))
124 has_name = true;
125
126 pp = unflatten_dt_alloc(mem, sizeof(struct property),
127 __alignof__(struct property));
128 if (dryrun)
129 continue;
130
131 /* We accept flattened tree phandles either in
132 * ePAPR-style "phandle" properties, or the
133 * legacy "linux,phandle" properties. If both
134 * appear and have different values, things
135 * will get weird. Don't do that.
136 */
137 if (!strcmp(pname, "phandle") ||
138 !strcmp(pname, "linux,phandle")) {
139 if (!np->phandle)
140 np->phandle = be32_to_cpup(val);
141 }
142
143 /* And we process the "ibm,phandle" property
144 * used in pSeries dynamic device tree
145 * stuff
146 */
147 if (!strcmp(pname, "ibm,phandle"))
148 np->phandle = be32_to_cpup(val);
149
150 pp->name = (char *)pname;
151 pp->length = sz;
152 pp->value = (__be32 *)val;
153 *pprev = pp;
154 pprev = &pp->next;
155 }
156
157 /* With version 0x10 we may not have the name property,
158 * recreate it here from the unit name if absent
159 */
160 if (!has_name) {
161 const char *p = nodename, *ps = p, *pa = NULL;
162 int len;
163
164 while (*p) {
165 if ((*p) == '@')
166 pa = p;
167 else if ((*p) == '/')
168 ps = p + 1;
169 p++;
170 }
171
172 if (pa < ps)
173 pa = p;
174 len = (pa - ps) + 1;
175 pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
176 __alignof__(struct property));
177 if (!dryrun) {
178 pp->name = "name";
179 pp->length = len;
180 pp->value = pp + 1;
181 *pprev = pp;
182 memcpy(pp->value, ps, len - 1);
183 ((char *)pp->value)[len - 1] = 0;
184 pr_debug("fixed up name for %s -> %s\n",
185 nodename, (char *)pp->value);
186 }
187 }
188 }
189
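/*
 * populate_node - allocate and fill in a struct device_node for the FDT node
 * at @offset, link it under @dad and return it through @pnp. In dryrun mode
 * only the memory requirement is computed.
 */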
190 static int populate_node(const void *blob,
191 int offset,
192 void **mem,
193 struct device_node *dad,
194 struct device_node **pnp,
195 bool dryrun)
196 {
197 struct device_node *np;
198 const char *pathp;
199 int len;
200
201 pathp = fdt_get_name(blob, offset, &len);
202 if (!pathp) {
203 *pnp = NULL;
204 return len;
205 }
206
207 len++;
208
209 np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
210 __alignof__(struct device_node));
211 if (!dryrun) {
212 char *fn;
213 of_node_init(np);
214 np->full_name = fn = ((char *)np) + sizeof(*np);
215
216 memcpy(fn, pathp, len);
217
218 if (dad != NULL) {
219 np->parent = dad;
220 np->sibling = dad->child;
221 dad->child = np;
222 }
223 }
224
225 populate_properties(blob, offset, mem, np, pathp, dryrun);
226 if (!dryrun) {
227 np->name = of_get_property(np, "name", NULL);
228 if (!np->name)
229 np->name = "<NULL>";
230 }
231
232 *pnp = np;
233 return 0;
234 }
235
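/*
 * reverse_nodes - recursively reverse the child lists, restoring the sibling
 * order to the order of the nodes in the source tree.
 */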
236 static void reverse_nodes(struct device_node *parent)
237 {
238 struct device_node *child, *next;
239
240 /* Recurse depth-first into the children */
241 child = parent->child;
242 while (child) {
243 reverse_nodes(child);
244
245 child = child->sibling;
246 }
247
248 /* Reverse the nodes in the child list */
249 child = parent->child;
250 parent->child = NULL;
251 while (child) {
252 next = child->sibling;
253
254 child->sibling = parent->child;
255 parent->child = child;
256 child = next;
257 }
258 }
259
260 /**
261 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
262 * @blob: The parent device tree blob
263 * @mem: Memory chunk to use for allocating device nodes and properties
264 * @dad: Parent struct device_node
265 * @nodepp: The device_node tree created by the call
266 *
267 * Return: The size of unflattened device tree or error code
268 */
269 static int unflatten_dt_nodes(const void *blob,
270 void *mem,
271 struct device_node *dad,
272 struct device_node **nodepp)
273 {
274 struct device_node *root;
275 int offset = 0, depth = 0, initial_depth = 0;
276 #define FDT_MAX_DEPTH 64
277 struct device_node *nps[FDT_MAX_DEPTH];
278 void *base = mem;
279 bool dryrun = !base;
280 int ret;
281
282 if (nodepp)
283 *nodepp = NULL;
284
285 /*
286 * We're unflattening a device sub-tree if @dad is valid. There may be
287 * multiple nodes in the first level of depth. We need to set @depth
288 * to 1 to make fdt_next_node() happy, as it bails out immediately
289 * when a negative @depth is found. Otherwise, all device nodes except
290 * the first one won't be unflattened successfully.
291 */
292 if (dad)
293 depth = initial_depth = 1;
294
295 root = dad;
296 nps[depth] = dad;
297
298 for (offset = 0;
299 offset >= 0 && depth >= initial_depth;
300 offset = fdt_next_node(blob, offset, &depth)) {
301 if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
302 continue;
303
304 if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
305 !of_fdt_device_is_available(blob, offset))
306 continue;
307
308 ret = populate_node(blob, offset, &mem, nps[depth],
309 &nps[depth+1], dryrun);
310 if (ret < 0)
311 return ret;
312
313 if (!dryrun && nodepp && !*nodepp)
314 *nodepp = nps[depth+1];
315 if (!dryrun && !root)
316 root = nps[depth+1];
317 }
318
319 if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
320 pr_err("Error %d processing FDT\n", offset);
321 return -EINVAL;
322 }
323
324 /*
325 * Reverse the child list. Some drivers assume the node order matches
326 * the .dts node order.
327 */
328 if (!dryrun)
329 reverse_nodes(root);
330
331 return mem - base;
332 }
333
334 /**
335 * __unflatten_device_tree - create tree of device_nodes from flat blob
336 * @blob: The blob to expand
337 * @dad: Parent device node
338 * @mynodes: The device_node tree created by the call
339 * @dt_alloc: An allocator that provides a virtual address to memory
340 * for the resulting tree
341 * @detached: if true set OF_DETACHED on @mynodes
342 *
343 * unflattens a device-tree, creating the tree of struct device_node. It also
344 * fills the "name" and "type" pointers of the nodes so the normal device-tree
345 * walking functions can be used.
346 *
347 * Return: NULL on failure or the memory chunk containing the unflattened
348 * device tree on success.
349 */
350 void *__unflatten_device_tree(const void *blob,
351 struct device_node *dad,
352 struct device_node **mynodes,
353 void *(*dt_alloc)(u64 size, u64 align),
354 bool detached)
355 {
356 int size;
357 void *mem;
358 int ret;
359
360 if (mynodes)
361 *mynodes = NULL;
362
363 pr_debug(" -> unflatten_device_tree()\n");
364
365 if (!blob) {
366 pr_debug("No device tree pointer\n");
367 return NULL;
368 }
369
370 pr_debug("Unflattening device tree:\n");
371 pr_debug("magic: %08x\n", fdt_magic(blob));
372 pr_debug("size: %08x\n", fdt_totalsize(blob));
373 pr_debug("version: %08x\n", fdt_version(blob));
374
375 if (fdt_check_header(blob)) {
376 pr_err("Invalid device tree blob header\n");
377 return NULL;
378 }
379
380 /* First pass, scan for size */
381 size = unflatten_dt_nodes(blob, NULL, dad, NULL);
382 if (size <= 0)
383 return NULL;
384
385 size = ALIGN(size, 4);
386 pr_debug(" size is %d, allocating...\n", size);
387
388 /* Allocate memory for the expanded device tree */
389 mem = dt_alloc(size + 4, __alignof__(struct device_node));
390 if (!mem)
391 return NULL;
392
393 memset(mem, 0, size);
394
395 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
396
397 pr_debug(" unflattening %p...\n", mem);
398
399 /* Second pass, do actual unflattening */
400 ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
401
402 if (be32_to_cpup(mem + size) != 0xdeadbeef)
403 pr_warn("End of tree marker overwritten: %08x\n",
404 be32_to_cpup(mem + size));
405
406 if (ret <= 0)
407 return NULL;
408
409 if (detached && mynodes && *mynodes) {
410 of_node_set_flag(*mynodes, OF_DETACHED);
411 pr_debug("unflattened tree is detached\n");
412 }
413
414 pr_debug(" <- unflatten_device_tree()\n");
415 return mem;
416 }
417
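/*
 * kernel_tree_alloc - allocator callback for of_fdt_unflatten_tree(); the
 * @align argument is unused and kzalloc()'s alignment is relied upon.
 */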
418 static void *kernel_tree_alloc(u64 size, u64 align)
419 {
420 return kzalloc(size, GFP_KERNEL);
421 }
422
423 static DEFINE_MUTEX(of_fdt_unflatten_mutex);
424
425 /**
426 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
427 * @blob: Flat device tree blob
428 * @dad: Parent device node
429 * @mynodes: The device tree created by the call
430 *
431 * unflattens the device-tree passed by the firmware, creating the
432 * tree of struct device_node. It also fills the "name" and "type"
433 * pointers of the nodes so the normal device-tree walking functions
434 * can be used.
435 *
436 * Return: NULL on failure or the memory chunk containing the unflattened
437 * device tree on success.
438 */
439 void *of_fdt_unflatten_tree(const unsigned long *blob,
440 struct device_node *dad,
441 struct device_node **mynodes)
442 {
443 void *mem;
444
445 mutex_lock(&of_fdt_unflatten_mutex);
446 mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
447 true);
448 mutex_unlock(&of_fdt_unflatten_mutex);
449
450 return mem;
451 }
452 EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
453
454 /* Everything below here references initial_boot_params directly. */
455 int __initdata dt_root_addr_cells;
456 int __initdata dt_root_size_cells;
457
458 void *initial_boot_params __ro_after_init;
459 phys_addr_t initial_boot_params_pa __ro_after_init;
460
461 #ifdef CONFIG_OF_EARLY_FLATTREE
462
463 static u32 of_fdt_crc32;
464
465 /*
466 * fdt_reserve_elfcorehdr() - reserves memory for elf core header
467 *
468 * This function reserves the memory occupied by an elf core header
469 * described in the device tree. This region contains all the
470 * information about the primary kernel's core image and is used by a
471 * dump capture kernel to access the system memory of the primary kernel.
472 */
473 static void __init fdt_reserve_elfcorehdr(void)
474 {
475 if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
476 return;
477
478 if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
479 pr_warn("elfcorehdr is overlapped\n");
480 return;
481 }
482
483 memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
484
485 pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
486 elfcorehdr_size >> 10, elfcorehdr_addr);
487 }
488
489 /**
490 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
491 *
492 * This function grabs memory from the early allocator for exclusive use
493 * by devices defined in the device tree. It should be called by arch-specific
494 * code once the early allocator (i.e. memblock) has been fully activated.
495 */
496 void __init early_init_fdt_scan_reserved_mem(void)
497 {
498 int n;
499 int res;
500 u64 base, size;
501
502 if (!initial_boot_params)
503 return;
504
505 fdt_scan_reserved_mem();
506 fdt_reserve_elfcorehdr();
507
508 /* Process header /memreserve/ fields */
509 for (n = 0; ; n++) {
510 res = fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
511 if (res) {
512 pr_err("Invalid memory reservation block index %d\n", n);
513 break;
514 }
515 if (!size)
516 break;
517 memblock_reserve(base, size);
518 }
519 }
520
521 /**
522 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
523 */
524 void __init early_init_fdt_reserve_self(void)
525 {
526 if (!initial_boot_params)
527 return;
528
529 /* Reserve the dtb region */
530 memblock_reserve(__pa(initial_boot_params),
531 fdt_totalsize(initial_boot_params));
532 }
533
534 /**
535 * of_scan_flat_dt - scan the flattened tree blob and call a callback on each node.
536 * @it: callback function
537 * @data: context data pointer
538 *
539 * This function is used to scan the flattened device tree; it is
540 * typically used to extract memory information at boot, before the
541 * tree can be unflattened.
542 */
543 int __init of_scan_flat_dt(int (*it)(unsigned long node,
544 const char *uname, int depth,
545 void *data),
546 void *data)
547 {
548 const void *blob = initial_boot_params;
549 const char *pathp;
550 int offset, rc = 0, depth = -1;
551
552 if (!blob)
553 return 0;
554
555 for (offset = fdt_next_node(blob, -1, &depth);
556 offset >= 0 && depth >= 0 && !rc;
557 offset = fdt_next_node(blob, offset, &depth)) {
558
559 pathp = fdt_get_name(blob, offset, NULL);
560 rc = it(offset, pathp, depth, data);
561 }
562 return rc;
563 }
564
565 /**
566 * of_scan_flat_dt_subnodes - scan the sub-nodes of a node and call a callback on each.
567 * @parent: parent node
568 * @it: callback function
569 * @data: context data pointer
570 *
571 * This function is used to scan sub-nodes of a node.
572 */
573 int __init of_scan_flat_dt_subnodes(unsigned long parent,
574 int (*it)(unsigned long node,
575 const char *uname,
576 void *data),
577 void *data)
578 {
579 const void *blob = initial_boot_params;
580 int node;
581
582 fdt_for_each_subnode(node, blob, parent) {
583 const char *pathp;
584 int rc;
585
586 pathp = fdt_get_name(blob, node, NULL);
587 rc = it(node, pathp, data);
588 if (rc)
589 return rc;
590 }
591 return 0;
592 }
593
594 /**
595 * of_get_flat_dt_subnode_by_name - get the subnode by given name
596 *
597 * @node: the parent node
598 * @uname: the name of subnode
599 * Return: offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
600 */
601
602 int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
603 {
604 return fdt_subnode_offset(initial_boot_params, node, uname);
605 }
606
607 /*
608 * of_get_flat_dt_root - find the root node in the flat blob
609 */
610 unsigned long __init of_get_flat_dt_root(void)
611 {
612 return 0;
613 }
614
615 /*
616 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
617 *
618 * This function can be used within an of_scan_flat_dt() callback to get
619 * access to properties.
620 */
621 const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
622 int *size)
623 {
624 return fdt_getprop(initial_boot_params, node, name, size);
625 }
626
627 /**
628 * of_fdt_is_compatible - Return true if given node from the given blob has
629 * compat in its compatible list
630 * @blob: A device tree blob
631 * @node: node to test
632 * @compat: compatible string to compare with compatible list.
633 *
634 * Return: a non-zero value on match with smaller values returned for more
635 * specific compatible values.
636 */
637 static int of_fdt_is_compatible(const void *blob,
638 unsigned long node, const char *compat)
639 {
640 const char *cp;
641 int cplen;
642 unsigned long l, score = 0;
643
644 cp = fdt_getprop(blob, node, "compatible", &cplen);
645 if (cp == NULL)
646 return 0;
647 while (cplen > 0) {
648 score++;
649 if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
650 return score;
651 l = strlen(cp) + 1;
652 cp += l;
653 cplen -= l;
654 }
655
656 return 0;
657 }
658
659 /**
660 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
661 * @node: node to test
662 * @compat: compatible string to compare with compatible list.
663 */
664 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
665 {
666 return of_fdt_is_compatible(initial_boot_params, node, compat);
667 }
668
669 /*
670 * of_flat_dt_match - Return a non-zero score if the node matches a list of compatible values
671 */
672 static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
673 {
674 unsigned int tmp, score = 0;
675
676 if (!compat)
677 return 0;
678
679 while (*compat) {
680 tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
681 if (tmp && (score == 0 || (tmp < score)))
682 score = tmp;
683 compat++;
684 }
685
686 return score;
687 }
688
689 /*
690 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
691 */
692 uint32_t __init of_get_flat_dt_phandle(unsigned long node)
693 {
694 return fdt_get_phandle(initial_boot_params, node);
695 }
696
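/*
 * of_flat_dt_get_machine_name - return the root "model" property, falling
 * back to the root "compatible" property when "model" is absent.
 */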
697 const char * __init of_flat_dt_get_machine_name(void)
698 {
699 const char *name;
700 unsigned long dt_root = of_get_flat_dt_root();
701
702 name = of_get_flat_dt_prop(dt_root, "model", NULL);
703 if (!name)
704 name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
705 return name;
706 }
707
708 /**
709 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
710 *
711 * @default_match: A machine specific ptr to return in case of no match.
712 * @get_next_compat: callback function to return next compatible match table.
713 *
714 * Iterate through machine match tables to find the best match for the machine
715 * compatible string in the FDT.
716 */
717 const void * __init of_flat_dt_match_machine(const void *default_match,
718 const void * (*get_next_compat)(const char * const**))
719 {
720 const void *data = NULL;
721 const void *best_data = default_match;
722 const char *const *compat;
723 unsigned long dt_root;
724 unsigned int best_score = ~1, score = 0;
725
726 dt_root = of_get_flat_dt_root();
727 while ((data = get_next_compat(&compat))) {
728 score = of_flat_dt_match(dt_root, compat);
729 if (score > 0 && score < best_score) {
730 best_data = data;
731 best_score = score;
732 }
733 }
734 if (!best_data) {
735 const char *prop;
736 int size;
737
738 pr_err("\n unrecognized device tree list:\n[ ");
739
740 prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
741 if (prop) {
742 while (size > 0) {
743 printk("'%s' ", prop);
744 size -= strlen(prop) + 1;
745 prop += strlen(prop) + 1;
746 }
747 }
748 printk("]\n\n");
749 return NULL;
750 }
751
752 pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
753
754 return best_data;
755 }
756
757 static void __early_init_dt_declare_initrd(unsigned long start,
758 unsigned long end)
759 {
760 /*
761 * __va() is not yet available this early on some platforms. In that
762 * case, the platform uses phys_initrd_start/phys_initrd_size instead
763 * and does the VA conversion itself.
764 */
765 if (!IS_ENABLED(CONFIG_ARM64) &&
766 !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
767 initrd_start = (unsigned long)__va(start);
768 initrd_end = (unsigned long)__va(end);
769 initrd_below_start_ok = 1;
770 }
771 }
772
773 /**
774 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
775 * @node: reference to node containing initrd location ('chosen')
776 */
777 static void __init early_init_dt_check_for_initrd(unsigned long node)
778 {
779 u64 start, end;
780 int len;
781 const __be32 *prop;
782
783 if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
784 return;
785
786 pr_debug("Looking for initrd properties... ");
787
788 prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
789 if (!prop)
790 return;
791 start = of_read_number(prop, len/4);
792
793 prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
794 if (!prop)
795 return;
796 end = of_read_number(prop, len/4);
797 if (start > end)
798 return;
799
800 __early_init_dt_declare_initrd(start, end);
801 phys_initrd_start = start;
802 phys_initrd_size = end - start;
803
804 pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
805 }
806
807 /**
808 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
809 * tree
810 * @node: reference to node containing elfcorehdr location ('chosen')
811 */
812 static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
813 {
814 const __be32 *prop;
815 int len;
816
817 if (!IS_ENABLED(CONFIG_CRASH_DUMP))
818 return;
819
820 pr_debug("Looking for elfcorehdr property... ");
821
822 prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
823 if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
824 return;
825
826 elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
827 elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
828
829 pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
830 elfcorehdr_addr, elfcorehdr_size);
831 }
832
833 static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
834
835 /*
836 * The main user of linux,usable-memory-range is the crash dump kernel.
837 * Originally there was a single usable-memory region; now there may be
838 * two regions, a low region and a high region.
839 * For compatibility with existing user space and older kdump tools, the
840 * low region is always the last range of linux,usable-memory-range, if it exists.
841 */
842 #define MAX_USABLE_RANGES 2
843
844 /**
845 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
846 * location from flat tree
847 */
848 void __init early_init_dt_check_for_usable_mem_range(void)
849 {
850 struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
851 const __be32 *prop, *endp;
852 int len, i;
853 unsigned long node = chosen_node_offset;
854
855 if ((long)node < 0)
856 return;
857
858 pr_debug("Looking for usable-memory-range property... ");
859
860 prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
861 if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
862 return;
863
864 endp = prop + (len / sizeof(__be32));
865 for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
866 rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
867 rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
868
869 pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
870 i, &rgn[i].base, &rgn[i].size);
871 }
872
873 memblock_cap_memory_range(rgn[0].base, rgn[0].size);
874 for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
875 memblock_add(rgn[i].base, rgn[i].size);
876 }
877
878 #ifdef CONFIG_SERIAL_EARLYCON
879
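/*
 * early_init_dt_scan_chosen_stdout - set up an early console from the
 * "stdout-path" (or legacy "linux,stdout-path") property of /chosen by
 * matching the referenced node against the earlycon table.
 */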
880 int __init early_init_dt_scan_chosen_stdout(void)
881 {
882 int offset;
883 const char *p, *q, *options = NULL;
884 int l;
885 const struct earlycon_id *match;
886 const void *fdt = initial_boot_params;
887 int ret;
888
889 offset = fdt_path_offset(fdt, "/chosen");
890 if (offset < 0)
891 offset = fdt_path_offset(fdt, "/chosen@0");
892 if (offset < 0)
893 return -ENOENT;
894
895 p = fdt_getprop(fdt, offset, "stdout-path", &l);
896 if (!p)
897 p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
898 if (!p || !l)
899 return -ENOENT;
900
901 q = strchrnul(p, ':');
902 if (*q != '\0')
903 options = q + 1;
904 l = q - p;
905
906 /* Get the node specified by stdout-path */
907 offset = fdt_path_offset_namelen(fdt, p, l);
908 if (offset < 0) {
909 pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
910 return 0;
911 }
912
913 for (match = __earlycon_table; match < __earlycon_table_end; match++) {
914 if (!match->compatible[0])
915 continue;
916
917 if (fdt_node_check_compatible(fdt, offset, match->compatible))
918 continue;
919
920 ret = of_setup_earlycon(match, offset, options);
921 if (!ret || ret == -EALREADY)
922 return 0;
923 }
924 return -ENODEV;
925 }
926 #endif
927
928 /*
929 * early_init_dt_scan_root - fetch the top level address and size cells
930 */
931 int __init early_init_dt_scan_root(void)
932 {
933 const __be32 *prop;
934 const void *fdt = initial_boot_params;
935 int node = fdt_path_offset(fdt, "/");
936
937 if (node < 0)
938 return -ENODEV;
939
940 dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
941 dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
942
943 prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
944 if (!WARN(!prop, "No '#size-cells' in root node\n"))
945 dt_root_size_cells = be32_to_cpup(prop);
946 pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
947
948 prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
949 if (!WARN(!prop, "No '#address-cells' in root node\n"))
950 dt_root_addr_cells = be32_to_cpup(prop);
951 pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
952
953 return 0;
954 }
955
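/*
 * dt_mem_next_cell - read an @s-cell big-endian number from *@cellp and
 * advance the pointer past it.
 */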
956 u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
957 {
958 const __be32 *p = *cellp;
959
960 *cellp = p + s;
961 return of_read_number(p, s);
962 }
963
964 /*
965 * early_init_dt_scan_memory - Look for and parse memory nodes
966 */
967 int __init early_init_dt_scan_memory(void)
968 {
969 int node, found_memory = 0;
970 const void *fdt = initial_boot_params;
971
972 fdt_for_each_subnode(node, fdt, 0) {
973 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
974 const __be32 *reg, *endp;
975 int l;
976 bool hotpluggable;
977
978 /* We are scanning "memory" nodes only */
979 if (type == NULL || strcmp(type, "memory") != 0)
980 continue;
981
982 if (!of_fdt_device_is_available(fdt, node))
983 continue;
984
985 reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
986 if (reg == NULL)
987 reg = of_get_flat_dt_prop(node, "reg", &l);
988 if (reg == NULL)
989 continue;
990
991 endp = reg + (l / sizeof(__be32));
992 hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
993
994 pr_debug("memory scan node %s, reg size %d,\n",
995 fdt_get_name(fdt, node, NULL), l);
996
997 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
998 u64 base, size;
999
1000 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1001 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1002
1003 if (size == 0)
1004 continue;
1005 pr_debug(" - %llx, %llx\n", base, size);
1006
1007 early_init_dt_add_memory_arch(base, size);
1008
1009 found_memory = 1;
1010
1011 if (!hotpluggable)
1012 continue;
1013
1014 if (memblock_mark_hotplug(base, size))
1015 pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
1016 base, base + size);
1017 }
1018 }
1019 return found_memory;
1020 }
1021
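/*
 * early_init_dt_scan_chosen - parse the /chosen node: initrd location,
 * elfcorehdr, rng-seed and the "bootargs" command line, then apply the
 * CONFIG_CMDLINE policy to @cmdline.
 */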
1022 int __init early_init_dt_scan_chosen(char *cmdline)
1023 {
1024 int l, node;
1025 const char *p;
1026 const void *rng_seed;
1027 const void *fdt = initial_boot_params;
1028
1029 node = fdt_path_offset(fdt, "/chosen");
1030 if (node < 0)
1031 node = fdt_path_offset(fdt, "/chosen@0");
1032 if (node < 0)
1033 /* Handle the cmdline config options even if no /chosen node */
1034 goto handle_cmdline;
1035
1036 chosen_node_offset = node;
1037
1038 early_init_dt_check_for_initrd(node);
1039 early_init_dt_check_for_elfcorehdr(node);
1040
1041 rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
1042 if (rng_seed && l > 0) {
1043 add_bootloader_randomness(rng_seed, l);
1044
1045 /* try to clear seed so it won't be found. */
1046 fdt_nop_property(initial_boot_params, node, "rng-seed");
1047
1048 /* update CRC check value */
1049 of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1050 fdt_totalsize(initial_boot_params));
1051 }
1052
1053 /* Retrieve command line */
1054 p = of_get_flat_dt_prop(node, "bootargs", &l);
1055 if (p != NULL && l > 0)
1056 strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
1057
1058 handle_cmdline:
1059 /*
1060 * CONFIG_CMDLINE is meant to be a default in case nothing else
1061 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
1062 * is set in which case we override whatever was found earlier.
1063 */
1064 #ifdef CONFIG_CMDLINE
1065 #if defined(CONFIG_CMDLINE_EXTEND)
1066 strlcat(cmdline, " ", COMMAND_LINE_SIZE);
1067 strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1068 #elif defined(CONFIG_CMDLINE_FORCE)
1069 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1070 #else
1071 /* No arguments from boot loader, use kernel's cmdline */
1072 if (!((char *)cmdline)[0])
1073 strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1074 #endif
1075 #endif /* CONFIG_CMDLINE */
1076
1077 pr_debug("Command line is: %s\n", (char *)cmdline);
1078
1079 return 0;
1080 }
1081
1082 #ifndef MIN_MEMBLOCK_ADDR
1083 #define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
1084 #endif
1085 #ifndef MAX_MEMBLOCK_ADDR
1086 #define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
1087 #endif
1088
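/*
 * early_init_dt_add_memory_arch - default (weak) handler that page-aligns and
 * clamps a memory range from the device tree before adding it to memblock.
 */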
1089 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1090 {
1091 const u64 phys_offset = MIN_MEMBLOCK_ADDR;
1092
1093 if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
1094 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1095 base, base + size);
1096 return;
1097 }
1098
1099 if (!PAGE_ALIGNED(base)) {
1100 size -= PAGE_SIZE - (base & ~PAGE_MASK);
1101 base = PAGE_ALIGN(base);
1102 }
1103 size &= PAGE_MASK;
1104
1105 if (base > MAX_MEMBLOCK_ADDR) {
1106 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1107 base, base + size);
1108 return;
1109 }
1110
1111 if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
1112 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1113 ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
1114 size = MAX_MEMBLOCK_ADDR - base + 1;
1115 }
1116
1117 if (base + size < phys_offset) {
1118 pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1119 base, base + size);
1120 return;
1121 }
1122 if (base < phys_offset) {
1123 pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1124 base, phys_offset);
1125 size -= phys_offset - base;
1126 base = phys_offset;
1127 }
1128 memblock_add(base, size);
1129 }
1130
1131 static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1132 {
1133 return memblock_alloc_or_panic(size, align);
1134 }
1135
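/*
 * early_init_dt_verify - check the FDT header, record the blob's virtual and
 * physical addresses, compute the CRC used by /sys/firmware/fdt and read the
 * root #address-cells/#size-cells values.
 */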
1136 bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys)
1137 {
1138 if (!dt_virt)
1139 return false;
1140
1141 /* check device tree validity */
1142 if (fdt_check_header(dt_virt))
1143 return false;
1144
1145 /* Setup flat device-tree pointer */
1146 initial_boot_params = dt_virt;
1147 initial_boot_params_pa = dt_phys;
1148 of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1149 fdt_totalsize(initial_boot_params));
1150
1151 /* Initialize {size,address}-cells info */
1152 early_init_dt_scan_root();
1153
1154 return true;
1155 }
1156
1157
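/*
 * early_init_dt_scan_nodes - run the early scans: /chosen into
 * boot_command_line, memory nodes into memblock, and the
 * linux,usable-memory-range property.
 */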
1158 void __init early_init_dt_scan_nodes(void)
1159 {
1160 int rc;
1161
1162 /* Retrieve various information from the /chosen node */
1163 rc = early_init_dt_scan_chosen(boot_command_line);
1164 if (rc)
1165 pr_warn("No chosen node found, continuing without\n");
1166
1167 /* Setup memory, calling early_init_dt_add_memory_arch */
1168 early_init_dt_scan_memory();
1169
1170 /* Handle linux,usable-memory-range property */
1171 early_init_dt_check_for_usable_mem_range();
1172 }
1173
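/*
 * early_init_dt_scan - verify the FDT passed by the boot agent and, if valid,
 * run the early node scans. Returns false when the blob is missing or invalid.
 */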
1174 bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys)
1175 {
1176 bool status;
1177
1178 status = early_init_dt_verify(dt_virt, dt_phys);
1179 if (!status)
1180 return false;
1181
1182 early_init_dt_scan_nodes();
1183 return true;
1184 }
1185
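/*
 * copy_device_tree - duplicate the FDT blob into memory obtained from the
 * early allocator.
 */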
1186 static void *__init copy_device_tree(void *fdt)
1187 {
1188 int size;
1189 void *dt;
1190
1191 size = fdt_totalsize(fdt);
1192 dt = early_init_dt_alloc_memory_arch(size,
1193 roundup_pow_of_two(FDT_V17_SIZE));
1194
1195 if (dt)
1196 memcpy(dt, fdt, size);
1197
1198 return dt;
1199 }
1200
1201 /**
1202 * unflatten_device_tree - create tree of device_nodes from flat blob
1203 *
1204 * unflattens the device-tree passed by the firmware, creating the
1205 * tree of struct device_node. It also fills the "name" and "type"
1206 * pointers of the nodes so the normal device-tree walking functions
1207 * can be used.
1208 */
1209 void __init unflatten_device_tree(void)
1210 {
1211 void *fdt = initial_boot_params;
1212
1213 /* Save the statically-placed regions in the reserved_mem array */
1214 fdt_scan_reserved_mem_reg_nodes();
1215
1216 /* Populate an empty root node when bootloader doesn't provide one */
1217 if (!fdt) {
1218 fdt = (void *) __dtb_empty_root_begin;
1219 /* fdt_totalsize() will be used for copy size */
1220 if (fdt_totalsize(fdt) >
1221 __dtb_empty_root_end - __dtb_empty_root_begin) {
1222 pr_err("invalid size in dtb_empty_root\n");
1223 return;
1224 }
1225 of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
1226 fdt = copy_device_tree(fdt);
1227 }
1228
1229 __unflatten_device_tree(fdt, NULL, &of_root,
1230 early_init_dt_alloc_memory_arch, false);
1231
1232 /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
1233 of_alias_scan(early_init_dt_alloc_memory_arch);
1234
1235 unittest_unflatten_overlay_base();
1236 }
1237
1238 /**
1239 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
1240 *
1241 * Copies and unflattens the device-tree passed by the firmware, creating the
1242 * tree of struct device_node. It also fills the "name" and "type"
1243 * pointers of the nodes so the normal device-tree walking functions
1244 * can be used. This should only be used when the FDT memory has not been
1245 * reserved, such as when the FDT is built into the kernel init section.
1246 * If the FDT memory is already reserved then unflatten_device_tree()
1247 * should be used instead.
1248 */
1249 void __init unflatten_and_copy_device_tree(void)
1250 {
1251 if (initial_boot_params)
1252 initial_boot_params = copy_device_tree(initial_boot_params);
1253
1254 unflatten_device_tree();
1255 }
1256
1257 #ifdef CONFIG_SYSFS
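/*
 * of_fdt_raw_init - expose the raw FDT blob as /sys/firmware/fdt, provided its
 * CRC still matches the value computed at boot.
 */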
1258 static int __init of_fdt_raw_init(void)
1259 {
1260 static __ro_after_init BIN_ATTR_SIMPLE_ADMIN_RO(fdt);
1261
1262 if (!initial_boot_params)
1263 return 0;
1264
1265 if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
1266 fdt_totalsize(initial_boot_params))) {
1267 pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
1268 return 0;
1269 }
1270 bin_attr_fdt.private = initial_boot_params;
1271 bin_attr_fdt.size = fdt_totalsize(initial_boot_params);
1272 return sysfs_create_bin_file(firmware_kobj, &bin_attr_fdt);
1273 }
1274 late_initcall(of_fdt_raw_init);
1275 #endif
1276
1277 #endif /* CONFIG_OF_EARLY_FLATTREE */
1278