/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <stdlib.h>
#include <commonlib/helpers.h>
#include <console/console.h>
#include <memrange.h>

static inline void range_entry_link(struct range_entry **prev_ptr,
				    struct range_entry *r)
{
	r->next = *prev_ptr;
	*prev_ptr = r;
}

static inline void range_entry_unlink(struct range_entry **prev_ptr,
				      struct range_entry *r)
{
	*prev_ptr = r->next;
	r->next = NULL;
}

static inline void range_entry_unlink_and_free(struct memranges *ranges,
					       struct range_entry **prev_ptr,
					       struct range_entry *r)
{
	range_entry_unlink(prev_ptr, r);
	range_entry_link(&ranges->free_list, r);
}

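/* Pull a range_entry from the free list when one is available; otherwise
 * fall back to malloc(), which is only attempted in the payload-loading
 * stage (ENV_PAYLOAD_LOADER). Returns NULL when no entry can be provided. */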
static struct range_entry *alloc_range(struct memranges *ranges)
{
	if (ranges->free_list != NULL) {
		struct range_entry *r;

		r = ranges->free_list;
		range_entry_unlink(&ranges->free_list, r);
		return r;
	}
	if (ENV_PAYLOAD_LOADER)
		return malloc(sizeof(struct range_entry));
	return NULL;
}

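/* Allocate a new entry covering [begin, end] with the given tag and link it
 * into the list at *prev_ptr. Returns the new entry, or NULL on allocation
 * failure. */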
static inline struct range_entry *
range_list_add(struct memranges *ranges, struct range_entry **prev_ptr,
	       resource_t begin, resource_t end, unsigned long tag)
{
	struct range_entry *new_entry;

	new_entry = alloc_range(ranges);
	if (new_entry == NULL) {
		printk(BIOS_ERR, "Could not allocate range_entry!\n");
		return NULL;
	}
	new_entry->begin = begin;
	new_entry->end = end;
	new_entry->tag = tag;
	range_entry_link(prev_ptr, new_entry);

	return new_entry;
}

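/* Coalesce entries that are adjacent or overlapping and carry the same tag.
 * Entries absorbed by their predecessor are returned to the free list. */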
static void merge_neighbor_entries(struct memranges *ranges)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	/* Merge all neighbors and delete/free the leftover entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry merges with the current one, update
		 * the previous entry to cover the full range and delete the
		 * current one from the list. */
		if (prev->end + 1 >= cur->begin && prev->tag == cur->tag) {
			prev->end = cur->end;
			range_entry_unlink_and_free(ranges, &prev->next, cur);
			/* Set cur to prev so cur->next is valid since cur
			 * was just unlinked and freed. */
			cur = prev;
			continue;
		}

		prev = cur;
	}
}

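/* Remove [begin, end] from the list. Fully covered entries are freed,
 * partially covered entries are clipped, and a removal that lands in the
 * middle of an entry splits it in two. The last argument is unused; it only
 * exists so the function matches the range_action_t signature. */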
static void remove_memranges(struct memranges *ranges,
			     resource_t begin, resource_t end,
			     unsigned long unused)
{
	struct range_entry *cur;
	struct range_entry *next;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;
	for (cur = ranges->entries; cur != NULL; cur = next) {
		resource_t tmp_end;

		/* Cache the next value to handle unlinks. */
		next = cur->next;

		/* No other ranges are affected. */
		if (end < cur->begin)
			break;

		/* The removal range starts after this one. */
		if (begin > cur->end) {
			prev_ptr = &cur->next;
			continue;
		}

		/* The removal range overlaps with the current entry either
		 * partially or fully. However, we need to adjust the removal
		 * range for any holes. */
		if (begin <= cur->begin) {
			begin = cur->begin;

			/* Full removal. */
			if (end >= cur->end) {
				begin = cur->end + 1;
				range_entry_unlink_and_free(ranges, prev_ptr,
							    cur);
				continue;
			}
		}

		/* prev_ptr can be set now that the unlink path wasn't taken. */
		prev_ptr = &cur->next;

		/* Clip the end fragment to do proper splitting. */
		tmp_end = end;
		if (end > cur->end)
			tmp_end = cur->end;

		/* Hole punched in middle of entry. */
		if (begin > cur->begin && tmp_end < cur->end) {
			range_list_add(ranges, &cur->next, end + 1, cur->end,
				       cur->tag);
			cur->end = begin - 1;
			break;
		}

		/* Removal at beginning. */
		if (begin == cur->begin)
			cur->begin = tmp_end + 1;

		/* Removal at end. */
		if (tmp_end == cur->end)
			cur->end = begin - 1;
	}
}

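/* Insert [begin, end] with the given tag. Any overlap with existing entries
 * is removed first so the new entry can be linked at its sorted position,
 * after which same-tag neighbors are merged. */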
static void merge_add_memranges(struct memranges *ranges,
				resource_t begin, resource_t end,
				unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry **prev_ptr;

	prev_ptr = &ranges->entries;

	/* Remove all existing entries covered by the range. */
	remove_memranges(ranges, begin, end, -1);

	/* Find the entry to place the new entry after. Since
	 * remove_memranges() was called above, there is a guaranteed
	 * spot for this new entry. */
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* Found insertion spot before current entry. */
		if (end < cur->begin)
			break;

		/* Keep track of previous entry to insert new entry after it. */
		prev_ptr = &cur->next;

		/* The new entry starts after this one. */
		if (begin > cur->end)
			continue;
	}

	/* Add new entry and merge with neighbors. */
	range_list_add(ranges, prev_ptr, begin, end, tag);
	merge_neighbor_entries(ranges);
}

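/* Re-tag every entry carrying old_tag with new_tag and merge any entries
 * that become adjacent with the same tag as a result. */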
void memranges_update_tag(struct memranges *ranges, unsigned long old_tag,
			  unsigned long new_tag)
{
	struct range_entry *r;

	memranges_each_entry(r, ranges) {
		if (range_entry_tag(r) == old_tag)
			range_entry_update_tag(r, new_tag);
	}

	merge_neighbor_entries(ranges);
}

typedef void (*range_action_t)(struct memranges *ranges,
			       resource_t begin, resource_t end,
			       unsigned long tag);

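/* Align the requested [base, base + size) window to the ranges' granularity
 * and hand the resulting inclusive [begin, end] interval to the action.
 * For example, with align = 12, base = 0x1001 and size = 0x10 become
 * begin = 0x1000 and end = 0x1fff. */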
static void do_action(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag,
		      range_action_t action)
{
	resource_t end;
	resource_t begin;

	if (size == 0)
		return;

	/* The addresses are aligned to (1ULL << ranges->align): the begin address is
	 * aligned down while the end address is aligned up to be conservative
	 * about the full range covered. */
	begin = ALIGN_DOWN(base, POWER_OF_2(ranges->align));
	end = begin + size + (base - begin);
	end = ALIGN_UP(end, POWER_OF_2(ranges->align)) - 1;
	action(ranges, begin, end, tag);
}

void memranges_create_hole(struct memranges *ranges,
			   resource_t base, resource_t size)
{
	do_action(ranges, base, size, -1, remove_memranges);
}

void memranges_insert(struct memranges *ranges,
		      resource_t base, resource_t size, unsigned long tag)
{
	do_action(ranges, base, size, tag, merge_add_memranges);
}

struct collect_context {
	struct memranges *ranges;
	unsigned long tag;
	memrange_filter_t filter;
};

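/* search_global_resources() callback: insert every non-empty resource that
 * passes the optional filter into the memranges being collected. */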
static void collect_ranges(void *gp, struct device *dev, struct resource *res)
{
	struct collect_context *ctx = gp;

	if (res->size == 0)
		return;

	if (ctx->filter == NULL || ctx->filter(dev, res))
		memranges_insert(ctx->ranges, res->base, res->size, ctx->tag);
}

void memranges_add_resources_filter(struct memranges *ranges,
				    unsigned long mask, unsigned long match,
				    unsigned long tag,
				    memrange_filter_t filter)
{
	struct collect_context context;

	/* Only deal with MEM resources. */
	mask |= IORESOURCE_MEM;
	match |= IORESOURCE_MEM;

	context.ranges = ranges;
	context.tag = tag;
	context.filter = filter;
	search_global_resources(mask, match, collect_ranges, &context);
}

void memranges_add_resources(struct memranges *ranges,
			     unsigned long mask, unsigned long match,
			     unsigned long tag)
{
	memranges_add_resources_filter(ranges, mask, match, tag, NULL);
}

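/* Set up an empty memranges with the given log2 alignment and seed its free
 * list with the caller-provided array of range_entry structures. */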
void memranges_init_empty_with_alignment(struct memranges *ranges,
					 struct range_entry *to_free,
					 size_t num_free, unsigned char align)
{
	size_t i;

	ranges->entries = NULL;
	ranges->free_list = NULL;
	ranges->align = align;

	for (i = 0; i < num_free; i++)
		range_entry_link(&ranges->free_list, &to_free[i]);
}

void memranges_init_with_alignment(struct memranges *ranges,
				   unsigned long mask, unsigned long match,
				   unsigned long tag, unsigned char align)
{
	memranges_init_empty_with_alignment(ranges, NULL, 0, align);
	memranges_add_resources(ranges, mask, match, tag);
}

/* Clone a memrange. The new memrange has the same entries as the old one. */
void memranges_clone(struct memranges *newranges, struct memranges *oldranges)
{
	struct range_entry *r, *cur;
	struct range_entry **prev_ptr;

	memranges_init_empty_with_alignment(newranges, NULL, 0, oldranges->align);

	prev_ptr = &newranges->entries;
	memranges_each_entry(r, oldranges) {
		cur = range_list_add(newranges, prev_ptr, r->begin, r->end,
				     r->tag);
		prev_ptr = &cur->next;
	}
}

void memranges_teardown(struct memranges *ranges)
{
	while (ranges->entries != NULL) {
		range_entry_unlink_and_free(ranges, &ranges->entries,
					    ranges->entries);
	}
}

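/* Fill every gap between existing entries, up to but not including limit,
 * with new entries carrying the given tag, then merge neighboring entries. */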
void memranges_fill_holes_up_to(struct memranges *ranges,
				resource_t limit, unsigned long tag)
{
	struct range_entry *cur;
	struct range_entry *prev;

	prev = NULL;
	for (cur = ranges->entries; cur != NULL; cur = cur->next) {
		/* First entry. Just set prev. */
		if (prev == NULL) {
			prev = cur;
			continue;
		}

		/* If the previous entry does not directly precede the current
		 * entry, then add a new entry just after the previous one. */
		if (range_entry_end(prev) != cur->begin) {
			resource_t end;

			end = cur->begin - 1;
			if (end >= limit)
				end = limit - 1;
			range_list_add(ranges, &prev->next,
				       range_entry_end(prev), end, tag);
		}

		prev = cur;

		/* Hit the requested range limit. No other entries after this
		 * are affected. */
		if (cur->begin >= limit)
			break;
	}

	/* Handle the case where the limit was never reached. A new entry needs
	 * to be added to cover the range up to the limit. */
	if (prev != NULL && range_entry_end(prev) < limit)
		range_list_add(ranges, &prev->next, range_entry_end(prev),
			       limit - 1, tag);

	/* Merge all entries that were newly added. */
	merge_neighbor_entries(ranges);
}

struct range_entry *memranges_next_entry(struct memranges *ranges,
					 const struct range_entry *r)
{
	return r->next;
}

/* Find a range entry that satisfies the given constraints to fit a hole that matches the
 * required alignment, is big enough, does not exceed the limit and has a matching tag. */
static const struct range_entry *
memranges_find_entry(struct memranges *ranges, resource_t limit, resource_t size,
		     unsigned char align, unsigned long tag, bool last)
{
	const struct range_entry *r, *last_entry = NULL;
	resource_t base, end;

	if (size == 0)
		return NULL;

	memranges_each_entry(r, ranges) {
		if (r->tag != tag)
			continue;

		base = ALIGN_UP(r->begin, POWER_OF_2(align));
		end = base + size - 1;

		if (end > r->end)
			continue;

		/*
		 * If end for the hole in the current range entry goes beyond the requested
		 * limit, then none of the following ranges can satisfy this request because all
		 * range entries are maintained in increasing order.
		 */
		if (end > limit)
			break;

		if (!last)
			return r;

		last_entry = r;
	}

	return last_entry;
}

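/* Steal a size-byte region below limit out of an entry carrying tag, aligned
 * to 2^align, and report its base through *stolen_base. With from_top set,
 * the highest matching entry is used and the region is carved from its top;
 * otherwise it is carved from the bottom of the first matching entry. The
 * stolen region is removed from the memranges. Returns false if no entry can
 * satisfy the request. */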
bool memranges_steal(struct memranges *ranges, resource_t limit, resource_t size,
			unsigned char align, unsigned long tag, resource_t *stolen_base,
			bool from_top)
{
	const struct range_entry *r;

	r = memranges_find_entry(ranges, limit, size, align, tag, from_top);
	if (r == NULL)
		return false;

	if (from_top) {
		limit = MIN(limit, r->end);
		/* Ensure we're within the range, even aligned down.
		   Proof is simple: If ALIGN_UP(r->begin) would be
		   higher, the stolen range wouldn't fit. */
		assert(r->begin <= ALIGN_DOWN(limit - size + 1, POWER_OF_2(align)));
		*stolen_base = ALIGN_DOWN(limit - size + 1, POWER_OF_2(align));
	} else {
		*stolen_base = ALIGN_UP(r->begin, POWER_OF_2(align));
	}
	memranges_create_hole(ranges, *stolen_base, size);

	return true;
}