/* Copyright 2013 The ChromiumOS Authors
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "2sysincludes.h"
#include "cgptlib.h"
#include "cgptlib_internal.h"
#include "crc32.h"
#include "gpt.h"
#include "gpt_misc.h"

static const int MIN_SECTOR_SIZE = 512;

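/* Return the number of sectors needed to hold the partition entry array
 * described by the header, rounding up to a whole sector. */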
size_t CalculateEntriesSectors(GptHeader *h, uint32_t sector_bytes)
{
	size_t bytes = h->number_of_entries * h->size_of_entry;
	size_t ret = (bytes + sector_bytes - 1) / sector_bytes;
	return ret;
}

int CheckParameters(GptData *gpt)
{
	/* Only support 512-byte or larger sectors that are a power of 2 */
	if (gpt->sector_bytes < MIN_SECTOR_SIZE ||
			(gpt->sector_bytes & (gpt->sector_bytes - 1)) != 0)
		return GPT_ERROR_INVALID_SECTOR_SIZE;

	/*
	 * gpt_drive_sectors should be reasonable. It cannot be unset, and it
	 * cannot differ from streaming_drive_sectors if the GPT structs are
	 * stored on the same device.
	 */
	if (gpt->gpt_drive_sectors == 0 ||
		(!(gpt->flags & GPT_FLAG_EXTERNAL) &&
		 gpt->gpt_drive_sectors != gpt->streaming_drive_sectors)) {
		return GPT_ERROR_INVALID_SECTOR_NUMBER;
	}

	/*
	 * Sector count of a drive should be reasonable. If the given value is
	 * too small to contain the basic GPT structure (PMBR + headers +
	 * entries), the value is wrong.
	 */
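	/*
	 * Rough lower bound on that layout: 1 PMBR sector plus two copies of
	 * (1 header sector + the sectors holding MIN_NUMBER_OF_ENTRIES
	 * entries).
	 */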
	if (gpt->gpt_drive_sectors <
		(1 + 2 * (1 + MIN_NUMBER_OF_ENTRIES /
				(gpt->sector_bytes / sizeof(GptEntry)))))
		return GPT_ERROR_INVALID_SECTOR_NUMBER;

	return GPT_SUCCESS;
}

uint32_t HeaderCrc(GptHeader *h)
{
	uint32_t crc32, original_crc32;

	/* Original CRC is calculated with the CRC field set to 0. */
	original_crc32 = h->header_crc32;
	h->header_crc32 = 0;
	crc32 = Crc32((const uint8_t *)h, h->size);
	h->header_crc32 = original_crc32;

	return crc32;
}

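/* Returns 0 if the header passes all of the checks below, 1 otherwise. */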
int CheckHeader(GptHeader *h, int is_secondary,
		uint64_t streaming_drive_sectors,
		uint64_t gpt_drive_sectors, uint32_t flags,
		uint32_t sector_bytes)
{
	if (!h)
		return 1;

	/*
	 * Make sure we're looking at a header of reasonable size before
	 * attempting to calculate CRC.
	 */
	if (memcmp(h->signature, GPT_HEADER_SIGNATURE,
		   GPT_HEADER_SIGNATURE_SIZE) &&
	    memcmp(h->signature, GPT_HEADER_SIGNATURE2,
		   GPT_HEADER_SIGNATURE_SIZE))
		return 1;
	if (h->revision != GPT_HEADER_REVISION)
		return 1;
	if (h->size < MIN_SIZE_OF_HEADER || h->size > MAX_SIZE_OF_HEADER)
		return 1;

	/* Check CRC before looking at remaining fields */
	if (HeaderCrc(h) != h->header_crc32)
		return 1;

	/* Reserved fields must be zero. */
	if (h->reserved_zero)
		return 1;

	/* Could check that padding is zero, but that doesn't matter to us. */

	/*
	 * If the entry size is different from our struct, we won't be able to
	 * parse it.  Technically, any size 2^N where N >= 7 is valid.
	 */
	if (h->size_of_entry != sizeof(GptEntry))
		return 1;
	if ((h->number_of_entries < MIN_NUMBER_OF_ENTRIES) ||
	    (h->number_of_entries > MAX_NUMBER_OF_ENTRIES) ||
	    (!(flags & GPT_FLAG_EXTERNAL) &&
	    h->number_of_entries != MAX_NUMBER_OF_ENTRIES))
		return 1;

	/*
	 * Check locations for the header and its entries.  The primary
	 * immediately follows the PMBR, and is followed by its entries.  The
	 * secondary is at the end of the drive, preceded by its entries.
	 */
	if (is_secondary) {
		if (h->my_lba != gpt_drive_sectors - GPT_HEADER_SECTORS)
			return 1;
		if (h->entries_lba != h->my_lba - CalculateEntriesSectors(h,
								sector_bytes))
			return 1;
	} else {
		if (h->my_lba != GPT_PMBR_SECTORS)
			return 1;
		if (h->entries_lba < h->my_lba + 1)
			return 1;
	}

	/* FirstUsableLBA <= LastUsableLBA. */
	if (h->first_usable_lba > h->last_usable_lba)
		return 1;

	if (flags & GPT_FLAG_EXTERNAL) {
		if (h->last_usable_lba >= streaming_drive_sectors) {
			return 1;
		}
		return 0;
	}

	/*
	 * FirstUsableLBA must be after the end of the primary GPT table array.
	 * LastUsableLBA must be before the start of the secondary GPT table
	 * array.
	 */
	/* TODO(namnguyen): Also check for padding between header & entries. */
	if (h->first_usable_lba < 2 + CalculateEntriesSectors(h, sector_bytes))
		return 1;
	if (h->last_usable_lba >=
			streaming_drive_sectors - 1 - CalculateEntriesSectors(h,
								sector_bytes))
		return 1;

	/* Success */
	return 0;
}

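/* Return true if the entry's type GUID marks it as a ChromeOS kernel
 * partition. */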
int IsKernelEntry(const GptEntry *e)
{
	static Guid chromeos_kernel = GPT_ENT_TYPE_CHROMEOS_KERNEL;
	return !memcmp(&e->type, &chromeos_kernel, sizeof(Guid));
}

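/*
 * Validate the entry array against the given header: entries CRC, usable
 * region bounds, overlap between entries, and uniqueness of each entry's
 * GUID.  Returns GPT_SUCCESS (0) or a GPT_ERROR_* code.
 */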
int CheckEntries(GptEntry *entries, GptHeader *h)
{
	if (!entries)
		return GPT_ERROR_INVALID_ENTRIES;
	GptEntry *entry;
	uint32_t crc32;
	uint32_t i;

	/* Check CRC before examining entries. */
	crc32 = Crc32((const uint8_t *)entries,
		      h->size_of_entry * h->number_of_entries);
	if (crc32 != h->entries_crc32)
		return GPT_ERROR_CRC_CORRUPTED;

	/* Check all entries. */
	for (i = 0, entry = entries; i < h->number_of_entries; i++, entry++) {
		GptEntry *e2;
		uint32_t i2;

		if (IsUnusedEntry(entry))
			continue;

		/* Entry must be in valid region. */
		if ((entry->starting_lba < h->first_usable_lba) ||
		    (entry->ending_lba > h->last_usable_lba) ||
		    (entry->ending_lba < entry->starting_lba))
			return GPT_ERROR_OUT_OF_REGION;

		/* Entry must not overlap other entries. */
		for (i2 = 0, e2 = entries; i2 < h->number_of_entries;
		     i2++, e2++) {
			if (i2 == i || IsUnusedEntry(e2))
				continue;

			if ((entry->starting_lba >= e2->starting_lba) &&
			    (entry->starting_lba <= e2->ending_lba))
				return GPT_ERROR_START_LBA_OVERLAP;
			if ((entry->ending_lba >= e2->starting_lba) &&
			    (entry->ending_lba <= e2->ending_lba))
				return GPT_ERROR_END_LBA_OVERLAP;

			/* UniqueGuid field must be unique. */
			if (0 == memcmp(&entry->unique, &e2->unique,
					sizeof(Guid)))
				return GPT_ERROR_DUP_GUID;
		}
	}

	/* Success */
	return 0;
}

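/*
 * Compare the fields that must match between the primary and secondary
 * headers.  Fields that legitimately differ between the two copies
 * (my_lba, alternate_lba, entries_lba, header_crc32) are not compared.
 * Returns 0 if the headers match, 1 otherwise.
 */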
int HeaderFieldsSame(GptHeader *h1, GptHeader *h2)
{
	if (memcmp(h1->signature, h2->signature, sizeof(h1->signature)))
		return 1;
	if (h1->revision != h2->revision)
		return 1;
	if (h1->size != h2->size)
		return 1;
	if (h1->reserved_zero != h2->reserved_zero)
		return 1;
	if (h1->first_usable_lba != h2->first_usable_lba)
		return 1;
	if (h1->last_usable_lba != h2->last_usable_lba)
		return 1;
	if (memcmp(&h1->disk_uuid, &h2->disk_uuid, sizeof(Guid)))
		return 1;
	if (h1->number_of_entries != h2->number_of_entries)
		return 1;
	if (h1->size_of_entry != h2->size_of_entry)
		return 1;
	if (h1->entries_crc32 != h2->entries_crc32)
		return 1;

	return 0;
}

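/*
 * Check the overall sanity of the GPT.  At least one valid header and one
 * valid set of entries are required; which copies are valid is recorded in
 * gpt->valid_headers and gpt->valid_entries, and copies carrying the
 * "ignore" signature are recorded in gpt->ignored.
 */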
int GptValidityCheck(GptData *gpt)
{
	int retval;
	GptHeader *header1 = (GptHeader *)(gpt->primary_header);
	GptHeader *header2 = (GptHeader *)(gpt->secondary_header);
	GptEntry *entries1 = (GptEntry *)(gpt->primary_entries);
	GptEntry *entries2 = (GptEntry *)(gpt->secondary_entries);
	GptHeader *goodhdr = NULL;

	gpt->valid_headers = 0;
	gpt->valid_entries = 0;
	gpt->ignored = 0;

	retval = CheckParameters(gpt);
	if (retval != GPT_SUCCESS)
		return retval;

	/* Check both headers; we need at least one valid header. */
	if (0 == CheckHeader(header1, 0, gpt->streaming_drive_sectors,
			     gpt->gpt_drive_sectors, gpt->flags,
			     gpt->sector_bytes)) {
		gpt->valid_headers |= MASK_PRIMARY;
		goodhdr = header1;
		if (0 == CheckEntries(entries1, goodhdr))
			gpt->valid_entries |= MASK_PRIMARY;
	} else if (header1 && !memcmp(header1->signature,
		   GPT_HEADER_SIGNATURE_IGNORED, GPT_HEADER_SIGNATURE_SIZE)) {
		gpt->ignored |= MASK_PRIMARY;
	}
	if (0 == CheckHeader(header2, 1, gpt->streaming_drive_sectors,
			     gpt->gpt_drive_sectors, gpt->flags,
			     gpt->sector_bytes)) {
		gpt->valid_headers |= MASK_SECONDARY;
		if (!goodhdr)
			goodhdr = header2;
		/* Check entries2 against header1 if it was good, to catch a
		 * mismatch between the two copies. */
		if (0 == CheckEntries(entries2, goodhdr))
			gpt->valid_entries |= MASK_SECONDARY;
	} else if (header2 && !memcmp(header2->signature,
		   GPT_HEADER_SIGNATURE_IGNORED, GPT_HEADER_SIGNATURE_SIZE)) {
		gpt->ignored |= MASK_SECONDARY;
	}

	if (!gpt->valid_headers)
		return GPT_ERROR_INVALID_HEADERS;

	/*
	 * If both headers are good but neither set of entries was good, check
	 * the entries with the secondary header.
	 */
	if (MASK_BOTH == gpt->valid_headers && !gpt->valid_entries) {
		if (0 == CheckEntries(entries1, header2))
			gpt->valid_entries |= MASK_PRIMARY;
		if (0 == CheckEntries(entries2, header2))
			gpt->valid_entries |= MASK_SECONDARY;
		if (gpt->valid_entries) {
			/*
			 * Sure enough, header2 had a good CRC for one of the
			 * entries.  Mark header1 invalid, so we'll update its
			 * entries CRC.
			 */
			gpt->valid_headers &= ~MASK_PRIMARY;
			goodhdr = header2;
		}
	}

	if (!gpt->valid_entries)
		return GPT_ERROR_INVALID_ENTRIES;

	/*
	 * Now that we've determined which header contains a good CRC for
	 * the entries, make sure the headers are otherwise identical.
	 */
	if (MASK_BOTH == gpt->valid_headers &&
	    0 != HeaderFieldsSame(header1, header2))
		gpt->valid_headers &= ~MASK_SECONDARY;

	/*
	 * When we're ignoring a GPT, make it look in memory like the other one
	 * and pretend that everything is fine (until we try to save).
	 */
	if (MASK_NONE != gpt->ignored) {
		GptRepair(gpt);
		gpt->modified = 0;
	}

	return GPT_SUCCESS;
}

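/*
 * Repair the bad copy of the header and/or entries from the good copy, and
 * set the appropriate GPT_MODIFIED_* bits in gpt->modified so the caller
 * knows what needs to be written back to disk.
 */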
void GptRepair(GptData *gpt)
{
	GptHeader *header1 = (GptHeader *)(gpt->primary_header);
	GptHeader *header2 = (GptHeader *)(gpt->secondary_header);
	GptEntry *entries1 = (GptEntry *)(gpt->primary_entries);
	GptEntry *entries2 = (GptEntry *)(gpt->secondary_entries);
	int entries_size;

	/* Need at least one good header and one good set of entries. */
	if (MASK_NONE == gpt->valid_headers || MASK_NONE == gpt->valid_entries)
		return;

	/* Repair headers if necessary */
	if (MASK_PRIMARY == gpt->valid_headers) {
		/* Primary is good, secondary is bad */
		memcpy(header2, header1, sizeof(GptHeader));
		header2->my_lba = gpt->gpt_drive_sectors - GPT_HEADER_SECTORS;
		header2->alternate_lba = GPT_PMBR_SECTORS;  /* Second sector. */
		header2->entries_lba = header2->my_lba -
			CalculateEntriesSectors(header1, gpt->sector_bytes);
		header2->header_crc32 = HeaderCrc(header2);
		gpt->modified |= GPT_MODIFIED_HEADER2;
	}
	else if (MASK_SECONDARY == gpt->valid_headers) {
		/* Secondary is good, primary is bad */
		memcpy(header1, header2, sizeof(GptHeader));
		header1->my_lba = GPT_PMBR_SECTORS;  /* Second sector. */
		header1->alternate_lba =
			gpt->streaming_drive_sectors - GPT_HEADER_SECTORS;
		/* TODO (namnguyen): Preserve (header, entries) padding. */
		header1->entries_lba = header1->my_lba + 1;
		header1->header_crc32 = HeaderCrc(header1);
		gpt->modified |= GPT_MODIFIED_HEADER1;
	}
	gpt->valid_headers = MASK_BOTH;

	/* Repair entries if necessary */
	entries_size = header1->size_of_entry * header1->number_of_entries;
	if (MASK_PRIMARY == gpt->valid_entries) {
		/* Primary is good, secondary is bad */
		memcpy(entries2, entries1, entries_size);
		gpt->modified |= GPT_MODIFIED_ENTRIES2;
	}
	else if (MASK_SECONDARY == gpt->valid_entries) {
		/* Secondary is good, primary is bad */
		memcpy(entries1, entries2, entries_size);
		gpt->modified |= GPT_MODIFIED_ENTRIES1;
	}
	gpt->valid_entries = MASK_BOTH;
}

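/*
 * Accessors for per-entry attribute bits.  "required" and "legacy_boot" are
 * standard GPT attribute flags; successful, tries, priority, and error
 * counter are packed into the gpt_att bit-field used by ChromeOS, with bit
 * positions given by the CGPT_ATTRIBUTE_*_OFFSET/_MASK constants in
 * cgptlib_internal.h.
 */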
int GetEntryRequired(const GptEntry *e)
{
	return e->attrs.fields.required;
}

int GetEntryLegacyBoot(const GptEntry *e)
{
	return e->attrs.fields.legacy_boot;
}

int GetEntrySuccessful(const GptEntry *e)
{
	return (e->attrs.fields.gpt_att & CGPT_ATTRIBUTE_SUCCESSFUL_MASK) >>
		CGPT_ATTRIBUTE_SUCCESSFUL_OFFSET;
}

int GetEntryPriority(const GptEntry *e)
{
	return (e->attrs.fields.gpt_att & CGPT_ATTRIBUTE_PRIORITY_MASK) >>
		CGPT_ATTRIBUTE_PRIORITY_OFFSET;
}

int GetEntryTries(const GptEntry *e)
{
	return (e->attrs.fields.gpt_att & CGPT_ATTRIBUTE_TRIES_MASK) >>
		CGPT_ATTRIBUTE_TRIES_OFFSET;
}

int GetEntryErrorCounter(const GptEntry *e)
{
	return (e->attrs.fields.gpt_att & CGPT_ATTRIBUTE_ERROR_COUNTER_MASK) >>
		CGPT_ATTRIBUTE_ERROR_COUNTER_OFFSET;
}

void SetEntryRequired(GptEntry *e, int required)
{
	e->attrs.fields.required = required;
}

void SetEntryLegacyBoot(GptEntry *e, int legacy_boot)
{
	e->attrs.fields.legacy_boot = legacy_boot;
}

void SetEntrySuccessful(GptEntry *e, int successful)
{
	if (successful)
		e->attrs.fields.gpt_att |= CGPT_ATTRIBUTE_SUCCESSFUL_MASK;
	else
		e->attrs.fields.gpt_att &= ~CGPT_ATTRIBUTE_SUCCESSFUL_MASK;
}

void SetEntryPriority(GptEntry *e, int priority)
{
	e->attrs.fields.gpt_att &= ~CGPT_ATTRIBUTE_PRIORITY_MASK;
	e->attrs.fields.gpt_att |=
		(priority << CGPT_ATTRIBUTE_PRIORITY_OFFSET) &
		CGPT_ATTRIBUTE_PRIORITY_MASK;
}

void SetEntryTries(GptEntry *e, int tries)
{
	e->attrs.fields.gpt_att &= ~CGPT_ATTRIBUTE_TRIES_MASK;
	e->attrs.fields.gpt_att |= (tries << CGPT_ATTRIBUTE_TRIES_OFFSET) &
		CGPT_ATTRIBUTE_TRIES_MASK;
}

void SetEntryErrorCounter(GptEntry *e, int error_counter)
{
	e->attrs.fields.gpt_att &= ~CGPT_ATTRIBUTE_ERROR_COUNTER_MASK;
	e->attrs.fields.gpt_att |=
		(error_counter << CGPT_ATTRIBUTE_ERROR_COUNTER_OFFSET) &
		CGPT_ATTRIBUTE_ERROR_COUNTER_MASK;
}

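/*
 * Copy the unique GUID of the currently selected kernel entry
 * (gpt->current_kernel) into dest, which must be able to hold a Guid.
 */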
void GetCurrentKernelUniqueGuid(GptData *gpt, void *dest)
{
	GptEntry *entries = (GptEntry *)gpt->primary_entries;
	GptEntry *e = entries + gpt->current_kernel;
	memcpy(dest, &e->unique, sizeof(Guid));
}

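/*
 * Mark the primary GPT as modified: recompute its CRCs and propagate the
 * change to the secondary copy via GptRepair().
 */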
void GptModified(GptData *gpt)
{
	GptHeader *header = (GptHeader *)gpt->primary_header;

	/* Update the CRCs */
	header->entries_crc32 = Crc32(gpt->primary_entries,
				      header->size_of_entry *
				      header->number_of_entries);
	header->header_crc32 = HeaderCrc(header);
	gpt->modified |= GPT_MODIFIED_HEADER1 | GPT_MODIFIED_ENTRIES1;

	/*
	 * Use the repair function to update the other copy of the GPT.  This
	 * is a tad inefficient, but is much faster than the disk I/O to update
	 * the GPT on disk, so it doesn't matter.
	 */
	gpt->valid_headers = MASK_PRIMARY;
	gpt->valid_entries = MASK_PRIMARY;
	GptRepair(gpt);
}

const char *GptErrorText(int error_code)
{
	switch (error_code) {
	case GPT_SUCCESS:
		return "none";

	case GPT_ERROR_NO_VALID_KERNEL:
		return "Invalid kernel";

	case GPT_ERROR_INVALID_HEADERS:
		return "Invalid headers";

	case GPT_ERROR_INVALID_ENTRIES:
		return "Invalid entries";

	case GPT_ERROR_INVALID_SECTOR_SIZE:
		return "Invalid sector size";

	case GPT_ERROR_INVALID_SECTOR_NUMBER:
		return "Invalid sector number";

	case GPT_ERROR_INVALID_UPDATE_TYPE:
		return "Invalid update type";

	case GPT_ERROR_CRC_CORRUPTED:
		return "Entries' crc corrupted";

	case GPT_ERROR_OUT_OF_REGION:
		return "Entry outside of valid region";

	case GPT_ERROR_START_LBA_OVERLAP:
		return "Starting LBA overlaps";

	case GPT_ERROR_END_LBA_OVERLAP:
		return "Ending LBA overlaps";

	case GPT_ERROR_DUP_GUID:
		return "Duplicated GUID";

	case GPT_ERROR_INVALID_FLASH_GEOMETRY:
		return "Invalid flash geometry";

	case GPT_ERROR_NO_SUCH_ENTRY:
		return "No entry found";

	default:
		break;
	}
	return "Unknown";
}