/* Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "sysincludes.h"

#include "cgptlib.h"
#include "cgptlib_internal.h"
#include "crc32.h"
#include "gpt.h"
#include "gpt_misc.h"
#include "utility.h"

static const int SECTOR_SIZE = 512;

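/*
 * Return the number of sectors needed to hold the partition entry array
 * described by the header, rounding up to a whole sector.
 */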
size_t CalculateEntriesSectors(GptHeader *h)
{
	size_t bytes = h->number_of_entries * h->size_of_entry;
	size_t ret = (bytes + SECTOR_SIZE - 1) / SECTOR_SIZE;
	return ret;
}

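/*
 * Sanity-check the GptData parameters (sector size and drive sizes).
 * Returns GPT_SUCCESS if the parameters look usable, else an error code.
 */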
int CheckParameters(GptData *gpt)
{
	/* Currently, we only support 512-byte sectors. */
	if (gpt->sector_bytes != SECTOR_SIZE)
		return GPT_ERROR_INVALID_SECTOR_SIZE;

	/*
	 * gpt_drive_sectors should be reasonable. It cannot be unset, and it
	 * cannot differ from streaming_drive_sectors if the GPT structs are
	 * stored on the same device.
	 */
	if (gpt->gpt_drive_sectors == 0 ||
	    (!(gpt->flags & GPT_FLAG_EXTERNAL) &&
	     gpt->gpt_drive_sectors != gpt->streaming_drive_sectors)) {
		return GPT_ERROR_INVALID_SECTOR_NUMBER;
	}

	/*
	 * The sector count of a drive should be reasonable. If the given
	 * value is too small to contain the basic GPT structures (PMBR +
	 * headers + entries), the value is wrong.
	 */
	if (gpt->gpt_drive_sectors <
	    (1 + 2 * (1 + MIN_NUMBER_OF_ENTRIES /
		      (SECTOR_SIZE / sizeof(GptEntry)))))
		return GPT_ERROR_INVALID_SECTOR_NUMBER;

	return GPT_SUCCESS;
}

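/*
 * Calculate the CRC32 of a GPT header.  Per the GPT spec, the CRC is
 * computed with the header_crc32 field set to zero; the original value is
 * restored before returning.
 */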
uint32_t HeaderCrc(GptHeader *h)
{
	uint32_t crc32, original_crc32;

	/* The original CRC is calculated with the CRC field set to 0. */
	original_crc32 = h->header_crc32;
	h->header_crc32 = 0;
	crc32 = Crc32((const uint8_t *)h, h->size);
	h->header_crc32 = original_crc32;

	return crc32;
}

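/*
 * Check a GPT header for validity: signature, revision, size, header CRC,
 * entry geometry, and the location constraints for a primary
 * (is_secondary == 0) or secondary (is_secondary == 1) header.  Returns 0
 * if the header is valid, 1 if invalid.
 */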
int CheckHeader(GptHeader *h, int is_secondary,
		uint64_t streaming_drive_sectors,
		uint64_t gpt_drive_sectors, uint32_t flags)
{
	if (!h)
		return 1;

	/*
	 * Make sure we're looking at a header of reasonable size before
	 * attempting to calculate CRC.
	 */
	if (Memcmp(h->signature, GPT_HEADER_SIGNATURE,
		   GPT_HEADER_SIGNATURE_SIZE) &&
	    Memcmp(h->signature, GPT_HEADER_SIGNATURE2,
		   GPT_HEADER_SIGNATURE_SIZE))
		return 1;
	if (h->revision != GPT_HEADER_REVISION)
		return 1;
	if (h->size < MIN_SIZE_OF_HEADER || h->size > MAX_SIZE_OF_HEADER)
		return 1;

	/* Check the CRC before looking at the remaining fields. */
	if (HeaderCrc(h) != h->header_crc32)
		return 1;

	/* Reserved fields must be zero. */
	if (h->reserved_zero)
		return 1;

	/* Could check that padding is zero, but that doesn't matter to us. */

	/*
	 * If the entry size is different from our struct, we won't be able
	 * to parse it.  Technically, any size 2^N where N >= 7 is valid.
	 */
	if (h->size_of_entry != sizeof(GptEntry))
		return 1;
	if ((h->number_of_entries < MIN_NUMBER_OF_ENTRIES) ||
	    (h->number_of_entries > MAX_NUMBER_OF_ENTRIES) ||
	    (!(flags & GPT_FLAG_EXTERNAL) &&
	     h->number_of_entries != MAX_NUMBER_OF_ENTRIES))
		return 1;

	/*
	 * Check locations for the header and its entries.  The primary
	 * immediately follows the PMBR, and is followed by its entries.  The
	 * secondary is at the end of the drive, preceded by its entries.
	 */
	if (is_secondary) {
		if (h->my_lba != gpt_drive_sectors - GPT_HEADER_SECTORS)
			return 1;
		if (h->entries_lba != h->my_lba - CalculateEntriesSectors(h))
			return 1;
	} else {
		if (h->my_lba != GPT_PMBR_SECTORS)
			return 1;
		if (h->entries_lba < h->my_lba + 1)
			return 1;
	}

	/* FirstUsableLBA <= LastUsableLBA. */
	if (h->first_usable_lba > h->last_usable_lba)
		return 1;

	if (flags & GPT_FLAG_EXTERNAL) {
		if (h->last_usable_lba >= streaming_drive_sectors) {
			return 1;
		}
		return 0;
	}

	/*
	 * FirstUsableLBA must be after the end of the primary GPT table
	 * array.  LastUsableLBA must be before the start of the secondary
	 * GPT table array.
	 */
	/* TODO(namnguyen): Also check for padding between header & entries. */
	if (h->first_usable_lba < 2 + CalculateEntriesSectors(h))
		return 1;
	if (h->last_usable_lba >=
	    streaming_drive_sectors - 1 - CalculateEntriesSectors(h))
		return 1;

	/* Success */
	return 0;
}

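/* Return non-zero if the entry's type GUID is the ChromeOS kernel type. */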
int IsKernelEntry(const GptEntry *e)
{
	static Guid chromeos_kernel = GPT_ENT_TYPE_CHROMEOS_KERNEL;
	return !Memcmp(&e->type, &chromeos_kernel, sizeof(Guid));
}

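/*
 * Check an entry array against its header: verify the entries CRC, then
 * make sure each used entry lies inside the usable region, does not overlap
 * any other used entry, and has a unique GUID.  Returns GPT_SUCCESS or an
 * error code.
 */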
int CheckEntries(GptEntry *entries, GptHeader *h)
{
	if (!entries)
		return GPT_ERROR_INVALID_ENTRIES;
	GptEntry *entry;
	uint32_t crc32;
	uint32_t i;

	/* Check CRC before examining entries. */
	crc32 = Crc32((const uint8_t *)entries,
		      h->size_of_entry * h->number_of_entries);
	if (crc32 != h->entries_crc32)
		return GPT_ERROR_CRC_CORRUPTED;

	/* Check all entries. */
	for (i = 0, entry = entries; i < h->number_of_entries; i++, entry++) {
		GptEntry *e2;
		uint32_t i2;

		if (IsUnusedEntry(entry))
			continue;

		/* Entry must be in valid region. */
		if ((entry->starting_lba < h->first_usable_lba) ||
		    (entry->ending_lba > h->last_usable_lba) ||
		    (entry->ending_lba < entry->starting_lba))
			return GPT_ERROR_OUT_OF_REGION;

		/* Entry must not overlap other entries. */
		for (i2 = 0, e2 = entries; i2 < h->number_of_entries;
		     i2++, e2++) {
			if (i2 == i || IsUnusedEntry(e2))
				continue;

			if ((entry->starting_lba >= e2->starting_lba) &&
			    (entry->starting_lba <= e2->ending_lba))
				return GPT_ERROR_START_LBA_OVERLAP;
			if ((entry->ending_lba >= e2->starting_lba) &&
			    (entry->ending_lba <= e2->ending_lba))
				return GPT_ERROR_END_LBA_OVERLAP;

			/* UniqueGuid field must be unique. */
			if (0 == Memcmp(&entry->unique, &e2->unique,
					sizeof(Guid)))
				return GPT_ERROR_DUP_GUID;
		}
	}

	/* Success */
	return GPT_SUCCESS;
}

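/*
 * Compare the header fields which should be identical between the primary
 * and secondary headers; the header CRC and the location fields (my_lba,
 * alternate_lba, entries_lba) are expected to differ and are not compared.
 * Returns 0 if the fields match, 1 otherwise.
 */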
int HeaderFieldsSame(GptHeader *h1, GptHeader *h2)
{
	if (Memcmp(h1->signature, h2->signature, sizeof(h1->signature)))
		return 1;
	if (h1->revision != h2->revision)
		return 1;
	if (h1->size != h2->size)
		return 1;
	if (h1->reserved_zero != h2->reserved_zero)
		return 1;
	if (h1->first_usable_lba != h2->first_usable_lba)
		return 1;
	if (h1->last_usable_lba != h2->last_usable_lba)
		return 1;
	if (Memcmp(&h1->disk_uuid, &h2->disk_uuid, sizeof(Guid)))
		return 1;
	if (h1->number_of_entries != h2->number_of_entries)
		return 1;
	if (h1->size_of_entry != h2->size_of_entry)
		return 1;
	if (h1->entries_crc32 != h2->entries_crc32)
		return 1;

	return 0;
}

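/*
 * Check the headers and entry arrays in a GptData and record the results in
 * gpt->valid_headers and gpt->valid_entries.  Returns GPT_SUCCESS if at
 * least one valid header and one valid entry array were found, else an
 * error code.
 */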
int GptSanityCheck(GptData *gpt)
{
	int retval;
	GptHeader *header1 = (GptHeader *)(gpt->primary_header);
	GptHeader *header2 = (GptHeader *)(gpt->secondary_header);
	GptEntry *entries1 = (GptEntry *)(gpt->primary_entries);
	GptEntry *entries2 = (GptEntry *)(gpt->secondary_entries);
	GptHeader *goodhdr = NULL;

	gpt->valid_headers = 0;
	gpt->valid_entries = 0;

	retval = CheckParameters(gpt);
	if (retval != GPT_SUCCESS)
		return retval;

	/* Check both headers; we need at least one valid header. */
	if (0 == CheckHeader(header1, 0, gpt->streaming_drive_sectors,
			     gpt->gpt_drive_sectors, gpt->flags)) {
		gpt->valid_headers |= MASK_PRIMARY;
		goodhdr = header1;
	}
	if (0 == CheckHeader(header2, 1, gpt->streaming_drive_sectors,
			     gpt->gpt_drive_sectors, gpt->flags)) {
		gpt->valid_headers |= MASK_SECONDARY;
		if (!goodhdr)
			goodhdr = header2;
	}

	if (!gpt->valid_headers)
		return GPT_ERROR_INVALID_HEADERS;

	/*
	 * Check if entries are valid.
	 *
	 * Note that we use the same header in both checks.  This way we'll
	 * catch the case where (header1,entries1) and (header2,entries2) are
	 * both valid, but (entries1 != entries2).
	 */
	if (0 == CheckEntries(entries1, goodhdr))
		gpt->valid_entries |= MASK_PRIMARY;
	if (0 == CheckEntries(entries2, goodhdr))
		gpt->valid_entries |= MASK_SECONDARY;

	/*
	 * If both headers are good but neither set of entries was good,
	 * check the entries against the secondary header.
	 */
	if (MASK_BOTH == gpt->valid_headers && !gpt->valid_entries) {
		if (0 == CheckEntries(entries1, header2))
			gpt->valid_entries |= MASK_PRIMARY;
		if (0 == CheckEntries(entries2, header2))
			gpt->valid_entries |= MASK_SECONDARY;
		if (gpt->valid_entries) {
			/*
			 * Sure enough, header2 had a good CRC for one of the
			 * entry arrays.  Mark header1 invalid, so we'll
			 * update its entries CRC.
			 */
			gpt->valid_headers &= ~MASK_PRIMARY;
			goodhdr = header2;
		}
	}

	if (!gpt->valid_entries)
		return GPT_ERROR_INVALID_ENTRIES;

	/*
	 * Now that we've determined which header contains a good CRC for
	 * the entries, make sure the headers are otherwise identical.
	 */
	if (MASK_BOTH == gpt->valid_headers &&
	    0 != HeaderFieldsSame(header1, header2))
		gpt->valid_headers &= ~MASK_SECONDARY;

	return GPT_SUCCESS;
}

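/*
 * Repair a damaged header or entry array by copying from the good one,
 * fixing up the location fields and header CRC, and setting the
 * corresponding gpt->modified flags.  Does nothing unless at least one
 * header and one entry array are valid.
 */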
void GptRepair(GptData *gpt)
{
	GptHeader *header1 = (GptHeader *)(gpt->primary_header);
	GptHeader *header2 = (GptHeader *)(gpt->secondary_header);
	GptEntry *entries1 = (GptEntry *)(gpt->primary_entries);
	GptEntry *entries2 = (GptEntry *)(gpt->secondary_entries);
	int entries_size;

	/* Need at least one good header and one good set of entries. */
	if (MASK_NONE == gpt->valid_headers || MASK_NONE == gpt->valid_entries)
		return;

	/* Repair headers if necessary */
	if (MASK_PRIMARY == gpt->valid_headers) {
		/* Primary is good, secondary is bad */
		Memcpy(header2, header1, sizeof(GptHeader));
		header2->my_lba = gpt->gpt_drive_sectors - GPT_HEADER_SECTORS;
		header2->alternate_lba = GPT_PMBR_SECTORS;  /* Second sector. */
		header2->entries_lba = header2->my_lba -
			CalculateEntriesSectors(header1);
		header2->header_crc32 = HeaderCrc(header2);
		gpt->modified |= GPT_MODIFIED_HEADER2;
	} else if (MASK_SECONDARY == gpt->valid_headers) {
		/* Secondary is good, primary is bad */
		Memcpy(header1, header2, sizeof(GptHeader));
		header1->my_lba = GPT_PMBR_SECTORS;  /* Second sector. */
		header1->alternate_lba =
			gpt->streaming_drive_sectors - GPT_HEADER_SECTORS;
		/* TODO (namnguyen): Preserve (header, entries) padding. */
		header1->entries_lba = header1->my_lba + 1;
		header1->header_crc32 = HeaderCrc(header1);
		gpt->modified |= GPT_MODIFIED_HEADER1;
	}
	gpt->valid_headers = MASK_BOTH;

	/* Repair entries if necessary */
	entries_size = header1->size_of_entry * header1->number_of_entries;
	if (MASK_PRIMARY == gpt->valid_entries) {
		/* Primary is good, secondary is bad */
		Memcpy(entries2, entries1, entries_size);
		gpt->modified |= GPT_MODIFIED_ENTRIES2;
	} else if (MASK_SECONDARY == gpt->valid_entries) {
		/* Secondary is good, primary is bad */
		Memcpy(entries1, entries2, entries_size);
		gpt->modified |= GPT_MODIFIED_ENTRIES1;
	}
	gpt->valid_entries = MASK_BOTH;
}

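/*
 * Getters and setters for the successful, priority, and tries fields that
 * live in the ChromeOS-specific attribute bits of a kernel entry; the
 * CGPT_ATTRIBUTE_*_MASK/OFFSET constants define where each field sits.
 */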
int GetEntrySuccessful(const GptEntry *e)
{
	return (e->attrs.fields.gpt_att & CGPT_ATTRIBUTE_SUCCESSFUL_MASK) >>
		CGPT_ATTRIBUTE_SUCCESSFUL_OFFSET;
}

int GetEntryPriority(const GptEntry *e)
{
	return (e->attrs.fields.gpt_att & CGPT_ATTRIBUTE_PRIORITY_MASK) >>
		CGPT_ATTRIBUTE_PRIORITY_OFFSET;
}

int GetEntryTries(const GptEntry *e)
{
	return (e->attrs.fields.gpt_att & CGPT_ATTRIBUTE_TRIES_MASK) >>
		CGPT_ATTRIBUTE_TRIES_OFFSET;
}

void SetEntrySuccessful(GptEntry *e, int successful)
{
	if (successful)
		e->attrs.fields.gpt_att |= CGPT_ATTRIBUTE_SUCCESSFUL_MASK;
	else
		e->attrs.fields.gpt_att &= ~CGPT_ATTRIBUTE_SUCCESSFUL_MASK;
}

void SetEntryPriority(GptEntry *e, int priority)
{
	e->attrs.fields.gpt_att &= ~CGPT_ATTRIBUTE_PRIORITY_MASK;
	e->attrs.fields.gpt_att |=
		(priority << CGPT_ATTRIBUTE_PRIORITY_OFFSET) &
		CGPT_ATTRIBUTE_PRIORITY_MASK;
}

void SetEntryTries(GptEntry *e, int tries)
{
	e->attrs.fields.gpt_att &= ~CGPT_ATTRIBUTE_TRIES_MASK;
	e->attrs.fields.gpt_att |= (tries << CGPT_ATTRIBUTE_TRIES_OFFSET) &
		CGPT_ATTRIBUTE_TRIES_MASK;
}

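/*
 * Copy the unique GUID of the currently selected kernel entry
 * (gpt->current_kernel) into dest, which must be able to hold a Guid.
 */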
void GetCurrentKernelUniqueGuid(GptData *gpt, void *dest)
{
	GptEntry *entries = (GptEntry *)gpt->primary_entries;
	GptEntry *e = entries + gpt->current_kernel;
	Memcpy(dest, &e->unique, sizeof(Guid));
}

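/*
 * Recompute the primary CRCs after the caller has modified the primary
 * entries, mark the primary copy as modified, and regenerate the secondary
 * copy from it via GptRepair().  Typical use: change an entry's attributes
 * with SetEntryPriority()/SetEntryTries(), then call GptModified() so both
 * copies stay consistent.
 */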
void GptModified(GptData *gpt)
{
	GptHeader *header = (GptHeader *)gpt->primary_header;

	/* Update the CRCs. */
	header->entries_crc32 = Crc32(gpt->primary_entries,
				      header->size_of_entry *
				      header->number_of_entries);
	header->header_crc32 = HeaderCrc(header);
	gpt->modified |= GPT_MODIFIED_HEADER1 | GPT_MODIFIED_ENTRIES1;

	/*
	 * Use the repair function to update the other copy of the GPT.  This
	 * is a tad inefficient, but it's still much faster than the disk I/O
	 * needed to update the GPT on disk, so it doesn't matter.
	 */
	gpt->valid_headers = MASK_PRIMARY;
	gpt->valid_entries = MASK_PRIMARY;
	GptRepair(gpt);
}

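/* Map a GPT_ERROR_* code to a short human-readable description. */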
const char *GptErrorText(int error_code)
{
	switch (error_code) {
	case GPT_SUCCESS:
		return "none";

	case GPT_ERROR_NO_VALID_KERNEL:
		return "Invalid kernel";

	case GPT_ERROR_INVALID_HEADERS:
		return "Invalid headers";

	case GPT_ERROR_INVALID_ENTRIES:
		return "Invalid entries";

	case GPT_ERROR_INVALID_SECTOR_SIZE:
		return "Invalid sector size";

	case GPT_ERROR_INVALID_SECTOR_NUMBER:
		return "Invalid sector number";

	case GPT_ERROR_INVALID_UPDATE_TYPE:
		return "Invalid update type";

	case GPT_ERROR_CRC_CORRUPTED:
		return "Entries' CRC corrupted";

	case GPT_ERROR_OUT_OF_REGION:
		return "Entry outside of valid region";

	case GPT_ERROR_START_LBA_OVERLAP:
		return "Starting LBA overlaps";

	case GPT_ERROR_END_LBA_OVERLAP:
		return "Ending LBA overlaps";

	case GPT_ERROR_DUP_GUID:
		return "Duplicated GUID";

	case GPT_ERROR_INVALID_FLASH_GEOMETRY:
		return "Invalid flash geometry";

	case GPT_ERROR_NO_SUCH_ENTRY:
		return "No entry found";

	default:
		break;
	}
	return "Unknown";
}