1 /*
2  * Unsquash a squashfs filesystem.  This is a highly compressed read only
3  * filesystem.
4  *
5  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
6  * 2012, 2013, 2014
7  * Phillip Lougher <phillip@squashfs.org.uk>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2,
12  * or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22  *
23  * unsquashfs.c
24  */
25 
26 #include "unsquashfs.h"
27 #include "squashfs_swap.h"
28 #include "squashfs_compat.h"
29 #include "compressor.h"
30 #include "xattr.h"
31 #include "unsquashfs_info.h"
32 #include <stdarg.h>
33 
34 #include <sys/sysinfo.h>
35 #include <sys/types.h>
36 #include <sys/time.h>
37 #include <sys/resource.h>
38 #include <limits.h>
39 #include <ctype.h>
40 
41 struct cache *fragment_cache, *data_cache;
42 struct queue *to_reader, *to_inflate, *to_writer, *from_writer;
43 pthread_t *thread, *inflator_thread;
44 pthread_mutex_t	fragment_mutex;
45 
46 /* user options that control parallelisation */
47 int processors = -1;
48 
49 struct super_block sBlk;
50 squashfs_operations s_ops;
51 struct compressor *comp;
52 
53 int bytes = 0, swap, file_count = 0, dir_count = 0, sym_count = 0,
54 	dev_count = 0, fifo_count = 0;
55 char *inode_table = NULL, *directory_table = NULL;
56 struct hash_table_entry *inode_table_hash[65536], *directory_table_hash[65536];
57 int fd;
58 unsigned int *uid_table, *guid_table;
59 unsigned int cached_frag = SQUASHFS_INVALID_FRAG;
60 char *fragment_data;
61 char *file_data;
62 char *data;
63 unsigned int block_size;
64 unsigned int block_log;
65 int lsonly = FALSE, info = FALSE, force = FALSE, short_ls = TRUE;
66 int use_regex = FALSE;
67 char **created_inode;
68 int root_process;
69 int columns;
70 int rotate = 0;
71 pthread_mutex_t	screen_mutex;
72 int progress = TRUE, progress_enabled = FALSE;
73 unsigned int total_blocks = 0, total_files = 0, total_inodes = 0;
74 unsigned int cur_blocks = 0;
75 int inode_number = 1;
76 int no_xattrs = XATTR_DEF;
77 int user_xattrs = FALSE;
78 
79 int lookup_type[] = {
80 	0,
81 	S_IFDIR,
82 	S_IFREG,
83 	S_IFLNK,
84 	S_IFBLK,
85 	S_IFCHR,
86 	S_IFIFO,
87 	S_IFSOCK,
88 	S_IFDIR,
89 	S_IFREG,
90 	S_IFLNK,
91 	S_IFBLK,
92 	S_IFCHR,
93 	S_IFIFO,
94 	S_IFSOCK
95 };
96 
97 struct test table[] = {
98 	{ S_IFMT, S_IFSOCK, 0, 's' },
99 	{ S_IFMT, S_IFLNK, 0, 'l' },
100 	{ S_IFMT, S_IFBLK, 0, 'b' },
101 	{ S_IFMT, S_IFDIR, 0, 'd' },
102 	{ S_IFMT, S_IFCHR, 0, 'c' },
103 	{ S_IFMT, S_IFIFO, 0, 'p' },
104 	{ S_IRUSR, S_IRUSR, 1, 'r' },
105 	{ S_IWUSR, S_IWUSR, 2, 'w' },
106 	{ S_IRGRP, S_IRGRP, 4, 'r' },
107 	{ S_IWGRP, S_IWGRP, 5, 'w' },
108 	{ S_IROTH, S_IROTH, 7, 'r' },
109 	{ S_IWOTH, S_IWOTH, 8, 'w' },
110 	{ S_IXUSR | S_ISUID, S_IXUSR | S_ISUID, 3, 's' },
111 	{ S_IXUSR | S_ISUID, S_ISUID, 3, 'S' },
112 	{ S_IXUSR | S_ISUID, S_IXUSR, 3, 'x' },
113 	{ S_IXGRP | S_ISGID, S_IXGRP | S_ISGID, 6, 's' },
114 	{ S_IXGRP | S_ISGID, S_ISGID, 6, 'S' },
115 	{ S_IXGRP | S_ISGID, S_IXGRP, 6, 'x' },
116 	{ S_IXOTH | S_ISVTX, S_IXOTH | S_ISVTX, 9, 't' },
117 	{ S_IXOTH | S_ISVTX, S_ISVTX, 9, 'T' },
118 	{ S_IXOTH | S_ISVTX, S_IXOTH, 9, 'x' },
119 	{ 0, 0, 0, 0}
120 };
121 
122 void progress_bar(long long current, long long max, int columns);
123 
124 #define MAX_LINE 16384
125 
126 void prep_exit()
127 {
128 }
129 
130 
131 void sigwinch_handler()
132 {
133 	struct winsize winsize;
134 
135 	if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
136 		if(isatty(STDOUT_FILENO))
137 			ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
138 				"columns\n");
139 		columns = 80;
140 	} else
141 		columns = winsize.ws_col;
142 }
143 
144 
145 void sigalrm_handler()
146 {
147 	rotate = (rotate + 1) % 4;
148 }
149 
150 
151 int add_overflow(int a, int b)
152 {
153 	return (INT_MAX - a) < b;
154 }
155 
156 
157 int shift_overflow(int a, int shift)
158 {
159 	return (INT_MAX >> shift) < a;
160 }
161 
162 
163 int multiply_overflow(int a, int multiplier)
164 {
165 	return (INT_MAX / multiplier) < a;
166 }
167 
168 
169 struct queue *queue_init(int size)
170 {
171 	struct queue *queue = malloc(sizeof(struct queue));
172 
173 	if(queue == NULL)
174 		EXIT_UNSQUASH("Out of memory in queue_init\n");
175 
176 	if(add_overflow(size, 1) ||
177 				multiply_overflow(size + 1, sizeof(void *)))
178 		EXIT_UNSQUASH("Size too large in queue_init\n");
179 
180 	queue->data = malloc(sizeof(void *) * (size + 1));
181 	if(queue->data == NULL)
182 		EXIT_UNSQUASH("Out of memory in queue_init\n");
183 
184 	queue->size = size + 1;
185 	queue->readp = queue->writep = 0;
186 	pthread_mutex_init(&queue->mutex, NULL);
187 	pthread_cond_init(&queue->empty, NULL);
188 	pthread_cond_init(&queue->full, NULL);
189 
190 	return queue;
191 }
192 
193 
194 void queue_put(struct queue *queue, void *data)
195 {
196 	int nextp;
197 
198 	pthread_mutex_lock(&queue->mutex);
199 
200 	while((nextp = (queue->writep + 1) % queue->size) == queue->readp)
201 		pthread_cond_wait(&queue->full, &queue->mutex);
202 
203 	queue->data[queue->writep] = data;
204 	queue->writep = nextp;
205 	pthread_cond_signal(&queue->empty);
206 	pthread_mutex_unlock(&queue->mutex);
207 }
208 
209 
210 void *queue_get(struct queue *queue)
211 {
212 	void *data;
213 	pthread_mutex_lock(&queue->mutex);
214 
215 	while(queue->readp == queue->writep)
216 		pthread_cond_wait(&queue->empty, &queue->mutex);
217 
218 	data = queue->data[queue->readp];
219 	queue->readp = (queue->readp + 1) % queue->size;
220 	pthread_cond_signal(&queue->full);
221 	pthread_mutex_unlock(&queue->mutex);
222 
223 	return data;
224 }
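
/*
 * Illustrative sketch, not part of the build: how the bounded queue above is
 * typically used as a producer/consumer channel between threads.  The
 * "struct work", make_work() and process_work() names are hypothetical;
 * only queue_init(), queue_put() and queue_get() are real.
 */
#if 0
static struct queue *work_queue;	/* created elsewhere with queue_init(64) */

void *producer(void *arg)
{
	struct work *w = make_work();	/* hypothetical helper */

	queue_put(work_queue, w);	/* blocks while the queue is full */
	return NULL;
}

void *consumer(void *arg)
{
	while(1) {
		struct work *w = queue_get(work_queue);	/* blocks while empty */

		process_work(w);	/* hypothetical helper */
	}
}
#endif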
225 
226 
227 void dump_queue(struct queue *queue)
228 {
229 	pthread_mutex_lock(&queue->mutex);
230 
231 	printf("Max size %d, size %d%s\n", queue->size - 1,
232 		queue->readp <= queue->writep ? queue->writep - queue->readp :
233 			queue->size - queue->readp + queue->writep,
234 		queue->readp == queue->writep ? " (EMPTY)" :
235 			((queue->writep + 1) % queue->size) == queue->readp ?
236 			" (FULL)" : "");
237 
238 	pthread_mutex_unlock(&queue->mutex);
239 }
240 
241 
242 /* Called with the cache mutex held */
243 void insert_hash_table(struct cache *cache, struct cache_entry *entry)
244 {
245 	int hash = CALCULATE_HASH(entry->block);
246 
247 	entry->hash_next = cache->hash_table[hash];
248 	cache->hash_table[hash] = entry;
249 	entry->hash_prev = NULL;
250 	if(entry->hash_next)
251 		entry->hash_next->hash_prev = entry;
252 }
253 
254 
255 /* Called with the cache mutex held */
256 void remove_hash_table(struct cache *cache, struct cache_entry *entry)
257 {
258 	if(entry->hash_prev)
259 		entry->hash_prev->hash_next = entry->hash_next;
260 	else
261 		cache->hash_table[CALCULATE_HASH(entry->block)] =
262 			entry->hash_next;
263 	if(entry->hash_next)
264 		entry->hash_next->hash_prev = entry->hash_prev;
265 
266 	entry->hash_prev = entry->hash_next = NULL;
267 }
268 
269 
270 /* Called with the cache mutex held */
271 void insert_free_list(struct cache *cache, struct cache_entry *entry)
272 {
273 	if(cache->free_list) {
274 		entry->free_next = cache->free_list;
275 		entry->free_prev = cache->free_list->free_prev;
276 		cache->free_list->free_prev->free_next = entry;
277 		cache->free_list->free_prev = entry;
278 	} else {
279 		cache->free_list = entry;
280 		entry->free_prev = entry->free_next = entry;
281 	}
282 }
283 
284 
285 /* Called with the cache mutex held */
286 void remove_free_list(struct cache *cache, struct cache_entry *entry)
287 {
288 	if(entry->free_prev == NULL || entry->free_next == NULL)
289 		/* not in free list */
290 		return;
291 	else if(entry->free_prev == entry && entry->free_next == entry) {
292 		/* only this entry in the free list */
293 		cache->free_list = NULL;
294 	} else {
295 		/* more than one entry in the free list */
296 		entry->free_next->free_prev = entry->free_prev;
297 		entry->free_prev->free_next = entry->free_next;
298 		if(cache->free_list == entry)
299 			cache->free_list = entry->free_next;
300 	}
301 
302 	entry->free_prev = entry->free_next = NULL;
303 }
304 
305 
306 struct cache *cache_init(int buffer_size, int max_buffers)
307 {
308 	struct cache *cache = malloc(sizeof(struct cache));
309 
310 	if(cache == NULL)
311 		EXIT_UNSQUASH("Out of memory in cache_init\n");
312 
313 	cache->max_buffers = max_buffers;
314 	cache->buffer_size = buffer_size;
315 	cache->count = 0;
316 	cache->used = 0;
317 	cache->free_list = NULL;
318 	memset(cache->hash_table, 0, sizeof(struct cache_entry *) * 65536);
319 	cache->wait_free = FALSE;
320 	cache->wait_pending = FALSE;
321 	pthread_mutex_init(&cache->mutex, NULL);
322 	pthread_cond_init(&cache->wait_for_free, NULL);
323 	pthread_cond_init(&cache->wait_for_pending, NULL);
324 
325 	return cache;
326 }
327 
328 
329 struct cache_entry *cache_get(struct cache *cache, long long block, int size)
330 {
331 	/*
332 	 * Get a block out of the cache.  If the block isn't in the cache
333 	 * it is added and queued to the reader() and inflate() threads for
334 	 * reading off disk and decompression.  The cache grows until max_buffers
335 	 * is reached; once this occurs, existing discarded blocks on the free
336 	 * list are reused.
337 	 */
338 	int hash = CALCULATE_HASH(block);
339 	struct cache_entry *entry;
340 
341 	pthread_mutex_lock(&cache->mutex);
342 
343 	for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
344 		if(entry->block == block)
345 			break;
346 
347 	if(entry) {
348 		/*
349  		 * found the block in the cache.  If the block is currently unused
350 		 * remove it from the free list and increment cache used count.
351  		 */
352 		if(entry->used == 0) {
353 			cache->used ++;
354 			remove_free_list(cache, entry);
355 		}
356 		entry->used ++;
357 		pthread_mutex_unlock(&cache->mutex);
358 	} else {
359 		/*
360  		 * not in the cache
361 		 *
362 		 * first try to allocate new block
363 		 */
364 		if(cache->count < cache->max_buffers) {
365 			entry = malloc(sizeof(struct cache_entry));
366 			if(entry == NULL)
367 				EXIT_UNSQUASH("Out of memory in cache_get\n");
368 			entry->data = malloc(cache->buffer_size);
369 			if(entry->data == NULL)
370 				EXIT_UNSQUASH("Out of memory in cache_get\n");
371 			entry->cache = cache;
372 			entry->free_prev = entry->free_next = NULL;
373 			cache->count ++;
374 		} else {
375 			/*
376 			 * try to get from free list
377 			 */
378 			while(cache->free_list == NULL) {
379 				cache->wait_free = TRUE;
380 				pthread_cond_wait(&cache->wait_for_free,
381 					&cache->mutex);
382 			}
383 			entry = cache->free_list;
384 			remove_free_list(cache, entry);
385 			remove_hash_table(cache, entry);
386 		}
387 
388 		/*
389 		 * Initialise block and insert into the hash table.
390 		 * Increment used which tracks how many buffers in the
391 		 * cache are actively in use (the other blocks, count - used,
392 		 * are in the cache and available for lookup, but can also be
393 		 * re-used).
394 		 */
395 		entry->block = block;
396 		entry->size = size;
397 		entry->used = 1;
398 		entry->error = FALSE;
399 		entry->pending = TRUE;
400 		insert_hash_table(cache, entry);
401 		cache->used ++;
402 
403 		/*
404 		 * queue to read thread to read and ultimately (via the
405 		 * decompress threads) decompress the buffer
406  		 */
407 		pthread_mutex_unlock(&cache->mutex);
408 		queue_put(to_reader, entry);
409 	}
410 
411 	return entry;
412 }
413 
414 
415 void cache_block_ready(struct cache_entry *entry, int error)
416 {
417 	/*
418 	 * mark cache entry as being complete, reading and (if necessary)
419  	 * decompression has taken place, and the buffer is valid for use.
420  	 * If an error occurs reading or decompressing, the buffer also
421  	 * becomes ready but with an error...
422  	 */
423 	pthread_mutex_lock(&entry->cache->mutex);
424 	entry->pending = FALSE;
425 	entry->error = error;
426 
427 	/*
428 	 * if the wait_pending flag is set, one or more threads may be waiting
429 	 * on this buffer
430 	 */
431 	if(entry->cache->wait_pending) {
432 		entry->cache->wait_pending = FALSE;
433 		pthread_cond_broadcast(&entry->cache->wait_for_pending);
434 	}
435 
436 	pthread_mutex_unlock(&entry->cache->mutex);
437 }
438 
439 
440 void cache_block_wait(struct cache_entry *entry)
441 {
442 	/*
443 	 * wait for this cache entry to become ready, when reading and (if
444 	 * necessary) decompression has taken place
445 	 */
446 	pthread_mutex_lock(&entry->cache->mutex);
447 
448 	while(entry->pending) {
449 		entry->cache->wait_pending = TRUE;
450 		pthread_cond_wait(&entry->cache->wait_for_pending,
451 			&entry->cache->mutex);
452 	}
453 
454 	pthread_mutex_unlock(&entry->cache->mutex);
455 }
456 
457 
458 void cache_block_put(struct cache_entry *entry)
459 {
460 	/*
461 	 * finished with this cache entry, once the usage count reaches zero it
462  	 * can be reused and is put onto the free list.  As it remains
463  	 * accessible via the hash table it can be found getting a new lease of
464  	 * life before it is reused.
465  	 */
466 	pthread_mutex_lock(&entry->cache->mutex);
467 
468 	entry->used --;
469 	if(entry->used == 0) {
470 		insert_free_list(entry->cache, entry);
471 		entry->cache->used --;
472 
473 		/*
474 		 * if the wait_free flag is set, one or more threads may be
475 		 * waiting on this buffer
476 		 */
477 		if(entry->cache->wait_free) {
478 			entry->cache->wait_free = FALSE;
479 			pthread_cond_broadcast(&entry->cache->wait_for_free);
480 		}
481 	}
482 
483 	pthread_mutex_unlock(&entry->cache->mutex);
484 }
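
/*
 * Illustrative sketch, not part of the build: the typical life-cycle of a
 * cache entry, as the writer thread uses it.  cache_get() queues the read
 * (and any decompression) asynchronously, so the buffer must be waited on
 * before use.  file_fd, start, c_byte and size stand in for values the
 * caller already has.
 */
#if 0
	struct cache_entry *entry = cache_get(data_cache, start, c_byte);

	cache_block_wait(entry);	/* wait for read/decompress to finish */
	if(!entry->error)
		write_block(file_fd, entry->data, size, 0, FALSE);
	cache_block_put(entry);		/* drop reference; entry may be reused */
#endif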
485 
486 
487 void dump_cache(struct cache *cache)
488 {
489 	pthread_mutex_lock(&cache->mutex);
490 
491 	printf("Max buffers %d, Current size %d, Used %d,  %s\n",
492 		cache->max_buffers, cache->count, cache->used,
493 		cache->free_list ?  "Free buffers" : "No free buffers");
494 
495 	pthread_mutex_unlock(&cache->mutex);
496 }
497 
498 
499 char *modestr(char *str, int mode)
500 {
501 	int i;
502 
503 	strcpy(str, "----------");
504 
505 	for(i = 0; table[i].mask != 0; i++) {
506 		if((mode & table[i].mask) == table[i].value)
507 			str[table[i].position] = table[i].mode;
508 	}
509 
510 	return str;
511 }
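
/*
 * Illustrative sketch, not part of the build: modestr() renders a mode in
 * "ls -l" style using the table above.
 */
#if 0
	char str[11];

	printf("%s\n", modestr(str, S_IFDIR | 0755));	/* prints "drwxr-xr-x" */
#endif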
512 
513 
514 #define TOTALCHARS  25
515 int print_filename(char *pathname, struct inode *inode)
516 {
517 	char str[11], dummy[12], dummy2[12]; /* overflow safe */
518 	char *userstr, *groupstr;
519 	int padchars;
520 	struct passwd *user;
521 	struct group *group;
522 	struct tm *t;
523 
524 	if(short_ls) {
525 		printf("%s\n", pathname);
526 		return 1;
527 	}
528 
529 	user = getpwuid(inode->uid);
530 	if(user == NULL) {
531 		int res = snprintf(dummy, 12, "%d", inode->uid);
532 		if(res < 0)
533 			EXIT_UNSQUASH("snprintf failed in print_filename()\n");
534 		else if(res >= 12)
535 			/* unsigned int shouldn't ever need more than 11 bytes
536 			 * (including terminating '\0') to print in base 10 */
537 			userstr = "*";
538 		else
539 			userstr = dummy;
540 	} else
541 		userstr = user->pw_name;
542 
543 	group = getgrgid(inode->gid);
544 	if(group == NULL) {
545 		int res = snprintf(dummy2, 12, "%d", inode->gid);
546 		if(res < 0)
547 			EXIT_UNSQUASH("snprintf failed in print_filename()\n");
548 		else if(res >= 12)
549 			/* unsigned int shouldn't ever need more than 11 bytes
550 			 * (including terminating '\0') to print in base 10 */
551 			groupstr = "*";
552 		else
553 			groupstr = dummy2;
554 	} else
555 		groupstr = group->gr_name;
556 
557 	printf("%s %s/%s ", modestr(str, inode->mode), userstr, groupstr);
558 
559 	switch(inode->mode & S_IFMT) {
560 		case S_IFREG:
561 		case S_IFDIR:
562 		case S_IFSOCK:
563 		case S_IFIFO:
564 		case S_IFLNK:
565 			padchars = TOTALCHARS - strlen(userstr) -
566 				strlen(groupstr);
567 
568 			printf("%*lld ", padchars > 0 ? padchars : 0,
569 				inode->data);
570 			break;
571 		case S_IFCHR:
572 		case S_IFBLK:
573 			padchars = TOTALCHARS - strlen(userstr) -
574 				strlen(groupstr) - 7;
575 
576 			printf("%*s%3d,%3d ", padchars > 0 ? padchars : 0, " ",
577 				(int) inode->data >> 8, (int) inode->data &
578 				0xff);
579 			break;
580 	}
581 
582 	t = localtime(&inode->time);
583 
584 	printf("%d-%02d-%02d %02d:%02d %s", t->tm_year + 1900, t->tm_mon + 1,
585 		t->tm_mday, t->tm_hour, t->tm_min, pathname);
586 	if((inode->mode & S_IFMT) == S_IFLNK)
587 		printf(" -> %s", inode->symlink);
588 	printf("\n");
589 
590 	return 1;
591 }
592 
593 
594 void add_entry(struct hash_table_entry *hash_table[], long long start,
595 	int bytes)
596 {
597 	int hash = CALCULATE_HASH(start);
598 	struct hash_table_entry *hash_table_entry;
599 
600 	hash_table_entry = malloc(sizeof(struct hash_table_entry));
601 	if(hash_table_entry == NULL)
602 		EXIT_UNSQUASH("Out of memory in add_entry\n");
603 
604 	hash_table_entry->start = start;
605 	hash_table_entry->bytes = bytes;
606 	hash_table_entry->next = hash_table[hash];
607 	hash_table[hash] = hash_table_entry;
608 }
609 
610 
611 int lookup_entry(struct hash_table_entry *hash_table[], long long start)
612 {
613 	int hash = CALCULATE_HASH(start);
614 	struct hash_table_entry *hash_table_entry;
615 
616 	for(hash_table_entry = hash_table[hash]; hash_table_entry;
617 				hash_table_entry = hash_table_entry->next)
618 
619 		if(hash_table_entry->start == start)
620 			return hash_table_entry->bytes;
621 
622 	return -1;
623 }
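
/*
 * Illustrative sketch, not part of the build: the inode and directory tables
 * are read into contiguous buffers, and add_entry()/lookup_entry() map the
 * on-disk start of each metadata block to its byte offset within that
 * buffer.  The start value here is just an example.
 */
#if 0
	long long start = sBlk.s.inode_table_start;
	int offset;

	add_entry(inode_table_hash, start, 0);		/* while reading the table */
	offset = lookup_entry(inode_table_hash, start);	/* 0 here, -1 if unknown */
#endif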
624 
625 
626 int read_fs_bytes(int fd, long long byte, int bytes, void *buff)
627 {
628 	off_t off = byte;
629 	int res, count;
630 
631 	TRACE("read_bytes: reading from position 0x%llx, bytes %d\n", byte,
632 		bytes);
633 
634 	if(lseek(fd, off, SEEK_SET) == -1) {
635 		ERROR("Lseek failed because %s\n", strerror(errno));
636 		return FALSE;
637 	}
638 
639 	for(count = 0; count < bytes; count += res) {
640 		res = read(fd, buff + count, bytes - count);
641 		if(res < 1) {
642 			if(res == 0) {
643 				ERROR("Read on filesystem failed because "
644 					"EOF\n");
645 				return FALSE;
646 			} else if(errno != EINTR) {
647 				ERROR("Read on filesystem failed because %s\n",
648 						strerror(errno));
649 				return FALSE;
650 			} else
651 				res = 0;
652 		}
653 	}
654 
655 	return TRUE;
656 }
657 
658 
659 int read_block(int fd, long long start, long long *next, int expected,
660 								void *block)
661 {
662 	unsigned short c_byte;
663 	int offset = 2, res, compressed;
664 	int outlen = expected ? expected : SQUASHFS_METADATA_SIZE;
665 
666 	if(swap) {
667 		if(read_fs_bytes(fd, start, 2, &c_byte) == FALSE)
668 			goto failed;
669 		c_byte = (c_byte >> 8) | ((c_byte & 0xff) << 8);
670 	} else
671 		if(read_fs_bytes(fd, start, 2, &c_byte) == FALSE)
672 			goto failed;
673 
674 	TRACE("read_block: block @0x%llx, %d %s bytes\n", start,
675 		SQUASHFS_COMPRESSED_SIZE(c_byte), SQUASHFS_COMPRESSED(c_byte) ?
676 		"compressed" : "uncompressed");
677 
678 	if(SQUASHFS_CHECK_DATA(sBlk.s.flags))
679 		offset = 3;
680 
681 	compressed = SQUASHFS_COMPRESSED(c_byte);
682 	c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
683 
684 	/*
685 	 * The block size should not be larger than
686 	 * the uncompressed size (or max uncompressed size if
687 	 * expected is 0)
688 	 */
689 	if(c_byte > outlen)
690 		return 0;
691 
692 	if(compressed) {
693 		char buffer[c_byte];
694 		int error;
695 
696 		res = read_fs_bytes(fd, start + offset, c_byte, buffer);
697 		if(res == FALSE)
698 			goto failed;
699 
700 		res = compressor_uncompress(comp, block, buffer, c_byte,
701 			outlen, &error);
702 
703 		if(res == -1) {
704 			ERROR("%s uncompress failed with error code %d\n",
705 				comp->name, error);
706 			goto failed;
707 		}
708 	} else {
709 		res = read_fs_bytes(fd, start + offset, c_byte, block);
710 		if(res == FALSE)
711 			goto failed;
712 		res = c_byte;
713 	}
714 
715 	if(next)
716 		*next = start + offset + c_byte;
717 
718 	/*
719 	 * if expected, then check the (uncompressed) return data
720 	 * is of the expected size
721 	 */
722 	if(expected && expected != res)
723 		return 0;
724 	else
725 		return res;
726 
727 failed:
728 	ERROR("read_block: failed to read block @0x%llx\n", start);
729 	return FALSE;
730 }
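
/*
 * Illustrative sketch, not part of the build: each metadata block read by
 * read_block() starts with a two byte header encoding the on-disk length
 * and whether the block is compressed.  The macros used above decode it;
 * the header value below is made up.
 */
#if 0
	unsigned short c_byte = 0x1234;	/* hypothetical header value */

	printf("%d byte %s metadata block\n", SQUASHFS_COMPRESSED_SIZE(c_byte),
		SQUASHFS_COMPRESSED(c_byte) ? "compressed" : "uncompressed");
#endif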
731 
732 
733 int read_data_block(long long start, unsigned int size, char *block)
734 {
735 	int error, res;
736 	int c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
737 
738 	TRACE("read_data_block: block @0x%llx, %d %s bytes\n", start,
739 		c_byte, SQUASHFS_COMPRESSED_BLOCK(size) ? "compressed" :
740 		"uncompressed");
741 
742 	if(SQUASHFS_COMPRESSED_BLOCK(size)) {
743 		if(read_fs_bytes(fd, start, c_byte, data) == FALSE)
744 			goto failed;
745 
746 		res = compressor_uncompress(comp, block, data, c_byte,
747 			block_size, &error);
748 
749 		if(res == -1) {
750 			ERROR("%s uncompress failed with error code %d\n",
751 				comp->name, error);
752 			goto failed;
753 		}
754 
755 		return res;
756 	} else {
757 		if(read_fs_bytes(fd, start, c_byte, block) == FALSE)
758 			goto failed;
759 
760 		return c_byte;
761 	}
762 
763 failed:
764 	ERROR("read_data_block: failed to read block @0x%llx, size %d\n", start,
765 		c_byte);
766 	return FALSE;
767 }
768 
769 
770 int read_inode_table(long long start, long long end)
771 {
772 	int size = 0, bytes = 0, res;
773 
774 	TRACE("read_inode_table: start %lld, end %lld\n", start, end);
775 
776 	while(start < end) {
777 		if(size - bytes < SQUASHFS_METADATA_SIZE) {
778 			inode_table = realloc(inode_table, size +=
779 				SQUASHFS_METADATA_SIZE);
780 			if(inode_table == NULL) {
781 				ERROR("Out of memory in read_inode_table\n");
782 				goto failed;
783 			}
784 		}
785 
786 		add_entry(inode_table_hash, start, bytes);
787 
788 		res = read_block(fd, start, &start, 0, inode_table + bytes);
789 		if(res == 0) {
790 			ERROR("read_inode_table: failed to read block\n");
791 			goto failed;
792 		}
793 		bytes += res;
794 
795 		/*
796 		 * If this is not the last metadata block in the inode table
797 		 * then it should be SQUASHFS_METADATA_SIZE in size.
798 		 * Note, we can't use expected in read_block() above for this
799 		 * because we don't know if this is the last block until
800 		 * after reading.
801 		 */
802 		if(start != end && res != SQUASHFS_METADATA_SIZE) {
803 			ERROR("read_inode_table: metadata block should be %d "
804 				"bytes in length, it is %d bytes\n",
805 				SQUASHFS_METADATA_SIZE, res);
806 
807 			goto failed;
808 		}
809 	}
810 
811 	return TRUE;
812 
813 failed:
814 	free(inode_table);
815 	return FALSE;
816 }
817 
818 
819 int set_attributes(char *pathname, int mode, uid_t uid, gid_t guid, time_t time,
820 	unsigned int xattr, unsigned int set_mode)
821 {
822 	struct utimbuf times = { time, time };
823 
824 	write_xattr(pathname, xattr);
825 
826 	if(utime(pathname, &times) == -1) {
827 		ERROR("set_attributes: failed to set time on %s, because %s\n",
828 			pathname, strerror(errno));
829 		return FALSE;
830 	}
831 
832 	if(root_process) {
833 		if(chown(pathname, uid, guid) == -1) {
834 			ERROR("set_attributes: failed to change uid and gids "
835 				"on %s, because %s\n", pathname,
836 				strerror(errno));
837 			return FALSE;
838 		}
839 	} else
840 		mode &= ~07000;
841 
842 	if((set_mode || (mode & 07000)) && chmod(pathname, (mode_t) mode) == -1) {
843 		ERROR("set_attributes: failed to change mode %s, because %s\n",
844 			pathname, strerror(errno));
845 		return FALSE;
846 	}
847 
848 	return TRUE;
849 }
850 
851 
852 int write_bytes(int fd, char *buff, int bytes)
853 {
854 	int res, count;
855 
856 	for(count = 0; count < bytes; count += res) {
857 		res = write(fd, buff + count, bytes - count);
858 		if(res == -1) {
859 			if(errno != EINTR) {
860 				ERROR("Write on output file failed because "
861 					"%s\n", strerror(errno));
862 				return -1;
863 			}
864 			res = 0;
865 		}
866 	}
867 
868 	return 0;
869 }
870 
871 
872 int lseek_broken = FALSE;
873 char *zero_data = NULL;
874 
875 int write_block(int file_fd, char *buffer, int size, long long hole, int sparse)
876 {
877 	off_t off = hole;
878 
879 	if(hole) {
880 		if(sparse && lseek_broken == FALSE) {
881 			off_t error = lseek(file_fd, off, SEEK_CUR);
882 			if(error == -1)
883 				/* failed to seek beyond end of file */
884 				lseek_broken = TRUE;
885 		}
886 
887 		if((sparse == FALSE || lseek_broken) && zero_data == NULL) {
888 			if((zero_data = malloc(block_size)) == NULL)
889 				EXIT_UNSQUASH("write_block: failed to alloc "
890 					"zero data block\n");
891 			memset(zero_data, 0, block_size);
892 		}
893 
894 		if(sparse == FALSE || lseek_broken) {
895 			int blocks = (hole + block_size -1) / block_size;
896 			int avail_bytes, i;
897 			for(i = 0; i < blocks; i++, hole -= avail_bytes) {
898 				avail_bytes = hole > block_size ? block_size :
899 					hole;
900 				if(write_bytes(file_fd, zero_data, avail_bytes)
901 						== -1)
902 					goto failure;
903 			}
904 		}
905 	}
906 
907 	if(write_bytes(file_fd, buffer, size) == -1)
908 		goto failure;
909 
910 	return TRUE;
911 
912 failure:
913 	return FALSE;
914 }
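
/*
 * Illustrative sketch, not part of the build: writing a data block preceded
 * by a 1 Mbyte hole.  With sparse set, write_block() tries to seek past the
 * hole; if seeking fails (or sparse is FALSE) it falls back to writing
 * zero-filled blocks.  file_fd, buffer and size stand in for values the
 * caller already has.
 */
#if 0
	if(write_block(file_fd, buffer, size, 1024 * 1024, TRUE) == FALSE)
		ERROR("failed to write data block\n");
#endif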
915 
916 
917 pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
918 pthread_cond_t open_empty = PTHREAD_COND_INITIALIZER;
919 int open_unlimited, open_count;
920 #define OPEN_FILE_MARGIN 10
921 
922 
923 void open_init(int count)
924 {
925 	open_count = count;
926 	open_unlimited = count == -1;
927 }
928 
929 
930 int open_wait(char *pathname, int flags, mode_t mode)
931 {
932 	if (!open_unlimited) {
933 		pthread_mutex_lock(&open_mutex);
934 		while (open_count == 0)
935 			pthread_cond_wait(&open_empty, &open_mutex);
936 		open_count --;
937 		pthread_mutex_unlock(&open_mutex);
938 	}
939 
940 	return open(pathname, flags, mode);
941 }
942 
943 
944 void close_wake(int fd)
945 {
946 	close(fd);
947 
948 	if (!open_unlimited) {
949 		pthread_mutex_lock(&open_mutex);
950 		open_count ++;
951 		pthread_cond_signal(&open_empty);
952 		pthread_mutex_unlock(&open_mutex);
953 	}
954 }
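
/*
 * Illustrative sketch, not part of the build: open_init(), open_wait() and
 * close_wake() throttle the number of simultaneously open output files.
 * The limit and pathname here are hypothetical; the real limit is typically
 * derived from the process file descriptor limit.
 */
#if 0
	open_init(100 - OPEN_FILE_MARGIN);	/* allow ~90 concurrent files */

	int file_fd = open_wait(pathname, O_CREAT | O_WRONLY, 0644);
	/* ... write the file ... */
	close_wake(file_fd);	/* wakes one thread blocked in open_wait() */
#endif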
955 
956 
957 void queue_file(char *pathname, int file_fd, struct inode *inode)
958 {
959 	struct squashfs_file *file = malloc(sizeof(struct squashfs_file));
960 	if(file == NULL)
961 		EXIT_UNSQUASH("queue_file: unable to malloc file\n");
962 
963 	file->fd = file_fd;
964 	file->file_size = inode->data;
965 	file->mode = inode->mode;
966 	file->gid = inode->gid;
967 	file->uid = inode->uid;
968 	file->time = inode->time;
969 	file->pathname = strdup(pathname);
970 	file->blocks = inode->blocks + (inode->frag_bytes > 0);
971 	file->sparse = inode->sparse;
972 	file->xattr = inode->xattr;
973 	queue_put(to_writer, file);
974 }
975 
976 
977 void queue_dir(char *pathname, struct dir *dir)
978 {
979 	struct squashfs_file *file = malloc(sizeof(struct squashfs_file));
980 	if(file == NULL)
981 		EXIT_UNSQUASH("queue_dir: unable to malloc file\n");
982 
983 	file->fd = -1;
984 	file->mode = dir->mode;
985 	file->gid = dir->guid;
986 	file->uid = dir->uid;
987 	file->time = dir->mtime;
988 	file->pathname = strdup(pathname);
989 	file->xattr = dir->xattr;
990 	queue_put(to_writer, file);
991 }
992 
993 
994 int write_file(struct inode *inode, char *pathname)
995 {
996 	int file_fd, i;
997 	unsigned int *block_list;
998 	int file_end = inode->data / block_size;
999 	long long start = inode->start;
1000 
1001 	TRACE("write_file: regular file, blocks %d\n", inode->blocks);
1002 
1003 	file_fd = open_wait(pathname, O_CREAT | O_WRONLY |
1004 		(force ? O_TRUNC : 0), (mode_t) inode->mode & 0777);
1005 	if(file_fd == -1) {
1006 		ERROR("write_file: failed to create file %s, because %s\n",
1007 			pathname, strerror(errno));
1008 		return FALSE;
1009 	}
1010 
1011 	block_list = malloc(inode->blocks * sizeof(unsigned int));
1012 	if(block_list == NULL)
1013 		EXIT_UNSQUASH("write_file: unable to malloc block list\n");
1014 
1015 	s_ops.read_block_list(block_list, inode->block_ptr, inode->blocks);
1016 
1017 	/*
1018 	 * a squashfs_file structure describing the file is queued to the
1019 	 * writer thread.  If the file has one or more blocks or a fragment,
1020 	 * they are queued separately (as references to blocks in the cache).
1021 	 */
1022 	queue_file(pathname, file_fd, inode);
1023 
1024 	for(i = 0; i < inode->blocks; i++) {
1025 		int c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(block_list[i]);
1026 		struct file_entry *block = malloc(sizeof(struct file_entry));
1027 
1028 		if(block == NULL)
1029 			EXIT_UNSQUASH("write_file: unable to malloc file\n");
1030 		block->offset = 0;
1031 		block->size = i == file_end ? inode->data & (block_size - 1) :
1032 			block_size;
1033 		if(block_list[i] == 0) /* sparse block */
1034 			block->buffer = NULL;
1035 		else {
1036 			block->buffer = cache_get(data_cache, start,
1037 				block_list[i]);
1038 			start += c_byte;
1039 		}
1040 		queue_put(to_writer, block);
1041 	}
1042 
1043 	if(inode->frag_bytes) {
1044 		int size;
1045 		long long start;
1046 		struct file_entry *block = malloc(sizeof(struct file_entry));
1047 
1048 		if(block == NULL)
1049 			EXIT_UNSQUASH("write_file: unable to malloc file\n");
1050 		s_ops.read_fragment(inode->fragment, &start, &size);
1051 		block->buffer = cache_get(fragment_cache, start, size);
1052 		block->offset = inode->offset;
1053 		block->size = inode->frag_bytes;
1054 		queue_put(to_writer, block);
1055 	}
1056 
1057 	free(block_list);
1058 	return TRUE;
1059 }
1060 
1061 
1062 int create_inode(char *pathname, struct inode *i)
1063 {
1064 	TRACE("create_inode: pathname %s\n", pathname);
1065 
1066 	if(created_inode[i->inode_number - 1]) {
1067 		TRACE("create_inode: hard link\n");
1068 		if(force)
1069 			unlink(pathname);
1070 
1071 		if(link(created_inode[i->inode_number - 1], pathname) == -1) {
1072 			ERROR("create_inode: failed to create hardlink, "
1073 				"because %s\n", strerror(errno));
1074 			return FALSE;
1075 		}
1076 
1077 		return TRUE;
1078 	}
1079 
1080 	switch(i->type) {
1081 		case SQUASHFS_FILE_TYPE:
1082 		case SQUASHFS_LREG_TYPE:
1083 			TRACE("create_inode: regular file, file_size %lld, "
1084 				"blocks %d\n", i->data, i->blocks);
1085 
1086 			if(write_file(i, pathname))
1087 				file_count ++;
1088 			break;
1089 		case SQUASHFS_SYMLINK_TYPE:
1090 		case SQUASHFS_LSYMLINK_TYPE:
1091 			TRACE("create_inode: symlink, symlink_size %lld\n",
1092 				i->data);
1093 
1094 			if(force)
1095 				unlink(pathname);
1096 
1097 			if(symlink(i->symlink, pathname) == -1) {
1098 				ERROR("create_inode: failed to create symlink "
1099 					"%s, because %s\n", pathname,
1100 					strerror(errno));
1101 				break;
1102 			}
1103 
1104 			write_xattr(pathname, i->xattr);
1105 
1106 			if(root_process) {
1107 				if(lchown(pathname, i->uid, i->gid) == -1)
1108 					ERROR("create_inode: failed to change "
1109 						"uid and gids on %s, because "
1110 						"%s\n", pathname,
1111 						strerror(errno));
1112 			}
1113 
1114 			sym_count ++;
1115 			break;
1116  		case SQUASHFS_BLKDEV_TYPE:
1117 	 	case SQUASHFS_CHRDEV_TYPE:
1118  		case SQUASHFS_LBLKDEV_TYPE:
1119 	 	case SQUASHFS_LCHRDEV_TYPE: {
1120 			int chrdev = i->type == SQUASHFS_CHRDEV_TYPE;
1121 			TRACE("create_inode: dev, rdev 0x%llx\n", i->data);
1122 
1123 			if(root_process) {
1124 				if(force)
1125 					unlink(pathname);
1126 
1127 				if(mknod(pathname, chrdev ? S_IFCHR : S_IFBLK,
1128 						makedev((i->data >> 8) & 0xff,
1129 						i->data & 0xff)) == -1) {
1130 					ERROR("create_inode: failed to create "
1131 						"%s device %s, because %s\n",
1132 						chrdev ? "character" : "block",
1133 						pathname, strerror(errno));
1134 					break;
1135 				}
1136 				set_attributes(pathname, i->mode, i->uid,
1137 					i->gid, i->time, i->xattr, TRUE);
1138 				dev_count ++;
1139 			} else
1140 				ERROR("create_inode: could not create %s "
1141 					"device %s, because you're not "
1142 					"superuser!\n", chrdev ? "character" :
1143 					"block", pathname);
1144 			break;
1145 		}
1146 		case SQUASHFS_FIFO_TYPE:
1147 		case SQUASHFS_LFIFO_TYPE:
1148 			TRACE("create_inode: fifo\n");
1149 
1150 			if(force)
1151 				unlink(pathname);
1152 
1153 			if(mknod(pathname, S_IFIFO, 0) == -1) {
1154 				ERROR("create_inode: failed to create fifo %s, "
1155 					"because %s\n", pathname,
1156 					strerror(errno));
1157 				break;
1158 			}
1159 			set_attributes(pathname, i->mode, i->uid, i->gid,
1160 				i->time, i->xattr, TRUE);
1161 			fifo_count ++;
1162 			break;
1163 		case SQUASHFS_SOCKET_TYPE:
1164 		case SQUASHFS_LSOCKET_TYPE:
1165 			TRACE("create_inode: socket\n");
1166 			ERROR("create_inode: socket %s ignored\n", pathname);
1167 			break;
1168 		default:
1169 			ERROR("Unknown inode type %d in create_inode_table!\n",
1170 				i->type);
1171 			return FALSE;
1172 	}
1173 
1174 	created_inode[i->inode_number - 1] = strdup(pathname);
1175 
1176 	return TRUE;
1177 }
1178 
1179 
1180 int read_directory_table(long long start, long long end)
1181 {
1182 	int bytes = 0, size = 0, res;
1183 
1184 	TRACE("read_directory_table: start %lld, end %lld\n", start, end);
1185 
1186 	while(start < end) {
1187 		if(size - bytes < SQUASHFS_METADATA_SIZE) {
1188 			directory_table = realloc(directory_table, size +=
1189 				SQUASHFS_METADATA_SIZE);
1190 			if(directory_table == NULL) {
1191 				ERROR("Out of memory in "
1192 						"read_directory_table\n");
1193 				goto failed;
1194 			}
1195 		}
1196 
1197 		add_entry(directory_table_hash, start, bytes);
1198 
1199 		res = read_block(fd, start, &start, 0, directory_table + bytes);
1200 		if(res == 0) {
1201 			ERROR("read_directory_table: failed to read block\n");
1202 			goto failed;
1203 		}
1204 
1205 		bytes += res;
1206 
1207 		/*
1208 		 * If this is not the last metadata block in the directory table
1209 		 * then it should be SQUASHFS_METADATA_SIZE in size.
1210 		 * Note, we can't use expected in read_block() above for this
1211 		 * because we don't know if this is the last block until
1212 		 * after reading.
1213 		 */
1214 		if(start != end && res != SQUASHFS_METADATA_SIZE) {
1215 			ERROR("read_directory_table: metadata block "
1216 				"should be %d bytes in length, it is %d "
1217 				"bytes\n", SQUASHFS_METADATA_SIZE, res);
1218 			goto failed;
1219 		}
1220 	}
1221 
1222 	return TRUE;
1223 
1224 failed:
1225 	free(directory_table);
1226 	return FALSE;
1227 }
1228 
1229 
1230 int squashfs_readdir(struct dir *dir, char **name, unsigned int *start_block,
1231 unsigned int *offset, unsigned int *type)
1232 {
1233 	if(dir->cur_entry == dir->dir_count)
1234 		return FALSE;
1235 
1236 	*name = dir->dirs[dir->cur_entry].name;
1237 	*start_block = dir->dirs[dir->cur_entry].start_block;
1238 	*offset = dir->dirs[dir->cur_entry].offset;
1239 	*type = dir->dirs[dir->cur_entry].type;
1240 	dir->cur_entry ++;
1241 
1242 	return TRUE;
1243 }
1244 
1245 
1246 void squashfs_closedir(struct dir *dir)
1247 {
1248 	free(dir->dirs);
1249 	free(dir);
1250 }
1251 
1252 
1253 char *get_component(char *target, char **targname)
1254 {
1255 	char *start;
1256 
1257 	while(*target == '/')
1258 		target ++;
1259 
1260 	start = target;
1261 	while(*target != '/' && *target != '\0')
1262 		target ++;
1263 
1264 	*targname = strndup(start, target - start);
1265 
1266 	while(*target == '/')
1267 		target ++;
1268 
1269 	return target;
1270 }
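
/*
 * Illustrative sketch, not part of the build: get_component() peels one
 * pathname component off the front of a target string.
 */
#if 0
	char *name;
	char *rest = get_component("/usr/share/doc", &name);

	/* name is now "usr" (malloced by strndup), rest points at "share/doc" */
#endif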
1271 
1272 
1273 void free_path(struct pathname *paths)
1274 {
1275 	int i;
1276 
1277 	for(i = 0; i < paths->names; i++) {
1278 		if(paths->name[i].paths)
1279 			free_path(paths->name[i].paths);
1280 		free(paths->name[i].name);
1281 		if(paths->name[i].preg) {
1282 			regfree(paths->name[i].preg);
1283 			free(paths->name[i].preg);
1284 		}
1285 	}
1286 
1287 	free(paths);
1288 }
1289 
1290 
1291 struct pathname *add_path(struct pathname *paths, char *target, char *alltarget)
1292 {
1293 	char *targname;
1294 	int i, error;
1295 
1296 	TRACE("add_path: adding \"%s\" extract file\n", target);
1297 
1298 	target = get_component(target, &targname);
1299 
1300 	if(paths == NULL) {
1301 		paths = malloc(sizeof(struct pathname));
1302 		if(paths == NULL)
1303 			EXIT_UNSQUASH("failed to allocate paths\n");
1304 
1305 		paths->names = 0;
1306 		paths->name = NULL;
1307 	}
1308 
1309 	for(i = 0; i < paths->names; i++)
1310 		if(strcmp(paths->name[i].name, targname) == 0)
1311 			break;
1312 
1313 	if(i == paths->names) {
1314 		/*
1315 		 * allocate new name entry
1316 		 */
1317 		paths->names ++;
1318 		paths->name = realloc(paths->name, (i + 1) *
1319 			sizeof(struct path_entry));
1320 		if(paths->name == NULL)
1321 			EXIT_UNSQUASH("Out of memory in add_path\n");
1322 		paths->name[i].name = targname;
1323 		paths->name[i].paths = NULL;
1324 		if(use_regex) {
1325 			paths->name[i].preg = malloc(sizeof(regex_t));
1326 			if(paths->name[i].preg == NULL)
1327 				EXIT_UNSQUASH("Out of memory in add_path\n");
1328 			error = regcomp(paths->name[i].preg, targname,
1329 				REG_EXTENDED|REG_NOSUB);
1330 			if(error) {
1331 				char str[1024]; /* overflow safe */
1332 
1333 				regerror(error, paths->name[i].preg, str, 1024);
1334 				EXIT_UNSQUASH("invalid regex %s in export %s, "
1335 					"because %s\n", targname, alltarget,
1336 					str);
1337 			}
1338 		} else
1339 			paths->name[i].preg = NULL;
1340 
1341 		if(target[0] == '\0')
1342 			/*
1343 			 * at leaf pathname component
1344 			 */
1345 			paths->name[i].paths = NULL;
1346 		else
1347 			/*
1348 			 * recurse adding child components
1349 			 */
1350 			paths->name[i].paths = add_path(NULL, target, alltarget);
1351 	} else {
1352 		/*
1353 		 * existing matching entry
1354 		 */
1355 		free(targname);
1356 
1357 		if(paths->name[i].paths == NULL) {
1358 			/*
1359 			 * No sub-directory which means this is the leaf
1360 			 * component of a pre-existing extract which subsumes
1361 			 * the extract currently being added, in which case stop
1362 			 * adding components
1363 			 */
1364 		} else if(target[0] == '\0') {
1365 			/*
1366 			 * at leaf pathname component and child components exist
1367 			 * from more specific extracts, delete as they're
1368 			 * subsumed by this extract
1369 			 */
1370 			free_path(paths->name[i].paths);
1371 			paths->name[i].paths = NULL;
1372 		} else
1373 			/*
1374 			 * recurse adding child components
1375 			 */
1376 			add_path(paths->name[i].paths, target, alltarget);
1377 	}
1378 
1379 	return paths;
1380 }
1381 
1382 
1383 struct pathnames *init_subdir()
1384 {
1385 	struct pathnames *new = malloc(sizeof(struct pathnames));
1386 	if(new == NULL)
1387 		EXIT_UNSQUASH("Out of memory in init_subdir\n");
1388 	new->count = 0;
1389 	return new;
1390 }
1391 
1392 
1393 struct pathnames *add_subdir(struct pathnames *paths, struct pathname *path)
1394 {
1395 	if(paths->count % PATHS_ALLOC_SIZE == 0) {
1396 		paths = realloc(paths, sizeof(struct pathnames *) +
1397 			(paths->count + PATHS_ALLOC_SIZE) *
1398 			sizeof(struct pathname *));
1399 		if(paths == NULL)
1400 			EXIT_UNSQUASH("Out of memory in add_subdir\n");
1401 	}
1402 
1403 	paths->path[paths->count++] = path;
1404 	return paths;
1405 }
1406 
1407 
1408 void free_subdir(struct pathnames *paths)
1409 {
1410 	free(paths);
1411 }
1412 
1413 
1414 int matches(struct pathnames *paths, char *name, struct pathnames **new)
1415 {
1416 	int i, n;
1417 
1418 	if(paths == NULL) {
1419 		*new = NULL;
1420 		return TRUE;
1421 	}
1422 
1423 	*new = init_subdir();
1424 
1425 	for(n = 0; n < paths->count; n++) {
1426 		struct pathname *path = paths->path[n];
1427 		for(i = 0; i < path->names; i++) {
1428 			int match = use_regex ?
1429 				regexec(path->name[i].preg, name, (size_t) 0,
1430 				NULL, 0) == 0 : fnmatch(path->name[i].name,
1431 				name, FNM_PATHNAME|FNM_PERIOD|FNM_EXTMATCH) ==
1432 				0;
1433 			if(match && path->name[i].paths == NULL)
1434 				/*
1435 				 * match on a leaf component, any subdirectories
1436 				 * will implicitly match, therefore return an
1437 				 * empty new search set
1438 				 */
1439 				goto empty_set;
1440 
1441 			if(match)
1442 				/*
1443 				 * match on a non-leaf component, add any
1444 				 * subdirectories to the new set of
1445 				 * subdirectories to scan for this name
1446 				 */
1447 				*new = add_subdir(*new, path->name[i].paths);
1448 		}
1449 	}
1450 
1451 	if((*new)->count == 0) {
1452 		/*
1453 		 * no matching names found, delete empty search set, and return
1454 		 * FALSE
1455 		 */
1456 		free_subdir(*new);
1457 		*new = NULL;
1458 		return FALSE;
1459 	}
1460 
1461 	/*
1462 	 * one or more matches with sub-directories found (no leaf matches),
1463 	 * return the new search set and TRUE
1464 	 */
1465 	return TRUE;
1466 
1467 empty_set:
1468 	/*
1469 	 * found matching leaf exclude, return empty search set and return TRUE
1470 	 */
1471 	free_subdir(*new);
1472 	*new = NULL;
1473 	return TRUE;
1474 }
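
/*
 * Illustrative sketch, not part of the build: how add_path() and matches()
 * cooperate when only "a/b" is being extracted.  Scanning the root directory
 * keeps "a" (with a new search set describing the "b" subtree) and prunes
 * everything else.
 */
#if 0
	struct pathname *extract = add_path(NULL, "a/b", "a/b");
	struct pathnames *paths = add_subdir(init_subdir(), extract), *new;

	matches(paths, "a", &new);	/* TRUE, new describes the "b" subtree */
	matches(paths, "c", &new);	/* FALSE, "c" is pruned from the scan */
#endif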
1475 
1476 
1477 void pre_scan(char *parent_name, unsigned int start_block, unsigned int offset,
1478 	struct pathnames *paths)
1479 {
1480 	unsigned int type;
1481 	char *name;
1482 	struct pathnames *new;
1483 	struct inode *i;
1484 	struct dir *dir = s_ops.squashfs_opendir(start_block, offset, &i);
1485 
1486 	if(dir == NULL)
1487 		return;
1488 
1489 	while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
1490 		struct inode *i;
1491 		char *pathname;
1492 		int res;
1493 
1494 		TRACE("pre_scan: name %s, start_block %d, offset %d, type %d\n",
1495 			name, start_block, offset, type);
1496 
1497 		if(!matches(paths, name, &new))
1498 			continue;
1499 
1500 		res = asprintf(&pathname, "%s/%s", parent_name, name);
1501 		if(res == -1)
1502 			EXIT_UNSQUASH("asprintf failed in dir_scan\n");
1503 
1504 		if(type == SQUASHFS_DIR_TYPE)
1505 			pre_scan(parent_name, start_block, offset, new);
1506 		else if(new == NULL) {
1507 			if(type == SQUASHFS_FILE_TYPE ||
1508 					type == SQUASHFS_LREG_TYPE) {
1509 				i = s_ops.read_inode(start_block, offset);
1510 				if(created_inode[i->inode_number - 1] == NULL) {
1511 					created_inode[i->inode_number - 1] =
1512 						(char *) i;
1513 					total_blocks += (i->data +
1514 						(block_size - 1)) >> block_log;
1515 				}
1516 				total_files ++;
1517 			}
1518 			total_inodes ++;
1519 		}
1520 
1521 		free_subdir(new);
1522 		free(pathname);
1523 	}
1524 
1525 	squashfs_closedir(dir);
1526 }
1527 
1528 
1529 void dir_scan(char *parent_name, unsigned int start_block, unsigned int offset,
1530 	struct pathnames *paths)
1531 {
1532 	unsigned int type;
1533 	char *name;
1534 	struct pathnames *new;
1535 	struct inode *i;
1536 	struct dir *dir = s_ops.squashfs_opendir(start_block, offset, &i);
1537 
1538 	if(dir == NULL) {
1539 		ERROR("dir_scan: failed to read directory %s, skipping\n",
1540 			parent_name);
1541 		return;
1542 	}
1543 
1544 	if(lsonly || info)
1545 		print_filename(parent_name, i);
1546 
1547 	if(!lsonly) {
1548 		/*
1549 		 * Make directory with default User rwx permissions rather than
1550 		 * the permissions from the filesystem, as these may not have
1551 		 * write/execute permission.  These are fixed up later in
1552 		 * set_attributes().
1553 		 */
1554 		int res = mkdir(parent_name, S_IRUSR|S_IWUSR|S_IXUSR);
1555 		if(res == -1) {
1556 			/*
1557 			 * Skip directory if mkdir fails, unless we're
1558 			 * forcing and the error is -EEXIST
1559 			 */
1560 			if(!force || errno != EEXIST) {
1561 				ERROR("dir_scan: failed to make directory %s, "
1562 					"because %s\n", parent_name,
1563 					strerror(errno));
1564 				squashfs_closedir(dir);
1565 				return;
1566 			}
1567 
1568 			/*
1569 			 * Try to change permissions of existing directory so
1570 			 * that we can write to it
1571 			 */
1572 			res = chmod(parent_name, S_IRUSR|S_IWUSR|S_IXUSR);
1573 			if (res == -1)
1574 				ERROR("dir_scan: failed to change permissions "
1575 					"for directory %s, because %s\n",
1576 					parent_name, strerror(errno));
1577 		}
1578 	}
1579 
1580 	while(squashfs_readdir(dir, &name, &start_block, &offset, &type)) {
1581 		char *pathname;
1582 		int res;
1583 
1584 		TRACE("dir_scan: name %s, start_block %d, offset %d, type %d\n",
1585 			name, start_block, offset, type);
1586 
1587 
1588 		if(!matches(paths, name, &new))
1589 			continue;
1590 
1591 		res = asprintf(&pathname, "%s/%s", parent_name, name);
1592 		if(res == -1)
1593 			EXIT_UNSQUASH("asprintf failed in dir_scan\n");
1594 
1595 		if(type == SQUASHFS_DIR_TYPE) {
1596 			dir_scan(pathname, start_block, offset, new);
1597 			free(pathname);
1598 		} else if(new == NULL) {
1599 			update_info(pathname);
1600 
1601 			i = s_ops.read_inode(start_block, offset);
1602 
1603 			if(lsonly || info)
1604 				print_filename(pathname, i);
1605 
1606 			if(!lsonly)
1607 				create_inode(pathname, i);
1608 
1609 			if(i->type == SQUASHFS_SYMLINK_TYPE ||
1610 					i->type == SQUASHFS_LSYMLINK_TYPE)
1611 				free(i->symlink);
1612 		} else
1613 			free(pathname);
1614 
1615 		free_subdir(new);
1616 	}
1617 
1618 	if(!lsonly)
1619 		queue_dir(parent_name, dir);
1620 
1621 	squashfs_closedir(dir);
1622 	dir_count ++;
1623 }
1624 
1625 
1626 void squashfs_stat(char *source)
1627 {
1628 	time_t mkfs_time = (time_t) sBlk.s.mkfs_time;
1629 	char *mkfs_str = ctime(&mkfs_time);
1630 
1631 #if __BYTE_ORDER == __BIG_ENDIAN
1632 	printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
1633 		sBlk.s.s_major == 4 ? "" : swap ? "little endian " :
1634 		"big endian ", sBlk.s.s_major, sBlk.s.s_minor, source);
1635 #else
1636 	printf("Found a valid %sSQUASHFS %d:%d superblock on %s.\n",
1637 		sBlk.s.s_major == 4 ? "" : swap ? "big endian " :
1638 		"little endian ", sBlk.s.s_major, sBlk.s.s_minor, source);
1639 #endif
1640 
1641 	printf("Creation or last append time %s", mkfs_str ? mkfs_str :
1642 		"failed to get time\n");
1643 	printf("Filesystem size %.2f Kbytes (%.2f Mbytes)\n",
1644 		sBlk.s.bytes_used / 1024.0, sBlk.s.bytes_used /
1645 		(1024.0 * 1024.0));
1646 
1647 	if(sBlk.s.s_major == 4) {
1648 		printf("Compression %s\n", comp->name);
1649 
1650 		if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
1651 			char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
1652 			int bytes;
1653 
1654 			bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
1655 			if(bytes == 0) {
1656 				ERROR("Failed to read compressor options\n");
1657 				return;
1658 			}
1659 
1660 			compressor_display_options(comp, buffer, bytes);
1661 		}
1662 	}
1663 
1664 	printf("Block size %d\n", sBlk.s.block_size);
1665 	printf("Filesystem is %sexportable via NFS\n",
1666 		SQUASHFS_EXPORTABLE(sBlk.s.flags) ? "" : "not ");
1667 	printf("Inodes are %scompressed\n",
1668 		SQUASHFS_UNCOMPRESSED_INODES(sBlk.s.flags) ? "un" : "");
1669 	printf("Data is %scompressed\n",
1670 		SQUASHFS_UNCOMPRESSED_DATA(sBlk.s.flags) ? "un" : "");
1671 
1672 	if(sBlk.s.s_major > 1) {
1673 		if(SQUASHFS_NO_FRAGMENTS(sBlk.s.flags))
1674 			printf("Fragments are not stored\n");
1675 		else {
1676 			printf("Fragments are %scompressed\n",
1677 				SQUASHFS_UNCOMPRESSED_FRAGMENTS(sBlk.s.flags) ?
1678 				"un" : "");
1679 			printf("Always-use-fragments option is %sspecified\n",
1680 				SQUASHFS_ALWAYS_FRAGMENTS(sBlk.s.flags) ? "" :
1681 				"not ");
1682 		}
1683 	}
1684 
1685 	if(sBlk.s.s_major == 4) {
1686 		if(SQUASHFS_NO_XATTRS(sBlk.s.flags))
1687 			printf("Xattrs are not stored\n");
1688 		else
1689 			printf("Xattrs are %scompressed\n",
1690 				SQUASHFS_UNCOMPRESSED_XATTRS(sBlk.s.flags) ?
1691 				"un" : "");
1692 	}
1693 
1694 	if(sBlk.s.s_major < 4)
1695 			printf("Check data is %spresent in the filesystem\n",
1696 				SQUASHFS_CHECK_DATA(sBlk.s.flags) ? "" :
1697 				"not ");
1698 
1699 	if(sBlk.s.s_major > 1)
1700 		printf("Duplicates are %sremoved\n",
1701 			SQUASHFS_DUPLICATES(sBlk.s.flags) ? "" : "not ");
1702 	else
1703 		printf("Duplicates are removed\n");
1704 
1705 	if(sBlk.s.s_major > 1)
1706 		printf("Number of fragments %d\n", sBlk.s.fragments);
1707 
1708 	printf("Number of inodes %d\n", sBlk.s.inodes);
1709 
1710 	if(sBlk.s.s_major == 4)
1711 		printf("Number of ids %d\n", sBlk.s.no_ids);
1712 	else {
1713 		printf("Number of uids %d\n", sBlk.no_uids);
1714 		printf("Number of gids %d\n", sBlk.no_guids);
1715 	}
1716 
1717 	TRACE("sBlk.s.inode_table_start 0x%llx\n", sBlk.s.inode_table_start);
1718 	TRACE("sBlk.s.directory_table_start 0x%llx\n",
1719 		sBlk.s.directory_table_start);
1720 
1721 	if(sBlk.s.s_major > 1)
1722 		TRACE("sBlk.s.fragment_table_start 0x%llx\n\n",
1723 			sBlk.s.fragment_table_start);
1724 
1725 	if(sBlk.s.s_major > 2)
1726 		TRACE("sBlk.s.lookup_table_start 0x%llx\n\n",
1727 			sBlk.s.lookup_table_start);
1728 
1729 	if(sBlk.s.s_major == 4) {
1730 		TRACE("sBlk.s.id_table_start 0x%llx\n", sBlk.s.id_table_start);
1731 		TRACE("sBlk.s.xattr_id_table_start 0x%llx\n",
1732 			sBlk.s.xattr_id_table_start);
1733 	} else {
1734 		TRACE("sBlk.uid_start 0x%llx\n", sBlk.uid_start);
1735 		TRACE("sBlk.guid_start 0x%llx\n", sBlk.guid_start);
1736 	}
1737 }
1738 
1739 
1740 int check_compression(struct compressor *comp)
1741 {
1742 	int res, bytes = 0;
1743 	char buffer[SQUASHFS_METADATA_SIZE] __attribute__ ((aligned));
1744 
1745 	if(!comp->supported) {
1746 		ERROR("Filesystem uses %s compression, this is "
1747 			"unsupported by this version\n", comp->name);
1748 		ERROR("Decompressors available:\n");
1749 		display_compressors("", "");
1750 		return 0;
1751 	}
1752 
1753 	/*
1754 	 * Read compression options from disk if present, and pass to
1755 	 * the compressor to ensure we know how to decompress a filesystem
1756 	 * compressed with these compression options.
1757 	 *
1758 	 * Note, even if there are no compression options we still call the
1759 	 * compressor, because some compression options may be mandatory
1760 	 * for some compressors.
1761 	 */
1762 	if(SQUASHFS_COMP_OPTS(sBlk.s.flags)) {
1763 		bytes = read_block(fd, sizeof(sBlk.s), NULL, 0, buffer);
1764 		if(bytes == 0) {
1765 			ERROR("Failed to read compressor options\n");
1766 			return 0;
1767 		}
1768 	}
1769 
1770 	res = compressor_check_options(comp, sBlk.s.block_size, buffer, bytes);
1771 
1772 	return res != -1;
1773 }
1774 
1775 
1776 int read_super(char *source)
1777 {
1778 	squashfs_super_block_3 sBlk_3;
1779 	struct squashfs_super_block sBlk_4;
1780 
1781 	/*
1782 	 * Try to read a Squashfs 4 superblock
1783 	 */
1784 	read_fs_bytes(fd, SQUASHFS_START, sizeof(struct squashfs_super_block),
1785 		&sBlk_4);
1786 	swap = sBlk_4.s_magic != SQUASHFS_MAGIC;
1787 	SQUASHFS_INSWAP_SUPER_BLOCK(&sBlk_4);
1788 
1789 	if(sBlk_4.s_magic == SQUASHFS_MAGIC && sBlk_4.s_major == 4 &&
1790 			sBlk_4.s_minor == 0) {
1791 		s_ops.squashfs_opendir = squashfs_opendir_4;
1792 		s_ops.read_fragment = read_fragment_4;
1793 		s_ops.read_fragment_table = read_fragment_table_4;
1794 		s_ops.read_block_list = read_block_list_2;
1795 		s_ops.read_inode = read_inode_4;
1796 		s_ops.read_uids_guids = read_uids_guids_4;
1797 		memcpy(&sBlk, &sBlk_4, sizeof(sBlk_4));
1798 
1799 		/*
1800 		 * Check the compression type
1801 		 */
1802 		comp = lookup_compressor_id(sBlk.s.compression);
1803 		return TRUE;
1804 	}
1805 
1806 	/*
1807  	 * Not a Squashfs 4 superblock, try to read a squashfs 3 superblock
1808  	 * (compatible with 1 and 2 filesystems)
1809  	 */
1810 	read_fs_bytes(fd, SQUASHFS_START, sizeof(squashfs_super_block_3),
1811 		&sBlk_3);
1812 
1813 	/*
1814 	 * Check it is a SQUASHFS superblock
1815 	 */
1816 	swap = 0;
1817 	if(sBlk_3.s_magic != SQUASHFS_MAGIC) {
1818 		if(sBlk_3.s_magic == SQUASHFS_MAGIC_SWAP) {
1819 			squashfs_super_block_3 sblk;
1820 			ERROR("Reading a different endian SQUASHFS filesystem "
1821 				"on %s\n", source);
1822 			SQUASHFS_SWAP_SUPER_BLOCK_3(&sblk, &sBlk_3);
1823 			memcpy(&sBlk_3, &sblk, sizeof(squashfs_super_block_3));
1824 			swap = 1;
1825 		} else  {
1826 			ERROR("Can't find a SQUASHFS superblock on %s\n",
1827 				source);
1828 			goto failed_mount;
1829 		}
1830 	}
1831 
1832 	sBlk.s.s_magic = sBlk_3.s_magic;
1833 	sBlk.s.inodes = sBlk_3.inodes;
1834 	sBlk.s.mkfs_time = sBlk_3.mkfs_time;
1835 	sBlk.s.block_size = sBlk_3.block_size;
1836 	sBlk.s.fragments = sBlk_3.fragments;
1837 	sBlk.s.block_log = sBlk_3.block_log;
1838 	sBlk.s.flags = sBlk_3.flags;
1839 	sBlk.s.s_major = sBlk_3.s_major;
1840 	sBlk.s.s_minor = sBlk_3.s_minor;
1841 	sBlk.s.root_inode = sBlk_3.root_inode;
1842 	sBlk.s.bytes_used = sBlk_3.bytes_used;
1843 	sBlk.s.inode_table_start = sBlk_3.inode_table_start;
1844 	sBlk.s.directory_table_start = sBlk_3.directory_table_start;
1845 	sBlk.s.fragment_table_start = sBlk_3.fragment_table_start;
1846 	sBlk.s.lookup_table_start = sBlk_3.lookup_table_start;
1847 	sBlk.no_uids = sBlk_3.no_uids;
1848 	sBlk.no_guids = sBlk_3.no_guids;
1849 	sBlk.uid_start = sBlk_3.uid_start;
1850 	sBlk.guid_start = sBlk_3.guid_start;
1851 	sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
1852 
1853 	/* Check the MAJOR & MINOR versions */
1854 	if(sBlk.s.s_major == 1 || sBlk.s.s_major == 2) {
1855 		sBlk.s.bytes_used = sBlk_3.bytes_used_2;
1856 		sBlk.uid_start = sBlk_3.uid_start_2;
1857 		sBlk.guid_start = sBlk_3.guid_start_2;
1858 		sBlk.s.inode_table_start = sBlk_3.inode_table_start_2;
1859 		sBlk.s.directory_table_start = sBlk_3.directory_table_start_2;
1860 
1861 		if(sBlk.s.s_major == 1) {
1862 			sBlk.s.block_size = sBlk_3.block_size_1;
1863 			sBlk.s.fragment_table_start = sBlk.uid_start;
1864 			s_ops.squashfs_opendir = squashfs_opendir_1;
1865 			s_ops.read_fragment_table = read_fragment_table_1;
1866 			s_ops.read_block_list = read_block_list_1;
1867 			s_ops.read_inode = read_inode_1;
1868 			s_ops.read_uids_guids = read_uids_guids_1;
1869 		} else {
1870 			sBlk.s.fragment_table_start =
1871 				sBlk_3.fragment_table_start_2;
1872 			s_ops.squashfs_opendir = squashfs_opendir_1;
1873 			s_ops.read_fragment = read_fragment_2;
1874 			s_ops.read_fragment_table = read_fragment_table_2;
1875 			s_ops.read_block_list = read_block_list_2;
1876 			s_ops.read_inode = read_inode_2;
1877 			s_ops.read_uids_guids = read_uids_guids_1;
1878 		}
1879 	} else if(sBlk.s.s_major == 3) {
1880 		s_ops.squashfs_opendir = squashfs_opendir_3;
1881 		s_ops.read_fragment = read_fragment_3;
1882 		s_ops.read_fragment_table = read_fragment_table_3;
1883 		s_ops.read_block_list = read_block_list_2;
1884 		s_ops.read_inode = read_inode_3;
1885 		s_ops.read_uids_guids = read_uids_guids_1;
1886 	} else {
1887 		ERROR("Filesystem on %s is (%d:%d), ", source, sBlk.s.s_major,
1888 			sBlk.s.s_minor);
1889 		ERROR("which is a later filesystem version than I support!\n");
1890 		goto failed_mount;
1891 	}
1892 
1893 	/*
1894 	 * 1.x, 2.x and 3.x filesystems use gzip compression.
1895 	 */
1896 	comp = lookup_compressor("gzip");
1897 	return TRUE;
1898 
1899 failed_mount:
1900 	return FALSE;
1901 }
1902 
1903 
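/*
 * Read the list of pathnames to extract from an "extract file" (-ef option).
 * Hypothetical example of its contents:
 *
 *	# comment lines and empty lines are ignored
 *	bin/busybox
 *	\#name-with-a-leading-hash
 *
 * A leading backslash protects names which begin with '#' or whitespace.
 */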
1904 struct pathname *process_extract_files(struct pathname *path, char *filename)
1905 {
1906 	FILE *fd;
1907 	char buffer[MAX_LINE + 1]; /* overflow safe */
1908 	char *name;
1909 
1910 	fd = fopen(filename, "r");
1911 	if(fd == NULL)
1912 		EXIT_UNSQUASH("Failed to open extract file \"%s\" because %s\n",
1913 			filename, strerror(errno));
1914 
1915 	while(fgets(name = buffer, MAX_LINE + 1, fd) != NULL) {
1916 		int len = strlen(name);
1917 
1918 		if(len == MAX_LINE && name[len - 1] != '\n')
1919 			/* line too large */
1920 			EXIT_UNSQUASH("Line too long when reading "
1921 				"extract file \"%s\", larger than %d "
1922 				"bytes\n", filename, MAX_LINE);
1923 
1924 		/*
1925 		 * Remove '\n' terminator if it exists (the last line
1926 		 * in the file may not be '\n' terminated)
1927 		 */
1928 		if(len && name[len - 1] == '\n')
1929 			name[len - 1] = '\0';
1930 
1931 		/* Skip any leading whitespace */
1932 		while(isspace(*name))
1933 			name ++;
1934 
1935 		/* if comment line, skip */
1936 		if(*name == '#')
1937 			continue;
1938 
1939 		/* check for initial backslash, to accommodate
1940 		 * filenames with leading space or leading # character
1941 		 */
1942 		if(*name == '\\')
1943 			name ++;
1944 
1945 		/* if line is now empty after skipping characters, skip it */
1946 		if(*name == '\0')
1947 			continue;
1948 
1949 		path = add_path(path, name, name);
1950 	}
1951 
1952 	if(ferror(fd))
1953 		EXIT_UNSQUASH("Reading extract file \"%s\" failed because %s\n",
1954 			filename, strerror(errno));
1955 
1956 	fclose(fd);
1957 	return path;
1958 }
1959 
1960 
1961 /*
1962  * reader thread.  This thread processes read requests queued by the
1963  * cache_get() routine.
1964  */
1965 void *reader(void *arg)
1966 {
1967 	while(1) {
1968 		struct cache_entry *entry = queue_get(to_reader);
1969 		int res = read_fs_bytes(fd, entry->block,
1970 			SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->size),
1971 			entry->data);
1972 
1973 		if(res && SQUASHFS_COMPRESSED_BLOCK(entry->size))
1974 			/*
1975 			 * queue successfully read block to the inflate
1976 			 * thread(s) for further processing
1977  			 */
1978 			queue_put(to_inflate, entry);
1979 		else
1980 			/*
1981 			 * block has either been successfully read and is
1982 			 * uncompressed, or an error has occurred, clear pending
1983 			 * flag, set error appropriately, and wake up any
1984 			 * threads waiting on this buffer
1985 			 */
1986 			cache_block_ready(entry, !res);
1987 	}
1988 }
1989 
1990 
1991 /*
1992  * writer thread.  This processes file write requests queued by the
1993  * write_file() routine.
1994  */
1995 void *writer(void *arg)
1996 {
1997 	int i;
1998 
1999 	while(1) {
2000 		struct squashfs_file *file = queue_get(to_writer);
2001 		int file_fd;
2002 		long long hole = 0;
2003 		int failed = FALSE;
2004 		int error;
2005 
2006 		if(file == NULL) {
2007 			queue_put(from_writer, NULL);
2008 			continue;
2009 		} else if(file->fd == -1) {
2010 			/* write attributes for directory file->pathname */
2011 			set_attributes(file->pathname, file->mode, file->uid,
2012 				file->gid, file->time, file->xattr, TRUE);
2013 			free(file->pathname);
2014 			free(file);
2015 			continue;
2016 		}
2017 
2018 		TRACE("writer: regular file, blocks %d\n", file->blocks);
2019 
2020 		file_fd = file->fd;
2021 
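		/*
		 * write each block queued for this file.  Sparse runs are
		 * accumulated in "hole" and dealt with by write_block() when
		 * the next real data block is written
		 */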
2022 		for(i = 0; i < file->blocks; i++, cur_blocks ++) {
2023 			struct file_entry *block = queue_get(to_writer);
2024 
2025 			if(block->buffer == 0) { /* sparse file */
2026 				hole += block->size;
2027 				free(block);
2028 				continue;
2029 			}
2030 
2031 			cache_block_wait(block->buffer);
2032 
2033 			if(block->buffer->error)
2034 				failed = TRUE;
2035 
2036 			if(failed)
2037 				continue;
2038 
2039 			error = write_block(file_fd, block->buffer->data +
2040 				block->offset, block->size, hole, file->sparse);
2041 
2042 			if(error == FALSE) {
2043 				ERROR("writer: failed to write data block %d\n",
2044 					i);
2045 				failed = TRUE;
2046 			}
2047 
2048 			hole = 0;
2049 			cache_block_put(block->buffer);
2050 			free(block);
2051 		}
2052 
2053 		if(hole && failed == FALSE) {
2054 			/*
2055 			 * corner case for hole extending to end of file
2056 			 */
2057 			if(file->sparse == FALSE ||
2058 					lseek(file_fd, hole, SEEK_CUR) == -1) {
2059 				/*
2060 				 * for files which we don't want to write
2061 				 * sparsely, or for broken lseeks which cannot
2062 				 * seek beyond end of file, write_block will do
2063 				 * the right thing
2064 				 */
2065 				hole --;
2066 				if(write_block(file_fd, "\0", 1, hole,
2067 						file->sparse) == FALSE) {
2068 					ERROR("writer: failed to write sparse "
2069 						"data block\n");
2070 					failed = TRUE;
2071 				}
2072 			} else if(ftruncate(file_fd, file->file_size) == -1) {
2073 				ERROR("writer: failed to write sparse data "
2074 					"block\n");
2075 				failed = TRUE;
2076 			}
2077 		}
2078 
2079 		close_wake(file_fd);
2080 		if(failed == FALSE)
2081 			set_attributes(file->pathname, file->mode, file->uid,
2082 				file->gid, file->time, file->xattr, force);
2083 		else {
2084 			ERROR("Failed to write %s, skipping\n", file->pathname);
2085 			unlink(file->pathname);
2086 		}
2087 		free(file->pathname);
2088 		free(file);
2089 
2090 	}
2091 }
2092 
2093 
2094 /*
2095  * decompress thread.  This decompresses buffers queued by the read thread
2096  */
2097 void *inflator(void *arg)
2098 {
2099 	char tmp[block_size];
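	/*
	 * tmp is a scratch buffer: blocks are decompressed into it and then
	 * copied back, because entry->data is both the compressed source and
	 * the final destination
	 */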
2100 
2101 	while(1) {
2102 		struct cache_entry *entry = queue_get(to_inflate);
2103 		int error, res;
2104 
2105 		res = compressor_uncompress(comp, tmp, entry->data,
2106 			SQUASHFS_COMPRESSED_SIZE_BLOCK(entry->size), block_size,
2107 			&error);
2108 
2109 		if(res == -1)
2110 			ERROR("%s uncompress failed with error code %d\n",
2111 				comp->name, error);
2112 		else
2113 			memcpy(entry->data, tmp, res);
2114 
2115 		/*
2116 		 * block has been either successfully decompressed, or an error
2117  		 * occurred, clear pending flag, set error appropriately and
2118  		 * wake up any threads waiting on this block
2119  		 */
2120 		cache_block_ready(entry, res == -1);
2121 	}
2122 }
2123 
2124 
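/*
 * progress bar thread.  This redraws the progress bar four times a second
 * while progress reporting is enabled.  The matching 250ms interval timer
 * delivers SIGALRM (see sigalrm_handler, defined earlier in this file),
 * which keeps the spinner rotating.
 */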
2125 void *progress_thread(void *arg)
2126 {
2127 	struct timespec requested_time, remaining;
2128 	struct itimerval itimerval;
2129 	struct winsize winsize;
2130 
2131 	if(ioctl(1, TIOCGWINSZ, &winsize) == -1) {
2132 		if(isatty(STDOUT_FILENO))
2133 			ERROR("TIOCGWINSZ ioctl failed, defaulting to 80 "
2134 				"columns\n");
2135 		columns = 80;
2136 	} else
2137 		columns = winsize.ws_col;
2138 	signal(SIGWINCH, sigwinch_handler);
2139 	signal(SIGALRM, sigalrm_handler);
2140 
2141 	itimerval.it_value.tv_sec = 0;
2142 	itimerval.it_value.tv_usec = 250000;
2143 	itimerval.it_interval.tv_sec = 0;
2144 	itimerval.it_interval.tv_usec = 250000;
2145 	setitimer(ITIMER_REAL, &itimerval, NULL);
2146 
2147 	requested_time.tv_sec = 0;
2148 	requested_time.tv_nsec = 250000000;
2149 
2150 	while(1) {
2151 		int res = nanosleep(&requested_time, &remaining);
2152 
2153 		if(res == -1 && errno != EINTR)
2154 			EXIT_UNSQUASH("nanosleep failed in progress thread\n");
2155 
2156 		if(progress_enabled) {
2157 			pthread_mutex_lock(&screen_mutex);
2158 			progress_bar(sym_count + dev_count +
2159 				fifo_count + cur_blocks, total_inodes -
2160 				total_files + total_blocks, columns);
2161 			pthread_mutex_unlock(&screen_mutex);
2162 		}
2163 	}
2164 }
2165 
2166 
2167 void initialise_threads(int fragment_buffer_size, int data_buffer_size)
2168 {
2169 	struct rlimit rlim;
2170 	int i, max_files, res;
2171 	sigset_t sigmask, old_mask;
2172 
2173 	/* block SIGQUIT and SIGHUP, these are handled by the info thread */
2174 	sigemptyset(&sigmask);
2175 	sigaddset(&sigmask, SIGQUIT);
2176 	sigaddset(&sigmask, SIGHUP);
2177 	if(pthread_sigmask(SIG_BLOCK, &sigmask, NULL) == -1)
2178 		EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2179 			"\n");
2180 
2181 	/*
2182 	 * temporarily block these signals so the created sub-threads will
2183 	 * ignore them, ensuring the main thread handles them
2184 	 */
2185 	sigemptyset(&sigmask);
2186 	sigaddset(&sigmask, SIGINT);
2187 	sigaddset(&sigmask, SIGTERM);
2188 	if(pthread_sigmask(SIG_BLOCK, &sigmask, &old_mask) == -1)
2189 		EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2190 			"\n");
2191 
2192 	if(processors == -1) {
2193 #ifndef linux
2194 		int mib[2];
2195 		size_t len = sizeof(processors);
2196 
2197 		mib[0] = CTL_HW;
2198 #ifdef HW_AVAILCPU
2199 		mib[1] = HW_AVAILCPU;
2200 #else
2201 		mib[1] = HW_NCPU;
2202 #endif
2203 
2204 		if(sysctl(mib, 2, &processors, &len, NULL, 0) == -1) {
2205 			ERROR("Failed to get number of available processors.  "
2206 				"Defaulting to 1\n");
2207 			processors = 1;
2208 		}
2209 #else
2210 		processors = sysconf(_SC_NPROCESSORS_ONLN);
2211 #endif
2212 	}
2213 
2214 	if(add_overflow(processors, 3) ||
2215 			multiply_overflow(processors + 3, sizeof(pthread_t)))
2216 		EXIT_UNSQUASH("Processors too large\n");
2217 
2218 	thread = malloc((3 + processors) * sizeof(pthread_t));
2219 	if(thread == NULL)
2220 		EXIT_UNSQUASH("Out of memory allocating thread descriptors\n");
2221 	inflator_thread = &thread[3];
2222 
2223 	/*
2224 	 * dimensioning the to_reader and to_inflate queues.  The size of
2225 	 * these queues is directly related to the amount of block
2226 	 * read-ahead possible.  To_reader queues block read requests to
2227 	 * the reader thread and to_inflate queues block decompression
2228 	 * requests to the inflate thread(s) (once the block has been read by
2229 	 * the reader thread).  The amount of read-ahead is determined by
2230 	 * the combined size of the data_block and fragment caches which
2231 	 * determine the total number of blocks which can be "in flight"
2232 	 * at any one time (either being read or being decompressed)
2233 	 *
2234 	 * The maximum file open limit, however, affects the read-ahead
2235 	 * possible, in that for normal sizes of the fragment and data block
2236 	 * caches, where the incoming files have few data blocks or one fragment
2237 	 * only, the file open limit is likely to be reached before the
2238 	 * caches are full.  This means the worst case sizing of the combined
2239 	 * sizes of the caches is unlikely to ever be necessary.  However, it
2240 	 * is obvious that read-ahead up to the data block cache size is always
2241 	 * possible irrespective of the file open limit, because a single file
2242 	 * could contain that number of blocks.
2243 	 *
2244 	 * Choosing the size as "file open limit + data block cache size" seems
2245 	 * to be a reasonable estimate.  We can reasonably assume the maximum
2246 	 * likely read-ahead possible is data block cache size + one fragment
2247 	 * per open file.
2248 	 *
2249 	 * dimensioning the to_writer queue.  The size of this queue is
2250 	 * directly related to the amount of block read-ahead possible.
2251 	 * However, unlike the to_reader and to_inflate queues, this is
2252 	 * complicated by the fact the to_writer queue not only contains
2253 	 * entries for fragments and data_blocks but it also contains
2254 	 * file entries, one per open file in the read-ahead.
2255 	 *
2256 	 * Choosing the size as "2 * (file open limit) +
2257 	 * data block cache size" seems to be a reasonable estimate.
2258 	 * We can reasonably assume the maximum likely read-ahead possible
2259 	 * is data block cache size + one fragment per open file, and then
2260 	 * we will have a file_entry for each open file.
2261 	 */
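	/*
	 * Illustrative sizing (hypothetical figures): with an open file limit
	 * of 1024 (less the OPEN_FILE_MARGIN) and a data cache of 256 blocks,
	 * to_reader and to_inflate each hold roughly 1024 + 256 entries and
	 * to_writer roughly 2 * 1024 + 256 entries
	 */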
2262 	res = getrlimit(RLIMIT_NOFILE, &rlim);
2263 	if (res == -1) {
2264 		ERROR("failed to get open file limit!  Defaulting to 1\n");
2265 		rlim.rlim_cur = 1;
2266 	}
2267 
2268 	if (rlim.rlim_cur != RLIM_INFINITY) {
2269 		/*
2270 		 * leave OPEN_FILE_MARGIN free (rlim_cur includes fds used by
2271 		 * stdin, stdout, stderr and the filesystem fd)
2272 		 */
2273 		if (rlim.rlim_cur <= OPEN_FILE_MARGIN)
2274 			/* no margin, use minimum possible */
2275 			max_files = 1;
2276 		else
2277 			max_files = rlim.rlim_cur - OPEN_FILE_MARGIN;
2278 	} else
2279 		max_files = -1;
2280 
2281 	/* set amount of available files for use by open_wait and close_wake */
2282 	open_init(max_files);
2283 
2284 	/*
2285 	 * allocate to_reader, to_inflate and to_writer queues.  Set based on
2286 	 * open file limit and cache size, unless open file limit is unlimited,
2287 	 * in which case set purely based on cache limits
2288 	 *
2289 	 * In doing so, check that the user supplied values do not overflow
2290 	 * a signed int
2291 	 */
2292 	if (max_files != -1) {
2293 		if(add_overflow(data_buffer_size, max_files) ||
2294 				add_overflow(data_buffer_size, max_files * 2))
2295 			EXIT_UNSQUASH("Data queue size is too large\n");
2296 
2297 		to_reader = queue_init(max_files + data_buffer_size);
2298 		to_inflate = queue_init(max_files + data_buffer_size);
2299 		to_writer = queue_init(max_files * 2 + data_buffer_size);
2300 	} else {
2301 		int all_buffers_size;
2302 
2303 		if(add_overflow(fragment_buffer_size, data_buffer_size))
2304 			EXIT_UNSQUASH("Data and fragment queues combined are"
2305 							" too large\n");
2306 
2307 		all_buffers_size = fragment_buffer_size + data_buffer_size;
2308 
2309 		if(add_overflow(all_buffers_size, all_buffers_size))
2310 			EXIT_UNSQUASH("Data and fragment queues combined are"
2311 							" too large\n");
2312 
2313 		to_reader = queue_init(all_buffers_size);
2314 		to_inflate = queue_init(all_buffers_size);
2315 		to_writer = queue_init(all_buffers_size * 2);
2316 	}
2317 
2318 	from_writer = queue_init(1);
2319 
2320 	fragment_cache = cache_init(block_size, fragment_buffer_size);
2321 	data_cache = cache_init(block_size, data_buffer_size);
2322 	pthread_create(&thread[0], NULL, reader, NULL);
2323 	pthread_create(&thread[1], NULL, writer, NULL);
2324 	pthread_create(&thread[2], NULL, progress_thread, NULL);
2325 	init_info();
2326 	pthread_mutex_init(&fragment_mutex, NULL);
2327 
2328 	for(i = 0; i < processors; i++) {
2329 		if(pthread_create(&inflator_thread[i], NULL, inflator, NULL) !=
2330 				 0)
2331 			EXIT_UNSQUASH("Failed to create thread\n");
2332 	}
2333 
2334 	printf("Parallel unsquashfs: Using %d processor%s\n", processors,
2335 			processors == 1 ? "" : "s");
2336 
2337 	if(pthread_sigmask(SIG_SETMASK, &old_mask, NULL) == -1)
2338 		EXIT_UNSQUASH("Failed to set signal mask in initialise_threads"
2339 			"\n");
2340 }
2341 
2342 
2343 void enable_progress_bar()
2344 {
2345 	pthread_mutex_lock(&screen_mutex);
2346 	progress_enabled = progress;
2347 	pthread_mutex_unlock(&screen_mutex);
2348 }
2349 
2350 
2351 void disable_progress_bar()
2352 {
2353 	pthread_mutex_lock(&screen_mutex);
2354 	if(progress_enabled) {
2355 		progress_bar(sym_count + dev_count + fifo_count + cur_blocks,
2356 			total_inodes - total_files + total_blocks, columns);
2357 		printf("\n");
2358 	}
2359 	progress_enabled = FALSE;
2360 	pthread_mutex_unlock(&screen_mutex);
2361 }
2362 
2363 
2364 void progressbar_error(char *fmt, ...)
2365 {
2366 	va_list ap;
2367 
2368 	pthread_mutex_lock(&screen_mutex);
2369 
2370 	if(progress_enabled)
2371 		fprintf(stderr, "\n");
2372 
2373 	va_start(ap, fmt);
2374 	vfprintf(stderr, fmt, ap);
2375 	va_end(ap);
2376 
2377 	pthread_mutex_unlock(&screen_mutex);
2378 }
2379 
2380 
2381 void progressbar_info(char *fmt, ...)
2382 {
2383 	va_list ap;
2384 
2385 	pthread_mutex_lock(&screen_mutex);
2386 
2387 	if(progress_enabled)
2388 		printf("\n");
2389 
2390 	va_start(ap, fmt);
2391 	vprintf(fmt, ap);
2392 	va_end(ap);
2393 
2394 	pthread_mutex_unlock(&screen_mutex);
2395 }
2396 
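/*
 * Example layout (illustrative): on an 80 column terminal with max = 1000,
 * max_digits = 4 and used = 4 * 2 + 11 = 19 columns for the counters and
 * percentage, leaving 61 columns for the bar itself
 */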
2397 void progress_bar(long long current, long long max, int columns)
2398 {
2399 	char rotate_list[] = { '|', '/', '-', '\\' };
2400 	int max_digits, used, hashes, spaces;
2401 	static int tty = -1;
2402 
2403 	if(max == 0)
2404 		return;
2405 
2406 	max_digits = floor(log10(max)) + 1;
2407 	used = max_digits * 2 + 11;
2408 	hashes = (current * (columns - used)) / max;
2409 	spaces = columns - used - hashes;
2410 
2411 	if((current > max) || (columns - used < 0))
2412 		return;
2413 
2414 	if(tty == -1)
2415 		tty = isatty(STDOUT_FILENO);
2416 	if(!tty) {
2417 		static long long previous = -1;
2418 
2419 		/*
2420 		 * Updating much more frequently than this results in huge
2421 		 * log files.
2422 		 */
2423 		if((current % 100) != 0 && current != max)
2424 			return;
2425 		/* Don't update just to rotate the spinner. */
2426 		if(current == previous)
2427 			return;
2428 		previous = current;
2429 	}
2430 
2431 	printf("\r[");
2432 
2433 	while (hashes --)
2434 		putchar('=');
2435 
2436 	putchar(rotate_list[rotate]);
2437 
2438 	while(spaces --)
2439 		putchar(' ');
2440 
2441 	printf("] %*lld/%*lld", max_digits, current, max_digits, max);
2442 	printf(" %3lld%%", current * 100 / max);
2443 	fflush(stdout);
2444 }
2445 
2446 
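/*
 * Illustrative behaviour: parse_number("16", &n) sets n to 16 and returns 1,
 * whereas parse_number("16M", &n) and parse_number("-1", &n) return 0
 * (trailing junk and negative number respectively)
 */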
2447 int parse_number(char *arg, int *res)
2448 {
2449 	char *b;
2450 	long number = strtol(arg, &b, 10);
2451 
2452 	/* check for trailing junk after number */
2453 	if(*b != '\0')
2454 		return 0;
2455 
2456 	/*
2457 	 * check for strtol underflow or overflow in conversion.
2458 	 * Note: strtol can validly return LONG_MIN and LONG_MAX
2459 	 * if the user entered these values, but additional code
2460 	 * to distinguish this scenario is unnecessary, because for
2461 	 * our purposes LONG_MIN and LONG_MAX are too large anyway
2462 	 */
2463 	if(number == LONG_MIN || number == LONG_MAX)
2464 		return 0;
2465 
2466 	/* reject negative numbers as invalid */
2467 	if(number < 0)
2468 		return 0;
2469 
2470 	/* check if long result will overflow signed int */
2471 	if(number > INT_MAX)
2472 		return 0;
2473 
2474 	*res = number;
2475 	return 1;
2476 }
2477 
2478 
2479 #define VERSION() \
2480 	printf("unsquashfs version 4.3 (2014/05/12)\n");\
2481 	printf("copyright (C) 2014 Phillip Lougher "\
2482 		"<phillip@squashfs.org.uk>\n\n");\
2483     	printf("This program is free software; you can redistribute it and/or"\
2484 		"\n");\
2485 	printf("modify it under the terms of the GNU General Public License"\
2486 		"\n");\
2487 	printf("as published by the Free Software Foundation; either version "\
2488 		"2,\n");\
2489 	printf("or (at your option) any later version.\n\n");\
2490 	printf("This program is distributed in the hope that it will be "\
2491 		"useful,\n");\
2492 	printf("but WITHOUT ANY WARRANTY; without even the implied warranty of"\
2493 		"\n");\
2494 	printf("MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the"\
2495 		"\n");\
2496 	printf("GNU General Public License for more details.\n");
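
/*
 * Typical invocations (illustrative):
 *
 *	unsquashfs image.sqsh			extract to ./squashfs-root
 *	unsquashfs -ls image.sqsh		list the filesystem only
 *	unsquashfs -d out -ef list image.sqsh	extract only the paths in "list"
 */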
2497 int main(int argc, char *argv[])
2498 {
2499 	char *dest = "squashfs-root";
2500 	int i, stat_sys = FALSE, version = FALSE;
2501 	int n;
2502 	struct pathnames *paths = NULL;
2503 	struct pathname *path = NULL;
2504 	long long directory_table_end;
2505 	int fragment_buffer_size = FRAGMENT_BUFFER_DEFAULT;
2506 	int data_buffer_size = DATA_BUFFER_DEFAULT;
2507 
2508 	pthread_mutex_init(&screen_mutex, NULL);
2509 	root_process = geteuid() == 0;
2510 	if(root_process)
2511 		umask(0);
2512 
2513 	for(i = 1; i < argc; i++) {
2514 		if(*argv[i] != '-')
2515 			break;
2516 		if(strcmp(argv[i], "-version") == 0 ||
2517 				strcmp(argv[i], "-v") == 0) {
2518 			VERSION();
2519 			version = TRUE;
2520 		} else if(strcmp(argv[i], "-info") == 0 ||
2521 				strcmp(argv[i], "-i") == 0)
2522 			info = TRUE;
2523 		else if(strcmp(argv[i], "-ls") == 0 ||
2524 				strcmp(argv[i], "-l") == 0)
2525 			lsonly = TRUE;
2526 		else if(strcmp(argv[i], "-no-progress") == 0 ||
2527 				strcmp(argv[i], "-n") == 0)
2528 			progress = FALSE;
2529 		else if(strcmp(argv[i], "-no-xattrs") == 0 ||
2530 				strcmp(argv[i], "-no") == 0)
2531 			no_xattrs = TRUE;
2532 		else if(strcmp(argv[i], "-xattrs") == 0 ||
2533 				strcmp(argv[i], "-x") == 0)
2534 			no_xattrs = FALSE;
2535 		else if(strcmp(argv[i], "-user-xattrs") == 0 ||
2536 				strcmp(argv[i], "-u") == 0) {
2537 			user_xattrs = TRUE;
2538 			no_xattrs = FALSE;
2539 		} else if(strcmp(argv[i], "-dest") == 0 ||
2540 				strcmp(argv[i], "-d") == 0) {
2541 			if(++i == argc) {
2542 				fprintf(stderr, "%s: -dest missing filename\n",
2543 					argv[0]);
2544 				exit(1);
2545 			}
2546 			dest = argv[i];
2547 		} else if(strcmp(argv[i], "-processors") == 0 ||
2548 				strcmp(argv[i], "-p") == 0) {
2549 			if((++i == argc) ||
2550 					!parse_number(argv[i],
2551 						&processors)) {
2552 				ERROR("%s: -processors missing or invalid "
2553 					"processor number\n", argv[0]);
2554 				exit(1);
2555 			}
2556 			if(processors < 1) {
2557 				ERROR("%s: -processors should be 1 or larger\n",
2558 					argv[0]);
2559 				exit(1);
2560 			}
2561 		} else if(strcmp(argv[i], "-data-queue") == 0 ||
2562 					 strcmp(argv[i], "-da") == 0) {
2563 			if((++i == argc) ||
2564 					!parse_number(argv[i],
2565 						&data_buffer_size)) {
2566 				ERROR("%s: -data-queue missing or invalid "
2567 					"queue size\n", argv[0]);
2568 				exit(1);
2569 			}
2570 			if(data_buffer_size < 1) {
2571 				ERROR("%s: -data-queue should be 1 Mbyte or "
2572 					"larger\n", argv[0]);
2573 				exit(1);
2574 			}
2575 		} else if(strcmp(argv[i], "-frag-queue") == 0 ||
2576 					strcmp(argv[i], "-fr") == 0) {
2577 			if((++i == argc) ||
2578 					!parse_number(argv[i],
2579 						&fragment_buffer_size)) {
2580 				ERROR("%s: -frag-queue missing or invalid "
2581 					"queue size\n", argv[0]);
2582 				exit(1);
2583 			}
2584 			if(fragment_buffer_size < 1) {
2585 				ERROR("%s: -frag-queue should be 1 Mbyte or "
2586 					"larger\n", argv[0]);
2587 				exit(1);
2588 			}
2589 		} else if(strcmp(argv[i], "-force") == 0 ||
2590 				strcmp(argv[i], "-f") == 0)
2591 			force = TRUE;
2592 		else if(strcmp(argv[i], "-stat") == 0 ||
2593 				strcmp(argv[i], "-s") == 0)
2594 			stat_sys = TRUE;
2595 		else if(strcmp(argv[i], "-lls") == 0 ||
2596 				strcmp(argv[i], "-ll") == 0) {
2597 			lsonly = TRUE;
2598 			short_ls = FALSE;
2599 		} else if(strcmp(argv[i], "-linfo") == 0 ||
2600 				strcmp(argv[i], "-li") == 0) {
2601 			info = TRUE;
2602 			short_ls = FALSE;
2603 		} else if(strcmp(argv[i], "-ef") == 0 ||
2604 				strcmp(argv[i], "-e") == 0) {
2605 			if(++i == argc) {
2606 				fprintf(stderr, "%s: -ef missing filename\n",
2607 					argv[0]);
2608 				exit(1);
2609 			}
2610 			path = process_extract_files(path, argv[i]);
2611 		} else if(strcmp(argv[i], "-regex") == 0 ||
2612 				strcmp(argv[i], "-r") == 0)
2613 			use_regex = TRUE;
2614 		else
2615 			goto options;
2616 	}
2617 
2618 	if(lsonly || info)
2619 		progress = FALSE;
2620 
2621 #ifdef SQUASHFS_TRACE
2622 	/*
2623 	 * Disable progress bar if full debug tracing is enabled.
2624 	 * The progress bar in this case just gets in the way of the
2625 	 * debug trace output
2626 	 */
2627 	progress = FALSE;
2628 #endif
2629 
2630 	if(i == argc) {
2631 		if(!version) {
2632 options:
2633 			ERROR("SYNTAX: %s [options] filesystem [directories or "
2634 				"files to extract]\n", argv[0]);
2635 			ERROR("\t-v[ersion]\t\tprint version, licence and "
2636 				"copyright information\n");
2637 			ERROR("\t-d[est] <pathname>\tunsquash to <pathname>, "
2638 				"default \"squashfs-root\"\n");
2639 			ERROR("\t-n[o-progress]\t\tdon't display the progress "
2640 				"bar\n");
2641 			ERROR("\t-no[-xattrs]\t\tdon't extract xattrs in file system"
2642 				NOXOPT_STR"\n");
2643 			ERROR("\t-x[attrs]\t\textract xattrs in file system"
2644 				XOPT_STR "\n");
2645 			ERROR("\t-u[ser-xattrs]\t\tonly extract user xattrs in "
2646 				"file system.\n\t\t\t\tEnables extracting "
2647 				"xattrs\n");
2648 			ERROR("\t-p[rocessors] <number>\tuse <number> "
2649 				"processors.  By default will use\n");
2650 			ERROR("\t\t\t\tnumber of processors available\n");
2651 			ERROR("\t-i[nfo]\t\t\tprint files as they are "
2652 				"unsquashed\n");
2653 			ERROR("\t-li[nfo]\t\tprint files as they are "
2654 				"unsquashed with file\n");
2655 			ERROR("\t\t\t\tattributes (like ls -l output)\n");
2656 			ERROR("\t-l[s]\t\t\tlist filesystem, but don't unsquash"
2657 				"\n");
2658 			ERROR("\t-ll[s]\t\t\tlist filesystem with file "
2659 				"attributes (like\n");
2660 			ERROR("\t\t\t\tls -l output), but don't unsquash\n");
2661 			ERROR("\t-f[orce]\t\tif file already exists then "
2662 				"overwrite\n");
2663 			ERROR("\t-s[tat]\t\t\tdisplay filesystem superblock "
2664 				"information\n");
2665 			ERROR("\t-e[f] <extract file>\tlist of directories or "
2666 				"files to extract.\n\t\t\t\tOne per line\n");
2667 			ERROR("\t-da[ta-queue] <size>\tSet data queue to "
2668 				"<size> Mbytes.  Default %d\n\t\t\t\tMbytes\n",
2669 				DATA_BUFFER_DEFAULT);
2670 			ERROR("\t-fr[ag-queue] <size>\tSet fragment queue to "
2671 				"<size> Mbytes.  Default\n\t\t\t\t%d Mbytes\n",
2672 				FRAGMENT_BUFFER_DEFAULT);
2673 			ERROR("\t-r[egex]\t\ttreat extract names as POSIX "
2674 				"regular expressions\n");
2675 			ERROR("\t\t\t\trather than use the default shell "
2676 				"wildcard\n\t\t\t\texpansion (globbing)\n");
2677 			ERROR("\nDecompressors available:\n");
2678 			display_compressors("", "");
2679 		}
2680 		exit(1);
2681 	}
2682 
2683 	for(n = i + 1; n < argc; n++)
2684 		path = add_path(path, argv[n], argv[n]);
2685 
2686 	if((fd = open(argv[i], O_RDONLY)) == -1) {
2687 		ERROR("Could not open %s, because %s\n", argv[i],
2688 			strerror(errno));
2689 		exit(1);
2690 	}
2691 
2692 	if(read_super(argv[i]) == FALSE)
2693 		exit(1);
2694 
2695 	if(stat_sys) {
2696 		squashfs_stat(argv[i]);
2697 		exit(0);
2698 	}
2699 
2700 	if(!check_compression(comp))
2701 		exit(1);
2702 
2703 	block_size = sBlk.s.block_size;
2704 	block_log = sBlk.s.block_log;
2705 
2706 	/*
2707 	 * Sanity check block size and block log.
2708 	 *
2709 	 * Check they're within correct limits
2710 	 */
2711 	if(block_size > SQUASHFS_FILE_MAX_SIZE ||
2712 					block_log > SQUASHFS_FILE_MAX_LOG)
2713 		EXIT_UNSQUASH("Block size or block_log too large."
2714 			"  File system is corrupt.\n");
2715 
2716 	/*
2717 	 * Check block_size and block_log match
2718 	 */
2719 	if(block_size != (1 << block_log))
2720 		EXIT_UNSQUASH("Block size and block_log do not match."
2721 			"  File system is corrupt.\n");
2722 
2723 	/*
2724 	 * convert from queue size in Mbytes to queue size in
2725 	 * blocks.
2726 	 *
2727 	 * In doing so, check that the user supplied values do not
2728 	 * overflow a signed int
2729 	 */
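	/*
	 * e.g. for a filesystem with 128 Kbyte blocks (block_log 17), a
	 * 256 Mbyte queue becomes 256 << (20 - 17) = 2048 cache blocks
	 */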
2730 	if(shift_overflow(fragment_buffer_size, 20 - block_log))
2731 		EXIT_UNSQUASH("Fragment queue size is too large\n");
2732 	else
2733 		fragment_buffer_size <<= 20 - block_log;
2734 
2735 	if(shift_overflow(data_buffer_size, 20 - block_log))
2736 		EXIT_UNSQUASH("Data queue size is too large\n");
2737 	else
2738 		data_buffer_size <<= 20 - block_log;
2739 
2740 	initialise_threads(fragment_buffer_size, data_buffer_size);
2741 
2742 	fragment_data = malloc(block_size);
2743 	if(fragment_data == NULL)
2744 		EXIT_UNSQUASH("failed to allocate fragment_data\n");
2745 
2746 	file_data = malloc(block_size);
2747 	if(file_data == NULL)
2748 		EXIT_UNSQUASH("failed to allocate file_data\n");
2749 
2750 	data = malloc(block_size);
2751 	if(data == NULL)
2752 		EXIT_UNSQUASH("failed to allocate data\n");
2753 
2754 	created_inode = malloc(sBlk.s.inodes * sizeof(char *));
2755 	if(created_inode == NULL)
2756 		EXIT_UNSQUASH("failed to allocate created_inode\n");
2757 
2758 	memset(created_inode, 0, sBlk.s.inodes * sizeof(char *));
2759 
2760 	if(s_ops.read_uids_guids() == FALSE)
2761 		EXIT_UNSQUASH("failed to read uid/gid table\n");
2762 
2763 	if(s_ops.read_fragment_table(&directory_table_end) == FALSE)
2764 		EXIT_UNSQUASH("failed to read fragment table\n");
2765 
2766 	if(read_inode_table(sBlk.s.inode_table_start,
2767 				sBlk.s.directory_table_start) == FALSE)
2768 		EXIT_UNSQUASH("failed to read inode table\n");
2769 
2770 	if(read_directory_table(sBlk.s.directory_table_start,
2771 				directory_table_end) == FALSE)
2772 		EXIT_UNSQUASH("failed to read directory table\n");
2773 
2774 	if(no_xattrs)
2775 		sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK;
2776 
2777 	if(read_xattrs_from_disk(fd, &sBlk.s) == 0)
2778 		EXIT_UNSQUASH("failed to read the xattr table\n");
2779 
2780 	if(path) {
2781 		paths = init_subdir();
2782 		paths = add_subdir(paths, path);
2783 	}
2784 
2785 	pre_scan(dest, SQUASHFS_INODE_BLK(sBlk.s.root_inode),
2786 		SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), paths);
2787 
2788 	memset(created_inode, 0, sBlk.s.inodes * sizeof(char *));
2789 	inode_number = 1;
2790 
2791 	printf("%d inodes (%d blocks) to write\n\n", total_inodes,
2792 		total_inodes - total_files + total_blocks);
2793 
2794 	enable_progress_bar();
2795 
2796 	dir_scan(dest, SQUASHFS_INODE_BLK(sBlk.s.root_inode),
2797 		SQUASHFS_INODE_OFFSET(sBlk.s.root_inode), paths);
2798 
2799 	queue_put(to_writer, NULL);
2800 	queue_get(from_writer);
2801 
2802 	disable_progress_bar();
2803 
2804 	if(!lsonly) {
2805 		printf("\n");
2806 		printf("created %d files\n", file_count);
2807 		printf("created %d directories\n", dir_count);
2808 		printf("created %d symlinks\n", sym_count);
2809 		printf("created %d devices\n", dev_count);
2810 		printf("created %d fifos\n", fifo_count);
2811 	}
2812 
2813 	return 0;
2814 }
2815