/*
 * Create a squashfs filesystem.  This is a highly compressed read only
 * filesystem.
 *
 * Copyright (c) 2013, 2014
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * caches-queues-lists.c
 */

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#include "error.h"
#include "caches-queues-lists.h"

extern int add_overflow(int, int);
extern int multiply_overflow(int, int);

#define TRUE 1
#define FALSE 0

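/*
 * Initialise a circular buffer queue able to hold size entries.  One extra
 * slot is allocated and left unused to distinguish the full and empty states.
 */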
struct queue *queue_init(int size)
{
	struct queue *queue = malloc(sizeof(struct queue));

	if(queue == NULL)
		MEM_ERROR();

	if(add_overflow(size, 1) ||
				multiply_overflow(size + 1, sizeof(void *)))
		BAD_ERROR("Size too large in queue_init\n");

	queue->data = malloc(sizeof(void *) * (size + 1));
	if(queue->data == NULL)
		MEM_ERROR();

	queue->size = size + 1;
	queue->readp = queue->writep = 0;
	pthread_mutex_init(&queue->mutex, NULL);
	pthread_cond_init(&queue->empty, NULL);
	pthread_cond_init(&queue->full, NULL);

	return queue;
}


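/* Add an entry to the tail of the queue, blocking while the queue is full */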
void queue_put(struct queue *queue, void *data)
{
	int nextp;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	while((nextp = (queue->writep + 1) % queue->size) == queue->readp)
		pthread_cond_wait(&queue->full, &queue->mutex);

	queue->data[queue->writep] = data;
	queue->writep = nextp;
	pthread_cond_signal(&queue->empty);
	pthread_cleanup_pop(1);
}


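/* Remove and return the entry at the head of the queue, blocking while the
 * queue is empty */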
void *queue_get(struct queue *queue)
{
	void *data;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	while(queue->readp == queue->writep)
		pthread_cond_wait(&queue->empty, &queue->mutex);

	data = queue->data[queue->readp];
	queue->readp = (queue->readp + 1) % queue->size;
	pthread_cond_signal(&queue->full);
	pthread_cleanup_pop(1);

	return data;
}


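/* Return TRUE if the queue is currently empty */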
int queue_empty(struct queue *queue)
{
	int empty;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	empty = queue->readp == queue->writep;

	pthread_cleanup_pop(1);

	return empty;
}


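/* Discard all entries currently in the queue (the entries themselves are
 * not freed) */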
void queue_flush(struct queue *queue)
{
	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	queue->readp = queue->writep;

	pthread_cleanup_pop(1);
}


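/* Print the queue's maximum size, current occupancy and empty/full status */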
void dump_queue(struct queue *queue)
{
	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	printf("\tMax size %d, size %d%s\n", queue->size - 1,
		queue->readp <= queue->writep ? queue->writep - queue->readp :
			queue->size - queue->readp + queue->writep,
		queue->readp == queue->writep ? " (EMPTY)" :
			((queue->writep + 1) % queue->size) == queue->readp ?
			" (FULL)" : "");

	pthread_cleanup_pop(1);
}


/* define seq queue hash tables */
#define CALCULATE_SEQ_HASH(N) CALCULATE_HASH(N)

/* Called with the seq queue mutex held */
INSERT_HASH_TABLE(seq, struct seq_queue, CALCULATE_SEQ_HASH, sequence, seq)

/* Called with the seq queue mutex held */
REMOVE_HASH_TABLE(seq, struct seq_queue, CALCULATE_SEQ_HASH, sequence, seq);

static unsigned int sequence = 0;


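/* Initialise an empty sequence-ordered queue */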
struct seq_queue *seq_queue_init()
{
	struct seq_queue *queue = malloc(sizeof(struct seq_queue));
	if(queue == NULL)
		MEM_ERROR();

	memset(queue, 0, sizeof(struct seq_queue));

	pthread_mutex_init(&queue->mutex, NULL);
	pthread_cond_init(&queue->wait, NULL);

	return queue;
}


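/* Add a buffer to the sequence queue, waking the reader if this is the
 * sequence number it is waiting for */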
void seq_queue_put(struct seq_queue *queue, struct file_buffer *entry)
{
	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	insert_seq_hash_table(queue, entry);

	if(entry->fragment)
		queue->fragment_count ++;
	else
		queue->block_count ++;

	if(entry->sequence == sequence)
		pthread_cond_signal(&queue->wait);

	pthread_cleanup_pop(1);
}


struct file_buffer *seq_queue_get(struct seq_queue *queue)
{
	/*
	 * Look-up buffer matching sequence in the queue, if found return
	 * it, otherwise wait until it arrives
	 */
	int hash = CALCULATE_SEQ_HASH(sequence);
	struct file_buffer *entry;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	while(1) {
		for(entry = queue->hash_table[hash]; entry;
						entry = entry->seq_next)
			if(entry->sequence == sequence)
				break;

		if(entry) {
			/*
			 * found the buffer in the queue, decrement the
			 * appropriate count, and remove from hash list
			 */
			if(entry->fragment)
				queue->fragment_count --;
			else
				queue->block_count --;

			remove_seq_hash_table(queue, entry);

			sequence ++;

			break;
		}

		/* entry not found, wait for it to arrive */
		pthread_cond_wait(&queue->wait, &queue->mutex);
	}

	pthread_cleanup_pop(1);

	return entry;
}


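/* Discard all entries from the sequence queue by clearing the hash table
 * and resetting the counts */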
void seq_queue_flush(struct seq_queue *queue)
{
	int i;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	for(i = 0; i < HASH_SIZE; i++)
		queue->hash_table[i] = NULL;

	queue->fragment_count = queue->block_count = 0;

	pthread_cleanup_pop(1);
}


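/* Print the number of fragment or block buffers currently held in the
 * sequence queue */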
void dump_seq_queue(struct seq_queue *queue, int fragment_queue)
{
	int size;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &queue->mutex);
	pthread_mutex_lock(&queue->mutex);

	size = fragment_queue ? queue->fragment_count : queue->block_count;

	printf("\tMax size unlimited, size %d%s\n", size,
						size == 0 ? " (EMPTY)" : "");

	pthread_cleanup_pop(1);
}


/* define cache hash tables */
#define CALCULATE_CACHE_HASH(N) CALCULATE_HASH(llabs(N))

/* Called with the cache mutex held */
INSERT_HASH_TABLE(cache, struct cache, CALCULATE_CACHE_HASH, index, hash)

/* Called with the cache mutex held */
REMOVE_HASH_TABLE(cache, struct cache, CALCULATE_CACHE_HASH, index, hash);

/* define cache free list */

/* Called with the cache mutex held */
INSERT_LIST(free, struct file_buffer)

/* Called with the cache mutex held */
REMOVE_LIST(free, struct file_buffer)


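/*
 * Initialise a cache of up to max_buffers buffers, each of buffer_size
 * bytes.  The noshrink_lookup and first_freelist flags select the cache
 * management policies described in the comments below.
 */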
struct cache *cache_init(int buffer_size, int max_buffers, int noshrink_lookup,
	int first_freelist)
{
	struct cache *cache = malloc(sizeof(struct cache));

	if(cache == NULL)
		MEM_ERROR();

	cache->max_buffers = max_buffers;
	cache->buffer_size = buffer_size;
	cache->count = 0;
	cache->used = 0;
	cache->free_list = NULL;

	/*
	 * The cache will grow up to max_buffers in size in response to
	 * an increase in readahead/number of buffers in flight.  But
	 * once the outstanding buffers get returned, we can either elect
	 * to shrink the cache, or to put the freed blocks onto a free list.
	 *
	 * For the caches where we want to do lookup (fragment/writer),
	 * a don't-shrink policy is best; for the reader cache it
	 * makes no sense to keep buffers around longer than necessary as
	 * we don't do any lookup on those blocks.
	 */
	cache->noshrink_lookup = noshrink_lookup;

	/*
	 * The default policy of using the freelist before growing the cache
	 * behaves poorly with appending - with many duplicates the caches
	 * do not grow, because large queues of outstanding
	 * fragments/writer blocks do not occur, leading to small caches
	 * and unnecessary performance loss due to frequent cache
	 * replacement in the small caches.  Therefore when appending
	 * change the policy to grow the caches before reusing blocks
	 * from the freelist.
	 */
	cache->first_freelist = first_freelist;

	memset(cache->hash_table, 0, sizeof(struct file_buffer *) * 65536);
	pthread_mutex_init(&cache->mutex, NULL);
	pthread_cond_init(&cache->wait_for_free, NULL);
	pthread_cond_init(&cache->wait_for_unlock, NULL);

	return cache;
}


struct file_buffer *cache_lookup(struct cache *cache, long long index)
{
	/*
	 * Lookup block in the cache, if found return with usage count
	 * incremented, if not found return NULL
	 */
	int hash = CALCULATE_CACHE_HASH(index);
	struct file_buffer *entry;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
		if(entry->index == index)
			break;

	if(entry) {
		/*
		 * found the block in the cache, increment used count and
		 * if necessary remove from free list so it won't disappear
		 */
		if(entry->used == 0) {
			remove_free_list(&cache->free_list, entry);
			cache->used ++;
		}
		entry->used ++;
	}

	pthread_cleanup_pop(1);

	return entry;
}


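/*
 * Take a buffer off the cache free list and remove it from the hash table
 * ready for reuse.  Called with the cache mutex held.
 */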
static struct file_buffer *cache_freelist(struct cache *cache)
{
	struct file_buffer *entry = cache->free_list;

	remove_free_list(&cache->free_list, entry);

	/* a block on the free_list is hashed */
	remove_cache_hash_table(cache, entry);

	cache->used ++;
	return entry;
}


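/*
 * Allocate a new buffer and add it to the cache.  Called with the cache
 * mutex held.
 */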
static struct file_buffer *cache_alloc(struct cache *cache)
{
	struct file_buffer *entry = malloc(sizeof(struct file_buffer) +
							cache->buffer_size);
	if(entry == NULL)
			MEM_ERROR();

	entry->cache = cache;
	entry->free_prev = entry->free_next = NULL;
	cache->count ++;
	return entry;
}


static struct file_buffer *_cache_get(struct cache *cache, long long index,
	int hash)
{
	/* Get a free block out of the cache indexed on index. */
	struct file_buffer *entry = NULL;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	while(1) {
		if(cache->noshrink_lookup) {
			/* first try to get a block from the free list */
			if(cache->first_freelist && cache->free_list)
				entry = cache_freelist(cache);
			else if(cache->count < cache->max_buffers) {
				entry = cache_alloc(cache);
				cache->used ++;
			} else if(!cache->first_freelist && cache->free_list)
				entry = cache_freelist(cache);
		} else { /* shrinking non-lookup cache */
			if(cache->count < cache->max_buffers) {
				entry = cache_alloc(cache);
				if(cache->count > cache->max_count)
					cache->max_count = cache->count;
			}
		}

		if(entry)
			break;

		/* wait for a block */
		pthread_cond_wait(&cache->wait_for_free, &cache->mutex);
	}

	/* initialise block and if hash is set insert into the hash table */
	entry->used = 1;
	entry->locked = FALSE;
	entry->wait_on_unlock = FALSE;
	entry->error = FALSE;
	if(hash) {
		entry->index = index;
		insert_cache_hash_table(cache, entry);
	}

	pthread_cleanup_pop(1);

	return entry;
}


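/*
 * Get a free buffer indexed on index and inserted into the cache hash
 * table, blocking until one is available.
 */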
struct file_buffer *cache_get(struct cache *cache, long long index)
{
	return _cache_get(cache, index, 1);
}


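/*
 * Get a free anonymous buffer which is not inserted into the cache hash
 * table, blocking until one is available.
 */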
struct file_buffer *cache_get_nohash(struct cache *cache)
{
	return _cache_get(cache, 0, 0);
}


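/*
 * Index a previously unhashed buffer on index and insert it into the
 * cache hash table.
 */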
void cache_hash(struct file_buffer *entry, long long index)
{
	struct cache *cache = entry->cache;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	entry->index = index;
	insert_cache_hash_table(cache, entry);

	pthread_cleanup_pop(1);
}


void cache_block_put(struct file_buffer *entry)
{
	struct cache *cache;

	/*
	 * Finished with this cache entry, once the usage count reaches zero it
	 * can be reused.
	 *
	 * If noshrink_lookup is set, put the block onto the free list.
	 * As blocks remain accessible via the hash table they can be found
	 * and given a new lease of life before they are reused.
	 *
	 * If noshrink_lookup is not set then shrink the cache.
	 */

	if(entry == NULL)
		return;

	cache = entry->cache;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	entry->used --;
	if(entry->used == 0) {
		if(cache->noshrink_lookup) {
			insert_free_list(&cache->free_list, entry);
			cache->used --;
		} else {
			free(entry);
			cache->count --;
		}

		/* One or more threads may be waiting on this block */
		pthread_cond_signal(&cache->wait_for_free);
	}

	pthread_cleanup_pop(1);
}


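/* Print the cache's maximum size, current size and usage statistics */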
void dump_cache(struct cache *cache)
{
	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	if(cache->noshrink_lookup)
		printf("\tMax buffers %d, Current size %d, Used %d,  %s\n",
			cache->max_buffers, cache->count, cache->used,
			cache->free_list ?  "Free buffers" : "No free buffers");
	else
		printf("\tMax buffers %d, Current size %d, Maximum historical "
			"size %d\n", cache->max_buffers, cache->count,
			cache->max_count);

	pthread_cleanup_pop(1);
}


struct file_buffer *cache_get_nowait(struct cache *cache, long long index)
{
	struct file_buffer *entry = NULL;
	/*
	 * The block doesn't exist in the cache; create it, but return it with
	 * the locked flag set, so nothing tries to use it while it doesn't
	 * contain data.
	 *
	 * If there's no space in the cache then return NULL rather than
	 * waiting.
	 */

	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	/* first try to get a block from the free list */
	if(cache->first_freelist && cache->free_list)
		entry = cache_freelist(cache);
	else if(cache->count < cache->max_buffers) {
		entry = cache_alloc(cache);
		cache->used ++;
	} else if(!cache->first_freelist && cache->free_list)
		entry = cache_freelist(cache);

	if(entry) {
		/* initialise block and insert into the hash table */
		entry->used = 1;
		entry->locked = TRUE;
		entry->wait_on_unlock = FALSE;
		entry->error = FALSE;
		entry->index = index;
		insert_cache_hash_table(cache, entry);
	}

	pthread_cleanup_pop(1);

	return entry;
}


struct file_buffer *cache_lookup_nowait(struct cache *cache, long long index,
	char *locked)
{
	/*
	 * Lookup block in the cache, if found return it with the locked flag
	 * indicating whether it is currently locked.  In both cases increment
	 * the used count.
	 *
	 * If it doesn't exist in the cache return NULL.
	 */
	int hash = CALCULATE_CACHE_HASH(index);
	struct file_buffer *entry;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	/* first check if the entry already exists */
	for(entry = cache->hash_table[hash]; entry; entry = entry->hash_next)
		if(entry->index == index)
			break;

	if(entry) {
		if(entry->used == 0) {
			remove_free_list(&cache->free_list, entry);
			cache->used ++;
		}
		entry->used ++;
		*locked = entry->locked;
	}

	pthread_cleanup_pop(1);

	return entry;
}


void cache_wait_unlock(struct file_buffer *buffer)
{
	struct cache *cache = buffer->cache;

	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	while(buffer->locked) {
		/*
		 * Another thread is filling this buffer in; wait until it
		 * becomes unlocked.  The used count has already been
		 * incremented to ensure the buffer doesn't get reused.  By
		 * definition a block can't be locked and unused, so we don't
		 * need to worry about it being on the freelist now, but it
		 * may become unused when unlocked unless used is incremented.
		 */
		buffer->wait_on_unlock = TRUE;
		pthread_cond_wait(&cache->wait_for_unlock, &cache->mutex);
	}

	pthread_cleanup_pop(1);
}


void cache_unlock(struct file_buffer *entry)
{
	struct cache *cache = entry->cache;

	/*
	 * Unlock this locked cache entry.  If anything is waiting for this
	 * to become unlocked, wake it up.
	 */
	pthread_cleanup_push((void *) pthread_mutex_unlock, &cache->mutex);
	pthread_mutex_lock(&cache->mutex);

	entry->locked = FALSE;

	if(entry->wait_on_unlock) {
		entry->wait_on_unlock = FALSE;
		pthread_cond_broadcast(&cache->wait_for_unlock);
	}

	pthread_cleanup_pop(1);
}