//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.3
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
//          mcseemagg@yahoo.com
//          http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_ARRAY_INCLUDED
#define AGG_ARRAY_INCLUDED

#include "agg_basics.h"
#include "core/include/fxcrt/fx_memory.h"  // For FXSYS_* macros.

namespace agg
{
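//--------------------------------------------------------------- pod_array
// A dynamically allocated, contiguous array of POD elements. Memory is
// managed with FX_Alloc/FX_Free; element constructors and destructors are
// never run, so T must be a plain-old-data type. add() performs no bounds
// checking: callers must reserve room with capacity()/allocate() first.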
template <class T>
class pod_array {
public:
    typedef T value_type;
    ~pod_array()
    {
        FX_Free(m_array);
    }
    pod_array() : m_size(0), m_capacity(0), m_array(0) {}
    pod_array(unsigned cap, unsigned extra_tail = 0);
    pod_array(const pod_array<T>&);
    const pod_array<T>& operator = (const pod_array<T>&);
    void capacity(unsigned cap, unsigned extra_tail = 0);
    unsigned capacity() const
    {
        return m_capacity;
    }
    void allocate(unsigned size, unsigned extra_tail = 0);
    void resize(unsigned new_size);
    void zero()
    {
        FXSYS_memset(m_array, 0, sizeof(T) * m_size);
    }
    void add(const T& v)
    {
        m_array[m_size++] = v;
    }
    void inc_size(unsigned size)
    {
        m_size += size;
    }
    unsigned size() const
    {
        return m_size;
    }
    unsigned byte_size() const
    {
        return m_size * sizeof(T);
    }
    const T& operator [] (unsigned i) const
    {
        return m_array[i];
    }
    T& operator [] (unsigned i)
    {
        return m_array[i];
    }
    const T& at(unsigned i) const
    {
        return m_array[i];
    }
    T& at(unsigned i)
    {
        return m_array[i];
    }
    T value_at(unsigned i) const
    {
        return m_array[i];
    }
    const T* data() const
    {
        return m_array;
    }
    T* data()
    {
        return m_array;
    }
    void remove_all()
    {
        m_size = 0;
    }
    void cut_at(unsigned num)
    {
        if(num < m_size) {
            m_size = num;
        }
    }
private:
    unsigned m_size;
    unsigned m_capacity;
    T* m_array;
};
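// Reserves storage for at least cap + extra_tail elements and resets the
// logical size to zero. If cap + extra_tail overflows unsigned arithmetic,
// the array is freed and left empty instead of allocating a wrapped size.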
template<class T>
void pod_array<T>::capacity(unsigned cap, unsigned extra_tail)
{
    m_size = 0;
    unsigned full_cap = cap + extra_tail;
    if(full_cap < cap) {
        FX_Free(m_array);
        m_array = 0;
        m_capacity = 0;
    } else if(full_cap > m_capacity) {
        FX_Free(m_array);
        m_array = FX_Alloc(T, full_cap);
        m_capacity = full_cap;
    }
}
template<class T>
void pod_array<T>::allocate(unsigned size, unsigned extra_tail)
{
    capacity(size, extra_tail);
    m_size = size;
}
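// Grows the underlying buffer (preserving the first m_size elements) when
// new_size exceeds the current capacity; note that m_size and m_capacity are
// left unchanged in that case. Shrinking only reduces m_size.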
template<class T>
void pod_array<T>::resize(unsigned new_size)
{
    if(new_size > m_size) {
        if(new_size > m_capacity) {
            T* data = FX_Alloc(T, new_size);
            FXSYS_memcpy(data, m_array, m_size * sizeof(T));
            FX_Free(m_array);
            m_array = data;
        }
    } else {
        m_size = new_size;
    }
}
template<class T> pod_array<T>::pod_array(unsigned cap, unsigned extra_tail) :
    m_size(0), m_capacity(cap + extra_tail), m_array(FX_Alloc(T, m_capacity)) {}
template<class T> pod_array<T>::pod_array(const pod_array<T>& v) :
    m_size(v.m_size),
    m_capacity(v.m_capacity),
    m_array(v.m_capacity ? FX_Alloc(T, v.m_capacity) : 0)
{
    FXSYS_memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
}
template<class T> const pod_array<T>&
pod_array<T>::operator = (const pod_array<T>& v)
{
    allocate(v.m_size);
    if(v.m_size) {
        FXSYS_memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
    }
    return *this;
}
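//--------------------------------------------------------------- pod_deque
// A growable container of POD elements stored in fixed-size blocks of
// 2^S entries (64 by default). Elements never move once written, so
// pointers and references into the deque stay valid as it grows; only the
// block pointer table is reallocated. Indexing splits an index into a
// block number (i >> block_shift) and an offset within the block
// (i & block_mask).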
template<class T, unsigned S = 6> class pod_deque
{
public:
    enum block_scale_e {
        block_shift = S,
        block_size = 1 << block_shift,
        block_mask = block_size - 1
    };
    typedef T value_type;
    ~pod_deque();
    pod_deque();
    pod_deque(unsigned block_ptr_inc);
    pod_deque(const pod_deque<T, S>& v);
    const pod_deque<T, S>& operator = (const pod_deque<T, S>& v);
    void remove_all()
    {
        m_size = 0;
    }
    void free_all()
    {
        free_tail(0);
    }
    void free_tail(unsigned size);
    void add(const T& val);
    void modify_last(const T& val);
    void remove_last();
    int allocate_continuous_block(unsigned num_elements);
    void add_array(const T* ptr, unsigned num_elem)
    {
        while(num_elem--) {
            add(*ptr++);
        }
    }
    template<class DataAccessor> void add_data(DataAccessor& data)
    {
        while(data.size()) {
            add(*data);
            ++data;
        }
    }
    void cut_at(unsigned size)
    {
        if(size < m_size) {
            m_size = size;
        }
    }
    unsigned size() const
    {
        return m_size;
    }
    const T& operator [] (unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& operator [] (unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    const T& at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& at(unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T value_at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    const T& curr(unsigned idx) const
    {
        return (*this)[idx];
    }
    T& curr(unsigned idx)
    {
        return (*this)[idx];
    }
    const T& prev(unsigned idx) const
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    T& prev(unsigned idx)
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    const T& next(unsigned idx) const
    {
        return (*this)[(idx + 1) % m_size];
    }
    T& next(unsigned idx)
    {
        return (*this)[(idx + 1) % m_size];
    }
    const T& last() const
    {
        return (*this)[m_size - 1];
    }
    T& last()
    {
        return (*this)[m_size - 1];
    }
    unsigned byte_size() const;
    const T* block(unsigned nb) const
    {
        return m_blocks[nb];
    }
public:
    void allocate_block(unsigned nb);
    T* data_ptr();
    unsigned m_size;
    unsigned m_num_blocks;
    unsigned m_max_blocks;
    T** m_blocks;
    unsigned m_block_ptr_inc;
};
template<class T, unsigned S> pod_deque<T, S>::~pod_deque()
{
    if(m_num_blocks) {
        T** blk = m_blocks + m_num_blocks - 1;
        while(m_num_blocks--) {
            FX_Free(*blk);
            --blk;
        }
        FX_Free(m_blocks);
    }
}
template<class T, unsigned S>
void pod_deque<T, S>::free_tail(unsigned size)
{
    if(size < m_size) {
        unsigned nb = (size + block_mask) >> block_shift;
        while(m_num_blocks > nb) {
            FX_Free(m_blocks[--m_num_blocks]);
        }
        m_size = size;
    }
}
template<class T, unsigned S> pod_deque<T, S>::pod_deque() :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_size)
{
}
template<class T, unsigned S>
pod_deque<T, S>::pod_deque(unsigned block_ptr_inc) :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_ptr_inc)
{
}
template<class T, unsigned S>
pod_deque<T, S>::pod_deque(const pod_deque<T, S>& v) :
    m_size(v.m_size),
    m_num_blocks(v.m_num_blocks),
    m_max_blocks(v.m_max_blocks),
    m_blocks(v.m_max_blocks ? FX_Alloc(T*, v.m_max_blocks) : 0),
    m_block_ptr_inc(v.m_block_ptr_inc)
{
    unsigned i;
    for(i = 0; i < v.m_num_blocks; ++i) {
        m_blocks[i] = FX_Alloc(T, block_size);
        FXSYS_memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
    }
}
template<class T, unsigned S>
const pod_deque<T, S>& pod_deque<T, S>::operator = (const pod_deque<T, S>& v)
{
    unsigned i;
    for(i = m_num_blocks; i < v.m_num_blocks; ++i) {
        allocate_block(i);
    }
    for(i = 0; i < v.m_num_blocks; ++i) {
        FXSYS_memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
    }
    m_size = v.m_size;
    return *this;
}
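// allocate_block() grows the block pointer table by m_block_ptr_inc entries
// when needed and allocates a fresh block of block_size elements.
// data_ptr() returns a pointer to the next free slot, allocating a new
// block first if the current one is full.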
template<class T, unsigned S>
void pod_deque<T, S>::allocate_block(unsigned nb)
{
    if(nb >= m_max_blocks) {
        T** new_blocks = FX_Alloc(T*, m_max_blocks + m_block_ptr_inc);
        if(m_blocks) {
            FXSYS_memcpy(new_blocks,
                         m_blocks,
                         m_num_blocks * sizeof(T*));
            FX_Free(m_blocks);
        }
        m_blocks = new_blocks;
        m_max_blocks += m_block_ptr_inc;
    }
    m_blocks[nb] = FX_Alloc(T, block_size);
    m_num_blocks++;
}
template<class T, unsigned S>
inline T* pod_deque<T, S>::data_ptr()
{
    unsigned nb = m_size >> block_shift;
    if(nb >= m_num_blocks) {
        allocate_block(nb);
    }
    return m_blocks[nb] + (m_size & block_mask);
}
template<class T, unsigned S>
inline void pod_deque<T, S>::add(const T& val)
{
    *data_ptr() = val;
    ++m_size;
}
template<class T, unsigned S>
inline void pod_deque<T, S>::remove_last()
{
    if(m_size) {
        --m_size;
    }
}
template<class T, unsigned S>
void pod_deque<T, S>::modify_last(const T& val)
{
    remove_last();
    add(val);
}
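// Reserves room for num_elements contiguous elements within a single block
// and returns the index of the first one, skipping ahead to a fresh block
// when the current block cannot hold the run. Returns -1 if num_elements
// cannot fit in one block (num_elements >= block_size).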
template<class T, unsigned S>
int pod_deque<T, S>::allocate_continuous_block(unsigned num_elements)
{
    if(num_elements < block_size) {
        data_ptr();
        unsigned rest = block_size - (m_size & block_mask);
        unsigned index;
        if(num_elements <= rest) {
            index = m_size;
            m_size += num_elements;
            return index;
        }
        m_size += rest;
        data_ptr();
        index = m_size;
        m_size += num_elements;
        return index;
    }
    return -1;
}
template<class T, unsigned S>
unsigned pod_deque<T, S>::byte_size() const
{
    return m_size * sizeof(T);
}
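//----------------------------------------------------------- pod_allocator
// A simple arena allocator: memory is handed out sequentially from blocks of
// at least m_block_size bytes, with optional alignment, and is released all
// at once via remove_all() or the destructor. Individual allocations are
// never freed.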
class pod_allocator
{
public:
    void remove_all()
    {
        if(m_num_blocks) {
            int8u** blk = m_blocks + m_num_blocks - 1;
            while(m_num_blocks--) {
                FX_Free(*blk);
                --blk;
            }
            FX_Free(m_blocks);
        }
        m_num_blocks = 0;
        m_max_blocks = 0;
        m_blocks = 0;
        m_buf_ptr = 0;
        m_rest = 0;
    }
    ~pod_allocator()
    {
        remove_all();
    }
    pod_allocator(unsigned block_size, unsigned block_ptr_inc = 256 - 8) :
        m_block_size(block_size),
        m_block_ptr_inc(block_ptr_inc),
        m_num_blocks(0),
        m_max_blocks(0),
        m_blocks(0),
        m_buf_ptr(0),
        m_rest(0)
    {
    }
    int8u* allocate(unsigned size, unsigned alignment = 1)
    {
        if(size == 0) {
            return 0;
        }
        if(size <= m_rest) {
            int8u* ptr = m_buf_ptr;
            if(alignment > 1) {
                unsigned align = (alignment - unsigned((size_t)ptr) % alignment) % alignment;
                size += align;
                ptr += align;
                if(size <= m_rest) {
                    m_rest -= size;
                    m_buf_ptr += size;
                    return ptr;
                }
                allocate_block(size);
                return allocate(size - align, alignment);
            }
            m_rest -= size;
            m_buf_ptr += size;
            return ptr;
        }
        allocate_block(size + alignment - 1);
        return allocate(size, alignment);
    }
private:
    void allocate_block(unsigned size)
    {
        if(size < m_block_size) {
            size = m_block_size;
        }
        if(m_num_blocks >= m_max_blocks) {
            int8u** new_blocks = FX_Alloc(int8u*, m_max_blocks + m_block_ptr_inc);
            if(m_blocks) {
                FXSYS_memcpy(new_blocks,
                             m_blocks,
                             m_num_blocks * sizeof(int8u*));
                FX_Free(m_blocks);
            }
            m_blocks = new_blocks;
            m_max_blocks += m_block_ptr_inc;
        }
        m_blocks[m_num_blocks] = m_buf_ptr = FX_Alloc(int8u, size);
        m_num_blocks++;
        m_rest = size;
    }
    unsigned m_block_size;
    unsigned m_block_ptr_inc;
    unsigned m_num_blocks;
    unsigned m_max_blocks;
    int8u** m_blocks;
    int8u* m_buf_ptr;
    unsigned m_rest;
};
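// Sorting helpers retained from the original AGG sources:
// quick_sort_threshold is the partition size below which AGG's quick sort
// falls back to a simpler in-place sort, and swap_elements() exchanges two
// values by copy.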
enum quick_sort_threshold_e {
    quick_sort_threshold = 9
};
template<class T> inline void swap_elements(T& a, T& b)
{
    T temp = a;
    a = b;
    b = temp;
}
}
#endif