1
2 //----------------------------------------------------------------------------
3 // Anti-Grain Geometry - Version 2.3
4 // Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
5 //
6 // Permission to copy, use, modify, sell and distribute this software
7 // is granted provided this copyright notice appears in all copies.
8 // This software is provided "as is" without express or implied
9 // warranty, and with no claim as to its suitability for any purpose.
10 //
11 //----------------------------------------------------------------------------
12 // Contact: [email protected]
13 // [email protected]
14 // http://www.antigrain.com
15 //----------------------------------------------------------------------------
16 #ifndef AGG_ARRAY_INCLUDED
17 #define AGG_ARRAY_INCLUDED
18
19 #include <string.h>
20
21 #include "agg_basics.h"
22 #include "core/fxcrt/fx_memory.h" // For FXSYS_* macros.
23
24 namespace pdfium
25 {
26 namespace agg
27 {
// A growable array of POD elements (upstream AGG's pod_array). Storage is
// managed with raw memory operations (memcpy/memset via FX_* allocators),
// so T must be trivially copyable. Note that add() does NOT grow the
// buffer: callers must reserve room via capacity()/allocate() first.
template <class T>
class pod_array {
public:
    typedef T value_type;
    ~pod_array()
    {
        FX_Free(m_array);
    }
    // Empty array, no storage allocated.
    pod_array() : m_size(0), m_capacity(0), m_array(0) {}
    // Pre-allocates cap + extra_tail elements; logical size stays 0.
    pod_array(unsigned cap, unsigned extra_tail = 0);
    pod_array(const pod_array<T>&);
    pod_array<T>& operator = (const pod_array<T>&);
    // Drops all elements and guarantees room for cap + extra_tail slots.
    void capacity(unsigned cap, unsigned extra_tail = 0);
    unsigned capacity() const
    {
        return m_capacity;
    }
    // Reserves then sets the logical size; element contents uninitialized.
    void allocate(unsigned size, unsigned extra_tail = 0);
    void resize(unsigned new_size);
    // Zero-fills the m_size live elements (not the full capacity).
    void zero() { memset(m_array, 0, sizeof(T) * m_size); }
    // Appends without any bounds check; caller must have reserved space.
    void add(const T& v)
    {
        m_array[m_size++] = v;
    }
    // Bumps the logical size without touching storage; caller must have
    // reserved (e.g. via resize()) and written the elements already.
    void inc_size(unsigned size)
    {
        m_size += size;
    }
    unsigned size() const
    {
        return m_size;
    }
    unsigned byte_size() const
    {
        return m_size * sizeof(T);
    }
    // Unchecked element access, mirroring upstream AGG's API surface.
    const T& operator [] (unsigned i) const
    {
        return m_array[i];
    }
    T& operator [] (unsigned i)
    {
        return m_array[i];
    }
    // Note: unlike std::vector::at(), these do NOT bounds-check.
    const T& at(unsigned i) const
    {
        return m_array[i];
    }
    T& at(unsigned i)
    {
        return m_array[i];
    }
    // Returns a copy of element i.
    T value_at(unsigned i) const
    {
        return m_array[i];
    }
    const T* data() const
    {
        return m_array;
    }
    T* data()
    {
        return m_array;
    }
    // Logical clear; capacity and storage are retained.
    void remove_all()
    {
        m_size = 0;
    }
    // Truncates to num elements when num is smaller than the current size.
    void cut_at(unsigned num)
    {
        if(num < m_size) {
            m_size = num;
        }
    }
private:
    unsigned m_size;      // number of live elements
    unsigned m_capacity;  // allocated element slots
    T* m_array;           // owned buffer, freed in the destructor
};
107 template<class T>
capacity(unsigned cap,unsigned extra_tail)108 void pod_array<T>::capacity(unsigned cap, unsigned extra_tail)
109 {
110 m_size = 0;
111 unsigned full_cap = cap + extra_tail;
112 if(full_cap < cap) {
113 FX_Free(m_array);
114 m_array = 0;
115 m_capacity = 0;
116 } else if(full_cap > m_capacity) {
117 FX_Free(m_array);
118 m_array = FX_Alloc(T, full_cap);
119 m_capacity = full_cap;
120 }
121 }
122 template<class T>
allocate(unsigned size,unsigned extra_tail)123 void pod_array<T>::allocate(unsigned size, unsigned extra_tail)
124 {
125 capacity(size, extra_tail);
126 m_size = size;
127 }
128 template<class T>
resize(unsigned new_size)129 void pod_array<T>::resize(unsigned new_size)
130 {
131 if(new_size > m_size) {
132 if(new_size > m_capacity) {
133 T* data = FX_AllocUninit(T, new_size);
134 memcpy(data, m_array, m_size * sizeof(T));
135 FX_Free(m_array);
136 m_array = data;
137 }
138 } else {
139 m_size = new_size;
140 }
141 }
pod_array(unsigned cap,unsigned extra_tail)142 template<class T> pod_array<T>::pod_array(unsigned cap, unsigned extra_tail) :
143 m_size(0), m_capacity(cap + extra_tail), m_array(FX_Alloc(T, m_capacity)) {}
pod_array(const pod_array<T> & v)144 template<class T> pod_array<T>::pod_array(const pod_array<T>& v) :
145 m_size(v.m_size),
146 m_capacity(v.m_capacity),
147 m_array(v.m_capacity ? FX_Alloc(T, v.m_capacity) : 0)
148 {
149 memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
150 }
151 template<class T> pod_array<T>&
152 pod_array<T>::operator = (const pod_array<T>&v)
153 {
154 allocate(v.m_size);
155 if(v.m_size) {
156 memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
157 }
158 return *this;
159 }
// A deque of POD elements stored in fixed-size blocks of 2^S elements
// (upstream AGG's pod_deque). Blocks are tracked through a growable
// pointer table, so appending never moves existing elements. T must be
// trivially copyable (blocks are duplicated with memcpy).
template<class T, unsigned S = 6> class pod_deque
{
public:
    enum block_scale_e {
        block_shift = S,              // log2 of the block size
        block_size = 1 << block_shift,
        block_mask = block_size - 1   // index-within-block mask
    };
    typedef T value_type;
    ~pod_deque();
    pod_deque();
    // block_ptr_inc: growth step (in entries) for the block-pointer table.
    pod_deque(unsigned block_ptr_inc);
    pod_deque(const pod_deque<T, S>& v);
    pod_deque<T, S>& operator = (const pod_deque<T, S>& v);
    // Logical clear; all blocks are retained for reuse.
    void remove_all()
    {
        m_size = 0;
    }
    // Releases every block (free_tail to size 0).
    void free_all()
    {
        free_tail(0);
    }
    void free_tail(unsigned size);
    void add(const T& val);
    void modify_last(const T& val);
    void remove_last();
    int allocate_continuous_block(unsigned num_elements);
    // Appends num_elem elements copied from ptr, one add() at a time.
    void add_array(const T* ptr, unsigned num_elem)
    {
        while(num_elem--) {
            add(*ptr++);
        }
    }
    // Drains a source exposing size(), operator*, and operator++.
    template<class DataAccessor> void add_data(DataAccessor& data)
    {
        while(data.size()) {
            add(*data);
            ++data;
        }
    }
    // Truncates to |size| elements when smaller; blocks are kept.
    void cut_at(unsigned size)
    {
        if(size < m_size) {
            m_size = size;
        }
    }
    unsigned size() const
    {
        return m_size;
    }
    // Element i lives in block (i >> block_shift) at offset (i & block_mask).
    const T& operator [] (unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& operator [] (unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    // Note: no bounds checking, despite the std-like name.
    const T& at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& at(unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T value_at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    const T& curr(unsigned idx) const
    {
        return (*this)[idx];
    }
    T& curr(unsigned idx)
    {
        return (*this)[idx];
    }
    // Circular neighbors: prev/next wrap modulo the current size.
    // Precondition: m_size != 0 (modulo by zero otherwise).
    const T& prev(unsigned idx) const
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    T& prev(unsigned idx)
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    const T& next(unsigned idx) const
    {
        return (*this)[(idx + 1) % m_size];
    }
    T& next(unsigned idx)
    {
        return (*this)[(idx + 1) % m_size];
    }
    // Precondition: m_size != 0.
    const T& last() const
    {
        return (*this)[m_size - 1];
    }
    T& last()
    {
        return (*this)[m_size - 1];
    }
    unsigned byte_size() const;
    // Direct access to block nb's storage.
    const T* block(unsigned nb) const
    {
        return m_blocks[nb];
    }
public:
    // NOTE(review): the internals below are public; the out-of-line member
    // definitions (and possibly external code) touch them directly.
    void allocate_block(unsigned nb);
    T* data_ptr();
    unsigned m_size;           // total number of live elements
    unsigned m_num_blocks;     // blocks currently allocated
    unsigned m_max_blocks;     // capacity of the block-pointer table
    T** m_blocks;              // owned table of owned blocks
    unsigned m_block_ptr_inc;  // growth step for the pointer table
};
~pod_deque()276 template<class T, unsigned S> pod_deque<T, S>::~pod_deque()
277 {
278 if(m_num_blocks) {
279 T** blk = m_blocks + m_num_blocks - 1;
280 while(m_num_blocks--) {
281 FX_Free(*blk);
282 --blk;
283 }
284 FX_Free(m_blocks);
285 }
286 }
287 template<class T, unsigned S>
free_tail(unsigned size)288 void pod_deque<T, S>::free_tail(unsigned size)
289 {
290 if(size < m_size) {
291 unsigned nb = (size + block_mask) >> block_shift;
292 while(m_num_blocks > nb) {
293 FX_Free(m_blocks[--m_num_blocks]);
294 }
295 m_size = size;
296 }
297 }
pod_deque()298 template<class T, unsigned S> pod_deque<T, S>::pod_deque() :
299 m_size(0),
300 m_num_blocks(0),
301 m_max_blocks(0),
302 m_blocks(0),
303 m_block_ptr_inc(block_size)
304 {
305 }
306 template<class T, unsigned S>
pod_deque(unsigned block_ptr_inc)307 pod_deque<T, S>::pod_deque(unsigned block_ptr_inc) :
308 m_size(0),
309 m_num_blocks(0),
310 m_max_blocks(0),
311 m_blocks(0),
312 m_block_ptr_inc(block_ptr_inc)
313 {
314 }
315 template<class T, unsigned S>
pod_deque(const pod_deque<T,S> & v)316 pod_deque<T, S>::pod_deque(const pod_deque<T, S>& v) :
317 m_size(v.m_size),
318 m_num_blocks(v.m_num_blocks),
319 m_max_blocks(v.m_max_blocks),
320 m_blocks(v.m_max_blocks ? FX_Alloc(T*, v.m_max_blocks) : 0),
321 m_block_ptr_inc(v.m_block_ptr_inc)
322 {
323 unsigned i;
324 for(i = 0; i < v.m_num_blocks; ++i) {
325 m_blocks[i] = FX_AllocUninit(T, block_size);
326 memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
327 }
328 }
329 template<class T, unsigned S>
330 pod_deque<T, S>& pod_deque<T, S>::operator = (const pod_deque<T, S>& v)
331 {
332 unsigned i;
333 for(i = m_num_blocks; i < v.m_num_blocks; ++i) {
334 allocate_block(i);
335 }
336 for(i = 0; i < v.m_num_blocks; ++i) {
337 memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
338 }
339 m_size = v.m_size;
340 return *this;
341 }
342 template<class T, unsigned S>
allocate_block(unsigned nb)343 void pod_deque<T, S>::allocate_block(unsigned nb)
344 {
345 if(nb >= m_max_blocks) {
346 T** new_blocks = FX_Alloc(T*, m_max_blocks + m_block_ptr_inc);
347 if(m_blocks) {
348 memcpy(new_blocks, m_blocks, m_num_blocks * sizeof(T*));
349 FX_Free(m_blocks);
350 }
351 m_blocks = new_blocks;
352 m_max_blocks += m_block_ptr_inc;
353 }
354 m_blocks[nb] = FX_Alloc(T, block_size);
355 m_num_blocks++;
356 }
357 template<class T, unsigned S>
data_ptr()358 inline T* pod_deque<T, S>::data_ptr()
359 {
360 unsigned nb = m_size >> block_shift;
361 if(nb >= m_num_blocks) {
362 allocate_block(nb);
363 }
364 return m_blocks[nb] + (m_size & block_mask);
365 }
366 template<class T, unsigned S>
add(const T & val)367 inline void pod_deque<T, S>::add(const T& val)
368 {
369 *data_ptr() = val;
370 ++m_size;
371 }
372 template<class T, unsigned S>
remove_last()373 inline void pod_deque<T, S>::remove_last()
374 {
375 if(m_size) {
376 --m_size;
377 }
378 }
379 template<class T, unsigned S>
modify_last(const T & val)380 void pod_deque<T, S>::modify_last(const T& val)
381 {
382 remove_last();
383 add(val);
384 }
// Reserves |num_elements| contiguous slots inside a single block and
// returns the index of the first one, or -1 when the run cannot fit in one
// block (num_elements >= block_size). When the current block's free tail
// is too small, that tail is skipped (those slots are wasted) and the run
// starts at the beginning of a fresh block.
template<class T, unsigned S>
int pod_deque<T, S>::allocate_continuous_block(unsigned num_elements)
{
    if(num_elements < block_size) {
        data_ptr();  // ensure the block containing m_size exists
        unsigned rest = block_size - (m_size & block_mask);
        unsigned index;
        if(num_elements <= rest) {
            // Fits in the remainder of the current block.
            index = m_size;
            m_size += num_elements;
            return index;
        }
        // Skip the too-small tail, then place the run in the next block.
        m_size += rest;
        data_ptr();
        index = m_size;
        m_size += num_elements;
        return index;
    }
    return -1;
}
405 template<class T, unsigned S>
byte_size()406 unsigned pod_deque<T, S>::byte_size() const
407 {
408 return m_size * sizeof(T);
409 }
// A simple bump ("arena") allocator: carves small allocations out of large
// blocks obtained via FX_Alloc. Individual allocations are never freed;
// everything is released at once by remove_all()/the destructor.
class pod_allocator
{
public:
    // Frees every block and the block-pointer table, resetting the
    // allocator to its initial empty state.
    void remove_all()
    {
        if(m_num_blocks) {
            int8u** blk = m_blocks + m_num_blocks - 1;
            while(m_num_blocks--) {
                FX_Free(*blk);
                --blk;
            }
            FX_Free(m_blocks);
        }
        m_num_blocks = 0;
        m_max_blocks = 0;
        m_blocks = 0;
        m_buf_ptr = 0;
        m_rest = 0;
    }
    ~pod_allocator()
    {
        remove_all();
    }
    // block_size: minimum size of each arena block.
    // block_ptr_inc: growth step for the block-pointer table.
    pod_allocator(unsigned block_size, unsigned block_ptr_inc = 256 - 8) :
        m_block_size(block_size),
        m_block_ptr_inc(block_ptr_inc),
        m_num_blocks(0),
        m_max_blocks(0),
        m_blocks(0),
        m_buf_ptr(0),
        m_rest(0)
    {
    }
    // Returns |size| bytes aligned to |alignment| (which is assumed to be
    // a power-of-two-style divisor; 1 means unaligned), or 0 when size is
    // 0. Recurses after grabbing a new block when the current one is full.
    int8u* allocate(unsigned size, unsigned alignment = 1)
    {
        if(size == 0) {
            return 0;
        }
        if(size <= m_rest) {
            int8u* ptr = m_buf_ptr;
            if(alignment > 1) {
                // Bytes needed to round ptr up to the alignment boundary.
                unsigned align = (alignment - unsigned((size_t)ptr) % alignment) % alignment;
                size += align;
                ptr += align;
                if(size <= m_rest) {
                    m_rest -= size;
                    m_buf_ptr += size;
                    return ptr;
                }
                // Padding pushed us past the block end: start a new block
                // and retry with the original (un-padded) request size.
                allocate_block(size);
                return allocate(size - align, alignment);
            }
            m_rest -= size;
            m_buf_ptr += size;
            return ptr;
        }
        // Current block too small; over-allocate by alignment - 1 so the
        // retry can satisfy any padding it needs.
        allocate_block(size + alignment - 1);
        return allocate(size, alignment);
    }
private:
    // Makes a fresh arena block of at least m_block_size bytes current,
    // growing the block-pointer table when it is full.
    void allocate_block(unsigned size)
    {
        if(size < m_block_size) {
            size = m_block_size;
        }
        if(m_num_blocks >= m_max_blocks) {
            int8u** new_blocks = FX_Alloc(int8u*, m_max_blocks + m_block_ptr_inc);
            if(m_blocks) {
                memcpy(new_blocks, m_blocks, m_num_blocks * sizeof(int8u*));
                FX_Free(m_blocks);
            }
            m_blocks = new_blocks;
            m_max_blocks += m_block_ptr_inc;
        }
        m_blocks[m_num_blocks] = m_buf_ptr = FX_Alloc(int8u, size);
        m_num_blocks++;
        m_rest = size;
    }
    unsigned m_block_size;     // minimum bytes per arena block
    unsigned m_block_ptr_inc;  // pointer-table growth step
    unsigned m_num_blocks;     // blocks currently allocated
    unsigned m_max_blocks;     // capacity of the pointer table
    int8u** m_blocks;          // owned table of owned blocks
    int8u* m_buf_ptr;          // next free byte in the current block
    unsigned m_rest;           // bytes remaining in the current block
};
// Partition-size threshold below which AGG's quick-sort routines switch to
// a simple insertion sort.
enum quick_sort_threshold_e {
    quick_sort_threshold = 9
};
swap_elements(T & a,T & b)499 template<class T> inline void swap_elements(T& a, T& b)
500 {
501 T temp = a;
502 a = b;
503 b = temp;
504 }
505 }
506 } // namespace pdfium
507 #endif
508