/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkTypes_DEFINED
#define SkTypes_DEFINED

#include "SkPreConfig.h"
#include "SkUserConfig.h"
#include "SkPostConfig.h"
#include <stdint.h>
#include <sys/types.h>

#if defined(SK_ARM_HAS_NEON)
    #include <arm_neon.h>
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
    #include <immintrin.h>
#endif

/** \file SkTypes.h
*/

/** See SkGraphics::GetVersion() to retrieve these at runtime
 */
#define SKIA_VERSION_MAJOR  1
#define SKIA_VERSION_MINOR  0
#define SKIA_VERSION_PATCH  0

/*
    memory wrappers to be implemented by the porting layer (platform)
*/

/** Called internally if we run out of memory. The platform implementation must
    not return, but should either throw an exception or otherwise exit.
*/
SK_API extern void sk_out_of_memory(void);
/** Called internally if we hit an unrecoverable error.
    The platform implementation must not return, but should either throw
    an exception or otherwise exit.
*/
SK_API extern void sk_throw(void);

enum {
    SK_MALLOC_TEMP  = 0x01, //!< hint to sk_malloc that the requested memory will be freed in the scope of the stack frame
    SK_MALLOC_THROW = 0x02  //!< instructs sk_malloc to call sk_throw if the memory cannot be allocated.
};
/** Return a block of memory (at least 4-byte aligned) of at least the
    specified size. If the requested memory cannot be returned, either
    return null (if the SK_MALLOC_THROW bit is clear) or throw an exception
    (if the SK_MALLOC_THROW bit is set). To free the memory, call sk_free().
*/
SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
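// Illustrative usage of the allocation flags (editorial sketch, not part of
// the original header): a short-lived scratch buffer whose allocation must
// succeed.
//
//     void* scratch = sk_malloc_flags(512, SK_MALLOC_TEMP | SK_MALLOC_THROW);
//     sk_bzero(scratch, 512);
//     sk_free(scratch);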
/** Same as sk_malloc(), but hard coded to pass SK_MALLOC_THROW as the flag
*/
SK_API extern void* sk_malloc_throw(size_t size);
/** Same as standard realloc(), but this one never returns null on failure. It will throw
    an exception if it fails.
*/
SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
/** Free memory returned by sk_malloc(). It is safe to pass null.
*/
SK_API extern void sk_free(void*);

/** Much like calloc: returns a pointer to at least size zero bytes, or NULL on failure.
 */
SK_API extern void* sk_calloc(size_t size);

/** Same as sk_calloc, but throws an exception instead of returning NULL on failure.
 */
SK_API extern void* sk_calloc_throw(size_t size);

// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
static inline void sk_bzero(void* buffer, size_t size) {
    memset(buffer, 0, size);
}

///////////////////////////////////////////////////////////////////////////////

#ifdef override_GLOBAL_NEW
#include <new>

inline void* operator new(size_t size) {
    return sk_malloc_throw(size);
}

inline void operator delete(void* p) {
    sk_free(p);
}
#endif

///////////////////////////////////////////////////////////////////////////////

#define SK_INIT_TO_AVOID_WARNING    = 0

#ifndef SkDebugf
    SK_API void SkDebugf(const char format[], ...);
#endif

#ifdef SK_DEBUG
    #define SkASSERT(cond)              SK_ALWAYSBREAK(cond)
    #define SkDEBUGFAIL(message)        SkASSERT(false && message)
    #define SkDEBUGCODE(code)           code
    #define SkDECLAREPARAM(type, var)   , type var
    #define SkPARAM(var)                , var
//  #define SkDEBUGF(args       )       SkDebugf##args
    #define SkDEBUGF(args       )       SkDebugf args
    #define SkAssertResult(cond)        SkASSERT(cond)
#else
    #define SkASSERT(cond)
    #define SkDEBUGFAIL(message)
    #define SkDEBUGCODE(code)
    #define SkDEBUGF(args)
    #define SkDECLAREPARAM(type, var)
    #define SkPARAM(var)

    // unlike SkASSERT, this guy executes its condition in the non-debug build
    #define SkAssertResult(cond)        cond
#endif

#define SkFAIL(message)                 SK_ALWAYSBREAK(false && message)

// We want to evaluate cond only once, and inside the SkASSERT somewhere so we see its string form.
// So we use the comma operator to make an SkDebugf that always returns false: we'll evaluate cond,
// and if it's true the assert passes; if it's false, we'll print the message and the assert fails.
#define SkASSERTF(cond, fmt, ...)       SkASSERT((cond) || (SkDebugf(fmt"\n", __VA_ARGS__), false))
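// Illustrative usage (editorial sketch; the variable names are hypothetical):
// the condition is evaluated once, and the formatted message is printed only
// if it fails (debug builds only).
//
//     SkASSERTF(bytesRead == byteCount, "read %d of %d bytes", bytesRead, byteCount);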

#ifdef SK_DEVELOPER
    #define SkDEVCODE(code)             code
#else
    #define SkDEVCODE(code)
#endif

#ifdef SK_IGNORE_TO_STRING
    #define SK_TO_STRING_NONVIRT()
    #define SK_TO_STRING_VIRT()
    #define SK_TO_STRING_PUREVIRT()
    #define SK_TO_STRING_OVERRIDE()
#else
    // the 'toString' helper functions convert Sk* objects to human-readable
    // form in developer mode
    #define SK_TO_STRING_NONVIRT() void toString(SkString* str) const;
    #define SK_TO_STRING_VIRT() virtual void toString(SkString* str) const;
    #define SK_TO_STRING_PUREVIRT() virtual void toString(SkString* str) const = 0;
    #define SK_TO_STRING_OVERRIDE() void toString(SkString* str) const override;
#endif

template <bool>
struct SkCompileAssert {
};

// Uses static_cast<bool>(expr) instead of bool(expr) due to
// https://connect.microsoft.com/VisualStudio/feedback/details/832915

// The extra parentheses in SkCompileAssert<(...)> are a work around for
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=57771
// which was fixed in gcc 4.8.2.
#define SK_COMPILE_ASSERT(expr, msg) \
    typedef SkCompileAssert<(static_cast<bool>(expr))> \
            msg[static_cast<bool>(expr) ? 1 : -1] SK_UNUSED
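// Illustrative usage (editorial sketch): reject a build where uint32_t is not
// 4 bytes wide; the msg token becomes part of the compiler error.
//
//     SK_COMPILE_ASSERT(sizeof(uint32_t) == 4, uint32_t_must_be_4_bytes);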

/*
 *  Usage:  SK_MACRO_CONCAT(a, b)   to construct the symbol ab
 *
 *  SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
 *
 */
#define SK_MACRO_CONCAT(X, Y)           SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
#define SK_MACRO_CONCAT_IMPL_PRIV(X, Y)  X ## Y

/*
 *  Usage: SK_MACRO_APPEND_LINE(foo)    to make foo123, where 123 is the current
 *                                      line number. Easy way to construct
 *                                      unique names for local functions or
 *                                      variables.
 */
#define SK_MACRO_APPEND_LINE(name)  SK_MACRO_CONCAT(name, __LINE__)
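// Illustrative usage (editorial sketch): declare a uniquely named local so two
// expansions in the same translation unit don't collide.
//
//     int SK_MACRO_APPEND_LINE(scratch) = 0;   // expands to e.g. scratch123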

/**
 * For some classes, it's almost always an error to instantiate one without a name, e.g.
 *   {
 *       SkAutoMutexAcquire(&mutex);
 *       <some code>
 *   }
 * In this case, the writer meant to hold mutex while the rest of the code in the block runs,
 * but instead the mutex is acquired and then immediately released.  The correct usage is
 *   {
 *       SkAutoMutexAcquire lock(&mutex);
 *       <some code>
 *   }
 *
 * To prevent callers from instantiating your class without a name, use SK_REQUIRE_LOCAL_VAR
 * like this:
 *   class classname {
 *       <your class>
 *   };
 *   #define classname(...) SK_REQUIRE_LOCAL_VAR(classname)
 *
 * This won't work with templates, and you must inline the class' constructors and destructors.
 * Take a look at SkAutoFree and SkAutoMalloc in this file for examples.
 */
#define SK_REQUIRE_LOCAL_VAR(classname) \
    SK_COMPILE_ASSERT(false, missing_name_for_##classname)

///////////////////////////////////////////////////////////////////////

/**
 *  Fast type for signed 8 bits. Use for parameter passing and local variables,
 *  not for storage.
 */
typedef int S8CPU;

/**
 *  Fast type for unsigned 8 bits. Use for parameter passing and local
 *  variables, not for storage
 */
typedef unsigned U8CPU;

/**
 *  Fast type for signed 16 bits. Use for parameter passing and local variables,
 *  not for storage
 */
typedef int S16CPU;

/**
 *  Fast type for unsigned 16 bits. Use for parameter passing and local
 *  variables, not for storage
 */
typedef unsigned U16CPU;

/**
 *  Meant to be faster than bool (doesn't promise to be 0 or 1,
 *  just 0 or non-zero).
 */
typedef int SkBool;

/**
 *  Meant to be a small version of bool, for storage purposes. Will be 0 or 1
 */
typedef uint8_t SkBool8;

#ifdef SK_DEBUG
    SK_API int8_t      SkToS8(intmax_t);
    SK_API uint8_t     SkToU8(uintmax_t);
    SK_API int16_t     SkToS16(intmax_t);
    SK_API uint16_t    SkToU16(uintmax_t);
    SK_API int32_t     SkToS32(intmax_t);
    SK_API uint32_t    SkToU32(uintmax_t);
    SK_API int         SkToInt(intmax_t);
    SK_API unsigned    SkToUInt(uintmax_t);
    SK_API size_t      SkToSizeT(uintmax_t);
    SK_API off_t       SkToOffT(intmax_t x);
#else
    #define SkToS8(x)   ((int8_t)(x))
    #define SkToU8(x)   ((uint8_t)(x))
    #define SkToS16(x)  ((int16_t)(x))
    #define SkToU16(x)  ((uint16_t)(x))
    #define SkToS32(x)  ((int32_t)(x))
    #define SkToU32(x)  ((uint32_t)(x))
    #define SkToInt(x)  ((int)(x))
    #define SkToUInt(x) ((unsigned)(x))
    #define SkToSizeT(x) ((size_t)(x))
    #define SkToOffT(x) ((off_t)(x))
#endif

/** Returns 0 or 1 based on the condition
*/
#define SkToBool(cond)  ((cond) != 0)

#define SK_MaxS16   32767
#define SK_MinS16   -32767
#define SK_MaxU16   0xFFFF
#define SK_MinU16   0
#define SK_MaxS32   0x7FFFFFFF
#define SK_MinS32   -SK_MaxS32
#define SK_MaxU32   0xFFFFFFFF
#define SK_MinU32   0
#define SK_NaN32    (1 << 31)

/** Returns true if the value can be represented with signed 16bits
 */
static inline bool SkIsS16(long x) {
    return (int16_t)x == x;
}

/** Returns true if the value can be represented with unsigned 16bits
 */
static inline bool SkIsU16(long x) {
    return (uint16_t)x == x;
}

//////////////////////////////////////////////////////////////////////////////
#ifndef SK_OFFSETOF
    #define SK_OFFSETOF(type, field)    (size_t)((char*)&(((type*)1)->field) - (char*)1)
#endif

/** Returns the number of entries in an array (not a pointer) */
template <typename T, size_t N> char (&SkArrayCountHelper(T (&array)[N]))[N];
#define SK_ARRAY_COUNT(array) (sizeof(SkArrayCountHelper(array)))
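// Illustrative usage (editorial sketch): because the helper only accepts a
// real array reference, passing a pointer fails to compile instead of silently
// computing sizeof(pointer) / sizeof(element).
//
//     static const int gValues[] = { 1, 2, 3 };
//     size_t n = SK_ARRAY_COUNT(gValues);   // n == 3, computed at compile time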

#define SkAlign2(x)     (((x) + 1) >> 1 << 1)
#define SkIsAlign2(x)   (0 == ((x) & 1))

#define SkAlign4(x)     (((x) + 3) >> 2 << 2)
#define SkIsAlign4(x)   (0 == ((x) & 3))

#define SkAlign8(x)     (((x) + 7) >> 3 << 3)
#define SkIsAlign8(x)   (0 == ((x) & 7))

#define SkAlignPtr(x)   (sizeof(void*) == 8 ?   SkAlign8(x) :   SkAlign4(x))
#define SkIsAlignPtr(x) (sizeof(void*) == 8 ? SkIsAlign8(x) : SkIsAlign4(x))
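// Illustrative values (editorial sketch): SkAlignN rounds up to the next
// multiple of N, so SkAlign4(10) == 12 and SkAlign4(12) == 12, while
// SkIsAlign8(24) is true and SkIsAlign8(20) is false.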

typedef uint32_t SkFourByteTag;
#define SkSetFourByteTag(a, b, c, d)    (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
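// Illustrative usage (editorial sketch; the constant name is hypothetical):
// pack four 8-bit characters into one 32-bit tag, first character in the most
// significant byte.
//
//     const SkFourByteTag kSkiaTag = SkSetFourByteTag('s', 'k', 'i', 'a');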

/** 32 bit integer to hold a unicode value
*/
typedef int32_t SkUnichar;
/** 32 bit value to hold a millisecond count
*/
typedef uint32_t SkMSec;
/** 1 second measured in milliseconds
*/
#define SK_MSec1 1000
/** maximum representable milliseconds
*/
#define SK_MSecMax 0x7FFFFFFF
/** Returns a < b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
*/
#define SkMSec_LT(a, b)     ((int32_t)(a) - (int32_t)(b) < 0)
/** Returns a <= b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
*/
#define SkMSec_LE(a, b)     ((int32_t)(a) - (int32_t)(b) <= 0)
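// Illustrative behavior (editorial sketch): the signed subtraction makes the
// comparison wrap-safe, so a timestamp taken just before the 32-bit counter
// wraps still compares as earlier than one taken just after it.
//
//     SkMSec before = 0xFFFFFFF0;              // shortly before wrap-around
//     SkMSec after  = 0x00000010;              // shortly after wrap-around
//     bool ordered = SkMSec_LT(before, after); // true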

/** The generation IDs in Skia reserve 0 as an invalid marker.
 */
#define SK_InvalidGenID     0
/** The unique IDs in Skia reserve 0 as an invalid marker.
 */
#define SK_InvalidUniqueID  0

/****************************************************************************
    The rest of these only build with C++
*/
#ifdef __cplusplus

/** Faster than SkToBool for integral conditions. Returns 0 or 1
*/
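// (For nonzero n, either n or 0-n has its top bit set, so (n | (0-n)) >> 31
// yields 1; for n == 0 it yields 0. The result is computed without a compare.)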
static inline int Sk32ToBool(uint32_t n) {
    return (n | (0-n)) >> 31;
}

/** Generic swap function. Classes with efficient swaps should specialize this function to take
    their fast path. This function is used by SkTSort. */
template <typename T> inline void SkTSwap(T& a, T& b) {
    T c(a);
    a = b;
    b = c;
}

static inline int32_t SkAbs32(int32_t value) {
    SkASSERT(value != SK_NaN32);  // The most negative int32_t can't be negated.
    if (value < 0) {
        value = -value;
    }
    return value;
}

template <typename T> inline T SkTAbs(T value) {
    if (value < 0) {
        value = -value;
    }
    return value;
}

static inline int32_t SkMax32(int32_t a, int32_t b) {
    if (a < b)
        a = b;
    return a;
}

static inline int32_t SkMin32(int32_t a, int32_t b) {
    if (a > b)
        a = b;
    return a;
}

template <typename T> const T& SkTMin(const T& a, const T& b) {
    return (a < b) ? a : b;
}

template <typename T> const T& SkTMax(const T& a, const T& b) {
    return (b < a) ? a : b;
}

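// Returns -1, 0, or +1 according to the sign of a: (a >> 31) contributes -1
// for negative inputs (assuming the usual arithmetic right shift of signed
// values), and ((unsigned) -a >> 31) contributes 1 for positive inputs.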
static inline int32_t SkSign32(int32_t a) {
    return (a >> 31) | ((unsigned) -a >> 31);
}

static inline int32_t SkFastMin32(int32_t value, int32_t max) {
    if (value > max) {
        value = max;
    }
    return value;
}

template <typename T> static inline const T& SkTPin(const T& x, const T& min, const T& max) {
    return SkTMax(SkTMin(x, max), min);
}

/** Returns signed 32 bit value pinned between min and max, inclusively. */
static inline int32_t SkPin32(int32_t value, int32_t min, int32_t max) {
    return SkTPin(value, min, max);
}

static inline uint32_t SkSetClearShift(uint32_t bits, bool cond,
                                       unsigned shift) {
    SkASSERT((int)cond == 0 || (int)cond == 1);
    return (bits & ~(1 << shift)) | ((int)cond << shift);
}

static inline uint32_t SkSetClearMask(uint32_t bits, bool cond,
                                      uint32_t mask) {
    return cond ? bits | mask : bits & ~mask;
}
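// Illustrative usage (editorial sketch): both helpers set or clear bits based
// on a boolean, avoiding a separate if/else at the call site.
//
//     uint32_t flags = 0;
//     flags = SkSetClearMask(flags, true, 0x08);    // flags == 0x08
//     flags = SkSetClearShift(flags, false, 3);     // clears bit 3, flags == 0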

///////////////////////////////////////////////////////////////////////////////

/** Use to combine multiple bits in a bitmask in a type safe way.
 */
template <typename T>
T SkTBitOr(T a, T b) {
    return (T)(a | b);
}

/**
 *  Use to cast a pointer to a different type, while maintaining strict-aliasing
 */
template <typename Dst> Dst SkTCast(const void* ptr) {
    union {
        const void* src;
        Dst dst;
    } data;
    data.src = ptr;
    return data.dst;
}
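// Illustrative usage (editorial sketch): reinterpret a void* as a typed
// pointer through the union rather than with a raw reinterpret_cast.
//
//     void* storage = sk_malloc_throw(4 * sizeof(uint32_t));
//     uint32_t* words = SkTCast<uint32_t*>(storage);
//     sk_bzero(words, 4 * sizeof(uint32_t));
//     sk_free(storage);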

//////////////////////////////////////////////////////////////////////////////

/** \class SkNoncopyable

SkNoncopyable is the base class for objects that do not want to
be copied. It hides its copy-constructor and its assignment-operator.
*/
class SK_API SkNoncopyable {
public:
    SkNoncopyable() {}

private:
    SkNoncopyable(const SkNoncopyable&);
    SkNoncopyable& operator=(const SkNoncopyable&);
};

class SkAutoFree : SkNoncopyable {
public:
    SkAutoFree() : fPtr(NULL) {}
    explicit SkAutoFree(void* ptr) : fPtr(ptr) {}
    ~SkAutoFree() { sk_free(fPtr); }

    /** Return the currently allocated buffer, or null
    */
    void* get() const { return fPtr; }

    /** Assign a new ptr allocated with sk_malloc (or null), and return the
        previous ptr. Note it is the caller's responsibility to sk_free the
        returned ptr.
    */
    void* set(void* ptr) {
        void* prev = fPtr;
        fPtr = ptr;
        return prev;
    }

    /** Transfer ownership of the current ptr to the caller, setting the
        internal reference to null. Note the caller is responsible for calling
        sk_free on the returned address.
    */
    void* detach() { return this->set(NULL); }

    /** Free the current buffer, and set the internal reference to NULL. Same
        as calling sk_free(detach())
    */
    void free() {
        sk_free(fPtr);
        fPtr = NULL;
    }

private:
    void* fPtr;
    // illegal
    SkAutoFree(const SkAutoFree&);
    SkAutoFree& operator=(const SkAutoFree&);
};
#define SkAutoFree(...) SK_REQUIRE_LOCAL_VAR(SkAutoFree)
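// Illustrative usage (editorial sketch): the wrapper owns the malloc'd block
// and releases it when the named local goes out of scope.
//
//     {
//         SkAutoFree data(sk_malloc_throw(64));
//         sk_bzero(data.get(), 64);
//     }   // the 64-byte block is sk_free'd here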

/**
 *  Manage an allocated block of heap memory. This object is the sole manager of
 *  the lifetime of the block, so the caller must not call sk_free() or delete
 *  on the block, unless detach() was called.
 */
class SkAutoMalloc : SkNoncopyable {
public:
    explicit SkAutoMalloc(size_t size = 0) {
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;
    }

    ~SkAutoMalloc() {
        sk_free(fPtr);
    }

    /**
     *  Passed to reset to specify what happens if the requested size is smaller
     *  than the current size (and the current block was dynamically allocated).
     */
    enum OnShrink {
        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, free the old block and
         *  malloc a new block of the smaller size.
         */
        kAlloc_OnShrink,

        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, just return the old
         *  block.
         */
        kReuse_OnShrink
    };

    /**
     *  Reallocates the block to a new size. The ptr may or may not change.
     */
    void* reset(size_t size, OnShrink shrink = kAlloc_OnShrink,  bool* didChangeAlloc = NULL) {
        if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
            if (didChangeAlloc) {
                *didChangeAlloc = false;
            }
            return fPtr;
        }

        sk_free(fPtr);
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;
        if (didChangeAlloc) {
            *didChangeAlloc = true;
        }

        return fPtr;
    }

    /**
     *  Releases the block back to the heap
     */
    void free() {
        this->reset(0);
    }

    /**
     *  Return the allocated block.
     */
    void* get() { return fPtr; }
    const void* get() const { return fPtr; }

   /** Transfer ownership of the current ptr to the caller, setting the
       internal reference to null. Note the caller is responsible for calling
       sk_free on the returned address.
    */
    void* detach() {
        void* ptr = fPtr;
        fPtr = NULL;
        fSize = 0;
        return ptr;
    }

private:
    void*   fPtr;
    size_t  fSize;  // can be larger than the requested size (see kReuse)
};
#define SkAutoMalloc(...) SK_REQUIRE_LOCAL_VAR(SkAutoMalloc)
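// Illustrative usage (editorial sketch; the sizes are hypothetical): reset()
// lets a single SkAutoMalloc be reused, reallocating only when it must grow.
//
//     SkAutoMalloc storage(256);
//     sk_bzero(storage.get(), 256);
//     storage.reset(128, SkAutoMalloc::kReuse_OnShrink);   // keeps the old block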

/**
 *  Manage an allocated block of memory. If the requested size is <= kSize, then
 *  the allocation will come from the stack rather than the heap. This object
 *  is the sole manager of the lifetime of the block, so the caller must not
 *  call sk_free() or delete on the block.
 */
template <size_t kSize> class SkAutoSMalloc : SkNoncopyable {
public:
    /**
     *  Creates initially empty storage. get() returns a ptr, but it is to
     *  a zero-byte allocation. Must call reset(size) to return an allocated
     *  block.
     */
    SkAutoSMalloc() {
        fPtr = fStorage;
        fSize = kSize;
    }

    /**
     *  Allocate a block of the specified size. If size <= kSize, then the
     *  allocation will come from the stack, otherwise it will be dynamically
     *  allocated.
     */
    explicit SkAutoSMalloc(size_t size) {
        fPtr = fStorage;
        fSize = kSize;
        this->reset(size);
    }

    /**
     *  Free the allocated block (if any). If the block was small enough to
     *  have been allocated on the stack (size <= kSize) then this does nothing.
     */
    ~SkAutoSMalloc() {
        if (fPtr != (void*)fStorage) {
            sk_free(fPtr);
        }
    }

    /**
     *  Return the allocated block. May return non-null even if the block is
     *  of zero size. Since this may be on the stack or dynamically allocated,
     *  the caller must not call sk_free() on it, but must rely on SkAutoSMalloc
     *  to manage it.
     */
    void* get() const { return fPtr; }

    /**
     *  Return a new block of the requested size, freeing (as necessary) any
     *  previously allocated block. As with the constructor, if size <= kSize
     *  then the return block may be allocated locally, rather than from the
     *  heap.
     */
    void* reset(size_t size,
                SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink,
                bool* didChangeAlloc = NULL) {
        size = (size < kSize) ? kSize : size;
        bool alloc = size != fSize && (SkAutoMalloc::kAlloc_OnShrink == shrink || size > fSize);
        if (didChangeAlloc) {
            *didChangeAlloc = alloc;
        }
        if (alloc) {
            if (fPtr != (void*)fStorage) {
                sk_free(fPtr);
            }

            if (size == kSize) {
                SkASSERT(fPtr != fStorage); // otherwise we lied when setting didChangeAlloc.
                fPtr = fStorage;
            } else {
                fPtr = sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_TEMP);
            }

            fSize = size;
        }
        SkASSERT(fSize >= size && fSize >= kSize);
        SkASSERT((fPtr == fStorage) || fSize > kSize);
        return fPtr;
    }

private:
    void*       fPtr;
    size_t      fSize;  // can be larger than the requested size (see kReuse)
    uint32_t    fStorage[(kSize + 3) >> 2];
};
// Can't guard the constructor because it's a template class.
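// Illustrative usage (editorial sketch; byteCount is hypothetical): requests
// up to the template size are served from the embedded storage; larger ones
// fall back to the heap and are still freed automatically.
//
//     SkAutoSMalloc<1024> buffer(byteCount);
//     void* scratch = buffer.get();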

#endif /* C++ */

#endif