/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#ifndef SkTypes_DEFINED
#define SkTypes_DEFINED

#include "SkPreConfig.h"
#include "SkUserConfig.h"
#include "SkPostConfig.h"

#ifndef SK_IGNORE_STDINT_DOT_H
    #include <stdint.h>
#endif

#include <stdio.h>
#include <string.h>     // for memset(), used by sk_bzero() below

/** \file SkTypes.h
*/

/** See SkGraphics::GetVersion() to retrieve these at runtime
 */
#define SKIA_VERSION_MAJOR  1
#define SKIA_VERSION_MINOR  0
#define SKIA_VERSION_PATCH  0

/*
    memory wrappers to be implemented by the porting layer (platform)
*/

/** Called internally if we run out of memory. The platform implementation must
    not return, but should either throw an exception or otherwise exit.
*/
SK_API extern void sk_out_of_memory(void);
/** Called internally if we hit an unrecoverable error.
    The platform implementation must not return, but should either throw
    an exception or otherwise exit.
*/
SK_API extern void sk_throw(void);

enum {
    SK_MALLOC_TEMP  = 0x01, //!< hint to sk_malloc that the requested memory will be freed in the scope of the stack frame
    SK_MALLOC_THROW = 0x02  //!< instructs sk_malloc to call sk_throw if the memory cannot be allocated.
};
/** Return a block of memory (at least 4-byte aligned) of at least the
    specified size. If the requested memory cannot be returned, either
    return null (if the SK_MALLOC_THROW bit is clear) or call sk_throw()
    (if the SK_MALLOC_THROW bit is set). To free the memory, call sk_free().
*/
SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
/** Same as sk_malloc_flags(), but hard coded to pass SK_MALLOC_THROW as the flag
*/
SK_API extern void* sk_malloc_throw(size_t size);
/** Same as standard realloc(), but this one never returns null on failure; it
    calls sk_throw() instead.
*/
SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
/** Free memory returned by sk_malloc_flags(), sk_malloc_throw(), or
    sk_realloc_throw(). It is safe to pass null.
*/
SK_API extern void sk_free(void*);

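/*
 *  Example (usage sketch only): allocate a short-lived scratch buffer without
 *  aborting on failure, then release it with sk_free(). byteCount stands for
 *  any caller-supplied size.
 *
 *      void* scratch = sk_malloc_flags(byteCount, SK_MALLOC_TEMP);
 *      if (scratch) {
 *          // ... use the buffer ...
 *          sk_free(scratch);
 *      }
 */
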
// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
static inline void sk_bzero(void* buffer, size_t size) {
    memset(buffer, 0, size);
}

///////////////////////////////////////////////////////////////////////////////

#ifdef SK_OVERRIDE_GLOBAL_NEW
#include <new>

inline void* operator new(size_t size) {
    return sk_malloc_throw(size);
}

inline void operator delete(void* p) {
    sk_free(p);
}
#endif

///////////////////////////////////////////////////////////////////////////////

#define SK_INIT_TO_AVOID_WARNING    = 0

#ifndef SkDebugf
    void SkDebugf(const char format[], ...);
#endif

#ifdef SK_DEBUG
    #define SkASSERT(cond)              SK_DEBUGBREAK(cond)
    #define SkDEBUGFAIL(message)        SkASSERT(false && message)
    #define SkDEBUGCODE(code)           code
    #define SkDECLAREPARAM(type, var)   , type var
    #define SkPARAM(var)                , var
//  #define SkDEBUGF(args       )       SkDebugf##args
    #define SkDEBUGF(args       )       SkDebugf args
    #define SkAssertResult(cond)        SkASSERT(cond)
#else
    #define SkASSERT(cond)
    #define SkDEBUGFAIL(message)
    #define SkDEBUGCODE(code)
    #define SkDEBUGF(args)
    #define SkDECLAREPARAM(type, var)
    #define SkPARAM(var)

    // unlike SkASSERT, this macro evaluates its condition even in the non-debug build
    #define SkAssertResult(cond)        cond
#endif

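/*
 *  Usage sketch (illustrative only): SkDEBUGF takes a doubly-parenthesized
 *  argument list, and SkAssertResult evaluates its condition even in release
 *  builds, so side effects are preserved. count and doWork() are placeholders,
 *  not part of this header.
 *
 *      SkDEBUGF(("processed %d items\n", count));
 *      SkAssertResult(doWork());   // doWork() still runs when SK_DEBUG is off
 */
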
#ifdef SK_DEVELOPER
    #define SkDEVCODE(code)             code
    // the 'toString' helper functions convert Sk* objects to human-readable
    // form in developer mode
    #define SK_DEVELOPER_TO_STRING()    virtual void toString(SkString* str) const SK_OVERRIDE;
#else
    #define SkDEVCODE(code)
    #define SK_DEVELOPER_TO_STRING()
#endif

template <bool>
struct SkCompileAssert {
};

#define SK_COMPILE_ASSERT(expr, msg) \
    typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] SK_UNUSED

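/*
 *  Example (illustrative only): the typedef'ed array gets a negative size,
 *  and therefore fails to compile, whenever the expression is false.
 *
 *      SK_COMPILE_ASSERT(sizeof(int32_t) == 4, int32_t_must_be_4_bytes);
 */
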
/*
 *  Usage:  SK_MACRO_CONCAT(a, b)   to construct the symbol ab
 *
 *  SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
 *
 */
#define SK_MACRO_CONCAT(X, Y)           SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
#define SK_MACRO_CONCAT_IMPL_PRIV(X, Y)  X ## Y

/*
 *  Usage: SK_MACRO_APPEND_LINE(foo)    to make foo123, where 123 is the current
 *                                      line number. Easy way to construct
 *                                      unique names for local functions or
 *                                      variables.
 */
#define SK_MACRO_APPEND_LINE(name)  SK_MACRO_CONCAT(name, __LINE__)

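/*
 *  Example (illustrative only): each expansion below yields a distinct local
 *  name such as sk_tmp_137, keyed to the current line number.
 *
 *      int SK_MACRO_APPEND_LINE(sk_tmp_) = 0;
 */
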
///////////////////////////////////////////////////////////////////////

/**
 *  Fast type for signed 8 bits. Use for parameter passing and local variables,
 *  not for storage.
 */
typedef int S8CPU;

/**
 *  Fast type for unsigned 8 bits. Use for parameter passing and local
 *  variables, not for storage.
 */
typedef unsigned U8CPU;

/**
 *  Fast type for signed 16 bits. Use for parameter passing and local variables,
 *  not for storage.
 */
typedef int S16CPU;

/**
 *  Fast type for unsigned 16 bits. Use for parameter passing and local
 *  variables, not for storage.
 */
typedef unsigned U16CPU;

/**
 *  Meant to be faster than bool (doesn't promise to be 0 or 1,
 *  just 0 or non-zero).
 */
typedef int SkBool;

/**
 *  Meant to be a small version of bool, for storage purposes. Will be 0 or 1.
 */
typedef uint8_t SkBool8;

#ifdef SK_DEBUG
    SK_API int8_t      SkToS8(intmax_t);
    SK_API uint8_t     SkToU8(uintmax_t);
    SK_API int16_t     SkToS16(intmax_t);
    SK_API uint16_t    SkToU16(uintmax_t);
    SK_API int32_t     SkToS32(intmax_t);
    SK_API uint32_t    SkToU32(uintmax_t);
#else
    #define SkToS8(x)   ((int8_t)(x))
    #define SkToU8(x)   ((uint8_t)(x))
    #define SkToS16(x)  ((int16_t)(x))
    #define SkToU16(x)  ((uint16_t)(x))
    #define SkToS32(x)  ((int32_t)(x))
    #define SkToU32(x)  ((uint32_t)(x))
#endif

/** Returns 0 or 1 based on the condition
*/
#define SkToBool(cond)  ((cond) != 0)

#define SK_MaxS16   32767
#define SK_MinS16   -32767
#define SK_MaxU16   0xFFFF
#define SK_MinU16   0
#define SK_MaxS32   0x7FFFFFFF
#define SK_MinS32   -SK_MaxS32
#define SK_MaxU32   0xFFFFFFFF
#define SK_MinU32   0
#define SK_NaN32    (1 << 31)

/** Returns true if the value can be represented with signed 16 bits
 */
static inline bool SkIsS16(long x) {
    return (int16_t)x == x;
}

/** Returns true if the value can be represented with unsigned 16 bits
 */
static inline bool SkIsU16(long x) {
    return (uint16_t)x == x;
}

//////////////////////////////////////////////////////////////////////////////
#ifndef SK_OFFSETOF
    #define SK_OFFSETOF(type, field)    (size_t)((char*)&(((type*)1)->field) - (char*)1)
#endif

/** Returns the number of entries in an array (not a pointer)
*/
#define SK_ARRAY_COUNT(array)       (sizeof(array) / sizeof(array[0]))

#define SkAlign2(x)     (((x) + 1) >> 1 << 1)
#define SkIsAlign2(x)   (0 == ((x) & 1))

#define SkAlign4(x)     (((x) + 3) >> 2 << 2)
#define SkIsAlign4(x)   (0 == ((x) & 3))

#define SkAlign8(x)     (((x) + 7) >> 3 << 3)
#define SkIsAlign8(x)   (0 == ((x) & 7))

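/*
 *  Examples (illustrative only):
 *
 *      int edges[4];
 *      size_t n = SK_ARRAY_COUNT(edges);   // 4
 *
 *      size_t rowBytes = SkAlign4(10);     // 12
 *      bool ok = SkIsAlign4(rowBytes);     // true
 */
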
typedef uint32_t SkFourByteTag;
#define SkSetFourByteTag(a, b, c, d)    (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

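/*
 *  Example (illustrative only): pack four ASCII characters into a tag, with
 *  the first character in the most significant byte.
 *
 *      SkFourByteTag tag = SkSetFourByteTag('t', 'e', 's', 't');   // 0x74657374
 */
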
/** 32 bit integer to hold a Unicode value
*/
typedef int32_t SkUnichar;
/** 32 bit value to hold a millisecond count
*/
typedef uint32_t SkMSec;
/** 1 second measured in milliseconds
*/
#define SK_MSec1 1000
/** maximum representable milliseconds
*/
#define SK_MSecMax 0x7FFFFFFF
/** Returns a < b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
*/
#define SkMSec_LT(a, b)     ((int32_t)(a) - (int32_t)(b) < 0)
/** Returns a <= b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
*/
#define SkMSec_LE(a, b)     ((int32_t)(a) - (int32_t)(b) <= 0)

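/*
 *  Example (illustrative only): the signed subtraction in SkMSec_LT/LE stays
 *  correct across timer wrap-around, where a plain '<' would not.
 *
 *      SkMSec earlier = 0xFFFFFFF0;                   // just before wrap
 *      SkMSec later   = 0x00000010;                   // just after wrap
 *      bool inOrder   = SkMSec_LT(earlier, later);    // true
 */
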
/****************************************************************************
    The rest of these only build with C++
*/
#ifdef __cplusplus

/** Faster than SkToBool for integral conditions. Returns 0 or 1
*/
static inline int Sk32ToBool(uint32_t n) {
    return (n | (0-n)) >> 31;
}

/** Generic swap function. Classes with efficient swaps should specialize this function to take
    their fast path. This function is used by SkTSort. */
template <typename T> inline void SkTSwap(T& a, T& b) {
    T c(a);
    a = b;
    b = c;
}

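/*
 *  Sketch (illustrative only): a class with a cheap member-wise swap can
 *  specialize SkTSwap so SkTSort and other callers avoid the three-copy
 *  default. SkExample is a hypothetical type, not part of this header.
 *
 *      template <> inline void SkTSwap(SkExample& a, SkExample& b) {
 *          a.swap(b);   // assumed O(1) member swap
 *      }
 */
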
static inline int32_t SkAbs32(int32_t value) {
    if (value < 0) {
        value = -value;
    }
    return value;
}

template <typename T> inline T SkTAbs(T value) {
    if (value < 0) {
        value = -value;
    }
    return value;
}

static inline int32_t SkMax32(int32_t a, int32_t b) {
    if (a < b)
        a = b;
    return a;
}

static inline int32_t SkMin32(int32_t a, int32_t b) {
    if (a > b)
        a = b;
    return a;
}

template <typename T> const T& SkTMin(const T& a, const T& b) {
    return (a < b) ? a : b;
}

template <typename T> const T& SkTMax(const T& a, const T& b) {
    return (b < a) ? a : b;
}

static inline int32_t SkSign32(int32_t a) {
    return (a >> 31) | ((unsigned) -a >> 31);
}

static inline int32_t SkFastMin32(int32_t value, int32_t max) {
    if (value > max) {
        value = max;
    }
    return value;
}

/** Returns a signed 32 bit value pinned between min and max, inclusive
*/
static inline int32_t SkPin32(int32_t value, int32_t min, int32_t max) {
    if (value < min) {
        value = min;
    }
    if (value > max) {
        value = max;
    }
    return value;
}

static inline uint32_t SkSetClearShift(uint32_t bits, bool cond,
                                       unsigned shift) {
    SkASSERT((int)cond == 0 || (int)cond == 1);
    return (bits & ~(1 << shift)) | ((int)cond << shift);
}

static inline uint32_t SkSetClearMask(uint32_t bits, bool cond,
                                      uint32_t mask) {
    return cond ? bits | mask : bits & ~mask;
}

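/*
 *  Example (illustrative only): set or clear a single flag without branching
 *  at the call site. kBoldFlag is a hypothetical bit mask.
 *
 *      uint32_t flags = 0;
 *      flags = SkSetClearMask(flags, true,  kBoldFlag);   // sets the bit
 *      flags = SkSetClearMask(flags, false, kBoldFlag);   // clears it again
 */
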
///////////////////////////////////////////////////////////////////////////////

/** Use to combine multiple bits in a bitmask in a type-safe way.
 */
template <typename T>
T SkTBitOr(T a, T b) {
    return (T)(a | b);
}

/**
 *  Use to cast a pointer to a different type without violating strict-aliasing rules.
 */
template <typename Dst> Dst SkTCast(const void* ptr) {
    union {
        const void* src;
        Dst dst;
    } data;
    data.src = ptr;
    return data.dst;
}

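/*
 *  Example (illustrative only): reinterpret an opaque pointer that is known to
 *  point at floats, routing the conversion through the union rather than a
 *  raw cast.
 *
 *      static float firstValue(const void* vptr) {
 *          const float* values = SkTCast<const float*>(vptr);
 *          return values[0];
 *      }
 */
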
//////////////////////////////////////////////////////////////////////////////

/** \class SkNoncopyable

SkNoncopyable is the base class for objects that do not want to be copied.
It hides its copy-constructor and its assignment-operator.
*/
class SK_API SkNoncopyable {
public:
    SkNoncopyable() {}

private:
    SkNoncopyable(const SkNoncopyable&);
    SkNoncopyable& operator=(const SkNoncopyable&);
};

class SkAutoFree : SkNoncopyable {
public:
    SkAutoFree() : fPtr(NULL) {}
    explicit SkAutoFree(void* ptr) : fPtr(ptr) {}
    ~SkAutoFree() { sk_free(fPtr); }

    /** Return the currently allocated buffer, or null
    */
    void* get() const { return fPtr; }

    /** Assign a new ptr allocated with sk_malloc (or null), and return the
        previous ptr. Note it is the caller's responsibility to sk_free the
        returned ptr.
    */
    void* set(void* ptr) {
        void* prev = fPtr;
        fPtr = ptr;
        return prev;
    }

    /** Transfer ownership of the current ptr to the caller, setting the
        internal reference to null. Note the caller is responsible for calling
        sk_free on the returned address.
    */
    void* detach() { return this->set(NULL); }

    /** Free the current buffer, and set the internal reference to NULL. Same
        as calling sk_free(detach())
    */
    void free() {
        sk_free(fPtr);
        fPtr = NULL;
    }

private:
    void* fPtr;
    // illegal
    SkAutoFree(const SkAutoFree&);
    SkAutoFree& operator=(const SkAutoFree&);
};

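/*
 *  Example (illustrative only): adopt a heap block so it is released on every
 *  exit path, or hand it off with detach() when ownership moves elsewhere.
 *  byteCount and fillBuffer() are placeholders, not part of this header.
 *
 *      SkAutoFree buffer(sk_malloc_throw(byteCount));
 *      fillBuffer(buffer.get(), byteCount);
 *      void* owned = buffer.detach();   // caller must sk_free(owned) later
 */
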
/**
 *  Manage an allocated block of heap memory. This object is the sole manager of
 *  the lifetime of the block, so the caller must not call sk_free() or delete
 *  on the block, unless detach() was called.
 */
class SkAutoMalloc : public SkNoncopyable {
public:
    explicit SkAutoMalloc(size_t size = 0) {
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;
    }

    ~SkAutoMalloc() {
        sk_free(fPtr);
    }

    /**
     *  Passed to reset to specify what happens if the requested size is smaller
     *  than the current size (and the current block was dynamically allocated).
     */
    enum OnShrink {
        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, free the old block and
         *  malloc a new block of the smaller size.
         */
        kAlloc_OnShrink,

        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, just return the old
         *  block.
         */
        kReuse_OnShrink
    };

    /**
     *  Reallocates the block to a new size. The ptr may or may not change.
     */
    void* reset(size_t size, OnShrink shrink = kAlloc_OnShrink, bool* didChangeAlloc = NULL) {
        if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
            if (NULL != didChangeAlloc) {
                *didChangeAlloc = false;
            }
            return fPtr;
        }

        sk_free(fPtr);
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;
        if (NULL != didChangeAlloc) {
            *didChangeAlloc = true;
        }

        return fPtr;
    }

    /**
     *  Releases the block back to the heap
     */
    void free() {
        this->reset(0);
    }

    /**
     *  Return the allocated block.
     */
    void* get() { return fPtr; }
    const void* get() const { return fPtr; }

    /** Transfer ownership of the current ptr to the caller, setting the
        internal reference to null. Note the caller is responsible for calling
        sk_free on the returned address.
    */
    void* detach() {
        void* ptr = fPtr;
        fPtr = NULL;
        fSize = 0;
        return ptr;
    }

private:
    void*   fPtr;
    size_t  fSize;  // can be larger than the requested size (see kReuse)
};

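/*
 *  Example (illustrative only): grow a reusable scratch buffer per call while
 *  letting the destructor release whatever is left. byteCount is a
 *  caller-supplied size, not part of this header.
 *
 *      SkAutoMalloc storage;
 *      void* scratch = storage.reset(byteCount, SkAutoMalloc::kReuse_OnShrink);
 *      // scratch stays valid until the next reset() or until storage dies
 */
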
/**
 *  Manage an allocated block of memory. If the requested size is <= kSize, then
 *  the allocation will come from the stack rather than the heap. This object
 *  is the sole manager of the lifetime of the block, so the caller must not
 *  call sk_free() or delete on the block.
 */
template <size_t kSize> class SkAutoSMalloc : SkNoncopyable {
public:
    /**
     *  Creates initially empty storage. get() returns a ptr to the kSize bytes
     *  of stack storage. Call reset(size) to request a block of a specific
     *  size.
     */
    SkAutoSMalloc() {
        fPtr = fStorage;
        fSize = kSize;
    }

    /**
     *  Allocate a block of the specified size. If size <= kSize, then the
     *  allocation will come from the stack, otherwise it will be dynamically
     *  allocated.
     */
    explicit SkAutoSMalloc(size_t size) {
        fPtr = fStorage;
        fSize = kSize;
        this->reset(size);
    }

    /**
     *  Free the allocated block (if any). If the block was small enough to
     *  have been allocated on the stack (size <= kSize) then this does nothing.
     */
    ~SkAutoSMalloc() {
        if (fPtr != (void*)fStorage) {
            sk_free(fPtr);
        }
    }

    /**
     *  Return the allocated block. May return non-null even if the block is
     *  of zero size. Since this may be on the stack or dynamically allocated,
     *  the caller must not call sk_free() on it, but must rely on SkAutoSMalloc
     *  to manage it.
     */
    void* get() const { return fPtr; }

    /**
     *  Return a new block of the requested size, freeing (as necessary) any
     *  previously allocated block. As with the constructor, if size <= kSize
     *  then the returned block may be allocated locally, rather than from the
     *  heap.
     */
    void* reset(size_t size,
                SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink,
                bool* didChangeAlloc = NULL) {
        size = (size < kSize) ? kSize : size;
        bool alloc = size != fSize && (SkAutoMalloc::kAlloc_OnShrink == shrink || size > fSize);
        if (NULL != didChangeAlloc) {
            *didChangeAlloc = alloc;
        }
        if (alloc) {
            if (fPtr != (void*)fStorage) {
                sk_free(fPtr);
            }

            if (size == kSize) {
                SkASSERT(fPtr != fStorage); // otherwise we lied when setting didChangeAlloc.
                fPtr = fStorage;
            } else {
                fPtr = sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_TEMP);
            }

            fSize = size;
        }
        SkASSERT(fSize >= size && fSize >= kSize);
        SkASSERT((fPtr == fStorage) || fSize > kSize);
        return fPtr;
    }

private:
    void*       fPtr;
    size_t      fSize;  // can be larger than the requested size (see kReuse)
    uint32_t    fStorage[(kSize + 3) >> 2];
};

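/*
 *  Example (illustrative only): small payloads stay in the 64 bytes of inline
 *  storage, while larger ones transparently fall back to the heap. byteCount
 *  is a caller-supplied size, not part of this header.
 *
 *      SkAutoSMalloc<64> storage(byteCount);
 *      void* block = storage.get();   // stack-backed when byteCount <= 64
 */
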
#endif /* C++ */

#endif