// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud (at) inria.fr>
// Copyright (C) 2008-2009 Benoit Jacob <jacob.benoit.1 (at) gmail.com>
// Copyright (C) 2009 Kenneth Riddile <kfriddile (at) yahoo.com>
// Copyright (C) 2010 Hauke Heibel <hauke.heibel (at) gmail.com>
// Copyright (C) 2010 Thomas Capricelli <orzel (at) freehackers.org>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


/*****************************************************************************
*** Platform checks for aligned malloc functions                           ***
*****************************************************************************/

#ifndef EIGEN_MEMORY_H
#define EIGEN_MEMORY_H

#ifndef EIGEN_MALLOC_ALREADY_ALIGNED

// Try to determine automatically if malloc is already aligned.

// On 64-bit systems, glibc's malloc returns 16-byte-aligned pointers, see:
//   http://www.gnu.org/s/libc/manual/html_node/Aligned-Memory-Blocks.html
// This is true at least since glibc 2.8.
// This leaves the question of how to detect 64-bit. According to this document,
//   http://gcc.fyxm.net/summit/2003/Porting%20to%2064%20bit.pdf
// page 114, "[The] LP64 model [...] is used by all 64-bit UNIX ports", so it is
// quite safe, at least within the context of glibc, to equate 64-bit with LP64.
#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
 && defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ )
  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
#endif

// FreeBSD 6 seems to have 16-byte-aligned malloc.
//   See http://svn.freebsd.org/viewvc/base/stable/6/lib/libc/stdlib/malloc.c?view=markup
// FreeBSD 7 seems to have 16-byte-aligned malloc except on ARM and MIPS architectures.
//   See http://svn.freebsd.org/viewvc/base/stable/7/lib/libc/stdlib/malloc.c?view=markup
#if defined(__FreeBSD__) && !defined(__arm__) && !defined(__mips__)
  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
#endif

#if defined(__APPLE__) \
 || defined(_WIN64) \
 || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED \
 || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
  #define EIGEN_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_MALLOC_ALREADY_ALIGNED 0
#endif

#endif

// See bug 554 (http://eigen.tuxfamily.org/bz/show_bug.cgi?id=554)
// It seems to be unsafe to check _POSIX_ADVISORY_INFO without including unistd.h first.
// Currently, let's include it only on unix systems:
#if defined(__unix__) || defined(__unix)
  #include <unistd.h>
  #if ((defined __QNXNTO__) || (defined _GNU_SOURCE) || (defined __PGI) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0)
    #define EIGEN_HAS_POSIX_MEMALIGN 1
  #endif
#endif

#ifndef EIGEN_HAS_POSIX_MEMALIGN
  #define EIGEN_HAS_POSIX_MEMALIGN 0
#endif

#if defined(EIGEN_VECTORIZE_SSE) && !defined(EIGEN_ANDROID_POSIX_MEMALIGN_WR)
  #define EIGEN_HAS_MM_MALLOC 1
#else
  #define EIGEN_HAS_MM_MALLOC 0
#endif

namespace Eigen {

namespace internal {

inline void throw_std_bad_alloc()
{
  #ifdef EIGEN_EXCEPTIONS
    throw std::bad_alloc();
  #else
    // With exceptions disabled, provoke an allocation failure by requesting
    // the largest representable allocation from operator new[].
    std::size_t huge = static_cast<std::size_t>(-1);
    new int[huge];
  #endif
}

/*****************************************************************************
*** Implementation of handmade aligned functions                           ***
*****************************************************************************/

/* ----- Hand made implementations of aligned malloc/free and realloc ----- */

/** \internal Like malloc, but the returned pointer is guaranteed to be 16-byte aligned.
  * Fast, but wastes 16 additional bytes of memory. Does not throw any exception.
  */
inline void* handmade_aligned_malloc(std::size_t size)
{
  void *original = std::malloc(size+16);
  if (original == 0) return 0;
  // Round up to the next 16-byte boundary, and stash the pointer returned by
  // std::malloc in the word just before the aligned block.
  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
  *(reinterpret_cast<void**>(aligned) - 1) = original;
  return aligned;
}
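
/** \internal For illustration, a usage sketch of the bookkeeping scheme above
  * (the word immediately before the returned pointer stores the address that
  * std::malloc returned, which is what handmade_aligned_free releases):
  * \code
  * void* p = handmade_aligned_malloc(64);
  * assert((reinterpret_cast<std::size_t>(p) & 15) == 0); // 16-byte aligned
  * void* original = *(reinterpret_cast<void**>(p) - 1);  // the malloc'd block
  * handmade_aligned_free(p);                             // frees 'original'
  * \endcode
  */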

/** \internal Frees memory allocated with handmade_aligned_malloc */
inline void handmade_aligned_free(void *ptr)
{
  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
}

/** \internal
  * \brief Reallocates aligned memory.
  * Since we know that our handmade version is based on std::realloc,
  * we can use std::realloc to implement efficient reallocation.
  */
inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)
{
  if (ptr == 0) return handmade_aligned_malloc(size);
  void *original = *(reinterpret_cast<void**>(ptr) - 1);
  std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
  original = std::realloc(original,size+16);
  if (original == 0) return 0;
  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(15))) + 16);
  void *previous_aligned = static_cast<char *>(original)+previous_offset;
  if(aligned!=previous_aligned)
    std::memmove(aligned, previous_aligned, size);

  *(reinterpret_cast<void**>(aligned) - 1) = original;
  return aligned;
}

/*****************************************************************************
*** Implementation of generic aligned realloc (when no realloc can be used)***
*****************************************************************************/

void* aligned_malloc(std::size_t size);
void  aligned_free(void *ptr);

/** \internal
  * \brief Reallocates aligned memory.
  * Allows reallocation with aligned ptr types. This implementation will
  * always create a new memory chunk and copy the old data.
  */
inline void* generic_aligned_realloc(void* ptr, size_t size, size_t old_size)
{
  if (ptr==0)
    return aligned_malloc(size);

  if (size==0)
  {
    aligned_free(ptr);
    return 0;
  }

  void* newptr = aligned_malloc(size);
  if (newptr == 0)
  {
    #ifdef EIGEN_HAS_ERRNO
    errno = ENOMEM; // according to the standard
    #endif
    return 0;
  }

  if (ptr != 0)
  {
    std::memcpy(newptr, ptr, (std::min)(size,old_size));
    aligned_free(ptr);
  }

  return newptr;
}

/*****************************************************************************
*** Implementation of portable aligned versions of malloc/free/realloc     ***
*****************************************************************************/

#ifdef EIGEN_NO_MALLOC
inline void check_that_malloc_is_allowed()
{
  eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
}
#elif defined EIGEN_RUNTIME_NO_MALLOC
inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
{
  static bool value = true;
  if (update)
    value = new_value;
  return value;
}
inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
inline void check_that_malloc_is_allowed()
{
  eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
}
#else
inline void check_that_malloc_is_allowed()
{}
#endif
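
/** \internal Usage sketch (assuming EIGEN_RUNTIME_NO_MALLOC was defined before
  * including any Eigen header), e.g. to assert in a test that a code path
  * performs no heap allocation:
  * \code
  * Eigen::internal::set_is_malloc_allowed(false);
  * run_computation_that_must_not_allocate(); // asserts if it allocates
  * Eigen::internal::set_is_malloc_allowed(true);
  * \endcode
  * Here run_computation_that_must_not_allocate() is a placeholder for the
  * code under test.
  */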

/** \internal Allocates \a size bytes. The returned pointer is guaranteed to have 16-byte alignment.
  * On allocation error, a std::bad_alloc exception is thrown.
  */
inline void* aligned_malloc(size_t size)
{
  check_that_malloc_is_allowed();

  void *result;
  #if !EIGEN_ALIGN
    result = std::malloc(size);
  #elif EIGEN_MALLOC_ALREADY_ALIGNED
    result = std::malloc(size);
  #elif EIGEN_HAS_POSIX_MEMALIGN
    if(posix_memalign(&result, 16, size)) result = 0;
  #elif EIGEN_HAS_MM_MALLOC
    result = _mm_malloc(size, 16);
  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
    result = _aligned_malloc(size, 16);
  #else
    result = handmade_aligned_malloc(size);
  #endif

  if(!result && size)
    throw_std_bad_alloc();

  return result;
}

/** \internal Frees memory allocated with aligned_malloc. */
inline void aligned_free(void *ptr)
{
  #if !EIGEN_ALIGN
    std::free(ptr);
  #elif EIGEN_MALLOC_ALREADY_ALIGNED
    std::free(ptr);
  #elif EIGEN_HAS_POSIX_MEMALIGN
    std::free(ptr);
  #elif EIGEN_HAS_MM_MALLOC
    _mm_free(ptr);
  #elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
    _aligned_free(ptr);
  #else
    handmade_aligned_free(ptr);
  #endif
}

/**
* \internal
* \brief Reallocates an aligned block of memory.
* \throws std::bad_alloc on allocation failure
**/
inline void* aligned_realloc(void *ptr, size_t new_size, size_t old_size)
{
  EIGEN_UNUSED_VARIABLE(old_size);

  void *result;
#if !EIGEN_ALIGN
  result = std::realloc(ptr,new_size);
#elif EIGEN_MALLOC_ALREADY_ALIGNED
  result = std::realloc(ptr,new_size);
#elif EIGEN_HAS_POSIX_MEMALIGN
  result = generic_aligned_realloc(ptr,new_size,old_size);
#elif EIGEN_HAS_MM_MALLOC
  // The defined(_mm_free) is just here to verify that this MSVC version
  // implements _mm_malloc/_mm_free based on the corresponding _aligned_
  // functions. This may not always be the case and we just try to be safe.
  #if defined(_MSC_VER) && (!defined(_WIN32_WCE)) && defined(_mm_free)
    result = _aligned_realloc(ptr,new_size,16);
  #else
    result = generic_aligned_realloc(ptr,new_size,old_size);
  #endif
#elif defined(_MSC_VER) && (!defined(_WIN32_WCE))
  result = _aligned_realloc(ptr,new_size,16);
#else
  result = handmade_aligned_realloc(ptr,new_size,old_size);
#endif

  if (!result && new_size)
    throw_std_bad_alloc();

  return result;
}

/*****************************************************************************
*** Implementation of conditionally aligned functions                      ***
*****************************************************************************/

/** \internal Allocates \a size bytes. If Align is true, then the returned pointer is 16-byte-aligned.
  * On allocation error, a std::bad_alloc exception is thrown.
  */
template<bool Align> inline void* conditional_aligned_malloc(size_t size)
{
  return aligned_malloc(size);
}

template<> inline void* conditional_aligned_malloc<false>(size_t size)
{
  check_that_malloc_is_allowed();

  void *result = std::malloc(size);
  if(!result && size)
    throw_std_bad_alloc();
  return result;
}

/** \internal Frees memory allocated with conditional_aligned_malloc */
template<bool Align> inline void conditional_aligned_free(void *ptr)
{
  aligned_free(ptr);
}

template<> inline void conditional_aligned_free<false>(void *ptr)
{
  std::free(ptr);
}

template<bool Align> inline void* conditional_aligned_realloc(void* ptr, size_t new_size, size_t old_size)
{
  return aligned_realloc(ptr, new_size, old_size);
}

template<> inline void* conditional_aligned_realloc<false>(void* ptr, size_t new_size, size_t)
{
  return std::realloc(ptr, new_size);
}

/*****************************************************************************
*** Construction/destruction of array elements                             ***
*****************************************************************************/

/** \internal Constructs the elements of an array.
  * The \a size parameter tells how many objects to call the constructor of T on.
  */
template<typename T> inline T* construct_elements_of_array(T *ptr, size_t size)
{
  for (size_t i=0; i < size; ++i) ::new (ptr + i) T;
  return ptr;
}

/** \internal Destructs the elements of an array.
  * The \a size parameter tells how many objects to call the destructor of T on.
  */
template<typename T> inline void destruct_elements_of_array(T *ptr, size_t size)
{
  // always destruct an array starting from the end.
  if(ptr)
    while(size) ptr[--size].~T();
}

/*****************************************************************************
*** Implementation of aligned new/delete-like functions                    ***
*****************************************************************************/

template<typename T>
EIGEN_ALWAYS_INLINE void check_size_for_overflow(size_t size)
{
  if(size > size_t(-1) / sizeof(T))
    throw_std_bad_alloc();
}
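
// For illustration: check_size_for_overflow guards the multiplication
// sizeof(T)*size against size_t wrap-around. On a hypothetical 32-bit system
// with sizeof(T)==8, size_t(-1)/sizeof(T) == 0x1FFFFFFF, so any request above
// 0x1FFFFFFF elements throws before the byte count 8*size can silently wrap.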

/** \internal Allocates \a size objects of type T. The returned pointer is guaranteed to have 16-byte alignment.
  * On allocation error, a std::bad_alloc exception is thrown.
  * The default constructor of T is called.
  */
template<typename T> inline T* aligned_new(size_t size)
{
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
  return construct_elements_of_array(result, size);
}

template<typename T, bool Align> inline T* conditional_aligned_new(size_t size)
{
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
  return construct_elements_of_array(result, size);
}

/** \internal Deletes objects constructed with aligned_new.
  * The \a size parameter tells how many objects to call the destructor of T on.
  */
template<typename T> inline void aligned_delete(T *ptr, size_t size)
{
  destruct_elements_of_array<T>(ptr, size);
  aligned_free(ptr);
}

/** \internal Deletes objects constructed with conditional_aligned_new.
  * The \a size parameter tells how many objects to call the destructor of T on.
  */
template<typename T, bool Align> inline void conditional_aligned_delete(T *ptr, size_t size)
{
  destruct_elements_of_array<T>(ptr, size);
  conditional_aligned_free<Align>(ptr);
}

template<typename T, bool Align> inline T* conditional_aligned_realloc_new(T* pts, size_t new_size, size_t old_size)
{
  check_size_for_overflow<T>(new_size);
  check_size_for_overflow<T>(old_size);
  if(new_size < old_size)
    destruct_elements_of_array(pts+new_size, old_size-new_size);
  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
  if(new_size > old_size)
    construct_elements_of_array(result+old_size, new_size-old_size);
  return result;
}


template<typename T, bool Align> inline T* conditional_aligned_new_auto(size_t size)
{
  if(size==0)
    return 0; // short-cut. Also fixes Bug 884
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
  if(NumTraits<T>::RequireInitialization)
    construct_elements_of_array(result, size);
  return result;
}

template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, size_t new_size, size_t old_size)
{
  check_size_for_overflow<T>(new_size);
  check_size_for_overflow<T>(old_size);
  if(NumTraits<T>::RequireInitialization && (new_size < old_size))
    destruct_elements_of_array(pts+new_size, old_size-new_size);
  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
  if(NumTraits<T>::RequireInitialization && (new_size > old_size))
    construct_elements_of_array(result+old_size, new_size-old_size);
  return result;
}

template<typename T, bool Align> inline void conditional_aligned_delete_auto(T *ptr, size_t size)
{
  if(NumTraits<T>::RequireInitialization)
    destruct_elements_of_array<T>(ptr, size);
  conditional_aligned_free<Align>(ptr);
}

/****************************************************************************/

/** \internal Returns the index of the first element of the array that is well aligned for vectorization.
  *
  * \param array the address of the start of the array
  * \param size the size of the array
  *
  * \note If no element of the array is well aligned, the size of the array is returned. Typically,
  * for example with SSE, "well aligned" means 16-byte-aligned. If vectorization is disabled or if the
  * packet size for the given scalar type is 1, then everything is considered well-aligned.
  *
  * \note If the scalar type is vectorizable, we rely on the following assumptions: sizeof(Scalar) is a
  * power of 2, the packet size in bytes is also a power of 2, and is a multiple of sizeof(Scalar). On the
  * other hand, we do not assume that the array address is a multiple of sizeof(Scalar), as that fails for
  * example with Scalar=double on certain 32-bit platforms, see bug #79.
  *
  * There is also the variant first_aligned(const MatrixBase&) defined in DenseCoeffsBase.h.
  */
template<typename Scalar, typename Index>
static inline Index first_aligned(const Scalar* array, Index size)
{
  static const Index PacketSize = packet_traits<Scalar>::size;
  static const Index PacketAlignedMask = PacketSize-1;

  if(PacketSize==1)
  {
    // Either there is no vectorization, or a packet consists of exactly 1 scalar so that all elements
    // of the array have the same alignment.
    return 0;
  }
  else if(size_t(array) & (sizeof(Scalar)-1))
  {
    // There is vectorization for this scalar type, but the array is not aligned to the size of a single scalar.
    // Consequently, no element of the array is well aligned.
    return size;
  }
  else
  {
    return std::min<Index>( (PacketSize - (Index((size_t(array)/sizeof(Scalar))) & PacketAlignedMask))
                           & PacketAlignedMask, size);
  }
}
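
/** \internal For illustration, a worked example of first_aligned under the
  * assumption of 4-float SSE packets (PacketSize==4). For a float array at
  * the hypothetical address 0x1008:
  * \code
  * size_t(array) & (sizeof(float)-1)   // == 0, array is scalar-aligned
  * (size_t(array)/sizeof(float)) & 3   // == 2, misaligned by 2 floats
  * (4 - 2) & 3                         // == 2, so first_aligned returns 2
  * \endcode
  * Element 2 then sits at address 0x1010, the first 16-byte boundary.
  */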

/** \internal Returns the smallest integer multiple of \a base that is greater than or equal to \a size.
  */
template<typename Index>
inline static Index first_multiple(Index size, Index base)
{
  return ((size+base-1)/base)*base;
}
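
// For illustration: first_multiple(13, 8) == 16, and first_multiple(16, 8) == 16.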

// std::copy is much slower than memcpy, so let's introduce a smart_copy which
// uses memcpy for trivial types, i.e., for types that do not require an initialization ctor.
template<typename T, bool UseMemcpy> struct smart_copy_helper;

template<typename T> void smart_copy(const T* start, const T* end, T* target)
{
  smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
}

template<typename T> struct smart_copy_helper<T,true> {
  static inline void run(const T* start, const T* end, T* target)
  { std::memcpy(target, start, std::size_t(end - start) * sizeof(T)); }
};

template<typename T> struct smart_copy_helper<T,false> {
  static inline void run(const T* start, const T* end, T* target)
  { std::copy(start, end, target); }
};
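
/** \internal Usage sketch: for a POD-like scalar such as float,
  * NumTraits<float>::RequireInitialization is 0, so smart_copy dispatches
  * to the memcpy specialization:
  * \code
  * float src[4] = {1.f, 2.f, 3.f, 4.f};
  * float dst[4];
  * Eigen::internal::smart_copy(src, src+4, dst); // memcpy path
  * \endcode
  */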


/*****************************************************************************
*** Implementation of runtime stack allocation (falling back to malloc)    ***
*****************************************************************************/

// You can override Eigen's default behavior regarding alloca by defining EIGEN_ALLOCA
// to the appropriate stack allocation function.
#ifndef EIGEN_ALLOCA
  #if (defined __linux__) || (defined __APPLE__) || (defined alloca)
    #define EIGEN_ALLOCA alloca
  #elif defined(_MSC_VER)
    #define EIGEN_ALLOCA _alloca
  #endif
#endif

// This helper class constructs the allocated memory, and takes care of destructing and freeing the handled data
// at destruction time. In practice this helper class is mainly useful to avoid memory leaks in case of exceptions.
template<typename T> class aligned_stack_memory_handler
{
  public:
    /* Creates a stack_memory_handler responsible for the buffer \a ptr of size \a size.
     * Note that \a ptr can be 0 regardless of the other parameters.
     * This constructor takes care of constructing/initializing the elements of the buffer if required by the scalar type T (see NumTraits<T>::RequireInitialization).
     * In this case, the buffer elements will also be destructed when this handler is destructed.
     * Finally, if \a dealloc is true, then the pointer \a ptr is freed.
     **/
    aligned_stack_memory_handler(T* ptr, size_t size, bool dealloc)
      : m_ptr(ptr), m_size(size), m_deallocate(dealloc)
    {
      if(NumTraits<T>::RequireInitialization && m_ptr)
        Eigen::internal::construct_elements_of_array(m_ptr, size);
    }
    ~aligned_stack_memory_handler()
    {
      if(NumTraits<T>::RequireInitialization && m_ptr)
        Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
      if(m_deallocate)
        Eigen::internal::aligned_free(m_ptr);
    }
  protected:
    T* m_ptr;
    size_t m_size;
    bool m_deallocate;
};

} // end namespace internal

/** \internal
  * Declares, allocates and constructs an aligned buffer named NAME of SIZE elements of type TYPE on the stack
  * if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT, and if stack allocation is supported by the platform
  * (currently, this is Linux, OS X, and Visual Studio). Otherwise the memory is allocated on the heap.
  * The allocated buffer is automatically deleted when exiting the scope of this declaration.
  * If BUFFER is non-null, then the declared variable is simply an alias for BUFFER, and no allocation/deletion occurs.
  * Here is an example:
  * \code
  * {
  *   ei_declare_aligned_stack_constructed_variable(float,data,size,0);
  *   // use data[0] to data[size-1]
  * }
  * \endcode
  * The underlying stack allocation function can be controlled with the EIGEN_ALLOCA preprocessor token.
  */
#ifdef EIGEN_ALLOCA

  // On these platforms, alloca is not guaranteed to return 16-byte-aligned
  // memory, so over-allocate by 16 bytes and round up manually.
  #if defined(__arm__) || defined(_WIN32)
    #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((reinterpret_cast<size_t>(EIGEN_ALLOCA(SIZE+16)) & ~(size_t(15))) + 16)
  #else
    #define EIGEN_ALIGNED_ALLOCA EIGEN_ALLOCA
  #endif

  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
    TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
               : reinterpret_cast<TYPE*>( \
                      (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
                    : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) );  \
    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)

#else

  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
    TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE));    \
    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)

#endif


/*****************************************************************************
*** Implementation of EIGEN_MAKE_ALIGNED_OPERATOR_NEW [_IF]                ***
*****************************************************************************/

#if EIGEN_ALIGN
  #ifdef EIGEN_EXCEPTIONS
    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void* operator new(size_t size, const std::nothrow_t&) throw() { \
        try { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
        catch (...) { return 0; } \
      }
  #else
    #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void* operator new(size_t size, const std::nothrow_t&) throw() { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      }
  #endif

  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
      void *operator new(size_t size) { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      } \
      void *operator new[](size_t size) { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      } \
      void operator delete(void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete[](void * ptr) throw() { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      /* in-place new and delete. since (at least afaik) there is no actual   */ \
      /* memory allocated we can safely let the default implementation handle */ \
      /* this particular case. */ \
      static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \
      static void *operator new[](size_t size, void* ptr) { return ::operator new[](size,ptr); } \
      void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \
      void operator delete[](void * memory, void *ptr) throw() { return ::operator delete[](memory,ptr); } \
      /* nothrow-new (returns zero instead of std::bad_alloc) */ \
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void operator delete(void *ptr, const std::nothrow_t&) throw() { \
        Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
      } \
      typedef void eigen_aligned_operator_new_marker_type;
#else
  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
#endif

#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0)))
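
/** \internal Typical use of the macro above: a class with a fixed-size
  * vectorizable Eigen member overloads operator new so that heap-allocated
  * instances are 16-byte aligned:
  * \code
  * struct MyObject {
  *   Eigen::Matrix4f transform;
  *   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  * };
  * MyObject* obj = new MyObject; // obj is 16-byte aligned
  * \endcode
  */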

/****************************************************************************/

/** \class aligned_allocator
* \ingroup Core_Module
*
* \brief STL compatible allocator to use with 16-byte-aligned types
*
* Example:
* \code
* // Matrix4f requires 16-byte alignment:
* std::map< int, Matrix4f, std::less<int>,
*           aligned_allocator<std::pair<const int, Matrix4f> > > my_map_mat4;
* // Vector3f does not require 16-byte alignment, no need to use Eigen's allocator:
* std::map< int, Vector3f > my_map_vec3;
* \endcode
*
* \sa \ref TopicStlContainers.
*/
template<class T>
class aligned_allocator
{
public:
    typedef size_t    size_type;
    typedef std::ptrdiff_t difference_type;
    typedef T*        pointer;
    typedef const T*  const_pointer;
    typedef T&        reference;
    typedef const T&  const_reference;
    typedef T         value_type;

    template<class U>
    struct rebind
    {
        typedef aligned_allocator<U> other;
    };

    pointer address( reference value ) const
    {
        return &value;
    }

    const_pointer address( const_reference value ) const
    {
        return &value;
    }

    aligned_allocator()
    {
    }

    aligned_allocator( const aligned_allocator& )
    {
    }

    template<class U>
    aligned_allocator( const aligned_allocator<U>& )
    {
    }

    ~aligned_allocator()
    {
    }

    size_type max_size() const
    {
        return (std::numeric_limits<size_type>::max)();
    }

    pointer allocate( size_type num, const void* hint = 0 )
    {
        EIGEN_UNUSED_VARIABLE(hint);
        internal::check_size_for_overflow<T>(num);
        return static_cast<pointer>( internal::aligned_malloc( num * sizeof(T) ) );
    }

    void construct( pointer p, const T& value )
    {
        ::new( p ) T( value );
    }

    void destroy( pointer p )
    {
        p->~T();
    }

    void deallocate( pointer p, size_type /*num*/ )
    {
        internal::aligned_free( p );
    }

    bool operator!=(const aligned_allocator<T>& ) const
    { return false; }

    bool operator==(const aligned_allocator<T>& ) const
    { return true; }
};

//---------- Cache sizes ----------

#if !defined(EIGEN_NO_CPUID)
#  if defined(__GNUC__) && ( defined(__i386__) || defined(__x86_64__) )
#    if defined(__PIC__) && defined(__i386__)
       // Case for x86 with PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
#    elif defined(__PIC__) && defined(__x86_64__)
       // Case for x64 with PIC. In theory this is only a problem with recent gcc and with medium or large code model, not with the default small code model.
       // However, we cannot detect which code model is used, and the xchg overhead is negligible anyway.
#      define EIGEN_CPUID(abcd,func,id) \
        __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
#    else
       // Case for x86_64 or x86 w/o PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
#    endif
#  elif defined(_MSC_VER)
#    if (_MSC_VER > 1500) && ( defined(_M_IX86) || defined(_M_X64) )
#      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
#    endif
#  endif
#endif

namespace internal {

#ifdef EIGEN_CPUID

inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
{
  return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
}
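
// For illustration: CPUID leaf 0 returns the 12-character vendor string in
// three little-endian 4-byte chunks, in EBX, EDX, ECX order. For example,
// "GenuineIntel" is {0x756e6547 /*"Genu"*/, 0x49656e69 /*"ineI"*/,
// 0x6c65746e /*"ntel"*/}, matching the constants in queryCacheSizes() below.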

inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
{
  int abcd[4];
  l1 = l2 = l3 = 0;
  int cache_id = 0;
  int cache_type = 0;
  do {
    abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
    EIGEN_CPUID(abcd,0x4,cache_id);
    cache_type  = (abcd[0] & 0x0F) >> 0;
    if(cache_type==1||cache_type==3) // data or unified cache
    {
      int cache_level = (abcd[0] & 0xE0) >> 5;  // A[7:5]
      int ways        = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
      int partitions  = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
      int line_size   = (abcd[1] & 0x00000FFF) >>  0; // B[11:0]
      int sets        = (abcd[2]);                    // C[31:0]

      int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);

      switch(cache_level)
      {
        case 1: l1 = cache_size; break;
        case 2: l2 = cache_size; break;
        case 3: l3 = cache_size; break;
        default: break;
      }
    }
    cache_id++;
  } while(cache_type>0 && cache_id<16);
}

inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
{
  int abcd[4];
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  l1 = l2 = l3 = 0;
  EIGEN_CPUID(abcd,0x00000002,0);
  unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
  bool check_for_p2_core2 = false;
  for(int i=0; i<14; ++i)
  {
    switch(bytes[i])
    {
      case 0x0A: l1 = 8; break;    // 0Ah   data L1 cache, 8 KB, 2 ways, 32 byte lines
      case 0x0C: l1 = 16; break;   // 0Ch   data L1 cache, 16 KB, 4 ways, 32 byte lines
      case 0x0E: l1 = 24; break;   // 0Eh   data L1 cache, 24 KB, 6 ways, 64 byte lines
      case 0x10: l1 = 16; break;   // 10h   data L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
      case 0x15: l1 = 16; break;   // 15h   code L1 cache, 16 KB, 4 ways, 32 byte lines (IA-64)
      case 0x2C: l1 = 32; break;   // 2Ch   data L1 cache, 32 KB, 8 ways, 64 byte lines
      case 0x30: l1 = 32; break;   // 30h   code L1 cache, 32 KB, 8 ways, 64 byte lines
      case 0x60: l1 = 16; break;   // 60h   data L1 cache, 16 KB, 8 ways, 64 byte lines, sectored
      case 0x66: l1 = 8; break;    // 66h   data L1 cache, 8 KB, 4 ways, 64 byte lines, sectored
      case 0x67: l1 = 16; break;   // 67h   data L1 cache, 16 KB, 4 ways, 64 byte lines, sectored
      case 0x68: l1 = 32; break;   // 68h   data L1 cache, 32 KB, 4 ways, 64 byte lines, sectored
      case 0x1A: l2 = 96; break;   // code and data L2 cache, 96 KB, 6 ways, 64 byte lines (IA-64)
      case 0x22: l3 = 512; break;   // code and data L3 cache, 512 KB, 4 ways (!), 64 byte lines, dual-sectored
      case 0x23: l3 = 1024; break;   // code and data L3 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x25: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x29: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x39: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 64 byte lines, sectored
      case 0x3A: l2 = 192; break;   // code and data L2 cache, 192 KB, 6 ways, 64 byte lines, sectored
      case 0x3B: l2 = 128; break;   // code and data L2 cache, 128 KB, 2 ways, 64 byte lines, sectored
      case 0x3C: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 64 byte lines, sectored
      case 0x3D: l2 = 384; break;   // code and data L2 cache, 384 KB, 6 ways, 64 byte lines, sectored
      case 0x3E: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines, sectored
      case 0x40: l2 = 0; break;     // no integrated L2 cache (P6 core) or L3 cache (P4 core)
      case 0x41: l2 = 128; break;   // code and data L2 cache, 128 KB, 4 ways, 32 byte lines
      case 0x42: l2 = 256; break;   // code and data L2 cache, 256 KB, 4 ways, 32 byte lines
      case 0x43: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 32 byte lines
      case 0x44: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 32 byte lines
      case 0x45: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 4 ways, 32 byte lines
      case 0x46: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines
      case 0x47: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 8 ways, 64 byte lines
      case 0x48: l2 = 3072; break;   // code and data L2 cache, 3072 KB, 12 ways, 64 byte lines
      case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break; // code and data L3 cache, 4096 KB, 16 ways, 64 byte lines (P4) or L2 for core2
      case 0x4A: l3 = 6144; break;   // code and data L3 cache, 6144 KB, 12 ways, 64 byte lines
      case 0x4B: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 16 ways, 64 byte lines
      case 0x4C: l3 = 12288; break;   // code and data L3 cache, 12288 KB, 12 ways, 64 byte lines
      case 0x4D: l3 = 16384; break;   // code and data L3 cache, 16384 KB, 16 ways, 64 byte lines
      case 0x4E: l2 = 6144; break;   // code and data L2 cache, 6144 KB, 24 ways, 64 byte lines
      case 0x78: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 4 ways, 64 byte lines
      case 0x79: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7A: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7B: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7C: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines, dual-sectored
      case 0x7D: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 64 byte lines
      case 0x7E: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 128 byte lines, sect. (IA-64)
      case 0x7F: l2 = 512; break;   // code and data L2 cache, 512 KB, 2 ways, 64 byte lines
      case 0x80: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 64 byte lines
      case 0x81: l2 = 128; break;   // code and data L2 cache, 128 KB, 8 ways, 32 byte lines
      case 0x82: l2 = 256; break;   // code and data L2 cache, 256 KB, 8 ways, 32 byte lines
      case 0x83: l2 = 512; break;   // code and data L2 cache, 512 KB, 8 ways, 32 byte lines
      case 0x84: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 32 byte lines
      case 0x85: l2 = 2048; break;   // code and data L2 cache, 2048 KB, 8 ways, 32 byte lines
      case 0x86: l2 = 512; break;   // code and data L2 cache, 512 KB, 4 ways, 64 byte lines
      case 0x87: l2 = 1024; break;   // code and data L2 cache, 1024 KB, 8 ways, 64 byte lines
      case 0x88: l3 = 2048; break;   // code and data L3 cache, 2048 KB, 4 ways, 64 byte lines (IA-64)
      case 0x89: l3 = 4096; break;   // code and data L3 cache, 4096 KB, 4 ways, 64 byte lines (IA-64)
      case 0x8A: l3 = 8192; break;   // code and data L3 cache, 8192 KB, 4 ways, 64 byte lines (IA-64)
      case 0x8D: l3 = 3072; break;   // code and data L3 cache, 3072 KB, 12 ways, 128 byte lines (IA-64)

      default: break;
    }
  }
  if(check_for_p2_core2 && l2 == l3)
    l3 = 0;
  l1 *= 1024;
  l2 *= 1024;
  l3 *= 1024;
}

inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
{
  if(max_std_funcs>=4)
    queryCacheSizes_intel_direct(l1,l2,l3);
  else
    queryCacheSizes_intel_codes(l1,l2,l3);
}

inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
{
  int abcd[4];
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  EIGEN_CPUID(abcd,0x80000005,0);
  l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  EIGEN_CPUID(abcd,0x80000006,0);
  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = L2 cache size in KB
  l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31:18] = L3 cache size in units of 512 KB
}
#endif

/** \internal
 * Queries and returns the cache sizes in bytes of the L1, L2, and L3 data caches respectively */
inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
  #ifdef EIGEN_CPUID
  int abcd[4];
  const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
  const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
  const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"

  // identify the CPU vendor
  EIGEN_CPUID(abcd,0x0,0);
  int max_std_funcs = abcd[0]; // EAX of CPUID(0) holds the highest supported standard function
  if(cpuid_is_vendor(abcd,GenuineIntel))
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
  else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
    queryCacheSizes_amd(l1,l2,l3);
  else
    // by default let's use Intel's API
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);

  // here is the list of other vendors:
//   ||cpuid_is_vendor(abcd,"VIA VIA VIA ")
//   ||cpuid_is_vendor(abcd,"CyrixInstead")
//   ||cpuid_is_vendor(abcd,"CentaurHauls")
//   ||cpuid_is_vendor(abcd,"GenuineTMx86")
//   ||cpuid_is_vendor(abcd,"TransmetaCPU")
//   ||cpuid_is_vendor(abcd,"RiseRiseRise")
//   ||cpuid_is_vendor(abcd,"Geode by NSC")
//   ||cpuid_is_vendor(abcd,"SiS SiS SiS ")
//   ||cpuid_is_vendor(abcd,"UMC UMC UMC ")
//   ||cpuid_is_vendor(abcd,"NexGenDriven")
  #else
  l1 = l2 = l3 = -1;
  #endif
}

/** \internal
 * \returns the size in bytes of the L1 data cache */
inline int queryL1CacheSize()
{
  int l1(-1), l2, l3;
  queryCacheSizes(l1,l2,l3);
  return l1;
}

/** \internal
 * \returns the size in bytes of the L2, or of the L3 cache if the latter is present */
inline int queryTopLevelCacheSize()
{
  int l1, l2(-1), l3(-1);
  queryCacheSizes(l1,l2,l3);
  return (std::max)(l2,l3);
}
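
/** \internal Usage sketch (values are hypothetical and platform-dependent;
  * all three are -1 when CPUID is unavailable):
  * \code
  * int l1, l2, l3;
  * Eigen::internal::queryCacheSizes(l1, l2, l3);
  * // e.g. l1 == 32768, l2 == 262144, l3 == 8388608 on a typical desktop CPU
  * \endcode
  */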

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_MEMORY_H