#ifndef EIGEN_MEMORY_H
#define EIGEN_MEMORY_H

#ifndef EIGEN_MALLOC_ALREADY_ALIGNED
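// Try to determine automatically whether the system's malloc already returns
// EIGEN_DEFAULT_ALIGN_BYTES-aligned pointers, in which case the handmade
// aligned allocator below can be bypassed entirely.

// On 64-bit (LP64) systems, glibc's malloc returns 16-byte-aligned pointers
// since glibc 2.8 (see the glibc manual on "Aligned Memory Blocks"), but not
// under AddressSanitizer, which intercepts malloc.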
#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 8) || __GLIBC__>2) \
 && defined(__LP64__) && ! defined( __SANITIZE_ADDRESS__ ) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)
  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED 0
#endif
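// FreeBSD's malloc returns 16-byte-aligned pointers, except on the ARM and
// MIPS architectures.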
#if defined(__FreeBSD__) && !(EIGEN_ARCH_ARM || EIGEN_ARCH_MIPS) && (EIGEN_DEFAULT_ALIGN_BYTES == 16)
  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED 0
#endif
#if (EIGEN_OS_MAC && (EIGEN_DEFAULT_ALIGN_BYTES == 16))     \
 || (EIGEN_OS_WIN64 && (EIGEN_DEFAULT_ALIGN_BYTES == 16))   \
 || EIGEN_GLIBC_MALLOC_ALREADY_ALIGNED                      \
 || EIGEN_FREEBSD_MALLOC_ALREADY_ALIGNED
  #define EIGEN_MALLOC_ALREADY_ALIGNED 1
#else
  #define EIGEN_MALLOC_ALREADY_ALIGNED 0
#endif

#endif // EIGEN_MALLOC_ALREADY_ALIGNED

namespace Eigen {

namespace internal {
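/** \internal Reports an allocation failure: throws std::bad_alloc when
  * exceptions are enabled, and otherwise forces ::operator new to fail by
  * requesting an impossibly large block, so the installed new-handler (if any)
  * still runs. */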
EIGEN_DEVICE_FUNC inline void throw_std_bad_alloc()
{
  #ifdef EIGEN_EXCEPTIONS
    throw std::bad_alloc();
  #else
    std::size_t huge = static_cast<std::size_t>(-1);
    ::operator new(huge);
  #endif
}
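/*****************************************************************************
*** Implementation of handmade aligned functions                           ***
*****************************************************************************/

/** \internal Like malloc, but the returned pointer is guaranteed to be
  * EIGEN_DEFAULT_ALIGN_BYTES aligned. The block is over-allocated, the result
  * is rounded up to the next aligned address, and the pointer returned by
  * std::malloc is stashed in the word just before the aligned address so that
  * the free/realloc counterparts below can recover it. */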
inline void* handmade_aligned_malloc(std::size_t size)
{
  void *original = std::malloc(size+EIGEN_DEFAULT_ALIGN_BYTES);
  if (original == 0) return 0;
  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
  *(reinterpret_cast<void**>(aligned) - 1) = original;
  return aligned;
}
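/** \internal Frees memory allocated with handmade_aligned_malloc. */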
inline void handmade_aligned_free(void *ptr)
{
  if (ptr) std::free(*(reinterpret_cast<void**>(ptr) - 1));
}
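/** \internal Reallocates aligned memory. Since the handmade version is based
  * on std::malloc, std::realloc can be used directly; the payload is then
  * shifted with memmove whenever the new block lands at a different alignment
  * phase than the old one. */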
inline void* handmade_aligned_realloc(void* ptr, std::size_t size, std::size_t = 0)
{
  if (ptr == 0) return handmade_aligned_malloc(size);
  void *original = *(reinterpret_cast<void**>(ptr) - 1);
  std::ptrdiff_t previous_offset = static_cast<char *>(ptr)-static_cast<char *>(original);
  original = std::realloc(original,size+EIGEN_DEFAULT_ALIGN_BYTES);
  if (original == 0) return 0;
  void *aligned = reinterpret_cast<void*>((reinterpret_cast<std::size_t>(original) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1))) + EIGEN_DEFAULT_ALIGN_BYTES);
  void *previous_aligned = static_cast<char *>(original)+previous_offset;
  if(aligned!=previous_aligned)
    std::memmove(aligned, previous_aligned, size);

  *(reinterpret_cast<void**>(aligned) - 1) = original;
  return aligned;
}
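/*****************************************************************************
*** Implementation of portable aligned versions of malloc/free/realloc     ***
*****************************************************************************/

// Heap-allocation guards: with EIGEN_NO_MALLOC any heap allocation fails an
// eigen_assert; with EIGEN_RUNTIME_NO_MALLOC allocation can be allowed or
// forbidden at run time; otherwise check_that_malloc_is_allowed() is a no-op.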
#ifdef EIGEN_NO_MALLOC
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
{
  eigen_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)");
}
#elif defined EIGEN_RUNTIME_NO_MALLOC
EIGEN_DEVICE_FUNC inline bool is_malloc_allowed_impl(bool update, bool new_value = false)
{
  static bool value = true;
  if (update)
    value = new_value;
  return value;
}
EIGEN_DEVICE_FUNC inline bool is_malloc_allowed() { return is_malloc_allowed_impl(false); }
EIGEN_DEVICE_FUNC inline bool set_is_malloc_allowed(bool new_value) { return is_malloc_allowed_impl(true, new_value); }
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
{
  eigen_assert(is_malloc_allowed() && "heap allocation is forbidden (EIGEN_RUNTIME_NO_MALLOC is defined and g_is_malloc_allowed is false)");
}
#else
EIGEN_DEVICE_FUNC inline void check_that_malloc_is_allowed()
{}
#endif
EIGEN_DEVICE_FUNC inline void* aligned_malloc(std::size_t size)
{
  check_that_malloc_is_allowed();

  void *result;
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
  result = std::malloc(size);
  #if EIGEN_DEFAULT_ALIGN_BYTES==16
  eigen_assert((size<16 || (std::size_t(result)%16)==0) && "System's malloc returned an unaligned pointer. Compile with EIGEN_MALLOC_ALREADY_ALIGNED=0 to fall back to the handmade aligned memory allocator.");
  #endif
#else
  result = handmade_aligned_malloc(size);
#endif

  if(!result && size)
    throw_std_bad_alloc();

  return result;
}
EIGEN_DEVICE_FUNC inline void aligned_free(void *ptr)
{
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
  std::free(ptr);
#else
  handmade_aligned_free(ptr);
#endif
}
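/** \internal Reallocates an aligned block of memory; the alignment guarantee
  * is preserved across the reallocation. */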
inline void* aligned_realloc(void *ptr, std::size_t new_size, std::size_t old_size)
{
  EIGEN_UNUSED_VARIABLE(old_size);

  void *result;
#if (EIGEN_DEFAULT_ALIGN_BYTES==0) || EIGEN_MALLOC_ALREADY_ALIGNED
  result = std::realloc(ptr,new_size);
#else
  result = handmade_aligned_realloc(ptr,new_size,old_size);
#endif

  if (!result && new_size)
    throw_std_bad_alloc();

  return result;
}
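// conditional_aligned_malloc/realloc/free<Align> pick the aligned or the
// plain std:: routines at compile time via the Align template flag; the
// Align==false specializations are shown below.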
template<> EIGEN_DEVICE_FUNC inline void* conditional_aligned_malloc<false>(std::size_t size)
{
  check_that_malloc_is_allowed();

  void *result = std::malloc(size);
  if(!result && size)
    throw_std_bad_alloc();
  return result;
}
template<> inline void* conditional_aligned_realloc<false>(void* ptr, std::size_t new_size, std::size_t)
{
  return std::realloc(ptr, new_size);
}
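/** \internal Constructs the elements of an array in place with placement new.
  * The \a size parameter tells on how many objects to call the constructor of T. */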
template<typename T> EIGEN_DEVICE_FUNC inline T* construct_elements_of_array(T *ptr, std::size_t size)
{
  std::size_t i;
  EIGEN_TRY
  {
    for (i = 0; i < size; ++i) ::new (ptr + i) T;
    return ptr;
  }
  EIGEN_CATCH(...)
  {
    // roll back the i elements constructed so far before rethrowing
    destruct_elements_of_array(ptr, i);
    EIGEN_THROW;
  }
  return ptr;
}
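// check_size_for_overflow guards the multiplication sizeof(T)*size: the
// request is rejected when size exceeds std::size_t(-1)/sizeof(T), i.e. when
// sizeof(T)*size would wrap around. E.g. for T=double on a 64-bit platform,
// any size above 2^64/8 is refused before malloc ever sees it.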
template<typename T> EIGEN_DEVICE_FUNC void check_size_for_overflow(std::size_t size)
{
  if(size > std::size_t(-1) / sizeof(T))
    throw_std_bad_alloc();
}
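/** \internal Allocates \a size objects of type T with the default alignment
  * and constructs them. On exception during construction, the memory is
  * released before rethrowing. */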
template<typename T> EIGEN_DEVICE_FUNC inline T* aligned_new(std::size_t size)
{
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(aligned_malloc(sizeof(T)*size));
  EIGEN_TRY
  {
    return construct_elements_of_array(result, size);
  }
  EIGEN_CATCH(...)
  {
    aligned_free(result);
    EIGEN_THROW;
  }
  return result;
}
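/** \internal Like aligned_new, but the alignment is applied only when the
  * compile-time flag \a Align is true. */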
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new(std::size_t size)
{
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
  EIGEN_TRY
  {
    return construct_elements_of_array(result, size);
  }
  EIGEN_CATCH(...)
  {
    conditional_aligned_free<Align>(result);
    EIGEN_THROW;
  }
  return result;
}
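/** \internal Deletes objects constructed with aligned_new. The \a size
  * parameter tells on how many objects to call the destructor of T. */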
template<typename T> EIGEN_DEVICE_FUNC inline void aligned_delete(T *ptr, std::size_t size)
{
  destruct_elements_of_array<T>(ptr, size);
  aligned_free(ptr);
}
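/** \internal Deletes objects constructed with conditional_aligned_new;
  * deallocation matches the alignment choice of the allocating call. */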
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete(T *ptr, std::size_t size)
{
  destruct_elements_of_array<T>(ptr, size);
  conditional_aligned_free<Align>(ptr);
}
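/** \internal Resizes an array of objects in place: elements past \a new_size
  * are destructed before shrinking, and newly acquired slots are constructed
  * after growing. On exception the memory is freed before rethrowing. */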
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_realloc_new(T* pts, std::size_t new_size, std::size_t old_size)
{
  check_size_for_overflow<T>(new_size);
  check_size_for_overflow<T>(old_size);
  if(new_size < old_size)
    destruct_elements_of_array(pts+new_size, old_size-new_size);
  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
  if(new_size > old_size)
  {
    EIGEN_TRY
    {
      construct_elements_of_array(result+old_size, new_size-old_size);
    }
    EIGEN_CATCH(...)
    {
      conditional_aligned_free<Align>(result);
      EIGEN_THROW;
    }
  }
  return result;
}
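// The *_auto variants additionally skip construction/destruction entirely for
// types that do not require initialization (NumTraits<T>::RequireInitialization
// is false), so arrays of plain scalars get raw malloc/realloc semantics.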
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline T* conditional_aligned_new_auto(std::size_t size)
{
  if(size==0)
    return 0; // short-cut, avoids a zero-size allocation
  check_size_for_overflow<T>(size);
  T *result = reinterpret_cast<T*>(conditional_aligned_malloc<Align>(sizeof(T)*size));
  if(NumTraits<T>::RequireInitialization)
  {
    EIGEN_TRY
    {
      construct_elements_of_array(result, size);
    }
    EIGEN_CATCH(...)
    {
      conditional_aligned_free<Align>(result);
      EIGEN_THROW;
    }
  }
  return result;
}
template<typename T, bool Align> inline T* conditional_aligned_realloc_new_auto(T* pts, std::size_t new_size, std::size_t old_size)
{
  check_size_for_overflow<T>(new_size);
  check_size_for_overflow<T>(old_size);
  if(NumTraits<T>::RequireInitialization && (new_size < old_size))
    destruct_elements_of_array(pts+new_size, old_size-new_size);
  T *result = reinterpret_cast<T*>(conditional_aligned_realloc<Align>(reinterpret_cast<void*>(pts), sizeof(T)*new_size, sizeof(T)*old_size));
  if(NumTraits<T>::RequireInitialization && (new_size > old_size))
  {
    EIGEN_TRY
    {
      construct_elements_of_array(result+old_size, new_size-old_size);
    }
    EIGEN_CATCH(...)
    {
      conditional_aligned_free<Align>(result);
      EIGEN_THROW;
    }
  }
  return result;
}
template<typename T, bool Align> EIGEN_DEVICE_FUNC inline void conditional_aligned_delete_auto(T *ptr, std::size_t size)
{
  if(NumTraits<T>::RequireInitialization)
    destruct_elements_of_array<T>(ptr, size);
  conditional_aligned_free<Align>(ptr);
}
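/** \internal Returns the index of the first element of the array that has the
  * requested \a Alignment, or the array size if no such element exists. For
  * example, with Alignment==16 and a float array whose address is 4 mod 16,
  * the result is 3: skipping three 4-byte floats reaches the next 16-byte
  * boundary. */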
template<int Alignment, typename Scalar, typename Index>
EIGEN_DEVICE_FUNC inline Index first_aligned(const Scalar* array, Index size)
{
  const Index ScalarSize = sizeof(Scalar);
  const Index AlignmentSize = Alignment / ScalarSize;
  const Index AlignmentMask = AlignmentSize-1;

  if(AlignmentSize<=1)
    return 0; // the requested alignment is not larger than a scalar
  else if( (UIntPtr(array) & (sizeof(Scalar)-1)) || (Alignment%ScalarSize)!=0)
    return size; // no element of this array can be well aligned
  else
  {
    Index first = (AlignmentSize - (Index((UIntPtr(array)/ScalarSize)) & AlignmentMask)) & AlignmentMask;
    return (first < size) ? first : size;
  }
}
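/** \internal Returns the index of the first element of the array that is
  * aligned for the default packet type of \a Scalar. */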
template<typename Scalar, typename Index>
EIGEN_DEVICE_FUNC inline Index first_default_aligned(const Scalar* array, Index size)
{
  typedef typename packet_traits<Scalar>::type DefaultPacketType;
  return first_aligned<unpacket_traits<DefaultPacketType>::alignment>(array, size);
}
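/** \internal Returns the smallest integer multiple of \a base that is greater
  * than or equal to \a size; e.g. first_multiple(13,4) == 16. */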
template<typename Index>
inline Index first_multiple(Index size, Index base)
{
  return ((size+base-1)/base)*base;
}
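// std::copy can be much slower than memcpy, so smart_copy dispatches at
// compile time: raw memcpy for types that do not require initialization,
// std::copy otherwise.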
template<typename T, bool UseMemcpy> struct smart_copy_helper;

template<typename T> EIGEN_DEVICE_FUNC void smart_copy(const T* start, const T* end, T* target)
{
  smart_copy_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
}

template<typename T> struct smart_copy_helper<T,true> {
  EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
  {
    IntPtr size = IntPtr(end)-IntPtr(start);
    if(size==0) return;
    eigen_internal_assert(start!=0 && end!=0 && target!=0);
    std::memcpy(target, start, size);
  }
};

template<typename T> struct smart_copy_helper<T,false> {
  EIGEN_DEVICE_FUNC static inline void run(const T* start, const T* end, T* target)
  { std::copy(start, end, target); }
};
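// smart_memmove is the overlapping-ranges counterpart: memmove for trivial
// types, otherwise std::copy or std::copy_backward depending on whether the
// target precedes or follows the source in memory.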
template<typename T, bool UseMemmove> struct smart_memmove_helper;

template<typename T> void smart_memmove(const T* start, const T* end, T* target)
{
  smart_memmove_helper<T,!NumTraits<T>::RequireInitialization>::run(start, end, target);
}

template<typename T> struct smart_memmove_helper<T,true> {
  static inline void run(const T* start, const T* end, T* target)
  {
    IntPtr size = IntPtr(end)-IntPtr(start);
    if(size==0) return;
    eigen_internal_assert(start!=0 && end!=0 && target!=0);
    std::memmove(target, start, size);
  }
};

template<typename T> struct smart_memmove_helper<T,false> {
  static inline void run(const T* start, const T* end, T* target)
  {
    if (UIntPtr(target) < UIntPtr(start))
    {
      std::copy(start, end, target);
    }
    else
    {
      std::ptrdiff_t count = (std::ptrdiff_t(end)-std::ptrdiff_t(start)) / sizeof(T);
      std::copy_backward(start, end, target + count);
    }
  }
};
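/*****************************************************************************
*** Implementation of runtime stack allocation (falling back to malloc)    ***
*****************************************************************************/

// Pick the platform's stack allocation primitive: alloca on Linux/Mac (or
// wherever alloca is provided as a macro), _alloca on MSVC. EIGEN_ALLOCA may
// be predefined by the user to override this choice.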
#ifndef EIGEN_ALLOCA
  #if EIGEN_OS_LINUX || EIGEN_OS_MAC || (defined alloca)
    #define EIGEN_ALLOCA alloca
  #elif EIGEN_COMP_MSVC
    #define EIGEN_ALLOCA _alloca
  #endif
#endif
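// aligned_stack_memory_handler is the RAII guard created by
// ei_declare_aligned_stack_constructed_variable below; its destructor
// destructs the elements and frees the buffer when it was heap-allocated: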
EIGEN_DEVICE_FUNC ~aligned_stack_memory_handler()
{
  if(NumTraits<T>::RequireInitialization && m_ptr)
    Eigen::internal::destruct_elements_of_array<T>(m_ptr, m_size);
  if(m_deallocate)
    Eigen::internal::aligned_free(m_ptr);
}
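// scoped_array is a tiny new[]/delete[] RAII wrapper; among its accessors it
// decays to a raw pointer for convenience: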
operator const T*() const { return m_ptr; }
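// EIGEN_ALIGNED_ALLOCA over-allocates by EIGEN_DEFAULT_ALIGN_BYTES-1 bytes and
// rounds the result up to the next aligned address, since alloca itself gives
// no alignment guarantee beyond that of a stack slot.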
#ifdef EIGEN_ALLOCA

  #if EIGEN_DEFAULT_ALIGN_BYTES>0
    #define EIGEN_ALIGNED_ALLOCA(SIZE) reinterpret_cast<void*>((internal::UIntPtr(EIGEN_ALLOCA(SIZE+EIGEN_DEFAULT_ALIGN_BYTES-1)) + EIGEN_DEFAULT_ALIGN_BYTES-1) & ~(std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)))
  #else
    #define EIGEN_ALIGNED_ALLOCA(SIZE) EIGEN_ALLOCA(SIZE)
  #endif
  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
    TYPE* NAME = (BUFFER)!=0 ? (BUFFER) \
               : reinterpret_cast<TYPE*>( \
                  (sizeof(TYPE)*SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) ? EIGEN_ALIGNED_ALLOCA(sizeof(TYPE)*SIZE) \
                : Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE) ); \
    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,sizeof(TYPE)*SIZE>EIGEN_STACK_ALLOCATION_LIMIT)
#else

  #define ei_declare_aligned_stack_constructed_variable(TYPE,NAME,SIZE,BUFFER) \
    Eigen::internal::check_size_for_overflow<TYPE>(SIZE); \
    TYPE* NAME = (BUFFER)!=0 ? BUFFER : reinterpret_cast<TYPE*>(Eigen::internal::aligned_malloc(sizeof(TYPE)*SIZE)); \
    Eigen::internal::aligned_stack_memory_handler<TYPE> EIGEN_CAT(NAME,_stack_memory_destructor)((BUFFER)==0 ? NAME : 0,SIZE,true)

#endif
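// A minimal usage sketch (names are illustrative): declares a temporary buffer
// 'tmp' of n floats, placed on the stack when small enough and on the heap
// otherwise, constructed and destroyed automatically at scope exit:
//   ei_declare_aligned_stack_constructed_variable(float, tmp, n, 0);

/*****************************************************************************
*** Implementation of aligned new/delete-like functions                    ***
*****************************************************************************/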
#if EIGEN_MAX_ALIGN_BYTES!=0
  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void* operator new(std::size_t size, const std::nothrow_t&) EIGEN_NO_THROW { \
        EIGEN_TRY { return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); } \
        EIGEN_CATCH (...) { return 0; } \
      }
  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
      void *operator new(std::size_t size) { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      } \
      void *operator new[](std::size_t size) { \
        return Eigen::internal::conditional_aligned_malloc<NeedsToAlign>(size); \
      } \
      void operator delete(void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete[](void * ptr) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete(void * ptr, std::size_t) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      void operator delete[](void * ptr, std::size_t) EIGEN_NO_THROW { Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); } \
      /* in-place new and delete: no memory is actually allocated, so the  */ \
      /* default implementation can safely handle this particular case.    */ \
      static void *operator new(std::size_t size, void *ptr) { return ::operator new(size,ptr); } \
      static void *operator new[](std::size_t size, void* ptr) { return ::operator new[](size,ptr); } \
      void operator delete(void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete(memory,ptr); } \
      void operator delete[](void * memory, void *ptr) EIGEN_NO_THROW { return ::operator delete[](memory,ptr); } \
      /* nothrow-new: returns zero instead of throwing std::bad_alloc */ \
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \
      void operator delete(void *ptr, const std::nothrow_t&) EIGEN_NO_THROW { \
        Eigen::internal::conditional_aligned_free<NeedsToAlign>(ptr); \
      } \
      typedef void eigen_aligned_operator_new_marker_type;
#else
  #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
#endif

#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(bool(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%EIGEN_MAX_ALIGN_BYTES==0)))
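// A typical use, assuming a class with a fixed-size vectorizable member
// (illustrative names): overload operator new/delete so heap instances are
// properly aligned for the Vector4f member:
//   struct Body {
//     Eigen::Vector4f position;
//     EIGEN_MAKE_ALIGNED_OPERATOR_NEW
//   };

// In class aligned_allocator (the STL-compatible allocator that hands out
// aligned memory), allocate() is implemented as follows: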
pointer allocate(size_type num, const void* /*hint*/ = 0)
{
  internal::check_size_for_overflow<T>(num);
  size_type size = num * sizeof(T);
#if EIGEN_COMP_GNUC_STRICT && EIGEN_GNUC_AT_LEAST(7,0)
  // workaround for GCC bug 87544: refuse over-large requests up front instead
  // of triggering a bogus -Walloc-size-larger-than warning
  if (size > std::size_t(std::numeric_limits<std::ptrdiff_t>::max()))
    return 0;
  else
#endif
  return static_cast<pointer>( internal::aligned_malloc(size) );
}
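/****** CPU cache-size queries via the cpuid instruction ******/

// EIGEN_CPUID(abcd,func,id) fills abcd[0..3] with EAX..EDX after executing
// cpuid with leaf 'func' and subleaf 'id'. On x86 PIC builds, ebx (rbx on
// x86_64) is reserved as the GOT pointer, so it must be saved and restored
// around the instruction.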
#if !defined(EIGEN_NO_CPUID)
#  if EIGEN_COMP_GNUC && EIGEN_ARCH_i386_OR_x86_64
#    if defined(__PIC__) && EIGEN_ARCH_i386
       // x86 with PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("xchgl %%ebx, %k1;cpuid; xchgl %%ebx,%k1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "a" (func), "c" (id));
#    elif defined(__PIC__) && EIGEN_ARCH_x86_64
       // x86_64 with PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("xchg{q}\t{%%}rbx, %q1; cpuid; xchg{q}\t{%%}rbx, %q1": "=a" (abcd[0]), "=&r" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id));
#    else
       // x86 or x86_64 without PIC
#      define EIGEN_CPUID(abcd,func,id) \
         __asm__ __volatile__ ("cpuid": "=a" (abcd[0]), "=b" (abcd[1]), "=c" (abcd[2]), "=d" (abcd[3]) : "0" (func), "2" (id) );
#    endif
#  elif EIGEN_COMP_MSVC
#    if (EIGEN_COMP_MSVC > 1500) && EIGEN_ARCH_i386_OR_x86_64
#      define EIGEN_CPUID(abcd,func,id) __cpuidex((int*)abcd,func,id)
#    endif
#  endif
#endif
#ifdef EIGEN_CPUID

inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
{
  // the 12-character vendor string is returned in ebx, edx, ecx order
  return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
}
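// Query the data-cache sizes with CPUID leaf 0x4 (deterministic cache
// parameters): enumerate sub-leaves until an invalid cache type comes back,
// decoding ways/partitions/line size/sets for each data or unified cache.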
inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)
{
  int abcd[4];
  l1 = l2 = l3 = 0;
  int cache_id = 0;
  int cache_type = 0;
  do {
    abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
    EIGEN_CPUID(abcd,0x4,cache_id);
    cache_type = (abcd[0] & 0x0F) >> 0;
    if(cache_type==1||cache_type==3) // data or unified cache
    {
      int cache_level = (abcd[0] & 0xE0) >> 5;        // A[7:5]
      int ways        = (abcd[1] & 0xFFC00000) >> 22; // B[31:22]
      int partitions  = (abcd[1] & 0x003FF000) >> 12; // B[21:12]
      int line_size   = (abcd[1] & 0x00000FFF) >> 0;  // B[11:0]
      int sets        = (abcd[2]);                    // C[31:0]

      int cache_size = (ways+1) * (partitions+1) * (line_size+1) * (sets+1);

      switch(cache_level)
      {
        case 1: l1 = cache_size; break;
        case 2: l2 = cache_size; break;
        case 3: l3 = cache_size; break;
        default: break;
      }
    }
    cache_id++;
  } while(cache_type>0 && cache_id<16);
}
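// Fallback for older CPUs: CPUID leaf 0x2 returns packed one-byte cache
// descriptors; each byte is translated to an L1/L2/L3 size in KB by the
// lookup below (the sizes are converted to bytes at the end).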
inline void queryCacheSizes_intel_codes(int& l1, int& l2, int& l3)
{
  int abcd[4];
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  l1 = l2 = l3 = 0;
  EIGEN_CPUID(abcd,0x00000002,0);
  unsigned char * bytes = reinterpret_cast<unsigned char *>(abcd)+2;
  bool check_for_p2_core2 = false;
  for(int i=0; i<14; ++i)
  {
    switch(bytes[i])
    {
      case 0x0A: l1 = 8; break;
      case 0x0C: l1 = 16; break;
      case 0x0E: l1 = 24; break;
      case 0x10: l1 = 16; break;
      case 0x15: l1 = 16; break;
      case 0x2C: l1 = 32; break;
      case 0x30: l1 = 32; break;
      case 0x60: l1 = 16; break;
      case 0x66: l1 = 8; break;
      case 0x67: l1 = 16; break;
      case 0x68: l1 = 32; break;
      case 0x1A: l2 = 96; break;
      case 0x22: l3 = 512; break;
      case 0x23: l3 = 1024; break;
      case 0x25: l3 = 2048; break;
      case 0x29: l3 = 4096; break;
      case 0x39: l2 = 128; break;
      case 0x3A: l2 = 192; break;
      case 0x3B: l2 = 128; break;
      case 0x3C: l2 = 256; break;
      case 0x3D: l2 = 384; break;
      case 0x3E: l2 = 512; break;
      case 0x40: l2 = 0; break;
      case 0x41: l2 = 128; break;
      case 0x42: l2 = 256; break;
      case 0x43: l2 = 512; break;
      case 0x44: l2 = 1024; break;
      case 0x45: l2 = 2048; break;
      case 0x46: l3 = 4096; break;
      case 0x47: l3 = 8192; break;
      case 0x48: l2 = 3072; break;
      case 0x49: if(l2!=0) l3 = 4096; else {check_for_p2_core2=true; l3 = l2 = 4096;} break;
      case 0x4A: l3 = 6144; break;
      case 0x4B: l3 = 8192; break;
      case 0x4C: l3 = 12288; break;
      case 0x4D: l3 = 16384; break;
      case 0x4E: l2 = 6144; break;
      case 0x78: l2 = 1024; break;
      case 0x79: l2 = 128; break;
      case 0x7A: l2 = 256; break;
      case 0x7B: l2 = 512; break;
      case 0x7C: l2 = 1024; break;
      case 0x7D: l2 = 2048; break;
      case 0x7E: l2 = 256; break;
      case 0x7F: l2 = 512; break;
      case 0x80: l2 = 512; break;
      case 0x81: l2 = 128; break;
      case 0x82: l2 = 256; break;
      case 0x83: l2 = 512; break;
      case 0x84: l2 = 1024; break;
      case 0x85: l2 = 2048; break;
      case 0x86: l2 = 512; break;
      case 0x87: l2 = 1024; break;
      case 0x88: l3 = 2048; break;
      case 0x89: l3 = 4096; break;
      case 0x8A: l3 = 8192; break;
      case 0x8D: l3 = 3072; break;
      default: break;
    }
  }
  // descriptor 0x49 is ambiguous: on P2/Core2 it denotes a 4MB L2, elsewhere a 4MB L3
  if(check_for_p2_core2 && l2 == l3)
    l3 = 0;
  l1 *= 1024;
  l2 *= 1024;
  l3 *= 1024;
}
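// Leaf 0x4 requires max_std_funcs >= 4; otherwise fall back to the
// descriptor-byte scheme of leaf 0x2.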
inline void queryCacheSizes_intel(int& l1, int& l2, int& l3, int max_std_funcs)
{
  if(max_std_funcs>=4)
    queryCacheSizes_intel_direct(l1,l2,l3);
  else
    queryCacheSizes_intel_codes(l1,l2,l3);
}
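// AMD reports cache sizes directly through the extended leaves: 0x80000005
// gives the L1 size in ECX[31:24] (KB), 0x80000006 gives the L2 size in
// ECX[31:16] (KB) and the L3 size in EDX[31:18] (512KB units).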
inline void queryCacheSizes_amd(int& l1, int& l2, int& l3)
{
  int abcd[4];
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  EIGEN_CPUID(abcd,0x80000005,0);
  l1 = (abcd[2] >> 24) * 1024; // C[31:24] = L1 size in KB
  abcd[0] = abcd[1] = abcd[2] = abcd[3] = 0;
  EIGEN_CPUID(abcd,0x80000006,0);
  l2 = (abcd[2] >> 16) * 1024; // C[31:16] = L2 size in KB
  l3 = ((abcd[3] & 0xFFFC000) >> 18) * 512 * 1024; // D[31:18] = L3 size in 512KB units
}

#endif // EIGEN_CPUID
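/** \internal Queries and returns the sizes in Bytes of the L1, L2, and L3
  * data caches respectively. The vendor is identified by the 12-character
  * string returned by CPUID leaf 0x0, packed below as three little-endian
  * integers ("GenuineIntel", "AuthenticAMD", and the early-K5 "AMDisbetter!"). */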
inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
#ifdef EIGEN_CPUID
  int abcd[4];
  const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
  const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
  const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574};

  // identify the CPU vendor
  EIGEN_CPUID(abcd,0x0,0);
  int max_std_funcs = abcd[0]; // EAX of leaf 0x0 holds the highest supported standard leaf
  if(cpuid_is_vendor(abcd,GenuineIntel))
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
  else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
    queryCacheSizes_amd(l1,l2,l3);
  else
    // by default let's use Intel's API
    queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
#else
  l1 = l2 = l3 = -1;
#endif
}
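/** \internal \returns the size in Bytes of the biggest cache level above the
  * L1 (i.e. the L2, or the L3 when present). */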
inline int queryTopLevelCacheSize()
{
  int l1, l2(-1), l3(-1);
  queryCacheSizes(l1,l2,l3);
  return (std::max)(l2,l3);
}
} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_MEMORY_H