optimization.h
Go to the documentation of this file.
00001 //
00002 // Copyright 2017 The Abseil Authors.
00003 //
00004 // Licensed under the Apache License, Version 2.0 (the "License");
00005 // you may not use this file except in compliance with the License.
00006 // You may obtain a copy of the License at
00007 //
00008 //      https://www.apache.org/licenses/LICENSE-2.0
00009 //
00010 // Unless required by applicable law or agreed to in writing, software
00011 // distributed under the License is distributed on an "AS IS" BASIS,
00012 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00013 // See the License for the specific language governing permissions and
00014 // limitations under the License.
00015 //
00016 // -----------------------------------------------------------------------------
00017 // File: optimization.h
00018 // -----------------------------------------------------------------------------
00019 //
00020 // This header file defines portable macros for performance optimization.
00021 
00022 #ifndef ABSL_BASE_OPTIMIZATION_H_
00023 #define ABSL_BASE_OPTIMIZATION_H_
00024 
00025 #include "absl/base/config.h"
00026 
// ABSL_BLOCK_TAIL_CALL_OPTIMIZATION
//
// Instructs the compiler to avoid optimizing tail-call recursion. Use of this
// macro is useful when you wish to preserve the existing function order within
// a stack trace for logging, debugging, or profiling purposes.
//
// Example:
//
//   int f() {
//     int result = g();
//     ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
//     return result;
//   }
#if defined(__pnacl__)
// PNaCl has no inline-assembly escape hatch; a volatile local inside a dead
// branch is enough to force the compiler to keep the call frame alive.
// NOTE(review): the declaration-in-condition form requires C++ — confirm this
// header is never consumed by C translation units on this toolchain.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#elif defined(__clang__)
// Clang will not tail call given inline volatile assembly.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(__GNUC__)
// GCC will not tail call given inline volatile assembly.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("")
#elif defined(_MSC_VER)
#include <intrin.h>
// The __nop() intrinsic blocks the optimization: MSVC will not tail call
// across an explicit instruction it must emit.
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop()
#else
// Unknown compiler: fall back to the volatile-local-in-dead-branch trick
// used for PNaCl above (same C++-only caveat applies).
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; }
#endif
00055 
// ABSL_CACHELINE_SIZE
//
// Explicitly defines the size of the L1 cache for purposes of alignment.
// Setting the cacheline size allows you to specify that certain objects be
// aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations.
// (See below.)
//
// NOTE: this macro should be replaced with the following C++17 features, when
// those are generally available:
//
//   * `std::hardware_constructive_interference_size`
//   * `std::hardware_destructive_interference_size`
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
#if defined(__GNUC__)
// Cache line alignment: pick a per-architecture cacheline size first, then
// fall through to a default for architectures not listed below.
#if defined(__i386__) || defined(__x86_64__)
#define ABSL_CACHELINE_SIZE 64
#elif defined(__powerpc64__)
#define ABSL_CACHELINE_SIZE 128
#elif defined(__aarch64__)
// We would need to read special register ctr_el0 to find out L1 dcache size.
// This value is a good estimate based on a real aarch64 machine.
#define ABSL_CACHELINE_SIZE 64
#elif defined(__arm__)
// Cache line sizes for ARM: These values are not strictly correct since
// cache line sizes depend on implementations, not architectures.  There
// are even implementations with cache line sizes configurable at boot
// time.
#if defined(__ARM_ARCH_5T__)
#define ABSL_CACHELINE_SIZE 32
#elif defined(__ARM_ARCH_7A__)
#define ABSL_CACHELINE_SIZE 64
#endif
#endif

// Catch-all for GCC-family targets not matched above (and ARM variants other
// than ARMv5T/ARMv7-A, which define nothing in the chain above).
#ifndef ABSL_CACHELINE_SIZE
// A reasonable default guess.  Note that overestimates tend to waste more
// space, while underestimates tend to waste more time.
#define ABSL_CACHELINE_SIZE 64
#endif

// ABSL_CACHELINE_ALIGNED
//
// Indicates that the declared object be cache aligned using
// `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to
// load a set of related objects in the L1 cache for performance improvements.
// Cacheline aligning objects properly allows constructive memory sharing and
// prevents destructive (or "false") memory sharing.
//
// NOTE: this macro should be replaced with usage of `alignas()` using
// `std::hardware_constructive_interference_size` and/or
// `std::hardware_destructive_interference_size` when available within C++17.
//
// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html
// for more information.
//
// On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__`
// or `__declspec` attribute. For compilers where this is not known to work,
// the macro expands to nothing.
//
// No further guarantees are made here. The result of applying the macro
// to variables and types is always implementation-defined.
//
// WARNING: It is easy to use this attribute incorrectly, even to the point
// of causing bugs that are difficult to diagnose, crash, etc. It does not
// of itself guarantee that objects are aligned to a cache line.
//
// NOTE: Some compilers are picky about the locations of annotations such as
// this attribute, so prefer to put it at the beginning of your declaration.
// For example,
//
//   ABSL_CACHELINE_ALIGNED static Foo* foo = ...
//
//   class ABSL_CACHELINE_ALIGNED Bar { ...
//
// Recommendations:
//
// 1) Consult compiler documentation; this comment is not kept in sync as
//    toolchains evolve.
// 2) Verify your use has the intended effect. This often requires inspecting
//    the generated machine code.
// 3) Prefer applying this attribute to individual variables. Avoid
//    applying it to types. This tends to localize the effect.
#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE)))
#elif defined(_MSC_VER)
// MSVC: no per-architecture dispatch; 64 bytes matches all supported targets.
#define ABSL_CACHELINE_SIZE 64
#define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE))
#else
// Unknown compiler: keep the size constant usable, but alignment is a no-op
// (see the "expands to nothing" caveat in the comment above).
#define ABSL_CACHELINE_SIZE 64
#define ABSL_CACHELINE_ALIGNED
#endif
00149 
00150 // ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE
00151 //
00152 // Enables the compiler to prioritize compilation using static analysis for
00153 // likely paths within a boolean branch.
00154 //
00155 // Example:
00156 //
00157 //   if (ABSL_PREDICT_TRUE(expression)) {
00158 //     return result;                        // Faster if more likely
00159 //   } else {
00160 //     return 0;
00161 //   }
00162 //
00163 // Compilers can use the information that a certain branch is not likely to be
00164 // taken (for instance, a CHECK failure) to optimize for the common case in
00165 // the absence of better information (ie. compiling gcc with `-fprofile-arcs`).
00166 //
00167 // Recommendation: Modern CPUs dynamically predict branch execution paths,
00168 // typically with accuracy greater than 97%. As a result, annotating every
00169 // branch in a codebase is likely counterproductive; however, annotating
00170 // specific branches that are both hot and consistently mispredicted is likely
00171 // to yield performance improvements.
00172 #if ABSL_HAVE_BUILTIN(__builtin_expect) || \
00173     (defined(__GNUC__) && !defined(__clang__))
00174 #define ABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0))
00175 #define ABSL_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
00176 #else
00177 #define ABSL_PREDICT_FALSE(x) (x)
00178 #define ABSL_PREDICT_TRUE(x) (x)
00179 #endif
00180 
00181 #endif  // ABSL_BASE_OPTIMIZATION_H_


abseil_cpp — documentation page autogenerated by Doxygen on Wed Jun 19 2019 19:42:15