#include "absl/time/clock.h"

#include "absl/base/attributes.h"

#ifdef _WIN32
#include <windows.h>
#endif

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstdint>
#include <ctime>
#include <limits>

#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"

namespace absl {
Time Now() {
  // Convert the raw nanosecond count into a Time.  MakeDuration() takes
  // seconds and ticks of 1/4 nanosecond, hence the multiplication by 4.
  int64_t n = absl::GetCurrentTimeNanos();
  if (n >= 0) {
    return time_internal::FromUnixDuration(
        time_internal::MakeDuration(n / 1000000000, n % 1000000000 * 4));
  }
  return time_internal::FromUnixDuration(absl::Nanoseconds(n));
}
}  // namespace absl
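
// Example usage (an illustrative sketch; absl::Now() and
// absl::GetCurrentTimeNanos() are the public entry points defined here):
//
//   absl::Time t = absl::Now();
//   int64_t unix_ns = absl::GetCurrentTimeNanos();  // ns since the Unix epoch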

// Decide whether the cycle-counter-based implementation of
// GetCurrentTimeNanos() below can be used; otherwise fall back to the system
// time source.  The choice may be overridden by defining the macro explicitly.
#ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
#if ABSL_USE_UNSCALED_CYCLECLOCK
#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 1
#else
#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
#endif
#endif

// Select the system time source: a std::chrono-based reader on Apple and
// Windows, a POSIX-based reader elsewhere.
#if defined(__APPLE__) || defined(_WIN32)
#include "absl/time/internal/get_current_time_chrono.inc"
#else
#include "absl/time/internal/get_current_time_posix.inc"
#endif

// Allows the system time source to be overridden (e.g., for testing).
#ifndef GET_CURRENT_TIME_NANOS_FROM_SYSTEM
#define GET_CURRENT_TIME_NANOS_FROM_SYSTEM() \
  ::absl::time_internal::GetCurrentTimeNanosFromSystem()
#endif

#if !ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
namespace absl {
int64_t GetCurrentTimeNanos() {
  return GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
}
}  // namespace absl
#else  // Use the cyclecounter-based implementation below.

// Allows the cycle-clock source to be overridden (e.g., for testing).
#ifndef GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW
#define GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW() \
  ::absl::time_internal::UnscaledCycleClockWrapperForGetCurrentTime::Now()
#endif

// Counters kept for debugging and tests; they track how often each path of
// the calibration code below is taken.
static int64_t stats_initializations;
static int64_t stats_reinitializations;
static int64_t stats_calibrations;
static int64_t stats_slow_paths;
static int64_t stats_fast_slow_paths;

namespace absl {
namespace time_internal {

// A friend wrapper that gives this file access to
// base_internal::UnscaledCycleClock::Now().
class UnscaledCycleClockWrapperForGetCurrentTime {
 public:
  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
};
}  // namespace time_internal

// Read the kernel's idea of the current time, together with a cycle-counter
// value taken close to that reading.  last_cycleclock is the counter value
// recorded at the previous kernel read; *cycleclock receives the counter
// value paired with the returned time.  The reading is retried when it
// appears to have straddled an interruption (i.e., when it took much longer
// than a typical system-time read), so the pairing of time and cycle count
// stays tight.
static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
                                             uint64_t *cycleclock) {
  // Adaptive estimate of how many cycle-clock ticks a kernel time read takes.
  // Used to reject readings that straddled an interruption.
  static std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};

  uint64_t local_approx_syscall_time_in_cycles =  // local copy
      approx_syscall_time_in_cycles.load(std::memory_order_relaxed);

  int64_t current_time_nanos_from_system;
  uint64_t before_cycles;
  uint64_t after_cycles;
  uint64_t elapsed_cycles;
  int loops = 0;
  do {
    before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
    current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
    after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
    // elapsed_cycles is unsigned, so it is large on overflow.
    elapsed_cycles = after_cycles - before_cycles;
    if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
        ++loops == 20) {  // Many rejections in a row: the estimate is too low.
      loops = 0;
      if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
        local_approx_syscall_time_in_cycles =
            (local_approx_syscall_time_in_cycles + 1) << 1;
      }
      approx_syscall_time_in_cycles.store(
          local_approx_syscall_time_in_cycles,
          std::memory_order_relaxed);
    }
    // Retry while the reading appears to have straddled an interruption, or
    // while the cycle counter reads at most 2^16 behind last_cycleclock (it
    // can appear to step backwards slightly, e.g. across CPUs).
  } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
           last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));

  // Number of consecutive readings that took much less than the current
  // estimate; used to lower the estimate again.
  static std::atomic<uint32_t> seen_smaller{ 0 };

  if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
    // This reading took more than half the estimated time; keep the estimate.
    seen_smaller.store(0, std::memory_order_relaxed);
  } else if (seen_smaller.fetch_add(1, std::memory_order_relaxed) >= 3) {
    // Several consecutive readings took at most half the estimate; lower the
    // estimate by 1/8.
    const uint64_t new_approximation =
        local_approx_syscall_time_in_cycles -
        (local_approx_syscall_time_in_cycles >> 3);
    approx_syscall_time_in_cycles.store(new_approximation,
                                        std::memory_order_relaxed);
    seen_smaller.store(0, std::memory_order_relaxed);
  }

  *cycleclock = after_cycles;
  return current_time_nanos_from_system;
}

// A sequence lock ("seqlock") protects the calibration sample below.  The
// writer makes the sequence number odd while it updates the data and even
// again when it is done; readers retry (or take the slow path) if they see an
// odd value or if the value changes across their reads.

// Advance *seq to an odd value, marking a write in progress, and return the
// even value that SeqRelease() should publish when the write is complete.
static inline uint64_t SeqAcquire(std::atomic<uint64_t> *seq) {
  uint64_t x = seq->fetch_add(1, std::memory_order_relaxed);

  // The release fence keeps the update of *seq above from being reordered
  // past the subsequent (relaxed) stores to the protected data, so those
  // stores behave as release operations with respect to the sequence number.
  std::atomic_thread_fence(std::memory_order_release);

  return x + 2;
}

// Publish the even sequence value x, marking the write as complete.
static inline void SeqRelease(std::atomic<uint64_t> *seq, uint64_t x) {
  // The release store orders the preceding data writes before the new
  // sequence value becomes visible to readers.
  seq->store(x, std::memory_order_release);
}
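
// Writer-side sketch of the protocol above (illustrative; UpdateLastSample()
// below is the real writer):
//
//   uint64_t x = SeqAcquire(&seq);           // sequence becomes odd
//   last_sample.base_ns.store(..., std::memory_order_relaxed);
//   ...                                      // other relaxed field stores
//   SeqRelease(&seq, x);                     // sequence becomes even again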

// "nsscaled" values are nanoseconds multiplied by 2^kScale, a fixed-point
// representation that lets the fast path multiply and shift instead of
// dividing by the cycle-clock frequency.
enum { kScale = 30 };

// The minimum interval, in nanoseconds, between calibration samples
// (2000 << 20 is roughly 2.1 seconds).  Past that many nanoseconds the fast
// path falls back to the slow path to take a fresh kernel sample.
static const uint64_t kMinNSBetweenSamples = 2000 << 20;

// kMinNSBetweenSamples must survive a left shift by kScale + 1 bits, i.e. it
// must still be representable when scaled with a bit to spare.
static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) ==
               kMinNSBetweenSamples,
              "cannot represent kMinNSBetweenSamples when scaled");

// The calibration sample below is written under this lock and published to
// lock-free readers through the sequence counter "seq".
static absl::base_internal::SpinLock lock(
    absl::base_internal::kLinkerInitialized);
static std::atomic<uint64_t> seq(0);  // advanced by writers of last_sample

// Calibration data derived from the most recent kernel time sample, published
// to lock-free readers through "seq" above.
struct TimeSampleAtomic {
  std::atomic<uint64_t> raw_ns;              // raw kernel time at the sample
  std::atomic<uint64_t> base_ns;             // our time estimate at the sample
  std::atomic<uint64_t> base_cycles;         // cycle counter at the sample
  std::atomic<uint64_t> nsscaled_per_cycle;  // cycle period, scaled by 2^kScale
  // Once this many cycles have elapsed since base_cycles, the fast path must
  // fall back to the slow path and take a fresh sample.
  std::atomic<uint64_t> min_cycles_per_sample;
};

// The same data with plain (non-atomic) fields, for local copies.
struct TimeSample {
  uint64_t raw_ns;
  uint64_t base_ns;
  uint64_t base_cycles;
  uint64_t nsscaled_per_cycle;
  uint64_t min_cycles_per_sample;
};

static struct TimeSampleAtomic last_sample;  // the most recent sample

static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;

// Copy *atomic into *sample using relaxed loads.  Callers provide any ordering
// they need themselves, either by holding "lock" or via the seqlock protocol.
static void ReadTimeSampleAtomic(const struct TimeSampleAtomic *atomic,
                                 struct TimeSample *sample) {
  sample->base_ns = atomic->base_ns.load(std::memory_order_relaxed);
  sample->base_cycles = atomic->base_cycles.load(std::memory_order_relaxed);
  sample->nsscaled_per_cycle =
      atomic->nsscaled_per_cycle.load(std::memory_order_relaxed);
  sample->min_cycles_per_sample =
      atomic->min_cycles_per_sample.load(std::memory_order_relaxed);
  sample->raw_ns = atomic->raw_ns.load(std::memory_order_relaxed);
}

// Overview of GetCurrentTimeNanos():
//
// The fast path reads the cycle counter and the most recent calibration
// sample (last_sample) without taking a lock, using the seqlock protocol
// around "seq" to detect a concurrent writer.  If the sample was read
// consistently and is recent enough (fewer than min_cycles_per_sample cycles
// old), the current time is estimated as
//
//   base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale)
//
// i.e. the sampled time plus the elapsed cycles converted to nanoseconds with
// the calibrated cycle period.  Otherwise the slow path takes the spinlock,
// asks the kernel for the time, and (roughly every kMinNSBetweenSamples
// nanoseconds) recalibrates nsscaled_per_cycle so that the reported time
// stays monotonic and close to kernel time.
int64_t GetCurrentTimeNanos() {
  // Fields read from last_sample, plus the two reads of the sequence number.
  uint64_t base_ns;
  uint64_t base_cycles;
  uint64_t nsscaled_per_cycle;
  uint64_t min_cycles_per_sample;
  uint64_t seq_read0;
  uint64_t seq_read1;

  // Read the cycle counter first, so the reported time is as close as
  // possible to the moment the caller asked for it.
  uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();

  // First read of the sequence number; acquire ordering so the sample fields
  // read below are no older than this value.
  seq_read0 = seq.load(std::memory_order_acquire);

  base_ns = last_sample.base_ns.load(std::memory_order_relaxed);
  base_cycles = last_sample.base_cycles.load(std::memory_order_relaxed);
  nsscaled_per_cycle =
      last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
  min_cycles_per_sample =
      last_sample.min_cycles_per_sample.load(std::memory_order_relaxed);

  // The acquire fence orders the relaxed loads above before the second read
  // of the sequence number below.
  std::atomic_thread_fence(std::memory_order_acquire);

  // Second read of the sequence number.  If it differs from the first, or is
  // odd, a writer was active while the sample was being read; fall back to
  // the slow path rather than retrying.
  seq_read1 = seq.load(std::memory_order_relaxed);

  // The sample is usable if no writer intervened and it is recent enough.
  uint64_t delta_cycles = now_cycles - base_cycles;
  if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
      delta_cycles < min_cycles_per_sample) {
    return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
  }
  return GetCurrentTimeNanosSlowPath();
}
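
// Worked example of the fast path (illustrative numbers, assuming a 3 GHz
// cycle clock): nsscaled_per_cycle would be about 2^30 / 3 = 357,913,941, so
// after delta_cycles = 3,000,000 (one millisecond) the fast path returns
// base_ns + ((3,000,000 * 357,913,941) >> 30) = base_ns + 999,999 ns, i.e.
// one millisecond to within a nanosecond of rounding.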

// Compute (a << kScale) / b, dropping low-order bits of a as needed to avoid
// overflow of the shift.  Returns 0 if b (similarly scaled down) is 0.
static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) {
  // Find the largest safe_shift <= kScale such that (a << safe_shift) does
  // not overflow, and shift b down by the bits that could not be applied to a.
  int safe_shift = kScale;
  while (((a << safe_shift) >> safe_shift) != a) {
    safe_shift--;
  }
  uint64_t scaled_b = b >> (kScale - safe_shift);
  uint64_t quotient = 0;
  if (scaled_b != 0) {
    quotient = (a << safe_shift) / scaled_b;
  }
  return quotient;
}
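
// Example (illustrative): calibrating against a 3 GHz clock, one second of
// kernel time over 3,000,000,000 cycles gives
//   SafeDivideAndScale(1000000000, 3000000000)
//     = (10^9 << 30) / (3 * 10^9) = 357,913,941,
// the "nsscaled" cycle period used by the fast path above.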

static uint64_t UpdateLastSample(
    uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles,
    const struct TimeSample *sample) ABSL_ATTRIBUTE_COLD;

// The slow path of GetCurrentTimeNanos(), taken when the fast path cannot use
// the cached sample: it serializes callers with a spinlock, asks the kernel
// for the time, and periodically recalibrates the cycle-clock rate via
// UpdateLastSample().
ABSL_ATTRIBUTE_NOINLINE
static int64_t GetCurrentTimeNanosSlowPath() LOCKS_EXCLUDED(lock) {
  // Serialize access to the kernel time source and to last_sample.
  lock.Lock();

  // Read the kernel time and the cycle counter taken close to that reading.
  // last_now_cycles remembers the counter value from the previous slow-path
  // call (protected by lock).
  static uint64_t last_now_cycles;
  uint64_t now_cycles;
  uint64_t now_ns = GetCurrentTimeNanosFromKernel(last_now_cycles, &now_cycles);
  last_now_cycles = now_cycles;

  uint64_t estimated_base_ns;

  // Copy the current sample; the lock is held, so no writer can interfere.
  struct TimeSample sample;
  ReadTimeSampleAtomic(&last_sample, &sample);

  // If the sample is still recent enough, just extrapolate from it as the
  // fast path would have done; otherwise take a new sample and recalibrate.
  uint64_t delta_cycles = now_cycles - sample.base_cycles;
  if (delta_cycles < sample.min_cycles_per_sample) {
    estimated_base_ns = sample.base_ns +
        ((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
    stats_fast_slow_paths++;
  } else {
    estimated_base_ns =
        UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
  }

  lock.Unlock();

  return estimated_base_ns;
}

// Take a new calibration sample and update last_sample under "lock" and the
// seqlock.  Returns the estimate of the current time to report to the caller.
static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
                                 uint64_t delta_cycles,
                                 const struct TimeSample *sample)
    EXCLUSIVE_LOCKS_REQUIRED(lock) {
  uint64_t estimated_base_ns = now_ns;
  uint64_t lock_value = SeqAcquire(&seq);  // block readers of last_sample

  // If the previous sample is missing, too old (more than 5 s), or the kernel
  // time or cycle counter appears to have gone backwards, start over from the
  // kernel reading and discard the old calibration.
  if (sample->raw_ns == 0 ||
      sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
      now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
    // Reinitialize the sample; the rate will be measured on later calls.
    last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
    last_sample.base_ns.store(estimated_base_ns, std::memory_order_relaxed);
    last_sample.base_cycles.store(now_cycles, std::memory_order_relaxed);
    last_sample.nsscaled_per_cycle.store(0, std::memory_order_relaxed);
    last_sample.min_cycles_per_sample.store(0, std::memory_order_relaxed);
    stats_initializations++;
  } else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
             sample->base_cycles + 100 < now_cycles) {
    // Enough time (at least 0.5 s) and cycles have passed to recalibrate.
    if (sample->nsscaled_per_cycle != 0) {
      // Estimate the current time by extrapolating from the old sample with
      // the old rate, shifting delta_cycles down just enough to avoid
      // overflow in the multiplication.
      uint64_t estimated_scaled_ns;
      int s = -1;
      do {
        s++;
        estimated_scaled_ns = (delta_cycles >> s) * sample->nsscaled_per_cycle;
      } while (estimated_scaled_ns / sample->nsscaled_per_cycle !=
               (delta_cycles >> s));
      estimated_base_ns = sample->base_ns +
                          (estimated_scaled_ns >> (kScale - s));
    }

    // Measure the cycle period over the sampling interval: nanoseconds of
    // kernel time per cycle, scaled by 2^kScale.
    uint64_t ns = now_ns - sample->raw_ns;
    uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles);

    uint64_t assumed_next_sample_delta_cycles =
        SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);

    // How far our estimate drifted from the kernel's time (may be negative).
    int64_t diff_ns = now_ns - estimated_base_ns;

    // Choose the rate for the next interval so that most (15/16) of the
    // current drift, rather than all of it, is absorbed by the next sample;
    // this damps oscillations while keeping the reported time monotonic and
    // steering it back toward kernel time.
    ns = kMinNSBetweenSamples + diff_ns - (diff_ns / 16);
    uint64_t new_nsscaled_per_cycle =
        SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
    if (new_nsscaled_per_cycle != 0 &&
        diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
      // The drift is under 100 ms in either direction; accept the new rate.
      last_sample.nsscaled_per_cycle.store(
          new_nsscaled_per_cycle, std::memory_order_relaxed);
      uint64_t new_min_cycles_per_sample =
          SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
      last_sample.min_cycles_per_sample.store(
          new_min_cycles_per_sample, std::memory_order_relaxed);
      stats_calibrations++;
    } else {
      // The drift is too large; fall back to kernel time and discard the
      // calibration until a fresh rate can be measured.
      last_sample.nsscaled_per_cycle.store(0, std::memory_order_relaxed);
      last_sample.min_cycles_per_sample.store(0, std::memory_order_relaxed);
      estimated_base_ns = now_ns;
      stats_reinitializations++;
    }
    last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
    last_sample.base_ns.store(estimated_base_ns, std::memory_order_relaxed);
    last_sample.base_cycles.store(now_cycles, std::memory_order_relaxed);
  } else {
    // Not enough time has passed to recalibrate; keep the old sample.
    stats_slow_paths++;
  }

  SeqRelease(&seq, lock_value);  // release the readers

  return estimated_base_ns;
}
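
// Example of a calibration result (illustrative, 3 GHz clock): with
// nsscaled_per_cycle of about 2^30 / 3, min_cycles_per_sample becomes
// (kMinNSBetweenSamples << 30) / (2^30 / 3), roughly 6.3e9 cycles, so the
// fast path serves readings for about 2.1 seconds before the next kernel
// sample is taken.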
}  // namespace absl
#endif  // ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS

namespace absl {
namespace {

// Returns the longest duration that SleepOnce() below can sleep in one call.
constexpr absl::Duration MaxSleep() {
#ifdef _WIN32
  // Windows Sleep() takes its argument as unsigned long milliseconds.
  return absl::Milliseconds(
      std::numeric_limits<unsigned long>::max());
#else
  return absl::Seconds(std::numeric_limits<time_t>::max());
#endif
}

// Sleeps for the given duration, which must not exceed MaxSleep().
void SleepOnce(absl::Duration to_sleep) {
#ifdef _WIN32
  Sleep(to_sleep / absl::Milliseconds(1));
#else
  struct timespec sleep_time = absl::ToTimespec(to_sleep);
  while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
    // Ignore signals and wait for the full interval to elapse.
  }
#endif
}

}  // namespace
}  // namespace absl

extern "C" {

ABSL_ATTRIBUTE_WEAK void AbslInternalSleepFor(absl::Duration duration) {
  // Sleep in chunks of at most MaxSleep() until the whole duration has
  // elapsed.  Defined weak and with C linkage so it can be overridden.
  while (duration > absl::ZeroDuration()) {
    absl::Duration to_sleep = std::min(duration, absl::MaxSleep());
    absl::SleepOnce(to_sleep);
    duration -= to_sleep;
  }
}

}  // extern "C"
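
// Example usage (illustrative): callers reach this through the public
// absl::SleepFor() wrapper rather than calling the C symbol directly, e.g.
//
//   absl::SleepFor(absl::Milliseconds(10));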