graphcycles.cc
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// GraphCycles provides incremental cycle detection on a dynamic
// graph using the following algorithm:
//
// A dynamic topological sort algorithm for directed acyclic graphs
// David J. Pearce, Paul H. J. Kelly
// Journal of Experimental Algorithmics (JEA)
// Volume 11, 2006, Article No. 1.7
//
// Brief summary of the algorithm:
//
// (1) Maintain a rank for each node that is consistent with the
//     topological sort of the graph, i.e., a path from x to y
//     implies rank[x] < rank[y].
// (2) When a new edge (x->y) is inserted, do nothing if rank[x] < rank[y].
// (3) Otherwise: adjust ranks in the neighborhood of x and y.
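//
// A small worked example of step (3), illustrative only: suppose nodes
// a and b hold ranks 0 and 1 and there are no edges yet.  Inserting b->a
// finds rank[b] = 1 >= rank[a] = 0, so a forward search from a (bounded
// above by rank[b]) collects {a} and a backward search from b (bounded
// below by rank[a]) collects {b}; the two sets are then re-ranked in
// order, giving rank[b] = 0 and rank[a] = 1, restoring the invariant.
// Had the forward search from a reached b itself, the new edge would
// close a cycle and be rejected instead.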

#include "absl/base/attributes.h"
// This file is a no-op if the required LowLevelAlloc support is missing.
#include "absl/base/internal/low_level_alloc.h"
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#include "absl/synchronization/internal/graphcycles.h"

#include <algorithm>
#include <array>
#include <limits>
#include "absl/base/internal/hide_ptr.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

// Do not use STL.  This module does not use standard memory allocation.

namespace absl {
namespace synchronization_internal {

namespace {

// Avoid LowLevelAlloc's default arena since it calls malloc hooks,
// inside which people do things like acquire Mutexes.
static absl::base_internal::SpinLock arena_mu(
    absl::base_internal::kLinkerInitialized);
static base_internal::LowLevelAlloc::Arena* arena;

static void InitArenaIfNecessary() {
  arena_mu.Lock();
  if (arena == nullptr) {
    arena = base_internal::LowLevelAlloc::NewArena(0);
  }
  arena_mu.Unlock();
}

// Number of inlined elements in Vec.  Hash table implementation
// relies on this being a power of two.
static const uint32_t kInline = 8;

// A simple LowLevelAlloc based resizable vector with inlined storage
// for a few elements.  T must be a plain type since constructor
// and destructor are not run on elements of type T managed by Vec.
template <typename T>
class Vec {
 public:
  Vec() { Init(); }
  ~Vec() { Discard(); }

  void clear() {
    Discard();
    Init();
  }

  bool empty() const { return size_ == 0; }
  uint32_t size() const { return size_; }
  T* begin() { return ptr_; }
  T* end() { return ptr_ + size_; }
  const T& operator[](uint32_t i) const { return ptr_[i]; }
  T& operator[](uint32_t i) { return ptr_[i]; }
  const T& back() const { return ptr_[size_-1]; }
  void pop_back() { size_--; }

  void push_back(const T& v) {
    if (size_ == capacity_) Grow(size_ + 1);
    ptr_[size_] = v;
    size_++;
  }

  void resize(uint32_t n) {
    if (n > capacity_) Grow(n);
    size_ = n;
  }

  void fill(const T& val) {
    for (uint32_t i = 0; i < size(); i++) {
      ptr_[i] = val;
    }
  }

  // Guarantees src is empty at end.
  // Provided for the hash table resizing code below.
  void MoveFrom(Vec<T>* src) {
    if (src->ptr_ == src->space_) {
      // Need to actually copy
      resize(src->size_);
      std::copy(src->ptr_, src->ptr_ + src->size_, ptr_);
      src->size_ = 0;
    } else {
      Discard();
      ptr_ = src->ptr_;
      size_ = src->size_;
      capacity_ = src->capacity_;
      src->Init();
    }
  }

 private:
  T* ptr_;
  T space_[kInline];
  uint32_t size_;
  uint32_t capacity_;

  void Init() {
    ptr_ = space_;
    size_ = 0;
    capacity_ = kInline;
  }

  void Discard() {
    if (ptr_ != space_) base_internal::LowLevelAlloc::Free(ptr_);
  }

  void Grow(uint32_t n) {
    while (capacity_ < n) {
      capacity_ *= 2;
    }
    size_t request = static_cast<size_t>(capacity_) * sizeof(T);
    T* copy = static_cast<T*>(
        base_internal::LowLevelAlloc::AllocWithArena(request, arena));
    std::copy(ptr_, ptr_ + size_, copy);
    Discard();
    ptr_ = copy;
  }

  Vec(const Vec&) = delete;
  Vec& operator=(const Vec&) = delete;
};
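
// For example (illustrative): a Vec<int32_t> keeps its first kInline (8)
// elements in space_ with no heap allocation; the ninth push_back() calls
// Grow(9), which doubles capacity_ to 16 and moves the elements into
// memory obtained from LowLevelAlloc.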

// A hash set of non-negative int32_t that uses Vec for its underlying storage.
class NodeSet {
 public:
  NodeSet() { Init(); }

  void clear() { Init(); }
  bool contains(int32_t v) const { return table_[FindIndex(v)] == v; }

  bool insert(int32_t v) {
    uint32_t i = FindIndex(v);
    if (table_[i] == v) {
      return false;
    }
    if (table_[i] == kEmpty) {
      // Only inserting over an empty cell increases the number of occupied
      // slots.
      occupied_++;
    }
    table_[i] = v;
    // Double when 75% full.
    if (occupied_ >= table_.size() - table_.size()/4) Grow();
    return true;
  }

  void erase(uint32_t v) {
    uint32_t i = FindIndex(v);
    if (static_cast<uint32_t>(table_[i]) == v) {
      table_[i] = kDel;
    }
  }

  // Iteration is done via HASH_FOR_EACH.
  // Example:
  //    HASH_FOR_EACH(elem, node->out) { ... }
#define HASH_FOR_EACH(elem, eset) \
  for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
  bool Next(int32_t* cursor, int32_t* elem) {
    while (static_cast<uint32_t>(*cursor) < table_.size()) {
      int32_t v = table_[*cursor];
      (*cursor)++;
      if (v >= 0) {
        *elem = v;
        return true;
      }
    }
    return false;
  }

 private:
  enum : int32_t { kEmpty = -1, kDel = -2 };
  Vec<int32_t> table_;
  uint32_t occupied_;     // Count of non-empty slots (includes deleted slots)

  static uint32_t Hash(uint32_t a) { return a * 41; }

  // Return index for storing v.  May return an empty index or deleted index
  int FindIndex(int32_t v) const {
    // Search starting at hash index.
    const uint32_t mask = table_.size() - 1;
    uint32_t i = Hash(v) & mask;
    int deleted_index = -1;  // If >= 0, index of first deleted element we see
    while (true) {
      int32_t e = table_[i];
      if (v == e) {
        return i;
      } else if (e == kEmpty) {
        // Return any previously encountered deleted slot.
        return (deleted_index >= 0) ? deleted_index : i;
      } else if (e == kDel && deleted_index < 0) {
        // Keep searching since v might be present later.
        deleted_index = i;
      }
      i = (i + 1) & mask;  // Linear probing; quadratic is slightly slower.
    }
  }

  void Init() {
    table_.clear();
    table_.resize(kInline);
    table_.fill(kEmpty);
    occupied_ = 0;
  }

  void Grow() {
    Vec<int32_t> copy;
    copy.MoveFrom(&table_);
    occupied_ = 0;
    table_.resize(copy.size() * 2);
    table_.fill(kEmpty);

    for (const auto& e : copy) {
      if (e >= 0) insert(e);
    }
  }

  NodeSet(const NodeSet&) = delete;
  NodeSet& operator=(const NodeSet&) = delete;
};
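
// For example (illustrative): with the initial table of size 8,
// insert(3) hashes to slot 3 (3*41 == 123, 123 & 7 == 3); insert(11)
// also hashes to slot 3 (11*41 == 451, 451 & 7 == 3) and linear-probes
// into slot 4.  The table doubles once occupied_ reaches 6 (75% of 8).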

// We encode a node index and a node version in GraphId.  The version
// number is incremented when the GraphId is freed, which automatically
// invalidates all copies of the GraphId.

inline GraphId MakeId(int32_t index, uint32_t version) {
  GraphId g;
  g.handle =
      (static_cast<uint64_t>(version) << 32) | static_cast<uint32_t>(index);
  return g;
}

inline int32_t NodeIndex(GraphId id) {
  return static_cast<uint32_t>(id.handle & 0xfffffffful);
}

inline uint32_t NodeVersion(GraphId id) {
  return static_cast<uint32_t>(id.handle >> 32);
}
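
// For example (illustrative): MakeId(/*index=*/5, /*version=*/2) produces
// handle 0x0000000200000005; NodeIndex() recovers 5 from the low 32 bits
// and NodeVersion() recovers 2 from the high 32 bits.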

struct Node {
  int32_t rank;               // rank number assigned by Pearce-Kelly algorithm
  uint32_t version;           // Current version number
  int32_t next_hash;          // Next entry in hash table
  bool visited;               // Temporary marker used by depth-first-search
  uintptr_t masked_ptr;       // User-supplied pointer
  NodeSet in;                 // List of immediate predecessor nodes in graph
  NodeSet out;                // List of immediate successor nodes in graph
  int priority;               // Priority of recorded stack trace.
  int nstack;                 // Depth of recorded stack trace.
  void* stack[40];            // stack[0,nstack-1] holds stack trace for node.
};

// Hash table for pointer to node index lookups.
class PointerMap {
 public:
  explicit PointerMap(const Vec<Node*>* nodes) : nodes_(nodes) {
    table_.fill(-1);
  }

  int32_t Find(void* ptr) {
    auto masked = base_internal::HidePtr(ptr);
    for (int32_t i = table_[Hash(ptr)]; i != -1;) {
      Node* n = (*nodes_)[i];
      if (n->masked_ptr == masked) return i;
      i = n->next_hash;
    }
    return -1;
  }

  void Add(void* ptr, int32_t i) {
    int32_t* head = &table_[Hash(ptr)];
    (*nodes_)[i]->next_hash = *head;
    *head = i;
  }

  int32_t Remove(void* ptr) {
    // Advance through linked list while keeping track of the
    // predecessor slot that points to the current entry.
    auto masked = base_internal::HidePtr(ptr);
    for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
      int32_t index = *slot;
      Node* n = (*nodes_)[index];
      if (n->masked_ptr == masked) {
        *slot = n->next_hash;  // Remove n from linked list
        n->next_hash = -1;
        return index;
      }
      slot = &n->next_hash;
    }
    return -1;
  }

 private:
  // Number of buckets in hash table for pointer lookups.
  static constexpr uint32_t kHashTableSize = 8171;  // should be prime

  const Vec<Node*>* nodes_;
  std::array<int32_t, kHashTableSize> table_;

  static uint32_t Hash(void* ptr) {
    return reinterpret_cast<uintptr_t>(ptr) % kHashTableSize;
  }
};
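
// For example (illustrative): two registered pointers whose addresses hash
// to the same bucket are chained through their nodes' next_hash fields;
// Add() pushes the new node's index onto the head of the bucket's list,
// and Find() walks the chain comparing masked_ptr values.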

}  // namespace

struct GraphCycles::Rep {
  Vec<Node*> nodes_;
  Vec<int32_t> free_nodes_;  // Indices for unused entries in nodes_
  PointerMap ptrmap_;

  // Temporary state.
  Vec<int32_t> deltaf_;  // Results of forward DFS
  Vec<int32_t> deltab_;  // Results of backward DFS
  Vec<int32_t> list_;    // All nodes to reprocess
  Vec<int32_t> merged_;  // Rank values to assign to list_ entries
  Vec<int32_t> stack_;   // Emulates recursion stack for depth-first searches

  Rep() : ptrmap_(&nodes_) {}
};

static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
  Node* n = rep->nodes_[NodeIndex(id)];
  return (n->version == NodeVersion(id)) ? n : nullptr;
}

GraphCycles::GraphCycles() {
  InitArenaIfNecessary();
  rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
      Rep;
}

GraphCycles::~GraphCycles() {
  for (auto* node : rep_->nodes_) {
    node->Node::~Node();
    base_internal::LowLevelAlloc::Free(node);
  }
  rep_->Rep::~Rep();
  base_internal::LowLevelAlloc::Free(rep_);
}

bool GraphCycles::CheckInvariants() const {
  Rep* r = rep_;
  NodeSet ranks;  // Set of ranks seen so far.
  for (uint32_t x = 0; x < r->nodes_.size(); x++) {
    Node* nx = r->nodes_[x];
    void* ptr = base_internal::UnhidePtr<void>(nx->masked_ptr);
    if (ptr != nullptr && static_cast<uint32_t>(r->ptrmap_.Find(ptr)) != x) {
      ABSL_RAW_LOG(FATAL, "Did not find live node in hash table %u %p", x, ptr);
    }
    if (nx->visited) {
      ABSL_RAW_LOG(FATAL, "Did not clear visited marker on node %u", x);
    }
    if (!ranks.insert(nx->rank)) {
      ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
    }
    HASH_FOR_EACH(y, nx->out) {
      Node* ny = r->nodes_[y];
      if (nx->rank >= ny->rank) {
        ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
                     nx->rank, ny->rank);
      }
    }
  }
  return true;
}

GraphId GraphCycles::GetId(void* ptr) {
  int32_t i = rep_->ptrmap_.Find(ptr);
  if (i != -1) {
    return MakeId(i, rep_->nodes_[i]->version);
  } else if (rep_->free_nodes_.empty()) {
    Node* n =
        new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
            Node;
    n->version = 1;  // Avoid 0 since it is used by InvalidGraphId()
    n->visited = false;
    n->rank = rep_->nodes_.size();
    n->masked_ptr = base_internal::HidePtr(ptr);
    n->nstack = 0;
    n->priority = 0;
    rep_->nodes_.push_back(n);
    rep_->ptrmap_.Add(ptr, n->rank);
    return MakeId(n->rank, n->version);
  } else {
    // Preserve preceding rank since the set of ranks in use must be
    // a permutation of [0,rep_->nodes_.size()-1].
    int32_t r = rep_->free_nodes_.back();
    rep_->free_nodes_.pop_back();
    Node* n = rep_->nodes_[r];
    n->masked_ptr = base_internal::HidePtr(ptr);
    n->nstack = 0;
    n->priority = 0;
    rep_->ptrmap_.Add(ptr, r);
    return MakeId(r, n->version);
  }
}

void GraphCycles::RemoveNode(void* ptr) {
  int32_t i = rep_->ptrmap_.Remove(ptr);
  if (i == -1) {
    return;
  }
  Node* x = rep_->nodes_[i];
  HASH_FOR_EACH(y, x->out) {
    rep_->nodes_[y]->in.erase(i);
  }
  HASH_FOR_EACH(y, x->in) {
    rep_->nodes_[y]->out.erase(i);
  }
  x->in.clear();
  x->out.clear();
  x->masked_ptr = base_internal::HidePtr<void>(nullptr);
  if (x->version == std::numeric_limits<uint32_t>::max()) {
    // Cannot use x any more
  } else {
    x->version++;  // Invalidates all copies of node.
    rep_->free_nodes_.push_back(i);
  }
}

void* GraphCycles::Ptr(GraphId id) {
  Node* n = FindNode(rep_, id);
  return n == nullptr ? nullptr
                      : base_internal::UnhidePtr<void>(n->masked_ptr);
}

bool GraphCycles::HasNode(GraphId node) {
  return FindNode(rep_, node) != nullptr;
}

bool GraphCycles::HasEdge(GraphId x, GraphId y) const {
  Node* xn = FindNode(rep_, x);
  return xn && FindNode(rep_, y) && xn->out.contains(NodeIndex(y));
}

void GraphCycles::RemoveEdge(GraphId x, GraphId y) {
  Node* xn = FindNode(rep_, x);
  Node* yn = FindNode(rep_, y);
  if (xn && yn) {
    xn->out.erase(NodeIndex(y));
    yn->in.erase(NodeIndex(x));
    // No need to update the rank assignment since a previous valid
    // rank assignment remains valid after an edge deletion.
  }
}

static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound);
static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound);
static void Reorder(GraphCycles::Rep* r);
static void Sort(const Vec<Node*>&, Vec<int32_t>* delta);
static void MoveToList(
    GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst);

bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
  Rep* r = rep_;
  const int32_t x = NodeIndex(idx);
  const int32_t y = NodeIndex(idy);
  Node* nx = FindNode(r, idx);
  Node* ny = FindNode(r, idy);
  if (nx == nullptr || ny == nullptr) return true;  // Expired ids

  if (nx == ny) return false;  // Self edge
  if (!nx->out.insert(y)) {
    // Edge already exists.
    return true;
  }

  ny->in.insert(x);

  if (nx->rank <= ny->rank) {
    // New edge is consistent with existing rank assignment.
    return true;
  }

  // Current rank assignments are incompatible with the new edge.  Recompute.
  // We only need to consider nodes that fall in the range [ny->rank,nx->rank].
  if (!ForwardDFS(r, y, nx->rank)) {
    // Found a cycle.  Undo the insertion and tell caller.
    nx->out.erase(y);
    ny->in.erase(x);
    // Since we do not call Reorder() on this path, clear any visited
    // markers left by ForwardDFS.
    for (const auto& d : r->deltaf_) {
      r->nodes_[d]->visited = false;
    }
    return false;
  }
  BackwardDFS(r, x, ny->rank);
  Reorder(r);
  return true;
}
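
// A minimal usage sketch (illustrative only; obj1 and obj2 stand for any
// two distinct registered objects):
//
//   GraphCycles gc;
//   GraphId a = gc.GetId(&obj1);
//   GraphId b = gc.GetId(&obj2);
//   gc.InsertEdge(a, b);   // returns true: ranks already consistent
//   gc.InsertEdge(b, a);   // returns false: would close the cycle a->b->a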

static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
  // Avoid recursion since stack space might be limited.
  // We instead keep a stack of nodes to visit.
  r->deltaf_.clear();
  r->stack_.clear();
  r->stack_.push_back(n);
  while (!r->stack_.empty()) {
    n = r->stack_.back();
    r->stack_.pop_back();
    Node* nn = r->nodes_[n];
    if (nn->visited) continue;

    nn->visited = true;
    r->deltaf_.push_back(n);

    HASH_FOR_EACH(w, nn->out) {
      Node* nw = r->nodes_[w];
      if (nw->rank == upper_bound) {
        return false;  // Cycle
      }
      if (!nw->visited && nw->rank < upper_bound) {
        r->stack_.push_back(w);
      }
    }
  }
  return true;
}

static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
  r->deltab_.clear();
  r->stack_.clear();
  r->stack_.push_back(n);
  while (!r->stack_.empty()) {
    n = r->stack_.back();
    r->stack_.pop_back();
    Node* nn = r->nodes_[n];
    if (nn->visited) continue;

    nn->visited = true;
    r->deltab_.push_back(n);

    HASH_FOR_EACH(w, nn->in) {
      Node* nw = r->nodes_[w];
      if (!nw->visited && lower_bound < nw->rank) {
        r->stack_.push_back(w);
      }
    }
  }
}

static void Reorder(GraphCycles::Rep* r) {
  Sort(r->nodes_, &r->deltab_);
  Sort(r->nodes_, &r->deltaf_);

  // Adds contents of delta lists to list_ (backwards deltas first).
  r->list_.clear();
  MoveToList(r, &r->deltab_, &r->list_);
  MoveToList(r, &r->deltaf_, &r->list_);

  // Produce sorted list of all ranks that will be reassigned.
  r->merged_.resize(r->deltab_.size() + r->deltaf_.size());
  std::merge(r->deltab_.begin(), r->deltab_.end(),
             r->deltaf_.begin(), r->deltaf_.end(),
             r->merged_.begin());

  // Assign the ranks in order to the collected list.
  for (uint32_t i = 0; i < r->list_.size(); i++) {
    r->nodes_[r->list_[i]]->rank = r->merged_[i];
  }
}

static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
  struct ByRank {
    const Vec<Node*>* nodes;
    bool operator()(int32_t a, int32_t b) const {
      return (*nodes)[a]->rank < (*nodes)[b]->rank;
    }
  };
  ByRank cmp;
  cmp.nodes = &nodes;
  std::sort(delta->begin(), delta->end(), cmp);
}

static void MoveToList(
    GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
  for (auto& v : *src) {
    int32_t w = v;
    v = r->nodes_[w]->rank;         // Replace v entry with its rank
    r->nodes_[w]->visited = false;  // Prepare for future DFS calls
    dst->push_back(w);
  }
}

int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
                          GraphId path[]) const {
  Rep* r = rep_;
  if (FindNode(r, idx) == nullptr || FindNode(r, idy) == nullptr) return 0;
  const int32_t x = NodeIndex(idx);
  const int32_t y = NodeIndex(idy);

  // Forward depth first search starting at x until we hit y.
  // As we descend into a node, we push it onto the path.
  // As we leave a node, we remove it from the path.
  int path_len = 0;

  NodeSet seen;
  r->stack_.clear();
  r->stack_.push_back(x);
  while (!r->stack_.empty()) {
    int32_t n = r->stack_.back();
    r->stack_.pop_back();
    if (n < 0) {
      // Marker to indicate that we are leaving a node
      path_len--;
      continue;
    }

    if (path_len < max_path_len) {
      path[path_len] = MakeId(n, rep_->nodes_[n]->version);
    }
    path_len++;
    r->stack_.push_back(-1);  // Will remove tentative path entry

    if (n == y) {
      return path_len;
    }

    HASH_FOR_EACH(w, r->nodes_[n]->out) {
      if (seen.insert(w)) {
        r->stack_.push_back(w);
      }
    }
  }

  return 0;
}
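
// For example (illustrative): if the only edge is a->b, then
// FindPath(a, b, 10, path) returns 2 and writes {a, b} into path[0..1];
// with max_path_len == 1 it still returns 2 (the true path length) but
// writes only path[0].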

bool GraphCycles::IsReachable(GraphId x, GraphId y) const {
  return FindPath(x, y, 0, nullptr) > 0;
}

void GraphCycles::UpdateStackTrace(GraphId id, int priority,
                                   int (*get_stack_trace)(void** stack, int)) {
  Node* n = FindNode(rep_, id);
  if (n == nullptr || n->priority >= priority) {
    return;
  }
  n->nstack = (*get_stack_trace)(n->stack, ABSL_ARRAYSIZE(n->stack));
  n->priority = priority;
}

int GraphCycles::GetStackTrace(GraphId id, void*** ptr) {
  Node* n = FindNode(rep_, id);
  if (n == nullptr) {
    *ptr = nullptr;
    return 0;
  } else {
    *ptr = n->stack;
    return n->nstack;
  }
}

}  // namespace synchronization_internal
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING