executor.cc
Go to the documentation of this file.
1 /*
2  *
3  * Copyright 2015 gRPC authors.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
20 
22 
#include <string.h>

#include <algorithm>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

#include "src/core/lib/gpr/tls.h"
35 
// Maximum closure-queue depth a thread may reach before Enqueue() starts
// hinting that another executor thread should be spawned.
#define MAX_DEPTH 2

// Trace helpers: log at INFO with an "EXECUTOR " prefix when the "executor"
// trace flag is enabled. EXECUTOR_TRACE takes printf-style arguments;
// EXECUTOR_TRACE0 is the zero-argument variant.
#define EXECUTOR_TRACE(format, ...)                       \
  do {                                                    \
    if (GRPC_TRACE_FLAG_ENABLED(executor_trace)) {        \
      gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \
    }                                                     \
  } while (0)

#define EXECUTOR_TRACE0(str)                       \
  do {                                             \
    if (GRPC_TRACE_FLAG_ENABLED(executor_trace)) { \
      gpr_log(GPR_INFO, "EXECUTOR " str);          \
    }                                              \
  } while (0)
51 
52 namespace grpc_core {
53 namespace {
54 
55 GPR_THREAD_LOCAL(ThreadState*) g_this_thread_state;
56 
57 Executor* executors[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)];
58 
59 void default_enqueue_short(grpc_closure* closure, grpc_error_handle error) {
60  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
61  closure, error, true /* is_short */);
62 }
63 
64 void default_enqueue_long(grpc_closure* closure, grpc_error_handle error) {
65  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
66  closure, error, false /* is_short */);
67 }
68 
69 void resolver_enqueue_short(grpc_closure* closure, grpc_error_handle error) {
70  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
71  closure, error, true /* is_short */);
72 }
73 
74 void resolver_enqueue_long(grpc_closure* closure, grpc_error_handle error) {
75  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
76  closure, error, false /* is_short */);
77 }
78 
79 using EnqueueFunc = void (*)(grpc_closure* closure, grpc_error_handle error);
80 
81 const EnqueueFunc
82  executor_enqueue_fns_[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)]
83  [static_cast<size_t>(ExecutorJobType::NUM_JOB_TYPES)] =
84  {{default_enqueue_short, default_enqueue_long},
85  {resolver_enqueue_short, resolver_enqueue_long}};
86 
87 } // namespace
88 
89 TraceFlag executor_trace(false, "executor");
90 
91 Executor::Executor(const char* name) : name_(name) {
95 }
96 
97 void Executor::Init() { SetThreading(true); }
98 
99 size_t Executor::RunClosures(const char* executor_name,
100  grpc_closure_list list) {
101  size_t n = 0;
102 
103  // In the executor, the ExecCtx for the thread is declared in the executor
104  // thread itself, but this is the point where we could start seeing
105  // application-level callbacks. No need to create a new ExecCtx, though,
106  // since there already is one and it is flushed (but not destructed) in this
107  // function itself. The ApplicationCallbackExecCtx will have its callbacks
108  // invoked on its destruction, which will be after completing any closures in
109  // the executor's closure list (which were explicitly scheduled onto the
110  // executor).
111  ApplicationCallbackExecCtx callback_exec_ctx(
113 
114  grpc_closure* c = list.head;
115  while (c != nullptr) {
116  grpc_closure* next = c->next_data.next;
117 #ifndef NDEBUG
118  EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c,
119  c->file_created, c->line_created);
120  c->scheduled = false;
121 #else
122  EXECUTOR_TRACE("(%s) run %p", executor_name, c);
123 #endif
124 #ifdef GRPC_ERROR_IS_ABSEIL_STATUS
126  internal::StatusMoveFromHeapPtr(c->error_data.error);
127  c->error_data.error = 0;
128  c->cb(c->cb_arg, std::move(error));
129 #else
131  reinterpret_cast<grpc_error_handle>(c->error_data.error);
132  c->error_data.error = 0;
133  c->cb(c->cb_arg, error);
135 #endif
136  c = next;
137  n++;
138  ExecCtx::Get()->Flush();
139  }
140 
141  return n;
142 }
143 
144 bool Executor::IsThreaded() const {
145  return gpr_atm_acq_load(&num_threads_) > 0;
146 }
147 
148 void Executor::SetThreading(bool threading) {
149  gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_);
150  EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading);
151 
152  if (threading) {
153  if (curr_num_threads > 0) {
154  EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads > 0", name_);
155  return;
156  }
157 
158  GPR_ASSERT(num_threads_ == 0);
160  thd_state_ = static_cast<ThreadState*>(
161  gpr_zalloc(sizeof(ThreadState) * max_threads_));
162 
163  for (size_t i = 0; i < max_threads_; i++) {
166  thd_state_[i].id = i;
167  thd_state_[i].name = name_;
168  thd_state_[i].thd = Thread();
170  }
171 
173  thd_state_[0].thd.Start();
174  } else { // !threading
175  if (curr_num_threads == 0) {
176  EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_);
177  return;
178  }
179 
180  for (size_t i = 0; i < max_threads_; i++) {
182  thd_state_[i].shutdown = true;
185  }
186 
187  /* Ensure no thread is adding a new thread. Once this is past, then no
188  * thread will try to add a new one either (since shutdown is true) */
191 
192  curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
193  for (gpr_atm i = 0; i < curr_num_threads; i++) {
194  thd_state_[i].thd.Join();
195  EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_,
196  i + 1, curr_num_threads);
197  }
198 
200  for (size_t i = 0; i < max_threads_; i++) {
204  }
205 
207 
208  // grpc_iomgr_shutdown_background_closure() will close all the registered
209  // fds in the background poller, and wait for all pending closures to
210  // finish. Thus, never call Executor::SetThreading(false) in the middle of
211  // an application.
212  // TODO(guantaol): create another method to finish all the pending closures
213  // registered in the background poller by Executor.
215  }
216 
217  EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading);
218 }
219 
221 
223  ThreadState* ts = static_cast<ThreadState*>(arg);
224  g_this_thread_state = ts;
225 
227 
228  size_t subtract_depth = 0;
229  for (;;) {
230  EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")",
231  ts->name, ts->id, subtract_depth);
232 
233  gpr_mu_lock(&ts->mu);
234  ts->depth -= subtract_depth;
235  // Wait for closures to be enqueued or for the executor to be shutdown
236  while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
237  ts->queued_long_job = false;
239  }
240 
241  if (ts->shutdown) {
242  EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id);
243  gpr_mu_unlock(&ts->mu);
244  break;
245  }
246 
247  grpc_closure_list closures = ts->elems;
249  gpr_mu_unlock(&ts->mu);
250 
251  EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id);
252 
254  subtract_depth = RunClosures(ts->name, closures);
255  }
256 
257  g_this_thread_state = nullptr;
258 }
259 
261  bool is_short) {
262  bool retry_push;
263 
264  do {
265  retry_push = false;
266  size_t cur_thread_count =
267  static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
268 
269  // If the number of threads is zero(i.e either the executor is not threaded
270  // or already shutdown), then queue the closure on the exec context itself
271  if (cur_thread_count == 0) {
272 #ifndef NDEBUG
273  EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure,
274  closure->file_created, closure->line_created);
275 #else
276  EXECUTOR_TRACE("(%s) schedule %p inline", name_, closure);
277 #endif
278  grpc_closure_list_append(ExecCtx::Get()->closure_list(), closure, error);
279  return;
280  }
281 
283  return;
284  }
285 
286  ThreadState* ts = g_this_thread_state;
287  if (ts == nullptr) {
288  ts = &thd_state_[HashPointer(ExecCtx::Get(), cur_thread_count)];
289  }
290 
291  ThreadState* orig_ts = ts;
292  bool try_new_thread = false;
293 
294  for (;;) {
295 #ifndef NDEBUG
297  "(%s) try to schedule %p (%s) (created %s:%d) to thread "
298  "%" PRIdPTR,
299  name_, closure, is_short ? "short" : "long", closure->file_created,
300  closure->line_created, ts->id);
301 #else
302  EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_,
303  closure, is_short ? "short" : "long", ts->id);
304 #endif
305 
306  gpr_mu_lock(&ts->mu);
307  if (ts->queued_long_job) {
308  // if there's a long job queued, we never queue anything else to this
309  // queue (since long jobs can take 'infinite' time and we need to
310  // guarantee no starvation). Spin through queues and try again
311  gpr_mu_unlock(&ts->mu);
312  size_t idx = ts->id;
313  ts = &thd_state_[(idx + 1) % cur_thread_count];
314  if (ts == orig_ts) {
315  // We cycled through all the threads. Retry enqueue again by creating
316  // a new thread
317  //
318  // TODO (sreek): There is a potential issue here. We are
319  // unconditionally setting try_new_thread to true here. What if the
320  // executor is shutdown OR if cur_thread_count is already equal to
321  // max_threads ?
322  // (Fortunately, this is not an issue yet (as of july 2018) because
323  // there is only one instance of long job in gRPC and hence we will
324  // not hit this code path)
325  retry_push = true;
326  try_new_thread = true;
327  break;
328  }
329 
330  continue; // Try the next thread-state
331  }
332 
333  // == Found the thread state (i.e thread) to enqueue this closure! ==
334 
335  // Also, if this thread has been waiting for closures, wake it up.
336  // - If grpc_closure_list_empty() is true and the Executor is not
337  // shutdown, it means that the thread must be waiting in ThreadMain()
338  // - Note that gpr_cv_signal() won't immediately wakeup the thread. That
339  // happens after we release the mutex &ts->mu a few lines below
340  if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
341  gpr_cv_signal(&ts->cv);
342  }
343 
345 
346  // If we already queued more than MAX_DEPTH number of closures on this
347  // thread, use this as a hint to create more threads
348  ts->depth++;
349  try_new_thread = ts->depth > MAX_DEPTH &&
350  cur_thread_count < max_threads_ && !ts->shutdown;
351 
352  ts->queued_long_job = !is_short;
353 
354  gpr_mu_unlock(&ts->mu);
355  break;
356  }
357 
358  if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
359  cur_thread_count = static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
360  if (cur_thread_count < max_threads_) {
361  // Increment num_threads (safe to do a store instead of a cas because we
362  // always increment num_threads under the 'adding_thread_lock')
363  gpr_atm_rel_store(&num_threads_, cur_thread_count + 1);
364 
365  thd_state_[cur_thread_count].thd =
366  Thread(name_, &Executor::ThreadMain, &thd_state_[cur_thread_count]);
367  thd_state_[cur_thread_count].thd.Start();
368  }
370  }
371  } while (retry_push);
372 }
373 
374 // Executor::InitAll() and Executor::ShutdownAll() functions are called in the
375 // the grpc_init() and grpc_shutdown() code paths which are protected by a
376 // global mutex. So it is okay to assume that these functions are thread-safe
378  EXECUTOR_TRACE0("Executor::InitAll() enter");
379 
380  // Return if Executor::InitAll() is already called earlier
381  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] != nullptr) {
382  GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] !=
383  nullptr);
384  return;
385  }
386 
387  executors[static_cast<size_t>(ExecutorType::DEFAULT)] =
388  new Executor("default-executor");
389  executors[static_cast<size_t>(ExecutorType::RESOLVER)] =
390  new Executor("resolver-executor");
391 
392  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Init();
393  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Init();
394 
395  EXECUTOR_TRACE0("Executor::InitAll() done");
396 }
397 
399  ExecutorType executor_type, ExecutorJobType job_type) {
400  executor_enqueue_fns_[static_cast<size_t>(executor_type)]
401  [static_cast<size_t>(job_type)](closure, error);
402 }
403 
405  EXECUTOR_TRACE0("Executor::ShutdownAll() enter");
406 
407  // Return if Executor:SshutdownAll() is already called earlier
408  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] == nullptr) {
409  GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] ==
410  nullptr);
411  return;
412  }
413 
414  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Shutdown();
415  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Shutdown();
416 
417  // Delete the executor objects.
418  //
419  // NOTE: It is important to call Shutdown() on all executors first before
420  // calling delete because it is possible for one executor (that is not
421  // shutdown yet) to call Enqueue() on a different executor which is already
422  // shutdown. This is legal and in such cases, the Enqueue() operation
423  // effectively "fails" and enqueues that closure on the calling thread's
424  // exec_ctx.
425  //
426  // By ensuring that all executors are shutdown first, we are also ensuring
427  // that no thread is active across all executors.
428 
429  delete executors[static_cast<size_t>(ExecutorType::DEFAULT)];
430  delete executors[static_cast<size_t>(ExecutorType::RESOLVER)];
431  executors[static_cast<size_t>(ExecutorType::DEFAULT)] = nullptr;
432  executors[static_cast<size_t>(ExecutorType::RESOLVER)] = nullptr;
433 
434  EXECUTOR_TRACE0("Executor::ShutdownAll() done");
435 }
436 
437 bool Executor::IsThreaded(ExecutorType executor_type) {
438  GPR_ASSERT(executor_type < ExecutorType::NUM_EXECUTORS);
439  return executors[static_cast<size_t>(executor_type)]->IsThreaded();
440 }
441 
444 }
445 
446 void Executor::SetThreadingAll(bool enable) {
447  EXECUTOR_TRACE("Executor::SetThreadingAll(%d) called", enable);
448  for (size_t i = 0; i < static_cast<size_t>(ExecutorType::NUM_EXECUTORS);
449  i++) {
450  executors[i]->SetThreading(enable);
451  }
452 }
453 
454 void Executor::SetThreadingDefault(bool enable) {
455  EXECUTOR_TRACE("Executor::SetThreadingDefault(%d) called", enable);
456  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->SetThreading(enable);
457 }
458 
460 
461 } // namespace grpc_core
gpr_cv_signal
GPRAPI void gpr_cv_signal(gpr_cv *cv)
gpr_cpu_num_cores
GPRAPI unsigned gpr_cpu_num_cores(void)
grpc_core::ThreadState::mu
gpr_mu mu
Definition: executor.h:31
gpr_mu_unlock
GPRAPI void gpr_mu_unlock(gpr_mu *mu)
gpr_atm_no_barrier_load
#define gpr_atm_no_barrier_load(p)
Definition: impl/codegen/atm_gcc_atomic.h:53
log.h
grpc_core::Executor::max_threads_
size_t max_threads_
Definition: executor.h:112
iomgr_internal.h
grpc_core::Executor::SetThreading
void SetThreading(bool threading)
Definition: executor.cc:148
grpc_core::Executor::IsThreadedDefault
static bool IsThreadedDefault()
Definition: executor.cc:442
grpc_core
Definition: call_metric_recorder.h:31
GRPC_CLOSURE_LIST_INIT
#define GRPC_CLOSURE_LIST_INIT
Definition: closure.h:167
string.h
gpr_free
GPRAPI void gpr_free(void *ptr)
Definition: alloc.cc:51
useful.h
error
grpc_error_handle error
Definition: retry_filter.cc:499
grpc_iomgr_platform_shutdown_background_closure
void grpc_iomgr_platform_shutdown_background_closure()
Definition: iomgr_internal.cc:41
u
OPENSSL_EXPORT pem_password_cb void * u
Definition: pem.h:351
grpc_core::Executor::Executor
Executor(const char *executor_name)
Definition: executor.cc:91
grpc_core::ThreadState
Definition: executor.h:30
gpr_inf_future
GPRAPI gpr_timespec gpr_inf_future(gpr_clock_type type)
Definition: src/core/lib/gpr/time.cc:55
grpc_core::ApplicationCallbackExecCtx
Definition: exec_ctx.h:283
setup.name
name
Definition: setup.py:542
grpc_iomgr_platform_add_closure_to_background_poller
bool grpc_iomgr_platform_add_closure_to_background_poller(grpc_closure *closure, grpc_error_handle error)
Definition: iomgr_internal.cc:49
absl::Enqueue
static PerThreadSynch * Enqueue(PerThreadSynch *head, SynchWaitParams *waitp, intptr_t mu, int flags)
Definition: abseil-cpp/absl/synchronization/mutex.cc:889
grpc_core::Executor::ShutdownAll
static void ShutdownAll()
Definition: executor.cc:404
grpc_core::Executor::SetThreadingAll
static void SetThreadingAll(bool enable)
Definition: executor.cc:446
name_
const std::string name_
Definition: priority.cc:233
gpr_spinlock_lock
#define gpr_spinlock_lock(lock)
Definition: src/core/lib/gpr/spinlock.h:49
grpc_core::Executor::Run
static void Run(grpc_closure *closure, grpc_error_handle error, ExecutorType executor_type=ExecutorType::DEFAULT, ExecutorJobType job_type=ExecutorJobType::SHORT)
Definition: executor.cc:398
grpc_closure_list_append
bool grpc_closure_list_append(grpc_closure_list *closure_list, grpc_closure *closure)
Definition: closure.h:176
grpc_core::Executor::num_threads_
gpr_atm num_threads_
Definition: executor.h:113
grpc_core::executor_trace
TraceFlag executor_trace(false, "executor")
gpr_zalloc
GPRAPI void * gpr_zalloc(size_t size)
Definition: alloc.cc:40
memory.h
gpr_mu_destroy
GPRAPI void gpr_mu_destroy(gpr_mu *mu)
absl::move
constexpr absl::remove_reference_t< T > && move(T &&t) noexcept
Definition: abseil-cpp/absl/utility/utility.h:221
GPR_ASSERT
#define GPR_ASSERT(x)
Definition: include/grpc/impl/codegen/log.h:94
gpr_cv_destroy
GPRAPI void gpr_cv_destroy(gpr_cv *cv)
grpc_core::ExecutorJobType::NUM_JOB_TYPES
@ NUM_JOB_TYPES
max
int max
Definition: bloaty/third_party/zlib/examples/enough.c:170
grpc_closure_list::head
grpc_closure * head
Definition: closure.h:42
grpc_core::ExecCtx::Flush
bool Flush()
Definition: exec_ctx.cc:69
mu
Mutex mu
Definition: server_config_selector_filter.cc:74
gpr_mu_init
GPRAPI void gpr_mu_init(gpr_mu *mu)
grpc_core::Executor::RunClosures
static size_t RunClosures(const char *executor_name, grpc_closure_list list)
Definition: executor.cc:99
grpc_core::ThreadState::name
const char * name
Definition: executor.h:33
grpc_core::ThreadState::shutdown
bool shutdown
Definition: executor.h:37
closure
grpc_closure closure
Definition: src/core/lib/surface/server.cc:466
grpc_core::Executor::IsThreaded
bool IsThreaded() const
Definition: executor.cc:144
gpr_atm_acq_load
#define gpr_atm_acq_load(p)
Definition: impl/codegen/atm_gcc_atomic.h:52
grpc_core::ThreadState::queued_long_job
bool queued_long_job
Definition: executor.h:38
grpc_core::ThreadState::elems
grpc_closure_list elems
Definition: executor.h:35
grpc_core::Executor::SetThreadingDefault
static void SetThreadingDefault(bool enable)
Definition: executor.cc:454
MAX_DEPTH
#define MAX_DEPTH
Definition: executor.cc:36
cpu.h
arg
Definition: cmdline.cc:40
gpr_spinlock_unlock
#define gpr_spinlock_unlock(lock)
Definition: src/core/lib/gpr/spinlock.h:41
gpr_cv_wait
GPRAPI int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline)
EXECUTOR_TRACE
#define EXECUTOR_TRACE(format,...)
Definition: executor.cc:38
gpr_atm_rel_store
#define gpr_atm_rel_store(p, value)
Definition: impl/codegen/atm_gcc_atomic.h:54
GPR_CLOCK_MONOTONIC
@ GPR_CLOCK_MONOTONIC
Definition: gpr_types.h:36
grpc_core::Thread::Join
void Join()
Definition: thd.h:141
gpr_mu_lock
GPRAPI void gpr_mu_lock(gpr_mu *mu)
grpc_core::HashPointer
constexpr size_t HashPointer(T *p, size_t range)
Definition: useful.h:102
GPR_THREAD_LOCAL
#define GPR_THREAD_LOCAL(type)
Definition: tls.h:151
grpc_core::ExecCtx
Definition: exec_ctx.h:97
grpc_core::Thread::Start
void Start()
Definition: thd.h:125
n
int n
Definition: abseil-cpp/absl/container/btree_test.cc:1080
setup.idx
idx
Definition: third_party/bloaty/third_party/capstone/bindings/python/setup.py:197
grpc_core::Executor::thd_state_
ThreadState * thd_state_
Definition: executor.h:111
executor.h
grpc_core::ExecutorType::DEFAULT
@ DEFAULT
grpc_closure_list
Definition: closure.h:41
gpr_spinlock_trylock
#define gpr_spinlock_trylock(lock)
Definition: src/core/lib/gpr/spinlock.h:40
GPR_SPINLOCK_STATIC_INITIALIZER
#define GPR_SPINLOCK_STATIC_INITIALIZER
Definition: src/core/lib/gpr/spinlock.h:37
grpc_core::Executor::InitAll
static void InitAll()
Definition: executor.cc:377
GRPC_APP_CALLBACK_EXEC_CTX_FLAG_IS_INTERNAL_THREAD
#define GRPC_APP_CALLBACK_EXEC_CTX_FLAG_IS_INTERNAL_THREAD
Definition: exec_ctx.h:55
gpr_atm
intptr_t gpr_atm
Definition: impl/codegen/atm_gcc_atomic.h:32
grpc_core::Executor::name_
const char * name_
Definition: executor.h:110
cv
unsigned cv
Definition: cxa_demangle.cpp:4908
grpc_core::Executor::Enqueue
void Enqueue(grpc_closure *closure, grpc_error_handle error, bool is_short)
Definition: executor.cc:260
grpc_core::ExecutorType
ExecutorType
Definition: executor.h:42
exec_ctx
grpc_core::ExecCtx exec_ctx
Definition: end2end_binder_transport_test.cc:75
EXECUTOR_TRACE0
#define EXECUTOR_TRACE0(str)
Definition: executor.cc:45
alloc.h
next
AllocList * next[kMaxLevel]
Definition: abseil-cpp/absl/base/internal/low_level_alloc.cc:100
arg
struct arg arg
exec_ctx.h
grpc_core::Executor::Init
void Init()
Definition: executor.cc:97
closure
Definition: proxy.cc:59
tls.h
grpc_core::Thread
Definition: thd.h:43
GRPC_ERROR_UNREF
#define GRPC_ERROR_UNREF(err)
Definition: error.h:262
grpc_core::internal::StatusMoveFromHeapPtr
absl::Status StatusMoveFromHeapPtr(uintptr_t ptr)
Move the status from a heap ptr. (GetFrom & FreeHeap)
Definition: status_helper.cc:440
grpc_core::Executor::ThreadMain
static void ThreadMain(void *arg)
Definition: executor.cc:222
grpc_core::ExecutorType::RESOLVER
@ RESOLVER
grpc_core::ThreadState::id
size_t id
Definition: executor.h:32
grpc_core::Executor::Shutdown
void Shutdown()
Definition: executor.cc:220
grpc_core::ThreadState::cv
gpr_cv cv
Definition: executor.h:34
grpc_error
Definition: error_internal.h:42
grpc_core::ExecutorType::NUM_EXECUTORS
@ NUM_EXECUTORS
grpc_core::ThreadState::thd
Thread thd
Definition: executor.h:39
GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD
#define GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD
Definition: exec_ctx.h:51
grpc_core::ThreadState::depth
size_t depth
Definition: executor.h:36
grpc_closure_list_empty
bool grpc_closure_list_empty(grpc_closure_list closure_list)
Definition: closure.h:243
grpc_core::Executor::adding_thread_lock_
gpr_spinlock adding_thread_lock_
Definition: executor.h:114
sync.h
grpc_closure
Definition: closure.h:56
grpc_core::ExecutorJobType
ExecutorJobType
Definition: executor.h:49
grpc_core::ExecCtx::Get
static ExecCtx * Get()
Definition: exec_ctx.h:205
grpc_core::grpc_executor_global_init
void grpc_executor_global_init()
Definition: executor.cc:459
i
uint64_t i
Definition: abseil-cpp/absl/container/btree_benchmark.cc:230
gpr_cv_init
GPRAPI void gpr_cv_init(gpr_cv *cv)
grpc_core::ExecCtx::InvalidateNow
void InvalidateNow()
Definition: exec_ctx.h:188
port_platform.h


grpc
Author(s):
autogenerated on Thu Mar 13 2025 02:59:16