sync_test.cc
Go to the documentation of this file.
1 /*
2  *
3  * Copyright 2015 gRPC authors.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  *
17  */
18 
19 /* Test of gpr synchronization support. */
20 
#include <stdio.h>
#include <stdlib.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>

#include "src/core/lib/gprpp/thd.h"
#include "test/core/util/test_config.h"
31 
32 /* ==================Example use of interface===================
33 
34  A producer-consumer queue of up to N integers,
35  illustrating the use of the calls in this interface. */
36 
37 #define N 4
38 
39 typedef struct queue {
40  gpr_cv non_empty; /* Signalled when length becomes non-zero. */
41  gpr_cv non_full; /* Signalled when length becomes non-N. */
42  gpr_mu mu; /* Protects all fields below.
43  (That is, except during initialization or
44  destruction, the fields below should be accessed
45  only by a thread that holds mu.) */
46  int head; /* Index of head of queue 0..N-1. */
47  int length; /* Number of valid elements in queue 0..N. */
48  int elem[N]; /* elem[head .. head+length-1] are queue elements. */
49 } queue;
50 
51 /* Initialize *q. */
52 void queue_init(queue* q) {
53  gpr_mu_init(&q->mu);
54  gpr_cv_init(&q->non_empty);
55  gpr_cv_init(&q->non_full);
56  q->head = 0;
57  q->length = 0;
58 }
59 
60 /* Free storage associated with *q. */
61 void queue_destroy(queue* q) {
62  gpr_mu_destroy(&q->mu);
63  gpr_cv_destroy(&q->non_empty);
64  gpr_cv_destroy(&q->non_full);
65 }
66 
67 /* Wait until there is room in *q, then append x to *q. */
68 void queue_append(queue* q, int x) {
69  gpr_mu_lock(&q->mu);
70  /* To wait for a predicate without a deadline, loop on the negation of the
71  predicate, and use gpr_cv_wait(..., gpr_inf_future(GPR_CLOCK_REALTIME))
72  inside the loop
73  to release the lock, wait, and reacquire on each iteration. Code that
74  makes the condition true should use gpr_cv_broadcast() on the
75  corresponding condition variable. The predicate must be on state
76  protected by the lock. */
77  while (q->length == N) {
78  gpr_cv_wait(&q->non_full, &q->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
79  }
80  if (q->length == 0) { /* Wake threads blocked in queue_remove(). */
81  /* It's normal to use gpr_cv_broadcast() or gpr_signal() while
82  holding the lock. */
83  gpr_cv_broadcast(&q->non_empty);
84  }
85  q->elem[(q->head + q->length) % N] = x;
86  q->length++;
87  gpr_mu_unlock(&q->mu);
88 }
89 
90 /* If it can be done without blocking, append x to *q and return non-zero.
91  Otherwise return 0. */
92 int queue_try_append(queue* q, int x) {
93  int result = 0;
94  if (gpr_mu_trylock(&q->mu)) {
95  if (q->length != N) {
96  if (q->length == 0) { /* Wake threads blocked in queue_remove(). */
97  gpr_cv_broadcast(&q->non_empty);
98  }
99  q->elem[(q->head + q->length) % N] = x;
100  q->length++;
101  result = 1;
102  }
103  gpr_mu_unlock(&q->mu);
104  }
105  return result;
106 }
107 
108 /* Wait until the *q is non-empty or deadline abs_deadline passes. If the
109  queue is non-empty, remove its head entry, place it in *head, and return
110  non-zero. Otherwise return 0. */
111 int queue_remove(queue* q, int* head, gpr_timespec abs_deadline) {
112  int result = 0;
113  gpr_mu_lock(&q->mu);
114  /* To wait for a predicate with a deadline, loop on the negation of the
115  predicate or until gpr_cv_wait() returns true. Code that makes
116  the condition true should use gpr_cv_broadcast() on the corresponding
117  condition variable. The predicate must be on state protected by the
118  lock. */
119  while (q->length == 0 && !gpr_cv_wait(&q->non_empty, &q->mu, abs_deadline)) {
120  }
121  if (q->length != 0) { /* Queue is non-empty. */
122  result = 1;
123  if (q->length == N) { /* Wake threads blocked in queue_append(). */
124  gpr_cv_broadcast(&q->non_full);
125  }
126  *head = q->elem[q->head];
127  q->head = (q->head + 1) % N;
128  q->length--;
129  } /* else deadline exceeded */
130  gpr_mu_unlock(&q->mu);
131  return result;
132 }
133 
134 /* ------------------------------------------------- */
135 /* Tests for gpr_mu and gpr_cv, and the queue example. */
136 struct test {
137  int nthreads; /* number of threads */
139 
140  int64_t iterations; /* number of iterations per thread */
142  int thread_count; /* used to allocate thread ids */
143  int done; /* threads not yet completed */
144  int incr_step; /* how much to increment/decrement refcount each time */
145 
146  gpr_mu mu; /* protects iterations, counter, thread_count, done */
147 
148  gpr_cv cv; /* signalling depends on test */
149 
150  gpr_cv done_cv; /* signalled when done == 0 */
151 
153 
155 
159 };
160 
161 /* Return pointer to a new struct test. */
162 static struct test* test_new(int nthreads, int64_t iterations, int incr_step) {
163  struct test* m = static_cast<struct test*>(gpr_malloc(sizeof(*m)));
164  m->nthreads = nthreads;
165  m->threads = static_cast<grpc_core::Thread*>(
166  gpr_malloc(sizeof(*m->threads) * nthreads));
167  m->iterations = iterations;
168  m->counter = 0;
169  m->thread_count = 0;
170  m->done = nthreads;
171  m->incr_step = incr_step;
172  gpr_mu_init(&m->mu);
173  gpr_cv_init(&m->cv);
174  gpr_cv_init(&m->done_cv);
175  queue_init(&m->q);
176  gpr_stats_init(&m->stats_counter, 0);
177  gpr_ref_init(&m->refcount, 0);
178  gpr_ref_init(&m->thread_refcount, nthreads);
179  gpr_event_init(&m->event);
180  return m;
181 }
182 
183 /* Return pointer to a new struct test. */
184 static void test_destroy(struct test* m) {
185  gpr_mu_destroy(&m->mu);
186  gpr_cv_destroy(&m->cv);
187  gpr_cv_destroy(&m->done_cv);
188  queue_destroy(&m->q);
189  gpr_free(m->threads);
190  gpr_free(m);
191 }
192 
193 /* Create m->nthreads threads, each running (*body)(m) */
194 static void test_create_threads(struct test* m, void (*body)(void* arg)) {
195  int i;
196  for (i = 0; i != m->nthreads; i++) {
197  m->threads[i] = grpc_core::Thread("grpc_create_threads", body, m);
198  m->threads[i].Start();
199  }
200 }
201 
202 /* Wait until all threads report done. */
203 static void test_wait(struct test* m) {
204  gpr_mu_lock(&m->mu);
205  while (m->done != 0) {
206  gpr_cv_wait(&m->done_cv, &m->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
207  }
208  gpr_mu_unlock(&m->mu);
209  for (int i = 0; i != m->nthreads; i++) {
210  m->threads[i].Join();
211  }
212 }
213 
214 /* Get an integer thread id in the raneg 0..nthreads-1 */
215 static int thread_id(struct test* m) {
216  int id;
217  gpr_mu_lock(&m->mu);
218  id = m->thread_count++;
219  gpr_mu_unlock(&m->mu);
220  return id;
221 }
222 
223 /* Indicate that a thread is done, by decrementing m->done
224  and signalling done_cv if m->done==0. */
225 static void mark_thread_done(struct test* m) {
226  gpr_mu_lock(&m->mu);
227  GPR_ASSERT(m->done != 0);
228  m->done--;
229  if (m->done == 0) {
230  gpr_cv_signal(&m->done_cv);
231  }
232  gpr_mu_unlock(&m->mu);
233 }
234 
235 /* Test several threads running (*body)(struct test *m) for increasing settings
236  of m->iterations, until about timeout_s to 2*timeout_s seconds have elapsed.
237  If extra!=NULL, run (*extra)(m) in an additional thread.
238  incr_step controls by how much m->refcount should be incremented/decremented
239  (if at all) each time in the tests.
240  */
241 static void test(const char* name, void (*body)(void* m),
242  void (*extra)(void* m), int timeout_s, int incr_step) {
243  int64_t iterations = 8;
244  struct test* m;
247  gpr_timespec deadline = gpr_time_add(
248  start, gpr_time_from_micros(static_cast<int64_t>(timeout_s) * 1000000,
249  GPR_TIMESPAN));
250  fprintf(stderr, "%s:", name);
251  fflush(stderr);
252  while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0) {
253  fprintf(stderr, " %ld", static_cast<long>(iterations));
254  fflush(stderr);
255  m = test_new(10, iterations, incr_step);
256  grpc_core::Thread extra_thd;
257  if (extra != nullptr) {
258  extra_thd = grpc_core::Thread(name, extra, m);
259  extra_thd.Start();
260  m->done++; /* one more thread to wait for */
261  }
262  test_create_threads(m, body);
263  test_wait(m);
264  if (extra != nullptr) {
265  extra_thd.Join();
266  }
267  if (m->counter != m->nthreads * m->iterations * m->incr_step) {
268  fprintf(stderr, "counter %ld threads %d iterations %ld\n",
269  static_cast<long>(m->counter), m->nthreads,
270  static_cast<long>(m->iterations));
271  fflush(stderr);
272  GPR_ASSERT(0);
273  }
274  test_destroy(m);
275  iterations <<= 1;
276  }
278  fprintf(stderr, " done %lld.%09d s\n",
279  static_cast<long long>(time_taken.tv_sec),
280  static_cast<int>(time_taken.tv_nsec));
281  fflush(stderr);
282 }
283 
284 /* Increment m->counter on each iteration; then mark thread as done. */
285 static void inc(void* v /*=m*/) {
286  struct test* m = static_cast<struct test*>(v);
287  int64_t i;
288  for (i = 0; i != m->iterations; i++) {
289  gpr_mu_lock(&m->mu);
290  m->counter++;
291  gpr_mu_unlock(&m->mu);
292  }
294 }
295 
296 /* Increment m->counter under lock acquired with trylock, m->iterations times;
297  then mark thread as done. */
298 static void inctry(void* v /*=m*/) {
299  struct test* m = static_cast<struct test*>(v);
300  int64_t i;
301  for (i = 0; i != m->iterations;) {
302  if (gpr_mu_trylock(&m->mu)) {
303  m->counter++;
304  gpr_mu_unlock(&m->mu);
305  i++;
306  }
307  }
309 }
310 
311 /* Increment counter only when (m->counter%m->nthreads)==m->thread_id; then mark
312  thread as done. */
313 static void inc_by_turns(void* v /*=m*/) {
314  struct test* m = static_cast<struct test*>(v);
315  int64_t i;
316  int id = thread_id(m);
317  for (i = 0; i != m->iterations; i++) {
318  gpr_mu_lock(&m->mu);
319  while ((m->counter % m->nthreads) != id) {
321  }
322  m->counter++;
323  gpr_cv_broadcast(&m->cv);
324  gpr_mu_unlock(&m->mu);
325  }
327 }
328 
329 /* Wait a millisecond and increment counter on each iteration;
330  then mark thread as done. */
331 static void inc_with_1ms_delay(void* v /*=m*/) {
332  struct test* m = static_cast<struct test*>(v);
333  int64_t i;
334  for (i = 0; i != m->iterations; i++) {
335  gpr_timespec deadline;
336  gpr_mu_lock(&m->mu);
339  while (!gpr_cv_wait(&m->cv, &m->mu, deadline)) {
340  }
341  m->counter++;
342  gpr_mu_unlock(&m->mu);
343  }
345 }
346 
347 /* Wait a millisecond and increment counter on each iteration, using an event
348  for timing; then mark thread as done. */
349 static void inc_with_1ms_delay_event(void* v /*=m*/) {
350  struct test* m = static_cast<struct test*>(v);
351  int64_t i;
352  for (i = 0; i != m->iterations; i++) {
353  gpr_timespec deadline;
356  GPR_ASSERT(gpr_event_wait(&m->event, deadline) == nullptr);
357  gpr_mu_lock(&m->mu);
358  m->counter++;
359  gpr_mu_unlock(&m->mu);
360  }
362 }
363 
364 /* Produce m->iterations elements on queue m->q, then mark thread as done.
365  Even threads use queue_append(), and odd threads use queue_try_append()
366  until it succeeds. */
367 static void many_producers(void* v /*=m*/) {
368  struct test* m = static_cast<struct test*>(v);
369  int64_t i;
370  int x = thread_id(m);
371  if ((x & 1) == 0) {
372  for (i = 0; i != m->iterations; i++) {
373  queue_append(&m->q, 1);
374  }
375  } else {
376  for (i = 0; i != m->iterations; i++) {
377  while (!queue_try_append(&m->q, 1)) {
378  }
379  }
380  }
382 }
383 
384 /* Consume elements from m->q until m->nthreads*m->iterations are seen,
385  wait an extra second to confirm that no more elements are arriving,
386  then mark thread as done. */
387 static void consumer(void* v /*=m*/) {
388  struct test* m = static_cast<struct test*>(v);
389  int64_t n = m->iterations * m->nthreads;
390  int64_t i;
391  int value;
392  for (i = 0; i != n; i++) {
394  }
395  gpr_mu_lock(&m->mu);
396  m->counter = n;
397  gpr_mu_unlock(&m->mu);
398  GPR_ASSERT(
399  !queue_remove(&m->q, &value,
401  gpr_time_from_micros(1000000, GPR_TIMESPAN))));
403 }
404 
405 /* Increment m->stats_counter m->iterations times, transfer counter value to
406  m->counter, then mark thread as done. */
407 static void statsinc(void* v /*=m*/) {
408  struct test* m = static_cast<struct test*>(v);
409  int64_t i;
410  for (i = 0; i != m->iterations; i++) {
411  gpr_stats_inc(&m->stats_counter, 1);
412  }
413  gpr_mu_lock(&m->mu);
414  m->counter = gpr_stats_read(&m->stats_counter);
415  gpr_mu_unlock(&m->mu);
417 }
418 
419 /* Increment m->refcount by m->incr_step for m->iterations times. Decrement
420  m->thread_refcount once, and if it reaches zero, set m->event to (void*)1;
421  then mark thread as done. */
422 static void refinc(void* v /*=m*/) {
423  struct test* m = static_cast<struct test*>(v);
424  int64_t i;
425  for (i = 0; i != m->iterations; i++) {
426  if (m->incr_step == 1) {
427  gpr_ref(&m->refcount);
428  } else {
429  gpr_refn(&m->refcount, m->incr_step);
430  }
431  }
432  if (gpr_unref(&m->thread_refcount)) {
433  gpr_event_set(&m->event, reinterpret_cast<void*>(1));
434  }
436 }
437 
438 /* Wait until m->event is set to (void *)1, then decrement m->refcount by 1
439  (m->nthreads * m->iterations * m->incr_step) times, and ensure that the last
440  decrement caused the counter to reach zero, then mark thread as done. */
441 static void refcheck(void* v /*=m*/) {
442  struct test* m = static_cast<struct test*>(v);
443  int64_t n = m->iterations * m->nthreads * m->incr_step;
444  int64_t i;
446  (void*)1);
447  GPR_ASSERT(gpr_event_get(&m->event) == (void*)1);
448  for (i = 1; i != n; i++) {
449  GPR_ASSERT(!gpr_unref(&m->refcount));
450  m->counter++;
451  }
452  GPR_ASSERT(gpr_unref(&m->refcount));
453  m->counter++;
455 }
456 
457 /* ------------------------------------------------- */
458 
459 int main(int argc, char* argv[]) {
460  grpc::testing::TestEnvironment env(&argc, argv);
461  test("mutex", &inc, nullptr, 1, 1);
462  test("mutex try", &inctry, nullptr, 1, 1);
463  test("cv", &inc_by_turns, nullptr, 1, 1);
464  test("timedcv", &inc_with_1ms_delay, nullptr, 1, 1);
465  test("queue", &many_producers, &consumer, 10, 1);
466  test("stats_counter", &statsinc, nullptr, 1, 1);
467  test("refcount by 1", &refinc, &refcheck, 1, 1);
468  test("refcount by 3", &refinc, &refcheck, 1, 3); /* incr_step of 3 is an
469  arbitrary choice. Any
470  number > 1 is okay here */
471  test("timedevent", &inc_with_1ms_delay_event, nullptr, 1, 1);
472  return 0;
473 }
gpr_cv_signal
GPRAPI void gpr_cv_signal(gpr_cv *cv)
GPR_TIMESPAN
@ GPR_TIMESPAN
Definition: gpr_types.h:45
_gevent_test_main.result
result
Definition: _gevent_test_main.py:96
gpr_mu_unlock
GPRAPI void gpr_mu_unlock(gpr_mu *mu)
log.h
test::stats_counter
gpr_stats_counter stats_counter
Definition: sync_test.cc:154
gpr_refn
GPRAPI void gpr_refn(gpr_refcount *r, int n)
Definition: sync.cc:99
gpr_event_get
GPRAPI void * gpr_event_get(gpr_event *ev)
Definition: sync.cc:69
inc
static void inc(void *v)
Definition: sync_test.cc:285
generate.env
env
Definition: generate.py:37
test::nthreads
int nthreads
Definition: sync_test.cc:137
test
Definition: spinlock_test.cc:36
gpr_cv
pthread_cond_t gpr_cv
Definition: impl/codegen/sync_posix.h:48
gpr_event_set
GPRAPI void gpr_event_set(gpr_event *ev, void *value)
Definition: sync.cc:59
gpr_free
GPRAPI void gpr_free(void *ptr)
Definition: alloc.cc:51
gpr_stats_read
GPRAPI intptr_t gpr_stats_read(const gpr_stats_counter *c)
Definition: sync.cc:121
queue_init
void queue_init(queue *q)
Definition: sync_test.cc:52
gpr_malloc
GPRAPI void * gpr_malloc(size_t size)
Definition: alloc.cc:29
test::counter
int64_t counter
Definition: spinlock_test.cc:41
refcheck
static void refcheck(void *v)
Definition: sync_test.cc:441
gpr_inf_future
GPRAPI gpr_timespec gpr_inf_future(gpr_clock_type type)
Definition: src/core/lib/gpr/time.cc:55
setup.name
name
Definition: setup.py:542
time.h
test
static void test(const char *name, void(*body)(void *m), void(*extra)(void *m), int timeout_s, int incr_step)
Definition: sync_test.cc:241
refinc
static void refinc(void *v)
Definition: sync_test.cc:422
gpr_refcount
Definition: impl/codegen/sync_generic.h:39
gen_build_yaml.struct
def struct(**kwargs)
Definition: test/core/end2end/gen_build_yaml.py:30
inc_with_1ms_delay_event
static void inc_with_1ms_delay_event(void *v)
Definition: sync_test.cc:349
queue_remove
int queue_remove(queue *q, int *head, gpr_timespec abs_deadline)
Definition: sync_test.cc:111
test::done
int done
Definition: sync_test.cc:143
python_utils.port_server.stderr
stderr
Definition: port_server.py:51
gpr_stats_counter
Definition: impl/codegen/sync_generic.h:44
test::done_cv
gpr_cv done_cv
Definition: sync_test.cc:150
queue::elem
int elem[N]
Definition: sync_test.cc:48
gpr_stats_init
GPRAPI void gpr_stats_init(gpr_stats_counter *c, intptr_t n)
Definition: sync.cc:113
gpr_mu_destroy
GPRAPI void gpr_mu_destroy(gpr_mu *mu)
many_producers
static void many_producers(void *v)
Definition: sync_test.cc:367
queue_destroy
void queue_destroy(queue *q)
Definition: sync_test.cc:61
start
static uint64_t start
Definition: benchmark-pound.c:74
test::iterations
int64_t iterations
Definition: spinlock_test.cc:40
queue::length
int length
Definition: sync_test.cc:47
queue::non_empty
gpr_cv non_empty
Definition: sync_test.cc:40
GPR_ASSERT
#define GPR_ASSERT(x)
Definition: include/grpc/impl/codegen/log.h:94
queue
Definition: sync_test.cc:39
int64_t
signed __int64 int64_t
Definition: stdint-msvc2008.h:89
gpr_time_cmp
GPRAPI int gpr_time_cmp(gpr_timespec a, gpr_timespec b)
Definition: src/core/lib/gpr/time.cc:30
gpr_cv_destroy
GPRAPI void gpr_cv_destroy(gpr_cv *cv)
profile_analyzer.time_taken
time_taken
Definition: profile_analyzer.py:176
test::q
queue q
Definition: sync_test.cc:152
gpr_time_sub
GPRAPI gpr_timespec gpr_time_sub(gpr_timespec a, gpr_timespec b)
Definition: src/core/lib/gpr/time.cc:168
gpr_mu_init
GPRAPI void gpr_mu_init(gpr_mu *mu)
inc_with_1ms_delay
static void inc_with_1ms_delay(void *v)
Definition: sync_test.cc:331
setup.v
v
Definition: third_party/bloaty/third_party/capstone/bindings/python/setup.py:42
test::mu
gpr_mu mu
Definition: sync_test.cc:146
test_wait
static void test_wait(struct test *m)
Definition: sync_test.cc:203
consumer
static void consumer(void *v)
Definition: sync_test.cc:387
test::incr_step
int incr_step
Definition: spinlock_test.cc:42
arg
Definition: cmdline.cc:40
gpr_cv_wait
GPRAPI int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline)
inctry
static void inctry(void *v)
Definition: sync_test.cc:298
test_create_threads
static void test_create_threads(struct test *m, void(*body)(void *arg))
Definition: sync_test.cc:194
GPR_CLOCK_MONOTONIC
@ GPR_CLOCK_MONOTONIC
Definition: gpr_types.h:36
grpc_core::Thread::Join
void Join()
Definition: thd.h:141
gpr_mu_lock
GPRAPI void gpr_mu_lock(gpr_mu *mu)
x
int x
Definition: bloaty/third_party/googletest/googlemock/test/gmock-matchers_test.cc:3610
gpr_event_init
GPRAPI void gpr_event_init(gpr_event *ev)
Definition: sync.cc:54
nthreads
static unsigned int nthreads
Definition: threadpool.c:37
main
int main(int argc, char *argv[])
Definition: sync_test.cc:459
test::refcount
gpr_refcount refcount
Definition: sync_test.cc:156
mark_thread_done
static void mark_thread_done(struct test *m)
Definition: sync_test.cc:225
gpr_now
GPRAPI gpr_timespec gpr_now(gpr_clock_type clock)
grpc_core::Thread::Start
void Start()
Definition: thd.h:125
n
int n
Definition: abseil-cpp/absl/container/btree_test.cc:1080
gpr_event_wait
GPRAPI void * gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline)
Definition: sync.cc:73
queue::non_full
gpr_cv non_full
Definition: sync_test.cc:41
test_new
static struct test * test_new(int nthreads, int64_t iterations, int incr_step)
Definition: sync_test.cc:162
test_config.h
value
const char * value
Definition: hpack_parser_table.cc:165
queue_try_append
int queue_try_append(queue *q, int x)
Definition: sync_test.cc:92
test::thread_refcount
gpr_refcount thread_refcount
Definition: sync_test.cc:157
queue
struct queue queue
gpr_event
Definition: impl/codegen/sync_generic.h:31
thread_id
static int thread_id(struct test *m)
Definition: sync_test.cc:215
N
#define N
Definition: sync_test.cc:37
queue_append
void queue_append(queue *q, int x)
Definition: sync_test.cc:68
gpr_time_add
GPRAPI gpr_timespec gpr_time_add(gpr_timespec a, gpr_timespec b)
Definition: src/core/lib/gpr/time.cc:135
inc_by_turns
static void inc_by_turns(void *v)
Definition: sync_test.cc:313
gpr_mu
pthread_mutex_t gpr_mu
Definition: impl/codegen/sync_posix.h:47
queue::head
int head
Definition: sync_test.cc:46
gpr_mu_trylock
GPRAPI int gpr_mu_trylock(gpr_mu *mu)
gpr_time_from_micros
GPRAPI gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type clock_type)
Definition: src/core/lib/gpr/time.cc:115
alloc.h
gpr_stats_inc
GPRAPI void gpr_stats_inc(gpr_stats_counter *c, intptr_t inc)
Definition: sync.cc:117
grpc::testing::TestEnvironment
Definition: test/core/util/test_config.h:54
thd.h
statsinc
static void statsinc(void *v)
Definition: sync_test.cc:407
grpc_core::Thread
Definition: thd.h:43
test::event
gpr_event event
Definition: sync_test.cc:158
test::threads
grpc_core::Thread * threads
Definition: spinlock_test.cc:38
test_destroy
static void test_destroy(struct test *m)
Definition: sync_test.cc:184
test::thread_count
int thread_count
Definition: spinlock_test.cc:37
gpr_cv_broadcast
GPRAPI void gpr_cv_broadcast(gpr_cv *cv)
queue::mu
gpr_mu mu
Definition: sync_test.cc:42
gpr_ref_init
GPRAPI void gpr_ref_init(gpr_refcount *r, int n)
Definition: sync.cc:86
gpr_timespec
Definition: gpr_types.h:50
gpr_unref
GPRAPI int gpr_unref(gpr_refcount *r)
Definition: sync.cc:103
GPR_CLOCK_REALTIME
@ GPR_CLOCK_REALTIME
Definition: gpr_types.h:39
regress.m
m
Definition: regress/regress.py:25
sync.h
gpr_ref
GPRAPI void gpr_ref(gpr_refcount *r)
Definition: sync.cc:88
test::cv
gpr_cv cv
Definition: sync_test.cc:148
i
uint64_t i
Definition: abseil-cpp/absl/container/btree_benchmark.cc:230
gpr_cv_init
GPRAPI void gpr_cv_init(gpr_cv *cv)
id
uint32_t id
Definition: flow_control_fuzzer.cc:70


grpc
Author(s):
autogenerated on Fri May 16 2025 03:00:24