23 #ifdef GRPC_POSIX_SOCKET_EV_POLL
30 #include <sys/socket.h>
35 #include "absl/strings/str_cat.h"
51 #define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
56 typedef struct grpc_fd_watcher {
57 struct grpc_fd_watcher*
next;
58 struct grpc_fd_watcher* prev;
64 typedef struct grpc_cached_wakeup_fd grpc_cached_wakeup_fd;
67 struct grpc_fork_fd_list {
71 grpc_cached_wakeup_fd* cached_wakeup_fd;
73 grpc_fork_fd_list*
next;
74 grpc_fork_fd_list* prev;
115 grpc_fd_watcher inactive_watcher_root;
116 grpc_fd_watcher* read_watcher;
117 grpc_fd_watcher* write_watcher;
127 grpc_fork_fd_list* fork_fd_list;
131 static bool track_fds_for_fork =
false;
134 static grpc_fork_fd_list* fork_fd_list_head =
nullptr;
135 static gpr_mu fork_fd_list_mu;
155 static void fd_end_poll(grpc_fd_watcher*
watcher,
int got_read,
int got_write);
158 static bool fd_is_orphaned(
grpc_fd* fd);
161 static void fd_ref(
grpc_fd* fd,
const char* reason,
const char*
file,
int line);
162 static void fd_unref(
grpc_fd* fd,
const char* reason,
const char*
file,
164 #define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
165 #define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
167 static void fd_ref(
grpc_fd* fd);
168 static void fd_unref(
grpc_fd* fd);
169 #define GRPC_FD_REF(fd, reason) fd_ref(fd)
170 #define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
173 #define CLOSURE_NOT_READY ((grpc_closure*)0)
174 #define CLOSURE_READY ((grpc_closure*)1)
180 typedef struct grpc_cached_wakeup_fd {
182 struct grpc_cached_wakeup_fd*
next;
185 grpc_fork_fd_list* fork_fd_list;
186 } grpc_cached_wakeup_fd;
189 grpc_cached_wakeup_fd* wakeup_fd;
190 int reevaluate_polling_on_wakeup;
191 int kicked_specifically;
201 int kicked_without_pollers;
203 int pollset_set_count;
209 grpc_cached_wakeup_fd* local_wakeup_cache;
227 #define GRPC_POLLSET_CAN_KICK_SELF 1
229 #define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
247 size_t pollset_count;
248 size_t pollset_capacity;
251 size_t pollset_set_count;
252 size_t pollset_set_capacity;
264 static void fork_fd_list_remove_node(grpc_fork_fd_list* node) {
265 if (track_fds_for_fork) {
267 if (fork_fd_list_head == node) {
268 fork_fd_list_head = node->next;
270 if (node->prev !=
nullptr) {
271 node->prev->next = node->next;
273 if (node->next !=
nullptr) {
274 node->next->prev = node->prev;
281 static void fork_fd_list_add_node(grpc_fork_fd_list* node) {
283 node->next = fork_fd_list_head;
284 node->prev =
nullptr;
285 if (fork_fd_list_head !=
nullptr) {
286 fork_fd_list_head->prev = node;
288 fork_fd_list_head = node;
292 static void fork_fd_list_add_grpc_fd(
grpc_fd* fd) {
293 if (track_fds_for_fork) {
295 static_cast<grpc_fork_fd_list*
>(
gpr_malloc(
sizeof(grpc_fork_fd_list)));
296 fd->fork_fd_list->fd = fd;
297 fd->fork_fd_list->cached_wakeup_fd =
nullptr;
298 fork_fd_list_add_node(fd->fork_fd_list);
302 static void fork_fd_list_add_wakeup_fd(grpc_cached_wakeup_fd* fd) {
303 if (track_fds_for_fork) {
305 static_cast<grpc_fork_fd_list*
>(
gpr_malloc(
sizeof(grpc_fork_fd_list)));
306 fd->fork_fd_list->cached_wakeup_fd = fd;
307 fd->fork_fd_list->fd =
nullptr;
308 fork_fd_list_add_node(fd->fork_fd_list);
317 #define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
318 #define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
319 static void ref_by(
grpc_fd* fd,
int n,
const char* reason,
const char*
file,
323 "FD %d %p ref %d %" PRIdPTR
" -> %" PRIdPTR
" [%s; %s:%d]",
328 #define REF_BY(fd, n, reason) \
333 #define UNREF_BY(fd, n, reason) \
338 static void ref_by(
grpc_fd* fd,
int n) {
344 static void unref_by(
grpc_fd* fd,
int n,
const char* reason,
const char*
file,
348 "FD %d %p unref %d %" PRIdPTR
" -> %" PRIdPTR
" [%s; %s:%d]",
353 static void unref_by(
grpc_fd* fd,
int n) {
359 fork_fd_list_remove_node(fd->fork_fd_list);
363 #ifdef GRPC_ERROR_IS_ABSEIL_STATUS
364 fd->shutdown_error.~Status();
372 static grpc_fd* fd_create(
int fd,
const char*
name,
bool track_err) {
380 #ifdef GRPC_ERROR_IS_ABSEIL_STATUS
383 r->read_closure = CLOSURE_NOT_READY;
384 r->write_closure = CLOSURE_NOT_READY;
386 r->inactive_watcher_root.next =
r->inactive_watcher_root.prev =
387 &
r->inactive_watcher_root;
388 r->read_watcher =
r->write_watcher =
nullptr;
389 r->on_done_closure =
nullptr;
396 fork_fd_list_add_grpc_fd(
r);
400 static bool fd_is_orphaned(
grpc_fd* fd) {
409 GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
414 static void maybe_wake_one_watcher_locked(
grpc_fd* fd) {
415 if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
416 (void)pollset_kick_locked(fd->inactive_watcher_root.next);
417 }
else if (fd->read_watcher) {
418 (void)pollset_kick_locked(fd->read_watcher);
419 }
else if (fd->write_watcher) {
420 (void)pollset_kick_locked(fd->write_watcher);
424 static void wake_all_watchers_locked(
grpc_fd* fd) {
426 for (
watcher = fd->inactive_watcher_root.next;
428 (void)pollset_kick_locked(
watcher);
430 if (fd->read_watcher) {
431 (void)pollset_kick_locked(fd->read_watcher);
433 if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
434 (void)pollset_kick_locked(fd->write_watcher);
438 static int has_watchers(
grpc_fd* fd) {
439 return fd->read_watcher !=
nullptr || fd->write_watcher !=
nullptr ||
440 fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
443 static void close_fd_locked(
grpc_fd* fd) {
451 static int fd_wrapped_fd(
grpc_fd* fd) {
452 if (fd->released || fd->closed) {
460 const char* reason) {
461 fd->on_done_closure = on_done;
462 fd->released = release_fd !=
nullptr;
463 if (release_fd !=
nullptr) {
464 *release_fd = fd->fd;
468 REF_BY(fd, 1, reason);
469 if (!has_watchers(fd)) {
472 wake_all_watchers_locked(fd);
475 UNREF_BY(fd, 2, reason);
480 static void fd_ref(
grpc_fd* fd,
const char* reason,
const char*
file,
485 static void fd_unref(
grpc_fd* fd,
const char* reason,
const char*
file,
487 unref_by(fd, 2, reason,
file,
line);
490 static void fd_ref(
grpc_fd* fd) { ref_by(fd, 2); }
492 static void fd_unref(
grpc_fd* fd) { unref_by(fd, 2); }
500 "FD shutdown", &fd->shutdown_error, 1),
514 }
else if (*st == CLOSURE_NOT_READY) {
517 }
else if (*st == CLOSURE_READY) {
519 *st = CLOSURE_NOT_READY;
521 maybe_wake_one_watcher_locked(fd);
525 "User called a notify_on function with a previous callback still "
533 if (*st == CLOSURE_READY) {
536 }
else if (*st == CLOSURE_NOT_READY) {
543 *st = CLOSURE_NOT_READY;
553 fd->shutdown_error = why;
555 shutdown(fd->fd, SHUT_RDWR);
556 set_ready_locked(fd, &fd->read_closure);
557 set_ready_locked(fd, &fd->write_closure);
564 static bool fd_is_shutdown(
grpc_fd* fd) {
566 bool r = fd->shutdown;
573 notify_on_locked(fd, &fd->read_closure,
closure);
579 notify_on_locked(fd, &fd->write_closure,
closure);
590 static void fd_set_readable(
grpc_fd* fd) {
592 set_ready_locked(fd, &fd->read_closure);
596 static void fd_set_writable(
grpc_fd* fd) {
598 set_ready_locked(fd, &fd->write_closure);
602 static void fd_set_error(
grpc_fd* ) {
616 GRPC_FD_REF(fd,
"poll");
626 GRPC_FD_UNREF(fd,
"poll");
631 cur = fd->read_closure;
632 requested =
cur != CLOSURE_READY;
633 if (read_mask && fd->read_watcher ==
nullptr && requested) {
639 cur = fd->write_closure;
640 requested =
cur != CLOSURE_READY;
641 if (write_mask && fd->write_watcher ==
nullptr && requested) {
646 if (mask == 0 &&
worker !=
nullptr) {
647 watcher->next = &fd->inactive_watcher_root;
659 static void fd_end_poll(grpc_fd_watcher*
watcher,
int got_read,
int got_write) {
670 if (
watcher == fd->read_watcher) {
676 fd->read_watcher =
nullptr;
678 if (
watcher == fd->write_watcher) {
684 fd->write_watcher =
nullptr;
686 if (!was_polling &&
watcher->worker !=
nullptr) {
692 if (set_ready_locked(fd, &fd->read_closure)) {
697 if (set_ready_locked(fd, &fd->write_closure)) {
702 maybe_wake_one_watcher_locked(fd);
704 if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
709 GRPC_FD_UNREF(fd,
"poll");
725 return p->root_worker.next != &
p->root_worker;
729 return p->pollset_set_count;
733 return pollset_has_workers(
p) || pollset_in_pollset_sets(
p);
737 if (pollset_has_workers(
p)) {
747 worker->next = &
p->root_worker;
753 worker->prev = &
p->root_worker;
775 if (specific_worker !=
nullptr) {
776 if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
779 for (specific_worker =
p->root_worker.next;
780 specific_worker != &
p->root_worker;
781 specific_worker = specific_worker->next) {
785 p->kicked_without_pollers =
true;
786 }
else if (g_current_thread_worker != specific_worker) {
788 if ((
flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
789 specific_worker->reevaluate_polling_on_wakeup =
true;
791 specific_worker->kicked_specifically =
true;
792 kick_append_error(&
error,
794 }
else if ((
flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
796 if ((
flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
797 specific_worker->reevaluate_polling_on_wakeup =
true;
799 specific_worker->kicked_specifically =
true;
800 kick_append_error(&
error,
803 }
else if (g_current_thread_poller !=
p) {
806 specific_worker = pop_front_worker(
p);
807 if (specific_worker !=
nullptr) {
808 if (g_current_thread_worker == specific_worker) {
810 push_back_worker(
p, specific_worker);
811 specific_worker = pop_front_worker(
p);
812 if ((
flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
813 g_current_thread_worker == specific_worker) {
814 push_back_worker(
p, specific_worker);
815 specific_worker =
nullptr;
818 if (specific_worker !=
nullptr) {
820 push_back_worker(
p, specific_worker);
826 p->kicked_without_pollers =
true;
836 return pollset_kick_ext(
p, specific_worker, 0);
848 pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
849 pollset->shutting_down = 0;
850 pollset->called_shutdown = 0;
851 pollset->kicked_without_pollers = 0;
852 pollset->local_wakeup_cache =
nullptr;
853 pollset->kicked_without_pollers = 0;
854 pollset->fd_count = 0;
855 pollset->fd_capacity = 0;
856 pollset->fds =
nullptr;
857 pollset->pollset_set_count = 0;
862 while (pollset->local_wakeup_cache) {
863 grpc_cached_wakeup_fd*
next = pollset->local_wakeup_cache->next;
864 fork_fd_list_remove_node(pollset->local_wakeup_cache->fork_fd_list);
866 gpr_free(pollset->local_wakeup_cache);
867 pollset->local_wakeup_cache =
next;
877 for (
i = 0;
i < pollset->fd_count;
i++) {
878 if (pollset->fds[
i] == fd)
goto exit;
880 if (pollset->fd_count == pollset->fd_capacity) {
881 pollset->fd_capacity =
882 std::max(pollset->fd_capacity + 8, pollset->fd_count * 3 / 2);
883 pollset->fds =
static_cast<grpc_fd**
>(
886 pollset->fds[pollset->fd_count++] = fd;
887 GRPC_FD_REF(fd,
"multipoller");
895 for (
i = 0;
i < pollset->fd_count;
i++) {
896 GRPC_FD_UNREF(pollset->fds[
i],
"multipoller");
898 pollset->fd_count = 0;
917 if (worker_hdl) *worker_hdl = &
worker;
921 enum { inline_elements = 96 };
922 struct pollfd pollfd_space[inline_elements];
923 struct grpc_fd_watcher watcher_space[inline_elements];
926 int added_worker = 0;
929 int keep_polling = 0;
932 worker.reevaluate_polling_on_wakeup = 0;
933 if (pollset->local_wakeup_cache !=
nullptr) {
934 worker.wakeup_fd = pollset->local_wakeup_cache;
935 pollset->local_wakeup_cache =
worker.wakeup_fd->next;
937 worker.wakeup_fd =
static_cast<grpc_cached_wakeup_fd*
>(
940 fork_fd_list_add_wakeup_fd(
worker.wakeup_fd);
946 worker.kicked_specifically = 0;
948 if (pollset->shutting_down) {
956 g_current_thread_poller = pollset;
957 while (keep_polling) {
959 if (!pollset->kicked_without_pollers ||
962 push_front_worker(pollset, &
worker);
964 g_current_thread_worker = &
worker;
967 #define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
968 #define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
974 grpc_fd_watcher* watchers;
977 timeout = poll_deadline_to_millis_timeout(deadline);
979 if (pollset->fd_count + 2 <= inline_elements) {
981 watchers = watcher_space;
984 const size_t pfd_size =
sizeof(*pfds) * (pollset->fd_count + 2);
985 const size_t watch_size =
sizeof(*watchers) * (pollset->fd_count + 2);
988 watchers =
static_cast<grpc_fd_watcher*
>(
989 static_cast<void*
>((
static_cast<char*
>(
buf) + pfd_size)));
995 pfds[0].events = POLLIN;
997 for (
i = 0;
i < pollset->fd_count;
i++) {
998 if (fd_is_orphaned(pollset->fds[
i]) ||
1000 GRPC_FD_UNREF(pollset->fds[
i],
"multipoller");
1002 pollset->fds[fd_count++] = pollset->fds[
i];
1003 watchers[pfd_count].fd = pollset->fds[
i];
1004 GRPC_FD_REF(watchers[pfd_count].fd,
"multipoller_start");
1005 pfds[pfd_count].fd = pollset->fds[
i]->fd;
1006 pfds[pfd_count].revents = 0;
1010 pollset->fd_count = fd_count;
1013 for (
i = 1;
i < pfd_count;
i++) {
1015 pfds[
i].events =
static_cast<short>(
1016 fd_begin_poll(fd, pollset, &
worker, POLLIN, POLLOUT, &watchers[
i]));
1017 GRPC_FD_UNREF(fd,
"multipoller_start");
1032 if (errno != EINTR) {
1036 for (
i = 1;
i < pfd_count;
i++) {
1037 if (watchers[
i].fd ==
nullptr) {
1038 fd_end_poll(&watchers[
i], 0, 0);
1042 fd_end_poll(&watchers[
i], 1, 1);
1045 }
else if (
r == 0) {
1046 for (
i = 1;
i < pfd_count;
i++) {
1047 fd_end_poll(&watchers[
i], 0, 0);
1050 if (pfds[0].revents & POLLIN_CHECK) {
1057 for (
i = 1;
i < pfd_count;
i++) {
1058 if (watchers[
i].fd ==
nullptr) {
1059 fd_end_poll(&watchers[
i], 0, 0);
1063 pfds[
i].fd, (pfds[
i].revents & POLLIN_CHECK) != 0,
1064 (pfds[
i].revents & POLLOUT_CHECK) != 0, pfds[
i].revents);
1069 if (pfds[
i].revents & POLLHUP) {
1072 fd_end_poll(&watchers[
i], pfds[
i].revents & POLLIN_CHECK,
1073 pfds[
i].revents & POLLOUT_CHECK);
1078 if (pfds != pollfd_space) {
1086 pollset->kicked_without_pollers = 0;
1102 worker.reevaluate_polling_on_wakeup = 0;
1103 pollset->kicked_without_pollers = 0;
1104 if (queued_work ||
worker.kicked_specifically) {
1112 g_current_thread_poller =
nullptr;
1114 remove_worker(pollset, &
worker);
1115 g_current_thread_worker =
nullptr;
1118 worker.wakeup_fd->next = pollset->local_wakeup_cache;
1119 pollset->local_wakeup_cache =
worker.wakeup_fd;
1121 if (pollset->shutting_down) {
1122 if (pollset_has_workers(pollset)) {
1124 }
else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
1125 pollset->called_shutdown = 1;
1127 finish_shutdown(pollset);
1136 if (worker_hdl) *worker_hdl =
nullptr;
1143 pollset->shutting_down = 1;
1144 pollset->shutdown_done =
closure;
1145 (void)
pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
1146 if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
1147 pollset->called_shutdown = 1;
1148 finish_shutdown(pollset);
1156 if (
n < 0)
return 0;
1157 if (
n > INT_MAX)
return -1;
1158 return static_cast<int>(
n);
1175 for (
i = 0;
i < pollset_set->fd_count;
i++) {
1176 GRPC_FD_UNREF(pollset_set->fds[
i],
"pollset_set");
1178 for (
i = 0;
i < pollset_set->pollset_count;
i++) {
1181 pollset->pollset_set_count--;
1183 if (pollset->shutting_down && !pollset->called_shutdown &&
1184 !pollset_has_observers(pollset)) {
1185 pollset->called_shutdown = 1;
1187 finish_shutdown(pollset);
1193 gpr_free(pollset_set->pollset_sets);
1202 pollset->pollset_set_count++;
1205 if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
1206 pollset_set->pollset_capacity =
1207 std::max(
size_t(8), 2 * pollset_set->pollset_capacity);
1209 pollset_set->pollsets,
1210 pollset_set->pollset_capacity *
sizeof(*pollset_set->pollsets)));
1212 pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
1213 for (
i = 0, j = 0;
i < pollset_set->fd_count;
i++) {
1214 if (fd_is_orphaned(pollset_set->fds[
i])) {
1215 GRPC_FD_UNREF(pollset_set->fds[
i],
"pollset_set");
1217 pollset_add_fd(pollset, pollset_set->fds[
i]);
1218 pollset_set->fds[
j++] = pollset_set->fds[
i];
1221 pollset_set->fd_count =
j;
1229 for (
i = 0;
i < pollset_set->pollset_count;
i++) {
1230 if (pollset_set->pollsets[
i] == pollset) {
1231 pollset_set->pollset_count--;
1233 pollset_set->pollsets[pollset_set->pollset_count]);
1239 pollset->pollset_set_count--;
1241 if (pollset->shutting_down && !pollset->called_shutdown &&
1242 !pollset_has_observers(pollset)) {
1243 pollset->called_shutdown = 1;
1245 finish_shutdown(pollset);
1255 if (bag->pollset_set_count == bag->pollset_set_capacity) {
1256 bag->pollset_set_capacity =
1257 std::max(
size_t(8), 2 * bag->pollset_set_capacity);
1260 bag->pollset_set_capacity *
sizeof(*bag->pollset_sets)));
1262 bag->pollset_sets[bag->pollset_set_count++] = item;
1263 for (
i = 0, j = 0;
i < bag->fd_count;
i++) {
1264 if (fd_is_orphaned(bag->fds[
i])) {
1265 GRPC_FD_UNREF(bag->fds[
i],
"pollset_set");
1267 pollset_set_add_fd(item, bag->fds[
i]);
1268 bag->fds[
j++] = bag->fds[
i];
1279 for (
i = 0;
i < bag->pollset_set_count;
i++) {
1280 if (bag->pollset_sets[
i] == item) {
1281 bag->pollset_set_count--;
1283 bag->pollset_sets[bag->pollset_set_count]);
1293 if (pollset_set->fd_count == pollset_set->fd_capacity) {
1294 pollset_set->fd_capacity =
1295 std::max(
size_t(8), 2 * pollset_set->fd_capacity);
1296 pollset_set->fds =
static_cast<grpc_fd**
>(
1298 pollset_set->fd_capacity *
sizeof(*pollset_set->fds)));
1300 GRPC_FD_REF(fd,
"pollset_set");
1301 pollset_set->fds[pollset_set->fd_count++] = fd;
1302 for (
i = 0;
i < pollset_set->pollset_count;
i++) {
1303 pollset_add_fd(pollset_set->pollsets[
i], fd);
1305 for (
i = 0;
i < pollset_set->pollset_set_count;
i++) {
1306 pollset_set_add_fd(pollset_set->pollset_sets[
i], fd);
1314 for (
i = 0;
i < pollset_set->fd_count;
i++) {
1315 if (pollset_set->fds[
i] == fd) {
1316 pollset_set->fd_count--;
1317 std::swap(pollset_set->fds[
i], pollset_set->fds[pollset_set->fd_count]);
1318 GRPC_FD_UNREF(fd,
"pollset_set");
1322 for (
i = 0;
i < pollset_set->pollset_set_count;
i++) {
1323 pollset_set_del_fd(pollset_set->pollset_sets[
i], fd);
// The poll-based engine never spawns background poller threads, so the
// current thread can never be one.
static bool is_any_background_poller_thread(void) {
  return false;
}
// No-op: there is no background poller in this engine, hence nothing to
// shut down.
static void shutdown_background_closure(void) {}
1336 static bool add_closure_to_background_poller(
grpc_closure* ,
1344 static void reset_event_manager_on_fork() {
1346 while (fork_fd_list_head !=
nullptr) {
1347 if (fork_fd_list_head->fd !=
nullptr) {
1348 if (!fork_fd_list_head->fd->closed) {
1349 close(fork_fd_list_head->fd->fd);
1351 fork_fd_list_head->fd->fd = -1;
1353 close(fork_fd_list_head->cached_wakeup_fd->fd.read_fd);
1354 fork_fd_list_head->cached_wakeup_fd->fd.read_fd = -1;
1355 close(fork_fd_list_head->cached_wakeup_fd->fd.write_fd);
1356 fork_fd_list_head->cached_wakeup_fd->fd.write_fd = -1;
1358 fork_fd_list_head = fork_fd_list_head->next;
1388 pollset_set_destroy,
1389 pollset_set_add_pollset,
1390 pollset_set_del_pollset,
1391 pollset_set_add_pollset_set,
1392 pollset_set_del_pollset_set,
1396 is_any_background_poller_thread,
1408 track_fds_for_fork =
true;
1411 reset_event_manager_on_fork);
1416 shutdown_background_closure,
1418 add_closure_to_background_poller,
1425 int phony_poll(
struct pollfd fds[], nfds_t nfds,
int timeout) {
1427 return real_poll_function(fds, nfds, 0);
1429 gpr_log(
GPR_ERROR,
"Attempted a blocking poll when declared non-polling.");
1439 v.check_engine_available = [](
bool explicit_request) {
1440 if (!explicit_request)
return false;
1450 v.init_engine = []() {};
1451 v.shutdown_engine = []() {};