linux-inotify.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv/tree.h"
#include "internal.h"

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <sys/inotify.h>
#include <sys/types.h>
#include <unistd.h>

struct watcher_list {
  RB_ENTRY(watcher_list) entry;
  QUEUE watchers;
  int iterating;
  char* path;
  int wd;
};

struct watcher_root {
  struct watcher_list* rbh_root;
};
#define CAST(p) ((struct watcher_root*)(p))

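/* Orders watcher_list nodes in the red-black tree by their inotify watch
 * descriptor (wd), so that a watch descriptor reported by the kernel can be
 * mapped back to its list of uv_fs_event_t handles (see find_watcher below).
 */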
static int compare_watchers(const struct watcher_list* a,
                            const struct watcher_list* b) {
  if (a->wd < b->wd) return -1;
  if (a->wd > b->wd) return 1;
  return 0;
}

RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)

static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* w,
                             unsigned int revents);

static void maybe_free_watcher_list(struct watcher_list* w,
                                    uv_loop_t* loop);

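/* Lazily creates the per-loop inotify file descriptor (non-blocking,
 * close-on-exec) and starts polling it for readability. Idempotent: returns
 * 0 immediately if the descriptor already exists.
 */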
static int init_inotify(uv_loop_t* loop) {
  int fd;

  if (loop->inotify_fd != -1)
    return 0;

  fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
  if (fd < 0)
    return UV__ERR(errno);

  loop->inotify_fd = fd;
  uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
  uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);

  return 0;
}

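/* Invoked when the loop is re-initialized after fork(): restores the saved
 * watcher tree (old_watchers), stops every handle hanging off it (keeping a
 * copy of each path), then restarts the handles so new watches are registered
 * against a freshly created inotify descriptor.
 */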
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
  /* Open the inotify_fd, and re-arm all the inotify watchers. */
  int err;
  struct watcher_list* tmp_watcher_list_iter;
  struct watcher_list* watcher_list;
  struct watcher_list tmp_watcher_list;
  QUEUE queue;
  QUEUE* q;
  uv_fs_event_t* handle;
  char* tmp_path;

  if (old_watchers != NULL) {
    /* We must restore the old watcher list to be able to close items
     * out of it.
     */
    loop->inotify_watchers = old_watchers;

    QUEUE_INIT(&tmp_watcher_list.watchers);
    /* Note that the queue we use is shared with the start and stop()
     * functions, making QUEUE_FOREACH unsafe to use. So we use the
     * QUEUE_MOVE trick to safely iterate. Also don't free the watcher
     * list until we're done iterating. c.f. uv__inotify_read.
     */
    RB_FOREACH_SAFE(watcher_list, watcher_root,
                    CAST(&old_watchers), tmp_watcher_list_iter) {
      watcher_list->iterating = 1;
      QUEUE_MOVE(&watcher_list->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
        /* It's critical to keep a copy of path here, because it
         * will be set to NULL by stop() and then deallocated by
         * maybe_free_watcher_list
         */
        tmp_path = uv__strdup(handle->path);
        assert(tmp_path != NULL);
        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
        uv_fs_event_stop(handle);

        QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
        handle->path = tmp_path;
      }
      watcher_list->iterating = 0;
      maybe_free_watcher_list(watcher_list, loop);
    }

    QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
    while (!QUEUE_EMPTY(&queue)) {
      q = QUEUE_HEAD(&queue);
      QUEUE_REMOVE(q);
      handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
      tmp_path = handle->path;
      handle->path = NULL;
      err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
      uv__free(tmp_path);
      if (err)
        return err;
    }
  }

  return 0;
}

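/* Maps an inotify watch descriptor back to its watcher_list entry, or NULL
 * if no watchers remain for that descriptor.
 */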
static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
  struct watcher_list w;
  w.wd = wd;
  return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
}

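/* Removes and frees a watcher_list once its handle queue is empty, unless a
 * callback dispatch is still iterating over it; also drops the kernel watch
 * with inotify_rm_watch().
 */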
static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
  /* if the watcher_list->watchers is being iterated over, we can't free it. */
  if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
    /* No watchers left for this path. Clean up. */
    RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
    inotify_rm_watch(loop->inotify_fd, w->wd);
    uv__free(w);
  }
}

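/* Read callback for the shared inotify descriptor: drains all pending
 * inotify_event records, translates each event mask into UV_CHANGE and/or
 * UV_RENAME, and invokes the callback of every uv_fs_event_t registered for
 * that watch descriptor.
 */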
static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* dummy,
                             unsigned int events) {
  const struct inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char *p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  char buf[4096];

  while (1) {
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct inotify_event*) p;

      events = 0;
      if (e->mask & (IN_ATTRIB|IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      /* We're about to iterate over the queue and call user's callbacks.
       * What can go wrong?
       * A callback could call uv_fs_event_stop()
       * and the queue can change under our feet.
       * So, we use QUEUE_MOVE() trick to safely iterate over the queue.
       * And we don't free the watcher_list until we're done iterating.
       *
       * First,
       * tell uv_fs_event_stop() (that could be called from a user's callback)
       * not to free watcher_list.
       */
      w->iterating = 1;
      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
      /* done iterating, time to (maybe) free empty watcher_list */
      w->iterating = 0;
      maybe_free_watcher_list(w, loop);
    }
  }
}

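/* Public API: initializes a uv_fs_event_t handle. The inotify descriptor
 * itself is created lazily on the first uv_fs_event_start() call.
 */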
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}

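/* Public API: registers an inotify watch for path and queues the handle on
 * the (possibly shared) watcher_list for the returned watch descriptor.
 * Returns UV_EINVAL if the handle is already active.
 */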
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  struct watcher_list* w;
  size_t len;
  int events;
  int err;
  int wd;

  if (uv__is_active(handle))
    return UV_EINVAL;

  err = init_inotify(handle->loop);
  if (err)
    return err;

  events = IN_ATTRIB
         | IN_CREATE
         | IN_MODIFY
         | IN_DELETE
         | IN_DELETE_SELF
         | IN_MOVE_SELF
         | IN_MOVED_FROM
         | IN_MOVED_TO;

  wd = inotify_add_watch(handle->loop->inotify_fd, path, events);
  if (wd == -1)
    return UV__ERR(errno);

  w = find_watcher(handle->loop, wd);
  if (w)
    goto no_insert;

  len = strlen(path) + 1;
  w = uv__malloc(sizeof(*w) + len);
  if (w == NULL)
    return UV_ENOMEM;

  w->wd = wd;
  w->path = memcpy(w + 1, path, len);
  QUEUE_INIT(&w->watchers);
  w->iterating = 0;
  RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);

no_insert:
  uv__handle_start(handle);
  QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
  handle->path = w->path;
  handle->cb = cb;
  handle->wd = wd;

  return 0;
}

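/* Public API: detaches the handle from its watcher_list and frees the list
 * (and the kernel watch) if this was the last handle watching that path.
 */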
int uv_fs_event_stop(uv_fs_event_t* handle) {
  struct watcher_list* w;

  if (!uv__is_active(handle))
    return 0;

  w = find_watcher(handle->loop, handle->wd);
  assert(w != NULL);

  handle->wd = -1;
  handle->path = NULL;
  uv__handle_stop(handle);
  QUEUE_REMOVE(&handle->watchers);

  maybe_free_watcher_list(w, handle->loop);

  return 0;
}

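/* Internal close hook: simply stops the handle when it is being closed. */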
void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}
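/* Usage sketch (not part of this file): a minimal, hedged example of how the
 * uv_fs_event_* API implemented above is typically driven from application
 * code. The watched path "/tmp" and the callback name on_fs_event are
 * illustrative choices, not anything defined in this file; the block is
 * guarded with #if 0 so it is not compiled as part of libuv.
 */
#if 0
#include <stdio.h>
#include <uv.h>

static void on_fs_event(uv_fs_event_t* handle,
                        const char* filename,
                        int events,
                        int status) {
  if (status < 0) {
    fprintf(stderr, "fs event error: %s\n", uv_strerror(status));
    return;
  }
  /* events is a bitmask of UV_CHANGE and/or UV_RENAME, mirroring the
   * IN_ATTRIB/IN_MODIFY vs. "everything else" split in uv__inotify_read. */
  printf("change on %s (events=%d)\n",
         filename ? filename : "(watched path)", events);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_fs_event_t fs_event;

  uv_fs_event_init(loop, &fs_event);               /* handle setup only */
  uv_fs_event_start(&fs_event, on_fs_event,        /* adds the inotify watch */
                    "/tmp", 0);

  return uv_run(loop, UV_RUN_DEFAULT);
}
#endif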