generic_mtrie_impl.hpp
/* SPDX-License-Identifier: MPL-2.0 */

#ifndef __ZMQ_GENERIC_MTRIE_IMPL_HPP_INCLUDED__
#define __ZMQ_GENERIC_MTRIE_IMPL_HPP_INCLUDED__


#include <stdlib.h>

#include <new>
#include <algorithm>
#include <list>

#include "err.hpp"
#include "macros.hpp"
#include "generic_mtrie.hpp"

namespace zmq
{
template <typename T>
generic_mtrie_t<T>::generic_mtrie_t () :
    _pipes (0), _num_prefixes (0), _min (0), _count (0), _live_nodes (0)
{
}

template <typename T> generic_mtrie_t<T>::~generic_mtrie_t ()
{
    LIBZMQ_DELETE (_pipes);

    if (_count == 1) {
        zmq_assert (_next.node);
        LIBZMQ_DELETE (_next.node);
    } else if (_count > 1) {
        for (unsigned short i = 0; i != _count; ++i) {
            LIBZMQ_DELETE (_next.table[i]);
        }
        free (_next.table);
    }
}

template <typename T>
bool generic_mtrie_t<T>::add (prefix_t prefix_, size_t size_, value_t *pipe_)
{
    generic_mtrie_t *it = this;

    while (size_) {
        const unsigned char c = *prefix_;

        if (c < it->_min || c >= it->_min + it->_count) {
            //  The character is out of range of currently handled
            //  characters. We have to extend the table.
            if (!it->_count) {
                it->_min = c;
                it->_count = 1;
                it->_next.node = NULL;
            } else if (it->_count == 1) {
                //  Grow from the single-node representation to a table that
                //  spans both the old and the new character.
                const unsigned char oldc = it->_min;
                generic_mtrie_t *oldp = it->_next.node;
                it->_count = (it->_min < c ? c - it->_min : it->_min - c) + 1;
                it->_next.table = static_cast<generic_mtrie_t **> (
                  malloc (sizeof (generic_mtrie_t *) * it->_count));
                alloc_assert (it->_next.table);
                for (unsigned short i = 0; i != it->_count; ++i)
                    it->_next.table[i] = 0;
                it->_min = std::min (it->_min, c);
                it->_next.table[oldc - it->_min] = oldp;
            } else if (it->_min < c) {
                //  The new character is above the current character range.
                const unsigned short old_count = it->_count;
                it->_count = c - it->_min + 1;
                it->_next.table = static_cast<generic_mtrie_t **> (realloc (
                  it->_next.table, sizeof (generic_mtrie_t *) * it->_count));
                alloc_assert (it->_next.table);
                for (unsigned short i = old_count; i != it->_count; i++)
                    it->_next.table[i] = NULL;
            } else {
                //  The new character is below the current character range.
                const unsigned short old_count = it->_count;
                it->_count = (it->_min + old_count) - c;
                it->_next.table = static_cast<generic_mtrie_t **> (realloc (
                  it->_next.table, sizeof (generic_mtrie_t *) * it->_count));
                alloc_assert (it->_next.table);
                memmove (it->_next.table + it->_min - c, it->_next.table,
                         old_count * sizeof (generic_mtrie_t *));
                for (unsigned short i = 0; i != it->_min - c; i++)
                    it->_next.table[i] = NULL;
                it->_min = c;
            }
        }

        //  If the next node does not exist, create one.
        if (it->_count == 1) {
            if (!it->_next.node) {
                it->_next.node = new (std::nothrow) generic_mtrie_t;
                alloc_assert (it->_next.node);
                ++(it->_live_nodes);
            }

            ++prefix_;
            --size_;
            it = it->_next.node;
        } else {
            if (!it->_next.table[c - it->_min]) {
                it->_next.table[c - it->_min] =
                  new (std::nothrow) generic_mtrie_t;
                alloc_assert (it->_next.table[c - it->_min]);
                ++(it->_live_nodes);
            }

            ++prefix_;
            --size_;
            it = it->_next.table[c - it->_min];
        }
    }

    //  We are at the node corresponding to the prefix. We are done.
    const bool result = !it->_pipes;
    if (!it->_pipes) {
        it->_pipes = new (std::nothrow) pipes_t;
        alloc_assert (it->_pipes);

        _num_prefixes.add (1);
    }
    it->_pipes->insert (pipe_);

    return result;
}

template <typename T>
template <typename Arg>
void generic_mtrie_t<T>::rm (value_t *pipe_,
                             void (*func_) (prefix_t data_,
                                            size_t size_,
                                            Arg arg_),
                             Arg arg_,
                             bool call_on_uniq_)
{
    //  This used to be implemented as a non-tail recursive traversal of the
    //  trie, which means remote clients controlled the depth of the recursion
    //  and the stack size.
    //  To simulate the non-tail recursion, with post-recursion changes
    //  depending on the result of the recursive call, a stack is used to
    //  re-visit the same node and operate on it again after children have
    //  been visited.
    //  A boolean is used to record whether the node had already been visited
    //  and to determine if the pre- or post- children visit actions have to
    //  be taken.
    //  In the case of a node with (N > 1) children, the node has to be
    //  re-visited N times, in the correct order after each child visit.
    std::list<struct iter> stack;
    unsigned char *buff = NULL;
    size_t maxbuffsize = 0;
    struct iter it = {this, NULL, NULL, 0, 0, 0, 0, false};
    stack.push_back (it);

    while (!stack.empty ()) {
        it = stack.back ();
        stack.pop_back ();

        if (!it.processed_for_removal) {
            //  Remove the subscription from this node.
            if (it.node->_pipes && it.node->_pipes->erase (pipe_)) {
                if (!call_on_uniq_ || it.node->_pipes->empty ()) {
                    func_ (buff, it.size, arg_);
                }

                if (it.node->_pipes->empty ()) {
                    LIBZMQ_DELETE (it.node->_pipes);
                }
            }

            //  Adjust the buffer.
            if (it.size >= maxbuffsize) {
                maxbuffsize = it.size + 256;
                buff =
                  static_cast<unsigned char *> (realloc (buff, maxbuffsize));
                alloc_assert (buff);
            }

            switch (it.node->_count) {
                case 0:
                    //  If there are no subnodes in the trie, we are done with
                    //  this node's pre-processing.
                    break;
                case 1: {
                    //  If there's one subnode (optimisation).

                    buff[it.size] = it.node->_min;
                    //  Mark this node as pre-processed and push it, so that
                    //  the next visit after the operation on the child can do
                    //  the removals.
                    it.processed_for_removal = true;
                    stack.push_back (it);
                    struct iter next = {it.node->_next.node,
                                        NULL,
                                        NULL,
                                        ++it.size,
                                        0,
                                        0,
                                        0,
                                        false};
                    stack.push_back (next);
                    break;
                }
                default: {
                    //  If there are multiple subnodes.
                    //  When first visiting this node, initialize the
                    //  new_min/new_max parameters, which will then be used
                    //  after each child has been processed, on the
                    //  post-children iterations.
                    if (it.current_child == 0) {
                        //  New min non-null character in the node table after
                        //  the removal.
                        it.new_min = it.node->_min + it.node->_count - 1;
                        //  New max non-null character in the node table after
                        //  the removal.
                        it.new_max = it.node->_min;
                    }

                    //  Mark this node as pre-processed and push it, so that
                    //  the next visit after the operation on the child can do
                    //  the removals.
                    buff[it.size] = it.node->_min + it.current_child;
                    it.processed_for_removal = true;
                    stack.push_back (it);
                    if (it.node->_next.table[it.current_child]) {
                        struct iter next = {
                          it.node->_next.table[it.current_child],
                          NULL,
                          NULL,
                          it.size + 1,
                          0,
                          0,
                          0,
                          false};
                        stack.push_back (next);
                    }
                }
            }
        } else {
            //  Reset back for the next time, in case this node doesn't get
            //  deleted. This is done unconditionally, unlike when setting
            //  this variable to true.
            it.processed_for_removal = false;

            switch (it.node->_count) {
                case 0:
                    //  If there are no subnodes in the trie, we are done with
                    //  this node's post-processing.
                    break;
                case 1:
                    //  If there's one subnode (optimisation).

                    //  Prune the node if it was made redundant by the removal.
                    if (it.node->_next.node->is_redundant ()) {
                        LIBZMQ_DELETE (it.node->_next.node);
                        it.node->_count = 0;
                        --it.node->_live_nodes;
                        zmq_assert (it.node->_live_nodes == 0);
                    }
                    break;
                default:
                    //  If there are multiple subnodes.
                    {
                        if (it.node->_next.table[it.current_child]) {
                            //  Prune redundant nodes from the mtrie.
                            if (it.node->_next.table[it.current_child]
                                  ->is_redundant ()) {
                                LIBZMQ_DELETE (
                                  it.node->_next.table[it.current_child]);

                                zmq_assert (it.node->_live_nodes > 0);
                                --it.node->_live_nodes;
                            } else {
                                //  The node is not redundant, so it's a
                                //  candidate for being the new min/max node.
                                //
                                //  We loop through the node array from left
                                //  to right, so the first non-null,
                                //  non-redundant node encountered is the new
                                //  minimum index. Conversely, the last
                                //  non-redundant, non-null node encountered
                                //  is the new maximum index.
                                if (it.current_child + it.node->_min
                                    < it.new_min)
                                    it.new_min =
                                      it.current_child + it.node->_min;
                                if (it.current_child + it.node->_min
                                    > it.new_max)
                                    it.new_max =
                                      it.current_child + it.node->_min;
                            }
                        }

                        //  If there are more children to visit, push the
                        //  current node again, so that pre-processing can
                        //  happen on the next child. If we are done, reset
                        //  the child index so that ::rm is fully idempotent.
                        ++it.current_child;
                        if (it.current_child >= it.node->_count)
                            it.current_child = 0;
                        else {
                            stack.push_back (it);
                            continue;
                        }

                        //  All children have been visited and removed if
                        //  needed, and all pre- and post-visit operations
                        //  have been carried out. Resize/free the node table
                        //  if needed.
                        zmq_assert (it.node->_count > 1);

                        //  Free the node table if it's no longer used.
                        switch (it.node->_live_nodes) {
                            case 0:
                                free (it.node->_next.table);
                                it.node->_next.table = NULL;
                                it.node->_count = 0;
                                break;
                            case 1:
                                //  Compact the node table if possible.

                                //  If there's only one live node in the table
                                //  we can switch to using the more compact
                                //  single-node representation.
                                zmq_assert (it.new_min == it.new_max);
                                zmq_assert (it.new_min >= it.node->_min);
                                zmq_assert (it.new_min
                                            < it.node->_min + it.node->_count);
                                {
                                    generic_mtrie_t *node =
                                      it.node->_next
                                        .table[it.new_min - it.node->_min];
                                    zmq_assert (node);
                                    free (it.node->_next.table);
                                    it.node->_next.node = node;
                                }
                                it.node->_count = 1;
                                it.node->_min = it.new_min;
                                break;
                            default:
                                if (it.new_min > it.node->_min
                                    || it.new_max < it.node->_min
                                                      + it.node->_count - 1) {
                                    zmq_assert (it.new_max - it.new_min + 1
                                                > 1);

                                    generic_mtrie_t **old_table =
                                      it.node->_next.table;
                                    zmq_assert (it.new_min > it.node->_min
                                                || it.new_max
                                                     < it.node->_min
                                                         + it.node->_count
                                                         - 1);
                                    zmq_assert (it.new_min >= it.node->_min);
                                    zmq_assert (it.new_max
                                                <= it.node->_min
                                                     + it.node->_count - 1);
                                    zmq_assert (it.new_max - it.new_min + 1
                                                < it.node->_count);

                                    it.node->_count =
                                      it.new_max - it.new_min + 1;
                                    it.node->_next.table =
                                      static_cast<generic_mtrie_t **> (
                                        malloc (sizeof (generic_mtrie_t *)
                                                * it.node->_count));
                                    alloc_assert (it.node->_next.table);

                                    memmove (it.node->_next.table,
                                             old_table
                                               + (it.new_min - it.node->_min),
                                             sizeof (generic_mtrie_t *)
                                               * it.node->_count);
                                    free (old_table);

                                    it.node->_min = it.new_min;
                                }
                        }
                    }
            }
        }
    }

    free (buff);
}

template <typename T>
typename generic_mtrie_t<T>::rm_result
generic_mtrie_t<T>::rm (prefix_t prefix_, size_t size_, value_t *pipe_)
{
    //  This used to be implemented as a non-tail recursive traversal of the
    //  trie, which means remote clients controlled the depth of the recursion
    //  and the stack size.
    //  To simulate the non-tail recursion, with post-recursion changes
    //  depending on the result of the recursive call, a stack is used to
    //  re-visit the same node and operate on it again after children have
    //  been visited.
    //  A boolean is used to record whether the node had already been visited
    //  and to determine if the pre- or post- children visit actions have to
    //  be taken.
    rm_result ret = not_found;
    std::list<struct iter> stack;
    struct iter it = {this, NULL, prefix_, size_, 0, 0, 0, false};
    stack.push_back (it);

    while (!stack.empty ()) {
        it = stack.back ();
        stack.pop_back ();

        if (!it.processed_for_removal) {
            if (!it.size) {
                if (!it.node->_pipes) {
                    ret = not_found;
                    continue;
                }

                typename pipes_t::size_type erased =
                  it.node->_pipes->erase (pipe_);
                if (it.node->_pipes->empty ()) {
                    zmq_assert (erased == 1);
                    LIBZMQ_DELETE (it.node->_pipes);
                    ret = last_value_removed;
                    continue;
                }

                ret = (erased == 1) ? values_remain : not_found;
                continue;
            }

            it.current_child = *it.prefix;
            if (!it.node->_count || it.current_child < it.node->_min
                || it.current_child >= it.node->_min + it.node->_count) {
                ret = not_found;
                continue;
            }

            it.next_node =
              it.node->_count == 1
                ? it.node->_next.node
                : it.node->_next.table[it.current_child - it.node->_min];
            if (!it.next_node) {
                ret = not_found;
                continue;
            }

            it.processed_for_removal = true;
            stack.push_back (it);
            struct iter next = {
              it.next_node, NULL, it.prefix + 1, it.size - 1, 0, 0, 0, false};
            stack.push_back (next);
        } else {
            it.processed_for_removal = false;

            if (it.next_node->is_redundant ()) {
                LIBZMQ_DELETE (it.next_node);
                zmq_assert (it.node->_count > 0);

                if (it.node->_count == 1) {
                    it.node->_next.node = NULL;
                    it.node->_count = 0;
                    --it.node->_live_nodes;
                    zmq_assert (it.node->_live_nodes == 0);
                } else {
                    it.node->_next.table[it.current_child - it.node->_min] = 0;
                    zmq_assert (it.node->_live_nodes > 1);
                    --it.node->_live_nodes;

                    //  Compact the table if possible.
                    if (it.node->_live_nodes == 1) {
                        //  If there's only one live node in the table we can
                        //  switch to using the more compact single-node
                        //  representation.
                        unsigned short i;
                        for (i = 0; i < it.node->_count; ++i)
                            if (it.node->_next.table[i])
                                break;

                        zmq_assert (i < it.node->_count);
                        it.node->_min += i;
                        it.node->_count = 1;
                        generic_mtrie_t *oldp = it.node->_next.table[i];
                        free (it.node->_next.table);
                        it.node->_next.table = NULL;
                        it.node->_next.node = oldp;
                    } else if (it.current_child == it.node->_min) {
                        //  We can compact the table "from the left".
                        unsigned short i;
                        for (i = 1; i < it.node->_count; ++i)
                            if (it.node->_next.table[i])
                                break;

                        zmq_assert (i < it.node->_count);
                        it.node->_min += i;
                        it.node->_count -= i;
                        generic_mtrie_t **old_table = it.node->_next.table;
                        it.node->_next.table =
                          static_cast<generic_mtrie_t **> (malloc (
                            sizeof (generic_mtrie_t *) * it.node->_count));
                        alloc_assert (it.node->_next.table);
                        memmove (it.node->_next.table, old_table + i,
                                 sizeof (generic_mtrie_t *)
                                   * it.node->_count);
                        free (old_table);
                    } else if (it.current_child
                               == it.node->_min + it.node->_count - 1) {
                        //  We can compact the table "from the right".
                        unsigned short i;
                        for (i = 1; i < it.node->_count; ++i)
                            if (it.node->_next.table[it.node->_count - 1 - i])
                                break;

                        zmq_assert (i < it.node->_count);
                        it.node->_count -= i;
                        generic_mtrie_t **old_table = it.node->_next.table;
                        it.node->_next.table =
                          static_cast<generic_mtrie_t **> (malloc (
                            sizeof (generic_mtrie_t *) * it.node->_count));
                        alloc_assert (it.node->_next.table);
                        memmove (it.node->_next.table, old_table,
                                 sizeof (generic_mtrie_t *)
                                   * it.node->_count);
                        free (old_table);
                    }
                }
            }
        }
    }

    if (ret == last_value_removed) {
        zmq_assert (_num_prefixes.get () > 0);
        _num_prefixes.sub (1);
    }

    return ret;
}

template <typename T>
template <typename Arg>
void generic_mtrie_t<T>::match (prefix_t data_,
                                size_t size_,
                                void (*func_) (value_t *pipe_, Arg arg_),
                                Arg arg_)
{
    for (generic_mtrie_t *current = this; current; data_++, size_--) {
        //  Signal the pipes attached to this node.
        if (current->_pipes) {
            for (typename pipes_t::iterator it = current->_pipes->begin (),
                                            end = current->_pipes->end ();
                 it != end; ++it) {
                func_ (*it, arg_);
            }
        }

        //  If we are at the end of the message, there's nothing more to match.
        if (!size_)
            break;

        //  If there are no subnodes in the trie, return.
        if (current->_count == 0)
            break;

        if (current->_count == 1) {
            //  If there's one subnode (optimisation).
            if (data_[0] != current->_min) {
                break;
            }
            current = current->_next.node;
        } else {
            //  If there are multiple subnodes.
            if (data_[0] < current->_min
                || data_[0] >= current->_min + current->_count) {
                break;
            }
            current = current->_next.table[data_[0] - current->_min];
        }
    }
}

template <typename T> bool generic_mtrie_t<T>::is_redundant () const
{
    return !_pipes && _live_nodes == 0;
}
}


#endif
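
A minimal usage sketch of generic_mtrie_t follows, assuming it is compiled inside the libzmq source tree so that "generic_mtrie_impl.hpp" and its dependencies (err.hpp, macros.hpp, the atomic counter) resolve and err.cpp is linked. The fake_pipe_t type and the on_match/on_removed callbacks are hypothetical stand-ins chosen for illustration; in libzmq the value type is zmq::pipe_t.

//  Illustrative only: fake_pipe_t, on_match and on_removed are made-up names.
#include <cstdio>

#include "generic_mtrie_impl.hpp"

//  Stand-in for the pipe objects libzmq stores in the trie.
struct fake_pipe_t
{
    int id;
};

//  Called by match () for every pipe attached to a node on the path of the
//  matched data, i.e. for every stored prefix of that data.
static void on_match (fake_pipe_t *pipe_, int /*arg_*/)
{
    printf ("matched pipe %d\n", pipe_->id);
}

//  Called by rm (pipe, ...) with each prefix whose pipe set became empty
//  (call_on_uniq_ == true below).
static void on_removed (const unsigned char *data_, size_t size_, int /*arg_*/)
{
    printf ("prefix removed: %.*s\n", static_cast<int> (size_),
            reinterpret_cast<const char *> (data_));
}

int main ()
{
    zmq::generic_mtrie_t<fake_pipe_t> trie;
    fake_pipe_t pipe = {1};

    const unsigned char topic[] = "A.B";
    const size_t topic_len = sizeof (topic) - 1;

    //  The first insertion for a prefix returns true; inserting another pipe
    //  under the same prefix would return false.
    const bool first = trie.add (topic, topic_len, &pipe);

    //  Visits the pipes of every node along "A.B", i.e. any subscribers to
    //  "", "A", "A." and "A.B".
    trie.match (topic, topic_len, on_match, 0);

    //  Removes every entry owned by the pipe and reports the prefixes that
    //  lost their last subscriber.
    trie.rm (&pipe, on_removed, 0, true);

    return first ? 0 : 1;
}

The boolean and rm_result values returned by add () and rm () are what allow the pub/sub sockets to forward only the first subscription and the last unsubscription for a given topic.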