/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)        printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)        do { (void)(0); } while (0)
#endif

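/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */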
#define INIT_PATHS_SIZE_ORDER        2

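/* Keep the mean chain length below this constant */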
#define MEAN_CHAIN_LEN                2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
                                time_after(jiffies, mpath->exp_time) && \
                                !(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths;

int mesh_paths_generation;

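/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the
 * hashwlock as well.
 */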
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return rcu_dereference_protected(mesh_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return rcu_dereference_protected(mpp_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);

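/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice per loop iteration -- once for
 * the hash_mask and once for the bucket list.
 */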
#define for_each_mesh_entry(tbl, p, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);

        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                         sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
        spin_lock_init(&newtbl->gates_lock);

        return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        struct mpath_node *gate;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        if (free_leafs) {
                spin_lock_bh(&tbl->gates_lock);
                hlist_for_each_entry_safe(gate, p, q,
                                          tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
                }
                kfree(tbl->known_gates);
                spin_unlock_bh(&tbl->gates_lock);
        }

        __mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
                & tbl->hash_mask;
}

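/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */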
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        struct sk_buff_head tmpq;
        unsigned long flags;
        struct ieee80211_sub_if_data *sdata = mpath->sdata;

        rcu_assign_pointer(mpath->next_hop, sta);

        __skb_queue_head_init(&tmpq);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);

        while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
                ieee80211_set_qos_hdr(sdata, skb);
                __skb_queue_tail(&tmpq, skb);
        }

        skb_queue_splice(&tmpq, &mpath->frame_queue);
        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                             struct mesh_path *gate_mpath)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211s_hdr *mshdr;
        int mesh_hdrlen, hdrlen;
        char *next_hop;

        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

        if (!(mshdr->flags & MESH_FLAGS_AE)) {
                /* size of the fixed part of the mesh header */
                mesh_hdrlen = 6;

                /* make room for the two extended addresses */
                skb_push(skb, 2 * ETH_ALEN);
                memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

                hdr = (struct ieee80211_hdr *) skb->data;

                /* we preserve the previous mesh header and only add
                 * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
                memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
        }

        /* update next hop */
        hdr = (struct ieee80211_hdr *) skb->data;
        rcu_read_lock();
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

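/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */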
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
{
        struct sk_buff *skb, *cp_skb = NULL;
        struct sk_buff_head gateq, failq;
        unsigned long flags;
        int num_skbs;

        BUG_ON(gate_mpath == from_mpath);
        BUG_ON(!gate_mpath->next_hop);

        __skb_queue_head_init(&gateq);
        __skb_queue_head_init(&failq);

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

        num_skbs = skb_queue_len(&failq);

        while (num_skbs--) {
                skb = __skb_dequeue(&failq);
                if (copy) {
                        cp_skb = skb_copy(skb, GFP_ATOMIC);
                        if (cp_skb)
                                __skb_queue_tail(&failq, cp_skb);
                }

                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
                __skb_queue_tail(&gateq, skb);
        }

        spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
        skb_queue_splice(&gateq, &gate_mpath->frame_queue);
        mpath_dbg("Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst,
                  skb_queue_len(&gate_mpath->frame_queue));
        spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

        if (!copy)
                return;

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice(&failq, &from_mpath->frame_queue);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
                                     struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_node *n;
        struct hlist_head *bucket;
        struct mpath_node *node;

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
                        if (MPATH_EXPIRED(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

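/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */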
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

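/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */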
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        struct hlist_node *p;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, p, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (MPATH_EXPIRED(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

static void mesh_gate_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        kfree(node);
}

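/**
 * mesh_gate_add - mark a mesh path as a path to a mesh gate
 * @tbl: table containing the known_gates list
 * @mpath: mesh path pointing to the gate
 *
 * Returns: 0 on success, -EEXIST if the gate was already recorded or
 * -ENOMEM on allocation failure.
 */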
static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate, *new_gate;
        struct hlist_node *n;
        int err;

        rcu_read_lock();
        tbl = rcu_dereference(tbl);

        hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
                }

        new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_gate) {
                err = -ENOMEM;
                goto err_rcu;
        }

        mpath->is_gate = true;
        mpath->sdata->u.mesh.num_gates++;
        new_gate->mpath = mpath;
        spin_lock_bh(&tbl->gates_lock);
        hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
        spin_unlock_bh(&tbl->gates_lock);
        rcu_read_unlock();
        mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
                  mpath->sdata->name, mpath->dst,
                  mpath->sdata->u.mesh.num_gates);
        return 0;
err_rcu:
        rcu_read_unlock();
        return err;
}

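/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */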
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate;
        struct hlist_node *p, *q;

        tbl = rcu_dereference(tbl);

        hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        spin_lock_bh(&tbl->gates_lock);
                        hlist_del_rcu(&gate->list);
                        call_rcu(&gate->rcu, mesh_gate_node_reclaim);
                        spin_unlock_bh(&tbl->gates_lock);
                        mpath->sdata->u.mesh.num_gates--;
                        mpath->is_gate = false;
                        mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
                                  "%d known gates\n", mpath->sdata->name,
                                  mpath->dst, mpath->sdata->u.mesh.num_gates);
                        break;
                }

        return 0;
}

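/**
 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
 * @mpath: gate path to add to table
 */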
int mesh_path_add_gate(struct mesh_path *mpath)
{
        return mesh_gate_add(mesh_paths, mpath);
}

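/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */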
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.num_gates;
}

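/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */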
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return -ENOSPC;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        return err;
}

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
        write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
        write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}

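/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */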
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
                                           mpath->dst, cpu_to_le32(mpath->sn),
                                           reason, bcast, sdata);
                }
        }
        rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
        struct mesh_path *mpath;
        mpath = node->mpath;
        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
                mesh_gate_del(tbl, mpath);
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
        atomic_dec(&tbl->entries);
}

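/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */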
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (rcu_dereference(mpath->next_hop) == sta) {
                        spin_lock_bh(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock_bh(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, p, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
                spin_lock_bh(&tbl->hashwlock[i]);
                __mesh_path_del(tbl, node);
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
}

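/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 *
 * @sdata: interface data to match
 */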
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        table_flush_by_iface(tbl, sdata);
        tbl = resize_dereference_mpp_paths();
        table_flush_by_iface(tbl, sdata);
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

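/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */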
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        struct hlist_node *n;
        int hash_idx;
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock_bh(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, n, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
                        __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock_bh(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return err;
}

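/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */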
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                           &mpath->frame_queue);
}

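/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * The frames in the unresolved mpath's queue are moved to the first active
 * gate's queue and then copied to every other active gate, after which the
 * pending queue of each gate is transmitted.
 *
 * Returns: 0 if the frames were sent to at least one gate, -EHOSTUNREACH
 * otherwise.
 */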
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        struct hlist_node *n;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
        bool copy = false;
        struct hlist_head *known_gates;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        known_gates = tbl->known_gates;
        rcu_read_unlock();

        if (!known_gates)
                return -EHOSTUNREACH;

        hlist_for_each_entry_rcu(gate, n, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;

                if (gate->mpath->flags & MESH_PATH_ACTIVE) {
                        mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
                        mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
                        from_mpath = gate->mpath;
                        copy = true;
                } else {
                        mpath_dbg("Not forwarding %p\n", gate->mpath);
                        mpath_dbg("flags %x\n", gate->mpath->flags);
                }
        }

        hlist_for_each_entry_rcu(gate, n, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg("Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
                }

        return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

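/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was
 * saved in addr1 of the frame-to-be-forwarded.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */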
void mesh_path_discard_frame(struct sk_buff *skb,
                             struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct mesh_path *mpath;
        u32 sn = 0;
        __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);

        if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
                u8 *ra, *da;

                da = hdr->addr3;
                ra = hdr->addr1;
                rcu_read_lock();
                mpath = mesh_path_lookup(da, sdata);
                if (mpath) {
                        spin_lock_bh(&mpath->state_lock);
                        sn = ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                }
                rcu_read_unlock();
                mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
                                   cpu_to_le32(sn), reason, ra, sdata);
        }

        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

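/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */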
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(skb, mpath->sdata);
}

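/**
 * mesh_path_fix_nexthop - force a specific next hop for a given mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 */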
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                       &newtbl->hash_buckets[hash_idx]);
        return 0;
}

int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;
        int ret;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
                goto free_path;
        }
        INIT_HLIST_HEAD(tbl_path->known_gates);

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                ret = -ENOMEM;
                goto free_path;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
                goto free_mpp;
        }
        INIT_HLIST_HEAD(tbl_mpp->known_gates);

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;

free_mpp:
        mesh_table_free(tbl_mpp, true);
free_path:
        mesh_table_free(tbl_path, true);
        return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_node *p;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, p, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->dst, mpath->sdata);
        }
        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
        mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}