/*
 * mac80211 mesh path table.
 *
 * Implements the RCU-protected hash tables that map mesh destinations to
 * next hops (mesh_paths) and to mesh proxy nodes (mpp_paths), together with
 * table resizing, mesh gate tracking and path expiry.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

/* A path is expired if it was active, its lifetime has run out and it is not
 * marked MESH_PATH_FIXED. */
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, which is useful when resizing a table. */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* mesh proxy (MPP) paths */

/* Bumped on every path add/delete so that table walkers can notice that the
 * set of paths has changed. */
int mesh_paths_generation;

/*
 * Growing a table and swapping in the new one takes this lock for writing,
 * while path add/delete take it for reading (the per-bucket spinlocks still
 * serialize writers to the same hash chain).  RCU alone is only sufficient
 * for plain lookups.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/* Dereference a path table from a context that holds pathtbl_resize_lock
 * (for reading or writing), rather than from an RCU read section. */
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice per loop iteration.
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
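
/* Allocate a mesh_table with 2^size_order hash buckets, per-bucket write
 * locks and a random hash seed.  Returns NULL on allocation failure; the
 * caller is responsible for setting the node ops and the known_gates list. */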
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			 sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
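
/* Free a mesh_table.  Each node is released through tbl->free_node(); when
 * @free_leafs is true the mesh_path leaves and the known_gates list are
 * freed as well (used when tearing the table down for good rather than
 * after a resize). */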
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	/* known_gates may still be NULL if table setup failed early on */
	if (free_leafs && tbl->known_gates) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}
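
/* Rehash every node of @oldtbl into @newtbl.  Returns -EAGAIN if the old
 * table is not yet loaded enough to be worth growing, and -ENOMEM if copying
 * the nodes fails (in which case @newtbl is left for the caller to free). */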
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}
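
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Also rewrites the addresses of any frames queued on the path so that they
 * are ready to be transmitted to the new next hop.
 *
 * Locking: mpath->state_lock must be held when calling this function
 */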
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
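
/* Rewrite @skb so it can be forwarded to a mesh gate: add (or reuse) the
 * Address Extension field carrying the original endpoints and point the
 * 802.11 addresses at the gate's next hop.  @dst_addr is the gate address. */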
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
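
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 *
 * Each frame is prepared for the gate on the way: the Address Extension
 * field is added and the next hop is updated.  The gate mpath must be an
 * active mpath with a valid next_hop.
 */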
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst,
		  skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
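
/* Common lookup helper for the mesh_paths and mpp_paths tables; clears the
 * ACTIVE flag of an entry whose lifetime has expired before returning it. */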
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
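
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */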
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
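
/* Look up the mesh proxy path for @dst.  Same rules as mesh_path_lookup():
 * must be called within a read rcu section. */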
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
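
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */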
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
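
/**
 * mesh_path_add_gate - record a mesh path as a path to a known mesh gate
 * @mpath: gate path to add to the gate list
 *
 * Returns: 0 on success, -EEXIST if the path is already a known gate or
 * -ENOMEM if no memory is available for the gate list entry.
 */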
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
		  mpath->sdata->name, mpath->dst,
		  mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
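
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0
 *
 * The list entry is unlinked under tbl->gates_lock and freed via kfree_rcu().
 */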
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
				  "%d known gates\n", mpath->sdata->name,
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}
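
/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */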
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
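
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success, -ENOTSUPP if @dst is our own address or a multicast
 * address, -ENOSPC if MESH_MAX_MPATHS has been reached, -ENOMEM on allocation
 * failure and -EEXIST if the path already exists.
 *
 * State: the initial state of the new path is set to 0
 */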
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
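
/* RCU callback used to dispose of an old table after it has been replaced
 * by a grown copy; the leaves are shared with the new table and kept. */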
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
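
/* Grow the mesh path table to twice its current size and publish the new
 * table with RCU; the old table is freed after a grace period. */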
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}
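
/* Same as mesh_mpath_table_grow(), but for the mesh proxy path table. */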
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}
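
/**
 * mpp_path_add - add a mesh proxy path entry
 * @dst: destination address that is reached via the proxy
 * @mpp: mesh node proxying @dst
 * @sdata: local subif
 *
 * Returns: 0 on success, -ENOTSUPP for our own or multicast destinations,
 * -ENOMEM on allocation failure and -EEXIST if the entry already exists.
 */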
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
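
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */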
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}
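
/* RCU callback that finally frees a deleted path node once no readers can
 * still reference it; also stops the path timer and drops the path count. */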
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
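
/* Unlink a path node from its table.  The corresponding hashwlock bucket
 * lock must be held by the caller; freeing is deferred to an RCU callback. */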
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}
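
/**
 * mesh_path_flush_by_nexthop - Deletes mesh path entries that use @sta as
 * their next hop
 *
 * @sta: mesh peer to match
 *
 * The affected nodes are unlinked under the per-bucket locks and freed after
 * an RCU grace period.
 */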
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
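
/* Remove all entries belonging to @sdata from one table.  Must run inside an
 * RCU read section (the WARN_ON below enforces this); callers also hold
 * pathtbl_resize_lock for reading. */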
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
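
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * @sdata: interface data to match
 *
 * This function deletes both mesh paths as well as mesh proxy (MPP) paths.
 */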
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
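
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful, -ENXIO if no matching path was found
 */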
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
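
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */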
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
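
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there are more than one gates, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, -EHOSTUNREACH if no active gate for this interface
 * was found.
 */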
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg("Not forwarding %p\n", gate->mpath);
			mpath_dbg("flags %x\n", gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
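
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * The frame is freed and the "no route" drop counter is bumped.
 */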
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
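
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 */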
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}
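
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * The path is marked MESH_PATH_FIXED so that it is neither expired nor
 * replaced by the path selection protocol, and any pending frames are sent.
 */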
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
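
/* free_node callback: unlink an mpath_node and free it; the mesh_path leaf
 * itself is only freed when the table is being torn down (free_leafs). */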
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}
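
/* copy_node callback used while growing a table: allocate a new mpath_node
 * pointing at the same mesh_path and hash it into the new table. */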
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
		       &newtbl->hash_buckets[hash_idx]);
	return 0;
}
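
/* Allocate and publish the initial mesh path and mesh proxy path tables.
 * Returns 0 on success or -ENOMEM on allocation failure. */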
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* need no locking here, as no one else can access the tables yet */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
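
/* Walk the path table of @sdata and delete entries whose lifetime has run
 * out and that are neither being resolved nor fixed. */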
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
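
/* Free both path tables, including all remaining leaves.  Called during
 * teardown, when no other users of the tables remain (which is what the
 * raw rcu_dereference_protected(..., 1) below relies on). */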
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}