base.c
00001 /*------------------------------------------------------------------------
00002  *---------------------        ATH5K Driver               --------------------
00003  *------------------------------------------------------------------------
00004  *                                                         V1.0B  04/08/09
00005  *
00006  *
00007  *  Aug 2009 - Samuel Cabrero <samuelcabrero@gmail.com>
00008  *              Initial release
00009  *
00010  *  ----------------------------------------------------------------------
00011  *  Copyright (C) 2000-2009, Universidad de Zaragoza, SPAIN
00012  *
00013  *  Authors:
00014  *              Samuel Cabrero       <samuelcabrero@gmail.com>
00015  *              Danilo Tardioli          <dantard@unizar.es>
00016  *              Jose Luis Villarroel <jlvilla@unizar.es>
00017  *
00018  *  This is a modified version of the original ath5k driver for use with the
00019  *  RT-WMP protocol developed at Universidad de Zaragoza. It should work with
00020  *  all Atheros 5xxx WLAN cards. The IEEE 802.11 layer has been removed, so the
00021  *  driver just sends and receives frames over the air, as if it were an
00022  *  Ethernet bus interface.
00023  * 
00024  *  Please read ath5k_interface.h for instructions.
00025  *
00026  *  This program is distributed in the hope that it will be useful,
00027  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
00028  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00029  *  GNU General Public License for more details.
00030  *
00031  *  Read the following header for redistribution terms.
00032  *
00033  *---------------------------------------------------------------------------*/
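/*
 *  Illustrative sketch (not part of the driver): because the card registers
 *  as an ordinary net_device, a user-space program can exchange raw frames
 *  with it through an AF_PACKET socket.  The helper name send_raw(), the
 *  interface name passed to it and the EtherType 0x6969 are assumptions made
 *  only for this example; see ath5k_interface.h for the intended access
 *  method.
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <arpa/inet.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/socket.h>
 *      #include <net/if.h>
 *      #include <net/ethernet.h>
 *      #include <linux/if_packet.h>
 *
 *      static int send_raw(const char *ifname, const unsigned char dst[ETH_ALEN],
 *                          const void *payload, size_t len)
 *      {
 *              unsigned char frame[ETH_FRAME_LEN];
 *              struct ether_header *eh = (struct ether_header *) frame;
 *              struct sockaddr_ll sll = { .sll_family = AF_PACKET };
 *              struct ifreq ifr;
 *              int ret, fd;
 *
 *              if (len > ETH_FRAME_LEN - ETH_HLEN)
 *                      return -1;
 *              fd = socket(AF_PACKET, SOCK_RAW, htons(0x6969));
 *              if (fd < 0)
 *                      return -1;
 *              memset(&ifr, 0, sizeof(ifr));
 *              strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *              if (ioctl(fd, SIOCGIFINDEX, &ifr) < 0) {
 *                      close(fd);
 *                      return -1;
 *              }
 *              sll.sll_ifindex = ifr.ifr_ifindex;
 *              sll.sll_halen = ETH_ALEN;
 *              memcpy(sll.sll_addr, dst, ETH_ALEN);
 *              memcpy(eh->ether_dhost, dst, ETH_ALEN);
 *              ioctl(fd, SIOCGIFHWADDR, &ifr);
 *              memcpy(eh->ether_shost, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
 *              eh->ether_type = htons(0x6969);
 *              memcpy(frame + ETH_HLEN, payload, len);
 *              ret = sendto(fd, frame, ETH_HLEN + len, 0,
 *                           (struct sockaddr *) &sll, sizeof(sll));
 *              close(fd);
 *              return ret < 0 ? -1 : 0;
 *      }
 */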
00034 
00035 /*-
00036  * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
00037  * Copyright (c) 2004-2005 Atheros Communications, Inc.
00038  * Copyright (c) 2006 Devicescape Software, Inc.
00039  * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
00040  * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
00041  *
00042  * All rights reserved.
00043  *
00044  * Redistribution and use in source and binary forms, with or without
00045  * modification, are permitted provided that the following conditions
00046  * are met:
00047  * 1. Redistributions of source code must retain the above copyright
00048  *    notice, this list of conditions and the following disclaimer,
00049  *    without modification.
00050  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
00051  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
00052  *    redistribution must be conditioned upon including a substantially
00053  *    similar Disclaimer requirement for further binary redistribution.
00054  * 3. Neither the names of the above-listed copyright holders nor the names
00055  *    of any contributors may be used to endorse or promote products derived
00056  *    from this software without specific prior written permission.
00057  *
00058  * Alternatively, this software may be distributed under the terms of the
00059  * GNU General Public License ("GPL") version 2 as published by the Free
00060  * Software Foundation.
00061  *
00062  * NO WARRANTY
00063  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00064  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00065  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
00066  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
00067  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
00068  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
00069  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
00070  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
00071  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
00072  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
00073  * THE POSSIBILITY OF SUCH DAMAGES.
00074  *
00075  */
00076 
00077 #include <linux/version.h>
00078 #include <linux/module.h>
00079 #include <linux/delay.h>
00080 #include <linux/if.h>
00081 #include <linux/netdevice.h>
00082 #include <linux/cache.h>
00083 #include <linux/pci.h>
00084 #include <linux/ethtool.h>
00085 #include <linux/uaccess.h>
00086 #include <linux/if_arp.h>
00087 #include <linux/etherdevice.h>
00088 #include <asm/unaligned.h>
00089 
00090 #include "base.h"
00091 #include "reg.h"
00092 #include "debug.h"
00093 
00094 
00095 /* RUBEN */
00096 #include <linux/semaphore.h>
00097 #include <linux/spinlock.h>
00098 
00099 
00100 static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
00101 static int modparam_all_channels = true;
00102 static int use_beacon_frames = 0;
00103 /******************\
00104 * Internal defines *
00105 \******************/
00106 
00107 /* Module info */
00108 MODULE_AUTHOR("Jiri Slaby");
00109 MODULE_AUTHOR("Nick Kossifidis");
00110 MODULE_AUTHOR("Samuel Cabrero");
00111 MODULE_DESCRIPTION("Modified version of the ath5k driver for use with RT-WMP protocol.");
00112 MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
00113 MODULE_LICENSE("Dual BSD/GPL");
00114 MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
00115 
00116 
00117 /* Known PCI ids */
00118 static const struct pci_device_id ath5k_pci_id_table[] = {
00119         { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
00120         { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
00121         { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
00122         { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
00123         { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
00124         { PCI_VDEVICE(3COM_2,  0x0013) }, /* 3com 5212 */
00125         { PCI_VDEVICE(3COM,    0x0013) }, /* 3com 3CRDAG675 5212 */
00126         { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
00127         { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
00128         { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
00129         { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
00130         { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
00131         { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
00132         { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
00133         { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
00134         { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
00135         { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
00136         { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
00137         { 0 }
00138 };
00139 //MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
00140 
00141 /* Known SREVs */
00142 static const struct ath5k_srev_name srev_names[] = {
00143         { "5210",       AR5K_VERSION_MAC,       AR5K_SREV_AR5210 },
00144         { "5311",       AR5K_VERSION_MAC,       AR5K_SREV_AR5311 },
00145         { "5311A",      AR5K_VERSION_MAC,       AR5K_SREV_AR5311A },
00146         { "5311B",      AR5K_VERSION_MAC,       AR5K_SREV_AR5311B },
00147         { "5211",       AR5K_VERSION_MAC,       AR5K_SREV_AR5211 },
00148         { "5212",       AR5K_VERSION_MAC,       AR5K_SREV_AR5212 },
00149         { "5213",       AR5K_VERSION_MAC,       AR5K_SREV_AR5213 },
00150         { "5213A",      AR5K_VERSION_MAC,       AR5K_SREV_AR5213A },
00151         { "2413",       AR5K_VERSION_MAC,       AR5K_SREV_AR2413 },
00152         { "2414",       AR5K_VERSION_MAC,       AR5K_SREV_AR2414 },
00153         { "5424",       AR5K_VERSION_MAC,       AR5K_SREV_AR5424 },
00154         { "5413",       AR5K_VERSION_MAC,       AR5K_SREV_AR5413 },
00155         { "5414",       AR5K_VERSION_MAC,       AR5K_SREV_AR5414 },
00156         { "2415",       AR5K_VERSION_MAC,       AR5K_SREV_AR2415 },
00157         { "5416",       AR5K_VERSION_MAC,       AR5K_SREV_AR5416 },
00158         { "5418",       AR5K_VERSION_MAC,       AR5K_SREV_AR5418 },
00159         { "2425",       AR5K_VERSION_MAC,       AR5K_SREV_AR2425 },
00160         { "2417",       AR5K_VERSION_MAC,       AR5K_SREV_AR2417 },
00161         { "xxxxx",      AR5K_VERSION_MAC,       AR5K_SREV_UNKNOWN },
00162         { "5110",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5110 },
00163         { "5111",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5111 },
00164         { "5111A",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_5111A },
00165         { "2111",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2111 },
00166         { "5112",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5112 },
00167         { "5112A",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_5112A },
00168         { "5112B",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_5112B },
00169         { "2112",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2112 },
00170         { "2112A",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_2112A },
00171         { "2112B",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_2112B },
00172         { "2413",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2413 },
00173         { "5413",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5413 },
00174         { "2316",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2316 },
00175         { "2317",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2317 },
00176         { "5424",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5424 },
00177         { "5133",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5133 },
00178         { "xxxxx",      AR5K_VERSION_RAD,       AR5K_SREV_UNKNOWN },
00179 };
00180 
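/* NB: ieee80211_rate.bitrate is given in units of 100 kbit/s, so the entry
 * with .bitrate = 10 is the 1 Mbit/s rate and .bitrate = 540 is 54 Mbit/s. */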
00181 static const struct ieee80211_rate ath5k_rates[] = {
00182         { .bitrate = 10,
00183           .hw_value = ATH5K_RATE_CODE_1M, },
00184         { .bitrate = 20,
00185           .hw_value = ATH5K_RATE_CODE_2M,
00186           .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
00187           .flags = IEEE80211_RATE_SHORT_PREAMBLE },
00188         { .bitrate = 55,
00189           .hw_value = ATH5K_RATE_CODE_5_5M,
00190           .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
00191           .flags = IEEE80211_RATE_SHORT_PREAMBLE },
00192         { .bitrate = 110,
00193           .hw_value = ATH5K_RATE_CODE_11M,
00194           .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
00195           .flags = IEEE80211_RATE_SHORT_PREAMBLE },
00196         { .bitrate = 60,
00197           .hw_value = ATH5K_RATE_CODE_6M,
00198           .flags = 0 },
00199         { .bitrate = 90,
00200           .hw_value = ATH5K_RATE_CODE_9M,
00201           .flags = 0 },
00202         { .bitrate = 120,
00203           .hw_value = ATH5K_RATE_CODE_12M,
00204           .flags = 0 },
00205         { .bitrate = 180,
00206           .hw_value = ATH5K_RATE_CODE_18M,
00207           .flags = 0 },
00208         { .bitrate = 240,
00209           .hw_value = ATH5K_RATE_CODE_24M,
00210           .flags = 0 },
00211         { .bitrate = 360,
00212           .hw_value = ATH5K_RATE_CODE_36M,
00213           .flags = 0 },
00214         { .bitrate = 480,
00215           .hw_value = ATH5K_RATE_CODE_48M,
00216           .flags = 0 },
00217         { .bitrate = 540,
00218           .hw_value = ATH5K_RATE_CODE_54M,
00219           .flags = 0 },
00220         /* XR missing */
00221 };
00222 
00223 
00224 /*
00225  * Prototypes - PCI stack related functions
00226  */
00227 static int __devinit    ath5k_pci_probe(struct pci_dev *pdev,
00228                                 const struct pci_device_id *id);
00229 static void __devexit   ath5k_pci_remove(struct pci_dev *pdev);
00230 
00231 static struct pci_driver ath5k_pci_driver = {
00232         .name           = "ath5k_pci",
00233         .id_table       = ath5k_pci_id_table,
00234         .probe          = ath5k_pci_probe,
00235         .remove         = __devexit_p(ath5k_pci_remove),
00236 };
00237 
00238 
00239 
00240 /*
00241  * Prototypes - MAC 802.11 stack related functions
00242  */
00243 static int ath5k_tx(struct sk_buff *skb, struct net_device *netdev);
00244 static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
00245 static int ath5k_reset_wake(struct ath5k_softc *sc);
00246 static int ath5k_start(struct ath5k_softc *sc);
00247 static void ath5k_stop(struct ath5k_softc *sc);
00248 
00249 /*
00250  * Prototypes - Internal functions
00251  */
00252 /* Attach detach */
00253 static int  ath5k_attach(struct ath5k_softc *sc);
00254 static void ath5k_detach(struct pci_dev *pdev, struct ath5k_softc *sc);
00255 /* Channel/mode setup */
00256 static inline short ath5k_ieee2mhz(short chan);
00257 static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
00258                                 struct ieee80211_channel *channels,
00259                                 unsigned int mode,
00260                                 unsigned int max);
00261 static int      ath5k_setup_bands(struct ath5k_softc *sc);
00262 static void     ath5k_mode_setup(struct ath5k_softc *sc);
00263 
00264 /* Descriptor setup */
00265 static int      ath5k_desc_alloc(struct ath5k_softc *sc,
00266                                 struct pci_dev *pdev);
00267 static void     ath5k_desc_free(struct ath5k_softc *sc,
00268                                 struct pci_dev *pdev);
00269 /* Buffers setup */
00270 static int      ath5k_rxbuf_setup(struct ath5k_softc *sc,
00271                                 struct ath5k_buf *bf);
00272 static int      ath5k_txbuf_setup(struct ath5k_softc *sc,
00273                                 struct ath5k_buf *bf);
00274 static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
00275                                 struct ath5k_buf *bf)
00276 {
00277         BUG_ON(!bf);
00278         if (!bf->skb)
00279                 return;
00280         pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
00281                         PCI_DMA_TODEVICE);
00282         dev_kfree_skb_any(bf->skb);
00283         bf->skb = NULL;
00284 }
00285 
00286 static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
00287                                 struct ath5k_buf *bf)
00288 {
00289         BUG_ON(!bf);
00290         if (!bf->skb)
00291                 return;
00292         pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
00293                         PCI_DMA_FROMDEVICE);
00294         dev_kfree_skb_any(bf->skb);
00295         bf->skb = NULL;
00296 }
00297 
00298 
00299 /* Queues setup */
00300 static struct   ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc,
00301                                 int qtype, int subtype);
00302 static void     ath5k_txq_drainq(struct ath5k_softc *sc,
00303                                 struct ath5k_txq *txq);
00304 static void     ath5k_txq_cleanup(struct ath5k_softc *sc);
00305 static void     ath5k_txq_release(struct ath5k_softc *sc);
00306 /* Rx handling */
00307 static int      ath5k_rx_start(struct ath5k_softc *sc);
00308 static void     ath5k_rx_stop(struct ath5k_softc *sc);
00309 static void     ath5k_rx_done(struct ath5k_softc *sc);
00310 /* Tx handling */
00311 static void     ath5k_tx_done(struct ath5k_softc *sc,
00312                                 struct ath5k_txq *txq);
00313 /* Interrupt handling */
00314 static int ath5k_init(struct ath5k_softc *sc);
00315 static int ath5k_stop_locked(struct ath5k_softc *sc);
00316 static int ath5k_stop_hw(struct ath5k_softc *sc);
00317 static irqreturn_t ath5k_intr(int irq, void *dev_id);
00318 static void ath5k_tasklet_reset(unsigned long data);
00319 //static void ath5k_tasklet_calibrate(unsigned long data);
00320 /* Net device */
00321 static int  ath5k_open(struct net_device *netdev);
00322 static int  ath5k_close (struct net_device *netdev);
00323 /* Configuration and auxiliary */
00324 static void ath5k_config_filter (struct ath5k_softc *sc, bool broadcast, bool control, bool promisc);
00325 static int ath5k_config_tx_control (struct ath5k_softc *sc, 
00326                 unsigned char count, bool wait_for_ack, bool use_short_preamble);
00327 static int ath5k_config_disable_ack (struct ath5k_softc *sc, bool disable);
00328 static int ath5k_config(struct ath5k_softc *sc, unsigned short channel, enum rates rate, unsigned char power,
00329                 enum ath5k_ant_mode antenna_mode);
00330 static struct ieee80211_channel * ath5k_get_channel(struct ath5k_softc *sc, unsigned int freq);
00331 static int ath5k_get_rate_idx(struct ath5k_softc *sc, enum ieee80211_band band, enum rates rate);
00332 static int ath5k_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
00333 
00334 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
00335 static const struct net_device_ops ath5k_netdev_ops = {
00336         .ndo_open               = ath5k_open,
00337         .ndo_stop               = ath5k_close,
00338         .ndo_start_xmit         = ath5k_tx,
00339         .ndo_do_ioctl           = ath5k_ioctl,
00340 };
00341 #endif
00342 
00343 
00344 /* RUBEN */
00345 
00346 /* Interface that will be used by RT-WMP */
00347 static struct net_device* ath5k_slave_dev = NULL;
00348 
00349 /* Queue for RT-WMP packet reception */
00350 static struct rx_queue {
00351    struct sk_buff *skb;
00352    struct semaphore rx_sem;
00353    spinlock_t spin;
00354 } rx_rtwmp_queue;
00355 
00356 /* Initialization of the reception semaphore and queue for RT-WMP */
00357 static void init_rx_queue(struct rx_queue *q) {
00358    sema_init(&q->rx_sem, 0);
00359    spin_lock_init(&q->spin);
00360    q->skb=NULL;
00361 }
00362 
00363 /* Free the remaining SKBs in the reception queue */
00364 static void purge_rx_queue(struct rx_queue *q) {
00365    if(q->skb!=NULL)
00366       dev_kfree_skb_any(q->skb);
00367 }
00368 
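/* Single-slot mailbox: if a frame is already pending it is freed and replaced
 * by the newer one, so the reader always gets the most recent frame. */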
00369 static void enqueue_rx_queue(struct rx_queue *q, struct sk_buff *skb) {
00370    unsigned long flags;
00371    spin_lock_irqsave(&q->spin, flags);
00372    if(q->skb == NULL){  // no frame pending yet
00373       q->skb = skb;
00374       up(&q->rx_sem);
00375    } else {
00376       dev_kfree_skb_any(q->skb);
00377       q->skb = skb;
00378    }
00379    spin_unlock_irqrestore(&q->spin, flags);
00380 }
00381 
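/* Take the pending skb.  A timeout of 0 blocks (interruptibly) until a frame
 * arrives; a non-zero timeout is the maximum wait in jiffies.  Any surplus
 * semaphore ups are drained so at most one pending frame is ever reported. */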
00382 static struct sk_buff *dequeue_rx_queue(struct rx_queue *q, long timeout) {
00383    unsigned long flags;
00384    struct sk_buff *skb = NULL;
00385 
00386    if (timeout == 0) {
00387       if (0==down_interruptible(&q->rx_sem)) {
00388          spin_lock_irqsave(&q->spin, flags);
00389          skb = q->skb;
00390          q->skb = NULL;
00391          while(!down_trylock(&q->rx_sem));
00392          spin_unlock_irqrestore(&q->spin, flags);
00393       }
00394    }
00395    else if (0==down_timeout(&q->rx_sem, timeout)) {
00396       spin_lock_irqsave(&q->spin, flags);
00397       skb = q->skb;
00398       q->skb = NULL;
00399       while(!down_trylock(&q->rx_sem));
00400       spin_unlock_irqrestore(&q->spin, flags);
00401    }
00402 
00403    return skb;
00404 }
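/*
 * Usage sketch (an assumption, nothing in this file calls it): a hypothetical
 * RT-WMP receive path could block on the single-slot queue like this.  The
 * function name and the 100 ms timeout are invented for illustration only.
 *
 *      static void rtwmp_rx_example(void)
 *      {
 *              struct sk_buff *skb;
 *
 *              skb = dequeue_rx_queue(&rx_rtwmp_queue, msecs_to_jiffies(100));
 *              if (skb != NULL) {
 *                      pr_debug("rtwmp: received %u bytes\n", skb->len);
 *                      dev_kfree_skb_any(skb);
 *              }
 *      }
 */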
00405 
00406 
00407 
00408 /*
00409  * Module init/exit functions
00410  */
00411 
00412 
00413 /* *** DANIIIIIIIIIIII
00414         static int
00415         hello_read_proc(char *buffer, char **start, off_t offset, int size, int *eof,
00416                         void *data)
00417         {
00418     char *hello_str = "Hello, world!\n";
00419     int len = strlen(hello_str);
00420     if (size < len)
00421             return -EINVAL;
00422     if (offset != 0)
00423             return 0;
00424     strcpy(buffer, hello_str);
00425     *eof = 1;
00426     return len;
00427 
00428 }
00429 */
00430 
00431 
00432 static int __init
00433 init_ath5k_pci(void)
00434 {
00435         int ret;
00436 
00437         printk(KERN_INFO "Module ath5k_raw loaded\n");
00438 
00439         ret = pci_register_driver(&ath5k_pci_driver);
00440         if (ret) {
00441                 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
00442                 return ret;
00443         }
00444 
00445 
00446         /* RUBEN */
00447    init_rx_queue(&rx_rtwmp_queue);
00448 
00449 /* DANIIIIIIIIII
00450     if (create_proc_read_entry("hello_world", 0, NULL, hello_read_proc,
00451                                 NULL) == 0) {
00452             printk(KERN_ERR,
00453                    "Unable to register \"Hello, world!\" proc file\n");
00454             return -ENOMEM;
00455     }
00456 */
00457 
00458 
00459         return 0;
00460 }
00461 
00462 static void __exit
00463 exit_ath5k_pci(void)
00464 {
00465         pci_unregister_driver(&ath5k_pci_driver);
00466 
00467    /* RUBEN */
00468    purge_rx_queue(&rx_rtwmp_queue);
00469 
00470         printk(KERN_INFO "Module ath5k_raw unloaded\n");
00471 }
00472 
00473 module_init(init_ath5k_pci);
00474 module_exit(exit_ath5k_pci);
00475 
00476 
00477 /********************\
00478 * PCI Initialization *
00479 \********************/
00480 
00481 static const char *
00482 ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
00483 {
00484         const char *name = "xxxxx";
00485         unsigned int i;
00486 
00487         for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
00488                 if (srev_names[i].sr_type != type)
00489                         continue;
00490 
00491                 if ((val & 0xf0) == srev_names[i].sr_val)
00492                         name = srev_names[i].sr_name;
00493 
00494                 if ((val & 0xff) == srev_names[i].sr_val) {
00495                         name = srev_names[i].sr_name;
00496                         break;
00497                 }
00498         }
00499 
00500         return name;
00501 }
00502 
00503 static int __devinit
00504 ath5k_pci_probe(struct pci_dev *pdev,
00505                 const struct pci_device_id *id)
00506 {
00507         struct net_device *netdev;
00508         struct ath5k_softc *sc;
00509         void __iomem *mem;
00510         int ret, rate_idx;
00511         u8 csz;
00512         u8 mac[ETH_ALEN];
00513         
00514         ret = -ENOMEM;
00515 
00516         /* Initialize the net_device */
00517         netdev = alloc_etherdev(sizeof(struct ath5k_softc));
00518         if (netdev == NULL) 
00519         {
00520                 ATH5K_ERR(NULL, "can't allocate net_device structure\n");
00521                 goto err;
00522         }
00523 
00524         sc = netdev_priv(netdev);
00525         sc->netdev = netdev;
00526         sc->pdev = pdev;
00527         SET_NETDEV_DEV(netdev, &pdev->dev);
00528 
00529 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
00530         netdev->open = ath5k_open;
00531         netdev->stop = ath5k_close;
00532         netdev->hard_start_xmit = ath5k_tx;
00533         netdev->do_ioctl = ath5k_ioctl;
00534 #else
00535         netdev->netdev_ops = &ath5k_netdev_ops;
00536 #endif
00537         netdev->tx_queue_len = ATH_TXBUF;
00538 
00539         ret = pci_enable_device(pdev);
00540         if (ret) {
00541                 dev_err(&pdev->dev, "can't enable device\n");
00542                 goto err_netdev;
00543         }
00544 
00545         /* XXX 32-bit addressing only */
00546         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
00547         if (ret) {
00548                 dev_err(&pdev->dev, "32-bit DMA not available\n");
00549                 goto err_dis;
00550         }
00551 
00552         /*
00553          * Cache line size is used to size and align various
00554          * structures used to communicate with the hardware.
00555          */
00556         pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
00557         if (csz == 0) {
00558                 /*
00559                  * Linux 2.4.18 (at least) writes the cache line size
00560                  * register as a 16-bit wide register which is wrong.
00561                  * We must have this setup properly for rx buffer
00562                  * DMA to work so force a reasonable value here if it
00563                  * comes up zero.
00564                  */
00565                 csz = L1_CACHE_BYTES >> 2;
00566                 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
00567         }
00568         /*
00569          * The default setting of latency timer yields poor results,
00570          * set it to the value used by other systems.  It may be worth
00571          * tweaking this setting more.
00572          */
00573         pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
00574 
00575         /* Enable bus mastering */
00576         pci_set_master(pdev);
00577 
00578         /*
00579          * Disable the RETRY_TIMEOUT register (0x41) to keep
00580          * PCI Tx retries from interfering with C3 CPU state.
00581          */
00582         pci_write_config_byte(pdev, 0x41, 0);
00583 
00584         ret = pci_request_region(pdev, 0, "ath5k");
00585         if (ret) {
00586                 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
00587                 goto err_dis;
00588         }
00589 
00590         mem = pci_iomap(pdev, 0, 0);
00591         if (!mem) {
00592                 dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
00593                 ret = -EIO;
00594                 goto err_reg;
00595         }
00596 
00597         /* Set debug level */
00598         ath5k_set_debug_level(sc);
00599 
00600         /*
00601          * Mark the device as detached to avoid processing
00602          * interrupts until setup is complete.
00603          */
00604         __set_bit(ATH_STAT_INVALID, sc->status);
00605 
00606         sc->iobase = mem; /* So we can unmap it on detach */
00607         sc->cachelsz = csz << 2; /* convert to bytes */
00608         
00609         mutex_init(&sc->lock);
00610         spin_lock_init(&sc->rxbuflock);
00611         spin_lock_init(&sc->txbuflock);
00612 
00613         /* Set private data */
00614         pci_set_drvdata(pdev, netdev);
00615 
00616         /* Setup interrupt handler */
00617         /* In ath5k_open, when the user brings up the interface */
00618 
00619         /*If we passed the test malloc a ath5k_hw struct*/
00620         sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
00621         if (!sc->ah) {
00622                 ret = -ENOMEM;
00623                 ATH5K_ERR(sc, "out of memory\n");
00624                 goto err_reg;
00625         }
00626 
00627         sc->ah->ah_sc = sc;
00628         sc->ah->ah_iobase = sc->iobase;
00629 
00630         /* Initialize device */
00631         ret = ath5k_hw_attach(sc);
00632         if (ret) {
00633                 ATH5K_ERR(sc, "error attaching hardware\n");
00634                 goto err_free;
00635         }
00636 
00637         /* Finish private driver data initialization */
00638         ret = ath5k_attach(sc);
00639         if (ret)
00640                 goto err_ah;
00641 
00642         /* Set MAC address */
00643         ath5k_hw_get_lladdr(sc->ah, mac);
00644         memcpy(netdev->dev_addr, mac, ETH_ALEN);
00645         memcpy(netdev->perm_addr, mac, ETH_ALEN);
00646 
00647         ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
00648                         ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
00649                                         sc->ah->ah_mac_srev,
00650                                         sc->ah->ah_phy_revision);
00651 
00652         if (!sc->ah->ah_single_chip) {
00653                 /* Single chip radio (!RF5111) */
00654                 if (sc->ah->ah_radio_5ghz_revision &&
00655                         !sc->ah->ah_radio_2ghz_revision) {
00656                         /* No 5GHz support -> report 2GHz radio */
00657                         if (!test_bit(AR5K_MODE_11A,
00658                                 sc->ah->ah_capabilities.cap_mode)) {
00659                                 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
00660                                         ath5k_chip_name(AR5K_VERSION_RAD,
00661                                                 sc->ah->ah_radio_5ghz_revision),
00662                                                 sc->ah->ah_radio_5ghz_revision);
00663                         /* No 2GHz support (5110 and some
00664                          * 5Ghz only cards) -> report 5Ghz radio */
00665                         } else if (!test_bit(AR5K_MODE_11B,
00666                                 sc->ah->ah_capabilities.cap_mode)) {
00667                                 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
00668                                         ath5k_chip_name(AR5K_VERSION_RAD,
00669                                                 sc->ah->ah_radio_5ghz_revision),
00670                                                 sc->ah->ah_radio_5ghz_revision);
00671                         /* Multiband radio */
00672                         } else {
00673                                 ATH5K_INFO(sc, "RF%s multiband radio found"
00674                                         " (0x%x)\n",
00675                                         ath5k_chip_name(AR5K_VERSION_RAD,
00676                                                 sc->ah->ah_radio_5ghz_revision),
00677                                                 sc->ah->ah_radio_5ghz_revision);
00678                         }
00679                 }
00680                 /* Multi chip radio (RF5111 - RF2111) ->
00681                  * report both 2GHz/5GHz radios */
00682                 else if (sc->ah->ah_radio_5ghz_revision &&
00683                                 sc->ah->ah_radio_2ghz_revision){
00684                         ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
00685                                 ath5k_chip_name(AR5K_VERSION_RAD,
00686                                         sc->ah->ah_radio_5ghz_revision),
00687                                         sc->ah->ah_radio_5ghz_revision);
00688                         ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
00689                                 ath5k_chip_name(AR5K_VERSION_RAD,
00690                                         sc->ah->ah_radio_2ghz_revision),
00691                                         sc->ah->ah_radio_2ghz_revision);
00692                 }
00693         }
00694 
00695 
00696         /* ready to process interrupts */
00697         __clear_bit(ATH_STAT_INVALID, sc->status);
00698 
00699         /* Set channel pointers */
00700         sc->curchan = ath5k_get_channel(sc, 2412);
00701         if (sc->curchan == NULL)
00702         {
00703                 ATH5K_ERR(sc, "Can't set channel %d\n", 2412);
00704                 ret = -1;
00705                 goto err_detach;
00706         }
00707         sc->curband = &sc->sbands[sc->curchan->band];
00708 
00709         /* Configure tx_info */
00710         sc->tx_info.band = sc->curband->band;
00711         rate_idx = ath5k_get_rate_idx(sc, sc->curband->band, RATE_54M);
00712         if (rate_idx < 0 || rate_idx >= AR5K_MAX_RATES)
00713         {
00714                 ATH5K_ERR(sc, "Can't set rate %d\n", RATE_54M);
00715                 ret = -1;
00716                 goto err_detach;
00717         }
00718         sc->tx_info.rate_idx =  rate_idx;
00719         sc->tx_info.count = 1;
00720         sc->tx_info.wait_for_ack = false;
00721         sc->tx_info.use_short_preamble = false;
00722 
00723         /* Configure the RX filter */
00724         ath5k_config_filter(sc, true, false, false);
00725 
00726         /* Configure ACK transmission in response to unicast frames */
00727         ath5k_config_disable_ack(sc, false);
00728 
00729         /* Configure power in half-dB steps.
00730          * For the D-Link cards -> 15 dBm.
00731          * For the Ubiquiti cards -> 15 dBm. They have a 10 dBm offset on the card
00732          * itself in G mode, so the real power is 25 dBm (300 mW) in G and
00733          * 15 dBm in A.
00734          */
00735         sc->power_level = 15;
00736 
00737         /* Register the net_device */
00738         ret = register_netdev(netdev);
00739         if (ret)
00740         {
00741                 ATH5K_ERR(NULL, "can't register net_device structure\n");
00742                 goto err_detach;
00743         }
00744 
00745         /* Print initial config */
00746         ATH5K_INFO(sc, "Freq %d Mhz, Rate (%d * 100) kps, Power %d dBm\n", 
00747                         sc->curchan->center_freq, RATE_54M, sc->power_level);
00748 
00749    /* RUBEN */
00750    /* The first registered interface will be the one used by RT-WMP */
00751    if (!ath5k_slave_dev) {
00752       ath5k_slave_dev = netdev;
00753    }
00754 
00755         return 0;
00756 err_detach:
00757         ath5k_detach(pdev, sc);
00758 err_ah:
00759         ath5k_hw_detach(sc->ah);
00760 err_free:
00761         pci_iounmap(pdev, mem);
00762 err_reg:
00763         pci_release_region(pdev, 0);
00764 err_dis:
00765         pci_disable_device(pdev);
00766 err_netdev:
00767         pci_set_drvdata(pdev, NULL);
00768         free_netdev(netdev);
00769 err:
00770         return ret;
00771 }
00772 
00773 static void __devexit
00774 ath5k_pci_remove(struct pci_dev *pdev)
00775 {
00776         struct net_device *netdev = pci_get_drvdata(pdev);
00777 
00778         if (netdev) {
00779                 struct ath5k_softc *sc = netdev_priv(netdev);
00780 
00781                 unregister_netdev(netdev);
00782                 ath5k_detach(pdev, sc);
00783                 ath5k_hw_detach(sc->ah);
00784                 /* Free IRQ in ath5k_close */
00785                 pci_iounmap(pdev, sc->iobase);
00786                 free_netdev(netdev);
00787                 pci_release_region(pdev, 0);
00788                 pci_disable_device(pdev);
00789                 pci_set_drvdata(pdev, NULL);
00790         }
00791 }
00792 
00793 static int
00794 ath5k_open(struct net_device *netdev)
00795 {
00796         struct ath5k_softc *sc = netdev_priv(netdev);
00797         int err;
00798 
00799         if ((err = request_irq(sc->pdev->irq, ath5k_intr, IRQF_SHARED, sc->netdev->name, sc->netdev))) {
00800                 ATH5K_ERR(sc, "request_irq failed\n");
00801                 goto err_irq;
00802         }
00803 
00804         ath5k_start(sc);
00805 
00806         /* Half dB steps */
00807         ath5k_hw_set_txpower_limit(sc->ah, (sc->power_level * 2));
00808 
00809         /* Configure the antenna mode */
00810         ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
00811 
00812         /* Start the TX queue once everything is ready */
00813         netif_start_queue(netdev);
00814 
00815         return 0;
00816         
00817 err_irq:
00818         return err;
00819 }
00820 
00821 static int
00822 ath5k_close(struct net_device *netdev)
00823 {
00824         struct ath5k_softc *sc = netdev_priv(netdev);
00825 
00826         /* Stop the transmit queue */
00827         netif_stop_queue(netdev);
00828 
00829         ath5k_stop(sc);
00830         
00831         free_irq(sc->pdev->irq, sc->netdev);
00832 
00833         return 0;
00834 }
00835 
00836 /***********************\
00837 * Driver Initialization *
00838 \***********************/
00839 
00840 static int
00841 ath5k_attach(struct ath5k_softc *sc)
00842 {
00843         struct pci_dev *pdev = sc->pdev;
00844         struct ath5k_hw *ah = sc->ah;
00845         int ret;
00846 
00847         ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
00848 
00849         /*
00850          * Check if the MAC has multi-rate retry support.
00851          * We do this by trying to setup a fake extended
00852          * descriptor.  MAC's that don't have support will
00853          * return false w/o doing anything.  MAC's that do
00854          * support it will return true w/o doing anything.
00855          */
00856         ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
00857         if (ret < 0)
00858                 goto err;
00859         if (ret > 0)
00860                 __set_bit(ATH_STAT_MRRETRY, sc->status);
00861 
00862         /*
00863          * Collect the channel list.  The 802.11 layer
00864          * is responsible for filtering this list based
00865          * on settings like the phy mode and regulatory
00866          * domain restrictions.
00867          */
00868         ret = ath5k_setup_bands(sc);
00869         if (ret) {
00870                 ATH5K_ERR(sc, "can't get channels\n");
00871                 goto err;
00872         }
00873 
00874         /* NB: setup here so ath5k_rate_update is happy */
00875         if (test_bit(AR5K_MODE_11A, ah->ah_modes))
00876                 sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
00877         else
00878                 sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
00879 
00880         /*
00881          * Allocate tx+rx descriptors and populate the lists.
00882          */
00883         ret = ath5k_desc_alloc(sc, pdev);
00884         if (ret) {
00885                 ATH5K_ERR(sc, "can't allocate descriptors\n");
00886                 goto err;
00887         }
00888 
00889         /*
00890          * Allocate hardware transmit queue. Note that hw functions 
00891          * handle resetting these queues at the needed time.
00892          */
00893         sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
00894         if (IS_ERR(sc->txq)) {
00895                 ATH5K_ERR(sc, "can't setup xmit queue\n");
00896                 ret = PTR_ERR(sc->txq);
00897                 goto err_desc;
00898         }
00899 
00900         tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
00901 //      tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
00902 
00903         /* All MAC address bits matter for ACKs */
00904         memset(sc->bssidmask, 0xff, ETH_ALEN);
00905         ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
00906 
00907         return 0;
00908 err_desc:
00909         ath5k_desc_free(sc, pdev);
00910 err:
00911         return ret;
00912 }
00913 
00914 static void
00915 ath5k_detach(struct pci_dev *pdev, struct ath5k_softc *sc)
00916 {
00917         /*
00918          * NB: the order of these is important:
00919          * o call the 802.11 layer before detaching ath5k_hw to
00920          *   insure callbacks into the driver to delete global
00921          *   key cache entries can be handled
00922          * o reclaim the tx queue data structures after calling
00923          *   the 802.11 layer as we'll get called back to reclaim
00924          *   node state and potentially want to use them
00925          * o to cleanup the tx queues the hal is called, so detach
00926          *   it last
00927          * XXX: ??? detach ath5k_hw ???
00928          * Other than that, it's straightforward...
00929          */
00930         ath5k_desc_free(sc, pdev);
00931         ath5k_txq_release(sc);
00932 }
00933 
00934 
00935 
00936 
00937 /********************\
00938 * Channel/mode setup *
00939 \********************/
00940 /* Ugly macro to convert literal channel numbers into their MHz equivalents
00941  * There are certainly some conditions that will break this (like feeding it '30')
00942  * but they shouldn't arise since nothing talks on channel 30. */
00943 #define ieee80211chan2mhz(x) \
00944         (((x) <= 14) ? \
00945         (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
00946         ((x) + 1000) * 5)
00947 
00948 /*
00949  * Convert IEEE channel number to MHz frequency.
00950  */
00951 static inline short
00952 ath5k_ieee2mhz(short chan)
00953 {
00954         if (chan <= 14 || chan >= 27)
00955                 return ieee80211chan2mhz(chan);
00956         else
00957                 return 2212 + chan * 20;
00958 }
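/* For example: channel 1 -> 2412 MHz, channel 14 -> 2484 MHz and
 * channel 36 -> (36 + 1000) * 5 = 5180 MHz; channels 15-26 use the
 * 2212 + chan * 20 branch, e.g. channel 15 -> 2512 MHz. */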
00959 
00960 /*
00961  * Returns true for the channel numbers used without all_channels modparam.
00962  */
00963 static bool ath5k_is_standard_channel(short chan)
00964 {
00965         return ((chan <= 14) ||
00966                 /* UNII 1,2 */
00967                 ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
00968                 /* midband */
00969                 ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
00970                 /* UNII-3 */
00971                 ((chan & 3) == 1 && chan >= 149 && chan <= 165));
00972 }
00973 
00974 static unsigned int
00975 ath5k_copy_channels(struct ath5k_hw *ah,
00976                 struct ieee80211_channel *channels,
00977                 unsigned int mode,
00978                 unsigned int max)
00979 {
00980         unsigned int i, count, size, chfreq, freq, ch;
00981 
00982         if (!test_bit(mode, ah->ah_modes))
00983                 return 0;
00984 
00985         switch (mode) {
00986         case AR5K_MODE_11A:
00987         case AR5K_MODE_11A_TURBO:
00988                 /* 1..220, but 2GHz frequencies are filtered by check_channel */
00989                 size = 220 ;
00990                 chfreq = CHANNEL_5GHZ;
00991                 break;
00992         case AR5K_MODE_11B:
00993         case AR5K_MODE_11G:
00994         case AR5K_MODE_11G_TURBO:
00995                 size = 26;
00996                 chfreq = CHANNEL_2GHZ;
00997                 break;
00998         default:
00999                 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
01000                 return 0;
01001         }
01002 
01003         for (i = 0, count = 0; i < size && max > 0; i++) {
01004                 ch = i + 1;
01005                 freq = ath5k_ieee2mhz(ch);
01006 
01007                 /* Check if channel is supported by the chipset */
01008                 if (!ath5k_channel_ok(ah, freq, chfreq))
01009                         continue;
01010 
01011                 if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
01012                         continue;
01013 
01014                 /* Write channel info and increment counter */
01015                 channels[count].center_freq = freq;
01016                 channels[count].band = (chfreq == CHANNEL_2GHZ) ?
01017                         IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
01018                 switch (mode) {
01019                 case AR5K_MODE_11A:
01020                 case AR5K_MODE_11G:
01021                         channels[count].hw_value = chfreq | CHANNEL_OFDM;
01022                         break;
01023                 case AR5K_MODE_11A_TURBO:
01024                 case AR5K_MODE_11G_TURBO:
01025                         channels[count].hw_value = chfreq |
01026                                 CHANNEL_OFDM | CHANNEL_TURBO;
01027                         break;
01028                 case AR5K_MODE_11B:
01029                         channels[count].hw_value = CHANNEL_B;
01030                 }
01031 
01032                 count++;
01033                 max--;
01034         }
01035 
01036         return count;
01037 }
01038 
01039 static int
01040 ath5k_setup_bands(struct ath5k_softc *sc)
01041 {
01042         struct ath5k_hw *ah = sc->ah;
01043         struct ieee80211_supported_band *sband;
01044         int max_c, count_c = 0;
01045         int i;
01046 
01047         BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
01048         max_c = ARRAY_SIZE(sc->channels);
01049 
01050         /* 2GHz band */
01051         sband = &sc->sbands[IEEE80211_BAND_2GHZ];
01052         sband->band = IEEE80211_BAND_2GHZ;
01053         sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
01054 
01055         if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
01056                 /* G mode */
01057                 memcpy(sband->bitrates, &ath5k_rates[0],
01058                        sizeof(struct ieee80211_rate) * 12);
01059                 sband->n_bitrates = 12;
01060 
01061                 sband->channels = sc->channels;
01062                 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
01063                                         AR5K_MODE_11G, max_c);
01064 
01065                 count_c = sband->n_channels;
01066                 max_c -= count_c;
01067         } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
01068                 /* B mode */
01069                 memcpy(sband->bitrates, &ath5k_rates[0],
01070                        sizeof(struct ieee80211_rate) * 4);
01071                 sband->n_bitrates = 4;
01072 
01073                 /* 5211 only supports B rates and uses 4bit rate codes
01074                  * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
01075                  * fix them up here:
01076                  */
01077                 if (ah->ah_version == AR5K_AR5211) {
01078                         for (i = 0; i < 4; i++) {
01079                                 sband->bitrates[i].hw_value =
01080                                         sband->bitrates[i].hw_value & 0xF;
01081                                 sband->bitrates[i].hw_value_short =
01082                                         sband->bitrates[i].hw_value_short & 0xF;
01083                         }
01084                 }
01085 
01086                 sband->channels = sc->channels;
01087                 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
01088                                         AR5K_MODE_11B, max_c);
01089 
01090                 count_c = sband->n_channels;
01091                 max_c -= count_c;
01092         }
01093 
01094         /* 5GHz band, A mode */
01095         if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
01096                 sband = &sc->sbands[IEEE80211_BAND_5GHZ];
01097                 sband->band = IEEE80211_BAND_5GHZ;
01098                 sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
01099 
01100                 memcpy(sband->bitrates, &ath5k_rates[4],
01101                        sizeof(struct ieee80211_rate) * 8);
01102                 sband->n_bitrates = 8;
01103 
01104                 sband->channels = &sc->channels[count_c];
01105                 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
01106                                         AR5K_MODE_11A, max_c);
01107 
01108         }
01109 
01110         ath5k_debug_dump_bands(sc);
01111 
01112         return 0;
01113 }
01114 
01115 static void
01116 ath5k_mode_setup(struct ath5k_softc *sc)
01117 {
01118         struct ath5k_hw *ah = sc->ah;
01119         u32 rfilt;
01120 
01121         /* configure rx filter */
01122         rfilt = sc->filter_flags;
01123         ath5k_hw_set_rx_filter(ah, rfilt);
01124 
01125         if (ath5k_hw_hasbssidmask(ah))
01126                 ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
01127 
01128         /* configure operational mode */
01129         ath5k_hw_set_opmode(ah);
01130 
01131         ath5k_hw_set_mcast_filter(ah, 0, 0);
01132         ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
01133 }
01134 
01135 /***************\
01136 * Buffers setup *
01137 \***************/
01138 
01139 static
01140 struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
01141 {
01142         struct sk_buff *skb;
01143         unsigned int off;
01144 
01145         /*
01146          * Allocate buffer with headroom_needed space for the
01147          * fake physical layer header at the start.
01148          */
01149         skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1);
01150 
01151         if (!skb) {
01152                 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
01153                                 sc->rxbufsize + sc->cachelsz - 1);
01154                 return NULL;
01155         }
01156         /*
01157          * Cache-line-align.  This is important (for the
01158          * 5210 at least) as not doing so causes bogus data
01159          * in rx'd frames.
01160          */
01161         off = ((unsigned long)skb->data) % sc->cachelsz;
01162         if (off != 0)
01163                 skb_reserve(skb, sc->cachelsz - off);
01164 
01165         *skb_addr = pci_map_single(sc->pdev,
01166                 skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
01167 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
01168         if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
01169 #else
01170         if (unlikely(pci_dma_mapping_error(*skb_addr))) {
01171 #endif
01172                 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
01173                 dev_kfree_skb(skb);
01174                 return NULL;
01175         }
01176         return skb;
01177 }
01178 
01179 static int
01180 ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
01181 {
01182         struct ath5k_hw *ah = sc->ah;
01183         struct sk_buff *skb = bf->skb;
01184         struct ath5k_desc *ds;
01185 
01186         if (!skb) {
01187                 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
01188                 if (!skb)
01189                         return -ENOMEM;
01190                 bf->skb = skb;
01191         }
01192 
01193         /*
01194          * Setup descriptors.  For receive we always terminate
01195          * the descriptor list with a self-linked entry so we'll
01196          * not get overrun under high load (as can happen with a
01197          * 5212 when ANI processing enables PHY error frames).
01198          *
01199          * To insure the last descriptor is self-linked we create
01200          * each descriptor as self-linked and add it to the end.  As
01201          * each additional descriptor is added the previous self-linked
01202          * entry is ``fixed'' naturally.  This should be safe even
01203          * if DMA is happening.  When processing RX interrupts we
01204          * never remove/process the last, self-linked, entry on the
01205          * descriptor list.  This insures the hardware always has
01206          * someplace to write a new frame.
01207          */
01208         ds = bf->desc;
01209         ds->ds_link = bf->daddr;        /* link to self */
01210         ds->ds_data = bf->skbaddr;
01211         ah->ah_setup_rx_desc(ah, ds,
01212                 skb_tailroom(skb),      /* buffer size */
01213                 0);
01214 
01215         if (sc->rxlink != NULL)
01216                 *sc->rxlink = bf->daddr;
01217         sc->rxlink = &ds->ds_link;
01218         return 0;
01219 }
01220 
01221 /* Context: user context (process)
01222  *
01223  * What can happen:
01224  * - Worst case, a hard IRQ kicks us off the CPU -> spin_lock_irq
01225  */
01226 static int
01227 ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
01228 {
01229         struct ath5k_hw *ah = sc->ah;
01230         struct ath5k_txq *txq = sc->txq;
01231         struct ath5k_desc *ds = bf->desc;
01232         struct sk_buff *skb = bf->skb;
01233         struct ieee80211_tx_info *info = &sc->tx_info;
01234         unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
01235         struct ieee80211_rate *rate;
01236         int ret;
01237         u16 hw_rate;
01238         u16 cts_rate = 0;
01239         u16 duration = 0;
01240 
01241         flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
01242 
01243         /* XXX endianness */
01244         bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
01245                         PCI_DMA_TODEVICE);
01246         
01247         if (info->rate_idx >= 0 && info->rate_idx < AR5K_MAX_RATES)
01248                 rate = &sc->sbands[info->band].bitrates[info->rate_idx];
01249         else
01250         {
01251                 ATH5K_ERR(sc, "Rate index invalid\n");
01252                 return -1;
01253         }
01254 
01255         if (!info->wait_for_ack)
01256                 flags |= AR5K_TXDESC_NOACK;
01257 
01258         hw_rate = info->use_short_preamble ? rate->hw_value_short : rate->hw_value;
01259 
01260         pktlen = skb->len;
01261 
01262         /* FIXME: If we are in g mode and rate is a CCK rate
01263          * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
01264          * from tx power (value is in dB units already) */
01265         ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
01266                 ath5k_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
01267                 (sc->power_level * 2),
01268                 hw_rate,
01269                 info->count, keyidx, ah->ah_tx_ant, flags,
01270                 cts_rate, duration);
01271         if (ret)
01272                 goto err_unmap;
01273 
01274         ds->ds_link = 0;
01275         ds->ds_data = bf->skbaddr;
01276 
01277         spin_lock_irq(&txq->lock);
01278         list_add_tail(&bf->list, &txq->q);
01279         if (txq->link == NULL) /* is this first packet? */
01280                 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
01281         else /* no, so only link it */
01282                 *txq->link = bf->daddr;
01283 
01284         txq->link = &ds->ds_link;
01285         ath5k_hw_start_tx_dma(ah, txq->qnum);
01286         mmiowb();
01287         spin_unlock_irq(&txq->lock);
01288 
01289         return 0;
01290 err_unmap:
01291         pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
01292         return ret;
01293 }
01294 
01295 /*******************\
01296 * Descriptors setup *
01297 \*******************/
01298 
01299 static int
01300 ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
01301 {
01302         struct ath5k_desc *ds;
01303         struct ath5k_buf *bf;
01304         dma_addr_t da;
01305         unsigned int i;
01306         int ret;
01307 
01308         /* allocate descriptors */
01309         sc->desc_len = sizeof(struct ath5k_desc) *
01310                         (ATH_TXBUF + ATH_RXBUF + 1);
01311         sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr);
01312         if (sc->desc == NULL) {
01313                 ATH5K_ERR(sc, "can't allocate descriptors\n");
01314                 ret = -ENOMEM;
01315                 goto err;
01316         }
01317         ds = sc->desc;
01318         da = sc->desc_daddr;
01319         ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
01320                 ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
01321 
01322         bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF,
01323                         sizeof(struct ath5k_buf), GFP_KERNEL);
01324         if (bf == NULL) {
01325                 ATH5K_ERR(sc, "can't allocate bufptr\n");
01326                 ret = -ENOMEM;
01327                 goto err_free;
01328         }
01329         sc->bufptr = bf;
01330 
01331         INIT_LIST_HEAD(&sc->rxbuf);
01332         for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
01333                 bf->desc = ds;
01334                 bf->daddr = da;
01335                 list_add_tail(&bf->list, &sc->rxbuf);
01336         }
01337 
01338         INIT_LIST_HEAD(&sc->txbuf);
01339         sc->txbuf_len = ATH_TXBUF;
01340         for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
01341                         da += sizeof(*ds)) {
01342                 bf->desc = ds;
01343                 bf->daddr = da;
01344                 list_add_tail(&bf->list, &sc->txbuf);
01345         }
01346 
01347         return 0;
01348 err_free:
01349         pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
01350 err:
01351         sc->desc = NULL;
01352         return ret;
01353 }
01354 
01355 static void
01356 ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
01357 {
01358         struct ath5k_buf *bf;
01359 
01360         list_for_each_entry(bf, &sc->txbuf, list)
01361                 ath5k_txbuf_free(sc, bf);
01362         list_for_each_entry(bf, &sc->rxbuf, list)
01363                 ath5k_rxbuf_free(sc, bf);
01364 
01365         /* Free memory associated with all descriptors */
01366         pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
01367 
01368         kfree(sc->bufptr);
01369         sc->bufptr = NULL;
01370 }
01371 
01372 
01373 
01374 
01375 
01376 /**************\
01377 * Queues setup *
01378 \**************/
01379 
01380 static struct ath5k_txq *
01381 ath5k_txq_setup(struct ath5k_softc *sc,
01382                 int qtype, int subtype)
01383 {
01384         struct ath5k_hw *ah = sc->ah;
01385         struct ath5k_txq *txq;
01386         struct ath5k_txq_info qi = {
01387                 .tqi_subtype = subtype,
01388                 .tqi_aifs = AR5K_TXQ_USEDEFAULT,
01389                 .tqi_cw_min = AR5K_TXQ_USEDEFAULT,
01390                 .tqi_cw_max = AR5K_TXQ_USEDEFAULT
01391         };
01392         int qnum;
01393 
01394         /*
01395          * Enable interrupts only for EOL and DESC conditions.
01396          * We mark tx descriptors to receive a DESC interrupt
01397          * when a tx queue gets deep; otherwise waiting for the
01398          * EOL to reap descriptors.  Note that this is done to
01399          * reduce interrupt load and this only defers reaping
01400          * descriptors, never transmitting frames.  Aside from
01401          * reducing interrupts this also permits more concurrency.
01402          * The only potential downside is if the tx queue backs
01403          * up in which case the top half of the kernel may backup
01404          * due to a lack of tx descriptors.
01405          */
01406         qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
01407                                 AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
01408         qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
01409         if (qnum < 0) {
01410                 /*
01411                  * NB: don't print a message, this happens
01412                  * normally on parts with too few tx queues
01413                  */
01414                 return ERR_PTR(qnum);
01415         }
01416         if (qnum >= ARRAY_SIZE(sc->txqs)) {
01417                 ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
01418                         qnum, ARRAY_SIZE(sc->txqs));
01419                 ath5k_hw_release_tx_queue(ah, qnum);
01420                 return ERR_PTR(-EINVAL);
01421         }
01422         txq = &sc->txqs[qnum];
01423         if (!txq->setup) {
01424                 txq->qnum = qnum;
01425                 txq->link = NULL;
01426                 INIT_LIST_HEAD(&txq->q);
01427                 spin_lock_init(&txq->lock);
01428                 txq->setup = true;
01429         }
01430         return &sc->txqs[qnum];
01431 }
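/*
 * Caller-side sketch (not part of the original driver) showing how the
 * ERR_PTR convention of ath5k_txq_setup() is meant to be consumed.  The
 * queue type/subtype constants are the usual ath5k ones and sc->txq is the
 * field later passed to ath5k_tx_done() from the interrupt handler; the
 * helper itself is hypothetical.
 */
#if 0   /* illustration only */
static int example_attach_data_queue(struct ath5k_softc *sc)
{
        struct ath5k_txq *txq;

        txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
        if (IS_ERR(txq))                /* e.g. too few hw tx queues */
                return PTR_ERR(txq);

        sc->txq = txq;
        return 0;
}
#endif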
01432 
01433 /* User context (UC) and softIRQ context.
01434  *
01435  * Possible while we run:
01436  * - A hardIRQ interrupts us -> spin_lock_irq
01437  * - We are preempted off the CPU -> spin_lock
01438  * - A hardIRQ, softIRQ or UC is running on another CPU -> spin_lock
01439  */
01440 static void
01441 ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
01442 {
01443         struct ath5k_buf *bf, *bf0;
01444 
01445         /*
01446          * NB: this assumes output has been stopped and
01447          *     we do not need to block ath5k_tx_tasklet
01448          */
01449         spin_lock_irq(&txq->lock);
01450         list_for_each_entry_safe(bf, bf0, &txq->q, list) {
01451                 ath5k_debug_printtxbuf(sc, bf);
01452 
01453                 ath5k_txbuf_free(sc, bf);
01454 
01455                 spin_lock(&sc->txbuflock);      /* IRQs already off via spin_lock_irq above */
01456                 list_move_tail(&bf->list, &sc->txbuf);
01457                 sc->txbuf_len++;
01458                 spin_unlock(&sc->txbuflock);
01459         }
01460         txq->link = NULL;
01461         spin_unlock_irq(&txq->lock);
01462 }
01463 
01464 /*
01465  * Drain the transmit queues and reclaim resources.
01466  */
01467 static void
01468 ath5k_txq_cleanup(struct ath5k_softc *sc)
01469 {
01470         struct ath5k_hw *ah = sc->ah;
01471         unsigned int i;
01472 
01473         /* XXX return value */
01474         if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) {
01475                 /* don't touch the hardware if marked invalid */
01476                 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
01477                         if (sc->txqs[i].setup) {
01478                                 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
01479                                 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
01480                                         "link %p\n",
01481                                         sc->txqs[i].qnum,
01482                                         ath5k_hw_get_txdp(ah,
01483                                                         sc->txqs[i].qnum),
01484                                         sc->txqs[i].link);
01485                         }
01486         }
01487         netif_wake_queue(sc->netdev);
01488 
01489         for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
01490                 if (sc->txqs[i].setup)
01491                         ath5k_txq_drainq(sc, &sc->txqs[i]);
01492 }
01493 
01494 static void
01495 ath5k_txq_release(struct ath5k_softc *sc)
01496 {
01497         struct ath5k_txq *txq = sc->txqs;
01498         unsigned int i;
01499 
01500         for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
01501                 if (txq->setup) {
01502                         ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
01503                         txq->setup = false;
01504                 }
01505 }
01506 
01507 
01508 
01509 
01510 /*************\
01511 * RX Handling *
01512 \*************/
01513 
01514 /*
01515  * Enable the receive h/w following a reset.
01516  *
01517  * Runs in softIRQ and user context (UC).
01518  *
01519  * While executing on one CPU the following can happen:
01520  *  - We may be interrupted by a hardIRQ while in softIRQ or UC (block IRQs)
01521  *  - Another CPU runs a softIRQ while we are in UC (spinlock)
01522  *  - Another CPU runs UC code while we are in UC (spinlock)
01523  */
01524 static int
01525 ath5k_rx_start(struct ath5k_softc *sc)
01526 {
01527         struct ath5k_hw *ah = sc->ah;
01528         struct ath5k_buf *bf;
01529         int ret;
01530 
01531         sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->cachelsz);
01532 
01533         ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n",
01534                 sc->cachelsz, sc->rxbufsize);
01535 
01536         spin_lock_irq(&sc->rxbuflock);
01537         sc->rxlink = NULL;
01538         list_for_each_entry(bf, &sc->rxbuf, list) {
01539                 ret = ath5k_rxbuf_setup(sc, bf);
01540                 if (ret != 0) {
01541                         spin_unlock_irq(&sc->rxbuflock);
01542                         goto err;
01543                 }
01544         }
01545         bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
01546         ath5k_hw_set_rxdp(ah, bf->daddr);
01547         spin_unlock_irq(&sc->rxbuflock);
01548 
01549         ath5k_hw_start_rx_dma(ah);      /* enable recv descriptors */
01550         ath5k_mode_setup(sc);           /* set filters, etc. */
01551         ath5k_hw_start_rx_pcu(ah);      /* re-enable PCU/DMA engine */
01552 
01553         return 0;
01554 err:
01555         return ret;
01556 }
01557 
01558 /*
01559  * Disable the receive h/w in preparation for a reset.
01560  */
01561 static void
01562 ath5k_rx_stop(struct ath5k_softc *sc)
01563 {
01564         struct ath5k_hw *ah = sc->ah;
01565 
01566         ath5k_hw_stop_rx_pcu(ah);       /* disable PCU */
01567         ath5k_hw_set_rx_filter(ah, 0);  /* clear recv filter */
01568         ath5k_hw_stop_rx_dma(ah);       /* disable DMA engine */
01569 
01570         ath5k_debug_printrxbuffs(sc, ah);
01571 
01572         sc->rxlink = NULL;              /* just in case */
01573 }
01574 
01575 /*
01576  * Runs in hardIRQ context.
01577  *
01578  * While executing on one CPU the following can happen:
01579  *
01580  *  - The kernel guarantees that an ISR does not run simultaneously on
01581  *    several CPUs, and a tasklet cannot interrupt an ISR.
01582  *  - A tasklet or user context (UC) may be running on another CPU, so we
01583  *    must protect with a spin_lock and wait for the task on the other CPU.
01584  *
01585  * Note also that this function allocates memory through dev_alloc_skb;
01586  * this is allowed here because the allocation uses GFP_ATOMIC and the
01587  * kernel guarantees it does not sleep.
01588  */
01589 static void
01590 ath5k_rx_done(struct ath5k_softc *sc)
01591 {
01592         struct ieee80211_rx_status rxs = {};
01593         struct ath5k_rx_status rs = {};
01594         struct sk_buff *skb, *next_skb;
01595         dma_addr_t next_skb_addr;
01596         struct ath5k_buf *bf;
01597         struct ath5k_desc *ds;
01598         int ret;
01599         int hdrlen;
01600         int padsize;
01601 
01602         unsigned char dst[ETH_ALEN];
01603         unsigned char src[ETH_ALEN];
01604         u16 ethertype, frame_control;
01605         struct ieee80211_hdr *hdr;
01606         unsigned char *payload;
01607 
01608         spin_lock(&sc->rxbuflock);
01609         if (list_empty(&sc->rxbuf)) {
01610                 ATH5K_WARN(sc, "empty rx buf pool\n");
01611                 goto unlock;
01612         }
01613         do {
01614                 rxs.flag = 0;
01615 
01616                 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
01617                 BUG_ON(bf->skb == NULL);
01618                 skb = bf->skb;
01619                 ds = bf->desc;
01620 
01621                 /* bail if HW is still using self-linked descriptor */
01622                 if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
01623                         break;
01624 
01625                 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
01626 
01627                 if (unlikely(ret == -EINPROGRESS))
01628                         break;
01629                 else if (unlikely(ret)) {
01630                         ATH5K_ERR(sc, "error in processing rx descriptor\n");
01631                         goto unlock;
01632                 }
01633 
01634                 if (unlikely(rs.rs_more)) {
01635                         ATH5K_WARN(sc, "unsupported jumbo\n");
01636                         goto next;
01637                 }
01638 
01639                 if (unlikely(rs.rs_status)) {
01640                         if (rs.rs_status & AR5K_RXERR_PHY)
01641                                 goto next;      /* PHY error */
01642                         if (rs.rs_status & AR5K_RXERR_CRC)
01643                                 goto next;      /* CRC error */
01644                         if (rs.rs_status & AR5K_RXERR_DECRYPT)
01645                                 goto next;      /* decryption error */
01646                         if (rs.rs_status & AR5K_RXERR_MIC)
01647                                 goto accept;    /* MIC failure: frame is still accepted */
01648                 }
01654 accept:
01655                 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
01656 
01657                 /*
01658                  * If we can't replace bf->skb with a new skb under memory
01659                  * pressure, just skip this packet
01660                  */
01661                 if (!next_skb)
01662                         goto next;
01663 
01664                 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
01665                                 PCI_DMA_FROMDEVICE);
01666                 skb_put(skb, rs.rs_datalen);
01667 
01668                 /* The MAC header is padded to have 32-bit boundary if the
01669                  * packet payload is non-zero. The general calculation for
01670                  * padsize would take into account odd header lengths:
01671                  * padsize = (4 - hdrlen % 4) % 4; However, since only
01672                  * even-length headers are used, padding can only be 0 or 2
01673                  * bytes and we can optimize this a bit. In addition, we must
01674                  * not try to remove padding from short control frames that do
01675                  * not have payload. */
01676                 hdrlen = ath5k_get_hdrlen_from_skb(skb);
01677                 padsize = ath5k_pad_size(hdrlen);
01678                 if (padsize) {
01679                         memmove(skb->data + padsize, skb->data, hdrlen);
01680                         skb_pull(skb, padsize);
01681                 }
01682 
01683                 rxs.freq = sc->curchan->center_freq;
01684                 rxs.band = sc->curband->band;
01685 
01686                 rxs.noise = sc->ah->ah_noise_floor;
01687                 rxs.signal = rxs.noise + rs.rs_rssi;
01688 
01689                 /* An rssi of 35 indicates you should be able to use
01690                  * 54 Mbps reliably. A more elaborate scheme can be used
01691                  * here but it requires a map of SNR/throughput for each
01692                  * possible mode used */
01693                 rxs.qual = rs.rs_rssi * 100 / 35;
01694 
01695                 //unsigned char c = rs.rs_rssi;
01696                 //ATH5K_ERR(sc, "ret:%d, DANI->RSSI: %d %u %d, %d\n",ret,c,c,rxs.qual, rxs.noise);
01697 
01698                 /* rssi can be more than 35 though, anything above that
01699                  * should be considered at 100% */
01700                 if (rxs.qual > 100)
01701                         rxs.qual = 100;
01702 
01703                 rxs.antenna = rs.rs_antenna;
01704 
01705                 ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
01706 
01707                 /* skb contains the frame in IEEE 802.11 format, as it was sent over the air */
01708 
01709                 /* Drop frames shorter than a 3-address 802.11 header (24 bytes) */
01710                 if (skb->len < 24)
01711                         goto next;
01712 
01713                 hdr = (struct ieee80211_hdr *)skb->data;
01714                 frame_control = le16_to_cpu(hdr->frame_control);
01715 
01716                 /* Extract src/dst addresses */
01717                 switch (frame_control & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
01718                         case IEEE80211_FCTL_FROMDS:
01719                                 memcpy(dst, hdr->addr1, ETH_ALEN);
01720                                 memcpy(src, hdr->addr3, ETH_ALEN);
01721                                 break;
01722                         case IEEE80211_FCTL_TODS:
01723                                 memcpy(dst, hdr->addr3, ETH_ALEN);
01724                                 memcpy(src, hdr->addr2, ETH_ALEN);
01725                                 break;
01726                         case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
01727                                 if (skb->len < IEEE80211_4ADDR_LEN)
01728                                         goto next;
01729                                 memcpy(dst, hdr->addr3, ETH_ALEN);
01730                                 memcpy(src, hdr->addr4, ETH_ALEN);
01731                                 break;
01732                         case 0:
01733                                 memcpy(dst, hdr->addr1, ETH_ALEN);
01734                                 memcpy(src, hdr->addr2, ETH_ALEN);
01735                                 break;
01736                 }
01737        
01738                 /* convert hdr + possible LLC headers into an Ethernet header;
01739                  * require at least the 8-byte LLC/SNAP encapsulation */
01740                 if (skb->len - hdrlen < 8)
01741                         goto next;
01742 
01743                 payload = skb->data + hdrlen;
01744                 ethertype = (payload[6] << 8) | payload[7];
01745 
01746                 /* remove RFC1042 or Bridge-Tunnel encapsulation and
01747                  * replace EtherType */
01748                 skb_pull(skb, hdrlen + SNAP_SIZE);
01749                 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
01750                 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
01752 
01753 
01754                 /* Set skb fields and send to linux network layer */
01755                 if (skb) 
01756                 {
01757                         skb->dev = sc->netdev;
01758                         skb->protocol = eth_type_trans(skb, sc->netdev);
01759                         skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
01760 
01761 
01762                         /* RUBEN */
01763                         /* If 'skb' is an RT-WMP packet it is enqueued in the
01764                          * reception queue; otherwise it is sent to the upper layer */
01765                         if (skb->protocol == 0x6969 || skb->protocol == 0x6970) {
01766                                 /* XXX: RT-WMP Extension */
01767                                 //skb->data[14] = (char)rs.rs_rssi;
01768                                 //skb->data[15] = (char)rxs.noise;
01769                                 skb->data[0] = (char)rs.rs_rssi;
01770                                 skb->data[1] = (char)rxs.noise;
01771 
01772                                 /* 'skb' is enqueued */
01773                                 enqueue_rx_queue(&rx_rtwmp_queue, skb);
01774                         } else {
01775                                 /* Not an RT-WMP packet: hand 'skb' to the upper layer */
01776                                 netif_rx(skb);
01777                         }
01779                 }
01780 
01781                 bf->skb = next_skb;
01782                 bf->skbaddr = next_skb_addr;
01783 next:
01784                 list_move_tail(&bf->list, &sc->rxbuf);
01785         } while (ath5k_rxbuf_setup(sc, bf) == 0);
01786 unlock:
01787         spin_unlock(&sc->rxbuflock);
01788 }
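/*
 * Sketch (not in the original source) of the header-padding rule used in
 * ath5k_rx_done() above and in ath5k_tx() below: the general formula
 * (4 - hdrlen % 4) % 4 reduces to 0 or 2 bytes for the even header lengths
 * that actually occur, which is what ath5k_pad_size() exploits.
 */
#if 0   /* illustration only */
static inline int example_pad_size(int hdrlen)
{
        return (4 - hdrlen % 4) % 4;
}
/* example_pad_size(24) == 0, example_pad_size(26) == 2, example_pad_size(30) == 2 */
#endif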
01789 
01790 
01791 
01792 
01793 /*************\
01794 * TX Handling *
01795 \*************/
01796 
01797 /*
01798  * Runs in hardIRQ context.
01799  *
01800  * While it executes the following can happen:
01801  *
01802  * - The kernel guarantees the interrupt routine will not run concurrently
01803  *   on other CPUs.
01804  * - A tasklet or user context (UC) may be running on another CPU; take a
01805  *   spin_lock and wait for it to finish.
01806  */
01807 static void
01808 ath5k_tx_done(struct ath5k_softc *sc, struct ath5k_txq *txq)
01809 {
01810         struct ath5k_tx_status ts = {};
01811         struct ath5k_buf *bf, *bf0;
01812         struct ath5k_desc *ds;
01813         struct sk_buff *skb;
01814         int ret;
01815 
01816         spin_lock(&txq->lock);
01817         list_for_each_entry_safe(bf, bf0, &txq->q, list) {
01818                 ds = bf->desc;
01819 
01820                 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
01821                 if (unlikely(ret == -EINPROGRESS))
01822                         break;
01823                 else if (unlikely(ret)) {
01824                         ATH5K_ERR(sc, "error %d while processing queue %u\n",
01825                                 ret, txq->qnum);
01826                         break;
01827                 }
01828 
01829                 skb = bf->skb;
01830                 bf->skb = NULL;
01831 
01832                 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
01833                                 PCI_DMA_TODEVICE);
01834                 dev_kfree_skb_any(skb); /* free only after unmapping */
01835 
01836                 if (unlikely(ts.ts_status)) 
01837                         sc->ll_stats.dot11ACKFailureCount++;
01838 
01839                 spin_lock(&sc->txbuflock);
01840                 list_move_tail(&bf->list, &sc->txbuf);
01841                 sc->txbuf_len++;
01842                 spin_unlock(&sc->txbuflock);
01843         }
01844         if (likely(list_empty(&txq->q)))
01845                 txq->link = NULL;
01846         spin_unlock(&txq->lock);
01847         if (sc->txbuf_len > ATH_TXBUF / 5)
01848                 netif_wake_queue(sc->netdev);
01849 }
01850 
01851 
01852 /********************\
01853 * Interrupt handling *
01854 \********************/
01855 
01856 static int
01857 ath5k_init(struct ath5k_softc *sc)
01858 {
01859         struct ath5k_hw *ah = sc->ah;
01860         int ret;
01861 
01862         mutex_lock(&sc->lock);
01863 
01864         /*
01865          * Stop anything previously set up.  This is safe
01866          * whether or not this is the first time through.
01867          */
01868         ath5k_stop_locked(sc);
01869 
01870         /*
01871          * The basic interface to setting the hardware in a good
01872          * state is ``reset''.  On return the hardware is known to
01873          * be powered up and with interrupts disabled.  This must
01874          * be followed by initialization of the appropriate bits
01875          * and then setup of the interrupt mask.
01876          */
01877         sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
01878                 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
01879                 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI;
01880         ret = ath5k_reset(sc, sc->curchan);
01881         if (ret)
01882                 goto done;
01883 
01884         /* Set ack to be sent at low bit-rates */
01885         ath5k_hw_set_ack_bitrate_high(ah, false);
01886 
01887         /* Set PHY calibration interval */
01888         ah->ah_cal_intval = ath5k_calinterval;
01889 
01890         ret = 0;
01891 done:
01892         mmiowb();
01893         mutex_unlock(&sc->lock);
01894         return ret;
01895 }
01896 
01897 static int
01898 ath5k_stop_locked(struct ath5k_softc *sc)
01899 {
01900         struct ath5k_hw *ah = sc->ah;
01901 
01902         ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
01903                         test_bit(ATH_STAT_INVALID, sc->status));
01904 
01905         /*
01906          * Shutdown the hardware and driver:
01907          *    stop output from above
01908          *    disable interrupts
01909          *    turn off timers
01910          *    turn off the radio
01911          *    clear transmit machinery
01912          *    clear receive machinery
01913          *    drain and release tx queues
01914          *    reclaim beacon resources
01915          *    power down hardware
01916          *
01917          * Note that some of this work is not possible if the
01918          * hardware is gone (invalid).
01919          */
01920         netif_stop_queue(sc->netdev);
01921 
01922         if (!test_bit(ATH_STAT_INVALID, sc->status)) {
01923                 ath5k_hw_set_imr(ah, 0);
01924                 synchronize_irq(sc->pdev->irq);
01925         }
01926         ath5k_txq_cleanup(sc);
01927         if (!test_bit(ATH_STAT_INVALID, sc->status)) {
01928                 ath5k_rx_stop(sc);
01929                 ath5k_hw_phy_disable(ah);
01930         } else
01931                 sc->rxlink = NULL;
01932 
01933         return 0;
01934 }
01935 
01936 /*
01937  * Stop the device, grabbing the top-level lock to protect
01938  * against concurrent entry through ath5k_init (which can happen
01939  * if another thread does a system call and the thread doing the
01940  * stop is preempted).
01941  */
01942 static int
01943 ath5k_stop_hw(struct ath5k_softc *sc)
01944 {
01945         int ret;
01946 
01947         mutex_lock(&sc->lock);
01948         ret = ath5k_stop_locked(sc);
01949         if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
01950                 /*
01951                  * Don't set the card in full sleep mode!
01952                  *
01953                  * a) When the device is in this state it must be carefully
01954                  * woken up or references to registers in the PCI clock
01955                  * domain may freeze the bus (and system).  This varies
01956                  * by chip and is mostly an issue with newer parts
01957                  * (madwifi sources mentioned srev >= 0x78) that go to
01958                  * sleep more quickly.
01959                  *
01960                  * b) On older chips full sleep results in weird behaviour
01961                  * during wakeup. I tested various cards with srev < 0x78
01962                  * and they don't wake up after module reload, a second
01963                  * module reload is needed to bring the card up again.
01964                  *
01965                  * Until we figure out what's going on don't enable
01966                  * full chip reset on any chip (this is what Legacy HAL
01967                  * and Sam's HAL do anyway). Instead perform a full reset
01968                  * on the device (same as initial state after attach) and
01969                  * leave it idle (keep MAC/BB on warm reset) */
01970                 ret = ath5k_hw_on_hold(sc->ah);
01971 
01972                 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
01973                                 "putting device to sleep\n");
01974         }
01975 
01976         mmiowb();
01977         mutex_unlock(&sc->lock);
01978 
01979         tasklet_kill(&sc->restq);
01980 //      tasklet_kill(&sc->calib);
01981 
01982         return ret;
01983 }
01984 
01985 static irqreturn_t
01986 ath5k_intr(int irq, void *dev_id)
01987 {
01988         struct net_device *netdev = (struct net_device *)dev_id;
01989         struct ath5k_softc *sc = netdev_priv(netdev);
01990         struct ath5k_hw *ah = sc->ah;
01991         enum ath5k_int status;
01992         unsigned int counter = 1000;
01993 
01994         if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
01995                                 !ath5k_hw_is_intr_pending(ah)))
01996                 return IRQ_NONE;
01997 
01998         do {
01999                 ath5k_hw_get_isr(ah, &status);          /* NB: clears IRQ too */
02000                 ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
02001                                 status, sc->imask);
02002                 if (unlikely(status & AR5K_INT_FATAL)) {
02003                         /*
02004                          * Fatal errors are unrecoverable.
02005                          * Typically these are caused by DMA errors.
02006                          */
02007                         ATH5K_ERR(sc, "Fatal error\n");
02008                         tasklet_schedule(&sc->restq);
02009                 } else if (unlikely(status & AR5K_INT_RXORN)) {
02010                         ATH5K_ERR(sc, "Fatal error\n");
02011                         tasklet_schedule(&sc->restq);
02012                 } else {
02013                         if (status & AR5K_INT_RXEOL) {
02014                                 /*
02015                                 * NB: the hardware should re-read the link when
02016                                 *     RXE bit is written, but it doesn't work at
02017                                 *     least on older hardware revs.
02018                                 */
02019                                 sc->rxlink = NULL;
02020                         }
02021                         if (status & AR5K_INT_TXURN) {
02022                                 /* bump tx trigger level */
02023                                 ath5k_hw_update_tx_triglevel(ah, true);
02024                         }
02025                         if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
02026                                 ath5k_rx_done(sc);
02027                         if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
02028                                         | AR5K_INT_TXERR | AR5K_INT_TXEOL))
02029                                 ath5k_tx_done(sc, sc->txq);
02030 //                      if (status & AR5K_INT_SWI)
02031 //                              tasklet_schedule(&sc->calib);
02032                         if (status & AR5K_INT_MIB) {
02033                                 /*
02034                                  * These stats are also used for ANI, so they
02035                                  * could probably be updated more often.
02036                                  */
02037                                 ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
02038                         }
02039                 }
02040         } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
02041 
02042         if (unlikely(!counter))
02043                 ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
02044 
02045         ath5k_hw_calibration_poll(ah);
02046 
02047         return IRQ_HANDLED;
02048 }
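/*
 * Registration sketch (the probe path is not shown in this part of the
 * file): ath5k_intr() expects the net_device as dev_id, so the attach code
 * is assumed to register it roughly as below.  The helper is hypothetical.
 */
#if 0   /* illustration only */
static int example_request_irq(struct pci_dev *pdev, struct net_device *netdev)
{
        /* ath5k_intr() receives this netdev back as dev_id */
        return request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", netdev);
}
#endif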
02049 
02050 static void
02051 ath5k_tasklet_reset(unsigned long data)
02052 {
02053         struct ath5k_softc *sc = (void *)data;
02054 
02055         ath5k_reset_wake(sc);
02056 }
02057 
02058 /*
02059  * Periodically recalibrate the PHY to account
02060  * for temperature/environment changes.
02061  */
02062 #if 0
02063 static void
02064 ath5k_tasklet_calibrate(unsigned long data)
02065 {
02066         struct ath5k_softc *sc = (void *)data;
02067         struct ath5k_hw *ah = sc->ah;
02068 
02069         /* Only full calibration for now */
02070         if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION)
02071                 return;
02072         /* Stop queues so that calibration
02073          * doesn't interfere with tx */
02074         netif_stop_queue(sc->netdev);
02075 
02076         ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
02077                 ath5k_frequency_to_channel(sc->curchan->center_freq),
02078                 sc->curchan->hw_value);
02079 
02080         if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
02081                 /*
02082                  * Rfgain is out of bounds, reset the chip
02083                  * to load new gain values.
02084                  */
02085                 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
02086                 ath5k_reset_wake(sc);
02087         }
02088         if (ath5k_hw_phy_calibrate(ah, sc->curchan))
02089                 ATH5K_ERR(sc, "calibration of channel %u failed\n",
02090                         ath5k_frequency_to_channel(
02091                                 sc->curchan->center_freq));
02092 
02093         ah->ah_swi_mask = 0;
02094 
02095         /* Wake queues */
02096         netif_wake_queue(sc->netdev);
02097 }
02098 #endif
02099 
02100 
02101 /**********************\
02102 * Interface functions *
02103 \**********************/
02104 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
02105 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
02106 
02107 static int ieee80211_copy_snap(u8 * data, unsigned short h_proto)
02108 {
02109         struct ieee80211_snap_hdr *snap;
02110         u8 *oui;
02111 
02112         snap = (struct ieee80211_snap_hdr *)data;
02113         snap->dsap = 0xaa;
02114         snap->ssap = 0xaa;
02115         snap->ctrl = 0x03;
02116 
02117         if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
02118                 oui = P802_1H_OUI;
02119         else
02120                 oui = RFC1042_OUI;
02121         snap->oui[0] = oui[0];
02122         snap->oui[1] = oui[1];
02123         snap->oui[2] = oui[2];
02124 
02125         memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));
02126 
02127         return SNAP_SIZE + sizeof(u16);
02128 }
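/*
 * For reference (added note, not in the original source): with a regular
 * EtherType such as IPv4 (h_proto = htons(0x0800)) the 8 bytes written by
 * ieee80211_copy_snap() are
 *
 *      aa aa 03 00 00 00 08 00
 *      |  |  |  \-- OUI --/ \EtherType (already big-endian)/
 *      |  |  ctrl (UI)
 *      dsap/ssap (SNAP)
 */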
02129 
02130 /*
02131  * Context? User context (UC)?
02132  *
02133  * Possible while we run:
02134  * - A hardIRQ preempts us -> spin_lock_irq (worst case)
02135  * - A softIRQ preempts us -> spin_lock_bh
02136  * - We are preempted off the CPU -> spin_lock
02137  * - Something else is running on another CPU -> spin_lock
02138  */
02139 static int
02140 ath5k_tx(struct sk_buff *skb, struct net_device *netdev)
02141 {
02142         struct ath5k_softc *sc = netdev_priv(netdev);
02143         struct ath5k_buf *bf;
02144 //      unsigned long flags;
02145         int hdrlen;
02146         int padsize;
02147 
02148         struct sk_buff *skb_new;
02149         unsigned short ethertype;
02150         struct ieee80211_hdr header;
02151         int frame_control;
02152         unsigned char dst[ETH_ALEN];
02153         unsigned char src[ETH_ALEN];
02154 
02155         /* Save source and destination addresses and ethertype */
02156         skb_copy_from_linear_data(skb, dst, ETH_ALEN);
02157         skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);
02158         ethertype = ((struct ethhdr *)skb->data)->h_proto;
02159 
02160         /* In ADHOC mode, no From/To DS, Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
02161         memcpy(header.addr1, dst, ETH_ALEN);
02162         memcpy(header.addr2, src, ETH_ALEN);
02163         memcpy(header.addr3, sc->ah->ah_bssid, ETH_ALEN);
02164         
02165         if (use_beacon_frames){
02166                 frame_control = IEEE80211_STYPE_BEACON | IEEE80211_STYPE_DATA;
02167         } else{
02168                 frame_control = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
02169         }
02170 
02171         header.frame_control = cpu_to_le16(frame_control);
02172 
02173         //DANI XXX:
02174         header.duration_id = 0;
02175 
02176         hdrlen = 24;
02177 
02178         /* Advance the SKB to the start of the payload */
02179         skb_pull(skb, sizeof(struct ethhdr));
02180 
02181         /* Allocate new skb */
02182         skb_new = dev_alloc_skb(hdrlen + SNAP_SIZE + sizeof(u16) + skb->len);
02183         if (!skb_new)
02184         {
02185                 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", 
02186                                 hdrlen + SNAP_SIZE + sizeof(u16) + skb->len);
02187                 return -ENOMEM;
02188         }
02189         memset(skb_new->data, 0, hdrlen + SNAP_SIZE + sizeof(u16) + skb->len); /* skb_new->len is still 0 here */
02190 
02191         /* Copy header */
02192         memcpy(skb_put(skb_new, hdrlen), &header, hdrlen);
02193 
02194         /* Copy SNAP and ethertype */
02195         ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)), ethertype);
02196 
02197         /* Copy payload */
02198         skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
02199         
02200         /* Free skb */
02201         dev_kfree_skb_any(skb);
02202         skb = skb_new;
02203 
02204         ath5k_debug_dump_skb(sc, skb, "TX  ", 1);
02205         
02206         /*
02207          * the hardware expects the header padded to 4 byte boundaries
02208          * if this is not the case we add the padding after the header
02209          */
02210         hdrlen = ath5k_get_hdrlen_from_skb(skb);
02211         padsize = ath5k_pad_size(hdrlen);
02212         if (padsize) {
02213                 if (skb_headroom(skb) < padsize) {
02214                         ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
02215                                   " headroom to pad %d\n", hdrlen, padsize);
02216                         goto drop_packet;
02217                 }
02218                 skb_push(skb, padsize);
02219                 memmove(skb->data, skb->data+padsize, hdrlen);
02220         }
02221 
02222         spin_lock_irq(&sc->txbuflock);
02223         if (list_empty(&sc->txbuf)) {
02224                 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
02225                 spin_unlock_irq(&sc->txbuflock);
02226                 netif_stop_queue(sc->netdev);
02227                 goto drop_packet;
02228         }
02229         bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
02230         list_del(&bf->list);
02231         sc->txbuf_len--;
02232         if (list_empty(&sc->txbuf))
02233                 netif_stop_queue(sc->netdev);
02234         spin_unlock_irq(&sc->txbuflock);
02235 
02236         bf->skb = skb;
02237 
02238         if (ath5k_txbuf_setup(sc, bf)) {
02239                 bf->skb = NULL;
02240                 spin_lock_irq(&sc->txbuflock);
02241                 list_add_tail(&bf->list, &sc->txbuf);
02242                 sc->txbuf_len++;
02243                 spin_unlock_irq(&sc->txbuflock);
02244                 goto drop_packet;
02245         }
02246         return NETDEV_TX_OK;
02247 
02248 drop_packet:
02249         dev_kfree_skb_any(skb);
02250         return NETDEV_TX_OK;
02251 }
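/*
 * Resulting frame layout built by ath5k_tx() above (sketch added for
 * clarity, not in the original source).  The 14-byte Ethernet header is
 * replaced by a 24-byte 3-address 802.11 data header plus the LLC/SNAP
 * encapsulation, which is why the allocation size used above is
 * hdrlen + SNAP_SIZE + sizeof(u16) + payload length:
 *
 *   [ 24-byte 802.11 hdr ][ SNAP_SIZE LLC/SNAP bytes ][ 2-byte EtherType ][ payload ]
 */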
02252 
02253 /*
02254  * Reset the hardware.  If chan is not NULL, then also pause rx/tx
02255  * and change to the given channel.
02256  */
02257 static int
02258 ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
02259 {
02260         struct ath5k_hw *ah = sc->ah;
02261         int ret;
02262 
02263         ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
02264 
02265         if (chan) {
02266                 ath5k_hw_set_imr(ah, 0);
02267                 ath5k_txq_cleanup(sc);
02268                 ath5k_rx_stop(sc);
02269 
02270                 sc->curchan = chan;
02271                 sc->curband = &sc->sbands[chan->band];
02272         }
02273         ret = ath5k_hw_reset(ah, sc->curchan, true);
02274         if (ret) {
02275                 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
02276                 goto err;
02277         }
02278 
02279         ret = ath5k_rx_start(sc);
02280         if (ret) {
02281                 ATH5K_ERR(sc, "can't start recv logic\n");
02282                 goto err;
02283         }
02284 
02285         /* Enable interrupts */
02286         ath5k_hw_set_imr(ah, sc->imask);
02287 
02288         return 0;
02289 err:
02290         return ret;
02291 }
02292 
02293 static int
02294 ath5k_reset_wake(struct ath5k_softc *sc)
02295 {
02296         int ret;
02297 
02298         ret = ath5k_reset(sc, sc->curchan);
02299         if (!ret)
02300                 netif_wake_queue(sc->netdev);
02301 
02302         return ret;
02303 }
02304 
02305 static int ath5k_start(struct ath5k_softc *sc)
02306 {
02307         return ath5k_init(sc);
02308 }
02309 
02310 static void ath5k_stop(struct ath5k_softc *sc)
02311 {
02312         ath5k_stop_hw(sc);
02313 }
02314 
02315 static void ath5k_config_filter(struct ath5k_softc *sc, bool broadcast, bool control, bool promisc)
02316 {
02317         struct ath5k_hw *ah = sc->ah;
02318         unsigned int rfilt;
02319 
02320         rfilt = 0;
02321 
02322         /* Always enable Unicast */
02323         rfilt |= AR5K_RX_FILTER_UCAST;
02324 
02325         if (broadcast)
02326                 rfilt |= AR5K_RX_FILTER_BCAST;
02327 
02328         /* Allow control frames? */
02329         if (control)
02330                 rfilt |= AR5K_RX_FILTER_CONTROL;
02331 
02332         /* Promiscuous mode? */
02333         if (promisc)
02334         {
02335                 rfilt |= AR5K_RX_FILTER_PROM;
02336                 __set_bit (ATH_STAT_PROMISC, sc->status);
02337         }
02338         else
02339                 __clear_bit (ATH_STAT_PROMISC, sc->status);
02340 
02341         /* Set filters */
02342         ath5k_hw_set_rx_filter(ah, rfilt);
02343 
02344         /* Set multicast bits */
02345         ath5k_hw_set_mcast_filter(ah, 0, 0);
02346 
02347         /* Set the cached hw filter flags; these will later actually be set in HW */
02348         sc->filter_flags = rfilt;
02349 }
02350 
02351 static struct ieee80211_channel * ath5k_get_channel(struct ath5k_softc *sc, unsigned int freq)
02352 {
02353         struct ieee80211_channel *aux;
02354         int i, n_channels;
02355 
02356         aux = NULL;
02357         if (freq < 3000)
02358         {
02359                 aux = sc->sbands[IEEE80211_BAND_2GHZ].channels;
02360                 n_channels = sc->sbands[IEEE80211_BAND_2GHZ].n_channels;
02361         }
02362         else if (sc->sbands[IEEE80211_BAND_5GHZ].channels != NULL)
02363         {
02364                 aux = sc->sbands[IEEE80211_BAND_5GHZ].channels;
02365                 n_channels = sc->sbands[IEEE80211_BAND_5GHZ].n_channels;
02366         }
02367 
02368         if (aux != NULL)
02369         {
02370                 for (i = 0; i < n_channels; i++, aux++)
02371                         if (aux->center_freq == freq)                   
02372                                 break;
02373         }
02374 
02375         return aux;
02376 }
02377 
02378 static int ath5k_get_rate_idx(struct ath5k_softc *sc, enum ieee80211_band band, enum rates rate)
02379 {
02380         struct ieee80211_rate *r;
02381         int i;
02382         bool valid = false;
02383 
02384         r = NULL;
02385 
02386         r = sc->sbands[band].bitrates;
02387 
02388         if (r != NULL)
02389                 for (i = 0; i < sc->sbands[band].n_bitrates; i++, r++)
02390                 {
02391                         if (r->bitrate == rate)
02392                         {                       
02393                                 valid = true;
02394                                 break;
02395                         }
02396                 }
02397         if (valid)
02398                 return i;
02399         else
02400                 return -1;
02401 }
02402 
02403 static int ath5k_config_tx_control (struct ath5k_softc *sc, 
02404                 unsigned char count, bool wait_for_ack, bool use_short_preamble)
02405 {
02406         struct ieee80211_rate *rate;
02407 
02408         /* Clear flags */
02409         sc->tx_info.wait_for_ack = false;
02410         sc->tx_info.use_short_preamble = false;
02411 
02412         /* Set tx count if ack not received */
02413         sc->tx_info.count = count;
02414 
02415         /* Wait for ack flag */
02416         if (wait_for_ack)
02417                 sc->tx_info.wait_for_ack = true;
02418 
02419         /* Set rate code flags (short preamble) */
02420         rate = &sc->curband->bitrates[sc->tx_info.rate_idx];
02421         if (use_short_preamble && (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
02422                 sc->tx_info.use_short_preamble = true;
02423         
02424         return 0;
02425 }
02426 
02427 static int ath5k_config_disable_ack (struct ath5k_softc *sc, bool disable)
02428 {
02429         struct ath5k_hw *ah;
02430    
02431         if (sc == NULL)
02432                 return 1;       
02433         else
02434                 ah = sc->ah;
02435 
02436         if (disable)
02437                 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_ACK);
02438         else
02439                 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_ACK);
02440 
02441         return 0;
02442 }
02443 
02444 /*
02445  * Look the requested channel and rate up in sc->sbands and apply the settings.
02446  */
02447 static int ath5k_config (struct ath5k_softc *sc, 
02448                 unsigned short channel, 
02449                 enum rates rate, 
02450                 unsigned char tx_power_dbm,
02451                 enum ath5k_ant_mode antenna_mode)
02452 {
02453         struct ath5k_hw *ah = sc->ah;
02454         struct ieee80211_channel *target_channel;
02455         int rate_idx;
02456         int ret = 0;
02457 
02458         mutex_lock(&sc->lock);
02459 
02460         /* Find the channel */
02461         target_channel = ath5k_get_channel(sc, channel);
02462         if (target_channel == NULL)
02463         {
02464                 ATH5K_ERR(sc, "Invalid channel\n");
02465                 ret = -1;
02466                 goto unlock;
02467         }
02468 
02469         /* Find the bit rate */
02470         rate_idx = ath5k_get_rate_idx(sc, target_channel->band, rate);
02471         if (rate_idx < 0 || rate_idx > AR5K_MAX_RATES)
02472         {
02473                 ATH5K_ERR(sc, "Invalid rate\n");
02474                 ret = -1;
02475                 goto unlock;
02476         }
02477 
02478         /* Configure the rate */
02479         sc->tx_info.band = sc->curband->band;
02480         sc->tx_info.rate_idx = rate_idx;
02481         sc->tx_info.count = 1;
02482         sc->tx_info.wait_for_ack = false;
02483         sc->tx_info.use_short_preamble = false;
02484 
02485         if (sc->power_level != tx_power_dbm)
02486         {
02487 
02488 
02489                 /* Half dB steps */
02490                 //DANI
02491                 if (ath5k_hw_set_txpower_limit(ah, (tx_power_dbm * 2)) == 0){
02492                         sc->power_level = tx_power_dbm;
02493                 }else{
02494                         ATH5K_ERR(sc, "Invalid power\n");
02495                         ret = -1;
02496                         goto unlock;
02497                 }
02498         }
02499 
02500         ath5k_hw_set_antenna_mode(ah, antenna_mode);
02501 
02502         /* Reset the hardware to apply the changes */
02503         ath5k_reset(sc, target_channel);
02504 
02505         ATH5K_INFO(sc, "Freq %d MHz, Rate (%d * 100) kbps, Power %d dBm, Ant. mode %d\n",
02506                         channel, rate, tx_power_dbm, antenna_mode);
02507 
02508 unlock:
02509         mutex_unlock(&sc->lock);
02510 
02511         return ret;
02512 }
02513 
02514 static int ath5k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
02515 {
02516         struct ath5k_softc *sc;
02517         union ath5k_ioctl_info info;
02518         int ret;
02519 
02520    /*///////////////////////////////////////////////////////////////////////////
02521    struct timespec ts;
02522    */
02523         /* Retrieve the main driver structure */
02524         sc = netdev_priv(netdev);
02525 
02526         ret = 0;
02527         switch(cmd)
02528         {
02529                 case SIO_SEND_PACKET:
02530                         ATH5K_ERR(sc, "SIO_SEND_PACKET not implemented yet.\n");
02531                         ret = -1;
02532                         break;
02533 
02534                 case SIO_RECV_PACKET:
02535                         ATH5K_ERR(sc, "SIO_RECV_PACKET not implemented yet.\n");
02536                         ret = -1;
02537                         break;
02538 
02539                 case SIO_SET_CONFIG:
02540                         ret = copy_from_user(&info.config_info, 
02541                                         (struct ath5k_config_info *)ifr->ifr_data, 
02542                                         sizeof(struct ath5k_config_info));
02543                         if (ret)
02544                                 return ret;
02545 
02546                         ret = ath5k_config(sc, info.config_info.frequency, info.config_info.rate, info.config_info.tx_power_dbm,
02547                                         info.config_info.antenna_mode);
02548 
02549                         if (ret)
02550                                 return ret;
02551 
02552                         break;
02553 
02554                 case SIO_SET_DEBUG:
02555                         ret = copy_from_user(&info.debug_level,
02556                                         (unsigned int *)ifr->ifr_data, 
02557                                         sizeof(int));
02558                         if (ret)
02559                                 return ret;
02560    /*///////////////////////////////////////////////////////////////////////////
02561    getnstimeofday(&ts);
02562    printk(KERN_INFO "%lld\n", timespec_to_ns(&ts));
02563    */
02564 
02565                         sc->debug_level = info.debug_level;
02566 
02567                         break;
02568 
02569                 case SIO_SET_RXFILTER:
02570                         ret = copy_from_user(&info.rxfilter_info,
02571                                         (struct ath5k_rxfilter_info *)ifr->ifr_data,
02572                                         sizeof(struct ath5k_rxfilter_info));
02573 
02574                         if (ret)
02575                                 return ret;
02576 
02577                         ath5k_config_filter(sc, info.rxfilter_info.broadcast,
02578                                                                 info.rxfilter_info.control,
02579                                                                         info.rxfilter_info.promisc);
02580 
02581                         break;
02582 
02583                 case SIO_SET_TXCONTROL:
02584                         ret = copy_from_user(&info.txcontrol_info,
02585                                         (struct ath5k_txcontrol_info *)ifr->ifr_data,
02586                                         sizeof(struct ath5k_txcontrol_info));
02587 
02588                         if (ret)
02589                                 return ret;
02590 
02591                         ret = ath5k_config_tx_control(sc, info.txcontrol_info.count,
02592                                                                                           info.txcontrol_info.wait_for_ack,
02593                                                                                           info.txcontrol_info.use_short_preamble);
02594 
02595                         break;
02596 
02597 
02598                 case SIO_SET_DISABLEACK:
02599                         ret = copy_from_user(&info.disable_ack,
02600                                         (bool *)ifr->ifr_data,
02601                                         sizeof(bool));
02602 
02603                         if (ret)
02604                                 return ret;
02605 
02606                         ret = ath5k_config_disable_ack(sc, info.disable_ack);
02607 
02608                         break;
02609 
02610                 case SIO_SET_BSSID:
02611                         ATH5K_ERR(sc, "SIO_SET_BSSID not implemented yet\n");
02612                         ret = -1;
02613                         break;
02614 
02615                 case SIO_SET_BSSIDFILTER:
02616                         ATH5K_ERR(sc, "SIO_SET_BSSIDFILTER not implemented yet\n");
02617                         ret = -1;
02618                         break;
02619                 case SIO_SET_USE_BEACON_FRAMES:
02620                         ret = copy_from_user(&use_beacon_frames, (int *) ifr->ifr_data, sizeof(int));
02621 
02622                         if (ret)
02623                                 return ret;
02624 
02625                 break;
02626 
02627                         /*              case SIO_SET_POWER:
02628                         int val_dbm = 0;
02629                         ret = copy_from_user(&val_dbm,
02630                                                                 (int *)ifr->ifr_data,
02631                                                                 sizeof(int));
02632                         sc->power_level = val_dbm * 2;
02633                         break;*/
02634         }               
02635         return ret;
02636 }
02637 
02638 
02639 
02640 
02641 
02642 
02643 
02644 /* RUBEN */
02645 
02649 void get_ath5k_slave_dev(struct net_device** netdev)
02650 {
02651    *netdev = ath5k_slave_dev;
02652 }
02653 EXPORT_SYMBOL(get_ath5k_slave_dev);
02654 
02662 int ath5k_rx(struct sk_buff **skb)
02663 {
02664 
02665    *skb = dequeue_rx_queue(&rx_rtwmp_queue, 0);
02666 
02667    if (*skb == NULL){
02668       return 0;
02669    }
02670    else{
02671       return (*skb)->len;
02672    }
02673 }
02674 EXPORT_SYMBOL(ath5k_rx);
02675 
02683 int ath5k_rx_timeout(struct sk_buff **skb, int timeout)
02684 {
02685 
02686    *skb = dequeue_rx_queue(&rx_rtwmp_queue, timeout);
02687 
02688    if (*skb == NULL){
02689       return 0;
02690    }
02691    else{
02692       return (*skb)->len;
02693    }
02694 }
02695 EXPORT_SYMBOL(ath5k_rx_timeout);
02696 
02700 void unlock_rx_sem(void)
02701 {
02702    up(&rx_rtwmp_queue.rx_sem);
02703 }
02704 EXPORT_SYMBOL(unlock_rx_sem);
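/*
 * Minimal usage sketch for the exported RT-WMP receive API (added for
 * illustration, not part of this file).  It only uses symbols exported
 * above; the timeout value, its units and the example function itself are
 * assumptions of this sketch, and the dequeue may block, so it must not be
 * called from atomic context.
 */
#if 0   /* illustration only */
static void example_rtwmp_receive(void)
{
        struct net_device *dev;
        struct sk_buff *skb;
        int len;

        get_ath5k_slave_dev(&dev);      /* underlying ath5k net_device */

        len = ath5k_rx_timeout(&skb, 100);
        if (len > 0) {
                /* skb->data[0] / skb->data[1] carry RSSI / noise,
                 * see ath5k_rx_done() above */
                dev_kfree_skb(skb);
        }
}
#endif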
02705 
02710 int conf_ath5k (struct net_device *netdev, union ath5k_ioctl_info *info, int cmd)
02711 {
02712    struct ath5k_softc *sc;
02713    int ret;
02714 
02715    sc = netdev_priv(netdev);
02716 
02717    ret = 0;
02718    switch(cmd)
02719    {
02720 
02721       case SIO_SET_CONFIG:
02722 
02723          ret = ath5k_config(sc, info->config_info.frequency,
02724                                  info->config_info.rate,
02725                                  info->config_info.tx_power_dbm,
02726                                  info->config_info.antenna_mode);
02727 
02728          break;
02729 
02730       case SIO_SET_DEBUG:
02731 
02732          sc->debug_level = info->debug_level;
02733 
02734          break;
02735 
02736       case SIO_SET_RXFILTER:
02737 
02738          ath5k_config_filter(sc, info->rxfilter_info.broadcast,
02739                              info->rxfilter_info.control,
02740                            info->rxfilter_info.promisc);
02741 
02742          break;
02743 
02744       case SIO_SET_TXCONTROL:
02745 
02746          ret = ath5k_config_tx_control(sc, info->txcontrol_info.count,
02747                                    info->txcontrol_info.wait_for_ack,
02748                                    info->txcontrol_info.use_short_preamble);
02749 
02750          break;
02751 
02752 
02753       case SIO_SET_DISABLEACK:
02754 
02755          ret = ath5k_config_disable_ack(sc, info->disable_ack);
02756 
02757          break;
02758    }
02759    return ret;
02760 }
02761 EXPORT_SYMBOL(conf_ath5k);
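/*
 * Minimal in-kernel configuration sketch (added for illustration, not part
 * of this file).  Field names and the SIO_SET_CONFIG command come from the
 * ioctl handling above; the concrete frequency, rate, power and antenna
 * values are assumptions of this sketch, and RATE_6M stands for whichever
 * member of 'enum rates' the caller actually wants.
 */
#if 0   /* illustration only */
static int example_configure(struct net_device *netdev)
{
        union ath5k_ioctl_info info;

        info.config_info.frequency    = 5200;                   /* MHz */
        info.config_info.rate         = RATE_6M;                /* hypothetical enum value */
        info.config_info.tx_power_dbm = 15;
        info.config_info.antenna_mode = AR5K_ANTMODE_DEFAULT;

        return conf_ath5k(netdev, &info, SIO_SET_CONFIG);
}
#endif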

