base.c
00001 /*------------------------------------------------------------------------
00002  *---------------------        ATH5K Driver               --------------------
00003  *------------------------------------------------------------------------
00004  *                                                         V1.0B  04/08/09
00005  *
00006  *
00007  *  Aug 2009 - Samuel Cabrero <samuelcabrero@gmail.com>
00008  *              Initial release
00009  *
00010  *  ----------------------------------------------------------------------
00011  *  Copyright (C) 2000-2009, Universidad de Zaragoza, SPAIN
00012  *
00013  *  Authors:
00014  *              Samuel Cabrero       <samuelcabrero@gmail.com>
00015  *              Danilo Tardioli          <dantard@unizar.es>
00016  *              Jose Luis Villarroel <jlvilla@unizar.es>
00017  *
00018  *  This is a modified version of the original ath5k driver for use with the
00019  *  RT-WMP protocol developed at Universidad de Zaragoza. It should work with
00020  *  all Atheros 5xxx WLAN cards. The IEEE 802.11 layer has been removed, so the
00021  *  driver just sends and receives frames over the air, as if it were an
00022  *  Ethernet bus interface.
00023  * 
00024  *  Please read ath5k_interface.h for instructions.
00025  *
00026  *  This program is distributed in the hope that it will be useful,
00027  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
00028  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00029  *  GNU General Public License for more details.
00030  *
00031  *  Read the following header for redistribution terms.
00032  *
00033  *---------------------------------------------------------------------------*/
00034 
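/*
 * Illustrative userspace sketch (not part of the driver): since the module
 * registers the card as a plain net_device that exchanges raw frames, an
 * ordinary AF_PACKET socket should be enough to push one frame through it.
 * The interface name ("wmp0") and the EtherType (0x6969) below are
 * placeholders, not values defined by this driver; the real control API is
 * documented in ath5k_interface.h.
 */
#if 0   /* userspace example only, never compiled with the module */
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <net/ethernet.h>
#include <net/if.h>

static int send_raw_frame(const unsigned char dst[ETH_ALEN],
                          const void *payload, size_t len)
{
        /* SOCK_DGRAM lets the kernel build the Ethernet header for us */
        int fd = socket(AF_PACKET, SOCK_DGRAM, htons(0x6969));
        struct sockaddr_ll addr = {0};
        ssize_t sent;

        if (fd < 0)
                return -1;

        addr.sll_family   = AF_PACKET;
        addr.sll_protocol = htons(0x6969);          /* placeholder EtherType */
        addr.sll_ifindex  = if_nametoindex("wmp0"); /* placeholder ifname */
        addr.sll_halen    = ETH_ALEN;
        memcpy(addr.sll_addr, dst, ETH_ALEN);

        sent = sendto(fd, payload, len, 0,
                      (struct sockaddr *)&addr, sizeof(addr));
        close(fd);
        return sent < 0 ? -1 : 0;
}
#endif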
00035 /*-
00036  * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
00037  * Copyright (c) 2004-2005 Atheros Communications, Inc.
00038  * Copyright (c) 2006 Devicescape Software, Inc.
00039  * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
00040  * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
00041  *
00042  * All rights reserved.
00043  *
00044  * Redistribution and use in source and binary forms, with or without
00045  * modification, are permitted provided that the following conditions
00046  * are met:
00047  * 1. Redistributions of source code must retain the above copyright
00048  *    notice, this list of conditions and the following disclaimer,
00049  *    without modification.
00050  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
00051  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
00052  *    redistribution must be conditioned upon including a substantially
00053  *    similar Disclaimer requirement for further binary redistribution.
00054  * 3. Neither the names of the above-listed copyright holders nor the names
00055  *    of any contributors may be used to endorse or promote products derived
00056  *    from this software without specific prior written permission.
00057  *
00058  * Alternatively, this software may be distributed under the terms of the
00059  * GNU General Public License ("GPL") version 2 as published by the Free
00060  * Software Foundation.
00061  *
00062  * NO WARRANTY
00063  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00064  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00065  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
00066  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
00067  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
00068  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
00069  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
00070  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
00071  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
00072  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
00073  * THE POSSIBILITY OF SUCH DAMAGES.
00074  *
00075  */
00076 
00077 #include <linux/version.h>
00078 #include <linux/module.h>
00079 #include <linux/delay.h>
00080 #include <linux/if.h>
00081 #include <linux/netdevice.h>
00082 #include <linux/cache.h>
00083 #include <linux/pci.h>
00084 #include <linux/ethtool.h>
00085 #include <linux/uaccess.h>
00086 #include <linux/if_arp.h>
00087 #include <linux/etherdevice.h>
00088 #include <asm/unaligned.h>
00089 
00090 #include "base.h"
00091 #include "reg.h"
00092 #include "debug.h"
00093 
00094 static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
00095 static int modparam_all_channels = true;
00096 static int use_beacon_frames = 0;
00097 /******************\
00098 * Internal defines *
00099 \******************/
00100 
00101 /* Module info */
00102 MODULE_AUTHOR("Jiri Slaby");
00103 MODULE_AUTHOR("Nick Kossifidis");
00104 MODULE_AUTHOR("Samuel Cabrero");
00105 MODULE_DESCRIPTION("Modified version of the ath5k driver for use with the RT-WMP protocol.");
00106 MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
00107 MODULE_LICENSE("Dual BSD/GPL");
00108 MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
00109 
00110 
00111 /* Known PCI ids */
00112 static const struct pci_device_id ath5k_pci_id_table[] = {
00113         { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
00114         { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
00115         { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
00116         { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
00117         { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
00118         { PCI_VDEVICE(3COM_2,  0x0013) }, /* 3com 5212 */
00119         { PCI_VDEVICE(3COM,    0x0013) }, /* 3com 3CRDAG675 5212 */
00120         { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
00121         { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
00122         { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
00123         { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
00124         { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
00125         { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
00126         { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
00127         { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
00128         { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
00129         { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
00130         { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
00131         { 0 }
00132 };
00133 MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
00134 
00135 /* Known SREVs */
00136 static const struct ath5k_srev_name srev_names[] = {
00137         { "5210",       AR5K_VERSION_MAC,       AR5K_SREV_AR5210 },
00138         { "5311",       AR5K_VERSION_MAC,       AR5K_SREV_AR5311 },
00139         { "5311A",      AR5K_VERSION_MAC,       AR5K_SREV_AR5311A },
00140         { "5311B",      AR5K_VERSION_MAC,       AR5K_SREV_AR5311B },
00141         { "5211",       AR5K_VERSION_MAC,       AR5K_SREV_AR5211 },
00142         { "5212",       AR5K_VERSION_MAC,       AR5K_SREV_AR5212 },
00143         { "5213",       AR5K_VERSION_MAC,       AR5K_SREV_AR5213 },
00144         { "5213A",      AR5K_VERSION_MAC,       AR5K_SREV_AR5213A },
00145         { "2413",       AR5K_VERSION_MAC,       AR5K_SREV_AR2413 },
00146         { "2414",       AR5K_VERSION_MAC,       AR5K_SREV_AR2414 },
00147         { "5424",       AR5K_VERSION_MAC,       AR5K_SREV_AR5424 },
00148         { "5413",       AR5K_VERSION_MAC,       AR5K_SREV_AR5413 },
00149         { "5414",       AR5K_VERSION_MAC,       AR5K_SREV_AR5414 },
00150         { "2415",       AR5K_VERSION_MAC,       AR5K_SREV_AR2415 },
00151         { "5416",       AR5K_VERSION_MAC,       AR5K_SREV_AR5416 },
00152         { "5418",       AR5K_VERSION_MAC,       AR5K_SREV_AR5418 },
00153         { "2425",       AR5K_VERSION_MAC,       AR5K_SREV_AR2425 },
00154         { "2417",       AR5K_VERSION_MAC,       AR5K_SREV_AR2417 },
00155         { "xxxxx",      AR5K_VERSION_MAC,       AR5K_SREV_UNKNOWN },
00156         { "5110",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5110 },
00157         { "5111",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5111 },
00158         { "5111A",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_5111A },
00159         { "2111",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2111 },
00160         { "5112",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5112 },
00161         { "5112A",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_5112A },
00162         { "5112B",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_5112B },
00163         { "2112",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2112 },
00164         { "2112A",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_2112A },
00165         { "2112B",      AR5K_VERSION_RAD,       AR5K_SREV_RAD_2112B },
00166         { "2413",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2413 },
00167         { "5413",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5413 },
00168         { "2316",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2316 },
00169         { "2317",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_2317 },
00170         { "5424",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5424 },
00171         { "5133",       AR5K_VERSION_RAD,       AR5K_SREV_RAD_5133 },
00172         { "xxxxx",      AR5K_VERSION_RAD,       AR5K_SREV_UNKNOWN },
00173 };
00174 
00175 static const struct ieee80211_rate ath5k_rates[] = {
00176         { .bitrate = 10,
00177           .hw_value = ATH5K_RATE_CODE_1M, },
00178         { .bitrate = 20,
00179           .hw_value = ATH5K_RATE_CODE_2M,
00180           .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
00181           .flags = IEEE80211_RATE_SHORT_PREAMBLE },
00182         { .bitrate = 55,
00183           .hw_value = ATH5K_RATE_CODE_5_5M,
00184           .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
00185           .flags = IEEE80211_RATE_SHORT_PREAMBLE },
00186         { .bitrate = 110,
00187           .hw_value = ATH5K_RATE_CODE_11M,
00188           .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
00189           .flags = IEEE80211_RATE_SHORT_PREAMBLE },
00190         { .bitrate = 60,
00191           .hw_value = ATH5K_RATE_CODE_6M,
00192           .flags = 0 },
00193         { .bitrate = 90,
00194           .hw_value = ATH5K_RATE_CODE_9M,
00195           .flags = 0 },
00196         { .bitrate = 120,
00197           .hw_value = ATH5K_RATE_CODE_12M,
00198           .flags = 0 },
00199         { .bitrate = 180,
00200           .hw_value = ATH5K_RATE_CODE_18M,
00201           .flags = 0 },
00202         { .bitrate = 240,
00203           .hw_value = ATH5K_RATE_CODE_24M,
00204           .flags = 0 },
00205         { .bitrate = 360,
00206           .hw_value = ATH5K_RATE_CODE_36M,
00207           .flags = 0 },
00208         { .bitrate = 480,
00209           .hw_value = ATH5K_RATE_CODE_48M,
00210           .flags = 0 },
00211         { .bitrate = 540,
00212           .hw_value = ATH5K_RATE_CODE_54M,
00213           .flags = 0 },
00214         /* XR missing */
00215 };
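/*
 * Note on the table above: .bitrate is given in units of 100 kbps (so 10 means
 * 1 Mb/s and 540 means 54 Mb/s), and .hw_value is the AR5K hardware rate code
 * that ends up in the TX descriptor (see ath5k_txbuf_setup() below).
 */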
00216 
00217 
00218 /*
00219  * Prototypes - PCI stack related functions
00220  */
00221 static int __devinit    ath5k_pci_probe(struct pci_dev *pdev,
00222                                 const struct pci_device_id *id);
00223 static void __devexit   ath5k_pci_remove(struct pci_dev *pdev);
00224 
00225 static struct pci_driver ath5k_pci_driver = {
00226         .name           = "ath5k_pci",
00227         .id_table       = ath5k_pci_id_table,
00228         .probe          = ath5k_pci_probe,
00229         .remove         = __devexit_p(ath5k_pci_remove),
00230 };
00231 
00232 
00233 
00234 /*
00235  * Prototypes - MAC 802.11 stack related functions
00236  */
00237 static int ath5k_tx(struct sk_buff *skb, struct net_device *netdev);
00238 static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
00239 static int ath5k_reset_wake(struct ath5k_softc *sc);
00240 static int ath5k_start(struct ath5k_softc *sc);
00241 static void ath5k_stop(struct ath5k_softc *sc);
00242 
00243 /*
00244  * Prototypes - Internal functions
00245  */
00246 /* Attach detach */
00247 static int  ath5k_attach(struct ath5k_softc *sc);
00248 static void ath5k_detach(struct pci_dev *pdev, struct ath5k_softc *sc);
00249 /* Channel/mode setup */
00250 static inline short ath5k_ieee2mhz(short chan);
00251 static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
00252                                 struct ieee80211_channel *channels,
00253                                 unsigned int mode,
00254                                 unsigned int max);
00255 static int      ath5k_setup_bands(struct ath5k_softc *sc);
00256 static void     ath5k_mode_setup(struct ath5k_softc *sc);
00257 
00258 /* Descriptor setup */
00259 static int      ath5k_desc_alloc(struct ath5k_softc *sc,
00260                                 struct pci_dev *pdev);
00261 static void     ath5k_desc_free(struct ath5k_softc *sc,
00262                                 struct pci_dev *pdev);
00263 /* Buffers setup */
00264 static int      ath5k_rxbuf_setup(struct ath5k_softc *sc,
00265                                 struct ath5k_buf *bf);
00266 static int      ath5k_txbuf_setup(struct ath5k_softc *sc,
00267                                 struct ath5k_buf *bf);
00268 static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
00269                                 struct ath5k_buf *bf)
00270 {
00271         BUG_ON(!bf);
00272         if (!bf->skb)
00273                 return;
00274         pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
00275                         PCI_DMA_TODEVICE);
00276         dev_kfree_skb_any(bf->skb);
00277         bf->skb = NULL;
00278 }
00279 
00280 static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
00281                                 struct ath5k_buf *bf)
00282 {
00283         BUG_ON(!bf);
00284         if (!bf->skb)
00285                 return;
00286         pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
00287                         PCI_DMA_FROMDEVICE);
00288         dev_kfree_skb_any(bf->skb);
00289         bf->skb = NULL;
00290 }
00291 
00292 
00293 /* Queues setup */
00294 static struct   ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc,
00295                                 int qtype, int subtype);
00296 static void     ath5k_txq_drainq(struct ath5k_softc *sc,
00297                                 struct ath5k_txq *txq);
00298 static void     ath5k_txq_cleanup(struct ath5k_softc *sc);
00299 static void     ath5k_txq_release(struct ath5k_softc *sc);
00300 /* Rx handling */
00301 static int      ath5k_rx_start(struct ath5k_softc *sc);
00302 static void     ath5k_rx_stop(struct ath5k_softc *sc);
00303 static void     ath5k_rx_done(struct ath5k_softc *sc);
00304 /* Tx handling */
00305 static void     ath5k_tx_done(struct ath5k_softc *sc,
00306                                 struct ath5k_txq *txq);
00307 /* Interrupt handling */
00308 static int ath5k_init(struct ath5k_softc *sc);
00309 static int ath5k_stop_locked(struct ath5k_softc *sc);
00310 static int ath5k_stop_hw(struct ath5k_softc *sc);
00311 static irqreturn_t ath5k_intr(int irq, void *dev_id);
00312 static void ath5k_tasklet_reset(unsigned long data);
00313 //static void ath5k_tasklet_calibrate(unsigned long data);
00314 /* Net device */
00315 static int  ath5k_open(struct net_device *netdev);
00316 static int  ath5k_close (struct net_device *netdev);
00317 /* Configuration and auxiliary */
00318 static void ath5k_config_filter (struct ath5k_softc *sc, bool broadcast, bool control, bool promisc);
00319 static int ath5k_config_tx_control (struct ath5k_softc *sc, 
00320                 unsigned char count, bool wait_for_ack, bool use_short_preamble);
00321 static int ath5k_config_disable_ack (struct ath5k_softc *sc, bool disable);
00322 static int ath5k_config(struct ath5k_softc *sc, unsigned short channel, enum rates rate, unsigned char power,
00323                 enum ath5k_ant_mode antenna_mode);
00324 static struct ieee80211_channel * ath5k_get_channel(struct ath5k_softc *sc, unsigned int freq);
00325 static int ath5k_get_rate_idx(struct ath5k_softc *sc, enum ieee80211_band band, enum rates rate);
00326 static int ath5k_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
00327 
00328 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
00329 static const struct net_device_ops ath5k_netdev_ops = {
00330         .ndo_open               = ath5k_open,
00331         .ndo_stop               = ath5k_close,
00332         .ndo_start_xmit         = ath5k_tx,
00333         .ndo_do_ioctl           = ath5k_ioctl,
00334 };
00335 #endif
00336 
00337 /*
00338  * Module init/exit functions
00339  */
00340 
00341 
00342 /* *** DANIIIIIIIIIIII
00343         static int
00344         hello_read_proc(char *buffer, char **start, off_t offset, int size, int *eof,
00345                         void *data)
00346         {
00347     char *hello_str = "Hello, world!\n";
00348     int len = strlen(hello_str);
00349     if (size < len)
00350             return -EINVAL;
00351     if (offset != 0)
00352             return 0;
00353     strcpy(buffer, hello_str);
00354     *eof = 1;
00355     return len;
00356 
00357 }
00358 */
00359 
00360 
00361 static int __init
00362 init_ath5k_pci(void)
00363 {
00364         int ret;
00365 
00366         printk(KERN_INFO "Module ath5k_raw loaded\n");
00367 
00368         ret = pci_register_driver(&ath5k_pci_driver);
00369         if (ret) {
00370                 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
00371                 return ret;
00372         }
00373 
00374 /* DANIIIIIIIIII
00375     if (create_proc_read_entry("hello_world", 0, NULL, hello_read_proc,
00376                                 NULL) == 0) {
00377             printk(KERN_ERR,
00378                    "Unable to register \"Hello, world!\" proc file\n");
00379             return -ENOMEM;
00380     }
00381 */
00382 
00383 
00384         return 0;
00385 }
00386 
00387 static void __exit
00388 exit_ath5k_pci(void)
00389 {
00390         pci_unregister_driver(&ath5k_pci_driver);
00391 
00392         printk(KERN_INFO "Module ath5k_raw unloaded\n");
00393 }
00394 
00395 module_init(init_ath5k_pci);
00396 module_exit(exit_ath5k_pci);
00397 
00398 
00399 /********************\
00400 * PCI Initialization *
00401 \********************/
00402 
00403 static const char *
00404 ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
00405 {
00406         const char *name = "xxxxx";
00407         unsigned int i;
00408 
00409         for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
00410                 if (srev_names[i].sr_type != type)
00411                         continue;
00412 
00413                 if ((val & 0xf0) == srev_names[i].sr_val)
00414                         name = srev_names[i].sr_name;
00415 
00416                 if ((val & 0xff) == srev_names[i].sr_val) {
00417                         name = srev_names[i].sr_name;
00418                         break;
00419                 }
00420         }
00421 
00422         return name;
00423 }
00424 
00425 static int __devinit
00426 ath5k_pci_probe(struct pci_dev *pdev,
00427                 const struct pci_device_id *id)
00428 {
00429         struct net_device *netdev;
00430         struct ath5k_softc *sc;
00431         void __iomem *mem;
00432         int ret, rate_idx;
00433         u8 csz;
00434         u8 mac[ETH_ALEN];
00435         
00436         ret = 1;
00437 
00438         /* Initialize the net_device */
00439         netdev = alloc_etherdev(sizeof(struct ath5k_softc));
00440         if (netdev == NULL) 
00441         {
00442                 ATH5K_ERR(NULL, "can't allocate net_device structure\n");
00443                 goto err;
00444         }
00445 
00446         sc = netdev_priv(netdev);
00447         sc->netdev = netdev;
00448         sc->pdev = pdev;
00449         SET_NETDEV_DEV(netdev, &pdev->dev);
00450 
00451 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
00452         netdev->open = ath5k_open;
00453         netdev->stop = ath5k_close;
00454         netdev->hard_start_xmit = ath5k_tx;
00455         netdev->do_ioctl = ath5k_ioctl;
00456 #else
00457         netdev->netdev_ops = &ath5k_netdev_ops;
00458 #endif
00459         netdev->tx_queue_len = ATH_TXBUF;
00460 
00461         ret = pci_enable_device(pdev);
00462         if (ret) {
00463                 dev_err(&pdev->dev, "can't enable device\n");
00464                 goto err_netdev;
00465         }
00466 
00467         /* XXX 32-bit addressing only */
00468         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
00469         if (ret) {
00470                 dev_err(&pdev->dev, "32-bit DMA not available\n");
00471                 goto err_dis;
00472         }
00473 
00474         /*
00475          * Cache line size is used to size and align various
00476          * structures used to communicate with the hardware.
00477          */
00478         pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
00479         if (csz == 0) {
00480                 /*
00481                  * Linux 2.4.18 (at least) writes the cache line size
00482                  * register as a 16-bit wide register which is wrong.
00483                  * We must have this setup properly for rx buffer
00484                  * DMA to work so force a reasonable value here if it
00485                  * comes up zero.
00486                  */
00487                 csz = L1_CACHE_BYTES >> 2;
00488                 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
00489         }
00490         /*
00491          * The default setting of latency timer yields poor results,
00492          * set it to the value used by other systems.  It may be worth
00493          * tweaking this setting more.
00494          */
00495         pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
00496 
00497         /* Enable bus mastering */
00498         pci_set_master(pdev);
00499 
00500         /*
00501          * Disable the RETRY_TIMEOUT register (0x41) to keep
00502          * PCI Tx retries from interfering with C3 CPU state.
00503          */
00504         pci_write_config_byte(pdev, 0x41, 0);
00505 
00506         ret = pci_request_region(pdev, 0, "ath5k");
00507         if (ret) {
00508                 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
00509                 goto err_dis;
00510         }
00511 
00512         mem = pci_iomap(pdev, 0, 0);
00513         if (!mem) {
00514                 dev_err(&pdev->dev, "cannot remap PCI memory region\n");
00515                 ret = -EIO;
00516                 goto err_reg;
00517         }
00518 
00519         /* Set debug level */
00520         ath5k_set_debug_level(sc);
00521 
00522         /*
00523          * Mark the device as detached to avoid processing
00524          * interrupts until setup is complete.
00525          */
00526         __set_bit(ATH_STAT_INVALID, sc->status);
00527 
00528         sc->iobase = mem; /* So we can unmap it on detach */
00529         sc->cachelsz = csz << 2; /* convert to bytes */
00530         
00531         mutex_init(&sc->lock);
00532         spin_lock_init(&sc->rxbuflock);
00533         spin_lock_init(&sc->txbuflock);
00534 
00535         /* Set private data */
00536         pci_set_drvdata(pdev, netdev);
00537 
00538         /* Setup interrupt handler */
00539         /* In ath5k_open, when the user brings up the interface */
00540 
00541         /* If we passed the test, malloc an ath5k_hw struct */
00542         sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
00543         if (!sc->ah) {
00544                 ret = -ENOMEM;
00545                 ATH5K_ERR(sc, "out of memory\n");
00546                 goto err_reg;
00547         }
00548 
00549         sc->ah->ah_sc = sc;
00550         sc->ah->ah_iobase = sc->iobase;
00551 
00552         /* Initialize device */
00553         ret = ath5k_hw_attach(sc);
00554         if (ret) {
00555                 ATH5K_ERR(sc, "error attaching hardware\n");
00556                 goto err_free;
00557         }
00558 
00559         /* Finish private driver data initialization */
00560         ret = ath5k_attach(sc);
00561         if (ret)
00562                 goto err_ah;
00563 
00564         /* Set MAC address */
00565         ath5k_hw_get_lladdr(sc->ah, mac);
00566         memcpy(netdev->dev_addr, mac, ETH_ALEN);
00567         memcpy(netdev->perm_addr, mac, ETH_ALEN);
00568 
00569         ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
00570                         ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
00571                                         sc->ah->ah_mac_srev,
00572                                         sc->ah->ah_phy_revision);
00573 
00574         if (!sc->ah->ah_single_chip) {
00575                 /* Single chip radio (!RF5111) */
00576                 if (sc->ah->ah_radio_5ghz_revision &&
00577                         !sc->ah->ah_radio_2ghz_revision) {
00578                         /* No 5GHz support -> report 2GHz radio */
00579                         if (!test_bit(AR5K_MODE_11A,
00580                                 sc->ah->ah_capabilities.cap_mode)) {
00581                                 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
00582                                         ath5k_chip_name(AR5K_VERSION_RAD,
00583                                                 sc->ah->ah_radio_5ghz_revision),
00584                                                 sc->ah->ah_radio_5ghz_revision);
00585                         /* No 2GHz support (5110 and some
00586                          * 5GHz-only cards) -> report 5GHz radio */
00587                         } else if (!test_bit(AR5K_MODE_11B,
00588                                 sc->ah->ah_capabilities.cap_mode)) {
00589                                 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
00590                                         ath5k_chip_name(AR5K_VERSION_RAD,
00591                                                 sc->ah->ah_radio_5ghz_revision),
00592                                                 sc->ah->ah_radio_5ghz_revision);
00593                         /* Multiband radio */
00594                         } else {
00595                                 ATH5K_INFO(sc, "RF%s multiband radio found"
00596                                         " (0x%x)\n",
00597                                         ath5k_chip_name(AR5K_VERSION_RAD,
00598                                                 sc->ah->ah_radio_5ghz_revision),
00599                                                 sc->ah->ah_radio_5ghz_revision);
00600                         }
00601                 }
00602                 /* Multi chip radio (RF5111 - RF2111) ->
00603                  * report both 2GHz/5GHz radios */
00604                 else if (sc->ah->ah_radio_5ghz_revision &&
00605                                 sc->ah->ah_radio_2ghz_revision){
00606                         ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
00607                                 ath5k_chip_name(AR5K_VERSION_RAD,
00608                                         sc->ah->ah_radio_5ghz_revision),
00609                                         sc->ah->ah_radio_5ghz_revision);
00610                         ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
00611                                 ath5k_chip_name(AR5K_VERSION_RAD,
00612                                         sc->ah->ah_radio_2ghz_revision),
00613                                         sc->ah->ah_radio_2ghz_revision);
00614                 }
00615         }
00616 
00617 
00618         /* ready to process interrupts */
00619         __clear_bit(ATH_STAT_INVALID, sc->status);
00620 
00621         /* Set channel pointers */
00622         sc->curchan = ath5k_get_channel(sc, 2412);
00623         if (sc->curchan == NULL)
00624         {
00625                 ATH5K_ERR(sc, "Can't set channel %d\n", 2412);
00626                 ret = -1;
00627                 goto err_detach;
00628         }
00629         sc->curband = &sc->sbands[sc->curchan->band];
00630 
00631         /* Configure tx_info */
00632         sc->tx_info.band = sc->curband->band;
00633         rate_idx = ath5k_get_rate_idx(sc, sc->curband->band, RATE_54M);
00634         if (rate_idx < 0 || rate_idx >= AR5K_MAX_RATES)
00635         {
00636                 ATH5K_ERR(sc, "Can't set rate %d\n", RATE_54M);
00637                 ret = -1;
00638                 goto err_detach;
00639         }
00640         sc->tx_info.rate_idx =  rate_idx;
00641         sc->tx_info.count = 1;
00642         sc->tx_info.wait_for_ack = false;
00643         sc->tx_info.use_short_preamble = false;
00644 
00645         /* Configure the RX filter */
00646         ath5k_config_filter(sc, true, false, true);
00647 
00648         /* Configure sending of ACKs in response to unicast frames */
00649         ath5k_config_disable_ack(sc, false);
00650 
00651         /* Configure TX power (applied in half-dB steps).
00652          * D-Link cards -> 15 dBm.
00653          * Ubiquiti cards -> 15 dBm. They have a 10 dB offset built into the
00654          * card in G mode, so the real output is 25 dBm (about 300 mW) in G
00655          * and 15 dBm in A.
00656          */
00657         sc->power_level = 15;
00658 
00659         /* Register the net_device */
00660         ret = register_netdev(netdev);
00661         if (ret)
00662         {
00663                 ATH5K_ERR(NULL, "can't register net_device structure\n");
00664                 goto err_detach;
00665         }
00666 
00667         /* Print initial config */
00668         ATH5K_INFO(sc, "Freq %d MHz, Rate (%d * 100) kbps, Power %d dBm\n",
00669                         sc->curchan->center_freq, RATE_54M, sc->power_level);
00670 
00671         return 0;
00672 err_detach:
00673         ath5k_detach(pdev, sc);
00674 err_ah:
00675         ath5k_hw_detach(sc->ah);
00676 err_free:
00677         pci_iounmap(pdev, mem);
00678 err_reg:
00679         pci_release_region(pdev, 0);
00680 err_dis:
00681         pci_disable_device(pdev);
00682 err_netdev:
00683         pci_set_drvdata(pdev, NULL);
00684         free_netdev(netdev);
00685 err:
00686         return ret;
00687 }
00688 
00689 static void __devexit
00690 ath5k_pci_remove(struct pci_dev *pdev)
00691 {
00692         struct net_device *netdev = pci_get_drvdata(pdev);
00693 
00694         if (netdev) {
00695                 struct ath5k_softc *sc = netdev_priv(netdev);
00696 
00697                 unregister_netdev(netdev);
00698                 ath5k_detach(pdev, sc);
00699                 ath5k_hw_detach(sc->ah);
00700                 /* Free IRQ in ath5k_close */
00701                 pci_iounmap(pdev, sc->iobase);
00702                 free_netdev(netdev);
00703                 pci_release_region(pdev, 0);
00704                 pci_disable_device(pdev);
00705                 pci_set_drvdata(pdev, NULL);
00706         }
00707 }
00708 
00709 static int
00710 ath5k_open(struct net_device *netdev)
00711 {
00712         struct ath5k_softc *sc = netdev_priv(netdev);
00713         int err;
00714 
00715         if ((err = request_irq(sc->pdev->irq, ath5k_intr, IRQF_SHARED, sc->netdev->name, sc->netdev))) {
00716                 ATH5K_ERR(sc, "request_irq failed\n");
00717                 goto err_irq;
00718         }
00719 
00720         ath5k_start(sc);
00721 
00722         /* Half dB steps */
00723         ath5k_hw_set_txpower_limit(sc->ah, (sc->power_level * 2));
00724 
00725         /* Configure the antenna mode */
00726         ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
00727 
00728         /* Start the TX packet queue once everything is ready */
00729         netif_start_queue(netdev);
00730 
00731         return 0;
00732         
00733 err_irq:
00734         return err;
00735 }
00736 
00737 static int
00738 ath5k_close(struct net_device *netdev)
00739 {
00740         struct ath5k_softc *sc = netdev_priv(netdev);
00741 
00742         /* Stop the transmit queue */
00743         netif_stop_queue(netdev);
00744 
00745         ath5k_stop(sc);
00746         
00747         free_irq(sc->pdev->irq, sc->netdev);
00748 
00749         return 0;
00750 }
00751 
00752 /***********************\
00753 * Driver Initialization *
00754 \***********************/
00755 
00756 static int
00757 ath5k_attach(struct ath5k_softc *sc)
00758 {
00759         struct pci_dev *pdev = sc->pdev;
00760         struct ath5k_hw *ah = sc->ah;
00761         int ret;
00762 
00763         ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
00764 
00765         /*
00766          * Check if the MAC has multi-rate retry support.
00767          * We do this by trying to setup a fake extended
00768          * descriptor.  MAC's that don't have support will
00769          * return false w/o doing anything.  MAC's that do
00770          * support it will return true w/o doing anything.
00771          */
00772         ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
00773         if (ret < 0)
00774                 goto err;
00775         if (ret > 0)
00776                 __set_bit(ATH_STAT_MRRETRY, sc->status);
00777 
00778         /*
00779          * Collect the channel list.  The 802.11 layer
00780          * is responsible for filtering this list based
00781          * on settings like the phy mode and regulatory
00782          * domain restrictions.
00783          */
00784         ret = ath5k_setup_bands(sc);
00785         if (ret) {
00786                 ATH5K_ERR(sc, "can't get channels\n");
00787                 goto err;
00788         }
00789 
00790         /* NB: setup here so ath5k_rate_update is happy */
00791         if (test_bit(AR5K_MODE_11A, ah->ah_modes))
00792                 sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
00793         else
00794                 sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
00795 
00796         /*
00797          * Allocate tx+rx descriptors and populate the lists.
00798          */
00799         ret = ath5k_desc_alloc(sc, pdev);
00800         if (ret) {
00801                 ATH5K_ERR(sc, "can't allocate descriptors\n");
00802                 goto err;
00803         }
00804 
00805         /*
00806          * Allocate hardware transmit queue. Note that hw functions 
00807          * handle resetting these queues at the needed time.
00808          */
00809         sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
00810         if (IS_ERR(sc->txq)) {
00811                 ATH5K_ERR(sc, "can't setup xmit queue\n");
00812                 ret = PTR_ERR(sc->txq);
00813                 goto err_desc;
00814         }
00815 
00816         tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
00817 //      tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
00818 
00819         /* All MAC address bits matter for ACKs */
00820         memset(sc->bssidmask, 0xff, ETH_ALEN);
00821         ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
00822 
00823         return 0;
00824 err_desc:
00825         ath5k_desc_free(sc, pdev);
00826 err:
00827         return ret;
00828 }
00829 
00830 static void
00831 ath5k_detach(struct pci_dev *pdev, struct ath5k_softc *sc)
00832 {
00833         /*
00834          * NB: the order of these is important:
00835          * o call the 802.11 layer before detaching ath5k_hw to
00836          *   ensure callbacks into the driver to delete global
00837          *   key cache entries can be handled
00838          * o reclaim the tx queue data structures after calling
00839          *   the 802.11 layer as we'll get called back to reclaim
00840          *   node state and potentially want to use them
00841          * o to cleanup the tx queues the hal is called, so detach
00842          *   it last
00843          * XXX: ??? detach ath5k_hw ???
00844          * Other than that, it's straightforward...
00845          */
00846         ath5k_desc_free(sc, pdev);
00847         ath5k_txq_release(sc);
00848 }
00849 
00850 
00851 
00852 
00853 /********************\
00854 * Channel/mode setup *
00855 \********************/
00856 /* Ugly macro to convert literal channel numbers into their MHz equivalents.
00857  * There are certainly some conditions that will break this (like feeding it '30')
00858  * but they shouldn't arise since nothing talks on channel 30. */
00859 #define ieee80211chan2mhz(x) \
00860         (((x) <= 14) ? \
00861         (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
00862         ((x) + 1000) * 5)
00863 
00864 /*
00865  * Convert IEEE channel number to MHz frequency.
00866  */
00867 static inline short
00868 ath5k_ieee2mhz(short chan)
00869 {
00870         if (chan <= 14 || chan >= 27)
00871                 return ieee80211chan2mhz(chan);
00872         else
00873                 return 2212 + chan * 20;
00874 }
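/*
 * Worked examples of the conversion above:
 *   channel   1 -> 2412 MHz   (1 * 5 + 2407)
 *   channel  14 -> 2484 MHz   (special-cased)
 *   channel  20 -> 2612 MHz   (2212 + 20 * 20, the 15..26 range)
 *   channel  36 -> 5180 MHz   ((36 + 1000) * 5)
 *   channel 149 -> 5745 MHz   ((149 + 1000) * 5)
 */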
00875 
00876 /*
00877  * Returns true for the channel numbers used without all_channels modparam.
00878  */
00879 static bool ath5k_is_standard_channel(short chan)
00880 {
00881         return ((chan <= 14) ||
00882                 /* UNII 1,2 */
00883                 ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
00884                 /* midband */
00885                 ((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
00886                 /* UNII-3 */
00887                 ((chan & 3) == 1 && chan >= 149 && chan <= 165));
00888 }
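/*
 * In other words, without the all_channels option this accepts channels 1-14
 * (2.4 GHz) plus the 5 GHz channels 36-64, 100-140 and 149-165 in steps of 4.
 */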
00889 
00890 static unsigned int
00891 ath5k_copy_channels(struct ath5k_hw *ah,
00892                 struct ieee80211_channel *channels,
00893                 unsigned int mode,
00894                 unsigned int max)
00895 {
00896         unsigned int i, count, size, chfreq, freq, ch;
00897 
00898         if (!test_bit(mode, ah->ah_modes))
00899                 return 0;
00900 
00901         switch (mode) {
00902         case AR5K_MODE_11A:
00903         case AR5K_MODE_11A_TURBO:
00904                 /* 1..220, but 2GHz frequencies are filtered by check_channel */
00905                 size = 220;
00906                 chfreq = CHANNEL_5GHZ;
00907                 break;
00908         case AR5K_MODE_11B:
00909         case AR5K_MODE_11G:
00910         case AR5K_MODE_11G_TURBO:
00911                 size = 26;
00912                 chfreq = CHANNEL_2GHZ;
00913                 break;
00914         default:
00915                 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
00916                 return 0;
00917         }
00918 
00919         for (i = 0, count = 0; i < size && max > 0; i++) {
00920                 ch = i + 1;
00921                 freq = ath5k_ieee2mhz(ch);
00922 
00923                 /* Check if channel is supported by the chipset */
00924                 if (!ath5k_channel_ok(ah, freq, chfreq))
00925                         continue;
00926 
00927                 if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
00928                         continue;
00929 
00930                 /* Write channel info and increment counter */
00931                 channels[count].center_freq = freq;
00932                 channels[count].band = (chfreq == CHANNEL_2GHZ) ?
00933                         IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
00934                 switch (mode) {
00935                 case AR5K_MODE_11A:
00936                 case AR5K_MODE_11G:
00937                         channels[count].hw_value = chfreq | CHANNEL_OFDM;
00938                         break;
00939                 case AR5K_MODE_11A_TURBO:
00940                 case AR5K_MODE_11G_TURBO:
00941                         channels[count].hw_value = chfreq |
00942                                 CHANNEL_OFDM | CHANNEL_TURBO;
00943                         break;
00944                 case AR5K_MODE_11B:
00945                         channels[count].hw_value = CHANNEL_B;
00946                 }
00947 
00948                 count++;
00949                 max--;
00950         }
00951 
00952         return count;
00953 }
00954 
00955 static int
00956 ath5k_setup_bands(struct ath5k_softc *sc)
00957 {
00958         struct ath5k_hw *ah = sc->ah;
00959         struct ieee80211_supported_band *sband;
00960         int max_c, count_c = 0;
00961         int i;
00962 
00963         BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
00964         max_c = ARRAY_SIZE(sc->channels);
00965 
00966         /* 2GHz band */
00967         sband = &sc->sbands[IEEE80211_BAND_2GHZ];
00968         sband->band = IEEE80211_BAND_2GHZ;
00969         sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
00970 
00971         if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
00972                 /* G mode */
00973                 memcpy(sband->bitrates, &ath5k_rates[0],
00974                        sizeof(struct ieee80211_rate) * 12);
00975                 sband->n_bitrates = 12;
00976 
00977                 sband->channels = sc->channels;
00978                 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
00979                                         AR5K_MODE_11G, max_c);
00980 
00981                 count_c = sband->n_channels;
00982                 max_c -= count_c;
00983         } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
00984                 /* B mode */
00985                 memcpy(sband->bitrates, &ath5k_rates[0],
00986                        sizeof(struct ieee80211_rate) * 4);
00987                 sband->n_bitrates = 4;
00988 
00989                 /* 5211 only supports B rates and uses 4-bit rate codes
00990                  * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B),
00991                  * fix them up here:
00992                  */
00993                 if (ah->ah_version == AR5K_AR5211) {
00994                         for (i = 0; i < 4; i++) {
00995                                 sband->bitrates[i].hw_value =
00996                                         sband->bitrates[i].hw_value & 0xF;
00997                                 sband->bitrates[i].hw_value_short =
00998                                         sband->bitrates[i].hw_value_short & 0xF;
00999                         }
01000                 }
01001 
01002                 sband->channels = sc->channels;
01003                 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
01004                                         AR5K_MODE_11B, max_c);
01005 
01006                 count_c = sband->n_channels;
01007                 max_c -= count_c;
01008         }
01009 
01010         /* 5GHz band, A mode */
01011         if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
01012                 sband = &sc->sbands[IEEE80211_BAND_5GHZ];
01013                 sband->band = IEEE80211_BAND_5GHZ;
01014                 sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
01015 
01016                 memcpy(sband->bitrates, &ath5k_rates[4],
01017                        sizeof(struct ieee80211_rate) * 8);
01018                 sband->n_bitrates = 8;
01019 
01020                 sband->channels = &sc->channels[count_c];
01021                 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
01022                                         AR5K_MODE_11A, max_c);
01023 
01024         }
01025 
01026         ath5k_debug_dump_bands(sc);
01027 
01028         return 0;
01029 }
01030 
01031 static void
01032 ath5k_mode_setup(struct ath5k_softc *sc)
01033 {
01034         struct ath5k_hw *ah = sc->ah;
01035         u32 rfilt;
01036 
01037         /* configure rx filter */
01038         rfilt = sc->filter_flags;
01039         ath5k_hw_set_rx_filter(ah, rfilt);
01040 
01041         if (ath5k_hw_hasbssidmask(ah))
01042                 ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
01043 
01044         /* configure operational mode */
01045         ath5k_hw_set_opmode(ah);
01046 
01047         ath5k_hw_set_mcast_filter(ah, 0, 0);
01048         ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
01049 }
01050 
01051 /***************\
01052 * Buffers setup *
01053 \***************/
01054 
01055 static
01056 struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
01057 {
01058         struct sk_buff *skb;
01059         unsigned int off;
01060 
01061         /*
01062          * Allocate buffer with headroom_needed space for the
01063          * fake physical layer header at the start.
01064          */
01065         skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1);
01066 
01067         if (!skb) {
01068                 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
01069                                 sc->rxbufsize + sc->cachelsz - 1);
01070                 return NULL;
01071         }
01072         /*
01073          * Cache-line-align.  This is important (for the
01074          * 5210 at least) as not doing so causes bogus data
01075          * in rx'd frames.
01076          */
01077         off = ((unsigned long)skb->data) % sc->cachelsz;
01078         if (off != 0)
01079                 skb_reserve(skb, sc->cachelsz - off);
01080 
01081         *skb_addr = pci_map_single(sc->pdev,
01082                 skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
01083 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
01084         if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
01085 #else
01086         if (unlikely(pci_dma_mapping_error(*skb_addr))) {
01087 #endif
01088                 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
01089                 dev_kfree_skb(skb);
01090                 return NULL;
01091         }
01092         return skb;
01093 }
01094 
01095 static int
01096 ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
01097 {
01098         struct ath5k_hw *ah = sc->ah;
01099         struct sk_buff *skb = bf->skb;
01100         struct ath5k_desc *ds;
01101 
01102         if (!skb) {
01103                 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
01104                 if (!skb)
01105                         return -ENOMEM;
01106                 bf->skb = skb;
01107         }
01108 
01109         /*
01110          * Setup descriptors.  For receive we always terminate
01111          * the descriptor list with a self-linked entry so we'll
01112          * not get overrun under high load (as can happen with a
01113          * 5212 when ANI processing enables PHY error frames).
01114          *
01115          * To ensure the last descriptor is self-linked we create
01116          * each descriptor as self-linked and add it to the end.  As
01117          * each additional descriptor is added the previous self-linked
01118          * entry is ``fixed'' naturally.  This should be safe even
01119          * if DMA is happening.  When processing RX interrupts we
01120          * never remove/process the last, self-linked, entry on the
01121          * descriptor list.  This ensures the hardware always has
01122          * someplace to write a new frame.
01123          */
01124         ds = bf->desc;
01125         ds->ds_link = bf->daddr;        /* link to self */
01126         ds->ds_data = bf->skbaddr;
01127         ah->ah_setup_rx_desc(ah, ds,
01128                 skb_tailroom(skb),      /* buffer size */
01129                 0);
01130 
01131         if (sc->rxlink != NULL)
01132                 *sc->rxlink = bf->daddr;
01133         sc->rxlink = &ds->ds_link;
01134         return 0;
01135 }
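/*
 * Resulting chain after three buffers have been set up (ds_link values):
 *   bf0 -> bf1 -> bf2 -> bf2   (the last entry always points to itself)
 */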
01136 
01137 /* Context: user context (UC)
01138  *
01139  * What can happen:
01140  * - Worst case, a hardIRQ preempts us on this CPU -> spin_lock_irq
01141  */
01142 static int
01143 ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
01144 {
01145         struct ath5k_hw *ah = sc->ah;
01146         struct ath5k_txq *txq = sc->txq;
01147         struct ath5k_desc *ds = bf->desc;
01148         struct sk_buff *skb = bf->skb;
01149         struct ieee80211_tx_info *info = &sc->tx_info;
01150         unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
01151         struct ieee80211_rate *rate;
01152         int ret;
01153         u16 hw_rate;
01154         u16 cts_rate = 0;
01155         u16 duration = 0;
01156 
01157         flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
01158 
01159         /* XXX endianness */
01160         bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
01161                         PCI_DMA_TODEVICE);
01162         
01163         if (info->rate_idx >= 0 && info->rate_idx < AR5K_MAX_RATES)
01164                 rate = &sc->sbands[info->band].bitrates[info->rate_idx];
01165         else
01166         {
01167                 ATH5K_ERR(sc, "Rate index invalid\n");
01168                 return -1;
01169         }
01170 
01171         if (!info->wait_for_ack)
01172                 flags |= AR5K_TXDESC_NOACK;
01173 
01174         hw_rate = info->use_short_preamble ? rate->hw_value_short : rate->hw_value;
01175 
01176         pktlen = skb->len;
01177 
01178         /* FIXME: If we are in g mode and rate is a CCK rate
01179          * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
01180          * from tx power (value is in dB units already) */
01181         ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
01182                 ath5k_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
01183                 (sc->power_level * 2),
01184                 hw_rate,
01185                 info->count, keyidx, ah->ah_tx_ant, flags,
01186                 cts_rate, duration);
01187         if (ret)
01188                 goto err_unmap;
01189 
01190         ds->ds_link = 0;
01191         ds->ds_data = bf->skbaddr;
01192 
01193         spin_lock_irq(&txq->lock);
01194         list_add_tail(&bf->list, &txq->q);
01195         if (txq->link == NULL) /* is this first packet? */
01196                 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
01197         else /* no, so only link it */
01198                 *txq->link = bf->daddr;
01199 
01200         txq->link = &ds->ds_link;
01201         ath5k_hw_start_tx_dma(ah, txq->qnum);
01202         mmiowb();
01203         spin_unlock_irq(&txq->lock);
01204 
01205         return 0;
01206 err_unmap:
01207         pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
01208         return ret;
01209 }
01210 
01211 /*******************\
01212 * Descriptors setup *
01213 \*******************/
01214 
01215 static int
01216 ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
01217 {
01218         struct ath5k_desc *ds;
01219         struct ath5k_buf *bf;
01220         dma_addr_t da;
01221         unsigned int i;
01222         int ret;
01223 
01224         /* allocate descriptors */
01225         sc->desc_len = sizeof(struct ath5k_desc) *
01226                         (ATH_TXBUF + ATH_RXBUF + 1);
01227         sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr);
01228         if (sc->desc == NULL) {
01229                 ATH5K_ERR(sc, "can't allocate descriptors\n");
01230                 ret = -ENOMEM;
01231                 goto err;
01232         }
01233         ds = sc->desc;
01234         da = sc->desc_daddr;
01235         ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
01236                 ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
01237 
01238         bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF,
01239                         sizeof(struct ath5k_buf), GFP_KERNEL);
01240         if (bf == NULL) {
01241                 ATH5K_ERR(sc, "can't allocate bufptr\n");
01242                 ret = -ENOMEM;
01243                 goto err_free;
01244         }
01245         sc->bufptr = bf;
01246 
01247         INIT_LIST_HEAD(&sc->rxbuf);
01248         for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
01249                 bf->desc = ds;
01250                 bf->daddr = da;
01251                 list_add_tail(&bf->list, &sc->rxbuf);
01252         }
01253 
01254         INIT_LIST_HEAD(&sc->txbuf);
01255         sc->txbuf_len = ATH_TXBUF;
01256         for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
01257                         da += sizeof(*ds)) {
01258                 bf->desc = ds;
01259                 bf->daddr = da;
01260                 list_add_tail(&bf->list, &sc->txbuf);
01261         }
01262 
01263         return 0;
01264 err_free:
01265         pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
01266 err:
01267         sc->desc = NULL;
01268         return ret;
01269 }
01270 
01271 static void
01272 ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
01273 {
01274         struct ath5k_buf *bf;
01275 
01276         list_for_each_entry(bf, &sc->txbuf, list)
01277                 ath5k_txbuf_free(sc, bf);
01278         list_for_each_entry(bf, &sc->rxbuf, list)
01279                 ath5k_rxbuf_free(sc, bf);
01280 
01281         /* Free memory associated with all descriptors */
01282         pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
01283 
01284         kfree(sc->bufptr);
01285         sc->bufptr = NULL;
01286 }
01287 
01288 
01289 
01290 
01291 
01292 /**************\
01293 * Queues setup *
01294 \**************/
01295 
01296 static struct ath5k_txq *
01297 ath5k_txq_setup(struct ath5k_softc *sc,
01298                 int qtype, int subtype)
01299 {
01300         struct ath5k_hw *ah = sc->ah;
01301         struct ath5k_txq *txq;
01302         struct ath5k_txq_info qi = {
01303                 .tqi_subtype = subtype,
01304                 .tqi_aifs = AR5K_TXQ_USEDEFAULT,
01305                 .tqi_cw_min = AR5K_TXQ_USEDEFAULT,
01306                 .tqi_cw_max = AR5K_TXQ_USEDEFAULT
01307         };
01308         int qnum;
01309 
01310         /*
01311          * Enable interrupts only for EOL and DESC conditions.
01312          * We mark tx descriptors to receive a DESC interrupt
01313          * when a tx queue gets deep; otherwise waiting for the
01314          * EOL to reap descriptors.  Note that this is done to
01315          * reduce interrupt load and this only defers reaping
01316          * descriptors, never transmitting frames.  Aside from
01317          * reducing interrupts this also permits more concurrency.
01318          * The only potential downside is if the tx queue backs
01319          * up in which case the top half of the kernel may backup
01320          * due to a lack of tx descriptors.
01321          */
01322         qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
01323                                 AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
01324         qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
01325         if (qnum < 0) {
01326                 /*
01327                  * NB: don't print a message, this happens
01328                  * normally on parts with too few tx queues
01329                  */
01330                 return ERR_PTR(qnum);
01331         }
01332         if (qnum >= ARRAY_SIZE(sc->txqs)) {
01333                 ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
01334                         qnum, ARRAY_SIZE(sc->txqs));
01335                 ath5k_hw_release_tx_queue(ah, qnum);
01336                 return ERR_PTR(-EINVAL);
01337         }
01338         txq = &sc->txqs[qnum];
01339         if (!txq->setup) {
01340                 txq->qnum = qnum;
01341                 txq->link = NULL;
01342                 INIT_LIST_HEAD(&txq->q);
01343                 spin_lock_init(&txq->lock);
01344                 txq->setup = true;
01345         }
01346         return &sc->txqs[qnum];
01347 }
01348 
01349 /* Context: user context (UC) and softIRQ
01350  *
01351  * What can happen:
01352  * - A hardIRQ interrupts us -> spin_lock_irq
01353  * - We get preempted off the CPU -> spin_lock
01354  * - A hardIRQ, softIRQ or UC runs on another CPU -> spin_lock
01355  */
01356 static void
01357 ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
01358 {
01359         struct ath5k_buf *bf, *bf0;
01360 
01361         /*
01362          * NB: this assumes output has been stopped and
01363          *     we do not need to block ath5k_tx_tasklet
01364          */
01365         spin_lock_irq(&txq->lock);
01366         list_for_each_entry_safe(bf, bf0, &txq->q, list) {
01367                 ath5k_debug_printtxbuf(sc, bf);
01368 
01369                 ath5k_txbuf_free(sc, bf);
01370 
01371                 spin_lock_bh(&sc->txbuflock);
01372                 list_move_tail(&bf->list, &sc->txbuf);
01373                 sc->txbuf_len++;
01374                 spin_unlock_bh(&sc->txbuflock);
01375         }
01376         txq->link = NULL;
01377         spin_unlock_irq(&txq->lock);
01378 }
01379 
01380 /*
01381  * Drain the transmit queues and reclaim resources.
01382  */
01383 static void
01384 ath5k_txq_cleanup(struct ath5k_softc *sc)
01385 {
01386         struct ath5k_hw *ah = sc->ah;
01387         unsigned int i;
01388 
01389         /* XXX return value */
01390         if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) {
01391                 /* don't touch the hardware if marked invalid */
01392                 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
01393                         if (sc->txqs[i].setup) {
01394                                 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
01395                                 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
01396                                         "link %p\n",
01397                                         sc->txqs[i].qnum,
01398                                         ath5k_hw_get_txdp(ah,
01399                                                         sc->txqs[i].qnum),
01400                                         sc->txqs[i].link);
01401                         }
01402         }
01403         netif_wake_queue(sc->netdev);
01404 
01405         for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
01406                 if (sc->txqs[i].setup)
01407                         ath5k_txq_drainq(sc, &sc->txqs[i]);
01408 }
01409 
01410 static void
01411 ath5k_txq_release(struct ath5k_softc *sc)
01412 {
01413         struct ath5k_txq *txq = sc->txqs;
01414         unsigned int i;
01415 
01416         for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
01417                 if (txq->setup) {
01418                         ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
01419                         txq->setup = false;
01420                 }
01421 }
01422 
01423 
01424 
01425 
01426 /*************\
01427 * RX Handling *
01428 \*************/
01429 
01430 /*
01431  * Enable the receive h/w following a reset.
01432  *
01433  * SoftIRQ and user context (UC).
01434  *
01435  * While running on one CPU the following can happen:
01436  *  - We are interrupted by a hardIRQ while in softIRQ or UC (block IRQs)
01437  *  - Another CPU runs a softIRQ while we are in UC (spinlock)
01438  *  - Another CPU runs UC while we are in UC (spinlock)
01439  */
01440 static int
01441 ath5k_rx_start(struct ath5k_softc *sc)
01442 {
01443         struct ath5k_hw *ah = sc->ah;
01444         struct ath5k_buf *bf;
01445         int ret;
01446 
01447         sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->cachelsz);
01448 
01449         ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n",
01450                 sc->cachelsz, sc->rxbufsize);
01451 
01452         spin_lock_irq(&sc->rxbuflock);
01453         sc->rxlink = NULL;
01454         list_for_each_entry(bf, &sc->rxbuf, list) {
01455                 ret = ath5k_rxbuf_setup(sc, bf);
01456                 if (ret != 0) {
01457                         spin_unlock_irq(&sc->rxbuflock);
01458                         goto err;
01459                 }
01460         }
01461         bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
01462         ath5k_hw_set_rxdp(ah, bf->daddr);
01463         spin_unlock_irq(&sc->rxbuflock);
01464 
01465         ath5k_hw_start_rx_dma(ah);      /* enable recv descriptors */
01466         ath5k_mode_setup(sc);           /* set filters, etc. */
01467         ath5k_hw_start_rx_pcu(ah);      /* re-enable PCU/DMA engine */
01468 
01469         return 0;
01470 err:
01471         return ret;
01472 }
01473 
01474 /*
01475  * Disable the receive h/w in preparation for a reset.
01476  */
01477 static void
01478 ath5k_rx_stop(struct ath5k_softc *sc)
01479 {
01480         struct ath5k_hw *ah = sc->ah;
01481 
01482         ath5k_hw_stop_rx_pcu(ah);       /* disable PCU */
01483         ath5k_hw_set_rx_filter(ah, 0);  /* clear recv filter */
01484         ath5k_hw_stop_rx_dma(ah);       /* disable DMA engine */
01485 
01486         ath5k_debug_printrxbuffs(sc, ah);
01487 
01488         sc->rxlink = NULL;              /* just in case */
01489 }
01490 
01491 /*
01492  * hardIRQ context.
01493  *
01494  * While running on one CPU the following can happen:
01495  *
01496  *  - The kernel guarantees that an ISR does not run simultaneously on
01497  *    several CPUs, and a tasklet cannot interrupt an ISR.
01498  *  - A tasklet or UC may be running on another CPU, so we must protect
01499  *    with a spin_lock and wait for that task to finish on the other CPU.
01500  *
01501  * Note also that this function allocates memory with dev_alloc_skb; that
01502  * is allowed here because the allocation uses GFP_ATOMIC, which the
01503  * kernel guarantees will not sleep.
01504  */
01505 static void
01506 ath5k_rx_done(struct ath5k_softc *sc)
01507 {
01508         struct ieee80211_rx_status rxs = {};
01509         struct ath5k_rx_status rs = {};
01510         struct sk_buff *skb, *next_skb;
01511         dma_addr_t next_skb_addr;
01512         struct ath5k_buf *bf;
01513         struct ath5k_desc *ds;
01514         int ret;
01515         int hdrlen;
01516         int padsize;
01517 
01518         unsigned char dst[ETH_ALEN];
01519         unsigned char src[ETH_ALEN];
01520         u16 ethertype, frame_control;
01521         struct ieee80211_hdr *hdr;
01522         unsigned char *payload;
01523 
01524         spin_lock(&sc->rxbuflock);
01525         if (list_empty(&sc->rxbuf)) {
01526                 ATH5K_WARN(sc, "empty rx buf pool\n");
01527                 goto unlock;
01528         }
01529         do {
01530                 rxs.flag = 0;
01531 
01532                 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
01533                 BUG_ON(bf->skb == NULL);
01534                 skb = bf->skb;
01535                 ds = bf->desc;
01536 
01537                 /* bail if HW is still using self-linked descriptor */
01538                 if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
01539                         break;
01540 
01541                 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
01542 
01543                 if (unlikely(ret == -EINPROGRESS))
01544                         break;
01545                 else if (unlikely(ret)) {
01546                         ATH5K_ERR(sc, "error in processing rx descriptor\n");
01547                         goto unlock;
01548                 }
01549 
01550                 if (unlikely(rs.rs_more)) {
01551                         ATH5K_WARN(sc, "unsupported jumbo\n");
01552                         goto next;
01553                 }
01554 
01555                 if (unlikely(rs.rs_status)) {
01556                         if (rs.rs_status & AR5K_RXERR_PHY)      /* PHY error */
01557                                 goto next;
01558                         if (rs.rs_status & AR5K_RXERR_CRC)      /* CRC error */
01559                                 goto next;
01560                         if (rs.rs_status & AR5K_RXERR_DECRYPT)  /* decryption error */
01561                                 goto next;
01562                         if (rs.rs_status & AR5K_RXERR_MIC) {
01563                                 /* MIC failure: pass the frame up anyway */
01564                                 goto accept;
01565                         }
01566                 }
01570 accept:
01571                 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
01572 
01573                 /*
01574                  * If we can't replace bf->skb with a new skb under memory
01575                  * pressure, just skip this packet
01576                  */
01577                 if (!next_skb)
01578                         goto next;
01579 
01580                 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
01581                                 PCI_DMA_FROMDEVICE);
01582                 skb_put(skb, rs.rs_datalen);
01583 
01584                 /* The MAC header is padded to have 32-bit boundary if the
01585                  * packet payload is non-zero. The general calculation for
01586                  * padsize would take into account odd header lengths:
01587                  * padsize = (4 - hdrlen % 4) % 4; However, since only
01588                  * even-length headers are used, padding can only be 0 or 2
01589                  * bytes and we can optimize this a bit. In addition, we must
01590                  * not try to remove padding from short control frames that do
01591                  * not have payload. */
01592                 hdrlen = ath5k_get_hdrlen_from_skb(skb);
01593                 padsize = ath5k_pad_size(hdrlen);
01594                 if (padsize) {
01595                         memmove(skb->data + padsize, skb->data, hdrlen);
01596                         skb_pull(skb, padsize);
01597                 }
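                /* Worked example (a sketch of the rule above): a 24-byte data
                 * header gives padsize 0, while a 26-byte QoS data header gives
                 * padsize 2, so two pad bytes are stripped between header and
                 * payload. */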
01598 
01599                 rxs.freq = sc->curchan->center_freq;
01600                 rxs.band = sc->curband->band;
01601 
01602                 rxs.noise = sc->ah->ah_noise_floor;
01603                 rxs.signal = rxs.noise + rs.rs_rssi;
01604 
01605                 /* An rssi of 35 indicates you should be able use
01606                  * 54 Mbps reliably. A more elaborate scheme can be used
01607                  * here but it requires a map of SNR/throughput for each
01608                  * possible mode used */
01609                 rxs.qual = rs.rs_rssi * 100 / 35;
01610 
01614                 /* rssi can be more than 35 though, anything above that
01615                  * should be considered at 100% */
01616                 if (rxs.qual > 100)
01617                         rxs.qual = 100;
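                /* e.g. rs_rssi 17 maps to qual ~48%, and any rs_rssi of 35 or
                 * more saturates at 100% */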
01618 
01619                 rxs.antenna = rs.rs_antenna;
01620 
01621                 ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
01622 
01623                 /* skb contains the frame in IEEE 802.11 format, as it was sent over the air */
01624                 
01625                 hdr = (struct ieee80211_hdr *)skb->data;
01626                 frame_control = le16_to_cpu(hdr->frame_control);
01627 
01628                 /* Frames shorter than a 3-address 802.11 header carry no payload; skip them */
01629                 if (skb->len < 24)
01630                         goto next;
01631 
01632                 /* Extract src/dst addresses */
01633                 switch (frame_control & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
01634                         case IEEE80211_FCTL_FROMDS:
01635                                 memcpy(dst, hdr->addr1, ETH_ALEN);
01636                                 memcpy(src, hdr->addr3, ETH_ALEN);
01637                                 break;
01638                         case IEEE80211_FCTL_TODS:
01639                                 memcpy(dst, hdr->addr3, ETH_ALEN);
01640                                 memcpy(src, hdr->addr2, ETH_ALEN);
01641                                 break;
01642                         case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
01643                                 if (skb->len < IEEE80211_4ADDR_LEN)
01644                                         goto next;
01645                                 memcpy(dst, hdr->addr3, ETH_ALEN);
01646                                 memcpy(src, hdr->addr4, ETH_ALEN);
01647                                 break;
01648                         case 0:
01649                                 memcpy(dst, hdr->addr1, ETH_ALEN);
01650                                 memcpy(src, hdr->addr2, ETH_ALEN);
01651                                 break;
01652                 }
01653        
01654                 payload = skb->data + hdrlen;
01655                 ethertype = (payload[6] << 8) | payload[7];
01656 
01657                 /* convert hdr + possible LLC headers into Ethernet header */
01658                 if (skb->len - hdrlen >= 8)
01659                 {
01660                         /* remove RFC1042 or Bridge-Tunnel encapsulation and
01661                          * replace EtherType */
01662                         skb_pull(skb, hdrlen + SNAP_SIZE);
01663                         memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
01664                         memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
01665                 } else {
01666                         goto next;
01667                 }
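                /*
                 * At this point the buffer has been rewritten from
                 *   [24-byte 802.11 hdr][LLC/SNAP (6)][EtherType (2)][payload]
                 * into a plain Ethernet frame
                 *   [dst (6)][src (6)][EtherType (2)][payload]
                 * by dropping the 802.11 header plus LLC/SNAP and pushing back
                 * the addresses saved above.
                 */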
01668                 
01669                 /* XXX: RT-WMP extension: stash RSSI and noise in the first two payload bytes */
01670                 skb->data[14] = (char)rs.rs_rssi;
01671                 skb->data[15] = (char)rxs.noise;
01672 
01673                 /* Set skb fields and send to linux network layer */
01674                 if (skb) 
01675                 {
01676                         skb->dev = sc->netdev;
01677                         skb->protocol = eth_type_trans(skb, sc->netdev);
01678                         skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
01679                         
01680                         netif_rx(skb);
01681                 }
01682 
01683                 bf->skb = next_skb;
01684                 bf->skbaddr = next_skb_addr;
01685 next:
01686                 list_move_tail(&bf->list, &sc->rxbuf);
01687         } while (ath5k_rxbuf_setup(sc, bf) == 0);
01688 unlock:
01689         spin_unlock(&sc->rxbuflock);
01690 }
01691 
01692 
01693 
01694 
01695 /*************\
01696 * TX Handling *
01697 \*************/
01698 
01699 /*
01700  * hardIRQ context
01701  *
01702  * While this runs the following can happen:
01703  *
01704  * - The kernel guarantees that the interrupt routine will not run on
01705  *   other CPUs at the same time.
01706  * - A tasklet or UC may be running on another CPU; we must take a
01707  *   spin_lock and wait for them to finish.
01708  */
01709 static void
01710 ath5k_tx_done(struct ath5k_softc *sc, struct ath5k_txq *txq)
01711 {
01712         struct ath5k_tx_status ts = {};
01713         struct ath5k_buf *bf, *bf0;
01714         struct ath5k_desc *ds;
01715         struct sk_buff *skb;
01716         int ret;
01717 
01718         spin_lock(&txq->lock);
01719         list_for_each_entry_safe(bf, bf0, &txq->q, list) {
01720                 ds = bf->desc;
01721 
01722                 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
01723                 if (unlikely(ret == -EINPROGRESS))
01724                         break;
01725                 else if (unlikely(ret)) {
01726                         ATH5K_ERR(sc, "error %d while processing queue %u\n",
01727                                 ret, txq->qnum);
01728                         break;
01729                 }
01730 
01731                 skb = bf->skb;
01732                 bf->skb = NULL;
01733 
01734                 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
01735                                 PCI_DMA_TODEVICE);
01736                 dev_kfree_skb_any(skb);
01737 
01738                 if (unlikely(ts.ts_status)) 
01739                         sc->ll_stats.dot11ACKFailureCount++;
01740 
01741                 spin_lock(&sc->txbuflock);
01742                 list_move_tail(&bf->list, &sc->txbuf);
01743                 sc->txbuf_len++;
01744                 spin_unlock(&sc->txbuflock);
01745         }
01746         if (likely(list_empty(&txq->q)))
01747                 txq->link = NULL;
01748         spin_unlock(&txq->lock);
01749         if (sc->txbuf_len > ATH_TXBUF / 5)
01750                 netif_wake_queue(sc->netdev);
01751 }
01752 
01753 
01754 /********************\
01755 * Interrupt handling *
01756 \********************/
01757 
01758 static int
01759 ath5k_init(struct ath5k_softc *sc)
01760 {
01761         struct ath5k_hw *ah = sc->ah;
01762         int ret;
01763 
01764         mutex_lock(&sc->lock);
01765 
01766         /*
01767          * Stop anything previously set up.  This is safe
01768          * whether or not this is the first time through.
01769          */
01770         ath5k_stop_locked(sc);
01771 
01772         /*
01773          * The basic interface to setting the hardware in a good
01774          * state is ``reset''.  On return the hardware is known to
01775          * be powered up and with interrupts disabled.  This must
01776          * be followed by initialization of the appropriate bits
01777          * and then setup of the interrupt mask.
01778          */
01779         sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
01780                 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
01781                 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI;
01782         ret = ath5k_reset(sc, sc->curchan);
01783         if (ret)
01784                 goto done;
01785 
01786         /* Set ack to be sent at low bit-rates */
01787         ath5k_hw_set_ack_bitrate_high(ah, false);
01788 
01789         /* Set PHY calibration interval */
01790         ah->ah_cal_intval = ath5k_calinterval;
01791 
01792         ret = 0;
01793 done:
01794         mmiowb();
01795         mutex_unlock(&sc->lock);
01796         return ret;
01797 }
01798 
01799 static int
01800 ath5k_stop_locked(struct ath5k_softc *sc)
01801 {
01802         struct ath5k_hw *ah = sc->ah;
01803 
01804         ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
01805                         test_bit(ATH_STAT_INVALID, sc->status));
01806 
01807         /*
01808          * Shutdown the hardware and driver:
01809          *    stop output from above
01810          *    disable interrupts
01811          *    turn off timers
01812          *    turn off the radio
01813          *    clear transmit machinery
01814          *    clear receive machinery
01815          *    drain and release tx queues
01816          *    reclaim beacon resources
01817          *    power down hardware
01818          *
01819          * Note that some of this work is not possible if the
01820          * hardware is gone (invalid).
01821          */
01822         netif_stop_queue(sc->netdev);
01823 
01824         if (!test_bit(ATH_STAT_INVALID, sc->status)) {
01825                 ath5k_hw_set_imr(ah, 0);
01826                 synchronize_irq(sc->pdev->irq);
01827         }
01828         ath5k_txq_cleanup(sc);
01829         if (!test_bit(ATH_STAT_INVALID, sc->status)) {
01830                 ath5k_rx_stop(sc);
01831                 ath5k_hw_phy_disable(ah);
01832         } else
01833                 sc->rxlink = NULL;
01834 
01835         return 0;
01836 }
01837 
01838 /*
01839  * Stop the device, grabbing the top-level lock to protect
01840  * against concurrent entry through ath5k_init (which can happen
01841  * if another thread does a system call and the thread doing the
01842  * stop is preempted).
01843  */
01844 static int
01845 ath5k_stop_hw(struct ath5k_softc *sc)
01846 {
01847         int ret;
01848 
01849         mutex_lock(&sc->lock);
01850         ret = ath5k_stop_locked(sc);
01851         if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
01852                 /*
01853                  * Don't set the card in full sleep mode!
01854                  *
01855                  * a) When the device is in this state it must be carefully
01856                  * woken up or references to registers in the PCI clock
01857                  * domain may freeze the bus (and system).  This varies
01858                  * by chip and is mostly an issue with newer parts
01859                  * (madwifi sources mentioned srev >= 0x78) that go to
01860                  * sleep more quickly.
01861                  *
01862                  * b) On older chips full sleep results in weird behaviour
01863                  * during wakeup. I tested various cards with srev < 0x78
01864                  * and they don't wake up after module reload, a second
01865                  * module reload is needed to bring the card up again.
01866                  *
01867                  * Until we figure out what's going on don't enable
01868                  * full chip reset on any chip (this is what Legacy HAL
01869                  * and Sam's HAL do anyway). Instead, perform a full reset
01870                  * on the device (same as initial state after attach) and
01871                  * leave it idle (keep MAC/BB on warm reset) */
01872                 ret = ath5k_hw_on_hold(sc->ah);
01873 
01874                 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
01875                                 "putting device to sleep\n");
01876         }
01877 
01878         mmiowb();
01879         mutex_unlock(&sc->lock);
01880 
01881         tasklet_kill(&sc->restq);
01882 //      tasklet_kill(&sc->calib);
01883 
01884         return ret;
01885 }
01886 
01887 static irqreturn_t
01888 ath5k_intr(int irq, void *dev_id)
01889 {
01890         struct net_device *netdev = (struct net_device *)dev_id;
01891         struct ath5k_softc *sc = netdev_priv(netdev);
01892         struct ath5k_hw *ah = sc->ah;
01893         enum ath5k_int status;
01894         unsigned int counter = 1000;
01895 
01896         if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
01897                                 !ath5k_hw_is_intr_pending(ah)))
01898                 return IRQ_NONE;
01899 
01900         do {
01901                 ath5k_hw_get_isr(ah, &status);          /* NB: clears IRQ too */
01902                 ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
01903                                 status, sc->imask);
01904                 if (unlikely(status & AR5K_INT_FATAL)) {
01905                         /*
01906                          * Fatal errors are unrecoverable.
01907                          * Typically these are caused by DMA errors.
01908                          */
01909                         ATH5K_ERR(sc, "Fatal error\n");
01910                         tasklet_schedule(&sc->restq);
01911                 } else if (unlikely(status & AR5K_INT_RXORN)) {
01912                         ATH5K_ERR(sc, "Fatal error\n");
01913                         tasklet_schedule(&sc->restq);
01914                 } else {
01915                         if (status & AR5K_INT_RXEOL) {
01916                                 /*
01917                                 * NB: the hardware should re-read the link when
01918                                 *     RXE bit is written, but it doesn't work at
01919                                 *     least on older hardware revs.
01920                                 */
01921                                 sc->rxlink = NULL;
01922                         }
01923                         if (status & AR5K_INT_TXURN) {
01924                                 /* bump tx trigger level */
01925                                 ath5k_hw_update_tx_triglevel(ah, true);
01926                         }
01927                         if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
01928                                 ath5k_rx_done(sc);
01929                         if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
01930                                         | AR5K_INT_TXERR | AR5K_INT_TXEOL))
01931                                 ath5k_tx_done(sc, sc->txq);
01932 //                      if (status & AR5K_INT_SWI)
01933 //                              tasklet_schedule(&sc->calib);
01934                         if (status & AR5K_INT_MIB) {
01935                                 /*
01936                                  * These stats are also used for ANI,
01937                                  * so perhaps they should be updated more often?
01938                                  */
01939                                 ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
01940                         }
01941                 }
01942         } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
01943 
01944         if (unlikely(!counter))
01945                 ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
01946 
01947         ath5k_hw_calibration_poll(ah);
01948 
01949         return IRQ_HANDLED;
01950 }
01951 
01952 static void
01953 ath5k_tasklet_reset(unsigned long data)
01954 {
01955         struct ath5k_softc *sc = (void *)data;
01956 
01957         ath5k_reset_wake(sc);
01958 }
01959 
01960 /*
01961  * Periodically recalibrate the PHY to account
01962  * for temperature/environment changes.
01963  */
01964 #if 0
01965 static void
01966 ath5k_tasklet_calibrate(unsigned long data)
01967 {
01968         struct ath5k_softc *sc = (void *)data;
01969         struct ath5k_hw *ah = sc->ah;
01970 
01971         /* Only full calibration for now */
01972         if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION)
01973                 return;
01974         /* Stop queues so that calibration
01975          * doesn't interfere with tx */
01976         netif_stop_queue(sc->netdev);
01977 
01978         ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
01979                 ath5k_frequency_to_channel(sc->curchan->center_freq),
01980                 sc->curchan->hw_value);
01981 
01982         if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
01983                 /*
01984                  * Rfgain is out of bounds, reset the chip
01985                  * to load new gain values.
01986                  */
01987                 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
01988                 ath5k_reset_wake(sc);
01989         }
01990         if (ath5k_hw_phy_calibrate(ah, sc->curchan))
01991                 ATH5K_ERR(sc, "calibration of channel %u failed\n",
01992                         ath5k_frequency_to_channel(
01993                                 sc->curchan->center_freq));
01994 
01995         ah->ah_swi_mask = 0;
01996 
01997         /* Wake queues */
01998         netif_wake_queue(sc->netdev);
01999 }
02000 #endif
02001 
02002 
02003 /**********************\
02004 * Interface functions *
02005 \**********************/
02006 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
02007 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
02008 
02009 static int ieee80211_copy_snap(u8 * data, unsigned short h_proto)
02010 {
02011         struct ieee80211_snap_hdr *snap;
02012         u8 *oui;
02013 
02014         snap = (struct ieee80211_snap_hdr *)data;
02015         snap->dsap = 0xaa;
02016         snap->ssap = 0xaa;
02017         snap->ctrl = 0x03;
02018 
02019         if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
02020                 oui = P802_1H_OUI;
02021         else
02022                 oui = RFC1042_OUI;
02023         snap->oui[0] = oui[0];
02024         snap->oui[1] = oui[1];
02025         snap->oui[2] = oui[2];
02026 
02027         memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));
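        /* Resulting 8-byte encapsulation: AA AA 03 <OUI (3 bytes)> <EtherType
         * (2 bytes, copied in the byte order it was passed in)> */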
02028 
02029         return SNAP_SIZE + sizeof(u16);
02030 }
02031 
02032 /*
02033  * Context? UC?
02034  *
02035  * What can happen:
02036  * - A hardIRQ preempts us -> spin_lock_irq (worst case)
02037  * - A softIRQ preempts us -> spin_lock_bh
02038  * - We get preempted off the CPU -> spin_lock
02039  * - Something else is running on another CPU -> spin_lock
02040  */
02041 static int
02042 ath5k_tx(struct sk_buff *skb, struct net_device *netdev)
02043 {
02044         struct ath5k_softc *sc = netdev_priv(netdev);
02045         struct ath5k_buf *bf;
02046 //      unsigned long flags;
02047         int hdrlen;
02048         int padsize;
02049 
02050         struct sk_buff *skb_new;
02051         unsigned short ethertype;
02052         struct ieee80211_hdr header;
02053         int frame_control;
02054         unsigned char dst[ETH_ALEN];
02055         unsigned char src[ETH_ALEN];
02056 
02057         /* Save source and destination addresses and ethertype */
02058         skb_copy_from_linear_data(skb, dst, ETH_ALEN);
02059         skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);
02060         ethertype = ((struct ethhdr *)skb->data)->h_proto;
02061 
02062         /* In ADHOC mode, no From/To DS, Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
02063         memcpy(header.addr1, dst, ETH_ALEN);
02064         memcpy(header.addr2, src, ETH_ALEN);
02065         memcpy(header.addr3, sc->ah->ah_bssid, ETH_ALEN);
02066         
02067         if (use_beacon_frames){
02068                 frame_control = IEEE80211_STYPE_BEACON | IEEE80211_STYPE_DATA;
02069         } else{
02070                 frame_control = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
02071         }
02072 
02073         header.frame_control = cpu_to_le16(frame_control);
02074 
02075         header.duration_id = 0;
02076         header.seq_ctrl = 0;    /* avoid copying uninitialized stack memory into the frame */
02077 
02078         hdrlen = 24;
02079 
02080         /* Advance the SKB to the start of the payload */
02081         skb_pull(skb, sizeof(struct ethhdr));
02082 
02083         /* Allocate new skb */
02084         skb_new = dev_alloc_skb(hdrlen + SNAP_SIZE + sizeof(u16) + skb->len);
02085         if (!skb_new)
02086         {
02087                 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", 
02088                                 hdrlen + SNAP_SIZE + sizeof(u16) + skb->len);
02089                 return -ENOMEM;
02090         }
02091         memset(skb_new->data, 0, skb_new->len);
02092 
02093         /* Copy header */
02094         memcpy(skb_put(skb_new, hdrlen), &header, hdrlen);
02095 
02096         /* Copy SNAP and ethertype */
02097         ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)), ethertype);
02098 
02099         /* Copy payload */
02100         skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
02101         
02102         /* Free skb */
02103         dev_kfree_skb_any(skb);
02104         skb = skb_new;
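        /*
         * skb now holds the over-the-air frame:
         *   [24-byte 802.11 hdr][LLC/SNAP (6)][EtherType (2)][payload]
         * i.e. the inverse of the unwrapping done in ath5k_rx_done().
         */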
02105 
02106         ath5k_debug_dump_skb(sc, skb, "TX  ", 1);
02107         
02108         /*
02109          * the hardware expects the header padded to 4 byte boundaries
02110          * if this is not the case we add the padding after the header
02111          */
02112         hdrlen = ath5k_get_hdrlen_from_skb(skb);
02113         padsize = ath5k_pad_size(hdrlen);
02114         if (padsize) {
02115                 if (skb_headroom(skb) < padsize) {
02116                         ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
02117                                   " headroom to pad %d\n", hdrlen, padsize);
02118                         goto drop_packet;
02119                 }
02120                 skb_push(skb, padsize);
02121                 memmove(skb->data, skb->data+padsize, hdrlen);
02122         }
02123 
02124         spin_lock_irq(&sc->txbuflock);
02125         if (list_empty(&sc->txbuf)) {
02126                 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
02127                 spin_unlock_irq(&sc->txbuflock);
02128                 netif_stop_queue(sc->netdev);
02129                 goto drop_packet;
02130         }
02131         bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
02132         list_del(&bf->list);
02133         sc->txbuf_len--;
02134         if (list_empty(&sc->txbuf))
02135                 netif_stop_queue(sc->netdev);
02136         spin_unlock_irq(&sc->txbuflock);
02137 
02138         bf->skb = skb;
02139 
02140         if (ath5k_txbuf_setup(sc, bf)) {
02141                 bf->skb = NULL;
02142                 spin_lock_irq(&sc->txbuflock);
02143                 list_add_tail(&bf->list, &sc->txbuf);
02144                 sc->txbuf_len++;
02145                 spin_unlock_irq(&sc->txbuflock);
02146                 goto drop_packet;
02147         }
02148         return NETDEV_TX_OK;
02149 
02150 drop_packet:
02151         dev_kfree_skb_any(skb);
02152         return NETDEV_TX_OK;
02153 }
02154 
02155 /*
02156  * Reset the hardware.  If chan is not NULL, then also pause rx/tx
02157  * and change to the given channel.
02158  */
02159 static int
02160 ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
02161 {
02162         struct ath5k_hw *ah = sc->ah;
02163         int ret;
02164 
02165         ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
02166 
02167         if (chan) {
02168                 ath5k_hw_set_imr(ah, 0);
02169                 ath5k_txq_cleanup(sc);
02170                 ath5k_rx_stop(sc);
02171 
02172                 sc->curchan = chan;
02173                 sc->curband = &sc->sbands[chan->band];
02174         }
02175         ret = ath5k_hw_reset(ah, sc->curchan, true);
02176         if (ret) {
02177                 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
02178                 goto err;
02179         }
02180 
02181         ret = ath5k_rx_start(sc);
02182         if (ret) {
02183                 ATH5K_ERR(sc, "can't start recv logic\n");
02184                 goto err;
02185         }
02186 
02187         /* Enable interrupts */
02188         ath5k_hw_set_imr(ah, sc->imask);
02189 
02190         return 0;
02191 err:
02192         return ret;
02193 }
02194 
02195 static int
02196 ath5k_reset_wake(struct ath5k_softc *sc)
02197 {
02198         int ret;
02199 
02200         ret = ath5k_reset(sc, sc->curchan);
02201         if (!ret)
02202                 netif_wake_queue(sc->netdev);
02203 
02204         return ret;
02205 }
02206 
02207 static int ath5k_start(struct ath5k_softc *sc)
02208 {
02209         return ath5k_init(sc);
02210 }
02211 
02212 static void ath5k_stop(struct ath5k_softc *sc)
02213 {
02214         ath5k_stop_hw(sc);
02215 }
02216 
02217 static void ath5k_config_filter(struct ath5k_softc *sc, bool broadcast, bool control, bool promisc)
02218 {
02219         struct ath5k_hw *ah = sc->ah;
02220         unsigned int rfilt;
02221 
02222         rfilt = 0;
02223 
02224         /* Always enable Unicast */
02225         rfilt |= AR5K_RX_FILTER_UCAST;
02226 
02227         if (broadcast)
02228                 rfilt |= AR5K_RX_FILTER_BCAST;
02229 
02230         /* Allow control frames? */
02231         if (control)
02232                 rfilt |= AR5K_RX_FILTER_CONTROL;
02233 
02234         /* Promiscuous mode? */
02235         if (promisc)
02236         {
02237                 rfilt |= AR5K_RX_FILTER_PROM;
02238                 __set_bit (ATH_STAT_PROMISC, sc->status);
02239         }
02240         else
02241                 __clear_bit (ATH_STAT_PROMISC, sc->status);
02242 
02243         /* Set filters */
02244         ath5k_hw_set_rx_filter(ah, rfilt);
02245 
02246         /* Set multicast bits */
02247         ath5k_hw_set_mcast_filter(ah, 0, 0);
02248 
02249         /* Cache the hw filter flags; this is what will actually be set in HW later */
02250         sc->filter_flags = rfilt;
02251 }
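/* Example: ath5k_config_filter(sc, true, false, false) accepts unicast and
 * broadcast frames, drops control frames and leaves promiscuous mode off. */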
02252 
02253 static struct ieee80211_channel * ath5k_get_channel(struct ath5k_softc *sc, unsigned int freq)
02254 {
02255         struct ieee80211_channel *aux;
02256         int i, n_channels;
02257 
02258         aux = NULL;
02259         if (freq < 3000)
02260         {
02261                 aux = sc->sbands[IEEE80211_BAND_2GHZ].channels;
02262                 n_channels = sc->sbands[IEEE80211_BAND_2GHZ].n_channels;
02263         }
02264         else if (sc->sbands[IEEE80211_BAND_5GHZ].channels != NULL)
02265         {
02266                 aux = sc->sbands[IEEE80211_BAND_5GHZ].channels;
02267                 n_channels = sc->sbands[IEEE80211_BAND_5GHZ].n_channels;
02268         }
02269 
02270         if (aux != NULL)
02271         {
02272                 for (i = 0; i < n_channels; i++, aux++)
02273                         if (aux->center_freq == freq)
02274                                 return aux;
02275         }
02276 
02277         return NULL;    /* no channel with the requested center frequency */
02278 }
02279 
02280 static int ath5k_get_rate_idx(struct ath5k_softc *sc, enum ieee80211_band band, enum rates rate)
02281 {
02282         struct ieee80211_rate *r;
02283         int i;
02284         bool valid = false;
02285 
02286         r = sc->sbands[band].bitrates;
02287 
02288         if (r != NULL)
02289                 for (i = 0; i < sc->sbands[band].n_bitrates; i++, r++)
02292                 {
02293                         if (r->bitrate == rate)
02294                         {                       
02295                                 valid = true;
02296                                 break;
02297                         }
02298                 }
02299         if (valid)
02300                 return i;
02301         else
02302                 return -1;
02303 }
02304 
02305 static int ath5k_config_tx_control (struct ath5k_softc *sc, 
02306                 unsigned char count, bool wait_for_ack, bool use_short_preamble)
02307 {
02308         struct ieee80211_rate *rate;
02309 
02310         /* Clear flags */
02311         sc->tx_info.wait_for_ack = false;
02312         sc->tx_info.use_short_preamble = false;
02313 
02314         /* Set tx count if ack not received */
02315         sc->tx_info.count = count;
02316 
02317         /* Wait for ack flag */
02318         if (wait_for_ack)
02319                 sc->tx_info.wait_for_ack = true;
02320 
02321         /* Set rate code flags (short preamble) */
02322         rate = &sc->curband->bitrates[sc->tx_info.rate_idx];
02323         if (use_short_preamble && (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
02324                 sc->tx_info.use_short_preamble = true;
02325         
02326         return 0;
02327 }
02328 
02329 static int ath5k_config_disable_ack (struct ath5k_softc *sc, bool disable)
02330 {
02331         struct ath5k_hw *ah;
02332    
02333         if (sc == NULL)
02334                 return 1;       
02335         else
02336                 ah = sc->ah;
02337 
02338         if (disable)
02339                 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_ACK);
02340         else
02341                 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_ACK);
02342 
02343         return 0;
02344 }
02345 
02346 /*
02347  * Look up the requested channel/rate in sc->sbands and apply the new configuration
02348  */
02349 static int ath5k_config (struct ath5k_softc *sc, 
02350                 unsigned short channel, 
02351                 enum rates rate, 
02352                 unsigned char tx_power_dbm,
02353                 enum ath5k_ant_mode antenna_mode)
02354 {
02355         struct ath5k_hw *ah = sc->ah;
02356         struct ieee80211_channel *target_channel;
02357         int rate_idx;
02358         int ret = 0;
02359 
02360         mutex_lock(&sc->lock);
02361 
02362         /* Find the channel */
02363         target_channel = ath5k_get_channel(sc, channel);
02364         if (target_channel == NULL)
02365         {
02366                 ATH5K_ERR(sc, "Invalid channel\n");
02367                 ret = -1;
02368                 goto unlock;
02369         }
02370 
02371         /* Find the bitrate */
02372         rate_idx = ath5k_get_rate_idx(sc, target_channel->band, rate);
02373         if (rate_idx < 0 || rate_idx > AR5K_MAX_RATES)
02374         {
02375                 ATH5K_ERR(sc, "Invalid rate\n");
02376                 ret = -1;
02377                 goto unlock;
02378         }
02379 
02380         /* Configure the rate */
02381         sc->tx_info.band = sc->curband->band;
02382         sc->tx_info.rate_idx = rate_idx;
02383         sc->tx_info.count = 1;
02384         sc->tx_info.wait_for_ack = false;
02385         sc->tx_info.use_short_preamble = false;
02386 
02387         if (sc->power_level != tx_power_dbm)
02388         {
02389                 /* The hardware takes the tx power limit in half-dB steps */
02393                 if (ath5k_hw_set_txpower_limit(ah, (tx_power_dbm * 2)) == 0){
02394                         sc->power_level = tx_power_dbm;
02395                 }else{
02396                         ATH5K_ERR(sc, "Invalid power\n");
02397                         ret = -1;
02398                         goto unlock;
02399                 }
02400         }
02401 
02402         ath5k_hw_set_antenna_mode(ah, antenna_mode);
02403 
02404         /* Reset the hardware to apply the changes */
02405         ath5k_reset(sc, target_channel);
02406 
02407         ATH5K_INFO(sc, "Freq %d MHz, Rate (%d * 100) kbps, Power %d dBm, Ant. mode %d\n", 
02408                         channel, rate, tx_power_dbm, antenna_mode);
02409 
02410 unlock:
02411         mutex_unlock(&sc->lock);
02412 
02413         return ret;
02414 }
02415 
02416 static int ath5k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
02417 {
02418         struct ath5k_softc *sc;
02419         union ath5k_ioctl_info info;
02420         int ret;
02421         
02422         /* Retrieve the main driver structure */
02423         sc = netdev_priv(netdev);
02424 
02425         ret = 0;
02426         switch(cmd)
02427         {
02428                 case SIO_SEND_PACKET:
02429                         ATH5K_ERR(sc, "SIO_SEND_PACKET not implemented yet.\n");
02430                         ret = -1;
02431                         break;
02432 
02433                 case SIO_RECV_PACKET:
02434                         ATH5K_ERR(sc, "SIO_RECV_PACKET not implemented yet.\n");
02435                         ret = -1;
02436                         break;
02437 
02438                 case SIO_SET_CONFIG:
02439                         ret = copy_from_user(&info.config_info, 
02440                                         (struct ath5k_config_info *)ifr->ifr_data, 
02441                                         sizeof(struct ath5k_config_info));
02442                         if (ret)
02443                                 return ret;
02444 
02445                         ret = ath5k_config(sc, info.config_info.frequency, info.config_info.rate, info.config_info.tx_power_dbm,
02446                                         info.config_info.antenna_mode);
02447 
02448                         if (ret)
02449                                 return ret;
02450 
02451                         break;
02452 
02453                 case SIO_SET_DEBUG:
02454                         ret = copy_from_user(&info.debug_level,
02455                                         (unsigned int *)ifr->ifr_data, 
02456                                         sizeof(int));
02457                         if (ret)
02458                                 return ret;
02459 
02460                         sc->debug_level = info.debug_level;
02461 
02462                         break;
02463 
02464                 case SIO_SET_RXFILTER:
02465                         ret = copy_from_user(&info.rxfilter_info,
02466                                         (struct ath5k_rxfilter_info *)ifr->ifr_data,
02467                                         sizeof(struct ath5k_rxfilter_info));
02468 
02469                         if (ret)
02470                                 return ret;
02471 
02472                         ath5k_config_filter(sc, info.rxfilter_info.broadcast,
02473                                                                 info.rxfilter_info.control,
02474                                                                         info.rxfilter_info.promisc);
02475 
02476                         break;
02477 
02478                 case SIO_SET_TXCONTROL:
02479                         ret = copy_from_user(&info.txcontrol_info,
02480                                         (struct ath5k_txcontrol_info *)ifr->ifr_data,
02481                                         sizeof(struct ath5k_txcontrol_info));
02482 
02483                         if (ret)
02484                                 return ret;
02485 
02486                         ret = ath5k_config_tx_control(sc, info.txcontrol_info.count,
02487                                                                                           info.txcontrol_info.wait_for_ack,
02488                                                                                           info.txcontrol_info.use_short_preamble);
02489 
02490                         break;
02491 
02492 
02493                 case SIO_SET_DISABLEACK:
02494                         ret = copy_from_user(&info.disable_ack,
02495                                         (bool *)ifr->ifr_data,
02496                                         sizeof(bool));
02497 
02498                         if (ret)
02499                                 return ret;
02500 
02501                         ret = ath5k_config_disable_ack(sc, info.disable_ack);
02502 
02503                         break;
02504 
02505                 case SIO_SET_BSSID:
02506                         ATH5K_ERR(sc, "SIO_SET_BSSID not implemented yet\n");
02507                         ret = -1;
02508                         break;
02509 
02510                 case SIO_SET_BSSIDFILTER:
02511                         ATH5K_ERR(sc, "SIO_SET_BSSIDFILTER not implemented yet\n");
02512                         ret = -1;
02513                         break;
02514                 case SIO_SET_USE_BEACON_FRAMES:
02515                         ret = copy_from_user(&use_beacon_frames, (int *) ifr->ifr_data, sizeof(int));
02516 
02517                         if (ret)
02518                                 return ret;
02519 
02520                 break;
02521 
02522                         /*              case SIO_SET_POWER:
02523                         int val_dbm = 0;
02524                         ret = copy_from_user(&val_dbm,
02525                                                                 (int *)ifr->ifr_data,
02526                                                                 sizeof(int));
02527                         sc->power_level = val_dbm * 2;
02528                         break;*/
02529         }               
02530         return ret;
02531 }
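/*
 * Minimal user-space usage sketch for the SIO_SET_CONFIG ioctl above.  This is
 * illustrative only and not part of the driver: the ioctl number and the
 * ath5k_config_info layout are assumed to come from the driver's interface
 * header, and the rate value follows the "(%d * 100) kbps" convention printed
 * by ath5k_config() (e.g. 540 for 54 Mbps).
 *
 *      struct ath5k_config_info cfg = {
 *              .frequency    = 5200,                   // centre frequency in MHz
 *              .rate         = 540,                    // 54 Mbps, in 100 kbps units
 *              .tx_power_dbm = 15,                     // transmit power in dBm
 *              .antenna_mode = AR5K_ANTMODE_DEFAULT,   // let the hardware choose
 *      };
 *      struct ifreq ifr;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (void *)&cfg;
 *      if (ioctl(fd, SIO_SET_CONFIG, &ifr) < 0)
 *              perror("SIO_SET_CONFIG");
 */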
02532 

