dma.c
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/*
 * dma.c - DMA and interrupt masking functions
 *
 * Here we set up the descriptor pointers (rxdp/txdp), start/stop the DMA
 * engine and handle queue setup for the 5210 chipset (the rest are handled
 * in qcu.c). We also set up the interrupt mask register (IMR) and read the
 * various interrupt status registers (ISR).
 *
 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
 * number that caused the interrupt.
 */

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"

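/*
 * Illustrative call order (a sketch, not part of the original file; the
 * descriptor address variable below is hypothetical): a driver programs
 * the descriptor pointer before enabling the DMA engine and stops DMA
 * before tearing the descriptors down.
 *
 *      ath5k_hw_set_rxdp(ah, rx_ring_dma_addr);
 *      ath5k_hw_start_rx_dma(ah);
 *      ...
 *      ath5k_hw_stop_rx_dma(ah);
 */
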
/*********\
* Receive *
\*********/

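/**
 * ath5k_hw_start_rx_dma - Start DMA receive
 *
 * @ah: The &struct ath5k_hw
 *
 * Sets the RXE bit on the control register and reads it back to flush
 * the write.
 */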
void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
        ATH5K_TRACE(ah->ah_sc);
        ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
        ath5k_hw_reg_read(ah, AR5K_CR);
}

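/**
 * ath5k_hw_stop_rx_dma - Stop DMA receive
 *
 * @ah: The &struct ath5k_hw
 *
 * Sets the RXD bit on the control register and polls until the RXE bit
 * clears. Returns 0 on success or -EBUSY if the receive unit doesn't stop.
 */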
int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
        unsigned int i;

        ATH5K_TRACE(ah->ah_sc);
        ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

        /*
         * It may take some time to disable the DMA receive unit
         */
        for (i = 1000; i > 0 &&
                        (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
                        i--)
                udelay(10);

        return i ? 0 : -EBUSY;
}

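/**
 * ath5k_hw_get_rxdp - Get the RX descriptor pointer
 *
 * @ah: The &struct ath5k_hw
 *
 * Returns the physical address currently programmed in the RXDP register.
 */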
u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
        return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

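/**
 * ath5k_hw_set_rxdp - Set the RX descriptor pointer
 *
 * @ah: The &struct ath5k_hw
 * @phys_addr: Physical address of the first RX descriptor
 *
 * Programs the RXDP register; this should be done before receive DMA
 * is enabled.
 */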
void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
        ATH5K_TRACE(ah->ah_sc);

        ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
}


/**********\
* Transmit *
\**********/

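/**
 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hardware queue number
 *
 * On 5210 the queue is enabled by type through the control register
 * (there is no QCU); on 5211+ the queue's TXE bit is set on the QCU.
 * Returns 0 on success, -EIO if the queue is inactive or disabled and
 * -EINVAL for an unknown queue type.
 */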
int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
        u32 tx_queue;

        ATH5K_TRACE(ah->ah_sc);
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EIO;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /*
                 * Set the queue by type on 5210
                 */
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                        tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
                                        AR5K_BSR);
                        break;
                case AR5K_TX_QUEUE_CAB:
                        tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
                        ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
                                AR5K_BCR_BDMAE, AR5K_BSR);
                        break;
                default:
                        return -EINVAL;
                }
                /* Start queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {
                /* Return if queue is disabled */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
                        return -EIO;

                /* Start queue */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
        }

        return 0;
}

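/**
 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hardware queue number
 *
 * Schedules a TX disable and waits for the queue to drain. On 2413+
 * the QUIET mechanism is used to force the PCU to drop any remaining
 * frames. Returns 0 on success or -EBUSY if frames are still pending.
 */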
int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
        unsigned int i = 40;
        u32 tx_queue, pending;

        ATH5K_TRACE(ah->ah_sc);
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EIO;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /*
                 * Set by queue type
                 */
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        /* XXX: assumed to mirror the data-queue case
                         * (set the TXD1 disable bit, mask out TXE1) */
                        tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1;
                        ath5k_hw_reg_write(ah, 0, AR5K_BSR);
                        break;
                default:
                        return -EINVAL;
                }

                /* Stop queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {
                /*
                 * Schedule TX disable and wait until queue is empty
                 */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

                /* Check for pending frames */
                do {
                        pending = ath5k_hw_reg_read(ah,
                                AR5K_QUEUE_STATUS(queue)) &
                                AR5K_QCU_STS_FRMPENDCNT;
                        udelay(100);
                } while (--i && pending);

                /* For 2413+ order PCU to drop packets using
                 * QUIET mechanism */
                if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
                                pending) {
                        /* Set periodicity and duration */
                        ath5k_hw_reg_write(ah,
                                AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER) |
                                AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
                                AR5K_QUIET_CTL2);

                        /* Enable quiet period for current TSF */
                        ath5k_hw_reg_write(ah,
                                AR5K_QUIET_CTL1_QT_EN |
                                AR5K_REG_SM(ath5k_hw_reg_read(ah,
                                                AR5K_TSF_L32_5211) >> 10,
                                                AR5K_QUIET_CTL1_NEXT_QT_TSF),
                                AR5K_QUIET_CTL1);

                        /* Force channel idle high */
                        AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                        AR5K_DIAG_SW_CHANEL_IDLE_HIGH);

                        /* Wait a while and disable mechanism */
                        udelay(200);
                        AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
                                                AR5K_QUIET_CTL1_QT_EN);

                        /* Re-check for pending frames */
                        i = 40;
                        do {
                                pending = ath5k_hw_reg_read(ah,
                                        AR5K_QUEUE_STATUS(queue)) &
                                        AR5K_QCU_STS_FRMPENDCNT;
                                udelay(100);
                        } while (--i && pending);

                        AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                        AR5K_DIAG_SW_CHANEL_IDLE_HIGH);
                }

                /* Clear register */
                ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
                if (pending)
                        return -EBUSY;
        }

        /* TODO: Check for success on 5210 else return error */
        return 0;
}

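/**
 * ath5k_hw_get_txdp - Get the TX descriptor pointer of a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hardware queue number
 *
 * Reads the descriptor pointer from the per-type registers on 5210 or
 * from the queue's QCU register on 5211+.
 */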
u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
        u16 tx_reg;

        ATH5K_TRACE(ah->ah_sc);
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /*
         * Get the transmit queue descriptor pointer from the selected queue
         */
        /* 5210 doesn't have QCU */
        if (ah->ah_version == AR5K_AR5210) {
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_reg = AR5K_NOQCU_TXDP0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        tx_reg = AR5K_NOQCU_TXDP1;
                        break;
                default:
                        return 0xffffffff;
                }
        } else {
                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        return ath5k_hw_reg_read(ah, tx_reg);
}

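/**
 * ath5k_hw_set_txdp - Set the TX descriptor pointer of a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hardware queue number
 * @phys_addr: Physical address of the first TX descriptor
 *
 * Returns -EINVAL for an unknown queue type on 5210 and -EIO on 5211+
 * if the queue is still enabled, since TXDP cannot be updated while the
 * queue is active.
 */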
int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
        u16 tx_reg;

        ATH5K_TRACE(ah->ah_sc);
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /*
         * Set the transmit queue descriptor pointer register by type
         * on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                switch (ah->ah_txq[queue].tqi_type) {
                case AR5K_TX_QUEUE_DATA:
                        tx_reg = AR5K_NOQCU_TXDP0;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        tx_reg = AR5K_NOQCU_TXDP1;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                /*
                 * Set the transmit queue descriptor pointer for
                 * the selected queue on QCU for 5211+
                 * (this won't work if the queue is still active)
                 */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                        return -EIO;

                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        /* Set descriptor pointer */
        ath5k_hw_reg_write(ah, phys_addr, tx_reg);

        return 0;
}

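/**
 * ath5k_hw_update_tx_triglevel - Update the TX FIFO trigger level
 *
 * @ah: The &struct ath5k_hw
 * @increase: Raise the trigger level if true, lower it otherwise
 *
 * Adjusts the DMA trigger level with interrupts temporarily disabled.
 * Returns 0 on success or -EIO if lowering would cross the minimum
 * threshold.
 */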
int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
        u32 trigger_level, imr;
        int ret = -EIO;

        ATH5K_TRACE(ah->ah_sc);

        /*
         * Disable interrupts by setting the mask
         */
        imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

        trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
                        AR5K_TXCFG_TXFULL);

        if (!increase) {
                if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
                        goto done;
        } else
                trigger_level +=
                        ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

        /*
         * Update trigger level on success
         */
        if (ah->ah_version == AR5K_AR5210)
                ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
        else
                AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
                                AR5K_TXCFG_TXFULL, trigger_level);

        ret = 0;

done:
        /*
         * Restore interrupt mask
         */
        ath5k_hw_set_imr(ah, imr);

        return ret;
}

/*******************\
* Interrupt masking *
\*******************/

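/**
 * ath5k_hw_is_intr_pending - Check if we have pending interrupts
 *
 * @ah: The &struct ath5k_hw
 *
 * Reads the INTPEND register to see whether the device has raised an
 * interrupt.
 */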
bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
        ATH5K_TRACE(ah->ah_sc);
        return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}

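/**
 * ath5k_hw_get_isr - Get interrupt status and translate it to a driver mask
 *
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Where to store the resulting &enum ath5k_int mask
 *
 * Reads the ISR (5210) or the read-and-clear PISR/SISR shadow copies
 * (5211+), converts the hardware bits to abstract AR5K_INT_* flags and
 * records per-queue TX events in ah->ah_txq_isr. Returns -ENODEV if the
 * card is gone, 0 otherwise.
 */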
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
        u32 data;

        ATH5K_TRACE(ah->ah_sc);

        /*
         * Read interrupt status from the Interrupt Status register
         * on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                data = ath5k_hw_reg_read(ah, AR5K_ISR);
                if (unlikely(data == AR5K_INT_NOCARD)) {
                        *interrupt_mask = data;
                        return -ENODEV;
                }
        } else {
                /*
                 * Read interrupt status from Interrupt
                 * Status Register shadow copy (Read And Clear)
                 *
                 * Note: PISR/SISR Not available on 5210
                 */
                data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
                if (unlikely(data == AR5K_INT_NOCARD)) {
                        *interrupt_mask = data;
                        return -ENODEV;
                }
        }

        /*
         * Get abstract interrupt mask (driver-compatible)
         */
        *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;

        if (ah->ah_version != AR5K_AR5210) {
                u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);

                /* HIU = Host Interface Unit (PCI etc) */
                if (unlikely(data & (AR5K_ISR_HIUERR)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /* Beacon Not Ready */
                if (unlikely(data & (AR5K_ISR_BNR)))
                        *interrupt_mask |= AR5K_INT_BNR;

                if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
                                        AR5K_SISR2_DPERR |
                                        AR5K_SISR2_MCABT)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                if (data & AR5K_ISR_TIM)
                        *interrupt_mask |= AR5K_INT_TIM;

                if (data & AR5K_ISR_BCNMISC) {
                        if (sisr2 & AR5K_SISR2_TIM)
                                *interrupt_mask |= AR5K_INT_TIM;
                        if (sisr2 & AR5K_SISR2_DTIM)
                                *interrupt_mask |= AR5K_INT_DTIM;
                        if (sisr2 & AR5K_SISR2_DTIM_SYNC)
                                *interrupt_mask |= AR5K_INT_DTIM_SYNC;
                        if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
                        if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
                }

                if (data & AR5K_ISR_RXDOPPLER)
                        *interrupt_mask |= AR5K_INT_RX_DOPPLER;
                if (data & AR5K_ISR_QCBRORN) {
                        *interrupt_mask |= AR5K_INT_QCBRORN;
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
                                        AR5K_SISR3_QCBRORN);
                }
                if (data & AR5K_ISR_QCBRURN) {
                        *interrupt_mask |= AR5K_INT_QCBRURN;
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
                                        AR5K_SISR3_QCBRURN);
                }
                if (data & AR5K_ISR_QTRIG) {
                        *interrupt_mask |= AR5K_INT_QTRIG;
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
                                        AR5K_SISR4_QTRIG);
                }

                if (data & AR5K_ISR_TXOK)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
                                        AR5K_SISR0_QCU_TXOK);

                if (data & AR5K_ISR_TXDESC)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
                                        AR5K_SISR0_QCU_TXDESC);

                if (data & AR5K_ISR_TXERR)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
                                        AR5K_SISR1_QCU_TXERR);

                if (data & AR5K_ISR_TXEOL)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
                                        AR5K_SISR1_QCU_TXEOL);

                if (data & AR5K_ISR_TXURN)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
                                        AR5K_SISR2_QCU_TXURN);
        } else {
                if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
                                | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /*
                 * XXX: BMISS interrupts may occur after association.
                 * I found this in the 5210 code but it needs testing. If it
                 * is true we should disable them before assoc and re-enable
                 * them after a successful assoc + some jiffies.
                        interrupt_mask &= ~AR5K_INT_BMISS;
                 */
        }

        /*
         * In case we didn't handle anything,
         * print the register value.
         */
        if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
                ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

        return 0;
}

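/**
 * ath5k_hw_set_imr - Set the interrupt mask
 *
 * @ah: The &struct ath5k_hw
 * @new_mask: The new &enum ath5k_int mask
 *
 * Translates the abstract AR5K_INT_* flags to chipset-specific IMR/SIMR
 * bits and writes them, keeping interrupts globally disabled while the
 * registers are updated. Returns the previous interrupt mask.
 */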
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
        enum ath5k_int old_mask, int_mask;

        old_mask = ah->ah_imr;

        /*
         * Disable card interrupts to prevent any race conditions
         * (they will be re-enabled afterwards if AR5K_INT_GLOBAL
         * is set again on the new mask).
         */
        if (old_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        /*
         * Add additional, chipset-dependent interrupt mask flags
         * and write them to the IMR (interrupt mask register).
         */
        int_mask = new_mask & AR5K_INT_COMMON;

        if (ah->ah_version != AR5K_AR5210) {
                /* Preserve per queue TXURN interrupt mask */
                u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
                                & AR5K_SIMR2_QCU_TXURN;

                if (new_mask & AR5K_INT_FATAL) {
                        int_mask |= AR5K_IMR_HIUERR;
                        simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
                                | AR5K_SIMR2_DPERR);
                }

                /* Beacon Not Ready */
                if (new_mask & AR5K_INT_BNR)
                        int_mask |= AR5K_INT_BNR;

                if (new_mask & AR5K_INT_TIM)
                        int_mask |= AR5K_IMR_TIM;

                if (new_mask & AR5K_INT_TIM)
                        simr2 |= AR5K_SISR2_TIM;
                if (new_mask & AR5K_INT_DTIM)
                        simr2 |= AR5K_SISR2_DTIM;
                if (new_mask & AR5K_INT_DTIM_SYNC)
                        simr2 |= AR5K_SISR2_DTIM_SYNC;
                if (new_mask & AR5K_INT_BCN_TIMEOUT)
                        simr2 |= AR5K_SISR2_BCN_TIMEOUT;
                if (new_mask & AR5K_INT_CAB_TIMEOUT)
                        simr2 |= AR5K_SISR2_CAB_TIMEOUT;

                if (new_mask & AR5K_INT_RX_DOPPLER)
                        int_mask |= AR5K_IMR_RXDOPPLER;

                /* Note: Per queue interrupt masks
                 * are set via reset_tx_queue (qcu.c) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
                ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

        } else {
                if (new_mask & AR5K_INT_FATAL)
                        int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
                                | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

                ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
        }

        /* If RXNOFRM interrupt is masked disable it
         * by setting AR5K_RXNOFRM to zero */
        if (!(new_mask & AR5K_INT_RXNOFRM))
                ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

        /* Store new interrupt mask */
        ah->ah_imr = new_mask;

        /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
        if (new_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        return old_mask;
}

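/*
 * Illustrative interrupt-handling sketch (not part of the original file;
 * the driver-side fatal-error handler named below is hypothetical): an
 * interrupt routine first checks whether the card raised the interrupt,
 * then fetches and dispatches the abstract status mask.
 *
 *      enum ath5k_int status;
 *
 *      if (!ath5k_hw_is_intr_pending(ah))
 *              return IRQ_NONE;
 *
 *      ath5k_hw_get_isr(ah, &status);
 *      if (status & AR5K_INT_FATAL)
 *              ath5k_handle_fatal_err(ah);
 */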

