/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Lightly modified for gPXE, July 2009, by Joshua Oreman <oremanj@rwcr.net>.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

FILE_LICENCE ( MIT );

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/*
 * dma.c - DMA and interrupt masking functions
 *
 * Here we set up the descriptor pointers (RXDP/TXDP), start/stop the DMA
 * engine and handle queue setup for the 5210 chipset (the rest are handled
 * in qcu.c). We also set up the interrupt mask register (IMR) and read the
 * various interrupt status registers (ISR).
 *
 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
 * number that caused the interrupt.
 */

#include <unistd.h>

#include "ath5k.h"
#include "reg.h"
#include "base.h"

/*********\
* Receive *
\*********/

/**
 * ath5k_hw_start_rx_dma - Start DMA receive
 *
 * @ah:	The &struct ath5k_hw
 */
void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	ath5k_hw_reg_read(ah, AR5K_CR);
}
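
/*
 * A minimal usage sketch, not part of the original driver: RX DMA should
 * only be started once RXDP points at a valid descriptor chain. The
 * "rxdp_phys" parameter is an assumed caller-supplied bus address of the
 * first RX descriptor.
 */
static inline void ath5k_example_rx_bringup(struct ath5k_hw *ah,
					    u32 rxdp_phys)
{
	/* Point the receive engine at the first descriptor... */
	ath5k_hw_set_rxdp(ah, rxdp_phys);

	/* ...then enable the receive unit */
	ath5k_hw_start_rx_dma(ah);
}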

/**
 * ath5k_hw_stop_rx_dma - Stop DMA receive
 *
 * @ah:	The &struct ath5k_hw
 */
int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
	unsigned int i;

	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

	/*
	 * It may take some time to disable the DMA receive unit
	 */
	for (i = 1000; i > 0 &&
			(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
			i--)
		udelay(10);

	return i ? 0 : -EBUSY;
}

/**
 * ath5k_hw_get_rxdp - Get RX Descriptor's address
 *
 * @ah: The &struct ath5k_hw
 *
 * XXX: Is RXDP read and clear ?
 */
u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

/**
 * ath5k_hw_set_rxdp - Set RX Descriptor's address
 *
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * XXX: Should we check if rx is enabled before setting rxdp ?
 */
void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
}
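
/*
 * Sketch for the XXX above, not part of the original driver: a
 * conservative way to move RXDP is to drain the receive unit first,
 * since changing RXDP while RX DMA is active is unsafe.
 */
static inline int ath5k_example_move_rxdp(struct ath5k_hw *ah, u32 new_rxdp)
{
	int ret;

	/* Stop the receive unit; -EBUSY means it never went idle */
	ret = ath5k_hw_stop_rx_dma(ah);
	if (ret)
		return ret;

	ath5k_hw_set_rxdp(ah, new_rxdp);
	ath5k_hw_start_rx_dma(ah);

	return 0;
}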


/**********\
* Transmit *
\**********/

/**
 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue and since 5210 doesn't have
 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
 * queue for normal data and one queue for beacons). For queue setup
 * on newer chips check out qcu.c. Returns -EIO if the queue is inactive
 * or already disabled.
 *
 * NOTE: Must be called after setting up the tx control descriptor for that
 * queue (see below).
 */
int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	/* Return if queue is declared inactive */
	if (ah->ah_txq.tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/* Assume always a data queue */
		tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;

		/* Start queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}
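
/*
 * Sketch of the ordering the NOTE above asks for, not part of the
 * original driver: "txdp_phys" is an assumed bus address of the queue's
 * first tx control descriptor, which must be in place before the queue
 * is enabled.
 */
static inline int ath5k_example_tx_kick(struct ath5k_hw *ah,
					unsigned int queue, u32 txdp_phys)
{
	int ret;

	/* Descriptor pointer first (fails with -EIO if queue is active) */
	ret = ath5k_hw_set_txdp(ah, queue, txdp_phys);
	if (ret)
		return ret;

	/* ...then enable the queue */
	return ath5k_hw_start_tx_dma(ah, queue);
}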

/**
 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain the queue so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EIO if the queue is inactive.
 */
int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	unsigned int i = 40;
	u32 tx_queue, pending;

	/* Return if queue is declared inactive */
	if (ah->ah_txq.tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/* Assume a data queue */
		tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;

		/* Stop queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/*
		 * Schedule TX disable and wait until queue is empty
		 */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

		/* Check for pending frames */
		do {
			pending = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_STATUS(queue)) &
				AR5K_QCU_STS_FRMPENDCNT;
			udelay(100);
		} while (--i && pending);

		/* For 2413+ order PCU to drop packets using
		 * QUIET mechanism */
		if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) && pending) {
			/* Set periodicity and duration */
			ath5k_hw_reg_write(ah,
				AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
				AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
				AR5K_QUIET_CTL2);

			/* Enable quiet period for current TSF */
			ath5k_hw_reg_write(ah,
				AR5K_QUIET_CTL1_QT_EN |
				AR5K_REG_SM(ath5k_hw_reg_read(ah,
						AR5K_TSF_L32_5211) >> 10,
						AR5K_QUIET_CTL1_NEXT_QT_TSF),
				AR5K_QUIET_CTL1);

			/* Force channel idle high */
			AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANEL_IDLE_HIGH);

			/* Wait a while and disable mechanism */
			udelay(200);
			AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
						AR5K_QUIET_CTL1_QT_EN);

			/* Re-check for pending frames */
			i = 40;
			do {
				pending = ath5k_hw_reg_read(ah,
					AR5K_QUEUE_STATUS(queue)) &
					AR5K_QCU_STS_FRMPENDCNT;
				udelay(100);
			} while (--i && pending);

			AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANEL_IDLE_HIGH);
		}

		/* Clear register */
		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
		if (pending)
			return -EBUSY;
	}

	/* TODO: Check for success on 5210 else return error */
	return 0;
}
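
/*
 * Sketch, not part of the original driver: quiescing both DMA directions
 * before a reset or channel change, using the helpers above.
 */
static inline int ath5k_example_quiesce_dma(struct ath5k_hw *ah,
					    unsigned int queue)
{
	int ret;

	/* Stop and drain the tx queue first so no stale frames go out */
	ret = ath5k_hw_stop_tx_dma(ah, queue);
	if (ret)
		return ret;

	/* ...then stop the receive unit */
	return ath5k_hw_stop_rx_dma(ah);
}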

/**
 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Get TX descriptor's address for a specific queue. For 5210 we ignore
 * the queue number and use tx queue type since we only have 2 queues.
 * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
 * For newer chips with QCU/DCU we just read the corresponding TXDP register.
 *
 * XXX: Is TXDP read and clear ?
 */
u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
	u16 tx_reg;

	/*
	 * Get the transmit queue descriptor pointer from the selected queue
	 */
	/* 5210 doesn't have QCU */
	if (ah->ah_version == AR5K_AR5210) {
		/* Assume a data queue */
		tx_reg = AR5K_NOQCU_TXDP0;
	} else {
		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	return ath5k_hw_reg_read(ah, tx_reg);
}

/**
 * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 * @phys_addr: TX descriptor address
 *
 * Set TX descriptor's address for a specific queue. For 5210 we ignore
 * the queue number and use the tx queue type since we only have 2 queues:
 * as above, TXDP0 for the normal data queue and TXDP1 for the beacon queue.
 * For newer chips with QCU/DCU we just set the corresponding TXDP register.
 * Returns -EIO if the queue is still active.
 */
int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
	u16 tx_reg;

	/*
	 * Set the transmit queue descriptor pointer register by type
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		/* Assume a data queue */
		tx_reg = AR5K_NOQCU_TXDP0;
	} else {
		/*
		 * Set the transmit queue descriptor pointer for
		 * the selected queue on QCU for 5211+
		 * (this won't work if the queue is still active)
		 */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			return -EIO;

		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	/* Set descriptor pointer */
	ath5k_hw_reg_write(ah, phys_addr, tx_reg);

	return 0;
}

/**
 * ath5k_hw_update_tx_triglevel - Update tx trigger level
 *
 * @ah: The &struct ath5k_hw
 * @increase: Flag to force increase of trigger level
 *
 * This function increases/decreases the tx trigger level for the tx fifo
 * buffer (aka FIFO threshold) that is used to indicate when the PCU flushes
 * the buffer and transmits its data. Lowering this results in sending small
 * frames more quickly but can lead to tx underruns; raising it a lot can
 * cause other problems (I think bmiss is related). Right now we start with
 * the lowest possible (64Bytes) and if we get a tx underrun we increase it
 * using the increase flag. Returns -EIO if we have reached the
 * maximum/minimum.
 *
 * XXX: Link this with tx DMA size ?
 * XXX: Use it to save interrupts ?
 * TODO: Needs testing, I think it's related to bmiss...
 */
int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, int increase)
{
	u32 trigger_level, imr;
	int ret = -EIO;

	/*
	 * Disable interrupts by setting the mask
	 */
	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
			AR5K_TXCFG_TXFULL);

	if (!increase) {
		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
			goto done;
	} else
		trigger_level +=
			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

	/*
	 * Update trigger level on success
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
	else
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
				AR5K_TXCFG_TXFULL, trigger_level);

	ret = 0;

done:
	/*
	 * Restore interrupt mask
	 */
	ath5k_hw_set_imr(ah, imr);

	return ret;
}
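
/*
 * Sketch, not part of the original driver: the natural caller of
 * ath5k_hw_update_tx_triglevel() is the tx underrun path, raising the
 * FIFO threshold whenever the hardware reports AR5K_INT_TXURN. "status"
 * is an assumed mask obtained from ath5k_hw_get_isr() below.
 */
static inline void ath5k_example_handle_txurn(struct ath5k_hw *ah,
					      enum ath5k_int status)
{
	if (status & AR5K_INT_TXURN)
		/* Raise the trigger level halfway towards the maximum */
		ath5k_hw_update_tx_triglevel(ah, 1);
}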

/*******************\
* Interrupt masking *
\*******************/

/**
 * ath5k_hw_is_intr_pending - Check if we have pending interrupts
 *
 * @ah: The &struct ath5k_hw
 *
 * Check if we have pending interrupts to process. Returns 1 if we
 * have pending interrupts and 0 if not.
 */
int ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}

/**
 * ath5k_hw_get_isr - Get interrupt status
 *
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 * interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the reason
 * for the interrupt by reading the Primary Interrupt Status Register.
 * Returns an abstract interrupt status mask which is mostly ISR with some
 * uncommon bits being mapped to some standard non hw-specific positions
 * (check out &ath5k_int).
 *
 * NOTE: We use a read-and-clear register, so after this function is called
 * ISR is zeroed.
 */
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data;

	/*
	 * Read interrupt status from the Interrupt Status register
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		data = ath5k_hw_reg_read(ah, AR5K_ISR);
		if (data == AR5K_INT_NOCARD) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	} else {
		/*
		 * Read interrupt status from Interrupt
		 * Status Register shadow copy (Read And Clear)
		 *
		 * Note: PISR/SISR Not available on 5210
		 */
		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
		if (data == AR5K_INT_NOCARD) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	}

	/*
	 * Get abstract interrupt mask (driver-compatible)
	 */
	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;

	if (ah->ah_version != AR5K_AR5210) {
		u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);

		/* HIU = Host Interface Unit (PCI etc) */
		if (data & (AR5K_ISR_HIUERR))
			*interrupt_mask |= AR5K_INT_FATAL;

		/* Beacon Not Ready */
		if (data & (AR5K_ISR_BNR))
			*interrupt_mask |= AR5K_INT_BNR;

		if (sisr2 & (AR5K_SISR2_SSERR | AR5K_SISR2_DPERR |
			     AR5K_SISR2_MCABT))
			*interrupt_mask |= AR5K_INT_FATAL;

		if (data & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		if (data & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		if (data & AR5K_ISR_RXDOPPLER)
			*interrupt_mask |= AR5K_INT_RX_DOPPLER;
		if (data & AR5K_ISR_QCBRORN) {
			*interrupt_mask |= AR5K_INT_QCBRORN;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
					AR5K_SISR3_QCBRORN);
		}
		if (data & AR5K_ISR_QCBRURN) {
			*interrupt_mask |= AR5K_INT_QCBRURN;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
					AR5K_SISR3_QCBRURN);
		}
		if (data & AR5K_ISR_QTRIG) {
			*interrupt_mask |= AR5K_INT_QTRIG;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
					AR5K_SISR4_QTRIG);
		}

		if (data & AR5K_ISR_TXOK)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
					AR5K_SISR0_QCU_TXOK);

		if (data & AR5K_ISR_TXDESC)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
					AR5K_SISR0_QCU_TXDESC);

		if (data & AR5K_ISR_TXERR)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
					AR5K_SISR1_QCU_TXERR);

		if (data & AR5K_ISR_TXEOL)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
					AR5K_SISR1_QCU_TXEOL);

		if (data & AR5K_ISR_TXURN)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
					AR5K_SISR2_QCU_TXURN);
	} else {
		if (data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT |
			    AR5K_ISR_HIUERR | AR5K_ISR_DPERR))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*
		 * XXX: BMISS interrupts may occur after association.
		 * I found this on 5210 code but it needs testing. If this is
		 * true we should disable them before assoc and re-enable them
		 * after a successful assoc + some jiffies.
			interrupt_mask &= ~AR5K_INT_BMISS;
		 */
	}

	return 0;
}
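
/*
 * Sketch of an interrupt handler built on the two helpers above, not
 * part of the original driver: poll INTPEND, read-and-clear the status,
 * then dispatch. Handling of the individual status bits is left out.
 */
static inline void ath5k_example_irq(struct ath5k_hw *ah)
{
	enum ath5k_int status;

	while (ath5k_hw_is_intr_pending(ah)) {
		if (ath5k_hw_get_isr(ah, &status))
			break;		/* -ENODEV: card is gone */

		if (status & AR5K_INT_FATAL)
			break;		/* caller should reset the hardware */

		/* ...dispatch on the remaining status bits... */
	}
}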

/**
 * ath5k_hw_set_imr - Set interrupt mask
 *
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw to filter interrupts. We do that by mapping
 * ath5k_int bits to hw-specific bits to remove the abstraction and writing
 * the Interrupt Mask Register. Returns the old interrupt mask.
 */
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts to prevent any race conditions
	 * (they will be re-enabled afterwards if AR5K_INT_GLOBAL
	 * is set again on the new mask).
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	/*
	 * Add additional, chipset-dependent interrupt mask flags
	 * and write them to the IMR (interrupt mask register).
	 */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve per queue TXURN interrupt mask */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/* Beacon Not Ready */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		if (new_mask & AR5K_INT_RX_DOPPLER)
			int_mask |= AR5K_IMR_RXDOPPLER;

		/* Note: Per queue interrupt masks
		 * are set via reset_tx_queue (qcu.c) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If RXNOFRM interrupt is masked disable it
	 * by setting AR5K_RXNOFRM to zero */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store new interrupt mask */
	ah->ah_imr = new_mask;

	/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, ah->ah_ier, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	return old_mask;
}
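
/*
 * Sketch, not part of the original driver: because ath5k_hw_set_imr()
 * returns the previous mask, it supports a save/restore pattern for
 * register accesses that must not race with the interrupt handler, as
 * ath5k_hw_update_tx_triglevel() above does.
 */
static inline void ath5k_example_masked_section(struct ath5k_hw *ah)
{
	enum ath5k_int old_mask;

	/* Mask everything by clearing the global enable bit */
	old_mask = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

	/* ...registers that must not race with the ISR go here... */

	/* Restore the previous mask (re-enables if GLOBAL was set) */
	ath5k_hw_set_imr(ah, old_mask);
}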