1 /******************************************************************************
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
35 #define IPW2200_VERSION "1.0.0"
36 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
37 #define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
38 #define DRV_VERSION IPW2200_VERSION
40 MODULE_DESCRIPTION(DRV_DESCRIPTION);
41 MODULE_VERSION(DRV_VERSION);
42 MODULE_AUTHOR(DRV_COPYRIGHT);
43 MODULE_LICENSE("GPL");
46 static int channel = 0;
50 static u32 ipw_debug_level;
51 static int associate = 1;
52 static int auto_create = 1;
53 static int disable = 0;
54 static const char ipw_modes[] = {
58 static void ipw_rx(struct ipw_priv *priv);
59 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
60 struct clx2_tx_queue *txq, int qindex);
61 static int ipw_queue_reset(struct ipw_priv *priv);
63 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
66 static void ipw_tx_queue_free(struct ipw_priv *);
68 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
69 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
70 static void ipw_rx_queue_replenish(void *);
72 static int ipw_up(struct ipw_priv *);
73 static void ipw_down(struct ipw_priv *);
74 static int ipw_config(struct ipw_priv *);
75 static int init_supported_rates(struct ipw_priv *priv,
76 struct ipw_supported_rates *prates);
78 static u8 band_b_active_channel[MAX_B_CHANNELS] = {
79 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
81 static u8 band_a_active_channel[MAX_A_CHANNELS] = {
82 36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
85 static int is_valid_channel(int mode_mask, int channel)
92 if (mode_mask & IEEE_A)
93 for (i = 0; i < MAX_A_CHANNELS; i++)
94 if (band_a_active_channel[i] == channel)
97 if (mode_mask & (IEEE_B | IEEE_G))
98 for (i = 0; i < MAX_B_CHANNELS; i++)
99 if (band_b_active_channel[i] == channel)
100 return mode_mask & (IEEE_B | IEEE_G);
105 static char *snprint_line(char *buf, size_t count,
106 const u8 * data, u32 len, u32 ofs)
111 out = snprintf(buf, count, "%08X", ofs);
113 for (l = 0, i = 0; i < 2; i++) {
114 out += snprintf(buf + out, count - out, " ");
115 for (j = 0; j < 8 && l < len; j++, l++)
116 out += snprintf(buf + out, count - out, "%02X ",
119 out += snprintf(buf + out, count - out, " ");
122 out += snprintf(buf + out, count - out, " ");
123 for (l = 0, i = 0; i < 2; i++) {
124 out += snprintf(buf + out, count - out, " ");
125 for (j = 0; j < 8 && l < len; j++, l++) {
126 c = data[(i * 8 + j)];
127 if (!isascii(c) || !isprint(c))
130 out += snprintf(buf + out, count - out, "%c", c);
134 out += snprintf(buf + out, count - out, " ");
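/*
 * For reference, a line produced by snprint_line() looks roughly like the
 * following (the data shown is made up; non-printable bytes are replaced
 * with a placeholder character before the ASCII column is emitted):
 *
 *	00000010  41 42 43 44 45 46 47 48  69 70 77 32 32 30 30 00   ABCDEFGH ipw2200.
 *
 * i.e. the 32-bit offset, two groups of eight hex bytes, then the same
 * sixteen bytes rendered as ASCII.
 */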
140 static void printk_buf(int level, const u8 * data, u32 len)
144 if (!(ipw_debug_level & level))
148 printk(KERN_DEBUG "%s\n",
149 snprint_line(line, sizeof(line), &data[ofs],
150 min(len, 16U), ofs));
152 len -= min(len, 16U);
156 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
157 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
159 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
160 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
162 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
163 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
165 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
166 __LINE__, (u32) (b), (u32) (c));
167 _ipw_write_reg8(a, b, c);
170 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
171 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
173 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
174 __LINE__, (u32) (b), (u32) (c));
175 _ipw_write_reg16(a, b, c);
178 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
179 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
181 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
182 __LINE__, (u32) (b), (u32) (c));
183 _ipw_write_reg32(a, b, c);
186 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
187 #define ipw_write8(ipw, ofs, val) \
188 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
189 _ipw_write8(ipw, ofs, val)
191 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
192 #define ipw_write16(ipw, ofs, val) \
193 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
194 _ipw_write16(ipw, ofs, val)
196 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
197 #define ipw_write32(ipw, ofs, val) \
198 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
199 _ipw_write32(ipw, ofs, val)
201 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
202 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
204 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
205 return _ipw_read8(ipw, ofs);
208 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
210 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
211 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
213 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
214 return _ipw_read16(ipw, ofs);
217 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
219 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
220 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
222 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
223 return _ipw_read32(ipw, ofs);
226 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
228 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
229 #define ipw_read_indirect(a, b, c, d) \
230 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
231 _ipw_read_indirect(a, b, c, d)
233 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
235 #define ipw_write_indirect(a, b, c, d) \
236 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
237 _ipw_write_indirect(a, b, c, d)
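/*
 * A minimal sketch of how the two access layers above differ, assuming two
 * hypothetical constants REG_OFS (an offset inside the mapped BAR) and
 * SRAM_ADDR (an address only reachable through the indirect window):
 *
 *	u32 v1 = ipw_read32(priv, REG_OFS);       direct readl() at hw_base + ofs
 *	u32 v2 = ipw_read_reg32(priv, SRAM_ADDR); latches the address in
 *	                                          CX2_INDIRECT_ADDR, then reads
 *	                                          CX2_INDIRECT_DATA
 *
 * Each indirect (_reg) access therefore costs two MMIO operations, which is
 * why the bulk helpers below use the auto-increment window instead.
 */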
239 /* indirect writes */
240 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
242 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
243 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
244 _ipw_write32(priv, CX2_INDIRECT_DATA, value);
247 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
249 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
250 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
251 _ipw_write8(priv, CX2_INDIRECT_DATA, value);
252 IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
253 (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA), value);
256 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
258 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
259 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
260 _ipw_write16(priv, CX2_INDIRECT_DATA, value);
263 /* indirect reads */
265 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
268 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
269 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
270 word = _ipw_read32(priv, CX2_INDIRECT_DATA);
271 return (word >> ((reg & 0x3) * 8)) & 0xff;
274 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
278 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
280 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
281 value = _ipw_read32(priv, CX2_INDIRECT_DATA);
282 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
286 /* iterative/auto-increment 32 bit reads and writes */
287 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
290 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
291 u32 dif_len = addr - aligned_addr;
295 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
297 /* Read the unaligned leading bytes one at a time */
298 if (unlikely(dif_len)) {
299 /* Start reading at aligned_addr + dif_len */
300 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
301 for (i = dif_len; i < 4; i++, buf++)
302 *buf = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
307 /* Read DWs through autoinc register */
308 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
309 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
310 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
311 *(u32 *) buf = ipw_read32(priv, CX2_AUTOINC_DATA);
313 /* Copy the trailing (unaligned) bytes */
314 dif_len = num - aligned_len;
315 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
316 for (i = 0; i < dif_len; i++, buf++)
317 *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i);
320 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
323 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
324 u32 dif_len = addr - aligned_addr;
328 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
330 /* Write the unaligned leading bytes one at a time */
331 if (unlikely(dif_len)) {
332 /* Start writing at aligned_addr + dif_len */
333 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
334 for (i = dif_len; i < 4; i++, buf++)
335 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
340 /* Write DWs through autoinc register */
341 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
342 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
343 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
344 _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32 *) buf);
346 /* Copy the trailing (unaligned) bytes */
347 dif_len = num - aligned_len;
348 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
349 for (i = 0; i < dif_len; i++, buf++)
350 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
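/*
 * Illustration of the alignment handling in the two helpers above (the
 * numbers are only an example): a transfer of num = 10 bytes starting at
 * addr = 0x00030002 gives aligned_addr = 0x00030000 and dif_len = 2, so the
 * leading unaligned bytes move one at a time through CX2_INDIRECT_DATA, the
 * aligned middle of the buffer moves as whole dwords through the
 * auto-increment window (CX2_AUTOINC_ADDR/CX2_AUTOINC_DATA), and any
 * leftover tail bytes are again moved byte by byte.
 */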
353 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
356 memcpy_toio((priv->hw_base + addr), buf, num);
359 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
361 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
364 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
366 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
369 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
371 if (priv->status & STATUS_INT_ENABLED)
373 priv->status |= STATUS_INT_ENABLED;
374 ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);
377 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
379 if (!(priv->status & STATUS_INT_ENABLED))
381 priv->status &= ~STATUS_INT_ENABLED;
382 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
385 static char *ipw_error_desc(u32 val)
388 case IPW_FW_ERROR_OK:
390 case IPW_FW_ERROR_FAIL:
392 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
393 return "MEMORY_UNDERFLOW";
394 case IPW_FW_ERROR_MEMORY_OVERFLOW:
395 return "MEMORY_OVERFLOW";
396 case IPW_FW_ERROR_BAD_PARAM:
397 return "ERROR_BAD_PARAM";
398 case IPW_FW_ERROR_BAD_CHECKSUM:
399 return "ERROR_BAD_CHECKSUM";
400 case IPW_FW_ERROR_NMI_INTERRUPT:
401 return "ERROR_NMI_INTERRUPT";
402 case IPW_FW_ERROR_BAD_DATABASE:
403 return "ERROR_BAD_DATABASE";
404 case IPW_FW_ERROR_ALLOC_FAIL:
405 return "ERROR_ALLOC_FAIL";
406 case IPW_FW_ERROR_DMA_UNDERRUN:
407 return "ERROR_DMA_UNDERRUN";
408 case IPW_FW_ERROR_DMA_STATUS:
409 return "ERROR_DMA_STATUS";
410 case IPW_FW_ERROR_DINOSTATUS_ERROR:
411 return "ERROR_DINOSTATUS_ERROR";
412 case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
413 return "ERROR_EEPROMSTATUS_ERROR";
414 case IPW_FW_ERROR_SYSASSERT:
415 return "ERROR_SYSASSERT";
416 case IPW_FW_ERROR_FATAL_ERROR:
417 return "ERROR_FATALSTATUS_ERROR";
419 return "UNKNOWNSTATUS_ERROR";
423 static void ipw_dump_nic_error_log(struct ipw_priv *priv)
425 u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
427 base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
428 count = ipw_read_reg32(priv, base);
430 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
431 IPW_ERROR("Start IPW Error Log Dump:\n");
432 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
433 priv->status, priv->config);
436 for (i = ERROR_START_OFFSET;
437 i <= count * ERROR_ELEM_SIZE; i += ERROR_ELEM_SIZE) {
438 desc = ipw_read_reg32(priv, base + i);
439 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
440 blink1 = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
441 blink2 = ipw_read_reg32(priv, base + i + 3 * sizeof(u32));
442 ilink1 = ipw_read_reg32(priv, base + i + 4 * sizeof(u32));
443 ilink2 = ipw_read_reg32(priv, base + i + 5 * sizeof(u32));
444 idata = ipw_read_reg32(priv, base + i + 6 * sizeof(u32));
446 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
447 ipw_error_desc(desc), time, blink1, blink2,
448 ilink1, ilink2, idata);
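/*
 * The seven consecutive reads above imply a per-element error log layout
 * along the lines of the sketch below; the structure name and field names
 * mirror the local variables and are an assumption, not a definition taken
 * from the firmware interface:
 *
 *	struct ipw_error_elem_sketch {
 *		u32 desc;		error description code
 *		u32 time;		firmware timestamp
 *		u32 blink1, blink2;	firmware program-counter markers
 *		u32 ilink1, ilink2;
 *		u32 idata;		additional error data
 *	};
 */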
452 static void ipw_dump_nic_event_log(struct ipw_priv *priv)
454 u32 ev, time, data, i, count, base;
456 base = ipw_read32(priv, IPW_EVENT_LOG);
457 count = ipw_read_reg32(priv, base);
459 if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
460 IPW_ERROR("Start IPW Event Log Dump:\n");
462 for (i = EVENT_START_OFFSET;
463 i <= count * EVENT_ELEM_SIZE; i += EVENT_ELEM_SIZE) {
464 ev = ipw_read_reg32(priv, base + i);
465 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
466 data = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
468 #ifdef CONFIG_IPW_DEBUG
469 IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
474 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
476 u32 addr, field_info, field_len, field_count, total_len;
478 IPW_DEBUG_ORD("ordinal = %i\n", ord);
480 if (!priv || !val || !len) {
481 IPW_DEBUG_ORD("Invalid argument\n");
485 /* verify device ordinal tables have been initialized */
486 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
487 IPW_DEBUG_ORD("Attempt to access ordinals before initialization\n");
491 switch (IPW_ORD_TABLE_ID_MASK & ord) {
492 case IPW_ORD_TABLE_0_MASK:
494 * TABLE 0: Direct access to a table of 32 bit values
496 * This is a very simple table with the data directly
497 * read from the table
500 /* remove the table id from the ordinal */
501 ord &= IPW_ORD_TABLE_VALUE_MASK;
504 if (ord > priv->table0_len) {
505 IPW_DEBUG_ORD("ordinal value (%i) longer than "
506 "max (%i)\n", ord, priv->table0_len);
510 /* verify we have enough room to store the value */
511 if (*len < sizeof(u32)) {
512 IPW_DEBUG_ORD("ordinal buffer length too small, "
513 "need %zd\n", sizeof(u32));
517 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
518 ord, priv->table0_addr + (ord << 2));
522 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
525 case IPW_ORD_TABLE_1_MASK:
527 * TABLE 1: Indirect access to a table of 32 bit values
529 * This is a fairly large table of u32 values each
530 * representing starting addr for the data (which is
534 /* remove the table id from the ordinal */
535 ord &= IPW_ORD_TABLE_VALUE_MASK;
538 if (ord > priv->table1_len) {
539 IPW_DEBUG_ORD("ordinal value too long\n");
543 /* verify we have enough room to store the value */
544 if (*len < sizeof(u32)) {
545 IPW_DEBUG_ORD("ordinal buffer length too small, "
546 "need %zd\n", sizeof(u32));
551 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
555 case IPW_ORD_TABLE_2_MASK:
557 * TABLE 2: Indirect access to a table of variable sized values
559 * This table consists of six values, each containing
560 * - a dword containing the starting offset of the data
561 * - a dword containing the length in the first 16 bits
562 * and the count in the second 16 bits
565 /* remove the table id from the ordinal */
566 ord &= IPW_ORD_TABLE_VALUE_MASK;
569 if (ord > priv->table2_len) {
570 IPW_DEBUG_ORD("ordinal value too long\n");
574 /* get the address of statistic */
575 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
577 /* get the second DW of statistics;
578 * two 16-bit words - first is length, second is count */
581 priv->table2_addr + (ord << 3) +
584 /* get each entry length */
585 field_len = *((u16 *) & field_info);
587 /* get number of entries */
588 field_count = *(((u16 *) & field_info) + 1);
590 /* abort if not enough memory */
591 total_len = field_len * field_count;
592 if (total_len > *len) {
601 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
602 "field_info = 0x%08x\n",
603 addr, total_len, field_info);
604 ipw_read_indirect(priv, addr, val, total_len);
608 IPW_DEBUG_ORD("Invalid ordinal!\n");
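/*
 * A sketch of the calling convention enforced by the checks above, using a
 * hypothetical Table 2 ordinal IPW_ORD_EXAMPLE and a caller-sized buffer:
 *
 *	u8 buf[128];
 *	u32 len = sizeof(buf);
 *	if (ipw_get_ordinal(priv, IPW_ORD_EXAMPLE, buf, &len) != 0)
 *		goto fail;	non-zero: bad ordinal or buffer too small
 *
 * Table 0/1 ordinals return a single u32, so len must be at least
 * sizeof(u32); Table 2 ordinals copy field_len * field_count bytes.
 */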
616 static void ipw_init_ordinals(struct ipw_priv *priv)
618 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
619 priv->table0_len = ipw_read32(priv, priv->table0_addr);
621 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
622 priv->table0_addr, priv->table0_len);
624 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
625 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
627 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
628 priv->table1_addr, priv->table1_len);
630 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
631 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
632 priv->table2_len &= 0x0000ffff; /* use first two bytes */
634 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
635 priv->table2_addr, priv->table2_len);
640 * The following adds a new attribute to the sysfs representation
641 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
642 * used for controlling the debug level.
644 * See the level definitions in ipw for details.
646 static ssize_t show_debug_level(struct device_driver *d, char *buf)
648 return sprintf(buf, "0x%08X\n", ipw_debug_level);
650 static ssize_t store_debug_level(struct device_driver *d,
651 const char *buf, size_t count)
653 char *p = (char *)buf;
656 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
658 if (p[0] == 'x' || p[0] == 'X')
660 val = simple_strtoul(p, &p, 16);
662 val = simple_strtoul(p, &p, 10);
664 printk(KERN_INFO DRV_NAME
665 ": %s is not in hex or decimal form.\n", buf);
667 ipw_debug_level = val;
669 return strnlen(buf, count);
672 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
673 show_debug_level, store_debug_level);
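/*
 * Example of driving the attribute above from userspace, using the sysfs
 * path mentioned in the comment before show_debug_level() (shown only for
 * illustration):
 *
 *	# cat /sys/bus/pci/drivers/ipw/debug_level
 *	0x00000000
 *	# echo 0x00000fff > /sys/bus/pci/drivers/ipw/debug_level
 *
 * store_debug_level() accepts a hex value (with an "0x" or "x" prefix) or a
 * plain decimal value, as implemented above.
 */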
675 static ssize_t show_status(struct device *d,
676 struct device_attribute *attr, char *buf)
678 struct ipw_priv *p = d->driver_data;
679 return sprintf(buf, "0x%08x\n", (int)p->status);
682 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
684 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
687 struct ipw_priv *p = d->driver_data;
688 return sprintf(buf, "0x%08x\n", (int)p->config);
691 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
693 static ssize_t show_nic_type(struct device *d,
694 struct device_attribute *attr, char *buf)
696 struct ipw_priv *p = d->driver_data;
697 u8 type = p->eeprom[EEPROM_NIC_TYPE];
700 case EEPROM_NIC_TYPE_STANDARD:
701 return sprintf(buf, "STANDARD\n");
702 case EEPROM_NIC_TYPE_DELL:
703 return sprintf(buf, "DELL\n");
704 case EEPROM_NIC_TYPE_FUJITSU:
705 return sprintf(buf, "FUJITSU\n");
706 case EEPROM_NIC_TYPE_IBM:
707 return sprintf(buf, "IBM\n");
708 case EEPROM_NIC_TYPE_HP:
709 return sprintf(buf, "HP\n");
712 return sprintf(buf, "UNKNOWN\n");
715 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
717 static ssize_t dump_error_log(struct device *d,
718 struct device_attribute *attr, const char *buf,
721 char *p = (char *)buf;
724 ipw_dump_nic_error_log((struct ipw_priv *)d->driver_data);
726 return strnlen(buf, count);
729 static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
731 static ssize_t dump_event_log(struct device *d,
732 struct device_attribute *attr, const char *buf,
735 char *p = (char *)buf;
738 ipw_dump_nic_event_log((struct ipw_priv *)d->driver_data);
740 return strnlen(buf, count);
743 static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
745 static ssize_t show_ucode_version(struct device *d,
746 struct device_attribute *attr, char *buf)
748 u32 len = sizeof(u32), tmp = 0;
749 struct ipw_priv *p = d->driver_data;
751 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
754 return sprintf(buf, "0x%08x\n", tmp);
757 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
759 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
762 u32 len = sizeof(u32), tmp = 0;
763 struct ipw_priv *p = d->driver_data;
765 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
768 return sprintf(buf, "0x%08x\n", tmp);
771 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
774 * Add a device attribute to view/control the delay between eeprom
777 static ssize_t show_eeprom_delay(struct device *d,
778 struct device_attribute *attr, char *buf)
780 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
781 return sprintf(buf, "%i\n", n);
783 static ssize_t store_eeprom_delay(struct device *d,
784 struct device_attribute *attr,
785 const char *buf, size_t count)
787 struct ipw_priv *p = d->driver_data;
788 sscanf(buf, "%i", &p->eeprom_delay);
789 return strnlen(buf, count);
792 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
793 show_eeprom_delay, store_eeprom_delay);
795 static ssize_t show_command_event_reg(struct device *d,
796 struct device_attribute *attr, char *buf)
799 struct ipw_priv *p = d->driver_data;
801 reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
802 return sprintf(buf, "0x%08x\n", reg);
804 static ssize_t store_command_event_reg(struct device *d,
805 struct device_attribute *attr,
806 const char *buf, size_t count)
809 struct ipw_priv *p = d->driver_data;
811 sscanf(buf, "%x", &reg);
812 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
813 return strnlen(buf, count);
816 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
817 show_command_event_reg, store_command_event_reg);
819 static ssize_t show_mem_gpio_reg(struct device *d,
820 struct device_attribute *attr, char *buf)
823 struct ipw_priv *p = d->driver_data;
825 reg = ipw_read_reg32(p, 0x301100);
826 return sprintf(buf, "0x%08x\n", reg);
828 static ssize_t store_mem_gpio_reg(struct device *d,
829 struct device_attribute *attr,
830 const char *buf, size_t count)
833 struct ipw_priv *p = d->driver_data;
835 sscanf(buf, "%x", &reg);
836 ipw_write_reg32(p, 0x301100, reg);
837 return strnlen(buf, count);
840 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
841 show_mem_gpio_reg, store_mem_gpio_reg);
843 static ssize_t show_indirect_dword(struct device *d,
844 struct device_attribute *attr, char *buf)
847 struct ipw_priv *priv = d->driver_data;
848 if (priv->status & STATUS_INDIRECT_DWORD)
849 reg = ipw_read_reg32(priv, priv->indirect_dword);
853 return sprintf(buf, "0x%08x\n", reg);
855 static ssize_t store_indirect_dword(struct device *d,
856 struct device_attribute *attr,
857 const char *buf, size_t count)
859 struct ipw_priv *priv = d->driver_data;
861 sscanf(buf, "%x", &priv->indirect_dword);
862 priv->status |= STATUS_INDIRECT_DWORD;
863 return strnlen(buf, count);
866 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
867 show_indirect_dword, store_indirect_dword);
869 static ssize_t show_indirect_byte(struct device *d,
870 struct device_attribute *attr, char *buf)
873 struct ipw_priv *priv = d->driver_data;
874 if (priv->status & STATUS_INDIRECT_BYTE)
875 reg = ipw_read_reg8(priv, priv->indirect_byte);
879 return sprintf(buf, "0x%02x\n", reg);
881 static ssize_t store_indirect_byte(struct device *d,
882 struct device_attribute *attr,
883 const char *buf, size_t count)
885 struct ipw_priv *priv = d->driver_data;
887 sscanf(buf, "%x", &priv->indirect_byte);
888 priv->status |= STATUS_INDIRECT_BYTE;
889 return strnlen(buf, count);
892 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
893 show_indirect_byte, store_indirect_byte);
895 static ssize_t show_direct_dword(struct device *d,
896 struct device_attribute *attr, char *buf)
899 struct ipw_priv *priv = d->driver_data;
901 if (priv->status & STATUS_DIRECT_DWORD)
902 reg = ipw_read32(priv, priv->direct_dword);
906 return sprintf(buf, "0x%08x\n", reg);
908 static ssize_t store_direct_dword(struct device *d,
909 struct device_attribute *attr,
910 const char *buf, size_t count)
912 struct ipw_priv *priv = d->driver_data;
914 sscanf(buf, "%x", &priv->direct_dword);
915 priv->status |= STATUS_DIRECT_DWORD;
916 return strnlen(buf, count);
919 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
920 show_direct_dword, store_direct_dword);
922 static inline int rf_kill_active(struct ipw_priv *priv)
924 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
925 priv->status |= STATUS_RF_KILL_HW;
927 priv->status &= ~STATUS_RF_KILL_HW;
929 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
932 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
935 /* 0 - RF kill not enabled
936 1 - SW based RF kill active (sysfs)
937 2 - HW based RF kill active
938 3 - Both HW and SW based RF kill active */
939 struct ipw_priv *priv = d->driver_data;
940 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
941 (rf_kill_active(priv) ? 0x2 : 0x0);
942 return sprintf(buf, "%i\n", val);
945 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
947 if ((disable_radio ? 1 : 0) ==
948 (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
951 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
952 disable_radio ? "OFF" : "ON");
955 priv->status |= STATUS_RF_KILL_SW;
957 if (priv->workqueue) {
958 cancel_delayed_work(&priv->request_scan);
960 wake_up_interruptible(&priv->wait_command_queue);
961 queue_work(priv->workqueue, &priv->down);
963 priv->status &= ~STATUS_RF_KILL_SW;
964 if (rf_kill_active(priv)) {
965 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
966 "disabled by HW switch\n");
967 /* Make sure the RF_KILL check timer is running */
968 cancel_delayed_work(&priv->rf_kill);
969 queue_delayed_work(priv->workqueue, &priv->rf_kill,
972 queue_work(priv->workqueue, &priv->up);
978 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
979 const char *buf, size_t count)
981 struct ipw_priv *priv = d->driver_data;
983 ipw_radio_kill_sw(priv, buf[0] == '1');
988 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
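/*
 * Illustration of the rf_kill encoding documented in show_rf_kill() above:
 * reading the attribute yields 0 (radio enabled), 1 (software kill), 2
 * (hardware kill switch) or 3 (both). Writing '1' requests a software kill
 * and any other value clears it, e.g.:
 *
 *	# echo 1 > rf_kill	request software RF kill
 *	# cat rf_kill		-> 1 (or 3 if the HW switch is also active)
 *
 * The hardware switch cannot be overridden from here; store_rf_kill() only
 * toggles STATUS_RF_KILL_SW via ipw_radio_kill_sw().
 */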
990 static void ipw_irq_tasklet(struct ipw_priv *priv)
992 u32 inta, inta_mask, handled = 0;
996 spin_lock_irqsave(&priv->lock, flags);
998 inta = ipw_read32(priv, CX2_INTA_RW);
999 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
1000 inta &= (CX2_INTA_MASK_ALL & inta_mask);
1002 /* Add any cached INTA values that need to be handled */
1003 inta |= priv->isr_inta;
1005 /* handle all the justifications for the interrupt */
1006 if (inta & CX2_INTA_BIT_RX_TRANSFER) {
1008 handled |= CX2_INTA_BIT_RX_TRANSFER;
1011 if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
1012 IPW_DEBUG_HC("Command completed.\n");
1013 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1014 priv->status &= ~STATUS_HCMD_ACTIVE;
1015 wake_up_interruptible(&priv->wait_command_queue);
1016 handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
1019 if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
1020 IPW_DEBUG_TX("TX_QUEUE_1\n");
1021 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1022 handled |= CX2_INTA_BIT_TX_QUEUE_1;
1025 if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
1026 IPW_DEBUG_TX("TX_QUEUE_2\n");
1027 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1028 handled |= CX2_INTA_BIT_TX_QUEUE_2;
1031 if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
1032 IPW_DEBUG_TX("TX_QUEUE_3\n");
1033 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1034 handled |= CX2_INTA_BIT_TX_QUEUE_3;
1037 if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
1038 IPW_DEBUG_TX("TX_QUEUE_4\n");
1039 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1040 handled |= CX2_INTA_BIT_TX_QUEUE_4;
1043 if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
1044 IPW_WARNING("STATUS_CHANGE\n");
1045 handled |= CX2_INTA_BIT_STATUS_CHANGE;
1048 if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1049 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1050 handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
1053 if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1054 IPW_WARNING("HOST_CMD_DONE\n");
1055 handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1058 if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
1059 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1060 handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
1063 if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1064 IPW_WARNING("PHY_OFF_DONE\n");
1065 handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1068 if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
1069 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1070 priv->status |= STATUS_RF_KILL_HW;
1071 wake_up_interruptible(&priv->wait_command_queue);
1072 netif_carrier_off(priv->net_dev);
1073 netif_stop_queue(priv->net_dev);
1074 cancel_delayed_work(&priv->request_scan);
1075 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1076 handled |= CX2_INTA_BIT_RF_KILL_DONE;
1079 if (inta & CX2_INTA_BIT_FATAL_ERROR) {
1080 IPW_ERROR("Firmware error detected. Restarting.\n");
1081 #ifdef CONFIG_IPW_DEBUG
1082 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1083 ipw_dump_nic_error_log(priv);
1084 ipw_dump_nic_event_log(priv);
1087 queue_work(priv->workqueue, &priv->adapter_restart);
1088 handled |= CX2_INTA_BIT_FATAL_ERROR;
1091 if (inta & CX2_INTA_BIT_PARITY_ERROR) {
1092 IPW_ERROR("Parity error\n");
1093 handled |= CX2_INTA_BIT_PARITY_ERROR;
1096 if (handled != inta) {
1097 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1100 /* enable all interrupts */
1101 ipw_enable_interrupts(priv);
1103 spin_unlock_irqrestore(&priv->lock, flags);
1106 #ifdef CONFIG_IPW_DEBUG
1107 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1108 static char *get_cmd_string(u8 cmd)
1111 IPW_CMD(HOST_COMPLETE);
1112 IPW_CMD(POWER_DOWN);
1113 IPW_CMD(SYSTEM_CONFIG);
1114 IPW_CMD(MULTICAST_ADDRESS);
1116 IPW_CMD(ADAPTER_ADDRESS);
1118 IPW_CMD(RTS_THRESHOLD);
1119 IPW_CMD(FRAG_THRESHOLD);
1120 IPW_CMD(POWER_MODE);
1122 IPW_CMD(TGI_TX_KEY);
1123 IPW_CMD(SCAN_REQUEST);
1124 IPW_CMD(SCAN_REQUEST_EXT);
1126 IPW_CMD(SUPPORTED_RATES);
1127 IPW_CMD(SCAN_ABORT);
1129 IPW_CMD(QOS_PARAMETERS);
1130 IPW_CMD(DINO_CONFIG);
1131 IPW_CMD(RSN_CAPABILITIES);
1133 IPW_CMD(CARD_DISABLE);
1134 IPW_CMD(SEED_NUMBER);
1136 IPW_CMD(COUNTRY_INFO);
1137 IPW_CMD(AIRONET_INFO);
1138 IPW_CMD(AP_TX_POWER);
1140 IPW_CMD(CCX_VER_INFO);
1141 IPW_CMD(SET_CALIBRATION);
1142 IPW_CMD(SENSITIVITY_CALIB);
1143 IPW_CMD(RETRY_LIMIT);
1144 IPW_CMD(IPW_PRE_POWER_DOWN);
1145 IPW_CMD(VAP_BEACON_TEMPLATE);
1146 IPW_CMD(VAP_DTIM_PERIOD);
1147 IPW_CMD(EXT_SUPPORTED_RATES);
1148 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1149 IPW_CMD(VAP_QUIET_INTERVALS);
1150 IPW_CMD(VAP_CHANNEL_SWITCH);
1151 IPW_CMD(VAP_MANDATORY_CHANNELS);
1152 IPW_CMD(VAP_CELL_PWR_LIMIT);
1153 IPW_CMD(VAP_CF_PARAM_SET);
1154 IPW_CMD(VAP_SET_BEACONING_STATE);
1155 IPW_CMD(MEASUREMENT);
1156 IPW_CMD(POWER_CAPABILITY);
1157 IPW_CMD(SUPPORTED_CHANNELS);
1158 IPW_CMD(TPC_REPORT);
1160 IPW_CMD(PRODUCTION_COMMAND);
1165 #endif /* CONFIG_IPW_DEBUG */
1167 #define HOST_COMPLETE_TIMEOUT HZ
1168 static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1172 if (priv->status & STATUS_HCMD_ACTIVE) {
1173 IPW_ERROR("Already sending a command\n");
1177 priv->status |= STATUS_HCMD_ACTIVE;
1179 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
1180 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
1181 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1183 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
1187 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1189 status & STATUS_HCMD_ACTIVE),
1190 HOST_COMPLETE_TIMEOUT);
1192 IPW_DEBUG_INFO("Command completion timed out after %dms.\n",
1193 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1194 priv->status &= ~STATUS_HCMD_ACTIVE;
1197 if (priv->status & STATUS_RF_KILL_MASK) {
1198 IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
1205 static int ipw_send_host_complete(struct ipw_priv *priv)
1207 struct host_cmd cmd = {
1208 .cmd = IPW_CMD_HOST_COMPLETE,
1213 IPW_ERROR("Invalid args\n");
1217 if (ipw_send_cmd(priv, &cmd)) {
1218 IPW_ERROR("failed to send HOST_COMPLETE command\n");
1225 static int ipw_send_system_config(struct ipw_priv *priv,
1226 struct ipw_sys_config *config)
1228 struct host_cmd cmd = {
1229 .cmd = IPW_CMD_SYSTEM_CONFIG,
1230 .len = sizeof(*config)
1233 if (!priv || !config) {
1234 IPW_ERROR("Invalid args\n");
1238 memcpy(&cmd.param, config, sizeof(*config));
1239 if (ipw_send_cmd(priv, &cmd)) {
1240 IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
1247 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
1249 struct host_cmd cmd = {
1250 .cmd = IPW_CMD_SSID,
1251 .len = min(len, IW_ESSID_MAX_SIZE)
1254 if (!priv || !ssid) {
1255 IPW_ERROR("Invalid args\n");
1259 memcpy(&cmd.param, ssid, cmd.len);
1260 if (ipw_send_cmd(priv, &cmd)) {
1261 IPW_ERROR("failed to send SSID command\n");
1268 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
1270 struct host_cmd cmd = {
1271 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1275 if (!priv || !mac) {
1276 IPW_ERROR("Invalid args\n");
1280 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
1281 priv->net_dev->name, MAC_ARG(mac));
1283 memcpy(&cmd.param, mac, ETH_ALEN);
1285 if (ipw_send_cmd(priv, &cmd)) {
1286 IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
1293 static void ipw_adapter_restart(void *adapter)
1295 struct ipw_priv *priv = adapter;
1297 if (priv->status & STATUS_RF_KILL_MASK)
1302 IPW_ERROR("Failed to up device\n");
1307 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
1309 static void ipw_scan_check(void *data)
1311 struct ipw_priv *priv = data;
1312 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
1313 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
1314 "adapter (%dms).\n",
1315 IPW_SCAN_CHECK_WATCHDOG / 100);
1316 ipw_adapter_restart(priv);
1320 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
1321 struct ipw_scan_request_ext *request)
1323 struct host_cmd cmd = {
1324 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
1325 .len = sizeof(*request)
1328 if (!priv || !request) {
1329 IPW_ERROR("Invalid args\n");
1333 memcpy(&cmd.param, request, sizeof(*request));
1334 if (ipw_send_cmd(priv, &cmd)) {
1335 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
1339 queue_delayed_work(priv->workqueue, &priv->scan_check,
1340 IPW_SCAN_CHECK_WATCHDOG);
1344 static int ipw_send_scan_abort(struct ipw_priv *priv)
1346 struct host_cmd cmd = {
1347 .cmd = IPW_CMD_SCAN_ABORT,
1352 IPW_ERROR("Invalid args\n");
1356 if (ipw_send_cmd(priv, &cmd)) {
1357 IPW_ERROR("failed to send SCAN_ABORT command\n");
1364 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
1366 struct host_cmd cmd = {
1367 .cmd = IPW_CMD_SENSITIVITY_CALIB,
1368 .len = sizeof(struct ipw_sensitivity_calib)
1370 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
1372 calib->beacon_rssi_raw = sens;
1373 if (ipw_send_cmd(priv, &cmd)) {
1374 IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
1381 static int ipw_send_associate(struct ipw_priv *priv,
1382 struct ipw_associate *associate)
1384 struct host_cmd cmd = {
1385 .cmd = IPW_CMD_ASSOCIATE,
1386 .len = sizeof(*associate)
1389 if (!priv || !associate) {
1390 IPW_ERROR("Invalid args\n");
1394 memcpy(&cmd.param, associate, sizeof(*associate));
1395 if (ipw_send_cmd(priv, &cmd)) {
1396 IPW_ERROR("failed to send ASSOCIATE command\n");
1403 static int ipw_send_supported_rates(struct ipw_priv *priv,
1404 struct ipw_supported_rates *rates)
1406 struct host_cmd cmd = {
1407 .cmd = IPW_CMD_SUPPORTED_RATES,
1408 .len = sizeof(*rates)
1411 if (!priv || !rates) {
1412 IPW_ERROR("Invalid args\n");
1416 memcpy(&cmd.param, rates, sizeof(*rates));
1417 if (ipw_send_cmd(priv, &cmd)) {
1418 IPW_ERROR("failed to send SUPPORTED_RATES command\n");
1425 static int ipw_set_random_seed(struct ipw_priv *priv)
1427 struct host_cmd cmd = {
1428 .cmd = IPW_CMD_SEED_NUMBER,
1433 IPW_ERROR("Invalid args\n");
1437 get_random_bytes(&cmd.param, sizeof(u32));
1439 if (ipw_send_cmd(priv, &cmd)) {
1440 IPW_ERROR("failed to send SEED_NUMBER command\n");
1448 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1450 struct host_cmd cmd = {
1451 .cmd = IPW_CMD_CARD_DISABLE,
1456 IPW_ERROR("Invalid args\n");
1460 *((u32 *) & cmd.param) = phy_off;
1462 if (ipw_send_cmd(priv, &cmd)) {
1463 IPW_ERROR("failed to send CARD_DISABLE command\n");
1471 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
1473 struct host_cmd cmd = {
1474 .cmd = IPW_CMD_TX_POWER,
1475 .len = sizeof(*power)
1478 if (!priv || !power) {
1479 IPW_ERROR("Invalid args\n");
1483 memcpy(&cmd.param, power, sizeof(*power));
1484 if (ipw_send_cmd(priv, &cmd)) {
1485 IPW_ERROR("failed to send TX_POWER command\n");
1492 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
1494 struct ipw_rts_threshold rts_threshold = {
1495 .rts_threshold = rts,
1497 struct host_cmd cmd = {
1498 .cmd = IPW_CMD_RTS_THRESHOLD,
1499 .len = sizeof(rts_threshold)
1503 IPW_ERROR("Invalid args\n");
1507 memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
1508 if (ipw_send_cmd(priv, &cmd)) {
1509 IPW_ERROR("failed to send RTS_THRESHOLD command\n");
1516 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
1518 struct ipw_frag_threshold frag_threshold = {
1519 .frag_threshold = frag,
1521 struct host_cmd cmd = {
1522 .cmd = IPW_CMD_FRAG_THRESHOLD,
1523 .len = sizeof(frag_threshold)
1527 IPW_ERROR("Invalid args\n");
1531 memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
1532 if (ipw_send_cmd(priv, &cmd)) {
1533 IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
1540 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
1542 struct host_cmd cmd = {
1543 .cmd = IPW_CMD_POWER_MODE,
1546 u32 *param = (u32 *) (&cmd.param);
1549 IPW_ERROR("Invalid args\n");
1553 /* If on battery, set to 3, if AC set to CAM, else use the user-requested level */
1556 case IPW_POWER_BATTERY:
1557 *param = IPW_POWER_INDEX_3;
1560 *param = IPW_POWER_MODE_CAM;
1567 if (ipw_send_cmd(priv, &cmd)) {
1568 IPW_ERROR("failed to send POWER_MODE command\n");
1576 * The IPW device contains a Microwire compatible EEPROM that stores
1577 * various data like the MAC address. Usually the firmware has exclusive
1578 * access to the eeprom, but during device initialization (before the
1579 * device driver has sent the HostComplete command to the firmware) the
1580 * device driver has read access to the EEPROM by way of indirect addressing
1581 * through a couple of memory mapped registers.
1583 * The following is a simplified implementation for pulling data out of the
1584 * eeprom, along with some helper functions to find information in
1585 * the per device private data's copy of the eeprom.
1587 * NOTE: To better understand how these functions work (i.e what is a chip
1588 * select and why do we have to keep driving the eeprom clock?), read
1589 * just about any data sheet for a Microwire compatible EEPROM.
1592 /* write a 32 bit value into the indirect accessor register */
1593 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
1595 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
1597 /* the eeprom requires some time to complete the operation */
1598 udelay(p->eeprom_delay);
1603 /* perform a chip select operation */
1604 static inline void eeprom_cs(struct ipw_priv *priv)
1606 eeprom_write_reg(priv, 0);
1607 eeprom_write_reg(priv, EEPROM_BIT_CS);
1608 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1609 eeprom_write_reg(priv, EEPROM_BIT_CS);
1612 /* release (de-assert) the chip select */
1613 static inline void eeprom_disable_cs(struct ipw_priv *priv)
1615 eeprom_write_reg(priv, EEPROM_BIT_CS);
1616 eeprom_write_reg(priv, 0);
1617 eeprom_write_reg(priv, EEPROM_BIT_SK);
1620 /* push a single bit down to the eeprom */
1621 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
1623 int d = (bit ? EEPROM_BIT_DI : 0);
1624 eeprom_write_reg(p, EEPROM_BIT_CS | d);
1625 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
1628 /* push an opcode followed by an address down to the eeprom */
1629 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
1634 eeprom_write_bit(priv, 1);
1635 eeprom_write_bit(priv, op & 2);
1636 eeprom_write_bit(priv, op & 1);
1637 for (i = 7; i >= 0; i--) {
1638 eeprom_write_bit(priv, addr & (1 << i));
1642 /* pull 16 bits off the eeprom, one bit at a time */
1643 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
1648 /* Send READ Opcode */
1649 eeprom_op(priv, EEPROM_CMD_READ, addr);
1651 /* Send dummy bit */
1652 eeprom_write_reg(priv, EEPROM_BIT_CS);
1654 /* Read the 16-bit value off the eeprom one bit at a time */
1655 for (i = 0; i < 16; i++) {
1657 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1658 eeprom_write_reg(priv, EEPROM_BIT_CS);
1659 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
1660 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
1663 /* Send another dummy bit */
1664 eeprom_write_reg(priv, 0);
1665 eeprom_disable_cs(priv);
1670 /* helper function for pulling the mac address out of the private */
1671 /* data's copy of the eeprom data */
1672 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
1674 u8 *ee = (u8 *) priv->eeprom;
1675 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
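/*
 * Putting the helpers above together, fetching one 16-bit word from the
 * Microwire EEPROM amounts to (a sketch of eeprom_read_u16(), not an
 * additional code path):
 *
 *	eeprom_cs(priv);                         select the part
 *	eeprom_op(priv, EEPROM_CMD_READ, addr);  start bit, opcode, 8-bit address
 *	... sixteen clock pulses, sampling EEPROM_BIT_DO after each ...
 *	eeprom_disable_cs(priv);                 release the chip select
 *
 * eeprom_parse_mac() then simply copies six bytes starting at
 * EEPROM_MAC_ADDRESS out of the cached image filled in below.
 */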
1679 * Either the device driver (i.e. the host) or the firmware can
1680 * load eeprom data into the designated region in SRAM. If neither
1681 * happens then the FW will shutdown with a fatal error.
1683 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
1684 * bit in the designated region of shared SRAM needs to be non-zero.
1686 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
1689 u16 *eeprom = (u16 *) priv->eeprom;
1691 IPW_DEBUG_TRACE(">>\n");
1693 /* read entire contents of eeprom into private buffer */
1694 for (i = 0; i < 128; i++)
1695 eeprom[i] = eeprom_read_u16(priv, (u8) i);
1698 If the data looks correct, then copy it to our private
1699 copy. Otherwise let the firmware know to perform the operation on its own.
1702 if (priv->eeprom[EEPROM_VERSION] != 0) {
1703 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
1705 /* write the eeprom data to sram */
1706 for (i = 0; i < CX2_EEPROM_IMAGE_SIZE; i++)
1707 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
1709 /* Do not load eeprom data on fatal error or suspend */
1710 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
1712 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
1714 /* Load eeprom data on fatal error or suspend */
1715 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
1718 IPW_DEBUG_TRACE("<<\n");
1721 static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
1726 _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
1728 _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
1731 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
1733 ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
1734 CB_NUMBER_OF_ELEMENTS_SMALL *
1735 sizeof(struct command_block));
1738 static int ipw_fw_dma_enable(struct ipw_priv *priv)
1739 { /* start dma engine but no transfers yet */
1741 IPW_DEBUG_FW(">> : \n");
1744 ipw_fw_dma_reset_command_blocks(priv);
1746 /* Write CB base address */
1747 ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
1749 IPW_DEBUG_FW("<< : \n");
1753 static void ipw_fw_dma_abort(struct ipw_priv *priv)
1757 IPW_DEBUG_FW(">> :\n");
1759 //set the Stop and Abort bit
1760 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
1761 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1762 priv->sram_desc.last_cb_index = 0;
1764 IPW_DEBUG_FW("<< \n");
1767 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
1768 struct command_block *cb)
1771 CX2_SHARED_SRAM_DMA_CONTROL +
1772 (sizeof(struct command_block) * index);
1773 IPW_DEBUG_FW(">> :\n");
1775 ipw_write_indirect(priv, address, (u8 *) cb,
1776 (int)sizeof(struct command_block));
1778 IPW_DEBUG_FW("<< :\n");
1783 static int ipw_fw_dma_kick(struct ipw_priv *priv)
1788 IPW_DEBUG_FW(">> :\n");
1790 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
1791 ipw_fw_dma_write_command_block(priv, index,
1792 &priv->sram_desc.cb_list[index]);
1794 /* Enable the DMA in the CSR register */
1795 ipw_clear_bit(priv, CX2_RESET_REG,
1796 CX2_RESET_REG_MASTER_DISABLED |
1797 CX2_RESET_REG_STOP_MASTER);
1799 /* Set the Start bit. */
1800 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
1801 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1803 IPW_DEBUG_FW("<< :\n");
1807 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
1810 u32 register_value = 0;
1811 u32 cb_fields_address = 0;
1813 IPW_DEBUG_FW(">> :\n");
1814 address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1815 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
1817 /* Read the DMA control register */
1818 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
1819 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
1821 /* Print the CB values */
1822 cb_fields_address = address;
1823 register_value = ipw_read_reg32(priv, cb_fields_address);
1824 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
1826 cb_fields_address += sizeof(u32);
1827 register_value = ipw_read_reg32(priv, cb_fields_address);
1828 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
1830 cb_fields_address += sizeof(u32);
1831 register_value = ipw_read_reg32(priv, cb_fields_address);
1832 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
1835 cb_fields_address += sizeof(u32);
1836 register_value = ipw_read_reg32(priv, cb_fields_address);
1837 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
1839 IPW_DEBUG_FW("<< :\n");
1842 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
1844 u32 current_cb_address = 0;
1845 u32 current_cb_index = 0;
1847 IPW_DEBUG_FW(">> :\n");
1848 current_cb_address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1850 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL) /
1851 sizeof(struct command_block);
1853 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
1854 current_cb_index, current_cb_address);
1856 IPW_DEBUG_FW("<< :\n");
1857 return current_cb_index;
1861 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1865 int interrupt_enabled, int is_last)
1868 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
1869 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
1871 struct command_block *cb;
1872 u32 last_cb_element = 0;
1874 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
1875 src_address, dest_address, length);
1877 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
1880 last_cb_element = priv->sram_desc.last_cb_index;
1881 cb = &priv->sram_desc.cb_list[last_cb_element];
1882 priv->sram_desc.last_cb_index++;
1884 /* Calculate the new CB control word */
1885 if (interrupt_enabled)
1886 control |= CB_INT_ENABLED;
1889 control |= CB_LAST_VALID;
1893 /* Calculate the CB Element's checksum value */
1894 cb->status = control ^ src_address ^ dest_address;
1896 /* Copy the Source and Destination addresses */
1897 cb->dest_addr = dest_address;
1898 cb->source_addr = src_address;
1900 /* Copy the Control Word last */
1901 cb->control = control;
1906 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
1907 u32 src_phys, u32 dest_address, u32 length)
1909 u32 bytes_left = length;
1911 u32 dest_offset = 0;
1913 IPW_DEBUG_FW(">> \n");
1914 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
1915 src_phys, dest_address, length);
1916 while (bytes_left > CB_MAX_LENGTH) {
1917 status = ipw_fw_dma_add_command_block(priv,
1918 src_phys + src_offset,
1921 CB_MAX_LENGTH, 0, 0);
1923 IPW_DEBUG_FW_INFO(": Failed\n");
1926 IPW_DEBUG_FW_INFO(": Added new cb\n");
1928 src_offset += CB_MAX_LENGTH;
1929 dest_offset += CB_MAX_LENGTH;
1930 bytes_left -= CB_MAX_LENGTH;
1933 /* add the buffer tail */
1934 if (bytes_left > 0) {
1936 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
1937 dest_address + dest_offset,
1940 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
1944 (": Adding new cb - the buffer tail\n");
1947 IPW_DEBUG_FW("<< \n");
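/*
 * Worked example for the splitting loop above (CB_MAX_LENGTH is taken from
 * the header; the figures here only illustrate the arithmetic): with
 * CB_MAX_LENGTH == 4096, a 10000-byte chunk is queued as command blocks of
 * 4096, 4096 and 1808 bytes, each one advancing the source physical address
 * and the destination SRAM address by the same offset.
 */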
1951 static int ipw_fw_dma_wait(struct ipw_priv *priv)
1953 u32 current_index = 0;
1956 IPW_DEBUG_FW(">> : \n");
1958 current_index = ipw_fw_dma_command_block_index(priv);
1959 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
1960 (int)priv->sram_desc.last_cb_index);
1962 while (current_index < priv->sram_desc.last_cb_index) {
1964 current_index = ipw_fw_dma_command_block_index(priv);
1968 if (watchdog > 400) {
1969 IPW_DEBUG_FW_INFO("Timeout\n");
1970 ipw_fw_dma_dump_command_block(priv);
1971 ipw_fw_dma_abort(priv);
1976 ipw_fw_dma_abort(priv);
1978 /*Disable the DMA in the CSR register */
1979 ipw_set_bit(priv, CX2_RESET_REG,
1980 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1982 IPW_DEBUG_FW("<< dmaWaitSync \n");
1986 static void ipw_remove_current_network(struct ipw_priv *priv)
1988 struct list_head *element, *safe;
1989 struct ieee80211_network *network = NULL;
1990 list_for_each_safe(element, safe, &priv->ieee->network_list) {
1991 network = list_entry(element, struct ieee80211_network, list);
1992 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
1994 list_add_tail(&network->list,
1995 &priv->ieee->network_free_list);
2001 * Check that card is still alive.
2002 * Reads debug register from domain0.
2003 * If card is present, the pre-defined value (0xd55555d5) should be read back.
2007 * @return 1 if card is present, 0 otherwise
2009 static inline int ipw_alive(struct ipw_priv *priv)
2011 return ipw_read32(priv, 0x90) == 0xd55555d5;
2014 static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2020 if ((ipw_read32(priv, addr) & mask) == mask)
2024 } while (i < timeout);
2029 /* These functions load the firmware and micro code for the operation of
2030 * the ipw hardware. It assumes the buffer has all the bits for the
2031 * image and the caller is handling the memory allocation and clean up.
2034 static int ipw_stop_master(struct ipw_priv *priv)
2038 IPW_DEBUG_TRACE(">> \n");
2039 /* stop master. typical delay - 0 */
2040 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2042 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2043 CX2_RESET_REG_MASTER_DISABLED, 100);
2045 IPW_ERROR("stop master failed in 10ms\n");
2049 IPW_DEBUG_INFO("stop master %dms\n", rc);
2054 static void ipw_arc_release(struct ipw_priv *priv)
2056 IPW_DEBUG_TRACE(">> \n");
2059 ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2061 /* no one knows timing, for safety add some delay */
2075 #define IPW_FW_MAJOR_VERSION 2
2076 #define IPW_FW_MINOR_VERSION 2
2078 #define IPW_FW_MINOR(x) ((x >> 8) & 0xff)
2079 #define IPW_FW_MAJOR(x) (x & 0xff)
2081 #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
2082 IPW_FW_MAJOR_VERSION)
2084 #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2085 "." __stringify(IPW_FW_MINOR_VERSION) "-"
2087 #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2088 #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2090 #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
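/*
 * With the version macros above, IPW_FW_NAME("boot") expands to
 * "ipw-2.2-boot.fw" on the versioned branch and to "ipw2200_boot.fw" on the
 * fallback branch; ipw_get_fw() is invoked with such generated names (see
 * the IPW_FW_NAME("boot") request near the end of this section).
 */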
2093 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2095 int rc = 0, i, addr;
2099 image = (u16 *) data;
2101 IPW_DEBUG_TRACE(">> \n");
2103 rc = ipw_stop_master(priv);
2108 // spin_lock_irqsave(&priv->lock, flags);
2110 for (addr = CX2_SHARED_LOWER_BOUND;
2111 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
2112 ipw_write32(priv, addr, 0);
2115 /* no ucode (yet) */
2116 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2117 /* destroy DMA queues */
2118 /* reset sequence */
2120 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_ON);
2121 ipw_arc_release(priv);
2122 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
2126 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
2129 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
2132 /* enable ucode store */
2133 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
2134 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
2140 * Do NOT set indirect address register once and then
2141 * store data to indirect data register in the loop.
2142 * It seems very reasonable, but in this case DINO does not
2143 * accept the ucode. It is essential to set the address each time.
2145 /* load new ipw uCode */
2146 for (i = 0; i < len / 2; i++)
2147 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
2150 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2151 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2153 /* this is where the igx / win driver deviates from the VAP driver. */
2155 /* wait for alive response */
2156 for (i = 0; i < 100; i++) {
2157 /* poll for incoming data */
2158 cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
2159 if (cr & DINO_RXFIFO_DATA)
2164 if (cr & DINO_RXFIFO_DATA) {
2165 /* alive_command_response size is NOT a multiple of 4 */
2166 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2168 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2169 response_buffer[i] =
2170 ipw_read_reg32(priv, CX2_BASEBAND_RX_FIFO_READ);
2171 memcpy(&priv->dino_alive, response_buffer,
2172 sizeof(priv->dino_alive));
2173 if (priv->dino_alive.alive_command == 1
2174 && priv->dino_alive.ucode_valid == 1) {
2177 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2178 "of %02d/%02d/%02d %02d:%02d\n",
2179 priv->dino_alive.software_revision,
2180 priv->dino_alive.software_revision,
2181 priv->dino_alive.device_identifier,
2182 priv->dino_alive.device_identifier,
2183 priv->dino_alive.time_stamp[0],
2184 priv->dino_alive.time_stamp[1],
2185 priv->dino_alive.time_stamp[2],
2186 priv->dino_alive.time_stamp[3],
2187 priv->dino_alive.time_stamp[4]);
2189 IPW_DEBUG_INFO("Microcode is not alive\n");
2193 IPW_DEBUG_INFO("No alive response from DINO\n");
2197 /* disable DINO, otherwise for some reason
2198 the firmware has problems getting the alive response. */
2199 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2201 // spin_unlock_irqrestore(&priv->lock, flags);
2206 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2210 struct fw_chunk *chunk;
2211 dma_addr_t shared_phys;
2214 IPW_DEBUG_TRACE(">> : \n");
2215 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2220 memmove(shared_virt, data, len);
2223 rc = ipw_fw_dma_enable(priv);
2225 if (priv->sram_desc.last_cb_index > 0) {
2226 /* the DMA is already ready; this would be a bug. */
2232 chunk = (struct fw_chunk *)(data + offset);
2233 offset += sizeof(struct fw_chunk);
2234 /* build DMA packet and queue up for sending */
2235 /* dma to chunk->address, the chunk->length bytes from data +
2238 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2239 chunk->address, chunk->length);
2241 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2245 offset += chunk->length;
2246 } while (offset < len);
2248 /* Run the DMA and wait for the answer */
2249 rc = ipw_fw_dma_kick(priv);
2251 IPW_ERROR("dmaKick Failed\n");
2255 rc = ipw_fw_dma_wait(priv);
2257 IPW_ERROR("dmaWaitSync Failed\n");
2261 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
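/*
 * The do/while walk above treats the image handed to ipw_load_firmware() as
 * a sequence of (struct fw_chunk, payload) pairs:
 *
 *	[ fw_chunk: address, length ][ length bytes ][ fw_chunk ][ payload ] ...
 *
 * Each chunk is turned into one or more DMA command blocks that copy its
 * payload from the coherent bounce buffer (shared_phys) to chunk->address in
 * device SRAM; the chain is then kicked off and waited on.
 */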
2266 static int ipw_stop_nic(struct ipw_priv *priv)
2271 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2273 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2274 CX2_RESET_REG_MASTER_DISABLED, 500);
2276 IPW_ERROR("wait for reg master disabled failed\n");
2280 ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2285 static void ipw_start_nic(struct ipw_priv *priv)
2287 IPW_DEBUG_TRACE(">>\n");
2289 /* prvHwStartNic release ARC */
2290 ipw_clear_bit(priv, CX2_RESET_REG,
2291 CX2_RESET_REG_MASTER_DISABLED |
2292 CX2_RESET_REG_STOP_MASTER |
2293 CBD_RESET_REG_PRINCETON_RESET);
2295 /* enable power management */
2296 ipw_set_bit(priv, CX2_GP_CNTRL_RW,
2297 CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
2299 IPW_DEBUG_TRACE("<<\n");
2302 static int ipw_init_nic(struct ipw_priv *priv)
2306 IPW_DEBUG_TRACE(">>\n");
2309 /* set "initialization complete" bit to move adapter to D0 state */
2310 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2312 /* low-level PLL activation */
2313 ipw_write32(priv, CX2_READ_INT_REGISTER,
2314 CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
2316 /* wait for clock stabilization */
2317 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
2318 CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
2320 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
2322 /* assert SW reset */
2323 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
2327 /* set "initialization complete" bit to move adapter to D0 state */
2328 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2330 IPW_DEBUG_TRACE("<<\n");
2334 /* Call this function from process context; it will sleep in request_firmware.
2335 * Probe is an OK place to call it from.
2337 static int ipw_reset_nic(struct ipw_priv *priv)
2341 IPW_DEBUG_TRACE(">>\n");
2343 rc = ipw_init_nic(priv);
2345 /* Clear the 'host command active' bit... */
2346 priv->status &= ~STATUS_HCMD_ACTIVE;
2347 wake_up_interruptible(&priv->wait_command_queue);
2349 IPW_DEBUG_TRACE("<<\n");
2353 static int ipw_get_fw(struct ipw_priv *priv,
2354 const struct firmware **fw, const char *name)
2356 struct fw_header *header;
2359 /* ask firmware_class module to get the boot firmware off disk */
2360 rc = request_firmware(fw, name, &priv->pci_dev->dev);
2362 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
2366 header = (struct fw_header *)(*fw)->data;
2367 if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) {
2368 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
2370 IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION);
2374 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
2376 IPW_FW_MAJOR(header->version),
2377 IPW_FW_MINOR(header->version),
2378 (*fw)->size - sizeof(struct fw_header));
2382 #define CX2_RX_BUF_SIZE (3000)
2384 static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
2385 struct ipw_rx_queue *rxq)
2387 unsigned long flags;
2390 spin_lock_irqsave(&rxq->lock, flags);
2392 INIT_LIST_HEAD(&rxq->rx_free);
2393 INIT_LIST_HEAD(&rxq->rx_used);
2395 /* Fill the rx_used queue with _all_ of the Rx buffers */
2396 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
2397 /* In the reset function, these buffers may have been allocated
2398 * to an SKB, so we need to unmap and free potential storage */
2399 if (rxq->pool[i].skb != NULL) {
2400 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
2401 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
2402 dev_kfree_skb(rxq->pool[i].skb);
2404 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2407 /* Set us so that we have processed and used all buffers, but have
2408 * not restocked the Rx queue with fresh buffers */
2409 rxq->read = rxq->write = 0;
2410 rxq->processed = RX_QUEUE_SIZE - 1;
2411 rxq->free_count = 0;
2412 spin_unlock_irqrestore(&rxq->lock, flags);
2416 static int fw_loaded = 0;
2417 static const struct firmware *bootfw = NULL;
2418 static const struct firmware *firmware = NULL;
2419 static const struct firmware *ucode = NULL;
2422 static int ipw_load(struct ipw_priv *priv)
2425 const struct firmware *bootfw = NULL;
2426 const struct firmware *firmware = NULL;
2427 const struct firmware *ucode = NULL;
2429 int rc = 0, retries = 3;
2434 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
2438 switch (priv->ieee->iw_mode) {
2440 rc = ipw_get_fw(priv, &ucode,
2441 IPW_FW_NAME("ibss_ucode"));
2445 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
2448 #ifdef CONFIG_IPW_PROMISC
2449 case IW_MODE_MONITOR:
2450 rc = ipw_get_fw(priv, &ucode,
2451 IPW_FW_NAME("ibss_ucode"));
2455 rc = ipw_get_fw(priv, &firmware,
2456 IPW_FW_NAME("sniffer"));
2460 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
2464 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
2480 priv->rxq = ipw_rx_queue_alloc(priv);
2482 ipw_rx_queue_reset(priv, priv->rxq);
2484 IPW_ERROR("Unable to initialize Rx queue\n");
2489 /* Ensure interrupts are disabled */
2490 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2491 priv->status &= ~STATUS_INT_ENABLED;
2493 /* ack pending interrupts */
2494 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2498 rc = ipw_reset_nic(priv);
2500 IPW_ERROR("Unable to reset NIC\n");
2504 ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
2505 CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
2507 /* DMA the initial boot firmware into the device */
2508 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
2509 bootfw->size - sizeof(struct fw_header));
2511 IPW_ERROR("Unable to load boot firmware\n");
2515 /* kick start the device */
2516 ipw_start_nic(priv);
2518 /* wait for the device to finish its initial startup sequence */
2519 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2520 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2522 IPW_ERROR("device failed to boot initial fw image\n");
2525 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
2527 /* ack fw init done interrupt */
2528 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2530 /* DMA the ucode into the device */
2531 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
2532 ucode->size - sizeof(struct fw_header));
2534 IPW_ERROR("Unable to load ucode\n");
2541 /* DMA bss firmware into the device */
2542 rc = ipw_load_firmware(priv, firmware->data +
2543 sizeof(struct fw_header),
2544 firmware->size - sizeof(struct fw_header));
2546 IPW_ERROR("Unable to load firmware\n");
2550 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2552 rc = ipw_queue_reset(priv);
2554 IPW_ERROR("Unable to initialize queues\n");
2558 /* Ensure interrupts are disabled */
2559 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2561 /* kick start the device */
2562 ipw_start_nic(priv);
2564 if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
2566 IPW_WARNING("Parity error. Retrying init.\n");
2571 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
2576 /* wait for the device */
2577 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2578 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2580 IPW_ERROR("device failed to start after 500ms\n");
2583 IPW_DEBUG_INFO("device response after %dms\n", rc);
2585 /* ack fw init done interrupt */
2586 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2588 /* read eeprom data and initialize the eeprom region of sram */
2589 priv->eeprom_delay = 1;
2590 ipw_eeprom_init_sram(priv);
2592 /* enable interrupts */
2593 ipw_enable_interrupts(priv);
2595 /* Ensure our queue has valid packets */
2596 ipw_rx_queue_replenish(priv);
2598 ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
2600 /* ack pending interrupts */
2601 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2604 release_firmware(bootfw);
2605 release_firmware(ucode);
2606 release_firmware(firmware);
2612 ipw_rx_queue_free(priv, priv->rxq);
2615 ipw_tx_queue_free(priv);
2617 release_firmware(bootfw);
2619 release_firmware(ucode);
2621 release_firmware(firmware);
2624 bootfw = ucode = firmware = NULL;
2633 * Theory of operation
2635 * A queue is a circular buffer with 'Read' and 'Write' pointers.
2636 * Two empty entries are always kept in the buffer to protect against overflow.
2638 * For the Tx queue, there are low-mark and high-mark limits. If, after queuing
2639 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped. When
2640 * reclaiming packets (on the 'tx done' IRQ), if the free space becomes > high mark,
2643 * The IPW operates with six queues, one receive queue in the device's
2644 * sram, one transmit queue for sending commands to the device firmware,
2645 * and four transmit queues for data.
2647 * The four transmit queues allow for performing quality of service (QoS)
2648 * transmissions as per the 802.11 protocol. Currently, Linux does not
2649 * provide a mechanism for the user to utilize prioritized queues, so
2650 * we only utilize the first data transmit queue (queue1).
2654 * Driver allocates buffers of this size for Rx
2657 static inline int ipw_queue_space(const struct clx2_queue *q)
2659 int s = q->last_used - q->first_empty;
2662 s -= 2; /* keep some reserve to not confuse empty and full situations */
2668 static inline int ipw_queue_inc_wrap(int index, int n_bd)
2670 return (++index == n_bd) ? 0 : index;
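/*
 * Illustrative sketch only -- never called by the driver.  It walks the
 * index arithmetic above for a hypothetical 8-entry ring so the low/high
 * mark description in the theory-of-operation comment is easier to follow.
 */
static inline int ipw_queue_index_example(void)
{
	struct clx2_queue q = {
		.n_bd = 8,		/* hypothetical ring size */
		.first_empty = 6,	/* next slot the driver will fill */
		.last_used = 1,		/* oldest slot not yet reclaimed */
	};

	/* three slots are physically free; the two-entry reserve leaves 1 */
	int space = ipw_queue_space(&q);

	/* advancing an index past the last slot wraps it back to 0 */
	int next = ipw_queue_inc_wrap(7, q.n_bd);

	return space + next;	/* 1 + 0 */
}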
2674 * Initialize common DMA queue structure
2676 * @param q queue to init
2677 * @param count Number of BD's to allocate. Should be a power of 2
2678 * @param read_register Address for 'read' register
2679 * (not offset within BAR, full address)
2680 * @param write_register Address for 'write' register
2681 * (not offset within BAR, full address)
2682 * @param base_register Address for 'base' register
2683 * (not offset within BAR, full address)
2684 * @param size Address for 'size' register
2685 * (not offset within BAR, full address)
2687 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
2688 int count, u32 read, u32 write, u32 base, u32 size)
2692 q->low_mark = q->n_bd / 4;
2693 if (q->low_mark < 4)
2696 q->high_mark = q->n_bd / 8;
2697 if (q->high_mark < 2)
2700 q->first_empty = q->last_used = 0;
2704 ipw_write32(priv, base, q->dma_addr);
2705 ipw_write32(priv, size, count);
2706 ipw_write32(priv, read, 0);
2707 ipw_write32(priv, write, 0);
2709 _ipw_read32(priv, 0x90);
2712 static int ipw_queue_tx_init(struct ipw_priv *priv,
2713 struct clx2_tx_queue *q,
2714 int count, u32 read, u32 write, u32 base, u32 size)
2716 struct pci_dev *dev = priv->pci_dev;
2718 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
2720 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
2725 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
2727 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
2728 sizeof(q->bd[0]) * count);
2734 ipw_queue_init(priv, &q->q, count, read, write, base, size);
2739 * Free one TFD, the one at index [txq->q.last_used].
2740 * Do NOT advance any indexes
2745 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
2746 struct clx2_tx_queue *txq)
2748 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
2749 struct pci_dev *dev = priv->pci_dev;
2753 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
2754 /* nothing to clean up for host commands */
2758 if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) {
2759 IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks);
2760 /** @todo issue a fatal error, it is quite a serious situation */
2764 /* unmap chunks if any */
2765 for (i = 0; i < bd->u.data.num_chunks; i++) {
2766 pci_unmap_single(dev, bd->u.data.chunk_ptr[i],
2767 bd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
2768 if (txq->txb[txq->q.last_used]) {
2769 ieee80211_txb_free(txq->txb[txq->q.last_used]);
2770 txq->txb[txq->q.last_used] = NULL;
2776 * Deallocate DMA queue.
2778 * Empty queue by removing and destroying all BD's.
2784 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
2786 struct clx2_queue *q = &txq->q;
2787 struct pci_dev *dev = priv->pci_dev;
2792 /* first, empty all BD's */
2793 for (; q->first_empty != q->last_used;
2794 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
2795 ipw_queue_tx_free_tfd(priv, txq);
2798 /* free buffers belonging to queue itself */
2799 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
2803 /* zero-fill the whole structure */
2804 memset(txq, 0, sizeof(*txq));
2808 * Destroy all DMA queues and structures
2812 static void ipw_tx_queue_free(struct ipw_priv *priv)
2815 ipw_queue_tx_free(priv, &priv->txq_cmd);
2818 ipw_queue_tx_free(priv, &priv->txq[0]);
2819 ipw_queue_tx_free(priv, &priv->txq[1]);
2820 ipw_queue_tx_free(priv, &priv->txq[2]);
2821 ipw_queue_tx_free(priv, &priv->txq[3]);
2824 static inline void __maybe_wake_tx(struct ipw_priv *priv)
2826 if (netif_running(priv->net_dev)) {
2827 switch (priv->port_type) {
2828 case DCR_TYPE_MU_BSS:
2829 case DCR_TYPE_MU_IBSS:
2830 if (!(priv->status & STATUS_ASSOCIATED)) {
2834 netif_wake_queue(priv->net_dev);
2839 static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
2841 /* First 3 bytes are manufacturer */
2842 bssid[0] = priv->mac_addr[0];
2843 bssid[1] = priv->mac_addr[1];
2844 bssid[2] = priv->mac_addr[2];
2846 /* Last bytes are random */
2847 get_random_bytes(&bssid[3], ETH_ALEN - 3);
2849 bssid[0] &= 0xfe; /* clear multicast bit */
2850 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
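/*
 * Example (illustrative only): if the adapter's MAC address were
 * 00:0E:35:12:34:56, the generated ad-hoc BSSID keeps the 00:0E:35 OUI,
 * randomizes the last three bytes and comes out locally administered and
 * unicast, e.g. 02:0E:35:9C:41:7B -- the random tail differs on every call.
 */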
2853 static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
2855 struct ipw_station_entry entry;
2858 for (i = 0; i < priv->num_stations; i++) {
2859 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
2860 /* Another node is active in network */
2861 priv->missed_adhoc_beacons = 0;
2862 if (!(priv->config & CFG_STATIC_CHANNEL))
2863 /* when other nodes drop out, we drop out */
2864 priv->config &= ~CFG_ADHOC_PERSIST;
2870 if (i == MAX_STATIONS)
2871 return IPW_INVALID_STATION;
2873 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
2876 entry.support_mode = 0;
2877 memcpy(entry.mac_addr, bssid, ETH_ALEN);
2878 memcpy(priv->stations[i], bssid, ETH_ALEN);
2879 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
2880 &entry, sizeof(entry));
2881 priv->num_stations++;
2886 static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
2890 for (i = 0; i < priv->num_stations; i++)
2891 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
2894 return IPW_INVALID_STATION;
2897 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
2901 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
2902 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
2906 IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
2908 MAC_ARG(priv->assoc_request.bssid),
2909 priv->assoc_request.channel);
2911 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
2912 priv->status |= STATUS_DISASSOCIATING;
2915 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
2917 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
2918 err = ipw_send_associate(priv, &priv->assoc_request);
2920 IPW_DEBUG_HC("Attempt to send [dis]associate command "
2927 static void ipw_disassociate(void *data)
2929 ipw_send_disassociate(data, 0);
2932 static void notify_wx_assoc_event(struct ipw_priv *priv)
2934 union iwreq_data wrqu;
2935 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
2936 if (priv->status & STATUS_ASSOCIATED)
2937 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
2939 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
2940 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
2943 struct ipw_status_code {
2948 static const struct ipw_status_code ipw_status_codes[] = {
2949 {0x00, "Successful"},
2950 {0x01, "Unspecified failure"},
2951 {0x0A, "Cannot support all requested capabilities in the "
2952 "Capability information field"},
2953 {0x0B, "Reassociation denied due to inability to confirm that "
2954 "association exists"},
2955 {0x0C, "Association denied due to reason outside the scope of this "
2958 "Responding station does not support the specified authentication "
2961 "Received an Authentication frame with authentication sequence "
2962 "transaction sequence number out of expected sequence"},
2963 {0x0F, "Authentication rejected because of challenge failure"},
2964 {0x10, "Authentication rejected due to timeout waiting for next "
2965 "frame in sequence"},
2966 {0x11, "Association denied because AP is unable to handle additional "
2967 "associated stations"},
2969 "Association denied due to requesting station not supporting all "
2970 "of the datarates in the BSSBasicServiceSet Parameter"},
2972 "Association denied due to requesting station not supporting "
2973 "short preamble operation"},
2975 "Association denied due to requesting station not supporting "
2978 "Association denied due to requesting station not supporting "
2981 "Association denied due to requesting station not supporting "
2982 "short slot operation"},
2984 "Association denied due to requesting station not supporting "
2985 "DSSS-OFDM operation"},
2986 {0x28, "Invalid Information Element"},
2987 {0x29, "Group Cipher is not valid"},
2988 {0x2A, "Pairwise Cipher is not valid"},
2989 {0x2B, "AKMP is not valid"},
2990 {0x2C, "Unsupported RSN IE version"},
2991 {0x2D, "Invalid RSN IE Capabilities"},
2992 {0x2E, "Cipher suite is rejected per security policy"},
2995 #ifdef CONFIG_IPW_DEBUG
2996 static const char *ipw_get_status_code(u16 status)
2999 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3000 if (ipw_status_codes[i].status == status)
3001 return ipw_status_codes[i].reason;
3002 return "Unknown status value.";
3006 static inline void average_init(struct average *avg)
3008 memset(avg, 0, sizeof(*avg));
3011 static inline void average_add(struct average *avg, s16 val)
3013 avg->sum -= avg->entries[avg->pos];
3015 avg->entries[avg->pos++] = val;
3016 if (unlikely(avg->pos == AVG_ENTRIES)) {
3022 static inline s16 average_value(struct average *avg)
3024 if (unlikely(!avg->init)) {
3026 return avg->sum / avg->pos;
3030 return avg->sum / AVG_ENTRIES;
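/*
 * Usage sketch only -- not called by the driver.  Shows the intended use of
 * the average_* helpers above: feed in periodic samples (RSSI, noise, missed
 * beacon percentages) and read back a smoothed value at any time.
 */
static inline s16 ipw_average_example(void)
{
	struct average avg;

	average_init(&avg);
	average_add(&avg, -60);		/* two hypothetical RSSI samples */
	average_add(&avg, -70);

	/* -65 while the ring is still filling; once it has wrapped, the
	 * result is always sum / AVG_ENTRIES */
	return average_value(&avg);
}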
3033 static void ipw_reset_stats(struct ipw_priv *priv)
3035 u32 len = sizeof(u32);
3039 average_init(&priv->average_missed_beacons);
3040 average_init(&priv->average_rssi);
3041 average_init(&priv->average_noise);
3043 priv->last_rate = 0;
3044 priv->last_missed_beacons = 0;
3045 priv->last_rx_packets = 0;
3046 priv->last_tx_packets = 0;
3047 priv->last_tx_failures = 0;
3049 /* Firmware managed, reset only when NIC is restarted, so we have to
3050 * normalize on the current value */
3051 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3052 &priv->last_rx_err, &len);
3053 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3054 &priv->last_tx_failures, &len);
3056 /* Driver managed, reset with each association */
3057 priv->missed_adhoc_beacons = 0;
3058 priv->missed_beacons = 0;
3059 priv->tx_packets = 0;
3060 priv->rx_packets = 0;
3064 static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3067 u32 mask = priv->rates_mask;
3068 /* If currently associated in B mode, restrict the maximum
3069 * rate match to B rates */
3070 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3071 mask &= IEEE80211_CCK_RATES_MASK;
3073 /* TODO: Verify that the rate is supported by the current rates
3076 while (i && !(mask & i))
3079 case IEEE80211_CCK_RATE_1MB_MASK: return 1000000;
3080 case IEEE80211_CCK_RATE_2MB_MASK: return 2000000;
3081 case IEEE80211_CCK_RATE_5MB_MASK: return 5500000;
3082 case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000;
3083 case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000;
3084 case IEEE80211_CCK_RATE_11MB_MASK: return 11000000;
3085 case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000;
3086 case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000;
3087 case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000;
3088 case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000;
3089 case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000;
3090 case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000;
3093 if (priv->ieee->mode == IEEE_B)
3099 static u32 ipw_get_current_rate(struct ipw_priv *priv)
3101 u32 rate, len = sizeof(rate);
3104 if (!(priv->status & STATUS_ASSOCIATED))
3107 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3108 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3111 IPW_DEBUG_INFO("failed querying ordinals.\n");
3115 return ipw_get_max_rate(priv);
3118 case IPW_TX_RATE_1MB: return 1000000;
3119 case IPW_TX_RATE_2MB: return 2000000;
3120 case IPW_TX_RATE_5MB: return 5500000;
3121 case IPW_TX_RATE_6MB: return 6000000;
3122 case IPW_TX_RATE_9MB: return 9000000;
3123 case IPW_TX_RATE_11MB: return 11000000;
3124 case IPW_TX_RATE_12MB: return 12000000;
3125 case IPW_TX_RATE_18MB: return 18000000;
3126 case IPW_TX_RATE_24MB: return 24000000;
3127 case IPW_TX_RATE_36MB: return 36000000;
3128 case IPW_TX_RATE_48MB: return 48000000;
3129 case IPW_TX_RATE_54MB: return 54000000;
3135 #define PERFECT_RSSI (-50)
3136 #define WORST_RSSI (-85)
3137 #define IPW_STATS_INTERVAL (2 * HZ)
3138 static void ipw_gather_stats(struct ipw_priv *priv)
3140 u32 rx_err, rx_err_delta, rx_packets_delta;
3141 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3142 u32 missed_beacons_percent, missed_beacons_delta;
3144 u32 len = sizeof(u32);
3146 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3149 if (!(priv->status & STATUS_ASSOCIATED)) {
3154 /* Update the statistics */
3155 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3156 &priv->missed_beacons, &len);
3157 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3158 priv->last_missed_beacons = priv->missed_beacons;
3159 if (priv->assoc_request.beacon_interval) {
3160 missed_beacons_percent = missed_beacons_delta *
3161 (HZ * priv->assoc_request.beacon_interval) /
3162 (IPW_STATS_INTERVAL * 10);
3164 missed_beacons_percent = 0;
3166 average_add(&priv->average_missed_beacons, missed_beacons_percent);
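/*
 * Worked example (illustrative): with a typical beacon interval of 100 TU
 * (~102 ms) and IPW_STATS_INTERVAL of two seconds, roughly 20 beacons are
 * expected per sampling period, so each missed beacon costs about 5% --
 * missing two beacons in one period gives missed_beacons_percent == 10.
 */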
3168 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3169 rx_err_delta = rx_err - priv->last_rx_err;
3170 priv->last_rx_err = rx_err;
3172 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3173 tx_failures_delta = tx_failures - priv->last_tx_failures;
3174 priv->last_tx_failures = tx_failures;
3176 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3177 priv->last_rx_packets = priv->rx_packets;
3179 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3180 priv->last_tx_packets = priv->tx_packets;
3182 /* Calculate quality based on the following:
3184 * Missed beacon: 100% = 0, 0% = 70% missed
3185 * Rate: 60% = 1Mbs, 100% = Max
3186 * Rx and Tx errors represent a straight % of total Rx/Tx
3187 * RSSI: 100% = > -50, 0% = < -85
3188 * Rx errors: 100% = 0, 0% = 50% missed
3190 * The lowest computed quality is used.
3193 #define BEACON_THRESHOLD 5
3194 beacon_quality = 100 - missed_beacons_percent;
3195 if (beacon_quality < BEACON_THRESHOLD)
3198 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3199 (100 - BEACON_THRESHOLD);
3200 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3201 beacon_quality, missed_beacons_percent);
3203 priv->last_rate = ipw_get_current_rate(priv);
3204 rate_quality = priv->last_rate * 40 / ipw_get_max_rate(priv) + 60;
3205 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3206 rate_quality, priv->last_rate / 1000000);
3208 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
3209 rx_quality = 100 - (rx_err_delta * 100) /
3210 (rx_packets_delta + rx_err_delta);
3213 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
3214 rx_quality, rx_err_delta, rx_packets_delta);
3216 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
3217 tx_quality = 100 - (tx_failures_delta * 100) /
3218 (tx_packets_delta + tx_failures_delta);
3221 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
3222 tx_quality, tx_failures_delta, tx_packets_delta);
3224 rssi = average_value(&priv->average_rssi);
3225 if (rssi > PERFECT_RSSI)
3226 signal_quality = 100;
3227 else if (rssi < WORST_RSSI)
3230 signal_quality = (rssi - WORST_RSSI) * 100 /
3231 (PERFECT_RSSI - WORST_RSSI);
3232 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
3233 signal_quality, rssi);
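/*
 * Worked example (illustrative): with PERFECT_RSSI at -50 and WORST_RSSI at
 * -85, an average RSSI of -67 dBm maps to (-67 + 85) * 100 / 35 == 51%;
 * anything at -50 dBm or stronger reports 100% and anything below -85 dBm
 * reports 0%.
 */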
3235 quality = min(beacon_quality,
3237 min(tx_quality, min(rx_quality, signal_quality))));
3238 if (quality == beacon_quality)
3239 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
3241 if (quality == rate_quality)
3242 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
3244 if (quality == tx_quality)
3245 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
3247 if (quality == rx_quality)
3248 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
3250 if (quality == signal_quality)
3251 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
3254 priv->quality = quality;
3256 queue_delayed_work(priv->workqueue, &priv->gather_stats,
3257 IPW_STATS_INTERVAL);
3261 * Handle host notification packet.
3262 * Called from interrupt routine
3264 static inline void ipw_rx_notification(struct ipw_priv *priv,
3265 struct ipw_rx_notification *notif)
3267 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
3269 switch (notif->subtype) {
3270 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
3271 struct notif_association *assoc = ¬if->u.assoc;
3273 switch (assoc->state) {
3274 case CMAS_ASSOCIATED:{
3275 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3277 "associated: '%s' " MAC_FMT
3279 escape_essid(priv->essid,
3281 MAC_ARG(priv->bssid));
3283 switch (priv->ieee->iw_mode) {
3285 memcpy(priv->ieee->bssid,
3286 priv->bssid, ETH_ALEN);
3290 memcpy(priv->ieee->bssid,
3291 priv->bssid, ETH_ALEN);
3293 /* clear out the station table */
3294 priv->num_stations = 0;
3297 ("queueing adhoc check\n");
3298 queue_delayed_work(priv->
3308 priv->status &= ~STATUS_ASSOCIATING;
3309 priv->status |= STATUS_ASSOCIATED;
3311 netif_carrier_on(priv->net_dev);
3312 if (netif_queue_stopped(priv->net_dev)) {
3315 netif_wake_queue(priv->net_dev);
3318 ("starting queue\n");
3319 netif_start_queue(priv->
3323 ipw_reset_stats(priv);
3324 /* Ensure the rate is updated immediately */
3326 ipw_get_current_rate(priv);
3327 schedule_work(&priv->gather_stats);
3328 notify_wx_assoc_event(priv);
3330 /* queue_delayed_work(priv->workqueue,
3331 &priv->request_scan,
3332 SCAN_ASSOCIATED_INTERVAL);
3337 case CMAS_AUTHENTICATED:{
3339 status & (STATUS_ASSOCIATED |
3341 #ifdef CONFIG_IPW_DEBUG
3342 struct notif_authenticate *auth
3344 IPW_DEBUG(IPW_DL_NOTIF |
3347 "deauthenticated: '%s' "
3349 ": (0x%04X) - %s \n",
3354 MAC_ARG(priv->bssid),
3355 ntohs(auth->status),
3362 ~(STATUS_ASSOCIATING |
3366 netif_carrier_off(priv->
3368 netif_stop_queue(priv->net_dev);
3369 queue_work(priv->workqueue,
3370 &priv->request_scan);
3371 notify_wx_assoc_event(priv);
3375 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3377 "authenticated: '%s' " MAC_FMT
3379 escape_essid(priv->essid,
3381 MAC_ARG(priv->bssid));
3386 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3388 "disassociated: '%s' " MAC_FMT
3390 escape_essid(priv->essid,
3392 MAC_ARG(priv->bssid));
3395 ~(STATUS_DISASSOCIATING |
3396 STATUS_ASSOCIATING |
3397 STATUS_ASSOCIATED | STATUS_AUTH);
3399 netif_stop_queue(priv->net_dev);
3400 if (!(priv->status & STATUS_ROAMING)) {
3401 netif_carrier_off(priv->
3403 notify_wx_assoc_event(priv);
3405 /* Cancel any queued work ... */
3406 cancel_delayed_work(&priv->
3408 cancel_delayed_work(&priv->
3411 /* Queue up another scan... */
3412 queue_work(priv->workqueue,
3413 &priv->request_scan);
3415 cancel_delayed_work(&priv->
3418 priv->status |= STATUS_ROAMING;
3419 queue_work(priv->workqueue,
3420 &priv->request_scan);
3423 ipw_reset_stats(priv);
3428 IPW_ERROR("assoc: unknown (%d)\n",
3436 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
3437 struct notif_authenticate *auth = ¬if->u.auth;
3438 switch (auth->state) {
3439 case CMAS_AUTHENTICATED:
3440 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3441 "authenticated: '%s' " MAC_FMT " \n",
3442 escape_essid(priv->essid,
3444 MAC_ARG(priv->bssid));
3445 priv->status |= STATUS_AUTH;
3449 if (priv->status & STATUS_AUTH) {
3450 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3452 "authentication failed (0x%04X): %s\n",
3453 ntohs(auth->status),
3454 ipw_get_status_code(ntohs
3458 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3460 "deauthenticated: '%s' " MAC_FMT "\n",
3461 escape_essid(priv->essid,
3463 MAC_ARG(priv->bssid));
3465 priv->status &= ~(STATUS_ASSOCIATING |
3469 netif_carrier_off(priv->net_dev);
3470 netif_stop_queue(priv->net_dev);
3471 queue_work(priv->workqueue,
3472 &priv->request_scan);
3473 notify_wx_assoc_event(priv);
3476 case CMAS_TX_AUTH_SEQ_1:
3477 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3478 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
3480 case CMAS_RX_AUTH_SEQ_2:
3481 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3482 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
3484 case CMAS_AUTH_SEQ_1_PASS:
3485 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3486 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
3488 case CMAS_AUTH_SEQ_1_FAIL:
3489 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3490 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
3492 case CMAS_TX_AUTH_SEQ_3:
3493 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3494 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
3496 case CMAS_RX_AUTH_SEQ_4:
3497 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3498 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
3500 case CMAS_AUTH_SEQ_2_PASS:
3501 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3502 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
3504 case CMAS_AUTH_SEQ_2_FAIL:
3505 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3506 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
3509 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3510 IPW_DL_ASSOC, "TX_ASSOC\n");
3512 case CMAS_RX_ASSOC_RESP:
3513 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3514 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
3516 case CMAS_ASSOCIATED:
3517 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3518 IPW_DL_ASSOC, "ASSOCIATED\n");
3521 IPW_DEBUG_NOTIF("auth: failure - %d\n",
3528 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
3529 struct notif_channel_result *x =
3530 ¬if->u.channel_result;
3532 if (notif->size == sizeof(*x)) {
3533 IPW_DEBUG_SCAN("Scan result for channel %d\n",
3536 IPW_DEBUG_SCAN("Scan result of wrong size %d "
3537 "(should be %zd)\n",
3538 notif->size, sizeof(*x));
3543 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
3544 struct notif_scan_complete *x = ¬if->u.scan_complete;
3545 if (notif->size == sizeof(*x)) {
3547 ("Scan completed: type %d, %d channels, "
3548 "%d status\n", x->scan_type,
3549 x->num_channels, x->status);
3551 IPW_ERROR("Scan completed of wrong size %d "
3552 "(should be %zd)\n",
3553 notif->size, sizeof(*x));
3557 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3559 cancel_delayed_work(&priv->scan_check);
3561 if (!(priv->status & (STATUS_ASSOCIATED |
3562 STATUS_ASSOCIATING |
3564 STATUS_DISASSOCIATING)))
3565 queue_work(priv->workqueue, &priv->associate);
3566 else if (priv->status & STATUS_ROAMING) {
3567 /* If a scan completed and we are in roam mode, then
3568 * the scan that completed was the one requested as a
3569 * result of entering roam... so, schedule the
3571 queue_work(priv->workqueue, &priv->roam);
3572 } else if (priv->status & STATUS_SCAN_PENDING)
3573 queue_work(priv->workqueue,
3574 &priv->request_scan);
3576 priv->ieee->scans++;
3580 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
3581 struct notif_frag_length *x = ¬if->u.frag_len;
3583 if (notif->size == sizeof(*x)) {
3584 IPW_ERROR("Frag length: %d\n", x->frag_length);
3586 IPW_ERROR("Frag length of wrong size %d "
3587 "(should be %zd)\n",
3588 notif->size, sizeof(*x));
3593 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
3594 struct notif_link_deterioration *x =
3595 ¬if->u.link_deterioration;
3596 if (notif->size == sizeof(*x)) {
3597 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3598 "link deterioration: '%s' " MAC_FMT
3599 " \n", escape_essid(priv->essid,
3601 MAC_ARG(priv->bssid));
3602 memcpy(&priv->last_link_deterioration, x,
3605 IPW_ERROR("Link Deterioration of wrong size %d "
3606 "(should be %zd)\n",
3607 notif->size, sizeof(*x));
3612 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
3613 IPW_ERROR("Dino config\n");
3615 && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
3616 /* TODO: Do anything special? */
3618 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
3623 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
3624 struct notif_beacon_state *x = ¬if->u.beacon_state;
3625 if (notif->size != sizeof(*x)) {
3627 ("Beacon state of wrong size %d (should "
3628 "be %zd)\n", notif->size, sizeof(*x));
3632 if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) {
3633 if (priv->status & STATUS_SCANNING) {
3634 /* Stop scan to keep fw from getting
3636 queue_work(priv->workqueue,
3640 if (x->number > priv->missed_beacon_threshold &&
3641 priv->status & STATUS_ASSOCIATED) {
3642 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3644 "Missed beacon: %d - disassociate\n",
3646 queue_work(priv->workqueue,
3647 &priv->disassociate);
3648 } else if (x->number > priv->roaming_threshold) {
3649 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3650 "Missed beacon: %d - initiate "
3651 "roaming\n", x->number);
3652 queue_work(priv->workqueue,
3655 IPW_DEBUG_NOTIF("Missed beacon: %d\n",
3659 priv->notif_missed_beacons = x->number;
3666 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
3667 struct notif_tgi_tx_key *x = ¬if->u.tgi_tx_key;
3668 if (notif->size == sizeof(*x)) {
3669 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
3670 "0x%02x station %d\n",
3671 x->key_state, x->security_type,
3677 ("TGi Tx Key of wrong size %d (should be %zd)\n",
3678 notif->size, sizeof(*x));
3682 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
3683 struct notif_calibration *x = ¬if->u.calibration;
3685 if (notif->size == sizeof(*x)) {
3686 memcpy(&priv->calib, x, sizeof(*x));
3687 IPW_DEBUG_INFO("TODO: Calibration\n");
3692 ("Calibration of wrong size %d (should be %zd)\n",
3693 notif->size, sizeof(*x));
3697 case HOST_NOTIFICATION_NOISE_STATS:{
3698 if (notif->size == sizeof(u32)) {
3700 (u8) (notif->u.noise.value & 0xff);
3701 average_add(&priv->average_noise,
3707 ("Noise stat is wrong size %d (should be %zd)\n",
3708 notif->size, sizeof(u32));
3713 IPW_ERROR("Unknown notification: "
3714 "subtype=%d,flags=0x%2x,size=%d\n",
3715 notif->subtype, notif->flags, notif->size);
3720 * Destroys all DMA structures and initializes them again
3723 * @return error code
3725 static int ipw_queue_reset(struct ipw_priv *priv)
3728 /** @todo customize queue sizes */
3729 int nTx = 64, nTxCmd = 8;
3730 ipw_tx_queue_free(priv);
3732 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
3733 CX2_TX_CMD_QUEUE_READ_INDEX,
3734 CX2_TX_CMD_QUEUE_WRITE_INDEX,
3735 CX2_TX_CMD_QUEUE_BD_BASE,
3736 CX2_TX_CMD_QUEUE_BD_SIZE);
3738 IPW_ERROR("Tx Cmd queue init failed\n");
3742 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
3743 CX2_TX_QUEUE_0_READ_INDEX,
3744 CX2_TX_QUEUE_0_WRITE_INDEX,
3745 CX2_TX_QUEUE_0_BD_BASE, CX2_TX_QUEUE_0_BD_SIZE);
3747 IPW_ERROR("Tx 0 queue init failed\n");
3750 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
3751 CX2_TX_QUEUE_1_READ_INDEX,
3752 CX2_TX_QUEUE_1_WRITE_INDEX,
3753 CX2_TX_QUEUE_1_BD_BASE, CX2_TX_QUEUE_1_BD_SIZE);
3755 IPW_ERROR("Tx 1 queue init failed\n");
3758 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
3759 CX2_TX_QUEUE_2_READ_INDEX,
3760 CX2_TX_QUEUE_2_WRITE_INDEX,
3761 CX2_TX_QUEUE_2_BD_BASE, CX2_TX_QUEUE_2_BD_SIZE);
3763 IPW_ERROR("Tx 2 queue init failed\n");
3766 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
3767 CX2_TX_QUEUE_3_READ_INDEX,
3768 CX2_TX_QUEUE_3_WRITE_INDEX,
3769 CX2_TX_QUEUE_3_BD_BASE, CX2_TX_QUEUE_3_BD_SIZE);
3771 IPW_ERROR("Tx 3 queue init failed\n");
3775 priv->rx_bufs_min = 0;
3776 priv->rx_pend_max = 0;
3780 ipw_tx_queue_free(priv);
3785 * Reclaim Tx queue entries no more used by NIC.
3787 * When the FW advances the 'R' index, all entries between the old and
3788 * new 'R' index need to be reclaimed. As a result, some free space
3789 * forms. If there is enough free space (> low mark), wake Tx queue.
3791 * @note Need to protect against garbage in 'R' index
3795 * @return Number of used entries remaining in the queue
3797 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
3798 struct clx2_tx_queue *txq, int qindex)
3802 struct clx2_queue *q = &txq->q;
3804 hw_tail = ipw_read32(priv, q->reg_r);
3805 if (hw_tail >= q->n_bd) {
3807 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
3811 for (; q->last_used != hw_tail;
3812 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3813 ipw_queue_tx_free_tfd(priv, txq);
3817 if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
3818 __maybe_wake_tx(priv);
3820 used = q->first_empty - q->last_used;
3827 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
3830 struct clx2_tx_queue *txq = &priv->txq_cmd;
3831 struct clx2_queue *q = &txq->q;
3832 struct tfd_frame *tfd;
3834 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
3835 IPW_ERROR("No space for Tx\n");
3839 tfd = &txq->bd[q->first_empty];
3840 txq->txb[q->first_empty] = NULL;
3842 memset(tfd, 0, sizeof(*tfd));
3843 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
3844 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
3846 tfd->u.cmd.index = hcmd;
3847 tfd->u.cmd.length = len;
3848 memcpy(tfd->u.cmd.payload, buf, len);
3849 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
3850 ipw_write32(priv, q->reg_w, q->first_empty);
3851 _ipw_read32(priv, 0x90);
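/*
 * For reference: host commands built elsewhere in the driver (for example
 * the WEP key update sent by ipw_send_wep_keys() below via ipw_send_cmd())
 * are expected to end up in this command queue -- the payload is copied
 * into tfd->u.cmd.payload and the firmware is notified by advancing the
 * write index through q->reg_w.
 */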
3857 * Rx theory of operation
3859 * The host allocates 32 DMA target addresses and passes the host address
3860 * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3864 * The host/firmware share two index registers for managing the Rx buffers.
3866 * The READ index maps to the first position that the firmware may be writing
3867 * to -- the driver can read up to (but not including) this position and get
3869 * The READ index is managed by the firmware once the card is enabled.
3871 * The WRITE index maps to the last position the driver has read from -- the
3872 * position preceding WRITE is the last slot into which the firmware can place a packet.
3874 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3877 * During initialization the host sets up the READ queue position to the first
3878 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3880 * When the firmware places a packet in a buffer it will advance the READ index
3881 * and fire the RX interrupt. The driver can then query the READ index and
3882 * process as many packets as possible, moving the WRITE index forward as it
3883 * resets the Rx queue buffers with new memory.
3885 * The management in the driver is as follows:
3886 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
3887 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3888 * to replenish the ipw->rxq->rx_free.
3889 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
3890 * ipw->rxq is replenished and the READ INDEX is updated (updating the
3891 * 'processed' and 'read' driver indexes as well)
3892 * + A received packet is processed and handed to the kernel network stack,
3893 * detached from the ipw->rxq. The driver 'processed' index is updated.
3894 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
3895 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
3896 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
3897 * were enough free buffers and RX_STALLED is set it is cleared.
3902 * ipw_rx_queue_alloc() Allocates rx_free
3903 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
3904 * ipw_rx_queue_restock
3905 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
3906 * queue, updates firmware pointers, and updates
3907 * the WRITE index. If insufficient rx_free buffers
3908 * are available, schedules ipw_rx_queue_replenish
3910 * -- enable interrupts --
3911 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
3912 * READ INDEX, detaching the SKB from the pool.
3913 * Moves the packet buffer from queue to rx_used.
3914 * Calls ipw_rx_queue_restock to refill any empty
3921 * If there are slots in the RX queue that need to be restocked,
3922 * and we have free pre-allocated buffers, fill the ranks as much
3923 * as we can pulling from rx_free.
3925 * This moves the 'write' index forward to catch up with 'processed', and
3926 * also updates the memory address in the firmware to reference the new
3929 static void ipw_rx_queue_restock(struct ipw_priv *priv)
3931 struct ipw_rx_queue *rxq = priv->rxq;
3932 struct list_head *element;
3933 struct ipw_rx_mem_buffer *rxb;
3934 unsigned long flags;
3937 spin_lock_irqsave(&rxq->lock, flags);
3939 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
3940 element = rxq->rx_free.next;
3941 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3944 ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
3946 rxq->queue[rxq->write] = rxb;
3947 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
3950 spin_unlock_irqrestore(&rxq->lock, flags);
3952 /* If the pre-allocated buffer pool is dropping low, schedule to
3954 if (rxq->free_count <= RX_LOW_WATERMARK)
3955 queue_work(priv->workqueue, &priv->rx_replenish);
3957 /* If we've added more space for the firmware to place data, tell it */
3958 if (write != rxq->write)
3959 ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
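/*
 * Concrete walk-through (illustrative): the queue starts with
 * read == write == 0 and processed == RX_QUEUE_SIZE - 1 (see
 * ipw_rx_queue_reset() above and ipw_rx_queue_alloc() below).  Replenish
 * puts freshly allocated SKBs on rx_free, the restock loop above hands them
 * to slots 0, 1, 2, ... advancing 'write', and the firmware then advances
 * 'read' as it fills those slots with received packets.
 */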
3963 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
3964 * Also restock the Rx queue via ipw_rx_queue_restock.
3966 * This is called as a scheduled work item (except during initialization)
3968 static void ipw_rx_queue_replenish(void *data)
3970 struct ipw_priv *priv = data;
3971 struct ipw_rx_queue *rxq = priv->rxq;
3972 struct list_head *element;
3973 struct ipw_rx_mem_buffer *rxb;
3974 unsigned long flags;
3976 spin_lock_irqsave(&rxq->lock, flags);
3977 while (!list_empty(&rxq->rx_used)) {
3978 element = rxq->rx_used.next;
3979 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3980 rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
3982 printk(KERN_CRIT "%s: Cannot allocate SKB buffers.\n",
3983 priv->net_dev->name);
3984 /* We don't reschedule replenish work here -- we will
3985 * call the restock method and if it still needs
3986 * more buffers it will schedule replenish */
3991 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
3993 pci_map_single(priv->pci_dev, rxb->skb->data,
3994 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3996 list_add_tail(&rxb->list, &rxq->rx_free);
3999 spin_unlock_irqrestore(&rxq->lock, flags);
4001 ipw_rx_queue_restock(priv);
4004 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4005 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4006 * This free routine walks the list of POOL entries and, if the SKB is
4007 * non-NULL, it is unmapped and freed.
4009 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
4016 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4017 if (rxq->pool[i].skb != NULL) {
4018 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
4019 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4020 dev_kfree_skb(rxq->pool[i].skb);
4027 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4029 struct ipw_rx_queue *rxq;
4032 rxq = kmalloc(sizeof(*rxq), GFP_KERNEL);
4033 if (unlikely(!rxq)) {
4034 IPW_ERROR("memory allocation failed\n");
4037 memset(rxq, 0, sizeof(*rxq));
4038 spin_lock_init(&rxq->lock);
4039 INIT_LIST_HEAD(&rxq->rx_free);
4040 INIT_LIST_HEAD(&rxq->rx_used);
4042 /* Fill the rx_used queue with _all_ of the Rx buffers */
4043 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4044 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4046 /* Set us so that we have processed and used all buffers, but have
4047 * not restocked the Rx queue with fresh buffers */
4048 rxq->read = rxq->write = 0;
4049 rxq->processed = RX_QUEUE_SIZE - 1;
4050 rxq->free_count = 0;
4055 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4057 rate &= ~IEEE80211_BASIC_RATE_MASK;
4058 if (ieee_mode == IEEE_A) {
4060 case IEEE80211_OFDM_RATE_6MB:
4061 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4063 case IEEE80211_OFDM_RATE_9MB:
4064 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4066 case IEEE80211_OFDM_RATE_12MB:
4068 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4069 case IEEE80211_OFDM_RATE_18MB:
4071 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4072 case IEEE80211_OFDM_RATE_24MB:
4074 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4075 case IEEE80211_OFDM_RATE_36MB:
4077 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4078 case IEEE80211_OFDM_RATE_48MB:
4080 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4081 case IEEE80211_OFDM_RATE_54MB:
4083 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4091 case IEEE80211_CCK_RATE_1MB:
4092 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
4093 case IEEE80211_CCK_RATE_2MB:
4094 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
4095 case IEEE80211_CCK_RATE_5MB:
4096 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
4097 case IEEE80211_CCK_RATE_11MB:
4098 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4101 /* If we are limited to B modulations, bail at this point */
4102 if (ieee_mode == IEEE_B)
4107 case IEEE80211_OFDM_RATE_6MB:
4108 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
4109 case IEEE80211_OFDM_RATE_9MB:
4110 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
4111 case IEEE80211_OFDM_RATE_12MB:
4112 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4113 case IEEE80211_OFDM_RATE_18MB:
4114 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4115 case IEEE80211_OFDM_RATE_24MB:
4116 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4117 case IEEE80211_OFDM_RATE_36MB:
4118 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4119 case IEEE80211_OFDM_RATE_48MB:
4120 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4121 case IEEE80211_OFDM_RATE_54MB:
4122 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4128 static int ipw_compatible_rates(struct ipw_priv *priv,
4129 const struct ieee80211_network *network,
4130 struct ipw_supported_rates *rates)
4134 memset(rates, 0, sizeof(*rates));
4135 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
4136 rates->num_rates = 0;
4137 for (i = 0; i < num_rates; i++) {
4138 if (!ipw_is_rate_in_mask
4139 (priv, network->mode, network->rates[i])) {
4140 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4141 network->rates[i], priv->rates_mask);
4145 rates->supported_rates[rates->num_rates++] = network->rates[i];
4149 min(network->rates_ex_len, (u8) (IPW_MAX_RATES - num_rates));
4150 for (i = 0; i < num_rates; i++) {
4151 if (!ipw_is_rate_in_mask
4152 (priv, network->mode, network->rates_ex[i])) {
4153 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4154 network->rates_ex[i], priv->rates_mask);
4158 rates->supported_rates[rates->num_rates++] =
4159 network->rates_ex[i];
4162 return rates->num_rates;
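/*
 * Example (illustrative): if an 802.11g AP advertises 1, 2, 5.5, 11, 6, 9
 * and 12 Mb but the user has masked everything above 11 Mb out of
 * rates_mask, only the four CCK rates survive the filtering above and
 * num_rates comes back as 4.
 */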
4165 static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
4166 const struct ipw_supported_rates *src)
4169 for (i = 0; i < src->num_rates; i++)
4170 dest->supported_rates[i] = src->supported_rates[i];
4171 dest->num_rates = src->num_rates;
4174 /* TODO: Look at sniffed packets in the air to determine if the basic rate
4175 * mask should ever be used -- right now all callers to add the scan rates are
4176 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
4177 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
4178 u8 modulation, u32 rate_mask)
4180 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4181 IEEE80211_BASIC_RATE_MASK : 0;
4183 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
4184 rates->supported_rates[rates->num_rates++] =
4185 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
4187 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
4188 rates->supported_rates[rates->num_rates++] =
4189 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
4191 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
4192 rates->supported_rates[rates->num_rates++] = basic_mask |
4193 IEEE80211_CCK_RATE_5MB;
4195 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
4196 rates->supported_rates[rates->num_rates++] = basic_mask |
4197 IEEE80211_CCK_RATE_11MB;
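/*
 * Example (illustrative): supported-rate bytes are expressed in 500 kbps
 * units with the high bit flagging a "basic" rate, so with CCK modulation
 * and all four CCK rates enabled in rate_mask the helper above emits
 * 0x82 (1 Mb, basic), 0x84 (2 Mb, basic), 0x0B (5.5 Mb) and 0x16 (11 Mb);
 * ipw_add_ofdm_scan_rates() below fills in the OFDM rates the same way.
 */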
4200 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
4201 u8 modulation, u32 rate_mask)
4203 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4204 IEEE80211_BASIC_RATE_MASK : 0;
4206 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
4207 rates->supported_rates[rates->num_rates++] = basic_mask |
4208 IEEE80211_OFDM_RATE_6MB;
4210 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
4211 rates->supported_rates[rates->num_rates++] =
4212 IEEE80211_OFDM_RATE_9MB;
4214 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
4215 rates->supported_rates[rates->num_rates++] = basic_mask |
4216 IEEE80211_OFDM_RATE_12MB;
4218 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
4219 rates->supported_rates[rates->num_rates++] =
4220 IEEE80211_OFDM_RATE_18MB;
4222 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
4223 rates->supported_rates[rates->num_rates++] = basic_mask |
4224 IEEE80211_OFDM_RATE_24MB;
4226 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
4227 rates->supported_rates[rates->num_rates++] =
4228 IEEE80211_OFDM_RATE_36MB;
4230 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
4231 rates->supported_rates[rates->num_rates++] =
4232 IEEE80211_OFDM_RATE_48MB;
4234 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
4235 rates->supported_rates[rates->num_rates++] =
4236 IEEE80211_OFDM_RATE_54MB;
4239 struct ipw_network_match {
4240 struct ieee80211_network *network;
4241 struct ipw_supported_rates rates;
4244 static int ipw_best_network(struct ipw_priv *priv,
4245 struct ipw_network_match *match,
4246 struct ieee80211_network *network, int roaming)
4248 struct ipw_supported_rates rates;
4250 /* Verify that this network's capability is compatible with the
4251 * current mode (AdHoc or Infrastructure) */
4252 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
4253 !(network->capability & WLAN_CAPABILITY_ESS)) ||
4254 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4255 !(network->capability & WLAN_CAPABILITY_IBSS))) {
4256 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
4257 "capability mismatch.\n",
4258 escape_essid(network->ssid, network->ssid_len),
4259 MAC_ARG(network->bssid));
4263 /* If we do not have an ESSID for this AP, we cannot associate with
4265 if (network->flags & NETWORK_EMPTY_ESSID) {
4266 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4267 "because of hidden ESSID.\n",
4268 escape_essid(network->ssid, network->ssid_len),
4269 MAC_ARG(network->bssid));
4273 if (unlikely(roaming)) {
4274 /* If we are roaming, check whether this is a valid
4275 * network to try to roam to */
4276 if ((network->ssid_len != match->network->ssid_len) ||
4277 memcmp(network->ssid, match->network->ssid,
4278 network->ssid_len)) {
4279 IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
4280 "because of non-network ESSID.\n",
4281 escape_essid(network->ssid,
4283 MAC_ARG(network->bssid));
4287 /* If an ESSID has been configured then compare the broadcast
4289 if ((priv->config & CFG_STATIC_ESSID) &&
4290 ((network->ssid_len != priv->essid_len) ||
4291 memcmp(network->ssid, priv->essid,
4292 min(network->ssid_len, priv->essid_len)))) {
4293 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4295 escape_essid(network->ssid, network->ssid_len),
4297 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4298 "because of ESSID mismatch: '%s'.\n",
4299 escaped, MAC_ARG(network->bssid),
4300 escape_essid(priv->essid,
4306 /* If the currently matched network has a stronger signal than this
4307 * one, don't bother testing everything else. */
4308 if (match->network && match->network->stats.rssi > network->stats.rssi) {
4309 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4311 escape_essid(network->ssid, network->ssid_len),
4313 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
4314 "'%s (" MAC_FMT ")' has a stronger signal.\n",
4315 escaped, MAC_ARG(network->bssid),
4316 escape_essid(match->network->ssid,
4317 match->network->ssid_len),
4318 MAC_ARG(match->network->bssid));
4322 /* If this network has already had an association attempt within the
4323 * last 5 seconds, do not try to associate again... */
4324 if (network->last_associate &&
4325 time_after(network->last_associate + (HZ * 5UL), jiffies)) {
4326 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4327 "because of storming (%lu since last "
4328 "assoc attempt).\n",
4329 escape_essid(network->ssid, network->ssid_len),
4330 MAC_ARG(network->bssid),
4331 (jiffies - network->last_associate) / HZ);
4335 /* Now go through and see if the requested network is valid... */
4336 if (priv->ieee->scan_age != 0 &&
4337 jiffies - network->last_scanned > priv->ieee->scan_age) {
4338 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4339 "because of age: %lums.\n",
4340 escape_essid(network->ssid, network->ssid_len),
4341 MAC_ARG(network->bssid),
4342 (jiffies - network->last_scanned) / (HZ / 100));
4346 if ((priv->config & CFG_STATIC_CHANNEL) &&
4347 (network->channel != priv->channel)) {
4348 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4349 "because of channel mismatch: %d != %d.\n",
4350 escape_essid(network->ssid, network->ssid_len),
4351 MAC_ARG(network->bssid),
4352 network->channel, priv->channel);
4356 /* Verify privacy compatibility */
4357 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
4358 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
4359 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4360 "because of privacy mismatch: %s != %s.\n",
4361 escape_essid(network->ssid, network->ssid_len),
4362 MAC_ARG(network->bssid),
4363 priv->capability & CAP_PRIVACY_ON ? "on" :
4365 network->capability &
4366 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
4370 if ((priv->config & CFG_STATIC_BSSID) &&
4371 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
4372 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4373 "because of BSSID mismatch: " MAC_FMT ".\n",
4374 escape_essid(network->ssid, network->ssid_len),
4375 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
4379 /* Filter out any incompatible freq / mode combinations */
4380 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
4381 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4382 "because of invalid frequency/mode "
4384 escape_essid(network->ssid, network->ssid_len),
4385 MAC_ARG(network->bssid));
4389 ipw_compatible_rates(priv, network, &rates);
4390 if (rates.num_rates == 0) {
4391 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4392 "because of no compatible rates.\n",
4393 escape_essid(network->ssid, network->ssid_len),
4394 MAC_ARG(network->bssid));
4398 /* TODO: Perform any further minimal comparative tests. We do not
4399 * want to put too much policy logic here; intelligent scan selection
4400 * should occur within a generic IEEE 802.11 user space tool. */
4402 /* Set up 'new' AP to this network */
4403 ipw_copy_rates(&match->rates, &rates);
4404 match->network = network;
4406 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
4407 escape_essid(network->ssid, network->ssid_len),
4408 MAC_ARG(network->bssid));
4413 static void ipw_adhoc_create(struct ipw_priv *priv,
4414 struct ieee80211_network *network)
4417 * For the purposes of scanning, we can set our wireless mode
4418 * to trigger scans across combinations of bands, but when it
4419 * comes to creating a new ad-hoc network, we have to tell the FW
4420 * exactly which band to use.
4422 * We also have the possibility of an invalid channel for the
4423 * chosen band. Attempting to create a new ad-hoc network
4424 * with an invalid channel for the wireless mode will trigger a
4427 network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
4428 if (network->mode) {
4429 network->channel = priv->channel;
4431 IPW_WARNING("Overriding invalid channel\n");
4432 if (priv->ieee->mode & IEEE_A) {
4433 network->mode = IEEE_A;
4434 priv->channel = band_a_active_channel[0];
4435 } else if (priv->ieee->mode & IEEE_G) {
4436 network->mode = IEEE_G;
4437 priv->channel = band_b_active_channel[0];
4439 network->mode = IEEE_B;
4440 priv->channel = band_b_active_channel[0];
4444 network->channel = priv->channel;
4445 priv->config |= CFG_ADHOC_PERSIST;
4446 ipw_create_bssid(priv, network->bssid);
4447 network->ssid_len = priv->essid_len;
4448 memcpy(network->ssid, priv->essid, priv->essid_len);
4449 memset(&network->stats, 0, sizeof(network->stats));
4450 network->capability = WLAN_CAPABILITY_IBSS;
4451 if (priv->capability & CAP_PRIVACY_ON)
4452 network->capability |= WLAN_CAPABILITY_PRIVACY;
4453 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
4454 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
4455 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
4456 memcpy(network->rates_ex,
4457 &priv->rates.supported_rates[network->rates_len],
4458 network->rates_ex_len);
4459 network->last_scanned = 0;
4461 network->last_associate = 0;
4462 network->time_stamp[0] = 0;
4463 network->time_stamp[1] = 0;
4464 network->beacon_interval = 100; /* Default */
4465 network->listen_interval = 10; /* Default */
4466 network->atim_window = 0; /* Default */
4467 #ifdef CONFIG_IEEE80211_WPA
4468 network->wpa_ie_len = 0;
4469 network->rsn_ie_len = 0;
4470 #endif /* CONFIG_IEEE80211_WPA */
4473 static void ipw_send_wep_keys(struct ipw_priv *priv)
4475 struct ipw_wep_key *key;
4477 struct host_cmd cmd = {
4478 .cmd = IPW_CMD_WEP_KEY,
4482 key = (struct ipw_wep_key *)&cmd.param;
4483 key->cmd_id = DINO_CMD_WEP_KEY;
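/* Walk all four WEP key slots; bit i of priv->sec.flags marks slot i
 * as holding a configured key whose data is copied into the command
 * payload before the command is sent. */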
4486 for (i = 0; i < 4; i++) {
4488 if (!(priv->sec.flags & (1 << i))) {
4491 key->key_size = priv->sec.key_sizes[i];
4492 memcpy(key->key, priv->sec.keys[i], key->key_size);
4495 if (ipw_send_cmd(priv, &cmd)) {
4496 IPW_ERROR("failed to send WEP_KEY command\n");
4502 static void ipw_adhoc_check(void *data)
4504 struct ipw_priv *priv = data;
4506 if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
4507 !(priv->config & CFG_ADHOC_PERSIST)) {
4508 IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
4509 ipw_remove_current_network(priv);
4510 ipw_disassociate(priv);
4514 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
4515 priv->assoc_request.beacon_interval);
4518 #ifdef CONFIG_IPW_DEBUG
4519 static void ipw_debug_config(struct ipw_priv *priv)
4521 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
4522 "[CFG 0x%08X]\n", priv->config);
4523 if (priv->config & CFG_STATIC_CHANNEL)
4524 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
4526 IPW_DEBUG_INFO("Channel unlocked.\n");
4527 if (priv->config & CFG_STATIC_ESSID)
4528 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
4529 escape_essid(priv->essid, priv->essid_len));
4531 IPW_DEBUG_INFO("ESSID unlocked.\n");
4532 if (priv->config & CFG_STATIC_BSSID)
4533 IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n", MAC_ARG(priv->bssid));
4535 IPW_DEBUG_INFO("BSSID unlocked.\n");
4536 if (priv->capability & CAP_PRIVACY_ON)
4537 IPW_DEBUG_INFO("PRIVACY on\n");
4539 IPW_DEBUG_INFO("PRIVACY off\n");
4540 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
4543 #define ipw_debug_config(x) do {} while (0)
4546 static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4547 struct ieee80211_network *network)
4549 /* TODO: Verify that this works... */
4550 struct ipw_fixed_rate fr = {
4551 .tx_rates = priv->rates_mask
4556 /* Identify 'current FW band' and match it with the fixed
4559 switch (priv->ieee->freq_band) {
4560 case IEEE80211_52GHZ_BAND: /* A only */
4562 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
4563 /* Invalid fixed rate mask */
4568 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
4571 default: /* 2.4 GHz or Mixed */
4573 if (network->mode == IEEE_B) {
4574 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
4575 /* Invalid fixed rate mask */
4582 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
4583 IEEE80211_OFDM_RATES_MASK)) {
4584 /* Invalid fixed rate mask */
4589 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
4590 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
4591 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
4594 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
4595 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
4596 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
4599 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
4600 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
4601 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
4604 fr.tx_rates |= mask;
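/* Fold the remapped 6/9/12 Mb OFDM bits back into the rate mask
 * before it is written to the fixed-rate override area below. */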
4608 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
4609 ipw_write_reg32(priv, reg, *(u32 *) & fr);
4612 static int ipw_associate_network(struct ipw_priv *priv,
4613 struct ieee80211_network *network,
4614 struct ipw_supported_rates *rates, int roaming)
4618 if (priv->config & CFG_FIXED_RATE)
4619 ipw_set_fixed_rate(priv, network);
4621 if (!(priv->config & CFG_STATIC_ESSID)) {
4622 priv->essid_len = min(network->ssid_len,
4623 (u8) IW_ESSID_MAX_SIZE);
4624 memcpy(priv->essid, network->ssid, priv->essid_len);
4627 network->last_associate = jiffies;
4629 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
4630 priv->assoc_request.channel = network->channel;
4631 if ((priv->capability & CAP_PRIVACY_ON) &&
4632 (priv->capability & CAP_SHARED_KEY)) {
4633 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
4634 priv->assoc_request.auth_key = priv->sec.active_key;
4636 priv->assoc_request.auth_type = AUTH_OPEN;
4637 priv->assoc_request.auth_key = 0;
4640 if (priv->capability & CAP_PRIVACY_ON)
4641 ipw_send_wep_keys(priv);
4644 * It is valid for our ieee device to support multiple modes, but
4645 * when it comes to associating to a given network we have to choose
4648 if (network->mode & priv->ieee->mode & IEEE_A)
4649 priv->assoc_request.ieee_mode = IPW_A_MODE;
4650 else if (network->mode & priv->ieee->mode & IEEE_G)
4651 priv->assoc_request.ieee_mode = IPW_G_MODE;
4652 else if (network->mode & priv->ieee->mode & IEEE_B)
4653 priv->assoc_request.ieee_mode = IPW_B_MODE;
4655 IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
4656 "802.11%c [%d], enc=%s%s%s%c%c\n",
4657 roaming ? "Rea" : "A",
4658 escape_essid(priv->essid, priv->essid_len),
4660 ipw_modes[priv->assoc_request.ieee_mode],
4662 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
4663 priv->capability & CAP_PRIVACY_ON ?
4664 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
4666 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
4667 priv->capability & CAP_PRIVACY_ON ?
4668 '1' + priv->sec.active_key : '.',
4669 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
4671 priv->assoc_request.beacon_interval = network->beacon_interval;
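/* A zero TSF here means this is an ad-hoc network we are creating
 * ourselves (ipw_adhoc_create leaves time_stamp[] zeroed), so the
 * firmware is told to start the IBSS rather than join an existing one. */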
4672 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
4673 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
4674 priv->assoc_request.assoc_type = HC_IBSS_START;
4675 priv->assoc_request.assoc_tsf_msw = 0;
4676 priv->assoc_request.assoc_tsf_lsw = 0;
4678 if (unlikely(roaming))
4679 priv->assoc_request.assoc_type = HC_REASSOCIATE;
4681 priv->assoc_request.assoc_type = HC_ASSOCIATE;
4682 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
4683 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
4686 memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
4688 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
4689 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
4690 priv->assoc_request.atim_window = network->atim_window;
4692 memcpy(&priv->assoc_request.dest, network->bssid, ETH_ALEN);
4693 priv->assoc_request.atim_window = 0;
4696 priv->assoc_request.capability = network->capability;
4697 priv->assoc_request.listen_interval = network->listen_interval;
4699 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
4701 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
4705 rates->ieee_mode = priv->assoc_request.ieee_mode;
4706 rates->purpose = IPW_RATE_CONNECT;
4707 ipw_send_supported_rates(priv, rates);
4709 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
4710 priv->sys_config.dot11g_auto_detection = 1;
4712 priv->sys_config.dot11g_auto_detection = 0;
4713 err = ipw_send_system_config(priv, &priv->sys_config);
4715 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
4719 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
4720 err = ipw_set_sensitivity(priv, network->stats.rssi);
4722 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4727 * If preemption is enabled, it is possible for the association
4728 * to complete before we return from ipw_send_associate. Therefore
4729 * we have to be sure to update our private data first.
4731 priv->channel = network->channel;
4732 memcpy(priv->bssid, network->bssid, ETH_ALEN);
4733 priv->status |= STATUS_ASSOCIATING;
4734 priv->status &= ~STATUS_SECURITY_UPDATED;
4736 priv->assoc_network = network;
4738 err = ipw_send_associate(priv, &priv->assoc_request);
4740 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4744 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
4745 escape_essid(priv->essid, priv->essid_len),
4746 MAC_ARG(priv->bssid));
4751 static void ipw_roam(void *data)
4753 struct ipw_priv *priv = data;
4754 struct ieee80211_network *network = NULL;
4755 struct ipw_network_match match = {
4756 .network = priv->assoc_network
4759 /* The roaming process is as follows:
4761 * 1. Missed beacon threshold triggers the roaming process by
4762 * setting the status ROAM bit and requesting a scan.
4763 * 2. When the scan completes, it schedules the ROAM work
4764 * 3. The ROAM work looks at all of the known networks for one that
4765 * is a better network than the currently associated. If none
4766 * found, the ROAM process is over (ROAM bit cleared)
4767 * 4. If a better network is found, a disassociation request is
4769 * 5. When the disassociation completes, the roam work is again
4770 * scheduled. The second time through, the driver is no longer
4771 * associated, and the newly selected network is sent an
4772 * association request.
4773 * 6. At this point, the roaming process is complete and the ROAM
4774 * status bit is cleared.
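*
* In short: STATUS_ROAMING gates the whole sequence, while
* STATUS_ASSOCIATED distinguishes the first pass (pick a better AP
* and disassociate) from the second pass (associate to it).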
4777 /* If we are no longer associated, and the roaming bit is no longer
4778 * set, then we are not actively roaming, so just return */
4779 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
4782 if (priv->status & STATUS_ASSOCIATED) {
4783 /* First pass through ROAM process -- look for a better
4785 u8 rssi = priv->assoc_network->stats.rssi;
4786 priv->assoc_network->stats.rssi = -128;
4787 list_for_each_entry(network, &priv->ieee->network_list, list) {
4788 if (network != priv->assoc_network)
4789 ipw_best_network(priv, &match, network, 1);
4791 priv->assoc_network->stats.rssi = rssi;
4793 if (match.network == priv->assoc_network) {
4794 IPW_DEBUG_ASSOC("No better APs in this network to "
4796 priv->status &= ~STATUS_ROAMING;
4797 ipw_debug_config(priv);
4801 ipw_send_disassociate(priv, 1);
4802 priv->assoc_network = match.network;
4807 /* Second pass through ROAM process -- request association */
4808 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
4809 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
4810 priv->status &= ~STATUS_ROAMING;
4813 static void ipw_associate(void *data)
4815 struct ipw_priv *priv = data;
4817 struct ieee80211_network *network = NULL;
4818 struct ipw_network_match match = {
4821 struct ipw_supported_rates *rates;
4822 struct list_head *element;
4824 if (!(priv->config & CFG_ASSOCIATE) &&
4825 !(priv->config & (CFG_STATIC_ESSID |
4826 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
4827 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
4831 list_for_each_entry(network, &priv->ieee->network_list, list)
4832 ipw_best_network(priv, &match, network, 0);
4834 network = match.network;
4835 rates = &match.rates;
4837 if (network == NULL &&
4838 priv->ieee->iw_mode == IW_MODE_ADHOC &&
4839 priv->config & CFG_ADHOC_CREATE &&
4840 priv->config & CFG_STATIC_ESSID &&
4841 !list_empty(&priv->ieee->network_free_list)) {
4842 element = priv->ieee->network_free_list.next;
4843 network = list_entry(element, struct ieee80211_network, list);
4844 ipw_adhoc_create(priv, network);
4845 rates = &priv->rates;
4847 list_add_tail(&network->list, &priv->ieee->network_list);
4850 /* If we reached the end of the list, then we don't have any valid matching APs */
4853 ipw_debug_config(priv);
4855 queue_delayed_work(priv->workqueue, &priv->request_scan,
4861 ipw_associate_network(priv, network, rates, 0);
4864 static inline void ipw_handle_data_packet(struct ipw_priv *priv,
4865 struct ipw_rx_mem_buffer *rxb,
4866 struct ieee80211_rx_stats *stats)
4868 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
4870 /* We received data from the HW, so stop the watchdog */
4871 priv->net_dev->trans_start = jiffies;
4873 /* We only process data packets if the
4874 * interface is open */
4875 if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) >
4876 skb_tailroom(rxb->skb))) {
4877 priv->ieee->stats.rx_errors++;
4878 priv->wstats.discard.misc++;
4879 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
4881 } else if (unlikely(!netif_running(priv->net_dev))) {
4882 priv->ieee->stats.rx_dropped++;
4883 priv->wstats.discard.misc++;
4884 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
4888 /* Advance skb->data to the start of the actual payload */
4889 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
4891 /* Set the size of the skb to the size of the frame */
4892 skb_put(rxb->skb, pkt->u.frame.length);
4894 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
4896 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
4897 priv->ieee->stats.rx_errors++;
4898 else /* ieee80211_rx succeeded, so it now owns the SKB */
4903 * Main entry function for receiving a packet with 80211 headers. This
4904 * should be called whenever the FW has notified us that there is a new
4905 * skb in the receive queue.
4907 static void ipw_rx(struct ipw_priv *priv)
4909 struct ipw_rx_mem_buffer *rxb;
4910 struct ipw_rx_packet *pkt;
4911 struct ieee80211_hdr_4addr *header;
4915 r = ipw_read32(priv, CX2_RX_READ_INDEX);
4916 w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
4917 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
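/* Start one entry past the last slot we processed and walk the
 * ring toward the hardware read index 'r'. */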
4920 rxb = priv->rxq->queue[i];
4921 #ifdef CONFIG_IPW_DEBUG
4922 if (unlikely(rxb == NULL)) {
4923 printk(KERN_CRIT "Queue not allocated!\n");
4927 priv->rxq->queue[i] = NULL;
4929 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4931 PCI_DMA_FROMDEVICE);
4933 pkt = (struct ipw_rx_packet *)rxb->skb->data;
4934 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
4935 pkt->header.message_type,
4936 pkt->header.rx_seq_num, pkt->header.control_bits);
4938 switch (pkt->header.message_type) {
4939 case RX_FRAME_TYPE: /* 802.11 frame */ {
4940 struct ieee80211_rx_stats stats = {
4941 .rssi = pkt->u.frame.rssi_dbm -
4943 .signal = pkt->u.frame.signal,
4944 .rate = pkt->u.frame.rate,
4945 .mac_time = jiffies,
4947 pkt->u.frame.received_channel,
4950 control & (1 << 0)) ?
4951 IEEE80211_24GHZ_BAND :
4952 IEEE80211_52GHZ_BAND,
4953 .len = pkt->u.frame.length,
4956 if (stats.rssi != 0)
4957 stats.mask |= IEEE80211_STATMASK_RSSI;
4958 if (stats.signal != 0)
4959 stats.mask |= IEEE80211_STATMASK_SIGNAL;
4960 if (stats.rate != 0)
4961 stats.mask |= IEEE80211_STATMASK_RATE;
4965 #ifdef CONFIG_IPW_PROMISC
4966 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4967 ipw_handle_data_packet(priv, rxb,
4974 (struct ieee80211_hdr_4addr *)(rxb->skb->
4977 /* TODO: Check Ad-Hoc dest/source and make sure
4978 * that we are actually parsing these packets
4979 * correctly -- we should probably use the
4980 * frame control of the packet and disregard
4981 * the current iw_mode */
4982 switch (priv->ieee->iw_mode) {
4985 !memcmp(header->addr1,
4986 priv->net_dev->dev_addr,
4988 !memcmp(header->addr3,
4989 priv->bssid, ETH_ALEN) ||
4990 is_broadcast_ether_addr(header->
4992 || is_multicast_ether_addr(header->
4999 !memcmp(header->addr3,
5000 priv->bssid, ETH_ALEN) ||
5001 !memcmp(header->addr1,
5002 priv->net_dev->dev_addr,
5004 is_broadcast_ether_addr(header->
5006 || is_multicast_ether_addr(header->
5011 if (network_packet && priv->assoc_network) {
5012 priv->assoc_network->stats.rssi =
5014 average_add(&priv->average_rssi,
5016 priv->last_rx_rssi = stats.rssi;
5019 IPW_DEBUG_RX("Frame: len=%u\n",
5020 pkt->u.frame.length);
5022 if (pkt->u.frame.length < frame_hdr_len(header)) {
5024 ("Received packet is too small. "
5026 priv->ieee->stats.rx_errors++;
5027 priv->wstats.discard.misc++;
5031 switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
5032 case IEEE80211_FTYPE_MGMT:
5033 ieee80211_rx_mgt(priv->ieee, header,
5035 if (priv->ieee->iw_mode == IW_MODE_ADHOC
5038 (header->frame_ctl) ==
5039 IEEE80211_STYPE_PROBE_RESP)
5042 (header->frame_ctl) ==
5043 IEEE80211_STYPE_BEACON))
5044 && !memcmp(header->addr3,
5045 priv->bssid, ETH_ALEN))
5046 ipw_add_station(priv,
5050 case IEEE80211_FTYPE_CTL:
5053 case IEEE80211_FTYPE_DATA:
5055 ipw_handle_data_packet(priv,
5059 IPW_DEBUG_DROP("Dropping: "
5074 case RX_HOST_NOTIFICATION_TYPE:{
5076 ("Notification: subtype=%02X flags=%02X size=%d\n",
5077 pkt->u.notification.subtype,
5078 pkt->u.notification.flags,
5079 pkt->u.notification.size);
5080 ipw_rx_notification(priv, &pkt->u.notification);
5085 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
5086 pkt->header.message_type);
5090 /* For now we just don't re-use anything. We can tweak this
5091 * later to try and re-use notification packets and SKBs that
5092 * fail to Rx correctly */
5093 if (rxb->skb != NULL) {
5094 dev_kfree_skb_any(rxb->skb);
5098 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
5099 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5100 list_add_tail(&rxb->list, &priv->rxq->rx_used);
5102 i = (i + 1) % RX_QUEUE_SIZE;
5105 /* Backtrack one entry */
5106 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
5108 ipw_rx_queue_restock(priv);
5111 static void ipw_abort_scan(struct ipw_priv *priv)
5115 if (priv->status & STATUS_SCAN_ABORTING) {
5116 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5119 priv->status |= STATUS_SCAN_ABORTING;
5121 err = ipw_send_scan_abort(priv);
5123 IPW_DEBUG_HC("Request to abort scan failed.\n");
5126 static int ipw_request_scan(struct ipw_priv *priv)
5128 struct ipw_scan_request_ext scan;
5129 int channel_index = 0;
5130 int i, err, scan_type;
5132 if (priv->status & STATUS_EXIT_PENDING) {
5133 IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5134 priv->status |= STATUS_SCAN_PENDING;
5138 if (priv->status & STATUS_SCANNING) {
5139 IPW_DEBUG_HC("Concurrent scan requested. Aborting first.\n");
5140 priv->status |= STATUS_SCAN_PENDING;
5141 ipw_abort_scan(priv);
5145 if (priv->status & STATUS_SCAN_ABORTING) {
5146 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5147 priv->status |= STATUS_SCAN_PENDING;
5151 if (priv->status & STATUS_RF_KILL_MASK) {
5152 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5153 priv->status |= STATUS_SCAN_PENDING;
5157 memset(&scan, 0, sizeof(scan));
5159 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20;
5160 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20;
5161 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20;
5163 scan.full_scan_index = ieee80211_get_scans(priv->ieee);
5164 /* If we are roaming, then make this a directed scan for the current
5165 * network. Otherwise, ensure that every other scan is a fast
5166 * channel hop scan */
5167 if ((priv->status & STATUS_ROAMING)
5168 || (!(priv->status & STATUS_ASSOCIATED)
5169 && (priv->config & CFG_STATIC_ESSID)
5170 && (scan.full_scan_index % 2))) {
5171 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5173 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
5177 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
5179 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
5182 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5183 int start = channel_index;
5184 for (i = 0; i < MAX_A_CHANNELS; i++) {
5185 if (band_a_active_channel[i] == 0)
5187 if ((priv->status & STATUS_ASSOCIATED) &&
5188 band_a_active_channel[i] == priv->channel)
5191 scan.channels_list[channel_index] =
5192 band_a_active_channel[i];
5193 ipw_set_scan_type(&scan, channel_index, scan_type);
5196 if (start != channel_index) {
5197 scan.channels_list[start] = (u8) (IPW_A_MODE << 6) |
5198 (channel_index - start);
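/* The slot reserved at 'start' encodes the band in its top two bits
 * (IPW_A_MODE here) and the number of channels that follow in the
 * low bits. */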
5203 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5204 int start = channel_index;
5205 for (i = 0; i < MAX_B_CHANNELS; i++) {
5206 if (band_b_active_channel[i] == 0)
5208 if ((priv->status & STATUS_ASSOCIATED) &&
5209 band_b_active_channel[i] == priv->channel)
5212 scan.channels_list[channel_index] =
5213 band_b_active_channel[i];
5214 ipw_set_scan_type(&scan, channel_index, scan_type);
5217 if (start != channel_index) {
5218 scan.channels_list[start] = (u8) (IPW_B_MODE << 6) |
5219 (channel_index - start);
5223 err = ipw_send_scan_request_ext(priv, &scan);
5225 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
5229 priv->status |= STATUS_SCANNING;
5230 priv->status &= ~STATUS_SCAN_PENDING;
5236 * This file defines the Wireless Extension handlers. It does not
5237 * define any methods of hardware manipulation and relies on the
5238 * functions defined in ipw_main to provide the HW interaction.
5240 * The exception to this is the use of the ipw_get_ordinal()
5241 * function used to poll the hardware vs. making unnecessary calls.
5245 static int ipw_wx_get_name(struct net_device *dev,
5246 struct iw_request_info *info,
5247 union iwreq_data *wrqu, char *extra)
5249 struct ipw_priv *priv = ieee80211_priv(dev);
5250 if (!(priv->status & STATUS_ASSOCIATED))
5251 strcpy(wrqu->name, "unassociated");
5253 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
5254 ipw_modes[priv->assoc_request.ieee_mode]);
5255 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
5259 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
5262 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
5263 priv->config &= ~CFG_STATIC_CHANNEL;
5264 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5265 STATUS_ASSOCIATING))) {
5266 IPW_DEBUG_ASSOC("Attempting to associate with new "
5268 ipw_associate(priv);
5274 priv->config |= CFG_STATIC_CHANNEL;
5276 if (priv->channel == channel) {
5277 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
5282 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
5283 priv->channel = channel;
5285 /* If we are currently associated, or trying to associate
5286 * then see if this is a new channel (causing us to disassociate) */
5287 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5288 IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
5289 ipw_disassociate(priv);
5291 ipw_associate(priv);
5297 static int ipw_wx_set_freq(struct net_device *dev,
5298 struct iw_request_info *info,
5299 union iwreq_data *wrqu, char *extra)
5301 struct ipw_priv *priv = ieee80211_priv(dev);
5302 struct iw_freq *fwrq = &wrqu->freq;
5304 /* if setting by freq convert to channel */
5306 if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) {
5307 int f = fwrq->m / 100000;
5310 while ((c < REG_MAX_CHANNEL) &&
5311 (f != ipw_frequencies[c]))
5314 /* hack to fall through */
5320 if (fwrq->e > 0 || fwrq->m > 1000)
5323 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
5324 return ipw_set_channel(priv, (u8) fwrq->m);
5327 static int ipw_wx_get_freq(struct net_device *dev,
5328 struct iw_request_info *info,
5329 union iwreq_data *wrqu, char *extra)
5331 struct ipw_priv *priv = ieee80211_priv(dev);
5335 /* If we are associated, trying to associate, or have a statically
5336 * configured CHANNEL then return that; otherwise return ANY */
5337 if (priv->config & CFG_STATIC_CHANNEL ||
5338 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
5339 wrqu->freq.m = priv->channel;
5343 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
5347 static int ipw_wx_set_mode(struct net_device *dev,
5348 struct iw_request_info *info,
5349 union iwreq_data *wrqu, char *extra)
5351 struct ipw_priv *priv = ieee80211_priv(dev);
5354 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
5356 if (wrqu->mode == priv->ieee->iw_mode)
5359 switch (wrqu->mode) {
5360 #ifdef CONFIG_IPW_PROMISC
5361 case IW_MODE_MONITOR:
5367 wrqu->mode = IW_MODE_INFRA;
5373 #ifdef CONFIG_IPW_PROMISC
5374 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
5375 priv->net_dev->type = ARPHRD_ETHER;
5377 if (wrqu->mode == IW_MODE_MONITOR)
5378 priv->net_dev->type = ARPHRD_IEEE80211;
5379 #endif /* CONFIG_IPW_PROMISC */
5382 /* Free the existing firmware and reset the fw_loaded
5383 * flag so ipw_load() will bring in the new firmware
5388 release_firmware(bootfw);
5389 release_firmware(ucode);
5390 release_firmware(firmware);
5391 bootfw = ucode = firmware = NULL;
5394 priv->ieee->iw_mode = wrqu->mode;
5395 ipw_adapter_restart(priv);
5400 static int ipw_wx_get_mode(struct net_device *dev,
5401 struct iw_request_info *info,
5402 union iwreq_data *wrqu, char *extra)
5404 struct ipw_priv *priv = ieee80211_priv(dev);
5406 wrqu->mode = priv->ieee->iw_mode;
5407 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
5412 #define DEFAULT_RTS_THRESHOLD 2304U
5413 #define MIN_RTS_THRESHOLD 1U
5414 #define MAX_RTS_THRESHOLD 2304U
5415 #define DEFAULT_BEACON_INTERVAL 100U
5416 #define DEFAULT_SHORT_RETRY_LIMIT 7U
5417 #define DEFAULT_LONG_RETRY_LIMIT 4U
5419 /* Values are in microseconds */
5420 static const s32 timeout_duration[] = {
5428 static const s32 period_duration[] = {
5436 static int ipw_wx_get_range(struct net_device *dev,
5437 struct iw_request_info *info,
5438 union iwreq_data *wrqu, char *extra)
5440 struct ipw_priv *priv = ieee80211_priv(dev);
5441 struct iw_range *range = (struct iw_range *)extra;
5445 wrqu->data.length = sizeof(*range);
5446 memset(range, 0, sizeof(*range));
5448 /* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
5449 range->throughput = 27 * 1000 * 1000;
5451 range->max_qual.qual = 100;
5452 /* TODO: Find real max RSSI and stick here */
5453 range->max_qual.level = 0;
5454 range->max_qual.noise = 0;
5455 range->max_qual.updated = 7; /* Updated all three */
5457 range->avg_qual.qual = 70;
5458 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
5459 range->avg_qual.level = 0; /* FIXME to real average level */
5460 range->avg_qual.noise = 0;
5461 range->avg_qual.updated = 7; /* Updated all three */
5463 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
5465 for (i = 0; i < range->num_bitrates; i++)
5466 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) * 500000;
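/* Supported rate bytes carry the 802.11 'basic rate' flag in the top
 * bit (hence the 0x7F mask) and count in units of 500 kb/s, which the
 * multiplication above converts to bits per second. */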
5469 range->max_rts = DEFAULT_RTS_THRESHOLD;
5470 range->min_frag = MIN_FRAG_THRESHOLD;
5471 range->max_frag = MAX_FRAG_THRESHOLD;
5473 range->encoding_size[0] = 5;
5474 range->encoding_size[1] = 13;
5475 range->num_encoding_sizes = 2;
5476 range->max_encoding_tokens = WEP_KEYS;
5478 /* Set the Wireless Extension versions */
5479 range->we_version_compiled = WIRELESS_EXT;
5480 range->we_version_source = 16;
5482 range->num_channels = FREQ_COUNT;
5485 for (i = 0; i < FREQ_COUNT; i++) {
5486 range->freq[val].i = i + 1;
5487 range->freq[val].m = ipw_frequencies[i] * 100000;
5488 range->freq[val].e = 1;
5491 if (val == IW_MAX_FREQUENCIES)
5494 range->num_frequency = val;
5496 IPW_DEBUG_WX("GET Range\n");
5500 static int ipw_wx_set_wap(struct net_device *dev,
5501 struct iw_request_info *info,
5502 union iwreq_data *wrqu, char *extra)
5504 struct ipw_priv *priv = ieee80211_priv(dev);
5506 static const unsigned char any[] = {
5507 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
5509 static const unsigned char off[] = {
5510 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
5513 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
5516 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
5517 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5518 /* we disable mandatory BSSID association */
5519 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
5520 priv->config &= ~CFG_STATIC_BSSID;
5521 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5522 STATUS_ASSOCIATING))) {
5523 IPW_DEBUG_ASSOC("Attempting to associate with new "
5525 ipw_associate(priv);
5531 priv->config |= CFG_STATIC_BSSID;
5532 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5533 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
5537 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
5538 MAC_ARG(wrqu->ap_addr.sa_data));
5540 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
5542 /* If we are currently associated, or trying to associate
5543 * then see if this is a new BSSID (causing us to disassociate) */
5544 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5545 IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
5546 ipw_disassociate(priv);
5548 ipw_associate(priv);
5554 static int ipw_wx_get_wap(struct net_device *dev,
5555 struct iw_request_info *info,
5556 union iwreq_data *wrqu, char *extra)
5558 struct ipw_priv *priv = ieee80211_priv(dev);
5559 /* If we are associated, trying to associate, or have a statically
5560 * configured BSSID then return that; otherwise return ANY */
5561 if (priv->config & CFG_STATIC_BSSID ||
5562 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5563 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
5564 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
5566 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
5568 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
5569 MAC_ARG(wrqu->ap_addr.sa_data));
5573 static int ipw_wx_set_essid(struct net_device *dev,
5574 struct iw_request_info *info,
5575 union iwreq_data *wrqu, char *extra)
5577 struct ipw_priv *priv = ieee80211_priv(dev);
5578 char *essid = ""; /* ANY */
5581 if (wrqu->essid.flags && wrqu->essid.length) {
5582 length = wrqu->essid.length - 1;
5586 IPW_DEBUG_WX("Setting ESSID to ANY\n");
5587 priv->config &= ~CFG_STATIC_ESSID;
5588 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5589 STATUS_ASSOCIATING))) {
5590 IPW_DEBUG_ASSOC("Attempting to associate with new "
5592 ipw_associate(priv);
5598 length = min(length, IW_ESSID_MAX_SIZE);
5600 priv->config |= CFG_STATIC_ESSID;
5602 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
5603 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
5607 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
5610 priv->essid_len = length;
5611 memcpy(priv->essid, essid, priv->essid_len);
5613 /* If we are currently associated, or trying to associate
5614 * then see if this is a new ESSID (causing us to disassociate) */
5615 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5616 IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
5617 ipw_disassociate(priv);
5619 ipw_associate(priv);
5625 static int ipw_wx_get_essid(struct net_device *dev,
5626 struct iw_request_info *info,
5627 union iwreq_data *wrqu, char *extra)
5629 struct ipw_priv *priv = ieee80211_priv(dev);
5631 /* If we are associated, trying to associate, or have a statically
5632 * configured ESSID then return that; otherwise return ANY */
5633 if (priv->config & CFG_STATIC_ESSID ||
5634 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5635 IPW_DEBUG_WX("Getting essid: '%s'\n",
5636 escape_essid(priv->essid, priv->essid_len));
5637 memcpy(extra, priv->essid, priv->essid_len);
5638 wrqu->essid.length = priv->essid_len;
5639 wrqu->essid.flags = 1; /* active */
5641 IPW_DEBUG_WX("Getting essid: ANY\n");
5642 wrqu->essid.length = 0;
5643 wrqu->essid.flags = 0; /* active */
5649 static int ipw_wx_set_nick(struct net_device *dev,
5650 struct iw_request_info *info,
5651 union iwreq_data *wrqu, char *extra)
5653 struct ipw_priv *priv = ieee80211_priv(dev);
5655 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
5656 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
5659 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
5660 memset(priv->nick, 0, sizeof(priv->nick));
5661 memcpy(priv->nick, extra, wrqu->data.length);
5662 IPW_DEBUG_TRACE("<<\n");
5667 static int ipw_wx_get_nick(struct net_device *dev,
5668 struct iw_request_info *info,
5669 union iwreq_data *wrqu, char *extra)
5671 struct ipw_priv *priv = ieee80211_priv(dev);
5672 IPW_DEBUG_WX("Getting nick\n");
5673 wrqu->data.length = strlen(priv->nick) + 1;
5674 memcpy(extra, priv->nick, wrqu->data.length);
5675 wrqu->data.flags = 1; /* active */
5679 static int ipw_wx_set_rate(struct net_device *dev,
5680 struct iw_request_info *info,
5681 union iwreq_data *wrqu, char *extra)
5683 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5687 static int ipw_wx_get_rate(struct net_device *dev,
5688 struct iw_request_info *info,
5689 union iwreq_data *wrqu, char *extra)
5691 struct ipw_priv *priv = ieee80211_priv(dev);
5692 wrqu->bitrate.value = priv->last_rate;
5694 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
5698 static int ipw_wx_set_rts(struct net_device *dev,
5699 struct iw_request_info *info,
5700 union iwreq_data *wrqu, char *extra)
5702 struct ipw_priv *priv = ieee80211_priv(dev);
5704 if (wrqu->rts.disabled)
5705 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
5707 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
5708 wrqu->rts.value > MAX_RTS_THRESHOLD)
5711 priv->rts_threshold = wrqu->rts.value;
5714 ipw_send_rts_threshold(priv, priv->rts_threshold);
5715 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
5719 static int ipw_wx_get_rts(struct net_device *dev,
5720 struct iw_request_info *info,
5721 union iwreq_data *wrqu, char *extra)
5723 struct ipw_priv *priv = ieee80211_priv(dev);
5724 wrqu->rts.value = priv->rts_threshold;
5725 wrqu->rts.fixed = 0; /* no auto select */
5726 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
5728 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
5732 static int ipw_wx_set_txpow(struct net_device *dev,
5733 struct iw_request_info *info,
5734 union iwreq_data *wrqu, char *extra)
5736 struct ipw_priv *priv = ieee80211_priv(dev);
5737 struct ipw_tx_power tx_power;
5740 if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
5741 return -EINPROGRESS;
5743 if (wrqu->power.flags != IW_TXPOW_DBM)
5746 if ((wrqu->power.value > 20) || (wrqu->power.value < -12))
5749 priv->tx_power = wrqu->power.value;
5751 memset(&tx_power, 0, sizeof(tx_power));
5753 /* configure device for 'G' band */
5754 tx_power.ieee_mode = IPW_G_MODE;
5755 tx_power.num_channels = 11;
5756 for (i = 0; i < 11; i++) {
5757 tx_power.channels_tx_power[i].channel_number = i + 1;
5758 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
5760 if (ipw_send_tx_power(priv, &tx_power))
5763 /* configure device to also handle 'B' band */
5764 tx_power.ieee_mode = IPW_B_MODE;
5765 if (ipw_send_tx_power(priv, &tx_power))
5774 static int ipw_wx_get_txpow(struct net_device *dev,
5775 struct iw_request_info *info,
5776 union iwreq_data *wrqu, char *extra)
5778 struct ipw_priv *priv = ieee80211_priv(dev);
5780 wrqu->power.value = priv->tx_power;
5781 wrqu->power.fixed = 1;
5782 wrqu->power.flags = IW_TXPOW_DBM;
5783 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
5785 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
5786 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
5791 static int ipw_wx_set_frag(struct net_device *dev,
5792 struct iw_request_info *info,
5793 union iwreq_data *wrqu, char *extra)
5795 struct ipw_priv *priv = ieee80211_priv(dev);
5797 if (wrqu->frag.disabled)
5798 priv->ieee->fts = DEFAULT_FTS;
5800 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
5801 wrqu->frag.value > MAX_FRAG_THRESHOLD)
5804 priv->ieee->fts = wrqu->frag.value & ~0x1;
5807 ipw_send_frag_threshold(priv, wrqu->frag.value);
5808 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
5812 static int ipw_wx_get_frag(struct net_device *dev,
5813 struct iw_request_info *info,
5814 union iwreq_data *wrqu, char *extra)
5816 struct ipw_priv *priv = ieee80211_priv(dev);
5817 wrqu->frag.value = priv->ieee->fts;
5818 wrqu->frag.fixed = 0; /* no auto select */
5819 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
5821 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
5826 static int ipw_wx_set_retry(struct net_device *dev,
5827 struct iw_request_info *info,
5828 union iwreq_data *wrqu, char *extra)
5830 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5834 static int ipw_wx_get_retry(struct net_device *dev,
5835 struct iw_request_info *info,
5836 union iwreq_data *wrqu, char *extra)
5838 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5842 static int ipw_wx_set_scan(struct net_device *dev,
5843 struct iw_request_info *info,
5844 union iwreq_data *wrqu, char *extra)
5846 struct ipw_priv *priv = ieee80211_priv(dev);
5847 IPW_DEBUG_WX("Start scan\n");
5848 if (ipw_request_scan(priv))
5853 static int ipw_wx_get_scan(struct net_device *dev,
5854 struct iw_request_info *info,
5855 union iwreq_data *wrqu, char *extra)
5857 struct ipw_priv *priv = ieee80211_priv(dev);
5858 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
5861 static int ipw_wx_set_encode(struct net_device *dev,
5862 struct iw_request_info *info,
5863 union iwreq_data *wrqu, char *key)
5865 struct ipw_priv *priv = ieee80211_priv(dev);
5866 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
5869 static int ipw_wx_get_encode(struct net_device *dev,
5870 struct iw_request_info *info,
5871 union iwreq_data *wrqu, char *key)
5873 struct ipw_priv *priv = ieee80211_priv(dev);
5874 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
5877 static int ipw_wx_set_power(struct net_device *dev,
5878 struct iw_request_info *info,
5879 union iwreq_data *wrqu, char *extra)
5881 struct ipw_priv *priv = ieee80211_priv(dev);
5884 if (wrqu->power.disabled) {
5885 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
5886 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
5888 IPW_DEBUG_WX("failed setting power mode.\n");
5892 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
5897 switch (wrqu->power.flags & IW_POWER_MODE) {
5898 case IW_POWER_ON: /* If not specified */
5899 case IW_POWER_MODE: /* If set all mask */
5900 case IW_POWER_ALL_R: /* If explicitly state all */
5902 default: /* Otherwise we don't support it */
5903 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
5908 /* If the user hasn't specified a power management mode yet, default to BATTERY */
5910 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
5911 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
5913 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
5914 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
5916 IPW_DEBUG_WX("failed setting power mode.\n");
5920 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
5925 static int ipw_wx_get_power(struct net_device *dev,
5926 struct iw_request_info *info,
5927 union iwreq_data *wrqu, char *extra)
5929 struct ipw_priv *priv = ieee80211_priv(dev);
5931 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
5932 wrqu->power.disabled = 1;
5934 wrqu->power.disabled = 0;
5937 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
5942 static int ipw_wx_set_powermode(struct net_device *dev,
5943 struct iw_request_info *info,
5944 union iwreq_data *wrqu, char *extra)
5946 struct ipw_priv *priv = ieee80211_priv(dev);
5947 int mode = *(int *)extra;
5950 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
5951 mode = IPW_POWER_AC;
5952 priv->power_mode = mode;
5954 priv->power_mode = IPW_POWER_ENABLED | mode;
5957 if (priv->power_mode != mode) {
5958 err = ipw_send_power_mode(priv, mode);
5961 IPW_DEBUG_WX("failed setting power mode.\n");
5969 #define MAX_WX_STRING 80
5970 static int ipw_wx_get_powermode(struct net_device *dev,
5971 struct iw_request_info *info,
5972 union iwreq_data *wrqu, char *extra)
5974 struct ipw_priv *priv = ieee80211_priv(dev);
5975 int level = IPW_POWER_LEVEL(priv->power_mode);
5978 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
5982 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
5984 case IPW_POWER_BATTERY:
5985 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
5988 p += snprintf(p, MAX_WX_STRING - (p - extra),
5989 "(Timeout %dms, Period %dms)",
5990 timeout_duration[level - 1] / 1000,
5991 period_duration[level - 1] / 1000);
5994 if (!(priv->power_mode & IPW_POWER_ENABLED))
5995 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
5997 wrqu->data.length = p - extra + 1;
6002 static int ipw_wx_set_wireless_mode(struct net_device *dev,
6003 struct iw_request_info *info,
6004 union iwreq_data *wrqu, char *extra)
6006 struct ipw_priv *priv = ieee80211_priv(dev);
6007 int mode = *(int *)extra;
6008 u8 band = 0, modulation = 0;
6010 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
6011 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
6015 if (priv->adapter == IPW_2915ABG) {
6016 priv->ieee->abg_true = 1;
6017 if (mode & IEEE_A) {
6018 band |= IEEE80211_52GHZ_BAND;
6019 modulation |= IEEE80211_OFDM_MODULATION;
6021 priv->ieee->abg_true = 0;
6023 if (mode & IEEE_A) {
6024 IPW_WARNING("Attempt to set 2200BG into "
6029 priv->ieee->abg_true = 0;
6032 if (mode & IEEE_B) {
6033 band |= IEEE80211_24GHZ_BAND;
6034 modulation |= IEEE80211_CCK_MODULATION;
6036 priv->ieee->abg_true = 0;
6038 if (mode & IEEE_G) {
6039 band |= IEEE80211_24GHZ_BAND;
6040 modulation |= IEEE80211_OFDM_MODULATION;
6042 priv->ieee->abg_true = 0;
6044 priv->ieee->mode = mode;
6045 priv->ieee->freq_band = band;
6046 priv->ieee->modulation = modulation;
6047 init_supported_rates(priv, &priv->rates);
6049 /* If we are currently associated, or trying to associate
6050 * then see if this is a new configuration (causing us to
6052 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
6053 /* The resulting association will trigger
6054 * the new rates to be sent to the device */
6055 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
6056 ipw_disassociate(priv);
6058 ipw_send_supported_rates(priv, &priv->rates);
6060 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
6061 mode & IEEE_A ? 'a' : '.',
6062 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
6066 static int ipw_wx_get_wireless_mode(struct net_device *dev,
6067 struct iw_request_info *info,
6068 union iwreq_data *wrqu, char *extra)
6070 struct ipw_priv *priv = ieee80211_priv(dev);
6072 switch (priv->ieee->freq_band) {
6073 case IEEE80211_24GHZ_BAND:
6074 switch (priv->ieee->modulation) {
6075 case IEEE80211_CCK_MODULATION:
6076 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
6078 case IEEE80211_OFDM_MODULATION:
6079 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
6082 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
6087 case IEEE80211_52GHZ_BAND:
6088 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
6091 default: /* Mixed Band */
6092 switch (priv->ieee->modulation) {
6093 case IEEE80211_CCK_MODULATION:
6094 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
6096 case IEEE80211_OFDM_MODULATION:
6097 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
6100 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
6106 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
6108 wrqu->data.length = strlen(extra) + 1;
6113 #ifdef CONFIG_IPW_PROMISC
6114 static int ipw_wx_set_promisc(struct net_device *dev,
6115 struct iw_request_info *info,
6116 union iwreq_data *wrqu, char *extra)
6118 struct ipw_priv *priv = ieee80211_priv(dev);
6119 int *parms = (int *)extra;
6120 int enable = (parms[0] > 0);
6122 IPW_DEBUG_WX("SET PROMISC: %d %d\n", enable, parms[1]);
6124 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
6125 priv->net_dev->type = ARPHRD_IEEE80211;
6126 ipw_adapter_restart(priv);
6129 ipw_set_channel(priv, parms[1]);
6131 if (priv->ieee->iw_mode != IW_MODE_MONITOR)
6133 priv->net_dev->type = ARPHRD_ETHER;
6134 ipw_adapter_restart(priv);
6139 static int ipw_wx_reset(struct net_device *dev,
6140 struct iw_request_info *info,
6141 union iwreq_data *wrqu, char *extra)
6143 struct ipw_priv *priv = ieee80211_priv(dev);
6144 IPW_DEBUG_WX("RESET\n");
6145 ipw_adapter_restart(priv);
6148 #endif // CONFIG_IPW_PROMISC
6150 /* Rebase the WE IOCTLs to zero for the handler array */
6151 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
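/* For example, IW_IOCTL(SIOCGIWNAME) expands to [SIOCGIWNAME - SIOCSIWCOMMIT],
 * so each handler is placed at its ioctl's zero-based offset in the array. */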
6152 static iw_handler ipw_wx_handlers[] = {
6153 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
6154 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
6155 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
6156 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
6157 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
6158 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
6159 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
6160 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
6161 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
6162 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
6163 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
6164 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
6165 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
6166 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
6167 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
6168 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
6169 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
6170 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
6171 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
6172 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
6173 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
6174 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
6175 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
6176 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
6177 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
6178 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
6179 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
6180 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
6183 #define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
6184 #define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1
6185 #define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2
6186 #define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3
6187 #define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4
6188 #define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5
6190 static struct iw_priv_args ipw_priv_args[] = {
6192 .cmd = IPW_PRIV_SET_POWER,
6193 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6194 .name = "set_power"},
6196 .cmd = IPW_PRIV_GET_POWER,
6197 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6198 .name = "get_power"},
6200 .cmd = IPW_PRIV_SET_MODE,
6201 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6202 .name = "set_mode"},
6204 .cmd = IPW_PRIV_GET_MODE,
6205 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6206 .name = "get_mode"},
6207 #ifdef CONFIG_IPW_PROMISC
6209 IPW_PRIV_SET_PROMISC,
6210 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
6213 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
6214 #endif /* CONFIG_IPW_PROMISC */
6217 static iw_handler ipw_priv_handler[] = {
6218 ipw_wx_set_powermode,
6219 ipw_wx_get_powermode,
6220 ipw_wx_set_wireless_mode,
6221 ipw_wx_get_wireless_mode,
6222 #ifdef CONFIG_IPW_PROMISC
6228 static struct iw_handler_def ipw_wx_handler_def = {
6229 .standard = ipw_wx_handlers,
6230 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
6231 .num_private = ARRAY_SIZE(ipw_priv_handler),
6232 .num_private_args = ARRAY_SIZE(ipw_priv_args),
6233 .private = ipw_priv_handler,
6234 .private_args = ipw_priv_args,
6238 * Get wireless statistics.
6239 * Called by /proc/net/wireless
6240 * Also called by SIOCGIWSTATS
6242 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
6244 struct ipw_priv *priv = ieee80211_priv(dev);
6245 struct iw_statistics *wstats;
6247 wstats = &priv->wstats;
6249 /* if hw is disabled, then ipw2100_get_ordinal() can't be called.
6250 * ipw2100_wx_wireless_stats seems to be called before fw is
6251 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
6252 * and associated; if not associated, the values are all meaningless
6253 * anyway, so zero them all and mark them INVALID */
6254 if (!(priv->status & STATUS_ASSOCIATED)) {
6255 wstats->miss.beacon = 0;
6256 wstats->discard.retries = 0;
6257 wstats->qual.qual = 0;
6258 wstats->qual.level = 0;
6259 wstats->qual.noise = 0;
6260 wstats->qual.updated = 7;
6261 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
6262 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
6266 wstats->qual.qual = priv->quality;
6267 wstats->qual.level = average_value(&priv->average_rssi);
6268 wstats->qual.noise = average_value(&priv->average_noise);
6269 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
6270 IW_QUAL_NOISE_UPDATED;
6272 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
6273 wstats->discard.retries = priv->last_tx_failures;
6274 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
6276 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
6277 goto fail_get_ordinal;
6278 wstats->discard.retries += tx_retry; */
6283 /* net device stuff */
6285 static inline void init_sys_config(struct ipw_sys_config *sys_config)
6287 memset(sys_config, 0, sizeof(struct ipw_sys_config));
6288 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
6289 sys_config->answer_broadcast_ssid_probe = 0;
6290 sys_config->accept_all_data_frames = 0;
6291 sys_config->accept_non_directed_frames = 1;
6292 sys_config->exclude_unicast_unencrypted = 0;
6293 sys_config->disable_unicast_decryption = 1;
6294 sys_config->exclude_multicast_unencrypted = 0;
6295 sys_config->disable_multicast_decryption = 1;
6296 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
6297 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
6298 sys_config->dot11g_auto_detection = 0;
6299 sys_config->enable_cts_to_self = 0;
6300 sys_config->bt_coexist_collision_thr = 0;
6301 sys_config->pass_noise_stats_to_host = 1;
6304 static int ipw_net_open(struct net_device *dev)
6306 struct ipw_priv *priv = ieee80211_priv(dev);
6307 IPW_DEBUG_INFO("dev->open\n");
6308 /* we should be verifying the device is ready to be opened */
6309 if (!(priv->status & STATUS_RF_KILL_MASK) &&
6310 (priv->status & STATUS_ASSOCIATED))
6311 netif_start_queue(dev);
6315 static int ipw_net_stop(struct net_device *dev)
6317 IPW_DEBUG_INFO("dev->close\n");
6318 netif_stop_queue(dev);
6325 modify to send one TFD per fragment instead of using chunking. Otherwise
6326 we need to heavily modify ieee80211_skb_to_txb().
6329 static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6331 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
6332 txb->fragments[0]->data;
6334 struct tfd_frame *tfd;
6335 struct clx2_tx_queue *txq = &priv->txq[0];
6336 struct clx2_queue *q = &txq->q;
6337 u8 id, hdr_len, unicast;
6338 u16 remaining_bytes;
6340 switch (priv->ieee->iw_mode) {
6342 hdr_len = IEEE80211_3ADDR_LEN;
6343 unicast = !is_broadcast_ether_addr(hdr->addr1) &&
6344 !is_multicast_ether_addr(hdr->addr1);
6345 id = ipw_find_station(priv, hdr->addr1);
6346 if (id == IPW_INVALID_STATION) {
6347 id = ipw_add_station(priv, hdr->addr1);
6348 if (id == IPW_INVALID_STATION) {
6349 IPW_WARNING("Attempt to send data to "
6350 "invalid cell: " MAC_FMT "\n",
6351 MAC_ARG(hdr->addr1));
6359 unicast = !is_broadcast_ether_addr(hdr->addr3) &&
6360 !is_multicast_ether_addr(hdr->addr3);
6361 hdr_len = IEEE80211_3ADDR_LEN;
6366 tfd = &txq->bd[q->first_empty];
6367 txq->txb[q->first_empty] = txb;
6368 memset(tfd, 0, sizeof(*tfd));
6369 tfd->u.data.station_number = id;
6371 tfd->control_flags.message_type = TX_FRAME_TYPE;
6372 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
6374 tfd->u.data.cmd_id = DINO_CMD_TX;
6375 tfd->u.data.len = txb->payload_size;
6376 remaining_bytes = txb->payload_size;
6377 if (unlikely(!unicast))
6378 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
6380 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;
6382 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
6383 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
6385 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;
6387 if (priv->config & CFG_PREAMBLE)
6388 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREMBL;
6390 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
6393 tfd->u.data.num_chunks = min((u8) (NUM_TFD_CHUNKS - 2), txb->nr_frags);
6394 for (i = 0; i < tfd->u.data.num_chunks; i++) {
6395 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
6396 i, tfd->u.data.num_chunks,
6397 txb->fragments[i]->len - hdr_len);
6398 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
6399 txb->fragments[i]->len - hdr_len);
6401 tfd->u.data.chunk_ptr[i] =
6402 pci_map_single(priv->pci_dev,
6403 txb->fragments[i]->data + hdr_len,
6404 txb->fragments[i]->len - hdr_len,
6406 tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
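/* Any fragments left over once the TFD's chunk slots are used up are
 * coalesced below into a single newly allocated skb that takes one
 * additional chunk slot. */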
6409 if (i != txb->nr_frags) {
6410 struct sk_buff *skb;
6411 u16 remaining_bytes = 0;
6414 for (j = i; j < txb->nr_frags; j++)
6415 remaining_bytes += txb->fragments[j]->len - hdr_len;
6417 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
6419 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
6421 tfd->u.data.chunk_len[i] = remaining_bytes;
6422 for (j = i; j < txb->nr_frags; j++) {
6423 int size = txb->fragments[j]->len - hdr_len;
6424 printk(KERN_INFO "Adding frag %d %d...\n",
6426 memcpy(skb_put(skb, size),
6427 txb->fragments[j]->data + hdr_len, size);
6429 dev_kfree_skb_any(txb->fragments[i]);
6430 txb->fragments[i] = skb;
6431 tfd->u.data.chunk_ptr[i] =
6432 pci_map_single(priv->pci_dev, skb->data,
6433 tfd->u.data.chunk_len[i],
6435 tfd->u.data.num_chunks++;
6440 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
6441 ipw_write32(priv, q->reg_w, q->first_empty);
6443 if (ipw_queue_space(q) < q->high_mark)
6444 netif_stop_queue(priv->net_dev);
6449 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
6450 ieee80211_txb_free(txb);
6453 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
6454 struct net_device *dev, int pri)
6456 struct ipw_priv *priv = ieee80211_priv(dev);
6457 unsigned long flags;
6459 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
6461 spin_lock_irqsave(&priv->lock, flags);
6463 if (!(priv->status & STATUS_ASSOCIATED)) {
6464 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
6465 priv->ieee->stats.tx_carrier_errors++;
6466 netif_stop_queue(dev);
6470 ipw_tx_skb(priv, txb);
6472 spin_unlock_irqrestore(&priv->lock, flags);
6476 spin_unlock_irqrestore(&priv->lock, flags);
6480 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
6482 struct ipw_priv *priv = ieee80211_priv(dev);
6484 priv->ieee->stats.tx_packets = priv->tx_packets;
6485 priv->ieee->stats.rx_packets = priv->rx_packets;
6486 return &priv->ieee->stats;
6489 static void ipw_net_set_multicast_list(struct net_device *dev)
6494 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
6496 struct ipw_priv *priv = ieee80211_priv(dev);
6497 struct sockaddr *addr = p;
6498 if (!is_valid_ether_addr(addr->sa_data))
6499 return -EADDRNOTAVAIL;
6500 priv->config |= CFG_CUSTOM_MAC;
6501 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
6502 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
6503 priv->net_dev->name, MAC_ARG(priv->mac_addr));
6504 ipw_adapter_restart(priv);
6508 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6509 struct ethtool_drvinfo *info)
6511 struct ipw_priv *p = ieee80211_priv(dev);
6516 strcpy(info->driver, DRV_NAME);
6517 strcpy(info->version, DRV_VERSION);
6520 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
6522 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
6524 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
6526 strcpy(info->bus_info, pci_name(p->pci_dev));
6527 info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
6530 static u32 ipw_ethtool_get_link(struct net_device *dev)
6532 struct ipw_priv *priv = ieee80211_priv(dev);
6533 return (priv->status & STATUS_ASSOCIATED) != 0;
6536 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
6538 return CX2_EEPROM_IMAGE_SIZE;
6541 static int ipw_ethtool_get_eeprom(struct net_device *dev,
6542 struct ethtool_eeprom *eeprom, u8 * bytes)
6544 struct ipw_priv *p = ieee80211_priv(dev);
6546 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6549 memcpy(bytes, &((u8 *) p->eeprom)[eeprom->offset], eeprom->len);
6553 static int ipw_ethtool_set_eeprom(struct net_device *dev,
6554 struct ethtool_eeprom *eeprom, u8 * bytes)
6556 struct ipw_priv *p = ieee80211_priv(dev);
6559 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6562 memcpy(&((u8 *) p->eeprom)[eeprom->offset], bytes, eeprom->len);
6563 for (i = IPW_EEPROM_DATA;
6564 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; i++)
6565 ipw_write8(p, i, p->eeprom[i]);
6570 static struct ethtool_ops ipw_ethtool_ops = {
6571 .get_link = ipw_ethtool_get_link,
6572 .get_drvinfo = ipw_ethtool_get_drvinfo,
6573 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
6574 .get_eeprom = ipw_ethtool_get_eeprom,
6575 .set_eeprom = ipw_ethtool_set_eeprom,
6578 static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
6580 struct ipw_priv *priv = data;
6581 u32 inta, inta_mask;
6586 spin_lock(&priv->lock);
6588 if (!(priv->status & STATUS_INT_ENABLED)) {
6593 inta = ipw_read32(priv, CX2_INTA_RW);
6594 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
6596 if (inta == 0xFFFFFFFF) {
6597 /* Hardware disappeared */
6598 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
6602 if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
6603 /* Shared interrupt */
6607 /* tell the device to stop sending interrupts */
6608 ipw_disable_interrupts(priv);
6610 /* ack current interrupts */
6611 inta &= (CX2_INTA_MASK_ALL & inta_mask);
6612 ipw_write32(priv, CX2_INTA_RW, inta);
6614 /* Cache INTA value for our tasklet */
6615 priv->isr_inta = inta;
6617 tasklet_schedule(&priv->irq_tasklet);
6619 spin_unlock(&priv->lock);
6623 spin_unlock(&priv->lock);
6627 static void ipw_rf_kill(void *adapter)
6629 struct ipw_priv *priv = adapter;
6630 unsigned long flags;
6632 spin_lock_irqsave(&priv->lock, flags);
6634 if (rf_kill_active(priv)) {
6635 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6636 if (priv->workqueue)
6637 queue_delayed_work(priv->workqueue,
6638 &priv->rf_kill, 2 * HZ);
6642 /* RF Kill is now disabled, so bring the device back up */
6644 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6645 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
6648 /* we can not do an adapter restart while inside an irq lock */
6649 queue_work(priv->workqueue, &priv->adapter_restart);
6651 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
6655 spin_unlock_irqrestore(&priv->lock, flags);
static int ipw_setup_deferred_work(struct ipw_priv *priv)
{
	int ret = 0;

	priv->workqueue = create_workqueue(DRV_NAME);
	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
	INIT_WORK(&priv->associate, ipw_associate, priv);
	INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
	INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
	INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
	INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
	INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
	INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
	INIT_WORK(&priv->request_scan,
		  (void (*)(void *))ipw_request_scan, priv);
	INIT_WORK(&priv->gather_stats,
		  (void (*)(void *))ipw_gather_stats, priv);
	INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
	INIT_WORK(&priv->roam, ipw_roam, priv);
	INIT_WORK(&priv->scan_check, ipw_scan_check, priv);

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		     ipw_irq_tasklet, (unsigned long)priv);

	return ret;
}
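
/*
 * shim__set_security() is installed as priv->ieee->set_security in
 * ipw_pci_probe(); it copies key, active-key, auth-mode, privacy and
 * security-level changes from the ieee80211 layer into priv->sec and sets
 * STATUS_SECURITY_UPDATED so the new settings can be pushed to the firmware.
 */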
static void shim__set_security(struct net_device *dev,
			       struct ieee80211_security *sec)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int i;

	for (i = 0; i < 4; i++) {
		if (sec->flags & (1 << i)) {
			priv->sec.key_sizes[i] = sec->key_sizes[i];
			if (sec->key_sizes[i] == 0)
				priv->sec.flags &= ~(1 << i);
			else
				memcpy(priv->sec.keys[i], sec->keys[i],
				       sec->key_sizes[i]);
			priv->sec.flags |= (1 << i);
			priv->status |= STATUS_SECURITY_UPDATED;
		}
	}

	if ((sec->flags & SEC_ACTIVE_KEY) &&
	    priv->sec.active_key != sec->active_key) {
		if (sec->active_key <= 3) {
			priv->sec.active_key = sec->active_key;
			priv->sec.flags |= SEC_ACTIVE_KEY;
		} else
			priv->sec.flags &= ~SEC_ACTIVE_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if ((sec->flags & SEC_AUTH_MODE) &&
	    (priv->sec.auth_mode != sec->auth_mode)) {
		priv->sec.auth_mode = sec->auth_mode;
		priv->sec.flags |= SEC_AUTH_MODE;
		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
			priv->capability |= CAP_SHARED_KEY;
		else
			priv->capability &= ~CAP_SHARED_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if (sec->flags & SEC_ENABLED && priv->sec.enabled != sec->enabled) {
		priv->sec.flags |= SEC_ENABLED;
		priv->sec.enabled = sec->enabled;
		priv->status |= STATUS_SECURITY_UPDATED;
		if (sec->enabled)
			priv->capability |= CAP_PRIVACY_ON;
		else
			priv->capability &= ~CAP_PRIVACY_ON;
	}

	if (sec->flags & SEC_LEVEL && priv->sec.level != sec->level) {
		priv->sec.level = sec->level;
		priv->sec.flags |= SEC_LEVEL;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* To match current functionality of ipw2100 (which works well with
	 * various supplicants), we don't force a disassociate if the
	 * privacy capability changes ... */
	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
	    (((priv->assoc_request.capability &
	       WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
	     (!(priv->assoc_request.capability &
		WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
		IPW_DEBUG_ASSOC("Disassociating due to capability "
				"change.\n");
		ipw_disassociate(priv);
	}
}
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *rates)
{
	/* TODO: Mask out rates based on priv->rates_mask */

	memset(rates, 0, sizeof(*rates));
	/* configure supported rates */
	switch (priv->ieee->freq_band) {
	case IEEE80211_52GHZ_BAND:
		rates->ieee_mode = IPW_A_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
					IEEE80211_OFDM_DEFAULT_RATES_MASK);
		break;

	default:		/* Mixed or 2.4GHz */
		rates->ieee_mode = IPW_G_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
				       IEEE80211_CCK_DEFAULT_RATES_MASK);
		if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
			ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
						IEEE80211_OFDM_DEFAULT_RATES_MASK);
		}
		break;
	}

	return 0;
}
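
/*
 * ipw_config() pushes the initial configuration to the firmware: per-channel
 * TX power for the 'G' and 'B' bands, the adapter MAC address, the basic
 * system config, the supported rate set, an optional RTS threshold, a random
 * seed, and finally the host-complete command that moves the adapter into
 * the RUN state.  A scan is kicked off afterwards if auto-association is
 * enabled.
 */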
static int ipw_config(struct ipw_priv *priv)
{
	int i;
	struct ipw_tx_power tx_power;

	memset(&priv->sys_config, 0, sizeof(priv->sys_config));
	memset(&tx_power, 0, sizeof(tx_power));

	/* This is only called from ipw_up, which resets/reloads the firmware,
	   so we don't need to first disable the card before we configure
	   it */

	/* configure device for 'G' band */
	tx_power.ieee_mode = IPW_G_MODE;
	tx_power.num_channels = 11;
	for (i = 0; i < 11; i++) {
		tx_power.channels_tx_power[i].channel_number = i + 1;
		tx_power.channels_tx_power[i].tx_power = priv->tx_power;
	}
	if (ipw_send_tx_power(priv, &tx_power))
		goto error;

	/* configure device to also handle 'B' band */
	tx_power.ieee_mode = IPW_B_MODE;
	if (ipw_send_tx_power(priv, &tx_power))
		goto error;

	/* initialize adapter address */
	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
		goto error;

	/* set basic system config settings */
	init_sys_config(&priv->sys_config);
	if (ipw_send_system_config(priv, &priv->sys_config))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	/* If configured to try and auto-associate, kick off a scan */
	if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
		goto error;

	return 0;

      error:
	return -EIO;
}
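
/*
 * ipw_up() tries to bring the hardware up at most MAX_HW_RESTARTS times:
 * each attempt loads the microcode/firmware/eeprom via ipw_load(), applies
 * the configuration via ipw_config(), and on failure tears the device back
 * down with ipw_down() before retrying.
 */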
#define MAX_HW_RESTARTS 5
static int ipw_up(struct ipw_priv *priv)
{
	int rc, i;

	if (priv->status & STATUS_EXIT_PENDING)
		return -EIO;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {
		/* Load the microcode, firmware, and eeprom.
		 * Also start the clocks. */
		rc = ipw_load(priv);
		if (rc) {
			IPW_ERROR("Unable to load firmware: 0x%08X\n", rc);
			return rc;
		}

		ipw_init_ordinals(priv);
		if (!(priv->config & CFG_CUSTOM_MAC))
			eeprom_parse_mac(priv, priv->mac_addr);
		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

		if (priv->status & STATUS_RF_KILL_MASK)
			return 0;

		rc = ipw_config(priv);
		if (!rc) {
			IPW_DEBUG_INFO("Configured device on count %i\n", i);
			priv->notif_missed_beacons = 0;
			netif_start_queue(priv->net_dev);
			return 0;
		} else {
			IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n",
				       rc);
		}

		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
			       i, MAX_HW_RESTARTS);

		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		ipw_down(priv);
	}

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
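
/*
 * ipw_down() is the common teardown path: it attempts to disable the card,
 * masks interrupts, clears every status bit except the RF-kill flags, and
 * stops the network queue.
 */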
static void ipw_down(struct ipw_priv *priv)
{
	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK;

	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);

	ipw_stop_nic(priv);
}
/* Called by register_netdev() */
static int ipw_net_init(struct net_device *dev)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	if (priv->status & STATUS_RF_KILL_SW) {
		IPW_WARNING("Radio disabled by module parameter.\n");
		return 0;
	} else if (rf_kill_active(priv)) {
		IPW_WARNING("Radio Frequency Kill Switch is On:\n"
			    "Kill switch must be turned off for "
			    "wireless networking to work.\n");
		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
		return 0;
	}

	if (ipw_up(priv))
		return -EIO;

	return 0;
}
/* PCI driver stuff */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* 2225BG */
	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_dump_events.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	NULL
};

static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
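
/*
 * ipw_pci_probe() performs the one-time device setup: enable the PCI
 * device, set a 32-bit DMA mask, map BAR 0, create the deferred work items,
 * apply the module parameters, identify 2200BG vs 2915ABG parts by PCI
 * device ID, request the (shared) interrupt line, and finally register the
 * sysfs attribute group and the network device.
 */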
static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int band, modulation;

	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = ieee80211_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
#ifdef CONFIG_IPW_DEBUG
	ipw_debug_level = debug;
#endif
	spin_lock_init(&priv->lock);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;
	base = ioremap_nocache(pci_resource_start(pdev, 0), length);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	/* Initialize module parameter values here */

	strncpy(net_dev->name, ifname, IFNAMSIZ);

	if (associate)
		priv->config |= CFG_ASSOCIATE;
	else
		IPW_DEBUG_INFO("Auto associate disabled.\n");

	if (auto_create)
		priv->config |= CFG_ADHOC_CREATE;
	else
		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");

	if (disable) {
		priv->status |= STATUS_RF_KILL_SW;
		IPW_DEBUG_INFO("Radio disabled.\n");
	}

	if (channel != 0) {
		priv->config |= CFG_STATIC_CHANNEL;
		priv->channel = channel;
		IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
		/* TODO: Validate that provided channel is in range */
	}

	switch (mode) {
	case 1:
		priv->ieee->iw_mode = IW_MODE_ADHOC;
		break;
#ifdef CONFIG_IPW_PROMISC
	case 2:
		priv->ieee->iw_mode = IW_MODE_MONITOR;
		break;
#endif
	default:
	case 0:
		priv->ieee->iw_mode = IW_MODE_INFRA;
		break;
	}

	if ((priv->pci_dev->device == 0x4223) ||
	    (priv->pci_dev->device == 0x4224)) {
		printk(KERN_INFO DRV_NAME
		       ": Detected Intel PRO/Wireless 2915ABG Network "
		       "Connection\n");
		priv->ieee->abg_true = 1;
		band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2915ABG;
		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
	} else {
		if (priv->pci_dev->device == 0x4221)
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2225BG Network "
			       "Connection\n");
		else
			printk(KERN_INFO DRV_NAME
			       ": Detected Intel PRO/Wireless 2200BG Network "
			       "Connection\n");
		priv->ieee->abg_true = 0;
		band = IEEE80211_24GHZ_BAND;
		modulation = IEEE80211_OFDM_MODULATION |
		    IEEE80211_CCK_MODULATION;
		priv->adapter = IPW_2200BG;
		priv->ieee->mode = IEEE_G | IEEE_B;
	}

	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;

	priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;

	priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;

	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;

	/* If power management is turned on, default to AC mode */
	priv->power_mode = IPW_POWER_AC;
	priv->tx_power = IPW_DEFAULT_TX_POWER;

	err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_MODULE_OWNER(net_dev);
	SET_NETDEV_DEV(net_dev, &pdev->dev);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;

	net_dev->open = ipw_net_open;
	net_dev->stop = ipw_net_stop;
	net_dev->init = ipw_net_init;
	net_dev->get_stats = ipw_net_get_stats;
	net_dev->set_multicast_list = ipw_net_set_multicast_list;
	net_dev->set_mac_address = ipw_net_set_mac_address;
	net_dev->get_wireless_stats = ipw_get_wireless_stats;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		goto out_release_irq;
	}

	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_group;
	}

	return 0;

      out_remove_group:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
      out_release_irq:
	free_irq(pdev->irq, priv);
      out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
      out_iounmap:
	iounmap(priv->hw_base);
      out_pci_release_regions:
	pci_release_regions(pdev);
      out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
      out_free_ieee80211:
	free_ieee80211(priv->net_dev);
      out:
	return err;
}
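
/*
 * ipw_pci_remove() undoes ipw_pci_probe() in reverse order: it flags
 * STATUS_EXIT_PENDING, removes the sysfs group, takes the device down,
 * unregisters the net_device, frees the RX/TX queues and the workqueue,
 * releases the IRQ and PCI resources, and releases the previously
 * requested firmware images.
 */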
static void ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	if (!priv)
		return;

	priv->status |= STATUS_EXIT_PENDING;

	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	ipw_down(priv);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	/* ipw_down will ensure that there is no more pending work
	 * in the workqueue, so we can safely remove it now. */
	if (priv->workqueue) {
		cancel_delayed_work(&priv->adhoc_check);
		cancel_delayed_work(&priv->gather_stats);
		cancel_delayed_work(&priv->request_scan);
		cancel_delayed_work(&priv->rf_kill);
		cancel_delayed_work(&priv->scan_check);
		destroy_workqueue(priv->workqueue);
		priv->workqueue = NULL;
	}

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);

#ifdef CONFIG_PM
	if (fw_loaded) {
		release_firmware(bootfw);
		release_firmware(ucode);
		release_firmware(firmware);
		fw_loaded = 0;
	}
#endif
}
#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, 0);
	pci_enable_device(pdev);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
	pci_restore_state(pdev, priv->pm_state);
#else
	pci_restore_state(pdev);
#endif
	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
#endif				/* CONFIG_PM */
/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
};
static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_module_init(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}
static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

module_param(ifname, charp, 0444);
MODULE_PARM_DESC(ifname, "network device name (default eth%d)");

#ifdef CONFIG_IPW_PROMISC
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_exit(ipw_exit);
module_init(ipw_init);