File: /usr/src/linux-2.4.7/drivers/net/3c59x.c

1     /* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
2     /*
3     	Written 1996-1999 by Donald Becker.
4     
5     	This software may be used and distributed according to the terms
6     	of the GNU General Public License, incorporated herein by reference.
7     
8     	This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
9     	Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
10     	and the EtherLink XL 3c900 and 3c905 cards.
11     
12     	The author may be reached as becker@scyld.com, or C/O
13     	Center of Excellence in Space Data and Information Sciences
14     	   Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
15     
16     	Linux Kernel Additions:
17     	
18      	0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
19      	0.99H+lk1.0 - Jeff Garzik <jgarzik@mandrakesoft.com>
20     		Remove compatibility defines for kernel versions < 2.2.x.
21     		Update for new 2.3.x module interface
22     	LK1.1.2 (March 19, 2000)
23     	* New PCI interface (jgarzik)
24     
25         LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
26         - Merged with 3c575_cb.c
27         - Don't set RxComplete in boomerang interrupt enable reg
28         - spinlock in vortex_timer to protect mdio functions
29         - disable local interrupts around call to vortex_interrupt in
30           vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
31         - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
32         - In vortex_start_xmit(), move the lock to _after_ we've altered
33           vp->cur_tx and vp->tx_full.  This defeats the race between
34           vortex_start_xmit() and vortex_interrupt which was identified
35           by Bogdan Costescu.
36         - Merged back support for six new cards from various sources
37         - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
38           insertion oops)
39         - Tell it that 3c905C has NWAY for 100bT autoneg
40         - Fix handling of SetStatusEnd in 'Too much work..' code, as
41           per 2.3.99's 3c575_cb (Dave Hinds).
42         - Split ISR into two for vortex & boomerang
43         - Fix MOD_INC/DEC races
44         - Handle resource allocation failures.
45         - Fix 3CCFE575CT LED polarity
46         - Make tx_interrupt_mitigation the default
47     
48         LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>    
49         - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
50         - Put vortex_info_tbl into __devinitdata
51         - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
52           as in the hardware.
53         - Increased the loop counter in wait_for_completion from 2,000 to 4,000.
54     
55         LK1.1.5 28 April 2000, andrewm
56         - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
57         - Some extra diagnostics
58         - In vortex_error(), reset the Tx on maxCollisions.  Otherwise most
59           chips usually get a Tx timeout.
60         - Added extra_reset module parm
61         - Replaced some inline timer manip with mod_timer
62           (François Romieu <Francois.Romieu@nic.fr>)
63         - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
64           (this came across from 3c575_cb).
65     
66         LK1.1.6 06 Jun 2000, andrewm
67         - Backed out the PPC defines.
68         - Use del_timer_sync(), mod_timer().
69         - Fix wrapped ulong comparison in boomerang_rx()
70         - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
71           (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
72         - Replace union wn3_config with BFINS/BFEXT manipulation for
73           sparc64 (Pete Zaitcev, Peter Jones)
74         - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
75           do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
76           Donald Becker)
77         - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
78         - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
79     
80         LK1.1.7 2 Jul 2000 andrewm
81         - Better handling of shared IRQs
82         - Reset the transmitter on a Tx reclaim error
83         - Fixed crash under OOM during vortex_open() (Mark Hemment)
84         - Fix Rx cessation problem during OOM (help from Mark Hemment)
85         - The spinlocks around the mdio access were blocking interrupts for 300uS.
86           Fix all this to use spin_lock_bh() within mdio_read/write
87         - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
88           have one.
89         - Added 802.3x MAC-layer flow control support
90     
91        LK1.1.8 13 Aug 2000 andrewm
92         - Ignore request_region() return value - already reserved if Cardbus.
93         - Merged some additional Cardbus flags from Don's 0.99Qk
94         - Some fixes for 3c556 (Fred Maciel)
95         - Fix for EISA initialisation (Jan Rękorajski)
96         - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
97         - Fixed MII_XCVR_PWR for 3CCFE575CT
98         - Added INVERT_LED_PWR, used it.
99         - Backed out the extra_reset stuff
100     
101        LK1.1.9 12 Sep 2000 andrewm
102         - Backed out the tx_reset_resume flags.  It was a no-op.
103         - In vortex_error, don't reset the Tx on txReclaim errors
104         - In vortex_error, don't reset the Tx on maxCollisions errors.
105           Hence backed out all the DownListPtr logic here.
106         - In vortex_error, give Tornado cards a partial TxReset on
107           maxCollisions (David Hinds).  Defined MAX_COLLISION_RESET for this.
108         - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
109         - Fixed a bug where, if vp->tx_full is set when the interface
110           is downed, it remains set when the interface is upped.  Bad
111           things happen.
112     
113        LK1.1.10 17 Sep 2000 andrewm
114         - Added EEPROM_8BIT for 3c555 (Fred Maciel)
115         - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
116         - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
117     
118        LK1.1.11 13 Nov 2000 andrewm
119         - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
120     
121        LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
122         - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
123         - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
124         - Added extended wait_for_completion for the 3c905CX.
125         - Look for an MII on PHY index 24 first (3c905CX oddity).
126         - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
127         - Don't free skbs we don't own on oom path in vortex_open().
128     
129        LK1.1.13 27 Jan 2001
130         - Added explicit `medialock' flag so we can truly
131           lock the media type down with `options'.
132         - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
133         - Added and used EEPROM_NORESET for 3c556B PM resumes.
134         - Fixed leakage of vp->rx_ring.
135         - Break out separate HAS_HWCKSM device capability flag.
136         - Kill vp->tx_full (ANK)
137         - Merge zerocopy fragment handling (ANK?)
138     
139        LK1.1.14 15 Feb 2001
140         - Enable WOL.  Can be turned on with `enable_wol' module option.
141         - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
142         - If a device's internalconfig register reports it has NWAY,
143           use it, even if autoselect is enabled.
144     
145        LK1.1.15 6 June 2001 akpm
146         - Prevent double counting of received bytes (Lars Christensen)
147         - Add ethtool support (jgarzik)
148         - Add module parm descriptions (Andrzej M. Krzysztofowicz)
149         - Implemented alloc_etherdev() API
150         - Special-case the 'Tx error 82' message.
151     
152         - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
153         - Also see Documentation/networking/vortex.txt
154     */
155     
156     /*
157      * FIXME: This driver _could_ support MTU changing, but doesn't.  See Don's hamachi.c implementation
158      * as well as other drivers
159      *
160      * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
161      * due to dead code elimination.  There will be some performance benefits from this due to
162      * elimination of all the tests and reduced cache footprint.
163      */
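/* A minimal illustration of the NOTE above (not part of the driver): to get
 * the dead-code elimination, replace the "#define vortex_debug debug" and
 * "static int vortex_debug" pair further down with a compile-time constant:
 *
 *	#define vortex_debug 0
 */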
164     
165     
166     #define DRV_NAME	"3c59x"
167     #define DRV_VERSION	"LK1.1.15"
168     #define DRV_RELDATE	"6 June 2001"
169     
170     
171     
172     /* A few values that may be tweaked. */
173     /* Keep the ring sizes a power of two for efficiency. */
174     #define TX_RING_SIZE	16
175     #define RX_RING_SIZE	32
176     #define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
177     
178     /* "Knobs" that adjust features and parameters. */
179     /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
180        Setting to > 1512 effectively disables this feature. */
181     #ifndef __arm__
182     static const int rx_copybreak = 200;
183     #else
184     /* ARM systems perform better by disregarding the bus-master
185        transfer capability of these cards. -- rmk */
186     static const int rx_copybreak = 1513;
187     #endif
188     /* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
189     static const int mtu = 1500;
190     /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
191     static int max_interrupt_work = 32;
192     /* Tx timeout interval (millisecs) */
193     static int watchdog = 5000;
194     
195     /* Allow aggregation of Tx interrupts.  Saves CPU load at the cost
196      * of possible Tx stalls if the system is blocking interrupts
197      * somewhere else.  Undefine this to disable.
198      */
199     #define tx_interrupt_mitigation 1
200     
201     /* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
202     #define vortex_debug debug
203     #ifdef VORTEX_DEBUG
204     static int vortex_debug = VORTEX_DEBUG;
205     #else
206     static int vortex_debug = 1;
207     #endif
208     
209     #ifndef __OPTIMIZE__
210     #error You must compile this file with the correct options!
211     #error See the last lines of the source file.
212     #error You must compile this driver with "-O".
213     #endif
214     
215     #include <linux/config.h>
216     #include <linux/module.h>
217     #include <linux/kernel.h>
218     #include <linux/sched.h>
219     #include <linux/string.h>
220     #include <linux/timer.h>
221     #include <linux/errno.h>
222     #include <linux/in.h>
223     #include <linux/ioport.h>
224     #include <linux/slab.h>
225     #include <linux/interrupt.h>
226     #include <linux/pci.h>
227     #include <linux/mii.h>
228     #include <linux/init.h>
229     #include <linux/netdevice.h>
230     #include <linux/etherdevice.h>
231     #include <linux/skbuff.h>
232     #include <linux/ethtool.h>
233     #include <asm/irq.h>			/* For NR_IRQS only. */
234     #include <asm/bitops.h>
235     #include <asm/io.h>
236     #include <asm/uaccess.h>
237     
238     /* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
239        This is only in the support-all-kernels source code. */
240     
241     #define RUN_AT(x) (jiffies + (x))
242     
243     #include <linux/delay.h>
244     
245     
246     static char version[] __devinitdata =
247     DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE "  Donald Becker and others. http://www.scyld.com/network/vortex.html\n";
248     
249     MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
250     MODULE_DESCRIPTION("3Com 3c59x/3c90x/3c575 series Vortex/Boomerang/Cyclone driver");
251     MODULE_PARM(debug, "i");
252     MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
253     MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
254     MODULE_PARM(hw_checksums, "1-" __MODULE_STRING(8) "i");
255     MODULE_PARM(flow_ctrl, "1-" __MODULE_STRING(8) "i");
256     MODULE_PARM(enable_wol, "1-" __MODULE_STRING(8) "i");
257     MODULE_PARM(rx_copybreak, "i");
258     MODULE_PARM(max_interrupt_work, "i");
259     MODULE_PARM(compaq_ioaddr, "i");
260     MODULE_PARM(compaq_irq, "i");
261     MODULE_PARM(compaq_device_id, "i");
262     MODULE_PARM(watchdog, "i");
263     MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
264     MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
265     MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
266     MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
267     MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
268     MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
269     MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
270     MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
271     MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
272     MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
273     MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
274     MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
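/* Illustrative usage only (parameter meanings as described above and in
 * Documentation/networking/vortex.txt):
 *
 *	insmod 3c59x.o debug=2 options=0x204 enable_wol=1
 *
 * loads the module with extra diagnostics, forces the first card to media
 * type 4 (100baseTx) with full duplex (bit 9), and enables Wake-on-LAN on it.
 */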
275     
276     /* Operational parameters that are usually not changed. */
277     
278     /* The Vortex size is twice that of the original EtherLinkIII series: the
279        runtime register window, window 1, is now always mapped in.
280        The Boomerang size is twice as large as the Vortex -- it has additional
281        bus master control registers. */
282     #define VORTEX_TOTAL_SIZE 0x20
283     #define BOOMERANG_TOTAL_SIZE 0x40
284     
285     /* Set iff a MII transceiver on any interface requires mdio preamble.
286        This is only set with the original DP83840 on older 3c905 boards, so the extra
287        code size of a per-interface flag is not worthwhile. */
288     static char mii_preamble_required;
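/* Illustration (see mdio_read()/mdio_write() later in this file): when the
 * flag is set, each MII management frame is preceded by a 32-bit preamble,
 * roughly
 *
 *	if (mii_preamble_required)
 *		mdio_sync(ioaddr, 32);
 */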
289     
290     #define PFX DRV_NAME ": "
291     
292     
293     
294     /*
295     				Theory of Operation
296     
297     I. Board Compatibility
298     
299     This device driver is designed for the 3Com FastEtherLink and FastEtherLink
300     XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbps
301     versions of the FastEtherLink cards.  The supported product IDs are
302       3c590, 3c592, 3c595, 3c597, 3c900, 3c905
303     
304     The related ISA 3c515 is supported with a separate driver, 3c515.c, included
305     with the kernel source or available from
306         cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
307     
308     II. Board-specific settings
309     
310     PCI bus devices are configured by the system at boot time, so no jumpers
311     need to be set on the board.  The system BIOS should be set to assign the
312     PCI INTA signal to an otherwise unused system IRQ line.
313     
314     The EEPROM settings for media type and forced-full-duplex are observed.
315     The EEPROM media type should be left at the default "autoselect" unless using
316     10base2 or AUI connections which cannot be reliably detected.
317     
318     III. Driver operation
319     
320     The 3c59x series use an interface that's very similar to the previous 3c5x9
321     series.  The primary interface is two programmed-I/O FIFOs, with an
322     alternate single-contiguous-region bus-master transfer (see next).
323     
324     The 3c900 "Boomerang" series uses a full-bus-master interface with separate
325     lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
326     DEC Tulip and Intel Speedo3.  The first chip version retains a compatible
327     programmed-I/O interface that has been removed in 'B' and subsequent board
328     revisions.
329     
330     One extension that is advertised in a very large font is that the adapters
331     are capable of being bus masters.  On the Vortex chip this capability was
332     only for a single contiguous region making it far less useful than the full
333     bus master capability.  There is a significant performance impact of taking
334     an extra interrupt or polling for the completion of each transfer, as well
335     as difficulty sharing the single transfer engine between the transmit and
336     receive threads.  Using DMA transfers is a win only with large blocks or
337     with the flawed versions of the Intel Orion motherboard PCI controller.
338     
339     The Boomerang chip's full-bus-master interface is useful, and has
340     advantages over other similar chips (currently unused by this driver):
341     queued transmit packets may be reordered, and receive buffer groups are
342     associated with a single frame.
343     
344     With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
345     Rather than a fixed intermediate receive buffer, this scheme allocates
346     full-sized skbuffs as receive buffers.  The value RX_COPYBREAK is used as
347     the copying breakpoint: it is chosen to trade off the memory wasted by
348     passing full-sized skbuffs to the queue layer for all frames against the
349     cost of copying a frame into a correctly-sized skbuff.
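As a rough sketch (slightly simplified) of the decision made in vortex_rx()
and boomerang_rx() below:

	if (pkt_len < rx_copybreak
			&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
		skb_reserve(skb, 2);	... align the IP header ...
		... copy the frame into the small skb; the ring buffer stays put ...
	} else {
		... pass the full-sized ring skbuff up and allocate a replacement ...
	}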
350     
351     IIIC. Synchronization
352     The driver runs as two independent, single-threaded flows of control.  One
353     is the send-packet routine, which enforces single-threaded use by the
354     dev->tbusy flag.  The other thread is the interrupt handler, which is single
355     threaded by the hardware and other software.
356     
357     IV. Notes
358     
359     Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
360     3c590, 3c595, and 3c900 boards.
361     The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
362     the EISA version is called "Demon".  According to Terry these names come
363     from rides at the local amusement park.
364     
365     The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
366     This driver only supports ethernet packets because of the skbuff allocation
367     limit of 4K.
368     */
369     
370     /* This table drives the PCI probe routines.  It's mostly boilerplate in all
371        of the drivers, and will likely be provided by some future kernel.
372     */
373     enum pci_flags_bit {
374     	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
375     	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
376     };
377     
378     enum {	IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
379     	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
380     	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
381     	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
382     	EEPROM_OFFSET=0x1000, EEPROM_NORESET=0x2000, HAS_HWCKSM=0x4000 };
383     
384     enum vortex_chips {
385     	CH_3C590 = 0,
386     	CH_3C592,
387     	CH_3C597,
388     	CH_3C595_1,
389     	CH_3C595_2,
390     
391     	CH_3C595_3,
392     	CH_3C900_1,
393     	CH_3C900_2,
394     	CH_3C900_3,
395     	CH_3C900_4,
396     
397     	CH_3C900_5,
398     	CH_3C900B_FL,
399     	CH_3C905_1,
400     	CH_3C905_2,
401     	CH_3C905B_1,
402     
403     	CH_3C905B_2,
404     	CH_3C905B_FX,
405     	CH_3C905C,
406     	CH_3C980,
407     	CH_3C9805,
408     
409     	CH_3CSOHO100_TX,
410     	CH_3C555,
411     	CH_3C556,
412     	CH_3C556B,
413     	CH_3C575,
414     
415     	CH_3C575_1,
416     	CH_3CCFE575,
417     	CH_3CCFE575CT,
418     	CH_3CCFE656,
419     	CH_3CCFEM656,
420     
421     	CH_3CCFEM656_1,
422     	CH_3C450,
423     };
424     
425     
426     /* note: this array is directly indexed by the above enums, and MUST
427      * be kept in sync with both the enums above, and the PCI device
428      * table below
429      */
430     static struct vortex_chip_info {
431     	const char *name;
432     	int flags;
433     	int drv_flags;
434     	int io_size;
435     } vortex_info_tbl[] __devinitdata = {
436     #define EISA_TBL_OFFSET	0		/* Offset of this entry for vortex_eisa_init */
437     	{"3c590 Vortex 10Mbps",
438     	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
439     	{"3c592 EISA 10mbps Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
440     	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
441     	{"3c597 EISA Fast Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
442     	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
443     	{"3c595 Vortex 100baseTx",
444     	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
445     	{"3c595 Vortex 100baseT4",
446     	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
447     
448     	{"3c595 Vortex 100base-MII",
449     	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
450     	{"3c900 Boomerang 10baseT",
451     	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
452     	{"3c900 Boomerang 10Mbps Combo",
453     	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
454     	{"3c900 Cyclone 10Mbps TPO",						/* AKPM: from Don's 0.99M */
455     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
456     	{"3c900 Cyclone 10Mbps Combo",
457     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
458     
459     	{"3c900 Cyclone 10Mbps TPC",						/* AKPM: from Don's 0.99M */
460     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
461     	{"3c900B-FL Cyclone 10base-FL",
462     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
463     	{"3c905 Boomerang 100baseTx",
464     	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
465     	{"3c905 Boomerang 100baseT4",
466     	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
467     	{"3c905B Cyclone 100baseTx",
468     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
469     
470     	{"3c905B Cyclone 10/100/BNC",
471     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
472     	{"3c905B-FX Cyclone 100baseFx",
473     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
474     	{"3c905C Tornado",
475     	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
476     	{"3c980 Cyclone",
477     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
478     	{"3c980 10/100 Base-TX NIC(Python-T)",
479     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
480     
481     	{"3cSOHO100-TX Hurricane",
482     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
483     	{"3c555 Laptop Hurricane",
484     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
485     	{"3c556 Laptop Tornado",
486     	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
487     									HAS_HWCKSM, 128, },
488     	{"3c556B Laptop Hurricane",
489     	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
490     									EEPROM_NORESET|HAS_HWCKSM, 128, },
491     	{"3c575 [Megahertz] 10/100 LAN CardBus",
492     	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
493     
494     	{"3c575 Boomerang CardBus",
495     	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
496     	{"3CCFE575BT Cyclone CardBus",
497     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
498     									INVERT_LED_PWR|HAS_HWCKSM, 128, },
499     	{"3CCFE575CT Tornado CardBus",
500     	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
501     									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
502     	{"3CCFE656 Cyclone CardBus",
503     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
504     									INVERT_LED_PWR|HAS_HWCKSM, 128, },
505     	{"3CCFEM656B Cyclone+Winmodem CardBus",
506     	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
507     									INVERT_LED_PWR|HAS_HWCKSM, 128, },
508     
509     	{"3CXFEM656C Tornado+Winmodem CardBus",			/* From pcmcia-cs-3.1.5 */
510     	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
511     									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
512     	{"3c450 HomePNA Tornado",						/* AKPM: from Don's 0.99Q */
513     	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
514     	{0,}, /* 0 terminated list. */
515     };
516     
517     
518     static struct pci_device_id vortex_pci_tbl[] __devinitdata = {
519     	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
520     	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
521     	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
522     	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
523     	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
524     
525     	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
526     	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
527     	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
528     	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
529     	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
530     
531     	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
532     	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
533     	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
534     	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
535     	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
536     
537     	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
538     	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
539     	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
540     	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
541     	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
542     
543     	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
544     	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
545     	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
546     	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
547     	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
548     
549     	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
550     	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
551     	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
552     	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
553     	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
554     
555     	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
556     	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
557     	{0,}						/* 0 terminated list. */
558     };
559     MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
560     
561     
562     /* Operational definitions.
563        These are not used by other compilation units and thus are not
564        exported in a ".h" file.
565     
566        First the windows.  There are eight register windows, with the command
567        and status registers available in each.
568        */
569     #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
570     #define EL3_CMD 0x0e
571     #define EL3_STATUS 0x0e
572     
573     /* The top five bits written to EL3_CMD are a command, the lower
574        11 bits are the parameter, if applicable.
575        Note that 11 parameter bits were fine for ethernet, but the new chip
576        can handle FDDI length frames (~4500 octets) and now parameters count
577        32-bit 'Dwords' rather than octets. */
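/* Illustration only: a command and its parameter are issued as a single
 * 16-bit write, e.g. set_rx_mode() further down does
 *
 *	outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
 *
 * and a window switch is itself just another command, wrapped by EL3WINDOW().
 */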
578     
579     enum vortex_cmd {
580     	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
581     	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
582     	UpStall = 6<<11, UpUnstall = (6<<11)+1,
583     	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
584     	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
585     	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
586     	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
587     	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
588     	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
589     	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
590     
591     /* The SetRxFilter command accepts the following classes: */
592     enum RxFilter {
593     	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
594     
595     /* Bits in the general status register. */
596     enum vortex_status {
597     	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
598     	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
599     	IntReq = 0x0040, StatsFull = 0x0080,
600     	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
601     	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
602     	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
603     };
604     
605     /* Register window 1 offsets, the window used in normal operation.
606        On the Vortex this window is always mapped at offsets 0x10-0x1f. */
607     enum Window1 {
608     	TX_FIFO = 0x10,  RX_FIFO = 0x10,  RxErrors = 0x14,
609     	RxStatus = 0x18,  Timer=0x1A, TxStatus = 0x1B,
610     	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
611     };
612     enum Window0 {
613     	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
614     	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
615     	IntrStatus=0x0E,		/* Valid in all windows. */
616     };
617     enum Win0_EEPROM_bits {
618     	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
619     	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
620     	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
621     };
622     /* EEPROM locations. */
623     enum eeprom_offset {
624     	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
625     	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
626     	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
627     	DriverTune=13, Checksum=15};
628     
629     enum Window2 {			/* Window 2. */
630     	Wn2_ResetOptions=12,
631     };
632     enum Window3 {			/* Window 3: MAC/config bits. */
633     	Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
634     };
635     
636     #define BFEXT(value, offset, bitcount)  \
637         ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))
638     
639     #define BFINS(lhs, rhs, offset, bitcount)					\
640     	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |	\
641     	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))
642     
643     #define RAM_SIZE(v)		BFEXT(v, 0, 3)
644     #define RAM_WIDTH(v)	BFEXT(v, 3, 1)
645     #define RAM_SPEED(v)	BFEXT(v, 4, 2)
646     #define ROM_SIZE(v)		BFEXT(v, 6, 2)
647     #define RAM_SPLIT(v)	BFEXT(v, 16, 2)
648     #define XCVR(v)			BFEXT(v, 20, 4)
649     #define AUTOSELECT(v)	BFEXT(v, 24, 1)
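/* Illustrative use of the accessors above, as in vortex_probe1()/vortex_up()
 * below: Wn3_Config is read as one 32-bit word and the fields picked out:
 *
 *	config = inl(ioaddr + Wn3_Config);
 *	vp->default_media = XCVR(config);
 *	vp->autoselect = AUTOSELECT(config);
 */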
650     
651     enum Window4 {		/* Window 4: Xcvr/media bits. */
652     	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
653     };
654     enum Win4_Media_bits {
655     	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
656     	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
657     	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
658     	Media_LnkBeat = 0x0800,
659     };
660     enum Window7 {					/* Window 7: Bus Master control. */
661     	Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
662     };
663     /* Boomerang bus master control registers. */
664     enum MasterCtrl {
665     	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
666     	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
667     };
668     
669     /* The Rx and Tx descriptor lists.
670        Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
671        alignment constraint on tx_ring[] and rx_ring[]. */
672     #define LAST_FRAG 	0x80000000			/* Last Addr/Len pair in descriptor. */
673     #define DN_COMPLETE	0x00010000			/* This packet has been downloaded */
674     struct boom_rx_desc {
675     	u32 next;					/* Last entry points to 0.   */
676     	s32 status;
677     	u32 addr;					/* Up to 63 addr/len pairs possible. */
678     	s32 length;					/* Set LAST_FRAG to indicate last pair. */
679     };
680     /* Values for the Rx status entry. */
681     enum rx_desc_status {
682     	RxDComplete=0x00008000, RxDError=0x4000,
683     	/* See boomerang_rx() for actual error bits */
684     	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
685     	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
686     };
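/* Roughly how the receive ring is linked at open time (sketch only, error
 * handling omitted; see vortex_open() later in this file):
 *
 *	for (i = 0; i < RX_RING_SIZE; i++) {
 *		vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma +
 *				sizeof(struct boom_rx_desc) * (i + 1));
 *		vp->rx_ring[i].status = 0;
 *		vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
 *		skb = dev_alloc_skb(PKT_BUF_SZ);
 *		vp->rx_skbuff[i] = skb;
 *		vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(vp->pdev,
 *				skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 *	}
 *	vp->rx_ring[i - 1].next = cpu_to_le32(vp->rx_ring_dma);	-- wrap the ring
 */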
687     
688     #ifdef MAX_SKB_FRAGS
689     #define DO_ZEROCOPY 1
690     #else
691     #define DO_ZEROCOPY 0
692     #endif
693     
694     struct boom_tx_desc {
695     	u32 next;					/* Last entry points to 0.   */
696     	s32 status;					/* bits 0:12 length, others see below.  */
697     #if DO_ZEROCOPY
698     	struct {
699     		u32 addr;
700     		s32 length;
701     	} frag[1+MAX_SKB_FRAGS];
702     #else
703     		u32 addr;
704     		s32 length;
705     #endif
706     };
707     
708     /* Values for the Tx status entry. */
709     enum tx_desc_status {
710     	CRCDisable=0x2000, TxDComplete=0x8000,
711     	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
712     	TxIntrUploaded=0x80000000,		/* IRQ when in FIFO, but maybe not sent. */
713     };
714     
715     /* Chip features we care about in vp->capabilities, read from the EEPROM. */
716     enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
717     
718     struct vortex_private {
719     	/* The Rx and Tx rings should be quad-word-aligned. */
720     	struct boom_rx_desc* rx_ring;
721     	struct boom_tx_desc* tx_ring;
722     	dma_addr_t rx_ring_dma;
723     	dma_addr_t tx_ring_dma;
724     	/* The addresses of transmit- and receive-in-place skbuffs. */
725     	struct sk_buff* rx_skbuff[RX_RING_SIZE];
726     	struct sk_buff* tx_skbuff[TX_RING_SIZE];
727     	struct net_device *next_module;		/* NULL if PCI device */
728     	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
729     	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
730     	struct net_device_stats stats;
731     	struct sk_buff *tx_skb;				/* Packet being eaten by bus master ctrl.  */
732     	dma_addr_t tx_skb_dma;				/* Allocated DMA address for bus master ctrl DMA.   */
733     
734     	/* PCI configuration space information. */
735     	struct pci_dev *pdev;
736     	char *cb_fn_base;					/* CardBus function status addr space. */
737     
738     	/* Some values here only for performance evaluation and path-coverage */
739     	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
740     	int card_idx;
741     
742     	/* The remainder are related to chip state, mostly media selection. */
743     	struct timer_list timer;			/* Media selection timer. */
744     	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */
745     	int options;						/* User-settable misc. driver options. */
746     	unsigned int media_override:4, 		/* Passed-in media type. */
747     		default_media:4,				/* Read from the EEPROM/Wn3_Config. */
748     		full_duplex:1, force_fd:1, autoselect:1,
749     		bus_master:1,					/* Vortex can only do a fragment bus-m. */
750     		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
751     		flow_ctrl:1,					/* Use 802.3x flow control (PAUSE only) */
752     		partner_flow_ctrl:1,			/* Partner supports flow control */
753     		has_nway:1,
754     		enable_wol:1,					/* Wake-on-LAN is enabled */
755     		open:1,
756     		medialock:1,
757     		must_free_region:1;				/* Flag: if zero, Cardbus owns the I/O region */
758     	int drv_flags;
759     	u16 status_enable;
760     	u16 intr_enable;
761     	u16 available_media;				/* From Wn3_Options. */
762     	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
763     	u16 advertising;					/* NWay media advertisement */
764     	unsigned char phys[2];				/* MII device addresses. */
765     	u16 deferred;						/* Resend these interrupts when we
766     										 * bale from the ISR */
767     	u16 io_size;						/* Size of PCI region (for release_region) */
768     	spinlock_t lock;					/* Serialise access to device & its vortex_private */
769     	spinlock_t mdio_lock;				/* Serialise access to mdio hardware */
770     };
771     
772     /* The action to take with a media selection timer tick.
773        Note that we deviate from the 3Com order by checking 10base2 before AUI.
774      */
775     enum xcvr_types {
776     	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
777     	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
778     };
779     
780     static struct media_table {
781     	char *name;
782     	unsigned int media_bits:16,		/* Bits to set in Wn4_Media register. */
783     		mask:8,						/* The transceiver-present bit in Wn3_Config.*/
784     		next:8;						/* The media type to try next. */
785     	int wait;						/* Time before we check media status. */
786     } media_tbl[] = {
787       {	"10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
788       { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
789       { "undefined", 0,			0x80, XCVR_10baseT, 10000},
790       { "10base2",   0,			0x10, XCVR_AUI,		(1*HZ)/10},
791       { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
792       { "100baseFX", Media_Lnk, 0x04, XCVR_MII,		(14*HZ)/10},
793       { "MII",		 0,			0x41, XCVR_10baseT, 3*HZ },
794       { "undefined", 0,			0x01, XCVR_10baseT, 10000},
795       { "Autonegotiate", 0,		0x41, XCVR_10baseT, 3*HZ},
796       { "MII-External",	 0,		0x41, XCVR_10baseT, 3*HZ },
797       { "Default",	 0,			0xFF, XCVR_10baseT, 10000},
798     };
799     
800     static int vortex_probe1(struct pci_dev *pdev, long ioaddr, int irq,
801     				   int chip_idx, int card_idx);
802     static void vortex_up(struct net_device *dev);
803     static void vortex_down(struct net_device *dev);
804     static int vortex_open(struct net_device *dev);
805     static void mdio_sync(long ioaddr, int bits);
806     static int mdio_read(struct net_device *dev, int phy_id, int location);
807     static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
808     static void vortex_timer(unsigned long arg);
809     static void rx_oom_timer(unsigned long arg);
810     static int vortex_start_xmit(struct sk_buff *skb, struct net_device *dev);
811     static int boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev);
812     static int vortex_rx(struct net_device *dev);
813     static int boomerang_rx(struct net_device *dev);
814     static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs);
815     static void boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs);
816     static int vortex_close(struct net_device *dev);
817     static void dump_tx_ring(struct net_device *dev);
818     static void update_stats(long ioaddr, struct net_device *dev);
819     static struct net_device_stats *vortex_get_stats(struct net_device *dev);
820     static void set_rx_mode(struct net_device *dev);
821     static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
822     static void vortex_tx_timeout(struct net_device *dev);
823     static void acpi_set_WOL(struct net_device *dev);
824     
825     /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
826     /* Option count limit only -- unlimited interfaces are supported. */
827     #define MAX_UNITS 8
828     static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
829     static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
830     static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
831     static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
832     static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
833     
834     /* #define dev_alloc_skb dev_alloc_skb_debug */
835     
836     /* A list of all installed Vortex EISA devices, for removing the driver module. */
837     static struct net_device *root_vortex_eisa_dev;
838     
839     /* Variables to work-around the Compaq PCI BIOS32 problem. */
840     static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
841     
842     static int vortex_cards_found;
843     
844     #ifdef CONFIG_PM
845     
846     static int vortex_suspend (struct pci_dev *pdev, u32 state)
847     {
848     	struct net_device *dev = pdev->driver_data;
849     
850     	printk(KERN_DEBUG "vortex_suspend(%s)\n", dev->name);
851     
852     	if (dev && dev->priv) {
853     		struct vortex_private *vp = (struct vortex_private *)dev->priv;
854     		if (vp->open) {
855     			netif_device_detach(dev);
856     			vortex_down(dev);
857     		}
858     	}
859     	return 0;
860     }
861     
862     static int vortex_resume (struct pci_dev *pdev)
863     {
864     	struct net_device *dev = pdev->driver_data;
865     
866     	printk(KERN_DEBUG "vortex_resume(%s)\n", dev->name);
867     
868     	if (dev && dev->priv) {
869     		struct vortex_private *vp = (struct vortex_private *)dev->priv;
870     		if (vp->open) {
871     			vortex_up(dev);
872     			netif_device_attach(dev);
873     		}
874     	}
875     	return 0;
876     }
877     
878     #endif /* CONFIG_PM */
879     
880     /* returns count found (>= 0), or negative on error */
881     static int __init vortex_eisa_init (void)
882     {
883     	long ioaddr;
884     	int rc;
885     	int orig_cards_found = vortex_cards_found;
886     
887     	/* Now check all slots of the EISA bus. */
888     	if (!EISA_bus)
889     		return 0;
890     
891     	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
892     		int device_id;
893     
894     		if (request_region(ioaddr, VORTEX_TOTAL_SIZE, DRV_NAME) == NULL)
895     			continue;
896     
897     		/* Check the standard EISA ID register for an encoded '3Com'. */
898     		if (inw(ioaddr + 0xC80) != 0x6d50) {
899     			release_region (ioaddr, VORTEX_TOTAL_SIZE);
900     			continue;
901     		}
902     
903     		/* Check for a product that we support, 3c59{2,7} any rev. */
904     		device_id = (inb(ioaddr + 0xC82)<<8) + inb(ioaddr + 0xC83);
905     		if ((device_id & 0xFF00) != 0x5900) {
906     			release_region (ioaddr, VORTEX_TOTAL_SIZE);
907     			continue;
908     		}
909     
910     		rc = vortex_probe1(NULL, ioaddr, inw(ioaddr + 0xC88) >> 12,
911     				   EISA_TBL_OFFSET, vortex_cards_found);
912     		if (rc == 0)
913     			vortex_cards_found++;
914     		else
915     			release_region (ioaddr, VORTEX_TOTAL_SIZE);
916     	}
917     
918     	/* Special code to work-around the Compaq PCI BIOS32 problem. */
919     	if (compaq_ioaddr) {
920     		vortex_probe1(NULL, compaq_ioaddr, compaq_irq,
921     					  compaq_device_id, vortex_cards_found++);
922     	}
923     
924     	return vortex_cards_found - orig_cards_found;
925     }
926     
927     /* returns count (>= 0), or negative on error */
928     static int __devinit vortex_init_one (struct pci_dev *pdev,
929     				      const struct pci_device_id *ent)
930     {
931     	int rc;
932     
933     	/* wake up and enable device */		
934     	if (pci_enable_device (pdev)) {
935     		rc = -EIO;
936     	} else {
937     		rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq,
938     				    ent->driver_data, vortex_cards_found);
939     		if (rc == 0)
940     			vortex_cards_found++;
941     	}
942     	return rc;
943     }
944     
945     /*
946      * Start up the PCI device which is described by *pdev.
947      * Return 0 on success.
948      *
949      * NOTE: pdev can be NULL, for the case of an EISA driver
950      */
951     static int __devinit vortex_probe1(struct pci_dev *pdev,
952     				   long ioaddr, int irq,
953     				   int chip_idx, int card_idx)
954     {
955     	struct vortex_private *vp;
956     	int option;
957     	unsigned int eeprom[0x40], checksum = 0;		/* EEPROM contents */
958     	int i, step;
959     	struct net_device *dev;
960     	static int printed_version;
961     	int retval;
962     	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
963     	char *print_name;
964     
965     	if (!printed_version) {
966     		printk (KERN_INFO "%s", version);
967     		printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
968     		printed_version = 1;
969     	}
970     
971     	print_name = pdev ? pdev->slot_name : "3c59x";
972     
973     	dev = alloc_etherdev(sizeof(*vp));
974     	retval = -ENOMEM;
975     	if (!dev) {
976     		printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
977     		goto out;
978     	}
979     	SET_MODULE_OWNER(dev);
980     
981     	printk(KERN_INFO "%s: 3Com %s %s at 0x%lx, ",
982     	       print_name,
983     	       pdev ? "PCI" : "EISA",
984     	       vci->name,
985     	       ioaddr);
986     
987     	vp = dev->priv;
988     	dev->base_addr = ioaddr;
989     	dev->irq = irq;
990     	dev->mtu = mtu;
991     	vp->drv_flags = vci->drv_flags;
992     	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
993     	vp->io_size = vci->io_size;
994     	vp->card_idx = card_idx;
995     
996     	/* module list only for EISA devices */
997     	if (pdev == NULL) {
998     		vp->next_module = root_vortex_eisa_dev;
999     		root_vortex_eisa_dev = dev;
1000     	}
1001     
1002     	/* PCI-only startup logic */
1003     	if (pdev) {
1004     		/* EISA resources already marked, so only PCI needs to do this here */
1005     		/* Ignore return value, because Cardbus drivers already allocate for us */
1006     		if (request_region(ioaddr, vci->io_size, print_name) != NULL)
1007     			vp->must_free_region = 1;
1008     
1009     		/* enable bus-mastering if necessary */		
1010     		if (vci->flags & PCI_USES_MASTER)
1011     			pci_set_master (pdev);
1012     
1013     		if (vci->drv_flags & IS_VORTEX) {
1014     			u8 pci_latency;
1015     			u8 new_latency = 248;
1016     
1017     			/* Check the PCI latency value.  On the 3c590 series the latency timer
1018     			   must be set to the maximum value to avoid data corruption that occurs
1019    			   when the timer expires during a transfer.  This bug exists in the Vortex
1020     			   chip only. */
1021     			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
1022     			if (pci_latency < new_latency) {
1023     				printk(KERN_INFO "%s: Overriding PCI latency"
1024     					" timer (CFLT) setting of %d, new value is %d.\n",
1025     					print_name, pci_latency, new_latency);
1026     					pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
1027     			}
1028     		}
1029     	}
1030     
1031     	spin_lock_init(&vp->lock);
1032     	spin_lock_init(&vp->mdio_lock);
1033     	vp->pdev = pdev;
1034     
1035     	/* Makes sure rings are at least 16 byte aligned. */
1036     	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1037     					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1038     					   &vp->rx_ring_dma);
1039     	retval = -ENOMEM;
1040     	if (vp->rx_ring == 0)
1041     		goto free_region;
1042     
1043     	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1044     	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
1045     
1046     	/* if we are a PCI driver, we store info in pdev->driver_data
1047     	 * instead of a module list */	
1048     	if (pdev)
1049     		pci_set_drvdata(pdev, dev);
1050     
1051     	/* The lower four bits are the media type. */
1052     	if (dev->mem_start) {
1053     		/*
1054     		 * AKPM: ewww..  The 'options' param is passed in as the third arg to the
1055     		 * LILO 'ether=' argument for non-modular use
1056     		 */
1057     		option = dev->mem_start;
1058     	}
1059     	else if (card_idx < MAX_UNITS)
1060     		option = options[card_idx];
1061     	else
1062     		option = -1;
1063     
1064     	vp->media_override = 7;
1065     	if (option >= 0) {
1066     		vp->media_override = ((option & 7) == 2)  ?  0  :  option & 15;
1067     		if (vp->media_override != 7)
1068     			vp->medialock = 1;
1069     		vp->full_duplex = (option & 0x200) ? 1 : 0;
1070     		vp->bus_master = (option & 16) ? 1 : 0;
1071     	}
1072     
1073     	if (card_idx < MAX_UNITS) {
1074     		if (full_duplex[card_idx] > 0)
1075     			vp->full_duplex = 1;
1076     		if (flow_ctrl[card_idx] > 0)
1077     			vp->flow_ctrl = 1;
1078     		if (enable_wol[card_idx] > 0)
1079     			vp->enable_wol = 1;
1080     	}
1081     
1082     	vp->force_fd = vp->full_duplex;
1083     	vp->options = option;
1084     	/* Read the station address from the EEPROM. */
1085     	EL3WINDOW(0);
1086     	{
1087     		int base;
1088     
1089     		if (vci->drv_flags & EEPROM_8BIT)
1090     			base = 0x230;
1091     		else if (vci->drv_flags & EEPROM_OFFSET)
1092     			base = EEPROM_Read + 0x30;
1093     		else
1094     			base = EEPROM_Read;
1095     
1096     		for (i = 0; i < 0x40; i++) {
1097     			int timer;
1098     			outw(base + i, ioaddr + Wn0EepromCmd);
1099    			/* Pause for at least 162 usec for the read to take place. */
1100     			for (timer = 10; timer >= 0; timer--) {
1101     				udelay(162);
1102     				if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
1103     					break;
1104     			}
1105     			eeprom[i] = inw(ioaddr + Wn0EepromData);
1106     		}
1107     	}
1108     	for (i = 0; i < 0x18; i++)
1109     		checksum ^= eeprom[i];
1110     	checksum = (checksum ^ (checksum >> 8)) & 0xff;
1111     	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
1112     		while (i < 0x21)
1113     			checksum ^= eeprom[i++];
1114     		checksum = (checksum ^ (checksum >> 8)) & 0xff;
1115     	}
1116     	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
1117     		printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
1118     	for (i = 0; i < 3; i++)
1119     		((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
1120     	for (i = 0; i < 6; i++)
1121     		printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
1122     	EL3WINDOW(2);
1123     	for (i = 0; i < 6; i++)
1124     		outb(dev->dev_addr[i], ioaddr + i);
1125     
1126     #ifdef __sparc__
1127     	printk(", IRQ %s\n", __irq_itoa(dev->irq));
1128     #else
1129     	printk(", IRQ %d\n", dev->irq);
1130     	/* Tell them about an invalid IRQ. */
1131     	if (vortex_debug && (dev->irq <= 0 || dev->irq >= NR_IRQS))
1132     		printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
1133     			   dev->irq);
1134     #endif
1135     
1136     	EL3WINDOW(4);
1137     	step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
1138     	printk(KERN_INFO "  product code %02x%02x rev %02x.%d date %02d-"
1139     		   "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
1140     		   step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
1141     
1142     
1143     	if (pdev && vci->drv_flags & HAS_CB_FNS) {
1144     		unsigned long fn_st_addr;			/* Cardbus function status space */
1145     		unsigned short n;
1146     
1147     		fn_st_addr = pci_resource_start (pdev, 2);
1148     		if (fn_st_addr) {
1149     			vp->cb_fn_base = ioremap(fn_st_addr, 128);
1150     			retval = -ENOMEM;
1151     			if (!vp->cb_fn_base)
1152     				goto free_ring;
1153     		}
1154     		printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
1155     			   print_name, fn_st_addr, vp->cb_fn_base);
1156     		EL3WINDOW(2);
1157     
1158     		n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
1159     		if (vp->drv_flags & INVERT_LED_PWR)
1160     			n |= 0x10;
1161     		if (vp->drv_flags & INVERT_MII_PWR)
1162     			n |= 0x4000;
1163     		outw(n, ioaddr + Wn2_ResetOptions);
1164     	}
1165     
1166     	/* Extract our information from the EEPROM data. */
1167     	vp->info1 = eeprom[13];
1168     	vp->info2 = eeprom[15];
1169     	vp->capabilities = eeprom[16];
1170     
1171     	if (vp->info1 & 0x8000) {
1172     		vp->full_duplex = 1;
1173     		printk(KERN_INFO "Full duplex capable\n");
1174     	}
1175     
1176     	{
1177     		static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
1178     		unsigned int config;
1179     		EL3WINDOW(3);
1180     		vp->available_media = inw(ioaddr + Wn3_Options);
1181     		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
1182     			vp->available_media = 0x40;
1183     		config = inl(ioaddr + Wn3_Config);
1184     		if (vortex_debug > 1)
1185     			printk(KERN_DEBUG "  Internal config register is %4.4x, "
1186     				   "transceivers %#x.\n", config, inw(ioaddr + Wn3_Options));
1187     		printk(KERN_INFO "  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
1188     			   8 << RAM_SIZE(config),
1189     			   RAM_WIDTH(config) ? "word" : "byte",
1190     			   ram_split[RAM_SPLIT(config)],
1191     			   AUTOSELECT(config) ? "autoselect/" : "",
1192     			   XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
1193     			   media_tbl[XCVR(config)].name);
1194     		vp->default_media = XCVR(config);
1195     		if (vp->default_media == XCVR_NWAY)
1196     			vp->has_nway = 1;
1197     		vp->autoselect = AUTOSELECT(config);
1198     	}
1199     
1200     	if (vp->media_override != 7) {
1201     		printk(KERN_INFO "  Media override to transceiver type %d (%s).\n",
1202     			   vp->media_override, media_tbl[vp->media_override].name);
1203     		dev->if_port = vp->media_override;
1204     	} else
1205     		dev->if_port = vp->default_media;
1206     
1207     	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1208     		int phy, phy_idx = 0;
1209     		EL3WINDOW(4);
1210     		mii_preamble_required++;
1211     		mii_preamble_required++;
1212     		mdio_read(dev, 24, 1);
1213     		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
1214     			int mii_status, phyx;
1215     
1216     			/*
1217     			 * For the 3c905CX we look at index 24 first, because it bogusly
1218     			 * reports an external PHY at all indices
1219     			 */
1220     			if (phy == 0)
1221     				phyx = 24;
1222     			else if (phy <= 24)
1223     				phyx = phy - 1;
1224     			else
1225     				phyx = phy;
1226     			mii_status = mdio_read(dev, phyx, 1);
1227     			if (mii_status  &&  mii_status != 0xffff) {
1228     				vp->phys[phy_idx++] = phyx;
1229     				printk(KERN_INFO "  MII transceiver found at address %d,"
1230     					   " status %4x.\n", phyx, mii_status);
1231     				if ((mii_status & 0x0040) == 0)
1232     					mii_preamble_required++;
1233     			}
1234     		}
1235     		mii_preamble_required--;
1236     		if (phy_idx == 0) {
1237     			printk(KERN_WARNING"  ***WARNING*** No MII transceivers found!\n");
1238     			vp->phys[0] = 24;
1239     		} else {
1240     			vp->advertising = mdio_read(dev, vp->phys[0], 4);
1241     			if (vp->full_duplex) {
1242     				/* Only advertise the FD media types. */
1243     				vp->advertising &= ~0x02A0;
1244     				mdio_write(dev, vp->phys[0], 4, vp->advertising);
1245     			}
1246     		}
1247     	}
1248     
1249     	if (pdev && vp->enable_wol && (vp->capabilities & CapPwrMgmt))
1250     		acpi_set_WOL(dev);
1251     
1252     	if (vp->capabilities & CapBusMaster) {
1253     		vp->full_bus_master_tx = 1;
1254     		printk(KERN_INFO"  Enabling bus-master transmits and %s receives.\n",
1255     			   (vp->info2 & 1) ? "early" : "whole-frame" );
1256     		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
1257     		vp->bus_master = 0;		/* AKPM: vortex only */
1258     	}
1259     
1260     	/* The 3c59x-specific entries in the device structure. */
1261     	dev->open = vortex_open;
1262     	if (vp->full_bus_master_tx) {
1263     		dev->hard_start_xmit = boomerang_start_xmit;
1264     #ifndef CONFIG_HIGHMEM
1265     		/* Actually, it still should work with iommu. */
1266     		dev->features |= NETIF_F_SG;
1267     #endif
1268     		if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) ||
1269     					(hw_checksums[card_idx] == 1)) {
1270     				dev->features |= NETIF_F_IP_CSUM;
1271     		}
1272     	} else {
1273     		dev->hard_start_xmit = vortex_start_xmit;
1274     	}
1275     
1276     	if (vortex_debug > 0) {
1277     		printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
1278     				print_name,
1279     				(dev->features & NETIF_F_SG) ? "en":"dis",
1280     				(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
1281     	}
1282     
1283     	dev->stop = vortex_close;
1284     	dev->get_stats = vortex_get_stats;
1285     	dev->do_ioctl = vortex_ioctl;
1286     	dev->set_multicast_list = set_rx_mode;
1287     	dev->tx_timeout = vortex_tx_timeout;
1288     	dev->watchdog_timeo = (watchdog * HZ) / 1000;
1289     	retval = register_netdev(dev);
1290     	if (retval == 0)
1291     		return 0;
1292     
1293     free_ring:
1294     	pci_free_consistent(pdev,
1295     						sizeof(struct boom_rx_desc) * RX_RING_SIZE
1296     							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1297     						vp->rx_ring,
1298     						vp->rx_ring_dma);
1299     free_region:
1300     	if (vp->must_free_region)
1301     		release_region(ioaddr, vci->io_size);
1302     	kfree (dev);
1303     	printk(KERN_ERR PFX "vortex_probe1 fails.  Returns %d\n", retval);
1304     out:
1305     	return retval;
1306     }
1307     
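         /*
          * Issue a command and busy-wait for it to finish.  The fast path polls
          * EL3_STATUS up to 2000 times; if CmdInProgress is still set we fall back
          * to polling with udelay(10) for up to a second before complaining.
          */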
1308     static void
1309     wait_for_completion(struct net_device *dev, int cmd)
1310     {
1311     	int i;
1312     
1313     	outw(cmd, dev->base_addr + EL3_CMD);
1314     	for (i = 0; i < 2000; i++) {
1315     		if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress))
1316     			return;
1317     	}
1318     
1319     	/* OK, that didn't work.  Do it the slow way.  One second */
1320     	for (i = 0; i < 100000; i++) {
1321     		if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) {
1322     			if (vortex_debug > 1)
1323     				printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
1324     					   dev->name, cmd, i * 10);
1325     			return;
1326     		}
1327     		udelay(10);
1328     	}
1329     	printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
1330     			   dev->name, cmd, inw(dev->base_addr + EL3_STATUS));
1331     }
1332     
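         /*
          * Bring the interface up: select the media port (honouring any override),
          * program duplex and MAC control, reload the station address, reset the
          * Rx/Tx DMA list pointers, and finally enable the receiver, transmitter
          * and interrupt sources.  Called from vortex_open() and again after a
          * full reset in vortex_error().
          */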
1333     static void
1334     vortex_up(struct net_device *dev)
1335     {
1336     	long ioaddr = dev->base_addr;
1337     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
1338     	unsigned int config;
1339     	int i;
1340     
1341     	if (vp->pdev && vp->enable_wol)			/* AKPM: test not needed? */
1342     		pci_set_power_state(vp->pdev, 0);	/* Go active */
1343     
1344     	/* Before initializing select the active media port. */
1345     	EL3WINDOW(3);
1346     	config = inl(ioaddr + Wn3_Config);
1347     
1348     	if (vp->media_override != 7) {
1349     		printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
1350     			   dev->name, vp->media_override,
1351     			   media_tbl[vp->media_override].name);
1352     		dev->if_port = vp->media_override;
1353     	} else if (vp->autoselect) {
1354     		if (vp->has_nway) {
1355     			printk(KERN_INFO "%s: using NWAY device table, not %d\n", dev->name, dev->if_port);
1356     			dev->if_port = XCVR_NWAY;
1357     		} else {
1358     			/* Find first available media type, starting with 100baseTx. */
1359     			dev->if_port = XCVR_100baseTx;
1360     			while (! (vp->available_media & media_tbl[dev->if_port].mask))
1361     				dev->if_port = media_tbl[dev->if_port].next;
1362     			printk(KERN_INFO "%s: first available media type: %s\n",
1363     					dev->name, media_tbl[dev->if_port].name);
1364     		}
1365     	} else {
1366     		dev->if_port = vp->default_media;
1367     		printk(KERN_INFO "%s: using default media %s\n",
1368     				dev->name, media_tbl[dev->if_port].name);
1369     	}
1370     
1371     	init_timer(&vp->timer);
1372     	vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
1373     	vp->timer.data = (unsigned long)dev;
1374     	vp->timer.function = vortex_timer;		/* timer handler */
1375     	add_timer(&vp->timer);
1376     
1377     	init_timer(&vp->rx_oom_timer);
1378     	vp->rx_oom_timer.data = (unsigned long)dev;
1379     	vp->rx_oom_timer.function = rx_oom_timer;
1380     
1381     	if (vortex_debug > 1)
1382     		printk(KERN_DEBUG "%s: Initial media type %s.\n",
1383     			   dev->name, media_tbl[dev->if_port].name);
1384     
1385     	vp->full_duplex = vp->force_fd;
1386     	config = BFINS(config, dev->if_port, 20, 4);
1387     	if (vortex_debug > 6)
1388     		printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
1389     	outl(config, ioaddr + Wn3_Config);
1390     
1391     	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1392     		int mii_reg1, mii_reg5;
1393     		EL3WINDOW(4);
1394     		/* Read BMSR (reg1) only to clear old status. */
1395     		mii_reg1 = mdio_read(dev, vp->phys[0], 1);
1396     		mii_reg5 = mdio_read(dev, vp->phys[0], 5);
1397     		if (mii_reg5 == 0xffff  ||  mii_reg5 == 0x0000)
1398     			;					/* No MII device or no link partner report */
1399     		else if ((mii_reg5 & 0x0100) != 0	/* 100baseTx-FD */
1400     				 || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
1401     			vp->full_duplex = 1;
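         		/* ANLPAR bit 0x0400: the link partner advertises 802.3x pause
         		   frames; remembered for the MAC flow-control bit below. */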
1402     		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1403     		if (vortex_debug > 1)
1404     			printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
1405     				   " info1 %04x, setting %s-duplex.\n",
1406     					dev->name, vp->phys[0],
1407     					mii_reg1, mii_reg5,
1408     					vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
1409     		EL3WINDOW(3);
1410     	}
1411     
1412     	/* Program Wn3_MAC_Ctrl: 0x20 = full duplex, 0x40 = large frames (MTU > 1500), 0x100 = 802.3x flow control. */
1413     	outw(	((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1414     		 	(dev->mtu > 1500 ? 0x40 : 0) |
1415     			((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1416     			ioaddr + Wn3_MAC_Ctrl);
1417     
1418     	if (vortex_debug > 1) {
1419     		printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
1420     			dev->name, config);
1421     	}
1422     
1423     	wait_for_completion(dev, TxReset);
1424     	wait_for_completion(dev, RxReset);
1425     
1426     	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
1427     
1428     	if (vortex_debug > 1) {
1429     		EL3WINDOW(4);
1430     		printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n",
1431     			   dev->name, dev->irq, inw(ioaddr + Wn4_Media));
1432     	}
1433     
1434     	/* Set the station address and mask in window 2 each time opened. */
1435     	EL3WINDOW(2);
1436     	for (i = 0; i < 6; i++)
1437     		outb(dev->dev_addr[i], ioaddr + i);
1438     	for (; i < 12; i+=2)
1439     		outw(0, ioaddr + i);
1440     
1441     	if (vp->cb_fn_base) {
1442     		unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
1443     		if (vp->drv_flags & INVERT_LED_PWR)
1444     			n |= 0x10;
1445     		if (vp->drv_flags & INVERT_MII_PWR)
1446     			n |= 0x4000;
1447     		outw(n, ioaddr + Wn2_ResetOptions);
1448     	}
1449     
1450     	if (dev->if_port == XCVR_10base2)
1451     		/* Start the thinnet transceiver. We should really wait 50ms...*/
1452     		outw(StartCoax, ioaddr + EL3_CMD);
1453     	if (dev->if_port != XCVR_NWAY) {
1454     		EL3WINDOW(4);
1455     		outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
1456     			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
1457     	}
1458     
1459     	/* Switch to the stats window, and clear all stats by reading. */
1460     	outw(StatsDisable, ioaddr + EL3_CMD);
1461     	EL3WINDOW(6);
1462     	for (i = 0; i < 10; i++)
1463     		inb(ioaddr + i);
1464     	inw(ioaddr + 10);
1465     	inw(ioaddr + 12);
1466     	/* New: On the Vortex we must also clear the BadSSD counter. */
1467     	EL3WINDOW(4);
1468     	inb(ioaddr + 12);
1469     	/* ..and on the Boomerang we enable the extra statistics bits. */
1470     	outw(0x0040, ioaddr + Wn4_NetDiag);
1471     
1472     	/* Switch to register set 7 for normal use. */
1473     	EL3WINDOW(7);
1474     
1475     	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1476     		vp->cur_rx = vp->dirty_rx = 0;
1477     		/* Initialize the RxEarly register as recommended. */
1478     		outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
1479     		outl(0x0020, ioaddr + PktStatus);
1480     		outl(vp->rx_ring_dma, ioaddr + UpListPtr);
1481     	}
1482     	if (vp->full_bus_master_tx) { 		/* Boomerang bus master Tx. */
1483     		vp->cur_tx = vp->dirty_tx = 0;
1484     		if (vp->drv_flags & IS_BOOMERANG)
1485     			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
1486     		/* Clear the Rx, Tx rings. */
1487     		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
1488     			vp->rx_ring[i].status = 0;
1489     		for (i = 0; i < TX_RING_SIZE; i++)
1490     			vp->tx_skbuff[i] = 0;
1491     		outl(0, ioaddr + DownListPtr);
1492     	}
1493     	/* Set receiver mode: presumably accept broadcast and station-address frames only. */
1494     	set_rx_mode(dev);
1495     	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
1496     
1497     //	wait_for_completion(dev, SetTxStart|0x07ff);
1498     	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
1499     	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
1500     	/* Allow status bits to be seen. */
1501     	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
1502     		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
1503     		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
1504     		(vp->bus_master ? DMADone : 0);
1505     	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
1506     		(vp->full_bus_master_rx ? 0 : RxComplete) |
1507     		StatsFull | HostError | TxComplete | IntReq
1508     		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
1509     	outw(vp->status_enable, ioaddr + EL3_CMD);
1510     	/* Ack all pending events, and set active indicator mask. */
1511     	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
1512     		 ioaddr + EL3_CMD);
1513     	outw(vp->intr_enable, ioaddr + EL3_CMD);
1514     	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
1515     		writel(0x8000, vp->cb_fn_base + 4);
1516     	netif_start_queue (dev);
1517     }
1518     
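         /*
          * dev->open handler: request a shared IRQ, fill the bus-master Rx ring
          * with freshly allocated skbuffs (boomerang only), then call vortex_up()
          * to program and enable the hardware.
          */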
1519     static int
1520     vortex_open(struct net_device *dev)
1521     {
1522     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
1523     	int i;
1524     	int retval;
1525     
1526     	if (vp->pdev && vp->enable_wol)				/* AKPM: test not needed? */
1527     		pci_set_power_state(vp->pdev, 0);		/* Go active */
1528     
1529     	/* Use the now-standard shared IRQ implementation. */
1530     	if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
1531     				&boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev))) {
1532     		printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
1533     		goto out;
1534     	}
1535     
1536     	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1537     		if (vortex_debug > 2)
1538     			printk(KERN_DEBUG "%s:  Filling in the Rx ring.\n", dev->name);
1539     		for (i = 0; i < RX_RING_SIZE; i++) {
1540     			struct sk_buff *skb;
1541     			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1542     			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
1543     			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1544     			skb = dev_alloc_skb(PKT_BUF_SZ);
1545     			vp->rx_skbuff[i] = skb;
1546     			if (skb == NULL)
1547     				break;			/* Bad news!  */
1548     			skb->dev = dev;			/* Mark as being used by this device. */
1549     			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1550     			vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1551     		}
1552     		if (i != RX_RING_SIZE) {
1553     			int j;
1554     			printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name);
1555     			for (j = 0; j < i; j++) {
1556     				if (vp->rx_skbuff[j]) {
1557     					dev_kfree_skb(vp->rx_skbuff[j]);
1558     					vp->rx_skbuff[j] = 0;
1559     				}
1560     			}
1561     			retval = -ENOMEM;
1562     			goto out_free_irq;
1563     		}
1564     		/* Wrap the ring. */
1565     		vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
1566     	}
1567     
1568     	vortex_up(dev);
1569     	vp->open = 1;
1570     	return 0;
1571     
1572     out_free_irq:
1573     	free_irq(dev->irq, dev);
1574     out:
1575     	if (vortex_debug > 1)
1576     		printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval);
1577     	return retval;
1578     }
1579     
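         /*
          * Media-selection watchdog, normally re-armed every 60 seconds.  Link-beat
          * ports are checked for a live link and MII/NWAY ports are re-polled for
          * link-partner duplex changes; if the current port looks dead we step on
          * to the next available media type.
          */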
1580     static void
1581     vortex_timer(unsigned long data)
1582     {
1583     	struct net_device *dev = (struct net_device *)data;
1584     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
1585     	long ioaddr = dev->base_addr;
1586     	int next_tick = 60*HZ;
1587     	int ok = 0;
1588     	int media_status, mii_status, old_window;
1589     
1590     	if (vortex_debug > 2) {
1591     		printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
1592     			   dev->name, media_tbl[dev->if_port].name);
1593     		printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
1594     	}
1595     
1596     	if (vp->medialock)
1597     		goto leave_media_alone;
1598     	disable_irq(dev->irq);
1599     	old_window = inw(ioaddr + EL3_CMD) >> 13;
1600     	EL3WINDOW(4);
1601     	media_status = inw(ioaddr + Wn4_Media);
1602     	switch (dev->if_port) {
1603     	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
1604     		if (media_status & Media_LnkBeat) {
1605     			ok = 1;
1606     			if (vortex_debug > 1)
1607     				printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
1608     					   dev->name, media_tbl[dev->if_port].name, media_status);
1609     		} else if (vortex_debug > 1)
1610     			printk(KERN_DEBUG "%s: Media %s has no link beat, %x.\n",
1611     				   dev->name, media_tbl[dev->if_port].name, media_status);
1612     		break;
1613     	case XCVR_MII: case XCVR_NWAY:
1614     		{
1615     			mii_status = mdio_read(dev, vp->phys[0], 1);
1616     			ok = 1;
1617     			if (vortex_debug > 2)
1618     				printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
1619     					dev->name, mii_status);
1620     			if (mii_status & 0x0004) {
1621     				int mii_reg5 = mdio_read(dev, vp->phys[0], 5);
1622     				if (! vp->force_fd  &&  mii_reg5 != 0xffff) {
1623     					int duplex = (mii_reg5&0x0100) ||
1624     						(mii_reg5 & 0x01C0) == 0x0040;
1625     					if (vp->full_duplex != duplex) {
1626     						vp->full_duplex = duplex;
1627     						printk(KERN_INFO "%s: Setting %s-duplex based on MII "
1628     							"#%d link partner capability of %4.4x.\n",
1629     							dev->name, vp->full_duplex ? "full" : "half",
1630     							vp->phys[0], mii_reg5);
1631     						/* Set the full-duplex bit. */
1632     						EL3WINDOW(3);
1633     						outw(	(vp->full_duplex ? 0x20 : 0) |
1634     								(dev->mtu > 1500 ? 0x40 : 0) |
1635     								((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1636     								ioaddr + Wn3_MAC_Ctrl);
1637     						if (vortex_debug > 1)
1638     							printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n");
1639     						/* AKPM: bug: should reset Tx and Rx after setting Duplex.  Page 180 */
1640     					}
1641     				}
1642     			}
1643     		}
1644     		break;
1645     	default:					/* Other media types handled by Tx timeouts. */
1646     		if (vortex_debug > 1)
1647     			printk(KERN_DEBUG "%s: Media %s has no indication, %x.\n",
1648     				   dev->name, media_tbl[dev->if_port].name, media_status);
1649     		ok = 1;
1650     	}
1651     	if ( ! ok) {
1652     		unsigned int config;
1653     
1654     		do {
1655     			dev->if_port = media_tbl[dev->if_port].next;
1656     		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
1657     		if (dev->if_port == XCVR_Default) { /* Go back to default. */
1658     		  dev->if_port = vp->default_media;
1659     		  if (vortex_debug > 1)
1660     			printk(KERN_DEBUG "%s: Media selection failing, using default "
1661     				   "%s port.\n",
1662     				   dev->name, media_tbl[dev->if_port].name);
1663     		} else {
1664     			if (vortex_debug > 1)
1665     				printk(KERN_DEBUG "%s: Media selection failed, now trying "
1666     					   "%s port.\n",
1667     					   dev->name, media_tbl[dev->if_port].name);
1668     			next_tick = media_tbl[dev->if_port].wait;
1669     		}
1670     		outw((media_status & ~(Media_10TP|Media_SQE)) |
1671     			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
1672     
1673     		EL3WINDOW(3);
1674     		config = inl(ioaddr + Wn3_Config);
1675     		config = BFINS(config, dev->if_port, 20, 4);
1676     		outl(config, ioaddr + Wn3_Config);
1677     
1678     		outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
1679     			 ioaddr + EL3_CMD);
1680     		if (vortex_debug > 1)
1681     			printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
1682     		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */
1683     	}
1684     	EL3WINDOW(old_window);
1685     	enable_irq(dev->irq);
1686     
1687     leave_media_alone:
1688     	if (vortex_debug > 2)
1689     	  printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
1690     			 dev->name, media_tbl[dev->if_port].name);
1691     
1692     	mod_timer(&vp->timer, RUN_AT(next_tick));
1693     	if (vp->deferred)
1694     		outw(FakeIntr, ioaddr + EL3_CMD);
1695     	return;
1696     }
1697     
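         /*
          * dev->tx_timeout handler: dump diagnostics, run the ISR by hand if an
          * interrupt appears to be stuck, reset the transmitter, then restart the
          * Tx ring (bus-master) or just wake the queue (PIO) and re-enable Tx.
          */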
1698     static void vortex_tx_timeout(struct net_device *dev)
1699     {
1700     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
1701     	long ioaddr = dev->base_addr;
1702     
1703     	printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
1704     		   dev->name, inb(ioaddr + TxStatus),
1705     		   inw(ioaddr + EL3_STATUS));
1706     	EL3WINDOW(4);
1707     	printk(KERN_ERR "  diagnostics: net %04x media %04x dma %8.8x.\n",
1708     		   inw(ioaddr + Wn4_NetDiag), inw(ioaddr + Wn4_Media),
1709     		   inl(ioaddr + PktStatus));
1710     	/* Slight code bloat to be user friendly. */
1711     	if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
1712     		printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
1713     			   " network cable problem?\n", dev->name);
1714     	if (inw(ioaddr + EL3_STATUS) & IntLatch) {
1715     		printk(KERN_ERR "%s: Interrupt posted but not delivered --"
1716     			   " IRQ blocked by another device?\n", dev->name);
1717     		/* Bad idea here, but we might as well handle a few events. */
1718     		{
1719     			/*
1720     			 * Block interrupts because vortex_interrupt does a bare spin_lock()
1721     			 */
1722     			unsigned long flags;
1723     			local_irq_save(flags);
1724     			if (vp->full_bus_master_tx)
1725     				boomerang_interrupt(dev->irq, dev, 0);
1726     			else
1727     				vortex_interrupt(dev->irq, dev, 0);
1728     			local_irq_restore(flags);
1729     		}
1730     	}
1731     
1732     	if (vortex_debug > 0)
1733     		dump_tx_ring(dev);
1734     
1735     	wait_for_completion(dev, TxReset);
1736     
1737     	vp->stats.tx_errors++;
1738     	if (vp->full_bus_master_tx) {
1739     		printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
1740     		if (vp->cur_tx - vp->dirty_tx > 0  &&  inl(ioaddr + DownListPtr) == 0)
1741     			outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1742     				 ioaddr + DownListPtr);
1743     		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
1744     			netif_wake_queue (dev);
1745     		if (vp->drv_flags & IS_BOOMERANG)
1746     			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
1747     		outw(DownUnstall, ioaddr + EL3_CMD);
1748     	} else {
1749     		vp->stats.tx_dropped++;
1750     		netif_wake_queue(dev);
1751     	}
1752     	
1753     	/* Issue Tx Enable */
1754     	outw(TxEnable, ioaddr + EL3_CMD);
1755     	dev->trans_start = jiffies;
1756     	
1757     	/* Switch to register set 7 for normal use. */
1758     	EL3WINDOW(7);
1759     }
1760     
1761     /*
1762      * Handle uncommon interrupt sources.  This is a separate routine to minimize
1763      * the cache impact.
1764      */
1765     static void
1766     vortex_error(struct net_device *dev, int status)
1767     {
1768     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
1769     	long ioaddr = dev->base_addr;
1770     	int do_tx_reset = 0, reset_mask = 0;
1771     	unsigned char tx_status = 0;
1772     
1773     	if (vortex_debug > 2) {
1774     		printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", dev->name, status);
1775     	}
1776     
1777     	if (status & TxComplete) {			/* Really "TxError" for us. */
1778     		tx_status = inb(ioaddr + TxStatus);
1779     		/* Presumably a tx-timeout. We must merely re-enable. */
1780     		if (vortex_debug > 2
1781     			|| (tx_status != 0x88 && vortex_debug > 0)) {
1782     			printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n",
1783     				   dev->name, tx_status);
1784     			if (tx_status == 0x82) {
1785     				printk(KERN_ERR "Probably a duplex mismatch.  See "
1786     						"Documentation/networking/vortex.txt\n");
1787     			}
1788     			dump_tx_ring(dev);
1789     		}
1790     		if (tx_status & 0x14)  vp->stats.tx_fifo_errors++;
1791     		if (tx_status & 0x38)  vp->stats.tx_aborted_errors++;
1792     		outb(0, ioaddr + TxStatus);
1793     		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
1794     			do_tx_reset = 1;
1795     		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) {	/* maxCollisions */
1796     			do_tx_reset = 1;
1797     			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
1798     		} else {						/* Merely re-enable the transmitter. */
1799     			outw(TxEnable, ioaddr + EL3_CMD);
1800     		}
1801     	}
1802     
1803     	if (status & RxEarly) {				/* Rx early is unused. */
1804     		vortex_rx(dev);
1805     		outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
1806     	}
1807     	if (status & StatsFull) {			/* Empty statistics. */
1808     		static int DoneDidThat;
1809     		if (vortex_debug > 4)
1810     			printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
1811     		update_stats(ioaddr, dev);
1812     		/* HACK: Disable statistics as an interrupt source. */
1813     		/* This occurs when we have the wrong media type! */
1814     		if (DoneDidThat == 0  &&
1815     			inw(ioaddr + EL3_STATUS) & StatsFull) {
1816     			printk(KERN_WARNING "%s: Updating statistics failed, disabling "
1817     				   "stats as an interrupt source.\n", dev->name);
1818     			EL3WINDOW(5);
1819     			outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
1820     			vp->intr_enable &= ~StatsFull;
1821     			EL3WINDOW(7);
1822     			DoneDidThat++;
1823     		}
1824     	}
1825     	if (status & IntReq) {		/* Restore all interrupt sources.  */
1826     		outw(vp->status_enable, ioaddr + EL3_CMD);
1827     		outw(vp->intr_enable, ioaddr + EL3_CMD);
1828     	}
1829     	if (status & HostError) {
1830     		u16 fifo_diag;
1831     		EL3WINDOW(4);
1832     		fifo_diag = inw(ioaddr + Wn4_FIFODiag);
1833     		printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
1834     			   dev->name, fifo_diag);
1835     		/* Adapter failure requires Tx/Rx reset and reinit. */
1836     		if (vp->full_bus_master_tx) {
1837     			int bus_status = inl(ioaddr + PktStatus);
1838     			/* 0x80000000 PCI master abort. */
1839     			/* 0x40000000 PCI target abort. */
1840     			if (vortex_debug)
1841     				printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
1842     
1843     			/* In this case, blow the card away */
1844     			vortex_down(dev);
1845     			wait_for_completion(dev, TotalReset | 0xff);
1846     			vortex_up(dev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
1847     		} else if (fifo_diag & 0x0400)
1848     			do_tx_reset = 1;
1849     		if (fifo_diag & 0x3000) {
1850     			wait_for_completion(dev, RxReset);
1851     			/* Set the Rx filter to the current state. */
1852     			set_rx_mode(dev);
1853     			outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
1854     			outw(AckIntr | HostError, ioaddr + EL3_CMD);
1855     		}
1856     	}
1857     
1858     	if (do_tx_reset) {
1859     		wait_for_completion(dev, TxReset|reset_mask);
1860     		outw(TxEnable, ioaddr + EL3_CMD);
1861     		if (!vp->full_bus_master_tx)
1862     			netif_wake_queue(dev);
1863     	}
1864     }
1865     
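         /*
          * Transmit path for the non-bus-master-Tx (vortex) chips: write the length
          * and packet data to TX_FIFO (or hand the single buffer to the Wn7 DMA
          * engine on bus_master parts), then drain the Tx status stack, resetting
          * the transmitter on any Tx-disabling error.
          */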
1866     static int
1867     vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
1868     {
1869     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
1870     	long ioaddr = dev->base_addr;
1871     
1872     	/* Put out the doubleword header... */
1873     	outl(skb->len, ioaddr + TX_FIFO);
1874     	if (vp->bus_master) {
1875     		/* Set the bus-master controller to transfer the packet. */
1876     		int len = (skb->len + 3) & ~3;
1877     		outl(	vp->tx_skb_dma = pci_map_single(vp->pdev, skb->data, len, PCI_DMA_TODEVICE),
1878     				ioaddr + Wn7_MasterAddr);
1879     		outw(len, ioaddr + Wn7_MasterLen);
1880     		vp->tx_skb = skb;
1881     		outw(StartDMADown, ioaddr + EL3_CMD);
1882     		/* netif_wake_queue() will be called at the DMADone interrupt. */
1883     	} else {
1884     		/* ... and the packet rounded to a doubleword. */
1885     		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
1886     		dev_kfree_skb (skb);
1887     		if (inw(ioaddr + TxFree) > 1536) {
1888     			netif_start_queue (dev);	/* AKPM: redundant? */
1889     		} else {
1890     			/* Interrupt us when the FIFO has room for max-sized packet. */
1891     			netif_stop_queue(dev);
1892     			outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
1893     		}
1894     	}
1895     
1896     	dev->trans_start = jiffies;
1897     
1898     	/* Clear the Tx status stack. */
1899     	{
1900     		int tx_status;
1901     		int i = 32;
1902     
1903     		while (--i > 0	&&	(tx_status = inb(ioaddr + TxStatus)) > 0) {
1904     			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
1905     				if (vortex_debug > 2)
1906     				  printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
1907     						 dev->name, tx_status);
1908     				if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
1909     				if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
1910     				if (tx_status & 0x30) {
1911     					wait_for_completion(dev, TxReset);
1912     				}
1913     				outw(TxEnable, ioaddr + EL3_CMD);
1914     			}
1915     			outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
1916     		}
1917     	}
1918     	return 0;
1919     }
1920     
1921     static int
1922     boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
1923     {
1924     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
1925     	long ioaddr = dev->base_addr;
1926     	/* Calculate the next Tx descriptor entry. */
1927     	int entry = vp->cur_tx % TX_RING_SIZE;
1928     	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
1929     	unsigned long flags;
1930     
1931     	if (vortex_debug > 6) {
1932     		printk(KERN_DEBUG "boomerang_start_xmit()\n");
1933     		if (vortex_debug > 3)
1934     			printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
1935     				   dev->name, vp->cur_tx);
1936     	}
1937     
1938     	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
1939     		if (vortex_debug > 0)
1940     			printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
1941     				   dev->name);
1942     		netif_stop_queue(dev);
1943     		return 1;
1944     	}
1945     
1946     	vp->tx_skbuff[entry] = skb;
1947     
1948     	vp->tx_ring[entry].next = 0;
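         	/*
         	 * Build the download descriptor.  With zerocopy enabled, fragment 0
         	 * covers the linear part of the skb and each page fragment gets its own
         	 * entry (the last flagged LAST_FRAG); AddTCPChksum asks the NIC to fill
         	 * in the checksum when the stack passed CHECKSUM_HW.
         	 */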
1949     #if DO_ZEROCOPY
1950     	if (skb->ip_summed != CHECKSUM_HW)
1951     			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
1952     	else
1953     			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum);
1954     
1955     	if (!skb_shinfo(skb)->nr_frags) {
1956     		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
1957     										skb->len, PCI_DMA_TODEVICE));
1958     		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
1959     	} else {
1960     		int i;
1961     
1962     		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
1963     										skb->len-skb->data_len, PCI_DMA_TODEVICE));
1964     		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
1965     
1966     		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1967     			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1968     
1969     			vp->tx_ring[entry].frag[i+1].addr =
1970     					cpu_to_le32(pci_map_single(vp->pdev,
1971     											   (void*)page_address(frag->page) + frag->page_offset,
1972     											   frag->size, PCI_DMA_TODEVICE));
1973     
1974     			if (i == skb_shinfo(skb)->nr_frags-1)
1975     					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
1976     			else
1977     					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
1978     		}
1979     	}
1980     #else
1981     	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
1982     	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
1983     	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
1984     #endif
1985     
1986     	spin_lock_irqsave(&vp->lock, flags);
1987     	/* Wait for the stall to complete. */
1988     	wait_for_completion(dev, DownStall);
1989     	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
1990     	if (inl(ioaddr + DownListPtr) == 0) {
1991     		outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
1992     		vp->queued_packet++;
1993     	}
1994     
1995     	vp->cur_tx++;
1996     	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
1997     		netif_stop_queue (dev);
1998     	} else {					/* Clear previous interrupt enable. */
1999     #if defined(tx_interrupt_mitigation)
2000     		/* Dubious. If in boomerang_interrupt the "faster" cyclone ifdef
2001     		 * were selected, this would corrupt DN_COMPLETE. No?
2002     		 */
2003     		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
2004     #endif
2005     	}
2006     	outw(DownUnstall, ioaddr + EL3_CMD);
2007     	spin_unlock_irqrestore(&vp->lock, flags);
2008     	dev->trans_start = jiffies;
2009     	return 0;
2010     }
2011     
2012     /* The interrupt handler does all of the Rx thread work and cleans up
2013        after the Tx thread. */
2014     
2015     /*
2016      * This is the ISR for the vortex series chips.
2017      * full_bus_master_tx == 0 && full_bus_master_rx == 0
2018      */
2019     
2020     static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2021     {
2022     	struct net_device *dev = dev_id;
2023     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2024     	long ioaddr;
2025     	int status;
2026     	int work_done = max_interrupt_work;
2027     	
2028     	ioaddr = dev->base_addr;
2029     	spin_lock(&vp->lock);
2030     
2031     	status = inw(ioaddr + EL3_STATUS);
2032     
2033     	if (vortex_debug > 6)
2034     		printk(KERN_DEBUG "vortex_interrupt(). status=0x%4x\n", status);
2035     
2036     	if ((status & IntLatch) == 0)
2037     		goto handler_exit;		/* No interrupt: shared IRQs cause this */
2038     
2039     	if (status & IntReq) {
2040     		status |= vp->deferred;
2041     		vp->deferred = 0;
2042     	}
2043     
2044     	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
2045     		goto handler_exit;
2046     
2047     	if (vortex_debug > 4)
2048     		printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
2049     			   dev->name, status, inb(ioaddr + Timer));
2050     
2051     	do {
2052     		if (vortex_debug > 5)
2053     				printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
2054     					   dev->name, status);
2055     		if (status & RxComplete)
2056     			vortex_rx(dev);
2057     
2058     		if (status & TxAvailable) {
2059     			if (vortex_debug > 5)
2060     				printk(KERN_DEBUG "	TX room bit was handled.\n");
2061     			/* There's room in the FIFO for a full-sized packet. */
2062     			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
2063     			netif_wake_queue (dev);
2064     		}
2065     
2066     		if (status & DMADone) {
2067     			if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
2068     				outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
2069     				pci_unmap_single(vp->pdev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
2070     				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
2071     				if (inw(ioaddr + TxFree) > 1536) {
2072     					/*
2073     					 * AKPM: FIXME: I don't think we need this.  If the queue was stopped due to
2074     					 * insufficient FIFO room, the TxAvailable test will succeed and call
2075     					 * netif_wake_queue()
2076     					 */
2077     					netif_wake_queue(dev);
2078     				} else { /* Interrupt when FIFO has room for max-sized packet. */
2079     					outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
2080     					netif_stop_queue(dev);
2081     				}
2082     			}
2083     		}
2084     		/* Check for all uncommon interrupts at once. */
2085     		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
2086     			if (status == 0xffff)
2087     				break;
2088     			vortex_error(dev, status);
2089     		}
2090     
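         		/*
         		 * Work limit exceeded: remember the still-pending sources in
         		 * vp->deferred and mask them out of the status enable; the timer
         		 * will restore them (via FakeIntr) about a jiffy from now.
         		 */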
2091     		if (--work_done < 0) {
2092     			printk(KERN_WARNING "%s: Too much work in interrupt, status "
2093     				   "%4.4x.\n", dev->name, status);
2094     			/* Disable all pending interrupts. */
2095     			do {
2096     				vp->deferred |= status;
2097     				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
2098     					 ioaddr + EL3_CMD);
2099     				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2100     			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
2101     			/* The timer will reenable interrupts. */
2102     			mod_timer(&vp->timer, jiffies + 1*HZ);
2103     			break;
2104     		}
2105     		/* Acknowledge the IRQ. */
2106     		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
2107     	} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
2108     
2109     	if (vortex_debug > 4)
2110     		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
2111     			   dev->name, status);
2112     handler_exit:
2113     	spin_unlock(&vp->lock);
2114     }
2115     
2116     /*
2117      * This is the ISR for the boomerang series chips.
2118      * full_bus_master_tx == 1 && full_bus_master_rx == 1
2119      */
2120     
2121     static void boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2122     {
2123     	struct net_device *dev = dev_id;
2124     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2125     	long ioaddr;
2126     	int status;
2127     	int work_done = max_interrupt_work;
2128     
2129     	ioaddr = dev->base_addr;
2130     
2131     	/*
2132     	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
2133     	 * and boomerang_start_xmit
2134     	 */
2135     	spin_lock(&vp->lock);
2136     
2137     	status = inw(ioaddr + EL3_STATUS);
2138     
2139     	if (vortex_debug > 6)
2140     		printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
2141     
2142     	if ((status & IntLatch) == 0)
2143     		goto handler_exit;		/* No interrupt: shared IRQs can cause this */
2144     
2145     	if (status == 0xffff) {		/* h/w no longer present (hotplug)? */
2146     		if (vortex_debug > 1)
2147     			printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
2148     		goto handler_exit;
2149     	}
2150     
2151     	if (status & IntReq) {
2152     		status |= vp->deferred;
2153     		vp->deferred = 0;
2154     	}
2155     
2156     	if (vortex_debug > 4)
2157     		printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
2158     			   dev->name, status, inb(ioaddr + Timer));
2159     	do {
2160     		if (vortex_debug > 5)
2161     				printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
2162     					   dev->name, status);
2163     		if (status & UpComplete) {
2164     			outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
2165     			if (vortex_debug > 5)
2166     				printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
2167     			boomerang_rx(dev);
2168     		}
2169     
2170     		if (status & DownComplete) {
2171     			unsigned int dirty_tx = vp->dirty_tx;
2172     
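         			/*
         			 * Reclaim completed Tx descriptors: unmap the DMA buffers,
         			 * free the skbs, advance dirty_tx, and wake the queue once
         			 * the ring has room again.
         			 */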
2173     			outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
2174     			while (vp->cur_tx - dirty_tx > 0) {
2175     				int entry = dirty_tx % TX_RING_SIZE;
2176     #if 1	/* AKPM: the latter is faster, but cyclone-only */
2177     				if (inl(ioaddr + DownListPtr) ==
2178     					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
2179     					break;			/* It still hasn't been processed. */
2180     #else
2181     				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
2182     					break;			/* It still hasn't been processed. */
2183     #endif
2184     					
2185     				if (vp->tx_skbuff[entry]) {
2186     					struct sk_buff *skb = vp->tx_skbuff[entry];
2187     #if DO_ZEROCOPY					
2188     					int i;
2189     					for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
2190     							pci_unmap_single(vp->pdev,
2191     											 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2192     											 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2193     											 PCI_DMA_TODEVICE);
2194     #else
2195     					pci_unmap_single(vp->pdev,
2196     						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
2197     #endif
2198     					dev_kfree_skb_irq(skb);
2199     					vp->tx_skbuff[entry] = 0;
2200     				} else {
2201     					printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
2202     				}
2203     				/* vp->stats.tx_packets++;  Counted below. */
2204     				dirty_tx++;
2205     			}
2206     			vp->dirty_tx = dirty_tx;
2207     			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
2208     				if (vortex_debug > 6)
2209     					printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
2210     				netif_wake_queue (dev);
2211     			}
2212     		}
2213     
2214     		/* Check for all uncommon interrupts at once. */
2215     		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
2216     			vortex_error(dev, status);
2217     
2218     		if (--work_done < 0) {
2219     			printk(KERN_WARNING "%s: Too much work in interrupt, status "
2220     				   "%4.4x.\n", dev->name, status);
2221     			/* Disable all pending interrupts. */
2222     			do {
2223     				vp->deferred |= status;
2224     				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
2225     					 ioaddr + EL3_CMD);
2226     				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2227     			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
2228     			/* The timer will reenable interrupts. */
2229     			mod_timer(&vp->timer, jiffies + 1*HZ);
2230     			break;
2231     		}
2232     		/* Acknowledge the IRQ. */
2233     		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
2234     		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
2235     			writel(0x8000, vp->cb_fn_base + 4);
2236     
2237     	} while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch);
2238     
2239     	if (vortex_debug > 4)
2240     		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
2241     			   dev->name, status);
2242     handler_exit:
2243     	spin_unlock(&vp->lock);
2244     }
2245     
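         /*
          * PIO receive path for vortex chips: pop packets off the Rx FIFO (or pull
          * them in with the single-shot Wn7 master DMA engine), hand them to the
          * stack, and discard anything errored or unallocatable.
          */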
2246     static int vortex_rx(struct net_device *dev)
2247     {
2248     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2249     	long ioaddr = dev->base_addr;
2250     	int i;
2251     	short rx_status;
2252     
2253     	if (vortex_debug > 5)
2254     		printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
2255     			   inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
2256     	while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
2257     		if (rx_status & 0x4000) { /* Error, update stats. */
2258     			unsigned char rx_error = inb(ioaddr + RxErrors);
2259     			if (vortex_debug > 2)
2260     				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
2261     			vp->stats.rx_errors++;
2262     			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
2263     			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
2264     			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
2265     			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
2266     			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
2267     		} else {
2268     			/* The packet length: up to 4.5K! */
2269     			int pkt_len = rx_status & 0x1fff;
2270     			struct sk_buff *skb;
2271     
2272     			skb = dev_alloc_skb(pkt_len + 5);
2273     			if (vortex_debug > 4)
2274     				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
2275     					   pkt_len, rx_status);
2276     			if (skb != NULL) {
2277     				skb->dev = dev;
2278     				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
2279     				/* 'skb_put()' points to the start of sk_buff data area. */
2280     				if (vp->bus_master &&
2281     					! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
2282     					dma_addr_t dma = pci_map_single(vp->pdev, skb_put(skb, pkt_len),
2283     									   pkt_len, PCI_DMA_FROMDEVICE);
2284     					outl(dma, ioaddr + Wn7_MasterAddr);
2285     					outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
2286     					outw(StartDMAUp, ioaddr + EL3_CMD);
2287     					while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
2288     						;
2289     					pci_unmap_single(vp->pdev, dma, pkt_len, PCI_DMA_FROMDEVICE);
2290     				} else {
2291     					insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
2292     						 (pkt_len + 3) >> 2);
2293     				}
2294     				outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
2295     				skb->protocol = eth_type_trans(skb, dev);
2296     				netif_rx(skb);
2297     				dev->last_rx = jiffies;
2298     				vp->stats.rx_packets++;
2299     				/* Wait a limited time to go to next packet. */
2300     				for (i = 200; i >= 0; i--)
2301     					if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
2302     						break;
2303     				continue;
2304     			} else if (vortex_debug > 0)
2305     				printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
2306     					   "size %d.\n", dev->name, pkt_len);
2307     		}
2308     		vp->stats.rx_dropped++;
2309     		wait_for_completion(dev, RxDiscard);
2310     	}
2311     
2312     	return 0;
2313     }
2314     
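         /*
          * Bus-master receive path: walk the Rx descriptor ring, copying packets
          * shorter than rx_copybreak into fresh skbuffs and passing larger ones up
          * in place, then refill the ring and unstall the upload engine.
          */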
2315     static int
2316     boomerang_rx(struct net_device *dev)
2317     {
2318     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2319     	int entry = vp->cur_rx % RX_RING_SIZE;
2320     	long ioaddr = dev->base_addr;
2321     	int rx_status;
2322     	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
2323     
2324     	if (vortex_debug > 5)
2325     		printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS));
2326     
2327     	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
2328     		if (--rx_work_limit < 0)
2329     			break;
2330     		if (rx_status & RxDError) { /* Error, update stats. */
2331     			unsigned char rx_error = rx_status >> 16;
2332     			if (vortex_debug > 2)
2333     				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
2334     			vp->stats.rx_errors++;
2335     			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
2336     			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
2337     			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
2338     			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
2339     			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
2340     		} else {
2341     			/* The packet length: up to 4.5K! */
2342     			int pkt_len = rx_status & 0x1fff;
2343     			struct sk_buff *skb;
2344     			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2345     
2346     			if (vortex_debug > 4)
2347     				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
2348     					   pkt_len, rx_status);
2349     
2350     			/* Check if the packet is long enough to just accept without
2351     			   copying to a properly sized skbuff. */
2352     			if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
2353     				skb->dev = dev;
2354     				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
2355     				pci_dma_sync_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2356     				/* 'skb_put()' points to the start of sk_buff data area. */
2357     				memcpy(skb_put(skb, pkt_len),
2358     					   vp->rx_skbuff[entry]->tail,
2359     					   pkt_len);
2360     				vp->rx_copy++;
2361     			} else {
2362     				/* Pass up the skbuff already on the Rx ring. */
2363     				skb = vp->rx_skbuff[entry];
2364     				vp->rx_skbuff[entry] = NULL;
2365     				skb_put(skb, pkt_len);
2366     				pci_unmap_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2367     				vp->rx_nocopy++;
2368     			}
2369     			skb->protocol = eth_type_trans(skb, dev);
2370     			{					/* Use hardware checksum info. */
2371     				int csum_bits = rx_status & 0xee000000;
2372     				if (csum_bits &&
2373     					(csum_bits == (IPChksumValid | TCPChksumValid) ||
2374     					 csum_bits == (IPChksumValid | UDPChksumValid))) {
2375     					skb->ip_summed = CHECKSUM_UNNECESSARY;
2376     					vp->rx_csumhits++;
2377     				}
2378     			}
2379     			netif_rx(skb);
2380     			dev->last_rx = jiffies;
2381     			vp->stats.rx_packets++;
2382     		}
2383     		entry = (++vp->cur_rx) % RX_RING_SIZE;
2384     	}
2385     	/* Refill the Rx ring buffers. */
2386     	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2387     		struct sk_buff *skb;
2388     		entry = vp->dirty_rx % RX_RING_SIZE;
2389     		if (vp->rx_skbuff[entry] == NULL) {
2390     			skb = dev_alloc_skb(PKT_BUF_SZ);
2391     			if (skb == NULL) {
2392     				static unsigned long last_jif;
2393     				if ((jiffies - last_jif) > 10 * HZ) {
2394     					printk(KERN_WARNING "%s: memory shortage\n", dev->name);
2395     					last_jif = jiffies;
2396     				}
2397     				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2398     					mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2399     				break;			/* Bad news!  */
2400     			}
2401     			skb->dev = dev;			/* Mark as being used by this device. */
2402     			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
2403     			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2404     			vp->rx_skbuff[entry] = skb;
2405     		}
2406     		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
2407     		outw(UpUnstall, ioaddr + EL3_CMD);
2408     	}
2409     	return 0;
2410     }
2411     
2412     /*
2413      * If we've hit a total OOM refilling the Rx ring we poll once a second
2414      * for some memory.  Otherwise there is no way to restart the rx process.
2415      */
2416     static void
2417     rx_oom_timer(unsigned long arg)
2418     {
2419     	struct net_device *dev = (struct net_device *)arg;
2420     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2421     
2422     	spin_lock_irq(&vp->lock);
2423     	if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)	/* This test is redundant, but makes me feel good */
2424     		boomerang_rx(dev);
2425     	if (vortex_debug > 1) {
2426     		printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name,
2427     			((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2428     	}
2429     	spin_unlock_irq(&vp->lock);
2430     }
2431     
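         /*
          * Quiesce the hardware: stop the queue and the timers, disable statistics,
          * receiver and transmitter, clear the DMA list pointers, and arm
          * Wake-on-LAN if it was requested.
          */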
2432     static void
2433     vortex_down(struct net_device *dev)
2434     {
2435     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2436     	long ioaddr = dev->base_addr;
2437     
2438     	netif_stop_queue (dev);
2439     
2440     	del_timer_sync(&vp->rx_oom_timer);
2441     	del_timer_sync(&vp->timer);
2442     
2443     	/* Turn off statistics ASAP.  We update vp->stats below. */
2444     	outw(StatsDisable, ioaddr + EL3_CMD);
2445     
2446     	/* Disable the receiver and transmitter. */
2447     	outw(RxDisable, ioaddr + EL3_CMD);
2448     	outw(TxDisable, ioaddr + EL3_CMD);
2449     
2450     	if (dev->if_port == XCVR_10base2)
2451     		/* Turn off thinnet power.  Green! */
2452     		outw(StopCoax, ioaddr + EL3_CMD);
2453     
2454     	outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
2455     
2456     	update_stats(ioaddr, dev);
2457     	if (vp->full_bus_master_rx)
2458     		outl(0, ioaddr + UpListPtr);
2459     	if (vp->full_bus_master_tx)
2460     		outl(0, ioaddr + DownListPtr);
2461     
2462     	if (vp->pdev && vp->enable_wol && (vp->capabilities & CapPwrMgmt))
2463     		acpi_set_WOL(dev);
2464     }
2465     
2466     static int
2467     vortex_close(struct net_device *dev)
2468     {
2469     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2470     	long ioaddr = dev->base_addr;
2471     	int i;
2472     
2473     	if (netif_device_present(dev))
2474     		vortex_down(dev);
2475     
2476     	if (vortex_debug > 1) {
2477     		printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
2478     			   dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
2479     		printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
2480     			   " tx_queued %d Rx pre-checksummed %d.\n",
2481     			   dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
2482     	}
2483     
2484     #if DO_ZEROCOPY
2485     	if (	vp->rx_csumhits &&
2486     			((vp->drv_flags & HAS_HWCKSM) == 0) &&
2487     			(hw_checksums[vp->card_idx] == -1)) {
2488     		printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", dev->name);
2489     		printk(KERN_WARNING "Please see http://www.uow.edu.au/~andrewm/zerocopy.html\n");
2490     	}
2491     #endif
2492     		
2493     	free_irq(dev->irq, dev);
2494     
2495     	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2496     		for (i = 0; i < RX_RING_SIZE; i++)
2497     			if (vp->rx_skbuff[i]) {
2498     				pci_unmap_single(	vp->pdev, le32_to_cpu(vp->rx_ring[i].addr),
2499     									PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2500     				dev_kfree_skb(vp->rx_skbuff[i]);
2501     				vp->rx_skbuff[i] = 0;
2502     			}
2503     	}
2504     	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
2505     		for (i = 0; i < TX_RING_SIZE; i++) {
2506     			if (vp->tx_skbuff[i]) {
2507     				struct sk_buff *skb = vp->tx_skbuff[i];
2508     #if DO_ZEROCOPY
2509     				int k;
2510     
2511     				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
2512     						pci_unmap_single(vp->pdev,
2513     										 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2514     										 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2515     										 PCI_DMA_TODEVICE);
2516     #else
2517     				pci_unmap_single(vp->pdev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
2518     #endif
2519     				dev_kfree_skb(skb);
2520     				vp->tx_skbuff[i] = 0;
2521     			}
2522     		}
2523     	}
2524     
2525     	vp->open = 0;
2526     	return 0;
2527     }
2528     
2529     static void
2530     dump_tx_ring(struct net_device *dev)
2531     {
2532     	if (vortex_debug > 0) {
2533     		struct vortex_private *vp = (struct vortex_private *)dev->priv;
2534     		long ioaddr = dev->base_addr;
2535     		
2536     		if (vp->full_bus_master_tx) {
2537     			int i;
2538     			int stalled = inl(ioaddr + PktStatus) & 0x04;	/* Possibly racy, but it's only debug stuff */
2539     
2540     			printk(KERN_ERR "  Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
2541     					vp->full_bus_master_tx,
2542     					vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2543     					vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
2544     			printk(KERN_ERR "  Transmit list %8.8x vs. %p.\n",
2545     				   inl(ioaddr + DownListPtr),
2546     				   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2547     			wait_for_completion(dev, DownStall);
2548     			for (i = 0; i < TX_RING_SIZE; i++) {
2549     				printk(KERN_ERR "  %d: @%p  length %8.8x status %8.8x\n", i,
2550     					   &vp->tx_ring[i],
2551     #if DO_ZEROCOPY
2552     					   le32_to_cpu(vp->tx_ring[i].frag[0].length),
2553     #else
2554     					   le32_to_cpu(vp->tx_ring[i].length),
2555     #endif
2556     					   le32_to_cpu(vp->tx_ring[i].status));
2557     			}
2558     			if (!stalled)
2559     				outw(DownUnstall, ioaddr + EL3_CMD);
2560     		}
2561     	}
2562     }
2563     
2564     static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2565     {
2566     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2567     	unsigned long flags;
2568     
2569     	if (netif_device_present(dev)) {	/* AKPM: Used to be netif_running */
2570     		spin_lock_irqsave (&vp->lock, flags);
2571     		update_stats(dev->base_addr, dev);
2572     		spin_unlock_irqrestore (&vp->lock, flags);
2573     	}
2574     	return &vp->stats;
2575     }
2576     
2577     /*  Update statistics.
2578     	Unlike with the EL3 we need not worry about interrupts changing
2579     	the window setting from underneath us, but we must still guard
2580     	against a race condition with a StatsUpdate interrupt updating the
2581     	table.  This is done by checking that the ASM (!) code generated uses
2582     	atomic updates with '+='.
2583     	*/
2584     static void update_stats(long ioaddr, struct net_device *dev)
2585     {
2586     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2587     	int old_window = inw(ioaddr + EL3_CMD);
2588     
2589     	if (old_window == 0xffff)	/* Chip suspended or ejected. */
2590     		return;
2591     	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
2592     	/* Switch to the stats window, and read everything. */
2593     	EL3WINDOW(6);
2594     	vp->stats.tx_carrier_errors		+= inb(ioaddr + 0);
2595     	vp->stats.tx_heartbeat_errors	+= inb(ioaddr + 1);
2596     	/* Multiple collisions. */		inb(ioaddr + 2);
2597     	vp->stats.collisions			+= inb(ioaddr + 3);
2598     	vp->stats.tx_window_errors		+= inb(ioaddr + 4);
2599     	vp->stats.rx_fifo_errors		+= inb(ioaddr + 5);
2600     	vp->stats.tx_packets			+= inb(ioaddr + 6);
2601     	vp->stats.tx_packets			+= (inb(ioaddr + 9)&0x30) << 4;
2602     	/* Rx packets	*/				inb(ioaddr + 7);   /* Must read to clear */
2603     	/* Tx deferrals */				inb(ioaddr + 8);
2604     	/* Don't bother with register 9, an extension of registers 6&7.
2605     	   If we do use the 6&7 values the atomic update assumption above
2606     	   is invalid. */
2607     	vp->stats.rx_bytes += inw(ioaddr + 10);
2608     	vp->stats.tx_bytes += inw(ioaddr + 12);
2609     	/* New: On the Vortex we must also clear the BadSSD counter. */
2610     	EL3WINDOW(4);
2611     	inb(ioaddr + 12);
2612     
2613     	{
2614     		u8 up = inb(ioaddr + 13);
2615     		vp->stats.rx_bytes += (up & 0x0f) << 16;
2616     		vp->stats.tx_bytes += (up & 0xf0) << 12;
2617     	}
2618     
2619     	EL3WINDOW(old_window >> 13);
2620     	return;
2621     }
2622     
2623     
2624     static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
2625     {
2626     	struct vortex_private *vp = dev->priv;
2627     	u32 ethcmd;
2628     		
2629     	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
2630     		return -EFAULT;
2631     
2632     	switch (ethcmd) {
2633     	case ETHTOOL_GDRVINFO: {
2634     		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2635     		strcpy(info.driver, DRV_NAME);
2636     		strcpy(info.version, DRV_VERSION);
2637     		if (vp->pdev)
2638     			strcpy(info.bus_info, vp->pdev->slot_name);
2639     		else
2640     			sprintf(info.bus_info, "EISA 0x%lx %d",
2641     				dev->base_addr, dev->irq);
2642     		if (copy_to_user(useraddr, &info, sizeof(info)))
2643     			return -EFAULT;
2644     		return 0;
2645     	}
2646     
2647             }
2648     	
2649     	return -EOPNOTSUPP;
2650     }
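/*
 * Illustrative only -- a minimal userspace sketch (not part of this driver)
 * of how the ETHTOOL_GDRVINFO branch above is reached.  It uses the standard
 * <linux/ethtool.h> and <linux/sockios.h> definitions; error handling is
 * omitted:
 *
 *	struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (caddr_t)&info;
 *	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
 *		printf("%s %s %s\n", info.driver, info.version, info.bus_info);
 */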
2651     
2652     static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2653     {
2654     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2655     	long ioaddr = dev->base_addr;
2656     	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
2657     	int phy = vp->phys[0] & 0x1f;
2658     	int retval;
2659     
2660     	switch(cmd) {
2661     	case SIOCETHTOOL:
2662     		return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
2663     
2664     	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
2665     	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
2666     		data->phy_id = phy;	/* Fall through. */
2667     
2668     	case SIOCGMIIREG:		/* Read MII PHY register. */
2669     	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
2670     		EL3WINDOW(4);
2671     		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2672     		retval = 0;
2673     		break;
2674     
2675     	case SIOCSMIIREG:		/* Write MII PHY register. */
2676     	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
2677     		if (!capable(CAP_NET_ADMIN)) {
2678     			retval = -EPERM;
2679     		} else {
2680     			EL3WINDOW(4);
2681     			mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
2682     			retval = 0;
2683     		}
2684     		break;
2685     	default:
2686     		retval = -EOPNOTSUPP;
2687     		break;
2688     	}
2689     
2690     	return retval;
2691     }
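/*
 * Illustrative only -- how the SIOCGMIIPHY/SIOCGMIIREG cases above are
 * typically driven from userspace, here reading the PHY's BMSR (MII register
 * 1) to check link status.  A sketch, given a datagram socket fd, not part of
 * this driver:
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ioctl(fd, SIOCGMIIPHY, &ifr);		   (fills in mii->phy_id)
 *	mii->reg_num = 1;			   (1 == BMSR)
 *	ioctl(fd, SIOCGMIIREG, &ifr);
 *	link_up = mii->val_out & 0x0004;	   (BMSR link-status bit)
 */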
2692     
2693     /* Pre-Cyclone chips have no documented multicast filter, so the only
2694        multicast setting is to receive all multicast frames.  At least
2695        the chip has a very clean way to set the mode, unlike many others. */
2696     static void set_rx_mode(struct net_device *dev)
2697     {
2698     	long ioaddr = dev->base_addr;
2699     	int new_mode;
2700     
2701     	if (dev->flags & IFF_PROMISC) {
2702     		if (vortex_debug > 0)
2703     			printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
2704     		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
2705     	} else	if ((dev->mc_list)  ||  (dev->flags & IFF_ALLMULTI)) {
2706     		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
2707     	} else
2708     		new_mode = SetRxFilter | RxStation | RxBroadcast;
2709     
2710     	outw(new_mode, ioaddr + EL3_CMD);
2711     }
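/*
 * Note: set_rx_mode() is installed as dev->set_multicast_list earlier in this
 * file, so the network core invokes it whenever dev->flags or the multicast
 * list changes -- e.g. after "ifconfig eth0 promisc" or when an application
 * joins a multicast group.
 */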
2712     
2713     /* MII transceiver control section.
2714        Read and write the MII registers using software-generated serial
2715        MDIO protocol.  See the MII specifications or DP83840A data sheet
2716        for details. */
2717     
2718     /* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
2719        met by back-to-back PCI I/O cycles, but we insert a delay to avoid
2720        "overclocking" issues. */
2721     #define mdio_delay() inl(mdio_addr)	/* dummy read; uses the caller's local 'mdio_addr' */
2722     
2723     #define MDIO_SHIFT_CLK	0x01
2724     #define MDIO_DIR_WRITE	0x04
2725     #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
2726     #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
2727     #define MDIO_DATA_READ	0x02
2728     #define MDIO_ENB_IN		0x00
2729     
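/*
 * For reference, the management frames that mdio_read()/mdio_write() below
 * clock out on MDIO, pieced together from the code and the generic IEEE 802.3
 * MII frame layout (see the MII spec for the authoritative definition):
 *
 *   read:   <32 preamble 1s> 01 10 PPPPP RRRRR Z0 DDDDDDDDDDDDDDDD
 *   write:  <32 preamble 1s> 01 01 PPPPP RRRRR 10 DDDDDDDDDDDDDDDD
 *
 * P = PHY address, R = register number, D = data.  "Z0" is the read
 * turnaround: the host releases the line and the PHY drives the second bit
 * low.  In the code, read_cmd = (0xf6 << 10) | ... supplies the 01 10 header
 * (preceded by one extra preamble bit), and write_cmd = 0x50020000 | ...
 * packs the entire 32-bit write frame, 0x00020000 being the 10 turnaround.
 */
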
2730     /* Generate the preamble required for initial synchronization and
2731        a few older transceivers. */
2732     static void mdio_sync(long ioaddr, int bits)
2733     {
2734     	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
2735     
2736     	/* Establish sync by sending at least 32 logic ones. */
2737     	while (-- bits >= 0) {
2738     		outw(MDIO_DATA_WRITE1, mdio_addr);
2739     		mdio_delay();
2740     		outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
2741     		mdio_delay();
2742     	}
2743     }
2744     
2745     static int mdio_read(struct net_device *dev, int phy_id, int location)
2746     {
2747     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2748     	int i;
2749     	long ioaddr = dev->base_addr;
2750     	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
2751     	unsigned int retval = 0;
2752     	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
2753     
2754     	spin_lock_bh(&vp->mdio_lock);
2755     
2756     	if (mii_preamble_required)
2757     		mdio_sync(ioaddr, 32);
2758     
2759     	/* Shift the read command bits out. */
2760     	for (i = 14; i >= 0; i--) {
2761     		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
2762     		outw(dataval, mdio_addr);
2763     		mdio_delay();
2764     		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
2765     		mdio_delay();
2766     	}
2767     	/* Read the two transition, 16 data, and wire-idle bits. */
2768     	for (i = 19; i > 0; i--) {
2769     		outw(MDIO_ENB_IN, mdio_addr);
2770     		mdio_delay();
2771     		retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
2772     		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
2773     		mdio_delay();
2774     	}
2775     	spin_unlock_bh(&vp->mdio_lock);
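	/* Of the 19 bits just gathered, bit 17 is the turnaround bit the PHY
	   drives low; if it read back high no PHY answered and 0xffff is
	   returned.  Bits 16..1 hold the register contents. */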
2776     	return (retval & 0x20000) ? 0xffff : (retval >> 1) & 0xffff;
2777     }
2778     
2779     static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
2780     {
2781     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2782     	long ioaddr = dev->base_addr;
2783     	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
2784     	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
2785     	int i;
2786     
2787     	spin_lock_bh(&vp->mdio_lock);
2788     
2789     	if (mii_preamble_required)
2790     		mdio_sync(ioaddr, 32);
2791     
2792     	/* Shift the command bits out. */
2793     	for (i = 31; i >= 0; i--) {
2794     		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
2795     		outw(dataval, mdio_addr);
2796     		mdio_delay();
2797     		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
2798     		mdio_delay();
2799     	}
2800     	/* Leave the interface idle. */
2801     	for (i = 1; i >= 0; i--) {
2802     		outw(MDIO_ENB_IN, mdio_addr);
2803     		mdio_delay();
2804     		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
2805     		mdio_delay();
2806     	}
2807     	spin_unlock_bh(&vp->mdio_lock);
2808     	return;
2809     }
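/*
 * Illustrative only -- a typical use of mdio_write(), restarting
 * autonegotiation on the attached PHY (MII register 0 is the BMCR; 0x1200 =
 * autoneg enable + restart).  Window 4 must be selected first, as the ioctl
 * handler above does:
 *
 *	EL3WINDOW(4);
 *	mdio_write(dev, vp->phys[0], 0, 0x1200);
 */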
2810     
2811     /* ACPI: Advanced Configuration and Power Interface. */
2812     /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
2813     static void acpi_set_WOL(struct net_device *dev)
2814     {
2815     	struct vortex_private *vp = (struct vortex_private *)dev->priv;
2816     	long ioaddr = dev->base_addr;
2817     
2818     	/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
2819     	EL3WINDOW(7);
2820     	outw(2, ioaddr + 0x0c);
2821     	/* The RxFilter must accept the WOL frames. */
2822     	outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
2823     	outw(RxEnable, ioaddr + EL3_CMD);
2824     	/* Change the power state to D3; RxEnable doesn't take effect. */
2825     	pci_set_power_state(vp->pdev, 0x8103);
2826     }
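/*
 * Illustrative only -- the "Magic Packet" that the value 2 written above arms
 * the NIC to wake on is, by the usual Wake-On-LAN convention, six 0xFF bytes
 * followed by the target MAC address repeated 16 times, carried in any frame.
 * A userspace sketch, not part of this driver:
 *
 *	unsigned char pkt[6 + 16 * 6];
 *	memset(pkt, 0xff, 6);
 *	for (i = 0; i < 16; i++)
 *		memcpy(pkt + 6 + i * 6, dest_mac, 6);
 *
 * ... then send pkt, e.g. over a PF_PACKET socket or as a UDP broadcast,
 * while the board sits in the D3 state set above.
 */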
2827     
2828     
2829     static void __devexit vortex_remove_one (struct pci_dev *pdev)
2830     {
2831     	struct net_device *dev = pci_get_drvdata(pdev);
2832     	struct vortex_private *vp;
2833     
2834     	if (!dev) {
2835     		printk(KERN_ERR "vortex_remove_one called for EISA device!\n");
2836     		BUG();
2837     	}
2838     
2839     	vp = dev->priv;
2840     
2841     	/* AKPM: FIXME: we should have
2842     	 *	if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
2843     	 * here
2844     	 */
2845     	unregister_netdev(dev);
2846     	/* Should really use wait_for_completion() here */
2847     	outw((vp->drv_flags & EEPROM_NORESET) ? (TotalReset|0x10) : TotalReset, dev->base_addr + EL3_CMD);
2848     	pci_free_consistent(pdev,
2849     						sizeof(struct boom_rx_desc) * RX_RING_SIZE
2850     							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
2851     						vp->rx_ring,
2852     						vp->rx_ring_dma);
2853     	if (vp->must_free_region)
2854     		release_region(dev->base_addr, vp->io_size);
2855     	kfree(dev);
2856     }
2857     
2858     
2859     static struct pci_driver vortex_driver = {
2860     	name:		"3c59x",
2861     	probe:		vortex_init_one,
2862     	remove:		vortex_remove_one,
2863     	id_table:	vortex_pci_tbl,
2864     #ifdef CONFIG_PM
2865     	suspend:	vortex_suspend,
2866     	resume:		vortex_resume,
2867     #endif
2868     };
2869     
2870     
2871     static int vortex_have_pci;
2872     static int vortex_have_eisa;
2873     
2874     
2875     static int __init vortex_init (void)
2876     {
2877     	int pci_rc, eisa_rc;
2878     
2879     	pci_rc = pci_module_init(&vortex_driver);
2880     	eisa_rc = vortex_eisa_init();
2881     
2882     	if (pci_rc == 0)
2883     		vortex_have_pci = 1;
2884     	if (eisa_rc > 0)
2885     		vortex_have_eisa = 1;
2886     
2887     	return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
2888     }
2889     
2890     
2891     static void __exit vortex_eisa_cleanup (void)
2892     {
2893     	struct net_device *dev, *tmp;
2894     	struct vortex_private *vp;
2895     	long ioaddr;
2896     
2897     	dev = root_vortex_eisa_dev;
2898     
2899     	while (dev) {
2900     		vp = dev->priv;
2901     		ioaddr = dev->base_addr;
2902     
2903     		unregister_netdev (dev);
2904     		outw (TotalReset, ioaddr + EL3_CMD);
2905     		release_region (ioaddr, VORTEX_TOTAL_SIZE);
2906     
2907     		tmp = dev;
2908     		dev = vp->next_module;
2909     
2910     		kfree (tmp);
2911     	}
2912     }
2913     
2914     
2915     static void __exit vortex_cleanup (void)
2916     {
2917     	if (vortex_have_pci)
2918     		pci_unregister_driver (&vortex_driver);
2919     	if (vortex_have_eisa)
2920     		vortex_eisa_cleanup ();
2921     }
2922     
2923     
2924     module_init(vortex_init);
2925     module_exit(vortex_cleanup);
2926     
2927     
2928     /*
2929      * Local variables:
2930      *  c-indent-level: 4
2931      *  c-basic-offset: 4
2932      *  tab-width: 4
2933      * End:
2934      */
2935