/* drivers/net/eepro100.c: An Intel i82557 Ethernet driver for Linux. */
/*
   NOTICE: this version tested with kernels 1.3.72 and later only!
        Written 1996-1998 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU Public License, incorporated herein by reference.

        This driver is for the Intel EtherExpress Pro 100B boards.
        It should work with other i82557 and i82558 boards.
        To use a built-in driver, install as drivers/net/eepro100.c.
        To use as a module, use the compile-command at the end of the file.

        The author may be reached as becker@CESDIS.usra.edu, or C/O
        Center of Excellence in Space Data and Information Sciences
           Code 930.5, NASA Goddard Space Flight Center, Greenbelt MD 20771
        For updates see
                http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html
        There is also a mailing list based at
                linux-eepro100@cesdis.gsfc.nasa.gov
*/
22
 
23
static const char *version =
24
"eepro100.c:v1.05 10/16/98 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n";
25
 
26
/* A few user-configurable values that apply to all boards.
27
   First set are undocumented and spelled per Intel recommendations. */
28
 
29
static int congenb = 0;         /* Enable congestion control in the DP83840. */
30
static int txfifo = 8;          /* Tx FIFO threshold in 4 byte units, 0-15 */
31
static int rxfifo = 8;          /* Rx FIFO threshold, default 32 bytes. */
32
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
33
static int txdmacount = 128;
34
static int rxdmacount = 0;
35
 
36
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
37
   Lower values use more memory, but are faster. */
38
static int rx_copybreak = 200;
39
 
40
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
41
static int max_interrupt_work = 20;
42
 
43
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
44
static int multicast_filter_limit = 64;
45
 
46
#include <linux/config.h>
47
#ifdef MODULE
48
#ifdef MODVERSIONS
49
#include <linux/modversions.h>
50
#endif
51
#include <linux/module.h>
52
#else
53
#define MOD_INC_USE_COUNT
54
#define MOD_DEC_USE_COUNT
55
#endif
56
 
57
#include <linux/version.h>
58
#include <linux/kernel.h>
59
#include <linux/string.h>
60
#include <linux/timer.h>
61
#include <linux/errno.h>
62
#include <linux/ioport.h>
63
#include <linux/malloc.h>
64
#include <linux/interrupt.h>
65
#include <linux/pci.h>
66
#if LINUX_VERSION_CODE < 0x20155
67
#include <linux/bios32.h>               /* Ignore the bogus warning in 2.1.100+ */
68
#endif
69
#include <asm/bitops.h>
70
#include <asm/io.h>
71
 
72
#include <linux/netdevice.h>
73
#include <linux/etherdevice.h>
74
#include <linux/skbuff.h>
75
#include <linux/delay.h>
76
 
77
/* Unused in the 2.0.* version, but retained for documentation. */
78
#if LINUX_VERSION_CODE > 0x20118  &&  defined(MODULE)
79
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
80
MODULE_DESCRIPTION("Intel i82557/i82558 PCI EtherExpressPro driver");
81
MODULE_PARM(debug, "i");
82
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
83
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
84
MODULE_PARM(congenb, "i");
85
MODULE_PARM(txfifo, "i");
86
MODULE_PARM(rxfifo, "i");
87
MODULE_PARM(txdmacount, "i");
88
MODULE_PARM(rxdmacount, "i");
89
MODULE_PARM(rx_copybreak, "i");
90
MODULE_PARM(max_interrupt_work, "i");
91
MODULE_PARM(multicast_filter_limit, "i");
92
#endif
93
 
94
#define RUN_AT(x) (jiffies + (x))
95
 
96
#if (LINUX_VERSION_CODE < 0x20123)
97
#define test_and_set_bit(val, addr) set_bit(val, addr)
98
#endif
99
#if LINUX_VERSION_CODE < 0x20159
100
#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
101
#else
102
#define dev_free_skb(skb) dev_kfree_skb(skb);
103
#endif
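
/* Note: LINUX_VERSION_CODE packs the version as (major<<16 | minor<<8 | patch),
   so e.g. 0x20123 above is 2.1.35 and 0x20159 is 2.1.89. */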
104
 
105
/* The total I/O port extent of the board.
106
   The registers beyond 0x18 only exist on the i82558. */
107
#define SPEEDO3_TOTAL_SIZE 0x20
108
 
109
int speedo_debug = 1;
110
 
111
/*
                                Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.  While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers of previous chips, but
also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
Tx mode, but in a simplified, lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.

Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD. The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers.  The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space.  This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.
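
As a concrete illustration, here is a simplified sketch of what
speedo_start_xmit() below fills into a ring slot (field names are those of
struct TxFD; 'entry' is the slot index):

        sp->tx_ring[entry].status       = (CmdSuspend | CmdTx | CmdTxFlex) << 16;
        sp->tx_ring[entry].link         = virt_to_bus(&sp->tx_ring[(entry+1) % TX_RING_SIZE]);
        sp->tx_ring[entry].tx_desc_addr = virt_to_bus(&sp->tx_ring[entry].tx_buf_addr0);
        sp->tx_ring[entry].tx_buf_addr0 = virt_to_bus(skb->data);
        sp->tx_ring[entry].tx_buf_size0 = skb->len;

so each ring slot is a TxCB immediately followed by the single TxBD it uses.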

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure.  This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor.  So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.

An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index and checking for the
command-complete bit.  The setup frames may have the NoOp command on the
Tx ring marked as complete before the setup command itself has finished,
but this is not a problem: the tx_ring entry can still be safely reused, as
the tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.

Commands may have bits set in the command word, e.g. CmdSuspend, to either
suspend or stop the transmit/command unit.  This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
        erasing the previous suspend
                the chip processes the previous command
                the chip processes the final command, and suspends
        doing the CU_RESUME
                the chip processes the next, not-yet-valid post-final command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an
intervening delay.  Thus the resume command is always within the
interrupts-disabled region.  This is a timing dependence, but handling this
condition in a timing-independent way would considerably complicate the code.

Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process.  This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3, this driver uses the new
SKBUFF_RX_COPYBREAK scheme rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers.  The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames against the cost of copying each frame into a
correctly-sized skbuff.
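
In code terms, the decision in speedo_rx() below is roughly (rx_copybreak
being the tunable defined near the top of this file):

        if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0)
                copy the frame into the small, freshly allocated skbuff;
        else
                pass up sp->rx_skbuff[entry] itself and mark that ring slot empty.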

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff.  For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.

IIID. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is
single-threaded by the hardware and other software.

The send-packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it is queuing a Tx
packet.  If the next queue slot is empty, it clears the tbusy flag when
finished; otherwise it sets the 'sp->tx_full' flag.
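
The corresponding code in speedo_start_xmit() leaves two spare ring entries
for set_rx_mode() and reads, almost verbatim:

        if (sp->cur_tx - sp->dirty_tx > TX_RING_SIZE - 3)
                sp->tx_full = 1;
        else
                clear_bit(0, (void*)&dev->tbusy);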

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  Iff the 'sp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information.  But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!

*/
242
 
243
/* A few values that may be tweaked. */
244
/* The ring sizes should be a power of two for efficiency. */
245
#define TX_RING_SIZE    16              /* Effectively 2 entries fewer. */
246
#define RX_RING_SIZE    16
247
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
248
#define PKT_BUF_SZ              1536
249
 
250
/* Time in jiffies before concluding the transmitter is hung. */
251
#define TX_TIMEOUT  ((800*HZ)/1000)
252
 
253
/* How to wait for the command unit to accept a command.
254
   Typically this takes 0 ticks. */
255
static inline void wait_for_cmd_done(long cmd_ioaddr)
256
{
257
        int wait = 100;
258
        do   ;
259
        while(inb(cmd_ioaddr) && --wait >= 0);
260
}
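
/* A minimal sketch of how this is paired with an SCB command elsewhere in
   this file (ioaddr being dev->base_addr):

        wait_for_cmd_done(ioaddr + SCBCmd);
        outw(CU_RESUME, ioaddr + SCBCmd);

   i.e. spin until the command byte reads back as zero, then write the next
   command. */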
261
 
262
/* Operational parameters that usually are not changed. */
263
 
264
/* The rest of these values should never change. */
265
 
266
/* Offsets to the various registers.
267
   Accesses need not be longword aligned. */
268
enum speedo_offsets {
269
        SCBStatus = 0, SCBCmd = 2,      /* Rx/Command Unit command and status. */
270
        SCBPointer = 4,                         /* General purpose pointer. */
271
        SCBPort = 8,                            /* Misc. commands and operands.  */
272
        SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
273
        SCBCtrlMDI = 16,                        /* MDI interface control. */
274
        SCBEarlyRx = 20,                        /* Early receive byte count. */
275
};
276
/* Commands that can be put in a command list entry. */
277
enum commands {
278
        CmdNOp = 0, CmdIASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
279
        CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7,
280
        CmdSuspend = 0x4000,            /* Suspend after completion. */
281
        CmdIntr = 0x2000,                       /* Interrupt after completion. */
282
        CmdTxFlex = 0x0008,                     /* Use "Flexible mode" for CmdTx command. */
283
};
284
 
285
/* The SCB accepts the following controls for the Tx and Rx units: */
286
#define  CU_START               0x0010
287
#define  CU_RESUME              0x0020
288
#define  CU_STATSADDR   0x0040
289
#define  CU_SHOWSTATS   0x0050  /* Dump statistics counters. */
290
#define  CU_CMD_BASE    0x0060  /* Base address to add to CU commands. */
291
#define  CU_DUMPSTATS   0x0070  /* Dump then reset stats counters. */
292
 
293
#define  RX_START       0x0001
294
#define  RX_RESUME      0x0002
295
#define  RX_ABORT       0x0004
296
#define  RX_ADDR_LOAD   0x0006
297
#define  RX_RESUMENR    0x0007
298
#define INT_MASK        0x0100
299
#define DRVR_INT        0x0200          /* Driver generated interrupt. */
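
/* The CU_xxx and RX_xxx codes above form the low byte of the 16-bit SCB
   command word; INT_MASK and DRVR_INT sit in the upper (interrupt control)
   byte and are simply OR-ed in, as in the
   outw(INT_MASK | CU_STATSADDR, ioaddr + SCBCmd) call in speedo_open(). */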
300
 
301
/* The Speedo3 Rx and Tx frame/buffer descriptors. */
302
struct descriptor_net {                 /* A generic descriptor. */
303
        s16 status;             /* Offset 0. */
304
        s16 command;            /* Offset 2. */
305
        u32 link;                                       /* struct descriptor *  */
306
        unsigned char params[0];
307
};
308
 
309
/* The Speedo3 Rx and Tx buffer descriptors. */
310
struct RxFD {                                   /* Receive frame descriptor. */
311
        s32 status;
312
        u32 link;                                       /* struct RxFD * */
313
        u32 rx_buf_addr;                        /* void * */
314
        u16 count;
315
        u16 size;
316
};
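
/* Note: each RxFD lives at the head of its receive skbuff's data buffer;
   speedo_init_rx_ring() below points it at skb->tail and then reserves
   sizeof(struct RxFD) bytes, so the received frame data follows the
   descriptor in the same buffer. */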
317
 
318
/* Selected elements of the Tx/RxFD.status word. */
319
enum RxFD_bits {
320
        RxComplete=0x8000, RxOK=0x2000,
321
        RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
322
        RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
323
        StatusComplete=0x8000,
324
};
325
 
326
struct TxFD {                                   /* Transmit frame descriptor set. */
327
        s32 status;
328
        u32 link;                                       /* void * */
329
        u32 tx_desc_addr;                       /* Always points to the tx_buf_addr element. */
330
        s32 count;                                      /* # of TBD (=1), Tx start thresh., etc. */
331
        /* This constitutes two "TBD" entries -- we only use one. */
332
        u32 tx_buf_addr0;                       /* void *, frame to be transmitted.  */
333
        s32 tx_buf_size0;                       /* Length of Tx frame. */
334
        u32 tx_buf_addr1;                       /* void *, frame to be transmitted.  */
335
        s32 tx_buf_size1;                       /* Length of Tx frame. */
336
};
337
 
338
/* Elements of the dump_statistics block. This block must be lword aligned. */
339
struct speedo_stats {
340
        u32 tx_good_frames;
341
        u32 tx_coll16_errs;
342
        u32 tx_late_colls;
343
        u32 tx_underruns;
344
        u32 tx_lost_carrier;
345
        u32 tx_deferred;
346
        u32 tx_one_colls;
347
        u32 tx_multi_colls;
348
        u32 tx_total_colls;
349
        u32 rx_good_frames;
350
        u32 rx_crc_errs;
351
        u32 rx_align_errs;
352
        u32 rx_resource_errs;
353
        u32 rx_overrun_errs;
354
        u32 rx_colls_errs;
355
        u32 rx_runt_errs;
356
        u32 done_marker;
357
};
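
/* The chip writes its dump-complete status into done_marker; the driver
   clears it before requesting a dump (see the CU_STATSADDR and CU_DUMPSTATS
   commands issued from speedo_open()). */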
358
 
359
struct speedo_private {
360
        char devname[8];                        /* Used only for kernel debugging. */
361
        const char *product_name;
362
        struct device *next_module;
363
        struct TxFD     tx_ring[TX_RING_SIZE];  /* Commands (usually CmdTxPacket). */
364
        /* The saved address of a sent-in-place packet/buffer, for dev_free_skb(). */
365
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
366
        struct descriptor_net  *last_cmd;       /* Last command sent. */
367
        /* Rx descriptor ring & addresses of receive-in-place skbuffs. */
368
        struct RxFD *rx_ringp[RX_RING_SIZE];
369
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
370
        struct RxFD *last_rxf;  /* Last (end-of-list) RxFD in the Rx chain. */
371
        struct enet_statistics stats;
372
        struct speedo_stats lstats;
373
        struct timer_list timer;        /* Media selection timer. */
374
        long last_rx_time;                      /* Last Rx, in jiffies, to handle Rx hang. */
375
        unsigned int cur_rx, cur_tx;            /* The next free ring entry */
376
        unsigned int dirty_rx, dirty_tx;        /* The ring entries to be free()ed. */
377
        int mc_setup_frm_len;                           /* The length of an allocated.. */
378
        struct descriptor_net *mc_setup_frm;    /* ..multicast setup frame. */
379
        int mc_setup_busy;                                      /* Avoid double-use of setup frame. */
380
        int in_interrupt;                                       /* Word-aligned dev->interrupt */
381
        char rx_mode;                                           /* Current PROMISC/ALLMULTI setting. */
382
        unsigned int tx_full:1;                         /* The Tx queue is full. */
383
        unsigned int full_duplex:1;                     /* Full-duplex operation requested. */
384
        unsigned int default_port:1;            /* Last dev->if_port value. */
385
        unsigned int rx_bug:1;                          /* Work around receiver hang errata. */
386
        unsigned int rx_bug10:1;                        /* Receiver might hang at 10mbps. */
387
        unsigned int rx_bug100:1;                       /* Receiver might hang at 100mbps. */
388
        unsigned short phy[2];                          /* PHY media interfaces available. */
389
};
390
 
391
/* The parameters for a CmdConfigure operation.
392
   There are so many options that it would be difficult to document each bit.
393
   We mostly use the default or recommended settings. */
394
const char i82557_config_cmd[22] = {
395
        22, 0x08, 0, 0,  0, 0x80, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
396
        0, 0x2E, 0,  0x60, 0,
397
        0xf2, 0x48,   0, 0x40, 0xf2, 0x80,              /* 0x40=Force full-duplex */
398
        0x3f, 0x05, };
399
const char i82558_config_cmd[22] = {
400
        22, 0x08, 0, 1,  0, 0x80, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
401
        0, 0x2E, 0,  0x60, 0x08, 0x88,
402
        0x68, 0, 0x40, 0xf2, 0xBD,              /* 0xBD->0xFD=Force full-duplex */
403
        0x31, 0x05, };
404
 
405
/* PHY media interface chips. */
406
static const char *phys[] = {
407
        "None", "i82553-A/B", "i82553-C", "i82503",
408
        "DP83840", "80c240", "80c24", "i82555",
409
        "unknown-8", "unknown-9", "DP83840A", "unknown-11",
410
        "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
411
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
412
                                         S80C24, I82555, DP83840A=10, };
413
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
414
 
415
static void speedo_found1(struct device *dev, long ioaddr, int irq,
416
                                                  int card_idx);
417
 
418
static int read_eeprom(long ioaddr, int location, int addr_len);
419
static int mdio_read(long ioaddr, int phy_id, int location);
420
static int mdio_write(long ioaddr, int phy_id, int location, int value);
421
static int speedo_open(struct device *dev);
422
static void speedo_timer(unsigned long data);
423
static void speedo_init_rx_ring(struct device *dev);
424
static int speedo_start_xmit(struct sk_buff *skb, struct device *dev);
425
static int speedo_rx(struct device *dev);
426
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
427
static int speedo_close(struct device *dev);
428
static struct enet_statistics *speedo_get_stats(struct device *dev);
429
static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd);
430
static void set_rx_mode(struct device *dev);
431
 
432
 
433
 
434
/* The parameters that may be passed in... */
435
/* 'options' is used to pass a transceiver override or full-duplex flag
436
   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
437
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
438
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
439
#ifdef MODULE
440
static int debug = -1;                  /* The debug level */
441
#endif
442
 
443
#ifdef honor_default_port
444
/* Optional driver feature to allow forcing the transceiver setting.
445
   Not recommended. */
446
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
447
                                                   0x2000, 0x2100, 0x0400, 0x3100};
448
#endif
449
 
450
/* A list of all installed Speedo devices, for removing the driver module. */
451
static struct device *root_speedo_dev = NULL;
452
 
453
int eepro100_init(struct device *dev)
454
{
455
        int cards_found = 0;
456
        static int pci_index = 0;
457
        unsigned char pci_bus, pci_device_fn;
458
 
459
        if (! pcibios_present())
460
                return cards_found;
461
 
462
        for (; pci_index < 8; pci_index++) {
463
                unsigned char pci_bus, pci_device_fn, pci_latency;
464
                long ioaddr;
465
                int irq;
466
 
467
                u16 pci_command, new_command;
468
 
469
                printk("Finding Device\n");
470
 
471
                if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
472
                                                                &pci_bus, &pci_device_fn)
473
                        != PCIBIOS_SUCCESSFUL)
474
                        break;
475
 
476
 
477
#if defined(PCI_SUPPORT_VER2)                   
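                /* On newer kernels (PCI_SUPPORT_VER2) the device is looked up
                   through a struct pci_dev; older kernels use the BIOS32-style
                   pcibios_read_config helpers in the #else branch. */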
478
                {
479
                        struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
480
                        ioaddr = pdev->base_address[1];         /* Use [0] to mem-map */
481
                        irq = pdev->irq;
482
                        printk("Device 1 %d %d\n",ioaddr,irq);
483
                }
484
#else
485
                {
486
                        u32 pci_ioaddr;
487
                        u8 pci_irq_line;
488
                        pcibios_read_config_byte(pci_bus, pci_device_fn,
489
                                                                         PCI_INTERRUPT_LINE, &pci_irq_line);
490
                        /* Note: BASE_ADDRESS_0 is for memory-mapping the registers. */
491
                        pcibios_read_config_dword(pci_bus, pci_device_fn,
492
                                                                          PCI_BASE_ADDRESS_1, &pci_ioaddr);
493
                        ioaddr = pci_ioaddr;
494
                        irq = pci_irq_line;
495
                        printk("Device 2 %d %d\n",ioaddr,irq);
496
                }
497
#endif
498
                /* Remove I/O space marker in bit 0. */
499
                ioaddr &= ~3;
500
                if (speedo_debug > 2)
501
                        printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
502
                                   ioaddr, irq);
503
 
504
                /* Get and check the bus-master and latency values. */
505
                pcibios_read_config_word(pci_bus, pci_device_fn,
506
                                                                 PCI_COMMAND, &pci_command);
507
                new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
508
                if (pci_command != new_command) {
509
                        printk(KERN_INFO "  The PCI BIOS has not enabled this"
510
                                   " device!  Updating PCI command %4.4x->%4.4x.\n",
511
                                   pci_command, new_command);
512
                        pcibios_write_config_word(pci_bus, pci_device_fn,
513
                                                                          PCI_COMMAND, new_command);
514
                }
515
                pcibios_read_config_byte(pci_bus, pci_device_fn,
516
                                                                 PCI_LATENCY_TIMER, &pci_latency);
517
                if (pci_latency < 32) {
518
                        printk("  PCI latency timer (CFLT) is unreasonably low at %d."
519
                                   "  Setting to 32 clocks.\n", pci_latency);
520
                        pcibios_write_config_byte(pci_bus, pci_device_fn,
521
                                                                          PCI_LATENCY_TIMER, 32);
522
                } else if (speedo_debug > 1)
523
                        printk("  PCI latency timer (CFLT) is %#x.\n", pci_latency);
524
 
525
                speedo_found1(dev, ioaddr, irq, cards_found);
526
                dev = NULL;
527
                cards_found++;
528
        }
529
 
530
        return cards_found;
531
}
532
 
533
static void speedo_found1(struct device *dev, long ioaddr, int irq,
534
                                                  int card_idx)
535
{
536
        static int did_version = 0;                     /* Already printed version info. */
537
        struct speedo_private *sp;
538
        char *product;
539
        int i, option;
540
        u16 eeprom[0x40];
541
 
542
        if (speedo_debug > 0  &&  did_version++ == 0)
543
                printk(version);
544
 
545
        dev = init_etherdev(dev, sizeof(struct speedo_private));
546
 
547
        if (dev->mem_start > 0)
548
                option = dev->mem_start;
549
        else if (card_idx >= 0  &&  options[card_idx] >= 0)
550
                option = options[card_idx];
551
        else
552
                option = 0;
553
 
554
        /* Read the station address EEPROM before doing the reset.
555
           Perhaps this should even be done before accepting the device,
556
           then we wouldn't have a device name with which to report the error. */
557
        {
558
                u16 sum = 0;
559
                int j;
560
                int addr_len = read_eeprom(ioaddr, 0, 6) == 0xffff ? 8 : 6;
561
 
562
                for (j = 0, i = 0; i < 0x40; i++) {
563
                        u16 value = read_eeprom(ioaddr, i, addr_len);
564
                        eeprom[i] = value;
565
                        sum += value;
566
                        if (i < 3) {
567
                                dev->dev_addr[j++] = value;
568
                                dev->dev_addr[j++] = value >> 8;
569
                        }
570
                }
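                /* A correctly programmed EEPROM sums to 0xBABA over all 64
                   words; any other sum suggests blank or corrupted contents. */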
571
                if (sum != 0xBABA)
572
                        printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
573
                                   "check settings before activating this device!\n",
574
                                   dev->name, sum);
575
                /* Don't  unregister_netdev(dev);  as the EEPro may actually be
576
                   usable, especially if the MAC address is set later. */
577
        }
578
 
579
        /* Reset the chip: stop Tx and Rx processes and clear counters.
580
           This takes less than 10usec and will easily finish before the next
581
           action. */
582
        outl(0, ioaddr + SCBPort);
583
 
584
        if (eeprom[3] & 0x0100)
585
                product = "OEM i82557/i82558 10/100 Ethernet";
586
        else
587
                product = "Intel EtherExpress Pro 10/100";
588
 
589
        printk(KERN_INFO "%s: %s at %#3lx, ", dev->name, product, ioaddr);
590
 
591
        for (i = 0; i < 5; i++)
592
                printk("%2.2X:", dev->dev_addr[i]);
593
        printk("%2.2X, IRQ %d.\n", dev->dev_addr[i], irq);
594
 
595
#ifndef kernel_bloat
596
        /* OK, this is pure kernel bloat.  I don't like it when other drivers
597
           waste non-pageable kernel space to emit similar messages, but I need
598
           them for bug reports. */
599
        {
600
                const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
601
                /* The self-test results must be paragraph aligned. */
602
                s32 str[6], *volatile self_test_results;
603
                int boguscnt = 16000;   /* Timeout for self-test. */
604
                if (eeprom[3] & 0x03)
605
                        printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
606
                                   " work-around.\n");
607
                printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
608
                           " connectors present:",
609
                           eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
610
                for (i = 0; i < 4; i++)
611
                        if (eeprom[5] & (1<<i))
612
                                printk(connectors[i]);
613
                printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
614
                           phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
615
                if (eeprom[7] & 0x0700)
616
                        printk(KERN_INFO "    Secondary interface chip %s.\n",
617
                                   phys[(eeprom[7]>>8)&7]);
618
                if (((eeprom[6]>>8) & 0x3f) == DP83840
619
                        ||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
620
                        int mdi_reg23 = mdio_read(ioaddr, eeprom[6] & 0x1f, 23) | 0x0422;
621
                        if (congenb)
622
                          mdi_reg23 |= 0x0100;
623
                        printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
624
                                   mdi_reg23);
625
                        mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23);
626
                }
627
                if ((option >= 0) && (option & 0x70)) {
628
                        printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
629
                                   (option & 0x20 ? 100 : 10),
630
                                   (option & 0x10 ? "full" : "half"));
631
                        mdio_write(ioaddr, eeprom[6] & 0x1f, 0,
632
                                           ((option & 0x20) ? 0x2000 : 0) |     /* 100mbps? */
633
                                           ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
634
                }
635
 
636
                /* Perform a system self-test. */
637
                self_test_results = (s32*) ((((long) str) + 15) & ~0xf);
638
                self_test_results[0] = 0;
639
                self_test_results[1] = -1;
640
                outl(virt_to_bus(self_test_results) | 1, ioaddr + SCBPort);
641
                do {
642
                        udelay(10);
643
                } while (self_test_results[1] == -1  &&  --boguscnt >= 0);
644
 
645
                if (boguscnt < 0) {             /* Test optimized out. */
646
                        printk(KERN_ERR "Self test failed, status %8.8x:\n"
647
                                   KERN_ERR " Failure to initialize the i82557.\n"
648
                                   KERN_ERR " Verify that the card is a bus-master"
649
                                   " capable slot.\n",
650
                                   self_test_results[1]);
651
                } else
652
                        printk(KERN_INFO "  General self-test: %s.\n"
653
                                   KERN_INFO "  Serial sub-system self-test: %s.\n"
654
                                   KERN_INFO "  Internal registers self-test: %s.\n"
655
                                   KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
656
                                   self_test_results[1] & 0x1000 ? "failed" : "passed",
657
                                   self_test_results[1] & 0x0020 ? "failed" : "passed",
658
                                   self_test_results[1] & 0x0008 ? "failed" : "passed",
659
                                   self_test_results[1] & 0x0004 ? "failed" : "passed",
660
                                   self_test_results[0]);
661
        }
662
#endif  /* kernel_bloat */
663
 
664
        outl(0, ioaddr + SCBPort);
665
 
666
        /* We do a request_region() only to register /proc/ioports info. */
667
        request_region(ioaddr, SPEEDO3_TOTAL_SIZE, "Intel Speedo3 Ethernet");
668
 
669
        dev->base_addr = ioaddr;
670
        dev->irq = irq;
671
 
672
        if (dev->priv == NULL)
673
                dev->priv = kmalloc(sizeof(*sp), GFP_KERNEL);
674
        sp = dev->priv;
675
        memset(sp, 0, sizeof(*sp));
676
        sp->next_module = root_speedo_dev;
677
        root_speedo_dev = dev;
678
 
679
        sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
680
        if (card_idx >= 0) {
681
                if (full_duplex[card_idx] >= 0)
682
                        sp->full_duplex = full_duplex[card_idx];
683
        }
684
        sp->default_port = option >= 0 ? (option & 0x0f) : 0;
685
 
686
        sp->phy[0] = eeprom[6];
687
        sp->phy[1] = eeprom[7];
688
        sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
689
 
690
        if (sp->rx_bug)
691
                printk(KERN_INFO "  Receiver lock-up workaround activated.\n");
692
 
693
        /* The Speedo-specific entries in the device structure. */
694
        dev->open = &speedo_open;
695
        dev->hard_start_xmit = &speedo_start_xmit;
696
        dev->stop = &speedo_close;
697
        dev->get_stats = &speedo_get_stats;
698
        dev->set_multicast_list = &set_rx_mode;
699
        dev->do_ioctl = &speedo_ioctl;
700
 
701
        return;
702
}
703
 
704
/* Serial EEPROM section.
705
   A "bit" grungy, but we work our way through bit-by-bit :->. */
706
/*  EEPROM_Ctrl bits. */
707
#define EE_SHIFT_CLK    0x01    /* EEPROM shift clock. */
708
#define EE_CS                   0x02    /* EEPROM chip select. */
709
#define EE_DATA_WRITE   0x04    /* EEPROM chip data in. */
710
#define EE_WRITE_0              0x01
711
#define EE_WRITE_1              0x05
712
#define EE_DATA_READ    0x08    /* EEPROM chip data out. */
713
#define EE_ENB                  (0x4800 | EE_CS)
714
 
715
/* Delay between EEPROM clock transitions.
716
   This will actually work with no delay on 33Mhz PCI.  */
717
#define eeprom_delay(nanosec)           udelay(1);
718
 
719
/* The EEPROM commands include the always-set leading bit. */
720
#define EE_WRITE_CMD    (5 << addr_len)
721
#define EE_READ_CMD             (6 << addr_len)
722
#define EE_ERASE_CMD    (7 << addr_len)
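
/* read_eeprom() below bit-bangs this interface: the start bit, opcode and
   address are shifted out MSB-first on EE_DATA_WRITE, then 16 data bits are
   clocked back in on EE_DATA_READ. */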
723
 
724
static int read_eeprom(long ioaddr, int location, int addr_len)
725
{
726
        unsigned short retval = 0;
727
        int ee_addr = ioaddr + SCBeeprom;
728
        int read_cmd = location | EE_READ_CMD;
729
        int i;
730
 
731
        outw(EE_ENB & ~EE_CS, ee_addr);
732
        outw(EE_ENB, ee_addr);
733
 
734
        /* Shift the read command bits out. */
735
        for (i = 12; i >= 0; i--) {
736
                short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
737
                outw(EE_ENB | dataval, ee_addr);
738
                eeprom_delay(100);
739
                outw(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
740
                eeprom_delay(150);
741
        }
742
        outw(EE_ENB, ee_addr);
743
 
744
        for (i = 15; i >= 0; i--) {
745
                outw(EE_ENB | EE_SHIFT_CLK, ee_addr);
746
                eeprom_delay(100);
747
                retval = (retval << 1) | ((inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
748
                outw(EE_ENB, ee_addr);
749
                eeprom_delay(100);
750
        }
751
 
752
        /* Terminate the EEPROM access. */
753
        outw(EE_ENB & ~EE_CS, ee_addr);
754
        return retval;
755
}
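
/* Layout of the MDI control word used by mdio_read()/mdio_write() below:
   bits 0-15 data, bits 16-20 PHY register, bits 21-25 PHY address,
   bits 26-27 opcode (01 = write, 10 = read), bit 28 = ready/completion flag. */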
756
 
757
static int mdio_read(long ioaddr, int phy_id, int location)
758
{
759
        int val, boguscnt = 64*10;              /* <64 usec. to complete, typ 27 ticks */
760
        outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
761
        do {
762
                val = inl(ioaddr + SCBCtrlMDI);
763
                if (--boguscnt < 0) {
764
                        printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
765
                }
766
        } while (! (val & 0x10000000));
767
        return val & 0xffff;
768
}
769
 
770
static int mdio_write(long ioaddr, int phy_id, int location, int value)
771
{
772
        int val, boguscnt = 64*10;              /* <64 usec. to complete, typ 27 ticks */
773
        outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
774
                 ioaddr + SCBCtrlMDI);
775
        do {
776
                val = inl(ioaddr + SCBCtrlMDI);
777
                if (--boguscnt < 0) {
778
                        printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
779
                }
780
        } while (! (val & 0x10000000));
781
        return val & 0xffff;
782
}
783
 
784
 
785
static int
786
speedo_open(struct device *dev)
787
{
788
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
789
        long ioaddr = dev->base_addr;
790
 
791
#ifdef notdef
792
        /* We could reset the chip, but should not need to. */
793
        outl(0, ioaddr + SCBPort);
794
        udelay(10);
795
#endif
796
 
797
        if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ,
798
                                        "Intel EtherExpress Pro 10/100 Ethernet", dev)) {
799
                return -EAGAIN;
800
        }
801
        if (speedo_debug > 1)
802
                printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
803
 
804
        MOD_INC_USE_COUNT;
805
 
806
        /* Retrigger negotiation to reset previous errors. */
807
        if ((sp->phy[0] & 0x8000) == 0) {
808
                int phy_addr = sp->phy[0] & 0x1f ;
809
                /* Use 0x3300 for restarting NWay, other values to force xcvr:
810
                   0x0000 10-HD
811
                   0x0100 10-FD
812
                   0x2000 100-HD
813
                   0x2100 100-FD
814
                */
815
#ifdef honor_default_port
816
                mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
817
#else
818
                mdio_write(ioaddr, phy_addr, 0, 0x3300);
819
#endif
820
        }
821
 
822
        /* Load the statistics block address. */
823
        wait_for_cmd_done(ioaddr + SCBCmd);
824
        outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
825
        outw(INT_MASK | CU_STATSADDR, ioaddr + SCBCmd);
826
        sp->lstats.done_marker = 0;
827
 
828
        speedo_init_rx_ring(dev);
829
        wait_for_cmd_done(ioaddr + SCBCmd);
830
        outl(0, ioaddr + SCBPointer);
831
        outw(INT_MASK | RX_ADDR_LOAD, ioaddr + SCBCmd);
832
 
833
        /* Todo: verify that we must wait for previous command completion. */
834
        wait_for_cmd_done(ioaddr + SCBCmd);
835
        outl(virt_to_bus(sp->rx_ringp[0]), ioaddr + SCBPointer);
836
        outw(INT_MASK | RX_START, ioaddr + SCBCmd);
837
 
838
        /* Fill the first command with our physical address. */
839
        {
840
                u16 *eaddrs = (u16 *)dev->dev_addr;
841
                u16 *setup_frm = (u16 *)&(sp->tx_ring[0].tx_desc_addr);
842
 
843
                /* Avoid a bug(?!) here by marking the command already completed. */
844
                sp->tx_ring[0].status = ((CmdSuspend | CmdIASetup) << 16) | 0xa000;
845
                sp->tx_ring[0].link = virt_to_bus(&(sp->tx_ring[1]));
846
                *setup_frm++ = eaddrs[0];
847
                *setup_frm++ = eaddrs[1];
848
                *setup_frm++ = eaddrs[2];
849
        }
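        /* tx_ring[0] now holds a CmdIASetup frame, so loading the station
           address is the first command the CU executes once CU_START is
           issued below. */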
850
        sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[0];
851
        sp->cur_tx = 1;
852
        sp->dirty_tx = 0;
853
        sp->tx_full = 0;
854
 
855
        wait_for_cmd_done(ioaddr + SCBCmd);
856
        outl(0, ioaddr + SCBPointer);
857
        outw(INT_MASK | CU_CMD_BASE, ioaddr + SCBCmd);
858
 
859
        dev->if_port = sp->default_port;
860
 
861
        sp->in_interrupt = 0;
862
        dev->tbusy = 0;
863
        dev->interrupt = 0;
864
        dev->start = 1;
865
 
866
        /* Start the chip's Tx process and unmask interrupts. */
867
        /* Todo: verify that we must wait for previous command completion. */
868
        wait_for_cmd_done(ioaddr + SCBCmd);
869
        outl(virt_to_bus(&sp->tx_ring[0]), ioaddr + SCBPointer);
870
        outw(CU_START, ioaddr + SCBCmd);
871
 
872
        /* Setup the chip and configure the multicast list. */
873
        sp->mc_setup_frm = NULL;
874
        sp->mc_setup_frm_len = 0;
875
        sp->mc_setup_busy = 0;
876
        sp->rx_mode = -1;                       /* Invalid -> always reset the mode. */
877
        set_rx_mode(dev);
878
 
879
        if (speedo_debug > 2) {
880
                printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
881
                           dev->name, inw(ioaddr + SCBStatus));
882
        }
883
        /* Set the timer.  The timer serves a dual purpose:
884
           1) to monitor the media interface (e.g. link beat) and perhaps switch
885
           to an alternate media type
886
           2) to monitor Rx activity, and restart the Rx process if the receiver
887
           hangs. */
888
        init_timer(&sp->timer);
889
        sp->timer.expires = RUN_AT((24*HZ)/10);                         /* 2.4 sec. */
890
        sp->timer.data = (unsigned long)dev;
891
        sp->timer.function = &speedo_timer;                                     /* timer handler */
892
        add_timer(&sp->timer);
893
 
894
        wait_for_cmd_done(ioaddr + SCBCmd);
895
        outw(CU_DUMPSTATS, ioaddr + SCBCmd);
896
        /* No need to wait for the command unit to accept here. */
897
        if ((sp->phy[0] & 0x8000) == 0)
898
                mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
899
        return 0;
900
}
901
 
902
/* Media monitoring and control. */
903
static void speedo_timer(unsigned long data)
904
{
905
        struct device *dev = (struct device *)data;
906
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
907
 
908
        if (speedo_debug > 3) {
909
                long ioaddr = dev->base_addr;
910
                printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
911
                           dev->name, inw(ioaddr + SCBStatus));
912
        }
913
        if (sp->rx_mode < 0  ||
914
                (sp->rx_bug  && jiffies - sp->last_rx_time > 2*HZ)) {
915
                /* We haven't received a packet in a Long Time.  We might have been
916
                   bitten by the receiver hang bug.  This can be cleared by sending
917
                   a set multicast list command. */
918
                set_rx_mode(dev);
919
        }
920
        /* We must continue to monitor the media. */
921
        sp->timer.expires = RUN_AT(2*HZ);                       /* 2.0 sec. */
922
        add_timer(&sp->timer);
923
}
924
 
925
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
926
static void
927
speedo_init_rx_ring(struct device *dev)
928
{
929
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
930
        struct RxFD *rxf, *last_rxf = NULL;
931
        int i;
932
 
933
        sp->cur_rx = 0;
934
 
935
        for (i = 0; i < RX_RING_SIZE; i++) {
936
                struct sk_buff *skb;
937
                skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
938
                sp->rx_skbuff[i] = skb;
939
                if (skb == NULL)
940
                        break;                  /* OK.  Just initially short of Rx bufs. */
941
                skb->dev = dev;                 /* Mark as being used by this device. */
942
                rxf = (struct RxFD *)skb->tail;
943
                sp->rx_ringp[i] = rxf;
944
                skb_reserve(skb, sizeof(struct RxFD));
945
                if (last_rxf)
946
                        last_rxf->link = virt_to_bus(rxf);
947
                last_rxf = rxf;
948
                rxf->status = 0x00000001;                       /* '1' is flag value only. */
949
                rxf->link = 0;                                          /* None yet. */
950
                /* This field is unused by the i82557; we use it as a consistency check. */
951
#ifdef final_version
952
                rxf->rx_buf_addr = 0xffffffff;
953
#else
954
                rxf->rx_buf_addr = virt_to_bus(skb->tail);
955
#endif
956
                rxf->count = 0;
957
                rxf->size = PKT_BUF_SZ;
958
        }
959
        sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
960
        /* Mark the last entry as end-of-list. */
961
        last_rxf->status = 0xC0000002;                  /* '2' is flag value only. */
962
        sp->last_rxf = last_rxf;
963
}
964
 
965
static void speedo_tx_timeout(struct device *dev)
966
{
967
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
968
        long ioaddr = dev->base_addr;
969
 
970
        printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
971
                   " %4.4x at %d/%d command %8.8x.\n",
972
                   dev->name, inw(ioaddr + SCBStatus), inw(ioaddr + SCBCmd),
973
                   sp->dirty_tx, sp->cur_tx,
974
                   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
975
        if ((inw(ioaddr + SCBStatus) & 0x00C0) != 0x0080) {
976
                printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
977
                           dev->name);
978
                outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
979
                         ioaddr + SCBPointer);
980
                outw(CU_START, ioaddr + SCBCmd);
981
        } else {
982
                outw(DRVR_INT, ioaddr + SCBCmd);
983
        }
984
        /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
985
        if ((sp->phy[0] & 0x8000) == 0) {
986
                int phy_addr = sp->phy[0] & 0x1f;
987
                mdio_write(ioaddr, phy_addr, 0, 0x0400);
988
                mdio_write(ioaddr, phy_addr, 1, 0x0000);
989
                mdio_write(ioaddr, phy_addr, 4, 0x0000);
990
                mdio_write(ioaddr, phy_addr, 0, 0x8000);
991
#ifdef honor_default_port
992
                mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
993
#endif
994
        }
995
        sp->stats.tx_errors++;
996
        dev->trans_start = jiffies;
997
        return;
998
}
999
 
1000
static int
1001
speedo_start_xmit(struct sk_buff *skb, struct device *dev)
1002
{
1003
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
1004
        long ioaddr = dev->base_addr;
1005
        int entry;
1006
 
1007
        /* Block a timer-based transmit from overlapping.  This could better be
1008
           done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
1009
           If this ever occurs the queue layer is doing something evil! */
1010
        if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
1011
                int tickssofar = jiffies - dev->trans_start;
1012
                if (tickssofar < TX_TIMEOUT - 2)
1013
                        return 1;
1014
                if (tickssofar < TX_TIMEOUT) {
1015
                        /* Reap sent packets from the full Tx queue. */
1016
                        outw(DRVR_INT, ioaddr + SCBCmd);
1017
                        return 1;
1018
                }
1019
                speedo_tx_timeout(dev);
1020
                return 1;
1021
        }
1022
 
1023
        /* Caution: the write order is important here, set the base address
1024
           with the "ownership" bits last. */
1025
 
1026
        {       /* Prevent interrupts from changing the Tx ring from underneath us. */
1027
                unsigned long flags;
1028
 
1029
                save_flags(flags);
1030
                cli();
1031
                /* Calculate the Tx descriptor entry. */
1032
                entry = sp->cur_tx++ % TX_RING_SIZE;
1033
 
1034
                sp->tx_skbuff[entry] = skb;
1035
                /* Todo: be a little more clever about setting the interrupt bit. */
1036
                sp->tx_ring[entry].status =
1037
                        (CmdSuspend | CmdTx | CmdTxFlex) << 16;
1038
                sp->tx_ring[entry].link =
1039
                  virt_to_bus(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
1040
                sp->tx_ring[entry].tx_desc_addr =
1041
                  virt_to_bus(&sp->tx_ring[entry].tx_buf_addr0);
1042
                /* The data region is always in one buffer descriptor, Tx FIFO
1043
                   threshold of 256. */
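                /* 0x01208000 decodes as: one TBD (0x01 in the top byte),
                   Tx threshold 0x20 (0x20 * 8 = 256 bytes), and the EOF bit;
                   the TCB byte count stays zero since the length is carried
                   in tx_buf_size0 below. */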
1044
                sp->tx_ring[entry].count = 0x01208000;
1045
                sp->tx_ring[entry].tx_buf_addr0 = virt_to_bus(skb->data);
1046
                sp->tx_ring[entry].tx_buf_size0 = skb->len;
1047
                /* Todo: perhaps leave the interrupt bit set if the Tx queue is more
1048
                   than half full.  Argument against: we should be receiving packets
1049
                   and scavenging the queue.  Argument for: if so, it shouldn't
1050
                   matter. */
1051
                sp->last_cmd->command &= ~(CmdSuspend | CmdIntr);
1052
                sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry];
1053
                restore_flags(flags);
1054
                /* Trigger the command unit resume. */
1055
                wait_for_cmd_done(ioaddr + SCBCmd);
1056
                outw(CU_RESUME, ioaddr + SCBCmd);
1057
        }
1058
 
1059
        /* Leave room for set_rx_mode() to fill two entries. */
1060
        if (sp->cur_tx - sp->dirty_tx > TX_RING_SIZE - 3)
1061
                sp->tx_full = 1;
1062
        else
1063
                clear_bit(0, (void*)&dev->tbusy);
1064
 
1065
        dev->trans_start = jiffies;
1066
 
1067
        return 0;
1068
}
1069
 
1070
/* The interrupt handler does all of the Rx thread work and cleans up
1071
   after the Tx thread. */
1072
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1073
{
1074
        struct device *dev = (struct device *)dev_instance;
1075
        struct speedo_private *sp;
1076
        long ioaddr, boguscnt = max_interrupt_work;
1077
        unsigned short status;
1078
 
1079
#ifndef final_version
1080
        if (dev == NULL) {
1081
                printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
1082
                return;
1083
        }
1084
#endif
1085
 
1086
        ioaddr = dev->base_addr;
1087
        sp = (struct speedo_private *)dev->priv;
1088
#ifndef final_version
1089
        /* A lock to prevent simultaneous entry on SMP machines. */
1090
        if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1091
                printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1092
                           dev->name);
1093
                sp->in_interrupt = 0;   /* Avoid halting machine. */
1094
                return;
1095
        }
1096
        dev->interrupt = 1;
1097
#endif
1098
 
1099
        do {
1100
                status = inw(ioaddr + SCBStatus);
1101
                /* Acknowledge all of the current interrupt sources ASAP. */
1102
                outw(status & 0xfc00, ioaddr + SCBStatus);
1103
 
1104
                if (speedo_debug > 4)
1105
                        printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
1106
                                   dev->name, status);
1107
 
1108
                if ((status & 0xfc00) == 0)
1109
                        break;
1110
 
1111
                if (status & 0x4000)     /* Packet received. */
1112
                        speedo_rx(dev);
1113
 
1114
                if (status & 0x1000) {
1115
                  if ((status & 0x003c) == 0x0028) /* No more Rx buffers. */
1116
                        outw(RX_RESUMENR, ioaddr + SCBCmd);
1117
                  else if ((status & 0x003c) == 0x0008) { /* No resources (why?!) */
1118
                        /* No idea of what went wrong.  Restart the receiver. */
1119
                        outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
1120
                                 ioaddr + SCBPointer);
1121
                        outw(RX_START, ioaddr + SCBCmd);
1122
                  }
1123
                  sp->stats.rx_errors++;
1124
                }
1125
 
1126
                /* User interrupt, Command/Tx unit interrupt or CU not active. */
1127
                if (status & 0xA400) {
1128
                        unsigned int dirty_tx = sp->dirty_tx;
1129
 
1130
                        while (sp->cur_tx - dirty_tx > 0) {
1131
                                int entry = dirty_tx % TX_RING_SIZE;
1132
                                int status = sp->tx_ring[entry].status;
1133
 
1134
                                if (speedo_debug > 5)
1135
                                        printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1136
                                                   entry, status);
1137
                                if ((status & StatusComplete) == 0)
1138
                                        break;                  /* It still hasn't been processed. */
1139
                                /* Free the original skb. */
1140
                                if (sp->tx_skbuff[entry]) {
1141
                                        sp->stats.tx_packets++; /* Count only user packets. */
1142
                                        dev_free_skb(sp->tx_skbuff[entry]);
1143
                                        sp->tx_skbuff[entry] = 0;
1144
                                } else if ((sp->tx_ring[entry].status&0x70000) == CmdNOp << 16)
1145
                                        sp->mc_setup_busy = 0;
1146
                                dirty_tx++;
1147
                        }
1148
 
1149
#ifndef final_version
1150
                        if (sp->cur_tx - dirty_tx > TX_RING_SIZE) {
1151
                                printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1152
                                           " full=%d.\n",
1153
                                           dirty_tx, sp->cur_tx, sp->tx_full);
1154
                                dirty_tx += TX_RING_SIZE;
1155
                        }
1156
#endif
1157
 
1158
                        if (sp->tx_full && dev->tbusy
1159
                                && dirty_tx > sp->cur_tx - TX_RING_SIZE + 2) {
1160
                                /* The ring is no longer full, clear tbusy. */
1161
                                sp->tx_full = 0;
1162
                                clear_bit(0, (void*)&dev->tbusy);
1163
                                mark_bh(NET_BH);
1164
                        }
1165
 
1166
                        sp->dirty_tx = dirty_tx;
1167
                }
1168
 
1169
                if (--boguscnt < 0) {
1170
                        printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1171
                                   dev->name, status);
1172
                        /* Clear all interrupt sources. */
1173
                        outl(0xfc00, ioaddr + SCBStatus);
1174
                        break;
1175
                }
1176
        } while (1);
1177
 
1178
        if (speedo_debug > 3)
1179
                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1180
                           dev->name, inw(ioaddr + SCBStatus));
1181
 
1182
        dev->interrupt = 0;
1183
        clear_bit(0, (void*)&sp->in_interrupt);
1184
        return;
1185
}
1186
 
1187
static int
speedo_rx(struct device *dev)
{
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        int entry = sp->cur_rx % RX_RING_SIZE;
        int status;
        int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;

        if (speedo_debug > 4)
                printk(KERN_DEBUG " In speedo_rx().\n");
        /* If we own the next entry, it's a new packet. Send it up. */
        while (sp->rx_ringp[entry] != NULL &&
                   (status = sp->rx_ringp[entry]->status) & RxComplete) {

                if (--rx_work_limit < 0)
                        break;
                if (speedo_debug > 4)
                        printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
                                   sp->rx_ringp[entry]->count & 0x3fff);
                if ((status & (RxErrTooBig|RxOK)) != RxOK) {
                        if (status & RxErrTooBig)
                                printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
                                           "status %8.8x!\n", dev->name, status);
                        else if ( ! (status & 0x2000)) {
                                /* There was a fatal error.  This *should* be impossible. */
                                sp->stats.rx_errors++;
                                printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
                                           "status %8.8x.\n",
                                           dev->name, status);
                        }
                } else {
                        int pkt_len = sp->rx_ringp[entry]->count & 0x3fff;
                        struct sk_buff *skb;

                        /* Check if the packet is long enough to just accept without
                           copying to a properly sized skbuff. */
                        if (pkt_len < rx_copybreak
                                && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
                                skb->dev = dev;
                                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
                                /* 'skb_put()' points to the start of sk_buff data area. */
#if 1 || USE_IP_CSUM
                                /* Packet is in one chunk -- we can copy + cksum. */
                                eth_copy_and_sum(skb,
                                                                 bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
                                                                 pkt_len, 0);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                           bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr), pkt_len);
#endif
                        } else {
                                void *temp;
                                /* Pass up the already-filled skbuff. */
                                skb = sp->rx_skbuff[entry];
                                if (skb == NULL) {
                                        printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
                                                   dev->name);
                                        break;
                                }
                                sp->rx_skbuff[entry] = NULL;
                                temp = skb_put(skb, pkt_len);
                                if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
                                        printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
                                                   "addresses do not match in speedo_rx: %p vs. %p "
                                                   "/ %p.\n", dev->name,
                                                   bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
                                                   skb->head, temp);
                                sp->rx_ringp[entry] = NULL;
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        sp->stats.rx_packets++;
                }
                entry = (++sp->cur_rx) % RX_RING_SIZE;
        }

        /* Refill the Rx ring buffers. */
        for (; sp->dirty_rx < sp->cur_rx; sp->dirty_rx++) {
                struct RxFD *rxf;
                entry = sp->dirty_rx % RX_RING_SIZE;
                if (sp->rx_skbuff[entry] == NULL) {
                        struct sk_buff *skb;
                        /* Get a fresh skbuff to replace the consumed one. */
                        skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
                        sp->rx_skbuff[entry] = skb;
                        if (skb == NULL) {
                                sp->rx_ringp[entry] = NULL;
                                break;                  /* Better luck next time!  */
                        }
                        rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
                        skb->dev = dev;
                        skb_reserve(skb, sizeof(struct RxFD));
                        rxf->rx_buf_addr = virt_to_bus(skb->tail);
                } else {
                        rxf = sp->rx_ringp[entry];
                }
                rxf->status = 0xC0000001;       /* '1' for driver use only. */
                rxf->link = 0;                  /* None yet. */
                rxf->count = 0;
                rxf->size = PKT_BUF_SZ;
                sp->last_rxf->link = virt_to_bus(rxf);
                sp->last_rxf->status &= ~0xC0000000;
                sp->last_rxf = rxf;
        }

        sp->last_rx_time = jiffies;
        return 0;
}

static int
speedo_close(struct device *dev)
{
        long ioaddr = dev->base_addr;
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        int i;

        dev->start = 0;
        dev->tbusy = 1;

        if (speedo_debug > 1)
                printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
                           dev->name, inw(ioaddr + SCBStatus));

        /* Shut off the media monitoring timer. */
        del_timer(&sp->timer);

        /* Disable interrupts, and stop the chip's Rx process. */
        outw(INT_MASK, ioaddr + SCBCmd);
        outw(INT_MASK | RX_ABORT, ioaddr + SCBCmd);

        free_irq(dev->irq, dev);

        /* Free all the skbuffs in the Rx and Tx queues. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = sp->rx_skbuff[i];
                sp->rx_skbuff[i] = 0;
                /* Clear the Rx descriptors. */
                if (skb)
                        dev_free_skb(skb);
        }

        for (i = 0; i < TX_RING_SIZE; i++) {
                struct sk_buff *skb = sp->tx_skbuff[i];
                sp->tx_skbuff[i] = 0;
                /* Clear the Tx descriptors. */
                if (skb)
                        dev_free_skb(skb);
        }
        if (sp->mc_setup_frm) {
                kfree(sp->mc_setup_frm);
                sp->mc_setup_frm_len = 0;
        }

        /* Print a few items for debugging. */
        if (speedo_debug > 3) {
                int phy_num = sp->phy[0] & 0x1f;
                printk(KERN_DEBUG "%s: Printing Rx ring (next to receive into %d).\n",
                           dev->name, sp->cur_rx);

                for (i = 0; i < RX_RING_SIZE; i++)
                        printk(KERN_DEBUG "  Rx ring entry %d  %8.8x.\n",
                                   i, (int)sp->rx_ringp[i]->status);

                for (i = 0; i < 5; i++)
                        printk(KERN_DEBUG "  PHY index %d register %d is %4.4x.\n",
                                   phy_num, i, mdio_read(ioaddr, phy_num, i));
                for (i = 21; i < 26; i++)
                        printk(KERN_DEBUG "  PHY index %d register %d is %4.4x.\n",
                                   phy_num, i, mdio_read(ioaddr, phy_num, i));
        }
        MOD_DEC_USE_COUNT;

        return 0;
}

/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip.  It takes an unpredictable length of time
   for the dump-stats command to complete.  To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   These problems are mitigated by the current /proc implementation, which
   calls this routine first to judge the output length, and then to emit the
   output.

   Oh, and incoming frames are dropped while executing dump-stats!
   */
static struct enet_statistics *
speedo_get_stats(struct device *dev)
{
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        long ioaddr = dev->base_addr;

        if (sp->lstats.done_marker == 0xA007) { /* Previous dump finished */
                sp->stats.tx_aborted_errors += sp->lstats.tx_coll16_errs;
                sp->stats.tx_window_errors += sp->lstats.tx_late_colls;
                sp->stats.tx_fifo_errors += sp->lstats.tx_underruns;
                sp->stats.tx_fifo_errors += sp->lstats.tx_lost_carrier;
                /*sp->stats.tx_deferred += sp->lstats.tx_deferred;*/
                sp->stats.collisions += sp->lstats.tx_total_colls;
                sp->stats.rx_crc_errors += sp->lstats.rx_crc_errs;
                sp->stats.rx_frame_errors += sp->lstats.rx_align_errs;
                sp->stats.rx_over_errors += sp->lstats.rx_resource_errs;
                sp->stats.rx_fifo_errors += sp->lstats.rx_overrun_errs;
                sp->stats.rx_length_errors += sp->lstats.rx_runt_errs;
                sp->lstats.done_marker = 0x0000;
                if (dev->start) {
                        wait_for_cmd_done(ioaddr + SCBCmd);
                        outw(CU_DUMPSTATS, ioaddr + SCBCmd);
                }
        }
        return &sp->stats;
}

static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd)
{
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        long ioaddr = dev->base_addr;
        u16 *data = (u16 *)&rq->ifr_data;
        int phy = sp->phy[0] & 0x1f;

        switch(cmd) {
        case SIOCDEVPRIVATE:            /* Get the address of the PHY in use. */
                data[0] = phy;
                /* Fall through: also read the requested MII register. */
        case SIOCDEVPRIVATE+1:          /* Read the specified MII register. */
                data[3] = mdio_read(ioaddr, data[0], data[1]);
                return 0;
        case SIOCDEVPRIVATE+2:          /* Write the specified MII register */
                if (!suser())
                        return -EPERM;
                mdio_write(ioaddr, data[0], data[1], data[2]);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
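
/* For reference only: a minimal user-space sketch (not part of this driver)
   of how a diagnostic tool might use the private ioctls above to read an MII
   register.  The interface name, register number and error handling are
   illustrative assumptions; the u16 overlay of ifr_data mirrors the layout
   speedo_ioctl() expects. */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/sockios.h>

static int read_mii_status(const char *ifname)
{
        struct ifreq ifr;
        unsigned short *data = (unsigned short *)&ifr.ifr_data;
        int skfd = socket(AF_INET, SOCK_DGRAM, 0);

        if (skfd < 0)
                return -1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
        ioctl(skfd, SIOCDEVPRIVATE, &ifr);        /* data[0] <- address of the PHY in use. */
        data[1] = 1;                              /* MII register 1, the status register. */
        ioctl(skfd, SIOCDEVPRIVATE + 1, &ifr);    /* data[3] <- register contents. */
        close(skfd);
        return data[3];
}
#endif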

/* Set or clear the multicast filter for this adaptor.
   This is very ugly with Intel chips -- we usually have to execute an
   entire configuration command, plus process a multicast command.
   This is complicated.  We must put a large configuration command and
   an arbitrarily-sized multicast command in the transmit list.
   To minimize the disruption -- the previous command might have already
   loaded the link -- we convert the current command block, normally a Tx
   command, into a no-op and link it to the new command.
*/
static void
set_rx_mode(struct device *dev)
{
        struct speedo_private *sp = (struct speedo_private *)dev->priv;
        long ioaddr = dev->base_addr;
        struct descriptor_net *last_cmd;
        char new_rx_mode;
        unsigned long flags;
        int entry, i;

        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
                new_rx_mode = 3;
        } else if ((dev->flags & IFF_ALLMULTI)  ||
                           dev->mc_count > multicast_filter_limit) {
                new_rx_mode = 1;
        } else
                new_rx_mode = 0;

        if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
          /* The Tx ring is full -- don't add anything!  Presumably the new mode
                 is in config_cmd_data and will be added anyway. */
                sp->rx_mode = -1;
                return;
        }

        if (new_rx_mode != sp->rx_mode) {
                u8 *config_cmd_data;

                save_flags(flags);              /* Lock to protect sp->cur_tx. */
                cli();
                entry = sp->cur_tx++ % TX_RING_SIZE;
                last_cmd = sp->last_cmd;
                sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry];
                restore_flags(flags);

                sp->tx_skbuff[entry] = 0;                       /* Redundant. */
                sp->tx_ring[entry].status = (CmdSuspend | CmdConfigure) << 16;
                sp->tx_ring[entry].link =
                        virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
                config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
                /* Construct a full CmdConfig frame. */
                memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
                config_cmd_data[1] = (txfifo << 4) | rxfifo;
                config_cmd_data[4] = rxdmacount;
                config_cmd_data[5] = txdmacount + 0x80;
                config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
                config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
                config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
                if (sp->phy[0] & 0x8000) {                      /* Use the AUI port instead. */
                        config_cmd_data[15] |= 0x80;
                        config_cmd_data[8] = 0;
                }
                /* Trigger the command unit resume. */
                last_cmd->command &= ~CmdSuspend;
                wait_for_cmd_done(ioaddr + SCBCmd);
                outw(CU_RESUME, ioaddr + SCBCmd);
        }

        if (new_rx_mode == 0  &&  dev->mc_count < 4) {
                /* The simple case of 0-3 multicast list entries occurs often, and
                   fits within one tx_ring[] entry. */
                struct dev_mc_list *mclist;
                u16 *setup_params, *eaddrs;

                save_flags(flags);              /* Lock to protect sp->cur_tx. */
                cli();
                entry = sp->cur_tx++ % TX_RING_SIZE;
                last_cmd = sp->last_cmd;
                sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry];
                restore_flags(flags);

                sp->tx_skbuff[entry] = 0;
                sp->tx_ring[entry].status = (CmdSuspend | CmdMulticastList) << 16;
                sp->tx_ring[entry].link =
                        virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
                sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
                setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
                *setup_params++ = dev->mc_count*6;
                /* Fill in the multicast addresses. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                         i++, mclist = mclist->next) {
                        eaddrs = (u16 *)mclist->dmi_addr;
                        *setup_params++ = *eaddrs++;
                        *setup_params++ = *eaddrs++;
                        *setup_params++ = *eaddrs++;
                }

                last_cmd->command &= ~CmdSuspend;
                /* Immediately trigger the command unit resume. */
                wait_for_cmd_done(ioaddr + SCBCmd);
                outw(CU_RESUME, ioaddr + SCBCmd);
        } else if (new_rx_mode == 0) {
                struct dev_mc_list *mclist;
                u16 *setup_params, *eaddrs;
                struct descriptor_net *mc_setup_frm = sp->mc_setup_frm;
                int i;

                if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
                        || sp->mc_setup_frm == NULL) {
                        /* Allocate a full setup frame, 10 bytes + <max addrs>. */
                        if (sp->mc_setup_frm)
                                kfree(sp->mc_setup_frm);
                        sp->mc_setup_busy = 0;
                        sp->mc_setup_frm_len = 10 + multicast_filter_limit*6;
                        sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
                        if (sp->mc_setup_frm == NULL) {
                                printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
                                           dev->name);
                                sp->rx_mode = -1; /* We failed, try again. */
                                return;
                        }
                }
                /* If we are busy, someone might be quickly adding to the MC list.
                   Try again later, once the list stops changing. */
                if (sp->mc_setup_busy) {
                        sp->rx_mode = -1;
                        return;
                }
                mc_setup_frm = sp->mc_setup_frm;
                /* Fill the setup frame. */
                if (speedo_debug > 1)
                        printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
                                   "%d bytes.\n",
                                   dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
                mc_setup_frm->status = 0;
                mc_setup_frm->command = CmdSuspend | CmdIntr | CmdMulticastList;
                /* Link set below. */
                setup_params = (u16 *)&mc_setup_frm->params;
                *setup_params++ = dev->mc_count*6;
                /* Fill in the multicast addresses. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                         i++, mclist = mclist->next) {
                        eaddrs = (u16 *)mclist->dmi_addr;
                        *setup_params++ = *eaddrs++;
                        *setup_params++ = *eaddrs++;
                        *setup_params++ = *eaddrs++;
                }

                /* Disable interrupts while playing with the Tx Cmd list. */
                save_flags(flags);
                cli();
                entry = sp->cur_tx++ % TX_RING_SIZE;
                last_cmd = sp->last_cmd;
                sp->last_cmd = mc_setup_frm;
                sp->mc_setup_busy++;
                restore_flags(flags);

                /* Change the command to a NoOp, pointing to the CmdMulti command. */
                sp->tx_skbuff[entry] = 0;
                sp->tx_ring[entry].status = CmdNOp << 16;
                sp->tx_ring[entry].link = virt_to_bus(mc_setup_frm);

                /* Set the link in the setup frame. */
                mc_setup_frm->link =
                        virt_to_bus(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));

                last_cmd->command &= ~CmdSuspend;
                /* Immediately trigger the command unit resume. */
                wait_for_cmd_done(ioaddr + SCBCmd);
                outw(CU_RESUME, ioaddr + SCBCmd);
                if (speedo_debug > 5)
                        printk(" CmdMCSetup frame length %d in entry %d.\n",
                                   dev->mc_count, entry);
        }

        sp->rx_mode = new_rx_mode;
}

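/* Illustrative only: the multicast setup frame built above consists of a
   command descriptor followed by its parameter area -- a 16-bit byte count
   (mc_count*6) and then the 6-byte station addresses themselves.  That is
   where the "10 + <max addrs>*6" sizing used in set_rx_mode() comes from.
   The struct below is a sketch of that layout; the field names are
   assumptions, not the driver's own declarations. */
#if 0
struct mc_setup_frame_layout {
        u16 status;                     /* Completion status, written back by the chip. */
        u16 command;                    /* CmdMulticastList plus Suspend/Intr bits. */
        u32 link;                       /* Bus address of the next command block. */
        u16 mc_byte_count;              /* Number of address bytes that follow: mc_count*6. */
        u8 mc_addrs[0];                 /* mc_count Ethernet addresses, 6 bytes each. */
};
#endif
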
#ifdef MODULE

int
init_module(void)
{
        int cards_found;

        if (debug >= 0)
                speedo_debug = debug;
        if (speedo_debug)
                printk(KERN_INFO "%s", version);

        root_speedo_dev = NULL;
        cards_found = eepro100_init(NULL);
        return cards_found ? 0 : -ENODEV;
}

void
cleanup_module(void)
{
        struct device *next_dev;

        /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
        while (root_speedo_dev) {
                next_dev = ((struct speedo_private *)root_speedo_dev->priv)->next_module;
                unregister_netdev(root_speedo_dev);
                release_region(root_speedo_dev->base_addr, SPEEDO3_TOTAL_SIZE);
                kfree(root_speedo_dev);
                root_speedo_dev = next_dev;
        }
}
#else   /* not MODULE */
int eepro100_probe(struct device *dev)
{
        int cards_found = 0;

        cards_found = eepro100_init(dev);

        if (speedo_debug > 0  &&  cards_found)
                printk(version);

        return cards_found ? 0 : -ENODEV;
}
#endif  /* MODULE */

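/* Example usage (illustrative, not from the original sources): after building
   with the compile-command below, the module can typically be loaded with
   something like
        insmod eepro100.o debug=3
   where the 'debug' parameter overrides speedo_debug as handled in
   init_module() above. */
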
/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */