Rev | Author | Line No. | Line |
---|---|---|---|
629 | giacomo | 1 | /* drivers/net/eepro100.c: An Intel i82557 Ethernet driver for Linux. */ |
2 | /* |
||
3 | NOTICE: this version tested with kernels 1.3.72 and later only! |
||
4 | Written 1996-1998 by Donald Becker. |
||
5 | |||
6 | This software may be used and distributed according to the terms |
||
7 | of the GNU Public License, incorporated herein by reference. |
||
8 | |||
9 | This driver is for the Intel EtherExpress Pro 100B boards. |
||
10 | It should work with other i82557 and i82558 boards. |
||
11 | To use a built-in driver, install as drivers/net/eepro100.c. |
||
12 | To use as a module, use the compile-command at the end of the file. |
||
13 | |||
14 | The author may be reached as becker@CESDIS.usra.edu, or C/O |
||
15 | Center of Excellence in Space Data and Information Sciences |
||
16 | Code 930.5, NASA Goddard Space Flight Center, Greenbelt MD 20771 |
||
17 | For updates see |
||
18 | http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html |
||
19 | There is also a mailing list based at |
||
20 | linux-eepro100@cesdis.gsfc.nasa.gov |
||
21 | */ |
||
22 | |||
23 | static const char *version = |
||
24 | "eepro100.c:v1.05 10/16/98 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n"; |
||
25 | |||
26 | /* A few user-configurable values that apply to all boards. |
||
27 | The first set is undocumented and spelled per Intel recommendations. */ |
||
28 | |||
29 | static int congenb = 0; /* Enable congestion control in the DP83840. */ |
||
30 | static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */ |
||
31 | static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */ |
||
32 | /* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */ |
||
33 | static int txdmacount = 128; |
||
34 | static int rxdmacount = 0; |
||
35 | |||
36 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method. |
||
37 | Lower values use more memory, but are faster. */ |
||
38 | static int rx_copybreak = 200; |
||
39 | |||
40 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ |
||
41 | static int max_interrupt_work = 20; |
||
42 | |||
43 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */ |
||
44 | static int multicast_filter_limit = 64; |
||
45 | |||
46 | #include <linux/config.h> |
||
47 | #ifdef MODULE |
||
48 | #ifdef MODVERSIONS |
||
49 | #include <linux/modversions.h> |
||
50 | #endif |
||
51 | #include <linux/module.h> |
||
52 | #else |
||
53 | #define MOD_INC_USE_COUNT |
||
54 | #define MOD_DEC_USE_COUNT |
||
55 | #endif |
||
56 | |||
57 | #include <linux/version.h> |
||
58 | #include <linux/kernel.h> |
||
59 | #include <linux/string.h> |
||
60 | #include <linux/timer.h> |
||
61 | #include <linux/errno.h> |
||
62 | #include <linux/ioport.h> |
||
63 | #include <linux/malloc.h> |
||
64 | #include <linux/interrupt.h> |
||
65 | #include <linux/pci.h> |
||
66 | #if LINUX_VERSION_CODE < 0x20155 |
||
67 | #include <linux/bios32.h> /* Ignore the bogus warning in 2.1.100+ */ |
||
68 | #endif |
||
69 | #include <asm/bitops.h> |
||
70 | #include <asm/io.h> |
||
71 | |||
72 | #include <linux/netdevice.h> |
||
73 | #include <linux/etherdevice.h> |
||
74 | #include <linux/skbuff.h> |
||
75 | #include <linux/delay.h> |
||
76 | |||
77 | /* Unused in the 2.0.* version, but retained for documentation. */ |
||
78 | #if LINUX_VERSION_CODE > 0x20118 && defined(MODULE) |
||
79 | MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>"); |
||
80 | MODULE_DESCRIPTION("Intel i82557/i82558 PCI EtherExpressPro driver"); |
||
81 | MODULE_PARM(debug, "i"); |
||
82 | MODULE_PARM(options, "1-" __MODULE_STRING(8) "i"); |
||
83 | MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i"); |
||
84 | MODULE_PARM(congenb, "i"); |
||
85 | MODULE_PARM(txfifo, "i"); |
||
86 | MODULE_PARM(rxfifo, "i"); |
||
87 | MODULE_PARM(txdmacount, "i"); |
||
88 | MODULE_PARM(rxdmacount, "i"); |
||
89 | MODULE_PARM(rx_copybreak, "i"); |
||
90 | MODULE_PARM(max_interrupt_work, "i"); |
||
91 | MODULE_PARM(multicast_filter_limit, "i"); |
||
92 | #endif |
||
93 | |||
94 | #define RUN_AT(x) (jiffies + (x)) |
||
95 | |||
96 | #if (LINUX_VERSION_CODE < 0x20123) |
||
97 | #define test_and_set_bit(val, addr) set_bit(val, addr) |
||
98 | #endif |
||
99 | #if LINUX_VERSION_CODE < 0x20159 |
||
100 | #define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE); |
||
101 | #else |
||
102 | #define dev_free_skb(skb) dev_kfree_skb(skb); |
||
103 | #endif |
||
104 | |||
105 | /* The total I/O port extent of the board. |
||
106 | The registers beyond 0x18 only exist on the i82558. */ |
||
107 | #define SPEEDO3_TOTAL_SIZE 0x20 |
||
108 | |||
109 | int speedo_debug = 0; |
||
110 | |||
111 | /* |
||
112 | Theory of Operation |
||
113 | |||
114 | I. Board Compatibility |
||
115 | |||
116 | This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's |
||
117 | single-chip fast Ethernet controller for PCI, as used on the Intel |
||
118 | EtherExpress Pro 100 adapter. |
||
119 | |||
120 | II. Board-specific settings |
||
121 | |||
122 | PCI bus devices are configured by the system at boot time, so no jumpers |
||
123 | need to be set on the board. The system BIOS should be set to assign the |
||
124 | PCI INTA signal to an otherwise unused system IRQ line. While it's |
||
125 | possible to share PCI interrupt lines, it negatively impacts performance and |
||
126 | only recent kernels support it. |
||
127 | |||
128 | III. Driver operation |
||
129 | |||
130 | IIIA. General |
||
131 | The Speedo3 is very similar to other Intel network chips, that is to say |
||
132 | "apparently designed on a different planet". This chips retains the complex |
||
133 | Rx and Tx descriptors and multiple buffers pointers as previous chips, but |
||
134 | also has simplified Tx and Rx buffer modes. This driver uses the "flexible" |
||
135 | Tx mode, but in a simplified lower-overhead manner: it associates only a |
||
136 | single buffer descriptor with each frame descriptor. |
||
137 | |||
138 | Despite the extra space overhead in each receive skbuff, the driver must use |
||
139 | the simplified Rx buffer mode to assure that only a single data buffer is |
||
140 | associated with each RxFD. The driver implements this by reserving space |
||
141 | for the Rx descriptor at the head of each Rx skbuff. |
||
142 | |||
143 | The Speedo-3 has receive and command unit base addresses that are added to |
||
144 | almost all descriptor pointers. The driver sets these to zero, so that all |
||
145 | pointer fields are absolute addresses. |
||
146 | |||
147 | The System Control Block (SCB) of some previous Intel chips exists on the |
||
148 | chip in both PCI I/O and memory space. This driver uses the I/O space |
||
149 | registers, but might switch to memory mapped mode to better support non-x86 |
||
150 | processors. |
||
151 | |||
152 | IIIB. Transmit structure |
||
153 | |||
154 | The driver must use the complex Tx command+descriptor mode in order to |
||
155 | have an indirect pointer to the skbuff data section. Each Tx command block |
156 | (TxCB) is associated with two immediately appended Tx Buffer Descriptors |
157 | (TxBDs). A fixed ring of these TxCB+TxBD pairs is kept as part of the |
||
158 | speedo_private data structure for each adapter instance. |
||
159 | |||
160 | The newer i82558 explicitly supports this structure, and can read the two |
||
161 | TxBDs in the same PCI burst as the TxCB. |
||
162 | |||
163 | This ring structure is used for all normal transmit packets, but the |
||
164 | transmit packet descriptors aren't long enough for most non-Tx commands such |
||
165 | as CmdConfigure. This is complicated by the possibility that the chip has |
||
166 | already loaded the link address in the previous descriptor. So for these |
||
167 | commands we convert the next free descriptor on the ring to a NoOp, and point |
||
168 | that descriptor's link to the complex command. |
||
169 | |||
170 | An additional complexity of these non-transmit commands is that they may be |
171 | added asynchronously to the normal transmit queue, so we disable interrupts |
||
172 | whenever the Tx descriptor ring is manipulated. |
||
173 | |||
174 | A notable aspect of these special configure commands is that they do |
||
175 | work with the normal Tx ring entry scavenge method. The Tx ring scavenge |
||
176 | is done at interrupt time using the 'dirty_tx' index, and checking for the |
||
177 | command-complete bit. While the setup frames may have the NoOp command on the |
178 | Tx ring marked as complete without the setup command itself having completed, this |
179 | is not a problem. The tx_ring entry can still be safely reused, as the |
||
180 | tx_skbuff[] entry is always empty for config_cmd and mc_setup frames. |
||
181 | |||
182 | Commands may have bits set, e.g. CmdSuspend in the command word, to either |
||
183 | suspend or stop the transmit/command unit. This driver always flags the last |
||
184 | command with CmdSuspend, erases the CmdSuspend in the previous command, and |
||
185 | then issues a CU_RESUME. |
||
186 | Note: Watch out for the potential race condition here: imagine |
187 | 1) erasing the previous suspend, |
188 | 2) the chip processes the previous command, |
189 | 3) the chip processes the final command and suspends, |
190 | 4) doing the CU_RESUME, |
191 | 5) the chip processes the next-yet-valid post-final-command. |
||
192 | So blindly sending a CU_RESUME is only safe if we do it immediately |
193 | after erasing the previous CmdSuspend, without the possibility of an |
||
194 | intervening delay. Thus the resume command is always within the |
||
195 | interrupts-disabled region. This is a timing dependence, but handling this |
||
196 | condition in a timing-independent way would considerably complicate the code. |
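In outline, the ordering described above can be sketched as follows (simplified names, not a verbatim copy of speedo_start_xmit(); interrupts are assumed disabled for the whole block):

    new_cmd->command |= CmdSuspend;                 append the new command, suspended
    prev_cmd->command &= ~(CmdSuspend | CmdIntr);   un-suspend its predecessor
    wait_for_cmd_done(ioaddr + SCBCmd);
    outw(CU_RESUME, ioaddr + SCBCmd);               resume with no intervening delay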
||
197 | |||
198 | Note: In previous generation Intel chips, restarting the command unit was a |
||
199 | notoriously slow process. This is presumably no longer true. |
||
200 | |||
201 | IIIC. Receive structure |
||
202 | |||
203 | Because of the bus-master support on the Speedo3 this driver uses the new |
||
204 | SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer. |
||
205 | This scheme allocates full-sized skbuffs as receive buffers. The value |
||
206 | SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to |
||
207 | trade off the memory wasted by passing the full-sized skbuff to the queue |
208 | layer for all frames vs. the cost of copying a frame to a |
||
209 | correctly-sized skbuff. |
||
210 | |||
211 | For small frames the copying cost is negligible (esp. considering that we |
||
212 | are pre-loading the cache with immediately useful header information), so we |
||
213 | allocate a new, minimally-sized skbuff. For large frames the copying cost |
||
214 | is non-trivial, and the larger copy might flush the cache of useful data, so |
||
215 | we pass up the skbuff the packet was received into. |
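The decision itself is small; a compressed sketch of what speedo_rx() below does (names such as rx_buf and entry are simplified here):

    if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
        skb_reserve(skb, 2);                              align the IP header
        memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);   small frame: copy it
    } else {
        skb = rx_skbuff[entry];                           large frame: pass the
        rx_skbuff[entry] = NULL;                          original skbuff up and
        skb_put(skb, pkt_len);                            refill the slot later
    }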
||
216 | |||
217 | IIID. Synchronization |
||
218 | The driver runs as two independent, single-threaded flows of control. One |
||
219 | is the send-packet routine, which enforces single-threaded use by the |
||
220 | dev->tbusy flag. The other thread is the interrupt handler, which is single |
||
221 | threaded by the hardware and other software. |
||
222 | |||
223 | The send packet thread has partial control over the Tx ring and 'dev->tbusy' |
||
224 | flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next |
||
225 | queue slot is empty, it clears the tbusy flag when finished; otherwise it sets |
||
226 | the 'sp->tx_full' flag. |
||
227 | |||
228 | The interrupt handler has exclusive control over the Rx ring and records stats |
||
229 | from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so |
||
230 | we can't avoid the interrupt overhead by having the Tx routine reap the Tx |
||
231 | stats.) After reaping the stats, it marks the queue entry as empty by setting |
||
232 | the 'base' to zero. Iff the 'sp->tx_full' flag is set, it clears both the |
||
233 | tx_full and tbusy flags. |
||
234 | |||
235 | IV. Notes |
||
236 | |||
237 | Thanks to Steve Williams of Intel for arranging the non-disclosure agreement |
||
238 | that stated that I could disclose the information. But I still resent |
||
239 | having to sign an Intel NDA when I'm helping Intel sell their own product! |
||
240 | |||
241 | */ |
||
242 | |||
243 | /* Added by Nino - Begin */ |
||
244 | extern int pci20to26_find_class(unsigned int class_code, int index, BYTE *bus, BYTE *dev); |
||
245 | extern int pci20to26_read_config_byte(unsigned int bus, unsigned int dev, int where, u8 *val); |
||
246 | extern int pci20to26_read_config_word(unsigned int bus, unsigned int dev, int where, u16 *val); |
||
247 | extern int pci20to26_read_config_dword(unsigned int bus, unsigned int dev, int where, u32 *val); |
||
248 | extern int pci20to26_write_config_byte(unsigned int bus, unsigned int dev, int where, u8 val); |
||
249 | extern int pci20to26_write_config_word(unsigned int bus, unsigned int dev, int where, u16 val); |
||
250 | extern int pci20to26_write_config_dword(unsigned int bus, unsigned int dev, int where, u32 val); |
||
251 | /* Added by Nino - End */ |
||
252 | |||
253 | |||
254 | /* A few values that may be tweaked. */ |
||
255 | /* The ring sizes should be a power of two for efficiency. */ |
||
256 | #define TX_RING_SIZE 16 /* Effectively 2 entries fewer. */ |
||
257 | #define RX_RING_SIZE 16 |
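/* With a power-of-two size, the index arithmetic used below, e.g.
   "entry = sp->cur_tx++ % TX_RING_SIZE", reduces to a cheap bitwise AND
   rather than a division. */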
||
258 | /* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */ |
||
259 | #define PKT_BUF_SZ 1536 |
||
260 | |||
261 | /* Time in jiffies before concluding the transmitter is hung. */ |
||
262 | #define TX_TIMEOUT ((800*HZ)/1000) |
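/* ((800*HZ)/1000) is simply 0.8 seconds expressed in jiffies. */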
||
263 | |||
264 | /* How to wait for the command unit to accept a command. |
||
265 | Typically this takes 0 ticks. */ |
||
266 | static inline void wait_for_cmd_done(long cmd_ioaddr) |
||
267 | { |
||
268 | int wait = 100; |
||
269 | do ; |
||
270 | while(inb(cmd_ioaddr) && --wait >= 0); |
||
271 | } |
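/* Typical usage throughout this driver: wait for the previous command to be
   accepted, then write the next one, e.g.
       wait_for_cmd_done(ioaddr + SCBCmd);
       outw(CU_RESUME, ioaddr + SCBCmd);
*/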
||
272 | |||
273 | /* Operational parameters that usually are not changed. */ |
||
274 | |||
275 | /* The rest of these values should never change. */ |
||
276 | |||
277 | /* Offsets to the various registers. |
||
278 | Accesses need not be longword aligned. */ |
||
279 | enum speedo_offsets { |
||
280 | SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */ |
||
281 | SCBPointer = 4, /* General purpose pointer. */ |
||
282 | SCBPort = 8, /* Misc. commands and operands. */ |
||
283 | SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */ |
||
284 | SCBCtrlMDI = 16, /* MDI interface control. */ |
||
285 | SCBEarlyRx = 20, /* Early receive byte count. */ |
||
286 | }; |
||
287 | /* Commands that can be put in a command list entry. */ |
||
288 | enum commands { |
||
289 | CmdNOp = 0, CmdIASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, |
||
290 | CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7, |
||
291 | CmdSuspend = 0x4000, /* Suspend after completion. */ |
||
292 | CmdIntr = 0x2000, /* Interrupt after completion. */ |
||
293 | CmdTxFlex = 0x0008, /* Use "Flexible mode" for CmdTx command. */ |
||
294 | }; |
||
295 | |||
296 | /* The SCB accepts the following controls for the Tx and Rx units: */ |
||
297 | #define CU_START 0x0010 |
||
298 | #define CU_RESUME 0x0020 |
||
299 | #define CU_STATSADDR 0x0040 |
||
300 | #define CU_SHOWSTATS 0x0050 /* Dump statistics counters. */ |
||
301 | #define CU_CMD_BASE 0x0060 /* Base address to add to CU commands. */ |
||
302 | #define CU_DUMPSTATS 0x0070 /* Dump then reset stats counters. */ |
||
303 | |||
304 | #define RX_START 0x0001 |
||
305 | #define RX_RESUME 0x0002 |
||
306 | #define RX_ABORT 0x0004 |
||
307 | #define RX_ADDR_LOAD 0x0006 |
||
308 | #define RX_RESUMENR 0x0007 |
||
309 | #define INT_MASK 0x0100 |
||
310 | #define DRVR_INT 0x0200 /* Driver generated interrupt. */ |
||
311 | |||
312 | /* The Speedo3 Rx and Tx frame/buffer descriptors. */ |
||
313 | struct descriptor_net { /* A generic descriptor. */ |
||
314 | s16 status; /* Offset 0. */ |
||
315 | s16 command; /* Offset 2. */ |
||
316 | u32 link; /* struct descriptor * */ |
||
317 | unsigned char params[0]; |
||
318 | }; |
||
319 | |||
320 | /* The Speedo3 Rx and Tx buffer descriptors. */ |
||
321 | struct RxFD { /* Receive frame descriptor. */ |
||
322 | s32 status; |
||
323 | u32 link; /* struct RxFD * */ |
||
324 | u32 rx_buf_addr; /* void * */ |
||
325 | u16 count; |
||
326 | u16 size; |
||
327 | }; |
||
328 | |||
329 | /* Selected elements of the Tx/RxFD.status word. */ |
||
330 | enum RxFD_bits { |
||
331 | RxComplete=0x8000, RxOK=0x2000, |
||
332 | RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010, |
||
333 | RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002, |
||
334 | StatusComplete=0x8000, |
||
335 | }; |
||
336 | |||
337 | struct TxFD { /* Transmit frame descriptor set. */ |
||
338 | s32 status; |
||
339 | u32 link; /* void * */ |
||
340 | u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */ |
||
341 | s32 count; /* # of TBD (=1), Tx start thresh., etc. */ |
||
342 | /* This constitutes two "TBD" entries -- we only use one. */ |
||
343 | u32 tx_buf_addr0; /* void *, frame to be transmitted. */ |
||
344 | s32 tx_buf_size0; /* Length of Tx frame. */ |
||
345 | u32 tx_buf_addr1; /* void *, frame to be transmitted. */ |
||
346 | s32 tx_buf_size1; /* Length of Tx frame. */ |
||
347 | }; |
||
348 | |||
349 | /* Elements of the dump_statistics block. This block must be longword aligned. */ |
||
350 | struct speedo_stats { |
||
351 | u32 tx_good_frames; |
||
352 | u32 tx_coll16_errs; |
||
353 | u32 tx_late_colls; |
||
354 | u32 tx_underruns; |
||
355 | u32 tx_lost_carrier; |
||
356 | u32 tx_deferred; |
||
357 | u32 tx_one_colls; |
||
358 | u32 tx_multi_colls; |
||
359 | u32 tx_total_colls; |
||
360 | u32 rx_good_frames; |
||
361 | u32 rx_crc_errs; |
||
362 | u32 rx_align_errs; |
||
363 | u32 rx_resource_errs; |
||
364 | u32 rx_overrun_errs; |
||
365 | u32 rx_colls_errs; |
||
366 | u32 rx_runt_errs; |
||
367 | u32 done_marker; |
||
368 | }; |
||
369 | |||
370 | struct speedo_private { |
||
371 | char devname[8]; /* Used only for kernel debugging. */ |
||
372 | const char *product_name; |
||
373 | struct device *next_module; |
||
374 | struct TxFD tx_ring[TX_RING_SIZE]; /* Commands (usually CmdTxPacket). */ |
||
375 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
||
376 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; |
||
377 | struct descriptor_net *last_cmd; /* Last command sent. */ |
||
378 | /* Rx descriptor ring & addresses of receive-in-place skbuffs. */ |
||
379 | struct RxFD *rx_ringp[RX_RING_SIZE]; |
||
380 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; |
||
381 | struct RxFD *last_rxf; /* Last command sent. */ |
||
382 | struct enet_statistics stats; |
||
383 | struct speedo_stats lstats; |
||
384 | struct timer_list timer; /* Media selection timer. */ |
||
385 | long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ |
||
386 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ |
||
387 | unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ |
||
388 | int mc_setup_frm_len; /* The length of an allocated.. */ |
||
389 | struct descriptor_net *mc_setup_frm; /* ..multicast setup frame. */ |
||
390 | int mc_setup_busy; /* Avoid double-use of setup frame. */ |
||
391 | int in_interrupt; /* Word-aligned dev->interrupt */ |
||
392 | char rx_mode; /* Current PROMISC/ALLMULTI setting. */ |
||
393 | unsigned int tx_full:1; /* The Tx queue is full. */ |
||
394 | unsigned int full_duplex:1; /* Full-duplex operation requested. */ |
||
395 | unsigned int default_port:1; /* Last dev->if_port value. */ |
||
396 | unsigned int rx_bug:1; /* Work around receiver hang errata. */ |
||
397 | unsigned int rx_bug10:1; /* Receiver might hang at 10mbps. */ |
||
398 | unsigned int rx_bug100:1; /* Receiver might hang at 100mbps. */ |
||
399 | unsigned short phy[2]; /* PHY media interfaces available. */ |
||
400 | }; |
||
401 | |||
402 | /* The parameters for a CmdConfigure operation. |
||
403 | There are so many options that it would be difficult to document each bit. |
||
404 | We mostly use the default or recommended settings. */ |
||
405 | const char i82557_config_cmd[22] = { |
||
406 | 22, 0x08, 0, 0, 0, 0x80, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */ |
||
407 | 0, 0x2E, 0, 0x60, 0, |
||
408 | 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */ |
||
409 | 0x3f, 0x05, }; |
||
410 | const char i82558_config_cmd[22] = { |
||
411 | 22, 0x08, 0, 1, 0, 0x80, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */ |
||
412 | 0, 0x2E, 0, 0x60, 0x08, 0x88, |
||
413 | 0x68, 0, 0x40, 0xf2, 0xBD, /* 0xBD->0xFD=Force full-duplex */ |
||
414 | 0x31, 0x05, }; |
||
415 | |||
416 | /* PHY media interface chips. */ |
||
417 | static const char *phys[] = { |
||
418 | "None", "i82553-A/B", "i82553-C", "i82503", |
||
419 | "DP83840", "80c240", "80c24", "i82555", |
||
420 | "unknown-8", "unknown-9", "DP83840A", "unknown-11", |
||
421 | "unknown-12", "unknown-13", "unknown-14", "unknown-15", }; |
||
422 | enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240, |
||
423 | S80C24, I82555, DP83840A=10, }; |
||
424 | static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 }; |
||
425 | |||
426 | static void speedo_found1(struct device *dev, long ioaddr, int irq, |
||
427 | int card_idx); |
||
428 | |||
429 | static int read_eeprom(long ioaddr, int location, int addr_len); |
||
430 | static int mdio_read(long ioaddr, int phy_id, int location); |
||
431 | static int mdio_write(long ioaddr, int phy_id, int location, int value); |
||
432 | static int speedo_open(struct device *dev); |
||
433 | static void speedo_timer(unsigned long data); |
||
434 | static void speedo_init_rx_ring(struct device *dev); |
||
435 | static int speedo_start_xmit(struct sk_buff *skb, struct device *dev); |
||
436 | static int speedo_rx(struct device *dev); |
||
437 | static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs); |
||
438 | static int speedo_close(struct device *dev); |
||
439 | static struct enet_statistics *speedo_get_stats(struct device *dev); |
||
440 | static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd); |
||
441 | static void set_rx_mode(struct device *dev); |
||
442 | |||
443 | |||
444 | |||
445 | /* The parameters that may be passed in... */ |
||
446 | /* 'options' is used to pass a transceiver override or full-duplex flag |
||
447 | e.g. "options=16" for FD, "options=32" for 100mbps-only. */ |
||
448 | static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1}; |
||
449 | static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1}; |
||
450 | #ifdef MODULE |
||
451 | static int debug = -1; /* The debug level */ |
||
452 | #endif |
||
453 | |||
454 | #ifdef honor_default_port |
||
455 | /* Optional driver feature to allow forcing the transceiver setting. |
||
456 | Not recommended. */ |
||
457 | static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100, |
||
458 | 0x2000, 0x2100, 0x0400, 0x3100}; |
||
459 | #endif |
||
460 | |||
461 | /* A list of all installed Speedo devices, for removing the driver module. */ |
||
462 | static struct device *root_speedo_dev = NULL; |
||
463 | |||
464 | int eepro100_init(struct device *dev) |
||
465 | { |
||
466 | int cards_found = 0; |
||
467 | static int pci_index = 0; |
||
468 | |||
469 | if (! pcibios_present()) |
||
470 | return cards_found; |
||
471 | |||
472 | for (; pci_index < 8; pci_index++) { |
||
473 | unsigned char pci_bus, pci_device_fn, pci_latency; |
||
474 | unsigned short int vendor,device; |
||
475 | long ioaddr; |
||
476 | int irq; |
||
477 | |||
478 | u16 pci_command, new_command; |
||
479 | |||
480 | if (speedo_debug > 0) |
||
481 | printk("Finding Device\n"); |
||
482 | |||
483 | if (pci20to26_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index, |
||
484 | &pci_bus, &pci_device_fn) |
||
485 | != PCIBIOS_SUCCESSFUL) |
||
486 | break; |
||
487 | |||
488 | pci20to26_read_config_word(pci_bus, pci_device_fn, |
||
489 | PCI_VENDOR_ID, &vendor); |
||
490 | pci20to26_read_config_word(pci_bus, pci_device_fn, |
||
491 | PCI_DEVICE_ID, &device); |
||
492 | |||
493 | if (speedo_debug > 0) |
||
494 | printk("Device = %x Vendor = %x\n",(int)device,(int)vendor); |
||
495 | |||
496 | if (vendor != PCI_VENDOR_ID_INTEL) |
||
497 | break; |
||
498 | |||
499 | |||
500 | #if 0 //defined(PCI_SUPPORT_VER2) |
||
501 | { |
||
502 | struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn); |
||
503 | ioaddr = pdev->base_address[1]; /* Use [0] to mem-map */ |
||
504 | irq = pdev->irq; |
||
505 | printk("Device 1 %d %d\n",ioaddr,irq); |
||
506 | } |
||
507 | #else |
||
508 | { |
||
509 | u32 pci_ioaddr; |
||
510 | u8 pci_irq_line; |
||
511 | pci20to26_read_config_byte(pci_bus, pci_device_fn, |
||
512 | PCI_INTERRUPT_LINE, &pci_irq_line); |
||
513 | /* Note: BASE_ADDRESS_0 is for memory-mapping the registers. */ |
||
514 | pci20to26_read_config_dword(pci_bus, pci_device_fn, |
||
515 | PCI_BASE_ADDRESS_1, &pci_ioaddr); |
||
516 | ioaddr = pci_ioaddr; |
||
517 | irq = pci_irq_line; |
||
518 | printk("Device 2 %d %d\n",ioaddr,irq); |
||
519 | } |
||
520 | #endif |
||
521 | /* Remove I/O space marker in bit 0. */ |
||
522 | ioaddr &= ~3; |
||
523 | if (speedo_debug > 2) |
||
524 | printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n", |
||
525 | ioaddr, irq); |
||
526 | |||
527 | /* Get and check the bus-master and latency values. */ |
||
528 | pci20to26_read_config_word(pci_bus, pci_device_fn, |
||
529 | PCI_COMMAND, &pci_command); |
||
530 | new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO; |
||
531 | if (pci_command != new_command) { |
||
532 | printk(KERN_INFO " The PCI BIOS has not enabled this" |
||
533 | " device! Updating PCI command %4.4x->%4.4x.\n", |
||
534 | pci_command, new_command); |
||
535 | pci20to26_write_config_word(pci_bus, pci_device_fn, |
||
536 | PCI_COMMAND, new_command); |
||
537 | } |
||
538 | pci20to26_read_config_byte(pci_bus, pci_device_fn, |
||
539 | PCI_LATENCY_TIMER, &pci_latency); |
||
540 | if (pci_latency < 32) { |
||
541 | printk(" PCI latency timer (CFLT) is unreasonably low at %d." |
||
542 | " Setting to 32 clocks.\n", pci_latency); |
||
543 | pci20to26_write_config_byte(pci_bus, pci_device_fn, |
||
544 | PCI_LATENCY_TIMER, 32); |
||
545 | } else if (speedo_debug > 1) |
||
546 | printk(" PCI latency timer (CFLT) is %#x.\n", pci_latency); |
||
547 | |||
548 | speedo_found1(dev, ioaddr, irq, cards_found); |
||
549 | dev = NULL; |
||
550 | cards_found++; |
||
551 | } |
||
552 | |||
553 | return cards_found; |
||
554 | } |
||
555 | |||
556 | static void speedo_found1(struct device *dev, long ioaddr, int irq, |
||
557 | int card_idx) |
||
558 | { |
||
559 | static int did_version = 0; /* Already printed version info. */ |
||
560 | struct speedo_private *sp; |
||
561 | char *product; |
||
562 | int i, option; |
||
563 | u16 eeprom[0x40]; |
||
564 | |||
565 | if (speedo_debug > 0 && did_version++ == 0) |
||
566 | printk(version); |
||
567 | |||
568 | dev = init_etherdev(dev, sizeof(struct speedo_private)); |
||
569 | |||
570 | if (dev->mem_start > 0) |
||
571 | option = dev->mem_start; |
||
572 | else if (card_idx >= 0 && options[card_idx] >= 0) |
||
573 | option = options[card_idx]; |
||
574 | else |
||
575 | option = 0; |
||
576 | |||
577 | /* Read the station address EEPROM before doing the reset. |
||
578 | Perhaps this should even be done before accepting the device, |
||
579 | then we wouldn't have a device name with which to report the error. */ |
||
580 | { |
||
581 | u16 sum = 0; |
||
582 | int j; |
||
583 | int addr_len = read_eeprom(ioaddr, 0, 6) == 0xffff ? 8 : 6; |
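/* Guess the EEPROM address width: if a 6-bit-address read of word 0 comes
   back as all-ones, assume the part wants 8 address bits instead. */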
||
584 | |||
585 | for (j = 0, i = 0; i < 0x40; i++) { |
||
586 | u16 value = read_eeprom(ioaddr, i, addr_len); |
||
587 | eeprom[i] = value; |
||
588 | sum += value; |
||
589 | if (i < 3) { |
||
590 | dev->dev_addr[j++] = value; |
||
591 | dev->dev_addr[j++] = value >> 8; |
||
592 | } |
||
593 | } |
||
594 | if (sum != 0xBABA) |
||
595 | printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, " |
||
596 | "check settings before activating this device!\n", |
||
597 | dev->name, sum); |
||
598 | /* Don't unregister_netdev(dev); as the EEPro may actually be |
||
599 | usable, especially if the MAC address is set later. */ |
||
600 | } |
||
601 | |||
602 | /* Reset the chip: stop Tx and Rx processes and clear counters. |
||
603 | This takes less than 10usec and will easily finish before the next |
||
604 | action. */ |
||
605 | outl(0, ioaddr + SCBPort); |
||
606 | |||
607 | if (eeprom[3] & 0x0100) |
||
608 | product = "OEM i82557/i82558 10/100 Ethernet"; |
||
609 | else |
||
610 | product = "Intel EtherExpress Pro 10/100"; |
||
611 | |||
612 | printk(KERN_INFO "%s: %s at %#3lx, ", dev->name, product, ioaddr); |
||
613 | |||
614 | for (i = 0; i < 5; i++) |
||
615 | printk("%2x:", dev->dev_addr[i]); |
||
616 | printk("%2x, IRQ %d.\n", dev->dev_addr[i], irq); |
||
617 | |||
618 | #ifndef kernel_bloat |
||
619 | /* OK, this is pure kernel bloat. I don't like it when other drivers |
||
620 | waste non-pageable kernel space to emit similar messages, but I need |
||
621 | them for bug reports. */ |
||
622 | { |
||
623 | const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"}; |
||
624 | /* The self-test results must be paragraph aligned. */ |
||
625 | s32 str[6], *volatile self_test_results; |
||
626 | int boguscnt = 16000; /* Timeout for self-test. */ |
||
627 | if (eeprom[3] & 0x03) |
||
628 | printk(KERN_INFO " Receiver lock-up bug exists -- enabling" |
||
629 | " work-around.\n"); |
||
630 | printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical" |
||
631 | " connectors present:", |
||
632 | eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff); |
||
633 | for (i = 0; i < 4; i++) |
||
634 | if (eeprom[5] & (1<<i)) |
||
635 | printk(connectors[i]); |
||
636 | printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n", |
||
637 | phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f); |
||
638 | if (eeprom[7] & 0x0700) |
||
639 | printk(KERN_INFO " Secondary interface chip %s.\n", |
||
640 | phys[(eeprom[7]>>8)&7]); |
||
641 | if (((eeprom[6]>>8) & 0x3f) == DP83840 |
||
642 | || ((eeprom[6]>>8) & 0x3f) == DP83840A) { |
||
643 | int mdi_reg23 = mdio_read(ioaddr, eeprom[6] & 0x1f, 23) | 0x0422; |
||
644 | if (congenb) |
||
645 | mdi_reg23 |= 0x0100; |
||
646 | printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n", |
||
647 | mdi_reg23); |
||
648 | mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23); |
||
649 | } |
||
650 | if ((option >= 0) && (option & 0x70)) { |
||
651 | printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n", |
||
652 | (option & 0x20 ? 100 : 10), |
||
653 | (option & 0x10 ? "full" : "half")); |
||
654 | mdio_write(ioaddr, eeprom[6] & 0x1f, 0, |
||
655 | ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */ |
||
656 | ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */ |
||
657 | } |
||
658 | |||
659 | /* Perform a system self-test. */ |
||
660 | self_test_results = (s32*) ((((long) str) + 15) & ~0xf); |
||
661 | self_test_results[0] = 0; |
||
662 | self_test_results[1] = -1; |
||
663 | outl(virt_to_bus(self_test_results) | 1, ioaddr + SCBPort); |
||
664 | do { |
||
665 | udelay(10); |
||
666 | } while (self_test_results[1] == -1 && --boguscnt >= 0); |
||
667 | |||
668 | if (boguscnt < 0) { /* Test optimized out. */ |
||
669 | printk(KERN_ERR "Self test failed, status %8.8x:\n" |
||
670 | KERN_ERR " Failure to initialize the i82557.\n" |
||
671 | KERN_ERR " Verify that the card is a bus-master" |
||
672 | " capable slot.\n", |
||
673 | self_test_results[1]); |
||
674 | } else |
||
675 | printk(KERN_INFO " General self-test: %s.\n" |
||
676 | KERN_INFO " Serial sub-system self-test: %s.\n" |
||
677 | KERN_INFO " Internal registers self-test: %s.\n" |
||
678 | KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n", |
||
679 | self_test_results[1] & 0x1000 ? "failed" : "passed", |
||
680 | self_test_results[1] & 0x0020 ? "failed" : "passed", |
||
681 | self_test_results[1] & 0x0008 ? "failed" : "passed", |
||
682 | self_test_results[1] & 0x0004 ? "failed" : "passed", |
||
683 | self_test_results[0]); |
||
684 | } |
||
685 | #endif /* kernel_bloat */ |
||
686 | |||
687 | outl(0, ioaddr + SCBPort); |
||
688 | |||
689 | /* We do a request_region() only to register /proc/ioports info. */ |
||
690 | request_region(ioaddr, SPEEDO3_TOTAL_SIZE, "Intel Speedo3 Ethernet"); |
||
691 | |||
692 | dev->base_addr = ioaddr; |
||
693 | dev->irq = irq; |
||
694 | |||
695 | if (dev->priv == NULL) |
||
696 | dev->priv = kmalloc(sizeof(*sp), GFP_KERNEL); |
||
697 | sp = dev->priv; |
||
698 | memset(sp, 0, sizeof(*sp)); |
||
699 | sp->next_module = root_speedo_dev; |
||
700 | root_speedo_dev = dev; |
||
701 | |||
702 | sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0; |
||
703 | if (card_idx >= 0) { |
||
704 | if (full_duplex[card_idx] >= 0) |
||
705 | sp->full_duplex = full_duplex[card_idx]; |
||
706 | } |
||
707 | sp->default_port = option >= 0 ? (option & 0x0f) : 0; |
||
708 | |||
709 | sp->phy[0] = eeprom[6]; |
||
710 | sp->phy[1] = eeprom[7]; |
||
711 | sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1; |
||
712 | |||
713 | if (sp->rx_bug) |
||
714 | printk(KERN_INFO " Receiver lock-up workaround activated.\n"); |
||
715 | |||
716 | /* The Speedo-specific entries in the device structure. */ |
||
717 | dev->open = &speedo_open; |
||
718 | dev->hard_start_xmit = &speedo_start_xmit; |
||
719 | dev->stop = &speedo_close; |
||
720 | dev->get_stats = &speedo_get_stats; |
||
721 | dev->set_multicast_list = &set_rx_mode; |
||
722 | dev->do_ioctl = &speedo_ioctl; |
||
723 | |||
724 | return; |
||
725 | } |
||
726 | |||
727 | /* Serial EEPROM section. |
||
728 | A "bit" grungy, but we work our way through bit-by-bit :->. */ |
||
729 | /* EEPROM_Ctrl bits. */ |
||
730 | #define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */ |
||
731 | #define EE_CS 0x02 /* EEPROM chip select. */ |
||
732 | #define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */ |
||
733 | #define EE_WRITE_0 0x01 |
||
734 | #define EE_WRITE_1 0x05 |
||
735 | #define EE_DATA_READ 0x08 /* EEPROM chip data out. */ |
||
736 | #define EE_ENB (0x4800 | EE_CS) |
||
737 | |||
738 | /* Delay between EEPROM clock transitions. |
||
739 | This will actually work with no delay on 33Mhz PCI. */ |
||
740 | #define eeprom_delay(nanosec) udelay(1); |
||
741 | |||
742 | /* The EEPROM commands include the always-set leading bit. */ |
||
743 | #define EE_WRITE_CMD (5 << addr_len) |
||
744 | #define EE_READ_CMD (6 << addr_len) |
||
745 | #define EE_ERASE_CMD (7 << addr_len) |
||
746 | |||
747 | static int read_eeprom(long ioaddr, int location, int addr_len) |
||
748 | { |
||
749 | unsigned short retval = 0; |
||
750 | int ee_addr = ioaddr + SCBeeprom; |
||
751 | int read_cmd = location | EE_READ_CMD; |
||
752 | int i; |
||
753 | |||
754 | outw(EE_ENB & ~EE_CS, ee_addr); |
||
755 | outw(EE_ENB, ee_addr); |
||
756 | |||
757 | /* Shift the read command bits out. */ |
||
758 | for (i = 12; i >= 0; i--) { |
||
759 | short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; |
||
760 | outw(EE_ENB | dataval, ee_addr); |
||
761 | eeprom_delay(100); |
||
762 | outw(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); |
||
763 | eeprom_delay(150); |
||
764 | } |
||
765 | outw(EE_ENB, ee_addr); |
||
766 | |||
767 | for (i = 15; i >= 0; i--) { |
||
768 | outw(EE_ENB | EE_SHIFT_CLK, ee_addr); |
||
769 | eeprom_delay(100); |
||
770 | retval = (retval << 1) | ((inw(ee_addr) & EE_DATA_READ) ? 1 : 0); |
||
771 | outw(EE_ENB, ee_addr); |
||
772 | eeprom_delay(100); |
||
773 | } |
||
774 | |||
775 | /* Terminate the EEPROM access. */ |
||
776 | outw(EE_ENB & ~EE_CS, ee_addr); |
||
777 | return retval; |
||
778 | } |
||
779 | |||
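/* MDI (MII management) access goes through SCBCtrlMDI.  As the two routines
   below show, the command word carries the opcode in bits 27:26 (10 = read,
   i.e. 0x08000000; 01 = write, i.e. 0x04000000), the PHY address in bits
   25:21, the register number in bits 20:16 and, for a write, the data in
   the low 16 bits; completion is signalled by bit 28 (0x10000000). */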
780 | static int mdio_read(long ioaddr, int phy_id, int location) |
||
781 | { |
||
782 | int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */ |
||
783 | outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI); |
||
784 | do { |
||
785 | val = inl(ioaddr + SCBCtrlMDI); |
||
786 | if (--boguscnt < 0) { |
||
787 | printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val); |
||
788 | } |
||
789 | } while (! (val & 0x10000000)); |
||
790 | return val & 0xffff; |
||
791 | } |
||
792 | |||
793 | static int mdio_write(long ioaddr, int phy_id, int location, int value) |
||
794 | { |
||
795 | int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */ |
||
796 | outl(0x04000000 | (location<<16) | (phy_id<<21) | value, |
||
797 | ioaddr + SCBCtrlMDI); |
||
798 | do { |
||
799 | val = inl(ioaddr + SCBCtrlMDI); |
||
800 | if (--boguscnt < 0) { |
||
801 | printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val); |
||
802 | } |
||
803 | } while (! (val & 0x10000000)); |
||
804 | return val & 0xffff; |
||
805 | } |
||
806 | |||
807 | |||
808 | static int |
||
809 | speedo_open(struct device *dev) |
||
810 | { |
||
811 | struct speedo_private *sp = (struct speedo_private *)dev->priv; |
||
812 | long ioaddr = dev->base_addr; |
||
813 | |||
814 | #ifdef notdef |
||
815 | /* We could reset the chip, but should not need to. */ |
||
816 | outl(0, ioaddr + SCBPort); |
||
817 | udelay(10); |
||
818 | #endif |
||
819 | |||
820 | if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, |
||
821 | "Intel EtherExpress Pro 10/100 Ethernet", dev)) { |
||
822 | return -EAGAIN; |
||
823 | } |
||
824 | if (speedo_debug > 1) |
||
825 | printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq); |
||
826 | |||
827 | MOD_INC_USE_COUNT; |
||
828 | |||
829 | /* Retrigger negotiation to reset previous errors. */ |
||
830 | if ((sp->phy[0] & 0x8000) == 0) { |
||
831 | int phy_addr = sp->phy[0] & 0x1f ; |
||
832 | /* Use 0x3300 for restarting NWay, other values to force xcvr: |
||
833 | 0x0000 10-HD |
||
834 | 0x0100 10-FD |
||
835 | 0x2000 100-HD |
||
836 | 0x2100 100-FD |
||
837 | */ |
||
838 | #ifdef honor_default_port |
||
839 | mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]); |
||
840 | #else |
||
841 | mdio_write(ioaddr, phy_addr, 0, 0x3300); |
||
842 | #endif |
||
843 | } |
||
844 | |||
845 | /* Load the statistics block address. */ |
||
846 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
847 | outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer); |
||
848 | outw(INT_MASK | CU_STATSADDR, ioaddr + SCBCmd); |
||
849 | sp->lstats.done_marker = 0; |
||
850 | |||
851 | speedo_init_rx_ring(dev); |
||
852 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
853 | outl(0, ioaddr + SCBPointer); |
||
854 | outw(INT_MASK | RX_ADDR_LOAD, ioaddr + SCBCmd); |
||
855 | |||
856 | /* Todo: verify that we must wait for previous command completion. */ |
||
857 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
858 | outl(virt_to_bus(sp->rx_ringp[0]), ioaddr + SCBPointer); |
||
859 | outw(INT_MASK | RX_START, ioaddr + SCBCmd); |
||
860 | |||
861 | /* Fill the first command with our physical address. */ |
||
862 | { |
||
863 | u16 *eaddrs = (u16 *)dev->dev_addr; |
||
864 | u16 *setup_frm = (u16 *)&(sp->tx_ring[0].tx_desc_addr); |
||
865 | |||
866 | /* Avoid a bug(?!) here by marking the command already completed. */ |
||
867 | sp->tx_ring[0].status = ((CmdSuspend | CmdIASetup) << 16) | 0xa000; |
||
868 | sp->tx_ring[0].link = virt_to_bus(&(sp->tx_ring[1])); |
||
869 | *setup_frm++ = eaddrs[0]; |
||
870 | *setup_frm++ = eaddrs[1]; |
||
871 | *setup_frm++ = eaddrs[2]; |
||
872 | } |
||
873 | sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[0]; |
||
874 | sp->cur_tx = 1; |
||
875 | sp->dirty_tx = 0; |
||
876 | sp->tx_full = 0; |
||
877 | |||
878 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
879 | outl(0, ioaddr + SCBPointer); |
||
880 | outw(INT_MASK | CU_CMD_BASE, ioaddr + SCBCmd); |
||
881 | |||
882 | dev->if_port = sp->default_port; |
||
883 | |||
884 | sp->in_interrupt = 0; |
||
885 | dev->tbusy = 0; |
||
886 | dev->interrupt = 0; |
||
887 | dev->start = 1; |
||
888 | |||
889 | /* Start the chip's Tx process and unmask interrupts. */ |
||
890 | /* Todo: verify that we must wait for previous command completion. */ |
||
891 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
892 | outl(virt_to_bus(&sp->tx_ring[0]), ioaddr + SCBPointer); |
||
893 | outw(CU_START, ioaddr + SCBCmd); |
||
894 | |||
895 | /* Setup the chip and configure the multicast list. */ |
||
896 | sp->mc_setup_frm = NULL; |
||
897 | sp->mc_setup_frm_len = 0; |
||
898 | sp->mc_setup_busy = 0; |
||
899 | sp->rx_mode = -1; /* Invalid -> always reset the mode. */ |
||
900 | set_rx_mode(dev); |
||
901 | |||
902 | if (speedo_debug > 2) { |
||
903 | printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n", |
||
904 | dev->name, inw(ioaddr + SCBStatus)); |
||
905 | } |
||
906 | /* Set the timer. The timer serves a dual purpose: |
||
907 | 1) to monitor the media interface (e.g. link beat) and perhaps switch |
||
908 | to an alternate media type |
||
909 | 2) to monitor Rx activity, and restart the Rx process if the receiver |
||
910 | hangs. */ |
||
911 | init_timer(&sp->timer); |
||
912 | sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */ |
||
913 | sp->timer.data = (unsigned long)dev; |
||
914 | sp->timer.function = &speedo_timer; /* timer handler */ |
||
915 | add_timer(&sp->timer); |
||
916 | |||
917 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
918 | outw(CU_DUMPSTATS, ioaddr + SCBCmd); |
||
919 | /* No need to wait for the command unit to accept here. */ |
||
920 | if ((sp->phy[0] & 0x8000) == 0) |
||
921 | mdio_read(ioaddr, sp->phy[0] & 0x1f, 0); |
||
922 | return 0; |
||
923 | } |
||
924 | |||
925 | /* Media monitoring and control. */ |
||
926 | static void speedo_timer(unsigned long data) |
||
927 | { |
||
928 | struct device *dev = (struct device *)data; |
||
929 | struct speedo_private *sp = (struct speedo_private *)dev->priv; |
||
930 | |||
931 | if (speedo_debug > 3) { |
||
932 | long ioaddr = dev->base_addr; |
||
933 | printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n", |
||
934 | dev->name, inw(ioaddr + SCBStatus)); |
||
935 | } |
||
936 | if (sp->rx_mode < 0 || |
||
937 | (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) { |
||
938 | /* We haven't received a packet in a Long Time. We might have been |
||
939 | bitten by the receiver hang bug. This can be cleared by sending |
||
940 | a set multicast list command. */ |
||
941 | set_rx_mode(dev); |
||
942 | } |
||
943 | /* We must continue to monitor the media. */ |
||
944 | sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */ |
||
945 | add_timer(&sp->timer); |
||
946 | } |
||
947 | |||
948 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ |
||
949 | static void |
||
950 | speedo_init_rx_ring(struct device *dev) |
||
951 | { |
||
952 | struct speedo_private *sp = (struct speedo_private *)dev->priv; |
||
953 | struct RxFD *rxf, *last_rxf = NULL; |
||
954 | int i; |
||
955 | |||
956 | sp->cur_rx = 0; |
||
957 | |||
958 | for (i = 0; i < RX_RING_SIZE; i++) { |
||
959 | struct sk_buff *skb; |
||
960 | skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD)); |
||
961 | sp->rx_skbuff[i] = skb; |
||
962 | if (skb == NULL) |
||
963 | break; /* OK. Just initially short of Rx bufs. */ |
||
964 | skb->dev = dev; /* Mark as being used by this device. */ |
||
965 | rxf = (struct RxFD *)skb->tail; |
||
966 | sp->rx_ringp[i] = rxf; |
||
967 | skb_reserve(skb, sizeof(struct RxFD)); |
||
968 | if (last_rxf) |
||
969 | last_rxf->link = virt_to_bus(rxf); |
||
970 | last_rxf = rxf; |
||
971 | rxf->status = 0x00000001; /* '1' is flag value only. */ |
||
972 | rxf->link = 0; /* None yet. */ |
||
973 | /* This field is unused by the i82557; we use it as a consistency check. */ |
||
974 | #ifdef final_version |
||
975 | rxf->rx_buf_addr = 0xffffffff; |
||
976 | #else |
||
977 | rxf->rx_buf_addr = virt_to_bus(skb->tail); |
||
978 | #endif |
||
979 | rxf->count = 0; |
||
980 | rxf->size = PKT_BUF_SZ; |
||
981 | } |
||
982 | sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); |
||
983 | /* Mark the last entry as end-of-list. */ |
||
984 | last_rxf->status = 0xC0000002; /* '2' is flag value only. */ |
||
985 | sp->last_rxf = last_rxf; |
||
986 | } |
||
987 | |||
988 | static void speedo_tx_timeout(struct device *dev) |
||
989 | { |
||
990 | struct speedo_private *sp = (struct speedo_private *)dev->priv; |
||
991 | long ioaddr = dev->base_addr; |
||
992 | |||
993 | printk(KERN_WARNING "%s: Transmit timed out: status %4.4x " |
||
994 | " %4.4x at %d/%d command %8.8x.\n", |
||
995 | dev->name, inw(ioaddr + SCBStatus), inw(ioaddr + SCBCmd), |
||
996 | sp->dirty_tx, sp->cur_tx, |
||
997 | sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status); |
||
998 | if ((inw(ioaddr + SCBStatus) & 0x00C0) != 0x0080) { |
||
999 | printk(KERN_WARNING "%s: Trying to restart the transmitter...\n", |
||
1000 | dev->name); |
||
1001 | outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]), |
||
1002 | ioaddr + SCBPointer); |
||
1003 | outw(CU_START, ioaddr + SCBCmd); |
||
1004 | } else { |
||
1005 | outw(DRVR_INT, ioaddr + SCBCmd); |
||
1006 | } |
||
1007 | /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */ |
||
1008 | if ((sp->phy[0] & 0x8000) == 0) { |
||
1009 | int phy_addr = sp->phy[0] & 0x1f; |
||
1010 | mdio_write(ioaddr, phy_addr, 0, 0x0400); |
||
1011 | mdio_write(ioaddr, phy_addr, 1, 0x0000); |
||
1012 | mdio_write(ioaddr, phy_addr, 4, 0x0000); |
||
1013 | mdio_write(ioaddr, phy_addr, 0, 0x8000); |
||
1014 | #ifdef honor_default_port |
||
1015 | mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]); |
||
1016 | #endif |
||
1017 | } |
||
1018 | sp->stats.tx_errors++; |
||
1019 | dev->trans_start = jiffies; |
||
1020 | return; |
||
1021 | } |
||
1022 | |||
1023 | static int |
||
1024 | speedo_start_xmit(struct sk_buff *skb, struct device *dev) |
||
1025 | { |
||
1026 | struct speedo_private *sp = (struct speedo_private *)dev->priv; |
||
1027 | long ioaddr = dev->base_addr; |
||
1028 | int entry; |
||
1029 | |||
1030 | /* Block a timer-based transmit from overlapping. This could better be |
||
1031 | done with atomic_swap(1, dev->tbusy), but set_bit() works as well. |
||
1032 | If this ever occurs the queue layer is doing something evil! */ |
||
1033 | if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) { |
||
1034 | int tickssofar = jiffies - dev->trans_start; |
||
1035 | if (tickssofar < TX_TIMEOUT - 2) |
||
1036 | return 1; |
||
1037 | if (tickssofar < TX_TIMEOUT) { |
||
1038 | /* Reap sent packets from the full Tx queue. */ |
||
1039 | outw(DRVR_INT, ioaddr + SCBCmd); |
||
1040 | return 1; |
||
1041 | } |
||
1042 | speedo_tx_timeout(dev); |
||
1043 | return 1; |
||
1044 | } |
||
1045 | |||
1046 | /* Caution: the write order is important here, set the base address |
||
1047 | with the "ownership" bits last. */ |
||
1048 | |||
1049 | { /* Prevent interrupts from changing the Tx ring from underneath us. */ |
||
1050 | unsigned long flags; |
||
1051 | |||
1052 | save_flags(flags); |
||
1053 | cli(); |
||
1054 | /* Calculate the Tx descriptor entry. */ |
||
1055 | entry = sp->cur_tx++ % TX_RING_SIZE; |
||
1056 | |||
1057 | sp->tx_skbuff[entry] = skb; |
||
1058 | /* Todo: be a little more clever about setting the interrupt bit. */ |
||
1059 | sp->tx_ring[entry].status = |
||
1060 | (CmdSuspend | CmdTx | CmdTxFlex) << 16; |
||
1061 | sp->tx_ring[entry].link = |
||
1062 | virt_to_bus(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]); |
||
1063 | sp->tx_ring[entry].tx_desc_addr = |
||
1064 | virt_to_bus(&sp->tx_ring[entry].tx_buf_addr0); |
||
1065 | /* The data region is always in one buffer descriptor, Tx FIFO |
||
1066 | threshold of 256. */ |
||
1067 | sp->tx_ring[entry].count = 0x01208000; |
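/* The constant packs the fields noted in the TxFD declaration above: 0x01
   in the top byte is the TBD count (a single buffer descriptor) and 0x20 in
   the next byte is the Tx start threshold that the surrounding comments
   describe as 256 bytes; the remaining bits are left as the chip expects
   for flexible mode. */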
||
1068 | sp->tx_ring[entry].tx_buf_addr0 = virt_to_bus(skb->data); |
||
1069 | sp->tx_ring[entry].tx_buf_size0 = skb->len; |
||
1070 | /* Todo: perhaps leave the interrupt bit set if the Tx queue is more |
||
1071 | than half full. Argument against: we should be receiving packets |
||
1072 | and scavenging the queue. Argument for: if so, it shouldn't |
||
1073 | matter. */ |
||
1074 | sp->last_cmd->command &= ~(CmdSuspend | CmdIntr); |
||
1075 | sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry]; |
||
1076 | restore_flags(flags); |
||
1077 | /* Trigger the command unit resume. */ |
||
1078 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
1079 | outw(CU_RESUME, ioaddr + SCBCmd); |
||
1080 | } |
||
1081 | |||
1082 | /* Leave room for set_rx_mode() to fill two entries. */ |
||
1083 | if (sp->cur_tx - sp->dirty_tx > TX_RING_SIZE - 3) |
||
1084 | sp->tx_full = 1; |
||
1085 | else |
||
1086 | clear_bit(0, (void*)&dev->tbusy); |
||
1087 | |||
1088 | dev->trans_start = jiffies; |
||
1089 | |||
1090 | return 0; |
||
1091 | } |
||
1092 | |||
1093 | /* The interrupt handler does all of the Rx thread work and cleans up |
||
1094 | after the Tx thread. */ |
||
1095 | static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs) |
||
1096 | { |
||
1097 | struct device *dev = (struct device *)dev_instance; |
||
1098 | struct speedo_private *sp; |
||
1099 | long ioaddr, boguscnt = max_interrupt_work; |
||
1100 | unsigned short status; |
||
1101 | |||
1102 | #ifndef final_version |
||
1103 | if (dev == NULL) { |
||
1104 | printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq); |
||
1105 | return; |
||
1106 | } |
||
1107 | #endif |
||
1108 | |||
1109 | ioaddr = dev->base_addr; |
||
1110 | sp = (struct speedo_private *)dev->priv; |
||
1111 | #ifndef final_version |
||
1112 | /* A lock to prevent simultaneous entry on SMP machines. */ |
||
1113 | if (test_and_set_bit(0, (void*)&sp->in_interrupt)) { |
||
1114 | printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n", |
||
1115 | dev->name); |
||
1116 | sp->in_interrupt = 0; /* Avoid halting machine. */ |
||
1117 | return; |
||
1118 | } |
||
1119 | dev->interrupt = 1; |
||
1120 | #endif |
||
1121 | |||
1122 | do { |
||
1123 | status = inw(ioaddr + SCBStatus); |
||
1124 | /* Acknowledge all of the current interrupt sources ASAP. */ |
||
1125 | outw(status & 0xfc00, ioaddr + SCBStatus); |
||
1126 | |||
1127 | if (speedo_debug > 4) |
||
1128 | printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n", |
||
1129 | dev->name, status); |
||
1130 | |||
1131 | if ((status & 0xfc00) == 0) |
||
1132 | break; |
||
1133 | |||
1134 | if (status & 0x4000) /* Packet received. */ |
||
1135 | speedo_rx(dev); |
||
1136 | |||
1137 | if (status & 0x1000) { |
||
1138 | if ((status & 0x003c) == 0x0028) /* No more Rx buffers. */ |
||
1139 | outw(RX_RESUMENR, ioaddr + SCBCmd); |
||
1140 | else if ((status & 0x003c) == 0x0008) { /* No resources (why?!) */ |
||
1141 | /* No idea of what went wrong. Restart the receiver. */ |
||
1142 | outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]), |
||
1143 | ioaddr + SCBPointer); |
||
1144 | outw(RX_START, ioaddr + SCBCmd); |
||
1145 | } |
||
1146 | sp->stats.rx_errors++; |
||
1147 | } |
||
1148 | |||
1149 | /* User interrupt, Command/Tx unit interrupt or CU not active. */ |
||
1150 | if (status & 0xA400) { |
||
1151 | unsigned int dirty_tx = sp->dirty_tx; |
||
1152 | |||
1153 | while (sp->cur_tx - dirty_tx > 0) { |
||
1154 | int entry = dirty_tx % TX_RING_SIZE; |
||
1155 | int status = sp->tx_ring[entry].status; |
||
1156 | |||
1157 | if (speedo_debug > 5) |
||
1158 | printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n", |
||
1159 | entry, status); |
||
1160 | if ((status & StatusComplete) == 0) |
||
1161 | break; /* It still hasn't been processed. */ |
||
1162 | /* Free the original skb. */ |
||
1163 | if (sp->tx_skbuff[entry]) { |
||
1164 | sp->stats.tx_packets++; /* Count only user packets. */ |
||
1165 | dev_free_skb(sp->tx_skbuff[entry]); |
||
1166 | sp->tx_skbuff[entry] = 0; |
||
1167 | } else if ((sp->tx_ring[entry].status&0x70000) == CmdNOp << 16) |
||
1168 | sp->mc_setup_busy = 0; |
||
1169 | dirty_tx++; |
||
1170 | } |
||
1171 | |||
1172 | #ifndef final_version |
||
1173 | if (sp->cur_tx - dirty_tx > TX_RING_SIZE) { |
||
1174 | printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d," |
||
1175 | " full=%d.\n", |
||
1176 | dirty_tx, sp->cur_tx, sp->tx_full); |
||
1177 | dirty_tx += TX_RING_SIZE; |
||
1178 | } |
||
1179 | #endif |
||
1180 | |||
1181 | if (sp->tx_full && dev->tbusy |
||
1182 | && dirty_tx > sp->cur_tx - TX_RING_SIZE + 2) { |
||
1183 | /* The ring is no longer full, clear tbusy. */ |
||
1184 | sp->tx_full = 0; |
||
1185 | clear_bit(0, (void*)&dev->tbusy); |
||
1186 | mark_bh(NET_BH); |
||
1187 | } |
||
1188 | |||
1189 | sp->dirty_tx = dirty_tx; |
||
1190 | } |
||
1191 | |||
1192 | if (--boguscnt < 0) { |
||
1193 | printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n", |
||
1194 | dev->name, status); |
||
1195 | /* Clear all interrupt sources. */ |
||
1196 | outl(0xfc00, ioaddr + SCBStatus); |
||
1197 | break; |
||
1198 | } |
||
1199 | } while (1); |
||
1200 | |||
1201 | if (speedo_debug > 3) |
||
1202 | printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", |
||
1203 | dev->name, inw(ioaddr + SCBStatus)); |
||
1204 | |||
1205 | dev->interrupt = 0; |
||
1206 | clear_bit(0, (void*)&sp->in_interrupt); |
||
1207 | return; |
||
1208 | } |
||
1209 | |||
1210 | static int |
||
1211 | speedo_rx(struct device *dev) |
||
1212 | { |
||
1213 | struct speedo_private *sp = (struct speedo_private *)dev->priv; |
||
1214 | int entry = sp->cur_rx % RX_RING_SIZE; |
||
1215 | int status; |
||
1216 | int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx; |
||
1217 | |||
1218 | if (speedo_debug > 4) |
||
1219 | printk(KERN_DEBUG " In speedo_rx().\n"); |
||
1220 | /* If we own the next entry, it's a new packet. Send it up. */ |
||
1221 | while (sp->rx_ringp[entry] != NULL && |
||
1222 | (status = sp->rx_ringp[entry]->status) & RxComplete) { |
||
1223 | |||
1224 | if (--rx_work_limit < 0) |
||
1225 | break; |
||
1226 | if (speedo_debug > 4) |
||
1227 | printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status, |
||
1228 | sp->rx_ringp[entry]->count & 0x3fff); |
||
1229 | if ((status & (RxErrTooBig|RxOK)) != RxOK) { |
||
1230 | if (status & RxErrTooBig) |
||
1231 | printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, " |
||
1232 | "status %8.8x!\n", dev->name, status); |
||
1233 | else if ( ! (status & 0x2000)) { |
||
1234 | /* There was a fatal error. This *should* be impossible. */ |
||
1235 | sp->stats.rx_errors++; |
||
1236 | printk(KERN_ERR "%s: Anomalous event in speedo_rx(), " |
||
1237 | "status %8.8x.\n", |
||
1238 | dev->name, status); |
||
1239 | } |
||
1240 | } else { |
||
1241 | int pkt_len = sp->rx_ringp[entry]->count & 0x3fff; |
||
1242 | struct sk_buff *skb; |
||
1243 | |||
1244 | /* Check if the packet is long enough to just accept without |
||
1245 | copying to a properly sized skbuff. */ |
||
1246 | if (pkt_len < rx_copybreak |
||
1247 | && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { |
||
1248 | skb->dev = dev; |
||
1249 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ |
||
1250 | /* 'skb_put()' points to the start of sk_buff data area. */ |
||
1251 | #if 0 |
||
1252 | /* Packet is in one chunk -- we can copy + cksum. */ |
||
1253 | eth_copy_and_sum(skb, |
||
1254 | bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr), |
||
1255 | pkt_len, 0); |
||
1256 | skb_put(skb, pkt_len); |
||
1257 | #else |
||
1258 | memcpy(skb_put(skb, pkt_len), |
||
1259 | bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr), pkt_len); |
||
1260 | #endif |
||
1261 | } else { |
||
1262 | void *temp; |
||
1263 | /* Pass up the already-filled skbuff. */ |
||
1264 | skb = sp->rx_skbuff[entry]; |
||
1265 | if (skb == NULL) { |
||
1266 | printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n", |
||
1267 | dev->name); |
||
1268 | break; |
||
1269 | } |
||
1270 | sp->rx_skbuff[entry] = NULL; |
||
1271 | temp = skb_put(skb, pkt_len); |
||
1272 | if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp) |
||
1273 | printk(KERN_ERR "%s: Rx consistency error -- the skbuff " |
||
1274 | "addresses do not match in speedo_rx: %p vs. %p " |
||
1275 | "/ %p.\n", dev->name, |
||
1276 | bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr), |
||
1277 | skb->head, temp); |
||
1278 | sp->rx_ringp[entry] = NULL; |
||
1279 | } |
||
1280 | skb->protocol = eth_type_trans(skb, dev); |
||
1281 | netif_rx(skb); |
||
1282 | sp->stats.rx_packets++; |
||
1283 | } |
||
1284 | entry = (++sp->cur_rx) % RX_RING_SIZE; |
||
1285 | } |
||
1286 | |||
1287 | /* Refill the Rx ring buffers. */ |
	for (; sp->dirty_rx < sp->cur_rx; sp->dirty_rx++) {
		struct RxFD *rxf;
		entry = sp->dirty_rx % RX_RING_SIZE;
		if (sp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			/* Get a fresh skbuff to replace the consumed one. */
			skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
			sp->rx_skbuff[entry] = skb;
			if (skb == NULL) {
				sp->rx_ringp[entry] = NULL;
				break;			/* Better luck next time! */
			}
			rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
			skb->dev = dev;
			skb_reserve(skb, sizeof(struct RxFD));
			rxf->rx_buf_addr = virt_to_bus(skb->tail);
		} else {
			rxf = sp->rx_ringp[entry];
		}
		rxf->status = 0xC0000001;	/* '1' for driver use only. */
		rxf->link = 0;				/* None yet. */
		rxf->count = 0;
		rxf->size = PKT_BUF_SZ;
		sp->last_rxf->link = virt_to_bus(rxf);
		sp->last_rxf->status &= ~0xC0000000;
		sp->last_rxf = rxf;
	}

	sp->last_rx_time = jiffies;
	return 0;
}

static int
speedo_close(struct device *dev)
{
	long ioaddr = dev->base_addr;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int i;

	dev->start = 0;
	dev->tbusy = 1;

	if (speedo_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer(&sp->timer);

	/* Disable interrupts, and stop the chip's Rx process. */
	outw(INT_MASK, ioaddr + SCBCmd);
	outw(INT_MASK | RX_ABORT, ioaddr + SCBCmd);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = 0;
		/* Clear the Rx descriptors. */
		if (skb)
			dev_free_skb(skb);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = 0;
		/* Clear the Tx descriptors. */
		if (skb)
			dev_free_skb(skb);
	}
	if (sp->mc_setup_frm) {
		kfree(sp->mc_setup_frm);
		sp->mc_setup_frm_len = 0;
	}

	/* Print a few items for debugging. */
	if (speedo_debug > 3) {
		int phy_num = sp->phy[0] & 0x1f;
		printk(KERN_DEBUG "%s: Printing Rx ring (next to receive into %d).\n",
			   dev->name, sp->cur_rx);

		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG " Rx ring entry %d %8.8x.\n",
				   i, (int)sp->rx_ringp[i]->status);

		for (i = 0; i < 5; i++)
			printk(KERN_DEBUG " PHY index %d register %d is %4.4x.\n",
				   phy_num, i, mdio_read(ioaddr, phy_num, i));
		for (i = 21; i < 26; i++)
			printk(KERN_DEBUG " PHY index %d register %d is %4.4x.\n",
				   phy_num, i, mdio_read(ioaddr, phy_num, i));
	}
	MOD_DEC_USE_COUNT;

	return 0;
}

/* The Speedo-3 has an especially awkward and unusable method of getting
   statistics out of the chip. It takes an unpredictable length of time
   for the dump-stats command to complete. To avoid a busy-wait loop we
   update the stats with the previous dump results, and then trigger a
   new dump.

   These problems are mitigated by the current /proc implementation, which
   calls this routine first to judge the output length, and then to emit the
   output.

   Oh, and incoming frames are dropped while executing dump-stats!
*/
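/* lstats.done_marker is the completion word the chip writes at the end of the
   dump area; 0xA007 here means the previous dump-and-reset finished. The
   counters are folded in, the marker is zeroed, and only then is a new
   CU_DUMPSTATS queued, so a half-written dump is never read. */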
static struct enet_statistics *
speedo_get_stats(struct device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;

	if (sp->lstats.done_marker == 0xA007) {	/* Previous dump finished */
		sp->stats.tx_aborted_errors += sp->lstats.tx_coll16_errs;
		sp->stats.tx_window_errors += sp->lstats.tx_late_colls;
		sp->stats.tx_fifo_errors += sp->lstats.tx_underruns;
		sp->stats.tx_fifo_errors += sp->lstats.tx_lost_carrier;
		/*sp->stats.tx_deferred += sp->lstats.tx_deferred;*/
		sp->stats.collisions += sp->lstats.tx_total_colls;
		sp->stats.rx_crc_errors += sp->lstats.rx_crc_errs;
		sp->stats.rx_frame_errors += sp->lstats.rx_align_errs;
		sp->stats.rx_over_errors += sp->lstats.rx_resource_errs;
		sp->stats.rx_fifo_errors += sp->lstats.rx_overrun_errs;
		sp->stats.rx_length_errors += sp->lstats.rx_runt_errs;
		sp->lstats.done_marker = 0x0000;
		if (dev->start) {
			wait_for_cmd_done(ioaddr + SCBCmd);
			outw(CU_DUMPSTATS, ioaddr + SCBCmd);
		}
	}
	return &sp->stats;
}

static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	u16 *data = (u16 *)&rq->ifr_data;
	int phy = sp->phy[0] & 0x1f;

	switch(cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = phy;
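		/* Fall through and read register data[1] from that PHY. */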
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = mdio_read(ioaddr, data[0], data[1]);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!suser())
			return -EPERM;
		mdio_write(ioaddr, data[0], data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
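
/* These private ioctls follow the usual MII convention: data[0] is the PHY
   address, data[1] the register number, data[2] a value to write and data[3]
   the value read back, all packed directly into the ifr_data words of the
   ifreq.  A user program might drive them roughly like this (an illustrative
   sketch only, not part of the driver):

	struct ifreq ifr;
	unsigned short *data = (unsigned short *)&ifr.ifr_data;
	int skfd = socket(AF_INET, SOCK_DGRAM, 0);

	strcpy(ifr.ifr_name, "eth0");
	data[1] = 1;							// MII status register
	ioctl(skfd, SIOCDEVPRIVATE, &ifr);		// data[0] = PHY address
	ioctl(skfd, SIOCDEVPRIVATE + 1, &ifr);	// value returned in data[3]
*/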

/* Set or clear the multicast filter for this adaptor.
   This is very ugly with Intel chips -- we usually have to execute an
   entire configuration command, plus process a multicast command.
   This is complicated. We must put a large configuration command and
   an arbitrarily-sized multicast command in the transmit list.
   To minimize the disruption -- the previous command might have already
   loaded the link -- we convert the current command block, normally a Tx
   command, into a no-op and link it to the new command.
*/
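/* Three cases are handled below: a change of receive mode is sent as a full
   CmdConfigure frame built in a tx_ring[] slot; a short multicast list
   (fewer than four addresses) is loaded with a CmdMulticastList built in a
   tx_ring[] slot as well; a longer list uses the separately allocated
   mc_setup_frm.  Setting rx_mode to -1 marks the new mode as not yet
   applied so that it is retried later. */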
static void
set_rx_mode(struct device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	struct descriptor_net *last_cmd;
	char new_rx_mode;
	unsigned long flags;
	int entry, i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		new_rx_mode = 3;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			   dev->mc_count > multicast_filter_limit) {
		new_rx_mode = 1;
	} else
		new_rx_mode = 0;

	if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
		/* The Tx ring is full -- don't add anything! Presumably the new mode
		   is in config_cmd_data and will be added anyway. */
		sp->rx_mode = -1;
		return;
	}

	if (new_rx_mode != sp->rx_mode) {
		u8 *config_cmd_data;

		save_flags(flags);		/* Lock to protect sp->cur_tx. */
		cli();
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry];
		restore_flags(flags);

		sp->tx_skbuff[entry] = 0;			/* Redundant. */
		sp->tx_ring[entry].status = (CmdSuspend | CmdConfigure) << 16;
		sp->tx_ring[entry].link =
			virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
		/* Construct a full CmdConfig frame. */
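		/* Bytes patched below: byte 1 packs the Tx/Rx FIFO thresholds,
		   bytes 4 and 5 hold the Rx and Tx DMA burst counts (the Tx count
		   with its high bit set), byte 15 bit 0 selects promiscuous mode,
		   byte 19 forces full duplex, and byte 21 chooses between
		   accept-all-multicast (0x0D) and the normal filter (0x05). */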
		memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
		config_cmd_data[1] = (txfifo << 4) | rxfifo;
		config_cmd_data[4] = rxdmacount;
		config_cmd_data[5] = txdmacount + 0x80;
		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
		config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
			config_cmd_data[15] |= 0x80;
			config_cmd_data[8] = 0;
		}
		/* Trigger the command unit resume. */
		last_cmd->command &= ~CmdSuspend;
		wait_for_cmd_done(ioaddr + SCBCmd);
		outw(CU_RESUME, ioaddr + SCBCmd);
	}

	if (new_rx_mode == 0 && dev->mc_count < 4) {
		/* The simple case of 0-3 multicast list entries occurs often, and
		   fits within one tx_ring[] entry. */
1518 | struct dev_mc_list *mclist; |
||
1519 | u16 *setup_params, *eaddrs; |
||
1520 | |||
1521 | save_flags(flags); /* Lock to protect sp->cur_tx. */ |
||
1522 | cli(); |
||
1523 | entry = sp->cur_tx++ % TX_RING_SIZE; |
||
1524 | last_cmd = sp->last_cmd; |
||
1525 | sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry]; |
||
1526 | restore_flags(flags); |
||
1527 | |||
1528 | sp->tx_skbuff[entry] = 0; |
||
1529 | sp->tx_ring[entry].status = (CmdSuspend | CmdMulticastList) << 16; |
||
1530 | sp->tx_ring[entry].link = |
||
1531 | virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]); |
||
1532 | sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */ |
||
1533 | setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr; |
||
1534 | *setup_params++ = dev->mc_count*6; |
||
1535 | /* Fill in the multicast addresses. */ |
||
1536 | for (i = 0, mclist = dev->mc_list; i < dev->mc_count; |
||
1537 | i++, mclist = mclist->next) { |
||
1538 | eaddrs = (u16 *)mclist->dmi_addr; |
||
1539 | *setup_params++ = *eaddrs++; |
||
1540 | *setup_params++ = *eaddrs++; |
||
1541 | *setup_params++ = *eaddrs++; |
||
1542 | } |
||
1543 | |||
1544 | last_cmd->command &= ~CmdSuspend; |
||
1545 | /* Immediately trigger the command unit resume. */ |
||
1546 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
1547 | outw(CU_RESUME, ioaddr + SCBCmd); |
||
1548 | } else if (new_rx_mode == 0) { |
||
1549 | struct dev_mc_list *mclist; |
||
1550 | u16 *setup_params, *eaddrs; |
||
1551 | struct descriptor_net *mc_setup_frm = sp->mc_setup_frm; |
||
1552 | int i; |
||
1553 | |||
1554 | if (sp->mc_setup_frm_len < 10 + dev->mc_count*6 |
||
1555 | || sp->mc_setup_frm == NULL) { |
||
1556 | /* Allocate a full setup frame, 10bytes + <max addrs>. */ |
||
1557 | if (sp->mc_setup_frm) |
||
1558 | kfree(sp->mc_setup_frm); |
||
1559 | sp->mc_setup_busy = 0; |
||
1560 | sp->mc_setup_frm_len = 10 + multicast_filter_limit*6; |
||
1561 | sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC); |
||
1562 | if (sp->mc_setup_frm == NULL) { |
||
1563 | printk(KERN_ERR "%s: Failed to allocate a setup frame.\n", |
||
1564 | dev->name); |
||
1565 | sp->rx_mode = -1; /* We failed, try again. */ |
||
1566 | return; |
||
1567 | } |
||
1568 | } |
||
1569 | /* If we are busy, someone might be quickly adding to the MC list. |
||
1570 | Try again later when the list changes stop. */ |
||
1571 | if (sp->mc_setup_busy) { |
||
1572 | sp->rx_mode = -1; |
||
1573 | return; |
||
1574 | } |
||
1575 | mc_setup_frm = sp->mc_setup_frm; |
||
1576 | /* Fill the setup frame. */ |
||
1577 | if (speedo_debug > 1) |
||
1578 | printk(KERN_DEBUG "%s: Constructing a setup frame at %p, " |
||
1579 | "%d bytes.\n", |
||
1580 | dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len); |
||
1581 | mc_setup_frm->status = 0; |
||
1582 | mc_setup_frm->command = CmdSuspend | CmdIntr | CmdMulticastList; |
||
1583 | /* Link set below. */ |
||
1584 | setup_params = (u16 *)&mc_setup_frm->params; |
||
1585 | *setup_params++ = dev->mc_count*6; |
||
1586 | /* Fill in the multicast addresses. */ |
||
1587 | for (i = 0, mclist = dev->mc_list; i < dev->mc_count; |
||
1588 | i++, mclist = mclist->next) { |
||
1589 | eaddrs = (u16 *)mclist->dmi_addr; |
||
1590 | *setup_params++ = *eaddrs++; |
||
1591 | *setup_params++ = *eaddrs++; |
||
1592 | *setup_params++ = *eaddrs++; |
||
1593 | } |
||
1594 | |||
1595 | /* Disable interrupts while playing with the Tx Cmd list. */ |
||
1596 | save_flags(flags); |
||
1597 | cli(); |
||
1598 | entry = sp->cur_tx++ % TX_RING_SIZE; |
||
1599 | last_cmd = sp->last_cmd; |
||
1600 | sp->last_cmd = mc_setup_frm; |
||
1601 | sp->mc_setup_busy++; |
||
1602 | restore_flags(flags); |
||
1603 | |||
1604 | /* Change the command to a NoOp, pointing to the CmdMulti command. */ |
||
1605 | sp->tx_skbuff[entry] = 0; |
||
1606 | sp->tx_ring[entry].status = CmdNOp << 16; |
||
1607 | sp->tx_ring[entry].link = virt_to_bus(mc_setup_frm); |
||
1608 | |||
1609 | /* Set the link in the setup frame. */ |
||
1610 | mc_setup_frm->link = |
||
1611 | virt_to_bus(&(sp->tx_ring[(entry+1) % TX_RING_SIZE])); |
||
1612 | |||
1613 | last_cmd->command &= ~CmdSuspend; |
||
1614 | /* Immediately trigger the command unit resume. */ |
||
1615 | wait_for_cmd_done(ioaddr + SCBCmd); |
||
1616 | outw(CU_RESUME, ioaddr + SCBCmd); |
||
1617 | if (speedo_debug > 5) |
||
1618 | printk(" CmdMCSetup frame length %d in entry %d.\n", |
||
1619 | dev->mc_count, entry); |
||
1620 | } |
||
1621 | |||
1622 | sp->rx_mode = new_rx_mode; |
||
1623 | } |

#ifdef MODULE

int
init_module(void)
{
	int cards_found;

	if (debug >= 0)
		speedo_debug = debug;
	if (speedo_debug)
		printk(KERN_INFO "%s", version);

	root_speedo_dev = NULL;
	cards_found = eepro100_init(NULL);
	return cards_found ? 0 : -ENODEV;
}
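/* If the module is built with debug as a load-time parameter (the usual
   arrangement for this driver), the level can be set with e.g.
   "insmod eepro100.o debug=2"; a negative value leaves speedo_debug at its
   compiled-in setting. */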

void
cleanup_module(void)
{
	struct device *next_dev;

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	while (root_speedo_dev) {
		next_dev = ((struct speedo_private *)root_speedo_dev->priv)->next_module;
		unregister_netdev(root_speedo_dev);
		release_region(root_speedo_dev->base_addr, SPEEDO3_TOTAL_SIZE);
		kfree(root_speedo_dev);
		root_speedo_dev = next_dev;
	}
}
#else /* not MODULE */
int eepro100_probe(struct device *dev)
{
	int cards_found = 0;

	cards_found = eepro100_init(dev);

	if (speedo_debug > 0 && cards_found)
		printk(version);

	return cards_found ? 0 : -ENODEV;
}
#endif /* MODULE */

/*
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 * End:
 */