Subversion Repositories shark

Compare Revisions

Ignore whitespace Rev 628 → Rev 629

/shark/trunk/drivers/net/netbuff.h
0,0 → 1,78
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: netbuff.h,v 1.4 2004-05-11 14:30:50 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.4 $
Last update: $Date: 2004-05-11 14:30:50 $
------------
**/
 
/*
* Copyright (C) 2000 Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/* Author: Luca Abeni */
/* Date: 25/9/1997 */
/* File: NetBuff.H */
/* Revision: 1.00 */
 
#ifndef __NETBUFF_H__
#define __NETBUFF_H__
 
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*
 * A pool of nbuffs equally-sized network buffers, guarded by a
 * counting semaphore.  NOTE(review): field semantics inferred from the
 * accessor prototypes below -- the implementation is not in this file;
 * confirm against netbuff.c.
 */
typedef struct netbuff{
BYTE *b;          /* storage area for the buffers (presumably nbuffs * buffdim bytes -- TODO confirm in netbuff_init) */
BYTE **pb;        /* per-buffer pointer table */
BYTE *free;       /* free-buffer bookkeeping -- TODO confirm exact encoding */
BYTE nbuffs;      /* number of buffers in this pool */
sem_t buffersem;  /* presumably counts available buffers; verify against netbuff_get/release */
} NETBUFF;
 
/* Initialize pool netb with nbuffs buffers of buffdim bytes each. */
void netbuff_init(struct netbuff *netb, BYTE nbuffs, WORD buffdim);
/* Obtain a buffer from netb; 'to' looks like a timeout/blocking flag -- TODO confirm semantics. */
void *netbuff_get(struct netbuff *netb, BYTE to);
/* Variant of netbuff_get -- presumably scans buffers in order; verify in netbuff.c. */
void *netbuff_sequentialget(struct netbuff *netb, BYTE to);
/* Return buffer m (obtained from one of the get functions) to pool netb. */
void netbuff_release(struct netbuff *netb, void *m);
 
__END_DECLS
 
#endif
/shark/trunk/drivers/net/ne.c
0,0 → 1,838
/* ne.c: A general non-shared-memory NS8390 ethernet driver for linux. */
/*
Written 1992-94 by Donald Becker.
 
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
 
This software may be used and distributed according to the terms
of the GNU Public License, incorporated herein by reference.
 
The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
Center of Excellence in Space Data and Information Sciences
Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
 
This driver should work with many programmed-I/O 8390-based ethernet
boards. Currently it supports the NE1000, NE2000, many clones,
and some Cabletron products.
 
Changelog:
 
Paul Gortmaker : use ENISR_RDC to monitor Tx PIO uploads, made
sanity checks and bad clone support optional.
Paul Gortmaker : new reset code, reset card after probe at boot.
Paul Gortmaker : multiple card support for module users.
Paul Gortmaker : Support for PCI ne2k clones, similar to lance.c
Paul Gortmaker : Allow users with bad cards to avoid full probe.
Paul Gortmaker : PCI probe changes, more PCI cards supported.
rjohnson@analogic.com : Changed init order so an interrupt will only
occur after memory is allocated for dev->priv. Deallocated memory
last in cleanup_modue()
 
*/
 
/* Routines for the NatSemi-based designs (NE[12]000). */
 
static const char *version =
"ne.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
 
 
#include <linux/module.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/system.h>
#include <asm/io.h>
 
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "8390.h"
 
/* Some defines that people can play with if so inclined. */
 
/* Do we support clones that don't adhere to 14,15 of the SAprom ? */
#define SUPPORT_NE_BAD_CLONES
 
/* Do we perform extra sanity checks on stuff ? */
/* #define NE_SANITY_CHECK */
 
/* Do we implement the read before write bugfix ? */
/* #define NE_RW_BUGFIX */
 
/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
/* #define PACKETBUF_MEMSIZE 0x40 */
 
/* A zero-terminated list of I/O addresses to be probed at boot. */
#ifndef MODULE
static unsigned int netcard_portlist[] __initdata = {
0x300, 0x280, 0x320, 0x340, 0x360, 0x380, 0
};
#endif
 
#ifdef CONFIG_PCI
/* Ack! People are making PCI ne2000 clones! Oh the horror, the horror... */
/* Known PCI NE2000 clones: (PCI vendor ID, device ID) -> printable name.
   Scanned by ne_probe_pci(); a zero vendor entry terminates the list. */
static struct { unsigned short vendor, dev_id; char *name; }
pci_clone_list[] __initdata = {
{PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8029, "Realtek 8029" },
{PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940, "Winbond 89C940" },
{PCI_VENDOR_ID_COMPEX, PCI_DEVICE_ID_COMPEX_RL2000, "Compex ReadyLink 2000" },
{PCI_VENDOR_ID_KTI, PCI_DEVICE_ID_KTI_ET32P2, "KTI ET32P2" },
{PCI_VENDOR_ID_NETVIN, PCI_DEVICE_ID_NETVIN_NV5000SC, "NetVin NV5000" },
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C926, "VIA 82C926 Amazon" },
{PCI_VENDOR_ID_SURECOM, PCI_DEVICE_ID_SURECOM_NE34, "SureCom NE34"},
{0,}
};
 
/* Non-zero enables the PCI scan in ne_probe() (module parameter below). */
static int probe_pci = 1;
#endif
 
#ifdef SUPPORT_NE_BAD_CLONES
/* A list of bad clones that we none-the-less recognize. */
/* Bad clones are recognized by the first three bytes of the station
   address PROM; name8/name16 name the 8-bit and 16-bit model variants.
   A NULL name8 entry terminates the list (see the scan in ne_probe1). */
static struct { const char *name8, *name16; unsigned char SAprefix[4];}
bad_clone_list[] __initdata = {
{"DE100", "DE200", {0x00, 0xDE, 0x01,}},
{"DE120", "DE220", {0x00, 0x80, 0xc8,}},
{"DFI1000", "DFI2000", {'D', 'F', 'I',}}, /* Original, eh? */
{"EtherNext UTP8", "EtherNext UTP16", {0x00, 0x00, 0x79}},
{"NE1000","NE2000-invalid", {0x00, 0x00, 0xd8}}, /* Ancient real NE1000. */
{"NN1000", "NN2000", {0x08, 0x03, 0x08}}, /* Outlaw no-name clone. */
{"4-DIM8","4-DIM16", {0x00,0x00,0x4d,}}, /* Outlaw 4-Dimension cards. */
{"Con-Intl_8", "Con-Intl_16", {0x00, 0x00, 0x24}}, /* Connect Int'nl */
{"ET-100","ET-200", {0x00, 0x45, 0x54}}, /* YANG and YA clone */
{"COMPEX","COMPEX16",{0x00,0x80,0x48}}, /* Broken ISA Compex cards */
{"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
{"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
{0,}
};
#endif
 
/* ---- No user-serviceable parts below ---- */
 
#define NE_BASE (dev->base_addr)
#define NE_CMD 0x00
#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
#define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
#define NE_IO_EXTENT 0x20
 
#define NE1SM_START_PG 0x20 /* First page of TX buffer */
#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
 
/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
static unsigned int pci_irq_line = 0;
 
int ne_probe(struct device *dev);
static int ne_probe1(struct device *dev, int ioaddr);
#ifdef CONFIG_PCI
static int ne_probe_pci(struct device *dev);
#endif
 
static int ne_open(struct device *dev);
static int ne_close(struct device *dev);
 
static void ne_reset_8390(struct device *dev);
static void ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
int ring_page);
static void ne_block_input(struct device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void ne_block_output(struct device *dev, const int count,
const unsigned char *buf, const int start_page);
 
/* Probe for various non-shared-memory ethercards.
 
NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
the SAPROM, while other supposed NE2000 clones must be detected by their
SA prefix.
 
Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
mode results in doubled values, which can be detected and compensated for.
 
The probe is also responsible for initializing the card and filling
in the 'dev' and 'ei_status' structures.
 
We use the minimum memory size for some ethercard product lines, iff we can't
distinguish models. You can increase the packet buffer size by setting
PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are:
E1010 starts at 0x100 and ends at 0x2000.
E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
E2010 starts at 0x100 and ends at 0x4000.
E2010-x starts at 0x100 and ends at 0xffff. */
 
#ifdef HAVE_DEVLIST
struct netdev_entry netcard_drv =
{"ne", ne_probe1, NE_IO_EXTENT, netcard_portlist};
#else
 
/*
* Note that at boot, this probe only picks up one card at a time, even for
* multiple PCI ne2k cards. Use "ether=0,0,eth1" if you have a second PCI
* ne2k card. This keeps things consistent regardless of the bus type of
* the card.
*/
 
/*
 * Top-level probe.  Order: (1) a user-supplied i/o address (>0x1ff)
 * is probed directly; (2) a small non-zero base_addr means "don't
 * probe"; (3) the PCI clone scan; (4) the semi-risky ISA port scan,
 * which is compiled out for modules.
 * Returns 0 on success.  Failures return POSITIVE ENXIO/ENODEV --
 * this is the historical pre-2.2 convention used throughout the file.
 */
__initfunc(int ne_probe(struct device *dev))
{
int base_addr = dev ? dev->base_addr : 0;
 
/* First check any supplied i/o locations. User knows best. <cough> */
if (base_addr > 0x1ff) /* Check a single specified location. */
return ne_probe1(dev, base_addr);
else if (base_addr != 0) /* Don't probe at all. */
return ENXIO;
 
#ifdef CONFIG_PCI
/* Then look for any installed PCI clones */
if (probe_pci && pci_present() && (ne_probe_pci(dev) == 0))
return 0;
#endif
 
#ifndef MODULE
/* Last resort. The semi-risky ISA auto-probe. */
for (base_addr = 0; netcard_portlist[base_addr] != 0; base_addr++) {
int ioaddr = netcard_portlist[base_addr];
/* Skip ports some other driver already owns. */
if (check_region(ioaddr, NE_IO_EXTENT))
continue;
if (ne_probe1(dev, ioaddr) == 0)
return 0;
}
#endif
 
return ENODEV;
}
#endif
 
#ifdef CONFIG_PCI
/*
 * Scan the PCI bus for the clones in pci_clone_list[].  On a hit the
 * BIOS-assigned IRQ is stashed in the file-scope pci_irq_line (read by
 * ne_probe1) and the card's i/o base is probed.  Sets up at most ONE
 * card per call; already-claimed i/o regions are skipped so repeated
 * calls find successive cards.  Returns 0 on success, negative errno
 * on failure (note: the rest of the file uses positive codes).
 */
__initfunc(static int ne_probe_pci(struct device *dev))
{
int i;
 
for (i = 0; pci_clone_list[i].vendor != 0; i++) {
struct pci_dev *pdev = NULL;
unsigned int pci_ioaddr;
 
while ((pdev = pci_find_device(pci_clone_list[i].vendor, pci_clone_list[i].dev_id, pdev))) {
pci_ioaddr = pdev->base_address[0] & PCI_BASE_ADDRESS_IO_MASK;
/* Avoid already found cards from previous calls */
if (check_region(pci_ioaddr, NE_IO_EXTENT))
continue;
pci_irq_line = pdev->irq;
if (pci_irq_line) break; /* Found it */
}
if (!pdev)
continue;
printk(KERN_INFO "ne.c: PCI BIOS reports %s at i/o %x, irq %d.\n",
pci_clone_list[i].name,
pci_ioaddr, pci_irq_line);
printk("*\n* Use of the PCI-NE2000 driver with this card is recommended!\n*\n");
if (ne_probe1(dev, pci_ioaddr) != 0) { /* Shouldn't happen. */
printk(KERN_ERR "ne.c: Probe of PCI card at %x failed.\n", pci_ioaddr);
pci_irq_line = 0;
return -ENXIO;
}
/* pci_irq_line is only meaningful while probing this one card. */
pci_irq_line = 0;
return 0;
}
return -ENODEV;
}
#endif /* CONFIG_PCI */
 
/*
 * Probe a single i/o address for an NE*000.
 * Steps: verify an 8390 responds; reset the card; read the station
 * address PROM (which also tells byte- vs word-wide cards apart);
 * classify the card (NE1000/NE2000/Ctron/known bad clone); pick an
 * IRQ; then fill in dev and ei_status and hook the 8390 callbacks.
 * Returns 0 on success.  Error returns mix positive (ENODEV, ENXIO,
 * EAGAIN) and negative (-ENOSYS, -ENOMEM) codes -- historical; callers
 * in this file only test for zero.
 */
__initfunc(static int ne_probe1(struct device *dev, int ioaddr))
{
int i;
unsigned char SA_prom[32];
int wordlength = 2;
const char *name = NULL;
int start_page, stop_page;
int neX000, ctron, copam, bad_card;
int reg0 = inb_p(ioaddr);
static unsigned version_printed = 0;
 
/* Floating bus: nothing is decoding this address. */
if (reg0 == 0xFF)
return ENODEV;
 
/* Do a preliminary verification that we have a 8390. */
{
int regd;
outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
regd = inb_p(ioaddr + 0x0d);
outb_p(0xff, ioaddr + 0x0d);
outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
outb_p(reg0, ioaddr);
outb_p(regd, ioaddr + 0x0d); /* Restore the old values. */
return ENODEV;
}
}
 
/* Pull in the generic 8390 core before touching ei_* below. */
if (load_8390_module("ne.c"))
return -ENOSYS;
 
/* We should have a "dev" from Space.c or the static module table. */
if (dev == NULL)
{
printk(KERN_ERR "ne.c: Passed a NULL device.\n");
dev = init_etherdev(0, 0);
}
 
if (ei_debug && version_printed++ == 0)
printk(version);
 
printk(KERN_INFO "NE*000 ethercard probe at %3x:", ioaddr);
 
/* A user with a poor card that fails to ack the reset, or that
does not have a valid 0x57,0x57 signature can still use this
without having to recompile. Specifying an i/o address along
with an otherwise unused dev->mem_end value of "0xBAD" will
cause the driver to skip these parts of the probe. */
 
bad_card = ((dev->base_addr != 0) && (dev->mem_end == 0xbad));
 
/* Reset card. Who knows what dain-bramaged state it was left in. */
 
{
unsigned long reset_start_time = jiffies;
 
/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
outb(inb(ioaddr + NE_RESET), ioaddr + NE_RESET);
 
/* Wait up to ~20ms (2*HZ/100) for the reset-complete bit. */
while ((inb_p(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
if (jiffies - reset_start_time > 2*HZ/100) {
if (bad_card) {
printk(" (warning: no reset ack)");
break;
} else {
printk(" not found (no reset ack).\n");
return ENODEV;
}
}
outb_p(0xff, ioaddr + EN0_ISR); /* Ack all intr. */
}
 
/* Read the 16 bytes of station address PROM.
We must first initialize registers, similar to NS8390_init(eifdev, 0).
We can't reliably read the SAPROM address without this.
(I learned the hard way!). */
{
struct {unsigned char value, offset; } program_seq[] =
{
{E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
{0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */
{0x00, EN0_RCNTLO}, /* Clear the count regs. */
{0x00, EN0_RCNTHI},
{0x00, EN0_IMR}, /* Mask completion irq. */
{0xFF, EN0_ISR},
{E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */
{E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */
{32, EN0_RCNTLO},
{0x00, EN0_RCNTHI},
{0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */
{0x00, EN0_RSARHI},
{E8390_RREAD+E8390_START, E8390_CMD},
};
 
for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
 
}
/* A word-wide card read in byte mode returns each PROM byte twice. */
for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
SA_prom[i] = inb(ioaddr + NE_DATAPORT);
SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
if (SA_prom[i] != SA_prom[i+1])
wordlength = 1;
}
 
/* At this point, wordlength *only* tells us if the SA_prom is doubled
up or not because some broken PCI cards don't respect the byte-wide
request in program_seq above, and hence don't have doubled up values.
These broken cards would otherwise be detected as an ne1000. */
 
/* Un-double the PROM contents for word-wide cards. */
if (wordlength == 2)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
 
if (pci_irq_line || ioaddr >= 0x400)
wordlength = 2; /* Catch broken PCI cards mentioned above. */
 
if (wordlength == 2)
{
/* We must set the 8390 for word mode. */
outb_p(0x49, ioaddr + EN0_DCFG);
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
} else {
start_page = NE1SM_START_PG;
stop_page = NE1SM_STOP_PG;
}
 
/* Classify the card by its PROM signature bytes. */
neX000 = (SA_prom[14] == 0x57 && SA_prom[15] == 0x57);
ctron = (SA_prom[0] == 0x00 && SA_prom[1] == 0x00 && SA_prom[2] == 0x1d);
copam = (SA_prom[14] == 0x49 && SA_prom[15] == 0x00);
 
/* Set up the rest of the parameters. */
if (neX000 || bad_card || copam) {
name = (wordlength == 2) ? "NE2000" : "NE1000";
}
else if (ctron)
{
name = (wordlength == 2) ? "Ctron-8" : "Ctron-16";
start_page = 0x01;
stop_page = (wordlength == 2) ? 0x40 : 0x20;
}
else
{
#ifdef SUPPORT_NE_BAD_CLONES
/* Ack! Well, there might be a *bad* NE*000 clone there.
Check for total bogus addresses. */
for (i = 0; bad_clone_list[i].name8; i++)
{
if (SA_prom[0] == bad_clone_list[i].SAprefix[0] &&
SA_prom[1] == bad_clone_list[i].SAprefix[1] &&
SA_prom[2] == bad_clone_list[i].SAprefix[2])
{
if (wordlength == 2)
{
name = bad_clone_list[i].name16;
} else {
name = bad_clone_list[i].name8;
}
break;
}
}
if (bad_clone_list[i].name8 == NULL)
{
printk(" not found (invalid signature %2.2x %2.2x).\n",
SA_prom[14], SA_prom[15]);
return ENXIO;
}
#else
printk(" not found.\n");
return ENXIO;
#endif
}
 
if (pci_irq_line)
dev->irq = pci_irq_line;
 
/* No IRQ given: autoirq detection is stubbed out in this port --
the IRQ is simply forced to 9 (see the !!! markers). */
if (dev->irq < 2)
{
//!!!autoirq_setup(0);
outb_p(0x50, ioaddr + EN0_IMR); /* Enable one interrupt. */
outb_p(0x00, ioaddr + EN0_RCNTLO);
outb_p(0x00, ioaddr + EN0_RCNTHI);
outb_p(E8390_RREAD+E8390_START, ioaddr); /* Trigger it... */
mdelay(10); /* wait 10ms for interrupt to propagate */
outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
dev->irq = 9; //!!!autoirq_report(0);
if (ei_debug > 2)
printk(" autoirq is %d\n", dev->irq);
} else if (dev->irq == 2)
/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
or don't know which one to set. */
dev->irq = 9;
 
if (! dev->irq) {
printk(" failed to detect IRQ line.\n");
return EAGAIN;
}
 
/* Allocate dev->priv and fill in 8390 specific dev fields. */
if (ethdev_init(dev))
{
printk (" unable to get memory for dev->priv.\n");
return -ENOMEM;
}
/* Snarf the interrupt now. There's no point in waiting since we cannot
share and the board will usually be enabled. */
 
{
int irqval = request_irq(dev->irq, ei_interrupt,
pci_irq_line ? SA_SHIRQ : 0, name, dev);
if (irqval) {
printk (" unable to get IRQ %d (irqval=%d).\n", dev->irq, irqval);
kfree(dev->priv);
dev->priv = NULL;
return EAGAIN;
}
}
dev->base_addr = ioaddr;
request_region(ioaddr, NE_IO_EXTENT, name);
 
/* Copy (and print) the MAC address out of the de-doubled PROM. */
for(i = 0; i < ETHER_ADDR_LEN; i++) {
printk(" %2.2x", SA_prom[i]);
dev->dev_addr[i] = SA_prom[i];
}
 
printk("\n%s: %s found at %x, using IRQ %d.\n",
dev->name, name, ioaddr, dev->irq);
 
ei_status.name = name;
ei_status.tx_start_page = start_page;
ei_status.stop_page = stop_page;
ei_status.word16 = (wordlength == 2);
 
ei_status.rx_start_page = start_page + TX_PAGES;
#ifdef PACKETBUF_MEMSIZE
/* Allow the packet buffer size to be overridden by know-it-alls. */
ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
#endif
 
/* Hook this card's PIO transfer routines into the generic 8390 core. */
ei_status.reset_8390 = &ne_reset_8390;
ei_status.block_input = &ne_block_input;
ei_status.block_output = &ne_block_output;
ei_status.get_8390_hdr = &ne_get_8390_hdr;
dev->open = &ne_open;
dev->stop = &ne_close;
NS8390_init(dev, 0);
return 0;
}
 
/*
 * Bring the interface up: generic 8390 startup, then pin the module
 * so it cannot be unloaded while the interface is open.
 * Always returns 0.
 */
static int ne_open(struct device *dev)
{
ei_open(dev);
MOD_INC_USE_COUNT;
return 0;
}
 
/*
 * Shut the interface down: generic 8390 shutdown, then drop the module
 * reference taken in ne_open().  Always returns 0.
 */
static int ne_close(struct device *dev)
{
if (ei_debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
ei_close(dev);
MOD_DEC_USE_COUNT;
return 0;
}
 
/* Hard reset the card. This used to pause for the same period that a
8390 reset command required, but that shouldn't be necessary. */
 
/*
 * Hard-reset the 8390 by reading and writing the NE reset port, then
 * poll (up to ~20ms) for the reset-complete ISR bit and ack it.
 * Also clears the driver's txing/dmaing soft state.
 */
static void ne_reset_8390(struct device *dev)
{
unsigned long reset_start_time = jiffies;
 
if (ei_debug > 1)
printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
 
/* DON'T change these to inb_p/outb_p or reset will fail on clones. */
outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
 
ei_status.txing = 0;
ei_status.dmaing = 0;
 
/* This check _should_not_ be necessary, omit eventually. */
while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
if (jiffies - reset_start_time > 2*HZ/100) {
printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
break;
}
outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
}
 
/* Grab the 8390 specific header. Similar to the block_input routine, but
we don't need to be concerned with ring wrap as the header will be at
the start of a page, so we optimize accordingly. */
 
/*
 * Read the 4-byte 8390 packet header for the frame at ring_page into
 * *hdr, via a remote-DMA read from card page ring_page, offset 0.
 * Guarded by the ei_status.dmaing flag against re-entry; bails out
 * (leaving *hdr untouched) if a remote DMA is already in flight.
 */
static void ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
int nic_base = dev->base_addr;
 
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
 
if (ei_status.dmaing)
{
printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
"[DMAstat:%d][irqlock:%d][intr:%ld].\n",
dev->name, ei_status.dmaing, ei_status.irqlock,
dev->interrupt);
return;
}
 
ei_status.dmaing |= 0x01;
/* Program byte count and start address, then start the remote read. */
outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
outb_p(0, nic_base + EN0_RCNTHI);
outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
outb_p(ring_page, nic_base + EN0_RSARHI);
outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
 
/* Word-wide cards stream the header through the data port as words. */
if (ei_status.word16)
insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
else
insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
 
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
}
 
/* Block input and output, similar to the Crynwr packet driver. If you
are porting to a new ethercard, look at the packet driver source for hints.
The NEx000 doesn't share the on-board packet memory -- you have to put
the packet out through the "remote DMA" dataport using outb. */
 
/*
 * Copy 'count' bytes of the received frame from card memory (starting
 * at ring_offset) into skb->data, using a programmed-I/O remote-DMA
 * read through the data port.  Protected against re-entry by
 * ei_status.dmaing; an odd byte count on a word-wide card is handled
 * by one trailing byte read.
 */
static void ne_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
{
#ifdef NE_SANITY_CHECK
int xfer_count = count;
#endif
int nic_base = dev->base_addr;
char *buf = skb->data;
 
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
"[DMAstat:%d][irqlock:%d][intr:%ld].\n",
dev->name, ei_status.dmaing, ei_status.irqlock,
dev->interrupt);
return;
}
ei_status.dmaing |= 0x01;
/* Program byte count and source address, then start the remote read. */
outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
outb_p(count & 0xff, nic_base + EN0_RCNTLO);
outb_p(count >> 8, nic_base + EN0_RCNTHI);
outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
if (ei_status.word16)
{
insw(NE_BASE + NE_DATAPORT,buf,count>>1);
if (count & 0x01)
{
/* Odd trailing byte of a word-wide transfer. */
buf[count-1] = inb(NE_BASE + NE_DATAPORT);
#ifdef NE_SANITY_CHECK
xfer_count++;
#endif
}
} else {
insb(NE_BASE + NE_DATAPORT, buf, count);
}
 
#ifdef NE_SANITY_CHECK
/* This was for the ALPHA version only, but enough people have
been encountering problems so it is still here. If you see
this message you either 1) have a slightly incompatible clone
or 2) have noise/speed problems with your bus. */
 
if (ei_debug > 1)
{
/* DMA termination address check... */
int addr, tries = 20;
do {
/* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
-- it's broken for Rx on some cards! */
int high = inb_p(nic_base + EN0_RSARHI);
int low = inb_p(nic_base + EN0_RSARLO);
addr = (high << 8) + low;
if (((ring_offset + xfer_count) & 0xff) == low)
break;
} while (--tries > 0);
if (tries <= 0)
printk(KERN_WARNING "%s: RX transfer address mismatch,"
"%4.4x (expected) vs. %4.4x (actual).\n",
dev->name, ring_offset + xfer_count, addr);
}
#endif
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
}
 
/*
 * Upload 'count' bytes from buf into card memory page start_page via a
 * programmed-I/O remote-DMA write, then wait (up to ~20ms) for the
 * remote-DMA-complete (RDC) interrupt bit; on timeout the card is
 * reset and re-initialized.  Re-entry is guarded by ei_status.dmaing.
 * NOTE(review): the forward declaration uses 'const int count'; the
 * top-level const is not part of the function type, so this is benign.
 */
static void ne_block_output(struct device *dev, int count,
const unsigned char *buf, const int start_page)
{
int nic_base = NE_BASE;
unsigned long dma_start;
#ifdef NE_SANITY_CHECK
int retries = 0;
#endif
 
/* Round the count up for word writes. Do we need to do this?
What effect will an odd byte count have on the 8390?
I should check someday. */
if (ei_status.word16 && (count & 0x01))
count++;
 
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing)
{
printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
"[DMAstat:%d][irqlock:%d][intr:%ld]\n",
dev->name, ei_status.dmaing, ei_status.irqlock,
dev->interrupt);
return;
}
ei_status.dmaing |= 0x01;
/* We should already be in page 0, but to be safe... */
outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
 
#ifdef NE_SANITY_CHECK
retry:
#endif
 
#ifdef NE8390_RW_BUGFIX
/* Handle the read-before-write bug the same way as the
Crynwr packet driver -- the NatSemi method doesn't work.
Actually this doesn't always work either, but if you have
problems with your NEx000 this is better than nothing! */
outb_p(0x42, nic_base + EN0_RCNTLO);
outb_p(0x00, nic_base + EN0_RCNTHI);
outb_p(0x42, nic_base + EN0_RSARLO);
outb_p(0x00, nic_base + EN0_RSARHI);
outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
/* Make certain that the dummy read has occurred. */
udelay(6);
#endif
 
/* Clear any stale RDC bit before starting, so the wait below is valid. */
outb_p(ENISR_RDC, nic_base + EN0_ISR);
 
/* Now the normal output. */
outb_p(count & 0xff, nic_base + EN0_RCNTLO);
outb_p(count >> 8, nic_base + EN0_RCNTHI);
outb_p(0x00, nic_base + EN0_RSARLO);
outb_p(start_page, nic_base + EN0_RSARHI);
 
outb_p(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
if (ei_status.word16) {
outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
} else {
outsb(NE_BASE + NE_DATAPORT, buf, count);
}
 
dma_start = jiffies;
 
#ifdef NE_SANITY_CHECK
/* This was for the ALPHA version only, but enough people have
been encountering problems so it is still here. */
if (ei_debug > 1)
{
/* DMA termination address check... */
int addr, tries = 20;
do {
int high = inb_p(nic_base + EN0_RSARHI);
int low = inb_p(nic_base + EN0_RSARLO);
addr = (high << 8) + low;
if ((start_page << 8) + count == addr)
break;
} while (--tries > 0);
 
if (tries <= 0)
{
printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
"%#4.4x (expected) vs. %#4.4x (actual).\n",
dev->name, (start_page << 8) + count, addr);
if (retries++ == 0)
goto retry;
}
}
#endif
 
/* Wait up to ~20ms for the upload to complete, per the changelog's
ENISR_RDC monitoring of Tx PIO uploads. */
while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
ne_reset_8390(dev);
NS8390_init(dev,1);
break;
}
 
outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
ei_status.dmaing &= ~0x01;
return;
}
 
#ifdef MODULE
#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
#define NAMELEN 8 /* # of chars for storing dev->name */
static char namelist[NAMELEN * MAX_NE_CARDS] = { 0, };
static struct device dev_ne[MAX_NE_CARDS] = {
{
NULL, /* assign a chunk of namelist[] below */
0, 0, 0, 0,
0, 0,
0, 0, 0, NULL, NULL
},
};
 
static int io[MAX_NE_CARDS] = { 0, };
static int irq[MAX_NE_CARDS] = { 0, };
static int bad[MAX_NE_CARDS] = { 0, }; /* 0xbad = bad sig or no reset ack */
 
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
 
#ifdef CONFIG_PCI
MODULE_PARM(probe_pci, "i");
#endif
 
/* This is set up so that no ISA autoprobe takes place. We can't guarantee
that the ne2k probe is the last 8390 based probe to take place (as it
is at boot) and so the probe will get confused by any other 8390 cards.
ISA device autoprobes on a running machine are not recommended anyway. */
 
/*
 * Module entry: register up to MAX_NE_CARDS devices configured via the
 * io[]/irq[]/bad[] parameters.  Note the control flow: the FIRST
 * registration failure ends the loop -- returning 0 if at least one
 * card was already found, -ENXIO otherwise.  ISA autoprobe is compiled
 * out for modules (see comment above), so io=0 only finds PCI cards.
 */
int init_module(void)
{
int this_dev, found = 0;
 
for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
struct device *dev = &dev_ne[this_dev];
dev->name = namelist+(NAMELEN*this_dev);
dev->irq = irq[this_dev];
dev->mem_end = bad[this_dev]; /* 0xbad skips reset-ack/signature checks in ne_probe1 */
dev->base_addr = io[this_dev];
dev->init = ne_probe;
if (register_netdev(dev) == 0) {
found++;
continue;
}
if (found != 0) { /* Got at least one. */
lock_8390_module();
return 0;
}
if (io[this_dev] != 0)
printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", io[this_dev]);
else
printk(KERN_NOTICE "ne.c: No PCI cards found. Use \"io=0xNNN\" value(s) for ISA cards.\n");
return -ENXIO;
}
lock_8390_module();
return 0;
}
 
/*
 * Module exit: tear down every card that was probed (dev->priv set).
 * dev->priv is freed LAST, after free_irq/release_region/unregister,
 * so nothing can run against freed private state (see the changelog
 * note about deallocating memory last in cleanup_module).
 */
void cleanup_module(void)
{
int this_dev;
 
for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
struct device *dev = &dev_ne[this_dev];
if (dev->priv != NULL) {
void *priv = dev->priv;
free_irq(dev->irq, dev);
release_region(dev->base_addr, NE_IO_EXTENT);
unregister_netdev(dev);
kfree(priv);
}
}
unlock_8390_module();
}
#endif /* MODULE */
/*
* Local variables:
* compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c ne.c"
* version-control: t
* kept-new-versions: 5
* End:
*/
/shark/trunk/drivers/net/include/linux/if.h
0,0 → 1,41
#ifndef __IF__
#define __IF__
 
#include <linux/compatib.h>
 
/* Standard interface flags. */
#define IFF_UP 0x1 /* interface is up */
#define IFF_BROADCAST 0x2 /* broadcast address valid */
#define IFF_DEBUG 0x4 /* turn on debugging */
#define IFF_LOOPBACK 0x8 /* is a loopback net */
#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */
#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
#define IFF_RUNNING 0x40 /* resources allocated */
#define IFF_NOARP 0x80 /* no ARP protocol */
#define IFF_PROMISC 0x100 /* receive all packets */
/* Not supported */
#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
 
#define IFF_MASTER 0x400 /* master of a load balancer */
#define IFF_SLAVE 0x800 /* slave of a load balancer */
 
#define IFF_MULTICAST 0x1000 /* Supports multicast */
 
 
/* Hardware i/o map of an interface (Linux-compatible layout). */
struct ifmap
{
unsigned long mem_start;
unsigned long mem_end;
unsigned short base_addr;
unsigned char irq;
unsigned char dma;
unsigned char port;
/* 3 bytes spare */
};
 
/* Stripped-down ifreq: this port keeps only the ioctl data pointer
   (the full Linux ifreq union is not needed here). */
struct ifreq
{
char *ifr_data;
};
 
#endif
/shark/trunk/drivers/net/include/linux/inet.h
0,0 → 1,5
#ifndef __INET__
#define __INET__
 
#include <linux/compatib.h>
#endif
/shark/trunk/drivers/net/include/linux/if_arp.h
0,0 → 1,20
#ifndef __IF_ARP__
#define __IF_ARP__
 
#include <linux/compatib.h>
 
/* ARP protocol HARDWARE identifiers. */
#define ARPHRD_NETROM 0 /* from KA9Q: NET/ROM pseudo */
#define ARPHRD_ETHER 1 /* Ethernet 10Mbps */
#define ARPHRD_EETHER 2 /* Experimental Ethernet */
#define ARPHRD_AX25 3 /* AX.25 Level 2 */
#define ARPHRD_PRONET 4 /* PROnet token ring */
#define ARPHRD_CHAOS 5 /* Chaosnet */
#define ARPHRD_IEEE802 6 /* IEEE 802.2 Ethernet/TR/TB */
#define ARPHRD_ARCNET 7 /* ARCnet */
#define ARPHRD_APPLETLK 8 /* APPLEtalk */
#define ARPHRD_DLCI 15 /* Frame Relay DLCI */
#define ARPHRD_METRICOM 23 /* Metricom STRIP (new IANA id) */
#define ARPHRD_LOOPBACK 772 /* Loopback device */
 
#endif
/shark/trunk/drivers/net/include/linux/init.h
0,0 → 1,62
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H
 
/* MODIFIED!!! */
 
/* These macros are used to mark some functions or
* initialized data (doesn't apply to uninitialized data)
* as `initialization' functions. The kernel can take this
* as hint that the function is used only during the initialization
* phase and free up used memory resources after
*
* Usage:
* For functions:
*
* You should add __init immediately before the function name, like:
*
* static void __init initme(int x, int y)
* {
* extern int z; z = x * y;
* }
*
* Depricated: you can surround the whole function declaration
* just before function body into __initfunc() macro, like:
*
* __initfunc (static void initme(int x, int y))
* {
* extern int z; z = x * y;
* }
*
* If the function has a prototype somewhere, you can also add
* __init between closing brace of the prototype and semicolon:
*
* extern int initialize_foobar_device(int, int, int) __init;
*
* For initialized data:
* You should insert __initdata between the variable name and equal
* sign followed by value, e.g.:
*
* static int init_variable __initdata = 0;
* static char linux_logo[] __initdata = { 0x32, 0x36, ... };
*
* For initialized data not at file scope, i.e. within a function,
* you should use __initlocaldata instead, due to a bug in GCC 2.7.
*/
 
/*
* Disable the __initfunc macros if a file that is a part of a
* module attempts to use them. We do not want to interfere
* with module linking.
*/
 
#define __init
#define __initdata
#define __initfunc(__arginit) __arginit
/* For assembly routines */
#define __INIT
#define __FINIT
#define __INITDATA
 
#define __initlocaldata
 
#endif
/shark/trunk/drivers/net/include/linux/types.h
0,0 → 1,6
#ifndef __TYPES__
#define __TYPES__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/config.h
0,0 → 1,9
#ifndef __CONFIG__
#define __CONFIG__
 
#include <linux/compatib.h>
#define CONFIG_PCI
/*#define HAVE_PRIVATE_IOCTL
#define VORTEX_DEBUG 10 */
 
#endif
/shark/trunk/drivers/net/include/linux/fcntl.h
0,0 → 1,6
#ifndef __FCNTL__
#define __FCNTL__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/in.h
0,0 → 1,6
#ifndef __IN__
#define __IN__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/string.h
0,0 → 1,6
#ifndef __STRING__
#define __STRING__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/netdevice.h
0,0 → 1,270
#ifndef __NETDEVICE__
#define __NETDEVICE__
 
#include <linux/config.h>
#include <linux/compatib.h>
 
#include <linux/if.h>
 
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <time.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
// for 3c59x.c (!!!)
#define le32_to_cpu(val) (val)
#define cpu_to_le32(val) (val)
#define cpu_to_le16(val) (val)
#define test_and_set_bit(val, addr) set_bit(val, addr)
 
/* Sleep for x milliseconds using POSIX nanosleep().
   Replaces the Linux busy-wait mdelay() in this port. */
static __inline__ void mdelay(int x)
{
	struct timespec req;

	req.tv_sec = x / 1000;
	req.tv_nsec = (long)(x % 1000) * 1000000L;
	nanosleep(&req, NULL);
}
 
#define kfree(x) free(x)
#define ioremap(a,b) \
(((a)<0x100000) ? (void *)((u_long)(a)) : 0)
// was vremap(a,b)) instead of 0)) PJ
 
#define iounmap(v)
//PJ was #define iounmap do { if ((u_long)(v) > 0x100000) vfree(v); } while (0)
 
/* for future expansion when we will have different priorities. */
#define DEV_NUMBUFFS 3
#define MAX_ADDR_LEN 7
#ifndef CONFIG_AX25
#ifndef CONFIG_TR
#ifndef CONFIG_NET_IPIP
#define MAX_HEADER 32 /* We really need about 18 worst case .. so 32 is aligned */
#else
#define MAX_HEADER 80 /* We need to allow for having tunnel headers */
#endif /* IPIP */
#else
#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */
#endif /* TR */
#else
#define MAX_HEADER 96 /* AX.25 + NetROM */
#endif /* AX25 */
 
#define IS_MYADDR 1 /* address is (one of) our own */
#define IS_LOOPBACK 2 /* address is for LOOPBACK */
#define IS_BROADCAST 3 /* address is a valid broadcast */
#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/
#define IS_MULTICAST 5 /* Multicast IP address */
 
 
 
/*
 * One node in a device's chain of hardware multicast addresses
 * (see the mc_list / mc_count fields of struct device in this file).
 */
struct dev_mc_list
{
	struct dev_mc_list *next;		/* next entry in the chain */
	__u8 dmi_addr[MAX_ADDR_LEN];		/* the hardware address itself */
	unsigned char dmi_addrlen;		/* significant bytes in dmi_addr */
	int dmi_users;				/* reference counters, carried over from
						   Linux -- NOTE(review): not obviously
						   maintained by this port; confirm */
	int dmi_gusers;
};
 
 
 
/*
* Network device statistics. Akin to the 2.0 ether stats but
* with byte counters.
*/
/* Note: if_ether.h in this port #defines the old name enet_statistics
 * to net_device_stats, so both names refer to this structure. */
struct net_device_stats
{
	unsigned long rx_packets; /* total packets received */
	unsigned long tx_packets; /* total packets transmitted */
	unsigned long rx_bytes; /* total bytes received */
	unsigned long tx_bytes; /* total bytes transmitted */
	unsigned long rx_errors; /* bad packets received */
	unsigned long tx_errors; /* packet transmit problems */
	unsigned long rx_dropped; /* no space in linux buffers */
	unsigned long tx_dropped; /* no space available in linux */
	unsigned long multicast; /* multicast packets received */
	unsigned long collisions; /* total collisions seen */

	/* detailed rx_errors: */
	unsigned long rx_length_errors;
	unsigned long rx_over_errors; /* receiver ring buff overflow */
	unsigned long rx_crc_errors; /* recved pkt with crc error */
	unsigned long rx_frame_errors; /* recv'd frame alignment error */
	unsigned long rx_fifo_errors; /* recv'r fifo overrun */
	unsigned long rx_missed_errors; /* receiver missed packet */

	/* detailed tx_errors */
	unsigned long tx_aborted_errors;
	unsigned long tx_carrier_errors;
	unsigned long tx_fifo_errors;
	unsigned long tx_heartbeat_errors;
	unsigned long tx_window_errors;
	/* for cslip etc */
	unsigned long rx_compressed;
	unsigned long tx_compressed;
};
 
 
/*
 * Cache entry for a prebuilt hardware (link-level) header; filled and
 * refreshed via the header_cache_bind / header_cache_update hooks of
 * struct device in this file.
 */
struct hh_cache
{
	struct hh_cache *hh_next;	/* next cache entry in the chain */
	void *hh_arp;		/* Opaque pointer, used by
				 * any address resolution module,
				 * not only ARP.
				 */
	int hh_refcnt;	/* number of users */
	unsigned short hh_type;	/* protocol identifier, f.e ETH_P_IP */
	char hh_uptodate;	/* hh_data is valid */
	char hh_data[16];	/* cached hardware header */
};
 
/*
* The DEVICE structure.
* Actually, this whole structure is a big mistake. It mixes I/O
* data with strictly "high-level" data, and it has to know about
* almost every data structure used in the INET module.
*/
struct device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file). It is the name
	 * the interface.
	 */
	char *name;

	/* I/O specific fields - FIXME: Merge these and struct ifmap into one */
	unsigned long rmem_end; /* shmem "recv" end */
	unsigned long rmem_start; /* shmem "recv" start */
	unsigned long mem_end; /* shared mem end */
	unsigned long mem_start; /* shared mem start */
	unsigned long base_addr; /* device I/O address */
	unsigned char irq; /* device IRQ number */

	/* Low-level status flags. */
	volatile unsigned char start, /* start an operation */
		interrupt; /* interrupt arrived */
	unsigned long tbusy; /* transmitter busy must be long for bitops */

	struct device *next;

	/* The device initialization function. Called only once. */
	int (*init)(struct device *dev);

	/* Some hardware also needs these fields, but they are not part of the
	   usual set specified in Space.c. */
	unsigned char if_port; /* Selectable AUI, TP,..*/
	unsigned char dma; /* DMA channel */

	/* enet_statistics is an alias for net_device_stats (see if_ether.h) */
	struct enet_statistics* (*get_stats)(struct device *dev);

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */

	/* These may be needed for future network-power-down code. */
	unsigned long trans_start; /* Time (in jiffies) of last Tx */
	unsigned long last_rx; /* Time of last Rx */

	unsigned short flags; /* interface flags (a la BSD) */
	unsigned short family; /* address family ID (AF_INET) */
	unsigned short metric; /* routing metric (not used) */
	unsigned short mtu; /* interface MTU value */
	unsigned short type; /* interface hardware type */
	unsigned short hard_header_len; /* hardware hdr length */
	void *priv; /* pointer to private data */

	/* Interface address info. */
	unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
	unsigned char pad; /* make dev_addr aligned to 8 bytes */
	unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
	unsigned char addr_len; /* hardware address length */
	unsigned long pa_addr; /* protocol address */
	unsigned long pa_brdaddr; /* protocol broadcast addr */
	unsigned long pa_dstaddr; /* protocol P-P other side addr */
	unsigned long pa_mask; /* protocol netmask */
	unsigned short pa_alen; /* protocol address length */

	struct dev_mc_list *mc_list; /* Multicast mac addresses */
	int mc_count; /* Number of installed mcasts */
	struct ip_mc_list *ip_mc_list; /* IP multicast filter chain */
	__u32 tx_queue_len; /* Max frames per queue allowed */
	/* For load balancing driver pair support */
	unsigned long pkt_queue; /* Packets queued */
	struct device *slave; /* Slave device */
	struct net_alias_info *alias_info; /* main dev alias info */
	struct net_alias *my_alias; /* alias devs */
	/* Pointer to the interface buffers. */
	struct sk_buff_head buffs[DEV_NUMBUFFS];

	/* Pointers to interface service routines. */
	int (*open)(struct device *dev);
	int (*stop)(struct device *dev);
	int (*hard_start_xmit) (struct sk_buff *skb,
				struct device *dev);
	int (*hard_header) (struct sk_buff *skb,
			struct device *dev,
			unsigned short type,
			void *daddr,
			void *saddr,
			unsigned len);
	int (*rebuild_header)(void *eth, struct device *dev,
			unsigned long raddr, struct sk_buff *skb);


	/* The HAVE_* macros advertise (via #ifdef in driver code) which of
	   the optional hooks below this version of the structure carries. */
#define HAVE_PRIVATE_IOCTL
	int (*do_ioctl)(struct device *dev, struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int (*set_config)(struct device *dev, struct ifmap *map);
#define HAVE_CHANGE_MTU
	int (*change_mtu)(struct device *dev, int new_mtu);
#define HAVE_SET_MAC_ADDR
	int (*set_mac_address)(struct device *dev, void *addr);

#define HAVE_HEADER_CACHE
	void (*header_cache_bind)(struct hh_cache **hhp, struct device *dev, unsigned short htype, __u32 daddr);
	void (*header_cache_update)(struct hh_cache *hh, struct device *dev, unsigned char * haddr);

#define HAVE_MULTICAST
	void (*set_multicast_list)(struct device *dev);
};
 
extern struct device loopback_dev;
extern struct device *dev_base;
extern struct packet_type *ptype_base[16];
 
 
extern void ether_setup(struct device *dev);
extern void tr_setup(struct device *dev);
extern void fddi_setup(struct device *dev);
extern int ether_config(struct device *dev, struct ifmap *map);
/* Support for loadable net-drivers */
extern int register_netdev(struct device *dev);
extern void unregister_netdev(struct device *dev);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
 
/*
unsigned short htons(unsigned short host);
unsigned short ntohs(unsigned short net);
*/
 
void netif_rx(struct sk_buff *skb);
 
__END_DECLS
#endif
 
 
/shark/trunk/drivers/net/include/linux/malloc.h
0,0 → 1,6
#ifndef __MALLOC__
#define __MALLOC__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/mm.h
0,0 → 1,6
#ifndef __MM__
#define __MM__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/stddef.h
0,0 → 1,21
#ifndef _LINUX_STDDEF_H
#define _LINUX_STDDEF_H
 
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#ifndef _SIZE_T
#define _SIZE_T
typedef unsigned int size_t;
#endif
 
#undef NULL
#define NULL ((void *)0)
 
#undef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
 
__END_DECLS
 
#endif
/shark/trunk/drivers/net/include/linux/ioport.h
0,0 → 1,6
#ifndef __IOPORT__
#define __IOPORT__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/notifier.h
0,0 → 1,18
#ifndef __NOTIFIER__
#define __NOTIFIER__
 
#include <linux/compatib.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*
 * Node of a Linux-style notifier chain: the callback plus its chain
 * linkage.  Only the structure is provided by this port; the
 * register/unregister helpers are declared in netdevice.h.
 */
struct notifier_block
{
	int (*notifier_call)(struct notifier_block *this, unsigned long, void *);
	struct notifier_block *next;	/* next block in the chain */
	int priority;			/* chain ordering key (Linux semantics;
					   NOTE(review): confirm it is used here) */
};
 
__END_DECLS
 
#endif
/shark/trunk/drivers/net/include/linux/kernel.h
0,0 → 1,22
#ifndef __KERNEL__
#define __KERNEL__
 
#include <linux/compatib.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/* Optimization barrier */
#define barrier() __asm__("": : :"memory")
 
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
#define KERN_ERR "<3>" /* error conditions */
#define KERN_WARNING "<4>" /* warning conditions */
#define KERN_NOTICE "<5>" /* normal but significant condition */
#define KERN_INFO "<6>" /* informational */
#define KERN_DEBUG "<7>" /* debug-level messages */
 
__END_DECLS
#endif
/shark/trunk/drivers/net/include/linux/sched.h
0,0 → 1,6
#ifndef __SCHED__
#define __SCHED__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/etherdevice.h
0,0 → 1,34
#ifndef __ETHERDEVICE__
#define __ETHERDEVICE__
 
#include <linux/compatib.h>
 
#include <linux/if_ether.h>
#include <linux/socket.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#define eth_header panic_stub
#if 0
extern int eth_header(struct sk_buff *skb, struct device *dev,
unsigned short type, void *daddr,
void *saddr, unsigned len);
extern int eth_rebuild_header(void *buff, struct device *dev,
unsigned long dst, struct sk_buff *skb);
#endif
 
/* extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev); */
#define eth_type_trans(a,b) 0
 
#if 0
extern void eth_header_cache_bind(struct hh_cache ** hhp, struct device *dev,
unsigned short htype, __u32 daddr);
extern void eth_header_cache_update(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
#endif
extern void eth_copy_and_sum(struct sk_buff *dest,
unsigned char *src, int length, int base);
extern struct device * init_etherdev(struct device *, int);
 
__END_DECLS
#endif
/shark/trunk/drivers/net/include/linux/interrupt.h
0,0 → 1,20
#ifndef __INTERRUPT__
#define __INTERRUPT__
 
#include <linux/compatib.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#define NR_IRQS 128
 
int request_irq(unsigned int irq, void (*handler)(int, void *dev_id, struct pt_regs *), unsigned long flags, const char *device, void *dev_id);
 
#define disable_irq_nosync(i) irq_mask(i)
#define disable_irq(i) irq_mask(i)
#define enable_irq(i) irq_unmask(i)
 
void free_irq(unsigned int irq, void *d);
 
__END_DECLS
#endif
/shark/trunk/drivers/net/include/linux/compatib.h
0,0 → 1,178
#include <kernel/kern.h>
#include <stdlib.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#ifndef __UNIXCOMP__
#define __UNIXCOMP__
 
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
#define LINUX_VERSION_CODE 0x20209
 
#define SA_NOCLDSTOP 1
#define SA_SHIRQ 0x04000000
#define SA_STACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_INTERRUPT 0x20000000
#define SA_NOMASK 0x40000000
#define SA_ONESHOT 0x80000000
 
#define PAGE_SIZE 0x400
 
/* #define USE_SHARED_IRQ */
 
struct pt_regs {
}; /* This have to be checked... */
 
#define __u32 unsigned long int
#define u32 unsigned long int
#define s32 signed long int
//#define u_long unsigned long int
#define __u16 unsigned short int
#define u16 unsigned short int
#define s16 signed short int
//#define u_short unsigned short int
#define ushort unsigned short int
//#define u_char unsigned char
#define u8 unsigned char
#define __u8 unsigned char
#define uint8_t unsigned char
#define uint16_t unsigned short int
#define uint32_t unsigned int
#define uint unsigned int
#define ulong unsigned long
 
#define caddr_t unsigned int
 
#define atomic_t int
 
#ifndef NULL
#define NULL 0
#endif
 
#define HZ 100 /* Has to be controlled... */
#define jiffies (sys_gettime(NULL)*HZ/1000000) /* Has to be controlled... */
extern long unsigned int loops_per_sec; /* ... */
#define EISA_bus 0 /* We do not support EISA buses... */
 
#define NET_BH 1 /* ???? */
 
/* Linux Module stub emulation... */
#define MOD_INC_USE_COUNT /* Do nothing... */
#define MOD_DEC_USE_COUNT /* Do nothing... */
#define MOD_IN_USE 0 /* No module => never in use... */
#define MODULE_AUTHOR(a)
#define MODULE_DESCRIPTION(a)
#define MODULE_LICENSE(a)
#define MODULE_PARM(a,b)
#define MODULE_PARM_DESC(a,b)
 
#define GFP_KERNEL 0x03 /* Don't know what it is... */
#define GFP_ATOMIC 0x01 /* Don't know what it is... */
 
 
/* Linux kernel call emulation */
#define kmalloc(a,b) malloc(a)
//#define printk cprintf I would like to use the kernel printk if possible...
#define check_region(a,b) 0
#define request_region(a,b,c)
#define release_region(a,b)
 
/* Linux funcs emulation... */
#define outb_p(v,p) outp(p,v)
#define outb(v,p) outp(p,v)
#define outw(v,p) outpw(p,v)
#define outl(v,p) outpd(p,v)
#define inb_p(p) inp(p)
#define inb(p) inp(p)
#define inw(p) inpw(p)
#define inl(p) inpd(p)
 
#define mark_bh(NET_BH) /* Don't use soft int emulation... */
 
 
#define __cli() kern_cli()
#define __sti() kern_sti()
#define cli() kern_cli()
#define sti() kern_sti()
#define __save_flags(f) f = kern_fsave()
#define __restore_flags(f) kern_frestore(f)
#define save_flags(f) f = kern_fsave()
#define restore_flags(f) kern_frestore(f)
 
 
/* URKA Stubs */
 
extern void panic_stub(void);
/* #define eth_header panic_stub */
#define eth_rebuild_header panic_stub
#define eth_header_cache_bind panic_stub
#define eth_header_cache_update panic_stub
 
 
#define atomic_sub(a,b)
 
 
#define vremap(a,b) 0
 
 
/*
 * Linux suser() ("is the caller the superuser?") emulation.
 * S.Ha.R.K. has no notion of users, so every caller is privileged.
 */
extern __inline__ int suser(void)
{
	return 1;
}
 
 
// spinlocks
#define spinlock_t DWORD
#define spin_lock(x) (void)(x)
#define spin_unlock(x) (void)(x)
 
#define spin_lock_irqsave(x,y) y = kern_fsave()
#define spin_unlock_irqrestore(x,y) kern_frestore(y)
#define spin_lock_init(x)
#define SPIN_LOCK_UNLOCKED (spinlock_t) 0
#define spin_lock_irq(lock) kern_cli()
#define spin_unlock_irq(lock) kern_sti()
 
/*
 * Linux panic() emulation: print the message on the console and shut
 * the whole system down via sys_end().  Does not return control to
 * the caller in any useful way.
 */
extern __inline__ void panic(const char *c)
{
	cputs((char *)c);	/* cast drops const: cputs() takes char * */
	sys_end();
}
 
/* below tuff added for rtl8139 net driver
at some point this stuff should moved in a more conevenient place.
*/
 
/* *** from linux-2.2.17/include/linux/compatmac.h */
#define capable(x) suser()
 
#define __devinitdata
#define __devinit
 
/* PCI device match entry (imported for the rtl8139 driver); consumed
 * by MODULE_DEVICE_TABLE, which is a no-op in this port. */
struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};
#define PCI_ANY_ID (~0)
#define MODULE_DEVICE_TABLE(a,b)
 
#ifdef CONFIG_HIGHMEM
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif
/* *** from linux-2.2.17/include/linux/capability.h */
#define CAP_NET_ADMIN 12
 
/* *** from linux-2.2.17/include/linux/byteorder/little_endian.h */
#define __le16_to_cpu(x) ((__u16)(x))
 
/* *** from linux-2.2.17/include/linux/byteorder/generic.h */
#define le16_to_cpu __le16_to_cpu
 
__END_DECLS
#endif
/shark/trunk/drivers/net/include/linux/delay.h
0,0 → 1,7
#ifndef __DELAY__
#define __DELAY__

#include <linux/compatib.h>

/*
 * Linux udelay() emulation: busy-wait roughly `microsec` microseconds
 * by spinning on __SLOW_DOWN_IO (a dummy I/O access).  The factor 4 is
 * a rough calibration constant.
 *
 * Fix: the macro argument is now parenthesized.  The original
 * `4*microsec` mis-expanded compound arguments -- e.g. udelay(a+b)
 * became `4*a+b` instead of `4*(a+b)`.
 */
#define udelay(microsec) do { int _i = 4*(microsec); while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)

#endif
/shark/trunk/drivers/net/include/linux/mca.h
0,0 → 1,6
#ifndef __MCA__
#define __MCA__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/bios32.h
0,0 → 1,6
#ifndef __BIOS32__
#define __BIOS32__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/fs.h
0,0 → 1,6
#ifndef __FS__
#define __FS__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/pci.h
0,0 → 1,23
/* OIO!!! I just have PCI!!! */
 
#ifndef __PCI__
#define __PCI__
 
#include <kernel/kern.h>
#include <drivers/pci.h>
#include <drivers/linuxpci.h>
#include <linux/compatib.h>
 
#if 0
#define PCI_COMMAND 0x04 /* 16 bits */
#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
 
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
 
#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
 
#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
#endif
 
#endif /* PCI_H */
 
/shark/trunk/drivers/net/include/linux/ptrace.h
0,0 → 1,6
#ifndef __PTRACE__
#define __PTRACE__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/skbuff.h
0,0 → 1,165
#ifndef __SKBUFF__
#define __SKBUFF__
 
#include <linux/compatib.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#define HAVE_ALLOC_SKB /* For the drivers to know */
#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
 
 
#define FREE_READ 1
#define FREE_WRITE 0
 
#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2
 
 
/*
 * Head of a doubly-linked queue of sk_buffs (next/prev point to the
 * first/last buffer, as in Linux).
 */
struct sk_buff_head
{
	struct sk_buff * next;
	struct sk_buff * prev;
	__u32 qlen; /* Must be same length as a pointer
			for using debugging */
#if CONFIG_SKB_CHECK
	int magic_debug_cookie;
#endif
};
 
 
/*
 * Socket/network buffer, stripped down from the Linux original: the
 * #if 0 and #ifdef NOTWORK regions preserve fields of the Linux
 * struct that this port does not compile in.
 */
struct sk_buff
{
	struct sk_buff * next; /* Next buffer in list */
	struct sk_buff * prev; /* Previous buffer in list */
	struct sk_buff_head * list; /* List we are on */
#if CONFIG_SKB_CHECK
	int magic_debug_cookie;
#endif
#if 0
	struct sk_buff *link3; /* Link for IP protocol level buffer chains */
#endif

#if 0
	struct sock *sk; /* Socket we are owned by */
#endif
	struct device *dev; /* Device we arrived on/are leaving by */
#ifdef NOTWORK
	unsigned long when; /* used to compute rtt's */
	struct timeval stamp; /* Time we arrived */
	union
	{
		struct tcphdr *th;
		struct ethhdr *eth;
		struct iphdr *iph;
		struct udphdr *uh;
		unsigned char *raw;
		/* for passing file handles in a unix domain socket */
		void *filp;
	} h;
	union
	{
		/* As yet incomplete physical layer views */
		unsigned char *raw;
		struct ethhdr *ethernet;
	} mac;
	struct iphdr *ip_hdr; /* For IPPROTO_RAW */

#endif /*NOTWORK*/

	char cb[48]; /* added by PJ */

	unsigned long len; /* Length of actual data */
	unsigned long csum; /* Checksum */
#if 0
	__u32 saddr; /* IP source address */
	__u32 daddr; /* IP target address */
	__u32 raddr; /* IP next hop address */
	__u32 seq; /* TCP sequence number */
	__u32 end_seq; /* seq [+ fin] [+ syn] + datalen */
	__u32 ack_seq; /* TCP ack sequence number */
	unsigned char proto_priv[16]; /* Protocol private data */
#endif
	volatile char acked, /* Are we acked ? */
		used, /* Are we in use ? */
		free, /* How to free this buffer */
		arp; /* Has IP/ARP resolution finished */
#if 0
	unsigned char tries, /* Times tried */
		lock, /* Are we locked ? */
		localroute, /* Local routing asserted for this frame */
		pkt_type, /* Packet class */
		pkt_bridged, /* Tracker for bridging */
#endif
	unsigned char ip_summed; /* Driver fed us an IP checksum */
#if 0
#define PACKET_HOST 0 /* To us */
#define PACKET_BROADCAST 1 /* To all */
#define PACKET_MULTICAST 2 /* To group */
#define PACKET_OTHERHOST 3 /* To someone else */
	unsigned short users; /* User count - see datagram.c,tcp.c */
#endif
	unsigned short protocol; /* Packet protocol from driver. */
	unsigned int truesize; /* Buffer size */
#if 0



	atomic_t count; /* reference count */


	struct sk_buff *data_skb; /* Link to the actual data skb */
#endif
	unsigned char *head; /* Head of buffer */
	unsigned char *data; /* Data head pointer */
	unsigned char *tail; /* Tail pointer */
#if 0
	unsigned char *end; /* End pointer */
	void (*destructor)(struct sk_buff *); /* Destruct function */
	__u16 redirport; /* Redirect port */
#endif
};
 
#define dev_kfree_skb(a)
#define alloc_skb(a,b) dev_alloc_skb(a)
/* #define skb_device_unlock(skb); */
extern struct sk_buff * dev_alloc_skb(unsigned int size);
extern unsigned char * skb_put(struct sk_buff *skb, int len);
extern void skb_queue_head_init(struct sk_buff_head *list);
extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
extern void skb_reserve(struct sk_buff *skb, int len);
 
/*
 * Free bytes available in front of the current data head, i.e. the
 * distance between the start of the allocated area (skb->head) and
 * the first byte of payload (skb->data).
 */
extern __inline__ int skb_headroom(struct sk_buff *skb)
{
	long room = skb->data - skb->head;

	return (int) room;
}
 
extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, int newheadroom);
 
/*
 * Prepend `len` bytes to the buffer: move the data pointer back and
 * grow the recorded length, returning the new data head.
 *
 * NOTE(review): on headroom underflow this version only printk()s --
 * the Linux skb_under_panic path is commented out -- and still
 * returns a pointer before skb->head, so callers must guarantee
 * enough headroom (e.g. via skb_reserve()).
 */
extern __inline__ unsigned char *skb_push(struct sk_buff *skb,
	unsigned int len)
{
	skb->data-=len;
	skb->len+=len;
	if(skb->data<skb->head)
	{
		printk(KERN_ERR "skb_push: PANIC!!!");
		// __label__ here;
		// skb_under_panic(skb, len, &&here);
		//here: ;
	}
	return skb->data;
}
 
/*
 * Strip `len` bytes from the front of the buffer's data area and
 * return the advanced data pointer.  No underflow check is made;
 * the caller must ensure skb->len >= len.
 */
extern __inline__ char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->data += len;
	skb->len -= len;
	return (char *) skb->data;
}
 
__END_DECLS
#endif
/shark/trunk/drivers/net/include/linux/timer.h
0,0 → 1,115
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: timer.h,v 1.1 2004-05-11 14:32:02 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2004-05-11 14:32:02 $
------------
**/
 
/*
* Copyright (C) 2000 Paolo Gai, Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef __TIMER__
#define __TIMER__
 
#include <kernel/kern.h>
#include <time.h>
#include <linux/compatib.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*
* This is completely separate from the above, and is the
* "new and improved" way of handling timers more dynamically.
* Hopefully efficient and general enough for most things.
*
* The "hardcoded" timers above are still useful for well-
* defined problems, but the timer-list is probably better
* when you need multiple outstanding timers or similar.
*
* The "data" field is in case you want to use the same
* timeout function for several timeouts. You can use this
* to distinguish between the different invocations.
*/
/*struct timer_list {
struct timer_list *next;
struct timer_list *prev;
unsigned long expires;
unsigned long data;
void (*function)(unsigned long);
PID pid;
char Signat;
time_t sharktimer;
int event_timer;
 
}; Changeed by Nino*/
/* Generic doubly-linked list node, as in Linux <linux/list.h>. */
struct list_head {
	struct list_head *next, *prev;
};
 
/*
 * Linux-2.4-style timer descriptor, driven here by the init_timer /
 * mod_timer / add_timer / del_timer functions declared below.
 */
struct timer_list {
	struct list_head entry;		/* list linkage (Linux layout) */
	unsigned long expires;		/* expiry time, in jiffies */

	spinlock_t lock;		/* spinlocks are no-ops in this port */
	unsigned long magic;

	void (*function)(unsigned long);	/* callback; receives `data` */
	unsigned long data;			/* opaque argument for `function` */

	struct tvec_t_base_s *base;

	/* Added by Nino */
	int event_timer;	/* S.Ha.R.K.-side timer/event handle --
				   NOTE(review): semantics defined in timer.c,
				   not visible here; confirm before relying on it */
};
 
/*extern inline void init_timer(struct timer_list * timer)
{
timer->next = NULL;
timer->prev = NULL;
}
*/
void init_timer(struct timer_list * timer);
void mod_timer(struct timer_list *timer, unsigned long expires);
int add_timer(struct timer_list *timer);
void del_timer(struct timer_list *timer);
__END_DECLS
#endif
/shark/trunk/drivers/net/include/linux/module.h
0,0 → 1,6
#ifndef __MODULE__
#define __MODULE__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/errno.h
0,0 → 1,8
#ifndef __ERRNO__
#define __ERRNO__
 
#include <linux/compatib.h>
 
#endif
 
 
/shark/trunk/drivers/net/include/linux/version.h
0,0 → 1,6
#ifndef __VERSION__
#define __VERSION__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/linux/if_ether.h
0,0 → 1,56
#ifndef __IF_ETHER__
#define __IF_ETHER__
 
#include <linux/compatib.h>
 
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
* and FCS/CRC (frame check sequence).
*/
 
#define ETH_ALEN 6 /* Octets in one ethernet addr */
#define ETH_HLEN 14 /* Total octets in header. */
#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
#define ETH_DATA_LEN 1500 /* Max. octets in payload */
#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
 
/*
* Ethernet statistics collection data.
*/
 
#define enet_statistics net_device_stats
 
//struct enet_statistics
//{
// unsigned long rx_packets; /* total packets received */
// unsigned long tx_packets; /* total packets transmitted */
// unsigned long rx_bytes; /* total bytes received */
// unsigned long tx_bytes; /* total bytes transmitted */
// unsigned long rx_errors; /* bad packets received */
// unsigned long tx_errors; /* packet transmit problems */
// unsigned long rx_dropped; /* no space in linux buffers */
// unsigned long tx_dropped; /* no space available in linux */
// unsigned long multicast; /* multicast packets received */
// unsigned long collisions;
//
// /* detailed rx_errors: */
// unsigned long rx_length_errors;
// unsigned long rx_over_errors; /* receiver ring buff overflow */
// unsigned long rx_crc_errors; /* recved pkt with crc error */
// unsigned long rx_frame_errors; /* recv'd frame alignment error */
// unsigned long rx_fifo_errors; /* recv'r fifo overrun */
// unsigned long rx_missed_errors; /* receiver missed packet */
//
// /* detailed tx_errors */
// unsigned long tx_aborted_errors;
// unsigned long tx_carrier_errors;
// unsigned long tx_fifo_errors;
// unsigned long tx_heartbeat_errors;
// unsigned long tx_window_errors;
//
// /* for cslip etc */
// unsigned long rx_compressed;
// unsigned long tx_compressed;
//};
 
#endif
/shark/trunk/drivers/net/include/linux/socket.h
0,0 → 1,144
#ifndef _LINUX_SOCKET_H
#define _LINUX_SOCKET_H
 
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/* Generic socket address, matching the classic <sys/socket.h> layout. */
struct sockaddr
{
	unsigned short sa_family; /* address family, AF_xxx */
	char sa_data[14]; /* 14 bytes of protocol address */
};
 
/* Value structure for the SO_LINGER socket option. */
struct linger {
	int l_onoff; /* Linger active */
	int l_linger; /* How long to linger for */
};
 
/*
* As we do 4.4BSD message passing we use a 4.4BSD message passing
* system, not 4.3. Thus msg_accrights(len) are now missing. They
* belong in an obscure libc emulation or the bin.
*/
/* 4.4BSD-layout message header (scatter/gather + control data),
 * copied unchanged from Linux. */
struct msghdr
{
	void * msg_name; /* Socket name */
	int msg_namelen; /* Length of name */
	struct iovec * msg_iov; /* Data blocks */
	int msg_iovlen; /* Number of blocks */
	void * msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
	int msg_controllen; /* Length of rights list */
	int msg_flags; /* 4.4 BSD item we dont use */
};
 
/* Control Messages */
 
#define SCM_RIGHTS 1
 
/* Socket types. */
#define SOCK_STREAM 1 /* stream (connection) socket */
#define SOCK_DGRAM 2 /* datagram (conn.less) socket */
#define SOCK_RAW 3 /* raw socket */
#define SOCK_RDM 4 /* reliably-delivered message */
#define SOCK_SEQPACKET 5 /* sequential packet socket */
#define SOCK_PACKET 10 /* linux specific way of */
/* getting packets at the dev */
/* level. For writing rarp and */
/* other similar things on the */
/* user level. */
 
/* Supported address families. */
#define AF_UNSPEC 0
#define AF_UNIX 1 /* Unix domain sockets */
#define AF_INET 2 /* Internet IP Protocol */
#define AF_AX25 3 /* Amateur Radio AX.25 */
#define AF_IPX 4 /* Novell IPX */
#define AF_APPLETALK 5 /* Appletalk DDP */
#define AF_NETROM 6 /* Amateur radio NetROM */
#define AF_BRIDGE 7 /* Multiprotocol bridge */
#define AF_AAL5 8 /* Reserved for Werner's ATM */
#define AF_X25 9 /* Reserved for X.25 project */
#define AF_INET6 10 /* IP version 6 */
#define AF_MAX 12 /* For now.. */
 
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
#define PF_UNIX AF_UNIX
#define PF_INET AF_INET
#define PF_AX25 AF_AX25
#define PF_IPX AF_IPX
#define PF_APPLETALK AF_APPLETALK
#define PF_NETROM AF_NETROM
#define PF_BRIDGE AF_BRIDGE
#define PF_AAL5 AF_AAL5
#define PF_X25 AF_X25
#define PF_INET6 AF_INET6
 
#define PF_MAX AF_MAX
 
/* Maximum queue length specifiable by listen. */
#define SOMAXCONN 128
 
/* Flags we can use with send/ and recv. */
#define MSG_OOB 1
#define MSG_PEEK 2
#define MSG_DONTROUTE 4
/*#define MSG_CTRUNC 8 - We need to support this for BSD oddments */
#define MSG_PROXY 16 /* Supply or ask second address. */
 
/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
#define SOL_IP 0
#define SOL_IPX 256
#define SOL_AX25 257
#define SOL_ATALK 258
#define SOL_NETROM 259
#define SOL_TCP 6
#define SOL_UDP 17
 
/* IP options */
#define IP_TOS 1
#define IPTOS_LOWDELAY 0x10
#define IPTOS_THROUGHPUT 0x08
#define IPTOS_RELIABILITY 0x04
#define IPTOS_MINCOST 0x02
#define IP_TTL 2
#define IP_HDRINCL 3
#define IP_OPTIONS 4
 
#define IP_MULTICAST_IF 32
#define IP_MULTICAST_TTL 33
#define IP_MULTICAST_LOOP 34
#define IP_ADD_MEMBERSHIP 35
#define IP_DROP_MEMBERSHIP 36
 
/* These need to appear somewhere around here */
#define IP_DEFAULT_MULTICAST_TTL 1
#define IP_DEFAULT_MULTICAST_LOOP 1
#define IP_MAX_MEMBERSHIPS 20
/* IPX options */
#define IPX_TYPE 1
 
/* TCP options - this way around because someone left a set in the c library includes */
#define TCP_NODELAY 1
#define TCP_MAXSEG 2
 
/* The various priorities. */
#define SOPRI_INTERACTIVE 0
#define SOPRI_NORMAL 1
#define SOPRI_BACKGROUND 2
 
#define SIOCDEVPRIVATE 0x89f0
 
#ifdef __KERNEL__
extern void memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
extern int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode);
extern void memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int move_addr_to_user(void *kaddr, int klen, void *uaddr, int *ulen);
extern int move_addr_to_kernel(void *uaddr, int ulen, void *kaddr);
#endif
 
__END_DECLS
#endif /* _LINUX_SOCKET_H */
/shark/trunk/drivers/net/include/asm/irq.h
0,0 → 1,6
#ifndef __IRQ__
#define __IRQ__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/asm/processor.h
0,0 → 1,6
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
 
#include <linux/compatib.h>
 
#endif /* __ASM_PROCESSOR_H */
/shark/trunk/drivers/net/include/asm/bitops.h
0,0 → 1,47
#ifndef __BITOPS__
#define __BITOPS__
 
#include <linux/compatib.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#define LOCK_PREFIX ""
#define SMPVOL
 
/*
* Some hacks to defeat gcc over-optimizations..
*/
struct __dummy { unsigned long a[100]; };
#define ADDR (*(struct __dummy *) addr)
#define CONST_ADDR (*(const struct __dummy *) addr)
 
/*
 * Set bit `nr` in the bitmap at `addr` and return its PREVIOUS value:
 * 0 if the bit was clear, non-zero (-1) if it was already set.  The
 * x86 `bts` puts the old bit in the carry flag and `sbb reg,reg`
 * copies the carry into every bit of the result.  This test-and-set
 * behaviour is what lets netdevice.h define test_and_set_bit()
 * directly as set_bit().  LOCK_PREFIX is empty here, so the operation
 * is only atomic on a uniprocessor.
 */
extern __inline__ int set_bit(int nr, SMPVOL void * addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"ir" (nr));
	return oldbit;
}
 
/*
 * Clear bit `nr` in the bitmap at `addr` and return its PREVIOUS
 * value (0 or -1), mirroring set_bit() above but using `btr`.
 * LOCK_PREFIX is empty, so this is only atomic on a uniprocessor.
 */
extern __inline__ int clear_bit(int nr, SMPVOL void * addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"ir" (nr));
	return oldbit;
}
 
/*
 * Return 1 if bit `nr` of the bitmap at `addr` is set, 0 otherwise.
 * Pure read -- no locking involved.  The bitmap is addressed as an
 * array of 32-bit words: word index nr>>5, bit position nr&31.
 */
extern __inline__ int test_bit(int nr, const SMPVOL void * addr)
{
	const unsigned int *words = (const unsigned int *) addr;

	return (words[nr >> 5] >> (nr & 31)) & 1;
}
 
__END_DECLS
#endif
/shark/trunk/drivers/net/include/asm/dma.h
0,0 → 1,6
#ifndef __DMA__
#define __DMA__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/asm/spinlock.h
0,0 → 1,6
#ifndef __SPINLOCK__
#define __SPINLOCK__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/asm/segment.h
0,0 → 1,6
#ifndef __SEGMENT__
#define __SEGMENT__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/asm/system.h
0,0 → 1,6
#ifndef __SYSTEM__
#define __SYSTEM__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/include/asm/uaccess.h
0,0 → 1,610
#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H
 
// non compilo nulla!!!!
 
#if 0
 
/*
* User space memory access functions
*/
#include <linux/config.h>
#include <linux/sched.h>
#include <asm/page.h>
 
#define VERIFY_READ 0
#define VERIFY_WRITE 1
 
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*/
 
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
 
 
#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS MAKE_MM_SEG(PAGE_OFFSET)
 
#define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit)
#define set_fs(x) (current->addr_limit = (x))
 
#define segment_eq(a,b) ((a).seg == (b).seg)
 
extern int __verify_write(const void *, unsigned long);
 
#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))
 
/*
* Uhhuh, this needs 33-bit arithmetic. We have a carry..
*/
#define __range_ok(addr,size) ({ \
unsigned long flag,sum; \
asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
:"=&r" (flag), "=r" (sum) \
:"1" (addr),"g" (size),"g" (current->addr_limit.seg)); \
flag; })
 
#ifdef CONFIG_X86_WP_WORKS_OK
 
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
 
#else
 
#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
segment_eq(get_fs(),KERNEL_DS) || \
__verify_write((void *)(addr),(size))))
 
#endif /* CPU */
 
/*
 * Legacy Linux API: returns 0 when the range [addr, addr+size) passes
 * access_ok() for 'type' (VERIFY_READ or VERIFY_WRITE, defined above),
 * -EFAULT otherwise.
 * (Dead code in this port: the whole header body sits under "#if 0".)
 */
extern inline int verify_area(int type, const void * addr, unsigned long size)
{
return access_ok(type,addr,size) ? 0 : -EFAULT;
}
 
 
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
* modified, so it is entirely up to the continuation code to figure out
* what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
 
struct exception_table_entry
{
unsigned long insn, fixup;
};
 
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
 
 
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*
* This gets kind of ugly. We want to return _two_ values in "get_user()"
* and yet we don't want to do any pointers, because that is too much
* of a performance impact. Thus we have a few rather ugly macros here,
* and hide all the uglyness from the user.
*
* The "__xxx" versions of the user access functions are versions that
* do not verify the address space, that must have been done previously
* with a separate "access_ok()" call (this is used when we do multiple
* accesses to the same area of user memory).
*/
 
extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);
 
#define __get_user_x(size,ret,x,ptr) \
__asm__ __volatile__("call __get_user_" #size \
:"=a" (ret),"=d" (x) \
:"0" (ptr))
 
/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr) \
({ int __ret_gu,__val_gu; \
switch(sizeof (*(ptr))) { \
case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \
case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \
case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \
default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \
} \
(x) = (__typeof__(*(ptr)))__val_gu; \
__ret_gu; \
})
 
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
 
extern void __put_user_bad(void);
 
#define __put_user_x(size,ret,x,ptr) \
__asm__ __volatile__("call __put_user_" #size \
:"=a" (ret) \
:"0" (ptr),"d" (x) \
:"cx")
 
#define put_user(x,ptr) \
({ int __ret_pu; \
switch(sizeof (*(ptr))) { \
case 1: __put_user_x(1,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
case 2: __put_user_x(2,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
case 4: __put_user_x(4,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
default: __put_user_x(X,__ret_pu,x,ptr); break; \
} \
__ret_pu; \
})
 
#define __get_user(x,ptr) \
__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
 
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
 
#define __put_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
switch (size) { \
case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break; \
case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break; \
case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break; \
default: __put_user_bad(); \
} \
} while (0)
 
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
 
/*
* Tell gcc we read from memory instead of writing: this is because
* we do not write to any memory gcc knows about, so there are no
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype) \
__asm__ __volatile__( \
"1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,3b\n" \
".previous" \
: "=r"(err) \
: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
 
 
#define __get_user_nocheck(x,ptr,size) \
({ \
long __gu_err, __gu_val; \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
 
extern long __get_user_bad(void);
 
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
switch (size) { \
case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break; \
case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break; \
case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
 
#define __get_user_asm(x, addr, err, itype, rtype, ltype) \
__asm__ __volatile__( \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,3b\n" \
".previous" \
: "=r"(err), ltype (x) \
: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
 
/*
* The "xxx_ret" versions return constant specified in third argument, if
* something bad happens. These macros can be optimized for the
* case of just returning from the function xxx_ret is used.
*/
 
#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })
 
#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })
 
#define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; })
 
#define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; })
 
 
/*
* Copy To/From Userspace
*/
 
/* Generic arbitrary sized copy. */
#define __copy_user(to,from,size) \
do { \
int __d0, __d1; \
__asm__ __volatile__( \
"0: rep; movsl\n" \
" movl %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: lea 0(%3,%0,4),%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,2b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
: "memory"); \
} while (0)
 
#define __copy_user_zeroing(to,from,size) \
do { \
int __d0, __d1; \
__asm__ __volatile__( \
"0: rep; movsl\n" \
" movl %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: lea 0(%3,%0,4),%0\n" \
"4: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosb\n" \
" popl %%eax\n" \
" popl %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1) \
: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from) \
: "memory"); \
} while (0)
 
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
*/
/*
 * Copy 'n' bytes from user space 'from' to kernel buffer 'to' without an
 * access_ok() check (the caller must have validated the range already).
 * __copy_user_zeroing() decrements 'n' as it copies and zero-fills the
 * tail of 'to' if a fault occurs, so the value returned here is the
 * number of bytes that could NOT be copied (0 on complete success).
 */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
__copy_user_zeroing(to,from,n);
return n;
}
 
/*
 * Copy 'n' bytes from kernel buffer 'from' to user space 'to' without an
 * access_ok() check. __copy_user() decrements 'n' while copying, so the
 * return value is the number of bytes left uncopied (0 on success).
 */
static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
__copy_user(to,from,n);
return n;
}
 
 
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size) \
do { \
int __d0, __d1; \
switch (size & 3) { \
default: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1:\n" \
".section .fixup,\"ax\"\n" \
"2: shl $2,%0\n" \
" jmp 1b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,2b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 1: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: shl $2,%0\n" \
"4: incl %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 2: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsw\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: shl $2,%0\n" \
"4: addl $2,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 3: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsw\n" \
"2: movsb\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: shl $2,%0\n" \
"5: addl $2,%0\n" \
"6: incl %0\n" \
" jmp 3b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,4b\n" \
" .long 1b,5b\n" \
" .long 2b,6b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
} \
} while (0)
 
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size) \
do { \
int __d0, __d1; \
switch (size & 3) { \
default: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1:\n" \
".section .fixup,\"ax\"\n" \
"2: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosl\n" \
" popl %%eax\n" \
" popl %0\n" \
" shl $2,%0\n" \
" jmp 1b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,2b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 1: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsb\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosl\n" \
" stosb\n" \
" popl %%eax\n" \
" popl %0\n" \
" shl $2,%0\n" \
" incl %0\n" \
" jmp 2b\n" \
"4: pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" stosb\n" \
" popl %%eax\n" \
" incl %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 2: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsw\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosl\n" \
" stosw\n" \
" popl %%eax\n" \
" popl %0\n" \
" shl $2,%0\n" \
" addl $2,%0\n" \
" jmp 2b\n" \
"4: pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" stosw\n" \
" popl %%eax\n" \
" addl $2,%0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,3b\n" \
" .long 1b,4b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
case 3: \
__asm__ __volatile__( \
"0: rep; movsl\n" \
"1: movsw\n" \
"2: movsb\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: pushl %0\n" \
" pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" rep; stosl\n" \
" stosw\n" \
" stosb\n" \
" popl %%eax\n" \
" popl %0\n" \
" shl $2,%0\n" \
" addl $3,%0\n" \
" jmp 2b\n" \
"5: pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" stosw\n" \
" stosb\n" \
" popl %%eax\n" \
" addl $3,%0\n" \
" jmp 2b\n" \
"6: pushl %%eax\n" \
" xorl %%eax,%%eax\n" \
" stosb\n" \
" popl %%eax\n" \
" incl %0\n" \
" jmp 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 0b,4b\n" \
" .long 1b,5b\n" \
" .long 2b,6b\n" \
".previous" \
: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
: "1"(from), "2"(to), "0"(size/4) \
: "memory"); \
break; \
} \
} while (0)
 
unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
 
/*
 * Checked copy to user space for compile-time-constant sizes: validates
 * the destination range with access_ok() first, then runs the unrolled
 * __constant_copy_user(). The macro decrements 'n' as it copies, so the
 * return value is the number of uncopied bytes; if the access check
 * fails, nothing is copied and the original 'n' comes back unchanged.
 */
static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
__constant_copy_user(to,from,n);
return n;
}
 
/*
 * Checked copy from user space for compile-time-constant sizes: verifies
 * the source range with access_ok(), then runs the unrolled zeroing
 * variant (the tail of 'to' is zero-filled on a fault). Returns the
 * number of bytes not copied; a failed access check copies nothing and
 * returns the original 'n'.
 */
static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__constant_copy_user_zeroing(to,from,n);
return n;
}
 
/*
 * Unchecked variant of __constant_copy_to_user(): no access_ok() call,
 * caller guarantees the range. Returns the number of uncopied bytes.
 */
static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
__constant_copy_user(to,from,n);
return n;
}
 
/*
 * Unchecked variant of __constant_copy_from_user(): no access_ok() call;
 * zero-fills the destination tail on a fault. Returns uncopied bytes.
 */
static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
__constant_copy_user_zeroing(to,from,n);
return n;
}
 
#define copy_to_user(to,from,n) \
(__builtin_constant_p(n) ? \
__constant_copy_to_user((to),(from),(n)) : \
__generic_copy_to_user((to),(from),(n)))
 
#define copy_from_user(to,from,n) \
(__builtin_constant_p(n) ? \
__constant_copy_from_user((to),(from),(n)) : \
__generic_copy_from_user((to),(from),(n)))
 
#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
 
#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
 
#define __copy_to_user(to,from,n) \
(__builtin_constant_p(n) ? \
__constant_copy_to_user_nocheck((to),(from),(n)) : \
__generic_copy_to_user_nocheck((to),(from),(n)))
 
#define __copy_from_user(to,from,n) \
(__builtin_constant_p(n) ? \
__constant_copy_from_user_nocheck((to),(from),(n)) : \
__generic_copy_from_user_nocheck((to),(from),(n)))
 
long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char *str, long n);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);
 
#endif
#endif /* __i386_UACCESS_H */
/shark/trunk/drivers/net/include/asm/io.h
0,0 → 1,78
#ifndef __IO__
#define __IO__
 
#include <linux/compatib.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/*
* Thanks to James van Artsdalen for a better timing-fix than
* the two short jumps: using outb's to a nonexistent port seems
* to guarantee better timings even on fast machines.
*
* On the other hand, I'd like to be sure of a non-existent port:
* I feel a bit unsafe about using 0x80 (should be safe, though)
*
* Linus
*/
 
#ifdef SLOW_IO_BY_JUMPING
#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
#else
#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
#endif
 
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
 
 
/*
 * String-input helpers: __INS(x) generates insb/insw/insl, each reading
 * 'count' items from I/O port 'port' into the buffer at 'addr' via
 * "cld; rep insX". (Comments cannot appear inside the macro body -- the
 * backslash continuations would break -- hence this block comment.)
 * NOTE(review): the asm declares no "memory" clobber even though INS
 * stores to memory; presumably callers never cache the buffer across
 * the call -- worth confirming before reuse.
 */
#define __INS(s) \
extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
__INS(b)
__INS(w)
__INS(l)

/*
 * Mirror image for output: __OUTS(x) generates outsb/outsw/outsl, each
 * writing 'count' items from the buffer at 'addr' to I/O port 'port'.
 */
#define __OUTS(s) \
extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
__OUTS(b)
__OUTS(w)
__OUTS(l)
 
/* Translation from virtual to physical address... */
/* from asm/io.h */
 
/* Identity mapping: physical and virtual addresses coincide in this port. */
#define __io_virt(x) ((void *)(x))

/* Map a physical address to a usable virtual pointer (identity map here). */
extern inline void * phys_to_virt(unsigned long address)
{
void *virt = __io_virt(address);

return virt;
}
 
 
/*
 * Inverse of phys_to_virt(): with the identity mapping the physical
 * address is simply the pointer value. DWORD is a S.Ha.R.K. integer
 * type -- assumed wide enough to hold a pointer on this 32-bit target.
 */
extern __inline__ DWORD virt_to_phys(volatile void * address)
{
return (DWORD)address;
}
 
/* Bus addresses equal physical addresses on this platform. */
#define bus_to_virt phys_to_virt
#define virt_to_bus virt_to_phys


/* MMIO readers: volatile 8/16/32-bit loads through the identity map. */
#define readb(addr) (*(volatile unsigned char *) __io_virt(addr))
#define readw(addr) (*(volatile unsigned short *) __io_virt(addr))
#define readl(addr) (*(volatile unsigned int *) __io_virt(addr))

/* MMIO writers: volatile 8/16/32-bit stores through the identity map. */
#define writeb(b,addr) (*(volatile unsigned char *) __io_virt(addr) = (b))
#define writew(b,addr) (*(volatile unsigned short *) __io_virt(addr) = (b))
#define writel(b,addr) (*(volatile unsigned int *) __io_virt(addr) = (b))
 
__END_DECLS
#endif
/shark/trunk/drivers/net/include/net/sock.h
0,0 → 1,247
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: sock.h,v 1.1 2004-05-11 14:32:02 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.1 $
Last update: $Date: 2004-05-11 14:32:02 $
------------
**/
 
/*
* Copyright (C) 2000 Paolo Gai, Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
#ifndef __SOCK__
#define __SOCK__
 
#include <linux/timer.h>
 
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* struct sk_buff */
 
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
 
/*
* This structure really needs to be cleaned up.
* Most of it is for TCP, and not used by any of
* the other protocols.
*/
/*
 * Legacy (Linux 2.0-era) per-socket control block carried over for the
 * ported network drivers. As the note above says, most fields belong to
 * TCP and are unused by other protocols. Layout must not be changed:
 * external code may rely on field order (sklist_* must stay first).
 */
struct sock
{
/* This must be first. */
struct sock *sklist_next;
struct sock *sklist_prev;

struct options *opt;
atomic_t wmem_alloc;
atomic_t rmem_alloc;
unsigned long allocation; /* Allocation mode */
/* TCP sequence-number state. */
__u32 write_seq;
__u32 sent_seq;
__u32 acked_seq;
__u32 copied_seq;
__u32 rcv_ack_seq;
unsigned short rcv_ack_cnt; /* count of same ack */
__u32 window_seq;
__u32 fin_seq;
__u32 urg_seq;
__u32 urg_data;
__u32 syn_seq;
int users; /* user count */
/*
* Not all are volatile, but some are, so we
* might as well say they all are.
*/
volatile char dead,
urginline,
intr,
blog,
done,
reuse,
keepopen,
linger,
delay_acks,
destroy,
ack_timed,
no_check,
zapped, /* In ax25 & ipx means not linked */
broadcast,
nonagle,
bsdism;
unsigned long lingertime;
int proc;

/* Hash-chain / queue linkage and transmit bookkeeping. */
struct sock *next;
struct sock **pprev;
struct sock *bind_next;
struct sock **bind_pprev;
struct sock *pair;
int hashent;
struct sock *prev;
struct sk_buff * volatile send_head;
struct sk_buff * volatile send_next;
struct sk_buff * volatile send_tail;
struct sk_buff_head back_log;
struct sk_buff *partial;
struct timer_list partial_timer;
long retransmits;
struct sk_buff_head write_queue,
receive_queue;
struct proto *prot;
struct wait_queue **sleep;
/* IPv4 addressing. */
__u32 daddr;
__u32 saddr; /* Sending source */
__u32 rcv_saddr; /* Bound address */
unsigned short max_unacked;
unsigned short window;
__u32 lastwin_seq; /* sequence number when we last updated the window we offer */
__u32 high_seq; /* sequence number when we did current fast retransmit */
volatile unsigned long ato; /* ack timeout */
volatile unsigned long lrcvtime; /* jiffies at last data rcv */
volatile unsigned long idletime; /* jiffies at last rcv */
unsigned int bytes_rcv;
/*
* mss is min(mtu, max_window)
*/
unsigned short mtu; /* mss negotiated in the syn's */
volatile unsigned short mss; /* current eff. mss - can change */
volatile unsigned short user_mss; /* mss requested by user in ioctl */
volatile unsigned short max_window;
unsigned long window_clamp;
unsigned int ssthresh;
unsigned short num;
volatile unsigned short cong_window;
volatile unsigned short cong_count;
volatile unsigned short packets_out;
volatile unsigned short shutdown;
/* Round-trip estimate, its deviation, and the derived retransmit timeout. */
volatile unsigned long rtt;
volatile unsigned long mdev;
volatile unsigned long rto;

/*
* currently backoff isn't used, but I'm maintaining it in case
* we want to go back to a backoff formula that needs it
*/
volatile unsigned short backoff;
int err, err_soft; /* Soft holds errors that don't
cause failure but are the cause
of a persistent failure not just
'timed out' */
unsigned char protocol;
volatile unsigned char state;
unsigned char ack_backlog;
unsigned char max_ack_backlog;
unsigned char priority;
unsigned char debug;
int rcvbuf;
int sndbuf;
unsigned short type;
unsigned char localroute; /* Route locally only */
#if 0
/*
* This is where all the private (optional) areas that don't
* overlap will eventually live.
*/
union
{
struct unix_opt af_unix;
#ifdef CONFIG_INET
struct inet_packet_opt af_packet;
#ifdef CONFIG_NUTCP
struct tcp_opt af_tcp;
#endif
#endif
} protinfo;
#endif
/*
* IP 'private area' or will be eventually
*/
int ip_ttl; /* TTL setting */
int ip_tos; /* TOS */
// struct tcphdr dummy_th;
struct timer_list keepalive_timer; /* TCP keepalive hack */
struct timer_list retransmit_timer; /* TCP retransmit timer */
struct timer_list delack_timer; /* TCP delayed ack timer */
int ip_xmit_timeout; /* Why the timeout is running */
struct rtable *ip_route_cache; /* Cached output route */
unsigned char ip_hdrincl; /* Include headers ? */
#ifdef CONFIG_IP_MULTICAST
int ip_mc_ttl; /* Multicasting TTL */
int ip_mc_loop; /* Loopback */
char ip_mc_name[MAX_ADDR_LEN];/* Multicast device name */
struct ip_mc_socklist *ip_mc_list; /* Group array */
#endif

/*
* This part is used for the timeout functions (timer.c).
*/
int timeout; /* What are we waiting for? */
struct timer_list timer; /* This is the TIME_WAIT/receive timer
* when we are doing IP
*/
// struct timeval stamp;

/*
* Identd
*/
struct socket *socket;
/*
* Callbacks
*/
void (*state_change)(struct sock *sk);
void (*data_ready)(struct sock *sk,int bytes);
void (*write_space)(struct sock *sk);
void (*error_report)(struct sock *sk);
};
 
__END_DECLS
 
#endif
/shark/trunk/drivers/net/include/stdio.h
0,0 → 1,6
#ifndef __STDIO__
#define __STDIO__
 
#include <linux/compatib.h>
 
#endif
/shark/trunk/drivers/net/3c509.c
0,0 → 1,954
/* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */
/*
Written 1993-1998 by Donald Becker.
 
Copyright 1994-1998 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU Public License,
incorporated herein by reference.
 
This driver is for the 3Com EtherLinkIII series.
 
The author may be reached as becker@cesdis.gsfc.nasa.gov or
C/O Center of Excellence in Space Data and Information Sciences
Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
 
Known limitations:
Because of the way 3c509 ISA detection works it's difficult to predict
a priori which of several ISA-mode cards will be detected first.
 
This driver does not use predictive interrupt mode, resulting in higher
packet latency but lower overhead. If interrupts are disabled for an
unusually long time it could also result in missed packets, but in
practice this rarely happens.
 
 
FIXES:
Alan Cox: Removed the 'Unexpected interrupt' bug.
Michael Meskes: Upgraded to Donald Becker's version 1.07.
Alan Cox: Increased the eeprom delay. Regardless of
what the docs say some people definitely
get problems with lower (but in card spec)
delays
v1.10 4/21/97 Fixed module code so that multiple cards may be detected,
other cleanups. -djb
Andrea Arcangeli: Upgraded to Donald Becker's version 1.12.
Rick Payne: Fixed SMP race condition
v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb
v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb
v1.15 1/31/98 Faster recovery for Tx errors. -djb
v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb
*/
 
static char *version = "3c509.c:1.16 (2.2) 2/3/98 becker@cesdis.gsfc.nasa.gov.\n";
/* A few values that may be tweaked. */
 
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (400*HZ/1000)
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 10;
 
#include <linux/config.h>
#include <linux/module.h>
 
#include <linux/mca.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h> /* for udelay() */
 
#include <asm/spinlock.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
 
//#define EL3_DEBUG debug
#ifdef EL3_DEBUG
int el3_debug = EL3_DEBUG;
#else
int el3_debug = 0;
#endif
 
/* To minimize the size of the driver source I only define operating
constants if they are used several times. You'll need the manual
anyway if you want to understand driver details. */
/* Offsets from base I/O address. */
#define EL3_DATA 0x00
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e
#define EEPROM_READ 0x80
 
#define EL3_IO_EXTENT 16
 
#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
 
/* The top five bits written to EL3_CMD are a command, the lower
11 bits are the parameter, if applicable. */
enum c509cmd {
TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11,
TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11,
StatsDisable = 22<<11, StopCoax = 23<<11,};
 
enum c509status {
IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, };
 
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
 
/* Register window 1 offsets, the window used in normal operation. */
#define TX_FIFO 0x00
#define RX_FIFO 0x00
#define RX_STATUS 0x08
#define TX_STATUS 0x0B
#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
 
#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
#define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */
#define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */
 
/*
* Must be a power of two (we use a binary and in the
* circular queue)
*/
#define SKB_QUEUE_SIZE 64
 
/*
 * Per-adapter private data, hung off dev->priv (allocated in el3_probe).
 * SKB_QUEUE_SIZE is a power of two so circular-queue indexing can use a
 * binary AND (see the comment above its definition).
 */
struct el3_private {
struct enet_statistics stats; /* standard interface statistics */
struct device *next_dev; /* chains probed devices from el3_root_dev */
spinlock_t lock; /* SMP lock (added in the v1.12 race fix) */
/* skb send-queue */
int head, size; /* presumably circular head index and fill count -- verify in xmit path */
struct sk_buff *queue[SKB_QUEUE_SIZE];
char mca_slot; /* MCA slot number, or -1 for ISA/EISA cards */
};
static int id_port = 0x110; /* Start with 0x110 to avoid new sound cards.*/
static struct device *el3_root_dev = NULL;
 
static ushort id_read_eeprom(int index);
static ushort read_eeprom(int ioaddr, int index);
static int el3_open(struct device *dev);
static int el3_start_xmit(struct sk_buff *skb, struct device *dev);
static void el3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void update_stats(struct device *dev);
static struct enet_statistics *el3_get_stats(struct device *dev);
static int el3_rx(struct device *dev);
static int el3_close(struct device *dev);
static void set_multicast_list(struct device *dev);
 
#ifdef CONFIG_MCA
/* Name/ID pair describing one 3c529 MCA adapter variant. */
struct el3_mca_adapters_struct {
char* name; /* human-readable name printed on detection */
int id; /* MCA adapter ID matched by mca_find_unused_adapter() */
};

/* Table of supported 3c529 variants; terminated by a NULL-name entry. */
struct el3_mca_adapters_struct el3_mca_adapters[] = {
{ "3Com 3c529 EtherLink III (10base2)", 0x627c },
{ "3Com 3c529 EtherLink III (10baseT)", 0x627d },
{ "3Com 3c529 EtherLink III (test mode)", 0x62db },
{ "3Com 3c529 EtherLink III (TP or coax)", 0x62f6 },
{ "3Com 3c529 EtherLink III (TP)", 0x62f7 },
{ NULL, 0 },
};
#endif
 
int el3_probe(struct device *dev)
{
short lrs_state = 0xff, i;
int ioaddr, irq, if_port;
u16 phys_addr[3];
static int current_tag = 0;
int mca_slot = -1;
 
/* First check all slots of the EISA bus. The next slot address to
probe is kept in 'eisa_addr' to support multiple probe() calls. */
if (EISA_bus) {
static int eisa_addr = 0x1000;
while (eisa_addr < 0x9000) {
ioaddr = eisa_addr;
eisa_addr += 0x1000;
 
/* Check the standard EISA ID register for an encoded '3Com'. */
if (inw(ioaddr + 0xC80) != 0x6d50)
continue;
 
/* Change the register set to the configuration window 0. */
outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);
 
irq = inw(ioaddr + WN0_IRQ) >> 12;
if_port = inw(ioaddr + 6)>>14;
for (i = 0; i < 3; i++)
phys_addr[i] = htons(read_eeprom(ioaddr, i));
 
/* Restore the "Product ID" to the EEPROM read register. */
read_eeprom(ioaddr, 3);
 
/* Was the EISA code an add-on hack? Nahhhhh... */
goto found;
}
}
 
#ifdef CONFIG_MCA
/* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, heavily
* modified by Chris Beauregard (cpbeaure@csclub.uwaterloo.ca)
* to support standard MCA probing.
*
* redone for multi-card detection by ZP Gu (zpg@castle.net)
* now works as a module
*/
 
if( MCA_bus ) {
int slot, j;
u_char pos4, pos5;
 
for( j = 0; el3_mca_adapters[j].name != NULL; j ++ ) {
slot = 0;
while( slot != MCA_NOTFOUND ) {
slot = mca_find_unused_adapter(
el3_mca_adapters[j].id, slot );
if( slot == MCA_NOTFOUND ) break;
 
/* if we get this far, an adapter has been
* detected and is enabled
*/
 
pos4 = mca_read_stored_pos( slot, 4 );
pos5 = mca_read_stored_pos( slot, 5 );
 
ioaddr = ((short)((pos4&0xfc)|0x02)) << 8;
irq = pos5 & 0x0f;
 
/* probing for a card at a particular IO/IRQ */
if(dev && ((dev->irq >= 1 && dev->irq != irq) ||
(dev->base_addr >= 1 && dev->base_addr != ioaddr))) {
slot++; /* probing next slot */
continue;
}
 
printk("3c509: found %s at slot %d\n",
el3_mca_adapters[j].name, slot + 1 );
 
/* claim the slot */
mca_set_adapter_name(slot, el3_mca_adapters[j].name);
mca_set_adapter_procfn(slot, NULL, NULL);
mca_mark_as_used(slot);
 
if_port = pos4 & 0x03;
if (el3_debug > 2) {
printk("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port);
}
for (i = 0; i < 3; i++) {
phys_addr[i] = htons(read_eeprom(ioaddr, i));
}
mca_slot = slot;
 
goto found;
}
}
/* if we get here, we didn't find an MCA adapter */
return -ENODEV;
}
#endif
/* Reset the ISA PnP mechanism on 3c509b. */
outb(0x02, 0x279); /* Select PnP config control register. */
outb(0x02, 0xA79); /* Return to WaitForKey state. */
/* Select an open I/O location at 0x1*0 to do contention select. */
for ( ; id_port < 0x200; id_port += 0x10) {
if (check_region(id_port, 1))
continue;
outb(0x00, id_port);
outb(0xff, id_port);
if (inb(id_port) & 0x01)
break;
}
if (id_port >= 0x200) {
/* Rare -- do we really need a warning? */
printk(" WARNING: No I/O port available for 3c509 activation.\n");
return -ENODEV;
}
/* Next check for all ISA bus boards by sending the ID sequence to the
ID_PORT. We find cards past the first by setting the 'current_tag'
on cards as they are found. Cards with their tag set will not
respond to subsequent ID sequences. */
 
outb(0x00, id_port);
outb(0x00, id_port);
for(i = 0; i < 255; i++) {
outb(lrs_state, id_port);
lrs_state <<= 1;
lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
}
 
/* For the first probe, clear all board's tag registers. */
if (current_tag == 0)
outb(0xd0, id_port);
else /* Otherwise kill off already-found boards. */
outb(0xd8, id_port);
 
if (id_read_eeprom(7) != 0x6d50) {
return -ENODEV;
}
 
/* Read in EEPROM data, which does contention-select.
Only the lowest address board will stay "on-line".
3Com got the byte order backwards. */
for (i = 0; i < 3; i++) {
phys_addr[i] = htons(id_read_eeprom(i));
}
 
{
unsigned int iobase = id_read_eeprom(8);
if_port = iobase >> 14;
ioaddr = 0x200 + ((iobase & 0x1f) << 4);
}
irq = id_read_eeprom(9) >> 12;
 
if (dev) { /* Set passed-in IRQ or I/O Addr. */
if (dev->irq > 1 && dev->irq < 16)
irq = dev->irq;
 
if (dev->base_addr) {
if (dev->mem_end == 0x3c509 /* Magic key */
&& dev->base_addr >= 0x200 && dev->base_addr <= 0x3e0)
ioaddr = dev->base_addr & 0x3f0;
else if (dev->base_addr != ioaddr)
return -ENODEV;
}
}
 
/* Set the adaptor tag so that the next card can be found. */
outb(0xd0 + ++current_tag, id_port);
 
/* Activate the adaptor at the EEPROM location. */
outb((ioaddr >> 4) | 0xe0, id_port);
 
EL3WINDOW(0);
if (inw(ioaddr) != 0x6d50)
return -ENODEV;
 
/* Free the interrupt so that some other card can use it. */
outw(0x0f00, ioaddr + WN0_IRQ);
found:
if (dev == NULL) {
dev = init_etherdev(dev, sizeof(struct el3_private));
}
memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
dev->base_addr = ioaddr;
dev->irq = irq;
dev->if_port = (dev->mem_start & 0x1f) ? dev->mem_start & 3 : if_port;
 
request_region(dev->base_addr, EL3_IO_EXTENT, "3c509");
 
{
const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
printk("%s: 3c509 at %#3.3lx tag %d, %s port, address ",
dev->name, dev->base_addr, current_tag, if_names[dev->if_port]);
}
 
/* Read in the station address. */
for (i = 0; i < 6; i++)
printk(" %2.2x", dev->dev_addr[i]);
printk(", IRQ %d.\n", dev->irq);
 
/* Make up a EL3-specific-data structure. */
if (dev->priv == NULL)
dev->priv = kmalloc(sizeof(struct el3_private), GFP_KERNEL);
if (dev->priv == NULL)
return -ENOMEM;
memset(dev->priv, 0, sizeof(struct el3_private));
((struct el3_private *)dev->priv)->mca_slot = mca_slot;
((struct el3_private *)dev->priv)->next_dev = el3_root_dev;
el3_root_dev = dev;
 
if (el3_debug > 0)
printk(version);
 
/* The EL3-specific entries in the device structure. */
dev->open = &el3_open;
dev->hard_start_xmit = &el3_start_xmit;
dev->stop = &el3_close;
dev->get_stats = &el3_get_stats;
dev->set_multicast_list = &set_multicast_list;
 
/* Fill in the generic fields of the device structure. */
ether_setup(dev);
return 0;
}
 
/* Read a word from the EEPROM using the regular EEPROM access register.
Assume that we are in register window zero.
*/
/* Fetch one 16-bit word from the EEPROM through the normal access
   register pair. The caller must already have selected register
   window 0. */
static ushort read_eeprom(int ioaddr, int index)
{
	int ee_cmd  = ioaddr + 10;	/* EEPROM command register */
	int ee_data = ioaddr + 12;	/* EEPROM data register */

	outw(EEPROM_READ + index, ee_cmd);
	/* The read needs at least 162 us to complete; 500 us is safe. */
	udelay (500);
	return inw(ee_data);
}
 
/* Read a word from the EEPROM when in the ISA ID probe state. */
/* Fetch one 16-bit EEPROM word while the board sits in the ISA ID
   probe state: the value is clocked out of id_port one bit at a time,
   most significant bit first. */
static ushort id_read_eeprom(int index)
{
	int word = 0;
	int bit;

	/* Issue the read command; allow at least 162 us for it to finish
	   even on an extra-fast 16 MHz bus. */
	outb(EEPROM_READ + index, id_port);
	udelay (500);

	/* Shift in the 16 data bits. */
	for (bit = 15; bit >= 0; bit--)
		word = (word << 1) + (inb(id_port) & 0x01);

	if (el3_debug > 3)
		printk(" 3c509 EEPROM word %d %#4.4x.\n", index, word);

	return word;
}
 
 
/*
 * Bring the interface up: reset both FIFOs, claim the IRQ, program the
 * station address and interrupt masks, clear the statistics counters,
 * and enable the receiver and transmitter.
 * Returns 0 on success, -EAGAIN if the IRQ could not be claimed.
 */
static int
el3_open(struct device *dev)
{
	int ioaddr = dev->base_addr;
	int i;

	/* Reset the transmit and receive engines and mask every status
	   source while we set things up. */
	outw(TxReset, ioaddr + EL3_CMD);
	outw(RxReset, ioaddr + EL3_CMD);
	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	/* Set the spinlock before grabbing IRQ! */
	((struct el3_private *)dev->priv)->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;

	if (request_irq(dev->irq, &el3_interrupt, 0, dev->name, dev)) {
		return -EAGAIN;
	}

	EL3WINDOW(0);
	if (el3_debug > 3)
		printk("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
			   dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));

	/* Activate board: this is probably unnecessary. */
	outw(0x0001, ioaddr + 4);

	/* Set the IRQ line. */
	outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ);

	/* Set the station address in window 2 each time opened. */
	EL3WINDOW(2);

	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + i);

	if (dev->if_port == 3)
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		outw(StartCoax, ioaddr + EL3_CMD);
	else if (dev->if_port == 0) {
		/* 10baseT interface, enabled link beat and jabber check. */
		EL3WINDOW(4);
		outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
	}

	/* Switch to the stats window, and clear all stats by reading
	   (the counters are clear-on-read; the values are discarded). */
	outw(StatsDisable, ioaddr + EL3_CMD);
	EL3WINDOW(6);
	for (i = 0; i < 9; i++)
		inb(ioaddr + i);
	inw(ioaddr + 10);
	inw(ioaddr + 12);

	/* Switch to register set 1 for normal use. */
	EL3WINDOW(1);

	/* Accept broadcast and our physical address only. */
	outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	dev->interrupt = 0;
	dev->tbusy = 0;
	dev->start = 1;

	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
		 ioaddr + EL3_CMD);
	outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull,
		 ioaddr + EL3_CMD);

	if (el3_debug > 3)
		printk("%s: Opened 3c509 IRQ %d status %4.4x.\n",
			   dev->name, dev->irq, inw(ioaddr + EL3_STATUS));

	MOD_INC_USE_COUNT;
	return 0;		/* Always succeed */
}
 
/*
 * Queue one packet for transmission. Also performs the Tx watchdog
 * inline (this driver predates the generic netdev watchdog): if tbusy
 * has been set for more than TX_TIMEOUT jiffies the transmitter is
 * reset and re-enabled. Returns 0 once the skb is consumed, or 1 to
 * ask the stack to requeue the packet.
 */
static int
el3_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct el3_private *lp = (struct el3_private *)dev->priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	if (dev->tbusy) {
		int tickssofar = jiffies - dev->trans_start;
		if (tickssofar < TX_TIMEOUT)
			return 1;	/* Not stuck long enough yet -- requeue. */
		printk("%s: transmit timed out, Tx_status %2.2x status %4.4x "
			   "Tx FIFO room %d.\n",
			   dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
			   inw(ioaddr + TX_FREE));
		lp->stats.tx_errors++;
		dev->trans_start = jiffies;
		/* Issue TX_RESET and TX_START commands. */
		outw(TxReset, ioaddr + EL3_CMD);
		outw(TxEnable, ioaddr + EL3_CMD);
		dev->tbusy = 0;
	}

	lp->stats.tx_bytes += skb->len;

	if (el3_debug > 4) {
		printk("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
			   dev->name, skb->len, inw(ioaddr + EL3_STATUS));
	}
#if 0
#ifndef final_version
	{	/* Error-checking code, delete someday. */
		ushort status = inw(ioaddr + EL3_STATUS);
		if (status & 0x0001 		/* IRQ line active, missed one. */
			&& inw(ioaddr + EL3_STATUS) & 1) { 	/* Make sure. */
			printk("%s: Missed interrupt, status then %04x now %04x"
				   " Tx %2.2x Rx %4.4x.\n", dev->name, status,
				   inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
				   inw(ioaddr + RX_STATUS));
			/* Fake interrupt trigger by masking, acknowledge interrupts. */
			outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
			outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
				 ioaddr + EL3_CMD);
			outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
		}
	}
#endif
#endif
	/* Avoid timer-based retransmission conflicts. */
	if (test_and_set_bit(0, (void*)&dev->tbusy) != 0)
		printk("%s: Transmitter access conflict.\n", dev->name);
	else {
		/*
		 * We lock the driver against other processors. Note
		 * we don't need to lock versus the IRQ as we suspended
		 * that. This means that we lose the ability to take
		 * an RX during a TX upload. That sucks a bit with SMP
		 * on an original 3c509 (2K buffer)
		 *
		 * Using disable_irq stops us crapping on other
		 * time sensitive devices.
		 */
#ifdef __SMP__
		disable_irq_nosync(dev->irq);
		spin_lock(&lp->lock);
#endif
		/* Put out the doubleword header... */
		outw(skb->len, ioaddr + TX_FIFO);
		outw(0x00, ioaddr + TX_FIFO);
		/* ... and the packet rounded to a doubleword. */
#ifdef __powerpc__
		outsl_unswapped(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
#else
		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
#endif

		dev->trans_start = jiffies;
		if (inw(ioaddr + TX_FREE) > 1536) {
			dev->tbusy = 0;
		} else
			/* Interrupt us when the FIFO has room for max-sized packet. */
			outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
#ifdef __SMP__
		spin_unlock(&lp->lock);
		enable_irq(dev->irq);
#endif
	}

	dev_kfree_skb (skb);

	/* Clear the Tx status stack: pop at most four entries; 0x38 flags
	   an aborted frame, 0x30 needs a Tx reset, 0x3C a Tx re-enable. */
	{
		short tx_status;
		int i = 4;

		while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
			if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
			if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
			if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
			outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
		}
	}
	return 0;
}
 
/* The EL3 interrupt handler. Services Rx completions, Tx-room events,
   Tx errors, statistics overflow and adapter failure, bounded to
   max_interrupt_work events per invocation. */
static void
el3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct device *dev = (struct device *)dev_id;
	struct el3_private *lp;
	int ioaddr, status;
	int i = max_interrupt_work;	/* Work budget for this IRQ. */

	if (dev == NULL) {
		printk ("el3_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}

	lp = (struct el3_private *)dev->priv;
	spin_lock(&lp->lock);

	if (dev->interrupt)
		printk("%s: Re-entering the interrupt handler.\n", dev->name);
	dev->interrupt = 1;

	ioaddr = dev->base_addr;

	if (el3_debug > 4) {
		status = inw(ioaddr + EL3_STATUS);
		printk("%s: interrupt, status %4.4x.\n", dev->name, status);
	}

	/* Service events until the interrupt latch clears or the work
	   budget runs out. */
	while ((status = inw(ioaddr + EL3_STATUS)) &
		   (IntLatch | RxComplete | StatsFull)) {

		if (status & RxComplete)
			el3_rx(dev);

		if (status & TxAvailable) {
			if (el3_debug > 5)
				printk(" TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			dev->tbusy = 0;
			mark_bh(NET_BH);	/* Let the stack feed us more. */
		}
		if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) {
			/* Handle all uncommon interrupts. */
			if (status & StatsFull)		/* Empty statistics. */
				update_stats(dev);
			if (status & RxEarly) {		/* Rx early is unused. */
				el3_rx(dev);
				outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
			}
			if (status & TxComplete) {	/* Really Tx error. */
				/* NOTE: these locals deliberately shadow the outer
				   'lp' and 'i'. */
				struct el3_private *lp = (struct el3_private *)dev->priv;
				short tx_status;
				int i = 4;

				/* Drain the Tx status stack (see el3_start_xmit). */
				while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
					if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
					if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
					if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
					outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
				}
			}
			if (status & AdapterFailure) {
				/* Adapter failure requires Rx reset and reinit. */
				outw(RxReset, ioaddr + EL3_CMD);
				/* Set the Rx filter to the current state. */
				outw(SetRxFilter | RxStation | RxBroadcast
					 | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0)
					 | (dev->flags & IFF_PROMISC ? RxProm : 0),
					 ioaddr + EL3_CMD);
				outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
				outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
			}
		}

		if (--i < 0) {
			printk("%s: Infinite loop in interrupt, status %4.4x.\n",
				   dev->name, status);
			/* Clear all interrupts. */
			outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
			break;
		}
		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */
	}

	if (el3_debug > 4) {
		printk("%s: exiting interrupt, status %4.4x.\n", dev->name,
			   inw(ioaddr + EL3_STATUS));
	}
	spin_unlock(&lp->lock);
	dev->interrupt = 0;
	return;
}
 
 
/* Return the interface statistics, refreshing the counters from the
   board first. Fast enough that plain irqsave locking suffices. */
static struct enet_statistics *
el3_get_stats(struct device *dev)
{
	struct el3_private *priv = (struct el3_private *)dev->priv;
	unsigned long irq_flags;

	spin_lock_irqsave(&priv->lock, irq_flags);
	update_stats(dev);
	spin_unlock_irqrestore(&priv->lock, irq_flags);

	return &priv->stats;
}
 
/* Update statistics. We change to register window 6, so this should be run
single-threaded if the device is active. This is expected to be a rare
operation, and it's simpler for the rest of the driver to assume that
window 1 is always valid rather than use a special window-state variable.
*/
/* Harvest the board's clear-on-read statistics counters into lp->stats.
   Temporarily selects register window 6, then restores window 1 and
   re-enables statistics. Caller must hold lp->lock (see el3_get_stats)
   or otherwise be single-threaded with respect to the device. */
static void update_stats(struct device *dev)
{
	struct el3_private *lp = (struct el3_private *)dev->priv;
	int ioaddr = dev->base_addr;

	if (el3_debug > 5)
		printk("   Updating the statistics.\n");
	/* Turn off statistics updates while reading. */
	outw(StatsDisable, ioaddr + EL3_CMD);
	/* Switch to the stats window, and read everything. Counters we
	   don't track must still be read to clear them. */
	EL3WINDOW(6);
	lp->stats.tx_carrier_errors 	+= inb(ioaddr + 0);
	lp->stats.tx_heartbeat_errors	+= inb(ioaddr + 1);
	/* Multiple collisions. */	   inb(ioaddr + 2);
	lp->stats.collisions		+= inb(ioaddr + 3);
	lp->stats.tx_window_errors	+= inb(ioaddr + 4);
	lp->stats.rx_fifo_errors	+= inb(ioaddr + 5);
	lp->stats.tx_packets		+= inb(ioaddr + 6);
	/* Rx packets	*/		   inb(ioaddr + 7);
	/* Tx deferrals */		   inb(ioaddr + 8);
	inw(ioaddr + 10);	/* Total Rx and Tx octets. */
	inw(ioaddr + 12);

	/* Back to window 1, and turn statistics back on. */
	EL3WINDOW(1);
	outw(StatsEnable, ioaddr + EL3_CMD);
	return;
}
 
/*
 * Drain the receive FIFO: for each pending packet either account the
 * error and discard it, or copy it into a fresh sk_buff and hand it to
 * the network stack via netif_rx(). Always returns 0.
 */
static int
el3_rx(struct device *dev)
{
	struct el3_private *lp = (struct el3_private *)dev->priv;
	int ioaddr = dev->base_addr;
	short rx_status;

	if (el3_debug > 5)
		printk("   In rx_packet(), status %4.4x, rx_status %4.4x.\n",
			   inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
	/* A positive RX_STATUS word means a packet is waiting. */
	while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			short error = rx_status & 0x3800;

			outw(RxDiscard, ioaddr + EL3_CMD);
			lp->stats.rx_errors++;
			/* Classify the error from bits 11-13 of rx_status. */
			switch (error) {
			case 0x0000:		lp->stats.rx_over_errors++; break;
			case 0x0800:		lp->stats.rx_length_errors++; break;
			case 0x1000:		lp->stats.rx_frame_errors++; break;
			case 0x1800:		lp->stats.rx_length_errors++; break;
			case 0x2000:		lp->stats.rx_frame_errors++; break;
			case 0x2800:		lp->stats.rx_crc_errors++; break;
			}
		} else {
			short pkt_len = rx_status & 0x7ff;	/* Low 11 bits = length. */
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len+5);
			lp->stats.rx_bytes += pkt_len;
			if (el3_debug > 4)
				printk("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);     /* Align IP on 16 byte */

				/* 'skb->data' points to the start of sk_buff data area. */
#ifdef __powerpc__
				insl_unswapped(ioaddr+RX_FIFO, skb_put(skb,pkt_len),
							   (pkt_len + 3) >> 2);
#else
				insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
					 (pkt_len + 3) >> 2);
#endif

				outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb,dev);
				netif_rx(skb);
				lp->stats.rx_packets++;
				continue;
			}
			/* Allocation failed: drop the frame. */
			outw(RxDiscard, ioaddr + EL3_CMD);
			lp->stats.rx_dropped++;
			if (el3_debug)
				printk("%s: Couldn't allocate a sk_buff of size %d.\n",
					   dev->name, pkt_len);
		}
		inw(ioaddr + EL3_STATUS); 				/* Delay. */
		/* Bit 0x1000 stays set while the discard is in progress. */
		while (inw(ioaddr + EL3_STATUS) & 0x1000)
			printk(KERN_DEBUG "	Waiting for 3c509 to discard packet, status %x.\n",
				   inw(ioaddr + EL3_STATUS) );
	}

	return 0;
}
 
/*
* Set or clear the multicast filter for this adaptor.
*/
/*
 * Program the board's Rx filter to match the device flags:
 * promiscuous, all-multicast (or any multicast addresses configured),
 * or the default station + broadcast filter.
 */
static void
set_multicast_list(struct device *dev)
{
	struct el3_private *priv = (struct el3_private *)dev->priv;
	int ioaddr = dev->base_addr;
	unsigned long irq_flags;
	int rx_filter;

	if (el3_debug > 1) {
		static int old = 0;
		/* Only log when the multicast list size actually changes. */
		if (old != dev->mc_count) {
			old = dev->mc_count;
			printk("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
		}
	}

	spin_lock_irqsave(&priv->lock, irq_flags);

	/* Choose the widest filter the current flags demand. */
	if (dev->flags&IFF_PROMISC)
		rx_filter = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
	else if (dev->mc_count || (dev->flags&IFF_ALLMULTI))
		rx_filter = SetRxFilter | RxStation | RxMulticast | RxBroadcast;
	else
		rx_filter = SetRxFilter | RxStation | RxBroadcast;

	outw(rx_filter, ioaddr + EL3_CMD);

	spin_unlock_irqrestore(&priv->lock, irq_flags);
}
 
/*
 * Shut the interface down: disable Rx/Tx, power down the transceiver,
 * release the IRQ, and take a final statistics snapshot.
 * Always returns 0.
 */
static int
el3_close(struct device *dev)
{
	int ioaddr = dev->base_addr;

	if (el3_debug > 2)
		printk("%s: Shutting down ethercard.\n", dev->name);

	dev->tbusy = 1;
	dev->start = 0;

	/* Turn off statistics ASAP.  We update lp->stats below. */
	outw(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	outw(RxDisable, ioaddr + EL3_CMD);
	outw(TxDisable, ioaddr + EL3_CMD);

	if (dev->if_port == 3)
		/* Turn off thinnet power.  Green! */
		outw(StopCoax, ioaddr + EL3_CMD);
	else if (dev->if_port == 0) {
		/* Disable link beat and jabber, if_port may change ere next open(). */
		EL3WINDOW(4);
		outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
	}

	free_irq(dev->irq, dev);
	/* Switching back to window 0 disables the IRQ. */
	EL3WINDOW(0);
	/* But we explicitly zero the IRQ line select anyway. */
	outw(0x0f00, ioaddr + WN0_IRQ);

	/* Final counter harvest before the board goes quiet. */
	update_stats(dev);
	MOD_DEC_USE_COUNT;
	return 0;
}
 
#ifdef MODULE
/* Parameters that may be passed into the module. */
static int debug = -1;
static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int xcvr[] = {-1, -1, -1, -1, -1, -1, -1, -1};
 
MODULE_PARM(debug,"i");
MODULE_PARM(irq,"1-8i");
MODULE_PARM(xcvr,"1-8i");
 
/*
 * Module entry point: probe for all 3c509 boards, applying the
 * per-card irq[] / xcvr[] module-parameter overrides.
 * Returns 0 if at least one card was found, -ENODEV otherwise.
 */
int
init_module(void)
{
	int el3_cards = 0;

	if (debug >= 0)
		el3_debug = debug;

	el3_root_dev = NULL;
	while (el3_probe(0) == 0) {
		/* Only the first 8 cards can take parameter overrides:
		   irq[] and xcvr[] have 8 entries, so indexing beyond
		   that would read past the arrays. */
		if (el3_cards < (int)(sizeof(irq) / sizeof(irq[0]))) {
			if (irq[el3_cards] > 1)
				el3_root_dev->irq = irq[el3_cards];
			if (xcvr[el3_cards] >= 0)
				el3_root_dev->if_port = xcvr[el3_cards];
		}
		el3_cards++;
	}

	return el3_cards ? 0 : -ENODEV;
}
 
/*
 * Module exit: walk the driver's device list, unregistering and
 * freeing each device and releasing its I/O region (and MCA slot,
 * when configured).
 */
void
cleanup_module(void)
{
	struct device *next_dev;

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	while (el3_root_dev) {
		struct el3_private *lp = (struct el3_private *)el3_root_dev->priv;
#ifdef CONFIG_MCA
		/* -1 means this device was not an MCA slot (ISA probe). */
		if(lp->mca_slot!=-1)
			mca_mark_as_unused(lp->mca_slot);
#endif
		/* Grab the next pointer before freeing the current device. */
		next_dev = lp->next_dev;
		unregister_netdev(el3_root_dev);
		release_region(el3_root_dev->base_addr, EL3_IO_EXTENT);
		kfree(el3_root_dev);
		el3_root_dev = next_dev;
	}
}
#endif /* MODULE */
 
/*
* Local variables:
* compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c509.c"
* version-control: t
* kept-new-versions: 5
* tab-width: 4
* End:
*/
/shark/trunk/drivers/net/8390.c
0,0 → 1,1141
/* 8390.c: A general NS8390 ethernet driver core for linux. */
/*
Written 1992-94 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
 
This software may be used and distributed according to the terms
of the GNU Public License, incorporated herein by reference.
 
The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
Center of Excellence in Space Data and Information Sciences
Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
This is the chip-specific code for many 8390-based ethernet adaptors.
This is not a complete driver, it must be combined with board-specific
code such as ne.c, wd.c, 3c503.c, etc.
 
Seeing how at least eight drivers use this code, (not counting the
PCMCIA ones either) it is easy to break some card by what seems like
a simple innocent change. Please contact me or Donald if you think
you have found something that needs changing. -- PG
 
 
Changelog:
 
Paul Gortmaker : remove set_bit lock, other cleanups.
Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to
ei_block_input() for eth_io_copy_and_sum().
Paul Gortmaker : exchange static int ei_pingpong for a #define,
also add better Tx error handling.
Paul Gortmaker : rewrite Rx overrun handling as per NS specs.
Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
Paul Gortmaker : tweak ANK's above multicast changes a bit.
Paul Gortmaker : update packet statistics for v2.1.x
Alan Cox : support arbitrary stupid port mappings on the
68K Macintosh. Support >16bit I/O spaces
Paul Gortmaker : add kmod support for auto-loading of the 8390
module by all drivers that require it.
Alan Cox : Spinlocking work, added 'BUG_83C690'
 
Sources:
The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
 
*/
 
static const char *version =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/init.h>
 
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
 
#define NS8390_CORE
#include "8390.h"
 
#define BUG_83C690
 
/* These are the operational function interfaces to board-specific
routines.
void reset_8390(struct device *dev)
Resets the board associated with DEV, including a hardware reset of
the 8390. This is only called when there is a transmit timeout, and
it is always followed by 8390_init().
void block_output(struct device *dev, int count, const unsigned char *buf,
int start_page)
Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The
"page" value uses the 8390's 256-byte pages.
void get_8390_hdr(struct device *dev, struct e8390_hdr *hdr, int ring_page)
Read the 4 byte, page aligned 8390 header. *If* there is a
subsequent read, it will be of the rest of the packet.
void block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
Read COUNT bytes from the packet buffer into the skb data area. Start
reading from RING_OFFSET, the address as the 8390 sees it. This will always
follow the read of the 8390 header.
*/
#define ei_reset_8390 (ei_local->reset_8390)
#define ei_block_output (ei_local->block_output)
#define ei_block_input (ei_local->block_input)
#define ei_get_8390_hdr (ei_local->get_8390_hdr)
 
/* use 0 for production, 1 for verification, >2 for debug */
//#define VERBOSE_ERROR_DUMP
#ifndef ei_debug
int ei_debug = 0;
#endif
 
/* Index to functions. */
static void ei_tx_intr(struct device *dev);
static void ei_tx_err(struct device *dev);
static void ei_receive(struct device *dev);
static void ei_rx_overrun(struct device *dev);
 
/* Routines generic to NS8390-based boards. */
static void NS8390_trigger_send(struct device *dev, unsigned int length,
int start_page);
static void set_multicast_list(struct device *dev);
static void do_set_multicast_list(struct device *dev);
 
/*
* SMP and the 8390 setup.
*
* The 8390 isnt exactly designed to be multithreaded on RX/TX. There is
* a page register that controls bank and packet buffer access. We guard
* this with ei_local->page_lock. Nobody should assume or set the page other
* than zero when the lock is not held. Lock holders must restore page 0
* before unlocking. Even pure readers must take the lock to protect in
* page 0.
*
* To make life difficult the chip can also be very slow. We therefore can't
* just use spinlocks. For the longer lockups we disable the irq the device
* sits on and hold the lock. We must hold the lock because there is a dual
* processor case other than interrupts (get stats/set multicast list in
* parallel with each other and transmit).
*
* Note: in theory we can just disable the irq on the card _but_ there is
* a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs"
* enter lock, take the queued irq. So we waddle instead of flying.
*
* Finally by special arrangement for the purpose of being generally
* annoying the transmit function is called bh atomic. That places
* restrictions on the user context callers as disable_irq won't save
* them.
*/
 
/* Open/initialize the board. This routine goes all-out, setting everything
up anew at each open, even though many of these registers should only
need to be set once at boot.
*/
/* Open/initialize an 8390-based board: full chip init under the page
   lock, then mark the device started. Returns 0 on success, -ENXIO if
   the private area was never set up. */
int ei_open(struct device *dev)
{
	unsigned long flags;
	struct ei_device *ei_local = (struct ei_device *) dev->priv;

	/* This can't happen unless somebody forgot to call ethdev_init(). */
	if (ei_local == NULL)
	{
		printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name);
		return -ENXIO;
	}

	/*
	 *	Grab the page lock so we own the register set, then call
	 *	the init function.
	 */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	NS8390_init(dev, 1);
	/* Set the flag before we drop the lock, That way the IRQ arrives
	   after its set and we get no silly warnings */
	dev->start = 1;
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
	/* Allow the interrupt handler to run again. */
	ei_local->irqlock = 0;
	return 0;
}
 
/* Opposite of above. Only used when "ifconfig <devname> down" is done. */
/* Opposite of ei_open(). Called on "ifconfig <devname> down": stop the
   chip under the page lock and mark the device not-started. */
int ei_close(struct device *dev)
{
	struct ei_device *ei = (struct ei_device *) dev->priv;
	unsigned long lock_flags;

	/* The whole shutdown runs with the page lock held. */
	spin_lock_irqsave(&ei->page_lock, lock_flags);
	NS8390_init(dev, 0);
	spin_unlock_irqrestore(&ei->page_lock, lock_flags);

	dev->start = 0;
	return 0;
}
 
/*
 * Transmit one packet through an 8390. Handles the Tx watchdog inline,
 * then masks chip interrupts, uploads the frame into one of the card's
 * Tx buffers (two with EI_PINGPONG, one otherwise) and triggers the
 * send. The elaborate lock/irq dance is deliberate -- see the SMP
 * commentary above this function. Returns 0 when the skb is consumed,
 * 1 to ask for a requeue.
 */
static int ei_start_xmit(struct sk_buff *skb, struct device *dev)
{
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) dev->priv;
	int length, send_length, output_page;
	unsigned long flags;

	/*
	 *  We normally shouldn't be called if dev->tbusy is set, but the
	 *  existing code does anyway. If it has been too long since the
	 *  last Tx, we assume the board has died and kick it. We are
	 *  bh_atomic here.
	 */

	if (dev->tbusy)
	{	/* Do timeouts, just like the 8003 driver. */
		int txsr;
		int isr;
		int tickssofar = jiffies - dev->trans_start;

		/*
		 *	Need the page lock. Now see what went wrong. This bit is
		 *	fast.
		 */

		spin_lock_irqsave(&ei_local->page_lock, flags);
		txsr = inb(e8390_base+EN0_TSR);
		/* Give a slow transmitter a little extra grace time unless it
		   already reports a completed packet. */
		if (tickssofar < TX_TIMEOUT ||	(tickssofar < (TX_TIMEOUT+5) && ! (txsr & ENTSR_PTX)))
		{
			spin_unlock_irqrestore(&ei_local->page_lock, flags);
			return 1;
		}

		ei_local->stat.tx_errors++;
		isr = inb(e8390_base+EN0_ISR);
		if (dev->start == 0)
		{
			spin_unlock_irqrestore(&ei_local->page_lock, flags);
			printk(KERN_WARNING "%s: xmit on stopped card\n", dev->name);
			return 1;
		}

		/*
		 * Note that if the Tx posted a TX_ERR interrupt, then the
		 * error will have been handled from the interrupt handler
		 * and not here. Error statistics are handled there as well.
		 */

		printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
			dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
			(isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);

		if (!isr && !ei_local->stat.tx_packets)
		{
			/* The 8390 probably hasn't gotten on the cable yet. */
			ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
		}

		/*
		 *	Play shuffle the locks, a reset on some chips takes a few
		 *	mS. We very rarely hit this point.
		 */

		spin_unlock_irqrestore(&ei_local->page_lock, flags);

		/* Ugly but a reset can be slow, yet must be protected */

		disable_irq_nosync(dev->irq);
		spin_lock(&ei_local->page_lock);

		/* Try to restart the card. Perhaps the user has fixed something. */
		ei_reset_8390(dev);
		NS8390_init(dev, 1);

		spin_unlock(&ei_local->page_lock);
		enable_irq(dev->irq);
		dev->trans_start = jiffies;
	}

	length = skb->len;

	/* Mask interrupts from the ethercard.
	   SMP: We have to grab the lock here otherwise the IRQ handler
	   on another CPU can flip window and race the IRQ mask set. We end
	   up trashing the mcast filter not disabling irqs if we dont lock */

	spin_lock_irqsave(&ei_local->page_lock, flags);
	outb_p(0x00, e8390_base + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	/*
	 *	Slow phase with lock held.
	 */

	disable_irq_nosync(dev->irq);
	spin_lock(&ei_local->page_lock);

	if (dev->interrupt)
	{
		/* The ISR was running when we tried to send: back out,
		   restore the interrupt mask, and drop the packet. */
		printk(KERN_WARNING "%s: Tx request while isr active.\n",dev->name);
		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq(dev->irq);
		ei_local->stat.tx_errors++;
		dev_kfree_skb(skb);
		return 0;
	}
	ei_local->irqlock = 1;

	/* Frames shorter than the Ethernet minimum are padded out. */
	send_length = ETH_ZLEN < length ? length : ETH_ZLEN;

#ifdef EI_PINGPONG

	/*
	 * We have two Tx slots available for use. Find the first free
	 * slot, and then perform some sanity checks. With two Tx bufs,
	 * you get very close to transmitting back-to-back packets. With
	 * only one Tx buf, the transmitter sits idle while you reload the
	 * card, leaving a substantial gap between each transmitted packet.
	 */

	if (ei_local->tx1 == 0)
	{
		output_page = ei_local->tx_start_page;
		ei_local->tx1 = send_length;
		if (ei_debug  &&  ei_local->tx2 > 0)
			printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
				dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
	}
	else if (ei_local->tx2 == 0)
	{
		output_page = ei_local->tx_start_page + TX_1X_PAGES;
		ei_local->tx2 = send_length;
		if (ei_debug  &&  ei_local->tx1 > 0)
			printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
				dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
	}
	else
	{	/* We should never get here. */
		if (ei_debug)
			printk(KERN_DEBUG "%s: No Tx buffers free! irq=%ld tx1=%d tx2=%d last=%d\n",
				dev->name, dev->interrupt, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
		ei_local->irqlock = 0;
		dev->tbusy = 1;
		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		spin_unlock(&ei_local->page_lock);
		enable_irq(dev->irq);
		ei_local->stat.tx_errors++;
		return 1;
	}

	/*
	 * Okay, now upload the packet and trigger a send if the transmitter
	 * isn't already sending. If it is busy, the interrupt handler will
	 * trigger the send later, upon receiving a Tx done interrupt.
	 */

	ei_block_output(dev, length, skb->data, output_page);
	if (! ei_local->txing)
	{
		ei_local->txing = 1;
		NS8390_trigger_send(dev, send_length, output_page);
		dev->trans_start = jiffies;
		/* lasttx records which slot is in flight: -1/-2 mark an
		   in-progress send from slot 1/2 respectively. */
		if (output_page == ei_local->tx_start_page)
		{
			ei_local->tx1 = -1;
			ei_local->lasttx = -1;
		}
		else
		{
			ei_local->tx2 = -1;
			ei_local->lasttx = -2;
		}
	}
	else ei_local->txqueue++;

	/* Only busy when both slots are occupied. */
	dev->tbusy = (ei_local->tx1  &&  ei_local->tx2);

#else	/* EI_PINGPONG */

	/*
	 * Only one Tx buffer in use. You need two Tx bufs to come close to
	 * back-to-back transmits. Expect a 20 -> 25% performance hit on
	 * reasonable hardware if you only use one Tx buffer.
	 */

	ei_block_output(dev, length, skb->data, ei_local->tx_start_page);
	ei_local->txing = 1;
	NS8390_trigger_send(dev, send_length, ei_local->tx_start_page);
	dev->trans_start = jiffies;
	dev->tbusy = 1;

#endif	/* EI_PINGPONG */

	/* Turn 8390 interrupts back on. */
	ei_local->irqlock = 0;
	outb_p(ENISR_ALL, e8390_base + EN0_IMR);

	spin_unlock(&ei_local->page_lock);
	enable_irq(dev->irq);

	dev_kfree_skb (skb);
	ei_local->stat.tx_bytes += send_length;
	return 0;
}
/* The typical workload of the driver:
Handle the ether interface interrupts. */
 
/* The typical workload of the driver: handle the ether interface
   interrupts. Services Rx, Tx, overrun, error-counter and RDC events,
   bounded to MAX_SERVICE events per invocation, all under the page
   lock on register page 0. */
void ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct device *dev = dev_id;
	long e8390_base;
	int interrupts, nr_serviced = 0;
	struct ei_device *ei_local;

	if (dev == NULL)
	{
		printk ("net_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}

	e8390_base = dev->base_addr;
	ei_local = (struct ei_device *) dev->priv;

	/*
	 *	Protect the irq test too.
	 */

	spin_lock(&ei_local->page_lock);

	if (dev->interrupt || ei_local->irqlock)
	{
#if 1 /* This might just be an interrupt for a PCI device sharing this line */
		/* The "irqlock" check is only for testing. */
		printk(ei_local->irqlock
			   ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n"
			   : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n",
			   dev->name, inb_p(e8390_base + EN0_ISR),
			   inb_p(e8390_base + EN0_IMR));
#endif
		spin_unlock(&ei_local->page_lock);
		return;
	}

	dev->interrupt = 1;

	/* Change to page 0 and read the intr status reg. */
	outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
	if (ei_debug > 3)
		printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
			   inb_p(e8390_base + EN0_ISR));

	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
	while ((interrupts = inb_p(e8390_base + EN0_ISR)) != 0
		   && ++nr_serviced < MAX_SERVICE)
	{
		if (dev->start == 0)
		{
			printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
			interrupts = 0;
			break;
		}
		/* Receiver overrun takes priority; then good/errored Rx. */
		if (interrupts & ENISR_OVER)
			ei_rx_overrun(dev);
		else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
		{
			/* Got a good (?) packet. */
			ei_receive(dev);
		}
		/* Push the next to-transmit packet through. */
		if (interrupts & ENISR_TX)
			ei_tx_intr(dev);
		else if (interrupts & ENISR_TX_ERR)
			ei_tx_err(dev);

		if (interrupts & ENISR_COUNTERS)
		{
			/* Fold the chip's tally counters into our statistics. */
			ei_local->stat.rx_frame_errors += inb_p(e8390_base + EN0_COUNTER0);
			ei_local->stat.rx_crc_errors   += inb_p(e8390_base + EN0_COUNTER1);
			ei_local->stat.rx_missed_errors+= inb_p(e8390_base + EN0_COUNTER2);
			outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
		}

		/* Ignore any RDC interrupts that make it back to here. */
		if (interrupts & ENISR_RDC)
		{
			outb_p(ENISR_RDC, e8390_base + EN0_ISR);
		}

		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
	}

	if (interrupts && ei_debug)
	{
		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
		if (nr_serviced >= MAX_SERVICE)
		{
			printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
				   dev->name, interrupts);
			outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
		} else {
			printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
			outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
		}
	}
	dev->interrupt = 0;
	spin_unlock(&ei_local->page_lock);
	return;
}
 
/*
* A transmitter error has happened. Most likely excess collisions (which
* is a fairly normal condition). If the error is one where the Tx will
* have been aborted, we try and send another one right away, instead of
* letting the failed packet sit and collect dust in the Tx buffer. This
* is a much better solution as it avoids kernel based Tx timeouts, and
* an unnecessary card reset.
*
* Called with lock held
*/
 
static void ei_tx_err(struct device *dev)
{
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) dev->priv;
	unsigned char txsr = inb_p(e8390_base + EN0_TSR);

#ifdef VERBOSE_ERROR_DUMP
	/* Decode each TSR error bit for debugging. */
	printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
	if (txsr & ENTSR_ABT)
		printk("excess-collisions ");
	if (txsr & ENTSR_ND)
		printk("non-deferral ");
	if (txsr & ENTSR_CRS)
		printk("lost-carrier ");
	if (txsr & ENTSR_FU)
		printk("FIFO-underrun ");
	if (txsr & ENTSR_CDH)
		printk("lost-heartbeat ");
	printk("\n");
#endif

	/* Acknowledge the TX-error interrupt before any follow-up work. */
	outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR);

	if (txsr & (ENTSR_ABT+ENTSR_FU))
	{
		/* The transmit was aborted (excess collisions or FIFO
		   underrun): push the next queued packet out right away
		   rather than waiting for a kernel Tx timeout. */
		ei_tx_intr(dev);
	}
	else
	{
		/* Non-aborting error: just account for it. */
		ei_local->stat.tx_errors++;
		if (txsr & ENTSR_CRS)
			ei_local->stat.tx_carrier_errors++;
		if (txsr & ENTSR_CDH)
			ei_local->stat.tx_heartbeat_errors++;
		if (txsr & ENTSR_OWC)
			ei_local->stat.tx_window_errors++;
	}
}
 
/* We have finished a transmit: check for errors and then trigger the next
packet to be sent. Called with lock held */
 
static void ei_tx_intr(struct device *dev)
{
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) dev->priv;
	/* Snapshot the transmit status for the stats update below. */
	int status = inb(e8390_base + EN0_TSR);
	outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */

#ifdef EI_PINGPONG

	/*
	 * There are two Tx buffers, see which one finished, and trigger
	 * the send of another one if it exists.
	 *
	 * Slot state convention (tx1/tx2): >0 means a packet of that
	 * length is loaded and waiting, <0 means that slot is the one
	 * currently transmitting, 0 means the slot is free.  lasttx is
	 * only a consistency check (1/2 while sending, 10/20 once idle).
	 */
	ei_local->txqueue--;

	if (ei_local->tx1 < 0)
	{
		/* Slot 1 just completed. */
		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
			printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
				ei_local->name, ei_local->lasttx, ei_local->tx1);
		ei_local->tx1 = 0;
		dev->tbusy = 0;
		if (ei_local->tx2 > 0)
		{
			/* Slot 2 has a packet loaded: fire it immediately.
			   The +6 page offset presumably locates the second
			   slot (matches TX_1X_PAGES in 8390.h) — TODO confirm. */
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
			dev->trans_start = jiffies;
			/* NOTE(review): comma operator below behaves like ';'
			   here, but a semicolon was probably intended. */
			ei_local->tx2 = -1,
			ei_local->lasttx = 2;
		}
		else ei_local->lasttx = 20, ei_local->txing = 0;
	}
	else if (ei_local->tx2 < 0)
	{
		/* Slot 2 just completed. */
		if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
			printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
				ei_local->name, ei_local->lasttx, ei_local->tx2);
		ei_local->tx2 = 0;
		dev->tbusy = 0;
		if (ei_local->tx1 > 0)
		{
			/* Slot 1 has a packet loaded: fire it immediately. */
			ei_local->txing = 1;
			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
			dev->trans_start = jiffies;
			ei_local->tx1 = -1;
			ei_local->lasttx = 1;
		}
		else
			ei_local->lasttx = 10, ei_local->txing = 0;
	}
	else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
			dev->name, ei_local->lasttx);

#else	/* EI_PINGPONG */
	/*
	 * Single Tx buffer: mark it free so another packet can be loaded.
	 */
	ei_local->txing = 0;
	dev->tbusy = 0;
#endif

	/* Minimize Tx latency: update the statistics after we restart TXing. */
	if (status & ENTSR_COL)
		ei_local->stat.collisions++;
	if (status & ENTSR_PTX)
		ei_local->stat.tx_packets++;
	else
	{
		ei_local->stat.tx_errors++;
		if (status & ENTSR_ABT)
		{
			/* Aborted after 16 collisions: account for all of them. */
			ei_local->stat.tx_aborted_errors++;
			ei_local->stat.collisions += 16;
		}
		if (status & ENTSR_CRS)
			ei_local->stat.tx_carrier_errors++;
		if (status & ENTSR_FU)
			ei_local->stat.tx_fifo_errors++;
		if (status & ENTSR_CDH)
			ei_local->stat.tx_heartbeat_errors++;
		if (status & ENTSR_OWC)
			ei_local->stat.tx_window_errors++;
	}
	/* Kick the network bottom half to feed us more packets. */
	mark_bh (NET_BH);
}
 
/* We have a good packet(s), get it/them out of the buffers.
Called with lock held */
 
static void ei_receive(struct device *dev)
{
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) dev->priv;
	unsigned char rxing_page, this_frame, next_frame;
	unsigned short current_offset;
	int rx_pkt_count = 0;
	struct e8390_pkt_hdr rx_frame;
	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;

	/* Bounded to at most 9 packets per call so a busy ring can't
	   monopolize the interrupt handler. */
	while (++rx_pkt_count < 10)
	{
		int pkt_len, pkt_stat;

		/* Get the rx page (incoming packet pointer). */
		outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
		rxing_page = inb_p(e8390_base + EN1_CURPAG);
		outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);

		/* Remove one frame from the ring. Boundary is always a page behind. */
		this_frame = inb_p(e8390_base + EN0_BOUNDARY) + 1;
		if (this_frame >= ei_local->stop_page)
			this_frame = ei_local->rx_start_page;

		/* Someday we'll omit the previous, iff we never get this message.
		   (There is at least one clone claimed to have a problem.) */
		if (ei_debug > 0 && this_frame != ei_local->current_page)
			printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
				dev->name, this_frame, ei_local->current_page);

		if (this_frame == rxing_page)	/* Read all the frames? */
			break;			/* Done for now */

		/* Ring pages are 256 bytes, hence page << 8 is the byte
		   offset of this frame in NIC memory. */
		current_offset = this_frame << 8;
		ei_get_8390_hdr(dev, &rx_frame, this_frame);

		/* Payload length: the on-chip count includes the 4-byte header. */
		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
		pkt_stat = rx_frame.status;

		/* Predict where the next frame should begin: header+data
		   rounded up to whole 256-byte pages. */
		next_frame = this_frame + 1 + ((pkt_len+4)>>8);

		/* Check for bogosity warned by 3c503 book: the status byte is never
		   written. This happened a lot during testing! This code should be
		   cleaned up someday. */
		if (rx_frame.next != next_frame
			&& rx_frame.next != next_frame + 1
			&& rx_frame.next != next_frame - num_rx_pages
			&& rx_frame.next != next_frame + 1 - num_rx_pages) {
			/* Header looks corrupt: discard everything up to the
			   hardware's current page to resynchronize. */
			ei_local->current_page = rxing_page;
			outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
			ei_local->stat.rx_errors++;
			continue;
		}

		if (pkt_len < 60 || pkt_len > 1518)
		{
			/* Outside legal ethernet frame sizes: count and skip. */
			if (ei_debug)
				printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
					dev->name, rx_frame.count, rx_frame.status,
					rx_frame.next);
			ei_local->stat.rx_errors++;
			ei_local->stat.rx_length_errors++;
		}
		else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
		{
			/* Good packet: copy it out of NIC ring memory into a
			   fresh sk_buff and hand it to the network layer. */
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len+2);
			if (skb == NULL)
			{
				if (ei_debug > 1)
					printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
						dev->name, pkt_len);
				ei_local->stat.rx_dropped++;
				/* Out of memory: leave the rest for a later interrupt. */
				break;
			}
			else
			{
				skb_reserve(skb,2);	/* IP headers on 16 byte boundaries */
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				ei_local->stat.rx_packets++;
				ei_local->stat.rx_bytes += pkt_len;
				if (pkt_stat & ENRSR_PHY)
					ei_local->stat.multicast++;
			}
		}
		else
		{
			/* The receive status byte reported an error. */
			if (ei_debug)
				printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
					dev->name, rx_frame.status, rx_frame.next,
					rx_frame.count);
			ei_local->stat.rx_errors++;
			/* NB: The NIC counts CRC, frame and missed errors. */
			if (pkt_stat & ENRSR_FO)
				ei_local->stat.rx_fifo_errors++;
		}
		next_frame = rx_frame.next;

		/* This _should_ never happen: it's here for avoiding bad clones. */
		if (next_frame >= ei_local->stop_page) {
			printk("%s: next frame inconsistency, %#2x\n", dev->name,
				next_frame);
			next_frame = ei_local->rx_start_page;
		}
		/* Advance the software read pointer and the hardware boundary
		   (which trails it by one page, see above). */
		ei_local->current_page = next_frame;
		outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
	}

	/* We used to also ack ENISR_OVER here, but that would sometimes mask
	   a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
	outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
	return;
}
 
/*
* We have a receiver overrun: we have to kick the 8390 to get it started
* again. Problem is that you have to kick it exactly as NS prescribes in
* the updated datasheets, or "the NIC may act in an unpredictable manner."
* This includes causing "the NIC to defer indefinitely when it is stopped
* on a busy network." Ugh.
* Called with lock held. Don't call this with the interrupts off or your
* computer will hate you - it takes 10mS or so.
*/
 
static void ei_rx_overrun(struct device *dev)
{
	long e8390_base = dev->base_addr;
	unsigned char was_txing, must_resend = 0;
	struct ei_device *ei_local = (struct ei_device *) dev->priv;

	/*
	 * Record whether a Tx was in progress and then issue the
	 * stop command.
	 */
	was_txing = inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	if (ei_debug > 1)
		printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
	ei_local->stat.rx_over_errors++;

	/*
	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
	 * Early datasheets said to poll the reset bit, but now they say that
	 * it "is not a reliable indicator and subsequently should be ignored."
	 * We wait at least 10ms.
	 */

	/* Busy-wait: this is why the comment above says not to call this
	   with interrupts off. */
	udelay(10*1000);

	/*
	 * Reset RBCR[01] back to zero as per magic incantation.
	 */
	outb_p(0x00, e8390_base+EN0_RCNTLO);
	outb_p(0x00, e8390_base+EN0_RCNTHI);

	/*
	 * See if any Tx was interrupted or not. According to NS, this
	 * step is vital, and skipping it will cause no end of havoc.
	 */

	if (was_txing)
	{
		/* If the stopped transmit never produced a TX/TX_ERR
		   interrupt, the packet must be restarted at the end. */
		unsigned char tx_completed = inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
		if (!tx_completed)
			must_resend = 1;
	}

	/*
	 * Have to enter loopback mode and then restart the NIC before
	 * you are allowed to slurp packets up off the ring.
	 */
	outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
	outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);

	/*
	 * Clear the Rx ring of all the debris, and ack the interrupt.
	 */
	ei_receive(dev);
	outb_p(ENISR_OVER, e8390_base+EN0_ISR);

	/*
	 * Leave loopback mode, and resend any packet that got stopped.
	 */
	outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
	if (must_resend)
		outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
 
/*
* Collect the stats. This is called unlocked and from several contexts.
*/
static struct net_device_stats *get_stats(struct device *dev)
{
	struct ei_device *ei_local = (struct ei_device *) dev->priv;
	long e8390_base = dev->base_addr;
	unsigned long flags;

	/* A stopped card has no live counters: hand back the software
	   copy as-is. */
	if (!dev->start)
		return &ei_local->stat;

	/* Fold the NIC's three tally registers into the software totals.
	   Page 0 is assumed to be selected. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_local->stat.rx_frame_errors  += inb_p(e8390_base + EN0_COUNTER0);
	ei_local->stat.rx_crc_errors    += inb_p(e8390_base + EN0_COUNTER1);
	ei_local->stat.rx_missed_errors += inb_p(e8390_base + EN0_COUNTER2);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	return &ei_local->stat;
}
 
/*
* Update the given Autodin II CRC value with another data byte.
*/
 
/*
 * Fold one more data byte (low bit first) into an Autodin II CRC
 * accumulator and return the updated value.
 */
static inline u32 update_crc(u8 byte, u32 current_crc)
{
	int bit;
	u8 ah = 0;

	for (bit = 0; bit < 8; bit++, byte >>= 1)
	{
		u8 carry = current_crc >> 31;

		current_crc <<= 1;
		ah = ((ah << 1) | carry) ^ byte;
		if (ah & 1)
			current_crc ^= 0x04C11DB7;	/* CRC polynomial */
		ah >>= 1;
	}
	return current_crc;
}
 
/*
* Form the 64 bit 8390 multicast table from the linked list of addresses
* associated with this dev structure.
*/
/*
 * Walk the device's multicast list and set the corresponding bits in
 * the 64-bit (8 byte) DP8390 hash filter pointed to by 'bits'.
 */
static inline void make_mc_bits(u8 *bits, struct device *dev)
{
	struct dev_mc_list *dmi;

	for (dmi = dev->mc_list; dmi; dmi = dmi->next)
	{
		u32 crc = 0xffffffff;	/* initial CRC value */
		int i;

		/* Skip malformed entries rather than hashing garbage. */
		if (dmi->dmi_addrlen != ETH_ALEN)
		{
			printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
			continue;
		}

		for (i = 0; i < ETH_ALEN; i++)
			crc = update_crc(dmi->dmi_addr[i], crc);

		/*
		 * The 8390 uses the 6 most significant bits of the
		 * CRC to index the multicast table: the top 3 bits pick
		 * the byte, the next 3 pick the bit within it.
		 */
		bits[crc >> 29] |= 1 << ((crc >> 26) & 7);
	}
}
 
/*
* Set or clear the multicast filter for this adaptor. May be called
* from a BH in 2.1.x. Must be called with lock held.
*/
static void do_set_multicast_list(struct device *dev)
{
	long e8390_base = dev->base_addr;
	int i;
	struct ei_device *ei_local = (struct ei_device*)dev->priv;

	/* Build the hash filter: per-address bits in normal mode,
	   all-ones when promiscuous or all-multicast is requested. */
	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
	{
		memset(ei_local->mcfilter, 0, 8);
		if (dev->mc_list)
			make_mc_bits(ei_local->mcfilter, dev);
	}
	else
		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */

	/*
	 * DP8390 manuals don't specify any magic sequence for altering
	 * the multicast regs on an already running card. To be safe, we
	 * ensure multicast mode is off prior to loading up the new hash
	 * table. If this proves to be not enough, we can always resort
	 * to stopping the NIC, loading the table and then restarting.
	 *
	 * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC
	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
	 * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and
	 * Ultra32 EISA) appears to have this bug fixed.
	 */
	if (dev->start)
		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);

	/* The multicast filter registers live on register page 1. */
	outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
	for(i = 0; i < 8; i++)
	{
		outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
		/* Read back to verify (skipped for the write-only 83C690). */
		if(inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
			printk(KERN_ERR "Multicast filter read/write mismap %d\n",i);
#endif
	}
	outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);

	/* Pick the receive mode matching the interface flags.  The 0x18
	   and 0x08 values OR extra accept bits into EN0_RXCR beyond the
	   base config — presumably promiscuous resp. multicast accept;
	   the DP8390 datasheet is authoritative here. */
	if(dev->flags&IFF_PROMISC)
		outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
	else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
		outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
	else
		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
}
 
/*
* Called without lock held. This is invoked from user context and may
* be parallel to just about everything else. Its also fairly quick and
* not called too often. Must protect against both bh and irq users
*/
static void set_multicast_list(struct device *dev)
{
	struct ei_device *ei_local = (struct ei_device *) dev->priv;
	unsigned long flags;

	/* Runs in user context with no locks held, so take the irq-safe
	   page lock around the real work. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	do_set_multicast_list(dev);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);
}
 
/*
* Initialize the rest of the 8390 device structure. Do NOT __initfunc
* this, as it is used by 8390 based modular drivers too.
*/
 
int ethdev_init(struct device *dev)
{
	if (ei_debug > 1)
		printk(version);

	/* Allocate and zero the per-board 8390 state on first use. */
	if (dev->priv == NULL)
	{
		struct ei_device *ei_local = kmalloc(sizeof(struct ei_device), GFP_KERNEL);

		if (ei_local == NULL)
			return -ENOMEM;
		memset(ei_local, 0, sizeof(struct ei_device));
		spin_lock_init(&ei_local->page_lock);
		dev->priv = ei_local;
	}

	/* Hook up the generic 8390 entry points. */
	dev->hard_start_xmit = &ei_start_xmit;
	dev->get_stats = get_stats;
	dev->set_multicast_list = &set_multicast_list;

	ether_setup(dev);
	return 0;
}
 
 
/* This page of functions should be 8390 generic */
/* Follow National Semi's recommendations for initializing the "NIC". */
 
/*
* Must be called with lock held.
*/
 
void NS8390_init(struct device *dev, int startp)
{
	long e8390_base = dev->base_addr;
	struct ei_device *ei_local = (struct ei_device *) dev->priv;
	int i;
	/* ENDCFG_WTS selects 16-bit (word) transfers on word16 cards. */
	int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;

	/* ei_get_8390_hdr() reads this struct straight from NIC memory,
	   so it must pack to exactly the 4 bytes of the on-chip header. */
	if(sizeof(struct e8390_pkt_hdr)!=4)
		panic("8390.c: header struct mispacked\n");

	/* Follow National Semi's recommendations for initing the DP83902. */
	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
	outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */

	/* Clear the remote byte count registers. */
	outb_p(0x00, e8390_base + EN0_RCNTLO);
	outb_p(0x00, e8390_base + EN0_RCNTHI);

	/* Set to monitor and loopback mode -- this is vital!. */
	outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
	outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */

	/* Set the transmit page and receive ring. */
	outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
	ei_local->tx1 = ei_local->tx2 = 0;
	outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
	outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/
	ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */
	outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);

	/* Clear the pending interrupts and mask.  Interrupts stay masked
	   unless startp is set below. */
	outb_p(0xFF, e8390_base + EN0_ISR);
	outb_p(0x00, e8390_base + EN0_IMR);

	/* Copy the station address into the DS8390 registers. */

	/* The physical-address registers live on page 1; the chip stays
	   stopped while they are programmed. */
	outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
	for(i = 0; i < 6; i++)
	{
		outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
		/* Read back to catch boards that didn't take the write. */
		if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
			if (ei_debug > 0)
				printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
	}

	outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
	outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);

	/* Reset the software transmit state. */
	dev->tbusy = 0;
	dev->interrupt = 0;
	ei_local->tx1 = ei_local->tx2 = 0;
	ei_local->txing = 0;

	if (startp)
	{
		/* Clear pending interrupts, unmask, and start the chip. */
		outb_p(0xff, e8390_base + EN0_ISR);
		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
		outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
		outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
		/* 3c503 TechMan says rxconfig only after the NIC is started. */
		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
		do_set_multicast_list(dev);	/* (re)load the mcast table */
	}
	return;
}
 
/* Trigger a transmit start, assuming the length is valid.
Always called with the page lock held */
static void NS8390_trigger_send(struct device *dev, unsigned int length,
								int start_page)
{
	long e8390_base = dev->base_addr;
	/* NOTE(review): ei_local looks unused, but the EN0_* register
	   macros expand through EI_SHIFT(), which dereferences ei_local
	   on some configurations (see 8390.h) -- do not remove. */
	struct ei_device *ei_local = (struct ei_device *) dev->priv;

	outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);

	/* Refuse to start a new transmit while one is still in flight. */
	if (inb_p(e8390_base) & E8390_TRANS)
	{
		printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
			dev->name);
		return;
	}
	/* Program the byte count and start page, then fire the transmitter. */
	outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
	outb_p(length >> 8, e8390_base + EN0_TCNTHI);
	outb_p(start_page, e8390_base + EN0_TPSR);
	outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
}
 
#ifdef MODULE
 
EXPORT_SYMBOL(ei_open);
EXPORT_SYMBOL(ei_close);
EXPORT_SYMBOL(ei_interrupt);
EXPORT_SYMBOL(ethdev_init);
EXPORT_SYMBOL(NS8390_init);
 
struct module *NS8390_module = NULL;
 
int init_module(void)
{
	/* Publish this module's handle so kmod-aware board drivers can
	   pin the 8390 core in memory (see lock_8390_module() in 8390.h). */
	NS8390_module = &__this_module;
	return 0;
}
 
void cleanup_module(void)
{
	/* Withdraw the published handle on unload. */
	NS8390_module = NULL;
}
 
#endif /* MODULE */
/*
* Local variables:
* compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 8390.c"
* version-control: t
* kept-new-versions: 5
* c-indent-level: 4
* tab-width: 4
* End:
*/
/shark/trunk/drivers/net/8390.h
0,0 → 1,296
/* Generic NS8390 register definitions. */
/* This file is part of Donald Becker's 8390 drivers, and is distributed
under the same license. Auto-loading of 8390.o added by Paul Gortmaker.
Some of these names and comments originated from the Crynwr
packet drivers, which are distributed under the GPL. */
 
#ifndef _8390_h
#define _8390_h
 
#include <linux/config.h>
#include <linux/if_ether.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
/* With kmod, drivers can now load the 8390 module themselves! */
#if 0 /* def CONFIG_KMOD */
#define LOAD_8390_BY_KMOD
#endif
 
#define TX_2X_PAGES 12
#define TX_1X_PAGES 6
 
/* Should always use two Tx slots to get back-to-back transmits. */
#define EI_PINGPONG
 
#ifdef EI_PINGPONG
#define TX_PAGES TX_2X_PAGES
#else
#define TX_PAGES TX_1X_PAGES
#endif
 
#define ETHER_ADDR_LEN 6
 
/* The 8390 specific per-packet-header format. */
/* Layout is dictated by the hardware: NS8390_init() panics unless this
   struct packs to exactly 4 bytes. */
struct e8390_pkt_hdr {
	unsigned char status;	/* status (ENRSR_* bits) */
	unsigned char next;	/* pointer to next packet. */
	unsigned short count;	/* header + packet length in bytes */
};
 
#ifdef notdef
extern int ei_debug;
#else
#define ei_debug 1
#endif
 
#ifndef HAVE_AUTOIRQ
/* From auto_irq.c */
extern void autoirq_setup(int waittime);
extern unsigned long autoirq_report(int waittime);
#endif
 
#if defined(LOAD_8390_BY_KMOD) && defined(MODULE) && !defined(NS8390_CORE)
 
/* Function pointers to be mapped onto the 8390 core support */
static int (*S_ethdev_init)(struct device *dev);
static void (*S_NS8390_init)(struct device *dev, int startp);
static int (*S_ei_open)(struct device *dev);
static int (*S_ei_close)(struct device *dev);
static void (*S_ei_interrupt)(int irq, void *dev_id, struct pt_regs *regs);
 
 
#define NS8390_KSYSMS_PRESENT ( \
get_module_symbol(NULL, "ethdev_init") != 0 && \
get_module_symbol(NULL, "NS8390_init") != 0 && \
get_module_symbol(NULL, "ei_open") != 0 && \
get_module_symbol(NULL, "ei_close") != 0 && \
get_module_symbol(NULL, "ei_interrupt") != 0)
 
extern __inline__ int load_8390_module(const char *driver)
{
	/* Ensure the 8390 core is resident -- auto-loading it via kmod if
	   needed -- then resolve its entry points into the S_* function
	   pointers above.  Returns 0 on success, -ENOSYS otherwise. */

	if (! NS8390_KSYSMS_PRESENT) {
		int (*request_mod)(const char *module_name);

		/* Without request_module() there is no kmod support at all. */
		if (get_module_symbol("", "request_module") == 0) {
			printk("%s: module auto-load (kmod) support not present.\n", driver);
			printk("%s: unable to auto-load required 8390 module.\n", driver);
			printk("%s: try \"modprobe 8390\" as root 1st.\n", driver);
			return -ENOSYS;
		}

		request_mod = (void*)get_module_symbol("", "request_module");
		if (request_mod("8390")) {
			printk("%s: request to load the 8390 module failed.\n", driver);
			return -ENOSYS;
		}

		/* Check if module really loaded and is valid */
		if (! NS8390_KSYSMS_PRESENT) {
			printk("%s: 8390.o not found/invalid or failed to load.\n", driver);
			return -ENOSYS;
		}

		printk(KERN_INFO "%s: auto-loaded 8390 module.\n", driver);
	}

	/* Map the functions into place */
	S_ethdev_init = (void*)get_module_symbol(0, "ethdev_init");
	S_NS8390_init = (void*)get_module_symbol(0, "NS8390_init");
	S_ei_open = (void*)get_module_symbol(0, "ei_open");
	S_ei_close = (void*)get_module_symbol(0, "ei_close");
	S_ei_interrupt = (void*)get_module_symbol(0, "ei_interrupt");

	return 0;
}
 
/*
* Since a kmod aware driver won't explicitly show a dependence on the
* exported 8390 functions (due to the mapping above), the 8390 module
* (if present, and not in-kernel) needs to be protected from garbage
* collection. NS8390_module is only defined for a modular 8390 core.
*/
 
extern __inline__ void lock_8390_module(void)
{
	/* Find the core's self-published handle (NS8390_module, set in the
	   core's init_module) and bump its use count so it can't be
	   garbage-collected while this driver depends on it. */
	struct module **mod = (struct module**)get_module_symbol(0, "NS8390_module");

	if (mod != NULL && *mod != NULL)
		__MOD_INC_USE_COUNT(*mod);
}
extern __inline__ void unlock_8390_module(void)
{
	/* Drop the reference taken by lock_8390_module(). */
	struct module **mod = (struct module**)get_module_symbol(0, "NS8390_module");

	if (mod != NULL && *mod != NULL)
		__MOD_DEC_USE_COUNT(*mod);
}
/*
* These are last so they only have scope over the driver
* code (wd, ne, 3c503, etc.) and not over the above code.
*/
#define ethdev_init S_ethdev_init
#define NS8390_init S_NS8390_init
#define ei_open S_ei_open
#define ei_close S_ei_close
#define ei_interrupt S_ei_interrupt
 
#else /* not a module or kmod support not wanted */
 
#define load_8390_module(driver) 0
#define lock_8390_module() do { } while (0)
#define unlock_8390_module() do { } while (0)
extern int ethdev_init(struct device *dev);
extern void NS8390_init(struct device *dev, int startp);
extern int ei_open(struct device *dev);
extern int ei_close(struct device *dev);
extern void ei_interrupt(int irq, void *dev_id, struct pt_regs *regs);
 
#endif
 
/* Most of these entries should be in 'struct device' (or most of the
things in there should be here!) */
/* You have one of these per-board */
/* Per-board 8390 driver state, hung off dev->priv. */
struct ei_device {
	const char *name;		/* Board name used in log messages. */
	void (*reset_8390)(struct device *);	/* Board-specific hard reset. */
	void (*get_8390_hdr)(struct device *, struct e8390_pkt_hdr *, int);	/* Read a packet header from ring memory. */
	void (*block_output)(struct device *, int, const unsigned char *, int);	/* Copy a packet into NIC memory. */
	void (*block_input)(struct device *, int, struct sk_buff *, int);	/* Copy a packet out of NIC memory. */
	unsigned char mcfilter[8];	/* Current 64-bit multicast hash filter. */
	unsigned open:1;		/* Interface is open. */
	unsigned word16:1;  		/* We have the 16-bit (vs 8-bit) version of the card. */
	unsigned txing:1;		/* Transmit Active */
	unsigned irqlock:1;		/* 8390's intrs disabled when '1'. */
	unsigned dmaing:1;		/* Remote DMA Active */
	unsigned char tx_start_page, rx_start_page, stop_page;
	unsigned char current_page;	/* Read pointer in buffer  */
	unsigned char interface_num;	/* Net port (AUI, 10bT.) to use. */
	unsigned char txqueue;		/* Tx Packet buffer queue length. */
	short tx1, tx2;			/* Packet lengths for ping-pong tx. */
	short lasttx;			/* Alpha version consistency check. */
	unsigned char reg0;		/* Register '0' in a WD8013 */
	unsigned char reg5;		/* Register '5' in a WD8013 */
	unsigned char saved_irq;	/* Original dev->irq value. */
	struct net_device_stats stat;	/* The new statistics table. */
	u32 *reg_offset;		/* Register mapping table */
	spinlock_t page_lock;		/* Page register locks */
	unsigned long priv;		/* Private field to store bus IDs etc. */
};
 
/* The maximum number of 8390 interrupt service routines called per IRQ. */
#define MAX_SERVICE 12
 
/* The maximum time waited (in jiffies) before assuming a Tx failed. (20ms) */
#define TX_TIMEOUT (20*HZ/100)
 
#define ei_status (*(struct ei_device *)(dev->priv))
 
/* Some generic ethernet register configurations. */
#define E8390_TX_IRQ_MASK 0xa /* For register EN0_ISR */
#define E8390_RX_IRQ_MASK 0x5
#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */
#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */
#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */
#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */
 
/* Register accessed at EN_CMD, the 8390 base addr. */
#define E8390_STOP 0x01 /* Stop and reset the chip */
#define E8390_START 0x02 /* Start the chip, clear reset */
#define E8390_TRANS 0x04 /* Transmit a frame */
#define E8390_RREAD 0x08 /* Remote read */
#define E8390_RWRITE 0x10 /* Remote write */
#define E8390_NODMA 0x20 /* Remote DMA */
#define E8390_PAGE0 0x00 /* Select page chip registers */
#define E8390_PAGE1 0x40 /* using the two high-order bits */
#define E8390_PAGE2 0x80 /* Page 3 is invalid. */
 
/*
* Only generate indirect loads given a machine that needs them.
*/
#if defined(CONFIG_MAC) || defined(CONFIG_AMIGA_PCMCIA) || \
defined(CONFIG_ARIADNE2) || defined(CONFIG_ARIADNE2_MODULE)
#define EI_SHIFT(x) (ei_local->reg_offset[x])
#else
#define EI_SHIFT(x) (x)
#endif
 
#define E8390_CMD EI_SHIFT(0x00) /* The command register (for all pages) */
/* Page 0 register offsets. */
#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */
#define EN0_STARTPG EI_SHIFT(0x01) /* Starting page of ring bfr WR */
#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */
#define EN0_STOPPG EI_SHIFT(0x02) /* Ending page +1 of ring bfr WR */
#define EN0_BOUNDARY EI_SHIFT(0x03) /* Boundary page of ring bfr RD WR */
#define EN0_TSR EI_SHIFT(0x04) /* Transmit status reg RD */
#define EN0_TPSR EI_SHIFT(0x04) /* Transmit starting page WR */
#define EN0_NCR EI_SHIFT(0x05) /* Number of collision reg RD */
#define EN0_TCNTLO EI_SHIFT(0x05) /* Low byte of tx byte count WR */
#define EN0_FIFO EI_SHIFT(0x06) /* FIFO RD */
#define EN0_TCNTHI EI_SHIFT(0x06) /* High byte of tx byte count WR */
#define EN0_ISR EI_SHIFT(0x07) /* Interrupt status reg RD WR */
#define EN0_CRDALO EI_SHIFT(0x08) /* low byte of current remote dma address RD */
#define EN0_RSARLO EI_SHIFT(0x08) /* Remote start address reg 0 */
#define EN0_CRDAHI EI_SHIFT(0x09) /* high byte, current remote dma address RD */
#define EN0_RSARHI EI_SHIFT(0x09) /* Remote start address reg 1 */
#define EN0_RCNTLO EI_SHIFT(0x0a) /* Remote byte count reg WR */
#define EN0_RCNTHI EI_SHIFT(0x0b) /* Remote byte count reg WR */
#define EN0_RSR EI_SHIFT(0x0c) /* rx status reg RD */
#define EN0_RXCR EI_SHIFT(0x0c) /* RX configuration reg WR */
#define EN0_TXCR EI_SHIFT(0x0d) /* TX configuration reg WR */
#define EN0_COUNTER0 EI_SHIFT(0x0d) /* Rcv alignment error counter RD */
#define EN0_DCFG EI_SHIFT(0x0e) /* Data configuration reg WR */
#define EN0_COUNTER1 EI_SHIFT(0x0e) /* Rcv CRC error counter RD */
#define EN0_IMR EI_SHIFT(0x0f) /* Interrupt mask reg WR */
#define EN0_COUNTER2 EI_SHIFT(0x0f) /* Rcv missed frame error counter RD */
 
/* Bits in EN0_ISR - Interrupt status register */
#define ENISR_RX 0x01 /* Receiver, no error */
#define ENISR_TX 0x02 /* Transmitter, no error */
#define ENISR_RX_ERR 0x04 /* Receiver, with error */
#define ENISR_TX_ERR 0x08 /* Transmitter, with error */
#define ENISR_OVER 0x10 /* Receiver overwrote the ring */
#define ENISR_COUNTERS 0x20 /* Counters need emptying */
#define ENISR_RDC 0x40 /* remote dma complete */
#define ENISR_RESET 0x80 /* Reset completed */
#define ENISR_ALL 0x3f /* Interrupts we will enable */
 
/* Bits in EN0_DCFG - Data config register */
#define ENDCFG_WTS 0x01 /* word transfer mode selection */
 
/* Page 1 register offsets. */
#define EN1_PHYS EI_SHIFT(0x01) /* This board's physical enet addr RD WR */
#define EN1_PHYS_SHIFT(i) EI_SHIFT(i+1) /* Get and set mac address */
#define EN1_CURPAG EI_SHIFT(0x07) /* Current memory page RD WR */
#define EN1_MULT EI_SHIFT(0x08) /* Multicast filter mask array (8 bytes) RD WR */
#define EN1_MULT_SHIFT(i) EI_SHIFT(8+i) /* Get and set multicast filter */
 
/* Bits in received packet status byte and EN0_RSR*/
#define ENRSR_RXOK 0x01 /* Received a good packet */
#define ENRSR_CRC 0x02 /* CRC error */
#define ENRSR_FAE 0x04 /* frame alignment error */
#define ENRSR_FO 0x08 /* FIFO overrun */
#define ENRSR_MPA 0x10 /* missed pkt */
#define ENRSR_PHY 0x20 /* physical/multicast address */
#define ENRSR_DIS 0x40 /* receiver disable. set in monitor mode */
#define ENRSR_DEF 0x80 /* deferring */
 
/* Transmitted packet status, EN0_TSR. */
#define ENTSR_PTX 0x01 /* Packet transmitted without error */
#define ENTSR_ND 0x02 /* The transmit wasn't deferred. */
#define ENTSR_COL 0x04 /* The transmit collided at least once. */
#define ENTSR_ABT 0x08 /* The transmit collided 16 times, and was deferred. */
#define ENTSR_CRS 0x10 /* The carrier sense was lost. */
#define ENTSR_FU 0x20 /* A "FIFO underrun" occurred during transmit. */
#define ENTSR_CDH 0x40 /* The collision detect "heartbeat" signal was lost. */
#define ENTSR_OWC 0x80 /* There was an out-of-window collision. */
 
__END_DECLS
#endif /* _8390_h */
/shark/trunk/drivers/net/net_init.c
0,0 → 1,443
/* netdrv_init.c: Initialization for network devices. */
/*
Written 1993,1994,1995 by Donald Becker.
 
The author may be reached as becker@cesdis.gsfc.nasa.gov or
C/O Center of Excellence in Space Data and Information Sciences
Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
 
This file contains the initialization for the "pl14+" style ethernet
drivers. It should eventually replace most of drivers/net/Space.c.
Its primary advantage is that it's able to allocate low-memory buffers.
A secondary advantage is that the dangerous NE*000 netcards can reserve
their I/O port region before the SCSI probes start.
 
Modifications/additions by Bjorn Ekwall <bj0rn@blox.se>:
ethdev_index[MAX_ETH_CARDS]
register_netdev() / unregister_netdev()
Modifications by Wolfgang Walter
Use dev_close cleanly so we always shut things down tidily.
Changed 29/10/95, Alan Cox to pass sockaddr's around for mac addresses.
14/06/96 - Paul Gortmaker: Add generic eth_change_mtu() function.
 
August 12, 1996 - Lawrence V. Stefani: Added fddi_change_mtu() and
fddi_setup() functions.
Sept. 10, 1996 - Lawrence V. Stefani: Increased hard_header_len to
include 3 pad bytes.
*/
 
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
/* #include <linux/fs.h> */
#include <linux/malloc.h>
#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
/* #include <linux/fddidevice.h>
//#include <linux/trdevice.h> */
#include <linux/if_arp.h>
#ifdef CONFIG_NET_ALIAS
#include <linux/net_alias.h>
#endif
 
#include <string.h>
 
/* The network devices currently exist only in the socket namespace, so these
entries are unused. The only ones that make sense are
open start the ethercard
close stop the ethercard
ioctl To get statistics, perhaps set the interface port (AUI, BNC, etc.)
One can also imagine getting raw packets using
read & write
but this is probably better handled by a raw packet socket.
 
Given that almost all of these functions are handled in the current
socket-based scheme, putting ethercard devices in /dev/ seems pointless.
[Removed all support for /dev network devices. When someone adds
streams then by magic we get them, but otherwise they are un-needed
and a space waste]
*/
 
/* The list of used and available "eth" slots (for "eth0", "eth1", etc.) */
#define MAX_ETH_CARDS 16 /* same as the number of irq's in irq2dev[] */
static struct device *ethdev_index[MAX_ETH_CARDS];
 
 
/* Fill in the fields of the device structure with ethernet-generic values.
 
If no device structure is passed, a new one is constructed, complete with
a SIZEOF_PRIVATE private data area.
 
If an empty string area is passed as dev->name, or a new structure is made,
a new name string is constructed. The passed string area should be 8 bytes
long.
*/
 
struct device *
init_etherdev(struct device *dev, int sizeof_priv)
{
	int new_device = 0;
	int i;

	/* Use an existing correctly named device in Space.c:dev_base. */
	if (dev == NULL) {
		int alloc_size = sizeof(struct device) + sizeof("eth%d ")
			+ sizeof_priv + 3;
		struct device *cur_dev;
		char pname[8];		/* Putative name for the device.  */

		for (i = 0; i < MAX_ETH_CARDS; ++i)
			if (ethdev_index[i] == NULL) {
				sprintf(pname, "eth%d", i);
				for (cur_dev = dev_base; cur_dev; cur_dev = cur_dev->next)
					if (strcmp(pname, cur_dev->name) == 0) {
						/* Recycle the statically declared slot. */
						dev = cur_dev;
						dev->init = NULL;
						sizeof_priv = (sizeof_priv + 3) & ~3;
						dev->priv = sizeof_priv
							? kmalloc(sizeof_priv, GFP_KERNEL)
							: NULL;
						if (dev->priv) memset(dev->priv, 0, sizeof_priv);
						goto found;
					}
			}

		alloc_size &= ~3;		/* Round to dword boundary. */

		/* Allocate device struct, private area and name in one chunk. */
		dev = (struct device *)kmalloc(alloc_size, GFP_KERNEL);
		/* FIX: previously the result was passed to memset() unchecked,
		   oopsing on allocation failure. */
		if (dev == NULL)
			return NULL;
		memset(dev, 0, alloc_size);
		if (sizeof_priv)
			dev->priv = (void *) (dev + 1);
		dev->name = sizeof_priv + (char *)(dev + 1);
		new_device = 1;
	}

found:						/* From the double loop above. */

	/* If no name was supplied, claim the first free "ethN" slot. */
	if (dev->name &&
		((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
		for (i = 0; i < MAX_ETH_CARDS; ++i)
			if (ethdev_index[i] == NULL) {
				sprintf(dev->name, "eth%d", i);
				ethdev_index[i] = dev;
				break;
			}
	}

	ether_setup(dev); 	/* Hmmm, should this be called here? */

	if (new_device) {
		/* Append the device to the device queue. */
		struct device **old_devp = &dev_base;

		while ((*old_devp)->next)
			old_devp = & (*old_devp)->next;
		(*old_devp)->next = dev;
		dev->next = 0;
	}
	return dev;
}
 
 
/* Install a new hardware (MAC) address for `dev` from a struct sockaddr.
   Refused with -EBUSY while the interface is up and running. */
static int eth_mac_addr(struct device *dev, void *p)
{
	struct sockaddr *sa = p;

	if (dev->start)
		return -EBUSY;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
	return 0;
}
 
/* Change the MTU of an ethernet device.  The accepted range is
   68 (minimum IP datagram) .. 1500 (standard ethernet payload). */
static int eth_change_mtu(struct device *dev, int new_mtu)
{
	if (new_mtu >= 68 && new_mtu <= 1500) {
		dev->mtu = new_mtu;
		return 0;
	}
	return -EINVAL;
}
 
#ifdef CONFIG_FDDI
 
/* Change the MTU of an FDDI device; bounded by the 802.2 SNAP
   header/payload limits. */
static int fddi_change_mtu(struct device *dev, int new_mtu)
{
	if (new_mtu >= FDDI_K_SNAP_HLEN && new_mtu <= FDDI_K_SNAP_DLEN) {
		dev->mtu = new_mtu;
		return 0;
	}
	return -EINVAL;
}
 
#endif
 
void ether_setup(struct device *dev)
{
int i;
/* Fill in the fields of the device structure with ethernet-generic values.
This should be in a common file instead of per-driver. */
for (i = 0; i < DEV_NUMBUFFS; i++)
skb_queue_head_init(&dev->buffs[i]);
 
/* register boot-defined "eth" devices */
if (dev->name && (strncmp(dev->name, "eth", 3) == 0)) {
/* i = simple_strtoul(dev->name + 3, NULL, 0); */
if (ethdev_index[i] == NULL) {
ethdev_index[i] = dev;
}
else if (dev != ethdev_index[i]) {
/* Really shouldn't happen! */
printk(KERN_ERR "ether_setup: Ouch! Someone else took %s\n",
dev->name);
}
}
 
dev->change_mtu = eth_change_mtu;
/* dev->hard_header = eth_header; */
dev->rebuild_header = (void *)((void *)(eth_rebuild_header));
dev->set_mac_address = eth_mac_addr;
dev->header_cache_bind = (void *)((void *)(eth_header_cache_bind));
dev->header_cache_update= (void *)((void *)(eth_header_cache_update));
 
dev->type = ARPHRD_ETHER;
dev->hard_header_len = ETH_HLEN;
dev->mtu = 1500; /* eth_mtu */
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 100; /* Ethernet wants good queues */
memset(dev->broadcast,0xFF, ETH_ALEN);
 
/* New-style flags. */
dev->flags = IFF_BROADCAST|IFF_MULTICAST;
dev->family = AF_INET;
dev->pa_addr = 0;
dev->pa_brdaddr = 0;
dev->pa_mask = 0;
dev->pa_alen = 4;
}
 
#ifdef CONFIG_TR
 
/* Fill in the fields of `dev` with token-ring-generic values
   (analogous to ether_setup for ethernet devices). */
void tr_setup(struct device *dev)
{
	int q;

	/* Initialize the per-priority packet queues. */
	for (q = 0; q < DEV_NUMBUFFS; q++)
		skb_queue_head_init(&dev->buffs[q]);

	/* Generic token-ring link-layer callbacks. */
	dev->hard_header	= tr_header;
	dev->rebuild_header	= tr_rebuild_header;

	/* Link-layer parameters. */
	dev->type		= ARPHRD_IEEE802;
	dev->hard_header_len	= TR_HLEN;
	dev->mtu		= 2000;	/* bug in fragmenter...*/
	dev->addr_len		= TR_ALEN;
	dev->tx_queue_len	= 100;	/* Long queues on tr */
	memset(dev->broadcast, 0xFF, TR_ALEN);

	/* New-style flags. */
	dev->flags	= IFF_BROADCAST;
	dev->family	= AF_INET;
	dev->pa_addr	= 0;
	dev->pa_brdaddr	= 0;
	dev->pa_mask	= 0;
	dev->pa_alen	= 4;
}
 
#endif
 
#ifdef CONFIG_FDDI
 
/* Fill in the fields of `dev` with FDDI-generic values.
   This should live in a common file instead of per-driver. */
void fddi_setup(struct device *dev)
{
	int q;

	/* Initialize the per-priority packet queues. */
	for (q = 0; q < DEV_NUMBUFFS; q++)
		skb_queue_head_init(&dev->buffs[q]);

	/* Generic FDDI link-layer callbacks. */
	dev->change_mtu		= fddi_change_mtu;
	dev->hard_header	= fddi_header;
	dev->rebuild_header	= fddi_rebuild_header;

	/* Link-layer parameters. */
	dev->type		= ARPHRD_FDDI;
	dev->hard_header_len	= FDDI_K_SNAP_HLEN+3;	/* Assume 802.2 SNAP hdr len + 3 pad bytes */
	dev->mtu		= FDDI_K_SNAP_DLEN;	/* Assume max payload of 802.2 SNAP frame */
	dev->addr_len		= FDDI_K_ALEN;
	dev->tx_queue_len	= 100;			/* Long queues on FDDI */
	memset(dev->broadcast, 0xFF, FDDI_K_ALEN);

	/* New-style flags */
	dev->flags	= IFF_BROADCAST | IFF_MULTICAST;
	dev->family	= AF_INET;
	dev->pa_addr	= 0;
	dev->pa_brdaddr	= 0;
	dev->pa_mask	= 0;
	dev->pa_alen	= 4;
}
 
#endif
 
/* Apply an ifmap reconfiguration request to `dev`.  Each field of the
   map is honoured only when it differs from the all-ones "unchanged"
   sentinel of its type.  Always succeeds. */
int ether_config(struct device *dev, struct ifmap *map)
{
	if (map->mem_start != (u_long) -1)
		dev->mem_start = map->mem_start;
	if (map->mem_end != (u_long) -1)
		dev->mem_end = map->mem_end;
	if (map->base_addr != (u_short) -1)
		dev->base_addr = map->base_addr;
	if (map->irq != (u_char) -1)
		dev->irq = map->irq;
	if (map->dma != (u_char) -1)
		dev->dma = map->dma;
	if (map->port != (u_char) -1)
		dev->if_port = map->port;

	return 0;
}
 
/*
 * register_netdev - probe and link a new network device into dev_base.
 *
 * If dev->name is empty (or starts with a space) a free "ethN" name and
 * ethdev_index[] slot are assigned first.  The device's init() probe runs
 * with interrupts enabled; on probe failure the index slot is released
 * and -EIO returned.  On success the device is appended to the dev_base
 * chain under cli() protection.  Returns 0 on success.
 */
int register_netdev(struct device *dev)
{
	struct device *d = dev_base;
	unsigned long flags;
	/* Sentinel: stays == MAX_ETH_CARDS unless a slot is claimed below,
	   so the cleanup after a failed init() only clears a real slot. */
	int i=MAX_ETH_CARDS;

	save_flags(flags);
	cli();

	if (dev && dev->init) {
		if (dev->name &&
			((dev->name[0] == '\0') || (dev->name[0] == ' '))) {
			for (i = 0; i < MAX_ETH_CARDS; ++i)
				if (ethdev_index[i] == NULL) {
					sprintf(dev->name, "eth%d", i);
					printk(KERN_INFO "Loading device '%s'...\n", dev->name);
					ethdev_index[i] = dev;
					break;
				}
		}

		sti();	/* device probes assume interrupts enabled */
		if (dev->init(dev) != 0) {
			/* Probe failed: give back the index slot, if any. */
			if (i < MAX_ETH_CARDS) ethdev_index[i] = NULL;
			restore_flags(flags);
			return -EIO;
		}
		cli();

		/* Add device to end of chain */
		if (dev_base) {
			while (d->next)
				d = d->next;
			d->next = dev;
		}
		else
			dev_base = dev;
		dev->next = NULL;
	}
	restore_flags(flags);
	return 0;
}
 
/*
 * unregister_netdev - unlink `dev` from the dev_base chain and clear its
 * ethdev_index[] slot.
 *
 * CAUTION: the #ifdef CONFIG_NET_ALIAS / #else arms below each open an
 * `else {` block whose closing brace comes AFTER the #endif — the braces
 * are deliberately unbalanced within each preprocessor arm.  Do not
 * "fix" the indentation or brace matching without expanding both
 * configurations first.
 */
void unregister_netdev(struct device *dev)
{
	struct device *d = dev_base;
	unsigned long flags;
	int i;

	save_flags(flags);
	cli();

	if (dev == NULL)
	{
		printk("was NULL\n");
		restore_flags(flags);
		return;
	}
	/* else */
	if (dev->start)
		printk("ERROR '%s' busy and not MOD_IN_USE.\n", dev->name);

	/*
	 *	must jump over main_device+aliases
	 *	avoid alias devices unregistration so that only
	 *	net_alias module manages them
	 */
#ifdef CONFIG_NET_ALIAS
	if (dev_base == dev)
		dev_base = net_alias_nextdev(dev);
	else
	{
		while(d && (net_alias_nextdev(d) != dev)) /* skip aliases */
			d = net_alias_nextdev(d);
		if (d && (net_alias_nextdev(d) == dev))
		{
			/*
			 * 	Critical: Bypass by consider devices as blocks (maindev+aliases)
			 */
			net_alias_nextdev_set(d, net_alias_nextdev(dev));
		}
#else
	if (dev_base == dev)
		dev_base = dev->next;
	else
	{
		while (d && (d->next != dev))
			d = d->next;
		if (d && (d->next == dev))
		{
			d->next = dev->next;
		}
#endif
		/* This `else` pairs with the `if (d && ...)` of whichever
		   preprocessor arm was compiled in. */
		else
		{
			printk("unregister_netdev: '%s' not found\n", dev->name);
			restore_flags(flags);
			return;
		}
	}
	/* Release the device's "ethN" index slot, if it had one. */
	for (i = 0; i < MAX_ETH_CARDS; ++i)
	{
		if (ethdev_index[i] == dev)
		{
			ethdev_index[i] = NULL;
			break;
		}
	}

	restore_flags(flags);

	/*
	 *	You can i.e use a interfaces in a route though it is not up.
	 *	We call close_dev (which is changed: it will down a device even if
	 *	dev->flags==0 (but it will not call dev->stop if IFF_UP
	 *	is not set).
	 *	This will call notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev),
	 *	dev_mc_discard(dev), ....
	 */
	/* I Disable this 4 now...
	dev_close(dev);
	*/
}
 
 
/*
* Local variables:
* compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c net_init.c"
* version-control: t
* kept-new-versions: 5
* tab-width: 4
* End:
*/
/shark/trunk/drivers/net/3c59x.c
0,0 → 1,2405
/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
/*
Written 1996-1998 by Donald Becker.
 
This software may be used and distributed according to the terms
of the GNU Public License, incorporated herein by reference.
 
This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
and the EtherLink XL 3c900 and 3c905 cards.
 
The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
Center of Excellence in Space Data and Information Sciences
Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
*/
 
/*
15Apr00, Andrew Morton <andrewm@uow.edu.au>
- Don't set RxComplete in boomerang interrupt enable reg
- Added standard spinlocking.
- Removed setting/clearing of dev->interrupt
- Removed vp->in_interrupt
- spinlock in vortex_timer to protect mdio functions
- disable local interrupts around call to vortex_interrupt in
vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
- Removed global save/restore_flags() and cli() from get_stats
and vortex_start_xmit()
- Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
- In vortex_start_xmit(), move the lock to _after_ we've altered
vp->cur_tx and vp->tx_full. This defeats the race between
vortex_start_xmit() and vortex_interrupt which was identified
by Bogdan Costescu.
- Merged back support for six new cards from various sources
- Tell it that 3c905C has NWAY
- Fix handling of SetStatusEnd in 'Too much work..' code, as
per 2.3.99's 3c575_cb (Dave Hinds). Added vp->deferred for this.
 
24May00 <2.2.16-pre4> andrewm
- Replace union wn3_config with BFINS/BFEXT manipulation for
sparc64 (Pete Zaitcev, Peter Jones)
- Use insl_ns/outsl_ns for __powerpc__ (Andreas Tobler)
- In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex): clear
tbusy and force a BH rerun to better recover from errors.
 
24Jun00 <2.2.16> andrewm
- Better handling of shared interrupts
- Reset the transmitter in vortex_error() on both maxcollisions and Tx reclaim error
- Split the ISR into vortex_interrupt and boomerang_interrupt. This is
to fix once-and-for-all the dubious testing of vortex status bits on
boomerang/hurricane/cyclone/tornado NICs.
- Fixed crash under OOM during vortex_open() (Mark Hemment)
- Fix Rx cessation problem during OOM (help from Mark Hemment)
 
01Aug00 <2.2.17-pre14> andrewm
- Added 3c556 support (Fred Maciel)
 
16Aug00 <2.2.17-pre17> andrem
- In vortex_error: don't reset the Tx after txReclaim or maxCollisions errors
- In vortex_error(do_tx_reset): only reset dev->tbusy for vortex-style NICs.
- In vortex_open(), set vp->tx_full to zero (else we get errors if the device was
closed with a full Tx ring).
 
17Oct00 <2.2.18-pre16> andrewm
- Added support for the 3c556B Laptop Hurricane (Louis Gerbarg)
- Backported transceiver options handling from 2.4. This changes the semantics
of forcing full duplex in the `options' parm! (It's better to use
`full_duplex' anyway). See Documentation/vortex.txt (Maciej Rozycki).
- Set PCI latency timer to maximum for the 3c590 (From Donald's driver)
- Removed all the CARDBUS code (it's never used).
- Added INVERT_MII_PWR, EEPROM_8BIT, EEPROM_OFFSET. Use them.
- Use EEPROM_8BIT for the 3c555
- Merged ACPI WOL support from Donald's drivers.
- Sort-of backported Donald's new medialock code. Called it duplexlock
and we now prefer to use `full_duplex=[-1,0,1]' to force duplex mode.
- Merged the rx_oom_timer from 2.4. This gives better handling of OOM
conditions. Backed out the previous way of handling this.
- Replace suser() with capable(CAP_NET_ADMIN) in ioctl().
 
07Jan01 <2.2.19-pre6> andrewm
- Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
- Added and used wait_for_completion(). 3c905CX problems.
- Removed the code for older kernel versions.
- Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
- Search for phy 24 first for 3c905CX. (D Becker)
- Don't free skbs we don't own on oom path in vortex_open().
- Added explicit `medialock' flag so we can truly
lock the media type down with `options'.
- In vortex_error(), only reset the up/down load engine, not all the
interface stuff.
- Added and used EEPROM_NORESET for 3c556B PM resumes.
- Enable WOL with the `enable_wol' module parm
- Give the 3c980 HAS_NWAY
 
- See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.2 for more details.
*/
 
static char version[] =
"3c59x.c 18Feb01 Donald Becker and others http://www.scyld.com/network/vortex.html\n";
 
/* "Knobs" that adjust features and parameters. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
Setting to > 1512 effectively disables this feature. */
static const int rx_copybreak = 200;
/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
static const int mtu = 1500;
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
 
/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
//#define vortex_debug debug
#ifdef VORTEX_DEBUG
static int vortex_debug = VORTEX_DEBUG;
#else
static int vortex_debug = 0;
#endif
 
/* Some values here only for performance evaluation and path-coverage
debugging. */
static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0, rx_csumhits;
 
/* A few values that may be tweaked. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT ((400*HZ)/1000)
 
/* Keep the ring sizes a power of two for efficiency. */
#define TX_RING_SIZE 16
#define RX_RING_SIZE 32
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
 
#include <linux/config.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/irq.h> /* For NR_IRQS only. */
#include <asm/bitops.h>
#include <asm/io.h>
 
/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
This is only in the support-all-kernels source code. */
 
#define RUN_AT(x) (jiffies + (x))
 
 
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c590/3c900 series Vortex/Boomerang driver");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(enable_wol, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(compaq_ioaddr, "i");
MODULE_PARM(compaq_irq, "i");
MODULE_PARM(compaq_device_id, "i");
 
/* Operational parameter that usually are not changed. */
 
/* The Vortex size is twice that of the original EtherLinkIII series: the
runtime register window, window 1, is now always mapped in.
The Boomerang size is twice as large as the Vortex -- it has additional
bus master control registers. */
#define VORTEX_TOTAL_SIZE 0x20
#define BOOMERANG_TOTAL_SIZE 0x40
 
/* Set iff a MII transceiver on any interface requires mdio preamble.
This only set with the original DP83840 on older 3c905 boards, so the extra
code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 0;
 
/*
Theory of Operation
 
I. Board Compatibility
 
This device driver is designed for the 3Com FastEtherLink and FastEtherLink
XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs
versions of the FastEtherLink cards. The supported product IDs are
3c590, 3c592, 3c595, 3c597, 3c900, 3c905
 
The related ISA 3c515 is supported with a separate driver, 3c515.c, included
with the kernel source or available from
cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
 
II. Board-specific settings
 
PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.
 
The EEPROM settings for media type and forced-full-duplex are observed.
The EEPROM media type should be left at the default "autoselect" unless using
10base2 or AUI connections which cannot be reliably detected.
 
III. Driver operation
 
The 3c59x series use an interface that's very similar to the previous 3c5x9
series. The primary interface is two programmed-I/O FIFOs, with an
alternate single-contiguous-region bus-master transfer (see next).
 
The 3c900 "Boomerang" series uses a full-bus-master interface with separate
lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
DEC Tulip and Intel Speedo3. The first chip version retains a compatible
programmed-I/O interface that has been removed in 'B' and subsequent board
revisions.
 
One extension that is advertised in a very large font is that the adapters
are capable of being bus masters. On the Vortex chip this capability was
only for a single contiguous region making it far less useful than the full
bus master capability. There is a significant performance impact of taking
an extra interrupt or polling for the completion of each transfer, as well
as difficulty sharing the single transfer engine between the transmit and
receive threads. Using DMA transfers is a win only with large blocks or
with the flawed versions of the Intel Orion motherboard PCI controller.
 
The Boomerang chip's full-bus-master interface is useful, and has the
currently-unused advantages over other similar chips that queued transmit
packets may be reordered and receive buffer groups are associated with a
single frame.
 
With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
Rather than a fixed intermediate receive buffer, this scheme allocates
full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as
the copying breakpoint: it is chosen to trade-off the memory wasted by
passing the full-sized skbuff to the queue layer for all frames vs. the
copying cost of copying a frame to a correctly-sized skbuff.
 
 
IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.
 
IV. Notes
 
Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
3c590, 3c595, and 3c900 boards.
The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
the EISA version is called "Demon". According to Terry these names come
from rides at the local amusement park.
 
The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
This driver only supports ethernet packets because of the skbuff allocation
limit of 4K.
*/
 
/* Added by Nino - Begin */
extern int pci20to26_find_class(unsigned int class_code, int index, BYTE *bus, BYTE *dev);
extern int pci20to26_read_config_byte(unsigned int bus, unsigned int dev, int where, u8 *val);
extern int pci20to26_read_config_word(unsigned int bus, unsigned int dev, int where, u16 *val);
extern int pci20to26_read_config_dword(unsigned int bus, unsigned int dev, int where, u32 *val);
extern int pci20to26_write_config_byte(unsigned int bus, unsigned int dev, int where, u8 val);
extern int pci20to26_write_config_word(unsigned int bus, unsigned int dev, int where, u16 val);
extern int pci20to26_write_config_dword(unsigned int bus, unsigned int dev, int where, u32 val);
#ifndef CONFIG_PCI
#define CONFIG_PCI
#endif
/* Added by Nino - End */
 
/* This table drives the PCI probe routines. It's mostly boilerplate in all
of the drivers, and will likely be provided by some future kernel.
*/
enum pci_flags_bit {
PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
struct pci_id_info {
const char *name;
u16 vendor_id, device_id, device_id_mask, flags;
int drv_flags, io_size;
struct device *(*probe1)(int pci_bus, int pci_devfn, struct device *dev,
long ioaddr, int irq, int chip_idx, int fnd_cnt);
};
 
enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
HAS_PWR_CTRL=0x10, HAS_MII=0x20, HAS_NWAY=0x40, HAS_CB_FNS=0x80,
INVERT_MII_PWR=0x100, EEPROM_8BIT=0x200, EEPROM_OFFSET=0x400,
EEPROM_NORESET=0x800};
static struct device *vortex_probe1(int pci_bus, int pci_devfn,
struct device *dev, long ioaddr,
int irq, int dev_id, int card_idx);
static struct pci_id_info pci_tbl[] = {
{"3c590 Vortex 10Mbps", 0x10B7, 0x5900, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
{"3c592 EISA 10mbps Demon/Vortex", 0x10B7, 0x5920, 0xffff, /* AKPM: from Don's 3c59x_cb.c 0.49H */
PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
{"3c597 EISA Fast Demon/Vortex", 0x10B7, 0x5970, 0xffff, /* AKPM: from Don's 3c59x_cb.c 0.49H */
PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
{"3c595 Vortex 100baseTx", 0x10B7, 0x5950, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
{"3c595 Vortex 100baseT4", 0x10B7, 0x5951, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
{"3c595 Vortex 100base-MII", 0x10B7, 0x5952, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
 
#define EISA_TBL_OFFSET 6 /* AKPM: the offset of this entry */
{"3Com Vortex", 0x10B7, 0x5900, 0xff00,
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, vortex_probe1},
{"3c900 Boomerang 10baseT", 0x10B7, 0x9000, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, vortex_probe1},
{"3c900 Boomerang 10Mbps Combo", 0x10B7, 0x9001, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, vortex_probe1},
{"3c900 Cyclone 10Mbps Combo", 0x10B7, 0x9005, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
{"3c900B-FL Cyclone 10base-FL", 0x10B7, 0x900A, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
{"3c900 Cyclone 10Mbps TPO", 0x10B7, 0x9004, 0xffff, /* AKPM: from Don's 0.99M */
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, vortex_probe1},
{"3c900 Cyclone 10Mbps TPC", 0x10B7, 0x9006, 0xffff, /* AKPM: from Don's 0.99M */
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
{"3c905 Boomerang 100baseTx", 0x10B7, 0x9050, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, vortex_probe1},
{"3c905 Boomerang 100baseT4", 0x10B7, 0x9051, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, vortex_probe1},
{"3c905B Cyclone 100baseTx", 0x10B7, 0x9055, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, vortex_probe1},
{"3c905B Cyclone 10/100/BNC", 0x10B7, 0x9058, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, vortex_probe1},
{"3c905B-FX Cyclone 100baseFx", 0x10B7, 0x905A, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
{"3c905C Tornado", 0x10B7, 0x9200, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY, 128, vortex_probe1},
{"3c980 Cyclone", 0x10B7, 0x9800, 0xfff0,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, vortex_probe1},
{"3cSOHO100-TX Hurricane", 0x10B7, 0x7646, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, vortex_probe1},
{"3c555 Laptop Hurricane", 0x10B7, 0x5055, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT, 128, vortex_probe1},
{"3c556 10/100 Mini PCI Adapter", 0x10B7, 0x6055, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR, 128, vortex_probe1},
{"3c556B Laptop Hurricane", 0x10B7, 0x6056, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|EEPROM_NORESET, 128, vortex_probe1},
{"3c575 Boomerang CardBus", 0x10B7, 0x5057, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, vortex_probe1},
{"3CCFE575 Cyclone CardBus", 0x10B7, 0x5157, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS,
128, vortex_probe1},
{"3CCFE656 Cyclone CardBus", 0x10B7, 0x6560, 0xffff,
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS,
128, vortex_probe1},
{"3CCFEM656 Cyclone CardBus", 0x10B7, 0x6562, 0xffff, /* AKPM: From the 2.3 driver ? */
PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS,
128, vortex_probe1},
{"3c575 series CardBus (unknown version)", 0x10B7, 0x5057, 0xf0ff,
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, vortex_probe1},
{"3c450 HomePNA Tornado", 0x10B7, 0x4500, 0xffff, /* AKPM: from Don's 0.99P */
PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY, 128, vortex_probe1},
{"3Com Boomerang (unknown version)", 0x10B7, 0x9000, 0xff00,
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, vortex_probe1},
{0,}, /* 0 terminated list. */
};
 
/* Operational definitions.
These are not used by other compilation units and thus are not
exported in a ".h" file.
 
First the windows. There are eight register windows, with the command
and status registers available in each.
*/
#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e
 
/* The top five bits written to EL3_CMD are a command, the lower
11 bits are the parameter, if applicable.
Note that 11 parameters bits was fine for ethernet, but the new chip
can handle FDDI length frames (~4500 octets) and now parameters count
32-bit 'Dwords' rather than octets. */
 
enum vortex_cmd {
TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
UpStall = 6<<11, UpUnstall = (6<<11)+1,
DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
SetTxThreshold = 18<<11, SetTxStart = 19<<11,
StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
 
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
 
/* Bits in the general status register. */
enum vortex_status {
IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
IntReq = 0x0040, StatsFull = 0x0080,
DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
DMAInProgress = 1<<11, /* DMA controller is still busy.*/
CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
};
 
/* Register window 1 offsets, the window used in normal operation.
On the Vortex this window is always mapped at offsets 0x10-0x1f. */
enum Window1 {
TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
};
enum Window0 {
Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
Wn0EepromData = 12, /* Window 0: EEPROM results register. */
IntrStatus=0x0E, /* Valid in all windows. */
};
enum Win0_EEPROM_bits {
EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
};
/* EEPROM locations. */
enum eeprom_offset {
PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
DriverTune=13, Checksum=15};
 
enum Window2 { /* Window 2. */
Wn2_ResetOptions=12,
};
enum Window3 { /* Window 3: MAC/config bits. */
Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
};
 
#define BFEXT(value, offset, bitcount) \
((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))
 
#define BFINS(lhs, rhs, offset, bitcount) \
(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \
(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))
 
#define RAM_SIZE(v) BFEXT(v, 0, 3)
#define RAM_WIDTH(v) BFEXT(v, 3, 1)
#define RAM_SPEED(v) BFEXT(v, 4, 2)
#define ROM_SIZE(v) BFEXT(v, 6, 2)
#define RAM_SPLIT(v) BFEXT(v, 16, 2)
#define XCVR(v) BFEXT(v, 20, 4)
#define AUTOSELECT(v) BFEXT(v, 24, 1)
 
enum Window4 { /* Window 4: Xcvr/media bits. */
Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
};
enum Win4_Media_bits {
Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
Media_LnkBeat = 0x0800,
};
enum Window7 { /* Window 7: Bus Master control. */
Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
};
/* Boomerang bus master control registers. */
enum MasterCtrl {
PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
};
 
/* The Rx and Tx descriptor lists.
Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
alignment contraint on tx_ring[] and rx_ring[]. */
#define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */
/* Boomerang/Cyclone bus-master receive descriptor.  Laid out exactly as
   the NIC hardware expects: all fields are 32-bit, descriptors are
   chained through `next`, and the addr/len pair list is terminated by
   setting LAST_FRAG in `length`. */
struct boom_rx_desc {
	u32 next;					/* Last entry points to 0.   */
	s32 status;
	u32 addr;					/* Up to 63 addr/len pairs possible. */
	s32 length;					/* Set LAST_FRAG to indicate last pair. */
};
/* Values for the Rx status entry. */
enum rx_desc_status {
	RxDComplete=0x00008000, RxDError=0x4000,
	/* See boomerang_rx() for actual error bits */
	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
};
 
/* Boomerang/Cyclone bus-master transmit descriptor; same hardware
   layout rules as boom_rx_desc above. */
struct boom_tx_desc {
	u32 next;					/* Last entry points to 0.   */
	s32 status;					/* bits 0:12 length, others see below.  */
	u32 addr;
	s32 length;
};

/* Values for the Tx status entry. */
enum tx_desc_status {
	CRCDisable=0x2000, TxDComplete=0x8000,
	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
	TxIntrUploaded=0x80000000,		/* IRQ when in FIFO, but maybe not sent. */
};
 
/* Chip features we care about in vp->capabilities, read from the EEPROM. */
enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
 
/* Per-interface driver state, hung off dev->priv.
   The descriptor rings MUST come first: the hardware requires 8-byte
   alignment on rx_ring[]/tx_ring[] (see the alignment note above). */
struct vortex_private {
	/* The Rx and Tx rings should be quad-word-aligned. */
	struct boom_rx_desc rx_ring[RX_RING_SIZE];
	struct boom_tx_desc tx_ring[TX_RING_SIZE];
	/* The addresses of transmit- and receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct device *next_module;		/* Chain of all instances this driver owns. */
	void *priv_addr;			/* Unaligned base address of this struct, for kfree(). */
	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	struct net_device_stats stats;
	struct sk_buff *tx_skb;			/* Packet being eaten by bus master ctrl.  */

	/* PCI configuration space information. */
	u8 pci_bus, pci_devfn;			/* PCI bus location, for power management. */
	char *cb_fn_base;			/* CardBus function status addr space. */
	int chip_id;				/* Index into pci_tbl[]. */

	/* The remainder are related to chip state, mostly media selection. */
	struct timer_list timer;		/* Media selection timer. */
	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */
	int options;				/* User-settable misc. driver options. */
	unsigned int media_override:4, 		/* Passed-in media type. */
		default_media:4,		/* Read from the EEPROM/Wn3_Config. */
		full_duplex:1,			/* User wants FD (or we're running at FD) */
		duplexlock:1,			/* User has forced duplex */
		autoselect:1,			/* Can use NWAY negotiation */
		bus_master:1,			/* Vortex can only do a fragment bus-m. */
		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
		tx_full:1,
		enable_wol:1,			/* Wake-on-LAN requested via module parm. */
		medialock:1;			/* Media type locked down via `options'. */
	u16 status_enable;			/* Mask written with SetStatusEnb. */
	u16 intr_enable;			/* Mask written with SetIntrEnb. */
	u16 available_media;			/* From Wn3_Options. */
	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
	u16 advertising;			/* NWay media advertisement */
	unsigned char phys[2];			/* MII device addresses. */
	u16 deferred;				/* Resend these interrupts when we
						 * bale from the ISR */
	spinlock_t lock;			/* Serializes ISR vs. timer/xmit paths. */
};
 
/* The action to take with a media selection timer tick.
Note that we deviate from the 3Com order by checking 10base2 before AUI.
*/
enum xcvr_types {
XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
};
 
/* Media-selection state machine table, indexed by enum xcvr_types.
   Each entry names a transceiver, the Wn4_Media bits to program for it,
   the Wn3_Config present-bit to test, the next media to try on failure,
   and how long (in jiffies) to wait before checking link status. */
static struct media_table {
	char *name;
	unsigned int media_bits:16,		/* Bits to set in Wn4_Media register. */
		mask:8,				/* The transceiver-present bit in Wn3_Config.*/
		next:8;				/* The media type to try next. */
	int wait;				/* Time before we check media status. */
} media_tbl[] = {
  {	"10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
  { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
  { "undefined", 0,			0x80, XCVR_10baseT, 10000},
  { "10base2",   0,			0x10, XCVR_AUI,		(1*HZ)/10},
  { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
  { "100baseFX", Media_Lnk, 0x04, XCVR_MII,		(14*HZ)/10},
  { "MII",		 0,			0x41, XCVR_10baseT, 3*HZ },
  { "undefined", 0,			0x01, XCVR_10baseT, 10000},
  { "Autonegotiate", 0,		0x41, XCVR_10baseT, 3*HZ},
  { "MII-External",	 0,		0x41, XCVR_10baseT, 3*HZ },
  { "Default",	 0,			0xFF, XCVR_10baseT, 10000},
};
 
/* Forward declarations for the driver entry points and helpers below. */
static int vortex_scan(struct device *dev, struct pci_id_info pci_tbl[]);
static int vortex_open(struct device *dev);
/* MII (PHY) management-interface bit-banging helpers. */
static void mdio_sync(long ioaddr, int bits);
static int mdio_read(long ioaddr, int phy_id, int location);
static void mdio_write(long ioaddr, int phy_id, int location, int value);
/* Periodic media-selection / Rx out-of-memory retry timers. */
static void vortex_timer(unsigned long arg);
static void rx_oom_timer(unsigned long arg);
/* Transmit paths: PIO/simple bus-master (vortex) vs. descriptor ring (boomerang). */
static int vortex_start_xmit(struct sk_buff *skb, struct device *dev);
static int boomerang_start_xmit(struct sk_buff *skb, struct device *dev);
static int vortex_rx(struct device *dev);
static int boomerang_rx(struct device *dev);
static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int vortex_close(struct device *dev);
static void update_stats(long ioaddr, struct device *dev);
static struct net_device_stats *vortex_get_stats(struct device *dev);
static void set_rx_mode(struct device *dev);
static int vortex_ioctl(struct device *dev, struct ifreq *rq, int cmd);
/* ACPI power-management helpers (wake device / arm wake-on-LAN). */
static void acpi_wake(int pci_bus, int pci_devfn);
static void acpi_set_WOL(struct device *dev);
 
/* Debug hook: when enabled, route skb allocation through a checked variant. */
#if 0
#warning dev_alloc_skb_debug is defined!
#define dev_alloc_skb dev_alloc_skb_debug
#endif

/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
/* Per-card tuning knobs; -1 means "not set, use EEPROM/auto defaults". */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* A list of all installed Vortex devices, for removing the driver module. */
static struct device *root_vortex_dev = NULL;
 
#ifdef MODULE
/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr = 0, compaq_irq = 0, compaq_device_id = 0x5900;

/* Module-build entry point: scan all buses for supported cards.
   Returns 0 if at least one card was found, -ENODEV otherwise. */
int init_module(void)
{
	if (vortex_debug)
		printk(KERN_INFO "%s", version);
	return vortex_scan(0, pci_tbl);
}

#else
/* Static-build probe entry point.  The 'scanned' guard makes repeated
   probe calls (one per candidate device) scan the buses only once. */
int tc59x_probe(struct device *dev)
{
	static int scanned=0;
	if(scanned++)
		return -ENODEV;
	//printk(KERN_INFO "%s", version);
	return vortex_scan(dev, pci_tbl);
}
#endif  /* not MODULE */
 
/*
 * Issue a command to the EL3 command register and poll until the chip
 * drops the CmdInProgress status bit.  A short busy-poll handles the
 * common (fast) commands; slow commands are polled in 10 us steps for
 * up to one second, after which the failure is logged.
 */
static void wait_for_completion(struct device *dev, int cmd)
{
	long ioaddr = dev->base_addr;
	int spin;

	outw(cmd, ioaddr + EL3_CMD);

	/* Fast path: most commands finish within a few register reads. */
	for (spin = 0; spin < 2000; spin++)
		if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
			return;

	/* OK, that didn't work.  Do it the slow way.  One second */
	for (spin = 0; spin < 100000; spin++) {
		if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) {
			if (vortex_debug > 1)
				printk(KERN_INFO "%s: command 0x%04x took %d usecs!\n",
					   dev->name, cmd, spin * 10);
			return;
		}
		udelay(10);
	}
	printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
		   dev->name, cmd, inw(ioaddr + EL3_STATUS));
}
 
/*
 * Probe for all supported 3Com NICs: first walk the PCI bus (when PCI
 * support is compiled in), then the EISA slots, then apply the Compaq
 * BIOS32 work-around (module build only).  Every matching card is handed
 * to vortex_probe1() for registration.
 * Returns 0 when at least one card was found, -ENODEV otherwise.
 */
static int vortex_scan(struct device *dev, struct pci_id_info pci_tbl[])
{
	int cards_found = 0;

	/* Allow an EISA-only driver. */
#if defined(CONFIG_PCI) || (defined(MODULE) && !defined(NO_PCI))
	/* Ideally we would detect all cards in slot order.  That would
	   be best done a central PCI probe dispatch, which wouldn't work
	   well with the current structure.  So instead we detect 3Com cards
	   in slot order. */
	if (pcibios_present()) {
		static int pci_index = 0;	/* static: resumes where a prior scan stopped */
		unsigned char pci_bus, pci_device_fn;

		for (;pci_index < 0xff; pci_index++) {
			u16 vendor, device, pci_command, new_command, pwr_cmd;
			int chip_idx, irq;
			long ioaddr;

			/* Enumerate Ethernet-class PCI functions one at a time. */
			if (pci20to26_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
									  &pci_bus, &pci_device_fn) != PCIBIOS_SUCCESSFUL)
				break;
			pci20to26_read_config_word(pci_bus, pci_device_fn,
									   PCI_VENDOR_ID, &vendor);
			pci20to26_read_config_word(pci_bus, pci_device_fn,
									   PCI_DEVICE_ID, &device);
			/* Match vendor/device against the supported-chip table. */
			for (chip_idx = 0; pci_tbl[chip_idx].vendor_id; chip_idx++)
				if (vendor == pci_tbl[chip_idx].vendor_id
					&& (device & pci_tbl[chip_idx].device_id_mask) ==
					pci_tbl[chip_idx].device_id)
					break;
			if (pci_tbl[chip_idx].vendor_id == 0) 		/* Compiled out! */
				continue;

			/* The Cyclone requires config space re-write if powered down. */
			acpi_wake(pci_bus, pci_device_fn);

			{
#if 0 //defined(PCI_SUPPORT_VER2)
				struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
				ioaddr = pdev->base_address[0] & ~3;
				irq = pdev->irq;
#else
				/* Read BAR0 and the interrupt line directly from config space. */
				u32 pci_ioaddr;
				u8 pci_irq_line;
				pci20to26_read_config_byte(pci_bus, pci_device_fn,
									   PCI_INTERRUPT_LINE, &pci_irq_line);
				pci20to26_read_config_dword(pci_bus, pci_device_fn,
										PCI_BASE_ADDRESS_0, &pci_ioaddr);
				ioaddr = pci_ioaddr & ~3;	/* strip I/O-space indicator bits */
				irq = pci_irq_line;
#endif
			}

			/* Power-up the card. */
			pci20to26_read_config_word(pci_bus, pci_device_fn,
								   0xe0, &pwr_cmd);
			if (pwr_cmd & 0x3) {
				/* Save the ioaddr and IRQ info!  A powered-down device may
				   have lost its BAR/IRQ assignment, so re-write both. */
				printk(KERN_INFO "  A 3Com network adapter is powered down!"
					   "  Setting the power state %4.4x->%4.4x.\n",
					   pwr_cmd, pwr_cmd & ~3);
				pci20to26_write_config_word(pci_bus, pci_device_fn,
										0xe0, pwr_cmd & ~3);
				printk(KERN_INFO "  Setting the IRQ to %d, IOADDR to %#lx.\n",
					   irq, ioaddr);
				pci20to26_write_config_byte(pci_bus, pci_device_fn,
										PCI_INTERRUPT_LINE, irq);
				pci20to26_write_config_dword(pci_bus, pci_device_fn,
										 PCI_BASE_ADDRESS_0, ioaddr);
			}

			if (ioaddr == 0) {
				printk(KERN_WARNING "  A 3Com network adapter has been found, "
					   "however it has not been assigned an I/O address.\n"
					   "  You may need to power-cycle the machine for this "
					   "device to work!\n");
				continue;
			}

			/* Skip cards whose I/O range is already claimed. */
			if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
				continue;

			/* Activate the card: enable I/O decode and bus mastering. */
			pci20to26_read_config_word(pci_bus, pci_device_fn,
								   PCI_COMMAND, &pci_command);
			new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
			if (pci_command != new_command) {
				printk(KERN_INFO "  The PCI BIOS has not enabled the device "
					   "at %d/%d.  Updating PCI command %4.4x->%4.4x.\n",
					   pci_bus, pci_device_fn, pci_command, new_command);
				pci20to26_write_config_word(pci_bus, pci_device_fn,
										PCI_COMMAND, new_command);
			}

			dev = vortex_probe1(pci_bus, pci_device_fn, dev, ioaddr, irq,
								chip_idx, cards_found);

			if (dev) {
				if ((device & 0xff00) == 0x5900) {
					/* Get and check the latency values.  On the 3c590 series
					   the latency timer must be set to the maximum value to avoid
					   data corruption that occurs when the timer expires during
					   a transfer -- a bug in the Vortex chip only. */
					u8 pci_latency;
					u8 new_latency = 248;
					pci20to26_read_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, &pci_latency);
					if (pci_latency < new_latency) {
						printk(KERN_INFO "%s: Overriding PCI latency"
							   " timer (CFLT) setting of %d, new value is %d.\n",
							   dev->name, pci_latency, new_latency);
						pci20to26_write_config_byte(pci_bus, pci_device_fn, PCI_LATENCY_TIMER, new_latency);
					}
				}
				dev = 0;	/* consumed: next iteration allocates a fresh device */
				cards_found++;
			}
		}
	}
#endif /* NO_PCI */

	/* Now check all slots of the EISA bus. */
	if (EISA_bus) {
		static long ioaddr = 0x1000;
		for ( ; ioaddr < 0x9000; ioaddr += 0x1000) {
			int device_id;
			if (check_region(ioaddr, VORTEX_TOTAL_SIZE))
				continue;
			/* Check the standard EISA ID register for an encoded '3Com'. */
			if (inw(ioaddr + 0xC80) != 0x6d50)
				continue;
			/* Check for a product that we support, 3c59{2,7} any rev. */
			device_id = (inb(ioaddr + 0xC82)<<8) + inb(ioaddr + 0xC83);
			if ((device_id & 0xFF00) != 0x5900)
				continue;
			vortex_probe1(0, 0, dev, ioaddr, inw(ioaddr + 0xC88) >> 12,
						  EISA_TBL_OFFSET, cards_found);
			dev = 0;
			cards_found++;
		}
	}

#ifdef MODULE
	/* Special code to work-around the Compaq PCI BIOS32 problem. */
	if (compaq_ioaddr) {
		vortex_probe1(0, 0, dev, compaq_ioaddr, compaq_irq,
					  compaq_device_id, cards_found++);
		dev = 0;
	}
#endif

	return cards_found ? 0 : -ENODEV;
}
 
/*
 * Register and initialize one card at the given I/O address/IRQ.
 * Allocates the device and the (16-byte aligned) private structure, reads
 * the station address and capabilities out of the EEPROM, applies the
 * per-card 'options'/'full_duplex'/'enable_wol' settings, probes MII
 * transceivers when appropriate, and wires up the device method pointers.
 * Returns the registered device, or NULL on allocation failure.
 */
static struct device *vortex_probe1(int pci_bus, int pci_devfn,
									struct device *dev, long ioaddr,
									int irq, int chip_idx, int card_idx)
{
	struct vortex_private *vp;
	int option;
	unsigned int eeprom[0x40], checksum = 0;		/* EEPROM contents */
	int i;

	dev = init_etherdev(dev, 0);
	if (dev == NULL) {
		printk(KERN_EMERG "3c59x: init_etherdev failed\n");
		return NULL;
	}

	printk(KERN_INFO "%s: 3Com %s at 0x%lx, ",
		   dev->name, pci_tbl[chip_idx].name, ioaddr);

	dev->base_addr = ioaddr;
	dev->irq = irq;
	dev->mtu = mtu;

	/* Make certain the descriptor lists are aligned. */
	{
		/* Over-allocate by 15 bytes so vp can be rounded up to a
		   16-byte boundary; the raw pointer is kept for kfree(). */
		void *mem = kmalloc(sizeof(*vp) + 15, GFP_KERNEL);
		if (!mem) {
			printk(KERN_EMERG "3c59x: out of memory for dev->priv\n");
			unregister_netdev(dev);
			kfree(dev);
			return NULL;
		}
		vp = (void *)(((long)mem + 15) & ~15);
		memset(vp, 0, sizeof(*vp));
		vp->priv_addr = mem;
	}
	dev->priv = vp;

	/* Link onto the global list used for module removal. */
	vp->next_module = root_vortex_dev;
	root_vortex_dev = dev;

	spin_lock_init(&vp->lock);
	vp->chip_id = chip_idx;
	vp->pci_bus = pci_bus;
	vp->pci_devfn = pci_devfn;

	/* The lower four bits are the media type. */
	option = -1;
	if (dev->mem_start)
		option = dev->mem_start;	/* per-device override via mem_start */
	else if (card_idx < MAX_UNITS)
		option = options[card_idx];

	if (option >= 0) {
		vp->media_override = ((option & 7) == 2)  ?  0  :  option & 15;
		if (vp->media_override != 7)
			vp->medialock = 1;		/* pin the media: disable auto-switching */
		if (option & 0x200) {
			vp->full_duplex = 1;
			vp->duplexlock = 1;
			printk( "\n" KERN_WARNING
			"%s: forcing duplex via options is deprecated - use `full_duplex'.\n",
				dev->name);
		}
		vp->bus_master = (option & 16) ? 1 : 0;
	} else {
		vp->media_override = 7;		/* 7 == no override, autoselect */
		vp->full_duplex = 0;
		vp->bus_master = 0;
	}
	if (card_idx < MAX_UNITS) {
		if (enable_wol[card_idx] > 0)
			vp->enable_wol = 1;
		if (full_duplex[card_idx] == 0) {	/* full_duplex=0 : force half duplex */
			vp->duplexlock = 1;
		}
		if (full_duplex[card_idx] > 0) {	/* full_duplex=1: force full duplex */
			vp->duplexlock = 1;
			vp->full_duplex = 1;
		}
		/* full_duplex=-1: duplex is not forced */
	}
	vp->options = option;

	/* Read the station address from the EEPROM. */
	EL3WINDOW(0);
	{
		int base;

		/* The EEPROM read command encoding varies across chip families. */
		if (pci_tbl[chip_idx].drv_flags & EEPROM_8BIT)
			base = 0x230;
		else if (pci_tbl[chip_idx].drv_flags & EEPROM_OFFSET)
			base = EEPROM_Read + 0x30;
		else
			base = EEPROM_Read;

		for (i = 0; i < 0x40; i++) {
			int timer;
			outw(base + i, ioaddr + Wn0EepromCmd);
			/* Pause for at least 162 us. for the read to take place. */
			for (timer = 10; timer >= 0; timer--) {
				udelay(162);
				if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
					break;
			}
			eeprom[i] = inw(ioaddr + Wn0EepromData);
		}
	}

	/* Verify the EEPROM checksum (two variants exist across revisions). */
	for (i = 0; i < 0x18; i++)
		checksum ^= eeprom[i];
	checksum = (checksum ^ (checksum >> 8)) & 0xff;
	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
		while (i < 0x21)
			checksum ^= eeprom[i++];
		checksum = (checksum ^ (checksum >> 8)) & 0xff;
	}
	if (checksum != 0x00 && !(pci_tbl[chip_idx].drv_flags & IS_TORNADO))
		printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);

	/* Station address lives at EEPROM words 10..12, big-endian. */
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
	for (i = 0; i < 6; i++)
		printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
#ifdef __sparc__
	printk(", IRQ %s\n", __irq_itoa(dev->irq));
#else
	printk(", IRQ %d\n", dev->irq);
	/* Tell them about an invalid IRQ. */
	if (vortex_debug && (dev->irq <= 0 || dev->irq >= NR_IRQS))
		printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
			   dev->irq);
#endif

	/* CardBus variants expose a function-status window via BAR2. */
	if (pci_tbl[vp->chip_id].drv_flags & HAS_CB_FNS) {
		u32 fn_st_addr;			/* Cardbus function status space */
		u16 n;
		pci20to26_read_config_dword(pci_bus, pci_devfn, PCI_BASE_ADDRESS_2,
								&fn_st_addr);
		if (fn_st_addr)
			vp->cb_fn_base = ioremap(fn_st_addr & ~3, 128);
		printk("%s: CardBus functions mapped %8.8x->%p (PCMCIA committee"
			   " brain-damage).\n", dev->name, fn_st_addr, vp->cb_fn_base);
		EL3WINDOW(2);

		n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
		n |= 0x10;
		if (pci_tbl[chip_idx].drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		outw(n, ioaddr + Wn2_ResetOptions);
	}

	/* Extract our information from the EEPROM data. */
	vp->info1 = eeprom[13];
	vp->info2 = eeprom[15];
	vp->capabilities = eeprom[16];

	/* info1 bit 15: EEPROM says the link is full-duplex capable. */
	if (!vp->duplexlock && (vp->info1 & 0x8000))
		vp->full_duplex = 1;

	{
		char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
		unsigned int config;
		EL3WINDOW(3);
		vp->available_media = inw(ioaddr + Wn3_Options);
		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
			vp->available_media = 0x40;
		config = inl(ioaddr + Wn3_Config);
		if (vortex_debug > 1)
			printk(KERN_DEBUG "  Internal config register is %4.4x, "
				   "transceivers %#x.\n", config, inw(ioaddr + Wn3_Options));
		printk(KERN_INFO "  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
			   8 << RAM_SIZE(config),
			   RAM_WIDTH(config) ? "word" : "byte",
			   ram_split[RAM_SPLIT(config)],
			   AUTOSELECT(config) ? "autoselect/" : "",
			   XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
			   media_tbl[XCVR(config)].name);
		vp->default_media = XCVR(config);
		vp->autoselect = AUTOSELECT(config);
	}

	if (vp->media_override != 7) {
		printk(KERN_INFO "  Media override to transceiver type %d (%s).\n",
			   vp->media_override, media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else
		dev->if_port = vp->default_media;

	/* Probe for MII transceivers when the media calls for one. */
	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		int phy, phy_idx = 0;
		EL3WINDOW(4);
		mii_preamble_required++;
		mii_preamble_required++;
		mdio_read(ioaddr, 24, 1);
		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
			int mii_status, phyx;

			/*
			 * For the 3c905CX we look at index 24 first, because it bogusly
			 * reports an external PHY at all indices
			 */
			if (phy == 0)
				phyx = 24;
			else if (phy <= 24)
				phyx = phy - 1;
			else
				phyx = phy;
			mii_status = mdio_read(ioaddr, phyx, 1);
			if (mii_status  &&  mii_status != 0xffff) {
				vp->phys[phy_idx++] = phyx;
				printk(KERN_INFO "  MII transceiver found at address %d,"
					   " status %4x.\n", phyx, mii_status);
				if ((mii_status & 0x0040) == 0)
					mii_preamble_required++;
			}
		}
		mii_preamble_required--;
		if (phy_idx == 0) {
			printk(KERN_WARNING"  ***WARNING*** No MII transceivers found!\n");
			vp->phys[0] = 24;	/* fall back to the internal pseudo-PHY */
		} else {
			vp->advertising = mdio_read(ioaddr, vp->phys[0], 4);
			if (vp->full_duplex) {
				/* Only advertise the FD media types. */
				vp->advertising &= ~0x02A0;
				mdio_write(ioaddr, vp->phys[0], 4, vp->advertising);
			}
		}
	}

	if (vp->enable_wol && (vp->capabilities & CapPwrMgmt))
		acpi_set_WOL(dev);

	if (vp->capabilities & CapBusMaster) {
		vp->full_bus_master_tx = 1;
		printk(KERN_INFO"  Enabling bus-master transmits and %s receives.\n",
			   (vp->info2 & 1) ? "early" : "whole-frame" );
		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
	}

	/* We do a request_region() to register /proc/ioports info. */
	request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);

	/* The 3c59x-specific entries in the device structure. */
	dev->open = vortex_open;
	dev->hard_start_xmit = vp->full_bus_master_tx ?
				boomerang_start_xmit : vortex_start_xmit;
	dev->stop = vortex_close;
	dev->get_stats = vortex_get_stats;
	dev->do_ioctl = vortex_ioctl;
	dev->set_multicast_list = set_rx_mode;

	return dev;
}
 
/*
 * Open (bring up) the interface: select the active media port, program
 * duplex, reset Tx/Rx, install the (shared) interrupt handler, write the
 * station address, clear the hardware statistics, set up the bus-master
 * Rx/Tx descriptor rings when the chip supports them, enable interrupts,
 * and start the media-selection timer.
 * Returns 0 on success or a negative errno (IRQ/ring allocation failure).
 */
static int
vortex_open(struct device *dev)
{
	long ioaddr = dev->base_addr;
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	unsigned int config;
	int i, retval;

	MOD_INC_USE_COUNT;

	if (vp->enable_wol)
		acpi_wake(vp->pci_bus, vp->pci_devfn);

	/* Before initializing select the active media port. */
	EL3WINDOW(3);
	config = inl(ioaddr + Wn3_Config);

	if (vp->media_override != 7) {
		if (vortex_debug > 1)
			printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
				   dev->name, vp->media_override,
				   media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else if (vp->autoselect && pci_tbl[vp->chip_id].drv_flags & HAS_NWAY) {
		if (vortex_debug > 1)
			printk(KERN_INFO "%s: using NWAY from config\n", dev->name);
		dev->if_port = XCVR_NWAY;
	} else if (vp->autoselect) {
		/* Find first available media type, starting with 100baseTx. */
		dev->if_port = XCVR_100baseTx;
		while (! (vp->available_media & media_tbl[dev->if_port].mask))
			dev->if_port = media_tbl[dev->if_port].next;
	} else
		dev->if_port = vp->default_media;

	printk(KERN_INFO "%s: Initial media type %s.\n", dev->name, media_tbl[dev->if_port].name);

	if (!vp->duplexlock && (vp->info1 & 0x8000))
		vp->full_duplex = 1;

	/* Write the chosen media type into bits 20-23 of the config register. */
	config = BFINS(config, dev->if_port, 20, 4);
	outl(config, ioaddr + Wn3_Config);

	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		int mii_reg1, mii_reg5;
		EL3WINDOW(4);
		/* Read BMSR (reg1) only to clear old status. */
		mii_reg1 = mdio_read(ioaddr, vp->phys[0], 1);
		mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
		if (mii_reg5 == 0xffff  ||  mii_reg5 == 0x0000)
			;					/* No MII device or no link partner report */
		else if (!vp->duplexlock &&
			((mii_reg5 & 0x0100) != 0			/* 100baseTx-FD */
			 || (mii_reg5 & 0x00C0) == 0x0040))	/* 10T-FD, but not 100-HD */
			vp->full_duplex = 1;
		printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
			   " setting %s-duplex.\n", dev->name, vp->phys[0],
			   mii_reg1, mii_reg5, vp->full_duplex ? "full" : "half");
		EL3WINDOW(3);
	}

	/* Set the full-duplex bit. */
	outb((vp->full_duplex ? 0x20 : 0) | (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);

	if (vortex_debug > 1) {
		printk(KERN_DEBUG "%s: vortex_open() InternalConfig %8.8x.\n", dev->name, config);
	}

	wait_for_completion(dev, TxReset);
	wait_for_completion(dev, RxReset);

	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	/* Use the now-standard shared IRQ implementation. */
	if ((retval = request_irq(dev->irq, vp->full_bus_master_tx ?
				&boomerang_interrupt : &vortex_interrupt,
				SA_SHIRQ, dev->name, dev))) {
		printk(KERN_ERR "%s: Cannot allocate IRQ #%d\n", dev->name, dev->irq);
		goto out;
	}

	if (vortex_debug > 1) {
		EL3WINDOW(4);
		printk(KERN_DEBUG "%s: vortex_open() irq %d media status %4.4x.\n",
			   dev->name, dev->irq, inw(ioaddr + Wn4_Media));
	}

	/* Set the station address and mask in window 2 each time opened. */
	EL3WINDOW(2);
	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + i);
	for (; i < 12; i+=2)
		outw(0, ioaddr + i);	/* clear the address mask */

	if (dev->if_port == XCVR_10base2)
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		outw(StartCoax, ioaddr + EL3_CMD);
	if (dev->if_port != XCVR_NWAY) {
		EL3WINDOW(4);
		outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
			 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
	}

	/* Switch to the stats window, and clear all stats by reading. */
	outw(StatsDisable, ioaddr + EL3_CMD);
	EL3WINDOW(6);
	for (i = 0; i < 10; i++)
		inb(ioaddr + i);
	inw(ioaddr + 10);
	inw(ioaddr + 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	EL3WINDOW(4);
	inb(ioaddr + 12);
	/* ..and on the Boomerang we enable the extra statistics bits. */
	outw(0x0040, ioaddr + Wn4_NetDiag);

	/* Switch to register set 7 for normal use. */
	EL3WINDOW(7);

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		vp->cur_rx = vp->dirty_rx = 0;
		/* Initialize the RxEarly register as recommended. */
		outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
		outl(0x0020, ioaddr + PktStatus);
		if (vortex_debug > 2)
			printk(KERN_DEBUG "%s:  Filling in the Rx ring.\n", dev->name);
		/* Populate the Rx descriptor ring, one skb per slot. */
		for (i = 0; i < RX_RING_SIZE; i++) {
			struct sk_buff *skb;
			vp->rx_ring[i].next = cpu_to_le32(virt_to_bus(&vp->rx_ring[i+1]));
			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
			skb = dev_alloc_skb(PKT_BUF_SZ);
			vp->rx_skbuff[i] = skb;
			if (skb == NULL)
				break;			/* Bad news!  */
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			vp->rx_ring[i].addr = cpu_to_le32(virt_to_bus(skb->tail));
		}
		if (i != RX_RING_SIZE) {
			/* Could not fill the whole ring: release what we got and bail. */
			int j;
			printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name);
			for (j = 0; j < i; j++) {
				if (vp->rx_skbuff[j])
					dev_kfree_skb(vp->rx_skbuff[j]);
			}
			retval = -ENOMEM;
			goto out_free_irq;
		}
		/* Wrap the ring. */
		vp->rx_ring[i-1].next = cpu_to_le32(virt_to_bus(&vp->rx_ring[0]));
		outl(virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
	}
	if (vp->full_bus_master_tx) { 		/* Boomerang bus master Tx. */
		vp->cur_tx = vp->dirty_tx = 0;
		outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
		/* Clear the Tx ring. */
		for (i = 0; i < TX_RING_SIZE; i++)
			vp->tx_skbuff[i] = 0;
		outl(0, ioaddr + DownListPtr);
	}
	/* Set receiver mode: presumably accept b-case and phys addr only. */
	set_rx_mode(dev);
	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	dev->tbusy = 0;
	dev->start = 1;
	vp->tx_full = 0;

	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
		(vp->bus_master ? DMADone : 0);
	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
		(vp->full_bus_master_rx ? 0 : RxComplete) |
		StatsFull | HostError | TxComplete | IntReq
		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
	outw(vp->status_enable, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
		 ioaddr + EL3_CMD);
	outw(vp->intr_enable, ioaddr + EL3_CMD);
	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
		writel(0x8000, vp->cb_fn_base + 4);

	/* Kick off the periodic media-selection timer. */
	init_timer(&vp->timer);
	vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
	vp->timer.data = (unsigned long)dev;
	vp->timer.function = &vortex_timer;		/* timer handler */
	add_timer(&vp->timer);

	/* Rx out-of-memory retry timer: armed on demand, not here. */
	init_timer(&vp->rx_oom_timer);
	vp->rx_oom_timer.data = (unsigned long)dev;
	vp->rx_oom_timer.function = rx_oom_timer;

	return 0;

out_free_irq:
	free_irq(dev->irq, dev);
out:
	return retval;
}
 
static void vortex_timer(unsigned long data)
{
struct device *dev = (struct device *)data;
struct vortex_private *vp = (struct vortex_private *)dev->priv;
long ioaddr = dev->base_addr;
int next_tick = 60 * HZ;
int ok = 0;
int media_status, mii_status, old_window;
 
if (vortex_debug > 1)
printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
dev->name, media_tbl[dev->if_port].name);
 
if (vp->medialock)
goto leave_media_alone;
disable_irq(dev->irq);
old_window = inw(ioaddr + EL3_CMD) >> 13;
EL3WINDOW(4);
media_status = inw(ioaddr + Wn4_Media);
switch (dev->if_port) {
case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
if (media_status & Media_LnkBeat) {
ok = 1;
if (vortex_debug > 1)
printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
dev->name, media_tbl[dev->if_port].name, media_status);
} else if (vortex_debug > 1)
printk(KERN_DEBUG "%s: Media %s is has no link beat, %x.\n",
dev->name, media_tbl[dev->if_port].name, media_status);
break;
case XCVR_MII: case XCVR_NWAY:
{
unsigned long flags;
spin_lock_irqsave(&vp->lock, flags);
mii_status = mdio_read(ioaddr, vp->phys[0], 1);
ok = 1;
if (vortex_debug > 1)
printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
dev->name, mii_status);
if (mii_status & 0x0004) {
int mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
if (!vp->duplexlock && mii_reg5 != 0xffff) {
int duplex = (mii_reg5&0x0100) ||
(mii_reg5 & 0x01C0) == 0x0040;
if (vp->full_duplex != duplex) {
vp->full_duplex = duplex;
printk(KERN_INFO "%s: Setting %s-duplex based on MII "
"#%d link partner capability of %4.4x.\n",
dev->name, vp->full_duplex ? "full" : "half",
vp->phys[0], mii_reg5);
/* Set the full-duplex bit. */
EL3WINDOW(3);
outb((vp->full_duplex ? 0x20 : 0) |
(dev->mtu > 1500 ? 0x40 : 0),
ioaddr + Wn3_MAC_Ctrl);
}
next_tick = 60*HZ;
}
}
spin_unlock_irqrestore(&vp->lock, flags);
}
break;
default: /* Other media types handled by Tx timeouts. */
if (vortex_debug > 1)
printk(KERN_DEBUG "%s: Media %s has no indication, %x.\n",
dev->name, media_tbl[dev->if_port].name, media_status);
ok = 1;
}
if ( ! ok) {
unsigned int config;
 
do {
dev->if_port = media_tbl[dev->if_port].next;
} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
if (dev->if_port == XCVR_Default) { /* Go back to default. */
dev->if_port = vp->default_media;
if (vortex_debug > 1)
printk(KERN_DEBUG "%s: Media selection failing, using default "
"%s port.\n",
dev->name, media_tbl[dev->if_port].name);
} else {
if (vortex_debug > 1)
printk(KERN_DEBUG "%s: Media selection failed, now trying "
"%s port.\n",
dev->name, media_tbl[dev->if_port].name);
next_tick = media_tbl[dev->if_port].wait;
}
outw((media_status & ~(Media_10TP|Media_SQE)) |
media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
 
EL3WINDOW(3);
config = inl(ioaddr + Wn3_Config);
config = BFINS(config, dev->if_port, 20, 4);
outl(config, ioaddr + Wn3_Config);
 
outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
ioaddr + EL3_CMD);
}
EL3WINDOW(old_window);
enable_irq(dev->irq);
 
leave_media_alone:
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
dev->name, media_tbl[dev->if_port].name);
 
vp->timer.expires = RUN_AT(next_tick);
add_timer(&vp->timer);
if (vp->deferred)
outw(FakeIntr, ioaddr + EL3_CMD);
return;
}
 
/*
 * Transmit watchdog: called when a packet has been pending longer than
 * TX_TIMEOUT.  Logs diagnostic state, opportunistically services any
 * interrupt the chip has latched but not delivered, resets the
 * transmitter, restarts a stalled bus-master download list, and
 * re-enables transmission.
 */
static void vortex_tx_timeout(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
		   dev->name, inb(ioaddr + TxStatus),
		   inw(ioaddr + EL3_STATUS));
	/* Slight code bloat to be user friendly. */
	if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
		printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
			   " network cable problem?\n", dev->name);
	if (inw(ioaddr + EL3_STATUS) & IntLatch) {
		printk(KERN_ERR "%s: Interrupt posted but not delivered --"
			   " IRQ blocked by another device?\n", dev->name);
		/* Bad idea here.. but we might as well handle a few events. */
		{
			/*
			 * AKPM: block interrupts because vortex_interrupt
			 * does a bare spin_lock()
			 */
			unsigned long flags;
			__save_flags(flags);
			__cli();
			if (vp->full_bus_master_tx)
				boomerang_interrupt(dev->irq, dev, 0);
			else
				vortex_interrupt(dev->irq, dev, 0);
			__restore_flags(flags);
		}
	}

	/* Dump the Tx descriptor ring for bus-master (boomerang) chips. */
	if (vp->full_bus_master_tx) {
		int i;
		printk(KERN_DEBUG "  Flags; bus-master %d, full %d; dirty %d "
			   "current %d.\n",
			   vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, vp->cur_tx);
		printk(KERN_DEBUG "  Transmit list %8.8x vs. %p.\n",
			   inl(ioaddr + DownListPtr),
			   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
		for (i = 0; i < TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "  %d: @%p  length %8.8x status %8.8x\n", i,
				   &vp->tx_ring[i],
				   le32_to_cpu(vp->tx_ring[i].length),
				   le32_to_cpu(vp->tx_ring[i].status));
		}
	}
	wait_for_completion(dev, TxReset);
	vp->stats.tx_errors++;
	if (vp->full_bus_master_tx) {
		if (vortex_debug > 0)
			printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n",
				   dev->name);
		/* Restart the download engine at the first unacknowledged entry. */
		if (vp->cur_tx - vp->dirty_tx > 0  &&  inl(ioaddr + DownListPtr) == 0)
			outl(virt_to_bus(&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]),
				 ioaddr + DownListPtr);
		if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_RING_SIZE - 1)) {
			vp->tx_full = 0;
			clear_bit(0, (void*)&dev->tbusy);	/* let the stack queue again */
		}
		outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
		outw(DownUnstall, ioaddr + EL3_CMD);
	} else {
		vp->stats.tx_dropped++;
		clear_bit(0, (void*)&dev->tbusy);
	}
	/* Issue Tx Enable */
	outw(TxEnable, ioaddr + EL3_CMD);
	dev->trans_start = jiffies;
	/* Switch to register set 7 for normal use. */
	EL3WINDOW(7);
}
 
/*
 * Handle uncommon interrupt sources.  This is a separate routine to minimize
 * the cache impact.
 *
 * Demultiplexes the rarely-set status bits: Tx errors (TxComplete here
 * means "Tx error"), RxEarly, statistics overflow, deferred-interrupt
 * restore (IntReq), and adapter failure (HostError).  May reset and
 * re-enable the transmitter/receiver as required.
 */
static void
vortex_error(struct device *dev, int status)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int do_tx_reset = 0;	/* set when the Tx engine needs a full reset */

	if (status & TxComplete) {			/* Really "TxError" for us. */
		unsigned char tx_status = inb(ioaddr + TxStatus);
		/* Presumably a tx-timeout. We must merely re-enable. */
		if (vortex_debug > 2
			|| (tx_status != 0x88 && vortex_debug > 0))
			printk(KERN_DEBUG"%s: Transmit error, Tx status register %2.2x.\n",
				   dev->name, tx_status);
		if (tx_status & 0x14)  vp->stats.tx_fifo_errors++;
		if (tx_status & 0x38)  vp->stats.tx_aborted_errors++;
		outb(0, ioaddr + TxStatus);		/* pop the error off the status stack */
		if (tx_status & 0x30)			/* txJabber or txUnderrun */
			do_tx_reset = 1;
		else					/* Merely re-enable the transmitter. */
			outw(TxEnable, ioaddr + EL3_CMD);
	}
	if (status & RxEarly) {				/* Rx early is unused. */
		vortex_rx(dev);
		outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
	}
	if (status & StatsFull) {			/* Empty statistics. */
		static int DoneDidThat = 0;
		if (vortex_debug > 4)
			printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
		update_stats(ioaddr, dev);
		/* HACK: Disable statistics as an interrupt source. */
		/* This occurs when we have the wrong media type! */
		if (DoneDidThat == 0  &&
			inw(ioaddr + EL3_STATUS) & StatsFull) {
			printk(KERN_WARNING "%s: Updating statistics failed, disabling "
				   "stats as an interrupt source.\n", dev->name);
			EL3WINDOW(5);
			outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
			EL3WINDOW(7);
			DoneDidThat++;			/* only ever do this once */
		}
	}
	if (status & IntReq) {		/* Restore all interrupt sources.  */
		outw(vp->status_enable, ioaddr + EL3_CMD);
		outw(vp->intr_enable, ioaddr + EL3_CMD);
	}
	if (status & HostError) {
		u16 fifo_diag;
		EL3WINDOW(4);
		fifo_diag = inw(ioaddr + Wn4_FIFODiag);
		if (vortex_debug > 0)
			printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
				   dev->name, fifo_diag);
		/* Adapter failure requires Tx/Rx reset and reinit. */
		if (vp->full_bus_master_tx) {
			wait_for_completion(dev, TotalReset|0xff);
			/* Re-enable the receiver. */
			outw(RxEnable, ioaddr + EL3_CMD);
			outw(TxEnable, ioaddr + EL3_CMD);
		} else if (fifo_diag & 0x0400)
			do_tx_reset = 1;
		if (fifo_diag & 0x3000) {
			wait_for_completion(dev, RxReset);
			/* Set the Rx filter to the current state. */
			set_rx_mode(dev);
			outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
			outw(AckIntr | HostError, ioaddr + EL3_CMD);
		}
	}
	if (do_tx_reset) {
		wait_for_completion(dev, TxReset);
		outw(TxEnable, ioaddr + EL3_CMD);
		if (!vp->full_bus_master_tx) {
			clear_bit(0, (void*)&dev->tbusy);
			mark_bh(NET_BH);	/* let the stack retry queued packets */
		}
	}

}
 
/*
 * Transmit path for non-ring (Vortex) chips: either hand the packet to the
 * simple one-shot bus-master engine, or copy it into the Tx FIFO by PIO.
 * Returns 0 when the packet was accepted, 1 when the device is busy.
 *
 * Fix vs. original: the length is cached before the packet is handed off.
 * The original read skb->len for the byte-count statistics AFTER
 * dev_kfree_skb(skb) (PIO path) / after the DMADone interrupt may have
 * freed vp->tx_skb (bus-master path) -- a use-after-free.  The stray
 * "boomerang" in the debug message is also corrected to "vortex".
 */
static int
vortex_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	/* Cache the length now: both paths below can give up ownership of
	   the skb (dev_kfree_skb, or the DMADone interrupt) before the
	   statistics update at the bottom of this function. */
	int len = skb->len;

	if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
		if (jiffies - dev->trans_start >= TX_TIMEOUT)
			vortex_tx_timeout(dev);
		return 1;
	}
	if (vortex_debug > 3)
		printk(KERN_DEBUG "%s: Trying to send a vortex packet, Tx index %d.\n",
			   dev->name, vp->cur_tx);

	/* Put out the doubleword header... */
	outl(len, ioaddr + TX_FIFO);
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		outl(virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
		outw((len + 3) & ~3, ioaddr + Wn7_MasterLen);
		vp->tx_skb = skb;	/* freed by the ISR on DMADone */
		outw(StartDMADown, ioaddr + EL3_CMD);
		/* dev->tbusy will be cleared at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
#ifdef __powerpc__
		outsl_ns(ioaddr + TX_FIFO, skb->data, (len + 3) >> 2);
#else
		outsl(ioaddr + TX_FIFO, skb->data, (len + 3) >> 2);
#endif
		dev_kfree_skb(skb);	/* skb must not be touched after this point */
		if (inw(ioaddr + TxFree) > 1536) {
			clear_bit(0, (void*)&dev->tbusy);
		} else
			/* Interrupt us when the FIFO has room for max-sized packet. */
			outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
	}

	dev->trans_start = jiffies;

	/* Clear the Tx status stack. */
	{
		int tx_status;
		int i = 32;

		while (--i > 0	&&	(tx_status = inb(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
				if (vortex_debug > 2)
				  printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
						 dev->name, tx_status);
				if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
				if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
				if (tx_status & 0x30)
					wait_for_completion(dev, TxReset);
				outw(TxEnable, ioaddr + EL3_CMD);
			}
			outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
		}
	}
	vp->stats.tx_bytes += len;	/* use the cached length, not skb->len */
	return 0;
}
 
/*
 * Transmit path for descriptor-ring (Boomerang and later) chips: append
 * the packet to the Tx download ring while the list engine is stalled,
 * then unstall it.  Returns 0 when queued, 1 when the device is busy or
 * the ring is full.
 *
 * Fix vs. original: the length is cached while we still own the skb.
 * The original read skb->len for the byte-count statistics AFTER
 * spin_unlock_irqrestore(); by then the Tx-done interrupt may already
 * have run and freed the skb -- a use-after-free race.
 */
static int
boomerang_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;

	if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
		if (jiffies - dev->trans_start >= TX_TIMEOUT)
			vortex_tx_timeout(dev);
		return 1;
	} else {
		/* Calculate the next Tx descriptor entry. */
		int entry = vp->cur_tx % TX_RING_SIZE;
		struct boom_tx_desc *prev_entry =
			&vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
		unsigned long flags;
		/* Cache the length: once DownUnstall is issued and the lock is
		   dropped, the Tx-done interrupt may free the skb at any moment. */
		int len = skb->len;

		if (vortex_debug > 3)
			printk(KERN_DEBUG "%s: Trying to send a boomerang packet, Tx index %d.\n",
				   dev->name, vp->cur_tx);
		if (vp->tx_full) {
			if (vortex_debug > 0)
				printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n",
					   dev->name);
			return 1;
		}
		/* Fill in the next descriptor; ownership passes to the chip. */
		vp->tx_skbuff[entry] = skb;
		vp->tx_ring[entry].next = 0;
		vp->tx_ring[entry].addr = cpu_to_le32(virt_to_bus(skb->data));
		vp->tx_ring[entry].length = cpu_to_le32(len | LAST_FRAG);
		vp->tx_ring[entry].status = cpu_to_le32(len | TxIntrUploaded);

		spin_lock_irqsave(&vp->lock, flags);
		/* Stall the download engine so the list can be patched safely. */
		wait_for_completion(dev, DownStall);
		prev_entry->next = cpu_to_le32(virt_to_bus(&vp->tx_ring[entry]));
		if (inl(ioaddr + DownListPtr) == 0) {
			/* Engine went idle: restart it at our new entry. */
			outl(virt_to_bus(&vp->tx_ring[entry]), ioaddr + DownListPtr);
			queued_packet++;
		}

		vp->cur_tx++;
		if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
			vp->tx_full = 1;
		else {					/* Clear previous interrupt enable. */
			prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
			clear_bit(0, (void*)&dev->tbusy);
		}
		outw(DownUnstall, ioaddr + EL3_CMD);
		spin_unlock_irqrestore(&vp->lock, flags);
		dev->trans_start = jiffies;
		vp->stats.tx_bytes += len;	/* cached: skb may be freed by the ISR */
		return 0;
	}
}
 
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
 
/*
* This is the ISR for the vortex series chips.
* full_bus_master_tx == 0 && full_bus_master_rx == 0
*/
 
/*
 * Interrupt handler for the Vortex-generation chips
 * (full_bus_master_tx == 0 && full_bus_master_rx == 0).
 * Services Rx, Tx-FIFO-room and bus-master-DMA events in a loop until
 * IntLatch deasserts, bounded by the max_interrupt_work budget.
 */
static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct device *dev = dev_id;
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr;
	int status;
	int work_done = max_interrupt_work;	/* event budget for this IRQ */

	ioaddr = dev->base_addr;
	spin_lock(&vp->lock);
	status = inw(ioaddr + EL3_STATUS);
	if ((status & IntLatch) == 0) {
		if (vortex_debug > 5)
			printk(KERN_DEBUG "%s: no vortex interrupt pending\n", dev->name);
		goto no_int;	/* Happens during shared interrupts */
	}

	/* Re-merge any interrupt sources deferred by a previous overload. */
	if (status & IntReq) {
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: vortex_interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, inb(ioaddr + Timer));

	do {
		if (vortex_debug > 5)
			printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
				   dev->name, status);

		if (status & RxComplete)
			vortex_rx(dev);

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				printk(KERN_DEBUG "	TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			clear_bit(0, (void*)&dev->tbusy);
			mark_bh(NET_BH);
		}

		if (status & DMADone) {
			/* Bit 0x1000 of MasterStatus signals download-DMA done;
			   write it back to acknowledge. */
			if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
				outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
				dev_kfree_skb(vp->tx_skb); /* Release the transfered buffer */
				if (inw(ioaddr + TxFree) > 1536) {
					clear_bit(0, (void*)&dev->tbusy);
					mark_bh(NET_BH);
				} else /* Interrupt when FIFO has room for max-sized packet. */
					outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)	/* Card likely removed/unresponsive. */
				break;
			vortex_error(dev, status);
		}

		if (--work_done < 0) {
			printk(KERN_WARNING "%s: Too much work in interrupt, status "
				   "%4.4x.\n", dev->name, status);
			/* Disable all pending interrupts; remember them in
			   vp->deferred so the timer can replay them later. */
			do {
				vp->deferred |= status;
				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
					 ioaddr + EL3_CMD);
				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, RUN_AT(1));
			break;
		}

		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
	} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);

no_int:
	spin_unlock(&vp->lock);
}
 
/*
* This is the ISR for the boomerang/cyclone/hurricane/tornado series chips.
* full_bus_master_tx == 1 && full_bus_master_rx == 1
*/
 
/*
 * Interrupt handler for the Boomerang/Cyclone/Hurricane/Tornado chips
 * (full_bus_master_tx == 1 && full_bus_master_rx == 1).
 * Handles upload (Rx) completions, reclaims finished Tx descriptors,
 * and dispatches uncommon events, bounded by max_interrupt_work.
 */
static void boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct device *dev = dev_id;
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr;
	int status;
	int work_done = max_interrupt_work;	/* event budget for this IRQ */

	ioaddr = dev->base_addr;
	spin_lock(&vp->lock);
	status = inw(ioaddr + EL3_STATUS);
	if ((status & IntLatch) == 0) {
		if (vortex_debug > 5)
			printk(KERN_DEBUG "%s: no boomerang interrupt pending\n", dev->name);
		goto no_int;	/* Happens during shared interrupts */
	}

	/* Re-merge any interrupt sources deferred by a previous overload. */
	if (status & IntReq) {
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: boomerang interrupt, status %04x, latency %d, cur_rx %d, dirty_rx %d\n",
			   dev->name, status, inb(ioaddr + Timer), vp->cur_rx, vp->dirty_rx);

	do {
		if (vortex_debug > 5)
			printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
				   dev->name, status);

		if (status & UpComplete) {
			outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
			boomerang_rx(dev);
		}

		if (status & DownComplete) {
			/* Reclaim Tx descriptors the NIC has finished with. */
			unsigned int dirty_tx = vp->dirty_tx;

			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
				if (inl(ioaddr + DownListPtr) ==
					virt_to_bus(&vp->tx_ring[entry]))
					break;			/* It still hasn't been processed. */
				if (vp->tx_skbuff[entry]) {
					dev_kfree_skb(vp->tx_skbuff[entry]);
					vp->tx_skbuff[entry] = 0;
				}
				/* vp->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
			/* Ring drained below the full mark: let the stack send again. */
			if (vp->tx_full && (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
				vp->tx_full = 0;
				clear_bit(0, (void*)&dev->tbusy);
				mark_bh(NET_BH);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)	/* Card likely removed/unresponsive. */
				break;
			vortex_error(dev, status);
		}

		if (--work_done < 0) {
			printk(KERN_WARNING "%s: Too much work in interrupt, status "
				   "%4.4x.\n", dev->name, status);
			/* Disable all pending interrupts; remember them in
			   vp->deferred so the timer can replay them later. */
			do {
				vp->deferred |= status;
				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
					 ioaddr + EL3_CMD);
				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, RUN_AT(1));
			break;
		}

		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
			writel(0x8000, vp->cb_fn_base + 4);

	} while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch);

	if (vortex_debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);

no_int:
	spin_unlock(&vp->lock);
}
 
/*
 * Receive path for the Vortex (non-bus-master-Rx) chips: pull each
 * completed packet out of the Rx FIFO, either via a single bus-master
 * DMA transfer or programmed I/O, and hand it to the network stack.
 * Always returns 0.
 */
static int vortex_rx(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		printk(KERN_DEBUG"   In vortex_rx(), status %4.4x, rx_status %4.4x.\n",
			   inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
	/* RxStatus > 0 means a complete packet is waiting (bit 0x8000,
	   "incomplete", makes the value negative). */
	while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = inb(ioaddr + RxErrors);
			if (vortex_debug > 2)
				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
			vp->stats.rx_errors++;
			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len + 5);
			if (vortex_debug > 4)
				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					/* Upload DMA is idle: let the NIC copy the packet
					   into the skb, then busy-wait for completion. */
					outl(virt_to_bus(skb_put(skb, pkt_len)),
						 ioaddr + Wn7_MasterAddr);
					outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					outw(StartDMAUp, ioaddr + EL3_CMD);
					while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
				} else {
					/* Programmed-I/O copy, a dword at a time. */
#ifdef __powerpc__
					insl_ns(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
							 (pkt_len + 3) >> 2);
#else
					insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
						 (pkt_len + 3) >> 2);
#endif
				}
				outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->last_rx = jiffies;
				vp->stats.rx_packets++;
				vp->stats.rx_bytes += skb->len;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug)
				printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
					   "size %d.\n", dev->name, pkt_len);
		}
		/* No skb (or error path): drop the packet from the FIFO. */
		vp->stats.rx_dropped++;
		wait_for_completion(dev, RxDiscard);
	}

	return 0;
}
 
/*
 * Receive path for the bus-master-Rx chips: walk the Rx descriptor
 * ring, pass completed packets to the stack (copying small ones into a
 * fresh skb, handing the ring skb up for large ones), then refill the
 * ring.  Arms rx_oom_timer if the ring cannot be refilled at all.
 * Always returns 0.
 */
static int
boomerang_rx(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	int entry = vp->cur_rx % RX_RING_SIZE;
	long ioaddr = dev->base_addr;
	int rx_status;
	/* Never process more entries than can currently be outstanding. */
	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;

	if (vortex_debug > 5)
		printk(KERN_DEBUG "  In boomerang_rx(), status %4.4x, rx_status "
			   "%4.4x.\n",
			   inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			/* Error bits live in the high half of the status word. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
			vp->stats.rx_errors++;
			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			vp->stats.rx_bytes += pkt_len;
			if (vortex_debug > 4)
				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				memcpy(skb_put(skb, pkt_len),
					   bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)),
					   pkt_len);
				rx_copy++;
			} else {
				void *temp;
				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = NULL;
				temp = skb_put(skb, pkt_len);
				/* Remove this checking code for final release. */
				if (bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)) != temp)
					printk(KERN_ERR "%s: Warning -- the skbuff addresses do not match"
						   " in boomerang_rx: %p vs. %p.\n", dev->name,
						   bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)),
						   temp);
				rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{					/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
					(csum_bits == (IPChksumValid | TCPChksumValid) ||
					 csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->last_rx = jiffies;
			vp->stats.rx_packets++;
		}
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	/* Refill the Rx ring buffers. */
	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
		struct sk_buff *skb;
		entry = vp->dirty_rx % RX_RING_SIZE;
		if (vp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL) {
				/* Rate-limit the shortage warning to once per 10s. */
				static unsigned long last_jif;
				if ((jiffies - last_jif) > 10 * HZ) {
					printk(KERN_WARNING "%s: memory shortage\n", dev->name);
					last_jif = jiffies;
				}
				/* Whole ring empty: poll for memory from the timer. */
				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
					mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
				break;			/* Bad news!  */
			}
			skb->dev = dev;			/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			vp->rx_ring[entry].addr = cpu_to_le32(virt_to_bus(skb->tail));
			vp->rx_skbuff[entry] = skb;
		}
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		outw(UpUnstall, ioaddr + EL3_CMD);
	}
	return 0;
}
 
/*
* If we've hit a total OOM refilling the Rx ring we poll once a second
* for some memory. Otherwise there is no way to restart the rx process.
*/
/*
 * Out-of-memory fallback: retried once a second after boomerang_rx()
 * completely failed to refill the Rx ring, since otherwise reception
 * would never restart.  'arg' is the struct device pointer cast to
 * unsigned long (standard Linux timer convention).
 */
static void
rx_oom_timer(unsigned long arg)
{
	struct device *dev = (struct device *)arg;
	struct vortex_private *vp = (struct vortex_private *)dev->priv;

	spin_lock_irq(&vp->lock);
	if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)	/* This test is redundant, but makes me feel good */
		boomerang_rx(dev);
	if (vortex_debug > 1) {
		printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name,
			   ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
	}
	spin_unlock_irq(&vp->lock);
}
 
/*
 * Bring the interface down: stop the stack, quiesce and disable the
 * hardware, free the IRQ and any bus-master ring buffers, and (when
 * supported) arm Wake-on-LAN before power-down.  Always returns 0.
 */
static int
vortex_close(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Tell the stack we are down and refuse further transmits. */
	dev->start = 0;
	dev->tbusy = 1;

	if (vortex_debug > 1) {
		printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			   dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
		printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
			   " tx_queued %d Rx pre-checksummed %d.\n",
			   dev->name, rx_nocopy, rx_copy, queued_packet, rx_csumhits);
	}

	del_timer(&vp->timer);
	del_timer(&vp->rx_oom_timer);

	/* Turn off statistics ASAP.  We update vp->stats below. */
	outw(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	outw(RxDisable, ioaddr + EL3_CMD);
	outw(TxDisable, ioaddr + EL3_CMD);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		outw(StopCoax, ioaddr + EL3_CMD);

	free_irq(dev->irq, dev);

	/* Mask all interrupt sources. */
	outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	update_stats(ioaddr, dev);
	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		outl(0, ioaddr + UpListPtr);
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
				dev_kfree_skb(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = 0;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		outl(0, ioaddr + DownListPtr);
		for (i = 0; i < TX_RING_SIZE; i++)
			if (vp->tx_skbuff[i]) {
				dev_kfree_skb(vp->tx_skbuff[i]);
				vp->tx_skbuff[i] = 0;
			}
	}

	if (vp->enable_wol && (vp->capabilities & CapPwrMgmt))
		acpi_set_WOL(dev);
	MOD_DEC_USE_COUNT;

	return 0;
}
 
/* Return the interface statistics, first refreshing them from the
   hardware counters when the interface is up.  The private lock guards
   against a concurrent StatsFull service in the interrupt handler. */
static struct net_device_stats *vortex_get_stats(struct device *dev)
{
	struct vortex_private *priv = (struct vortex_private *)dev->priv;
	unsigned long irq_state;

	if (!dev->start)
		return &priv->stats;

	spin_lock_irqsave(&priv->lock, irq_state);
	update_stats(dev->base_addr, dev);
	spin_unlock_irqrestore(&priv->lock, irq_state);

	return &priv->stats;
}
 
/* Update statistics.
Unlike with the EL3 we need not worry about interrupts changing
the window setting from underneath us, but we must still guard
against a race condition with a StatsUpdate interrupt updating the
table. This is done by checking that the ASM (!) code generated uses
atomic updates with '+='.
*/
/*
 * Read (and thereby clear) the on-chip statistics counters in window 6
 * and fold them into vp->stats.  Every register must be read even if
 * its value is discarded, otherwise the counter is not cleared.
 * Leaves the chip in register window 7 on return.
 */
static void update_stats(long ioaddr, struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;

	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
	/* Switch to the stats window, and read everything. */
	EL3WINDOW(6);
	vp->stats.tx_carrier_errors		+= inb(ioaddr + 0);
	vp->stats.tx_heartbeat_errors	+= inb(ioaddr + 1);
	/* Multiple collisions. */		inb(ioaddr + 2);
	vp->stats.collisions			+= inb(ioaddr + 3);
	vp->stats.tx_window_errors		+= inb(ioaddr + 4);
	vp->stats.rx_fifo_errors		+= inb(ioaddr + 5);
	vp->stats.tx_packets			+= inb(ioaddr + 6);
	/* Register 9 bits 4-5 are the Tx-packet counter's high bits. */
	vp->stats.tx_packets			+= (inb(ioaddr + 9)&0x30) << 4;
	/* Rx packets	*/				inb(ioaddr + 7);   /* Must read to clear */
	/* Tx deferrals */				inb(ioaddr + 8);
	/* Don't bother with register 9, an extension of registers 6&7.
	   If we do use the 6&7 values the atomic update assumption above
	   is invalid. */
	inw(ioaddr + 10);	/* Total Rx and Tx octets. */
	inw(ioaddr + 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	EL3WINDOW(4);
	inb(ioaddr + 12);

	/* We change back to window 7 (not 1) with the Vortex. */
	EL3WINDOW(7);
	return;
}
 
/*
 * Private ioctls for MII PHY access:
 *   SIOCDEVPRIVATE	 - return the address of the PHY in use (and read it),
 *   SIOCDEVPRIVATE+1 - read the MII register data[1] of PHY data[0],
 *   SIOCDEVPRIVATE+2 - write data[2] to that register (needs CAP_NET_ADMIN).
 * Returns 0, -EPERM, or -EOPNOTSUPP.
 */
static int vortex_ioctl(struct device *dev, struct ifreq *rq, int cmd)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;
	u16 *data = (u16 *)&rq->ifr_data;
	int phy = vp->phys[0] & 0x1f;
	int retval;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);

	switch(cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = phy;
		/* Fall through: also read the requested register of that PHY. */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		EL3WINDOW(4);
		data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
		retval = 0;
		break;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!capable(CAP_NET_ADMIN)) {
			retval = -EPERM;
		} else {
			EL3WINDOW(4);
			mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
			retval = 0;
		}
		break;
	default:
		retval = -EOPNOTSUPP;
	}

	spin_unlock_irqrestore(&vp->lock, flags);
	return retval;
}
 
/* Pre-Cyclone chips have no documented multicast filter, so the only
multicast setting is to receive all multicast frames. At least
the chip has a very clean way to set the mode, unlike many others. */
static void set_rx_mode(struct device *dev)
{
long ioaddr = dev->base_addr;
int new_mode;
 
if (dev->flags & IFF_PROMISC) {
if (vortex_debug > 0)
printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
} else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
 
outw(new_mode, ioaddr + EL3_CMD);
}
 
/* MII transceiver control section.
Read and write the MII registers using software-generated serial
MDIO protocol. See the MII specifications or DP83840A data sheet
for details. */
 
/* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
met by back-to-back PCI I/O cycles, but we insert a delay to avoid
"overclocking" issues. */
#define mdio_delay() inl(mdio_addr)
 
#define MDIO_SHIFT_CLK 0x01
#define MDIO_DIR_WRITE 0x04
#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
#define MDIO_DATA_READ 0x02
#define MDIO_ENB_IN 0x00
 
/* Generate the preamble required for initial synchronization and
a few older transceivers. */
/*
 * Clock out 'bits' logic-one bits on the MDIO line to establish frame
 * synchronization with the PHY (the MII preamble).  Each bit is one
 * data write followed by a clock edge, with mdio_delay() reads between
 * transitions to respect the 2.5 MHz maximum MDC rate.
 */
static void mdio_sync(long ioaddr, int bits)
{
	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;

	/* Establish sync by sending at least 32 logic ones. */
	while (-- bits >= 0) {
		outw(MDIO_DATA_WRITE1, mdio_addr);
		mdio_delay();
		outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
}
 
/*
 * Bit-bang a clause-22 MII read of 'location' on PHY 'phy_id'.
 * Shifts out the 14-bit read command, then clocks in the turnaround,
 * 16 data bits and an idle bit.  Returns the 16-bit register value, or
 * 0xffff when the PHY did not drive the bus (no device / bit 17 set).
 */
static int mdio_read(long ioaddr, int phy_id, int location)
{
	int i;
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	unsigned int retval = 0;
	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;

	if (mii_preamble_required)
		mdio_sync(ioaddr, 32);

	/* Shift the read command bits out. */
	for (i = 14; i >= 0; i--) {
		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		outw(dataval, mdio_addr);
		mdio_delay();
		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		outw(MDIO_ENB_IN, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
#if 0
	return (retval>>1) & 0x1ffff;
#else
	/* Bit 17 set means the turnaround bit was not driven low: no PHY. */
	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
#endif
}
 
/*
 * Bit-bang a clause-22 MII write of 'value' to register 'location' of
 * PHY 'phy_id': shift out the full 32-bit write frame (start, opcode,
 * addresses, turnaround, data), then release the bus with two idle
 * clock cycles.
 */
static void mdio_write(long ioaddr, int phy_id, int location, int value)
{
	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
	int i;

	if (mii_preamble_required)
		mdio_sync(ioaddr, 32);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		outw(dataval, mdio_addr);
		mdio_delay();
		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}
	/* Leave the interface idle. */
	for (i = 1; i >= 0; i--) {
		outw(MDIO_ENB_IN, mdio_addr);
		mdio_delay();
		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
		mdio_delay();
	}

	return;
}
 
/* ACPI: Advanced Configuration and Power Interface. */
/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
/*
 * Arm Wake-on-LAN (magic-packet wakeup) and drop the board into the
 * D3 power state via the PCI power-management register at config
 * offset 0xe0.  The receiver is left enabled so WOL frames are seen.
 */
static void acpi_set_WOL(struct device *dev)
{
	struct vortex_private *vp = (struct vortex_private *)dev->priv;
	long ioaddr = dev->base_addr;

	/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
	EL3WINDOW(7);
	outw(2, ioaddr + 0x0c);
	/* The RxFilter must accept the WOL frames. */
	outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
	outw(RxEnable, ioaddr + EL3_CMD);
	/* Change the power state to D3; RxEnable doesn't take effect. */
	pci20to26_write_config_word(vp->pci_bus, vp->pci_devfn, 0xe0, 0x8103);
}
 
/* Change from D3 (sleep) to D0 (active).
Problem: The Cyclone forgets all PCI config info during the transition! */
/*
 * Transition the card from D3 (sleep) back to D0 (active).
 * The Cyclone forgets its PCI configuration across the transition, so
 * the relevant config registers are saved first, the power state is
 * cleared, and everything is written back; PCI_COMMAND is restored
 * last with I/O-space and bus-master enable forced on (| 5).
 */
static void acpi_wake(int bus, int devfn)
{
	u32 base0, base1, romaddr;
	u16 pci_command, pwr_command;
	u8 pci_latency, pci_cacheline, irq;

	pci20to26_read_config_word(bus, devfn, 0xe0, &pwr_command);
	if ((pwr_command & 3) == 0)
		return;		/* Already in D0: nothing to restore. */
	pci20to26_read_config_word( bus, devfn, PCI_COMMAND, &pci_command);
	pci20to26_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &base0);
	pci20to26_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &base1);
	pci20to26_read_config_dword(bus, devfn, PCI_ROM_ADDRESS, &romaddr);
	pci20to26_read_config_byte( bus, devfn, PCI_LATENCY_TIMER, &pci_latency);
	pci20to26_read_config_byte( bus, devfn, PCI_CACHE_LINE_SIZE, &pci_cacheline);
	pci20to26_read_config_byte( bus, devfn, PCI_INTERRUPT_LINE, &irq);

	pci20to26_write_config_word( bus, devfn, 0xe0, 0x0000);
	pci20to26_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, base0);
	pci20to26_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, base1);
	pci20to26_write_config_dword(bus, devfn, PCI_ROM_ADDRESS, romaddr);
	pci20to26_write_config_byte( bus, devfn, PCI_INTERRUPT_LINE, irq);
	pci20to26_write_config_byte( bus, devfn, PCI_LATENCY_TIMER, pci_latency);
	pci20to26_write_config_byte( bus, devfn, PCI_CACHE_LINE_SIZE, pci_cacheline);
	pci20to26_write_config_word( bus, devfn, PCI_COMMAND, pci_command | 5);
}
 
#ifdef MODULE
/*
 * Module unload: walk the driver's device list, unregister each
 * interface, issue a TotalReset (with EEPROM preservation where the
 * chip needs it), release its I/O region and free its memory.
 */
void cleanup_module(void)
{
	struct device *next_dev;

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	while (root_vortex_dev) {
		struct vortex_private *vp=(void *)(root_vortex_dev->priv);
		int drv_flags = pci_tbl[vp->chip_id].drv_flags;
		next_dev = vp->next_module;
		unregister_netdev(root_vortex_dev);
		outw((drv_flags & EEPROM_NORESET) ? (TotalReset|0x10) : TotalReset,
			 root_vortex_dev->base_addr + EL3_CMD);
		release_region(root_vortex_dev->base_addr,
					   pci_tbl[vp->chip_id].io_size);
		kfree(root_vortex_dev);
		kfree(vp->priv_addr);
		root_vortex_dev = next_dev;
	}
}
 
#endif /* MODULE */
/*
* Local variables:
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* End:
*/
/shark/trunk/drivers/net/skbuff.c
0,0 → 1,113
/* Emulates Linux sk buffers using hartik net buffers... */
 
#include <kernel/kern.h>
#include <semaphore.h>
 
#include "../net/netbuff.h"
#include "../net/eth_priv.h"
 
#include"linux/skbuff.h"
 
#define LOWLEV_TX_BUFFERS 50
#define LOWLEV_RX_BUFFERS 50
struct netbuff rxbuff;
struct netbuff lowlevel_txbuff;
 
/* Create the fixed pools of low-level Tx and Rx network buffers used to
   emulate Linux sk_buffs on top of S.Ha.R.K. netbuffs.  Must be called
   once before any dev_alloc_skb(). */
void skb_init(void)
{
	netbuff_init(&lowlevel_txbuff, LOWLEV_TX_BUFFERS, ETH_MAX_LEN);
	netbuff_init(&rxbuff, LOWLEV_RX_BUFFERS, ETH_MAX_LEN);
}
 
/*
 * Allocate an sk_buff backed by a buffer from the Rx netbuff pool.
 * Returns NULL when either the pool is exhausted or the descriptor
 * allocation fails; 'len' is only recorded in truesize (the underlying
 * buffer is always ETH_MAX_LEN bytes from the pool).
 *
 * Fixed two leaks in the original code: the kern_alloc'ed descriptor
 * was leaked when netbuff_get() returned NULL, and the return value of
 * kern_alloc() itself was never checked.
 */
struct sk_buff *dev_alloc_skb(unsigned int len)
{
	struct sk_buff *skb;
	void *data;

	/* Grab the data buffer first so a descriptor is only allocated
	   when we actually have storage for it. */
	data = netbuff_get(&rxbuff, NON_BLOCK);
	if (data == NULL) {
		return NULL;
	}

	kern_cli();
	skb = kern_alloc(sizeof(struct sk_buff));
	kern_sti();
	if (skb == NULL) {
		/* Descriptor allocation failed: give the buffer back. */
		netbuff_release(&rxbuff, data);
		return NULL;
	}

	skb->data = data;
	skb->head = skb->data;
	skb->tail = skb->data;
	skb->len = 0;
	skb->truesize = len;

	return skb;
}
 
/* Open 'len' bytes of headroom at the front of an empty sk_buff by
   advancing both the data and tail pointers past it. */
void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data = skb->data + len;
	skb->tail = skb->tail + len;
}
 
/* Extend the used data area of the buffer by 'len' bytes and return a
   pointer to the start of the newly added region (the old tail). */
unsigned char *skb_put(struct sk_buff *skb, int len)
{
	unsigned char *old_tail = skb->tail;

	skb->len += len;
	skb->tail = old_tail + len;

	return old_tail;
}
 
/* Initialize an sk_buff queue head as an empty circular list: both
   links point back at the head itself and the length is zero. */
void skb_queue_head_init(struct sk_buff_head *list)
{
	struct sk_buff *self = (struct sk_buff *)list;

	list->qlen = 0;
	list->next = self;
	list->prev = self;
}
 
/*
 * Make a copy of 'skb' with at least 'newheadroom' bytes of headroom,
 * copying the payload and the subset of metadata this emulation keeps.
 * Returns the new sk_buff, or NULL if allocation fails.  The original
 * skb is NOT freed; ownership of it stays with the caller.
 *
 * NOTE(review): this is a partial port of the Linux routine -- many
 * field copies are commented out, and 'offset' is computed but unused
 * because the h/nh/mac pointer fixups that needed it are disabled.
 */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, int newheadroom)
{
	struct sk_buff *n;
	unsigned long offset;
	int headroom = skb_headroom(skb);

	/* Allocate the copy buffer */

	n = alloc_skb(skb->truesize + newheadroom-headroom, GFP_ATOMIC);
	if (n == NULL)
		return NULL;

	skb_reserve(n, newheadroom);

	/*
	 *	Shift between the two data areas in bytes
	 */

	offset = n->data - skb->data;

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	/* Copy the bytes */
	memcpy(n->data, skb->data, skb->len);
	n->list = NULL;
//	n->sk = NULL;
//	n->priority = skb->priority;
	n->protocol = skb->protocol;
	n->dev = skb->dev;
//	n->dst = dst_clone(skb->dst);
//	n->h.raw = skb->h.raw+offset;
//	n->nh.raw = skb->nh.raw+offset;
//	n->mac.raw = skb->mac.raw+offset;
	memcpy(n->cb, skb->cb, sizeof(skb->cb));
	n->used = skb->used;
//	n->is_clone = 0;
//	atomic_set(&n->users, 1);
//	n->pkt_type = skb->pkt_type;
//	n->stamp = skb->stamp;
//	n->destructor = NULL;
//	n->security = skb->security;
#ifdef CONFIG_IP_FIREWALL
	n->fwmark = skb->fwmark;
#endif

	return n;
}
/shark/trunk/drivers/net/eth.c
0,0 → 1,565
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: eth.c,v 1.9 2004-05-11 14:30:49 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.9 $
Last update: $Date: 2004-05-11 14:30:49 $
------------
**/
 
/*
* Copyright (C) 2000 Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
/* Author: Luca Abeni */
/* Date: 2/12/1997 */
 
/* File: eth.C */
/* Revision: 2.0 */
 
/*
Ethernet layer: it is an inetrface between the low level driver (
3Com 3c59xx, ...) and the high level protocols (IP, ARP,...).
*/
 
/* only 4 debug... */
int netlev;
 
#include <kernel/kern.h>
#include <modules/hartport.h>
 
#include <drivers/pci.h>
 
#include "eth_priv.h"
#include "netbuff.h"
 
#include <linux/netdevice.h>
/*#include "lowlev.h"
//#include "3com.h" */
 
//#define DEBUG_ETH
 
#define ETH_PAGE 5
 
struct eth_service{
WORD type;
void (*rfun)(void *pkt);
} ETH_SERVICE;
 
int definedprotocols;
struct eth_service eth_table[ETH_MAX_PROTOCOLS];
 
#define ETH_RX_BUFFERS 4
#define ETH_TX_BUFFERS 4
 
#ifndef ETH0_ADDR
# define ETH0_ADDR 0
#endif
#ifndef ETH0_IRQ
# define ETH0_IRQ 0
#endif
 
#define NONE 0
 
/*extern void net_handler(void);
//extern PID net_extern_driver(void);*/
 
PID nettask_pid = NIL;
static PORT NetRxPort;
 
/* void (*vortex_send)(DWORD BaseAddress, DWORD *txbuff, int len); */
 
int ethIsInstalled = FALSE;
 
/* device descriptor */
struct eth_device eth_dev;
struct pci_des pci_devs[5];
/* This is the Linux one!!! */
static struct device device0 = {
"eth0", 0, 0, 0, 0, ETH0_ADDR, ETH0_IRQ, 0, 0, 0, NULL, NULL};
struct device *dev_base = &device0;
 
/* received frames buffers */
extern struct netbuff rxbuff; /* from skbuff.c */
 
/* buffers for the frames to send */
/* struct netbuff txbuff; */
 
/* Called if an unknown frames arrives */
/* Default protocol callback: raises ETH_NULLPROTOCOL_EXC when a frame
   arrives for a protocol type nobody registered a handler for. */
void eth_nullfun(void *pkt)
{
	kern_raise(ETH_NULLPROTOCOL_EXC,NIL);
}
 
/* Linux-compat stub: this port never queues packets in the stack, so
   a call here is unexpected -- log and abort. */
void dev_tint(struct device *dev)
{
	printk(KERN_WARNING "Warning! dev_tint called. (Why?)\n");
	sys_abort(201);
}
 
/*
-----------------------------------------------------------------------
The extern process calls this function when a frame arrives
-----------------------------------------------------------------------
*/
 
/* Entry point called by the low-level driver for each received frame:
   forwards the sk_buff (by value, non-blocking) to the network task's
   port.  Aborts if the network task was never created. */
void netif_rx(struct sk_buff *skb)
{
//	cprintf("DENTRO netif_rx, skbuf=%p\n",skb->data);
	if (nettask_pid == NIL) {
		printk(KERN_CRIT "Net receives packets, but the driver doesn't exist.\n");
		sys_abort(300);
	}

	port_send(NetRxPort,skb,NON_BLOCK);
//	task_activate(nettask_pid);
}
 
/*
 * Network demultiplexer task: blocks on the "NetPort" port, and for
 * each received sk_buff dispatches the frame to every registered
 * protocol callback whose type matches, then returns the buffer to the
 * Rx pool.  Runs forever.  The 'netlev' global is a debug trace of the
 * current processing stage.
 */
TASK net_extern_driver(void)
{
	static PORT NetPort;
	struct sk_buff skb;
	void *pkt;
	int len;
	BYTE count;
	int i;

	NetPort = port_connect("NetPort", sizeof(struct sk_buff), STREAM, READ);
	while (1) {
		/* debug... */
		netlev = 1;

		port_receive(NetPort,&skb,BLOCK);
		pkt = skb.data;
		len = skb.len;

		/* The type field arrives in network byte order. */
		((struct eth_header *)pkt)->type = ntohs(((struct eth_header *)pkt)->type);
		count = 0;
		/* Search for the frame protocol...*/
		for (i = 0; i < definedprotocols; i++) {
			/* debug... */
			netlev = 10 + i;

			if (eth_table[i].type == (((struct eth_header *)pkt)->type)) {
				count++;
				/*...and call the protocol CallBack!!! */
				eth_table[i].rfun(pkt);
			}
		}

		/* debug... */
		netlev = 20;

//		cprintf("ETH: releasing %p\n", pkt);

		// NOTE changed by PJ because skb.data not always point to the
		// buffer start!!!... it is skb.head that always points there!
		netbuff_release(&rxbuff, skb.head);

		/* debug... */
		netlev = 30;
	}
}
 
/*
--------------------
Interface functions
--------------------
*/
/* formatted print of an ethernet header */
/* Pretty-print the destination address, source address, and type field
   of an ethernet header to the console. */
void eth_printHeader(struct eth_header *p)
{
	cprintf("Dest   : %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x \n",p->dest.ad[0],
	p->dest.ad[1],
	p->dest.ad[2],
	p->dest.ad[3],
	p->dest.ad[4],
	p->dest.ad[5]);
	cprintf("Source : %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x \n",p->source.ad[0],
	p->source.ad[1],
	p->source.ad[2],
	p->source.ad[3],
	p->source.ad[4],
	p->source.ad[5]);
	cprintf("Type : %x\n",p->type);
}
 
/* Dump an eth_device descriptor (IRQ line, I/O base address, and MAC
   address) to the console. */
void eth_showinfo(struct eth_device *d)
{
	cprintf("IntLine     : %d\n",d->IntLine);
	cprintf("BaseAddress : %lx\n",d->BaseAddress);
	cprintf("Address     : %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
	d->addr.ad[0],d->addr.ad[1],d->addr.ad[2],
	d->addr.ad[3],d->addr.ad[4],d->addr.ad[5]);
}
 
/* formatted print of an ethernet frame*/
/*
 * Hex + ASCII dump of an ethernet frame: prints the header, then the
 * payload in rows of up to 10 bytes.
 * Fixed: the dump loops always printed full 10-byte rows, reading past
 * the end of the packet whenever the payload length was not a multiple
 * of 10; both columns now stop at the end of the payload.
 */
void eth_printPkt(char *pkt,int len)
{
	int i,j,offset;
	eth_printHeader((struct eth_header *)pkt);
	offset = sizeof(struct eth_header);
	len -= offset;
	for (i = 0; i < len; i += 10) {
		/* Hex column. */
		for (j = 0; j < 10 && i + j < len; j++)
			cprintf("%2.2x ", pkt[offset+i+j]);
		/* ASCII column. */
		for (j = 0; j < 10 && i + j < len; j++)
			cputc(pkt[offset+i+j]);
		cprintf("\n");
	}
	cprintf("\n");
}
 
void eth_copy_and_sum(struct sk_buff *dest, unsigned char *src, int length, int base)
{
memcpy(dest->data, src, length);
}
 
#if 0
/*-------------------- User Interface -----------------------------*/
/* Local byte-swap helpers, disabled: the htons/ntohs actually used come
   from the network headers; these copies are kept only for reference. */
unsigned short htons(unsigned short host)
{
	return ((host & 0xff00) >> 8) + ((host & 0x00ff) << 8);
}

unsigned short ntohs(unsigned short host)
{
	return ((host & 0xff00) >> 8) + ((host & 0x00ff) << 8);
}
#endif
 
/*
Translate an ethernet address from a text string to an eth_addr
structure
*/
/*
 * Translate an ethernet address from a text string of the form
 * "AA:BB:CC:DD:EE:FF" into an eth_addr structure.
 * Generalized: hexadecimal digits are now accepted in either case
 * ('a'-'f' as well as 'A'-'F'); previously lowercase digits parsed to
 * wrong values.  No validation is performed on malformed input.
 */
void eth_str2Addr(char *add, struct eth_addr *ds)
{
	int ad[6];
	int i,j;
	char c;

	i = 0;
	for(j = 0; j < 6; j++) {
		ad[j] = 0;
		while((add[i] != ':') && (add[i] != 0)) {
			c = add[i++];
			if (c >= '0' && c <= '9') c = c - '0';
			else if (c >= 'a' && c <= 'f') c = c - 'a' + 10;
			else c = c - 'A' + 10;
			ad[j] = ad[j] * 16 + c;
		}
		i++;
	}
	for (i=0; i<6; i++) ds->ad[i] = ad[i];
}
 
/* Set a higher level protocol's CallBack */
/* Register 'recv' as the callback for ethernet protocol 'type'.
   Returns TRUE on success, FALSE when the table is full or the type is
   already registered. */
int eth_setProtocol(WORD type, void (*recv)(void *pkt))
{
	int slot;

	if (definedprotocols == ETH_MAX_PROTOCOLS)
		return FALSE;
	for (slot = 0; slot < definedprotocols; slot++)
		if (eth_table[slot].type == type)
			return FALSE;

	eth_table[definedprotocols].type = type;
	eth_table[definedprotocols].rfun = recv;
	definedprotocols++;

	return TRUE;
}
 
/* Fill an ethernet frame's header and return a pointer to the frame's body */
void *eth_setHeader(void *b,struct eth_addr dest, WORD type)
{
/* destination MAC, then our own MAC from the global device descriptor */
setEthAddr(((struct eth_header *)b)->dest,dest);
setEthAddr(((struct eth_header *)b)->source,eth_dev.addr);
/* the type field is in big-endian format */
((struct eth_header *) b)->type = htons(type);
 
return((BYTE *)b + sizeof(struct eth_header));
}
 
/* getFirstDataByte : Return a pointer to the body of an ethernet frame */
void *eth_getFDB(void *p)
{
return ((void *)((BYTE *)p + sizeof(struct eth_header)));
}
 
/* eth_getAddress : return the local ethernet address */
/* NOTE(review): copies sizeof(struct eth_addr) (6 bytes) straight out
   of device0.dev_addr -- assumes the raw MAC bytes live there; confirm
   against struct device's layout. */
void eth_getAddress(struct eth_addr *eth)
{
memcpy(eth->ad,&(device0.dev_addr),sizeof(struct eth_addr));
}
 
/*
 * Send an ethernet frame whose total length (header included) is
 * len + sizeof(struct eth_header).  Frames shorter than the ethernet
 * minimum of 60 bytes (before CRC) are zero-padded up to it.
 * Always returns TRUE.
 *
 * Fix: the padding loop used "i <= 60", zeroing one byte past the
 * 60-byte minimum frame area (bytes l..60 instead of l..59).
 */
int eth_sendPkt(void *p, int len)
{
int i;
int l;
struct sk_buff buff;

l = len + sizeof(struct eth_header);
if (l < 60) {
/* zero the pad bytes l..59 */
for (i = l; i < 60; i++) *((BYTE *)p + i) = 0;
l = 60;
}
buff.len = l;
buff.data = p;
device0.hard_start_xmit(&buff, &device0);
/* lowlev_send(eth_dev.BaseAddress, p, l); */

return TRUE;
}
 
int eth_exc(int err)
{
int p;
 
if (err != ETH_BUFFERS_FULL) printk(KERN_ERR "Ethernet : ");
switch (err) {
case ETH_DRIVER_NOT_FOUND :
printk(KERN_ERR "NET PANIC --> Etherlink not found.\n");
return 0;
case ETH_RXERROR :
printk(KERN_ERR "Receive error.\n");
return 0;
case ETH_TXERROR :
printk(KERN_ERR "Transimit error: N. Max Retry.\n");
return 0;
case ETH_PROTOCOL_ERROR :
printk(KERN_ERR "Too much protocols.\n");
return 0;
case ETH_BUFFERS_FULL:
printk(KERN_ERR "Buffers full: frame lost!\n");
return 1;
case ETH_NULLPROTOCOL_EXC:
printk(KERN_ERR "Null protocol called!\n");
for (p = 0; p < ETH_MAX_PROTOCOLS; p++) {
printk(KERN_ERR "%d: %d\n", p, eth_table[p].type);
}
return 0;
default :
return 1;
}
}
 
void skb_init(void);
void linuxpci_init(void);
int rtl8139_probe(struct device *dev);
int tc59x_probe(struct device *dev);
int eepro100_probe(struct device *dev);
int el3_probe(struct device *dev);
int ne_probe(struct device *dev);
 
/*
 * eth_init: initialize the ethernet layer.
 *
 * mode: driver mode flags (e.g. LOOPBACK); m: task model for the
 * receive task (when NULL a default aperiodic soft task is built).
 * Returns 1 on success, 0 on failure or if already installed.
 *
 * NOTE(review): the #if 0 / #else regions are the remains of the old
 * native PCI probe + Vortex attach code; only the Linux-derived probe
 * path (#else side) is live.
 */
int eth_init(int mode, TASK_MODEL *m)
{
SOFT_TASK_MODEL m_soft;
int i;
#if 0
ndev;
WORD Class;
struct pci_regs *r;
PID p;
 
#endif
 
BYTE cardtype;
int linux_found = 0;
 
if (!ethIsInstalled) {
/* Scan the devices connected to the PCI bus */
cardtype = NONE;
 
skb_init();
/* stream port where the driver task queues received sk_buffs */
NetRxPort = port_create("NetPort",sizeof(struct sk_buff),50,STREAM,WRITE);
if (!m) {
/* no model supplied: default aperiodic, system, unkillable soft task */
soft_task_default_model(m_soft);
soft_task_def_wcet(m_soft, 1000);
soft_task_def_period(m_soft,20000);
soft_task_def_met(m_soft, 1000);
soft_task_def_aperiodic(m_soft);
soft_task_def_system(m_soft);
soft_task_def_nokill(m_soft);
m = (TASK_MODEL *)&m_soft;
}
 
/* task that moves received frames from the driver to the protocols */
nettask_pid = task_create("rxProc", net_extern_driver, m, NULL);
if (nettask_pid == NIL) {
printk(KERN_ERR "Can't create extern driver.\n");
return 0;
}
task_activate(nettask_pid);
 
if (1) { //!!!if (pci_init() == 1) {
linuxpci_init();
#ifdef DEBUG_ETH
printk(KERN_DEBUG "LF %d\n", linux_found);
#endif
/* try the Linux-derived PCI drivers; the first one to attach wins */
linux_found += (rtl8139_probe(&device0) == 0);
#ifdef DEBUG_ETH
printk(KERN_DEBUG "LF %d\n", linux_found);
#endif
linux_found += (tc59x_probe(&device0) == 0);
#ifdef DEBUG_ETH
printk(KERN_DEBUG "LF %d\n", linux_found);
#endif
linux_found += (eepro100_probe(&device0) == 0);
#ifdef DEBUG_ETH
printk(KERN_DEBUG "LF %d\n", linux_found);
#endif
 
 
#if 0
/* dead code: old native PCI scan and 3COM Vortex attach */
ndev = pci_scan_bus(pci_devs);
#ifdef __ETH_DBG__
pci_show(pci_devs, ndev);
#endif
for (i = 0; i < ndev; i++) {
r = (struct pci_regs *) pci_devs[i].mem;
Class = r->ClassCode;
/* Is there a network card? */
if (Class == 0x0200) {
if (cardtype == NONE) {
cardtype = UNKNOWN;
}
/* is it a 3COM card? */
if (r->VendorId == 0x10b7) {
/* YES!!!!!! */
lowlev_info = vortex_info;
lowlev_readregs = vortex_readregs;
lowlev_open = vortex_open;
lowlev_close = vortex_close;
if (mode == TXTASK) {
lowlev_send = vortex_send_msg;
} else {
lowlev_send = vortex_send_mem;
}
printk(KERN_INFO "PCI Ethlink card found:\n");
lowlev_info(r);
cardtype = VORTEX;
}
}
}
}
if ((cardtype == NONE) || (cardtype == UNKNOWN)) {
exc_raise(ETH_DRIVER_NOT_FOUND);
}
/*PUT HERE THE PFUN INIT!!!*/
if (cardtype == VORTEX) {
}
/*
Use it if you want to see the value of the internal
registers of the card
*/
/*vortex_readregs(eth_dev.BaseAddress);*/
 
p = lowlev_open(eth_dev.BaseAddress, mode);
/* Set the Fast Handler and the external process */
handler_set(eth_dev.IntLine, net_handler, p);
#else
}
/* no PCI card attached: fall back to the ISA probes */
if (linux_found == 0) {
linux_found += (el3_probe(&device0) == 0);
#ifdef DEBUG_ETH
printk(KERN_DEBUG "LF %d\n", linux_found);
#endif
linux_found += (ne_probe(&device0) == 0);
#ifdef DEBUG_ETH
printk(KERN_DEBUG "LF %d\n", linux_found);
#endif
linux_found += (NS8390_init(&device0, 1) == 0);
#ifdef DEBUG_ETH
printk(KERN_DEBUG "LF %d\n", linux_found);
#endif
}
 
/*
if (mode & LOOPBACK) {
cprintf("Installing loopback device (forced)\n");
loopback_init(&device0);
}
*/
if (linux_found) {
device0.open(&device0);
printk(KERN_INFO "Network card found.\n");
} else {
printk(KERN_INFO "No network card found.\n");
/* cprintf("No network card found. Installing loopback device.\n");
loopback_init(&device0);
device0.open(&device0);*/
return 0;
}
#endif
 
//netbuff_init(&rxbuff, ETH_RX_BUFFERS, ETH_MAX_LEN);
//netbuff_init(&txbuff, ETH_TX_BUFFERS, ETH_MAX_LEN);
 
/* reset the higher-level protocol dispatch table */
definedprotocols = 0;
for (i = 0; i < ETH_MAX_PROTOCOLS; i++) {
eth_table[i].type = 0;
eth_table[i].rfun = eth_nullfun;
}
ethIsInstalled = TRUE;
/* Don't crash the system at the exit, please :) */
sys_atrunlevel(eth_close,NULL,RUNLEVEL_BEFORE_EXIT);
 
} else {
printk(KERN_INFO "Ethernet already installed.\n");
return 0;
}
return 1;
}
 
/*
 * Shutdown hook registered with sys_atrunlevel(): stop the network
 * device (if the layer was ever installed) so the system exits cleanly.
 * The 'a' argument is unused.
 */
void eth_close(void *a)
{
#ifdef DEBUG_ETH
printk(KERN_DEBUG "Network Closing.\n");
#endif
if (ethIsInstalled) {
device0.stop(&device0);
/* Calling lowlev_close() here seems to break everything... */
// lowlev_close(eth_dev.BaseAddress);
ethIsInstalled = FALSE;
}
#ifdef DEBUG_ETH
printk(KERN_DEBUG "Network Closed.\n");
#endif
}
/shark/trunk/drivers/net/udpip.c
0,0 → 1,568
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: udpip.c,v 1.5 2004-05-11 14:30:51 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.5 $
Last update: $Date: 2004-05-11 14:30:51 $
------------
**/
 
/*
* Copyright (C) 2000 Luca Abeni and Giuseppe Lipari
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/* Author: Giuseppe Lipari & Luca Abeni */
/* Date: 1/12/97 */
 
/* File: UDPIPDrv.C */
/* Revision: 2.0 */
 
/*
UDP and IP layers. IP doesn't support fragmented packets (fragmentation
isn't Real-Time!!!). UDP allows to fill some entries of the ARP table
at the moment of the creation of a socket (see udp_bind).
*/
 
#include <kernel/kern.h>
#include <semaphore.h>
 
#include "eth_priv.h"
#include "netbuff.h"
#include <drivers/udpip.h>
#include "arp.h"
 
//#define __UDP_DBG__
//#define __IP_DBG__
#define UDP_INFO "[UDP] "
#define IP_INFO "[IP] "
 
/*
  UDP-buffers number and dimension (UDP-buffers are provided by the NetBuff
  module)
*/
#define UDP_RX_BUFFERS 4
#define UDP_TX_BUFFERS 4
#define UDP_MAX_LEN 1000
#define UDP_MAX_HANDLES 4
 
/* Only 4 debug */
extern int netlev;
 
int ipIsInstalled = FALSE;
extern struct ip_addr myIpAddr;
struct ip_table ipTable[IP_MAX_ENTRIES];
extern ARP_TABLE arpTable[ARP_MAX_ENTRIES];
IP_ADDR IPbroadcastaddress;
 
int udpIsInstalled = FALSE;
struct netbuff udp_txbuff;
struct netbuff udp_rxbuff;
struct udp_table udpTable[UDP_MAX_HANDLES];
struct eth_addr broadcast;
 
/* Return a pointer to this host's (global) IP address.
   Fix: K&R empty parameter list replaced with a proper (void) prototype. */
IP_ADDR *ip_getAddr(void)
{
return &myIpAddr;
}
 
/* Compare two IP addresses octet by octet: TRUE when equal, FALSE
   otherwise. */
int ip_compAddr(IP_ADDR ip1, IP_ADDR ip2)
{
int octet = 0;

while (octet < 4) {
if (ip1.ad[octet] != ip2.ad[octet])
return FALSE;
octet++;
}
return TRUE;
}
 
/*
 * Translate an IP address from dotted-decimal text ("a.b.c.d") into the
 * 4-byte IP_ADDR.  Always returns 1; no validation of malformed input.
 *
 * Fix: the scratch array was declared int ad[6], an oversized leftover
 * from the 6-octet MAC parser -- only 4 octets are ever parsed.
 * NOTE(review): the 'A' branch below is also a leftover from the hex
 * MAC parser; valid dotted-decimal input only ever takes the digit path.
 */
int ip_str2addr(char *a, IP_ADDR *ip)
{
int ad[4];
int i, j;
char c;

i = 0;
for (j = 0; j < 4; j++) {
ad[j] = 0;
while ((a[i] != '.') && (a[i] != 0)) {
c = a[i++];
if (c <= '9') c = c - '0';
else c = c - 'A' + 10;
ad[j] = ad[j] * 10 + c;
}
i++;
}

for (i = 0; i < 4; i++)
ip->ad[i] = ad[i];

return 1;
}
 
/* give the body of an IP packet */
void *ip_getFDB(void *pkt)
{
return((void *)(((BYTE *)eth_getFDB(pkt)) + sizeof(IP_HEADER)));
}
 
/*
 * Received IP packet CallBack (registered with eth_setProtocol).
 * Verifies the IP header checksum and destination address, drops
 * fragments (fragmentation is unsupported), then either dispatches to
 * a registered transport protocol or -- for UDP -- verifies the UDP
 * checksum and delivers the payload to the matching socket, via its
 * notify function or its receive port.
 */
void ip_server_recv(void *pkt)
{
IP_HEADER *iphd;
WORD *ptcheck;
WORD checksum,oldChk;
int hlen,i;
int done;
char dummystr[50];
BYTE flag;
 
UDP_HEADER *udphd;
struct pseudo_hd ph;
WORD sum, old;
WORD *p;
UDP_MSG usermsg;
void *b, *s;
 
/* debug...*/
netlev = 2;
 
sprintf(dummystr, "Packet arrived!\n");
iphd = (IP_HEADER *)eth_getFDB(pkt);
 
/* compute the checksum: one's-complement sum (with end-around carry)
   over the header 16-bit words; a valid header sums to 0xffff */
ptcheck = (WORD *)iphd;
#ifdef __IP_DBG__
//!!!ip_print_header(iphd);
#endif
hlen = getHlen(iphd->vers_hlen) * 2;
checksum = oldChk = *ptcheck;
ptcheck++;
for (i=1; i<hlen; i++) {
checksum += *ptcheck;
if (checksum < oldChk) checksum ++;
oldChk = checksum;
ptcheck++;
}
if (checksum != 0xffff) {
/* wrong ChkSum */
#ifdef __IP_DBG__
printk(KERN_DEBUG IP_INFO "Wrong checksum: %x.\n", checksum);
#endif
} else if (!ip_compAddr(iphd->dest,myIpAddr)) {
#ifdef __IP_DBG__
printk(KERN_DEBUG IP_INFO "Packet not addressed to this host.\n");
#endif
} else if (getFlags(ntohs(iphd->flags_frOffset)) & 0x01) {
#ifdef __IP_DBG__
/* fragment? */
printk(KERN_DEBUG IP_INFO "Gotta a fragment!\n");
#endif
} else if (getFrOffset(ntohs(iphd->flags_frOffset)) != 0) {
#ifdef __IP_DBG__
printk(KERN_DEBUG IP_INFO "Gotta a fragment again!\n");
#endif
} else {
/* OK: the packet is good... */
if (iphd->protocol != IP_UDP_TYPE) {
/* It isn't an UDP packet */
done = FALSE;
i = 0;
while ((i < IP_MAX_ENTRIES) && !done) {
if (ipTable[i].protocol == iphd->protocol) done = TRUE;
else i++;
}
if (!done) {
#ifdef __IP_DBG__
/* Unknown transport protocol */
printk(KERN_DEBUG IP_INFO "Wrong protocol.\n");
#endif
} else {
/* Call the correct transport protocol CallBack */
ipTable[i].rfun((void *)((BYTE *)iphd + 4*getHlen(iphd->vers_hlen)));
}
} else {
/* UDP packet */
netlev = 4;
 
udphd = (UDP_HEADER *)(void *)((BYTE *)iphd + 4*getHlen(iphd->vers_hlen));
/* compute the UDP checksum: pseudo-header first (RFC 768) */
for (i = 0; i < 4; i++) {
ph.source.ad[i] = iphd->source.ad[i];
ph.dest.ad[i] = iphd->dest.ad[i];
}
ph.zero = 0;
ph.protocoll = IP_UDP_TYPE;
ph.len = udphd->mlen;
sum = 0;
old = 0;
p = (WORD *)&ph;
for (i = 0; i < (sizeof(ph) >> 1); i++) {
sum += p[i];
if (sum < old) sum ++;
old = sum;
}
#ifdef __UDP_DBG__
printk(KERN_DEBUG UDP_INFO "Half sum: %x\n",sum);
#endif
/* ...then over UDP header + payload, zero-padded to an even length */
p = (WORD *)udphd;
((BYTE *)udphd)[ntohs(udphd->mlen)] = 0;
for (i = 0; i < ((ntohs(udphd->mlen) + 1) >> 1); i++) {
sum += p[i];
if (sum < old) sum ++;
old = sum;
}
#ifdef __UDP_DBG__
printk(KERN_DEBUG UDP_INFO "Packet received.\n");
printk(KERN_DEBUG UDP_INFO "s_port: %x\n",udphd->s_port);
printk(KERN_DEBUG UDP_INFO "d_port: %x\n",udphd->d_port);
printk(KERN_DEBUG UDP_INFO "m_len: %x %d\n",udphd->mlen, ntohs(udphd->mlen));
printk(KERN_DEBUG UDP_INFO "checksum: %x\n",udphd->checksum);
printk(KERN_DEBUG UDP_INFO "mysum: %x \n", sum);
#endif
if(sum != 0xFFFF) {
/* Wrong UDP ChkSum */
cprintf("Error %x!\n", sum);
kern_raise(XUDP_BADCHK_EXC,exec_shadow);
} else {
done = FALSE;
i = 0;
/* searching for the destination socket...*/
while((i < UDP_MAX_HANDLES) && !done) {
if ((udpTable[i].valid == TRUE) && (udpTable[i].port == ntohs(udphd->d_port)))
done = TRUE;
else i++;
}
if (done) {
/*...found! */
s = (void *)(((BYTE *)udphd) + sizeof(UDP_HEADER));
if (udpTable[i].notify == TRUE) {
/* notify function associated to the socket: call it */
udpTable[i].notify_fun(ntohs(udphd->mlen) - sizeof(UDP_HEADER),
s, udpTable[i].notify_par);
} else {
/* otherwise, send the packet to the correct port */
if((b = netbuff_get(&udp_rxbuff, NON_BLOCK)) != 0) {
memcpy(b,s,ntohs(udphd->mlen) - sizeof(UDP_HEADER) + 1);
setIpAddr(usermsg.addr.s_addr, iphd->source);
usermsg.addr.s_port = ntohs(udphd->s_port);
usermsg.mlen = ntohs(udphd->mlen) - sizeof(UDP_HEADER);
usermsg.buff = b;
flag = port_send(udpTable[i].hport,&usermsg,NON_BLOCK);
if (!flag) {
/* receiver is not draining the port: drop the datagram */
netbuff_release(&udp_rxbuff, b);
#ifdef __UDP_DBG__
printk(KERN_DEBUG "Port is filled up.\n");
#endif
}
}
}
} else {
#ifdef __UDP_DBG__
printk(KERN_DEBUG UDP_INFO "Port not found.\n");
#endif
}
}
}
}
}
 
/*
 * Send an IP packet: build the IPv4 header around the payload already
 * placed in 'pkt' (a udp_txbuff buffer), compute the header checksum
 * and hand the frame to the ethernet layer.  When the destination MAC
 * is not in the ARP table, the packet is passed to arp_send() which
 * queues it and releases the buffer later.  'len' is the payload length.
 */
void ip_send(IP_ADDR dest, void *pkt, WORD len)
{
static WORD ip_ident = 0;
IP_HEADER *iphd;
WORD check = 0, oldCheck = 0;
int i, done;
WORD *pt;
 
iphd = (IP_HEADER *)eth_getFDB(pkt);
iphd->vers_hlen = (4 << 4) + 5;  /* IPv4, 5-word (20-byte) header */
iphd->servType = 8;
iphd->lenght = htons(len + sizeof(IP_HEADER));  /* field name sic */
iphd->ident = htons(ip_ident++);
iphd->flags_frOffset = 0;
iphd->ttl = 10;
iphd->protocol = IP_UDP_TYPE;
iphd->headChecksum = 0;
iphd->source = myIpAddr;
iphd->dest = dest;
 
/* Compute the checksum: one's-complement sum over the 10 header words */
pt = (WORD *)iphd;
check = oldCheck = *pt;
pt++;
for (i = 1; i < 10; i++) {
check += *pt;
if (oldCheck > check) check++;
oldCheck = check;
pt++;
}
check = ~check;
iphd->headChecksum = check;
#ifdef __IP_DBG__
printk(KERN_DEBUG IP_INFO "Serv type : %d\n", iphd->servType);
#endif
/* Is the destination IP address the broadcast address?*/
if (ip_compAddr(dest,IPbroadcastaddress)) {
/* Send the packet*/
eth_setHeader(pkt,broadcast,ETH_IP_TYPE);
eth_sendPkt(pkt,len + sizeof(IP_HEADER));
netbuff_release(&udp_txbuff, (void *)pkt);
} else {
/* Is the destination ethernet address in the ARP table? */
i = 0; done = 0;
while (i < ARP_MAX_ENTRIES && !done)
if (arpTable[i].valid == TRUE) {
if (ip_compAddr(dest,arpTable[i].ip)) {
done = TRUE;
} else i++;
} else i++;
if (done == FALSE) {
/* No: call ARP to get the ethernet address */
arp_send(pkt, dest, len + sizeof(IP_HEADER));
} else {
/* Yes: directly send the packet */
eth_setHeader(pkt,arpTable[i].eth,ETH_IP_TYPE);
eth_sendPkt(pkt, len + sizeof(IP_HEADER));
netbuff_release(&udp_txbuff, (void *)pkt);
arpTable[i].used++;
if (arpTable[i].used > ARP_MAX_USED) arpTable[i].used = ARP_MAX_USED;
}
}
}
 
/*
 * Register the receive callback of a new transport protocol with the
 * IP layer.  Returns TRUE on success, FALSE when the table is full.
 */
int ip_setProtocol(BYTE proto, void (*recv)(void *m))
{
BYTE slot;

for (slot = 0; slot < IP_MAX_ENTRIES; slot++) {
if (ipTable[slot].rfun == NULL) {
ipTable[slot].protocol = proto;
ipTable[slot].rfun = recv;
return TRUE;
}
}
return FALSE;
}
 
/*int ip_error(int code)
{
cprintf("IP error\n");
cprintf("Code [%d]\nCause : %s",code,ip_error_msg[code-IP_ERROR_BASE]);
return(0);
}*/
 
/* Initialize the IP layer: it also calls the ARP initialization
   function.  'p' is really a struct ip_params* carrying the local and
   broadcast addresses in dotted-decimal text form. */
void ip_init(void *p)
{
int i;
 
if (!ipIsInstalled) {
arp_init(((struct ip_params*)p)->localAddr);
//exc_set(IP_INIT_ERROR,ip_error);
/* empty transport-protocol table: rfun == NULL marks a free slot */
for (i=0; i < IP_MAX_ENTRIES; i++)
ipTable[i].rfun = NULL;
 
eth_setProtocol(ETH_IP_TYPE, ip_server_recv);
ip_str2addr(((struct ip_params*)p)->broadcastAddr,&IPbroadcastaddress);
ipIsInstalled = TRUE;
eth_str2Addr("FF:FF:FF:FF:FF:FF",&broadcast);
} else
cprintf("IP: already installed.\n");
}
 
/*
 * Blocking receive on UDP socket 's': copy the next datagram payload
 * into 'buff', fill 'from' with the sender's address/port, and return
 * the payload length in bytes.
 */
int udp_recvfrom(int s, void *buff, UDP_ADDR *from)
{
UDP_MSG msg;

port_receive(udpTable[s].pport, &msg, BLOCK);
*from = msg.addr;
memcpy(buff, msg.buff, msg.mlen);
netbuff_release(&udp_rxbuff, msg.buff);

return msg.mlen;
}
 
/*
 * Attach (or, when f == NULL, detach) a notify function to socket 's'.
 * While set, incoming datagrams are handed directly to 'f' instead of
 * being queued on the socket's receive port.  Returns 1 on success,
 * -1 when the socket is not valid.
 */
int udp_notify(int s, int (*f)(int len, BYTE *buff, void *p), void *p)
{
if (f == NULL) {
udpTable[s].notify = FALSE;
return 1;
}
if (udpTable[s].valid != TRUE)
return -1;

udpTable[s].notify_fun = f;
udpTable[s].notify_par = p;
udpTable[s].notify = TRUE;

return 1;
}
 
/* Create a new socket binding it to a specified IP port.
   Returns the socket index, or -1 when the socket table is full.
   'bindlist' (optional, zero-terminated) lists IP addresses whose
   ethernet addresses are prefetched into the ARP table. */
int udp_bind(UDP_ADDR *local, IP_ADDR *bindlist)
{
int i, j;
BYTE done;
char str[30];
 
/* Search for a free entry in the socket table */
i = 0; done = FALSE;
while ((i < UDP_MAX_HANDLES) && !done) {
kern_cli();  /* reservation must be atomic w.r.t. other tasks */
if ((udpTable[i].valid == FALSE)) {
done = TRUE;
udpTable[i].valid = 2;  /* transient state: reserved, not yet usable */
} else i++;
kern_sti();
}
 
/* No free entries: bind fail! */
if (!done)
return -1;
 
/* Create a receive port for the socket */
udpTable[i].port = local->s_port;
sprintf(str,"UDP%d",i);
udpTable[i].hport = port_create(str,sizeof(UDP_MSG),4,STREAM,WRITE);
udpTable[i].pport = port_connect(str,sizeof(UDP_MSG),STREAM,READ);
udpTable[i].valid = TRUE;
 
/*
 * Request for the ethernet addresses associated to the IP addresses
 * given in the bindlist.
 * NOTE(review): arp_req() can return -1 when the ARP table is full;
 * arp_sendRequest(-1) would then index arpTable out of bounds --
 * consider checking j before the call.
 */
if (bindlist != NULL) {
while (*(int*)bindlist != 0) {
/* Ignore broadcast IP address */
if (!ip_compAddr(*bindlist,IPbroadcastaddress)) {
j = arp_req(*bindlist);
arp_sendRequest(j);
}
bindlist ++;
}
}
 
return i;
}
 
/*
 * Send 'nbytes' bytes from 'buff' on UDP socket 's' to address 'to'.
 * The payload is silently truncated to UDP_MAX_LEN.  Blocks until a
 * transmit buffer is available; returns the number of bytes sent.
 *
 * Fix: the original clamped nbytes to UDP_MAX_LEN only AFTER writing
 * it into udphd->mlen, so for oversized payloads the UDP length field
 * and the checksum loop covered bytes that were never copied into the
 * packet (and the checksum loop read past the copied data).  The clamp
 * now happens before the header is built.
 */
int udp_sendto(int s, void *buff, int nbytes, UDP_ADDR *to)
{
void *pkt;
UDP_HEADER *udphd;
char *msg;

#ifdef __UDP_DBG__
static int num_pack = 0;
#endif

WORD sum, old;
int i;
struct pseudo_hd ph;
WORD *p;
IP_ADDR *source;

/* clamp BEFORE the length is recorded in the header */
if (nbytes > UDP_MAX_LEN) nbytes = UDP_MAX_LEN;

pkt = netbuff_get(&udp_txbuff, BLOCK);
udphd = (UDP_HEADER *)ip_getFDB(pkt);
udphd->s_port = htons(udpTable[s].port);
udphd->d_port = htons(to->s_port);
udphd->mlen = htons((WORD)nbytes + sizeof(UDP_HEADER));
msg = (char *)(((BYTE *)udphd) + sizeof(UDP_HEADER));
memcpy(msg,buff,nbytes);

source = ip_getAddr();
/* Compute the CheckSum: pseudo-header first (RFC 768)... */
udphd->checksum = 0;
for (i = 0; i < 4; i++) {
ph.source.ad[i] = source->ad[i];
ph.dest.ad[i] = to->s_addr.ad[i];
}
ph.zero = 0;
ph.protocoll = 17;  /* IP protocol number of UDP */
ph.len = udphd->mlen;
sum = 0; old = 0;
p = (WORD *)&ph;
for (i = 0; i < (sizeof(ph) >> 1); i++) {
sum += p[i];
if (sum < old) sum ++;  /* end-around carry */
old = sum;
}
/* ...then over UDP header + payload, zero-padded to an even length */
p = (WORD *)udphd;
((BYTE *)udphd)[ntohs(udphd->mlen)] = 0;
for (i = 0; i < ((ntohs(udphd->mlen) + 1) >> 1); i++) {
sum += p[i];
if (sum < old) sum++;
old = sum;
}
udphd->checksum = ~sum;
ip_send(to->s_addr, pkt, ((WORD)nbytes + sizeof(UDP_HEADER)));
#ifdef __UDP_DBG__
printk(KERN_DEBUG UDP_INFO "Packets sent: %d.\n", num_pack++);
#endif
return nbytes;
}
 
/*
 * Initialize the UDP layer: allocate the rx/tx network buffer pools
 * and mark every socket-table entry free.  Safe against double
 * initialization; 'dummy' is unused.
 */
void udp_init(void *dummy)
{
int slot;

if (udpIsInstalled) {
printk(KERN_WARNING UDP_INFO ": Already installed.\n");
return;
}

netbuff_init(&udp_rxbuff, UDP_RX_BUFFERS, UDP_MAX_LEN);
netbuff_init(&udp_txbuff, UDP_TX_BUFFERS, ETH_MAX_LEN);

for (slot = 0; slot < UDP_MAX_HANDLES; slot++) {
udpTable[slot].valid = FALSE;
udpTable[slot].notify = FALSE;
}
udpIsInstalled = TRUE;
}
/shark/trunk/drivers/net/arp.c
0,0 → 1,456
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: arp.c,v 1.4 2004-05-11 14:30:48 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.4 $
Last update: $Date: 2004-05-11 14:30:48 $
------------
**/
 
/*
* Copyright (C) 2000 Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
/* Author: Giuseppe Lipari & Luca Abeni */
/* Date: 4/12/97 */
 
/* File: ArpDrv.C */
/* Revision: 2.0 */
 
/*
  ARP layer. It associates the correct ethernet addresses to IP addresses;
  this is done using an ARP table, created dynamically. This mechanism can
  introduce unpredictability (it is impossible to predict how much time is
  required to obtain an ethernet address from the network). To solve this
  problem, ARP is used only the first time that a computer is addressed.
*/
 
#include <kernel/kern.h>
#include <semaphore.h>
 
#include "eth_priv.h"
#include "netbuff.h"
#include <drivers/udpip.h>
#include "arp.h"
#include <signal.h>
 
//#define __ARP_DBG__
#define ARP_INFO "[ARP] "
 
#define FALSE 0
#define TRUE 1
#define PENDING 2
#define ARP_TIMEOUT 5
#define ARP_MAX_RETRANS 4
#define ARP_PRIORITY 10
 
#define ARP_LEN 70
 
/*
  Structure used to enqueue the packets destined to a host whose ethernet
  address is still unknown. This structure overwrites the first fields of
  the ethernet header, but that isn't a problem because it is used only
  locally
*/
/* Singly-linked queue node overlaid on a pending packet's buffer. */
typedef struct {
int len;    /* full frame length to transmit once the MAC is known */
void *next; /* next queued packet (really a PKT_NXT *), NULL-terminated */
} PKT_NXT;
 
/* On-wire ARP packet layout (RFC 826), carried in an ethernet frame. */
typedef struct arp_pkt{
WORD htype;          /* hardware type (ethernet) */
WORD ptype;          /* protocol type (IP) */
BYTE hlen;           /* hardware address length (6) */
BYTE plen;           /* protocol address length (4) */
WORD operation;      /* ARP_REQUEST or ARP_REPLY */
struct eth_addr sha; /* sender hardware address */
IP_ADDR sip;         /* sender IP address */
struct eth_addr tha; /* target hardware address */
IP_ADDR tip;         /* target IP address */
} ARP_PKT;
 
struct netbuff arp_txbuff;
struct eth_addr broadcast,nulladdr;
 
BYTE reply[ETH_MAX_LEN];
extern struct netbuff udp_txbuff;
 
ARP_TABLE arpTable[ARP_MAX_ENTRIES];
sem_t arpMutex;
 
int arpIsInstalled = FALSE;
 
struct eth_addr myEthAddr;
IP_ADDR myIpAddr;
 
/*
  ARP Exceptions Handler: in particular, it is called when the ARP table
  is full.  The default action is to free the least-used non-PENDING
  entry (the simplest thing to do!).  A hard real-time task should
  disable this handler: for a hard task the full-table condition is an
  error, because it can generate unpredictability.

  Fix: if every entry is PENDING (j stays -1) the original fell through
  after sys_abort() and wrote arpTable[-1].valid -- an out-of-bounds
  write if sys_abort() ever returns.  An explicit return guards that.
*/
void arp_exc(int err)
{
int i, j, minused;

minused = ARP_MAX_USED; j = -1;
for (i = 0; i < ARP_MAX_ENTRIES; i++) {
if ((arpTable[i].valid != PENDING) && (arpTable[i].used <= minused)) {
j = i;
minused = arpTable[i].used;
}
}
if (j == -1) {
cprintf("ARP table overflow.\n");
sys_abort(AARPFULL);
return;  /* defensive: never index arpTable[-1] */
}
arpTable[j].valid = FALSE;
}
 
/*
  Send an ARP request for table entry 'i': broadcast a who-has for
  arpTable[i].ip.  If there aren't free buffers, do nothing (the
  periodic arp_retry task will retransmit later).
*/
void arp_sendRequest(int i)
{
ARP_PKT *pkt;
BYTE *arpBuff;
 
if ((arpBuff= netbuff_get(&arp_txbuff, NON_BLOCK)) != NULL) {
eth_setHeader(arpBuff,broadcast,ETH_ARP_TYPE);
pkt = (ARP_PKT *)eth_getFDB(arpBuff);
pkt->htype = htons(ARP_ETH_TYPE);
pkt->ptype = htons(ARP_IP_TYPE);
pkt->hlen = sizeof(struct eth_addr);
pkt->plen = sizeof(IP_ADDR);
pkt->operation = htons(ARP_REQUEST);
setEthAddr(pkt->sha,myEthAddr);
/* target MAC is unknown: all zeroes by convention */
setEthAddr(pkt->tha,nulladdr);
setIpAddr(pkt->sip,myIpAddr);
setIpAddr(pkt->tip,arpTable[i].ip);
eth_sendPkt(arpBuff,sizeof(ARP_PKT));
netbuff_release(&arp_txbuff, (void *)arpBuff);
}
}
 
/* Retry task (periodic): each period it ages the PENDING ARP entries;
   on timeout it either retransmits the request or -- after
   ARP_MAX_RETRANS attempts -- drops all queued packets and frees the
   entry. */
TASK arp_retry(void)
{
int i;
PKT_NXT *p, *p1;
 
while (1) {
/* mutual exclusion on the ARP table */
sem_xwait(&arpMutex, 1, BLOCK);
for (i = 0; i < ARP_MAX_ENTRIES; i++) {
if (arpTable[i].valid == PENDING) {
arpTable[i].time--;
if (arpTable[i].time <= 0) {
arpTable[i].ntrans++;
if (arpTable[i].ntrans > ARP_MAX_RETRANS) {
/* N. Max Retry? If yes, discard all the packets */
p = (PKT_NXT *)arpTable[i].pkt;
while (p != NULL) {
p1 = p->next;
netbuff_release(&udp_txbuff, (void *)p);
#ifdef __ARP_DBG__
printk(KERN_DEBUG ARP_INFO "Pacchetto : %lp scartato.\n",p);
#endif
p = p1;
}
arpTable[i].valid = FALSE;
} else {
arp_sendRequest(i);
arpTable[i].time = ARP_TIMEOUT;
}
}
}
}
sem_post(&arpMutex);
/* wait for the next activation period */
task_endcycle();
}
}
 
/*
 * Reserve a free entry in the ARP table for 'dest' and mark it PENDING
 * with a fresh timeout and zero retransmissions.  Returns the entry
 * index, or -1 when the table is full.
 */
int arp_req(IP_ADDR dest)
{
int slot;

for (slot = 0; slot < ARP_MAX_ENTRIES; slot++) {
if (arpTable[slot].valid == FALSE) {
arpTable[slot].valid = PENDING;
setIpAddr(arpTable[slot].ip, dest);
#ifdef __ARP_DBG__
printk(KERN_DEBUG ARP_INFO "Indirizzo : %d.%d.%d.%d\n",dest.ad[0], dest.ad[1], dest.ad[2], dest.ad[3]);
printk(KERN_DEBUG ARP_INFO "Indirizzo : %d.%d.%d.%d\n",arpTable[slot].ip.ad[0], arpTable[slot].ip.ad[1], arpTable[slot].ip.ad[2], arpTable[slot].ip.ad[3]);
#endif
arpTable[slot].time = ARP_TIMEOUT;
arpTable[slot].ntrans = 0;
return slot;
}
}
return -1;
}
 
/*
  Send an IP packet.  If the destination ethernet address is in the ARP
  table the frame goes out immediately (case 1); if a request for it is
  pending, the packet is appended to the entry's queue (case 2);
  otherwise a new entry is reserved and an ARP request broadcast
  (case 3).

  Fix: in case 2 the original walked the pending queue with an
  uninitialized tail pointer and dereferenced NULL (p1->next) when the
  queue was empty.  The append now handles the empty-queue case.
  NOTE(review): arp_req() never initializes .pkt, so an entry created
  by udp_bind's prefetch may still carry a stale pointer -- confirm.
*/
void arp_send(void *pkt, IP_ADDR dest, int len)
{
int i,j;
PKT_NXT *p;
int caso;

sem_xwait(&arpMutex, 1, BLOCK);
caso = 0;
j = -1;
for (i = 0; i < ARP_MAX_ENTRIES; i++) {
if (ip_compAddr(dest,arpTable[i].ip)) {
/* found: CASE 1 */
if (arpTable[i].valid == TRUE) {
caso = 1;
j = i;
}
else if (arpTable[i].valid == PENDING) {
/* Entry found, but the ethernet address is still unknown: CASE 2 */
caso = 2;
j = i;
}
}
}
if (caso == 1) {
/* Send the IP packet */
eth_setHeader(pkt,arpTable[j].eth,ETH_IP_TYPE);
eth_sendPkt(pkt,len);
netbuff_release(&udp_txbuff, (void *)pkt);
arpTable[j].used++;
if (arpTable[j].used > ARP_MAX_USED) arpTable[j].used = ARP_MAX_USED;
} else if (caso == 2) {
/* Enqueue the packet until the ethernet address arrives */
((PKT_NXT *)pkt)->next = NULL;
((PKT_NXT *)pkt)->len = len;
p = (PKT_NXT *)arpTable[j].pkt;
if (p == NULL) {
arpTable[j].pkt = pkt;
} else {
while (p->next != NULL)
p = (PKT_NXT *)p->next;
p->next = pkt;
}
#ifdef __ARP_DBG__
printk(KERN_DEBUG ARP_INFO "Pacchetto : %lp accodato.\n", pkt);
#endif
} else {
/* Search for a free entry in the ARP table...*/
j = -1;
while (j == -1) {
j = arp_req(dest);
if (j == -1) {
cprintf("ARP Table Full.\n");
kern_raise(XARP_TABLE_FULL,NIL);
}
}
/*...fill it...*/
arpTable[j].pkt = pkt;
((PKT_NXT *)arpTable[j].pkt)->next = NULL;
((PKT_NXT *)pkt)->len = len;
#ifdef __ARP_DBG__
printk(KERN_DEBUG ARP_INFO "Pacchetto : %lp accodato\n", pkt);
#endif
/*...and send the request!!! */
arp_sendRequest(j);
}
sem_post(&arpMutex);
}
 
/* ARP packet received CallBack (registered with eth_setProtocol).
   Learns the sender's <IP, MAC> pair, answers requests addressed to
   this host and, on replies, flushes the packets queued on the
   matching PENDING entry. */
void arp_server_recv(void *pk)
{
ARP_PKT *pkt,*rpkt;
PKT_NXT *p1,*q1;
int len;
int i,j = 0;
BYTE found;
pkt = (ARP_PKT *)eth_getFDB(pk);
 
#if 0
{
int ii;
 
cprintf("Arp PKT...\n");
cprintf ("source...");
for (ii=0; ii<4; ii++) cprintf("%d ", pkt->sip.ad[ii]);
cprintf ("\ndest...");
for (ii=0; ii<4; ii++) cprintf("%d ", pkt->tip.ad[ii]);
}
#endif
 
/* Check if the packet is directed to this host...*/
if (ip_compAddr(pkt->tip,myIpAddr)) {
sem_xwait(&arpMutex, 1, BLOCK);
/* 1 : Search an entry with his IP address */
found = FALSE;
for (i = 0; (i < ARP_MAX_ENTRIES) && !found; i++) {
if ((arpTable[i].valid != FALSE) && ip_compAddr(arpTable[i].ip,pkt->sip)) {
setEthAddr(arpTable[i].eth,pkt->sha);
found = TRUE;
}
}
/* If there isn't any, fill a new entry (if the table is not full) */
if (!found) {
for (i = 0; (i < ARP_MAX_ENTRIES) && !found; i++)
if (arpTable[i].valid == FALSE) {
j = i;
found = TRUE;
}
if (found) {
setIpAddr(arpTable[j].ip,pkt->sip);
setEthAddr(arpTable[j].eth,pkt->sha);
arpTable[j].valid = TRUE;
}
}
 
/* If it is a request, send the reply */
if (ntohs(pkt->operation) == ARP_REQUEST) {
/* 'reply' is a file-scope scratch frame buffer */
rpkt = (ARP_PKT *)eth_getFDB(reply);
rpkt->htype = htons(ARP_ETH_TYPE);
rpkt->ptype = htons(ARP_IP_TYPE);
rpkt->hlen = sizeof(struct eth_addr);
rpkt->plen = sizeof(IP_ADDR);
rpkt->operation = htons(ARP_REPLY);
setEthAddr(rpkt->sha,myEthAddr);
setIpAddr(rpkt->sip,myIpAddr);
setEthAddr(rpkt->tha,pkt->sha);
setIpAddr(rpkt->tip,pkt->sip);
eth_setHeader(reply, pkt->sha, ETH_ARP_TYPE);
eth_sendPkt(reply,sizeof(ARP_PKT));
}
/* If it is a reply, search for his pending request */
else {
for (i = 0; i < ARP_MAX_ENTRIES; i++) {
if ((arpTable[i].valid == PENDING) && ip_compAddr(arpTable[i].ip,pkt->sip)) {
/* the eth field in the ARP table was filled previously */
arpTable[i].valid = TRUE;
/* Send pending packets */
p1 = (PKT_NXT *)arpTable[i].pkt;
while (p1 != NULL) {
q1 = p1;
p1 = q1->next;
len = q1->len;
eth_setHeader((struct ETH_HEADER *)q1,arpTable[i].eth,ETH_IP_TYPE);
eth_sendPkt(q1,len);
netbuff_release(&udp_txbuff, (void *)q1);
#ifdef __ARP_DBG__
printk(KERN_DEBUG ARP_INFO "Pacchetto : %lp inviato\n", q1);
#endif
}
}
}
}
sem_post(&arpMutex);
}
}
 
/* Initialize the ARP layer: clear the table, create its mutex and tx
   buffer pool, parse the local IP address and learn the local MAC,
   spawn the periodic retry task and install the table-full signal
   handler.  'localAddr' is the local IP in dotted-decimal text. */
void arp_init(char *localAddr)
{
int i;
PID s;
SOFT_TASK_MODEL m;
 
struct sigaction action;
 
if (!arpIsInstalled) {
for (i = 0; i < ARP_MAX_ENTRIES; i++) arpTable[i].valid = FALSE;
 
/* ARP table mutex semaphore */
sem_init(&arpMutex, 0, 1);
 
netbuff_init(&arp_txbuff, 1, ARP_LEN);
 
ip_str2addr(localAddr,&myIpAddr);
 
eth_getAddress(&myEthAddr);
 
/* Retry task: periodic soft task, 1 s period (see arp_retry) */
soft_task_default_model(m);
soft_task_def_wcet(m,1000);
soft_task_def_period(m,1000000);
soft_task_def_met(m,1000);
soft_task_def_periodic(m);
soft_task_def_system(m);
soft_task_def_nokill(m);
s = task_create("ArpRetry",arp_retry,&m,NULL);
if (s == NIL) {
kern_printf("Cannot create ArpRetry\n");
sys_end();
l1_exit(-1);
}
 
eth_setProtocol(ETH_ARP_TYPE,arp_server_recv);
eth_str2Addr("FF:FF:FF:FF:FF:FF",&broadcast);
eth_str2Addr("00:00:00:00:00:00",&nulladdr);
 
// for (i = ARP_ERROR_BASE; i <= XARP_TABLE_FULL; i++)
// exc_set(i, arp_exc);
/* the table-full condition is delivered as a signal (see arp_exc) */
action.sa_flags = 0;
action.sa_handler = arp_exc;
sigfillset(&action.sa_mask); /* we block all the other signals... */
 
if (sigaction(SIGARPFULL, &action, NULL) == -1) {
perror("Error initializing ARP signal...");
sys_abort(AARPFULL);
}
 
task_activate(s);
arpIsInstalled = TRUE;
} else
cprintf("Arp: already installed.");
}
/shark/trunk/drivers/net/eth_priv.h
0,0 → 1,115
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: eth_priv.h,v 1.4 2004-05-11 14:30:49 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.4 $
Last update: $Date: 2004-05-11 14:30:49 $
------------
**/
 
/*
* Copyright (C) 2000 Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
/* Author: Luca Abeni */
/* Date: 4/12/1997 */
 
/* File: Eth.H */
/* Revision: 2.0 */
 
#ifndef __ETH_H__
#define __ETH_H__
 
#include "ll/sys/cdefs.h"
 
__BEGIN_DECLS
 
#define LOOPBACK 2
#define ETH_MAX_LEN 1528
#define ETH_ARP_TYPE 0x0806
#define ETH_IP_TYPE 0x0800
#define ETH_HRT_TYPE 0x8FFF
#define ETH_MAX_PROTOCOLS 5
 
/* 48-bit ethernet (MAC) address, stored as raw bytes. */
typedef struct eth_addr {
BYTE ad[6];
} ETH_ADDR;
 
/* 14-byte ethernet frame header (no VLAN tagging). */
typedef struct eth_header {
struct eth_addr dest;   /* destination MAC */
struct eth_addr source; /* source MAC */
WORD type;              /* ethertype, big-endian on the wire */
} ETH_HEADER;
 
/* Minimal per-device state kept by the legacy ethernet layer. */
typedef struct eth_device {
DWORD BaseAddress;    /* I/O base address of the card */
BYTE IntLine;         /* IRQ line */
struct eth_addr addr; /* MAC address of the card */
} ETH_DEVICE;
 
#define setEthAddr(q,w) memcpy(q.ad,w.ad,sizeof(struct eth_addr))
 
#define ETH_ERROR_BASE 150
#define ETH_DRIVER_NOT_FOUND (ETH_ERROR_BASE+0)
#define ETH_RXERROR (ETH_ERROR_BASE+1)
#define ETH_DRIVER_RELEASE (ETH_ERROR_BASE+2)
#define ETH_DRIVER_ADDRESS (ETH_ERROR_BASE+3)
#define ETH_TXERROR (ETH_ERROR_BASE+4)
#define ETH_PROTOCOL_ERROR (ETH_ERROR_BASE+5)
#define ETH_BUFFERS_FULL (ETH_ERROR_BASE+6)
#define ETH_NULLPROTOCOL_EXC (ETH_ERROR_BASE+7)
 
/*unsigned short htons(unsigned short host);
unsigned short ntohs(unsigned short net);*/
void eth_str2Addr(char *add, struct eth_addr *ds);
void eth_printHeader(struct eth_header *p);
void eth_printPkt(char *pkt,int len);
int eth_setProtocol(WORD type, void (*recv)(void *pkt));
void *eth_setHeader(void *b, struct eth_addr dest, WORD type);
int eth_sendPkt(void *p, int len);
void *eth_getFDB(void *pkt);
int eth_init(int mode, TASK_MODEL *m);
void eth_close(void *a);
void eth_getAddress(struct eth_addr *eth);
 
 
__END_DECLS
#endif
/shark/trunk/drivers/net/rtl8139.c
0,0 → 1,1481
/* rtl8139.c: A RealTek RTL8129/8139 Fast Ethernet driver for Linux. */
/*
Written 1997-1999 by Donald Becker.
 
This software may be used and distributed according to the terms
of the GNU Public License, incorporated herein by reference.
All other rights reserved.
 
This driver is for boards based on the RTL8129 and RTL8139 PCI ethernet
chips.
 
The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
Center of Excellence in Space Data and Information Sciences
Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
 
Support and updates available at
http://cesdis.gsfc.nasa.gov/linux/drivers/rtl8139.html
 
Twister-tuning table provided by Kinston <shangh@realtek.com.tw>.
*/
 
static const char *version =
"rtl8139.c:v1.07 5/6/99 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/rtl8139.html\n";

/* A few user-configurable values. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
//#define rtl8129_debug debug
/* Verbosity: 0 = quiet; larger values enable progressively more printk output. */
static int rtl8129_debug = 0;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8 /* More are supported, limit only on options */
/* Per-card overrides, -1 = unset.  In options[]: the low 4 bits select the
   media port and bit 9 (0x200) forces full duplex (see rtl8129_probe1). */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
 
/* Size of the in-memory receive ring. */
#define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K */
#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define TX_BUF_SIZE 1536
 
/* PCI Tuning Parameters
Threshold is bytes transferred to chip before transmission starts. */
#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
 
/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024. */
#define RX_FIFO_THRESH 4 /* Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 bytes */
#define TX_DMA_BURST 4 /* Calculate as 16<<val. */
 
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (4*HZ)
 
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
 
/* Kernel compatibility defines, some common to David Hind's PCMCIA package.
This is only in the support-all-kernels source code. */
 
#define RUN_AT(x) (jiffies + (x))
 
#include <linux/delay.h>
 
#if LINUX_VERSION_CODE < 0x20123
#define test_and_set_bit(val, addr) set_bit(val, addr)
#endif
#if LINUX_VERSION_CODE <= 0x20139
#define net_device_stats enet_statistics
#else
#define NETSTATS_VER2
#endif
#if LINUX_VERSION_CODE < 0x20155 || defined(CARDBUS)
/* Grrrr, the PCI code changed, but did not consider CardBus... */
#include <linux/bios32.h>
#define PCI_SUPPORT_VER1
#else
#define PCI_SUPPORT_VER2
#endif
#if LINUX_VERSION_CODE < 0x20159
#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
#else
#define dev_free_skb(skb) dev_kfree_skb(skb);
#endif
 
/* The I/O extent. */
#define RTL8129_TOTAL_SIZE 0x80
 
/*
Theory of Operation
 
I. Board Compatibility
 
This device driver is designed for the RealTek RTL8129, the RealTek Fast
Ethernet controllers for PCI. This chip is used on a few clone boards.
 
 
II. Board-specific settings
 
PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.
 
III. Driver operation
 
IIIa. Rx Ring buffers
 
The receive unit uses a single linear ring buffer rather than the more
common (and more efficient) descriptor-based architecture. Incoming frames
are sequentially stored into the Rx region, and the host copies them into
skbuffs.
 
Comment: While it is theoretically possible to process many frames in place,
any delay in Rx processing would cause us to drop frames. More importantly,
the Linux protocol stack is not designed to operate in this manner.
 
IIIb. Tx operation
 
The RTL8129 uses a fixed set of four Tx descriptors in register space.
In a stunningly bad design choice, Tx frames must be 32 bit aligned. Linux
aligns the IP header on word boundaries, and 14 byte ethernet header means
that almost all frames will need to be copied to an alignment buffer.
 
IVb. References
 
http://www.realtek.com.tw/cn/cn.html
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 
IVc. Errata
 
*/
/* Added by Nino - Begin */
extern int pci20to26_find_class(unsigned int class_code, int index, BYTE *bus, BYTE *dev);
extern int pci20to26_read_config_byte(unsigned int bus, unsigned int dev, int where, u8 *val);
extern int pci20to26_read_config_word(unsigned int bus, unsigned int dev, int where, u16 *val);
extern int pci20to26_read_config_dword(unsigned int bus, unsigned int dev, int where, u32 *val);
extern int pci20to26_write_config_byte(unsigned int bus, unsigned int dev, int where, u8 val);
extern int pci20to26_write_config_word(unsigned int bus, unsigned int dev, int where, u16 val);
extern int pci20to26_write_config_dword(unsigned int bus, unsigned int dev, int where, u32 val);
/* Added by Nino - End */
 
/* This table drives the PCI probe routines. It's mostly boilerplate in all
of the drivers, and will likely be provided by some future kernel.
Note the matching code -- the first table entry matches all 56** cards but
second only the 1234 card.
*/
/* Resource-decoding flags for pci_id_info.flags.  The low three bits are
   OR-ed directly into the PCI command register by the probe code. */
enum pci_flags_bit {
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
/* One row of the PCI probe table: vendor/device id pair (with mask) to
   match, resource flags, size of the I/O window to reserve, and the
   per-chip probe routine invoked on a match. */
struct pci_id_info {
	const char *name;
	u16 vendor_id, device_id, device_id_mask, flags;
	int io_size;
	struct device *(*probe1)(int pci_bus, int pci_devfn, struct device *dev,
							 long ioaddr, int irq, int chip_idx, int fnd_cnt);
};
 
static struct device * rtl8129_probe1(int pci_bus, int pci_devfn,
struct device *dev, long ioaddr,
int irq, int chp_idx, int fnd_cnt);
 
/* PCI probe table, one row per supported board; rtl8139_probe() stops at
   the first row whose (vendor, device & mask) pair matches.
   NOTE(review): the SMC1211TX and Accton MPX5030 rows carry identical
   ids (0x1113/0x1211), so the Accton row can never be selected by the
   first-match loop -- verify the intended Accton ids against the vendor
   documentation. */
static struct pci_id_info pci_tbl[] =
{{ "RealTek RTL8129 Fast Ethernet",
   0x10ec, 0x8129, 0xffff, PCI_USES_IO|PCI_USES_MASTER, 0x80, rtl8129_probe1},
 { "RealTek RTL8139 Fast Ethernet",
   0x10ec, 0x8139, 0xffff, PCI_USES_IO|PCI_USES_MASTER, 0x80, rtl8129_probe1},
 { "SMC1211TX EZCard 10/100 (RealTek RTL8139)",
   0x1113, 0x1211, 0xffff, PCI_USES_IO|PCI_USES_MASTER, 0x80, rtl8129_probe1},
 { "Accton MPX5030 (RealTek RTL8139)",
   0x1113, 0x1211, 0xffff, PCI_USES_IO|PCI_USES_MASTER, 0x80, rtl8129_probe1},
 {0,}, /* 0 terminated list. */
};
 
/* The capability table matches the chip table above: one entry per real
 * row of pci_tbl[], indexed by the same chip_idx that rtl8129_probe1()
 * uses (rtl_cap_tbl[chip_idx]).
 * BUG FIX: pci_tbl[] has FOUR real entries (RTL8129, RTL8139, SMC1211TX,
 * Accton MPX5030) but this table previously had only three, so probing
 * the fourth board read past the end of the array.  The Accton board is
 * RTL8139-based, hence the same capabilities as the other 8139 rows. */
enum {HAS_MII_XCVR=0x01, HAS_CHIP_XCVR=0x02, HAS_LNK_CHNG=0x04};
static int rtl_cap_tbl[] = {
	HAS_MII_XCVR,               /* RTL8129: external MII transceiver */
	HAS_CHIP_XCVR|HAS_LNK_CHNG, /* RTL8139 */
	HAS_CHIP_XCVR|HAS_LNK_CHNG, /* SMC1211TX (RTL8139 core) */
	HAS_CHIP_XCVR|HAS_LNK_CHNG, /* Accton MPX5030 (RTL8139 core) */
};
 
 
/* The rest of these values should never change. */
#define NUM_TX_DESC 4 /* Number of Tx descriptor registers. */

/* Symbolic offsets to registers. */
enum RTL8129_registers {
	MAC0=0, /* Ethernet hardware address. */
	MAR0=8, /* Multicast filter. */
	TxStatus0=0x10, /* Transmit status (Four 32bit registers). */
	TxAddr0=0x20, /* Tx descriptors (also four 32bit). */
	RxBuf=0x30, RxEarlyCnt=0x34, RxEarlyStatus=0x36,
	ChipCmd=0x37, RxBufPtr=0x38, RxBufAddr=0x3A,
	IntrMask=0x3C, IntrStatus=0x3E,
	TxConfig=0x40, RxConfig=0x44,
	Timer=0x48, /* A general-purpose counter. */
	RxMissed=0x4C, /* 24 bits valid, write clears. */
	Cfg9346=0x50, Config0=0x51, Config1=0x52,
	FlashReg=0x54, GPPinData=0x58, GPPinDir=0x59, MII_SMI=0x5A, HltClk=0x5B,
	MultiIntr=0x5C, TxSummary=0x60,
	MII_BMCR=0x62, MII_BMSR=0x64, NWayAdvert=0x66, NWayLPAR=0x68,
	NWayExpansion=0x6A,
	/* Undocumented registers, but required for proper operation. */
	FIFOTMS=0x70, /* FIFO Test Mode Select */
	CSCR=0x74, /* Chip Status and Configuration Register. */
	PARA78=0x78, PARA7c=0x7c, /* Magic transceiver parameter register. */
};

/* Bits in the ChipCmd (0x37) register. */
enum ChipCmdBits {
	CmdReset=0x10, CmdRxEnb=0x08, CmdTxEnb=0x04, RxBufEmpty=0x01, };

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatusBits {
	PCIErr=0x8000, PCSTimeout=0x4000,
	RxFIFOOver=0x40, RxUnderrun=0x20, RxOverflow=0x10,
	TxErr=0x08, TxOK=0x04, RxErr=0x02, RxOK=0x01,
};
/* Bits read back from the per-descriptor TxStatus0..3 registers. */
enum TxStatusBits {
	TxHostOwns=0x2000, TxUnderrun=0x4000, TxStatOK=0x8000,
	TxOutOfWindow=0x20000000, TxAborted=0x40000000, TxCarrierLost=0x80000000,
};
/* Bits in the per-packet Rx status header at the front of each frame. */
enum RxStatusBits {
	RxMulticast=0x8000, RxPhysical=0x4000, RxBroadcast=0x2000,
	RxBadSymbol=0x0020, RxRunt=0x0010, RxTooLong=0x0008, RxCRCErr=0x0004,
	RxBadAlign=0x0002, RxStatusOK=0x0001,
};

/* Twister tuning parameters from RealTek.
Completely undocumented, but required to tune bad links. */
enum CSCRBits {
	CSCR_LinkOKBit=0x0400, CSCR_LinkChangeBit=0x0800,
	CSCR_LinkStatusBits=0x0f000, CSCR_LinkDownOffCmd=0x003c0,
	CSCR_LinkDownCmd=0x0f3c0,
};
/* Twister tuning values written to PARA7c, used only by the (currently
   #if 0'd) twister code in rtl8129_timer().
   NOTE(review): these literals have 9 hex digits (36 bits) and therefore
   silently truncate on a 32-bit unsigned long; they appear this way in
   the original Becker driver as well -- confirm against the vendor table
   before enabling the twister code. */
unsigned long param[4][4]={
	{0x0cb39de43,0x0cb39ce43,0x0fb38de03,0x0cb38de43},
	{0x0cb39de43,0x0cb39ce43,0x0cb39ce83,0x0cb39ce83},
	{0x0cb39de43,0x0cb39ce43,0x0cb39ce83,0x0cb39ce83},
	{0x0bb39de43,0x0bb39ce43,0x0bb39ce83,0x0bb39ce83}
};
 
/* Per-device driver state, hung off dev->priv by rtl8129_probe1(). */
struct rtl8129_private {
	char devname[8]; /* Used only for kernel debugging. */
	const char *product_name;
	struct device *next_module; /* Link in the root_rtl8129_dev list. */
	int chip_id; /* Index into pci_tbl[] / rtl_cap_tbl[]. */
	int chip_revision;
	unsigned char pci_bus, pci_devfn;
#if LINUX_VERSION_CODE > 0x20139
	struct net_device_stats stats;
#else
	struct enet_statistics stats;
#endif
	struct timer_list timer; /* Media selection timer. */
	unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
	unsigned int cur_tx, dirty_tx, tx_flag; /* Tx ring head/tail; tx_flag holds the FIFO threshold bits. */
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[NUM_TX_DESC];
	unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
	unsigned char *rx_ring;
	unsigned char *tx_bufs; /* Tx bounce buffer region. */
	char phys[4]; /* MII device addresses. */
	char twistie, twist_cnt; /* Twister tune state. */
	unsigned int tx_full:1; /* The Tx queue is full. */
	unsigned int full_duplex:1; /* Full-duplex operation requested. */
	unsigned int duplex_lock:1; /* Duplex forced by user option; skip autodetect. */
	unsigned int default_port:4; /* Last dev->if_port value. */
	unsigned int media2:4; /* Secondary monitored media port. */
	unsigned int medialock:1; /* Don't sense media type. */
	unsigned int mediasense:1; /* Media sensing in progress. */
};
 
#ifdef MODULE
#if LINUX_VERSION_CODE > 0x20115
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
MODULE_DESCRIPTION("RealTek RTL8129/8139 Fast Ethernet driver");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(debug, "i");
#endif
#endif
 
static int rtl8129_open(struct device *dev);
static int read_eeprom(long ioaddr, int location);
static int mdio_read(struct device *dev, int phy_id, int location);
static void mdio_write(struct device *dev, int phy_id, int location, int val);
static void rtl8129_timer(unsigned long data);
static void rtl8129_tx_timeout(struct device *dev);
static void rtl8129_init_ring(struct device *dev);
static int rtl8129_start_xmit(struct sk_buff *skb, struct device *dev);
static int rtl8129_rx(struct device *dev);
static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int rtl8129_close(struct device *dev);
static int mii_ioctl(struct device *dev, struct ifreq *rq, int cmd);
static struct enet_statistics *rtl8129_get_stats(struct device *dev);
static inline u32 ether_crc(int length, unsigned char *data);
static void set_rx_mode(struct device *dev);
 
/* A list of all installed RTL8129 devices, for removing the driver module. */
static struct device *root_rtl8129_dev = NULL;
 
/* Ideally we would detect all network cards in slot order. That would
be best done a central PCI probe dispatch, which wouldn't work
well when dynamically adding drivers. So instead we detect just the
Rtl81*9 cards in slot order. */
 
int rtl8139_probe(struct device *dev)
{
int cards_found = 0;
int pci_index = 0;
unsigned char pci_bus, pci_device_fn;
 
if ( ! pcibios_present())
return -ENODEV;
 
for (; pci_index < 0x0f; pci_index++) { // 0x0f was 0xff
u16 vendor, device, pci_command, new_command;
int chip_idx, irq;
long ioaddr;
 
if (pci20to26_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
&pci_bus, &pci_device_fn)
!= PCIBIOS_SUCCESSFUL)
break;
pci20to26_read_config_word(pci_bus, pci_device_fn,
PCI_VENDOR_ID, &vendor);
pci20to26_read_config_word(pci_bus, pci_device_fn,
PCI_DEVICE_ID, &device);
 
for (chip_idx = 0; pci_tbl[chip_idx].vendor_id; chip_idx++)
if (vendor == pci_tbl[chip_idx].vendor_id
&& (device & pci_tbl[chip_idx].device_id_mask) ==
pci_tbl[chip_idx].device_id)
break;
if (pci_tbl[chip_idx].vendor_id == 0) /* Compiled out! */
continue;
 
{
#if 0 //defined(PCI_SUPPORT_VER2)
struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
ioaddr = pdev->base_address[0] & ~3;
irq = pdev->irq;
#else
u32 pci_ioaddr;
u8 pci_irq_line;
pci20to26_read_config_byte(pci_bus, pci_device_fn,
PCI_INTERRUPT_LINE, &pci_irq_line);
pci20to26_read_config_dword(pci_bus, pci_device_fn,
PCI_BASE_ADDRESS_0, &pci_ioaddr);
ioaddr = pci_ioaddr & ~3;
irq = pci_irq_line;
#endif
}
 
if ((pci_tbl[chip_idx].flags & PCI_USES_IO) &&
check_region(ioaddr, pci_tbl[chip_idx].io_size))
continue;
 
/* Activate the card: fix for brain-damaged Win98 BIOSes. */
pci20to26_read_config_word(pci_bus, pci_device_fn,
PCI_COMMAND, &pci_command);
new_command = pci_command | (pci_tbl[chip_idx].flags & 7);
if (pci_command != new_command) {
printk(KERN_INFO " The PCI BIOS has not enabled the"
" device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
pci_bus, pci_device_fn, pci_command, new_command);
pci20to26_write_config_word(pci_bus, pci_device_fn,
PCI_COMMAND, new_command);
}
 
dev = pci_tbl[chip_idx].probe1(pci_bus, pci_device_fn, dev, ioaddr,
irq, chip_idx, cards_found);
 
if (dev && (pci_tbl[chip_idx].flags & PCI_COMMAND_MASTER)) {
u8 pci_latency;
pci20to26_read_config_byte(pci_bus, pci_device_fn,
PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < 32) {
printk(KERN_NOTICE " PCI latency timer (CFLT) is "
"unreasonably low at %d. Setting to 64 clocks.\n",
pci_latency);
pci20to26_write_config_byte(pci_bus, pci_device_fn,
PCI_LATENCY_TIMER, 64);
}
}
dev = 0;
cards_found++;
}
 
return cards_found ? 0 : -ENODEV;
}
 
/* Per-board initialization: register an ether device, read the station
 * address (from the serial EEPROM, falling back to the MAC0 registers),
 * allocate and populate the driver-private state, scan for MII
 * transceivers, then park the chip in low-power mode until open().
 * Returns the (possibly newly allocated) struct device. */
static struct device *rtl8129_probe1(int pci_bus, int pci_devfn,
									 struct device *dev, long ioaddr,
									 int irq, int chip_idx, int found_cnt)
{
	static int did_version = 0; /* Already printed version info. */
	struct rtl8129_private *tp;
	int i, option = found_cnt < MAX_UNITS ? options[found_cnt] : 0;

	if (rtl8129_debug > 0 && did_version++ == 0)
		printk(KERN_INFO "%s", version);

	dev = init_etherdev(dev, 0);

	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
		   dev->name, pci_tbl[chip_idx].name, ioaddr, irq);

	/* Bring the chip out of low-power mode. */
	outb(0x00, ioaddr + Config1);

	/* Word 0 reads as 0xffff when no EEPROM is attached; the station
	   address lives in EEPROM words 7..9. */
	if (read_eeprom(ioaddr, 0) != 0xffff) {
		for (i = 0; i < 3; i++) {
			((u16 *)(dev->dev_addr))[i] =
				le16_to_cpu(read_eeprom(ioaddr, i + 7));
		}
	} else {
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = inb(ioaddr + MAC0 + i);
	}
	/* Print "xx:xx:xx:xx:xx:" then the final byte; relies on i == 5 here. */
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x.\n", dev->dev_addr[i]);

	/* We do a request_region() to register /proc/ioports info. */
	request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);

	dev->base_addr = ioaddr;
	dev->irq = irq;

	/* Some data structures must be quadword aligned. */
	tp = kmalloc(sizeof(*tp), GFP_KERNEL | GFP_DMA);
	/* NOTE(review): kmalloc result is not checked; memset below would
	   dereference NULL on OOM -- consider adding a failure path. */
	memset(tp, 0, sizeof(*tp));
	dev->priv = tp;

	/* Prepend to the global list used for module unload. */
	tp->next_module = root_rtl8129_dev;
	root_rtl8129_dev = dev;

	tp->chip_id = chip_idx;
	tp->pci_bus = pci_bus;
	tp->pci_devfn = pci_devfn;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes too much time. */
	if (rtl_cap_tbl[chip_idx] & HAS_MII_XCVR) {
		int phy, phy_idx;
		/* sizeof(tp->phys) == 4: record at most four PHY addresses. */
		for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys);
			 phy++) {
			int mii_status = mdio_read(dev, phy, 1);
			/* 0x0000 / 0xffff in BMSR means no PHY at this address. */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				tp->phys[phy_idx++] = phy;
				printk(KERN_INFO "%s: MII transceiver found at address %d.\n",
					   dev->name, phy);
			}
		}
		if (phy_idx == 0) {
			printk(KERN_INFO "%s: No MII transceivers found!  Assuming SYM "
				   "transceiver.\n",
				   dev->name);
			tp->phys[0] = -1;
		}
	} else
		tp->phys[0] = 32; /* >31 tells mdio_read/write to use the 8139's internal registers. */

	/* Put the chip into low-power mode. */
	outb(0xC0, ioaddr + Cfg9346);
	outb(0x03, ioaddr + Config1);
	outb('H', ioaddr + HltClk); /* 'R' would leave the clock running. */

	/* The lower four bits are the media type. */
	if (option > 0) {
		tp->full_duplex = (option & 0x200) ? 1 : 0;
		tp->default_port = option & 15;
		if (tp->default_port)
			tp->medialock = 1;
	}

	if (found_cnt < MAX_UNITS  &&  full_duplex[found_cnt] > 0)
		tp->full_duplex = full_duplex[found_cnt];

	if (tp->full_duplex) {
		printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
		/* Force advertisement of 10/100 full duplex only. */
		mdio_write(dev, tp->phys[0], 4, 0x141);
		tp->duplex_lock = 1;
	}

	/* The Rtl8129-specific entries in the device structure. */
	dev->open = &rtl8129_open;
	dev->hard_start_xmit = &rtl8129_start_xmit;
	dev->stop = &rtl8129_close;
	dev->get_stats = &rtl8129_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &mii_ioctl;

	return dev;
}
/* Serial EEPROM section. */
 
/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
#define EE_CS 0x08 /* EEPROM chip select. */
#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
#define EE_WRITE_0 0x00
#define EE_WRITE_1 0x02
#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
#define EE_ENB (0x80 | EE_CS)
 
/* Delay between EEPROM clock transitions.
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
*/
 
#define eeprom_delay() inl(ee_addr)
 
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD (5 << 6)
#define EE_READ_CMD (6 << 6)
#define EE_ERASE_CMD (7 << 6)
 
/* Read one 16-bit word from the 93C46-style serial EEPROM behind the
 * Cfg9346 register, by bit-banging the 3-wire protocol: raise chip
 * select, clock out the 11-bit read command (start bit + opcode +
 * address, MSB first), then clock in 16 data bits.
 * `location` is the word address; returns the word read. */
static int read_eeprom(long ioaddr, int location)
{
	int i;
	unsigned retval = 0;
	long ee_addr = ioaddr + Cfg9346;
	int read_cmd = location | EE_READ_CMD;

	outb(EE_ENB & ~EE_CS, ee_addr);
	outb(EE_ENB, ee_addr); /* Assert chip select. */

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		outb(EE_ENB | dataval, ee_addr);
		eeprom_delay();
		outb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); /* EEPROM samples on the rising edge. */
		eeprom_delay();
	}
	outb(EE_ENB, ee_addr);
	eeprom_delay();

	/* Clock in the 16 data bits, MSB first. */
	for (i = 16; i > 0; i--) {
		outb(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
		retval = (retval << 1) | ((inb(ee_addr) & EE_DATA_READ) ? 1 : 0);
		outb(EE_ENB, ee_addr);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	outb(~EE_CS, ee_addr);
	return retval;
}
 
/* MII serial management: mostly bogus for now. */
/* Read and write the MII management registers using software-generated
serial MDIO protocol.
The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
met by back-to-back PCI I/O cycles, but we insert a delay to avoid
"overclocking" issues. */
#define MDIO_DIR 0x80
#define MDIO_DATA_OUT 0x04
#define MDIO_DATA_IN 0x02
#define MDIO_CLK 0x01
#define MDIO_WRITE0 (MDIO_DIR)
#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
 
#define mdio_delay() inb(mdio_addr)
 
/* Maps standard MII register numbers 0..7 to the RTL8139's equivalent
   internal register offsets; 0 marks registers with no 8139 equivalent. */
static char mii_2_8139_map[8] = {MII_BMCR, MII_BMSR, 0, 0, NWayAdvert,
								 NWayLPAR, NWayExpansion, 0 };
 
/* Synchronize the MII management interface by shifting 32 one bits out. */
/* Bring the MII management interface to a known idle state by clocking
   out a long preamble of '1' bits before each management frame. */
static void mdio_sync(long mdio_addr)
{
	int bit;

	for (bit = 0; bit <= 32; bit++) {
		outb(MDIO_WRITE1, mdio_addr);            /* data high, clock low */
		mdio_delay();
		outb(MDIO_WRITE1 | MDIO_CLK, mdio_addr); /* PHY samples on rising edge */
		mdio_delay();
	}
}
/* Read MII management register `location` from PHY `phy_id` by
 * bit-banging the MDIO protocol on the MII_SMI register.
 * A phy_id above 31 means "no external PHY" (set by probe1 for 8139s):
 * the equivalent internal 8139 register is read directly instead.
 * Returns the 16-bit register value. */
static int mdio_read(struct device *dev, int phy_id, int location)
{
	long mdio_addr = dev->base_addr + MII_SMI;
	/* 0xf6<<10: preamble-terminator + start + read-opcode bit pattern. */
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int retval = 0;
	int i;

	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
		return location < 8 && mii_2_8139_map[location] ?
			inw(dev->base_addr + mii_2_8139_map[location]) : 0;
	}
	mdio_sync(mdio_addr);
	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;

		outb(MDIO_DIR | dataval, mdio_addr);
		mdio_delay();
		outb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
		mdio_delay();
	}

	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		outb(0, mdio_addr); /* MDIO_DIR low: bus turned around for input. */
		mdio_delay();
		retval = (retval << 1) | ((inb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
		outb(MDIO_CLK, mdio_addr);
		mdio_delay();
	}
	/* Drop the trailing idle bit and mask to the 16 data bits. */
	return (retval>>1) & 0xffff;
}
 
/* Write `value` to MII management register `location` of PHY `phy_id`
 * by bit-banging the 32-bit MDIO write frame on the MII_SMI register.
 * A phy_id above 31 means "no external PHY": the equivalent internal
 * 8139 register is written directly instead. */
static void mdio_write(struct device *dev, int phy_id, int location, int value)
{
	long mdio_addr = dev->base_addr + MII_SMI;
	/* 0x5002<<16: start + write-opcode + turnaround bits of the frame. */
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (phy_id > 31) {	/* Really a 8139.  Use internal registers. */
		if (location < 8  &&  mii_2_8139_map[location])
			outw(value, dev->base_addr + mii_2_8139_map[location]);
		return;
	}
	mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
		outb(dataval, mdio_addr);
		mdio_delay();
		outb(dataval | MDIO_CLK, mdio_addr); /* PHY samples on rising edge. */
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		outb(0, mdio_addr);
		mdio_delay();
		outb(MDIO_CLK, mdio_addr);
		mdio_delay();
	}
	return;
}
 
static int
rtl8129_open(struct device *dev)
{
struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
long ioaddr = dev->base_addr;
int i;
 
/* Soft reset the chip. */
outb(CmdReset, ioaddr + ChipCmd);
 
if (request_irq(dev->irq, &rtl8129_interrupt, SA_SHIRQ, dev->name, dev)) {
return -EAGAIN;
}
 
MOD_INC_USE_COUNT;
 
tp->tx_bufs = kmalloc(TX_BUF_SIZE * NUM_TX_DESC, GFP_KERNEL);
tp->rx_ring = kmalloc(RX_BUF_LEN + 16, GFP_KERNEL);
if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
free_irq(dev->irq, dev);
if (tp->tx_bufs)
kfree(tp->tx_bufs);
if (rtl8129_debug > 0)
printk(KERN_ERR "%s: Couldn't allocate a %d byte receive ring.\n",
dev->name, RX_BUF_LEN);
return -ENOMEM;
}
rtl8129_init_ring(dev);
 
/* Check that the chip has finished the reset. */
for (i = 1000; i > 0; i--)
if ((inb(ioaddr + ChipCmd) & CmdReset) == 0)
break;
 
for (i = 0; i < 6; i++)
outb(dev->dev_addr[i], ioaddr + MAC0 + i);
 
/* Must enable Tx/Rx before setting transfer thresholds! */
outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
outl((RX_FIFO_THRESH << 13) | (RX_BUF_LEN_IDX << 11) | (RX_DMA_BURST<<8),
ioaddr + RxConfig);
outl((TX_DMA_BURST<<8)|0x03000000, ioaddr + TxConfig);
tp->tx_flag = (TX_FIFO_THRESH<<11) & 0x003f0000;
 
tp->full_duplex = tp->duplex_lock;
if (tp->phys[0] >= 0 || (rtl_cap_tbl[tp->chip_id] & HAS_MII_XCVR)) {
u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
if (mii_reg5 == 0xffff)
; /* Not there */
else if ((mii_reg5 & 0x0100) == 0x0100
|| (mii_reg5 & 0x00C0) == 0x0040)
tp->full_duplex = 1;
if (rtl8129_debug > 1)
printk(KERN_INFO"%s: Setting %s%s-duplex based on"
" auto-negotiated partner ability %4.4x.\n", dev->name,
mii_reg5 == 0 ? "" :
(mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
tp->full_duplex ? "full" : "half", mii_reg5);
}
 
outb(0xC0, ioaddr + Cfg9346);
outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
outb(0x00, ioaddr + Cfg9346);
 
outl(virt_to_bus(tp->rx_ring), ioaddr + RxBuf);
 
/* Start the chip's Tx and Rx process. */
outl(0, ioaddr + RxMissed);
set_rx_mode(dev);
 
outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
 
dev->tbusy = 0;
dev->interrupt = 0;
dev->start = 1;
 
/* Enable all known interrupts by setting the interrupt mask. */
outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
| TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
 
if (rtl8129_debug > 1)
printk(KERN_DEBUG"%s: rtl8129_open() ioaddr %#lx IRQ %d"
" GP Pins %2.2x %s-duplex.\n",
dev->name, ioaddr, dev->irq, inb(ioaddr + GPPinData),
tp->full_duplex ? "full" : "half");
 
/* Set the timer to switch to check for link beat and perhaps switch
to an alternate media type. */
init_timer(&tp->timer);
tp->timer.expires = RUN_AT((24*HZ)/10); // 2.4 sec.
tp->timer.data = (unsigned long)dev;
tp->timer.function = &rtl8129_timer;
add_timer(&tp->timer);
 
return 0;
}
 
/* Periodic media-watch timer: track MII duplex changes, detect a blocked
 * interrupt line, fire the Tx watchdog, and (in the disabled twister
 * section) retune marginal links.  Re-arms itself before returning.
 *
 * FIXES: two statements in the #if 0 twister block used '==' where '='
 * was intended ("tp->twistie == row + 3;" and "tp->twistie == 3 + 3;"),
 * making them no-effect comparisons.  The block is compiled out today
 * (it also references undefined FIFOTMS_default/PARA78_default/
 * PARA7c_default/PARA7c_xxx constants), but the typos are corrected so
 * they do not bite whoever re-enables it. */
static void rtl8129_timer(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 60*HZ;
	int mii_reg5 = mdio_read(dev, tp->phys[0], 5);

	/* Follow the link partner's duplex unless the user forced one. */
	if (! tp->duplex_lock  &&  mii_reg5 != 0xffff) {
		int duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
		if (tp->full_duplex != duplex) {
			tp->full_duplex = duplex;
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
				   " partner ability of %4.4x.\n", dev->name,
				   tp->full_duplex ? "full" : "half", tp->phys[0], mii_reg5);
			outb(0xC0, ioaddr + Cfg9346);
			outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
			outb(0x00, ioaddr + Cfg9346);
		}
	}
	/* Check for bogusness: pending Tx/Rx events mean the IRQ never fired. */
	if (inw(ioaddr + IntrStatus) & (TxOK | RxOK)) {
		int status = inw(ioaddr + IntrStatus);	/* Double check */
		if (status & (TxOK | RxOK)) {
			printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
				   dev->name, status);
			rtl8129_interrupt(dev->irq, dev, 0);
		}
	}
	if (dev->tbusy  &&  jiffies - dev->trans_start >= 2*TX_TIMEOUT)
		rtl8129_tx_timeout(dev);

#if 0
	/* RealTek "twister" link re-tuning state machine -- disabled. */
	if (tp->twistie) {
		unsigned int CSCRval = inw(ioaddr + CSCR);	/* Read link status. */
		if (tp->twistie == 1) {
			if (CSCRval & CSCR_LinkOKBit) {
				outw(CSCR_LinkDownOffCmd, ioaddr + CSCR);
				tp->twistie = 2;
				next_tick = HZ/10;
			} else {
				outw(CSCR_LinkDownCmd, ioaddr + CSCR);
				outl(FIFOTMS_default,ioaddr + FIFOTMS);
				outl(PARA78_default ,ioaddr + PARA78);
				outl(PARA7c_default ,ioaddr + PARA7c);
				tp->twistie = 0;
			}
		} else if (tp->twistie == 2) {
			/* NOTE(review): linkcase has already been shifted right by 12,
			   so the >= 0x7000 comparisons below look off by that shift --
			   verify against the vendor's twister recipe before enabling. */
			int linkcase = (CSCRval & CSCR_LinkStatusBits) >> 12;
			int row;
			if (linkcase >= 0x7000) row = 3;
			else if (linkcase >= 0x3000) row = 2;
			else if (linkcase >= 0x1000) row = 1;
			else row = 0;
			tp->twistie = row + 3;		/* FIX: was '==' (no effect). */
			outw(0,ioaddr+FIFOTMS);
			outl(param[row][0], ioaddr+PARA7c);
			tp->twist_cnt = 1;
		} else {
			outl(param[tp->twistie-3][tp->twist_cnt], ioaddr+PARA7c);
			if (++tp->twist_cnt < 4) {
				next_tick = HZ/10;
			} else if (tp->twistie-3 == 3) {
				if ((CSCRval & CSCR_LinkStatusBits) != 0x7000) {
					outl(PARA7c_xxx, ioaddr+PARA7c);
					next_tick = HZ/10;			/* 100ms. */
					outl(FIFOTMS_default, ioaddr+FIFOTMS);
					outl(PARA78_default, ioaddr+PARA78);
					outl(PARA7c_default, ioaddr+PARA7c);
					tp->twistie = 3 + 3;		/* FIX: was '==' (no effect). */
					outw(0,ioaddr+FIFOTMS);
					outl(param[3][0], ioaddr+PARA7c);
					tp->twist_cnt = 1;
				}
			}
		}
	}
#endif

	if (rtl8129_debug > 2) {
		if (rtl_cap_tbl[tp->chip_id] & HAS_MII_XCVR)
			printk(KERN_DEBUG"%s: Media selection tick, GP pins %2.2x.\n",
				   dev->name, inb(ioaddr + GPPinData));
		else
			printk(KERN_DEBUG"%s: Media selection tick, Link partner %4.4x.\n",
				   dev->name, inw(ioaddr + NWayLPAR));
		printk(KERN_DEBUG"%s:  Other registers are IntMask %4.4x IntStatus %4.4x"
			   " RxStatus %4.4x.\n",
			   dev->name, inw(ioaddr + IntrMask), inw(ioaddr + IntrStatus),
			   inl(ioaddr + RxEarlyStatus));
		printk(KERN_DEBUG"%s:  Chip config %2.2x %2.2x.\n",
			   dev->name, inb(ioaddr + Config0), inb(ioaddr + Config1));
	}

	/* Re-arm for the next tick. */
	tp->timer.expires = RUN_AT(next_tick);
	add_timer(&tp->timer);
}
 
/* Tx watchdog: called when a transmit has been pending too long.
 * Dumps diagnostic state, soft-resets the chip, reprograms the MAC
 * address and thresholds, then requeues any packets that were still
 * waiting in the Tx descriptors before the reset. */
static void rtl8129_tx_timeout(struct device *dev)
{
	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int mii_reg, i;

	if (rtl8129_debug > 0)
		printk(KERN_WARNING "%s: Transmit timeout, status %2.2x %4.4x "
			   "media %2.2x.\n",
			   dev->name, inb(ioaddr + ChipCmd), inw(ioaddr + IntrStatus),
			   inb(ioaddr + GPPinData));

	/* Disable interrupts by clearing the interrupt mask. */
	outw(0x0000, ioaddr + IntrMask);
	/* Emit info to figure out what went wrong. */
	printk("%s: Tx queue start entry %d  dirty entry %d.\n",
		   dev->name, tp->cur_tx, tp->dirty_tx);
	for (i = 0; i < NUM_TX_DESC; i++)
		printk(KERN_DEBUG"%s:  Tx descriptor %d is %8.8x.%s\n",
			   dev->name, i, inl(ioaddr + TxStatus0 + i*4),
			   i == tp->dirty_tx % NUM_TX_DESC ? " (queue head)" : "");
	printk(KERN_DEBUG"%s: MII #%d registers are:", dev->name, tp->phys[0]);
	for (mii_reg = 0; mii_reg < 8; mii_reg++)
		printk(" %4.4x", mdio_read(dev, tp->phys[0], mii_reg));
	printk(".\n");

	/* Soft reset the chip. */
	outb(CmdReset, ioaddr + ChipCmd);
	/* Check that the chip has finished the reset. */
	for (i = 1000; i > 0; i--)
		if ((inb(ioaddr + ChipCmd) & CmdReset) == 0)
			break;
	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + MAC0 + i);

	outb(0x00, ioaddr + Cfg9346);
	tp->cur_rx = 0;
	/* Must enable Tx/Rx before setting transfer thresholds! */
	outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
	outl((RX_FIFO_THRESH << 13) | (RX_BUF_LEN_IDX << 11) | (RX_DMA_BURST<<8),
		 ioaddr + RxConfig);
	outl((TX_DMA_BURST<<8), ioaddr + TxConfig);
	set_rx_mode(dev);
	{	/* Save the unsent Tx packets. */
		/* cur_tx - dirty_tx can never exceed NUM_TX_DESC, so saved_skb[]
		   cannot overflow. */
		struct sk_buff *saved_skb[NUM_TX_DESC], *skb;
		int j;
		for (j = 0; tp->cur_tx - tp->dirty_tx > 0 ; j++, tp->dirty_tx++)
			saved_skb[j] = tp->tx_skbuff[tp->dirty_tx % NUM_TX_DESC];
		tp->dirty_tx = tp->cur_tx = 0;

		/* Re-hand the saved packets to the (now reset) descriptors. */
		for (i = 0; i < j; i++) {
			skb = tp->tx_skbuff[i] = saved_skb[i];
			if ((long)skb->data & 3) {		/* Must use alignment buffer. */
				memcpy(tp->tx_buf[i], skb->data, skb->len);
				outl(virt_to_bus(tp->tx_buf[i]), ioaddr + TxAddr0 + i*4);
			} else
				outl(virt_to_bus(skb->data), ioaddr + TxAddr0 + i*4);
			/* Note: the chip doesn't have auto-pad! */
			outl(tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
				 ioaddr + TxStatus0 + i*4);
		}
		tp->cur_tx = i;
		while (i < NUM_TX_DESC)
			tp->tx_skbuff[i++] = 0;
		if (tp->cur_tx - tp->dirty_tx < NUM_TX_DESC) {/* Typical path */
			dev->tbusy = 0;
			tp->tx_full = 0;
		} else {
			tp->tx_full = 1;
		}
	}

	dev->trans_start = jiffies;
	tp->stats.tx_errors++;
	/* Enable all known interrupts by setting the interrupt mask. */
	outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
		 | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
	return;
}
 
 
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Reset the driver's ring bookkeeping: clear the Tx-full flag and all
   ring indices, detach any stale sk_buff pointers, and carve the bounce
   region (tx_bufs) into one TX_BUF_SIZE slice per Tx descriptor. */
static void
rtl8129_init_ring(struct device *dev)
{
	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
	int entry;

	tp->tx_full = 0;
	tp->cur_rx = 0;
	tp->cur_tx = 0;
	tp->dirty_tx = 0;

	for (entry = 0; entry < NUM_TX_DESC; entry++) {
		tp->tx_skbuff[entry] = 0;
		tp->tx_buf[entry] = tp->tx_bufs + entry * TX_BUF_SIZE;
	}
}
 
/* Queue one packet for transmission on the next free Tx descriptor.
 * Copies to a bounce buffer when skb->data is not 32-bit aligned (the
 * chip DMA requires dword alignment), pads short frames to ETH_ZLEN,
 * and marks the device busy when all NUM_TX_DESC slots fill up.
 * Returns 0 on success, 1 if the device was busy (caller requeues). */
static int
rtl8129_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int entry;

	/* Block a timer-based transmit from overlapping.  This could better be
	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
	if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
		if (jiffies - dev->trans_start >= TX_TIMEOUT)
			rtl8129_tx_timeout(dev);
		return 1;
	}

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % NUM_TX_DESC;

	tp->tx_skbuff[entry] = skb;
	if ((long)skb->data & 3) {			/* Must use alignment buffer. */
		memcpy(tp->tx_buf[entry], skb->data, skb->len);
		outl(virt_to_bus(tp->tx_buf[entry]), ioaddr + TxAddr0 + entry*4);
	} else
		outl(virt_to_bus(skb->data), ioaddr + TxAddr0 + entry*4);
	/* Note: the chip doesn't have auto-pad! */
	/* Writing TxStatus hands the descriptor to the chip and starts DMA. */
	outl(tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
		 ioaddr + TxStatus0 + entry*4);

	if (++tp->cur_tx - tp->dirty_tx < NUM_TX_DESC) {	/* Typical path */
		clear_bit(0, (void*)&dev->tbusy);
	} else {
		tp->tx_full = 1;
	}

	dev->trans_start = jiffies;
	if (rtl8129_debug > 4)
		printk(KERN_DEBUG"%s: Queued Tx packet at %p size %d to slot %d.\n",
			   dev->name, skb->data, (int)skb->len, entry);

	return 0;
}
 
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct device *dev = (struct device *)dev_instance;
	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
	int boguscnt = max_interrupt_work;	/* bound on loop iterations per IRQ */
	int status, link_changed = 0;
	long ioaddr = dev->base_addr;

#if defined(__i386__)
	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		dev->interrupt = 0;	/* Avoid halting machine. */
		return;
	}
#else
	/* Non-atomic re-entrancy guard for other architectures. */
	if (dev->interrupt) {
		printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
		return;
	}
	dev->interrupt = 1;
#endif

	do {
		status = inw(ioaddr + IntrStatus);
		/* Acknowledge all of the current interrupt sources ASAP, but
		   first latch the extra link-change status bit from CSCR. */
		if ((status & RxUnderrun) && inw(ioaddr+CSCR) & CSCR_LinkChangeBit)
			link_changed = 1;
		outw(status, ioaddr + IntrStatus);

		if (rtl8129_debug > 4)
			printk(KERN_DEBUG"%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
				   dev->name, status, inw(ioaddr + IntrStatus));

		/* Nothing we handle is pending: done. */
		if ((status & (PCIErr|PCSTimeout|RxUnderrun|RxOverflow|RxFIFOOver
					   |TxErr|TxOK|RxErr|RxOK)) == 0)
			break;

		if (status & (RxOK|RxUnderrun|RxOverflow|RxFIFOOver))/* Rx interrupt */
			rtl8129_rx(dev);

		if (status & (TxOK | TxErr)) {
			/* Reap completed Tx slots between dirty_tx and cur_tx. */
			unsigned int dirty_tx;

			for (dirty_tx = tp->dirty_tx; dirty_tx < tp->cur_tx; dirty_tx++) {
				int entry = dirty_tx % NUM_TX_DESC;
				int txstatus = inl(ioaddr + TxStatus0 + entry*4);

				if ( ! (txstatus & (TxStatOK | TxUnderrun | TxAborted)))
					break;			/* It still hasn't been Txed */

				/* Note: TxCarrierLost is always asserted at 100mbps. */
				if (txstatus & (TxOutOfWindow | TxAborted)) {
					/* There was an major error, log it. */
					if (rtl8129_debug > 1)
						printk(KERN_NOTICE"%s: Transmit error, Tx status %8.8x.\n",
							   dev->name, txstatus);
					tp->stats.tx_errors++;
					if (txstatus&TxAborted) {
						tp->stats.tx_aborted_errors++;
						/* Re-enable the transmitter after an abort. */
						outl((TX_DMA_BURST<<8)|0x03000001, ioaddr + TxConfig);
					}
					if (txstatus&TxCarrierLost) tp->stats.tx_carrier_errors++;
					if (txstatus&TxOutOfWindow) tp->stats.tx_window_errors++;
#ifdef ETHER_STATS
					/* Collision-count field saturated at 15: 16+ collisions. */
					if ((txstatus & 0x0f000000) == 0x0f000000)
						tp->stats.collisions16++;
#endif
				} else {
					if (txstatus & TxUnderrun) {
						/* Add 64 to the Tx FIFO threshold. */
						if (tp->tx_flag < 0x00300000)
							tp->tx_flag += 0x00020000;
						tp->stats.tx_fifo_errors++;
					}
					tp->stats.collisions += (txstatus >> 24) & 15;
#if LINUX_VERSION_CODE > 0x20119
					tp->stats.tx_bytes += txstatus & 0x7ff;
#endif
					tp->stats.tx_packets++;
				}

				/* Free the original skb. */
				dev_free_skb(tp->tx_skbuff[entry]);
				tp->tx_skbuff[entry] = 0;
				if (tp->tx_full) {
					/* The ring is no longer full, clear tbusy. */
					tp->tx_full = 0;
					clear_bit(0, (void*)&dev->tbusy);
					mark_bh(NET_BH);	/* let the queue layer retry sends */
				}
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
				printk(KERN_ERR"%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
					   dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
				dirty_tx += NUM_TX_DESC;	/* resync the reap pointer */
			}
#endif
			tp->dirty_tx = dirty_tx;
		}

		/* Check uncommon events with one test. */
		if (status & (PCIErr|PCSTimeout |RxUnderrun|RxOverflow|RxFIFOOver
					  |TxErr|RxErr)) {
			if (rtl8129_debug > 2)
				printk(KERN_NOTICE"%s: Abnormal interrupt, status %8.8x.\n",
					   dev->name, status);

			/* NOTE(review): status comes from a 16-bit inw(), so this
			   all-ones ("card removed") test can never be true here —
			   looks inherited from a 32-bit read; confirm. */
			if (status == 0xffffffff)
				break;
			/* Update the error count. */
			tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
			outl(0, ioaddr + RxMissed);

			if ((status & RxUnderrun) && link_changed &&
				(rtl_cap_tbl[tp->chip_id] & HAS_LNK_CHNG)) {
				/* Really link-change on new chips: renegotiate duplex
				   from the link partner's advertised abilities. */
				int lpar = inw(ioaddr + NWayLPAR);
				int duplex = (lpar&0x0100)||(lpar & 0x01C0) == 0x0040;
				if (tp->full_duplex != duplex) {
					tp->full_duplex = duplex;
					/* Unlock config registers, set duplex bit, relock. */
					outb(0xC0, ioaddr + Cfg9346);
					outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
					outb(0x00, ioaddr + Cfg9346);
				}
				status &= ~RxUnderrun;
			}
			if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
				tp->stats.rx_errors++;

			if (status & (PCSTimeout)) tp->stats.rx_length_errors++;
			if (status & (RxUnderrun|RxFIFOOver)) tp->stats.rx_fifo_errors++;

			if (status & RxFIFOOver) {
				/* Receive FIFO overflow: restart the receiver from scratch. */
				tp->cur_rx = 0;
				outb(CmdTxEnb, ioaddr + ChipCmd);
				outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
				outl((RX_FIFO_THRESH << 13) | (RX_BUF_LEN_IDX << 11) |
					 (RX_DMA_BURST<<8), ioaddr + RxConfig);
			}

			if (status & RxOverflow) {
				/* Rx ring overrun: resync our read pointer with the chip's
				   write pointer. */
				tp->stats.rx_over_errors++;
				tp->cur_rx = inw(ioaddr + RxBufAddr) % RX_BUF_LEN;
				outw(tp->cur_rx - 16, ioaddr + RxBufPtr);
			}
			if (status & PCIErr) {
				u32 pci_cmd_status;
				pci20to26_read_config_dword(tp->pci_bus, tp->pci_devfn,
											PCI_COMMAND, &pci_cmd_status);

				printk(KERN_ERR "%s: PCI Bus error %4.4x.\n",
					   dev->name, pci_cmd_status);
			}
		}
		if (--boguscnt < 0) {
			/* Too much work: mask everything off and bail to avoid
			   livelocking in interrupt context. */
			printk(KERN_WARNING"%s: Too much work at interrupt, "
				   "IntrStatus=0x%4.4x.\n",
				   dev->name, status);
			/* Clear all interrupt sources. */
			outw(0xffff, ioaddr + IntrStatus);
			break;
		}
	} while (1);

	if (rtl8129_debug > 3)
		printk(KERN_DEBUG"%s: exiting interrupt, intr_status=%#4.4x.\n",
			   dev->name, inl(ioaddr + IntrStatus));

#if defined(__i386__)
	clear_bit(0, (void*)&dev->interrupt);
#else
	dev->interrupt = 0;
#endif
	return;
}
 
/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
   field alignments and semantics. */
static int rtl8129_rx(struct device *dev)
{
	/* Drain received frames from the contiguous Rx ring buffer and pass
	   them up the stack as skbuffs.  Each frame is prefixed by a 4-byte
	   little-endian header (status in the low 16 bits, frame size —
	   including the 4-byte CRC — in the high 16); frames may wrap past
	   the end of the ring.  Called from the interrupt handler.
	   Always returns 0. */
	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
	long ioaddr = dev->base_addr;
	unsigned char *rx_ring = tp->rx_ring;
	u16 cur_rx = tp->cur_rx;	/* our read offset into the ring */

	if (rtl8129_debug > 4)
		printk(KERN_DEBUG"%s: In rtl8129_rx(), current %4.4x BufAddr %4.4x,"
			   " free to %4.4x, Cmd %2.2x.\n",
			   dev->name, cur_rx, inw(ioaddr + RxBufAddr),
			   inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));

	/* ChipCmd bit 0 reads as "Rx buffer empty": loop while frames remain. */
	while ((inb(ioaddr + ChipCmd) & 1) == 0) {
		int ring_offset = cur_rx % RX_BUF_LEN;
		u32 rx_status = le32_to_cpu(*(u32*)(rx_ring + ring_offset));
		int rx_size = rx_status >> 16;

		if (rtl8129_debug > 4) {
			int i;
			printk(KERN_DEBUG"%s: rtl8129_rx() status %4.4x, size %4.4x, cur %4.4x.\n",
				   dev->name, rx_status, rx_size, cur_rx);
			printk(KERN_DEBUG"%s: Frame contents ", dev->name);
			for (i = 0; i < 70; i++)
				printk(" %2.2x", le32_to_cpu(rx_ring[ring_offset + i]));
			printk(".\n");
		}
		if (rx_status & RxTooLong) {
			/* Oversized frame: count it, but do not copy it up. */
			if (rtl8129_debug > 0)
				printk(KERN_NOTICE"%s: Oversized Ethernet frame, status %4.4x!\n",
					   dev->name, rx_status);
			tp->stats.rx_length_errors++;
		} else if (rx_status &
				   (RxBadSymbol|RxRunt|RxTooLong|RxCRCErr|RxBadAlign)) {
			/* Any other receive error: update stats and reset the receiver. */
			if (rtl8129_debug > 1)
				printk(KERN_DEBUG"%s: Ethernet frame had errors,"
					   " status %4.4x.\n", dev->name, rx_status);
			tp->stats.rx_errors++;
			if (rx_status & (RxBadSymbol|RxBadAlign))
				tp->stats.rx_frame_errors++;
			if (rx_status & (RxRunt|RxTooLong)) tp->stats.rx_length_errors++;
			if (rx_status & RxCRCErr) tp->stats.rx_crc_errors++;
			/* Reset the receiver, based on RealTek recommendation. (Bug?) */
			tp->cur_rx = 0;
			outb(CmdTxEnb, ioaddr + ChipCmd);
			outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
			outl((RX_FIFO_THRESH << 13) | (RX_BUF_LEN_IDX << 11) |
				 (RX_DMA_BURST<<8), ioaddr + RxConfig);
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			struct sk_buff *skb;

			skb = dev_alloc_skb(rx_size + 2);
			if (skb == NULL) {
				printk(KERN_WARNING"%s: Memory squeeze, deferring packet.\n",
					   dev->name);
				/* We should check that some rx space is free.
				   If not, free one and mark stats->rx_dropped++. */
				tp->stats.rx_dropped++;
				break;
			}
			skb->dev = dev;
			skb_reserve(skb, 2);	/* 16 byte align the IP fields. */
			if (ring_offset+rx_size+4 > RX_BUF_LEN) {
				/* Frame wraps past the end of the ring: copy in two
				   pieces (tail of the ring, then its start). */
				int semi_count = RX_BUF_LEN - ring_offset - 4;
				memcpy(skb_put(skb, semi_count), &rx_ring[ring_offset + 4],
					   semi_count);
				memcpy(skb_put(skb, rx_size-semi_count), rx_ring,
					   rx_size-semi_count);
				if (rtl8129_debug > 4) {
					int i;
					printk(KERN_DEBUG"%s: Frame wrap @%d",
						   dev->name, semi_count);
					for (i = 0; i < 16; i++)
						printk(" %2.2x", le32_to_cpu(rx_ring[i]));
					printk(".\n");
					memset(rx_ring, 0xcc, 16);	/* poison for debugging */
				}
			} else {
#if 0  /* USE_IP_COPYSUM */
				eth_copy_and_sum(skb, &rx_ring[ring_offset + 4],
								 rx_size, 0);
				skb_put(skb, rx_size);
#else
				memcpy(skb_put(skb, rx_size), &rx_ring[ring_offset + 4],
					   rx_size);
#endif
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);	/* hand the frame to the protocol layers */
#if LINUX_VERSION_CODE > 0x20119
			tp->stats.rx_bytes += rx_size;
#endif
			tp->stats.rx_packets++;
		}

		/* Advance past header+frame, rounded up to a dword boundary,
		   and tell the chip how far we have read. */
		cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
		outw(cur_rx - 16, ioaddr + RxBufPtr);
	}
	if (rtl8129_debug > 4)
		printk(KERN_DEBUG"%s: Done rtl8129_rx(), current %4.4x BufAddr %4.4x,"
			   " free to %4.4x, Cmd %2.2x.\n",
			   dev->name, cur_rx, inw(ioaddr + RxBufAddr),
			   inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
	tp->cur_rx = cur_rx;
	return 0;
}
 
static int
rtl8129_close(struct device *dev)
{
	/* ifdown: quiesce the chip (mask IRQs, stop DMA), harvest the
	   missed-frame counter, release the IRQ and all buffers, then put
	   the chip in low-power mode.  Always returns 0. */
	long ioaddr = dev->base_addr;
	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
	int i;

	dev->start = 0;		/* tell the queue layer we are going down */
	dev->tbusy = 1;

	if (rtl8129_debug > 1)
		printk(KERN_DEBUG"%s: Shutting down ethercard, status was 0x%4.4x.\n",
			   dev->name, inw(ioaddr + IntrStatus));

	/* Disable interrupts by clearing the interrupt mask. */
	outw(0x0000, ioaddr + IntrMask);

	/* Stop the chip's Tx and Rx DMA processes. */
	outb(0x00, ioaddr + ChipCmd);

	/* Update the error counts. */
	tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
	outl(0, ioaddr + RxMissed);

	del_timer(&tp->timer);

	free_irq(dev->irq, dev);

	/* Drop any skbuffs still waiting in the Tx ring. */
	for (i = 0; i < NUM_TX_DESC; i++) {
		if (tp->tx_skbuff[i])
			dev_free_skb(tp->tx_skbuff[i]);
		tp->tx_skbuff[i] = 0;
	}
	kfree(tp->rx_ring);
	kfree(tp->tx_bufs);

	/* Green! Put the chip in low-power mode. */
	outb(0xC0, ioaddr + Cfg9346);	/* unlock the config registers first */
	outb(0x03, ioaddr + Config1);
	outb('H', ioaddr + HltClk);	/* 'R' would leave the clock running. */

	MOD_DEC_USE_COUNT;

	return 0;
}
 
static int mii_ioctl(struct device *dev, struct ifreq *rq, int cmd)
{
	/* Private-ioctl access to the MII (PHY) management registers.
	   NOTE(review): data[] overlays the storage of the ifr_data pointer
	   itself ((u16 *)&rq->ifr_data) — the classic Becker-driver
	   convention where the arguments live inside the ifreq rather than
	   in user memory; confirm callers follow it. */
	struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
	u16 *data = (u16 *)&rq->ifr_data;

	switch(cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = tp->phys[0] & 0x3f;
		/* Fall Through — intentionally also performs the read below. */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = mdio_read(dev, data[0], data[1] & 0x1f);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;	/* register writes are privileged */
		mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
 
static struct enet_statistics *
rtl8129_get_stats(struct device *dev)
{
	/* Return the interface statistics block.  When the interface is
	   running, first fold the chip's missed-frame counter into it and
	   reset that hardware counter. */
	struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
	long base = dev->base_addr;

	if (dev->start) {
		np->stats.rx_missed_errors += inl(base + RxMissed);
		outl(0, base + RxMissed);
	}
	return &np->stats;
}
 
/* Set or clear the multicast filter for this adaptor.
This routine is not state sensitive and need not be SMP locked. */
 
static unsigned const ethernet_polynomial = 0x04c11db7U;
/* Compute the Ethernet CRC-32 (IEEE 802.3 FCS polynomial 0x04C11DB7,
   MSB-first register, each octet fed LSB-first) over 'length' bytes of
   'data', starting from an all-ones register.  Used below only to hash
   multicast addresses: the top six bits of the result index the 64-bit
   MAR filter.
   Fixed: the original accumulated the CRC in a signed int and
   left-shifted it while negative, which is undefined behavior in C;
   the arithmetic is now done in an unsigned accumulator and produces
   the same values the old code yielded on two's-complement compilers.
   (The return type 'unsigned' is the same underlying type as the
   previous 'u32' typedef.) */
static inline unsigned ether_crc(int length, unsigned char *data)
{
	unsigned crc = 0xffffffffU;	/* preload register with all ones */

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;
		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
			/* Shift left; XOR in the polynomial when the bit shifted
			   out differs from the incoming data bit. */
			crc = (crc << 1) ^
				(((crc >> 31) ^ (current_octet & 1)) ? ethernet_polynomial : 0);
	}
	return crc;
}
 
/* Bits in RxConfig: receiver accept-filter flags, combined into the
   rx_mode value programmed by set_rx_mode() below. */
enum rx_mode_bits {
	AcceptErr=0x20, AcceptRunt=0x10, AcceptBroadcast=0x08,
	AcceptMulticast=0x04, AcceptMyPhys=0x02, AcceptAllPhys=0x01,
};
 
static void set_rx_mode(struct device *dev)
{
	/* Program the receive filter from dev->flags and the multicast
	   list.  Not state sensitive, so it can run while the chip is
	   active. */
	long ioaddr = dev->base_addr;
	u32 mc_filter[2];		/* Multicast hash filter (64 bits, MAR0..MAR7) */
	int i, rx_mode;

	if (rtl8129_debug > 3)
		printk(KERN_DEBUG"%s: set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
			   dev->name, dev->flags, inl(ioaddr + RxConfig));

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE"%s: Promiscuous mode enabled.\n", dev->name);
		rx_mode = AcceptBroadcast|AcceptMulticast|AcceptMyPhys|AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		/* Hash each multicast address into the 64-bit filter: the top
		   six bits of the Ethernet CRC select the bit index. */
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next)
			set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
	}
	/* We can safely update without stopping the chip.
	   NOTE(review): rx_mode goes in with an 8-bit outb to RxConfig —
	   presumably only the low (accept) byte is affected; confirm
	   against the RTL8129/8139 register map. */
	outb(rx_mode, ioaddr + RxConfig);
	outl(mc_filter[0], ioaddr + MAR0 + 0);
	outl(mc_filter[1], ioaddr + MAR0 + 4);
	return;
}
#ifdef MODULE
int init_module(void)
{
	/* Module entry point: probe for supported boards; a nonzero return
	   makes the kernel unload the module. */
	return rtl8139_probe(0);
}
 
void
cleanup_module(void)
{
	/* Module unload: walk the driver's device list, unregistering and
	   freeing every probed board. */
	struct device *next_dev;

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	while (root_rtl8129_dev) {
		struct rtl8129_private *tp =
			(struct rtl8129_private *)root_rtl8129_dev->priv;
		next_dev = tp->next_module;	/* save the link before freeing tp */
		unregister_netdev(root_rtl8129_dev);
		release_region(root_rtl8129_dev->base_addr,
					   pci_tbl[tp->chip_id].io_size);
		kfree(tp);
		kfree(root_rtl8129_dev);
		root_rtl8129_dev = next_dev;
	}
}
 
#endif /* MODULE */
/*
* Local variables:
* compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c rtl8139.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
* SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c rtl8139.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* End:
*/
/shark/trunk/drivers/net/net.c
0,0 → 1,100
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: net.c,v 1.5 2004-05-11 14:30:49 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.5 $
Last update: $Date: 2004-05-11 14:30:49 $
------------
**/
 
/*
* Copyright (C) 2000 Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/* Author: Luca Abeni */
/* Date: 4/12/1997 */
 
/* File: net.C */
/* Revision: 1.00 */
 
/*
This module provides an interface function eth_init that initialize
all the network library layers (LowLev, Ethrnet, IP,...) according
to the values passed in a net_model structure
*/
 
#include <kernel/kern.h>
#include <drivers/net.h>
#include "eth_priv.h"
 
//#define DEBUG_NET
 
/* WATCH OUT ("OKKIO"): this initializer hard-codes ten empty
   {initfun, initparms} protocol slots — it must change if you change
   NET_MAX_PROTOCOLS!!!! */
struct net_model net_base = {0, 0, {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}, {NULL, NULL}, {NULL, NULL},
	{NULL, NULL}, {NULL, NULL}, {NULL, NULL}, {NULL, NULL}, {NULL, NULL}}};
 
void net_setprotocol(struct net_model *m, void (*initfun)(void *parm), void *initparms)
{
	/* Register one protocol init hook (and its argument) in the model;
	   net_init() later calls the hooks in registration order.
	   NOTE(review): there is no bounds check — m->protocol[] has
	   NET_MAX_PROTOCOLS slots (ten in net_base above), so registering
	   more overruns the array; confirm callers never exceed the limit. */
	int i;

	i = m->numprotocol++;
	m->protocol[i].initfun = initfun;
	m->protocol[i].initparms = initparms;
}
 
int net_init(struct net_model *m)
{
	/* Bring the network stack up for the given model: initialize the
	   base (lowlev + ethernet) layers first, then run every registered
	   protocol init hook in registration order.
	   Returns 1 on success, or eth_init()'s error code on failure. */
	int err;
	int idx;

	err = eth_init(m->lowlevparm, NULL);
	if (err != 1)
		return err;

	for (idx = 0; idx < m->numprotocol; idx++) {
#ifdef DEBUG_NET
		printk(KERN_DEBUG "Protocol %d init.\n", idx);
#endif
		m->protocol[idx].initfun(m->protocol[idx].initparms);
	}

	return 1;
}
/shark/trunk/drivers/net/arp.h
0,0 → 1,95
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: arp.h,v 1.4 2004-05-11 14:30:48 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.4 $
Last update: $Date: 2004-05-11 14:30:48 $
------------
**/
 
/*
* Copyright (C) 2000 Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
 
 
/* Author: Luca Abeni */
/* Date: 4/12/1997 */
 
/* File: ARP.H */
/* Revision: 2.0 */
 
#ifndef __ARP_H__
#define __ARP_H__

#include "ll/sys/cdefs.h"

__BEGIN_DECLS

/* ARP over Ethernet for IPv4: hardware type 1, protocol type 0x0800. */
#define ARP_IP_TYPE 0x0800
#define ARP_ETH_TYPE 1

#define ARP_REQUEST 1
#define ARP_REPLY 2

#define ARP_MAX_ENTRIES 50
#define ARP_MAX_USED 99

//#define ARP_ERROR_BASE (ETH_ERROR_BASE + 10)
//#define ARP_TABLE_FULL (ARP_ERROR_BASE + 0)


/* One ARP cache entry.  NOTE(review): field semantics inferred from the
   names — verify against arp.c. */
typedef struct arp_table{
	IP_ADDR ip;
	struct eth_addr eth;
	int used;
	int time;
	int ntrans;
	void *pkt;
	BYTE valid;
} ARP_TABLE;

void arp_send(void *pkt, IP_ADDR dest, int len);
void arp_init(char *localAddr);
int arp_req(IP_ADDR dest);
void arp_sendRequest(int i);

/* Fixed: this line previously repeated __BEGIN_DECLS, which under a C++
   compiler opens a second extern "C" block and leaves both unclosed. */
__END_DECLS

#endif
/shark/trunk/drivers/net/netbuff.c
0,0 → 1,169
/*
* Project: S.Ha.R.K.
*
* Coordinators:
* Giorgio Buttazzo <giorgio@sssup.it>
* Paolo Gai <pj@gandalf.sssup.it>
*
* Authors :
* Paolo Gai <pj@gandalf.sssup.it>
* Massimiliano Giorgi <massy@gandalf.sssup.it>
* Luca Abeni <luca@gandalf.sssup.it>
* (see the web pages for full authors list)
*
* ReTiS Lab (Scuola Superiore S.Anna - Pisa - Italy)
*
* http://www.sssup.it
* http://retis.sssup.it
* http://shark.sssup.it
*/
 
/**
------------
CVS : $Id: netbuff.c,v 1.3 2004-05-11 14:30:50 giacomo Exp $
 
File: $File$
Revision: $Revision: 1.3 $
Last update: $Date: 2004-05-11 14:30:50 $
------------
**/
 
/*
* Copyright (C) 2000 Luca Abeni
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
 
/* Author: Luca Abeni */
/* Date: 25/9/1997 */
 
/* File: NetBuff.c */
/* Revision: 1.00 */
 
/*
   This module manages all the buffers used by the network driver:
   the buffers are grouped in netbuff structures (initialized by
   netbuff_init) and can be allocated with netbuff_get or released
   with netbuff_release.  A netbuff_sequentialget function is also
   provided for use by tasks that have their own private buffer pool
   (if a buffer can be allocated by only one task, it is useless to
   allocate it in mutual exclusion).  It is useful for the receive
   buffers allocated by the receive process.
*/
#include <kernel/kern.h>
#include <semaphore.h>
 
#include "netbuff.h"
 
/* Init the buffer structures */
void netbuff_init(struct netbuff *netb, BYTE nbuffs, WORD buffdim)
{
int i;
 
kern_cli();
netb->b = kern_alloc(nbuffs * buffdim);
netb->pb = kern_alloc(nbuffs * sizeof(void *));
netb->free = kern_alloc(nbuffs * sizeof(char));
kern_sti();
if ((netb->b == 0) || (netb->pb ==0) || (netb->free == 0)) {
kern_raise(XNETBUFF_INIT_EXC,exec_shadow);
}
netb->nbuffs = nbuffs;
for (i = 0; i < nbuffs; i++) {
netb->pb[i] = netb->b + (i * buffdim);
netb->free[i] = 1;
}
sem_init(&(netb->buffersem), 0, nbuffs);
}
 
/*
  Get the first free buffer in the netb pool, blocking up to 'to' for a
  free one.  All the tasks can call this function, so the scan for a
  free slot runs with interrupts disabled (kern_cli).
  Returns the buffer address, or NULL on timeout.
*/
void *netbuff_get(struct netbuff *netb, BYTE to)
{
	int i, done;

	/* Wait for a free-buffer token; sem_xwait() returns nonzero on
	   timeout/error. */
	if (sem_xwait(&(netb->buffersem), 1, to) != 0) {
		return NULL;
	}

	/* Claim the first slot still marked free. */
	done = 0; i = 0;
	kern_cli();
	while ((i < netb->nbuffs) && !done) {
		if (netb->free[i]) {
			done = 1;
			netb->free[i] = 0;
		} else i++;
	}
	kern_sti();
	if (!done) {
		/* The semaphore promised a free buffer but none was found:
		   pool state is corrupted.  Fixed: previously the code fell
		   through here and returned pb[nbuffs], one past the end of
		   the pointer table; return NULL instead if kern_raise()
		   comes back. */
		kern_raise(XNETBUFF_GET_EXC, exec_shadow);
		return NULL;
	}
	return netb->pb[i];
}
 
/*
  Get the first free buffer in the netb pool.  This function can be called
  only if netb is private to the process that wants the buffer, so the
  slot scan needs no kern_cli.
  Returns the buffer address, or NULL on timeout.
*/
void *netbuff_sequentialget(struct netbuff *netb, BYTE to)
{
	int i, done;

	/* Fixed: the test was inverted ("if (!sem_xwait(...))"), which
	   returned NULL on SUCCESS and scanned the pool after a timeout.
	   sem_xwait() returns 0 on success, exactly as netbuff_get()
	   assumes. */
	if (sem_xwait(&(netb->buffersem), 1, to) != 0) {
		return NULL;
	}

	/* Claim the first slot still marked free. */
	done = 0; i = 0;
	while ((i < netb->nbuffs) && !done) {
		if (netb->free[i]) {
			done = 1;
			netb->free[i] = 0;
		} else i++;
	}
	if (!done) {
		/* Token held but no free slot: pool corrupted.  Return NULL
		   instead of the out-of-bounds pb[nbuffs] if kern_raise()
		   comes back. */
		kern_raise(XNETBUFF_GET_EXC, exec_shadow);
		return NULL;
	}
	return netb->pb[i];
}
 
/* Release the buffer m of the pool netb: mark its slot free again and
   post the counting semaphore so a waiter in netbuff_get() /
   netbuff_sequentialget() can claim it.
   NOTE(review): the final sem_post() is reached even after the
   "STRANGE buffer" kern_raise below — presumably kern_raise() does not
   return here; confirm, otherwise the semaphore count drifts. */
void netbuff_release(struct netbuff *netb, void *m)
{
	int i, done;

	/* Find the slot whose buffer starts at address m. */
	done = 0; i = 0;
	while ((i < netb->nbuffs) && !done) {
		if (netb->pb[i] == m) {
			if (netb->free[i] == 1) {
				/* Double release: the slot is already marked free. */
				cprintf("Trying to free a free buffer :( \n");
				kern_raise(XNETBUFF_ALREADYFREE_EXC,exec_shadow);
			}
			done = 1;
			netb->free[i] = 1;
		} else i++;
	}
	if (!done) {
		/* m does not match any buffer start address in this pool. */
		cprintf("Trying to free a STRANGE buffer :( \n");
		kern_raise(XNETBUFF_RELEASE_EXC,exec_shadow);
	}
	sem_post(&(netb->buffersem));
}
/shark/trunk/drivers/net/misc.c
0,0 → 1,10
#include <string.h>
#include <kernel/kern.h>
 
/* Catch-all for unimplemented driver entry points: report and abort. */
void panic_stub(void)
{
	cprintf("Panic: stub called!!!\n");

	sys_abort(200);	/* arbitrary abort code */
}
 
/shark/trunk/drivers/net/eepro100.c
0,0 → 1,1678
/* drivers/net/eepro100.c: An Intel i82557 Ethernet driver for Linux. */
/*
NOTICE: this version tested with kernels 1.3.72 and later only!
Written 1996-1998 by Donald Becker.
 
This software may be used and distributed according to the terms
of the GNU Public License, incorporated herein by reference.
 
This driver is for the Intel EtherExpress Pro 100B boards.
It should work with other i82557 and i82558 boards.
To use a built-in driver, install as drivers/net/eepro100.c.
To use as a module, use the compile-command at the end of the file.
 
The author may be reached as becker@CESDIS.usra.edu, or C/O
Center of Excellence in Space Data and Information Sciences
Code 930.5, NASA Goddard Space Flight Center, Greenbelt MD 20771
For updates see
http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html
There is also a mailing list based at
linux-eepro100@cesdis.gsfc.nasa.gov
*/
 
static const char *version =
"eepro100.c:v1.05 10/16/98 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n";

/* A few user-configurable values that apply to all boards.
   First set are undocumented and spelled per Intel recommendations. */

static int congenb = 0;		/* Enable congestion control in the DP83840. */
static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount = 0;

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;
 
#include <linux/config.h>
#ifdef MODULE
#ifdef MODVERSIONS
#include <linux/modversions.h>
#endif
#include <linux/module.h>
#else
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#endif
 
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/malloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#if LINUX_VERSION_CODE < 0x20155
#include <linux/bios32.h> /* Ignore the bogus warning in 2.1.100+ */
#endif
#include <asm/bitops.h>
#include <asm/io.h>
 
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
 
/* Unused in the 2.0.* version, but retained for documentation. */
#if LINUX_VERSION_CODE > 0x20118 && defined(MODULE)
MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
MODULE_DESCRIPTION("Intel i82557/i82558 PCI EtherExpressPro driver");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(congenb, "i");
MODULE_PARM(txfifo, "i");
MODULE_PARM(rxfifo, "i");
MODULE_PARM(txdmacount, "i");
MODULE_PARM(rxdmacount, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(multicast_filter_limit, "i");
#endif
 
#define RUN_AT(x) (jiffies + (x))
 
#if (LINUX_VERSION_CODE < 0x20123)
#define test_and_set_bit(val, addr) set_bit(val, addr)
#endif
#if LINUX_VERSION_CODE < 0x20159
#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
#else
#define dev_free_skb(skb) dev_kfree_skb(skb);
#endif
 
/* The total I/O port extent of the board.
The registers beyond 0x18 only exist on the i82558. */
#define SPEEDO3_TOTAL_SIZE 0x20
 
int speedo_debug = 0;
 
/*
Theory of Operation
 
I. Board Compatibility
 
This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.
 
II. Board-specific settings
 
PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line. While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.
 
III. Driver operation
 
IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet". This chips retains the complex
Rx and Tx descriptors and multiple buffers pointers as previous chips, but
also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.
 
Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD. The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.
 
The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers. The driver sets these to zero, so that all
pointer fields are absolute addresses.
 
The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space. This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.
 
IIIB. Transmit structure
 
The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section. Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptor
(TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
speedo_private data structure for each adapter instance.
 
The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.
 
This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure. This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor. So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.
 
An additional complexity of these non-transmit commands are that they may be
added asynchronous to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.
 
A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method. The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit. While the setup frames may have the NoOp command on the
Tx ring marked as complete, but not have completed the setup command, this
is not a problem. The tx_ring entry can be still safely reused, as the
tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
 
Commands may have bits set e.g. CmdSuspend in the command word to either
suspend or stop the transmit/command unit. This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
erasing the previous suspend
the chip processes the previous command
the chip processes the final command, and suspends
doing the CU_RESUME
the chip processes the next-yet-valid post-final-command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
after erasing the previous CmdSuspend, without the possibility of an
intervening delay. Thus the resume command is always within the
interrupts-disabled region. This is a timing dependence, but handling this
condition in a timing-independent way would considerably complicate the code.
 
Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process. This is presumably no longer true.
 
IIIC. Receive structure
 
Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers. The value
SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
trade-off the memory wasted by passing the full-sized skbuff to the queue
layer for all frames vs. the copying cost of copying a frame to a
correctly-sized skbuff.
 
For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff. For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.
 
IIID. Synchronization
The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.
 
The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished otherwise it sets
the 'sp->tx_full' flag.
 
The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.) After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero. Iff the 'sp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.
 
IV. Notes
 
Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information. But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!
 
*/
 
/* Added by Nino - Begin */
extern int pci20to26_find_class(unsigned int class_code, int index, BYTE *bus, BYTE *dev);
extern int pci20to26_read_config_byte(unsigned int bus, unsigned int dev, int where, u8 *val);
extern int pci20to26_read_config_word(unsigned int bus, unsigned int dev, int where, u16 *val);
extern int pci20to26_read_config_dword(unsigned int bus, unsigned int dev, int where, u32 *val);
extern int pci20to26_write_config_byte(unsigned int bus, unsigned int dev, int where, u8 val);
extern int pci20to26_write_config_word(unsigned int bus, unsigned int dev, int where, u16 val);
extern int pci20to26_write_config_dword(unsigned int bus, unsigned int dev, int where, u32 val);
/* Added by Nino - End */
 
 
/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE 16 /* Effectively 2 entries fewer. */
#define RX_RING_SIZE 16
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
#define PKT_BUF_SZ 1536
 
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT ((800*HZ)/1000)
 
/* How to wait for the command unit to accept a command.
Typically this takes 0 ticks. */
/* Spin until the chip has accepted the previous SCB command (the
   command byte reads back as zero), giving up after ~100 reads.
   Typically the command is accepted on the first read. */
static inline void wait_for_cmd_done(long cmd_ioaddr)
{
	int attempts = 100;

	while (inb(cmd_ioaddr) && --attempts >= 0)
		;
}
 
/* Operational parameter that usually are not changed. */
 
/* The rest of these values should never change. */
 
/* Offsets to the various registers.
All accesses need not be longword aligned. */
/* Byte offsets from dev->base_addr; registers are accessed with
inb/inw/inl plus the offset below. */
enum speedo_offsets {
SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
SCBPointer = 4, /* General purpose pointer. */
SCBPort = 8, /* Misc. commands and operands. */
SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
SCBCtrlMDI = 16, /* MDI interface control. */
SCBEarlyRx = 20, /* Early receive byte count. */
};
/* Commands that can be put in a command list entry. */
enum commands {
CmdNOp = 0, CmdIASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7,
/* The following are flag bits OR'd with the command code, e.g.
(CmdSuspend | CmdTx | CmdTxFlex) in speedo_start_xmit(). */
CmdSuspend = 0x4000, /* Suspend after completion. */
CmdIntr = 0x2000, /* Interrupt after completion. */
CmdTxFlex = 0x0008, /* Use "Flexible mode" for CmdTx command. */
};
 
/* The SCB accepts the following controls for the Tx and Rx units: */
#define CU_START 0x0010
#define CU_RESUME 0x0020
#define CU_STATSADDR 0x0040
#define CU_SHOWSTATS 0x0050 /* Dump statistics counters. */
#define CU_CMD_BASE 0x0060 /* Base address to add to CU commands. */
#define CU_DUMPSTATS 0x0070 /* Dump then reset stats counters. */
 
#define RX_START 0x0001
#define RX_RESUME 0x0002
#define RX_ABORT 0x0004
#define RX_ADDR_LOAD 0x0006
#define RX_RESUMENR 0x0007
#define INT_MASK 0x0100
#define DRVR_INT 0x0200 /* Driver generated interrupt. */
 
/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor_net { /* A generic descriptor. */
s16 status; /* Offset 0. */
s16 command; /* Offset 2. */
u32 link; /* struct descriptor * */
unsigned char params[0]; /* Zero-length array (GNU C extension): command parameters follow in memory. */
};
 
/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD { /* Receive frame descriptor. */
s32 status; /* RxFD_bits completion/error flags. */
u32 link; /* struct RxFD * */
u32 rx_buf_addr; /* void * */
u16 count; /* Bytes received; low 14 bits used (masked with 0x3fff). */
u16 size; /* Buffer size; set to PKT_BUF_SZ by the driver. */
};
 
/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
RxComplete=0x8000, RxOK=0x2000,
RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
StatusComplete=0x8000, /* Tx descriptor completed (tested when scavenging). */
};
 
struct TxFD { /* Transmit frame descriptor set. */
s32 status; /* Command code in high 16 bits, completion status below. */
u32 link; /* void * */
u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
s32 count; /* # of TBD (=1), Tx start thresh., etc.
(set to 0x01208000 by speedo_start_xmit). */
/* This constitutes two "TBD" entries -- we only use one. */
u32 tx_buf_addr0; /* void *, frame to be transmitted. */
s32 tx_buf_size0; /* Length of Tx frame. */
u32 tx_buf_addr1; /* void *, frame to be transmitted. */
s32 tx_buf_size1; /* Length of Tx frame. */
};
 
/* Elements of the dump_statistics block. This block must be lword aligned. */
/* The address of this block is handed to the chip with CU_STATSADDR;
speedo_open() clears done_marker before issuing CU_DUMPSTATS. */
struct speedo_stats {
u32 tx_good_frames;
u32 tx_coll16_errs;
u32 tx_late_colls;
u32 tx_underruns;
u32 tx_lost_carrier;
u32 tx_deferred;
u32 tx_one_colls;
u32 tx_multi_colls;
u32 tx_total_colls;
u32 rx_good_frames;
u32 rx_crc_errs;
u32 rx_align_errs;
u32 rx_resource_errs;
u32 rx_overrun_errs;
u32 rx_colls_errs;
u32 rx_runt_errs;
u32 done_marker; /* Presumably written by the chip when the dump completes -- driver zeroes it first. */
};
 
/* Per-device driver state, hung off dev->priv (allocated in speedo_found1). */
struct speedo_private {
char devname[8]; /* Used only for kernel debugging. */
const char *product_name;
struct device *next_module;
struct TxFD tx_ring[TX_RING_SIZE]; /* Commands (usually CmdTxPacket). */
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
struct sk_buff* tx_skbuff[TX_RING_SIZE];
struct descriptor_net *last_cmd; /* Last command sent. */
/* Rx descriptor ring & addresses of receive-in-place skbuffs. */
struct RxFD *rx_ringp[RX_RING_SIZE];
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct RxFD *last_rxf; /* Last command sent. */
struct enet_statistics stats;
struct speedo_stats lstats; /* Chip-dumped counters (see CU_DUMPSTATS). */
struct timer_list timer; /* Media selection timer. */
long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
unsigned int cur_rx, cur_tx; /* The next free ring entry */
unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
int mc_setup_frm_len; /* The length of an allocated.. */
struct descriptor_net *mc_setup_frm; /* ..multicast setup frame. */
int mc_setup_busy; /* Avoid double-use of setup frame. */
int in_interrupt; /* Word-aligned dev->interrupt */
char rx_mode; /* Current PROMISC/ALLMULTI setting; -1 = not yet set. */
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
unsigned int default_port:1; /* Last dev->if_port value. */
unsigned int rx_bug:1; /* Work around receiver hang errata. */
unsigned int rx_bug10:1; /* Receiver might hang at 10mbps. */
unsigned int rx_bug100:1; /* Receiver might hang at 100mbps. */
unsigned short phy[2]; /* PHY media interfaces available. */
};
 
/* The parameters for a CmdConfigure operation.
There are so many options that it would be difficult to document each bit.
We mostly use the default or recommended settings. */
/* CmdConfigure parameter block for the original i82557. */
const char i82557_config_cmd[22] = {
22, 0x08, 0, 0, 0, 0x80, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
0, 0x2E, 0, 0x60, 0,
0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
0x3f, 0x05, };
/* CmdConfigure parameter block for the i82558 variant. */
const char i82558_config_cmd[22] = {
22, 0x08, 0, 1, 0, 0x80, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
0, 0x2E, 0, 0x60, 0x08, 0x88,
0x68, 0, 0x40, 0xf2, 0xBD, /* 0xBD->0xFD=Force full-duplex */
0x31, 0x05, };
 
/* PHY media interface chips. */
/* Printable PHY names, indexed by the type code in EEPROM words 6/7. */
static const char *phys[] = {
"None", "i82553-A/B", "i82553-C", "i82503",
"DP83840", "80c240", "80c24", "i82555",
"unknown-8", "unknown-9", "DP83840A", "unknown-11",
"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
/* PHY type codes matching the table above. */
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
S80C24, I82555, DP83840A=10, };
/* Apparently nonzero for PHY types with an MII register interface
(same index as phys[]); not referenced within this chunk. */
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
 
static void speedo_found1(struct device *dev, long ioaddr, int irq,
int card_idx);
 
static int read_eeprom(long ioaddr, int location, int addr_len);
static int mdio_read(long ioaddr, int phy_id, int location);
static int mdio_write(long ioaddr, int phy_id, int location, int value);
static int speedo_open(struct device *dev);
static void speedo_timer(unsigned long data);
static void speedo_init_rx_ring(struct device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct device *dev);
static int speedo_rx(struct device *dev);
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static int speedo_close(struct device *dev);
static struct enet_statistics *speedo_get_stats(struct device *dev);
static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct device *dev);
 
 
/* The parameters that may be passed in... */
/* 'options' is used to pass a transceiver override or full-duplex flag
e.g. "options=16" for FD, "options=32" for 100mbps-only. */
/* Per-card duplex override: -1 = no override; >=0 replaces sp->full_duplex. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Per-card option word (see comment above); -1 = none. */
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
#ifdef MODULE
static int debug = -1; /* The debug level */
#endif
 
#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
Not recommended. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
0x2000, 0x2100, 0x0400, 0x3100};
#endif
 
/* A list of all installed Speedo devices, for removing the driver module. */
static struct device *root_speedo_dev = NULL;
 
/* Probe the PCI bus (first 8 Ethernet-class slots) for Intel
i82557/i82558 adapters, enable bus mastering and I/O access, fix a
too-low latency timer, and register each card via speedo_found1().
Returns the number of cards found.
Fix vs. original: 'ioaddr' is a long but was printed with %d in two
printk()s; the format specifiers are now %ld. */
int eepro100_init(struct device *dev)
{
int cards_found = 0;
static int pci_index = 0; /* Static: resumes the scan across calls. */
 
if (! pcibios_present())
return cards_found;
 
for (; pci_index < 8; pci_index++) {
unsigned char pci_bus, pci_device_fn, pci_latency;
unsigned short int vendor,device;
long ioaddr;
int irq;
 
u16 pci_command, new_command;
 
if (speedo_debug > 0)
printk("Finding Device\n");
 
if (pci20to26_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
&pci_bus, &pci_device_fn)
!= PCIBIOS_SUCCESSFUL)
break;
 
pci20to26_read_config_word(pci_bus, pci_device_fn,
PCI_VENDOR_ID, &vendor);
pci20to26_read_config_word(pci_bus, pci_device_fn,
PCI_DEVICE_ID, &device);
 
if (speedo_debug > 0)
printk("Device = %x Vendor = %x\n",(int)device,(int)vendor);
 
/* NOTE(review): 'break' aborts the whole scan at the first
non-Intel Ethernet device; 'continue' would keep looking.
Preserved as-is pending confirmation. */
if (vendor != PCI_VENDOR_ID_INTEL)
break;
 
 
#if 0 //defined(PCI_SUPPORT_VER2)
{
struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
ioaddr = pdev->base_address[1]; /* Use [0] to mem-map */
irq = pdev->irq;
printk("Device 1 %ld %d\n",ioaddr,irq);
}
#else
{
u32 pci_ioaddr;
u8 pci_irq_line;
pci20to26_read_config_byte(pci_bus, pci_device_fn,
PCI_INTERRUPT_LINE, &pci_irq_line);
/* Note: BASE_ADDRESS_0 is for memory-mapping the registers. */
pci20to26_read_config_dword(pci_bus, pci_device_fn,
PCI_BASE_ADDRESS_1, &pci_ioaddr);
ioaddr = pci_ioaddr;
irq = pci_irq_line;
printk("Device 2 %ld %d\n",ioaddr,irq);
}
#endif
/* Remove I/O space marker in bit 0. */
ioaddr &= ~3;
if (speedo_debug > 2)
printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
ioaddr, irq);
 
/* Get and check the bus-master and latency values. */
pci20to26_read_config_word(pci_bus, pci_device_fn,
PCI_COMMAND, &pci_command);
new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
if (pci_command != new_command) {
printk(KERN_INFO " The PCI BIOS has not enabled this"
" device! Updating PCI command %4.4x->%4.4x.\n",
pci_command, new_command);
pci20to26_write_config_word(pci_bus, pci_device_fn,
PCI_COMMAND, new_command);
}
pci20to26_read_config_byte(pci_bus, pci_device_fn,
PCI_LATENCY_TIMER, &pci_latency);
if (pci_latency < 32) {
printk(" PCI latency timer (CFLT) is unreasonably low at %d."
" Setting to 32 clocks.\n", pci_latency);
pci20to26_write_config_byte(pci_bus, pci_device_fn,
PCI_LATENCY_TIMER, 32);
} else if (speedo_debug > 1)
printk(" PCI latency timer (CFLT) is %#x.\n", pci_latency);
 
speedo_found1(dev, ioaddr, irq, cards_found);
dev = NULL; /* Only the first card may reuse the caller's dev. */
cards_found++;
}
 
return cards_found;
}
 
/* Finish probing one adapter: read the station address and config
words from the serial EEPROM, reset the chip, run (and report) the
chip self-test, allocate/initialize dev->priv, and install the
device methods. Called once per card by eepro100_init(). */
static void speedo_found1(struct device *dev, long ioaddr, int irq,
int card_idx)
{
static int did_version = 0; /* Already printed version info. */
struct speedo_private *sp;
char *product;
int i, option;
u16 eeprom[0x40]; /* Full EEPROM image; checksummed below. */
 
if (speedo_debug > 0 && did_version++ == 0)
printk(version);
 
dev = init_etherdev(dev, sizeof(struct speedo_private));
 
/* Media/duplex override: dev->mem_start wins, then options[card_idx]. */
if (dev->mem_start > 0)
option = dev->mem_start;
else if (card_idx >= 0 && options[card_idx] >= 0)
option = options[card_idx];
else
option = 0;
 
/* Read the station address EEPROM before doing the reset.
Perhaps this should even be done before accepting the device,
then we wouldn't have a device name with which to report the error. */
{
u16 sum = 0;
int j;
/* Address-width autodetect: parts needing 8 address bits return
all-ones when word 0 is read with a 6-bit address. */
int addr_len = read_eeprom(ioaddr, 0, 6) == 0xffff ? 8 : 6;
 
for (j = 0, i = 0; i < 0x40; i++) {
u16 value = read_eeprom(ioaddr, i, addr_len);
eeprom[i] = value;
sum += value;
if (i < 3) {
/* Words 0-2 hold the MAC address, little-endian. */
dev->dev_addr[j++] = value;
dev->dev_addr[j++] = value >> 8;
}
}
if (sum != 0xBABA)
printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
"check settings before activating this device!\n",
dev->name, sum);
/* Don't unregister_netdev(dev); as the EEPro may actually be
usable, especially if the MAC address is set later. */
}
 
/* Reset the chip: stop Tx and Rx processes and clear counters.
This takes less than 10usec and will easily finish before the next
action. */
outl(0, ioaddr + SCBPort);
 
if (eeprom[3] & 0x0100)
product = "OEM i82557/i82558 10/100 Ethernet";
else
product = "Intel EtherExpress Pro 10/100";
 
printk(KERN_INFO "%s: %s at %#3lx, ", dev->name, product, ioaddr);
 
for (i = 0; i < 5; i++)
printk("%2x:", dev->dev_addr[i]);
printk("%2x, IRQ %d.\n", dev->dev_addr[i], irq);
 
#ifndef kernel_bloat
/* OK, this is pure kernel bloat. I don't like it when other drivers
waste non-pageable kernel space to emit similar messages, but I need
them for bug reports. */
{
const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
/* The self-test results must be paragraph aligned. */
s32 str[6], *volatile self_test_results;
int boguscnt = 16000; /* Timeout for set-test. */
if (eeprom[3] & 0x03)
printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
" work-around.\n");
printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
" connectors present:",
eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
for (i = 0; i < 4; i++)
if (eeprom[5] & (1<<i))
printk(connectors[i]);
printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
if (eeprom[7] & 0x0700)
printk(KERN_INFO " Secondary interface chip %s.\n",
phys[(eeprom[7]>>8)&7]);
if (((eeprom[6]>>8) & 0x3f) == DP83840
|| ((eeprom[6]>>8) & 0x3f) == DP83840A) {
int mdi_reg23 = mdio_read(ioaddr, eeprom[6] & 0x1f, 23) | 0x0422;
if (congenb)
mdi_reg23 |= 0x0100;
printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
mdi_reg23);
mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23);
}
if ((option >= 0) && (option & 0x70)) {
printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
(option & 0x20 ? 100 : 10),
(option & 0x10 ? "full" : "half"));
mdio_write(ioaddr, eeprom[6] & 0x1f, 0,
((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
}
 
/* Perform a system self-test. */
self_test_results = (s32*) ((((long) str) + 15) & ~0xf); /* 16-byte align */
self_test_results[0] = 0;
self_test_results[1] = -1; /* The chip overwrites this on completion. */
outl(virt_to_bus(self_test_results) | 1, ioaddr + SCBPort);
do {
udelay(10);
} while (self_test_results[1] == -1 && --boguscnt >= 0);
 
if (boguscnt < 0) { /* Test optimized out. */
printk(KERN_ERR "Self test failed, status %8.8x:\n"
KERN_ERR " Failure to initialize the i82557.\n"
KERN_ERR " Verify that the card is a bus-master"
" capable slot.\n",
self_test_results[1]);
} else
printk(KERN_INFO " General self-test: %s.\n"
KERN_INFO " Serial sub-system self-test: %s.\n"
KERN_INFO " Internal registers self-test: %s.\n"
KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
self_test_results[1] & 0x1000 ? "failed" : "passed",
self_test_results[1] & 0x0020 ? "failed" : "passed",
self_test_results[1] & 0x0008 ? "failed" : "passed",
self_test_results[1] & 0x0004 ? "failed" : "passed",
self_test_results[0]);
}
#endif /* kernel_bloat */
 
/* Reset the chip again after the self-test. */
outl(0, ioaddr + SCBPort);
 
/* We do a request_region() only to register /proc/ioports info. */
request_region(ioaddr, SPEEDO3_TOTAL_SIZE, "Intel Speedo3 Ethernet");
 
dev->base_addr = ioaddr;
dev->irq = irq;
 
if (dev->priv == NULL)
dev->priv = kmalloc(sizeof(*sp), GFP_KERNEL);
sp = dev->priv;
memset(sp, 0, sizeof(*sp));
sp->next_module = root_speedo_dev;
root_speedo_dev = dev;
 
sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
if (card_idx >= 0) {
if (full_duplex[card_idx] >= 0)
sp->full_duplex = full_duplex[card_idx];
}
sp->default_port = option >= 0 ? (option & 0x0f) : 0;
 
sp->phy[0] = eeprom[6];
sp->phy[1] = eeprom[7];
/* Enable the hang workaround unless EEPROM word 3 low bits == 3
(apparently parts marked that way do not need it -- confirm). */
sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
 
if (sp->rx_bug)
printk(KERN_INFO " Receiver lock-up workaround activated.\n");
 
/* The Speedo-specific entries in the device structure. */
dev->open = &speedo_open;
dev->hard_start_xmit = &speedo_start_xmit;
dev->stop = &speedo_close;
dev->get_stats = &speedo_get_stats;
dev->set_multicast_list = &set_rx_mode;
dev->do_ioctl = &speedo_ioctl;
 
return;
}
/* Serial EEPROM section.
A "bit" grungy, but we work our way through bit-by-bit :->. */
/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
#define EE_CS 0x02 /* EEPROM chip select. */
#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
#define EE_WRITE_0 0x01
#define EE_WRITE_1 0x05
#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
#define EE_ENB (0x4800 | EE_CS)
 
/* Delay between EEPROM clock transitions.
This will actually work with no delay on 33Mhz PCI. */
#define eeprom_delay(nanosec) udelay(1);
 
/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD (5 << addr_len)
#define EE_READ_CMD (6 << addr_len)
#define EE_ERASE_CMD (7 << addr_len)
 
/* Read one 16-bit word from the bit-banged serial EEPROM.
   'location' is the word address; 'addr_len' is the address width in
   bits (6 for small parts, 8 for large ones -- see EE_READ_CMD).
   Returns the word read. */
static int read_eeprom(long ioaddr, int location, int addr_len)
{
	int ee_addr = ioaddr + SCBeeprom;
	int cmd = location | EE_READ_CMD;
	unsigned short result = 0;
	int bit;

	/* Select the chip. */
	outw(EE_ENB & ~EE_CS, ee_addr);
	outw(EE_ENB, ee_addr);

	/* Clock out the opcode + address, MSB first (13 bits total). */
	for (bit = 12; bit >= 0; bit--) {
		short level = (cmd & (1 << bit)) ? EE_DATA_WRITE : 0;
		outw(EE_ENB | level, ee_addr);
		eeprom_delay(100);
		outw(EE_ENB | level | EE_SHIFT_CLK, ee_addr);
		eeprom_delay(150);
	}
	outw(EE_ENB, ee_addr);

	/* Clock in the 16 data bits, MSB first. */
	for (bit = 15; bit >= 0; bit--) {
		outw(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay(100);
		result <<= 1;
		if (inw(ee_addr) & EE_DATA_READ)
			result |= 1;
		outw(EE_ENB, ee_addr);
		eeprom_delay(100);
	}

	/* Deselect the chip to terminate the access. */
	outw(EE_ENB & ~EE_CS, ee_addr);
	return result;
}
 
/* Read an MII PHY register through the SCB management interface.
Polls for the ready bit (0x10000000). Fix vs. original: on timeout
the loop now breaks out instead of logging and spinning forever,
so a dead PHY cannot hang the machine.
Returns the 16-bit register value (stale data on timeout). */
static int mdio_read(long ioaddr, int phy_id, int location)
{
int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
do {
val = inl(ioaddr + SCBCtrlMDI);
if (--boguscnt < 0) {
printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
break; /* Do not spin forever if the PHY never answers. */
}
} while (! (val & 0x10000000));
return val & 0xffff;
}
 
/* Write an MII PHY register through the SCB management interface.
Polls for the ready bit (0x10000000). Fix vs. original: on timeout
the loop now breaks out instead of logging and spinning forever.
Returns the low 16 bits of the last MDI read (callers ignore it). */
static int mdio_write(long ioaddr, int phy_id, int location, int value)
{
int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
ioaddr + SCBCtrlMDI);
do {
val = inl(ioaddr + SCBCtrlMDI);
if (--boguscnt < 0) {
printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
break; /* Do not spin forever if the PHY never answers. */
}
} while (! (val & 0x10000000));
return val & 0xffff;
}
 
/* Bring the interface up: install the (shared) IRQ handler, restart
PHY autonegotiation, load the statistics and Rx-ring addresses into
the chip, seed the Tx ring with an IASetup command carrying our MAC
address, start the command unit, and arm the media-monitor timer.
The order of SCB commands below is hardware-mandated; each is
preceded by wait_for_cmd_done().
Returns 0 on success or -EAGAIN if the IRQ could not be acquired. */
static int
speedo_open(struct device *dev)
{
struct speedo_private *sp = (struct speedo_private *)dev->priv;
long ioaddr = dev->base_addr;
 
#ifdef notdef
/* We could reset the chip, but should not need to. */
outl(0, ioaddr + SCBPort);
udelay(10);
#endif
 
if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ,
"Intel EtherExpress Pro 10/100 Ethernet", dev)) {
return -EAGAIN;
}
if (speedo_debug > 1)
printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
 
MOD_INC_USE_COUNT;
 
/* Retrigger negotiation to reset previous errors. */
if ((sp->phy[0] & 0x8000) == 0) {
int phy_addr = sp->phy[0] & 0x1f ;
/* Use 0x3300 for restarting NWay, other values to force xcvr:
0x0000 10-HD
0x0100 10-FD
0x2000 100-HD
0x2100 100-FD
*/
#ifdef honor_default_port
mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
#else
mdio_write(ioaddr, phy_addr, 0, 0x3300);
#endif
}
 
/* Load the statistics block address. */
wait_for_cmd_done(ioaddr + SCBCmd);
outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
outw(INT_MASK | CU_STATSADDR, ioaddr + SCBCmd);
sp->lstats.done_marker = 0;
 
speedo_init_rx_ring(dev);
/* Load a zero Rx base so RX_START's pointer is an absolute address. */
wait_for_cmd_done(ioaddr + SCBCmd);
outl(0, ioaddr + SCBPointer);
outw(INT_MASK | RX_ADDR_LOAD, ioaddr + SCBCmd);
 
/* Todo: verify that we must wait for previous command completion. */
wait_for_cmd_done(ioaddr + SCBCmd);
outl(virt_to_bus(sp->rx_ringp[0]), ioaddr + SCBPointer);
outw(INT_MASK | RX_START, ioaddr + SCBCmd);
 
/* Fill the first command with our physical address. */
{
u16 *eaddrs = (u16 *)dev->dev_addr;
u16 *setup_frm = (u16 *)&(sp->tx_ring[0].tx_desc_addr);
 
/* Avoid a bug(?!) here by marking the command already completed. */
sp->tx_ring[0].status = ((CmdSuspend | CmdIASetup) << 16) | 0xa000;
sp->tx_ring[0].link = virt_to_bus(&(sp->tx_ring[1]));
*setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2];
}
sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[0];
sp->cur_tx = 1;
sp->dirty_tx = 0;
sp->tx_full = 0;
 
/* Load a zero CU base so command links are absolute addresses. */
wait_for_cmd_done(ioaddr + SCBCmd);
outl(0, ioaddr + SCBPointer);
outw(INT_MASK | CU_CMD_BASE, ioaddr + SCBCmd);
 
dev->if_port = sp->default_port;
 
sp->in_interrupt = 0;
dev->tbusy = 0;
dev->interrupt = 0;
dev->start = 1;
 
/* Start the chip's Tx process and unmask interrupts. */
/* Todo: verify that we must wait for previous command completion. */
wait_for_cmd_done(ioaddr + SCBCmd);
outl(virt_to_bus(&sp->tx_ring[0]), ioaddr + SCBPointer);
outw(CU_START, ioaddr + SCBCmd);
 
/* Setup the chip and configure the multicast list. */
sp->mc_setup_frm = NULL;
sp->mc_setup_frm_len = 0;
sp->mc_setup_busy = 0;
sp->rx_mode = -1; /* Invalid -> always reset the mode. */
set_rx_mode(dev);
 
if (speedo_debug > 2) {
printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
dev->name, inw(ioaddr + SCBStatus));
}
/* Set the timer. The timer serves a dual purpose:
1) to monitor the media interface (e.g. link beat) and perhaps switch
to an alternate media type
2) to monitor Rx activity, and restart the Rx process if the receiver
hangs. */
init_timer(&sp->timer);
sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
sp->timer.data = (unsigned long)dev;
sp->timer.function = &speedo_timer; /* timer handler */
add_timer(&sp->timer);
 
wait_for_cmd_done(ioaddr + SCBCmd);
outw(CU_DUMPSTATS, ioaddr + SCBCmd);
/* No need to wait for the command unit to accept here. */
if ((sp->phy[0] & 0x8000) == 0)
mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
return 0;
}
 
/* Media monitoring and control. */
/* Media-monitoring timer callback (fires roughly every 2 seconds).
   Re-issues the multicast-list setup command when the Rx mode was
   never programmed, or when an rx_bug chip has seen no Rx traffic
   for over two seconds (receiver-hang errata), then re-arms itself. */
static void speedo_timer(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int rx_stalled;

	if (speedo_debug > 3) {
		long ioaddr = dev->base_addr;
		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));
	}

	/* No packets in a Long Time -- possibly the receiver-hang bug,
	   which a set-multicast-list command clears. */
	rx_stalled = sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ;
	if (sp->rx_mode < 0 || rx_stalled)
		set_rx_mode(dev);

	/* We must continue to monitor the media. */
	sp->timer.expires = RUN_AT(2*HZ);	/* 2.0 sec. */
	add_timer(&sp->timer);
}
 
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx ring: allocate one skbuff per slot, embed an RxFD
at the head of each buffer, and chain the descriptors together.
A partial ring is tolerated (allocation failure just shortens it).
Fix vs. original: if no skbuff at all could be allocated, the
original dereferenced a NULL 'last_rxf' when marking end-of-list;
that write is now guarded. */
static void
speedo_init_rx_ring(struct device *dev)
{
struct speedo_private *sp = (struct speedo_private *)dev->priv;
struct RxFD *rxf, *last_rxf = NULL;
int i;
 
sp->cur_rx = 0;
 
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
sp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* OK. Just initially short of Rx bufs. */
skb->dev = dev; /* Mark as being used by this device. */
rxf = (struct RxFD *)skb->tail; /* Descriptor lives at the buffer head. */
sp->rx_ringp[i] = rxf;
skb_reserve(skb, sizeof(struct RxFD));
if (last_rxf)
last_rxf->link = virt_to_bus(rxf);
last_rxf = rxf;
rxf->status = 0x00000001; /* '1' is flag value only. */
rxf->link = 0; /* None yet. */
/* This field unused by i82557, we use it as a consistency check. */
#ifdef final_version
rxf->rx_buf_addr = 0xffffffff;
#else
rxf->rx_buf_addr = virt_to_bus(skb->tail);
#endif
rxf->count = 0;
rxf->size = PKT_BUF_SZ;
}
sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* Mark the last entry as end-of-list -- only if one exists. */
if (last_rxf)
last_rxf->status = 0xC0000002; /* '2' is flag value only. */
sp->last_rxf = last_rxf;
}
 
/* Recover from a Tx hang: restart the command unit at the oldest
un-reaped descriptor unless the CU state bits read 0x0080, in which
case only a software interrupt is generated to reap completions;
then reset and re-negotiate the MII PHY. */
static void speedo_tx_timeout(struct device *dev)
{
struct speedo_private *sp = (struct speedo_private *)dev->priv;
long ioaddr = dev->base_addr;
 
printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
" %4.4x at %d/%d command %8.8x.\n",
dev->name, inw(ioaddr + SCBStatus), inw(ioaddr + SCBCmd),
sp->dirty_tx, sp->cur_tx,
sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
/* 0x00C0 masks the SCB CU-state field; NOTE(review): confirm which
state 0x0080 denotes against the 8255x manual. */
if ((inw(ioaddr + SCBStatus) & 0x00C0) != 0x0080) {
printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
dev->name);
outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
ioaddr + SCBPointer);
outw(CU_START, ioaddr + SCBCmd);
} else {
/* Nudge the chip with a driver-generated interrupt. */
outw(DRVR_INT, ioaddr + SCBCmd);
}
/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
if ((sp->phy[0] & 0x8000) == 0) {
int phy_addr = sp->phy[0] & 0x1f;
mdio_write(ioaddr, phy_addr, 0, 0x0400);
mdio_write(ioaddr, phy_addr, 1, 0x0000);
mdio_write(ioaddr, phy_addr, 4, 0x0000);
mdio_write(ioaddr, phy_addr, 0, 0x8000);
#ifdef honor_default_port
mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
#endif
}
sp->stats.tx_errors++;
dev->trans_start = jiffies;
return;
}
 
/* Queue one packet for transmission.
Returns 0 on success, 1 when busy (the queue layer will retry).
Timing-critical: see the "Transmit structure" notes at the top of
the file -- the previous command's CmdSuspend bit must be cleared
and CU_RESUME issued without an intervening delay. */
static int
speedo_start_xmit(struct sk_buff *skb, struct device *dev)
{
struct speedo_private *sp = (struct speedo_private *)dev->priv;
long ioaddr = dev->base_addr;
int entry;
 
/* Block a timer-based transmit from overlapping. This could better be
done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
If this ever occurs the queue layer is doing something evil! */
if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
int tickssofar = jiffies - dev->trans_start;
if (tickssofar < TX_TIMEOUT - 2)
return 1;
if (tickssofar < TX_TIMEOUT) {
/* Reap sent packets from the full Tx queue. */
outw(DRVR_INT, ioaddr + SCBCmd);
return 1;
}
speedo_tx_timeout(dev);
return 1;
}
 
/* Caution: the write order is important here, set the base address
with the "ownership" bits last. */
 
{ /* Prevent interrupts from changing the Tx ring from underneath us. */
unsigned long flags;
 
save_flags(flags);
cli();
/* Calculate the Tx descriptor entry. */
entry = sp->cur_tx++ % TX_RING_SIZE;
 
sp->tx_skbuff[entry] = skb;
/* Todo: be a little more clever about setting the interrupt bit. */
sp->tx_ring[entry].status =
(CmdSuspend | CmdTx | CmdTxFlex) << 16;
sp->tx_ring[entry].link =
virt_to_bus(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
sp->tx_ring[entry].tx_desc_addr =
virt_to_bus(&sp->tx_ring[entry].tx_buf_addr0);
/* The data region is always in one buffer descriptor, Tx FIFO
threshold of 256. */
sp->tx_ring[entry].count = 0x01208000;
sp->tx_ring[entry].tx_buf_addr0 = virt_to_bus(skb->data);
sp->tx_ring[entry].tx_buf_size0 = skb->len;
/* Todo: perhaps leave the interrupt bit set if the Tx queue is more
than half full. Argument against: we should be receiving packets
and scavenging the queue. Argument for: if so, it shouldn't
matter. */
/* Un-suspend the previous command so the CU flows into this one. */
sp->last_cmd->command &= ~(CmdSuspend | CmdIntr);
sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry];
/* NOTE(review): the file-header notes say CU_RESUME must stay inside
the interrupts-disabled region, but restore_flags() precedes it
here -- confirm whether this window is acceptable. */
restore_flags(flags);
/* Trigger the command unit resume. */
wait_for_cmd_done(ioaddr + SCBCmd);
outw(CU_RESUME, ioaddr + SCBCmd);
}
 
/* Leave room for set_rx_mode() to fill two entries. */
if (sp->cur_tx - sp->dirty_tx > TX_RING_SIZE - 3)
sp->tx_full = 1;
else
clear_bit(0, (void*)&dev->tbusy);
 
dev->trans_start = jiffies;
 
return 0;
}
 
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
/* Interrupt handler: acknowledges SCB interrupt sources, services Rx
(speedo_rx), restarts a stalled receiver, and scavenges completed
Tx descriptors, looping until no sources remain or
max_interrupt_work iterations have been spent. */
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct device *dev = (struct device *)dev_instance;
struct speedo_private *sp;
long ioaddr, boguscnt = max_interrupt_work;
unsigned short status;
 
#ifndef final_version
if (dev == NULL) {
printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
return;
}
#endif
 
ioaddr = dev->base_addr;
sp = (struct speedo_private *)dev->priv;
#ifndef final_version
/* A lock to prevent simultaneous entry on SMP machines. */
if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
dev->name);
sp->in_interrupt = 0; /* Avoid halting machine. */
return;
}
dev->interrupt = 1;
#endif
 
do {
status = inw(ioaddr + SCBStatus);
/* Acknowledge all of the current interrupt sources ASAP. */
outw(status & 0xfc00, ioaddr + SCBStatus);
 
if (speedo_debug > 4)
printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
dev->name, status);
 
if ((status & 0xfc00) == 0)
break; /* No interrupt sources pending: done. */
 
if (status & 0x4000) /* Packet received. */
speedo_rx(dev);
 
if (status & 0x1000) { /* Rx unit needs attention. */
if ((status & 0x003c) == 0x0028) /* No more Rx buffers. */
outw(RX_RESUMENR, ioaddr + SCBCmd);
else if ((status & 0x003c) == 0x0008) { /* No resources (why?!) */
/* No idea of what went wrong. Restart the receiver. */
outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
ioaddr + SCBPointer);
outw(RX_START, ioaddr + SCBCmd);
}
sp->stats.rx_errors++;
}
 
/* User interrupt, Command/Tx unit interrupt or CU not active. */
if (status & 0xA400) {
unsigned int dirty_tx = sp->dirty_tx;
 
while (sp->cur_tx - dirty_tx > 0) {
int entry = dirty_tx % TX_RING_SIZE;
/* NOTE(review): this 'status' shadows the outer SCB status. */
int status = sp->tx_ring[entry].status;
 
if (speedo_debug > 5)
printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
entry, status);
if ((status & StatusComplete) == 0)
break; /* It still hasn't been processed. */
/* Free the original skb. */
if (sp->tx_skbuff[entry]) {
sp->stats.tx_packets++; /* Count only user packets. */
dev_free_skb(sp->tx_skbuff[entry]);
sp->tx_skbuff[entry] = 0;
} else if ((sp->tx_ring[entry].status&0x70000) == CmdNOp << 16)
sp->mc_setup_busy = 0; /* Multicast setup frame finished. */
dirty_tx++;
}
 
#ifndef final_version
if (sp->cur_tx - dirty_tx > TX_RING_SIZE) {
printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
" full=%d.\n",
dirty_tx, sp->cur_tx, sp->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
 
if (sp->tx_full && dev->tbusy
&& dirty_tx > sp->cur_tx - TX_RING_SIZE + 2) {
/* The ring is no longer full, clear tbusy. */
sp->tx_full = 0;
clear_bit(0, (void*)&dev->tbusy);
mark_bh(NET_BH);
}
 
sp->dirty_tx = dirty_tx;
}
 
if (--boguscnt < 0) {
printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
dev->name, status);
/* Clear all interrupt sources. */
/* NOTE(review): 32-bit outl to a register accessed with inw/outw
elsewhere -- confirm this is intentional. */
outl(0xfc00, ioaddr + SCBStatus);
break;
}
} while (1);
 
if (speedo_debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
dev->name, inw(ioaddr + SCBStatus));
 
dev->interrupt = 0;
clear_bit(0, (void*)&sp->in_interrupt);
return;
}
 
/* Service the Rx ring: hand every completed frame up to the network
 * layer, then refill and re-link the ring so the chip can keep
 * receiving.  Called from the interrupt handler.  Always returns 0.
 */
static int
speedo_rx(struct device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry = sp->cur_rx % RX_RING_SIZE;
	int status;
	/* Bound the work per call to one full ring's worth of frames so a
	   receive flood cannot monopolize the CPU inside the interrupt. */
	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;

	if (speedo_debug > 4)
		printk(KERN_DEBUG " In speedo_rx().\n");
	/* If we own the next entry, it's a new packet. Send it up. */
	while (sp->rx_ringp[entry] != NULL &&
		   (status = sp->rx_ringp[entry]->status) & RxComplete) {

		if (--rx_work_limit < 0)
			break;
		if (speedo_debug > 4)
			printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
				   sp->rx_ringp[entry]->count & 0x3fff);
		/* A frame is good only when RxOK is set and RxErrTooBig clear. */
		if ((status & (RxErrTooBig|RxOK)) != RxOK) {
			if (status & RxErrTooBig)
				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
					   "status %8.8x!\n", dev->name, status);
			else if ( ! (status & 0x2000)) {
				/* There was a fatal error. This *should* be impossible. */
				sp->stats.rx_errors++;
				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
					   "status %8.8x.\n",
					   dev->name, status);
			}
		} else {
			/* The low 14 bits of 'count' hold the received byte count. */
			int pkt_len = sp->rx_ringp[entry]->count & 0x3fff;
			struct sk_buff *skb;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
				/* Short frame: copy into a fresh small skb and leave the
				   original ring buffer in place for reuse. */
				skb->dev = dev;
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
#if 0
				/* Packet is in one chunk -- we can copy + cksum. */
				eth_copy_and_sum(skb,
					bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
					pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					   bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr), pkt_len);
#endif
			} else {
				void *temp;
				/* Pass up the already-filled skbuff. */
				skb = sp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
						   dev->name);
					break;
				}
				/* The ring entry is consumed; the refill loop below will
				   allocate a replacement buffer for it. */
				sp->rx_skbuff[entry] = NULL;
				temp = skb_put(skb, pkt_len);
				/* Sanity check: the descriptor's DMA address must map back
				   to this skb's data area. */
				if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
					printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
						   "addresses do not match in speedo_rx: %p vs. %p "
						   "/ %p.\n", dev->name,
						   bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
						   skb->head, temp);
				sp->rx_ringp[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			sp->stats.rx_packets++;
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; sp->dirty_rx < sp->cur_rx; sp->dirty_rx++) {
		struct RxFD *rxf;
		entry = sp->dirty_rx % RX_RING_SIZE;
		if (sp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			/* Get a fresh skbuff to replace the consumed one.  The RxFD
			   descriptor lives at the head of the skb data area. */
			skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
			sp->rx_skbuff[entry] = skb;
			if (skb == NULL) {
				sp->rx_ringp[entry] = NULL;
				break;			/* Better luck next time!  */
			}
			rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
			skb->dev = dev;
			skb_reserve(skb, sizeof(struct RxFD));
			rxf->rx_buf_addr = virt_to_bus(skb->tail);
		} else {
			rxf = sp->rx_ringp[entry];
		}
		/* Re-arm the descriptor: '1' in the low bit is for driver use
		   only; the high bits mark end-of-list/suspend for the chip. */
		rxf->status = 0xC0000001; 	/* '1' for driver use only. */
		rxf->link = 0;			/* None yet. */
		rxf->count = 0;
		rxf->size = PKT_BUF_SZ;
		/* Chain the new descriptor behind the previous tail and clear
		   the previous tail's EL/S bits so the chip can advance. */
		sp->last_rxf->link = virt_to_bus(rxf);
		sp->last_rxf->status &= ~0xC0000000;
		sp->last_rxf = rxf;
	}

	sp->last_rx_time = jiffies;
	return 0;
}
 
/* Bring the interface down: stop the chip, shut off the media timer,
 * release the IRQ and free every skbuff still held in the Rx/Tx rings.
 * Always returns 0.
 */
static int
speedo_close(struct device *dev)
{
	long ioaddr = dev->base_addr;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int i;

	/* Mark the device stopped so the stack queues no further packets. */
	dev->start = 0;
	dev->tbusy = 1;

	if (speedo_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, inw(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer(&sp->timer);

	/* Disable interrupts, and stop the chip's Rx process. */
	outw(INT_MASK, ioaddr + SCBCmd);
	outw(INT_MASK | RX_ABORT, ioaddr + SCBCmd);

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = 0;
		/* Clear the Rx descriptors. */
		if (skb)
			dev_free_skb(skb);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = 0;
		/* Clear the Tx descriptors. */
		if (skb)
			dev_free_skb(skb);
	}
	/* Release any pending multicast-setup frame. */
	if (sp->mc_setup_frm) {
		kfree(sp->mc_setup_frm);
		sp->mc_setup_frm_len = 0;
	}

	/* Print a few items for debugging. */
	/* NOTE(review): rx_ringp[] entries point into the Rx skbuffs that
	   were just freed above, so this debug dump reads freed memory —
	   harmless in practice on this platform, but worth confirming. */
	if (speedo_debug > 3) {
		int phy_num = sp->phy[0] & 0x1f;
		printk(KERN_DEBUG "%s:Printing Rx ring (next to receive into %d).\n",
			   dev->name, sp->cur_rx);

		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG " Rx ring entry %d %8.8x.\n",
				   i, (int)sp->rx_ringp[i]->status);

		for (i = 0; i < 5; i++)
			printk(KERN_DEBUG " PHY index %d register %d is %4.4x.\n",
				   phy_num, i, mdio_read(ioaddr, phy_num, i));
		for (i = 21; i < 26; i++)
			printk(KERN_DEBUG " PHY index %d register %d is %4.4x.\n",
				   phy_num, i, mdio_read(ioaddr, phy_num, i));
	}
	MOD_DEC_USE_COUNT;

	return 0;
}
 
/* The Speedo-3 has an especially awkward and unusable method of getting
statistics out of the chip. It takes an unpredictable length of time
for the dump-stats command to complete. To avoid a busy-wait loop we
update the stats with the previous dump results, and then trigger a
new dump.
 
These problems are mitigated by the current /proc implementation, which
calls this routine first to judge the output length, and then to emit the
output.
 
Oh, and incoming frames are dropped while executing dump-stats!
*/
/* Return (and lazily refresh) the interface statistics.
 *
 * The chip's dump-stats command completes asynchronously, so instead of
 * busy-waiting we fold in the results of the PREVIOUS dump — detected by
 * the 0xA007 done_marker magic the chip writes on completion — and then
 * trigger a new dump for the next call.
 */
static struct enet_statistics *
speedo_get_stats(struct device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;

	if (sp->lstats.done_marker == 0xA007) {	/* Previous dump finished */
		sp->stats.tx_aborted_errors += sp->lstats.tx_coll16_errs;
		sp->stats.tx_window_errors += sp->lstats.tx_late_colls;
		sp->stats.tx_fifo_errors += sp->lstats.tx_underruns;
		/* Fix: lost carrier is a carrier error, not a FIFO error.
		   The original accumulated tx_lost_carrier into tx_fifo_errors,
		   double-counting FIFO errors and never reporting carrier loss. */
		sp->stats.tx_carrier_errors += sp->lstats.tx_lost_carrier;
		/*sp->stats.tx_deferred += sp->lstats.tx_deferred;*/
		sp->stats.collisions += sp->lstats.tx_total_colls;
		sp->stats.rx_crc_errors += sp->lstats.rx_crc_errs;
		sp->stats.rx_frame_errors += sp->lstats.rx_align_errs;
		sp->stats.rx_over_errors += sp->lstats.rx_resource_errs;
		sp->stats.rx_fifo_errors += sp->lstats.rx_overrun_errs;
		sp->stats.rx_length_errors += sp->lstats.rx_runt_errs;
		/* Clear the marker so the same dump isn't counted twice. */
		sp->lstats.done_marker = 0x0000;
		if (dev->start) {
			/* Only kick a new dump while the interface is running. */
			wait_for_cmd_done(ioaddr + SCBCmd);
			outw(CU_DUMPSTATS, ioaddr + SCBCmd);
		}
	}
	return &sp->stats;
}
 
/* Private-ioctl handler exposing MII/PHY access to user space:
 *   SIOCDEVPRIVATE   - return the active PHY address in data[0] (then
 *                      falls through to also perform a register read)
 *   SIOCDEVPRIVATE+1 - read MII register data[1] of PHY data[0] into data[3]
 *   SIOCDEVPRIVATE+2 - write data[2] to MII register (superuser only)
 * Returns 0 on success, -EPERM or -EOPNOTSUPP on failure.
 */
static int speedo_ioctl(struct device *dev, struct ifreq *rq, int cmd)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	/* The u16 words are stored directly in the ifr_data field. */
	u16 *data = (u16 *)&rq->ifr_data;
	int phy = sp->phy[0] & 0x1f;	/* PHY address is the low 5 bits. */

    switch(cmd) {
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
		data[0] = phy;
		/* fallthrough -- intentionally also performs the read below. */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = mdio_read(ioaddr, data[0], data[1]);
		return 0;
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		if (!suser())
			return -EPERM;
		mdio_write(ioaddr, data[0], data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
 
/* Set or clear the multicast filter for this adaptor.
This is very ugly with Intel chips -- we usually have to execute an
entire configuration command, plus process a multicast command.
This is complicated. We must put a large configuration command and
an arbitrarily-sized multicast command in the transmit list.
To minimize the disruption -- the previous command might have already
loaded the link -- we convert the current command block, normally a Tx
command, into a no-op and link it to the new command.
*/
/* Apply the current Rx filter mode and multicast list to the chip by
 * splicing configuration / multicast-list command blocks into the Tx
 * command chain (see the block comment above for the overall strategy).
 * May defer itself by setting sp->rx_mode = -1 when resources are busy.
 */
static void
set_rx_mode(struct device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	struct descriptor_net *last_cmd;
	/* Rx mode encoding: 3 = promiscuous, 1 = accept all multicast,
	   0 = normal (explicit multicast list). */
	char new_rx_mode;
	unsigned long flags;
	int entry, i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		new_rx_mode = 3;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			   dev->mc_count > multicast_filter_limit) {
		new_rx_mode = 1;
	} else
		new_rx_mode = 0;

	if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
		/* The Tx ring is full -- don't add anything! Presumably the new mode
		   is in config_cmd_data and will be added anyway. */
		/* rx_mode = -1 forces a retry on a later call. */
		sp->rx_mode = -1;
		return;
	}

	if (new_rx_mode != sp->rx_mode) {
		/* Mode changed: queue a full CmdConfigure block on the Tx ring. */
		u8 *config_cmd_data;

		save_flags(flags);			/* Lock to protect sp->cur_tx. */
		cli();
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry];
		restore_flags(flags);

		sp->tx_skbuff[entry] = 0;			/* Redundant. */
		sp->tx_ring[entry].status = (CmdSuspend | CmdConfigure) << 16;
		sp->tx_ring[entry].link =
			virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
		/* The configuration bytes overlay the tx_desc_addr field onward. */
		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
		/* Construct a full CmdConfig frame. */
		memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
		config_cmd_data[1] = (txfifo << 4) | rxfifo;
		config_cmd_data[4] = rxdmacount;
		config_cmd_data[5] = txdmacount + 0x80;
		/* Bit patterns per the i8255x config command layout:
		   byte 15 bit 0 = promiscuous, byte 21 = multicast-all. */
		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
		config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
			config_cmd_data[15] |= 0x80;
			config_cmd_data[8] = 0;
		}
		/* Trigger the command unit resume. */
		last_cmd->command &= ~CmdSuspend;
		wait_for_cmd_done(ioaddr + SCBCmd);
		outw(CU_RESUME, ioaddr + SCBCmd);
	}

	if (new_rx_mode == 0 && dev->mc_count < 4) {
		/* The simple case of 0-3 multicast list entries occurs often, and
		   fits within one tx_ring[] entry. */
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;

		save_flags(flags);			/* Lock to protect sp->cur_tx. */
		cli();
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = (struct descriptor_net *)&sp->tx_ring[entry];
		restore_flags(flags);

		sp->tx_skbuff[entry] = 0;
		sp->tx_ring[entry].status = (CmdSuspend | CmdMulticastList) << 16;
		sp->tx_ring[entry].link =
			virt_to_bus(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
		/* First word: total byte count of the address list (6 per addr). */
		*setup_params++ = dev->mc_count*6;
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			 i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			/* Copy the 6-byte Ethernet address as three 16-bit words. */
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		last_cmd->command &= ~CmdSuspend;
		/* Immediately trigger the command unit resume. */
		wait_for_cmd_done(ioaddr + SCBCmd);
		outw(CU_RESUME, ioaddr + SCBCmd);
	} else if (new_rx_mode == 0) {
		/* Large multicast list: build an out-of-ring setup frame and link
		   it in by converting a Tx ring entry into a NoOp pointing at it. */
		struct dev_mc_list *mclist;
		u16 *setup_params, *eaddrs;
		struct descriptor_net *mc_setup_frm = sp->mc_setup_frm;
		int i;

		if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
			|| sp->mc_setup_frm == NULL) {
			/* Allocate a full setup frame, 10bytes + <max addrs>. */
			if (sp->mc_setup_frm)
				kfree(sp->mc_setup_frm);
			sp->mc_setup_busy = 0;
			sp->mc_setup_frm_len = 10 + multicast_filter_limit*6;
			sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
			if (sp->mc_setup_frm == NULL) {
				printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
					   dev->name);
				sp->rx_mode = -1; /* We failed, try again. */
				return;
			}
		}
		/* If we are busy, someone might be quickly adding to the MC list.
		   Try again later when the list changes stop. */
		if (sp->mc_setup_busy) {
			sp->rx_mode = -1;
			return;
		}
		mc_setup_frm = sp->mc_setup_frm;
		/* Fill the setup frame. */
		if (speedo_debug > 1)
			printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
				   "%d bytes.\n",
				   dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
		/* CmdIntr makes the interrupt handler clear mc_setup_busy when
		   the chip finishes processing this frame. */
		mc_setup_frm->status = 0;
		mc_setup_frm->command = CmdSuspend | CmdIntr | CmdMulticastList;
		/* Link set below. */
		setup_params = (u16 *)&mc_setup_frm->params;
		*setup_params++ = dev->mc_count*6;
		/* Fill in the multicast addresses. */
		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
			 i++, mclist = mclist->next) {
			eaddrs = (u16 *)mclist->dmi_addr;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
			*setup_params++ = *eaddrs++;
		}

		/* Disable interrupts while playing with the Tx Cmd list. */
		save_flags(flags);
		cli();
		entry = sp->cur_tx++ % TX_RING_SIZE;
		last_cmd = sp->last_cmd;
		sp->last_cmd = mc_setup_frm;
		sp->mc_setup_busy++;
		restore_flags(flags);

		/* Change the command to a NoOp, pointing to the CmdMulti command. */
		sp->tx_skbuff[entry] = 0;
		sp->tx_ring[entry].status = CmdNOp << 16;
		sp->tx_ring[entry].link = virt_to_bus(mc_setup_frm);

		/* Set the link in the setup frame. */
		mc_setup_frm->link =
			virt_to_bus(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));

		last_cmd->command &= ~CmdSuspend;
		/* Immediately trigger the command unit resume. */
		wait_for_cmd_done(ioaddr + SCBCmd);
		outw(CU_RESUME, ioaddr + SCBCmd);
		if (speedo_debug > 5)
			printk(" CmdMCSetup frame length %d in entry %d.\n",
				   dev->mc_count, entry);
	}

	sp->rx_mode = new_rx_mode;
}
#ifdef MODULE
 
/* Module entry point: optionally override the debug level from the
 * 'debug' module parameter, announce the driver, and probe for cards.
 * Returns 0 when at least one card was found, -ENODEV otherwise.
 */
int
init_module(void)
{
	if (debug >= 0)
		speedo_debug = debug;
	if (speedo_debug)
		printk(KERN_INFO "%s", version);

	root_speedo_dev = NULL;
	if (eepro100_init(NULL) == 0)
		return -ENODEV;
	return 0;
}
 
void
cleanup_module(void)
{
struct device *next_dev;
 
/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
while (root_speedo_dev) {
next_dev = ((struct speedo_private *)root_speedo_dev->priv)->next_module;
unregister_netdev(root_speedo_dev);
release_region(root_speedo_dev->base_addr, SPEEDO3_TOTAL_SIZE);
kfree(root_speedo_dev);
root_speedo_dev = next_dev;
}
}
#else /* not MODULE */
/* Built-in (non-module) probe entry point: scan for cards and report
 * the driver version when any are found.
 * Returns 0 on success, -ENODEV when no card is present.
 */
int eepro100_probe(struct device *dev)
{
	int found = eepro100_init(dev);

	if (found && speedo_debug > 0)
		printk(version);

	return found ? 0 : -ENODEV;
}
#endif /* MODULE */
/*
* Local variables:
* compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
* SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* End:
*/