configure your card and that /etc/pcmcia/wireless.opts works :
http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html
+Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards
+CONFIG_AIRO
+ This is the standard Linux driver to support Cisco/Aironet ISA
+ and PCI 802.11 wireless cards.
+ It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
+  - with or without encryption) as well as cards before the Cisco
+  acquisition (Aironet 4500, Aironet 4800, Aironet 4800B).
+
+  This driver supports both the standard Linux Wireless Extensions
+  and the Cisco proprietary API, so both the Linux Wireless Tools and the
+ Cisco Linux utilities can be used to configure the card.
+
+ The driver can be compiled as a module and will be named "airo.o".
+
+Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards
+CONFIG_AIRO_CS
+ This is the standard Linux driver to support Cisco/Aironet PCMCIA
+  802.11 wireless cards. This driver is the same as the Aironet
+  driver that is part of the Linux PCMCIA package.
+ It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
+  - with or without encryption) as well as cards before the Cisco
+  acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also
+  supports OEM versions of Cisco cards such as the DELL TrueMobile
+  4800 and Xircom 802.11b cards.
+
+  This driver supports both the standard Linux Wireless Extensions
+  and the Cisco proprietary API, so both the Linux Wireless Tools and the
+ Cisco Linux utilities can be used to configure the card.
+
+ To use your PC-cards, you will need supporting software from David
+ Hinds' pcmcia-cs package (see the file Documentation/Changes for
+ location). You also want to check out the PCMCIA-HOWTO, available
+ from http://www.linuxdoc.org/docs.html#howto .
+
Aviator/Raytheon 2.4MHz wireless support
CONFIG_PCMCIA_RAYCS
Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
smbfs.txt
- info on using filesystems with the SMB protocol (Windows 3.11 and NT)
sysv-fs.txt
- - info on the SystemV/Coherent filesystem.
+ - info on the SystemV/V7/Xenix/Coherent filesystem.
udf.txt
- info and mount options for the UDF filesystem.
ufs.txt
Changes for patch v180
- Fixed !CONFIG_DEVFS_FS stub declaration of <devfs_get_info>
+===============================================================================
+Changes for patch v181
+
+- Answered question posed by Al Viro and removed his comments from <devfs_open>
+
+- Moved setting of registered flag after other fields are changed
+
+- Fixed race between <devfsd_close> and <devfsd_notify_one>
+
+- Global VFS changes added bogus BKL to devfsd_close(): removed
+
+- Widened locking in <devfs_readlink> and <devfs_follow_link>
+
+- Replaced <devfsd_read> stack usage with <devfsd_ioctl> kmalloc
+
+- Simplified locking in <devfsd_ioctl> and fixed memory leak
lastbus=N [IA-32] Scan all buses till bus #N. Can be useful
if the kernel is unable to find your secondary buses
and you want to tell it explicitly which ones they are.
+ assign-busses [IA-32] Always assign all PCI bus
+ numbers ourselves, overriding
+ whatever the firmware may have
+ done.
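+				(This is a sub-option of "pci=":
+				boot with "pci=assign-busses" to
+				enable it.)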
pd. [PARIDE]
* Add ETHTOOL_GDRVINFO ioctl support to all ethernet drivers.
-
-
-To-do items to consider for network drivers
--------------------------------------------
-* Make a single function which handles the ethtool ioctl for
- most MII-compatible devices? Ideally the driver would pass function
- pointers to its existing mdio_{read,write} functions when calling the
- generic ioctl handler.
-
other media types does not occur.
+Transmit error, Tx status register 82
+-------------------------------------
+
+This is a common error which is almost always caused by another host on
+the same network being in full-duplex mode, while this host is in
+half-duplex mode. You need to find that other host and make it run in
+half-duplex mode or fix this host to run in full-duplex mode.
+
+As a last resort, you can force the 3c59x driver into full-duplex mode
+with
+
+ options 3c59x full_duplex=1
+
+but this has to be viewed as a workaround for broken network gear and
+should only really be used for equipment which cannot autonegotiate.
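+
+With a modular kernel, that line typically belongs in /etc/modules.conf
+(or /etc/conf.modules on older installations); the parameter can also be
+passed directly when loading the driver, for example:
+
+	insmod 3c59x full_duplex=1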
+
+
Additional resources
--------------------
S: Unmaintained
SCSI TAPE DRIVER
-P: Kai Mdkisara
+P: Kai Mäkisara
M: Kai.Makisara@metla.fi
L: linux-scsi@vger.kernel.org
S: Maintained
L: samba@samba.org
S: Maintained
-SMP: (except SPARC)
-P: Linus Torvalds
-M: torvalds@transmeta.com
-L: linux-smp@vger.kernel.org
-S: Maintained
-
SOFTWARE RAID (Multiple Disks) SUPPORT
P: Ingo Molnar
M: mingo@redhat.com
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 6
-EXTRAVERSION =-pre3
+EXTRAVERSION =-pre4
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
DRIVERS-$(CONFIG_PCI) += drivers/pci/driver.o
DRIVERS-$(CONFIG_MTD) += drivers/mtd/mtdlink.o
DRIVERS-$(CONFIG_PCMCIA) += drivers/pcmcia/pcmcia.o
-DRIVERS-$(CONFIG_PCMCIA_NETCARD) += drivers/net/pcmcia/pcmcia_net.o
+DRIVERS-$(CONFIG_NET_PCMCIA) += drivers/net/pcmcia/pcmcia_net.o
+DRIVERS-$(CONFIG_NET_WIRELESS) += drivers/net/wireless/wireless_net.o
DRIVERS-$(CONFIG_PCMCIA_CHRDEV) += drivers/char/pcmcia/pcmcia_char.o
DRIVERS-$(CONFIG_DIO) += drivers/dio/dio.a
DRIVERS-$(CONFIG_SBUS) += drivers/sbus/sbus_all.o
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/mm.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(__delay);
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(udelay);
+
EXPORT_SYMBOL(__direct_map_base);
EXPORT_SYMBOL(__direct_map_size);
int
get_irq_list(char *buf)
{
- int i, j;
+#ifdef CONFIG_SMP
+ int j;
+#endif
+ int i;
struct irqaction * action;
char *p = buf;
#include <linux/mman.h>
#include <linux/elfcore.h>
#include <linux/reboot.h>
+#include <linux/tty.h>
#include <linux/console.h>
#include <asm/reg.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/tty.h>
#include <asm/compiler.h>
#include <asm/ptrace.h>
endif
OBJS = __divqu.o __remqu.o __divlu.o __remlu.o \
+ udelay.o \
$(ev6)memset.o \
$(ev6)memcpy.o \
memmove.o \
--- /dev/null
+#include <linux/config.h>
+#include <linux/sched.h> /* for udelay's use of smp_processor_id */
+#include <asm/param.h>
+#include <asm/smp.h>
+#include <linux/delay.h>
+
+/*
+ * Copyright (C) 1993, 2000 Linus Torvalds
+ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
+/*
+ * Use only for very small delays (< 1 msec).
+ *
+ * The active part of our cycle counter is only 32-bits wide, and
+ * we're treating the difference between two marks as signed. On
+ * a 1GHz box, that's about 2 seconds.
+ */
+
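+/*
+ * Spin until the 32-bit cycle counter (read with rpcc) has advanced
+ * by "loops" ticks: compute end = now + loops, then loop while
+ * (end - now) is still positive.
+ */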
+void __delay(int loops)
+{
+ int tmp;
+ __asm__ __volatile__(
+ " rpcc %0\n"
+ " addl %1,%0,%1\n"
+ "1: rpcc %0\n"
+ " subl %1,%0,%0\n"
+ " bgt %0,1b"
+ : "=&r" (tmp), "=r" (loops) : "1"(loops));
+}
+
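+/*
+ * (HZ << 32) / 1000000 is the number of jiffies per microsecond in
+ * 32.32 fixed point; multiplying by lpj (loops per jiffy) and by
+ * usecs, then taking the upper 32 bits, gives approximately
+ * usecs * lpj * HZ / 10^6 -- the number of loops to burn.
+ */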
+void __udelay(unsigned long usecs, unsigned long lpj)
+{
+ usecs *= (((unsigned long)HZ << 32) / 1000000) * lpj;
+ __delay((long)usecs >> 32);
+}
+
+void udelay(unsigned long usecs)
+{
+#ifdef CONFIG_SMP
+ __udelay(usecs, cpu_data[smp_processor_id()].loops_per_jiffy);
+#else
+ __udelay(usecs, loops_per_jiffy);
+#endif
+}
+
# CONFIG_BLK_DEV_HD_IDE is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_BLK_DEV_IDEDISK=y
-# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_IDEDISK_MULTI_MODE=y
# CONFIG_BLK_DEV_IDEDISK_VENDOR is not set
# CONFIG_BLK_DEV_IDEDISK_FUJITSU is not set
# CONFIG_BLK_DEV_IDEDISK_IBM is not set
CONFIG_BLK_DEV_RZ1000=y
CONFIG_BLK_DEV_IDEPCI=y
CONFIG_IDEPCI_SHARE_IRQ=y
-# CONFIG_BLK_DEV_IDEDMA_PCI is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
# CONFIG_BLK_DEV_OFFBOARD is not set
-# CONFIG_IDEDMA_PCI_AUTO is not set
-# CONFIG_BLK_DEV_IDEDMA is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_PCI_WIP is not set
# CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
# CONFIG_BLK_DEV_AEC62XX is not set
# CONFIG_BLK_DEV_HPT34X is not set
# CONFIG_HPT34X_AUTODMA is not set
# CONFIG_BLK_DEV_HPT366 is not set
-# CONFIG_BLK_DEV_PIIX is not set
-# CONFIG_PIIX_TUNING is not set
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_PIIX_TUNING=y
# CONFIG_BLK_DEV_NS87415 is not set
# CONFIG_BLK_DEV_OPTI621 is not set
# CONFIG_BLK_DEV_PDC202XX is not set
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
# CONFIG_IDE_CHIPSETS is not set
-# CONFIG_IDEDMA_AUTO is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_IDEDMA_IVB is not set
# CONFIG_DMA_NONPCI is not set
CONFIG_BLK_DEV_IDE_MODES=y
# CONFIG_PCMCIA_NETWAVE is not set
# CONFIG_PCMCIA_WAVELAN is not set
# CONFIG_AIRONET4500_CS is not set
-CONFIG_PCMCIA_NETCARD=y
#
# Amateur Radio support
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
+#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
* PCI Ne2000 networking cards and PII/PIII processors, dual
* BX chipset. ]
*/
-#if 0
+ /*
+ * Actually disabling the focus CPU check just makes the hang less
+	 * frequent as it makes the interrupt distribution model be more
+ * like LRU than MRU (the short-term load is more even across CPUs).
+ * See also the comment in end_level_ioapic_irq(). --macro
+ */
+#if 1
/* Enable focus processor (bit==0) */
value &= ~(1<<9);
#else
apic_write(APIC_ESR, 0);
v1 = apic_read(APIC_ESR);
ack_APIC_irq();
- irq_err_count++;
+ atomic_inc(&irq_err_count);
/* Here is what the APIC error bits mean:
0: Send CS error
*
* On entry, %esi points to the real-mode code as a 32-bit pointer.
*/
-ENTRY(stext)
-ENTRY(_stext)
startup_32:
/*
* Set segments to known values
.org 0x5000
+/*
+ * Real beginning of normal "text" segment
+ */
+ENTRY(stext)
+ENTRY(_stext)
+
/*
* This starts the data section. Note that the above is all
* in the text section because it has alignment requirements
#include <linux/init.h>
#include <linux/kernel_stat.h>
+#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
printk("spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}
- irq_err_count++;
+ atomic_inc(&irq_err_count);
/*
* Theoretically we do not have to handle this IRQ,
* but in Linux this does not cause problems and is
#include <asm/smp.h>
#include <asm/desc.h>
+#define APIC_LOCKUP_DEBUG
+
static spinlock_t ioapic_lock = SPIN_LOCK_UNLOCKED;
/*
static void name##_IO_APIC_irq (unsigned int irq) \
__DO_ACTION(R, ACTION, FINAL)
-DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic))/* mask = 1 */
-DO_ACTION( __unmask, 0, &= 0xfffeffff, ) /* mask = 0 */
+DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
+ /* mask = 1 */
+DO_ACTION( __unmask, 0, &= 0xfffeffff, )
+ /* mask = 0 */
+DO_ACTION( __mask_and_edge, 0, = (reg & 0xffff7fff) | 0x00010000, )
+ /* mask = 1, trigger = 0 */
+DO_ACTION( __unmask_and_level, 0, = (reg & 0xfffeffff) | 0x00008000, )
+ /* mask = 0, trigger = 1 */
static void mask_IO_APIC_irq (unsigned int irq)
{
v = apic_read(APIC_EOI);
printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
+ v = apic_read(APIC_RRR);
+ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
v = apic_read(APIC_LDR);
printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
v = apic_read(APIC_DFR);
#define enable_level_ioapic_irq unmask_IO_APIC_irq
#define disable_level_ioapic_irq mask_IO_APIC_irq
-static void end_level_ioapic_irq (unsigned int i)
+static void end_level_ioapic_irq (unsigned int irq)
{
+ unsigned long v;
+
+/*
+ * It appears there is an erratum which affects at least version 0x11
+ * of I/O APIC (that's the 82093AA and cores integrated into various
+ * chipsets). Under certain conditions a level-triggered interrupt is
+ * erroneously delivered as an edge-triggered one, but the respective IRR
+ * bit gets set nevertheless. As a result the I/O unit expects an EOI
+ * message but it will never arrive and further interrupts are blocked
+ * from the source. The exact reason is so far unknown, but the
+ * phenomenon was observed when two consecutive interrupt requests
+ * from a given source get delivered to the same CPU and the source is
+ * temporarily disabled in between.
+ *
+ * A workaround is to simulate an EOI message manually. We achieve it
+ * by setting the trigger mode to edge and then to level when the edge
+ * trigger mode gets detected in the TMR of a local APIC for a
+ * level-triggered interrupt. We mask the source for the time of the
+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
+ * The idea is from Manfred Spraul. --macro
+ */
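+	/*
+	 * The TMR is 256 bits wide, spread over eight 32-bit registers
+	 * spaced 0x10 apart, so (vector & ~0x1f) >> 1 is the offset of
+	 * the register holding this vector's trigger-mode bit; the bit
+	 * itself is tested below with (1 << (vector & 0x1f)).
+	 */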
+ v = apic_read(APIC_TMR + ((IO_APIC_VECTOR(irq) & ~0x1f) >> 1));
+
ack_APIC_irq();
+
+ if (!(v & (1 << (IO_APIC_VECTOR(irq) & 0x1f)))) {
+#ifdef APIC_MISMATCH_DEBUG
+ atomic_inc(&irq_mis_count);
+#endif
+ spin_lock(&ioapic_lock);
+ __mask_and_edge_IO_APIC_irq(irq);
+#ifdef APIC_LOCKUP_DEBUG
+ for (;;) {
+ struct irq_pin_list *entry = irq_2_pin + irq;
+ unsigned int reg;
+
+ if (entry->pin == -1)
+ break;
+ reg = io_apic_read(entry->apic, 0x10 + entry->pin * 2);
+ if (reg & 0x00004000)
+ printk(KERN_CRIT "Aieee!!! Remote IRR"
+ " still set after unlock!\n");
+ if (!entry->next)
+ break;
+ entry = irq_2_pin + entry->next;
+ }
+#endif
+ __unmask_and_level_IO_APIC_irq(irq);
+ spin_unlock(&ioapic_lock);
+ }
}
-static void mask_and_ack_level_ioapic_irq (unsigned int i) { /* nothing */ }
+static void mask_and_ack_level_ioapic_irq (unsigned int irq) { /* nothing */ }
static void set_ioapic_affinity (unsigned int irq, unsigned long mask)
{
#include <linux/irq.h>
#include <linux/proc_fs.h>
+#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
end_none
};
-volatile unsigned long irq_err_count;
+atomic_t irq_err_count;
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+atomic_t irq_mis_count;
+#endif
+#endif
/*
* Generic, controller-independent functions:
apic_timer_irqs[cpu_logical_map(j)]);
p += sprintf(p, "\n");
#endif
- p += sprintf(p, "ERR: %10lu\n", irq_err_count);
+ p += sprintf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
+ p += sprintf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+#endif
+#endif
return p - buf;
}
#define DBG(x...)
#endif
-#define PCI_PROBE_BIOS 1
-#define PCI_PROBE_CONF1 2
-#define PCI_PROBE_CONF2 4
-#define PCI_NO_SORT 0x100
-#define PCI_BIOS_SORT 0x200
-#define PCI_NO_CHECKS 0x400
-#define PCI_ASSIGN_ROMS 0x1000
-#define PCI_BIOS_IRQ_SCAN 0x2000
+#define PCI_PROBE_BIOS 0x0001
+#define PCI_PROBE_CONF1 0x0002
+#define PCI_PROBE_CONF2 0x0004
+#define PCI_NO_SORT 0x0100
+#define PCI_BIOS_SORT 0x0200
+#define PCI_NO_CHECKS 0x0400
+#define PCI_ASSIGN_ROMS 0x1000
+#define PCI_BIOS_IRQ_SCAN 0x2000
+#define PCI_ASSIGN_ALL_BUSSES 0x4000
extern unsigned int pci_probe;
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX, pirq_piix_get, pirq_piix_set },
{ "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_0, pirq_piix_get, pirq_piix_set },
- { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82820FW_0, pirq_piix_get, pirq_piix_set },
+ { "PIIX", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, pirq_piix_get, pirq_piix_set },
{ "ALI", PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pirq_ali_get, pirq_ali_set },
if (info->irq[pin].link == pirq) {
/* We refuse to override the dev->irq information. Give a warning! */
if (dev2->irq && dev2->irq != irq) {
- printk(KERN_INFO "IRQ routing conflict in pirq table for device %s\n", dev2->slot_name);
+ printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
+ dev2->slot_name, dev2->irq, irq);
continue;
}
dev2->irq = irq;
pirq_penalty[irq]++;
if (dev != dev2)
- printk(KERN_INFO "PCI: The same IRQ used for device %s\n", dev2->slot_name);
+ printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, dev2->slot_name);
}
}
return 1;
pcibios_max_latency = 32;
}
-static void __init pci_fixup_via_acpi(struct pci_dev *d)
-{
- /*
- * VIA ACPI device: IRQ line in PCI config byte 0x42
- */
- u8 irq;
- pci_read_config_byte(d, 0x42, &irq);
- irq &= 0x0f;
- if (irq && (irq != 2))
- d->irq = irq;
-}
-
static void __init pci_fixup_piix4_acpi(struct pci_dev *d)
{
/*
{ PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency },
- { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, pci_fixup_via_acpi },
- { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, pci_fixup_via_acpi },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C691, pci_fixup_via691 },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C598_1, pci_fixup_via691_2 },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi },
else if (!strcmp(str, "rom")) {
pci_probe |= PCI_ASSIGN_ROMS;
return NULL;
+ } else if (!strcmp(str, "assign-busses")) {
+ pci_probe |= PCI_ASSIGN_ALL_BUSSES;
+ return NULL;
} else if (!strncmp(str, "irqmask=", 8)) {
pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
return NULL;
return str;
}
+unsigned int pcibios_assign_all_busses(void)
+{
+ return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
+}
+
int pcibios_enable_device(struct pci_dev *dev)
{
int err;
i = 1;
module_start = VMALLOC_START;
module_end = VMALLOC_END;
+ module_end = 0;
while (((long) stack & (THREAD_SIZE-1)) != 0) {
addr = *stack++;
/*
void show_trace_task(struct task_struct *tsk)
{
- show_trace(&tsk->thread.esp);
+ unsigned long esp = tsk->thread.esp;
+
+ /* User space on another CPU? */
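+	/* (The task_struct sits at the bottom of its two-page kernel
+	   stack, so a valid kernel esp must lie in the same 8kB block
+	   as tsk.) */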
+ if ((esp ^ (unsigned long)tsk) & (PAGE_MASK<<1))
+ return;
+ show_trace((unsigned long *)esp);
}
void show_stack(unsigned long * esp)
pci_write_config_byte(dev, 0x44, reg);
}
}
- if ((dev = pci_find_device(PCI_VENDOR_ID_VIA,
- PCI_DEVICE_ID_VIA_82C586_2,
- dev)))
- {
- /* Force correct USB function interrupt */
- dev->irq = 11;
- pcibios_write_config_byte(dev->bus->number,
- dev->devfn,
- PCI_INTERRUPT_LINE,
- dev->irq);
- }
}
if ((dev = pci_find_device(PCI_VENDOR_ID_WINBOND,
PCI_DEVICE_ID_WINBOND_82C105, dev))){
*/
#include <stdio.h>
+#include <stdlib.h>
#include <sys/types.h>
-#include <sys/time.h>
+#include <time.h>
char* default_basename = "pca200e"; /* was initially written for the PCA-200E firmware */
char* default_infname = "<stdin>";
idefloppy_flexible_disk_page_t flexible_disk_page; /* Copy of the flexible disk page */
int wp; /* Write protect */
- unsigned int flags; /* Status/Action flags */
+ unsigned long flags; /* Status/Action flags */
} idefloppy_floppy_t;
/*
#define DEVID_PIIX4U ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_1})
#define DEVID_PIIX4U2 ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82372FB_1})
#define DEVID_PIIX4NX ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX})
-#define DEVID_PIIX4U3 ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82820FW_5})
+#define DEVID_PIIX4U3 ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_9})
+#define DEVID_PIIX4U4 ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_8})
#define DEVID_VIA_IDE ((ide_pci_devid_t){PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561})
#define DEVID_VP_IDE ((ide_pci_devid_t){PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1})
#define DEVID_PDC20246 ((ide_pci_devid_t){PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246})
{DEVID_PIIX4U2, "PIIX4", PCI_PIIX, ATA66_PIIX, INIT_PIIX, NULL, {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, ON_BOARD, 0 },
{DEVID_PIIX4NX, "PIIX4", PCI_PIIX, NULL, INIT_PIIX, NULL, {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, ON_BOARD, 0 },
{DEVID_PIIX4U3, "PIIX4", PCI_PIIX, ATA66_PIIX, INIT_PIIX, NULL, {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, ON_BOARD, 0 },
+ {DEVID_PIIX4U4, "PIIX4", PCI_PIIX, ATA66_PIIX, INIT_PIIX, NULL, {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, ON_BOARD, 0 },
{DEVID_VIA_IDE, "VIA_IDE", NULL, NULL, NULL, NULL, {{0x00,0x00,0x00}, {0x00,0x00,0x00}}, ON_BOARD, 0 },
{DEVID_VP_IDE, "VP_IDE", PCI_VIA82CXXX, ATA66_VIA82CXXX,INIT_VIA82CXXX, DMA_VIA82CXXX, {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, ON_BOARD, 0 },
{DEVID_PDC20246,"PDC20246", PCI_PDC202XX, NULL, INIT_PDC202XX, NULL, {{0x50,0x02,0x02}, {0x50,0x04,0x04}}, OFF_BOARD, 16 },
c1 = inb_p((unsigned short)bibma + 0x0a);
switch(bmide_dev->device) {
- case PCI_DEVICE_ID_INTEL_82820FW_5:
+ case PCI_DEVICE_ID_INTEL_82801BA_8:
+ case PCI_DEVICE_ID_INTEL_82801BA_9:
p += sprintf(p, "\n Intel PIIX4 Ultra 100 Chipset.\n");
break;
case PCI_DEVICE_ID_INTEL_82372FB_1:
byte speed;
byte udma_66 = eighty_ninty_three(drive);
- int ultra100 = ((dev->device == PCI_DEVICE_ID_INTEL_82820FW_5)) ? 1 : 0;
+ int ultra100 = ((dev->device == PCI_DEVICE_ID_INTEL_82801BA_8) ||
+ (dev->device == PCI_DEVICE_ID_INTEL_82801BA_9)) ? 1 : 0;
int ultra66 = ((ultra100) ||
(dev->device == PCI_DEVICE_ID_INTEL_82801AA_1) ||
(dev->device == PCI_DEVICE_ID_INTEL_82372FB_1)) ? 1 : 0;
/* disable PCI bus-mastering */
pci_read_config_byte(btv->dev, PCI_COMMAND, &command);
- /* Should this be &=~ ?? */
- command&=~PCI_COMMAND_MASTER;
+ command &= ~PCI_COMMAND_MASTER;
pci_write_config_byte(btv->dev, PCI_COMMAND, command);
/* unmap and free memory */
}
/**
- * el1_probe:
+ * el1_probe1:
* @dev: The device structure to use
* @ioaddr: An I/O address to probe at.
*
static int irq=5;
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
+MODULE_PARM_DESC(io, "EtherLink I/O base address");
+MODULE_PARM_DESC(irq, "EtherLink IRQ number");
/**
* init_module:
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_EL2_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_EL2_CARDS) "i");
MODULE_PARM(xcvr, "1-" __MODULE_STRING(MAX_EL2_CARDS) "i");
+MODULE_PARM_DESC(io, "EtherLink II I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherLink II IRQ number(s) (assigned)");
+MODULE_PARM_DESC(xcvr, "EtherLink II transceiver(s) (0=internal, 1=external)");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
MODULE_PARM(io, "1-" __MODULE_STRING(ELP_MAX_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(ELP_MAX_CARDS) "i");
MODULE_PARM(dma, "1-" __MODULE_STRING(ELP_MAX_CARDS) "i");
+MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)");
+MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)");
int init_module(void)
{
static int irq;
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
+MODULE_PARM_DESC(io, "EtherLink16 I/O base address");
+MODULE_PARM_DESC(irq, "(ignored)");
int init_module(void)
{
MODULE_PARM(irq,"1-8i");
MODULE_PARM(xcvr,"1-8i");
MODULE_PARM(max_interrupt_work, "i");
-#ifdef CONFIG_ISAPNP
+MODULE_PARM_DESC(debug, "EtherLink III debug level (0-6)");
+MODULE_PARM_DESC(irq, "EtherLink III IRQ number(s) (assigned)");
+MODULE_PARM_DESC(xcvr, "EtherLink III transceiver(s) (0=internal, 1=external)");
+MODULE_PARM_DESC(max_interrupt_work, "EtherLink III maximum events handled per interrupt");
MODULE_PARM(nopnp, "i");
-#endif
+MODULE_PARM_DESC(nopnp, "EtherLink III disable ISA PnP support (0-1)");
int
init_module(void)
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
+MODULE_PARM_DESC(debug, "3c515 debug level (0-6)");
+MODULE_PARM_DESC(options, "3c515: Bits 0-2: media type, bit 3: full duplex, bit 4: bus mastering");
+MODULE_PARM_DESC(full_duplex, "(ignored)");
+MODULE_PARM_DESC(rx_copybreak, "3c515 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt");
/* "Knobs" for adjusting internal parameters. */
/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
static int io[MAX_3C523_CARDS];
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_3C523_CARDS) "i");
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_3C523_CARDS) "i");
+MODULE_PARM_DESC(io, "EtherLink/MC I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherLink/MC IRQ number(s)");
int init_module(void)
{
- If a device's internalconfig register reports it has NWAY,
use it, even if autoselect is enabled.
+ LK1.1.15 6 June 2001 akpm
+ - Prevent double counting of received bytes (Lars Christensen)
+ - Add ethtool support (jgarzik)
+ - Add module parm descriptions (Andrzej M. Krzysztofowicz)
+ - Implemented alloc_etherdev() API
+ - Special-case the 'Tx error 82' message.
+
- See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
- Also see Documentation/networking/vortex.txt
*/
* elimination of all the tests and reduced cache footprint.
*/
+
+#define DRV_NAME "3c59x"
+#define DRV_VERSION "LK1.1.15"
+#define DRV_RELDATE "6 June 2001"
+
+
+
/* A few values that may be tweaked. */
/* Keep the ring sizes a power of two for efficiency. */
#define TX_RING_SIZE 16
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/ethtool.h>
#include <asm/irq.h> /* For NR_IRQS only. */
#include <asm/bitops.h>
#include <asm/io.h>
+#include <asm/uaccess.h>
/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
This is only in the support-all-kernels source code. */
#include <linux/delay.h>
+
static char version[] __devinitdata =
-"3c59x.c:LK1.1.13 27 Jan 2001 Donald Becker and others. http://www.scyld.com/network/vortex.html\n";
+DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " Donald Becker and others. http://www.scyld.com/network/vortex.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c59x/3c90x/3c575 series Vortex/Boomerang/Cyclone driver");
MODULE_PARM(compaq_irq, "i");
MODULE_PARM(compaq_device_id, "i");
MODULE_PARM(watchdog, "i");
+MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
+MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
+MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
+MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
+MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
+MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
+MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
+MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
+MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
/* Operational parameter that usually are not changed. */
code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required;
-#define PFX "3c59x: "
+#define PFX DRV_NAME ": "
static int vortex_cards_found;
-static void vortex_suspend (struct pci_dev *pdev)
+#ifdef CONFIG_PM
+
+static int vortex_suspend (struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pdev->driver_data;
vortex_down(dev);
}
}
+ return 0;
}
-static void vortex_resume (struct pci_dev *pdev)
+static int vortex_resume (struct pci_dev *pdev)
{
struct net_device *dev = pdev->driver_data;
netif_device_attach(dev);
}
}
+ return 0;
}
+#endif /* CONFIG_PM */
+
/* returns count found (>= 0), or negative on error */
static int __init vortex_eisa_init (void)
{
for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
int device_id;
- if (request_region(ioaddr, VORTEX_TOTAL_SIZE, "3c59x") == NULL)
+ if (request_region(ioaddr, VORTEX_TOTAL_SIZE, DRV_NAME) == NULL)
continue;
/* Check the standard EISA ID register for an encoded '3Com'. */
static int printed_version;
int retval;
struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
+ char *print_name;
if (!printed_version) {
printk (KERN_INFO "%s", version);
printed_version = 1;
}
- dev = init_etherdev(NULL, sizeof(*vp));
+ print_name = pdev ? pdev->slot_name : "3c59x";
+
+ dev = alloc_etherdev(sizeof(*vp));
retval = -ENOMEM;
if (!dev) {
printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
SET_MODULE_OWNER(dev);
printk(KERN_INFO "%s: 3Com %s %s at 0x%lx, ",
- dev->name,
+ print_name,
pdev ? "PCI" : "EISA",
vci->name,
ioaddr);
if (pdev) {
/* EISA resources already marked, so only PCI needs to do this here */
/* Ignore return value, because Cardbus drivers already allocate for us */
- if (request_region(ioaddr, vci->io_size, dev->name) != NULL)
+ if (request_region(ioaddr, vci->io_size, print_name) != NULL)
vp->must_free_region = 1;
/* enable bus-mastering if necessary */
if (pci_latency < new_latency) {
printk(KERN_INFO "%s: Overriding PCI latency"
" timer (CFLT) setting of %d, new value is %d.\n",
- dev->name, pci_latency, new_latency);
+ print_name, pci_latency, new_latency);
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
}
}
goto free_ring;
}
printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
- dev->name, fn_st_addr, vp->cb_fn_base);
+ print_name, fn_st_addr, vp->cb_fn_base);
EL3WINDOW(2);
n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
if (vortex_debug > 0) {
printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
- dev->name,
+ print_name,
(dev->features & NETIF_F_SG) ? "en":"dis",
(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
}
dev->set_multicast_list = set_rx_mode;
dev->tx_timeout = vortex_tx_timeout;
dev->watchdog_timeo = (watchdog * HZ) / 1000;
- return 0;
+ retval = register_netdev(dev);
+ if (retval == 0)
+ return 0;
free_ring:
pci_free_consistent(pdev,
free_region:
if (vp->must_free_region)
release_region(ioaddr, vci->io_size);
- unregister_netdev(dev);
kfree (dev);
printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval);
out:
unsigned char tx_status = 0;
if (vortex_debug > 2) {
- printk(KERN_DEBUG "%s: vortex_error(), status=0x%x\n", dev->name, status);
+ printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", dev->name, status);
}
if (status & TxComplete) { /* Really "TxError" for us. */
/* Presumably a tx-timeout. We must merely re-enable. */
if (vortex_debug > 2
|| (tx_status != 0x88 && vortex_debug > 0)) {
- printk(KERN_DEBUG"%s: Transmit error, Tx status register %2.2x.\n",
+ printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n",
dev->name, tx_status);
+ if (tx_status == 0x82) {
+ printk(KERN_ERR "Probably a duplex mismatch. See "
+ "Documentation/networking/vortex.txt\n");
+ }
dump_tx_ring(dev);
}
if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
struct sk_buff *skb;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
- vp->stats.rx_bytes += pkt_len;
if (vortex_debug > 4)
printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
return;
}
+
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
+{
+ struct vortex_private *vp = dev->priv;
+ u32 ethcmd;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
+ strcpy(info.driver, DRV_NAME);
+ strcpy(info.version, DRV_VERSION);
+ if (vp->pdev)
+ strcpy(info.bus_info, vp->pdev->slot_name);
+ else
+ sprintf(info.bus_info, "EISA 0x%lx %d",
+ dev->base_addr, dev->irq);
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct vortex_private *vp = (struct vortex_private *)dev->priv;
int retval;
switch(cmd) {
+ case SIOCETHTOOL:
+ return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
data[0] = phy;
case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
name: "3c59x",
probe: vortex_init_one,
remove: vortex_remove_one,
+ id_table: vortex_pci_tbl,
+#ifdef CONFIG_PM
suspend: vortex_suspend,
resume: vortex_resume,
- id_table: vortex_pci_tbl,
+#endif
};
*/
+#define DRV_NAME "8139too"
+#define DRV_VERSION "0.9.18-pre3"
+
+
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
#include <asm/io.h>
+#include <asm/uaccess.h>
-#define RTL8139_VERSION "0.9.17"
-#define MODNAME "8139too"
-#define RTL8139_DRIVER_NAME MODNAME " Fast Ethernet driver " RTL8139_VERSION
-#define PFX MODNAME ": "
+#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */
TxOK = 0x04,
RxErr = 0x02,
RxOK = 0x01,
+
+ RxAckBits = RxFIFOOver | RxOverflow | RxOK,
};
+
enum TxStatusBits {
TxHostOwns = 0x2000,
TxUnderrun = 0x4000,
};
+struct rtl_extra_stats {
+ unsigned long early_rx;
+ unsigned long tx_buf_mapped;
+ unsigned long tx_timeouts;
+};
struct rtl8139_private {
void *mmio_addr;
dma_addr_t rx_ring_dma;
dma_addr_t tx_bufs_dma;
signed char phys[4]; /* MII device addresses. */
- u16 advertising; /* NWay media advertisement */
char twistie, twist_row, twist_col; /* Twister tune state. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
unsigned int duplex_lock:1;
wait_queue_head_t thr_wait;
struct semaphore thr_exited;
u32 rx_config;
+ struct rtl_extra_stats xstats;
};
MODULE_AUTHOR ("Jeff Garzik <jgarzik@mandrakesoft.com>");
MODULE_PARM (max_interrupt_work, "i");
MODULE_PARM (media, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM (full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC (multicast_filter_limit, "8139too maximum number of filtered multicast addresses");
+MODULE_PARM_DESC (max_interrupt_work, "8139too maximum events handled per interrupt");
+MODULE_PARM_DESC (media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC (full_duplex, "8139too: Force full duplex for board(s) (1)");
static int read_eeprom (void *ioaddr, int location, int addr_len);
static int rtl8139_open (struct net_device *dev);
for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
int mii_status = mdio_read(dev, phy, 1);
if (mii_status != 0xffff && mii_status != 0x0000) {
+ u16 advertising = mdio_read(dev, phy, 4);
tp->phys[phy_idx++] = phy;
- tp->advertising = mdio_read(dev, phy, 4);
printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
"advertising %4.4x.\n",
- dev->name, phy, mii_status, tp->advertising);
+ dev->name, phy, mii_status, advertising);
}
}
if (phy_idx == 0) {
}
+static void rtl_check_media (struct net_device *dev)
+{
+ struct rtl8139_private *tp = dev->priv;
+
+ DPRINTK("ENTER\n");
+
+ if (tp->phys[0] >= 0) {
+ u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
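+		/* MII register 5 is the link partner ability: 0x0100 means
+		 * 100baseTx-FD is advertised, 0x0040 10baseT-FD and
+		 * 0x0080 100baseTx-HD. */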
+ if (mii_reg5 == 0xffff)
+ ; /* Not there */
+ else if ((mii_reg5 & 0x0100) == 0x0100
+ || (mii_reg5 & 0x00C0) == 0x0040)
+ tp->full_duplex = 1;
+
+ printk (KERN_INFO"%s: Setting %s%s-duplex based on"
+ " auto-negotiated partner ability %4.4x.\n",
+ dev->name, mii_reg5 == 0 ? "" :
+ (mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
+ tp->full_duplex ? "full" : "half", mii_reg5);
+ }
+}
+
/* Start the hardware at open or resume. */
static void rtl8139_hw_start (struct net_device *dev)
{
rtl8139_chip_reset (ioaddr);
/* unlock Config[01234] and BMCR register writes */
- RTL_W8 (Cfg9346, Cfg9346_Unlock);
+ RTL_W8_F (Cfg9346, Cfg9346_Unlock);
/* Restore our idea of the MAC address. */
RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
- RTL_W32 (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+ RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
/* Must enable Tx/Rx before setting transfer thresholds! */
RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
- RTL_W32 (RxConfig, rtl8139_rx_config);
+ RTL_W32 (RxConfig, tp->rx_config);
/* Check this value: the documentation for IFG contradicts ifself. */
RTL_W32 (TxConfig, (TX_DMA_BURST << TxDMAShift));
tp->cur_rx = 0;
- DPRINTK("check_duplex");
-
- /* This is check_duplex() */
- if (tp->phys[0] >= 0 || (tp->drv_flags & HAS_MII_XCVR)) {
- u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
- if (mii_reg5 == 0xffff)
- ; /* Not there */
- else if ((mii_reg5 & 0x0100) == 0x0100
- || (mii_reg5 & 0x00C0) == 0x0040)
- tp->full_duplex = 1;
- if (mii_reg5) {
- printk(KERN_INFO"%s: Setting %s%s-duplex based on"
- " auto-negotiated partner ability %4.4x.\n", dev->name,
- mii_reg5 == 0 ? "" :
- (mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
- tp->full_duplex ? "full" : "half", mii_reg5);
- } else {
- printk(KERN_INFO"%s: media is unconnected, link down, or incompatible connection\n",
- dev->name);
- }
- }
+ rtl_check_media (dev);
if (tp->chipset >= CH_8139B) {
- tmp = RTL_R8 (Config4) & ~(1<<2);
- /* chip will clear Rx FIFO overflow automatically */
- tmp |= (1<<7);
- RTL_W8 (Config4, tmp);
-
/* disable magic packet scanning, which is enabled
* when PM is enabled in Config1 */
RTL_W8 (Config3, RTL_R8 (Config3) & ~(1<<5));
RTL_R16 (IntrStatus),
RTL_R8 (MediaStatus));
+ tp->xstats.tx_timeouts++;
+
/* disable Tx ASAP, if not already */
tmp8 = RTL_R8 (ChipCmd);
if (tmp8 & CmdTxEnb)
unsigned int entry;
unsigned long flags;
+ /* XXX paranoid + sledgehammer == temporary system crash fix */
+ wmb();
+ spin_lock_irqsave (&tp->lock, flags);
+
/* Calculate the next Tx descriptor entry. */
entry = tp->cur_tx % NUM_TX_DESC;
if ((long) skb->data & 3) { /* Must use alignment buffer. */
/* tp->tx_info[entry].mapping = 0; */
memcpy (tp->tx_buf[entry], skb->data, skb->len);
- RTL_W32 (TxAddr0 + (entry * 4),
- tp->tx_bufs_dma + (tp->tx_buf[entry] - tp->tx_bufs));
+ RTL_W32_F (TxAddr0 + (entry * 4),
+ tp->tx_bufs_dma + (tp->tx_buf[entry] - tp->tx_bufs));
} else {
+ tp->xstats.tx_buf_mapped++;
tp->tx_info[entry].mapping =
pci_map_single (tp->pci_dev, skb->data, skb->len,
PCI_DMA_TODEVICE);
- RTL_W32 (TxAddr0 + (entry * 4), tp->tx_info[entry].mapping);
+ RTL_W32_F (TxAddr0 + (entry * 4), tp->tx_info[entry].mapping);
}
/* Note: the chip doesn't have auto-pad! */
dev->trans_start = jiffies;
- spin_lock_irqsave (&tp->lock, flags);
-
tp->cur_tx++;
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
netif_stop_queue (dev);
tp->stats.tx_errors++;
if (txstatus & TxAborted) {
tp->stats.tx_aborted_errors++;
- RTL_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
+ RTL_W32_F (TxConfig, TxClearAbt);
}
if (txstatus & TxCarrierLost)
tp->stats.tx_carrier_errors++;
static void rtl8139_rx_interrupt (struct net_device *dev,
- struct rtl8139_private *tp, void *ioaddr,
- u16 status)
+ struct rtl8139_private *tp, void *ioaddr)
{
unsigned char *rx_ring;
- u16 cur_rx, ackstat;
+ u16 cur_rx;
assert (dev != NULL);
assert (tp != NULL);
RTL_R16 (RxBufAddr),
RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
- if (status & RxFIFOOver)
- status = RxOverflow | RxOK;
- else
- status = RxOK;
-
while ((RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
int ring_offset = cur_rx % RX_BUF_LEN;
u32 rx_status;
unsigned int pkt_size;
struct sk_buff *skb;
- mb();
-
/* read size+status of next frame from DMA ring buffer */
rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
rx_size = rx_status >> 16;
}
#endif
- if (rx_size == 0xfff0) /* Early Rx in progress */
+ if (rx_size == 0xfff0) { /* Early Rx in progress */
+ tp->xstats.early_rx++;
break;
+ }
/* If Rx err or invalid rx_size/rx_status received
* (which happens if we get lost in the ring),
cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
RTL_W16 (RxBufPtr, cur_rx - 16);
- ackstat = RTL_R16 (IntrStatus) & status;
- if (ackstat)
- RTL_W16 (IntrStatus, ackstat);
+ if (RTL_R16 (IntrStatus) & RxAckBits)
+ RTL_W16_F (IntrStatus, RxAckBits);
}
DPRINTK ("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
tp->cur_rx = cur_rx;
- if (RTL_R8 (ChipCmd) & RxBufEmpty) {
- ackstat = RTL_R16 (IntrStatus) & status;
- if (ackstat)
- RTL_W16_F (IntrStatus, ackstat);
- }
+ if ((RTL_R8 (ChipCmd) & RxBufEmpty) &&
+ (RTL_R16 (IntrStatus) & RxAckBits))
+ RTL_W16_F (IntrStatus, RxAckBits);
}
if (status & RxUnderrun)
link_changed = RTL_R16 (CSCR) & CSCR_LinkChangeBit;
- /* E. Gill */
- /* In case of an RxFIFOOver we must also clear the RxOverflow
- bit to avoid dropping frames for ever. Believe me, I got a
- lot of troubles copying huge data (approximately 2 RxFIFOOver
- errors per 1GB data transfer).
- The following is written in the 'p-guide.pdf' file (RTL8139(A/B)
- Programming guide V0.1, from 1999/1/15) on page 9 from REALTEC.
- -----------------------------------------------------------
- 2. RxFIFOOvw handling:
- When RxFIFOOvw occurs, all incoming packets are discarded.
- Clear ISR(RxFIFOOvw) doesn't dismiss RxFIFOOvw event. To
- dismiss RxFIFOOvw event, the ISR(RxBufOvw) must be written
- with a '1'.
- -----------------------------------------------------------
- Unfortunately I was not able to find any reason for the
- RxFIFOOver error (I got the feeling this depends on the
- CPU speed, lower CPU speed --> more errors).
- After clearing the RxOverflow bit the transfer of the
- packet was repeated and all data are error free transferred */
- ackstat = status & ~(RxFIFOOver | RxOverflow | RxOK);
+ /* The chip takes special action when we clear RxAckBits,
+ * so we clear them later in rtl8139_rx_interrupt
+ */
+ ackstat = status & ~RxAckBits;
RTL_W16 (IntrStatus, ackstat);
DPRINTK ("%s: interrupt status=%#4.4x ackstat=%#4.4x new intstat=%#4.4x.\n",
RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
break;
- if (netif_running (dev) &&
- status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
- rtl8139_rx_interrupt (dev, tp, ioaddr, status);
+ if (netif_running (dev) && (status & RxAckBits))
+ rtl8139_rx_interrupt (dev, tp, ioaddr);
/* Check uncommon events with one test. */
if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
rtl8139_weird_interrupt (dev, tp, ioaddr,
status, link_changed);
- if (netif_running (dev) &&
- status & (TxOK | TxErr)) {
+ if (netif_running (dev) && (status & (TxOK | TxErr))) {
spin_lock (&tp->lock);
rtl8139_tx_interrupt (dev, tp, ioaddr);
spin_unlock (&tp->lock);
} while (boguscnt > 0);
if (boguscnt <= 0) {
- printk (KERN_WARNING
- "%s: Too much work at interrupt, "
- "IntrStatus=0x%4.4x.\n", dev->name,
- status);
+ printk (KERN_WARNING "%s: Too much work at interrupt, "
+ "IntrStatus=0x%4.4x.\n", dev->name, status);
/* Clear all interrupt sources. */
RTL_W16 (IntrStatus, 0xffff);
if (tp->medialock)
tp->full_duplex = (value & 0x0100) ? 1 : 0;
break;
- case 4: tp->advertising = value; break;
+ case 4: /* tp->advertising = value; */ break;
}
}
mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
mc_filter[1] = mc_filter[0] = 0;
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
- set_bit (ether_crc (ETH_ALEN, mclist->dmi_addr) >> 26,
- mc_filter);
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
rx_mode |= AcceptMulticast;
}
}
tp->rx_config = tmp;
}
RTL_W32_F (MAR0 + 0, mc_filter[0]);
- RTL_W32 (MAR0 + 4, mc_filter[1]);
+ RTL_W32_F (MAR0 + 4, mc_filter[1]);
spin_unlock_irqrestore (&tp->lock, flags);
}
-static void rtl8139_suspend (struct pci_dev *pdev)
+#ifdef CONFIG_PM
+
+static int rtl8139_suspend (struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct rtl8139_private *tp = dev->priv;
unsigned long flags;
if (!netif_running (dev))
- return;
+ return 0;
netif_device_detach (dev);
RTL_W32 (RxMissed, 0);
spin_unlock_irqrestore (&tp->lock, flags);
+ return 0;
}
-static void rtl8139_resume (struct pci_dev *pdev)
+static int rtl8139_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
if (!netif_running (dev))
- return;
+ return 0;
netif_device_attach (dev);
rtl8139_hw_start (dev);
+ return 0;
}
+#endif /* CONFIG_PM */
+
static struct pci_driver rtl8139_pci_driver = {
- name: MODNAME,
+ name: DRV_NAME,
id_table: rtl8139_pci_tbl,
probe: rtl8139_init_one,
remove: rtl8139_remove_one,
+#ifdef CONFIG_PM
suspend: rtl8139_suspend,
resume: rtl8139_resume,
+#endif /* CONFIG_PM */
};
MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_PARM(i596_debug, "i");
+MODULE_PARM_DESC(i596_debug, "i82596 debug mask");
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
static int io = 0x300;
static int irq = 10;
MODULE_PARM(irq, "i");
+MODULE_PARM_DESC(irq, "Apricot IRQ number");
#endif
MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "i82596 debug mask");
static int debug = -1;
int init_module(void)
fi
bool ' Pocket and portable adapters' CONFIG_NET_POCKET
if [ "$CONFIG_NET_POCKET" = "y" ]; then
- dep_tristate ' AT-LAN-TEC/RealTek pocket adapter support' CONFIG_ATP $CONFIG_ISA
+ if [ "$CONFIG_ISA" = "y" -a "$CONFIG_X86" = "y" ]; then
+ tristate ' AT-LAN-TEC/RealTek pocket adapter support' CONFIG_ATP
+ fi
tristate ' D-Link DE600 pocket adapter support' CONFIG_DE600
tristate ' D-Link DE620 pocket adapter support' CONFIG_DE620
fi
endif
subdir-$(CONFIG_NET_PCMCIA) += pcmcia
-subdir-$(CONFIG_NET_RADIO) += wireless
+subdir-$(CONFIG_NET_WIRELESS) += wireless
subdir-$(CONFIG_TULIP) += tulip
subdir-$(CONFIG_IRDA) += irda
subdir-$(CONFIG_TR) += tokenring
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_AC32_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_AC32_CARDS) "i");
MODULE_PARM(mem, "1-" __MODULE_STRING(MAX_AC32_CARDS) "i");
+MODULE_PARM_DESC(io, "ac3200 I/O base address(es)");
+MODULE_PARM_DESC(irq, "ac3200 IRQ number(s)");
+MODULE_PARM_DESC(mem, "ac3200 Memory base address(es)");
int
init_module(void)
}
-#ifdef MODULE
MODULE_AUTHOR("Jes Sorensen <jes@linuxcare.com>");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
-#endif
+MODULE_PARM_DESC(link, "Acenic/3C985/NetGear link state");
+MODULE_PARM_DESC(trace, "Acenic/3C985/NetGear firmware trace level");
+MODULE_PARM_DESC(tx_coal_tick, "Acenic/3C985/NetGear maximum clock ticks to wait for packets");
+MODULE_PARM_DESC(max_tx_desc, "Acenic/3C985/NetGear maximum number of transmit descriptors");
+MODULE_PARM_DESC(rx_coal_tick, "Acenic/3C985/NetGear maximum clock ticks to wait for packets");
+MODULE_PARM_DESC(max_rx_desc, "Acenic/3C985/NetGear maximum number of receive descriptors");
static void __exit ace_module_cleanup(void)
awc_i365_card_release(s);
- udelay(100000);
+ mdelay(100);
i365_out(s, 0x2, 0x10 ); // power enable
- udelay(200000);
+ mdelay(200);
i365_out(s, 0x2, 0x10 | 0x01 | 0x04 | 0x80); //power enable
- udelay(250000);
+ mdelay(250);
if (!s->irq)
s->irq = 11;
i365_out(s,0x15,0x3f | 0x40); // enab mem reg bit
i365_out(s,0x06,0x01); // enab mem
- udelay(10000);
+ mdelay(10);
cis[0] = 0x45;
mem[0x3e0] = 0x45;
- udelay(10000);
+ mdelay(10);
memcpy_fromio(cis,0xD000, 0x3e0);
s->socket, s->manufacturer,s->product);
i365_out(s,0x07, 0x1 | 0x2); // enable io 16bit
- udelay(1000);
+ mdelay(1);
port = s->io;
i365_out(s,0x08, port & 0xff);
i365_out(s,0x09, (port & 0xff00)/ 0x100);
i365_out(s,0x06, 0x40); // enable io window
- udelay(1000);
+ mdelay(1);
i365_out(s,0x3e0,0x45);
outw(0x10, s->io + 0x34);
- udelay(10000);
+ mdelay(10);
return 0;
-
-
-
};
MODULE_PARM(rx_queue_len,"i");
MODULE_PARM(tx_rate,"i");
MODULE_PARM(channel,"i");
-MODULE_PARM(tx_full_rate,"i");
+//MODULE_PARM(tx_full_rate,"i");
MODULE_PARM(adhoc,"i");
MODULE_PARM(master,"i");
MODULE_PARM(slave,"i");
MODULE_PARM(large_buff_mem,"i");
MODULE_PARM(small_buff_no,"i");
MODULE_PARM(SSID,"c33");
+MODULE_PARM_DESC(awc_debug,"Aironet debug mask");
+MODULE_PARM_DESC(channel,"Aironet channel");
+MODULE_PARM_DESC(adhoc,"Aironet Access Points not available (0-1)");
+MODULE_PARM_DESC(master,"Aironet is Adhoc master (creates network sync) (0-1)");
+MODULE_PARM_DESC(slave,"Aironet is Adhoc slave (0-1)");
+MODULE_PARM_DESC(max_mtu,"Aironet MTU limit (256-2312)");
#endif
/*EXPORT_SYMBOL(tx_queue_len);
MODULE_PARM(arlan_exit_debug, "i");
MODULE_PARM(arlan_entry_and_exit_debug, "i");
MODULE_PARM(arlan_EEPROM_bad, "i");
+MODULE_PARM_DESC(irq, "(unused)");
+MODULE_PARM_DESC(mem, "Arlan memory address for single device probing");
+MODULE_PARM_DESC(probe, "Arlan probe at initialization (0-1)");
+MODULE_PARM_DESC(arlan_debug, "Arlan debug enable (0-1)");
+MODULE_PARM_DESC(numDevices, "Number of Arlan devices; ignored if >1");
+MODULE_PARM_DESC(testMemory, "(unused)");
+MODULE_PARM_DESC(mdebug, "Arlan multicast debugging (0-1)");
+MODULE_PARM_DESC(retries, "Arlan maximum packet retransmissions");
+#ifdef ARLAN_ENTRY_EXIT_DEBUGGING
+MODULE_PARM_DESC(arlan_entry_debug, "Arlan driver function entry debugging");
+MODULE_PARM_DESC(arlan_exit_debug, "Arlan driver function exit debugging");
+MODULE_PARM_DESC(arlan_entry_and_exit_debug, "Arlan driver function entry and exit debugging");
+#else
+MODULE_PARM_DESC(arlan_entry_debug, "(ignored)");
+MODULE_PARM_DESC(arlan_exit_debug, "(ignored)");
+MODULE_PARM_DESC(arlan_entry_and_exit_debug, "(ignored)");
+#endif
EXPORT_SYMBOL(arlan_device);
EXPORT_SYMBOL(arlan_conf);
/* These unusual address orders are used to verify the CONFIG register. */
-static int fmv18x_probe_list[] = {
+static int fmv18x_probe_list[] __initdata = {
0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0
};
* ISA
*/
-static int at1700_probe_list[] = {
+static int at1700_probe_list[] __initdata = {
0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
};
* MCA
*/
#ifdef CONFIG_MCA
-static int at1700_ioaddr_pattern[] = {
+static int at1700_ioaddr_pattern[] __initdata = {
0x00, 0x04, 0x01, 0x05, 0x02, 0x06, 0x03, 0x07
};
-static int at1700_mca_probe_list[] = {
+static int at1700_mca_probe_list[] __initdata = {
0x400, 0x1400, 0x2400, 0x3400, 0x4400, 0x5400, 0x6400, 0x7400, 0
};
-static int at1700_irq_pattern[] = {
+static int at1700_irq_pattern[] __initdata = {
0x00, 0x00, 0x00, 0x30, 0x70, 0xb0, 0x00, 0x00,
0x00, 0xf0, 0x34, 0x74, 0xb4, 0x00, 0x00, 0xf4, 0x00
};
};
/* rEnE : maybe there are others I don't know off... */
-static struct at1720_mca_adapters_struct at1720_mca_adapters[] = {
+static struct at1720_mca_adapters_struct at1720_mca_adapters[] __initdata = {
{ "Allied Telesys AT1720AT", 0x6410 },
{ "Allied Telesys AT1720BT", 0x6413 },
- { "Allied Telesys AT1720T", 0x6416 },
+ { "Allied Telesys AT1720T", 0x6416 },
{ NULL, 0 },
};
#endif
#define EE_READ_CMD (6 << 6)
#define EE_ERASE_CMD (7 << 6)
-static int read_eeprom(int ioaddr, int location)
+static int __init read_eeprom(int ioaddr, int location)
{
int i;
unsigned short retval = 0;
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
MODULE_PARM(net_debug, "i");
+MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
+MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
+MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
int init_module(void)
{
*/
unsigned int bionet_debug = NET_DEBUG;
MODULE_PARM(bionet_debug, "i");
+MODULE_PARM_DESC(bionet_debug, "bionet debug level (0-2)");
static unsigned int bionet_min_poll_time = 2;
*/
unsigned int pamsnet_debug = NET_DEBUG;
MODULE_PARM(pamsnet_debug, "i");
+MODULE_PARM_DESC(pamsnet_debug, "pamsnet debug enable (0-1)");
static unsigned int pamsnet_min_poll_time = 2;
static int lance_debug = 1;
#endif
MODULE_PARM(lance_debug, "i");
+MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
/* Print debug messages on probing? */
#undef LANCE_DEBUG_PROBE
MODULE_DESCRIPTION("RealTek RTL8002/8012 parallel port Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(debug, "i");
-MODULE_PARM(io, "1-" __MODULE_STRING(MAX_UNITS) "i");
-MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_UNITS) "i");
-MODULE_PARM(xcvr, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM(io, "1-" __MODULE_STRING(NUM_UNITS) "i");
+MODULE_PARM(irq, "1-" __MODULE_STRING(NUM_UNITS) "i");
+MODULE_PARM(xcvr, "1-" __MODULE_STRING(NUM_UNITS) "i");
+MODULE_PARM_DESC(max_interrupt_work, "ATP maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "ATP debug level (0-7)");
+MODULE_PARM_DESC(io, "ATP I/O base address(es)");
+MODULE_PARM_DESC(irq, "ATP IRQ number(s)");
+MODULE_PARM_DESC(xcvr, "ATP transceiver(s) (0=internal, 1=external)");
#define RUN_AT(x) (jiffies + (x))
/* $Id$
- * vmelance.c: Ethernet driver for VME Lance cards on Baget/MIPS
+ * bagetlance.c: Ethernet driver for VME Lance cards on Baget/MIPS
* This code stealed and adopted from linux/drivers/net/atarilance.c
* See that for author info
*
static int lance_debug = 1;
#endif
MODULE_PARM(lance_debug, "i");
+MODULE_PARM_DESC(lance_debug, "Lance debug level (0-3)");
/* Print debug messages on probing? */
#undef LANCE_DEBUG_PROBE
MODULE_PARM(dma , "i");
MODULE_PARM(dmasize , "i");
MODULE_PARM(use_dma , "i");
+MODULE_PARM_DESC(io, "cs89x0 I/O base address");
+MODULE_PARM_DESC(irq, "cs89x0 IRQ number");
+#if DEBUGGING
+MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)");
+#else
+MODULE_PARM_DESC(debug, "(ignored)");
+#endif
+MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)");
+/* No other value than -1 for duplex seems to be currently interpreted */
+MODULE_PARM_DESC(duplex, "(ignored)");
+#if ALLOW_DMA
+MODULE_PARM_DESC(dma , "cs89x0 ISA DMA channel; ignored if use_dma=0");
+MODULE_PARM_DESC(dmasize , "cs89x0 DMA size in kB (16,64); ignored if use_dma=0");
+MODULE_PARM_DESC(use_dma , "cs89x0 using DMA (0-1)");
+#else
+MODULE_PARM_DESC(dma , "(ignored)");
+MODULE_PARM_DESC(dmasize , "(ignored)");
+MODULE_PARM_DESC(use_dma , "(ignored)");
+#endif
MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton <andrewm@uow.edu.au>");
MODULE_PARM(de4x5_debug, "i");
MODULE_PARM(dec_only, "i");
MODULE_PARM(args, "s");
+MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
+MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
+MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
# else
static int loading_module;
#endif /* MODULE */
static struct net_device *mdev = NULL;
static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
MODULE_PARM(io, "i");
+MODULE_PARM_DESC(io, "de4x5 I/O base address");
int
init_module(void)
static unsigned int de600_debug = DE600_DEBUG;
MODULE_PARM(de600_debug, "i");
+MODULE_PARM_DESC(de600_debug, "DE-600 debug level (0-2)");
static unsigned int delay_time = 10;
MODULE_PARM(delay_time, "i");
+MODULE_PARM_DESC(delay_time, "DE-600 delay on I/O in microseconds");
#ifdef FAKE_SMALL_MAX
static unsigned long de600_rspace(struct sock *sk);
MODULE_PARM(irq, "i");
MODULE_PARM(clone, "i");
MODULE_PARM(de620_debug, "i");
+MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
+MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
+MODULE_PARM_DESC(io, "DE-620 I/O base address, required");
+MODULE_PARM_DESC(irq, "DE-620 IRQ number, required");
+MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
+MODULE_PARM_DESC(de620_debug, "DE-620 debug level (0-2)");
/***********************************************
* *
static int io=0x200; /* Or use the irq= io= options to insmod */
MODULE_PARM(irq, "i");
MODULE_PARM(io, "i");
+MODULE_PARM_DESC(irq, "DEPCA IRQ number");
+MODULE_PARM_DESC(io, "DEPCA I/O base address");
/* See depca_probe() for autoprobe messages when a module */
int
MODULE_PARM(iptrap, "1-4i");
MODULE_PARM(ipxnet, "i");
MODULE_PARM(nicmode, "i");
+MODULE_PARM_DESC(debug, "Digi RightSwitch enable debugging (0-1)");
+MODULE_PARM_DESC(dma, "Digi RightSwitch enable BM DMA (0-1)");
+MODULE_PARM_DESC(nicmode, "Digi RightSwitch operating mode (1: switch, 2: multi-NIC)");
static int __init dgrs_init_module (void)
{
MODULE_PARM(HPNA_tx_cmd, "i");
MODULE_PARM(HPNA_NoiseFloor, "i");
MODULE_PARM(SF_mode, "i");
-
+MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
+MODULE_PARM_DESC(mode, "Davicom DM9xxx: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
+MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function (bit 0: VLAN, bit 1: Flow Control, bit 2: TX pause packet)");
+
/* Description:
* when user used insmod to add module, system invoked init_module()
* to initilize and register.
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_E21_CARDS) "i");
MODULE_PARM(mem, "1-" __MODULE_STRING(MAX_E21_CARDS) "i");
MODULE_PARM(xcvr, "1-" __MODULE_STRING(MAX_E21_CARDS) "i");
+MODULE_PARM_DESC(io, "E2100 I/O base address(es)");
+MODULE_PARM_DESC(irq, "E2100 IRQ number(s)");
+MODULE_PARM_DESC(mem, "E2100 memory base address(es)");
+MODULE_PARM_DESC(xcvr, "E2100 transceiver(s) (0=internal, 1=external)");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_EEPRO) "i");
MODULE_PARM(mem, "1-" __MODULE_STRING(MAX_EEPRO) "i");
MODULE_PARM(autodetect, "1-" __MODULE_STRING(1) "i");
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
+MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(s) in kB (3-29)");
+MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
int
init_module(void)
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(multicast_filter_limit, "i");
+MODULE_PARM_DESC(debug, "eepro100 debug level (0-6)");
+MODULE_PARM_DESC(options, "epro100: Bits 0-3: tranceiver type, bit 4: full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC(full_duplex, "epro100 full duplex setting(s) (1)");
+MODULE_PARM_DESC(congenb, "epro100 Enable congestion control (1)");
+MODULE_PARM_DESC(txfifo, "epro100 Tx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(rxfifo, "epro100 Rx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(txdmaccount, "epro100 Tx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(rxdmaccount, "epro100 Rx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(rx_copybreak, "epro100 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(max_interrupt_work, "epro100 maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit, "epro100 maximum number of filtered multicast addresses");
#define RUN_AT(x) (jiffies + (x))
/* ACPI power states don't universally work (yet) */
-#ifndef CONFIG_EEPRO100_PM
+#ifndef CONFIG_PM
#undef pci_set_power_state
#define pci_set_power_state null_set_power_state
static inline int null_set_power_state(struct pci_dev *dev, int state)
{
return 0;
}
-#endif /* CONFIG_EEPRO100_PM */
+#endif /* CONFIG_PM */
#define netdevice_start(dev)
#define netdevice_stop(dev)
static int eepro100_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static void eepro100_remove_one (struct pci_dev *pdev);
-#ifdef CONFIG_EEPRO100_PM
+#ifdef CONFIG_PM
static int eepro100_suspend (struct pci_dev *pdev, u32 state);
static int eepro100_resume (struct pci_dev *pdev);
#endif
timer routine. 2000/05/09 SAW */
saved_acpi = pci_set_power_state(sp->pdev, 0);
t = del_timer_sync(&sp->timer);
- data[3] = mdio_read(ioaddr, data[0], data[1]);
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
if (t)
add_timer(&sp->timer); /* may be set to the past --SAW */
pci_set_power_state(sp->pdev, saved_acpi);
sp->rx_mode = new_rx_mode;
}
\f
-#ifdef CONFIG_EEPRO100_PM
+#ifdef CONFIG_PM
static int eepro100_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pdev->driver_data;
set_rx_mode(dev);
return 0;
}
-#endif /* CONFIG_EEPRO100_PM */
+#endif /* CONFIG_PM */
static void __devexit eepro100_remove_one (struct pci_dev *pdev)
{
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1030,
PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82820FW_4,
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
};
id_table: eepro100_pci_tbl,
probe: eepro100_init_one,
remove: eepro100_remove_one,
-#ifdef CONFIG_EEPRO100_PM
+#ifdef CONFIG_PM
suspend: eepro100_suspend,
resume: eepro100_resume,
-#endif
+#endif /* CONFIG_PM */
};
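The CONFIG_PM changes here and in the drivers that follow all apply one pattern: the suspend/resume handlers are compiled, and wired into the pci_driver, only when power management is configured. A minimal sketch of that pattern under the 2.4 PCI driver interface; the "foo" driver name and helpers are hypothetical:

#ifdef CONFIG_PM
static int foo_suspend(struct pci_dev *pdev, u32 state)
{
	/* quiesce the device and save whatever state is needed */
	return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
	/* restore state and restart the device */
	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver foo_driver = {
	name:		"foo",
	id_table:	foo_pci_tbl,
	probe:		foo_init_one,
	remove:		foo_remove_one,
#ifdef CONFIG_PM
	suspend:	foo_suspend,
	resume:		foo_resume,
#endif /* CONFIG_PM */
};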
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
MODULE_PARM(io, "1-" __MODULE_STRING(EEXP_MAX_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(EEXP_MAX_CARDS) "i");
+MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)");
+MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)");
/* Ideally the user would give us io=, irq= for every card. If any parameters
* are specified, we verify and then use them. If no parameters are given, we
#error You must compile this driver with "-O".
#endif
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
+#include <linux/mii.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
+MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
+MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
+MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
/*
Theory of Operation
if (pkt_len > PKT_BUF_SZ - 4) {
printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
"%d bytes.\n",
- dev->name, pkt_len, status);
+ dev->name, status, pkt_len);
pkt_len = 1514;
}
/* Check if the packet is long enough to accept without copying
}
-static void epic_suspend (struct pci_dev *pdev)
+#ifdef CONFIG_PM
+
+static int epic_suspend (struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata(pdev);
long ioaddr = dev->base_addr;
if (!netif_running(dev))
- return;
+ return 0;
epic_pause(dev);
/* Put the chip into low-power mode. */
outl(0x0008, ioaddr + GENCTL);
/* pci_power_off(pdev, -1); */
+ return 0;
}
-static void epic_resume (struct pci_dev *pdev)
+static int epic_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (!netif_running(dev))
- return;
+ return 0;
epic_restart(dev);
/* pci_power_on(pdev); */
+ return 0;
}
+#endif /* CONFIG_PM */
+
static struct pci_driver epic_driver = {
name: DRV_NAME,
id_table: epic_pci_tbl,
probe: epic_init_one,
remove: epic_remove_one,
+#ifdef CONFIG_PM
suspend: epic_suspend,
resume: epic_resume,
+#endif /* CONFIG_PM */
};
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_ES_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_ES_CARDS) "i");
MODULE_PARM(mem, "1-" __MODULE_STRING(MAX_ES_CARDS) "i");
+MODULE_PARM_DESC(io, "ES3210 I/O base address(es)");
+MODULE_PARM_DESC(irq, "ES3210 IRQ number(s)");
+MODULE_PARM_DESC(mem, "ES3210 memory base address(es)");
int
init_module(void)
MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver");
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "i");
-MODULE_PARM_DESC(io, "eth16i io base address");
+MODULE_PARM_DESC(io, "eth16i I/O base address(es)");
#if 0
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "i");
#endif
MODULE_PARM(mediatype, "1-" __MODULE_STRING(MAX_ETH16I_CARDS) "s");
-MODULE_PARM_DESC(mediatype, "eth16i interfaceport mediatype");
+MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,eprom)");
MODULE_PARM(debug, "i");
-MODULE_PARM_DESC(debug, "eth16i debug level (0-4)");
+MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
#endif
int init_module(void)
static int unit;
MODULE_PARM(unit,"i");
+MODULE_PARM_DESC(unit,"Ethertap device number");
static struct net_device dev_ethertap =
{
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
+MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address");
+MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number");
int init_module(void)
{
static int get_scsi_oxid(struct fc_info *fi);
static void update_scsi_oxid(struct fc_info *fi);
-Scsi_Host_Template driver_template = IPH5526_SCSI_FC;
+static Scsi_Host_Template driver_template = IPH5526_SCSI_FC;
static void iph5526_timeout(struct net_device *dev);
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct fealnx_desc)
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_PARM(max_interrupt_work, "i");
-MODULE_PARM(min_pci_latency, "i");
+//MODULE_PARM(min_pci_latency, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
+MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
#define MIN_REGION_SIZE 136
struct netdev_private {
/* Descriptor rings first for alignment. */
- struct fealnx_desc rx_ring[RX_RING_SIZE];
- struct fealnx_desc tx_ring[TX_RING_SIZE];
+ struct fealnx_desc *rx_ring;
+ struct fealnx_desc *tx_ring;
+
+ dma_addr_t rx_ring_dma;
+ dma_addr_t tx_ring_dma;
struct net_device_stats stats;
long ioaddr;
unsigned int chip_id = ent->driver_data;
struct net_device *dev;
+ void *ring_space;
+ dma_addr_t ring_dma;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
np->flags = skel_netdrv_tbl[chip_id].flags;
pci_set_drvdata(pdev, dev);
+ ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_dev;
+ }
+ np->rx_ring = (struct fealnx_desc *)ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ if (!ring_space) {
+ err = -ENOMEM;
+ goto err_out_free_rx;
+ }
+ np->tx_ring = (struct fealnx_desc *)ring_space;
+ np->tx_ring_dma = ring_dma;
+
/* find the connected MII xcvrs */
if (np->flags == HAS_MII_XCVR) {
int phy, phy_idx = 0;
err = register_netdev(dev);
if (err)
- goto err_out_free;
+ goto err_out_free_tx;
printk(KERN_INFO "%s: %s at 0x%lx, ",
dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr);
return 0;
-err_out_free:
+err_out_free_tx:
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+err_out_free_rx:
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+err_out_free_dev:
kfree(dev);
err_out_unmap:
#ifndef USE_IO_OPS
static void __devexit fealnx_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+
if (dev) {
+ struct netdev_private *np = dev->priv;
+
+ pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
+ pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
unregister_netdev(dev);
#ifndef USE_IO_OPS
iounmap((void *)dev->base_addr);
init_ring(dev);
- writel(virt_to_bus(np->rx_ring), ioaddr + RXLBA);
- writel(virt_to_bus(np->tx_ring), ioaddr + TXLBA);
+ writel(np->rx_ring_dma, ioaddr + RXLBA);
+ writel(np->tx_ring_dma, ioaddr + TXLBA);
/* Initialize other registers. */
/* Configure the PCI bus bursts and FIFO thresholds.
break; /* Better luck next round. */
skb->dev = dev; /* Mark as being used by this device. */
- np->lack_rxbuf->buffer = virt_to_bus(skb->tail);
+ np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
++np->really_rx_count;
}
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].status = 0;
np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
- np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i + 1]);
+ np->rx_ring[i].next_desc = np->rx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
np->rx_ring[i].skbuff = NULL;
}
/* for the last rx descriptor */
- np->rx_ring[i - 1].next_desc = virt_to_bus(&np->rx_ring[0]);
- np->rx_ring[i - 1].next_desc_logical = &np->rx_ring[0];
+ np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
+ np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
/* allocate skb for rx buffers */
for (i = 0; i < RX_RING_SIZE; i++) {
++np->really_rx_count;
np->rx_ring[i].skbuff = skb;
skb->dev = dev; /* Mark as being used by this device. */
- np->rx_ring[i].buffer = virt_to_bus(skb->tail);
+ np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
np->rx_ring[i].status = RXOWN;
np->rx_ring[i].control |= RXIC;
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].status = 0;
- np->tx_ring[i].next_desc = virt_to_bus(&np->tx_ring[i + 1]);
+ np->tx_ring[i].next_desc = np->tx_ring_dma +
+ (i + 1)*sizeof(struct fealnx_desc);
np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
np->tx_ring[i].skbuff = NULL;
}
/* for the last tx descriptor */
- np->tx_ring[i - 1].next_desc = virt_to_bus(&np->tx_ring[0]);
+ np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
return;
struct netdev_private *np = dev->priv;
np->cur_tx_copy->skbuff = skb;
- np->cur_tx_copy->buffer = virt_to_bus(skb->data);
#define one_buffer
#define BPT 1022
#if defined(one_buffer)
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
struct fealnx_desc *next;
/* for the first descriptor */
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ BPT, PCI_DMA_TODEVICE);
np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
// 89/12/29 add,
if (np->pci_dev->device == 0x891)
np->cur_tx_copy->control |= ETIControl | RetryTxLC;
- next->buffer = virt_to_bus(skb->data) + BPT;
+		next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
+ skb->len - BPT, PCI_DMA_TODEVICE);
next->status = TXOWN;
np->cur_tx_copy->status = TXOWN;
np->cur_tx_copy = next->next_desc_logical;
np->free_tx_count -= 2;
} else {
+ np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
allocate_rx_buffers(dev);
- writel(virt_to_bus(np->cur_rx), dev->base_addr + RXLBA);
+ writel(np->rx_ring_dma + (np->cur_rx - np->rx_ring),
+ dev->base_addr + RXLBA);
writel(np->crvalue, dev->base_addr + TCRRCR);
}
}
/* Free the original skb. */
+ pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
+ np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(np->cur_tx->skbuff);
np->cur_tx->skbuff = NULL;
--np->really_tx_count;
printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
" status %x.\n", pkt_len, rx_status);
#endif
+ pci_dma_sync_single(np->pci_dev, np->cur_rx->buffer,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(np->pci_dev, np->cur_rx->buffer,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
/* Call copy + cksum if available. */
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, bus_to_virt(np->cur_rx->buffer),
- pkt_len, 0);
+ eth_copy_and_sum(skb,
+ np->cur_rx->skbuff->tail, pkt_len, 0);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
- bus_to_virt(np->cur_rx->buffer), pkt_len);
+ np->cur_rx->skbuff->tail, pkt_len);
#endif
} else {
skb_put(skb = np->cur_rx->skbuff, pkt_len);
if (skb != NULL) {
skb->dev = dev; /* Mark as being used by this device. */
- np->cur_rx->buffer = virt_to_bus(skb->tail);
+ np->cur_rx->buffer = pci_map_single(np->pci_dev, skb->tail,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
np->cur_rx->skbuff = skb;
++np->really_rx_count;
}
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = np->rx_ring[i].skbuff;
+
np->rx_ring[i].status = 0;
- if (np->rx_ring[i].skbuff)
- dev_kfree_skb(np->rx_ring[i].skbuff);
- np->rx_ring[i].skbuff = NULL;
+ if (skb) {
+ pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ np->rx_ring[i].skbuff = NULL;
+ }
}
for (i = 0; i < TX_RING_SIZE; i++) {
- if (np->tx_ring[i].skbuff)
- dev_kfree_skb(np->tx_ring[i].skbuff);
- np->tx_ring[i].skbuff = NULL;
+ struct sk_buff *skb = np->tx_ring[i].skbuff;
+
+ if (skb) {
+ pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ np->tx_ring[i].skbuff = NULL;
+ }
}
return 0;
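The fealnx changes above replace virt_to_bus()/bus_to_virt() with the 2.4 PCI DMA API. As a sketch, the streaming-buffer lifecycle they follow for each Rx skb looks like this (pdev, skb, rx_buf_sz and desc are stand-ins for the driver's own variables):

	dma_addr_t map;

	/* hand the buffer to the device */
	map = pci_map_single(pdev, skb->tail, rx_buf_sz, PCI_DMA_FROMDEVICE);
	desc->buffer = map;

	/* before the CPU reads a received frame that will be copied in place */
	pci_dma_sync_single(pdev, map, rx_buf_sz, PCI_DMA_FROMDEVICE);

	/* once the skb is handed up the stack or freed, drop the mapping */
	pci_unmap_single(pdev, map, rx_buf_sz, PCI_DMA_FROMDEVICE);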
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
MODULE_PARM(net_debug, "i");
+MODULE_PARM_DESC(io, "FMV-18X I/O address");
+MODULE_PARM_DESC(irq, "FMV-18X IRQ number");
+MODULE_PARM_DESC(net_debug, "FMV-18X debug level (0-1,5-6)");
int init_module(void)
{
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(force32, "i");
-
+MODULE_PARM_DESC(max_interrupt_work, "GNIC-II maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "GNIC-II MTU (all boards)");
+MODULE_PARM_DESC(debug, "GNIC-II debug level (0-7)");
+MODULE_PARM_DESC(min_rx_pkt, "GNIC-II minimum Rx packets processed between interrupts");
+MODULE_PARM_DESC(max_rx_gap, "GNIC-II maximum Rx inter-packet gap in 8.192 microsecond units");
+MODULE_PARM_DESC(max_rx_latency, "GNIC-II time between Rx interrupts in 8.192 microsecond units");
+MODULE_PARM_DESC(min_tx_pkt, "GNIC-II minimum Tx packets processed between interrupts");
+MODULE_PARM_DESC(max_tx_gap, "GNIC-II maximum Tx inter-packet gap in 8.192 microsecond units");
+MODULE_PARM_DESC(max_tx_latency, "GNIC-II time between Tx interrupts in 8.192 microsecond units");
+MODULE_PARM_DESC(rx_copybreak, "GNIC-II copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(rx_params, "GNIC-II min_rx_pkt+max_rx_gap+max_rx_latency");
+MODULE_PARM_DESC(tx_params, "GNIC-II min_tx_pkt+max_tx_gap+max_tx_latency");
+MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: media type, bits 4-6: as force32, bit 7: half duplex, bit 9 full duplex");
+MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)");
+MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: disable parity, bit 2: 64 bit PCI (all boards)");
+
static int read_eeprom(long ioaddr, int location);
static int mdio_read(long ioaddr, int phy_id, int location);
static void mdio_write(long ioaddr, int phy_id, int location, int value);
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_HPP_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_HPP_CARDS) "i");
+MODULE_PARM_DESC(io, "HP PC-LAN+ I/O port address(es)");
+MODULE_PARM_DESC(irq, "HP PC-LAN+ IRQ number(s); ignored if properly detected");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_HP_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_HP_CARDS) "i");
+MODULE_PARM_DESC(io, "HP PC-LAN I/O base address(es)");
+MODULE_PARM_DESC(irq, "HP PC-LAN IRQ number(s) (assigned)");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
static int io;
MODULE_PARM(irq, "i");
MODULE_PARM(io, "i");
+MODULE_PARM_DESC(irq, "IBM LAN/A IRQ number");
+MODULE_PARM_DESC(io, "IBM LAN/A I/O base address");
int init_module(void)
{
MODULE_PARM(dma, "1-" __MODULE_STRING(MAX_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_CARDS) "i");
MODULE_PARM(lance_debug, "i");
+MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required");
+MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
+MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
+MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
int init_module(void)
{
MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_PARM(i596_debug, "i");
-
+MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
* a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_LNE_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_LNE_CARDS) "i");
MODULE_PARM(mem, "1-" __MODULE_STRING(MAX_LNE_CARDS) "i");
+MODULE_PARM_DESC(io, "LNE390 I/O base address(es)");
+MODULE_PARM_DESC(irq, "LNE390 IRQ number(s)");
+MODULE_PARM_DESC(mem, "LNE390 memory base address(es)");
int init_module(void)
{
static int debug;
MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)");
EXPORT_NO_SYMBOLS;
static int port_aaui = -1;
MODULE_PARM(port_aaui, "i");
+MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
#define N_RX_RING 8
#define N_TX_RING 6
static struct net_device dev_macsonic;
MODULE_PARM(sonic_debug, "i");
+MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
EXPORT_NO_SYMBOLS;
* One-liner removal of a duplicate declaration of
netdev_error(). (uzi)
+ Version 1.0.7: (Manfred Spraul)
+ * pci dma
+ * SMP locking update
+ * full reset added into tx_timeout
+ * correct multicast hash generation (both big and little endian)
+ [copied from a natsemi driver version
+ from Myrio Corporation, Greg Smith]
+ * suspend/resume
+
+ TODO:
+ * big endian support with CFG:BEM instead of cpu_to_le32
+ * support for an external PHY
+ * flow control
+ * Wake-On-LAN
*/
#define DRV_NAME "natsemi"
-#define DRV_VERSION "1.07+LK1.0.6"
+#define DRV_VERSION "1.07+LK1.0.7"
#define DRV_RELDATE "May 18, 2001"
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (2*HZ)
+#define NATSEMI_HW_TIMEOUT 200
+
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
#if !defined(__OPTIMIZE__)
#error You must compile this driver with "-O".
#endif
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
KERN_INFO " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE " Jeff Garzik, Tjeerd Mulder)\n";
-/* Condensed operations for readability. */
-#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
-#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(max_interrupt_work, "DP8381x maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
+MODULE_PARM_DESC(debug, "DP8381x debug level (0-5)");
+MODULE_PARM_DESC(rx_copybreak, "DP8381x copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options, "DP8381x: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
/*
Theory of Operation
DescPktOK=0x08000000, RxTooLong=0x00400000,
};
-#define PRIV_ALIGN 15 /* Required alignment mask */
struct netdev_private {
/* Descriptor rings first for alignment. */
- struct netdev_desc rx_ring[RX_RING_SIZE];
- struct netdev_desc tx_ring[TX_RING_SIZE];
+ dma_addr_t ring_dma;
+ struct netdev_desc* rx_ring;
+ struct netdev_desc* tx_ring;
/* The addresses of receive-in-place skbuffs. */
struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ dma_addr_t rx_dma[RX_RING_SIZE];
/* The saved address of a sent-in-place packet/buffer, for later free(). */
struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_dma[TX_RING_SIZE];
struct net_device_stats stats;
struct timer_list timer; /* Media monitoring timer. */
/* Frequently used values: keep some adjacent for cache effect. */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
/* These values are keep track of the transceiver/media in use. */
- unsigned int full_duplex:1; /* Full-duplex operation requested. */
- unsigned int duplex_lock:1;
- unsigned int medialock:1; /* Do not sense media. */
- unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int full_duplex;
/* Rx filter. */
u32 cur_rx_mode;
u32 rx_filter[16];
/* original contents of ClkRun register */
u32 SavedClkRun;
/* MII transceiver section. */
- u16 advertising; /* NWay media advertisement */
-
+ u16 advertising; /* NWay media advertisement */
unsigned int iosize;
spinlock_t lock;
};
static int eeprom_read(long ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void natsemi_reset(struct net_device *dev);
static int netdev_open(struct net_device *dev);
-static void check_duplex(struct net_device *dev);
+static void check_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
+static int alloc_ring(struct net_device *dev);
static void init_ring(struct net_device *dev);
+static void drain_ring(struct net_device *dev);
+static void free_ring(struct net_device *dev);
+static void init_registers(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
-static int netdev_rx(struct net_device *dev);
+static void netdev_rx(struct net_device *dev);
+static void netdev_tx_done(struct net_device *dev);
+static void __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
+static void __get_stats(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
}
find_cnt++;
- option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
ioaddr = pci_resource_start(pdev, pcibar);
iosize = pci_resource_len(pdev, pcibar);
irq = pdev->irq;
prev_eedata = eedata;
}
- /* Reset the chip to erase previous misconfiguration. */
- writel(ChipReset, ioaddr + ChipCmd);
-
dev->base_addr = ioaddr;
dev->irq = irq;
np->iosize = iosize;
spin_lock_init(&np->lock);
+ /* Reset the chip to erase previous misconfiguration. */
+ natsemi_reset(dev);
+ option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
if (dev->mem_start)
option = dev->mem_start;
if (option > 0) {
if (option & 0x200)
np->full_duplex = 1;
- np->default_port = option & 15;
- if (np->default_port)
- np->medialock = 1;
+ if (option & 15)
+			printk(KERN_INFO "%s: ignoring user supplied media type %d\n",
+ dev->name, option & 15);
}
if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
np->full_duplex = 1;
- if (np->full_duplex)
- np->duplex_lock = 1;
-
/* The chip-specific entries in the device structure. */
dev->open = &netdev_open;
dev->hard_start_xmit = &start_tx;
return 0xffff;
}
+static void natsemi_reset(struct net_device *dev)
+{
+ int i;
+
+ writel(ChipReset, dev->base_addr + ChipCmd);
+ for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+ if (!(readl(dev->base_addr + ChipCmd) & ChipReset))
+ break;
+ udelay(5);
+ }
+ if (i==NATSEMI_HW_TIMEOUT && debug) {
+ printk(KERN_INFO "%s: reset did not complete in %d usec.\n",
+ dev->name, i*5);
+ } else if (debug > 2) {
+ printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
+ dev->name, i*5);
+ }
+}
+
\f
static int netdev_open(struct net_device *dev)
{
int i;
/* Reset the chip, just in case. */
- writel(ChipReset, ioaddr + ChipCmd);
+ natsemi_reset(dev);
+
+ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
+ if (i) return i;
+
+ if (debug > 1)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+ i = alloc_ring(dev);
+ if (i < 0) {
+ free_irq(dev->irq, dev);
+ return i;
+ }
+ init_ring(dev);
+ init_registers(dev);
+
+ netif_start_queue(dev);
+
+ if (debug > 2)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd));
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+ np->timer.expires = jiffies + 3*HZ;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+
+ return 0;
+}
+
+static void check_link(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ int duplex;
+ int chipcfg = readl(ioaddr + ChipConfig);
+
+ if(!(chipcfg & 0x80000000)) {
+ if (netif_carrier_ok(dev)) {
+ if (debug)
+ printk(KERN_INFO "%s: no link. Disabling watchdog.\n",
+ dev->name);
+ netif_carrier_off(dev);
+ }
+ return;
+ }
+ if (!netif_carrier_ok(dev)) {
+ if (debug)
+ printk(KERN_INFO "%s: link is back. Enabling watchdog.\n",
+ dev->name);
+ netif_carrier_on(dev);
+ }
+
+ duplex = np->full_duplex || (chipcfg & 0x20000000 ? 1 : 0);
+
+ /* if duplex is set then bit 28 must be set, too */
+ if (duplex ^ !!(np->rx_config & 0x10000000)) {
+ if (debug)
+ printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
+ " capability.\n", dev->name,
+ duplex ? "full" : "half");
+ if (duplex) {
+ np->rx_config |= 0x10000000;
+ np->tx_config |= 0xC0000000;
+ } else {
+ np->rx_config &= ~0x10000000;
+ np->tx_config &= ~0xC0000000;
+ }
+ writel(np->tx_config, ioaddr + TxConfig);
+ writel(np->rx_config, ioaddr + RxConfig);
+ }
+}
+static void init_registers(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: found silicon revision %xh.\n",
+ dev->name, readl(ioaddr + SiliconRev));
/* On page 78 of the spec, they recommend some settings for "optimum
performance" to be done in sequence. These settings optimize some
of the 100Mbit autodetection circuitry. Also, we only want to do
this for rev C of the chip.
+
+ There seems to be a typo on page 78: the fixup should be performed
+ for "DP83815CVNG (SRR = 203h)", but the description of the
+	   SiliconRev register says "DP83815CVNG: 00000302h"
*/
if (readl(ioaddr + SiliconRev) == 0x302) {
writew(0x0001, ioaddr + PGSEL);
*/
writew(0x0002, ioaddr + MIntrCtrl);
- i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
- if (i) return i;
-
- if (debug > 1)
- printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
- dev->name, dev->irq);
-
- init_ring(dev);
-
- writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
- writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
+ writel(np->ring_dma, ioaddr + RxRingPtr);
+ writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc), ioaddr + TxRingPtr);
for (i = 0; i < ETH_ALEN; i += 2) {
writel(i, ioaddr + RxFilterAddr);
ioaddr + RxFilterData);
}
- /* Initialize other registers. */
- /* Configure the PCI bus bursts and FIFO thresholds. */
- /* Configure for standard, in-spec Ethernet. */
+ /* Initialize other registers.
+ * Configure the PCI bus bursts and FIFO thresholds.
+ * Configure for standard, in-spec Ethernet.
+ * Start with half-duplex. check_link will update
+ * to the correct settings.
+ */
- if (readl(ioaddr + ChipConfig) & 0x20000000) { /* Full duplex */
- np->tx_config = 0xD0801002;
- np->rx_config = 0x10000020;
- } else {
- np->tx_config = 0x10801002;
- np->rx_config = 0x0020;
- }
+ /* DRTH: 2: start tx if 64 bytes are in the fifo
+ * FLTH: 0x10: refill with next packet if 512 bytes are free
+ * MXDMA: 0: up to 512 byte bursts.
+ * MXDMA must be <= FLTH
+ * ECRETRY=1
+ * ATP=1
+ */
+ np->tx_config = 0x10801002;
+ /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
+ * MXDMA 0: up to 512 byte bursts
+ */
+ np->rx_config = 0x0020;
writel(np->tx_config, ioaddr + TxConfig);
writel(np->rx_config, ioaddr + RxConfig);
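For readers decoding the magic value, 0x10801002 can be rebuilt from the fields named in the comment above. The macro names and bit positions below are assumptions added for illustration, not part of the patch:

#define TXCFG_ATP	(1 << 28)	/* automatic transmit padding */
#define TXCFG_ECRETRY	(1 << 23)	/* excessive-collision retry */
#define TXCFG_FLTH(x)	((x) << 8)	/* fill threshold */
#define TXCFG_DRTH(x)	((x) << 0)	/* drain threshold */

/* TXCFG_ATP | TXCFG_ECRETRY | TXCFG_FLTH(0x10) | TXCFG_DRTH(2) == 0x10801002 */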
- if (dev->if_port == 0)
- dev->if_port = np->default_port;
-
/* Disable PME:
* The PME bit is initialized from the EEPROM contents.
* PCI cards probably have PME disabled, but motherboard
np->SavedClkRun = readl(ioaddr + ClkRun);
writel(np->SavedClkRun & ~0x100, ioaddr + ClkRun);
- netif_start_queue(dev);
-
- check_duplex(dev);
- set_rx_mode(dev);
+ check_link(dev);
+ __set_rx_mode(dev);
/* Enable interrupts by setting the interrupt mask. */
writel(IntrNormalSummary | IntrAbnormalSummary | 0x1f, ioaddr + IntrMask);
writel(RxOn | TxOn, ioaddr + ChipCmd);
writel(4, ioaddr + StatsCtrl); /* Clear Stats */
-
- if (debug > 2)
- printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
- dev->name, (int)readl(ioaddr + ChipCmd));
-
- /* Set the timer to check for link beat. */
- init_timer(&np->timer);
- np->timer.expires = jiffies + 3*HZ;
- np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
- add_timer(&np->timer);
-
- return 0;
-}
-
-static void check_duplex(struct net_device *dev)
-{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
- int duplex;
-
- if (np->duplex_lock)
- return;
- duplex = readl(ioaddr + ChipConfig) & 0x20000000 ? 1 : 0;
- if (np->full_duplex != duplex) {
- np->full_duplex = duplex;
- if (debug)
- printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
- " capability.\n", dev->name,
- duplex ? "full" : "half");
- if (duplex) {
- np->rx_config |= 0x10000000;
- np->tx_config |= 0xC0000000;
- } else {
- np->rx_config &= ~0x10000000;
- np->tx_config &= ~0xC0000000;
- }
- writel(np->tx_config, ioaddr + TxConfig);
- writel(np->rx_config, ioaddr + RxConfig);
- }
}
static void netdev_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
int next_tick = 60*HZ;
- if (debug > 3)
- printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus));
- check_duplex(dev);
+ if (debug > 3) {
+ /* DO NOT read the IntrStatus register,
+ * a read clears any pending interrupts.
+ */
+ printk(KERN_DEBUG "%s: Media selection timer tick.\n",
+ dev->name);
+ }
+ spin_lock_irq(&np->lock);
+ check_link(dev);
+ spin_unlock_irq(&np->lock);
np->timer.expires = jiffies + next_tick;
add_timer(&np->timer);
}
printk("\n");
}
#endif
-
- /* Perhaps we should reinitialize the hardware here. */
- dev->if_port = 0;
- /* Stop and restart the chip's Tx processes . */
-
- /* Trigger an immediate transmit demand. */
+ spin_lock_irq(&np->lock);
+ natsemi_reset(dev);
+ drain_ring(dev);
+ init_ring(dev);
+ init_registers(dev);
+ spin_unlock_irq(&np->lock);
dev->trans_start = jiffies;
np->stats.tx_errors++;
netif_wake_queue(dev);
}
+static int alloc_ring(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ np->rx_ring = pci_alloc_consistent(np->pci_dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+ &np->ring_dma);
+ if (!np->rx_ring)
+ return -ENOMEM;
+ np->tx_ring = &np->rx_ring[RX_RING_SIZE];
+ return 0;
+}
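Since both rings live in a single consistent allocation, the bus address of any descriptor is derived from ring_dma by offset. A sketch of the arithmetic that init_ring() and init_registers() repeat inline; the helper names are hypothetical:

static dma_addr_t rx_desc_dma(struct netdev_private *np, int i)
{
	return np->ring_dma + i * sizeof(struct netdev_desc);
}

static dma_addr_t tx_desc_dma(struct netdev_private *np, int i)
{
	return np->ring_dma + (RX_RING_SIZE + i) * sizeof(struct netdev_desc);
}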
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
/* Initialize all Rx descriptors. */
for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
- np->rx_ring[i].cmd_status = DescOwn;
- np->rx_skbuff[i] = 0;
+ np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma+sizeof(struct netdev_desc)*(i+1));
+ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+ np->rx_skbuff[i] = NULL;
}
/* Mark the last entry as wrapping the ring. */
- np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+ np->rx_ring[i-1].next_desc = cpu_to_le32(np->ring_dma);
/* Fill in the Rx buffers. Handle allocation failure gracefully. */
for (i = 0; i < RX_RING_SIZE; i++) {
if (skb == NULL)
break;
skb->dev = dev; /* Mark as being used by this device. */
- np->rx_ring[i].addr = virt_to_le32desc(skb->tail);
- np->rx_ring[i].cmd_status =
- cpu_to_le32(DescIntr | np->rx_buf_sz);
+ np->rx_dma[i] = pci_map_single(np->pci_dev,
+ skb->data, skb->len, PCI_DMA_FROMDEVICE);
+ np->rx_ring[i].addr = cpu_to_le32(np->rx_dma[i]);
+ np->rx_ring[i].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
}
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = 0;
- np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+ +sizeof(struct netdev_desc)*(i+1+RX_RING_SIZE));
np->tx_ring[i].cmd_status = 0;
}
- np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
- return;
+ np->tx_ring[i-1].next_desc = cpu_to_le32(np->ring_dma
+ +sizeof(struct netdev_desc)*(RX_RING_SIZE));
+}
+
+static void drain_ring(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].cmd_status = 0;
+ np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+ np->rx_dma[i],
+ np->rx_skbuff[i]->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(np->rx_skbuff[i]);
+ }
+ np->rx_skbuff[i] = NULL;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (np->tx_skbuff[i]) {
+ pci_unmap_single(np->pci_dev,
+				np->tx_dma[i],
+				np->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(np->tx_skbuff[i]);
+ }
+ np->tx_skbuff[i] = NULL;
+ }
+}
+
+static void free_ring(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ pci_free_consistent(np->pci_dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+ np->rx_ring, np->ring_dma);
}
static int start_tx(struct sk_buff *skb, struct net_device *dev)
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
+ np->tx_dma[entry] = pci_map_single(np->pci_dev,
+ skb->data,skb->len, PCI_DMA_TODEVICE);
- np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
- np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
- np->cur_tx++;
+ np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
+ spin_lock_irq(&np->lock);
+
+#if 0
+ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | DescIntr | skb->len);
+#else
+ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
+#endif
/* StrongARM: Explicitly cache flush np->tx_ring and skb->data,skb->len. */
wmb();
-
- spin_lock_irq(&np->lock);
- if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
- netif_stop_queue(dev);
+ np->cur_tx++;
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ netdev_tx_done(dev);
+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
+ netif_stop_queue(dev);
+ }
spin_unlock_irq(&np->lock);
/* Wake the potentially-idle transmit channel. */
return 0;
}
+static void netdev_tx_done(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn)) {
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: tx frame #%d is busy.\n",
+ dev->name, np->dirty_tx);
+ break;
+ }
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: tx frame #%d finished with status %8.8xh.\n",
+ dev->name, np->dirty_tx,
+ le32_to_cpu(np->tx_ring[entry].cmd_status));
+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(0x08000000)) {
+ np->stats.tx_packets++;
+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+ } else { /* Various Tx errors */
+ int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
+ if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
+ if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
+ if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
+ if (tx_status & 0x00200000) np->stats.tx_window_errors++;
+ np->stats.tx_errors++;
+ }
+ pci_unmap_single(np->pci_dev,np->tx_dma[entry],
+ np->tx_skbuff[entry]->len,
+ PCI_DMA_TODEVICE);
+ /* Free the original skb. */
+ dev_kfree_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ }
+ if (netif_queue_stopped(dev)
+ && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, wake queue. */
+ netif_wake_queue(dev);
+ }
+
+}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
np = dev->priv;
do {
+ /* Reading automatically acknowledges all int sources. */
u32 intr_status = readl(ioaddr + IntrStatus);
- /* Acknowledge all of the current interrupt sources ASAP. */
- writel(intr_status & 0x000ffff, ioaddr + IntrStatus);
-
if (debug > 4)
printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
dev->name, intr_status);
if (intr_status & (IntrRxDone | IntrRxIntr))
netdev_rx(dev);
- spin_lock(&np->lock);
-
- for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
- int entry = np->dirty_tx % TX_RING_SIZE;
- if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
- break;
- if (np->tx_ring[entry].cmd_status & cpu_to_le32(0x08000000)) {
- np->stats.tx_packets++;
- np->stats.tx_bytes += np->tx_skbuff[entry]->len;
- } else { /* Various Tx errors */
- int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
- if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
- if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
- if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
- if (tx_status & 0x00200000) np->stats.tx_window_errors++;
- np->stats.tx_errors++;
- }
- /* Free the original skb. */
- dev_kfree_skb_irq(np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = 0;
+ if (intr_status & (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr) ) {
+ spin_lock(&np->lock);
+ netdev_tx_done(dev);
+ spin_unlock(&np->lock);
}
- if (netif_queue_stopped(dev)
- && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
- /* The ring is no longer full, wake queue. */
- netif_wake_queue(dev);
- }
-
- spin_unlock(&np->lock);
/* Abnormal error summary/uncommon events handlers. */
if (intr_status & IntrAbnormalSummary)
} while (1);
if (debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: exiting interrupt.\n",
+ dev->name);
}
/* This routine is logically part of the interrupt handler, but separated
for clarity and better register allocation. */
-static int netdev_rx(struct net_device *dev)
+static void netdev_rx(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
int entry = np->cur_rx % RX_RING_SIZE;
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb->dev = dev;
skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single(np->pci_dev, np->rx_dma[entry],
+ np->rx_skbuff[entry]->len,
+ PCI_DMA_FROMDEVICE);
#if HAS_IP_COPYSUM
eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
skb_put(skb, pkt_len);
pkt_len);
#endif
} else {
- char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ pci_unmap_single(np->pci_dev, np->rx_dma[entry],
+ np->rx_skbuff[entry]->len,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
-#ifndef final_version /* Remove after testing. */
- if (le32desc_to_virt(np->rx_ring[entry].addr) != temp)
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in netdev_rx: %p vs. %p / %p.\n",
- dev->name,
- le32desc_to_virt(np->rx_ring[entry].addr),
- skb->head, temp);
-#endif
}
skb->protocol = eth_type_trans(skb, dev);
/* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
if (skb == NULL)
break; /* Better luck next round. */
skb->dev = dev; /* Mark as being used by this device. */
- np->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ np->rx_dma[entry] = pci_map_single(np->pci_dev,
+ skb->data, skb->len, PCI_DMA_FROMDEVICE);
+ np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
}
np->rx_ring[entry].cmd_status =
cpu_to_le32(DescIntr | np->rx_buf_sz);
/* Restart Rx engine if stopped. */
writel(RxOn, dev->base_addr + ChipCmd);
- return 0;
}
static void netdev_error(struct net_device *dev, int intr_status)
struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
+ spin_lock(&np->lock);
if (intr_status & LinkChange) {
printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
" %4.4x partner %4.4x.\n", dev->name,
(int)readl(ioaddr + 0x90), (int)readl(ioaddr + 0x94));
/* read MII int status to clear the flag */
readw(ioaddr + MIntrStatus);
- check_duplex(dev);
+ check_link(dev);
}
if (intr_status & StatsMax) {
- get_stats(dev);
+ __get_stats(dev);
}
if (intr_status & IntrTxUnderrun) {
if ((np->tx_config & 0x3f) < 62)
np->tx_config += 2;
+ if (debug > 2)
+			printk(KERN_NOTICE "%s: increasing Tx threshold, new tx cfg %8.8xh.\n",
+ dev->name, np->tx_config);
writel(np->tx_config, ioaddr + TxConfig);
}
if (intr_status & WOLPkt) {
np->stats.tx_fifo_errors++;
np->stats.rx_fifo_errors++;
}
+ spin_unlock(&np->lock);
}
-static struct net_device_stats *get_stats(struct net_device *dev)
+static void __get_stats(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;
- /* We should lock this segment of code for SMP eventually, although
- the vulnerability window is very small and statistics are
- non-critical. */
/* The chip only need report frame silently dropped. */
np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+
+ /* The chip only need report frame silently dropped. */
+ spin_lock_irq(&np->lock);
+ __get_stats(dev);
+ spin_unlock_irq(&np->lock);
return &np->stats;
}
-
/* The little-endian AUTODIN II ethernet CRC calculations.
A big-endian version is also available.
This is slow but compact code. Do not use this routine for bulk data,
Chips may use the upper or lower CRC bits, and may reverse and/or invert
them. Select the endian-ness that results in minimal calculations.
*/
+#if 0
static unsigned const ethernet_polynomial_le = 0xedb88320U;
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
}
return crc;
}
+#else
+#define DP_POLYNOMIAL 0x04C11DB7
+/* dp83815_crc - compute CRC for hash table entries */
+static unsigned ether_crc_le(int length, unsigned char *data)
+{
+ u32 crc;
+ u8 cur_byte;
+ u8 msb;
+ u8 byte, bit;
+
+ crc = ~0;
+ for (byte=0; byte<length; byte++) {
+ cur_byte = *data++;
+ for (bit=0; bit<8; bit++) {
+ msb = crc >> 31;
+ crc <<= 1;
+ if (msb ^ (cur_byte & 1)) {
+ crc ^= DP_POLYNOMIAL;
+ crc |= 1;
+ }
+ cur_byte >>= 1;
+ }
+ }
+ crc >>= 23;
-static void set_rx_mode(struct net_device *dev)
+ return (crc);
+}
+#endif
+
+void set_bit_le(int offset, unsigned char * data)
+{
+ data[offset >> 3] |= (1 << (offset & 0x07));
+}
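A short worked example of the hash path used by __set_rx_mode() below; the multicast address is made up and the snippet assumes it runs in the driver context where ether_crc_le() and set_bit_le() are visible:

	unsigned char mc_addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u8 mc_filter[64];
	int bit;

	memset(mc_filter, 0, sizeof(mc_filter));
	bit = ether_crc_le(ETH_ALEN, mc_addr) & 0x1ff;	/* 9-bit hash -> 512 filter bits */
	set_bit_le(bit, mc_filter);			/* sets byte bit>>3, bit bit&7 */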
+#define HASH_TABLE 0x200
+static void __set_rx_mode(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
- set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
+ set_bit_le(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
mc_filter);
}
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
for (i = 0; i < 64; i += 2) {
- writew(0x200 + i, ioaddr + RxFilterAddr);
+ writew(HASH_TABLE + i, ioaddr + RxFilterAddr);
writew((mc_filter[i+1]<<8) + mc_filter[i], ioaddr + RxFilterData);
}
}
writel(rx_mode, ioaddr + RxFilterAddr);
np->cur_rx_mode = rx_mode;
+ spin_unlock_irq(&np->lock);
+}
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = dev->priv;
+ spin_lock_irq(&np->lock);
+ __set_rx_mode(dev);
+ spin_unlock_irq(&np->lock);
}
static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
u16 value = data[2];
writew(value, dev->base_addr + 0x80 + (miireg << 2));
switch (miireg) {
- case 0:
- /* Check for autonegotiation on or reset. */
- np->duplex_lock = (value & 0x9000) ? 0 : 1;
- if (np->duplex_lock)
- np->full_duplex = (value & 0x0100) ? 1 : 0;
- break;
case 4: np->advertising = value; break;
}
}
{
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;
- int i;
netif_stop_queue(dev);
if (debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
- "Int %2.2x.\n",
- dev->name, (int)readl(ioaddr + ChipCmd),
- (int)readl(ioaddr + IntrStatus));
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, (int)readl(ioaddr + ChipCmd));
printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
}
#ifdef __i386__
if (debug > 2) {
+ int i;
printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
- (int)virt_to_bus(np->tx_ring));
+ (int)np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
printk(" #%d desc. %8.8x %8.8x.\n",
i, np->tx_ring[i].cmd_status, np->tx_ring[i].addr);
printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
- (int)virt_to_bus(np->rx_ring));
+ (int)np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++) {
printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
i, np->rx_ring[i].cmd_status, np->rx_ring[i].addr);
#endif /* __i386__ debugging only */
free_irq(dev->irq, dev);
+ drain_ring(dev);
+ free_ring(dev);
- /* Free all the skbuffs in the Rx queue. */
- for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].cmd_status = 0;
- np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
- if (np->rx_skbuff[i]) {
- dev_kfree_skb(np->rx_skbuff[i]);
- }
- np->rx_skbuff[i] = 0;
- }
- for (i = 0; i < TX_RING_SIZE; i++) {
- if (np->tx_skbuff[i])
- dev_kfree_skb(np->tx_skbuff[i]);
- np->tx_skbuff[i] = 0;
- }
/* Restore PME enable bit */
writel(np->SavedClkRun, ioaddr + ClkRun);
#if 0
pci_set_drvdata(pdev, NULL);
}
+#ifdef CONFIG_PM
+
+static int natsemi_suspend (struct pci_dev *pdev, u32 state)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+
+ netif_device_detach(dev);
+ /* no more calls to tx_timeout, hard_start_xmit, set_rx_mode */
+ rtnl_lock();
+ rtnl_unlock();
+	/* no one within ->open */
+ if (netif_running (dev)) {
+ int i;
+ del_timer_sync(&np->timer);
+ /* no more link beat timer calls */
+ spin_lock_irq(&np->lock);
+ writel(RxOff | TxOff, ioaddr + ChipCmd);
+ for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
+ if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
+ break;
+ udelay(5);
+ }
+ if (i==NATSEMI_HW_TIMEOUT && debug) {
+ printk(KERN_INFO "%s: Tx/Rx process did not stop in %d usec.\n",
+ dev->name, i*5);
+ } else if (debug > 2) {
+ printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
+ dev->name, i*5);
+ }
+ /* Tx and Rx processes stopped */
+
+ writel(0, ioaddr + IntrEnable);
+ /* all irq events disabled. */
+ spin_unlock_irq(&np->lock);
+
+ synchronize_irq();
+
+ /* Update the error counts. */
+ __get_stats(dev);
+
+ /* pci_power_off(pdev, -1); */
+ drain_ring(dev);
+ }
+ return 0;
+}
+
+
+static int natsemi_resume (struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata (pdev);
+ struct netdev_private *np = dev->priv;
+
+ if (netif_running (dev)) {
+ pci_enable_device(pdev);
+ /* pci_power_on(pdev); */
+
+ natsemi_reset(dev);
+ init_ring(dev);
+ init_registers(dev);
+
+ np->timer.expires = jiffies + 1*HZ;
+ add_timer(&np->timer);
+ }
+ netif_device_attach(dev);
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
static struct pci_driver natsemi_driver = {
name: DRV_NAME,
id_table: natsemi_pci_tbl,
probe: natsemi_probe1,
remove: natsemi_remove1,
+#ifdef CONFIG_PM
+ suspend: natsemi_suspend,
+ resume: natsemi_resume,
+#endif
};
static int __init natsemi_init_mod (void)
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
+MODULE_PARM_DESC(io, "NEx000 I/O base address(es),required");
+MODULE_PARM_DESC(irq, "NEx000 IRQ number(s)");
+MODULE_PARM_DESC(bad, "NEx000 accept bad clone(s)");
/* This is set up so that no ISA autoprobe takes place. We can't guarantee
that the ne2k probe is the last 8390 based probe to take place (as it
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
+MODULE_PARM_DESC(io, "(ignored)");
+MODULE_PARM_DESC(irq, "(ignored)");
+MODULE_PARM_DESC(bad, "(ignored)");
#endif
/* Module code fixed by David Weinehall */
Limited full-duplex support.
*/
+#define DRV_NAME "ne2k-pci"
+#define DRV_VERSION "1.02"
+#define DRV_RELDATE "10/19/2000"
+
+
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/ethtool.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
+#include <asm/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
-KERN_INFO "ne2k-pci.c:v1.02 10/19/2000 D. Becker/P. Gortmaker\n"
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n"
KERN_INFO " http://www.scyld.com/network/ne2k-pci.html\n";
#if defined(__powerpc__)
#define outsl outsl_ns
#endif
+#define PFX DRV_NAME ": "
+
MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
MODULE_DESCRIPTION("PCI NE2000 clone driver");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(debug, "PCI NE2000 debug level (1-2)");
+MODULE_PARM_DESC(options, "PCI NE2000: Bit 5: full duplex");
+MODULE_PARM_DESC(full_duplex, "PCI NE2000 full duplex setting(s) (1)");
/* Some defines that people can play with if so inclined. */
struct sk_buff *skb, int ring_offset);
static void ne2k_pci_block_output(struct net_device *dev, const int count,
const unsigned char *buf, const int start_page);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
\f
irq = pdev->irq;
if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
- printk (KERN_ERR "ne2k-pci: no I/O resource at PCI BAR #0\n");
+ printk (KERN_ERR PFX "no I/O resource at PCI BAR #0\n");
return -ENODEV;
}
- if (request_region (ioaddr, NE_IO_EXTENT, "ne2k-pci") == NULL) {
- printk (KERN_ERR "ne2k-pci: I/O resource 0x%x @ 0x%lx busy\n",
+ if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
+ printk (KERN_ERR PFX "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
return -EBUSY;
}
dev = alloc_etherdev(0);
if (!dev) {
- printk (KERN_ERR "ne2k-pci: cannot allocate ethernet device\n");
+ printk (KERN_ERR PFX "cannot allocate ethernet device\n");
goto err_out_free_res;
}
SET_MODULE_OWNER(dev);
while ((inb(ioaddr + EN0_ISR) & ENISR_RESET) == 0)
/* Limit wait: '2' avoids jiffy roll-over. */
if (jiffies - reset_start_time > 2) {
- printk("ne2k-pci: Card failure (no reset ack).\n");
+ printk(KERN_ERR PFX "Card failure (no reset ack).\n");
goto err_out_free_netdev;
}
ei_status.block_input = &ne2k_pci_block_input;
ei_status.block_output = &ne2k_pci_block_output;
ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr;
+ ei_status.priv = (unsigned long) pdev;
dev->open = &ne2k_pci_open;
dev->stop = &ne2k_pci_close;
+ dev->do_ioctl = &netdev_ioctl;
NS8390_init(dev, 0);
i = register_netdev(dev);
return;
}
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
+{
+ struct ei_device *ei = dev->priv;
+ struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
+ u32 ethcmd;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
+ strcpy(info.driver, DRV_NAME);
+ strcpy(info.version, DRV_VERSION);
+ strcpy(info.bus_info, pci_dev->slot_name);
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ switch(cmd) {
+ case SIOCETHTOOL:
+ return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
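From userspace, the ethtool hook added above is reached with SIOCETHTOOL on an ordinary socket. A sketch of such a caller, with error handling trimmed and no guarantees about header layout on a given system:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

static void dump_drvinfo(const char *ifname)
{
	struct ethtool_drvinfo info;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GDRVINFO;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *) &info;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("%s: driver %s, version %s, bus %s\n",
		       ifname, info.driver, info.version, info.bus_info);
	close(fd);
}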
static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
{
static struct pci_driver ne2k_driver = {
- name: "ne2k-pci",
+ name: DRV_NAME,
probe: ne2k_pci_init_one,
remove: ne2k_pci_remove_one,
id_table: ne2k_pci_tbl,
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE3210_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE3210_CARDS) "i");
MODULE_PARM(mem, "1-" __MODULE_STRING(MAX_NE3210_CARDS) "i");
+MODULE_PARM_DESC(io, "NE3210 I/O base address(es)");
+MODULE_PARM_DESC(irq, "NE3210 IRQ number(s)");
+MODULE_PARM_DESC(mem, "NE3210 memory base address(es)");
int init_module(void)
{
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
+MODULE_PARM_DESC(io, "ni5010 I/O base address");
+MODULE_PARM_DESC(irq, "ni5010 IRQ number");
int init_module(void)
{
MODULE_PARM(irq, "i");
MODULE_PARM(memstart, "l");
MODULE_PARM(memend, "l");
+MODULE_PARM_DESC(io, "NI5210 I/O base address,required");
+MODULE_PARM_DESC(irq, "NI5210 IRQ number,required");
+MODULE_PARM_DESC(memstart, "NI5210 memory base address,required");
+MODULE_PARM_DESC(memend, "NI5210 memory end address,required");
int init_module(void)
{
MODULE_PARM(irq, "i");
MODULE_PARM(io, "i");
MODULE_PARM(dma, "i");
+MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
+MODULE_PARM_DESC(io, "ni6510 I/O base address");
+MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int init_module(void)
{
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
};
MODULE_AUTHOR ("Jeff Garzik <jgarzik@mandrakesoft.com>");
-MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
+MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
MODULE_PARM (multicast_filter_limit, "i");
MODULE_PARM (max_interrupt_work, "i");
MODULE_PARM (debug, "i");
MODULE_PARM (media, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
+MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
+MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC (debug, "(unused)");
static int read_eeprom (void *ioaddr, int location, int addr_len);
static int netdrv_open (struct net_device *dev);
case SIOCDEVPRIVATE + 1: /* Read the specified MII register. */
spin_lock_irqsave (&tp->lock, flags);
- data[3] = mdio_read (dev, data[0], data[1] & 0x1f);
+ data[3] = mdio_read (dev, data[0] & 0x1f, data[1] & 0x1f);
spin_unlock_irqrestore (&tp->lock, flags);
break;
}
spin_lock_irqsave (&tp->lock, flags);
- mdio_write (dev, data[0], data[1] & 0x1f, data[2]);
+ mdio_write (dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
spin_unlock_irqrestore (&tp->lock, flags);
break;
}
-static void netdrv_suspend (struct pci_dev *pdev)
+#ifdef CONFIG_PM
+
+static int netdrv_suspend (struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct netdrv_private *tp = dev->priv;
spin_unlock_irqrestore (&tp->lock, flags);
pci_power_off (pdev, -1);
+
+ return 0;
}
-static void netdrv_resume (struct pci_dev *pdev)
+static int netdrv_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
pci_power_on (pdev);
netif_device_attach (dev);
netdrv_hw_start (dev);
+
+ return 0;
}
+#endif /* CONFIG_PM */
+
static struct pci_driver netdrv_pci_driver = {
name: MODNAME,
id_table: netdrv_pci_tbl,
probe: netdrv_init_one,
remove: netdrv_remove_one,
+#ifdef CONFIG_PM
suspend: netdrv_suspend,
resume: netdrv_resume,
+#endif /* CONFIG_PM */
};
fi
fi
-if [ "$CONFIG_PCMCIA_3C589" = "y" -o "$CONFIG_PCMCIA_3C574" = "y" -o \
- "$CONFIG_PCMCIA_FMVJ18X" = "y" -o "$CONFIG_PCMCIA_PCNET" = "y" -o \
- "$CONFIG_PCMCIA_NMCLAN" = "y" -o "$CONFIG_PCMCIA_SMC91C92" = "y" -o \
- "$CONFIG_PCMCIA_XIRC2PS" = "y" -o "$CONFIG_PCMCIA_RAYCS" = "y" -o \
- "$CONFIG_PCMCIA_NETWAVE" = "y" -o "$CONFIG_PCMCIA_WAVELAN" = "y" -o \
- "$CONFIG_PCMCIA_XIRTULIP" = "y" ]; then
- define_bool CONFIG_PCMCIA_NETCARD y
-fi
-
endmenu
obj-$(CONFIG_PCMCIA_IBMTR) += ibmtr_cs.o
include $(TOPDIR)/Rules.make
-
-tmp-ibmtr.o: ../tokenring/ibmtr.c
- $(CC) $(CFLAGS) -D__NO_VERSION__ -DPCMCIA -c -o $@ ../tokenring/ibmtr.c
-
-ibmtr_cs.o: tmp-ibmtr.o ibmtr_cs.c
- $(CC) $(CFLAGS) -DPCMCIA -c -o tmp-$@ ibmtr_cs.c
- $(LD) -r -o $@ tmp-$@ tmp-ibmtr.o
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
+#define PCMCIA
+#include "../tokenring/ibmtr.c"
+
#ifdef PCMCIA_DEBUG
static int pc_debug = PCMCIA_DEBUG;
MODULE_PARM(pc_debug, "i");
unsigned char pcmcia_reality_check(unsigned char gss);
extern int trdev_init(struct net_device *dev);
-extern void tok_interrupt(int irq, struct pt_regs *regs);
+extern void tok_interrupt (int irq, void *dev_id, struct pt_regs *regs);
extern int tok_init_card(struct net_device *dev);
extern unsigned char get_sram_size(struct tok_info *ti);
/* This table use during operation for capabilities and media timer. */
static void tulip_timer(unsigned long data);
-static void t21142_timer(unsigned long data);
-static void mxic_timer(unsigned long data);
-static void pnic_timer(unsigned long data);
-static void comet_timer(unsigned long data);
enum tbl_flag {
HAS_MII=1, HAS_MEDIA_TABLE=2, CSR12_IN_SROM=4, ALWAYS_CHECK_MII=8,
int flags;
void (*media_timer)(unsigned long data);
} tulip_tbl[] = {
- { "Digital DC21040 Tulip", 128, 0x0001ebef, 0, tulip_timer },
- { "Digital DC21041 Tulip", 128, 0x0001ebef, HAS_MEDIA_TABLE, tulip_timer },
- { "Digital DS21140 Tulip", 128, 0x0001ebef,
- HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, tulip_timer },
- { "Digital DS21143 Tulip", 128, 0x0801fbff,
- HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY143, t21142_timer },
- { "Lite-On 82c168 PNIC", 256, 0x0001ebef,
- HAS_MII, pnic_timer },
- { "Macronix 98713 PMAC", 128, 0x0001ebef,
- HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
- { "Macronix 98715 PMAC", 256, 0x0001ebef,
- HAS_MEDIA_TABLE, mxic_timer },
- { "Macronix 98725 PMAC", 256, 0x0001ebef,
- HAS_MEDIA_TABLE, mxic_timer },
- { "ASIX AX88140", 128, 0x0001fbff,
- HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY, tulip_timer },
- { "Lite-On PNIC-II", 256, 0x0001ebef,
- HAS_MII | HAS_NWAY143, pnic_timer },
- { "ADMtek Comet", 256, 0x0001abef,
- MC_HASH_ONLY, comet_timer },
- { "Compex 9881 PMAC", 128, 0x0001ebef,
- HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
{ "Xircom Cardbus Adapter (DEC 21143 compatible mode)", 128, 0x0801fbff,
HAS_MII | HAS_ACPI, tulip_timer },
{0},
};
/* This matches the table above. Note 21142 == 21143. */
enum chips {
- DC21040=0, DC21041=1, DC21140=2, DC21142=3, DC21143=3,
- LC82C168, MX98713, MX98715, MX98725, AX88140, PNIC2, COMET, COMPEX9881,
X3201_3,
+ DC21040, DC21041, DC21140, DC21142=4, DC21143=4,
+ LC82C168, MX98713, MX98715, MX98725, AX88140, PNIC2, COMET, COMPEX9881,
};
/* A full-duplex map for media types. */
static u16 t21041_csr14[] = { 0x7F3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
-static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
static u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
-static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
/* Offsets to the Command and Status Registers, "CSRs". All accesses
must be longword instructions and quadword aligned. */
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* Put the setup frame on the Tx list. */
- tp->tx_ring[0].length = 0x08000000 | 192;
+ tp->tx_ring[0].length = 0x68000000 | 192;
tp->tx_ring[0].buffer1 = virt_to_bus(tp->setup_frame);
tp->tx_ring[0].status = DescOwned;
}
/* Put the setup frame on the Tx list. */
- tp->tx_ring[tp->cur_tx].length = 0x08000000 | 192;
+ tp->tx_ring[tp->cur_tx].length = 0x68000000 | 192;
/* Lie about the address of our setup frame to make the */
/* chip happy */
tp->tx_ring[tp->cur_tx].buffer1 = virt_to_bus(tp->setup_frame);
add_timer(&tp->timer);
}
-/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
- of available transceivers. */
-static void t21142_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = (struct tulip_private *)dev->priv;
- long ioaddr = dev->base_addr;
- int csr12 = inl(ioaddr + CSR12);
- int next_tick = 60*HZ;
- int new_csr6 = 0;
-
- if ((tulip_debug > 2) && !(media_cap[dev->if_port] & MediaIsMII))
- printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
- dev->name, csr12, medianame[dev->if_port]);
- if (media_cap[dev->if_port] & MediaIsMII) {
- check_duplex(dev);
- next_tick = 60*HZ;
- } else if (tp->nwayset) {
- /* Don't screw up a negotiated session! */
- if (tulip_debug > 1)
- printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
- dev->name, medianame[dev->if_port], csr12);
- } else if (tp->medialock) {
- ;
- } else if (dev->if_port == 3) {
- if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
- if (tulip_debug > 1)
- printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
- "trying NWay.\n", dev->name, csr12);
- t21142_start_nway(dev);
- next_tick = 3*HZ;
- }
- } else if (((csr12 & 0x7000) != 0x5000)
- && tp->chip_id != X3201_3) {
- /* Negotiation failed. Search media types. */
- if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
- dev->name, csr12);
- if (!(csr12 & 4)) { /* 10mbps link beat good. */
- new_csr6 = 0x82420000;
- dev->if_port = 0;
- outl(0, ioaddr + CSR13);
- outl(0x0003FFFF, ioaddr + CSR14);
- outw(t21142_csr15[dev->if_port], ioaddr + CSR15);
- outl(t21142_csr13[dev->if_port], ioaddr + CSR13);
- } else {
- /* Select 100mbps port to check for link beat. */
- new_csr6 = 0x83860000;
- dev->if_port = 3;
- outl(0, ioaddr + CSR13);
- outl(0x0003FF7F, ioaddr + CSR14);
- outw(8, ioaddr + CSR15);
- outl(1, ioaddr + CSR13);
- }
- if (tulip_debug > 1)
- printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
- dev->name, medianame[dev->if_port]);
- if (new_csr6 != (tp->csr6 & ~0x00D5)) {
- tp->csr6 &= 0x00D5;
- tp->csr6 |= new_csr6;
- outl(0x0301, ioaddr + CSR12);
- outl_CSR6(tp->csr6 | 0x0002, ioaddr, tp->chip_id);
- outl_CSR6(tp->csr6 | 0x2002, ioaddr, tp->chip_id);
- }
- next_tick = 3*HZ;
- }
- if (tp->cur_tx - tp->dirty_tx > 0 &&
- jiffies - dev->trans_start > TX_TIMEOUT) {
- printk(KERN_WARNING "%s: Tx hung, %d vs. %d.\n",
- dev->name, tp->cur_tx, tp->dirty_tx);
- tulip_tx_timeout(dev);
- }
-
- tp->timer.expires = RUN_AT(next_tick);
- add_timer(&tp->timer);
-}
-
static void t21142_start_nway(struct net_device *dev)
{
struct tulip_private *tp = (struct tulip_private *)dev->priv;
}
}
-static void mxic_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = (struct tulip_private *)dev->priv;
- long ioaddr = dev->base_addr;
- int next_tick = 60*HZ;
-
- if (tulip_debug > 3) {
- printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
- inl(ioaddr + CSR12));
- }
- if (next_tick) {
- tp->timer.expires = RUN_AT(next_tick);
- add_timer(&tp->timer);
- }
-}
-
-static void pnic_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = (struct tulip_private *)dev->priv;
- long ioaddr = dev->base_addr;
- int csr12 = inl(ioaddr + CSR12);
- int next_tick = 60*HZ;
- int new_csr6 = tp->csr6 & ~0x40C40200;
-
- if (media_cap[dev->if_port] & MediaIsMII) {
- int negotiated = mdio_read(dev, tp->phys[0], 5) & tp->advertising[0];
-
- if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC negotiated capability %8.8x, "
- "CSR5 %8.8x.\n",
- dev->name, negotiated, inl(ioaddr + CSR5));
-
- if (negotiated & 0x0380) /* 10 vs 100mbps */
- new_csr6 |= 0x810E0000;
- else
- new_csr6 |= 0x814E0000;
- if (((negotiated & 0x0300) == 0x0100) /* Duplex */
- || (negotiated & 0x00C0) == 0x0040
- || tp->full_duplex_lock) {
- tp->full_duplex = 1;
- new_csr6 |= 0x0200;
- }
- if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC MII PHY status %4.4x, Link "
- "partner report %4.4x, csr6 %8.8x/%8.8x.\n",
- dev->name, mdio_read(dev, tp->phys[0], 1), negotiated,
- tp->csr6, inl(ioaddr + CSR6));
- } else {
- int phy_reg = inl(ioaddr + 0xB8);
- int csr5 = inl(ioaddr + CSR5);
-
- if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC PHY status %8.8x, CSR5 %8.8x.\n",
- dev->name, phy_reg, csr5);
-
- if (phy_reg & 0x04000000) { /* Remote link fault */
- /*outl(0x0201F078, ioaddr + 0xB8);*/
- next_tick = 3*HZ;
- }
- if (inl(ioaddr + CSR5) & TPLnkFail) { /* 100baseTx link beat */
- if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
- "CSR5 %8.8x, PHY %3.3x.\n",
- dev->name, medianame[dev->if_port], csr12,
- inl(ioaddr + CSR5), inl(ioaddr + 0xB8));
- if (tp->medialock) {
- } else if (dev->if_port == 0) {
- dev->if_port = 3;
- outl(0x33, ioaddr + CSR12);
- new_csr6 = 0x01860000;
- outl(0x1F868, ioaddr + 0xB8);
- } else {
- dev->if_port = 0;
- outl(0x32, ioaddr + CSR12);
- new_csr6 = 0x00420000;
- outl(0x1F078, ioaddr + 0xB8);
- }
- new_csr6 |= (tp->csr6 & 0xfdff);
- next_tick = 3*HZ;
- } else
- new_csr6 = tp->csr6;
- if (tp->full_duplex_lock || (phy_reg & 0x30000000) != 0) {
- tp->full_duplex = 1;
- new_csr6 |= 0x00000200;
- }
- }
- if (tp->csr6 != new_csr6) {
- tp->csr6 = new_csr6;
- outl_CSR6(tp->csr6 | 0x0002, ioaddr, tp->chip_id); /* Restart Tx */
- outl_CSR6(tp->csr6 | 0x2002, ioaddr, tp->chip_id);
- dev->trans_start = jiffies;
- if (tulip_debug > 1)
- printk(KERN_INFO "%s: Changing PNIC configuration to %s-duplex, "
- "CSR6 %8.8x.\n",
- dev->name, tp->full_duplex ? "full" : "half", new_csr6);
- }
- tp->timer.expires = RUN_AT(next_tick);
- add_timer(&tp->timer);
-}
-
-static void comet_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = (struct tulip_private *)dev->priv;
- long ioaddr = dev->base_addr;
- int next_tick = 60*HZ;
-
- if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
- "%4.4x.\n",
- dev->name, inl(ioaddr + 0xB8), inl(ioaddr + 0xC8));
- tp->timer.expires = RUN_AT(next_tick);
- add_timer(&tp->timer);
-}
-
static void tulip_tx_timeout(struct net_device *dev)
{
struct tulip_private *tp = (struct tulip_private *)dev->priv;
case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
if (tp->mii_cnt)
data[0] = phy;
- else if (tp->chip_id == DC21142) /* 21142 pseudo-MII */
- data[0] = 32;
- else if (tp->chip_id == PNIC2)
- data[0] = 32;
- else if (tp->chip_id == COMET)
- data[0] = 1;
else
return -ENODEV;
return 0;
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next)
set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26, mc_filter);
- if (tp->chip_id == AX88140) {
- outl(2, ioaddr + CSR13);
- outl(mc_filter[0], ioaddr + CSR14);
- outl(3, ioaddr + CSR13);
- outl(mc_filter[1], ioaddr + CSR14);
- } else if (tp->chip_id == COMET) { /* Has a simple hash filter. */
- outl(mc_filter[0], ioaddr + 0xAC);
- outl(mc_filter[1], ioaddr + 0xB0);
- }
}
} else {
u16 *eaddrs, *setup_frm = tp->setup_frame;
struct dev_mc_list *mclist;
- u32 tx_flags = 0x08000000 | 192;
+ u32 tx_flags = 0x68000000 | 192;
int i;
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if ((dev->mc_count > 14) || ((dev->mc_count > 6) && (tp->chip_id == X3201_3))) { /* Must use a multicast hash table. */
+ if (dev->mc_count > 14) { /* Must use a multicast hash table. */
u16 hash_table[32];
- tx_flags = 0x08400000 | 192; /* Use hash filter. */
+ tx_flags = 0x68400000 | 192; /* Use hash filter. */
memset(hash_table, 0, sizeof(hash_table));
set_bit(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
*setup_frm++ = hash_table[i];
}
setup_frm = &tp->setup_frame[13*6];
- } else if(tp->chip_id != X3201_3) {
+ } else {
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
/* Fill the unused entries with the broadcast address. */
memset(setup_frm, 0xff, (15-i)*12);
setup_frm = &tp->setup_frame[15*6];
- } else {
- /* fill the first two table entries with our address */
- eaddrs = (u16 *)dev->dev_addr;
- for(i=0; i<2; i++) {
- *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
- *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
- *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
- }
- /* Double fill each entry to accomodate chips that */
- /* don't like to parse these correctly */
- for (i=0, mclist=dev->mc_list; i<dev->mc_count;
- i++, mclist=mclist->next) {
- eaddrs = (u16 *)mclist->dmi_addr;
- *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
- *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
- *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
- *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
- *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
- *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
- }
- i=((i+1)*2);
- /* Fill the unused entries with the broadcast address. */
- memset(setup_frm, 0xff, (15-i)*12);
- setup_frm = &tp->setup_frame[15*6];
}
/* Fill the final entry with our physical address. */
}
\f
static struct pci_device_id tulip_pci_table[] __devinitdata = {
-#if 0 /* these entries conflict with regular tulip driver */
- { 0x1011, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21040 },
- { 0x1011, 0x0014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21041 },
- { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
- { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21142 },
- { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
- { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
- { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
- { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },
- { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
- { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
- { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
- { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
-#endif
{ 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
{0},
};
return -ENODEV;
}
-static void tulip_suspend(struct pci_dev *pdev)
+static int tulip_suspend(struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pdev->driver_data;
struct tulip_private *tp = (struct tulip_private *)dev->priv;
printk(KERN_INFO "tulip_suspend(%s)\n", dev->name);
if (tp->open) tulip_down(dev);
+ return 0;
}
-static void tulip_resume(struct pci_dev *pdev)
+static int tulip_resume(struct pci_dev *pdev)
{
struct net_device *dev = pdev->driver_data;
struct tulip_private *tp = (struct tulip_private *)dev->priv;
printk(KERN_INFO "tulip_resume(%s)\n", dev->name);
if (tp->open) tulip_up(dev);
+ return 0;
}
static void __devexit tulip_remove(struct pci_dev *pdev)
MODULE_PARM(parport, "1-" __MODULE_STRING(PLIP_MAX) "i");
MODULE_PARM(timid, "1i");
+MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
static int flag_time = HZ;
MODULE_PARM(flag_time, "i");
+MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
/*
* Prototypes.
printk("Error: lan_saa9730_mii_init: timeout\n");
return -1;
}
- udelay(1000); /* wait 1 ms. */
+ mdelay(1); /* wait 1 ms. */
}
/* Now set the control and address register. */
printk("Error: lan_saa9730_mii_init: timeout\n");
return -1;
}
- udelay(1000); /* wait 1 ms. */
+ mdelay(1); /* wait 1 ms. */
}
/* Wait for 1 ms. */
- udelay(1000);
+ mdelay(1);
/* Check the link status. */
if (INL(&lp->lan_saa9730_regs->StationMgmtData) &
&lp->lan_saa9730_regs->StationMgmtCtl);
/* Wait for 1 ms. */
- udelay(1000);
+ mdelay(1);
/* set 'CONTROL' = force reset and renegotiate */
OUTL(PHY_CONTROL_RESET | PHY_CONTROL_AUTO_NEG |
&lp->lan_saa9730_regs->StationMgmtData);
/* Wait for 50 ms. */
- udelay(50 * 1000);
+ mdelay(50);
/* set 'BUSY' to start operation */
OUTL(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR |
("Error: lan_saa9730_mii_init: timeout\n");
return -1;
}
- udelay(1000); /* wait 1 ms. */
+ mdelay(1); /* wait 1 ms. */
}
/* Wait for 1 ms. */
- udelay(1000);
+ mdelay(1);
for (l = 0; l < 2; l++) {
/* set PHY address = 'STATUS' */
("Error: lan_saa9730_mii_init: timeout\n");
return -1;
}
- udelay(1000); /* wait 1 ms. */
+ mdelay(1); /* wait 1 ms. */
}
/* wait for 3 sec. */
- udelay(3000 * 1000);
+ mdelay(3000);
/* check the link status */
if (INL(&lp->lan_saa9730_regs->StationMgmtData) &
("Error: lan_sa9730_stop: MAC reset timeout\n");
return -1;
}
- udelay(1000); /* wait 1 ms. */
+ mdelay(1); /* wait 1 ms. */
}
return 0;
MODULE_DESCRIPTION("General Instruments SB1000 driver");
MODULE_PARM(io, "1-2i");
MODULE_PARM(irq, "i");
+MODULE_PARM_DESC(io, "SB1000 I/O base addresses");
+MODULE_PARM_DESC(irq, "SB1000 IRQ number");
static struct net_device dev_sb1000;
static int io[2];
/* Check for a network adaptor of this type, and return '0' iff one exists.
If dev->base_addr == 0, probe all likely locations.
If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
*/
int __init
static int irq = 10;
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
+MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
+MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
int init_module(void)
{
#ifdef MODULE
MODULE_PARM(shapers, "i");
+MODULE_PARM_DESC(shapers, "Traffic shaper: maximum nuber of shapers");
#else /* MODULE */
/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
Copyright 1999 Silicon Integrated System Corporation
- Revision: 1.07.11 Apr. 10 2001
+ Revision: 1.08.00 Jun. 11 2001
Modified from the driver which is originally written by Donald Becker.
preliminary Rev. 1.0 Jan. 18, 1998
http://www.sis.com.tw/support/databook.htm
+ Rev 1.08.00 Jun. 11 2001 Hui-Fen Hsu workaround for RTL8201 PHY and some bug fix
Rev 1.07.11 Apr. 2 2001 Hui-Fen Hsu updates PCI drivers to use the new pci_set_dma_mask for kernel 2.4.3
Rev 1.07.10 Mar. 1 2001 Hui-Fen Hsu <hfhsu@sis.com.tw> some bug fix & 635M/B support
Rev 1.07.09 Feb. 9 2001 Dave Jones <davej@suse.de> PCI enable cleanup
#include "sis900.h"
static char version[] __devinitdata =
-KERN_INFO "sis900.c: v1.07.11 4/10/2001\n";
+KERN_INFO "sis900.c: v1.08.00 6/11/2001\n";
static int max_interrupt_work = 40;
static int multicast_filter_limit = 128;
{ "AMD 79C901 HomePNA PHY", 0x0000, 0x6B90, HOME},
{ "ICS LAN PHY", 0x0015, 0xF440, LAN },
{ "NS 83851 PHY", 0x2000, 0x5C20, MIX },
+ { "Realtek RTL8201 PHY", 0x0000, 0x8200, LAN },
{0,},
};
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "SiS 900/7016 debug level (2-4)");
static int sis900_open(struct net_device *net_dev);
static int sis900_mii_probe (struct net_device * net_dev);
if (ret == 0) {
ret = -ENODEV;
- goto err_out_region;
+ goto err_out_unregister;
}
/* probe for mii transciver */
if (sis900_mii_probe(net_dev) == 0) {
ret = -ENODEV;
- goto err_out_region;
+ goto err_out_unregister;
}
/* print some information about our NIC */
return 0;
+ err_out_unregister:
+ unregister_netdev(net_dev);
err_out_cleardev:
pci_set_drvdata(pci_dev, NULL);
- err_out_region:
pci_release_regions(pci_dev);
err_out:
kfree(net_dev);
static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex)
{
struct sis900_private *sis_priv = net_dev->priv;
+ struct mii_phy *phy = sis_priv->mii;
int phy_addr = sis_priv->cur_phy;
u32 status;
u16 autoadv, autorec;
autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
status = autoadv & autorec;
+
+ *speed = HW_SPEED_10_MBPS;
+ *duplex = FDX_CAPABLE_HALF_SELECTED;
if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
*speed = HW_SPEED_100_MBPS;
- else
- *speed = HW_SPEED_10_MBPS;
if (status & ( MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
*duplex = FDX_CAPABLE_FULL_SELECTED;
- else
- *duplex = FDX_CAPABLE_HALF_SELECTED;
-
- sis_priv->autong_complete = 1;
+ sis_priv->autong_complete = 1;
+
+ /* Workaround for Realtek RTL8201 PHY issue */
+ if((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)){
+ if(mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
+ *duplex = FDX_CAPABLE_FULL_SELECTED;
+ if(mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
+ *speed = HW_SPEED_100_MBPS;
+ }
+
printk(KERN_INFO "%s: Media Link On %s %s-duplex \n",
net_dev->name,
*speed == HW_SPEED_100_MBPS ?
int slip_maxdev = SL_NRUNIT; /* Can be overridden with insmod! */
MODULE_PARM(slip_maxdev, "i");
+MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices");
static struct tty_ldisc sl_ldisc;
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_ULTRAMCA_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_ULTRAMCA_CARDS) "i");
+MODULE_PARM_DESC(io, "SMC Ultra/EtherEZ MCA I/O base address(es)");
+MODULE_PARM_DESC(irq, "SMC Ultra/EtherEZ MCA IRQ number(s)");
int init_module(void)
{
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_ULTRA_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_ULTRA_CARDS) "i");
+MODULE_PARM_DESC(io, "SMC Ultra I/O base address(es)");
+MODULE_PARM_DESC(irq, "SMC Ultra IRQ number(s) (assigned)");
EXPORT_NO_SYMBOLS;
MODULE_PARM(io, "i");
MODULE_PARM(irq, "i");
MODULE_PARM(ifport, "i");
+MODULE_PARM_DESC(io, "SMC 99194 I/O base address");
+MODULE_PARM_DESC(irq, "SMC 99194 IRQ number");
+MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)");
int init_module(void)
{
/*
Written 1998-2000 by Donald Becker.
+ Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
+ send all bug reports to me, and not to Donald Becker, as this code
+ has been modified quite a bit from Donald's original version.
+
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
-----------------------------------------------------------
Linux kernel-specific changes:
-
+
LK1.1.1 (jgarzik):
- Use PCI driver interface
- Fix MOD_xxx races
LK1.1.3 (Andrew Morton)
- Timer cleanups
-
+
LK1.1.4 (jgarzik):
- Merge Becker version 1.03
+
+ LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
+ - Support hardware Rx/Tx checksumming
+ - Use the GFP firmware taken from Adaptec's Netware driver
+
+ LK1.2.2 (Ion Badulescu)
+ - Backported to 2.2.x
+
+ LK1.2.3 (Ion Badulescu)
+ - Fix the flaky mdio interface
+ - More compat clean-ups
+
+ LK1.2.4 (Ion Badulescu)
+ - More 2.2.x initialization fixes
+
+ LK1.2.5 (Ion Badulescu)
+ - Several fixes from Manfred Spraul
+
+ LK1.2.6 (Ion Badulescu)
+ - Fixed ifup/ifdown/ifup problem in 2.4.x
+
+ LK1.2.7 (Ion Badulescu)
+ - Removed unused code
+ - Made more functions static and __init
+
+ LK1.2.8 (Ion Badulescu)
+ - Quell bogus error messages, inform about the Tx threshold
+ - Removed #ifdef CONFIG_PCI, this driver is PCI only
+
+ LK1.2.9 (Ion Badulescu)
+ - Merged Jeff Garzik's changes from 2.4.4-pre5
+ - Added 2.2.x compatibility stuff required by the above changes
+
+ LK1.2.9a (Ion Badulescu)
+ - More updates from Jeff Garzik
+
+ LK1.3.0 (Ion Badulescu)
+ - Merged zerocopy support
+
+ LK1.3.1 (Ion Badulescu)
+ - Added ethtool support
+ - Added GPIO (media change) interrupt support
+
+ LK1.3.2 (Ion Badulescu)
+ - Fixed 2.2.x compatibility issues introduced in 1.3.1
+ - Fixed ethtool ioctl returning uninitialized memory
+
+TODO:
+ - implement tx_timeout() properly
*/
+#define DRV_NAME "starfire"
+#define DRV_VERSION "1.03+LK1.3.2"
+#define DRV_RELDATE "June 04, 2001"
+
+/*
+ * Adaptec's license for their Novell drivers (which is where I got the
+ * firmware files) does not allow one to redistribute them. Thus, we can't
+ * include the firmware with this driver.
+ *
+ * However, an end-user is allowed to download and use it, after
+ * converting it to C header files using starfire_firmware.pl.
+ * Once that's done, the #undef below must be changed into a #define
+ * for this driver to really use the firmware. Note that Rx/Tx
+ * hardware TCP checksumming is not possible without the firmware.
+ *
+ * I'm currently [Feb 2001] talking to Adaptec about this redistribution
+ * issue. Stay tuned...
+ */
+#undef HAS_FIRMWARE
+/*
+ * The current frame processor firmware fails to checksum a fragment
+ * of length 1. If and when this is fixed, the #define below can be removed.
+ */
+#define HAS_BROKEN_FIRMWARE
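
As an illustration of what HAS_BROKEN_FIRMWARE implies for the transmit path on a zerocopy-capable kernel (a sketch only, not code from the patch; the helper name is invented), a driver built with the firmware would have to detect single-byte fragments and fall back to software checksumming for those packets:

#include <linux/skbuff.h>

/* Return non-zero if any fragment of this skb is one byte long, i.e. a
 * packet the frame processor firmware cannot checksum correctly. */
static int skb_has_one_byte_frag(struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (skb_shinfo(skb)->frags[i].size == 1)
			return 1;
	return skb_headlen(skb) == 1;	/* the linear head counts too */
}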
+/*
+ * Define this if using the driver with the zero-copy patch
+ */
+#if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
+#define ZEROCOPY
+#endif
+
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
/* Used for tuning interrupt latency vs. overhead. */
-static int interrupt_mitigation = 0x0;
+static int interrupt_mitigation;
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
- The Starfire has a 512 element hash table based on the Ethernet CRC. */
-static int multicast_filter_limit = 32;
+ The Starfire has a 512 element hash table based on the Ethernet CRC. */
+static int multicast_filter_limit = 512;
-/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
- Setting to > 1518 effectively disables this feature. */
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+/*
+ * Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ * Setting to > 1518 effectively disables this feature.
+ *
+ * NOTE:
+ * The ia64 doesn't allow for unaligned loads even of integers being
+ * misaligned on a 2 byte boundary. Thus always force copying of
+ * packets as the starfire doesn't allow for misaligned DMAs ;-(
+ * 23/10/2000 - Jes
+ *
+ * The Alpha and the Sparc don't allow unaligned loads, either. -Ion
+ */
+#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
+static int rx_copybreak = PKT_BUF_SZ;
+#else
static int rx_copybreak = 0;
+#endif
/* Used to pass the media type, etc.
Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8 /* More are supported, limit only on options */
-static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int options[MAX_UNITS] = {0, };
+static int full_duplex[MAX_UNITS] = {0, };
/* Operational parameters that are set at compile time. */
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (2*HZ)
-
-#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
-
-/*
- * The ia64 doesn't allow for unaligned loads even of integers being
- * misaligned on a 2 byte boundary. Thus always force copying of
- * packets as the starfire doesn't allow for misaligned DMAs ;-(
- * 23/10/2000 - Jes
- */
-#ifdef __ia64__
-#define PKT_SHOULD_COPY(pkt_len) 1
-#else
-#define PKT_SHOULD_COPY(pkt_len) (pkt_len < rx_copybreak)
-#endif
+#define TX_TIMEOUT (2 * HZ)
+
+#ifdef ZEROCOPY
+#if MAX_SKB_FRAGS <= 6
+#define MAX_STARFIRE_FRAGS 6
+#else /* MAX_STARFIRE_FRAGS > 6 */
+#warning This driver will not work with more than 6 skb fragments.
+#warning Turning off zerocopy support.
+#undef ZEROCOPY
+#endif /* MAX_STARFIRE_FRAGS > 6 */
+#endif /* ZEROCOPY */
+
+#ifdef ZEROCOPY
+#define skb_first_frag_len(skb) skb_headlen(skb)
+#else /* not ZEROCOPY */
+#define skb_first_frag_len(skb) (skb->len)
+#endif /* not ZEROCOPY */
#if !defined(__OPTIMIZE__)
#warning You must compile this file with the correct options!
#error You must compile this driver with "-O".
#endif
+#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/bitops.h>
-#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#ifdef HAS_FIRMWARE
+#include "starfire_firmware.h"
+#endif /* HAS_FIRMWARE */
+
+/* 2.2.x compatibility code */
+#if LINUX_VERSION_CODE < 0x20300
+
+#include "starfire-kcomp22.h"
+
+#else /* LINUX_VERSION_CODE > 0x20300 */
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+#define COMPAT_MOD_INC_USE_COUNT
+#define COMPAT_MOD_DEC_USE_COUNT
+
+#define init_tx_timer(dev, func, timeout) \
+ dev->tx_timeout = func; \
+ dev->watchdog_timeo = timeout;
+#define kick_tx_timer(dev, func, timeout)
+
+#define netif_start_if(dev)
+#define netif_stop_if(dev)
+
+#define PCI_SLOT_NAME(pci_dev) (pci_dev)->slot_name
+
+#endif /* LINUX_VERSION_CODE > 0x20300 */
+/* end of compatibility code */
+
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO " Updates and info at http://www.scyld.com/network/starfire.html\n"
-KERN_INFO " (unofficial 2.4.x kernel port, version 1.1.4a, April 17, 2001)\n";
+KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM(interrupt_mitigation, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(max_interrupt_work, "Starfire maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "Starfire MTU (all boards)");
+MODULE_PARM_DESC(debug, "Starfire debug level (0-6)");
+MODULE_PARM_DESC(rx_copybreak, "Starfire copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options, "Starfire: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "Starfire full duplex setting(s) (1)");
/*
Theory of Operation
See the Adaptec manual for the many possible structures, and options for
each structure. There are far too many to document here.
-For transmit this driver uses type 1 transmit descriptors, and relies on
-automatic minimum-length padding. It does not use the completion queue
+For transmit this driver uses type 0/1 transmit descriptors (depending
+on the presence of the zerocopy infrastructure), and relies on automatic
+minimum-length padding. It does not use the completion queue
consumer index, but instead checks for non-zero status entries.
For receive this driver uses type 0 receive descriptors. The driver
phase of receive.
A notable aspect of operation is that unaligned buffers are not permitted by
-the Starfire hardware. The IP header at offset 14 in an ethernet frame thus
+the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machine
-e.g. Alphas. Copied frames are put into the skbuff at an offset of "+2",
-16-byte aligning the IP header.
+e.g. Alphas and IA64. For these architectures, the driver is forced to copy
+the frame into a new skbuff unconditionally. Copied frames are put into the
+skbuff at an offset of "+2", thus 16-byte aligning the IP header.
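
To make the alignment point concrete (a sketch only, not code from the patch; the helper name is invented, while dev_alloc_skb/skb_reserve/skb_put are the standard kernel helpers), the copy path reserves two bytes in the fresh skbuff so that after the 14-byte Ethernet header the IP header lands on a 16-byte boundary:

#include <linux/skbuff.h>
#include <linux/string.h>

/* Copy a received frame into a freshly allocated, aligned skbuff. */
static struct sk_buff *copy_aligned_rx(struct sk_buff *rx_skb, int pkt_len)
{
	struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

	if (skb == NULL)
		return NULL;
	skb_reserve(skb, 2);	/* the "+2" offset mentioned above */
	memcpy(skb_put(skb, pkt_len), rx_skb->data, pkt_len);
	return skb;
}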
IIId. Synchronization
enum chip_capability_flags {CanHaveMII=1, };
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0)
-#define MEM_ADDR_SZ 0x80000 /* And maps in 0.5MB(!). */
#if 0
#define ADDR_64BITS 1 /* This chip uses 64 bit addresses. */
/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static struct chip_info {
const char *name;
- int io_size;
int drv_flags;
} netdrv_tbl[] __devinitdata = {
- { "Adaptec Starfire 6915", MEM_ADDR_SZ, CanHaveMII },
+ { "Adaptec Starfire 6915", CanHaveMII },
};
PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
- TxDescCtrl=0x50090,
+ GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
TxRingHiAddr=0x5009C, /* 64 bit address extension. */
TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
TxThreshold=0x500B0,
CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
- CompletionQConsumerIdx=0x500C4,
+ CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
- TxMode=0x55000,
+ TxMode=0x55000, PerfFilterTable=0x56000, HashTable=0x56100,
+ TxGfpMem=0x58000, RxGfpMem=0x5a000,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
- IntrNormalSummary=0x8000, IntrAbnormalSummary=0x02000000,
- IntrRxDone=0x0300, IntrRxEmpty=0x10040, IntrRxPCIErr=0x80000,
- IntrTxDone=0x4000, IntrTxEmpty=0x1000, IntrTxPCIErr=0x80000,
- StatsMax=0x08000000, LinkChange=0xf0000000,
- IntrTxDataLow=0x00040000,
+ IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
+ IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
+ IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
+ IntrTxComplQLow=0x200000, IntrPCI=0x100000,
+ IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
+ IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
+ IntrNormalSummary=0x8000, IntrTxDone=0x4000,
+ IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
+ IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
+ IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
+ IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
+ IntrNoTxCsum=0x20, IntrTxBadID=0x10,
+ IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
+ IntrTxGfp=0x02, IntrPCIPad=0x01,
+ /* not quite bits */
+ IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
+ IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
+ IntrNormalMask=0xf0, IntrAbnormalMask=0x3f0e,
};
/* Bits in the RxFilterMode register. */
AcceptMulticast=0x10, AcceptMyPhys=0xE040,
};
+/* Bits in the TxDescCtrl register. */
+enum tx_ctrl_bits {
+ TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
+ TxDescSpace128=0x30, TxDescSpace256=0x40,
+ TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
+ TxDescType3=0x03, TxDescType4=0x04,
+ TxNoDMACompletion=0x08, TxDescQ64bit=0x80,
+ TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
+ TxDMABurstSizeShift=8,
+};
+
+/* Bits in the RxDescQCtrl register. */
+enum rx_ctrl_bits {
+ RxBufferLenShift=16, RxMinDescrThreshShift=0,
+ RxPrefetchMode=0x8000, Rx2048QEntries=0x4000,
+ RxVariableQ=0x2000, RxDesc64bit=0x1000,
+ RxDescQAddr64bit=0x0100,
+ RxDescSpace4=0x000, RxDescSpace8=0x100,
+ RxDescSpace16=0x200, RxDescSpace32=0x300,
+ RxDescSpace64=0x400, RxDescSpace128=0x500,
+ RxConsumerWrEn=0x80,
+};
+
+/* Bits in the RxCompletionAddr register */
+enum rx_compl_bits {
+ RxComplQAddr64bit=0x80, TxComplProducerWrEn=0x40,
+ RxComplType0=0x00, RxComplType1=0x10,
+ RxComplType2=0x20, RxComplType3=0x30,
+ RxComplThreshShift=0,
+};
+
/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
- u32 rxaddr; /* Optionally 64 bits. */
+ u32 rxaddr; /* Optionally 64 bits. */
};
enum rx_desc_bits {
RxDescValid=1, RxDescEndRing=2,
/* Completion queue entry.
You must update the page allocation, init_ring and the shift count in rx()
if using a larger format. */
+#ifdef HAS_FIRMWARE
+#define csum_rx_status
+#endif /* HAS_FIRMWARE */
struct rx_done_desc {
- u32 status; /* Low 16 bits is length. */
+ u32 status; /* Low 16 bits is length. */
+#ifdef csum_rx_status
+ u32 status2; /* Low 16 bits is csum */
+#endif /* csum_rx_status */
#ifdef full_rx_status
u32 status2;
u16 vlanid;
- u16 csum; /* partial checksum */
+ u16 csum; /* partial checksum */
u32 timestamp;
-#endif
+#endif /* full_rx_status */
};
enum rx_done_bits {
RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};
+#ifdef ZEROCOPY
+/* Type 0 Tx descriptor. */
+/* If more fragments are needed, don't forget to change the
+ descriptor spacing as well! */
+struct starfire_tx_desc {
+ u32 status;
+ u32 nbufs;
+ u32 first_addr;
+ u16 first_len;
+ u16 total_len;
+ struct {
+ u32 addr;
+ u32 len;
+ } frag[MAX_STARFIRE_FRAGS];
+};
+#else /* not ZEROCOPY */
/* Type 1 Tx descriptor. */
struct starfire_tx_desc {
- u32 status; /* Upper bits are status, lower 16 length. */
- u32 addr;
+ u32 status; /* Upper bits are status, lower 16 length. */
+ u32 first_addr;
};
+#endif /* not ZEROCOPY */
enum tx_desc_bits {
- TxDescID=0xB1010000, /* Also marks single fragment, add CRC. */
- TxDescIntr=0x08000000, TxRingWrap=0x04000000,
+ TxDescID=0xB0000000,
+ TxCRCEn=0x01000000, TxDescIntr=0x08000000,
+ TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_report {
- u32 status; /* timestamp, index. */
+ u32 status; /* timestamp, index. */
#if 0
- u32 intrstatus; /* interrupt status */
+ u32 intrstatus; /* interrupt status */
#endif
};
-#define PRIV_ALIGN 15 /* Required alignment mask */
-struct ring_info {
+struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t mapping;
};
+struct tx_ring_info {
+ struct sk_buff *skb;
+ dma_addr_t first_mapping;
+#ifdef ZEROCOPY
+ dma_addr_t frag_mapping[MAX_STARFIRE_FRAGS];
+#endif /* ZEROCOPY */
+};
-#define MII_CNT 4
+#define PHY_CNT 2
struct netdev_private {
/* Descriptor rings first for alignment. */
struct starfire_rx_desc *rx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
/* The addresses of rx/tx-in-place skbuffs. */
- struct ring_info rx_info[RX_RING_SIZE];
- struct ring_info tx_info[TX_RING_SIZE];
- /* Pointers to completion queues (full pages). I should cache line pad..*/
- u8 pad0[100];
+ struct rx_ring_info rx_info[RX_RING_SIZE];
+ struct tx_ring_info tx_info[TX_RING_SIZE];
+ /* Pointers to completion queues (full pages). */
struct rx_done_desc *rx_done_q;
dma_addr_t rx_done_q_dma;
unsigned int rx_done;
struct tx_done_report *tx_done_q;
- unsigned int tx_done;
dma_addr_t tx_done_q_dma;
+ unsigned int tx_done;
struct net_device_stats stats;
- struct timer_list timer; /* Media monitoring timer. */
struct pci_dev *pci_dev;
/* Frequently used values: keep some adjacent for cache effect. */
- unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
- unsigned int rx_buf_sz; /* Based on MTU+slack. */
- unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ unsigned int tx_full:1, /* The Tx queue is full. */
/* These values are keep track of the transceiver/media in use. */
- unsigned int full_duplex:1, /* Full-duplex operation requested. */
- medialock:1, /* Xcvr set to fixed speed/duplex. */
- rx_flowctrl:1,
- tx_flowctrl:1; /* Use 802.3x flow control. */
- unsigned int default_port:4; /* Last dev->if_port value. */
+ autoneg:1, /* Autonegotiation allowed. */
+ full_duplex:1, /* Full-duplex operation. */
+ speed100:1; /* Set if speed == 100MBit. */
+ unsigned int intr_mitigation;
u32 tx_mode;
u8 tx_threshold;
/* MII transceiver section. */
- int mii_cnt; /* MII device addresses. */
- u16 advertising; /* NWay media advertisement */
- unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
+ u16 advertising; /* NWay media advertisement */
+ int phy_cnt; /* MII device addresses. */
+ unsigned char phys[PHY_CNT]; /* MII device addresses. */
};
-static int mdio_read(struct net_device *dev, int phy_id, int location);
-static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
-static int netdev_open(struct net_device *dev);
-static void check_duplex(struct net_device *dev, int startup);
-static void netdev_timer(unsigned long data);
-static void tx_timeout(struct net_device *dev);
-static void init_ring(struct net_device *dev);
-static int start_tx(struct sk_buff *skb, struct net_device *dev);
-static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
-static void netdev_error(struct net_device *dev, int intr_status);
-static int netdev_rx(struct net_device *dev);
-static void netdev_error(struct net_device *dev, int intr_status);
-static void set_rx_mode(struct net_device *dev);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void tx_timeout(struct net_device *dev);
+static void init_ring(struct net_device *dev);
+static int start_tx(struct sk_buff *skb, struct net_device *dev);
+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static void netdev_error(struct net_device *dev, int intr_status);
+static int netdev_rx(struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int netdev_close(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+static void netdev_media_change(struct net_device *dev);
\f
-static int __devinit starfire_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __devinit starfire_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct netdev_private *np;
int i, irq, option, chip_idx = ent->driver_data;
struct net_device *dev;
static int card_idx = -1;
long ioaddr;
- int drv_flags, io_size = netdrv_tbl[chip_idx].io_size;
+ int drv_flags, io_size;
+ int boguscnt;
+ u8 cache;
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
#endif
card_idx++;
- option = card_idx < MAX_UNITS ? options[card_idx] : 0;
if (pci_enable_device (pdev))
return -EIO;
ioaddr = pci_resource_start (pdev, 0);
+ io_size = pci_resource_len (pdev, 0);
if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_MEM) == 0)) {
- printk (KERN_ERR "starfire %d: no PCI MEM resources, aborting\n", card_idx);
+ printk (KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
return -ENODEV;
}
-
+
dev = alloc_etherdev(sizeof(*np));
if (!dev) {
- printk (KERN_ERR "starfire %d: cannot alloc etherdev, aborting\n", card_idx);
+ printk (KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
return -ENOMEM;
}
SET_MODULE_OWNER(dev);
-
- irq = pdev->irq;
- if (pci_request_regions (pdev, "starfire"))
+ irq = pdev->irq;
+
+ if (pci_request_regions (pdev, dev->name)) {
+ printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
goto err_out_free_netdev;
+ }
ioaddr = (long) ioremap (ioaddr, io_size);
if (!ioaddr) {
- printk (KERN_ERR "starfire %d: cannot remap 0x%x @ 0x%lx, aborting\n",
+ printk (KERN_ERR DRV_NAME " %d: cannot remap 0x%x @ 0x%lx, aborting\n",
card_idx, io_size, ioaddr);
goto err_out_free_res;
}
pci_set_master (pdev);
-
+
+ /* set PCI cache size */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
+ if ((cache << 2) != SMP_CACHE_BYTES) {
+ printk(KERN_INFO " PCI cache line size set incorrectly "
+ "(%i bytes) by BIOS/FW, correcting to %i\n",
+ (cache << 2), SMP_CACHE_BYTES);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ SMP_CACHE_BYTES >> 2);
+ }
+
+#ifdef ZEROCOPY
+ /* Starfire can do SG and TCP/UDP checksumming */
+ dev->features |= NETIF_F_SG;
+#ifdef HAS_FIRMWARE
+ dev->features |= NETIF_F_IP_CSUM;
+#endif /* HAS_FIRMWARE */
+#endif /* ZEROCOPY */
+
/* Serial EEPROM reads are hidden by the hardware. */
for (i = 0; i < 6; i++)
dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20-i);
#if ! defined(final_version) /* Dump the EEPROM contents during development. */
if (debug > 4)
for (i = 0; i < 0x20; i++)
- printk("%2.2x%s", (unsigned int)readb(ioaddr + EEPROMCtrl + i),
- i % 16 != 15 ? " " : "\n");
+ printk("%2.2x%s",
+ (unsigned int)readb(ioaddr + EEPROMCtrl + i),
+ i % 16 != 15 ? " " : "\n");
#endif
+ /* Issue soft reset */
+ writel(0x8000, ioaddr + TxMode);
+ udelay(1000);
+ writel(0, ioaddr + TxMode);
+
/* Reset the chip to erase previous misconfiguration. */
writel(1, ioaddr + PCIDeviceConfig);
+ boguscnt = 1000;
+ while (--boguscnt > 0) {
+ udelay(10);
+ if ((readl(ioaddr + PCIDeviceConfig) & 1) == 0)
+ break;
+ }
+ if (boguscnt == 0)
+ printk("%s: chipset reset never completed!\n", dev->name);
+ /* wait a little longer */
+ udelay(1000);
dev->base_addr = ioaddr;
dev->irq = irq;
np->pci_dev = pdev;
drv_flags = netdrv_tbl[chip_idx].drv_flags;
+ option = card_idx < MAX_UNITS ? options[card_idx] : 0;
if (dev->mem_start)
option = dev->mem_start;
/* The lower four bits are the media type. */
- if (option > 0) {
- if (option & 0x200)
- np->full_duplex = 1;
- np->default_port = option & 15;
- if (np->default_port)
- np->medialock = 1;
- }
- if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ if (option & 0x200)
+ np->full_duplex = 1;
+
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
np->full_duplex = 1;
if (np->full_duplex)
- np->medialock = 1;
+ np->autoneg = 0;
+ else
+ np->autoneg = 1;
+ np->speed100 = 1;
/* The chip-specific entries in the device structure. */
dev->open = &netdev_open;
dev->hard_start_xmit = &start_tx;
- dev->tx_timeout = &tx_timeout;
- dev->watchdog_timeo = TX_TIMEOUT;
+ init_tx_timer(dev, tx_timeout, TX_TIMEOUT);
dev->stop = &netdev_close;
dev->get_stats = &get_stats;
dev->set_multicast_list = &set_rx_mode;
- dev->do_ioctl = &mii_ioctl;
+ dev->do_ioctl = &netdev_ioctl;
if (mtu)
dev->mtu = mtu;
printk(KERN_INFO "%s: %s at 0x%lx, ",
dev->name, netdrv_tbl[chip_idx].name, ioaddr);
for (i = 0; i < 5; i++)
- printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x:", dev->dev_addr[i]);
printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
if (drv_flags & CanHaveMII) {
int phy, phy_idx = 0;
- for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
- int mii_status = mdio_read(dev, phy, 1);
- if (mii_status != 0xffff && mii_status != 0x0000) {
+ int mii_status;
+ for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
+ mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
+ udelay(500);
+ boguscnt = 1000;
+ while (--boguscnt > 0)
+ if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ if (boguscnt == 0) {
+ printk("%s: PHY reset never completed!\n", dev->name);
+ continue;
+ }
+ mii_status = mdio_read(dev, phy, MII_BMSR);
+ if (mii_status != 0) {
np->phys[phy_idx++] = phy;
- np->advertising = mdio_read(dev, phy, 4);
+ np->advertising = mdio_read(dev, phy, MII_ADVERTISE);
printk(KERN_INFO "%s: MII PHY found at address %d, status "
"0x%4.4x advertising %4.4x.\n",
dev->name, phy, mii_status, np->advertising);
+ /* there can be only one PHY on-board */
+ break;
}
}
- np->mii_cnt = phy_idx;
+ np->phy_cnt = phy_idx;
}
return 0;
/* ??? Should we add a busy-wait here? */
do
result = readl(mdio_addr);
- while ((result & 0xC0000000) != 0x80000000 && --boguscnt >= 0);
+ while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
+ if (boguscnt == 0)
+ return 0;
+ if ((result & 0xffff) == 0xffff)
+ return 0;
return result & 0xffff;
}
/* Do we ever need to reset the chip??? */
+ COMPAT_MOD_INC_USE_COUNT;
+
retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
- if (retval)
+ if (retval) {
+ COMPAT_MOD_DEC_USE_COUNT;
return retval;
+ }
/* Disable the Rx and Tx, and reset the chip. */
writel(0, ioaddr + GenCtrl);
writel(1, ioaddr + PCIDeviceConfig);
if (debug > 1)
printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
- dev->name, dev->irq);
+ dev->name, dev->irq);
/* Allocate the various queues, failing gracefully. */
if (np->tx_done_q == 0)
np->tx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_done_q_dma);
if (np->rx_done_q == 0)
- np->rx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_done_q_dma);
+ np->rx_done_q = pci_alloc_consistent(np->pci_dev, sizeof(struct rx_done_desc) * DONE_Q_SIZE, &np->rx_done_q_dma);
if (np->tx_ring == 0)
np->tx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_ring_dma);
if (np->rx_ring == 0)
np->rx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_ring_dma);
- if (np->tx_done_q == 0 || np->rx_done_q == 0
- || np->rx_ring == 0 || np->tx_ring == 0) {
+ if (np->tx_done_q == 0 || np->rx_done_q == 0
+ || np->rx_ring == 0 || np->tx_ring == 0) {
if (np->tx_done_q)
pci_free_consistent(np->pci_dev, PAGE_SIZE,
- np->tx_done_q, np->tx_done_q_dma);
+ np->tx_done_q, np->tx_done_q_dma);
if (np->rx_done_q)
- pci_free_consistent(np->pci_dev, PAGE_SIZE,
- np->rx_done_q, np->rx_done_q_dma);
+ pci_free_consistent(np->pci_dev, sizeof(struct rx_done_desc) * DONE_Q_SIZE,
+ np->rx_done_q, np->rx_done_q_dma);
if (np->tx_ring)
pci_free_consistent(np->pci_dev, PAGE_SIZE,
- np->tx_ring, np->tx_ring_dma);
+ np->tx_ring, np->tx_ring_dma);
if (np->rx_ring)
pci_free_consistent(np->pci_dev, PAGE_SIZE,
- np->rx_ring, np->rx_ring_dma);
+ np->rx_ring, np->rx_ring_dma);
+ COMPAT_MOD_DEC_USE_COUNT;
return -ENOMEM;
}
init_ring(dev);
/* Set the size of the Rx buffers. */
- writel((np->rx_buf_sz<<16) | 0xA000, ioaddr + RxDescQCtrl);
-
+ writel((np->rx_buf_sz << RxBufferLenShift) |
+ (0 << RxMinDescrThreshShift) |
+ RxPrefetchMode | RxVariableQ |
+ RxDescSpace4,
+ ioaddr + RxDescQCtrl);
+
+#ifdef ZEROCOPY
+ /* Set Tx descriptor to type 0 and spacing to 64 bytes. */
+ writel((2 << TxHiPriFIFOThreshShift) |
+ (0 << TxPadLenShift) |
+ (4 << TxDMABurstSizeShift) |
+ TxDescSpace64 | TxDescType0,
+ ioaddr + TxDescCtrl);
+#else /* not ZEROCOPY */
/* Set Tx descriptor to type 1 and padding to 0 bytes. */
- writel(0x02000401, ioaddr + TxDescCtrl);
+ writel((2 << TxHiPriFIFOThreshShift) |
+ (0 << TxPadLenShift) |
+ (4 << TxDMABurstSizeShift) |
+ TxDescSpaceUnlim | TxDescType1,
+ ioaddr + TxDescCtrl);
+#endif /* not ZEROCOPY */
#if defined(ADDR_64BITS) && defined(__alpha__)
/* XXX We really need a 64-bit PCI dma interfaces too... -DaveM */
writel(np->tx_ring_dma, ioaddr + TxRingPtr);
writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
- writel(np->rx_done_q_dma, ioaddr + RxCompletionAddr);
+#ifdef full_rx_status
+ writel(np->rx_done_q_dma |
+ RxComplType3 |
+ (0 << RxComplThreshShift),
+ ioaddr + RxCompletionAddr);
+#else /* not full_rx_status */
+#ifdef csum_rx_status
+ writel(np->rx_done_q_dma |
+ RxComplType2 |
+ (0 << RxComplThreshShift),
+ ioaddr + RxCompletionAddr);
+#else /* not csum_rx_status */
+ writel(np->rx_done_q_dma |
+ RxComplType0 |
+ (0 << RxComplThreshShift),
+ ioaddr + RxCompletionAddr);
+#endif /* not csum_rx_status */
+#endif /* not full_rx_status */
if (debug > 1)
- printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
+ printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
/* Fill both the unused Tx SA register and the Rx perfect filter. */
for (i = 0; i < 6; i++)
writeb(dev->dev_addr[i], ioaddr + StationAddr + 5-i);
for (i = 0; i < 16; i++) {
u16 *eaddrs = (u16 *)dev->dev_addr;
- long setup_frm = ioaddr + 0x56000 + i*16;
+ long setup_frm = ioaddr + PerfFilterTable + i * 16;
writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
/* Initialize other registers. */
/* Configure the PCI bus bursts and FIFO thresholds. */
- np->tx_mode = 0; /* Initialized when TxMode set. */
+ np->tx_mode = 0x0C04; /* modified when link is up. */
np->tx_threshold = 4;
writel(np->tx_threshold, ioaddr + TxThreshold);
- writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
- if (dev->if_port == 0)
- dev->if_port = np->default_port;
+ interrupt_mitigation &= 0x1f;
+ np->intr_mitigation = interrupt_mitigation;
+ writel(np->intr_mitigation, ioaddr + IntrTimerCtrl);
+ netif_start_if(dev);
netif_start_queue(dev);
if (debug > 1)
- printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
+ printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
set_rx_mode(dev);
- np->advertising = mdio_read(dev, np->phys[0], 4);
- check_duplex(dev, 1);
+ np->advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+ check_duplex(dev);
+
+ /* Enable GPIO interrupts on link change */
+ writel(0x0f00ff00, ioaddr + GPIOCtrl);
/* Set the interrupt mask and enable PCI interrupts. */
- writel(IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
- IntrTxDone | IntrTxEmpty | IntrTxPCIErr |
- StatsMax | LinkChange | IntrNormalSummary | IntrAbnormalSummary
- | 0x0010 , ioaddr + IntrEnable);
+ writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
+ IntrTxDone | IntrStatsMax | IntrLinkChange |
+ IntrNormalSummary | IntrAbnormalSummary |
+ IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
+ ioaddr + IntrEnable);
writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
- ioaddr + PCIDeviceConfig);
-
- /* Enable the Rx and Tx units. */
+ ioaddr + PCIDeviceConfig);
+
+#ifdef HAS_FIRMWARE
+ /* Load Rx/Tx firmware into the frame processors */
+ for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
+ writel(cpu_to_le32(firmware_rx[i]), ioaddr + RxGfpMem + i * 4);
+ for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
+ writel(cpu_to_le32(firmware_tx[i]), ioaddr + TxGfpMem + i * 4);
+ /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
+ writel(0x003F, ioaddr + GenCtrl);
+#else /* not HAS_FIRMWARE */
+ /* Enable the Rx and Tx units only. */
writel(0x000F, ioaddr + GenCtrl);
+#endif /* not HAS_FIRMWARE */
if (debug > 2)
printk(KERN_DEBUG "%s: Done netdev_open().\n",
- dev->name);
-
- /* Set the timer to check for link beat. */
- init_timer(&np->timer);
- np->timer.expires = jiffies + 3*HZ;
- np->timer.data = (unsigned long)dev;
- np->timer.function = &netdev_timer; /* timer handler */
- add_timer(&np->timer);
+ dev->name);
return 0;
}
-static void check_duplex(struct net_device *dev, int startup)
+
+static void check_duplex(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
- int new_tx_mode ;
+ u16 reg0;
- new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800:0)
- | (np->rx_flowctrl ? 0x0400:0);
- if (np->medialock) {
- if (np->full_duplex)
- new_tx_mode |= 2;
- } else {
- int mii_reg5 = mdio_read(dev, np->phys[0], 5);
- int negotiated = mii_reg5 & np->advertising;
- int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
- if (duplex)
- new_tx_mode |= 2;
- if (np->full_duplex != duplex) {
- np->full_duplex = duplex;
- if (debug > 1)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
- " negotiated capability %4.4x.\n", dev->name,
- duplex ? "full" : "half", np->phys[0], negotiated);
- }
- }
- if (new_tx_mode != np->tx_mode) {
- np->tx_mode = new_tx_mode;
- writel(np->tx_mode | 0x8000, ioaddr + TxMode);
- writel(np->tx_mode, ioaddr + TxMode);
- }
-}
+ mdio_write(dev, np->phys[0], MII_ADVERTISE, np->advertising);
+ mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
+ udelay(500);
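+	/* Busy-wait until the PHY clears its reset bit before reprogramming it. */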
+ while (mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET);
-static void netdev_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
- int next_tick = 60*HZ; /* Check before driver release. */
+ reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
- if (debug > 3) {
- printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus));
- }
- check_duplex(dev, 0);
-#if ! defined(final_version)
- /* This is often falsely triggered. */
- if (readl(ioaddr + IntrStatus) & 1) {
- int new_status = readl(ioaddr + IntrStatus);
- /* Bogus hardware IRQ: Fake an interrupt handler call. */
- if (new_status & 1) {
- printk(KERN_ERR "%s: Interrupt blocked, status %8.8x/%8.8x.\n",
- dev->name, new_status, (int)readl(ioaddr + IntrStatus));
- intr_handler(dev->irq, dev, 0);
- }
+ if (np->autoneg) {
+ reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
+ } else {
+ reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
+ if (np->speed100)
+ reg0 |= BMCR_SPEED100;
+ if (np->full_duplex)
+ reg0 |= BMCR_FULLDPLX;
+ printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
+ dev->name,
+ np->speed100 ? "100" : "10",
+ np->full_duplex ? "full" : "half");
}
-#endif
-
- np->timer.expires = jiffies + next_tick;
- add_timer(&np->timer);
+ mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}
+
static void tx_timeout(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
- " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
#ifndef __alpha__
{
#endif
/* Perhaps we should reinitialize the hardware here. */
- dev->if_port = 0;
/* Stop and restart the chip's Tx processes . */
/* Trigger an immediate transmit demand. */
dev->trans_start = jiffies;
np->stats.tx_errors++;
- return;
+ netif_wake_queue(dev);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int i;
np->tx_full = 0;
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_info[i].skb = NULL;
- np->tx_info[i].mapping = 0;
+ np->tx_info[i].first_mapping = 0;
+#ifdef ZEROCOPY
+ {
+ int j;
+ for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
+ np->tx_info[i].frag_mapping[j] = 0;
+ }
+#endif /* ZEROCOPY */
np->tx_ring[i].status = 0;
}
return;
{
struct netdev_private *np = dev->priv;
unsigned int entry;
+#ifdef ZEROCOPY
+ int i;
+#endif
+
+ kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
/* Calculate the next Tx descriptor entry. */
entry = np->cur_tx % TX_RING_SIZE;
+#if defined(ZEROCOPY) && defined(HAS_FIRMWARE) && defined(HAS_BROKEN_FIRMWARE)
+ {
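+		/* HAS_BROKEN_FIRMWARE workaround: if any fragment is a single byte, fall back to a software checksum. */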
+ int has_bad_length = 0;
+
+ if (skb_first_frag_len(skb) == 1)
+ has_bad_length = 1;
+ else {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ if (skb_shinfo(skb)->frags[i].size == 1) {
+ has_bad_length = 1;
+ break;
+ }
+ }
+
+ if (has_bad_length)
+ skb_checksum_help(skb);
+ }
+#endif /* ZEROCOPY && HAS_FIRMWARE && HAS_BROKEN_FIRMWARE */
+
np->tx_info[entry].skb = skb;
- np->tx_info[entry].mapping =
- pci_map_single(np->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ np->tx_info[entry].first_mapping =
+ pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+
+ np->tx_ring[entry].first_addr = cpu_to_le32(np->tx_info[entry].first_mapping);
+#ifdef ZEROCOPY
+ np->tx_ring[entry].first_len = cpu_to_le32(skb_first_frag_len(skb));
+ np->tx_ring[entry].total_len = cpu_to_le32(skb->len);
+ /* Add "| TxDescIntr" to generate Tx-done interrupts. */
+ np->tx_ring[entry].status = cpu_to_le32(TxDescID | TxCRCEn);
+ np->tx_ring[entry].nbufs = cpu_to_le32(skb_shinfo(skb)->nr_frags + 1);
+#else /* not ZEROCOPY */
+ /* Add "| TxDescIntr" to generate Tx-done interrupts. */
+ np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID | TxCRCEn | 1 << 16);
+#endif /* not ZEROCOPY */
+
+ if (entry >= TX_RING_SIZE-1) /* Wrap ring */
+ np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
+
+#ifdef ZEROCOPY
+ if (skb->ip_summed == CHECKSUM_HW)
+ np->tx_ring[entry].status |= cpu_to_le32(TxCalTCP);
+#endif /* ZEROCOPY */
- np->tx_ring[entry].addr = cpu_to_le32(np->tx_info[entry].mapping);
- /* Add "| TxDescIntr" to generate Tx-done interrupts. */
- np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
if (debug > 5) {
- printk(KERN_DEBUG "%s: Tx #%d slot %d %8.8x %8.8x.\n",
- dev->name, np->cur_tx, entry,
- le32_to_cpu(np->tx_ring[entry].status),
- le32_to_cpu(np->tx_ring[entry].addr));
+#ifdef ZEROCOPY
+ printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x nbufs %d len %4.4x/%4.4x.\n",
+ dev->name, np->cur_tx, entry,
+ le32_to_cpu(np->tx_ring[entry].status),
+ le32_to_cpu(np->tx_ring[entry].nbufs),
+ le32_to_cpu(np->tx_ring[entry].first_len),
+ le32_to_cpu(np->tx_ring[entry].total_len));
+#else /* not ZEROCOPY */
+ printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x.\n",
+ dev->name, np->cur_tx, entry,
+ le32_to_cpu(np->tx_ring[entry].status));
+#endif /* not ZEROCOPY */
}
+
+#ifdef ZEROCOPY
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i];
+
+ /* we already have the proper value in entry */
+ np->tx_info[entry].frag_mapping[i] =
+ pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
+
+ np->tx_ring[entry].frag[i].addr = cpu_to_le32(np->tx_info[entry].frag_mapping[i]);
+ np->tx_ring[entry].frag[i].len = cpu_to_le32(this_frag->size);
+ if (debug > 5) {
+ printk(KERN_DEBUG "%s: Tx #%d frag %d len %4.4x.\n",
+ dev->name, np->cur_tx, i,
+ le32_to_cpu(np->tx_ring[entry].frag[i].len));
+ }
+ }
+#endif /* ZEROCOPY */
+
np->cur_tx++;
-#if 1
- if (entry >= TX_RING_SIZE-1) { /* Wrap ring */
- np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
+
+ if (entry >= TX_RING_SIZE-1) /* Wrap ring */
entry = -1;
- }
-#endif
+ entry++;
/* Non-x86: explicitly flush descriptor cache lines here. */
/* Ensure everything is written back above before the transmit is
wmb();
/* Update the producer index. */
- writel(++entry, dev->base_addr + TxProducerIdx);
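+	/* The producer index is expressed in 8-byte units, hence the scaling below. */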
+ writel(entry * (sizeof(struct starfire_tx_desc) / 8), dev->base_addr + TxProducerIdx);
if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
np->tx_full = 1;
netif_stop_queue(dev);
}
+
dev->trans_start = jiffies;
- if (debug > 4) {
- printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
- dev->name, np->cur_tx, entry);
- }
return 0;
}
struct netdev_private *np;
long ioaddr;
int boguscnt = max_interrupt_work;
+ int consumer;
+ int tx_status;
#ifndef final_version /* Can never occur. */
if (dev == NULL) {
- printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
- "device.\n", irq);
+ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown device.\n", irq);
return;
}
#endif
ioaddr = dev->base_addr;
- np = (struct netdev_private *)dev->priv;
+ np = dev->priv;
do {
u32 intr_status = readl(ioaddr + IntrClear);
if (debug > 4)
printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
- dev->name, intr_status);
+ dev->name, intr_status);
if (intr_status == 0)
break;
/* Scavenge the skbuff list based on the Tx-done queue.
There are redundant checks here that may be cleaned up
after the driver has proven to be reliable. */
- {
- int consumer = readl(ioaddr + TxConsumerIdx);
- int tx_status;
- if (debug > 4)
- printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
- dev->name, consumer);
+ consumer = readl(ioaddr + TxConsumerIdx);
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
+ dev->name, consumer);
#if 0
- if (np->tx_done >= 250 || np->tx_done == 0)
- printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
- "%d is %8.8x.\n", dev->name,
- np->tx_done, le32_to_cpu(np->tx_done_q[np->tx_done].status),
- (np->tx_done+1) & (DONE_Q_SIZE-1),
- le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status));
+ if (np->tx_done >= 250 || np->tx_done == 0)
+ printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, %d is %8.8x.\n",
+ dev->name, np->tx_done,
+ le32_to_cpu(np->tx_done_q[np->tx_done].status),
+ (np->tx_done+1) & (DONE_Q_SIZE-1),
+ le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status));
#endif
- while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status))
- != 0) {
- if (debug > 4)
- printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
- dev->name, np->tx_done, tx_status);
- if ((tx_status & 0xe0000000) == 0xa0000000) {
- np->stats.tx_packets++;
- } else if ((tx_status & 0xe0000000) == 0x80000000) {
- struct sk_buff *skb;
- u16 entry = tx_status; /* Implicit truncate */
- entry >>= 3;
-
- skb = np->tx_info[entry].skb;
+
+ while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
+ if (debug > 4)
+ printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
+ dev->name, np->tx_done, tx_status);
+ if ((tx_status & 0xe0000000) == 0xa0000000) {
+ np->stats.tx_packets++;
+ } else if ((tx_status & 0xe0000000) == 0x80000000) {
+ struct sk_buff *skb;
+#ifdef ZEROCOPY
+ int i;
+#endif /* ZEROCOPY */
+ u16 entry = tx_status; /* Implicit truncate */
+ entry /= sizeof(struct starfire_tx_desc);
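+				/* The status word carries the descriptor's byte offset; convert it back to a ring index. */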
+
+ skb = np->tx_info[entry].skb;
+ np->tx_info[entry].skb = NULL;
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].first_mapping,
+ skb_first_frag_len(skb),
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].first_mapping = 0;
+
+#ifdef ZEROCOPY
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
pci_unmap_single(np->pci_dev,
- np->tx_info[entry].mapping,
- skb->len, PCI_DMA_TODEVICE);
-
- /* Scavenge the descriptor. */
- dev_kfree_skb_irq(skb);
- np->tx_info[entry].skb = NULL;
- np->tx_info[entry].mapping = 0;
- np->dirty_tx++;
+ np->tx_info[entry].frag_mapping[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].frag_mapping[i] = 0;
}
- np->tx_done_q[np->tx_done].status = 0;
- np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
+#endif /* ZEROCOPY */
+
+ /* Scavenge the descriptor. */
+ dev_kfree_skb_irq(skb);
+
+ np->dirty_tx++;
}
- writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
+ np->tx_done_q[np->tx_done].status = 0;
+ np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
}
+ writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
+
if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
/* The ring is no longer full, wake the queue. */
np->tx_full = 0;
netif_wake_queue(dev);
}
+ /* Stats overflow */
+ if (intr_status & IntrStatsMax) {
+ get_stats(dev);
+ }
+
+ /* Media change interrupt. */
+ if (intr_status & IntrLinkChange)
+ netdev_media_change(dev);
+
/* Abnormal error summary/uncommon events handlers. */
if (intr_status & IntrAbnormalSummary)
netdev_error(dev, intr_status);
if (--boguscnt < 0) {
printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=0x%4.4x.\n",
- dev->name, intr_status);
+ "status=0x%4.4x.\n",
+ dev->name, intr_status);
break;
}
} while (1);
if (debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus));
+ dev->name, (int)readl(ioaddr + IntrStatus));
#ifndef final_version
/* Code that should never be run! Remove after testing.. */
{
static int stopit = 10;
- if (!netif_running(dev) && --stopit < 0) {
+ if (!netif_running(dev) && --stopit < 0) {
printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
- dev->name);
+ dev->name);
free_irq(irq, dev);
}
}
if (np->rx_done_q == 0) {
printk(KERN_ERR "%s: rx_done_q is NULL! rx_done is %d. %p.\n",
- dev->name, np->rx_done, np->tx_done_q);
+ dev->name, np->rx_done, np->tx_done_q);
return 0;
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
+ struct sk_buff *skb;
+ u16 pkt_len;
+ int entry;
+
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() status of %d was %8.8x.\n",
- np->rx_done, desc_status);
+ printk(KERN_DEBUG " netdev_rx() status of %d was %8.8x.\n", np->rx_done, desc_status);
if (--boguscnt < 0)
break;
if ( ! (desc_status & RxOK)) {
/* There was a error. */
if (debug > 2)
- printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
- desc_status);
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", desc_status);
np->stats.rx_errors++;
if (desc_status & RxFIFOErr)
np->stats.rx_fifo_errors++;
- } else {
- struct sk_buff *skb;
- u16 pkt_len = desc_status; /* Implicitly Truncate */
- int entry = (desc_status >> 16) & 0x7ff;
+ goto next_rx;
+ }
+
+ pkt_len = desc_status; /* Implicitly Truncate */
+ entry = (desc_status >> 16) & 0x7ff;
#ifndef final_version
- if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
- ", bogus_cnt %d.\n",
- pkt_len, boguscnt);
+ if (debug > 4)
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, bogus_cnt %d.\n", pkt_len, boguscnt);
#endif
- /* Check if the packet is long enough to accept without copying
- to a minimally-sized skbuff. */
- if (PKT_SHOULD_COPY(pkt_len)
- && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
- skb->dev = dev;
- skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single(np->pci_dev,
- np->rx_info[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ pci_dma_sync_single(np->pci_dev,
+ np->rx_info[entry].mapping,
+ pkt_len, PCI_DMA_FROMDEVICE);
#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
- eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
- skb_put(skb, pkt_len);
+ eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len), np->rx_info[entry].skb->tail,
- pkt_len);
+ memcpy(skb_put(skb, pkt_len), np->rx_info[entry].skb->tail, pkt_len);
#endif
- } else {
- char *temp;
+ } else {
+ char *temp;
- pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- skb = np->rx_info[entry].skb;
- temp = skb_put(skb, pkt_len);
- np->rx_info[entry].skb = NULL;
- np->rx_info[entry].mapping = 0;
- }
-#ifndef final_version /* Remove after testing. */
- /* You will want this info for the initial debug. */
- if (debug > 5)
- printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
- "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
- "%d.%d.%d.%d.\n",
- skb->data[0], skb->data[1], skb->data[2], skb->data[3],
- skb->data[4], skb->data[5], skb->data[6], skb->data[7],
- skb->data[8], skb->data[9], skb->data[10],
- skb->data[11], skb->data[12], skb->data[13],
- skb->data[14], skb->data[15], skb->data[16],
- skb->data[17]);
-#endif
- skb->protocol = eth_type_trans(skb, dev);
-#ifdef full_rx_status
- if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x01000000)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ skb = np->rx_info[entry].skb;
+ temp = skb_put(skb, pkt_len);
+ np->rx_info[entry].skb = NULL;
+ np->rx_info[entry].mapping = 0;
+ }
+#ifndef final_version /* Remove after testing. */
+ /* You will want this info for the initial debug. */
+ if (debug > 5)
+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
+ "%d.%d.%d.%d.\n",
+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
+ skb->data[8], skb->data[9], skb->data[10],
+ skb->data[11], skb->data[12], skb->data[13],
+ skb->data[14], skb->data[15], skb->data[16],
+ skb->data[17]);
#endif
- netif_rx(skb);
- dev->last_rx = jiffies;
- np->stats.rx_packets++;
+ skb->protocol = eth_type_trans(skb, dev);
+#if defined(full_rx_status) || defined(csum_rx_status)
+ if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x01000000) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+ /*
+ * This feature doesn't seem to be working, at least
+ * with the two firmware versions I have. If the GFP sees
+ * a fragment, it either ignores it completely, or reports
+ * "bad checksum" on it.
+ *
+ * Maybe I missed something -- corrections are welcome.
+ * Until then, the printk stays. :-) -Ion
+ */
+ else if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x00400000) {
+ skb->ip_summed = CHECKSUM_HW;
+ skb->csum = le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0xffff;
+ printk(KERN_DEBUG "%s: checksum_hw, status2 = %x\n", dev->name, np->rx_done_q[np->rx_done].status2);
+ }
+#endif
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ np->stats.rx_packets++;
+
+next_rx:
np->cur_rx++;
np->rx_done_q[np->rx_done].status = 0;
np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
skb = dev_alloc_skb(np->rx_buf_sz);
np->rx_info[entry].skb = skb;
if (skb == NULL)
- break; /* Better luck next round. */
+ break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- skb->dev = dev; /* Mark as being used by this device. */
+ skb->dev = dev; /* Mark as being used by this device. */
np->rx_ring[entry].rxaddr =
cpu_to_le32(np->rx_info[entry].mapping | RxDescValid);
}
writew(entry, dev->base_addr + RxDescQIdx);
}
- if (debug > 5
- || memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1))
- printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x %d.\n",
- np->rx_done, desc_status,
- memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1));
+ if (debug > 5)
+ printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x.\n",
+ np->rx_done, desc_status);
/* Restart Rx engine if stopped. */
return 0;
}
-static void netdev_error(struct net_device *dev, int intr_status)
+
+static void netdev_media_change(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 reg0, reg1, reg4, reg5;
+ u32 new_tx_mode;
+
+ /* reset status first */
+ mdio_read(dev, np->phys[0], MII_BMCR);
+ mdio_read(dev, np->phys[0], MII_BMSR);
+
+ reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
+ reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
+
+ if (reg1 & BMSR_LSTATUS) {
+ /* link is up */
+ if (reg0 & BMCR_ANENABLE) {
+ /* autonegotiation is enabled */
+ reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
+ reg5 = mdio_read(dev, np->phys[0], MII_LPA);
+ if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
+ np->speed100 = 1;
+ np->full_duplex = 1;
+ } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
+ np->speed100 = 1;
+ np->full_duplex = 0;
+ } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
+ np->speed100 = 0;
+ np->full_duplex = 1;
+ } else {
+ np->speed100 = 0;
+ np->full_duplex = 0;
+ }
+ } else {
+ /* autonegotiation is disabled */
+ if (reg0 & BMCR_SPEED100)
+ np->speed100 = 1;
+ else
+ np->speed100 = 0;
+ if (reg0 & BMCR_FULLDPLX)
+ np->full_duplex = 1;
+ else
+ np->full_duplex = 0;
+ }
+ printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
+ dev->name,
+ np->speed100 ? "100" : "10",
+ np->full_duplex ? "full" : "half");
- if (intr_status & LinkChange) {
- printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
- " %4.4x partner %4.4x.\n", dev->name,
- mdio_read(dev, np->phys[0], 4),
- mdio_read(dev, np->phys[0], 5));
- check_duplex(dev, 0);
- }
- if (intr_status & StatsMax) {
- get_stats(dev);
+ new_tx_mode = np->tx_mode & ~0x2; /* duplex setting */
+ if (np->full_duplex)
+ new_tx_mode |= 2;
+ if (np->tx_mode != new_tx_mode) {
+ np->tx_mode = new_tx_mode;
+ writel(np->tx_mode | 0x8000, ioaddr + TxMode);
+ writel(np->tx_mode, ioaddr + TxMode);
+ }
+ } else {
+ printk(KERN_DEBUG "%s: Link is down\n", dev->name);
}
+}
+
+
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = dev->priv;
+
/* Came close to underrunning the Tx FIFO, increase threshold. */
- if (intr_status & IntrTxDataLow)
+ if (intr_status & IntrTxDataLow) {
writel(++np->tx_threshold, dev->base_addr + TxThreshold);
- if ((intr_status &
- ~(IntrAbnormalSummary|LinkChange|StatsMax|IntrTxDataLow|1)) && debug)
+ printk(KERN_NOTICE "%s: Increasing Tx FIFO threshold to %d bytes\n",
+ dev->name, np->tx_threshold * 16);
+ }
+ if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrPCIPad)) && debug)
printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
- dev->name, intr_status);
- /* Hmmmmm, it's not clear how to recover from PCI faults. */
- if (intr_status & IntrTxPCIErr)
+ dev->name, intr_status);
+ /* Hmmmmm, it's not clear how to recover from DMA faults. */
+ if (intr_status & IntrDMAErr)
np->stats.tx_fifo_errors++;
- if (intr_status & IntrRxPCIErr)
- np->stats.rx_fifo_errors++;
}
static struct net_device_stats *get_stats(struct net_device *dev)
np->stats.tx_aborted_errors =
readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
np->stats.tx_window_errors = readl(ioaddr + 0x57018);
- np->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
+ np->stats.collisions =
+ readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
/* The chip only need report frame silently dropped. */
- np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
+ np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
writew(0, ioaddr + RxDMAStatus);
- np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
+ np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
np->stats.rx_length_errors = readl(ioaddr + 0x57058);
np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
struct dev_mc_list *mclist;
int i;
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- /* Unconditionally log net taps. */
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
} else if ((dev->mc_count > multicast_filter_limit)
- || (dev->flags & IFF_ALLMULTI)) {
+ || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
} else if (dev->mc_count <= 15) {
- /* Use the 16 element perfect filter. */
- long filter_addr = ioaddr + 0x56000 + 1*16;
- for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
- i++, mclist = mclist->next) {
+ /* Use the 16 element perfect filter, skip first entry. */
+ long filter_addr = ioaddr + PerfFilterTable + 1 * 16;
+ for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
+ i++, mclist = mclist->next) {
u16 *eaddrs = (u16 *)mclist->dmi_addr;
writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
- set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
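+			/* Set the filter bit by hand in a little-endian 32-bit word rather than using set_bit(). */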
+ int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
+ __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
+
+ *fptr |= cpu_to_le32(1 << (bit_nr & 31));
}
/* Clear the perfect filter list. */
filter_addr = ioaddr + 0x56000 + 1*16;
switch (data[1]) {
case 0:
if (value & 0x9000) /* Autonegotiation. */
- np->medialock = 0;
+ np->autoneg = 1;
else {
np->full_duplex = (value & 0x0100) ? 1 : 0;
- np->medialock = 1;
+ np->autoneg = 0;
}
break;
- case 4: np->advertising = value; break;
+ case 4:
+ np->advertising = value;
+ break;
}
- check_duplex(dev, 0);
+ check_duplex(dev);
}
mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
return 0;
int i;
netif_stop_queue(dev);
-
- del_timer_sync(&np->timer);
+ netif_stop_if(dev);
if (debug > 1) {
printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n",
np->tx_ring_dma);
for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
- i, le32_to_cpu(np->tx_ring[i].status),
- le32_to_cpu(np->tx_ring[i].addr),
- le32_to_cpu(np->tx_done_q[i].status));
+ i, le32_to_cpu(np->tx_ring[i].status),
+ le32_to_cpu(np->tx_ring[i].first_addr),
+ le32_to_cpu(np->tx_done_q[i].status));
printk(KERN_DEBUG " Rx ring at %8.8x -> %p:\n",
- np->rx_ring_dma, np->rx_done_q);
+ np->rx_ring_dma, np->rx_done_q);
if (np->rx_done_q)
for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n",
- i, le32_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
+ i, le32_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
}
}
#endif /* __i386__ debugging only */
}
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = np->tx_info[i].skb;
- if (skb != NULL) {
- pci_unmap_single(np->pci_dev,
- np->tx_info[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb(skb);
- }
+#ifdef ZEROCOPY
+ int j;
+#endif /* ZEROCOPY */
+ if (skb == NULL)
+ continue;
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[i].first_mapping,
+ skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ np->tx_info[i].first_mapping = 0;
+ dev_kfree_skb(skb);
np->tx_info[i].skb = NULL;
- np->tx_info[i].mapping = 0;
+#ifdef ZEROCOPY
+ for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
+ if (np->tx_info[i].frag_mapping[j]) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[i].frag_mapping[j],
+ skb_shinfo(skb)->frags[j].size,
+ PCI_DMA_TODEVICE);
+ np->tx_info[i].frag_mapping[j] = 0;
+ } else
+ break;
+#endif /* ZEROCOPY */
}
+ COMPAT_MOD_DEC_USE_COUNT;
+
return 0;
}
{
struct net_device *dev = pci_get_drvdata(pdev);
struct netdev_private *np;
-
+
if (!dev)
BUG();
unregister_netdev(dev);
iounmap((char *)dev->base_addr);
- pci_release_regions (pdev);
+ pci_release_regions(pdev);
if (np->tx_done_q)
pci_free_consistent(np->pci_dev, PAGE_SIZE,
static struct pci_driver starfire_driver = {
- name: "starfire",
+ name: DRV_NAME,
probe: starfire_init_one,
remove: starfire_remove_one,
id_table: starfire_pci_tbl,
/*
* Local variables:
- * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c starfire.c"
- * simple-compile-command: "gcc -DMODULE -O6 -c starfire.c"
- * c-indent-level: 4
- * c-basic-offset: 4
- * tab-width: 4
+ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c starfire.c"
+ * simple-compile-command: "gcc -DMODULE -O2 -c starfire.c"
+ * c-basic-offset: 8
+ * tab-width: 8
* End:
*/
static int lance_debug = 1;
#endif
MODULE_PARM(lance_debug, "i");
+MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
#define DPRINTK(n,a) \
do { \
http://www.scyld.com/network/sundance.html
*/
+#define DRV_NAME "sundance"
+#define DRV_VERSION "1.01"
+#define DRV_RELDATE "4/09/00"
+
+
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
-KERN_INFO "sundance.c:v1.01 4/09/00 Written by Donald Becker\n"
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
KERN_INFO " http://www.scyld.com/network/sundance.html\n";
/* Condensed operations for readability. */
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(max_interrupt_work, "Sundance Alta maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "Sundance Alta MTU (all boards)");
+MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
+MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options, "Sundance Alta: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "Sundance Alta full duplex setting(s) (1)");
/*
Theory of Operation
int mii_cnt; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
+ struct pci_dev *pci_dev;
};
/* The station address location in the EEPROM. */
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
\f
return -ENOMEM;
SET_MODULE_OWNER(dev);
- if (pci_request_regions(pdev, "sundance"))
+ if (pci_request_regions(pdev, DRV_NAME))
goto err_out_netdev;
#ifdef USE_IO_OPS
np = dev->priv;
np->chip_id = chip_idx;
np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ np->pci_dev = pdev;
spin_lock_init(&np->lock);
if (dev->mem_start)
dev->stop = &netdev_close;
dev->get_stats = &get_stats;
dev->set_multicast_list = &set_rx_mode;
- dev->do_ioctl = &mii_ioctl;
+ dev->do_ioctl = &netdev_ioctl;
dev->tx_timeout = &tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
pci_set_drvdata(pdev, dev);
writeb(rx_mode, ioaddr + RxMode);
}
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
+{
+ struct netdev_private *np = dev->priv;
+ u32 ethcmd;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
+ strcpy(info.driver, DRV_NAME);
+ strcpy(info.version, DRV_VERSION);
+ strcpy(info.bus_info, np->pci_dev->slot_name);
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
u16 *data = (u16 *)&rq->ifr_data;
switch(cmd) {
+ case SIOCETHTOOL:
+ return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
data[0] = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
/* Fall Through */
}
static struct pci_driver sundance_driver = {
- name: "sundance",
+ name: DRV_NAME,
id_table: sundance_pci_tbl,
probe: sundance_probe1,
remove: sundance_remove1,
MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_PARM(gem_debug, "i");
+MODULE_PARM_DESC(gem_debug, "(ignored)");
#define GEM_MODULE_NAME "gem"
#define PFX GEM_MODULE_NAME ": "
}
-static void gem_suspend(struct pci_dev *pdev)
-{
-}
-
-static void gem_resume(struct pci_dev *pdev)
-{
-}
-
static void __devexit gem_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
id_table: gem_pci_tbl,
probe: gem_init_one,
remove: gem_remove_one,
- suspend: gem_suspend,
- resume: gem_resume,
};
static int __init gem_init(void)
/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
MODULE_PARM(macaddr, "6i");
+MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
static struct happy_meal *root_happy_dev;
MODULE_PARM(speed, "1-" __MODULE_STRING(MAX_TLAN_BOARDS) "i");
MODULE_PARM(debug, "i");
MODULE_PARM(bbuf, "i");
+MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
+MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
+MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
+MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)");
EXPORT_NO_SYMBOLS;
/* Define this to enable Link beat monitoring */
data[0] = phy;
case SIOCDEVPRIVATE+1: /* Read MII register */
- TLan_MiiReadReg(dev, data[0], data[1], &data[3]);
+ TLan_MiiReadReg(dev, data[0] & 0x1f, data[1] & 0x1f, &data[3]);
return 0;
case SIOCDEVPRIVATE+2: /* Write MII register */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- TLan_MiiWriteReg(dev, data[0], data[1], data[2]);
+ TLan_MiiWriteReg(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
return 0;
default:
return -EOPNOTSUPP;
#endif
#endif
-static void streamer_suspend(struct pci_dev *pdev) {
-#if STREAMER_DEBUG
- printk("lanstreamer::streamer_suspend entry pdev %p\n",pdev);
-#endif
-}
-
-static void streamer_resume(struct pci_dev *pdev) {
-#if STREAMER_DEBUG
- printk("lanstreamer::streamer_resume entry pdev %p\n",pdev);
-#endif
-}
-
static struct pci_driver streamer_pci_driver = {
name: "lanstreamer",
id_table: streamer_pci_tbl,
probe: streamer_init_one,
remove: streamer_remove_one,
- suspend: streamer_suspend,
- resume: streamer_resume,
};
static int __init streamer_init_module(void) {
* 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
* adapter when live does not take the system down with it.
*
+ * 06/02/01 - Clean up, copy skb for small packets
+ *
* To Do:
*
* Complete full Cardbus / hot-swap support.
#define OLYMPIC_DEBUG 0
+
+#include <linux/config.h>
#include <linux/module.h>
+
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
* Official releases will only have an a.b.c version number format.
*/
-static char *version =
-"Olympic.c v0.9.C 4/18/01 - Peter De Schrijver & Mike Phillips" ;
+static char version[] __devinitdata =
+"Olympic.c v0.9.7 6/02/01 - Peter De Schrijver & Mike Phillips" ;
static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
"Address Verification", "Neighbor Notification (Ring Poll)",
/* Module paramters */
+MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
+MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
+
/* Ring Speed 0,4,16,100
* 0 = Autosense
* 4,16 = Selected speed only, no autosense
};
MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
-static int __init olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int olympic_init(struct net_device *dev);
static int olympic_open(struct net_device *dev);
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
static void olympic_asb_bh(struct net_device *dev) ;
static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
-static int __init olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev ;
struct olympic_private *olympic_priv;
dev->set_multicast_list=&olympic_set_rx_mode;
dev->get_stats=&olympic_get_stats ;
dev->set_mac_address=&olympic_set_mac_address ;
+ SET_MODULE_OWNER(dev) ;
pci_set_drvdata(pdev,dev) ;
register_netdev(dev) ;
return 0 ;
}
-static int __init olympic_init(struct net_device *dev)
+static int __devinit olympic_init(struct net_device *dev)
{
struct olympic_private *olympic_priv;
- __u8 *olympic_mmio, *init_srb,*adapter_addr;
+ u8 *olympic_mmio, *init_srb,*adapter_addr;
unsigned long t;
unsigned int uaa_addr;
writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
t=jiffies;
- while((readl(olympic_priv->olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
+ while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
schedule();
if(jiffies-t > 40*HZ) {
printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
static int olympic_open(struct net_device *dev)
{
struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
- __u8 *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
+ u8 *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
unsigned long flags, t;
char open_error[255] ;
int i, open_finished = 1 ;
#endif
if (olympic_priv->olympic_network_monitor) {
- __u8 *oat ;
- __u8 *opt ;
- oat = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
- opt = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
+ u8 *oat ;
+ u8 *opt ;
+ oat = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
+ opt = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
}
netif_start_queue(dev);
- MOD_INC_USE_COUNT ;
return 0;
}
* This means that we may process the frame before we receive the end
* of frame interrupt. This is why we always test the status instead
* of blindly processing the next frame.
+ *
+ * We also remove the last 4 bytes from the packet, as these are just
+ * token ring trailer info that upsets protocols which don't check
+ * their own length, e.g. SNA.
*
*/
static void olympic_rx(struct net_device *dev)
{
struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
- __u8 *olympic_mmio=olympic_priv->olympic_mmio;
+ u8 *olympic_mmio=olympic_priv->olympic_mmio;
struct olympic_rx_status *rx_status;
struct olympic_rx_desc *rx_desc ;
int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
while (rx_status->status_buffercnt) {
- __u32 l_status_buffercnt;
+ u32 l_status_buffercnt;
olympic_priv->rx_status_last_received++ ;
olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
If only one buffer is used we can simply swap the buffers around.
If more than one then we must use the new buffer and copy the information
first. Ideally all frames would be in a single buffer, this can be tuned by
- altering the buffer size. */
+	   altering the buffer size. If the length of the packet is less than
+	   1500 bytes, we're going to copy it over anyway to stop packets getting
+	   dropped from sockets with buffers smaller than our pkt_buf_sz. */
if (buffer_cnt==1) {
olympic_priv->rx_ring_last_received++ ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
rx_ring_last_received = olympic_priv->rx_ring_last_received ;
- skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
- /* unmap buffer */
- pci_unmap_single(olympic_priv->pdev,
- le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
- olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
- skb_put(skb2,length);
- skb2->protocol = tr_type_trans(skb2,dev);
- olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
- cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
- olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
- olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
- cpu_to_le32(olympic_priv->pkt_buf_sz);
- olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
- netif_rx(skb2) ;
+ if (length > 1500) {
+ skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
+ /* unmap buffer */
+ pci_unmap_single(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ skb_put(skb2,length-4);
+ skb2->protocol = tr_type_trans(skb2,dev);
+ olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
+ cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
+ olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
+ olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
+ cpu_to_le32(olympic_priv->pkt_buf_sz);
+ olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
+ netif_rx(skb2) ;
+ } else {
+ pci_dma_sync_single(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
+ memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
+ skb->protocol = tr_type_trans(skb,dev) ;
+ netif_rx(skb) ;
+ }
} else {
do { /* Walk the buffers */
olympic_priv->rx_ring_last_received++ ;
olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
rx_ring_last_received = olympic_priv->rx_ring_last_received ;
+ pci_dma_sync_single(olympic_priv->pdev,
+ le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
+ olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
} while (--i) ;
-
+ skb_trim(skb,skb->len-4) ;
skb->protocol = tr_type_trans(skb,dev);
netif_rx(skb) ;
}
{
struct net_device *dev= (struct net_device *)dev_id;
struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
- __u8 *olympic_mmio=olympic_priv->olympic_mmio;
- __u32 sisr;
- __u8 *adapter_check_area ;
+ u8 *olympic_mmio=olympic_priv->olympic_mmio;
+ u32 sisr;
+ u8 *adapter_check_area ;
/*
* Read sisr but don't reset it yet.
netif_stop_queue(dev);
printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
writel(readl(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
- adapter_check_area = (__u8 *)(olympic_mmio+LAPWWO) ;
+ adapter_check_area = (u8 *)(olympic_mmio+LAPWWO) ;
printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
/* The adapter is effectively dead, clean up and exit */
for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
free_irq(dev->irq, dev) ;
- MOD_DEC_USE_COUNT ;
dev->stop = NULL ;
spin_unlock(&olympic_priv->olympic_lock) ;
return ;
static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
- __u8 *olympic_mmio=olympic_priv->olympic_mmio;
+ u8 *olympic_mmio=olympic_priv->olympic_mmio;
unsigned long flags ;
spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
static int olympic_close(struct net_device *dev)
{
struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
- __u8 *olympic_mmio=olympic_priv->olympic_mmio,*srb;
+ u8 *olympic_mmio=olympic_priv->olympic_mmio,*srb;
unsigned long t,flags;
int i;
#endif
free_irq(dev->irq,dev);
- MOD_DEC_USE_COUNT ;
return 0;
}
static void olympic_set_rx_mode(struct net_device *dev)
{
struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
- __u8 *olympic_mmio = olympic_priv->olympic_mmio ;
- __u8 options = 0;
- __u8 *srb;
+ u8 *olympic_mmio = olympic_priv->olympic_mmio ;
+ u8 options = 0;
+ u8 *srb;
struct dev_mc_list *dmi ;
unsigned char dev_mc_address[4] ;
int i ;
static void olympic_srb_bh(struct net_device *dev)
{
struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
- __u8 *olympic_mmio = olympic_priv->olympic_mmio ;
- __u8 *srb;
+ u8 *olympic_mmio = olympic_priv->olympic_mmio ;
+ u8 *srb;
writel(olympic_priv->srb,olympic_mmio+LAPA);
srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
static void olympic_arb_cmd(struct net_device *dev)
{
struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
- __u8 *olympic_mmio=olympic_priv->olympic_mmio;
- __u8 *arb_block, *asb_block, *srb ;
- __u8 header_len ;
- __u16 frame_len, buffer_len ;
+ u8 *olympic_mmio=olympic_priv->olympic_mmio;
+ u8 *arb_block, *asb_block, *srb ;
+ u8 header_len ;
+ u16 frame_len, buffer_len ;
struct sk_buff *mac_frame ;
- __u8 *buf_ptr ;
- __u8 *frame_data ;
- __u16 buff_off ;
- __u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
- __u8 fdx_prot_error ;
- __u16 next_ptr;
+ u8 *buf_ptr ;
+ u8 *frame_data ;
+ u16 buff_off ;
+ u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
+ u8 fdx_prot_error ;
+ u16 next_ptr;
int i ;
- arb_block = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
- asb_block = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
- srb = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->srb) ;
+ arb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
+ asb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
+ srb = (u8 *)(olympic_priv->olympic_lap + olympic_priv->srb) ;
writel(readl(olympic_mmio+LAPA),olympic_mmio+LAPWWO);
if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
free_irq(dev->irq,dev);
dev->stop=NULL;
printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
- MOD_DEC_USE_COUNT ;
} /* If serious error */
if (olympic_priv->olympic_message_level) {
static void olympic_asb_bh(struct net_device *dev)
{
struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
- __u8 *arb_block, *asb_block ;
+ u8 *arb_block, *asb_block ;
- arb_block = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
- asb_block = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
+ arb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
+ asb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
if (olympic_priv->asb_queued == 1) { /* Dropped through the first time */
static int olympic_change_mtu(struct net_device *dev, int mtu)
{
struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
- __u16 max_mtu ;
+ u16 max_mtu ;
if (olympic_priv->olympic_ring_speed == 4)
max_mtu = 4500 ;
{
struct net_device *dev = (struct net_device *)data ;
struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
- __u8 *oat = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
- __u8 *opt = (__u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
+ u8 *oat = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
+ u8 *opt = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
int size = 0 ;
int len=0;
off_t begin=0;
unregister_trdev(dev) ;
iounmap(olympic_priv->olympic_mmio) ;
iounmap(olympic_priv->olympic_lap) ;
- pci_release_regions(pdev) ;
+ pci_release_regions(pdev) ;
+ pci_set_drvdata(pdev,NULL) ;
kfree(dev) ;
}
/* xxxx These structures are all little endian in hardware. */
struct olympic_tx_desc {
- __u32 buffer;
- __u32 status_length;
+ u32 buffer;
+ u32 status_length;
};
struct olympic_tx_status {
- __u32 status;
+ u32 status;
};
struct olympic_rx_desc {
- __u32 buffer;
- __u32 res_length;
+ u32 buffer;
+ u32 res_length;
};
struct olympic_rx_status {
- __u32 fragmentcnt_framelen;
- __u32 status_buffercnt;
+ u32 fragmentcnt_framelen;
+ u32 status_buffercnt;
};
/* xxxx END These structures are all little endian in hardware. */
/* xxxx There may be more, but I'm pretty sure about these */
struct mac_receive_buffer {
- __u16 next ;
- __u8 padding ;
- __u8 frame_status ;
- __u16 buffer_length ;
- __u8 frame_data ;
+ u16 next ;
+ u8 padding ;
+ u8 frame_status ;
+ u16 buffer_length ;
+ u8 frame_data ;
};
struct olympic_private {
- __u16 srb; /* be16 */
- __u16 trb; /* be16 */
- __u16 arb; /* be16 */
- __u16 asb; /* be16 */
+ u16 srb; /* be16 */
+ u16 trb; /* be16 */
+ u16 arb; /* be16 */
+ u16 asb; /* be16 */
- __u8 *olympic_mmio;
- __u8 *olympic_lap;
+ u8 *olympic_mmio;
+ u8 *olympic_lap;
struct pci_dev *pdev ;
char *olympic_card_name ;
int tx_ring_free, tx_ring_last_status, rx_ring_last_received,rx_status_last_received, free_tx_ring_entries;
struct net_device_stats olympic_stats ;
- __u16 olympic_lan_status ;
- __u8 olympic_ring_speed ;
- __u16 pkt_buf_sz ;
- __u8 olympic_receive_options, olympic_copy_all_options,olympic_message_level, olympic_network_monitor;
- __u16 olympic_addr_table_addr, olympic_parms_addr ;
- __u8 olympic_laa[6] ;
- __u32 rx_ring_dma_addr;
- __u32 rx_status_ring_dma_addr;
- __u32 tx_ring_dma_addr;
- __u32 tx_status_ring_dma_addr;
+ u16 olympic_lan_status ;
+ u8 olympic_ring_speed ;
+ u16 pkt_buf_sz ;
+ u8 olympic_receive_options, olympic_copy_all_options,olympic_message_level, olympic_network_monitor;
+ u16 olympic_addr_table_addr, olympic_parms_addr ;
+ u8 olympic_laa[6] ;
+ u32 rx_ring_dma_addr;
+ u32 rx_status_ring_dma_addr;
+ u32 tx_ring_dma_addr;
+ u32 tx_status_ring_dma_addr;
};
struct olympic_adapter_addr_table {
- __u8 node_addr[6] ;
- __u8 reserved[4] ;
- __u8 func_addr[4] ;
+ u8 node_addr[6] ;
+ u8 reserved[4] ;
+ u8 func_addr[4] ;
} ;
struct olympic_parameters_table {
- __u8 phys_addr[4] ;
- __u8 up_node_addr[6] ;
- __u8 up_phys_addr[4] ;
- __u8 poll_addr[6] ;
- __u16 reserved ;
- __u16 acc_priority ;
- __u16 auth_source_class ;
- __u16 att_code ;
- __u8 source_addr[6] ;
- __u16 beacon_type ;
- __u16 major_vector ;
- __u16 lan_status ;
- __u16 soft_error_time ;
- __u16 reserved1 ;
- __u16 local_ring ;
- __u16 mon_error ;
- __u16 beacon_transmit ;
- __u16 beacon_receive ;
- __u16 frame_correl ;
- __u8 beacon_naun[6] ;
- __u32 reserved2 ;
- __u8 beacon_phys[4] ;
+ u8 phys_addr[4] ;
+ u8 up_node_addr[6] ;
+ u8 up_phys_addr[4] ;
+ u8 poll_addr[6] ;
+ u16 reserved ;
+ u16 acc_priority ;
+ u16 auth_source_class ;
+ u16 att_code ;
+ u8 source_addr[6] ;
+ u16 beacon_type ;
+ u16 major_vector ;
+ u16 lan_status ;
+ u16 soft_error_time ;
+ u16 reserved1 ;
+ u16 local_ring ;
+ u16 mon_error ;
+ u16 beacon_transmit ;
+ u16 beacon_receive ;
+ u16 frame_correl ;
+ u8 beacon_naun[6] ;
+ u32 reserved2 ;
+ u8 beacon_phys[4] ;
};
* 1. Multicast support.
*/
+#if defined(__alpha__) || defined(__ia64__)
+#error FIXME: driver does not support 64-bit platforms
+#endif
+
#ifdef MODULE
#include <linux/module.h>
#include <linux/version.h>
tp->csr6 &= 0x00D5;
tp->csr6 |= new_csr6;
outl(0x0301, ioaddr + CSR12);
- tulip_restart_rxtx(tp, tp->csr6);
+ tulip_restart_rxtx(tp);
}
next_tick = 3*HZ;
}
udelay(100);
outl(csr14, ioaddr + CSR14);
tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
- tulip_outl_csr(tp, tp->csr6, CSR6);
+ outl(tp->csr6, ioaddr + CSR6);
if (tp->mtable && tp->mtable->csr15dir) {
outl(tp->mtable->csr15dir, ioaddr + CSR15);
outl(tp->mtable->csr15val, ioaddr + CSR15);
outl(1, ioaddr + CSR13);
}
#if 0 /* Restart shouldn't be needed. */
- tulip_outl_csr(tp, tp->csr6 | csr6_sr, CSR6);
+ outl(tp->csr6 | RxOn, ioaddr + CSR6);
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
dev->name, inl(ioaddr + CSR5));
#endif
- tulip_outl_csr(tp, tp->csr6 | csr6_st | csr6_sr, CSR6);
+ tulip_start_rxtx(tp);
if (tulip_debug > 2)
printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
dev->name, tp->csr6, inl(ioaddr + CSR6),
tp->csr6 = 0x83860000;
outl(0x0003FF7F, ioaddr + CSR14);
outl(0x0301, ioaddr + CSR12);
- tulip_restart_rxtx(tp, tp->csr6);
+ tulip_restart_rxtx(tp);
}
}
+2001-06-16 Jeff Garzik <jgarzik@mandrakesoft.com>
+
+ * tulip.h, tulip_core.c:
+ Integrate MMIO support from devel branch, but default
+ it to off for stable kernel and driver series.
+
+2001-06-16 Jeff Garzik <jgarzik@mandrakesoft.com>
+
+ * tulip_core.c (tulip_init_one):
+ Free descriptor rings on error.
+
+2001-06-16 Jeff Garzik <jgarzik@mandrakesoft.com>
+
+ * tulip_core.c (tulip_mwi_config, tulip_init_one):
+ Large update to csr0 bus configuration code. This is not stable
+ yet, so it is only conditionally enabled, via CONFIG_TULIP_MWI.
+
+2001-06-16 Jeff Garzik <jgarzik@mandrakesoft.com>
+
+ * tulip_core.c:
+ Initialize timer in tulip_init_one and tulip_down,
+ not in tulip_up.
+
+2001-06-14 Jeff Garzik <jgarzik@mandrakesoft.com>
+
+ * tulip_core.c:
+ - Update tulip_suspend, tulip_resume for new PCI PM API.
+ - Surround suspend/resume code with CONFIG_PM.
+
+2001-06-12 Jeff Golds <jgolds@resilience.com>
+
+ * tulip_core.c:
+ - Reset sw ring ptrs in tulip_up. Fixes PM resume case.
+ - Clean rx and tx rings on device down.
+
+2001-06-05 David Miller <davem@redhat.com>
+
+ * tulip_core (set_rx_mode): Do not use set_bit
+ on an integer variable. Also fix endianness issue.
+
+2001-06-04 Jeff Garzik <jgarzik@mandrakesoft.com>
+
+ * interrupt.c:
+ Simplify rx processing when CONFIG_NET_HW_FLOWCONTROL is
+ active, and in the process fix a bug where flow control
+ and low load caused rx not to be acknowledged properly.
+
+2001-06-01 Jeff Garzik <jgarzik@mandrakesoft.com>
+
+ * tulip.h:
+ - Remove tulip_outl_csr helper, redundant.
+ - Add tulip_start_rxtx inline helper.
+ - tulip_stop_rxtx helper: Add synchronization. Always use current
+ csr6 value, instead of tp->csr6 value or value passed as arg.
+ - tulip_restart_rxtx helper: Add synchronization. Always
+ use tp->csr6 for desired mode, not value passed as arg.
+ - New RxOn, TxOn, RxTx constants for csr6 modes.
+ - Remove now-redundant constants csr6_st, csr6_sr.
+
+ * 21142.c, interrupt.c, media.c, pnic.c, tulip_core.c:
+ Update for above rxtx helper changes.
+
+ * interrupt.c:
+	- Whitespace cleanup around #ifdef CONFIG_NET_HW_FLOWCONTROL,
+ convert tabs to spaces.
+ - Move tp->stats.rx_missed_errors update outside the ifdef.
+
2001-05-18 Jeff Garzik <jgarzik@mandrakesoft.com>
* tulip_core.c: Added ethtool support.
if (csr5 & (RxIntr | RxNoBuf)) {
#ifdef CONFIG_NET_HW_FLOWCONTROL
- if (tp->fc_bit) {
- if (test_bit(tp->fc_bit, &netdev_fc_xoff)) {
- tulip_refill_rx(dev);
- } else {
- tulip_refill_rx(dev);
- rx += tulip_rx(dev);
- }
- } else { /* not in fc mode */
- rx += tulip_rx(dev);
- tulip_refill_rx(dev);
- }
-#else
- rx += tulip_rx(dev);
- tulip_refill_rx(dev);
+ if ((!tp->fc_bit) ||
+ (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
#endif
+ rx += tulip_rx(dev);
+ tulip_refill_rx(dev);
}
if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
printk(KERN_WARNING "%s: The transmitter stopped."
" CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
- tulip_restart_rxtx(tp, tp->csr6);
+ tulip_restart_rxtx(tp);
}
spin_unlock(&tp->lock);
}
else
tp->csr6 |= 0x00200000; /* Store-n-forward. */
/* Restart the transmit process. */
- tulip_restart_rxtx(tp, tp->csr6);
+ tulip_restart_rxtx(tp);
outl(0, ioaddr + CSR1);
}
if (csr5 & (RxDied | RxNoBuf)) {
}
}
if (csr5 & RxDied) { /* Missed a Rx frame. */
-#ifdef CONFIG_NET_HW_FLOWCONTROL
- if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
- tp->stats.rx_errors++;
- tulip_outl_csr(tp, tp->csr6 | csr6_st | csr6_sr, CSR6);
- }
tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+ if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
+ tp->stats.rx_errors++;
+ tulip_start_rxtx(tp);
+ }
#else
tp->stats.rx_errors++;
- tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
- tulip_outl_csr(tp, tp->csr6 | csr6_st | csr6_sr, CSR6);
+ tulip_start_rxtx(tp);
#endif
}
/*
*/
int tulip_check_duplex(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
struct tulip_private *tp = dev->priv;
unsigned int bmsr, lpa, negotiated, new_csr6;
else new_csr6 &= ~FullDuplex;
if (new_csr6 != tp->csr6) {
- if (inl(ioaddr + CSR6) & (csr6_st | csr6_sr))
- tulip_restart_rxtx(tp, new_csr6);
- else
- outl(new_csr6, ioaddr + CSR6);
tp->csr6 = new_csr6;
+ tulip_restart_rxtx(tp);
if (tulip_debug > 0)
printk(KERN_INFO "%s: Setting %s-duplex based on MII"
if (tp->csr6 != new_csr6) {
tp->csr6 = new_csr6;
/* Restart Tx */
- tulip_restart_rxtx(tp, tp->csr6);
+ tulip_restart_rxtx(tp);
dev->trans_start = jiffies;
}
}
return;
if (! tp->nwayset || jiffies - dev->trans_start > 1*HZ) {
tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
- tulip_outl_csr(tp, tp->csr6, CSR6);
+ outl(tp->csr6, ioaddr + CSR6);
outl(0x30, ioaddr + CSR12);
outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
dev->trans_start = jiffies;
if (tp->csr6 != new_csr6) {
tp->csr6 = new_csr6;
/* Restart Tx */
- tulip_restart_rxtx(tp, tp->csr6);
+ tulip_restart_rxtx(tp);
dev->trans_start = jiffies;
if (tulip_debug > 1)
printk(KERN_INFO "%s: Changing PNIC configuration to %s "
medianame[tp->mtable->mleaf[tp->cur_index].media]);
tulip_select_media(dev, 0);
/* Restart the transmit process. */
- tulip_restart_rxtx(tp, tp->csr6);
+ tulip_restart_rxtx(tp);
next_tick = (24*HZ)/10;
break;
}
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
/* undefine, or define to various debugging levels (>4 == obscene levels) */
#define TULIP_DEBUG 1
+/* undefine USE_IO_OPS for MMIO, define for PIO */
+#ifdef CONFIG_TULIP_MMIO
+# undef USE_IO_OPS
+#else
+# define USE_IO_OPS 1
+#endif
+
+
struct tulip_chip_table {
char *chip_name;
enum tulip_mode_bits {
TxThreshold = (1 << 22),
FullDuplex = (1 << 9),
+ TxOn = 0x2000,
AcceptBroadcast = 0x0100,
AcceptAllMulticast = 0x0080,
AcceptAllPhys = 0x0040,
AcceptRunt = 0x0008,
+ RxOn = 0x0002,
+ RxTx = (TxOn | RxOn),
};
* (1,1) * 1024 * 160 *
***********************************/
- csr6_st = (1<<13), /* Transmit conrol: 1 = transmit, 0 = stop */
csr6_fc = (1<<12), /* Forces a collision in next transmission (for testing in loopback mode) */
csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */
csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */
csr6_if = (1<<4), /* Inverse Filtering, rejects only addresses in address table: can't be set */
csr6_pb = (1<<3), /* Pass Bad Frames, (1) causes even bad frames to be passed on */
csr6_ho = (1<<2), /* Hash-only filtering mode: can't be set */
- csr6_sr = (1<<1), /* Start(1)/Stop(0) Receive */
csr6_hp = (1<<0), /* Hash/Perfect Receive Filtering Mode: can't be set */
csr6_mask_capture = (csr6_sc | csr6_ca),
extern u16 t21041_csr14[];
extern u16 t21041_csr15[];
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb(addr) readb((void*)(addr))
+#define inw(addr) readw((void*)(addr))
+#define inl(addr) readl((void*)(addr))
+#define outb(val,addr) writeb((val), (void*)(addr))
+#define outw(val,addr) writew((val), (void*)(addr))
+#define outl(val,addr) writel((val), (void*)(addr))
+#endif /* !USE_IO_OPS */
+
+
-static inline void tulip_outl_csr (struct tulip_private *tp, u32 newValue, enum tulip_offsets offset)
+static inline void tulip_start_rxtx(struct tulip_private *tp)
{
- outl (newValue, tp->base_addr + offset);
+ long ioaddr = tp->base_addr;
+ outl(tp->csr6 | RxTx, ioaddr + CSR6);
+ barrier();
+ (void) inl(ioaddr + CSR6); /* mmio sync */
}
-static inline void tulip_stop_rxtx(struct tulip_private *tp, u32 csr6mask)
+static inline void tulip_stop_rxtx(struct tulip_private *tp)
{
- tulip_outl_csr(tp, csr6mask & ~(csr6_st | csr6_sr), CSR6);
+ long ioaddr = tp->base_addr;
+ u32 csr6 = inl(ioaddr + CSR6);
+
+ if (csr6 & RxTx) {
+ outl(csr6 & ~RxTx, ioaddr + CSR6);
+ barrier();
+ (void) inl(ioaddr + CSR6); /* mmio sync */
+ }
}
-static inline void tulip_restart_rxtx(struct tulip_private *tp, u32 csr6mask)
+static inline void tulip_restart_rxtx(struct tulip_private *tp)
{
- tulip_outl_csr(tp, csr6mask | csr6_sr, CSR6);
- tulip_outl_csr(tp, csr6mask | csr6_st | csr6_sr, CSR6);
+ tulip_stop_rxtx(tp);
+ udelay(5);
+ tulip_start_rxtx(tp);
}
#endif /* __NET_TULIP_H__ */
-/* tulip_core.c: A DEC 21040-family ethernet driver for Linux. */
+/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
/*
Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
*/
#define DRV_NAME "tulip"
-#define DRV_VERSION "0.9.15-pre2"
-#define DRV_RELDATE "May 16, 2001"
+#define DRV_VERSION "0.9.15-pre5"
+#define DRV_RELDATE "June 16, 2001"
#include <linux/config.h>
#include <linux/module.h>
/* On some chip revs we must set the MII/SYM port before the reset!? */
if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
- tulip_outl_csr (tp, 0x00040000, CSR6);
+ outl(0x00040000, ioaddr + CSR6);
/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
outl(0x00000001, ioaddr + CSR0);
if (tulip_debug > 1)
printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
+ outl(tp->rx_ring_dma, ioaddr + CSR3);
+ outl(tp->tx_ring_dma, ioaddr + CSR4);
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+
if (tp->chip_id == PNIC2) {
u32 addr_high = (dev->dev_addr[1]<<8) + (dev->dev_addr[0]<<0);
/* This address setting does not appear to impact chip operation?? */
tp->cur_tx++;
}
- outl(tp->rx_ring_dma, ioaddr + CSR3);
- outl(tp->tx_ring_dma, ioaddr + CSR4);
-
tp->saved_if_port = dev->if_port;
if (dev->if_port == 0)
dev->if_port = tp->default_port;
printk(KERN_INFO "%s: Using MII transceiver %d, status "
"%4.4x.\n",
dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
- tulip_outl_csr(tp, csr6_mask_defstate, CSR6);
+ outl(csr6_mask_defstate, ioaddr + CSR6);
tp->csr6 = csr6_mask_hdcap;
dev->if_port = 11;
outl(0x0000, ioaddr + CSR13);
tulip_select_media(dev, 1);
/* Start the chip's Tx to process setup frame. */
- tulip_outl_csr(tp, tp->csr6, CSR6);
- tulip_outl_csr(tp, tp->csr6 | csr6_st, CSR6);
+ tulip_stop_rxtx(tp);
+ barrier();
+ udelay(5);
+ outl(tp->csr6 | TxOn, ioaddr + CSR6);
/* Enable interrupts by setting the interrupt mask. */
outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
- tulip_outl_csr(tp, tp->csr6 | csr6_st | csr6_sr, CSR6);
+ tulip_start_rxtx(tp);
outl(0, ioaddr + CSR2); /* Rx poll demand */
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Done tulip_open(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
+ printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
inl(ioaddr + CSR6));
}
+
/* Set the timer to switch to check for link beat and perhaps switch
to an alternate media type. */
- init_timer(&tp->timer);
tp->timer.expires = RUN_AT(next_tick);
- tp->timer.data = (unsigned long)dev;
- tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
add_timer(&tp->timer);
}
if (tp->fc_bit && test_bit(tp->fc_bit,&netdev_fc_xoff))
printk("BUG tx_timeout restarting rx when fc on\n");
#endif
- tulip_restart_rxtx(tp, tp->csr6);
+ tulip_restart_rxtx(tp);
/* Trigger an immediate transmit demand. */
outl(0, ioaddr + CSR1);
struct tulip_private *tp = (struct tulip_private *)dev->priv;
int i;
- tp->cur_rx = tp->cur_tx = 0;
- tp->dirty_rx = tp->dirty_tx = 0;
tp->susp_rx = 0;
tp->ttimer = 0;
tp->nir = 0;
return 0;
}
+static void tulip_release_unconsumed_tx_buffers(struct tulip_private *tp)
+{
+ unsigned int dirty_tx;
+
+ for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+ if (status > 0)
+ break; /* It has been Txed */
+
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_buffers[entry].skb == NULL) {
+ /* test because dummy frames not mapped */
+ if (tp->tx_buffers[entry].mapping)
+ pci_unmap_single(tp->pdev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ PCI_DMA_TODEVICE);
+ continue;
+ }
+ tp->stats.tx_errors++;
+
+ pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* Free the original skb. */
+ dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
+ tp->tx_buffers[entry].skb = NULL;
+ tp->tx_buffers[entry].mapping = 0;
+ }
+}
+
static void tulip_down (struct net_device *dev)
{
long ioaddr = dev->base_addr;
outl (0x00000000, ioaddr + CSR7);
/* Stop the Tx and Rx processes. */
- tulip_stop_rxtx(tp, inl(ioaddr + CSR6));
+ tulip_stop_rxtx(tp);
+
+ /* prepare receive buffers */
+ tulip_refill_rx(dev);
+
+ /* release any unconsumed transmit buffers */
+ tulip_release_unconsumed_tx_buffers(tp);
/* 21040 -- Leave the card in 10baseT state. */
if (tp->chip_id == DC21040)
spin_unlock_irqrestore (&tp->lock, flags);
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+
dev->if_port = tp->saved_if_port;
/* Leave the driver in snooze, not sleep, mode. */
else
filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
filterbit &= 0x3f;
- set_bit(filterbit, mc_filter);
+ mc_filter[filterbit >> 5] |= cpu_to_le32(1 << (filterbit & 31));
if (tulip_debug > 2) {
printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
"%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
spin_unlock_irqrestore(&tp->lock, flags);
}
- /* Can someone explain to me what the OR here is supposed to accomplish???? */
- tulip_outl_csr(tp, csr6 | 0x0000, CSR6);
+ outl(csr6, ioaddr + CSR6);
}
+#ifdef CONFIG_TULIP_MWI
static void __devinit tulip_mwi_config (struct pci_dev *pdev,
struct net_device *dev)
{
struct tulip_private *tp = dev->priv;
- u8 pci_cacheline;
+ u8 cache;
u16 pci_command, new_command;
- unsigned mwi = 1;
+ u32 csr0;
if (tulip_debug > 3)
printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pdev->slot_name);
- /* get a sane cache line size, if possible */
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &pci_cacheline);
- if (pci_cacheline < (SMP_CACHE_BYTES / 4))
- pci_cacheline = SMP_CACHE_BYTES / 4;
- if (pci_cacheline > TULIP_MAX_CACHE_LINE)
- pci_cacheline = TULIP_MAX_CACHE_LINE;
- switch (pci_cacheline) {
- case 8:
- case 16:
- case 32: break;
- default: pci_cacheline = TULIP_MIN_CACHE_LINE; break;
+ tp->csr0 = 0;
+
+ /* check for sane cache line size. from acenic.c. */
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
+ if ((cache << 2) != SMP_CACHE_BYTES) {
+ printk(KERN_WARNING "%s: PCI cache line size set incorrectly "
+ "(%i bytes) by BIOS/FW, correcting to %i\n",
+ pdev->slot_name, (cache << 2), SMP_CACHE_BYTES);
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ SMP_CACHE_BYTES >> 2);
+ udelay(5);
}
- pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, pci_cacheline);
- /* read back the result. if zero, or if a buggy chip rev,
- * disable MWI
+ /* read cache line size again, hardware may not have accepted
+ * our cache line size change
*/
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &pci_cacheline);
- if (!pci_cacheline || (tp->chip_id == DC21143 && tp->revision == 65))
- mwi = 0;
-
- /* re-clamp cache line values to ones supported by tulip */
- /* From this point forward, 'pci_cacheline' is really
- * the value used for csr0 cache alignment and
- * csr0 programmable burst length
- */
- switch (pci_cacheline) {
- case 0:
- case 8:
- case 16:
- case 32: break;
- default: pci_cacheline = TULIP_MIN_CACHE_LINE; break;
- }
+ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
+ if (!cache)
+ goto out;
+
+ /* if we have any cache line size at all, we can do MRM */
+ csr0 |= MRM;
+
+ /* ...and barring hardware bugs, MWI */
+ if (!(tp->chip_id == DC21143 && tp->revision == 65))
+ csr0 |= MWI;
/* set or disable MWI in the standard PCI command bit.
* Check for the case where mwi is desired but not available
*/
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
- if (mwi) new_command = pci_command | PCI_COMMAND_INVALIDATE;
+ if (csr0 & MWI) new_command = pci_command | PCI_COMMAND_INVALIDATE;
else new_command = pci_command & ~PCI_COMMAND_INVALIDATE;
if (new_command != pci_command) {
pci_write_config_word(pdev, PCI_COMMAND, new_command);
+ udelay(5);
pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
- if ((new_command != pci_command) && mwi &&
- ((pci_command & PCI_COMMAND_INVALIDATE) == 0))
- mwi = 0;
+ if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
+ csr0 &= ~MWI;
}
- tp->csr0 = MRL | MRM;
-
- /* if no PCI cache line size, bail out with minimal
- * burst size and cache alignment of 8 dwords.
- * We always want to have some sort of limit.
+ /* assign per-cacheline-size cache alignment and
+ * burst length values
*/
- if (!pci_cacheline) {
- tp->csr0 |= (8 << BurstLenShift) | (1 << CALShift);
+ switch (cache) {
+ case 8:
+ csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
+ break;
+ case 16:
+ csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
+ break;
+ case 32:
+ csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
+ break;
+ default:
goto out;
}
+
+ tp->csr0 = csr0;
+ goto out;
- /* finally, build the csr0 value */
- if (mwi)
- tp->csr0 |= MWI;
- tp->csr0 |= (pci_cacheline << BurstLenShift);
- switch (pci_cacheline) {
- case 8: tp->csr0 |= (1 << CALShift);
- case 16: tp->csr0 |= (2 << CALShift);
- case 32: tp->csr0 |= (3 << CALShift);
+early_out:
+ if (csr0 & MWI) {
+ pci_command &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_command);
+ csr0 &= ~MWI;
}
+ tp->csr0 = csr0 | (8 << BurstLenShift) | (1 << CALShift);
out:
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: MWI config mwi=%d, cacheline=%d, csr0=%08x\n",
- pdev->slot_name, mwi, pci_cacheline, tp->csr0);
+ printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
+ pdev->slot_name, cache, csr0);
}
+#endif
static int __devinit tulip_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
if (pci_request_regions (pdev, "tulip"))
goto err_out_free_netdev;
+#ifndef USE_IO_OPS
+ ioaddr = (unsigned long) ioremap (pci_resource_start (pdev, 1),
+ tulip_tbl[chip_idx].io_size);
+ if (!ioaddr)
+ goto err_out_free_res;
+#endif
+
pci_set_master(pdev);
pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
&tp->rx_ring_dma);
if (!tp->rx_ring)
- goto err_out_free_res;
+ goto err_out_mtable;
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
tp->csr0 = csr0;
spin_lock_init(&tp->lock);
spin_lock_init(&tp->mii_lock);
+ init_timer(&tp->timer);
+ tp->timer.data = (unsigned long)dev;
+ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
dev->base_addr = ioaddr;
dev->irq = irq;
+#ifdef CONFIG_TULIP_MWI
if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
tulip_mwi_config (pdev, dev);
+#endif
/* Stop the chip's Tx and Rx processes. */
- tulip_stop_rxtx(tp, inl(ioaddr + CSR6));
+ tulip_stop_rxtx(tp);
/* Clear the missed-packet counter. */
inl(ioaddr + CSR8);
dev->set_multicast_list = set_rx_mode;
if (register_netdev(dev))
- goto err_out_mtable;
+ goto err_out_free_ring;
printk(KERN_INFO "%s: %s rev %d at %#3lx,",
dev->name, tulip_tbl[chip_idx].chip_name, chip_rev, ioaddr);
outl(0x00000000, ioaddr + CSR13);
outl(0xFFFFFFFF, ioaddr + CSR14);
outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
- tulip_outl_csr(tp, inl(ioaddr + CSR6) | csr6_fd, CSR6);
+ outl(inl(ioaddr + CSR6) | csr6_fd, ioaddr + CSR6);
outl(0x0000EF01, ioaddr + CSR13);
break;
case DC21040:
case DC21142:
case PNIC2:
if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
- tulip_outl_csr(tp, csr6_mask_defstate, CSR6);
+ outl(csr6_mask_defstate, ioaddr + CSR6);
outl(0x0000, ioaddr + CSR13);
outl(0x0000, ioaddr + CSR14);
- tulip_outl_csr(tp, csr6_mask_hdcap, CSR6);
+ outl(csr6_mask_hdcap, ioaddr + CSR6);
} else
t21142_start_nway(dev);
break;
if ( ! tp->mii_cnt) {
tp->nway = 1;
tp->nwayset = 0;
- tulip_outl_csr(tp, csr6_ttm | csr6_ca, CSR6);
+ outl(csr6_ttm | csr6_ca, ioaddr + CSR6);
outl(0x30, ioaddr + CSR12);
- tulip_outl_csr(tp, 0x0001F078, CSR6);
- tulip_outl_csr(tp, 0x0201F078, CSR6); /* Turn on autonegotiation. */
+ outl(0x0001F078, ioaddr + CSR6);
+ outl(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
}
break;
case MX98713:
case COMPEX9881:
- tulip_outl_csr(tp, 0x00000000, CSR6);
+ outl(0x00000000, ioaddr + CSR6);
outl(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
outl(0x00000001, ioaddr + CSR13);
break;
case MX98715:
case MX98725:
- tulip_outl_csr(tp, 0x01a80000, CSR6);
+ outl(0x01a80000, ioaddr + CSR6);
outl(0xFFFFFFFF, ioaddr + CSR14);
outl(0x00001000, ioaddr + CSR12);
break;
return 0;
+err_out_free_ring:
+ pci_free_consistent (pdev,
+ sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
+
err_out_mtable:
if (tp->mtable)
kfree (tp->mtable);
+#ifndef USE_IO_OPS
+ iounmap((void *)ioaddr);
+#endif
+
err_out_free_res:
pci_release_regions (pdev);
+
err_out_free_netdev:
kfree (dev);
return -ENODEV;
}
-static void tulip_suspend (struct pci_dev *pdev)
+#ifdef CONFIG_PM
+
+static int tulip_suspend (struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata(pdev);
tulip_down (dev);
/* pci_power_off(pdev, -1); */
}
+ return 0;
}
-static void tulip_resume(struct pci_dev *pdev)
+static int tulip_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
tulip_up (dev);
netif_device_attach (dev);
}
+ return 0;
}
+#endif /* CONFIG_PM */
+
static void __devexit tulip_remove_one (struct pci_dev *pdev)
{
unregister_netdev (dev);
if (tp->mtable)
kfree (tp->mtable);
+#ifndef USE_IO_OPS
+ iounmap((void *)dev->base_addr);
+#endif
kfree (dev);
pci_release_regions (pdev);
pci_set_drvdata (pdev, NULL);
id_table: tulip_pci_tbl,
probe: tulip_init_one,
remove: tulip_remove_one,
+#ifdef CONFIG_PM
suspend: tulip_suspend,
resume: tulip_resume,
+#endif /* CONFIG_PM */
};
/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
- Written 1998-2000 by Donald Becker.
+ Written 1998-2001 by Donald Becker.
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
LK1.1.7:
- Manfred Spraul: added reset into tx_timeout
+
+ LK1.1.9:
+ - Urban Widmark: merges from Beckers 1.10 version
+ (media selection + eeprom reload)
+ - David Vrabel: merges from D-Link "1.11" version
+ (disable WOL and PME on startup)
*/
Both 'options[]' and 'full_duplex[]' should exist for driver
interoperability.
The media type is usually passed in 'options[]'.
+	The default is autonegotiation for speed and duplex.
+ This should rarely be overridden.
+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+ Use option values 0x20 and 0x200 for forcing full duplex operation.
*/
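As an illustration only (hypothetical values, not the shipped defaults declared below), forcing card 0 to 100Mbps full duplex and card 1 to 10Mbps half duplex while leaving the remaining cards on autonegotiation would look like this for a built-in driver; as a module the same values are passed through the options= parameter:

    /* Hypothetical example -- 0x220 = 100Mbps + full duplex,
     * 0x10 = 10Mbps half duplex, -1 = autonegotiate. */
    static int options[MAX_UNITS] = { 0x220, 0x10, -1, -1, -1, -1, -1, -1 };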
#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+/* maximum time-out delay */
+#define W_MAX_TIMEOUT 0x0FFFU
+
#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
#warning You must compile this file with the correct options!
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
-KERN_INFO "via-rhine.c:v1.08b-LK1.1.8 4/17/2000 Written by Donald Becker\n"
+KERN_INFO "via-rhine.c:v1.10-LK1.1.9 05/31/2001 Written by Donald Becker\n"
KERN_INFO " http://www.scyld.com/network/via-rhine.html\n";
+static char shortname[] __devinitdata = "via-rhine";
/* This driver was written to use PCI memory space, however most versions
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
-
+MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
/*
Theory of Operation
Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
+
IVc. Errata
*/
-
/* This table drives the PCI probe routines. It's mostly boilerplate in all
of the drivers, and will likely be provided by some future kernel.
Note the matching code -- the first table entry matchs all 56** cards but
StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
IntrStatus=0x0C, IntrEnable=0x0E,
MulticastFilter0=0x10, MulticastFilter1=0x14,
- RxRingPtr=0x18, TxRingPtr=0x1C,
+ RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
- MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72,
+ MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
Config=0x78, ConfigA=0x7A, RxMissed=0x7C, RxCRCErrs=0x7E,
StickyHW=0x83, WOLcrClr=0xA4, WOLcgClr=0xA7, PwrcsrClr=0xAC,
};
static int via_rhine_close(struct net_device *dev);
static inline void clear_tally_counters(long ioaddr);
-static void wait_for_reset(struct net_device *dev)
+static void wait_for_reset(struct net_device *dev, char *name)
{
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
+ int chip_id = np->chip_id;
int i;
+ /* 3043 may need long delay after reset (dlink) */
+ if (chip_id == VT3043 || chip_id == VT86C100A)
+ udelay(100);
+
i = 0;
do {
udelay(5);
i++;
if(i > 2000) {
- printk(KERN_ERR "%s: reset did not complete in 10 ms.\n",
- dev->name);
+ printk(KERN_ERR "%s: reset did not complete in 10 ms.\n", name);
break;
}
} while(readw(ioaddr + ChipCmd) & CmdReset);
if (debug > 1)
printk(KERN_INFO "%s: reset finished after %d microseconds.\n",
- dev->name, 5*i);
+ name, 5*i);
}
static int __devinit via_rhine_init_one (struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(*np));
if (dev == NULL) {
- printk (KERN_ERR "init_ethernet failed for card #%d\n",
- card_idx);
+ printk (KERN_ERR "init_ethernet failed for card #%d\n", card_idx);
goto err_out;
}
SET_MODULE_OWNER(dev);
- if (pci_request_regions(pdev, "via-rhine"))
+ if (pci_request_regions(pdev, shortname))
goto err_out_free_netdev;
#ifndef USE_IO
}
#endif
- /* Ideally we would read the EEPROM but access may be locked. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+ /* D-Link provided reset code (with comment additions) */
+ if (via_rhine_chip_info[chip_id].drv_flags & HasWOL) {
+ unsigned char byOrgValue;
+
+ /* clear sticky bit before reset & read ethernet address */
+ byOrgValue = readb(ioaddr + StickyHW);
+ byOrgValue = byOrgValue & 0xFC;
+ writeb(byOrgValue, ioaddr + StickyHW);
+
+ /* (bits written are cleared?) */
+ /* disable force PME-enable */
+ writeb(0x80, ioaddr + WOLcgClr);
+ /* disable power-event config bit */
+ writeb(0xFF, ioaddr + WOLcrClr);
+ /* clear power status (undocumented in vt6102 docs?) */
+ writeb(0xFF, ioaddr + PwrcsrClr);
+ }
/* Reset the chip to erase previous misconfiguration. */
writew(CmdReset, ioaddr + ChipCmd);
- wait_for_reset(dev);
+ wait_for_reset(dev, shortname);
+
+ /* Reload the station address from the EEPROM. */
+ writeb(0x20, ioaddr + MACRegEEcsr);
+ /* Typically 2 cycles to reload. */
+ for (i = 0; i < 150; i++)
+ if (! (readb(ioaddr + MACRegEEcsr) & 0x20))
+ break;
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
+ goto err_out_unmap;
+ }
+
+ if (chip_id == VT6102) {
+ /*
+		 * for the 3065D, an EEPROM reload turns on bit 0 in
+		 * MAC_REG_CFGA, which makes the MAC receive magic packets
+		 * automatically.  So we turn it off. (D-Link)
+ */
+ writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
+ }
dev->base_addr = ioaddr;
dev->irq = pdev->irq;
if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
np->full_duplex = 1;
- if (np->full_duplex)
+ if (np->full_duplex) {
+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+ " disabled.\n", dev->name);
np->duplex_lock = 1;
+ }
/* The chip-specific entries in the device structure. */
dev->open = via_rhine_open;
np->mii_cnt = phy_idx;
}
+ /* Allow forcing the media type. */
+ if (option > 0) {
+ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 0x3ff;
+ if (np->default_port & 0x330) {
+ /* FIXME: shouldn't someone check this variable? */
+ /* np->medialock = 1; */
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+ (option & 0x300 ? 100 : 10),
+ (option & 0x220 ? "full" : "half"));
+ if (np->mii_cnt)
+ mdio_write(dev, np->phys[0], 0,
+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
+ ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
+ }
+ }
+
return 0;
err_out_unmap:
return i;
alloc_rbufs(dev);
alloc_tbufs(dev);
- wait_for_reset(dev);
+ wait_for_reset(dev, dev->name);
init_registers(dev);
if (debug > 2)
printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
alloc_rbufs(dev);
/* Reinitialize the hardware. */
- wait_for_reset(dev);
+ wait_for_reset(dev, dev->name);
init_registers(dev);
spin_unlock(&np->lock);
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
- set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
+ int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+
+ mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
writel(mc_filter[0], ioaddr + MulticastFilter0);
writel(mc_filter[1], ioaddr + MulticastFilter1);
dev->name, readw(ioaddr + ChipCmd));
/* Switch to loopback mode to avoid hardware races. */
- writeb(np->tx_thresh | 0x01, ioaddr + TxConfig);
+ writeb(np->tx_thresh | 0x02, ioaddr + TxConfig);
/* Disable interrupts by clearing the interrupt mask. */
writew(0x0000, ioaddr + IntrEnable);
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_WD_CARDS) "i");
MODULE_PARM(mem, "1-" __MODULE_STRING(MAX_WD_CARDS) "i");
MODULE_PARM(mem_end, "1-" __MODULE_STRING(MAX_WD_CARDS) "i");
+MODULE_PARM_DESC(io, "WD80x3 I/O base address(es)");
+MODULE_PARM_DESC(irq, "WD80x3 IRQ number(s) (ignored for PureData boards)");
+MODULE_PARM_DESC(mem, "WD80x3 memory base address(es)(ignored for PureData boards)");
+MODULE_PARM_DESC(mem_end, "WD80x3 memory end address(es)");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-/* winbond-840.c: A Linux PCI network adapter skeleton device driver. */
+/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
Written 1998-2001 by Donald Becker.
descriptors. Remove cpu_to_le32, enable BE descriptors.
*/
+#define DRV_NAME "winbond-840"
+#define DRV_VERSION "1.01"
+#define DRV_RELDATE "5/15/2000"
+
+
/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
-KERN_INFO "winbond-840.c:v1.01 (2.4 port) 5/15/2000 Donald Becker <becker@scyld.com>\n"
+KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
KERN_INFO " http://www.scyld.com/network/drivers.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
+MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
/*
Theory of Operation
static inline unsigned ether_crc(int length, unsigned char *data);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
\f
return -ENOMEM;
SET_MODULE_OWNER(dev);
- if (pci_request_regions(pdev, "winbond-840"))
+ if (pci_request_regions(pdev, DRV_NAME))
goto err_out_netdev;
#ifdef USE_IO_OPS
dev->stop = &netdev_close;
dev->get_stats = &get_stats;
dev->set_multicast_list = &set_rx_mode;
- dev->do_ioctl = &mii_ioctl;
+ dev->do_ioctl = &netdev_ioctl;
dev->tx_timeout = &tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
writel(np->csr6, ioaddr + NetworkConfig);
}
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
+{
+ struct netdev_private *np = dev->priv;
+ u32 ethcmd;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
+ strcpy(info.driver, DRV_NAME);
+ strcpy(info.version, DRV_VERSION);
+ strcpy(info.bus_info, np->pci_dev->slot_name);
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
u16 *data = (u16 *)&rq->ifr_data;
switch(cmd) {
+ case SIOCETHTOOL:
+ return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
data[0] = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
/* Fall Through */
}
static struct pci_driver w840_driver = {
- name: "winbond-840",
+ name: DRV_NAME,
id_table: w840_pci_tbl,
probe: w840_probe1,
remove: w840_remove1,
# Wireless LAN device configuration
#
-###tristate ' Hermes chipset support' CONFIG_NET_ORINOCO
-###dep_tristate ' PCMCIA Hermes support (Orinoco/WavelanIEEE/PrismII/Symbol 802.11b cards)' CONFIG_PCMCIA_HERMES $CONFIG_NET_ORINOCO $CONFIG_PCMCIA
+if [ "$CONFIG_ISA" = "y" -o "$CONFIG_PCI" = "y" ]; then
+ tristate ' Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards' CONFIG_AIRO
+fi
if [ "$CONFIG_ALL_PPC" = "y" ]; then
tristate ' Apple Airport support (built-in)' CONFIG_APPLE_AIRPORT
fi
# If Pcmcia is compiled in, offer Pcmcia cards...
-if [ "$CONFIG_HOTPLUG" = "y" -a "$CONFIG_PCMCIA" != "n" ]; then
+if [ "$CONFIG_PCMCIA" != "n" ]; then
comment 'Wireless Pcmcia cards support'
- dep_tristate ' Hermes support (Orinoco/WavelanIEEE/PrismII/Symbol 802.11b cards)' CONFIG_PCMCIA_HERMES $CONFIG_PCMCIA
+ tristate ' Hermes support (Orinoco/WavelanIEEE/PrismII/Symbol 802.11b cards)' CONFIG_PCMCIA_HERMES
+ tristate ' Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards' CONFIG_AIRO_CS
+fi
-# If one of the Pcmcia cards above is enabled, activate Pcmcia network support
- if [ "$CONFIG_PCMCIA_HERMES" = "y" ]; then
- define_bool CONFIG_PCMCIA_NETCARD y
- fi
+# yes, this works even when no drivers are selected
+if [ "$CONFIG_ISA" = "y" -o "$CONFIG_PCI" = "y" -o \
+ "$CONFIG_ALL_PPC" = "y" -o "$CONFIG_PCMCIA" != "n" ]; then
+ define_bool CONFIG_NET_WIRELESS y
+else
+ define_bool CONFIG_NET_WIRELESS n
fi
# Makefile for the Linux Wireless network device drivers.
#
-O_TARGET := orinoco_drvs.o
+O_TARGET := wireless_net.o
obj-y :=
obj-m :=
obj- :=
# Things that need to export symbols
-export-objs := orinoco.o hermes.o
+export-objs := airo.o orinoco.o hermes.o
-# ISA Bus cards
-
-# PCI bus cards
-
-# Other cards
+obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o orinoco.o hermes.o
obj-$(CONFIG_APPLE_AIRPORT) += airport.o orinoco.o hermes.o
-# 16-bit Pcmcia wireless client drivers
-obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o orinoco.o hermes.o
+obj-$(CONFIG_AIRO) += airo.o
+obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o
include $(TOPDIR)/Rules.make
--- /dev/null
+/*======================================================================
+
+ Aironet driver for 4500 and 4800 series cards
+
+ This code is released under both the GPL version 2 and BSD licenses.
+ Either license may be used. The respective licenses are found at
+ the end of this file.
+
+ This code was developed by Benjamin Reed <breed@users.sourceforge.net>
+ including portions of which come from the Aironet PC4500
+ Developer's Reference Manual and used with permission. Copyright
+ (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
+ code in the Developer's manual was granted for this driver by
+ Aironet. Major code contributions were received from Javier Achirica
+ and Jean Tourrilhes <jt@hpl.hp.com>. Code was also integrated from
+ the Cisco Aironet driver for Linux.
+
+======================================================================*/
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <asm/segment.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_PCI
+static struct pci_device_id card_ids[] = __devinitdata {
+ { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
+ { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0x14b9, 0x0340, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0x14b9, 0x0350, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, card_ids);
+
+static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *);
+static void airo_pci_remove(struct pci_dev *);
+
+static struct pci_driver airo_driver = {
+ name: "airo",
+ id_table: card_ids,
+ probe: airo_pci_probe,
+ remove: airo_pci_remove,
+};
+#endif /* CONFIG_PCI */
+
+/* Include Wireless Extension definition and check version - Jean II */
+#include <linux/wireless.h>
+#if WIRELESS_EXT < 9
+#warning "Wireless extension v9 or newer required - please upgrade your kernel"
+#undef WIRELESS_EXT
+#endif
+#define WIRELESS_SPY // enable iwspy support
+#define CISCO_EXT // enable Cisco extensions
+
+#ifdef CISCO_EXT
+#include <linux/delay.h>
+#endif
+
+/* As you can see this list is HUGE!
+   I really don't know what a lot of these counts are about, but they
+   are all here for completeness.  If the IGNLABEL macro is put in
+   front of the label, that statistic will not be included in the list
+   of statistics in the /proc filesystem (a short expansion sketch
+   follows the statsLabels list below). */
+
+#define IGNLABEL 0&(int)
+static char *statsLabels[] = {
+ "RxOverrun",
+ IGNLABEL "RxPlcpCrcErr",
+ IGNLABEL "RxPlcpFormatErr",
+ IGNLABEL "RxPlcpLengthErr",
+ "RxMacCrcErr",
+ "RxMacCrcOk",
+ "RxWepErr",
+ "RxWepOk",
+ "RetryLong",
+ "RetryShort",
+ "MaxRetries",
+ "NoAck",
+ "NoCts",
+ "RxAck",
+ "RxCts",
+ "TxAck",
+ "TxRts",
+ "TxCts",
+ "TxMc",
+ "TxBc",
+ "TxUcFrags",
+ "TxUcPackets",
+ "TxBeacon",
+ "RxBeacon",
+ "TxSinColl",
+ "TxMulColl",
+ "DefersNo",
+ "DefersProt",
+ "DefersEngy",
+ "DupFram",
+ "RxFragDisc",
+ "TxAged",
+ "RxAged",
+ "LostSync-MaxRetry",
+ "LostSync-MissedBeacons",
+ "LostSync-ArlExceeded",
+ "LostSync-Deauth",
+ "LostSync-Disassoced",
+ "LostSync-TsfTiming",
+ "HostTxMc",
+ "HostTxBc",
+ "HostTxUc",
+ "HostTxFail",
+ "HostRxMc",
+ "HostRxBc",
+ "HostRxUc",
+ "HostRxDiscard",
+ IGNLABEL "HmacTxMc",
+ IGNLABEL "HmacTxBc",
+ IGNLABEL "HmacTxUc",
+ IGNLABEL "HmacTxFail",
+ IGNLABEL "HmacRxMc",
+ IGNLABEL "HmacRxBc",
+ IGNLABEL "HmacRxUc",
+ IGNLABEL "HmacRxDiscard",
+ IGNLABEL "HmacRxAccepted",
+ "SsidMismatch",
+ "ApMismatch",
+ "RatesMismatch",
+ "AuthReject",
+ "AuthTimeout",
+ "AssocReject",
+ "AssocTimeout",
+ IGNLABEL "ReasonOutsideTable",
+ IGNLABEL "ReasonStatus1",
+ IGNLABEL "ReasonStatus2",
+ IGNLABEL "ReasonStatus3",
+ IGNLABEL "ReasonStatus4",
+ IGNLABEL "ReasonStatus5",
+ IGNLABEL "ReasonStatus6",
+ IGNLABEL "ReasonStatus7",
+ IGNLABEL "ReasonStatus8",
+ IGNLABEL "ReasonStatus9",
+ IGNLABEL "ReasonStatus10",
+ IGNLABEL "ReasonStatus11",
+ IGNLABEL "ReasonStatus12",
+ IGNLABEL "ReasonStatus13",
+ IGNLABEL "ReasonStatus14",
+ IGNLABEL "ReasonStatus15",
+ IGNLABEL "ReasonStatus16",
+ IGNLABEL "ReasonStatus17",
+ IGNLABEL "ReasonStatus18",
+ IGNLABEL "ReasonStatus19",
+ "RxMan",
+ "TxMan",
+ "RxRefresh",
+ "TxRefresh",
+ "RxPoll",
+ "TxPoll",
+ "HostRetries",
+ "LostSync-HostReq",
+ "HostTxBytes",
+ "HostRxBytes",
+ "ElapsedUsec",
+ "ElapsedSec",
+ "LostSyncBetterAP",
+ "PrivacyMismatch",
+ "Jammed",
+ "DiscRxNotWepped",
+ "PhyEleMismatch",
+ (char*)-1 };
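A minimal expansion sketch of the IGNLABEL trick used in the list above (editorial illustration, not driver code): the macro turns the string that follows it into the integer 0, so an ignored entry keeps its slot in the array but holds a null-like value that the /proc output code can skip.

    /* IGNLABEL "RxPlcpCrcErr"
     *   expands to:   0&(int) "RxPlcpCrcErr"
     *   which is:     0 & (int)"RxPlcpCrcErr"   ==  0
     * so the array slot becomes 0 ("no label") while the indices of all
     * later statistics stay aligned with the values read from the card.
     */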
+#ifndef RUN_AT
+#define RUN_AT(x) (jiffies+(x))
+#endif
+
+
+/* These variables are for insmod, since it seems that the rates
+ can only be set in setup_card. Rates should be a comma separated
+ (no spaces) list of rates (up to 8). */
+
+static int rates[8];
+static int basic_rate;
+static char *ssids[3];
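A hypothetical usage example for the insmod parameters above (the rate and SSID values are placeholders, passed straight through to the card's supported-rates and SSID lists):

    /* Hypothetical:  insmod airo.o rates=2,4,11,22 ssids=home,work
     * would leave    rates[]    = { 2, 4, 11, 22, 0, 0, 0, 0 }
     *                basic_rate = 0                (not given, stays 0)
     *                ssids[]    = { "home", "work", NULL }
     */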
+
+static int io[4];
+static int irq[4];
+
+static
+int maxencrypt /* = 0 */; /* The highest rate that the card can encrypt at.
+ 0 means no limit. For old cards this was 4 */
+
+static int auto_wep /* = 0 */; /* If set, it tries to figure out the wep mode */
+static int aux_bap /* = 0 */; /* Checks to see if the aux ports are needed to read
+ the bap, needed on some older cards and buses. */
+static int adhoc;
+
+static int proc_uid /* = 0 */;
+
+static int proc_gid /* = 0 */;
+
+static int airo_perm = 0555;
+
+static int proc_perm = 0644;
+
+MODULE_AUTHOR("Benjamin Reed");
+MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet \
+ cards. Direct support for ISA/PCI cards and support \
+ for PCMCIA when used with airo_cs.");
+MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340");
+MODULE_PARM(io,"1-4i");
+MODULE_PARM(irq,"1-4i");
+MODULE_PARM(basic_rate,"i");
+MODULE_PARM(rates,"1-8i");
+MODULE_PARM(ssids,"1-3s");
+MODULE_PARM(auto_wep,"i");
+MODULE_PARM_DESC(auto_wep, "If non-zero, the driver will keep looping through \
+the authentication options until an association is made. The value of \
+auto_wep is the number of wep keys to check. A value of 2 will try using \
+the key at index 0 and index 1.");
+MODULE_PARM(aux_bap,"i");
+MODULE_PARM_DESC(aux_bap, "If non-zero, the driver will switch into a mode \
+that seems to work better for older cards with some older buses. Before \
+switching it checks that the switch is needed.");
+MODULE_PARM(maxencrypt, "i");
+MODULE_PARM_DESC(maxencrypt, "The maximum speed that the card can do \
+encryption. Units are in 512kbs. Zero (default) means there is no limit. \
+Older cards used to be limited to 2mbs (4).");
+MODULE_PARM(adhoc, "i");
+MODULE_PARM_DESC(adhoc, "If non-zero, the card will start in adhoc mode.");
+
+MODULE_PARM(proc_uid, "i");
+MODULE_PARM_DESC(proc_uid, "The uid that the /proc files will belong to.");
+MODULE_PARM(proc_gid, "i");
+MODULE_PARM_DESC(proc_gid, "The gid that the /proc files will belong to.");
+MODULE_PARM(airo_perm, "i");
+MODULE_PARM_DESC(airo_perm, "The permission bits of /proc/[driver/]aironet.");
+MODULE_PARM(proc_perm, "i");
+MODULE_PARM_DESC(proc_perm, "The permission bits of the files in /proc");
+
+#include <asm/uaccess.h>
+
+#define min(x,y) ((x<y)?x:y)
+
+/* This is a kind of sloppy hack to get this information to OUT4500 and
+ IN4500. I would be extremely interested in the situation where this
+   doesn't work though!!! */
+static int do8bitIO = 0;
+
+/* Return codes */
+#define SUCCESS 0
+#define ERROR -1
+#define NO_PACKET -2
+
+/* Commands */
+#define NOP 0x0010
+#define MAC_ENABLE 0x0001
+#define MAC_DISABLE 0x0002
+#define CMD_ACCESS 0x0021
+#define CMD_ALLOCATETX 0x000a
+#define CMD_TRANSMIT 0x000b
+#define HOSTSLEEP 0x85
+#define CMD_SETMODE 0x0009
+#define CMD_ENABLEAUX 0x0111
+#define CMD_SOFTRESET 0x0004
+
+/* Registers */
+#define COMMAND 0x00
+#define PARAM0 0x02
+#define PARAM1 0x04
+#define PARAM2 0x06
+#define STATUS 0x08
+#define RESP0 0x0a
+#define RESP1 0x0c
+#define RESP2 0x0e
+#define LINKSTAT 0x10
+#define SELECT0 0x18
+#define OFFSET0 0x1c
+#define RXFID 0x20
+#define TXALLOCFID 0x22
+#define TXCOMPLFID 0x24
+#define DATA0 0x36
+#define EVSTAT 0x30
+#define EVINTEN 0x32
+#define EVACK 0x34
+#define SWS0 0x28
+#define SWS1 0x2a
+#define SWS2 0x2c
+#define SWS3 0x2e
+#define AUXPAGE 0x3A
+#define AUXOFF 0x3C
+#define AUXDATA 0x3E
+
+/* BAP selectors */
+#define BAP0 0 // Used for receiving packets
+#define BAP1 2 // Used for xmiting packets and working with RIDS
+
+/* Flags */
+#define COMMAND_BUSY 0x8000
+
+#define BAP_BUSY 0x8000
+#define BAP_ERR 0x4000
+#define BAP_DONE 0x2000
+
+#define PROMISC 0xffff
+#define NOPROMISC 0x0000
+
+#define EV_CMD 0x10
+#define EV_CLEARCOMMANDBUSY 0x4000
+#define EV_RX 0x01
+#define EV_TX 0x02
+#define EV_TXEXC 0x04
+#define EV_ALLOC 0x08
+#define EV_LINK 0x80
+#define EV_AWAKE 0x100
+#define EV_UNKNOWN 0x800
+#define STATUS_INTS ( EV_AWAKE | EV_LINK | EV_TXEXC | EV_TX | EV_RX | EV_UNKNOWN)
+
+/* The RIDs */
+#define RID_CAPABILITIES 0xFF00
+#define RID_CONFIG 0xFF10
+#define RID_SSID 0xFF11
+#define RID_APLIST 0xFF12
+#define RID_DRVNAME 0xFF13
+#define RID_ETHERENCAP 0xFF14
+#define RID_WEP_TEMP 0xFF15
+#define RID_WEP_PERM 0xFF16
+#define RID_MODULATION 0xFF17
+#define RID_ACTUALCONFIG 0xFF20 /*readonly*/
+#define RID_LEAPUSERNAME 0xFF23
+#define RID_LEAPPASSWORD 0xFF24
+#define RID_STATUS 0xFF50
+#define RID_STATS 0xFF68
+#define RID_STATSDELTA 0xFF69
+#define RID_STATSDELTACLEAR 0xFF6A
+
+/*
+ * Rids and endian-ness: the rids will always be in CPU endian, since
+ * that is what all the patches from the big-endian guys end up doing,
+ * so all rid access should go through the read/writeXXXRid routines.
+ */
+
+/* This structure came from an email sent to me from an engineer at
+ aironet for inclusion into this driver */
+typedef struct {
+ u16 len;
+ u16 kindex;
+ u8 mac[6];
+ u16 klen;
+ u8 key[16];
+} WepKeyRid;
+
+/* These structures are from the Aironet's PC4500 Developers Manual */
+typedef struct {
+ u16 len;
+ u8 ssid[32];
+} Ssid;
+
+typedef struct {
+ u16 len;
+ Ssid ssids[3];
+} SsidRid;
+
+typedef struct {
+ u16 len;
+ u16 modulation;
+#define MOD_DEFAULT 0
+#define MOD_CCK 1
+#define MOD_MOK 2
+} ModulationRid;
+
+typedef struct {
+ u16 cmd;
+ u16 parm0;
+ u16 parm1;
+ u16 parm2;
+} Cmd;
+
+typedef struct {
+ u16 status;
+ u16 rsp0;
+ u16 rsp1;
+ u16 rsp2;
+} Resp;
+
+typedef struct {
+ u16 len; /* sizeof(ConfigRid) */
+ u16 opmode; /* operating mode */
+#define MODE_STA_IBSS 0
+#define MODE_STA_ESS 1
+#define MODE_AP 2
+#define MODE_AP_RPTR 3
+#define MODE_ETHERNET_HOST (0<<8) /* rx payloads converted */
+#define MODE_LLC_HOST (1<<8) /* rx payloads left as is */
+#define MODE_AIRONET_EXTEND	(1<<9) /* enable Aironet extensions */
+#define MODE_AP_INTERFACE (1<<10) /* enable ap interface extensions */
+#define MODE_ANTENNA_ALIGN (1<<11) /* enable antenna alignment */
+#define MODE_ETHER_LLC (1<<12) /* enable ethernet LLC */
+#define MODE_LEAF_NODE (1<<13) /* enable leaf node bridge */
+#define MODE_CF_POLLABLE (1<<14) /* enable CF pollable */
+ u16 rmode; /* receive mode */
+#define RXMODE_BC_MC_ADDR 0
+#define RXMODE_BC_ADDR 1 /* ignore multicasts */
+#define RXMODE_ADDR 2 /* ignore multicast and broadcast */
+#define RXMODE_RFMON 3 /* wireless monitor mode */
+#define RXMODE_RFMON_ANYBSS 4
+#define RXMODE_LANMON 5 /* lan style monitor -- data packets only */
+#define RXMODE_DISABLE_802_3_HEADER (1<<8) /* disables 802.3 header on rx */
+#define RXMODE_NORMALIZED_RSSI (1<<9) /* return normalized RSSI */
+ u16 fragThresh;
+ u16 rtsThres;
+ u8 macAddr[6];
+ u8 rates[8];
+ u16 shortRetryLimit;
+ u16 longRetryLimit;
+ u16 txLifetime; /* in kusec */
+ u16 rxLifetime; /* in kusec */
+ u16 stationary;
+ u16 ordering;
+ u16 u16deviceType; /* for overriding device type */
+ u16 cfpRate;
+ u16 cfpDuration;
+ u16 _reserved1[3];
+ /*---------- Scanning/Associating ----------*/
+ u16 scanMode;
+#define SCANMODE_ACTIVE 0
+#define SCANMODE_PASSIVE 1
+#define SCANMODE_AIROSCAN 2
+ u16 probeDelay; /* in kusec */
+ u16 probeEnergyTimeout; /* in kusec */
+ u16 probeResponseTimeout;
+ u16 beaconListenTimeout;
+ u16 joinNetTimeout;
+ u16 authTimeout;
+ u16 authType;
+#define AUTH_OPEN 0x1
+#define AUTH_ENCRYPT 0x101
+#define AUTH_SHAREDKEY 0x102
+#define AUTH_ALLOW_UNENCRYPTED 0x200
+ u16 associationTimeout;
+ u16 specifiedApTimeout;
+ u16 offlineScanInterval;
+ u16 offlineScanDuration;
+ u16 linkLossDelay;
+ u16 maxBeaconLostTime;
+ u16 refreshInterval;
+#define DISABLE_REFRESH 0xFFFF
+ u16 _reserved1a[1];
+ /*---------- Power save operation ----------*/
+ u16 powerSaveMode;
+#define POWERSAVE_CAM 0
+#define POWERSAVE_PSP 1
+#define POWERSAVE_PSPCAM 2
+ u16 sleepForDtims;
+ u16 listenInterval;
+ u16 fastListenInterval;
+ u16 listenDecay;
+ u16 fastListenDelay;
+ u16 _reserved2[2];
+ /*---------- Ap/Ibss config items ----------*/
+ u16 beaconPeriod;
+ u16 atimDuration;
+ u16 hopPeriod;
+ u16 channelSet;
+ u16 channel;
+ u16 dtimPeriod;
+ u16 bridgeDistance;
+ u16 radioID;
+ /*---------- Radio configuration ----------*/
+ u16 radioType;
+#define RADIOTYPE_DEFAULT 0
+#define RADIOTYPE_802_11 1
+#define RADIOTYPE_LEGACY 2
+ u8 rxDiversity;
+ u8 txDiversity;
+ u16 txPower;
+#define TXPOWER_DEFAULT 0
+ u16 rssiThreshold;
+#define RSSI_DEFAULT 0
+ u16 modulation;
+ u16 shortPreamble;
+ u16 homeProduct;
+ u16 radioSpecific;
+ /*---------- Aironet Extensions ----------*/
+ u8 nodeName[16];
+ u16 arlThreshold;
+ u16 arlDecay;
+ u16 arlDelay;
+ u16 _reserved4[1];
+ /*---------- Aironet Extensions ----------*/
+ u16 magicAction;
+#define MAGIC_ACTION_STSCHG 1
+#define MACIC_ACTION_RESUME 2
+#define MAGIC_IGNORE_MCAST (1<<8)
+#define MAGIC_IGNORE_BCAST (1<<9)
+#define MAGIC_SWITCH_TO_PSP (0<<10)
+#define MAGIC_STAY_IN_CAM (1<<10)
+ u16 magicControl;
+ u16 autoWake;
+} ConfigRid;
+
+typedef struct {
+ u16 len;
+ u8 mac[6];
+ u16 mode;
+ u16 errorCode;
+ u16 sigQuality;
+ u16 SSIDlen;
+ char SSID[32];
+ char apName[16];
+ char bssid[4][6];
+ u16 beaconPeriod;
+ u16 dimPeriod;
+ u16 atimDuration;
+ u16 hopPeriod;
+ u16 channelSet;
+ u16 channel;
+ u16 hopsToBackbone;
+ u16 apTotalLoad;
+ u16 generatedLoad;
+ u16 accumulatedArl;
+ u16 signalQuality;
+ u16 currentXmitRate;
+ u16 apDevExtensions;
+ u16 normalizedSignalStrength;
+ u16 _reserved[10];
+} StatusRid;
+
+typedef struct {
+ u16 len;
+ u16 spacer;
+ u32 vals[100];
+} StatsRid;
+
+
+typedef struct {
+ u16 len;
+ u8 ap[4][6];
+} APListRid;
+
+typedef struct {
+ u16 len;
+ char oui[3];
+ u16 prodNum;
+ char manName[32];
+ char prodName[16];
+ char prodVer[8];
+ char factoryAddr[6];
+ char aironetAddr[6];
+ u16 radioType;
+ u16 country;
+ char callid[6];
+ char supportedRates[8];
+ char rxDiversity;
+ char txDiversity;
+ u16 txPowerLevels[8];
+ u16 hardVer;
+ u16 hardCap;
+ u16 tempRange;
+ u16 softVer;
+ u16 softSubVer;
+ u16 interfaceVer;
+ u16 softCap;
+ u16 bootBlockVer;
+ u16 requiredHard;
+} CapabilityRid;
+
+#define TXCTL_TXOK (1<<1) /* report if tx is ok */
+#define TXCTL_TXEX (1<<2) /* report if tx fails */
+#define TXCTL_802_3 (0<<3) /* 802.3 packet */
+#define TXCTL_802_11 (1<<3) /* 802.11 mac packet */
+#define TXCTL_ETHERNET (0<<4) /* payload has ethertype */
+#define TXCTL_LLC (1<<4) /* payload is llc */
+#define TXCTL_RELEASE (0<<5) /* release after completion */
+#define TXCTL_NORELEASE (1<<5) /* on completion returns to host */
+
+#define BUSY_FID 0x10000
+
+#ifdef CISCO_EXT
+#define AIROMAGIC 0xa55a
+#define AIROIOCTL SIOCDEVPRIVATE
+#define AIROIDIFC AIROIOCTL + 1
+
+/* Ioctl constants to be used in airo_ioctl.command */
+
+#define AIROGCAP 0 // Capability rid
+#define AIROGCFG 1 // USED A LOT
+#define AIROGSLIST 2 // System ID list
+#define AIROGVLIST 3 // List of specified AP's
+#define AIROGDRVNAM 4 // NOTUSED
+#define AIROGEHTENC 5 // NOTUSED
+#define AIROGWEPKTMP 6
+#define AIROGWEPKNV 7
+#define AIROGSTAT 8
+#define AIROGSTATSC32 9
+#define AIROGSTATSD32 10
+
+/* Leave gap of 40 commands after AIROGSTATSD32 for future */
+
+#define AIROPCAP AIROGSTATSD32 + 40
+#define AIROPVLIST AIROPCAP + 1
+#define AIROPSLIST AIROPVLIST + 1
+#define AIROPCFG AIROPSLIST + 1
+#define AIROPSIDS AIROPCFG + 1
+#define AIROPAPLIST AIROPSIDS + 1
+#define AIROPMACON AIROPAPLIST + 1 /* Enable mac */
+#define AIROPMACOFF AIROPMACON + 1 /* Disable mac */
+#define AIROPSTCLR AIROPMACOFF + 1
+#define AIROPWEPKEY AIROPSTCLR + 1
+#define AIROPWEPKEYNV AIROPWEPKEY + 1
+#define AIROPLEAPPWD AIROPWEPKEYNV + 1
+#define AIROPLEAPUSR AIROPLEAPPWD + 1
+
+/* Flash codes */
+
+#define AIROFLSHRST AIROPWEPKEYNV + 40
+#define AIROFLSHGCHR AIROFLSHRST + 1
+#define AIROFLSHSTFL AIROFLSHGCHR + 1
+#define AIROFLSHPCHR AIROFLSHSTFL + 1
+#define AIROFLPUTBUF AIROFLSHPCHR + 1
+#define AIRORESTART AIROFLPUTBUF + 1
+
+#define FLASHSIZE 32768
+
+typedef struct aironet_ioctl {
+ unsigned short command; // What to do
+ unsigned short len; // Len of data
+ unsigned char *data; // d-data
+} aironet_ioctl;
+#endif /* CISCO_EXT */
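For orientation, a rough userspace-side sketch of how the Cisco extension interface above is meant to be driven, assuming the usual SIOCDEVPRIVATE pattern in which ifr_data points at an aironet_ioctl descriptor (the function name, interface name and buffer handling are placeholders; the struct and constants come from the block above):

    /* Hypothetical userspace sketch -- fetch the capability RID (AIROGCAP). */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>

    static int read_cap_rid(int sock_fd, const char *ifname, void *buf, int buflen)
    {
            struct ifreq ifr;
            aironet_ioctl aio;

            memset(&ifr, 0, sizeof(ifr));
            memset(&aio, 0, sizeof(aio));
            aio.command = AIROGCAP;                  /* which RID to read */
            aio.len = buflen;                        /* size of caller's buffer */
            aio.data = (unsigned char *) buf;        /* driver copies the RID here */
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ); /* e.g. "eth1" (placeholder) */
            ifr.ifr_data = (char *) &aio;
            return ioctl(sock_fd, AIROIOCTL, &ifr);  /* AIROIOCTL == SIOCDEVPRIVATE */
    }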
+
+#ifdef WIRELESS_EXT
+// Frequency list (map channels to frequencies)
+const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+
+// A few details needed for WEP (Wired Equivalent Privacy)
+#define MAX_KEY_SIZE 13	// 104-bit key (marketed as "128-bit" WEP, which counts the 24-bit IV)
+#define MIN_KEY_SIZE 5 // 40 bits RC4 - WEP
+#define MAX_KEYS 4 // 4 different keys
+typedef struct wep_key_t {
+ u16 len;
+ u8 key[16]; /* 40-bit and 104-bit keys */
+} wep_key_t;
+#endif /* WIRELESS_EXT */
+
+static const char version[] = "airo.c 0.2 (Ben Reed & Javier Achirica)";
+
+struct airo_info;
+
+static int get_dec_u16( char *buffer, int *start, int limit );
+static void OUT4500( struct airo_info *, u16 register, u16 value );
+static unsigned short IN4500( struct airo_info *, u16 register );
+static u16 setup_card(struct airo_info*, u8 *mac, ConfigRid *);
+static void enable_interrupts(struct airo_info*);
+static void disable_interrupts(struct airo_info*);
+static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp);
+static int bap_setup(struct airo_info*, u16 rid, u16 offset, int whichbap);
+static int aux_bap_read(struct airo_info*, u16 *pu16Dst, int bytelen,
+ int whichbap);
+static int fast_bap_read(struct airo_info*, u16 *pu16Dst, int bytelen,
+ int whichbap);
+static int bap_write(struct airo_info*, const u16 *pu16Src, int bytelen,
+ int whichbap);
+static int PC4500_accessrid(struct airo_info*, u16 rid, u16 accmd);
+static int PC4500_readrid(struct airo_info*, u16 rid, void *pBuf, int len);
+static int PC4500_writerid(struct airo_info*, u16 rid, const void
+ *pBuf, int len);
+static int do_writerid( struct airo_info*, u16 rid, const void *rid_data,
+ int len );
+static u16 transmit_allocate(struct airo_info*, int lenPayload);
+static int transmit_802_3_packet(struct airo_info*, u16 TxFid, char
+ *pPacket, int len);
+
+static void airo_interrupt( int irq, void* dev_id, struct pt_regs
+ *regs);
+static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+#ifdef WIRELESS_EXT
+struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
+#endif /* WIRELESS_EXT */
+#ifdef CISCO_EXT
+static int readrids(struct net_device *dev, aironet_ioctl *comp);
+static int writerids(struct net_device *dev, aironet_ioctl *comp);
+int flashcard(struct net_device *dev, aironet_ioctl *comp);
+#endif /* CISCO_EXT */
+
+struct airo_info {
+ struct net_device_stats stats;
+ int open;
+ struct net_device *dev;
+	/* Note, we can have MAX_FIDS outstanding.  FIDs are 16 bits, so we
+	   use the high 16 bits (where the queued packet length is stored) to
+	   mark whether an entry is in use; see the short sketch after this
+	   struct. */
+#define MAX_FIDS 6
+ int fids[MAX_FIDS];
+ int registered;
+ ConfigRid config;
+ u16 authtype; // Used with auto_wep
+ char keyindex; // Used with auto wep
+ char defindex; // Used with auto wep
+ struct timer_list timer;
+ struct proc_dir_entry *proc_entry;
+ struct airo_info *next;
+ spinlock_t bap0_lock;
+ spinlock_t bap1_lock;
+ spinlock_t aux_lock;
+ spinlock_t cmd_lock;
+ int flags;
+#define FLAG_PROMISC 0x01
+#define FLAG_RADIO_OFF 0x02
+ int (*bap_read)(struct airo_info*, u16 *pu16Dst, int bytelen,
+ int whichbap);
+ int (*header_parse)(struct sk_buff*, unsigned char *);
+#ifdef WIRELESS_EXT
+ int need_commit; // Need to set config
+ struct iw_statistics wstats; // wireless stats
+#ifdef WIRELESS_SPY
+ int spy_number;
+ u_char spy_address[IW_MAX_SPY][6];
+ struct iw_quality spy_stat[IW_MAX_SPY];
+#endif /* WIRELESS_SPY */
+#endif /* WIRELESS_EXT */
+};
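A small illustration of the fids[] convention noted in the struct comment above (the helper names are editorial, not part of the driver; the transmit path below does the same thing inline):

    /* Each fids[] entry packs two fields into one word:
     *     bits  0..15 : the transmit FID handed out by the card
     *     bits 16..31 : the length of the packet currently queued on it
     * so a zero upper half means the FID is free.
     */
    static inline int fid_in_use(u32 entry)             { return (entry & 0xffff0000) != 0; }
    static inline u32 fid_mark_used(u32 entry, u16 len) { return entry | ((u32)len << 16); }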
+
+static inline int bap_read(struct airo_info *ai, u16 *pu16Dst, int bytelen,
+ int whichbap) {
+ return ai->bap_read(ai, pu16Dst, bytelen, whichbap);
+}
+
+static int setup_proc_entry( struct net_device *dev,
+ struct airo_info *apriv );
+static int takedown_proc_entry( struct net_device *dev,
+ struct airo_info *apriv );
+
+static int readWepKeyRid(struct airo_info*ai, WepKeyRid *wkr, int temp) {
+ int rc = PC4500_readrid(ai, temp ? RID_WEP_TEMP : RID_WEP_PERM,
+ wkr, sizeof(*wkr));
+
+ wkr->len = le16_to_cpu(wkr->len);
+ wkr->kindex = le16_to_cpu(wkr->kindex);
+ wkr->klen = le16_to_cpu(wkr->klen);
+ return rc;
+}
+/* In the writeXXXRid routines we copy the rids so that we don't screw up
+ * the originals when we convert them to little-endian... */
+static int writeWepKeyRid(struct airo_info*ai, WepKeyRid *pwkr, int perm) {
+ int rc;
+ WepKeyRid wkr = *pwkr;
+
+ wkr.len = cpu_to_le16(wkr.len);
+ wkr.kindex = cpu_to_le16(wkr.kindex);
+ wkr.klen = cpu_to_le16(wkr.klen);
+ rc = do_writerid(ai, RID_WEP_TEMP, &wkr, sizeof(wkr));
+ if (rc!=SUCCESS) printk(KERN_ERR "airo: WEP_TEMP set %x\n", rc);
+ if (perm) {
+ rc = do_writerid(ai, RID_WEP_PERM, &wkr, sizeof(wkr));
+ if (rc!=SUCCESS) {
+ printk(KERN_ERR "airo: WEP_PERM set %x\n", rc);
+ }
+ }
+ return rc;
+}
+
+static int readSsidRid(struct airo_info*ai, SsidRid *ssidr) {
+ int i;
+ int rc = PC4500_readrid(ai, RID_SSID, ssidr, sizeof(*ssidr));
+
+ ssidr->len = le16_to_cpu(ssidr->len);
+ for(i = 0; i < 3; i++) {
+ ssidr->ssids[i].len = le16_to_cpu(ssidr->ssids[i].len);
+ }
+ return rc;
+}
+static int writeSsidRid(struct airo_info*ai, SsidRid *pssidr) {
+ int rc;
+ int i;
+ SsidRid ssidr = *pssidr;
+
+ ssidr.len = cpu_to_le16(ssidr.len);
+ for(i = 0; i < 3; i++) {
+ ssidr.ssids[i].len = cpu_to_le16(ssidr.ssids[i].len);
+ }
+ rc = do_writerid(ai, RID_SSID, &ssidr, sizeof(ssidr));
+ return rc;
+}
+static int readConfigRid(struct airo_info*ai, ConfigRid *cfgr) {
+ int rc = PC4500_readrid(ai, RID_ACTUALCONFIG, cfgr, sizeof(*cfgr));
+ u16 *s;
+
+ for(s = &cfgr->len; s <= &cfgr->rtsThres; s++) *s = le16_to_cpu(*s);
+
+ for(s = &cfgr->shortRetryLimit; s <= &cfgr->radioType; s++)
+ *s = le16_to_cpu(*s);
+
+ for(s = &cfgr->txPower; s <= &cfgr->radioSpecific; s++)
+ *s = le16_to_cpu(*s);
+
+ for(s = &cfgr->arlThreshold; s <= &cfgr->autoWake; s++)
+ *s = le16_to_cpu(*s);
+
+ return rc;
+}
+static int writeConfigRid(struct airo_info*ai, ConfigRid *pcfgr) {
+ u16 *s;
+ ConfigRid cfgr = *pcfgr;
+
+ for(s = &cfgr.len; s <= &cfgr.rtsThres; s++) *s = cpu_to_le16(*s);
+
+ for(s = &cfgr.shortRetryLimit; s <= &cfgr.radioType; s++)
+ *s = cpu_to_le16(*s);
+
+ for(s = &cfgr.txPower; s <= &cfgr.radioSpecific; s++)
+ *s = cpu_to_le16(*s);
+
+ for(s = &cfgr.arlThreshold; s <= &cfgr.autoWake; s++)
+ *s = cpu_to_le16(*s);
+
+ return do_writerid( ai, RID_CONFIG, &cfgr, sizeof(cfgr));
+}
+static int readStatusRid(struct airo_info*ai, StatusRid *statr) {
+ int rc = PC4500_readrid(ai, RID_STATUS, statr, sizeof(*statr));
+ u16 *s;
+
+ statr->len = le16_to_cpu(statr->len);
+ for(s = &statr->mode; s <= &statr->SSIDlen; s++) *s = le16_to_cpu(*s);
+
+ for(s = &statr->beaconPeriod; s <= &statr->_reserved[9]; s++)
+ *s = le16_to_cpu(*s);
+
+ return rc;
+}
+static int readAPListRid(struct airo_info*ai, APListRid *aplr) {
+ int rc = PC4500_readrid(ai, RID_APLIST, aplr, sizeof(*aplr));
+ aplr->len = le16_to_cpu(aplr->len);
+ return rc;
+}
+static int writeAPListRid(struct airo_info*ai, APListRid *aplr) {
+ int rc;
+ aplr->len = cpu_to_le16(aplr->len);
+ rc = do_writerid(ai, RID_APLIST, aplr, sizeof(*aplr));
+ return rc;
+}
+static int readCapabilityRid(struct airo_info*ai, CapabilityRid *capr) {
+ int rc = PC4500_readrid(ai, RID_CAPABILITIES, capr, sizeof(*capr));
+ u16 *s;
+
+ capr->len = le16_to_cpu(capr->len);
+ capr->prodNum = le16_to_cpu(capr->prodNum);
+ capr->radioType = le16_to_cpu(capr->radioType);
+ capr->country = le16_to_cpu(capr->country);
+ for(s = &capr->txPowerLevels[0]; s <= &capr->requiredHard; s++)
+ *s = le16_to_cpu(*s);
+ return rc;
+}
+static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid) {
+ int rc = PC4500_readrid(ai, rid, sr, sizeof(*sr));
+ u32 *i;
+
+ sr->len = le16_to_cpu(sr->len);
+ for(i = &sr->vals[0]; i <= &sr->vals[99]; i++) *i = le32_to_cpu(*i);
+ return rc;
+}
+
+static int airo_open(struct net_device *dev) {
+ struct airo_info *info = dev->priv;
+
+ enable_interrupts(info);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
+ s16 len;
+ s16 retval = 0;
+ u16 status;
+	unsigned long flags;
+ s8 *buffer;
+ int i,j;
+ struct airo_info *priv = (struct airo_info*)dev->priv;
+ u32 *fids = priv->fids;
+
+ if ( skb == NULL ) {
+ printk( KERN_ERR "airo: skb == NULL!!!\n" );
+ return 0;
+ }
+
+ /* Find a vacant FID */
+ spin_lock_irqsave(&priv->bap1_lock, flags);
+ for( j = 0, i = -1; j < MAX_FIDS; j++ ) {
+ if ( !( fids[j] & 0xffff0000 ) ) {
+ if ( i == -1 ) i = j;
+ else break;
+ }
+ }
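+	/* The loop above looks for two free FIDs: "i" is the one we will
+	 * use for this packet, and if no second free FID was found
+	 * (j reached MAX_FIDS) the queue is stopped until a transmit
+	 * completes. */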
+ if ( j == MAX_FIDS ) netif_stop_queue(dev);
+ if ( i == -1 ) {
+ retval = -EBUSY;
+ goto tx_done;
+ }
+
+ len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; /* check min length*/
+ buffer = skb->data;
+
+ status = transmit_802_3_packet( priv,
+ fids[i],
+ skb->data, len );
+
+ if ( status == SUCCESS ) {
+ /* Mark fid as used & save length for later */
+ fids[i] |= (len << 16);
+ dev->trans_start = jiffies;
+ } else {
+ priv->stats.tx_errors++;
+ }
+ tx_done:
+ spin_unlock_irqrestore(&priv->bap1_lock, flags);
+ dev_kfree_skb(skb);
+ return 0;
+}
+
+static struct net_device_stats *airo_get_stats(struct net_device *dev) {
+ return &(((struct airo_info*)dev->priv)->stats);
+}
+
+static int enable_MAC( struct airo_info *ai, Resp *rsp );
+static void disable_MAC(struct airo_info *ai);
+
+static void airo_set_multicast_list(struct net_device *dev) {
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ Cmd cmd;
+ Resp rsp;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd=CMD_SETMODE;
+ cmd.parm0=(dev->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
+ issuecommand(ai, &cmd, &rsp);
+
+ if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) {
+ /* Turn on multicast. (Should be already setup...) */
+ }
+}
+
+static int airo_set_mac_address(struct net_device *dev, void *p)
+{
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ struct sockaddr *addr = p;
+ ConfigRid cfg;
+
+ readConfigRid (ai, &cfg);
+ memcpy (cfg.macAddr, addr->sa_data, dev->addr_len);
+ writeConfigRid (ai, &cfg);
+ memcpy (dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static int airo_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 2400))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+
+static int airo_close(struct net_device *dev) {
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+
+ netif_stop_queue(dev);
+ disable_interrupts( ai );
+ return 0;
+}
+
+static void del_airo_dev( struct net_device *dev );
+
+void stop_airo_card( struct net_device *dev, int freeres )
+{
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ takedown_proc_entry( dev, ai );
+ if (ai->registered) {
+ unregister_netdev( dev );
+ ai->registered = 0;
+ }
+ disable_interrupts(ai);
+ free_irq( dev->irq, dev );
+ if (auto_wep) del_timer_sync(&ai->timer);
+ if (freeres) {
+ /* PCMCIA frees this stuff, so only for PCI and ISA */
+ release_region( dev->base_addr, 64 );
+ }
+ del_airo_dev( dev );
+ kfree( dev );
+}
+
+static int add_airo_dev( struct net_device *dev );
+
+struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia )
+{
+ struct net_device *dev;
+ struct airo_info *ai;
+ int i, rc;
+
+ /* Create the network device object. */
+ dev = alloc_etherdev(sizeof(*ai));
+ if (!dev) {
+ printk(KERN_ERR "airo: Couldn't alloc_etherdev\n");
+ return NULL;
+ }
+ ai = dev->priv;
+ ai->registered = 1;
+ ai->dev = dev;
+ ai->bap0_lock = SPIN_LOCK_UNLOCKED;
+ ai->bap1_lock = SPIN_LOCK_UNLOCKED;
+ ai->aux_lock = SPIN_LOCK_UNLOCKED;
+ ai->cmd_lock = SPIN_LOCK_UNLOCKED;
+ ai->header_parse = dev->hard_header_parse;
+ rc = add_airo_dev( dev );
+ if (rc)
+ goto err_out_free;
+
+ /* The Airo-specific entries in the device structure. */
+ dev->hard_start_xmit = &airo_start_xmit;
+ dev->get_stats = &airo_get_stats;
+ dev->set_multicast_list = &airo_set_multicast_list;
+ dev->set_mac_address = &airo_set_mac_address;
+ dev->do_ioctl = &airo_ioctl;
+#ifdef WIRELESS_EXT
+ dev->get_wireless_stats = airo_get_wireless_stats;
+#endif /* WIRELESS_EXT */
+ dev->change_mtu = &airo_change_mtu;
+ dev->open = &airo_open;
+ dev->stop = &airo_close;
+ dev->irq = irq;
+ dev->base_addr = port;
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto err_out_unlink;
+
+ rc = request_irq( dev->irq, airo_interrupt,
+ SA_SHIRQ | SA_INTERRUPT, dev->name, dev );
+ if (rc) {
+ printk(KERN_ERR "airo: register interrupt %d failed, rc %d\n", irq, rc );
+ goto err_out_unregister;
+ }
+ if (!is_pcmcia) {
+ if (!request_region( dev->base_addr, 64, dev->name )) {
+ rc = -EBUSY;
+ goto err_out_irq;
+ }
+ }
+
+ if ( setup_card( ai, dev->dev_addr, &ai->config) != SUCCESS ) {
+ printk( KERN_ERR "airo: MAC could not be enabled\n" );
+ rc = -EIO;
+ goto err_out_res;
+ }
+
+ printk( KERN_INFO "airo: MAC enabled %s %x:%x:%x:%x:%x:%x\n",
+ dev->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] );
+
+ /* Allocate the transmit buffers */
+ for( i = 0; i < MAX_FIDS; i++ )
+ ai->fids[i] = transmit_allocate( ai, 2312 );
+
+ setup_proc_entry( dev, dev->priv ); /* XXX check for failure */
+ netif_start_queue(dev);
+ SET_MODULE_OWNER(dev);
+ return dev;
+
+err_out_res:
+ if (!is_pcmcia)
+ release_region( dev->base_addr, 64 );
+err_out_irq:
+ free_irq(dev->irq, dev);
+err_out_unregister:
+ unregister_netdev(dev);
+err_out_unlink:
+ del_airo_dev(dev);
+err_out_free:
+ kfree(dev);
+ return NULL;
+}
+
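+/*
+ * Busy-wait (up to 10000 * 10us) for the command-busy bit to clear,
+ * periodically acking a stuck busy condition; returns nonzero if the
+ * bit cleared before the loop expired.
+ */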
+int waitbusy (struct airo_info *ai) {
+ int delay = 0;
+	while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
+ udelay (10);
+ if (++delay % 20)
+ OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
+ }
+ return delay < 10000;
+}
+
+int reset_airo_card( struct net_device *dev ) {
+	int i;
+	unsigned long flags;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+
+ disable_MAC(ai);
+ spin_lock_irqsave(&ai->cmd_lock, flags);
+ waitbusy (ai);
+ OUT4500(ai,COMMAND,CMD_SOFTRESET);
+ set_current_state (TASK_UNINTERRUPTIBLE);
+ schedule_timeout (HZ/5);
+ waitbusy (ai);
+ set_current_state (TASK_UNINTERRUPTIBLE);
+ schedule_timeout (HZ/5);
+ spin_unlock_irqrestore(&ai->cmd_lock, flags);
+ if ( setup_card(ai, dev->dev_addr, &(ai)->config) != SUCCESS ) {
+ printk( KERN_ERR "airo: MAC could not be enabled\n" );
+ return -1;
+ } else {
+ printk( KERN_INFO "airo: MAC enabled %s %x:%x:%x:%x:%x:%x\n",
+ dev->name,
+ dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5]
+ );
+ /* Allocate the transmit buffers */
+ for( i = 0; i < MAX_FIDS; i++ )
+ ai->fids[i] = transmit_allocate( ai, 2312 );
+ }
+ enable_interrupts( ai );
+ netif_wake_queue(dev);
+ return 0;
+}
+
+int wll_header_parse(struct sk_buff *skb, unsigned char *haddr)
+{
+ memcpy(haddr, skb->mac.raw + 10, ETH_ALEN);
+ return ETH_ALEN;
+}
+
+static void airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) {
+ struct net_device *dev = (struct net_device *)dev_id;
+ u16 status;
+ u16 fid;
+ struct airo_info *apriv = (struct airo_info *)dev->priv;
+ u16 savedInterrupts;
+
+
+ if (!netif_device_present(dev))
+ return;
+
+ status = IN4500( apriv, EVSTAT );
+ if ( !status || status == 0xffff ) return;
+
+ if ( status & EV_AWAKE ) {
+ OUT4500( apriv, EVACK, EV_AWAKE );
+ OUT4500( apriv, EVACK, EV_AWAKE );
+ }
+
+ savedInterrupts = IN4500( apriv, EVINTEN );
+ OUT4500( apriv, EVINTEN, 0 );
+
+ if ( status & EV_LINK ) {
+ /* The link status has changed, if you want to put a
+ monitor hook in, do it here. (Remember that
+ interrupts are still disabled!)
+ */
+ u16 newStatus = IN4500(apriv, LINKSTAT);
+ /* Here is what newStatus means: */
+#define NOBEACON 0x8000 /* Loss of sync - missed beacons */
+#define MAXRETRIES 0x8001 /* Loss of sync - max retries */
+#define MAXARL 0x8002 /* Loss of sync - average retry level exceeded*/
+#define FORCELOSS 0x8003 /* Loss of sync - host request */
+#define TSFSYNC 0x8004 /* Loss of sync - TSF synchronization */
+#define DEAUTH 0x8100 /* Deauthentication (low byte is reason code) */
+#define DISASS 0x8200 /* Disassociation (low byte is reason code) */
+#define ASSFAIL 0x8400 /* Association failure (low byte is reason
+ code) */
+#define AUTHFAIL 0x0300 /* Authentication failure (low byte is reason
+ code) */
+#define ASSOCIATED 0x0400 /* Associated */
+#define RC_RESERVED 0 /* Reserved return code */
+#define RC_NOREASON 1 /* Unspecified reason */
+#define RC_AUTHINV 2 /* Previous authentication invalid */
+#define RC_DEAUTH 3 /* Deauthenticated because sending station is
+ leaving */
+#define RC_NOACT 4 /* Disassociated due to inactivity */
+#define RC_MAXLOAD 5 /* Disassociated because AP is unable to handle
+ all currently associated stations */
+#define RC_BADCLASS2 6 /* Class 2 frame received from
+ non-Authenticated station */
+#define RC_BADCLASS3 7 /* Class 3 frame received from
+ non-Associated station */
+#define RC_STATLEAVE 8 /* Disassociated because sending station is
+ leaving BSS */
+#define RC_NOAUTH 9 /* Station requesting (Re)Association is not
+ Authenticated with the responding station */
+ if (newStatus != ASSOCIATED) {
+ if (auto_wep && !timer_pending(&apriv->timer)) {
+ apriv->timer.expires = RUN_AT(HZ*3);
+ add_timer(&apriv->timer);
+ }
+ }
+ }
+
+ /* Check to see if there is something to receive */
+ if ( status & EV_RX ) {
+ struct sk_buff *skb = NULL;
+		unsigned long flags;
+ u16 fc, len, hdrlen = 0;
+ struct {
+ u16 status, len;
+ u8 rssi[2];
+ } hdr;
+
+ fid = IN4500( apriv, RXFID );
+
+ /* Get the packet length */
+ spin_lock_irqsave(&apriv->bap0_lock, flags);
+ if (dev->type == ARPHRD_IEEE80211) {
+ bap_setup (apriv, fid, 4, BAP0);
+ bap_read (apriv, (u16*)&hdr, sizeof(hdr), BAP0);
+ /* Bad CRC. Ignore packet */
+ if (le16_to_cpu(hdr.status) == 2) {
+ apriv->stats.rx_crc_errors++;
+ apriv->stats.rx_errors++;
+ hdr.len = 0;
+ }
+ } else {
+ bap_setup (apriv, fid, 6, BAP0);
+ bap_read (apriv, (u16*)&hdr.len, 4, BAP0);
+ }
+ len = le16_to_cpu(hdr.len);
+
+ if (len > 2312) {
+ apriv->stats.rx_length_errors++;
+ apriv->stats.rx_errors++;
+ printk( KERN_ERR
+ "airo: Bad size %d\n", len );
+ len = 0;
+ }
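+		/* Header length to copy along with the payload: in 802.11
+		 * (rfmon) mode a frame with both ToDS and FromDS set carries
+		 * four addresses (30-byte header), others use 24 bytes; in
+		 * Ethernet mode only the 12 bytes of dst/src addresses
+		 * precede the data. */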
+ if (len) {
+ if (dev->type == ARPHRD_IEEE80211) {
+ bap_setup (apriv, fid, 0x14, BAP0);
+ bap_read (apriv, (u16*)&fc, sizeof(fc), BAP0);
+ if ((le16_to_cpu(fc) & 0x300) == 0x300)
+ hdrlen = 30;
+ else
+ hdrlen = 24;
+ } else
+ hdrlen = 12;
+
+ skb = dev_alloc_skb( len + hdrlen + 2 );
+ if ( !skb ) {
+ apriv->stats.rx_dropped++;
+ len = 0;
+ }
+ }
+ if (len) {
+ u16 *buffer;
+ buffer = (u16*)skb_put (skb, len + hdrlen);
+ if (dev->type == ARPHRD_IEEE80211) {
+ u16 gap, tmpbuf[4];
+ buffer[0] = fc;
+ bap_read (apriv, buffer + 1, hdrlen - 2, BAP0);
+ if (hdrlen == 24)
+ bap_read (apriv, tmpbuf, 6, BAP0);
+
+ bap_read (apriv, &gap, sizeof(gap), BAP0);
+ gap = le16_to_cpu(gap);
+ if (gap && gap <= 8)
+ bap_read (apriv, tmpbuf, gap, BAP0);
+
+ bap_read (apriv, buffer + hdrlen/2, len, BAP0);
+ } else {
+ bap_setup (apriv, fid, 0x38, BAP0);
+ bap_read (apriv, buffer,len + hdrlen,BAP0);
+ }
+#ifdef WIRELESS_SPY
+ if (apriv->spy_number > 0) {
+ int i;
+ char *sa;
+
+ sa = (char*)buffer + ((dev->type == ARPHRD_IEEE80211) ? 10 : 6);
+
+ for (i=0; i<apriv->spy_number; i++)
+ if (!memcmp(sa,apriv->spy_address[i],6))
+ {
+ apriv->spy_stat[i].qual = hdr.rssi[0];
+ apriv->spy_stat[i].level = hdr.rssi[1];
+ apriv->spy_stat[i].noise = 0;
+ apriv->spy_stat[i].updated = 3;
+ break;
+ }
+ }
+#endif /* WIRELESS_SPY */
+ apriv->stats.rx_packets++;
+ apriv->stats.rx_bytes += len + hdrlen;
+ dev->last_rx = jiffies;
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_NONE;
+ if (dev->type == ARPHRD_IEEE80211) {
+ skb->mac.raw = skb->data;
+ skb_pull (skb, hdrlen);
+ skb->pkt_type = PACKET_OTHERHOST;
+ skb->protocol = htons(ETH_P_802_2);
+ } else
+ skb->protocol = eth_type_trans( skb, dev );
+
+ netif_rx( skb );
+ }
+ spin_unlock_irqrestore(&apriv->bap0_lock, flags);
+ }
+
+ /* Check to see if a packet has been transmitted */
+ if ( status & ( EV_TX|EV_TXEXC ) ) {
+ int i;
+ int len = 0;
+ int full = 1;
+ int index = -1;
+
+ fid = IN4500(apriv, TXCOMPLFID);
+
+ for( i = 0; i < MAX_FIDS; i++ ) {
+ if (!(apriv->fids[i] & 0xffff0000)) full = 0;
+ if ( ( apriv->fids[i] & 0xffff ) == fid ) {
+ len = apriv->fids[i] >> 16;
+ index = i;
+ /* Set up to be used again */
+ apriv->fids[i] &= 0xffff;
+ }
+ }
+ if (full) netif_wake_queue(dev);
+ if (index==-1) {
+ printk( KERN_ERR
+ "airo: Unallocated FID was used to xmit\n" );
+ }
+ if ( status & EV_TX ) {
+ apriv->stats.tx_packets++;
+ if(index!=-1)
+ apriv->stats.tx_bytes += len;
+ } else {
+ apriv->stats.tx_errors++;
+ }
+ }
+ if ( status & ~STATUS_INTS )
+ printk( KERN_WARNING
+ "airo: Got weird status %x\n",
+ status & ~STATUS_INTS );
+ OUT4500( apriv, EVACK, status & STATUS_INTS );
+ OUT4500( apriv, EVINTEN, savedInterrupts );
+
+ /* done.. */
+ return;
+}
+
+/*
+ * Routines to talk to the card
+ */
+
+/*
+ * This was originally written for the 4500, hence the name.
+ * NOTE: if used in 8-bit mode with SMP, bad things will happen!
+ * Why would someone do 8-bit I/O on an SMP machine?!?
+ */
+static void OUT4500( struct airo_info *ai, u16 reg, u16 val ) {
+ if ( !do8bitIO )
+ outw( val, ai->dev->base_addr + reg );
+ else {
+ outb( val & 0xff, ai->dev->base_addr + reg );
+ outb( val >> 8, ai->dev->base_addr + reg + 1 );
+ }
+}
+
+static u16 IN4500( struct airo_info *ai, u16 reg ) {
+ unsigned short rc;
+
+ if ( !do8bitIO )
+ rc = inw( ai->dev->base_addr + reg );
+ else {
+ rc = inb( ai->dev->base_addr + reg );
+ rc += ((int)inb( ai->dev->base_addr + reg + 1 )) << 8;
+ }
+ return rc;
+}
+
+static int enable_MAC( struct airo_info *ai, Resp *rsp ) {
+ Cmd cmd;
+
+ if (ai->flags&FLAG_RADIO_OFF) return SUCCESS;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = MAC_ENABLE;
+ return issuecommand(ai, &cmd, rsp);
+}
+
+static void disable_MAC( struct airo_info *ai ) {
+ Cmd cmd;
+ Resp rsp;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = MAC_DISABLE; // disable in case already enabled
+ issuecommand(ai, &cmd, &rsp);
+}
+
+static void enable_interrupts( struct airo_info *ai ) {
+ /* Reset the status register */
+ u16 status = IN4500( ai, EVSTAT );
+ OUT4500( ai, EVACK, status );
+ /* Enable the interrupts */
+ OUT4500( ai, EVINTEN, STATUS_INTS );
+	/* Note there is a race condition between the last two lines that
+	   I don't know how to get rid of right now... */
+}
+
+static void disable_interrupts( struct airo_info *ai ) {
+ OUT4500( ai, EVINTEN, 0 );
+}
+
+static u16 setup_card(struct airo_info *ai, u8 *mac,
+ ConfigRid *config)
+{
+ Cmd cmd;
+ Resp rsp;
+ ConfigRid cfg;
+ int status;
+ int i;
+ SsidRid mySsid;
+ u16 lastindex;
+ WepKeyRid wkr;
+ int rc;
+
+ memset( &mySsid, 0, sizeof( mySsid ) );
+
+ /* The NOP is the first step in getting the card going */
+ cmd.cmd = NOP;
+ cmd.parm0 = cmd.parm1 = cmd.parm2 = 0;
+ if ( issuecommand( ai, &cmd, &rsp ) != SUCCESS ) {
+ return ERROR;
+ }
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = MAC_DISABLE; // disable in case already enabled
+ if ( issuecommand( ai, &cmd, &rsp ) != SUCCESS ) {
+ return ERROR;
+ }
+
+ // Let's figure out if we need to use the AUX port
+ cmd.cmd = CMD_ENABLEAUX;
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
+ printk(KERN_ERR "airo: Error checking for AUX port\n");
+ return ERROR;
+ }
+ if (!aux_bap || rsp.status & 0xff00) {
+ ai->bap_read = fast_bap_read;
+ printk(KERN_DEBUG "airo: Doing fast bap_reads\n");
+ } else {
+ ai->bap_read = aux_bap_read;
+ printk(KERN_DEBUG "airo: Doing AUX bap_reads\n");
+ }
+ if ( config->len ) {
+ cfg = *config;
+ } else {
+ // general configuration (read/modify/write)
+ status = readConfigRid(ai, &cfg);
+ if ( status != SUCCESS ) return ERROR;
+ cfg.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
+
+ /* Save off the MAC */
+ for( i = 0; i < 6; i++ ) {
+ mac[i] = cfg.macAddr[i];
+ }
+
+ /* Check to see if there are any insmod configured
+ rates to add */
+ if ( rates ) {
+ int i = 0;
+ if ( rates[0] ) memset(cfg.rates,0,sizeof(cfg.rates));
+ for( i = 0; i < 8 && rates[i]; i++ ) {
+ cfg.rates[i] = rates[i];
+ }
+ }
+ if ( basic_rate > 0 ) {
+ int i;
+ for( i = 0; i < 8; i++ ) {
+			if ( cfg.rates[i] == basic_rate ||
+			     !cfg.rates[i] ) {
+ cfg.rates[i] = basic_rate | 0x80;
+ break;
+ }
+ }
+ }
+ cfg.authType = ai->authtype;
+ *config = cfg;
+ }
+
+ /* Setup the SSIDs if present */
+ if ( ssids[0] ) {
+ int i = 0;
+ for( i = 0; i < 3 && ssids[i]; i++ ) {
+ mySsid.ssids[i].len = strlen(ssids[i]);
+ if ( mySsid.ssids[i].len > 32 )
+ mySsid.ssids[i].len = 32;
+ memcpy(mySsid.ssids[i].ssid, ssids[i],
+ mySsid.ssids[i].len);
+ }
+ }
+
+ status = writeConfigRid(ai, &cfg);
+ if ( status != SUCCESS ) return ERROR;
+
+ /* Set up the SSID list */
+ status = writeSsidRid(ai, &mySsid);
+ if ( status != SUCCESS ) return ERROR;
+
+ /* Grab the initial wep key, we gotta save it for auto_wep */
+ rc = readWepKeyRid(ai, &wkr, 1);
+ if (rc == SUCCESS) do {
+ lastindex = wkr.kindex;
+ if (wkr.kindex == 0xffff) {
+ ai->defindex = wkr.mac[0];
+ }
+ rc = readWepKeyRid(ai, &wkr, 0);
+ } while(lastindex != wkr.kindex);
+
+ if (auto_wep && !timer_pending(&ai->timer)) {
+ ai->timer.expires = RUN_AT(HZ*3);
+ add_timer(&ai->timer);
+ }
+ return SUCCESS;
+}
+
+static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
+	// I'm really paranoid about letting it run forever!
+ int max_tries = 600000;
+ int rc = SUCCESS;
+	unsigned long flags;
+
+ spin_lock_irqsave(&ai->cmd_lock, flags);
+ OUT4500(ai, PARAM0, pCmd->parm0);
+ OUT4500(ai, PARAM1, pCmd->parm1);
+ OUT4500(ai, PARAM2, pCmd->parm2);
+ OUT4500(ai, COMMAND, pCmd->cmd);
+ while ( max_tries-- &&
+ (IN4500(ai, EVSTAT) & EV_CMD) == 0) {
+ if ( IN4500(ai, COMMAND) == pCmd->cmd) {
+ // PC4500 didn't notice command, try again
+ OUT4500(ai, COMMAND, pCmd->cmd);
+ }
+ }
+ if ( max_tries == -1 ) {
+ printk( KERN_ERR
+		       "airo: Max tries exceeded when issuing command\n" );
+ rc = ERROR;
+ goto done;
+ }
+ // command completed
+ pRsp->status = IN4500(ai, STATUS);
+ pRsp->rsp0 = IN4500(ai, RESP0);
+ pRsp->rsp1 = IN4500(ai, RESP1);
+ pRsp->rsp2 = IN4500(ai, RESP2);
+
+ // clear stuck command busy if necessary
+ if (IN4500(ai, COMMAND) & COMMAND_BUSY) {
+ OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
+ }
+ // acknowledge processing the status/response
+ OUT4500(ai, EVACK, EV_CMD);
+ done:
+ spin_unlock_irqrestore(&ai->cmd_lock, flags);
+ return rc;
+}
+
+/* Sets up the BAP to start exchanging data.  whichbap should
+ * be one of the BAP0 or BAP1 defines. Locks should be held before
+ * calling! */
+static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
+{
+ int timeout = 50;
+ int max_tries = 3;
+
+ OUT4500(ai, SELECT0+whichbap, rid);
+ OUT4500(ai, OFFSET0+whichbap, offset);
+ while (1) {
+ int status = IN4500(ai, OFFSET0+whichbap);
+ if (status & BAP_BUSY) {
+			/* This isn't really a timeout, but it's kind of
+			   close */
+ if (timeout--) {
+ continue;
+ }
+ } else if ( status & BAP_ERR ) {
+ /* invalid rid or offset */
+ printk( KERN_ERR "airo: BAP error %x %d\n",
+ status, whichbap );
+ return ERROR;
+ } else if (status & BAP_DONE) { // success
+ return SUCCESS;
+ }
+ if ( !(max_tries--) ) {
+ printk( KERN_ERR
+ "airo: BAP setup error too many retries\n" );
+ return ERROR;
+ }
+ // -- PC4500 missed it, try again
+ OUT4500(ai, SELECT0+whichbap, rid);
+ OUT4500(ai, OFFSET0+whichbap, offset);
+ timeout = 50;
+ }
+}
+
+/* Should only be called by aux_bap_read. This aux function and the
+   following use concepts not documented in the developer's guide; I
+   got them from a patch given to me by Aironet. */
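+/*
+ * Each AUX page appears to begin with a small header: the first word
+ * read is the number of the next page in the chain and the low byte of
+ * the second word is the usable length of the page, which is how
+ * aux_bap_read below walks a chain of pages.
+ */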
+static u16 aux_setup(struct airo_info *ai, u16 page,
+ u16 offset, u16 *len)
+{
+ u16 next;
+
+ OUT4500(ai, AUXPAGE, page);
+ OUT4500(ai, AUXOFF, 0);
+ next = IN4500(ai, AUXDATA);
+ *len = IN4500(ai, AUXDATA)&0xff;
+ if (offset != 4) OUT4500(ai, AUXOFF, offset);
+ return next;
+}
+
+/* requires call to bap_setup() first */
+static int aux_bap_read(struct airo_info *ai, u16 *pu16Dst,
+ int bytelen, int whichbap)
+{
+ u16 len;
+ u16 page;
+ u16 offset;
+ u16 next;
+ int words;
+ int i;
+	unsigned long flags;
+
+ spin_lock_irqsave(&ai->aux_lock, flags);
+ page = IN4500(ai, SWS0+whichbap);
+ offset = IN4500(ai, SWS2+whichbap);
+ next = aux_setup(ai, page, offset, &len);
+ words = (bytelen+1)>>1;
+
+ for (i=0; i<words;) {
+ int count;
+ count = (len>>1) < (words-i) ? (len>>1) : (words-i);
+ if ( !do8bitIO )
+ insw( ai->dev->base_addr+DATA0+whichbap,
+ pu16Dst+i,count );
+ else
+ insb( ai->dev->base_addr+DATA0+whichbap,
+ pu16Dst+i, count << 1 );
+ i += count;
+ if (i<words) {
+ next = aux_setup(ai, next, 4, &len);
+ }
+ }
+ spin_unlock_irqrestore(&ai->aux_lock, flags);
+ return SUCCESS;
+}
+
+
+/* requires call to bap_setup() first */
+static int fast_bap_read(struct airo_info *ai, u16 *pu16Dst,
+ int bytelen, int whichbap)
+{
+ bytelen = (bytelen + 1) & (~1); // round up to even value
+ if ( !do8bitIO )
+ insw( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen>>1 );
+ else
+ insb( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen );
+ return SUCCESS;
+}
+
+/* requires call to bap_setup() first */
+static int bap_write(struct airo_info *ai, const u16 *pu16Src,
+ int bytelen, int whichbap)
+{
+ bytelen = (bytelen + 1) & (~1); // round up to even value
+ if ( !do8bitIO )
+ outsw( ai->dev->base_addr+DATA0+whichbap,
+ pu16Src, bytelen>>1 );
+ else
+ outsb( ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen );
+ return SUCCESS;
+}
+
+static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
+{
+ Cmd cmd; /* for issuing commands */
+ Resp rsp; /* response from commands */
+ u16 status;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = accmd;
+ cmd.parm0 = rid;
+ status = issuecommand(ai, &cmd, &rsp);
+ if (status != 0) return status;
+ if ( (rsp.status & 0x7F00) != 0) {
+ return (accmd << 8) + (rsp.rsp0 & 0xFF);
+ }
+ return 0;
+}
+
+/* Note that we are using BAP1, which is also used by transmit, so
+ * we must take the lock. */
+static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len)
+{
+ u16 status;
+	unsigned long flags;
+ int rc = SUCCESS;
+
+ spin_lock_irqsave(&ai->bap1_lock, flags);
+ if ( (status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != SUCCESS) {
+ rc = status;
+ goto done;
+ }
+ if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
+ rc = ERROR;
+ goto done;
+ }
+ // read the rid length field
+ bap_read(ai, pBuf, 2, BAP1);
+ // length for remaining part of rid
+ len = min(len, le16_to_cpu(*(u16*)pBuf)) - 2;
+
+ if ( len <= 2 ) {
+ printk( KERN_ERR
+ "airo: Rid %x has a length of %d which is too short\n",
+ (int)rid,
+ (int)len );
+ rc = ERROR;
+ goto done;
+ }
+ // read remainder of the rid
+ if (bap_setup(ai, rid, 2, BAP1) != SUCCESS) {
+ rc = ERROR;
+ goto done;
+ }
+ rc = bap_read(ai, ((u16*)pBuf)+1, len, BAP1);
+ done:
+ spin_unlock_irqrestore(&ai->bap1_lock, flags);
+ return rc;
+}
+
+/* Note that we are using BAP1, which is also used by transmit, so
+ * we must take the lock here as well. */
+static int PC4500_writerid(struct airo_info *ai, u16 rid,
+ const void *pBuf, int len)
+{
+ u16 status;
+	unsigned long flags;
+ int rc = SUCCESS;
+
+ spin_lock_irqsave(&ai->bap1_lock, flags);
+ // --- first access so that we can write the rid data
+ if ( (status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
+ rc = status;
+ goto done;
+ }
+ // --- now write the rid data
+ if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
+ rc = ERROR;
+ goto done;
+ }
+ bap_write(ai, pBuf, len, BAP1);
+ // ---now commit the rid data
+ rc = PC4500_accessrid(ai, rid, 0x100|CMD_ACCESS);
+ done:
+ spin_unlock_irqrestore(&ai->bap1_lock, flags);
+ return rc;
+}
+
+/* Allocates a FID to be used for transmitting packets. We only use
+ one for now. */
+static u16 transmit_allocate(struct airo_info *ai, int lenPayload)
+{
+ Cmd cmd;
+ Resp rsp;
+ u16 txFid;
+ u16 txControl;
+	unsigned long flags;
+
+ cmd.cmd = CMD_ALLOCATETX;
+ cmd.parm0 = lenPayload;
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return 0;
+ if ( (rsp.status & 0xFF00) != 0) return 0;
+ /* wait for the allocate event/indication
+ * It makes me kind of nervous that this can just sit here and spin,
+ * but in practice it only loops like four times. */
+ while ( (IN4500(ai, EVSTAT) & EV_ALLOC) == 0) ;
+ // get the allocated fid and acknowledge
+ txFid = IN4500(ai, TXALLOCFID);
+ OUT4500(ai, EVACK, EV_ALLOC);
+
+ /* The CARD is pretty cool since it converts the ethernet packet
+ * into 802.11. Also note that we don't release the FID since we
+ * will be using the same one over and over again. */
+ /* We only have to setup the control once since we are not
+ * releasing the fid. */
+ txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_3
+ | TXCTL_ETHERNET | TXCTL_NORELEASE);
+ spin_lock_irqsave(&ai->bap1_lock, flags);
+ if (bap_setup(ai, txFid, 0x0008, BAP1) != SUCCESS) {
+ spin_unlock_irqrestore(&ai->bap1_lock, flags);
+ return ERROR;
+ }
+ bap_write(ai, &txControl, sizeof(txControl), BAP1);
+ spin_unlock_irqrestore(&ai->bap1_lock, flags);
+
+ return txFid;
+}
+
+/* In general BAP1 is dedicated to transmitting packets. However,
+ since we need a BAP when accessing RIDs, we also use BAP1 for that.
+ Make sure the BAP1 spinlock is held when this is called. */
+static int transmit_802_3_packet(struct airo_info *ai, u16 txFid,
+ char *pPacket, int len)
+{
+ u16 payloadLen;
+ Cmd cmd;
+ Resp rsp;
+
+ if (len < 12) {
+		printk( KERN_WARNING "airo: Short packet %d\n", len );
+ return ERROR;
+ }
+
+ // packet is destination[6], source[6], payload[len-12]
+ // write the payload length and dst/src/payload
+ if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR;
+ /* The hardware addresses aren't counted as part of the payload, so
+ * we have to subtract the 12 bytes for the addresses off */
+ payloadLen = cpu_to_le16(len-12);
+ bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
+ bap_write(ai, (const u16*)pPacket, len, BAP1);
+ // issue the transmit command
+ memset( &cmd, 0, sizeof( cmd ) );
+ cmd.cmd = CMD_TRANSMIT;
+ cmd.parm0 = txFid;
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
+ if ( (rsp.status & 0xFF00) != 0) return ERROR;
+ return SUCCESS;
+}
+
+/*
+ * These are the proc_fs routines. They are a bit messier than I would
+ * like! Feel free to clean them up!
+ */
+
+/*
+ * Unfortunately, sometime between 2.0 and 2.2 the proc interface changed...
+ * I don't know exactly when it was...
+ * I'm guessing it was somewhere around 0x20155... Anybody know?
+ */
+
+static ssize_t proc_read( struct file *file,
+ char *buffer,
+ size_t len,
+ loff_t *offset);
+
+static ssize_t proc_write( struct file *file,
+ const char *buffer,
+ size_t len,
+ loff_t *offset );
+static int proc_close( struct inode *inode, struct file *file );
+
+static int proc_stats_open( struct inode *inode, struct file *file );
+static int proc_statsdelta_open( struct inode *inode, struct file *file );
+static int proc_status_open( struct inode *inode, struct file *file );
+static int proc_SSID_open( struct inode *inode, struct file *file );
+static int proc_APList_open( struct inode *inode, struct file *file );
+static int proc_config_open( struct inode *inode, struct file *file );
+static int proc_wepkey_open( struct inode *inode, struct file *file );
+
+static struct file_operations proc_statsdelta_ops = {
+ read: proc_read,
+ open: proc_statsdelta_open,
+ release: proc_close
+};
+
+static struct file_operations proc_stats_ops = {
+ read: proc_read,
+ open: proc_stats_open,
+ release: proc_close
+};
+
+static struct file_operations proc_status_ops = {
+ read: proc_read,
+ open: proc_status_open,
+ release: proc_close
+};
+
+static struct file_operations proc_SSID_ops = {
+ read: proc_read,
+ write: proc_write,
+ open: proc_SSID_open,
+ release: proc_close
+};
+
+static struct file_operations proc_APList_ops = {
+ read: proc_read,
+ write: proc_write,
+ open: proc_APList_open,
+ release: proc_close
+};
+
+static struct file_operations proc_config_ops = {
+ read: proc_read,
+ write: proc_write,
+ open: proc_config_open,
+ release: proc_close
+};
+
+static struct file_operations proc_wepkey_ops = {
+ read: proc_read,
+ write: proc_write,
+ open: proc_wepkey_open,
+ release: proc_close
+};
+
+static struct proc_dir_entry *airo_entry = 0;
+
+struct proc_data {
+ int release_buffer;
+ int readlen;
+ char *rbuffer;
+ int writelen;
+ int maxwritelen;
+ char *wbuffer;
+ void (*on_close) (struct inode *, struct file *);
+};
+
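+/*
+ * The entries created below end up (via the "aironet" directory that
+ * airo_init_module creates under proc_root_driver) at roughly
+ * /proc/driver/aironet/<ifname>/{Status,Config,SSID,APList,WepKey,
+ * Stats,StatsDelta}.
+ */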
+static int setup_proc_entry( struct net_device *dev,
+ struct airo_info *apriv ) {
+ struct proc_dir_entry *entry;
+ /* First setup the device directory */
+ apriv->proc_entry = create_proc_entry(dev->name,
+ S_IFDIR|airo_perm,
+ airo_entry);
+ apriv->proc_entry->uid = proc_uid;
+ apriv->proc_entry->gid = proc_gid;
+
+ /* Setup the StatsDelta */
+ entry = create_proc_entry("StatsDelta",
+ S_IFREG | (S_IRUGO&proc_perm),
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+/* This is what was needed right up to the last few versions
+ of 2.3:
+ entry->ops = &proc_inode_statsdelta_ops;
+*/
+ entry->proc_fops = &proc_statsdelta_ops;
+
+ /* Setup the Stats */
+ entry = create_proc_entry("Stats",
+ S_IFREG | (S_IRUGO&proc_perm),
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->proc_fops = &proc_stats_ops;
+
+ /* Setup the Status */
+ entry = create_proc_entry("Status",
+ S_IFREG | (S_IRUGO&proc_perm),
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->proc_fops = &proc_status_ops;
+
+ /* Setup the Config */
+ entry = create_proc_entry("Config",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->proc_fops = &proc_config_ops;
+
+ /* Setup the SSID */
+ entry = create_proc_entry("SSID",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->proc_fops = &proc_SSID_ops;
+
+ /* Setup the APList */
+ entry = create_proc_entry("APList",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->proc_fops = &proc_APList_ops;
+
+ /* Setup the WepKey */
+ entry = create_proc_entry("WepKey",
+ S_IFREG | proc_perm,
+ apriv->proc_entry);
+ entry->uid = proc_uid;
+ entry->gid = proc_gid;
+ entry->data = dev;
+ entry->proc_fops = &proc_wepkey_ops;
+
+ return 0;
+}
+
+static int takedown_proc_entry( struct net_device *dev,
+ struct airo_info *apriv ) {
+ if ( !apriv->proc_entry->namelen ) return 0;
+ remove_proc_entry("Stats",apriv->proc_entry);
+ remove_proc_entry("StatsDelta",apriv->proc_entry);
+ remove_proc_entry("Status",apriv->proc_entry);
+ remove_proc_entry("Config",apriv->proc_entry);
+ remove_proc_entry("SSID",apriv->proc_entry);
+ remove_proc_entry("APList",apriv->proc_entry);
+ remove_proc_entry("WepKey",apriv->proc_entry);
+ remove_proc_entry(dev->name,airo_entry);
+ return 0;
+}
+
+/*
+ * What we want from the proc_fs is to be able to efficiently read
+ * and write the configuration. To do this, we want to read the
+ * configuration when the file is opened and write it when the file is
+ * closed. So basically we allocate a read buffer at open and fill it
+ * with data, and allocate a write buffer at open whose contents are
+ * parsed and committed when the file is closed.
+ */
+
+/*
+ * The read routine is generic; it relies on the preallocated rbuffer
+ * to supply the data.
+ */
+static ssize_t proc_read( struct file *file,
+ char *buffer,
+ size_t len,
+ loff_t *offset )
+{
+ int i;
+ int pos;
+ struct proc_data *priv = (struct proc_data*)file->private_data;
+
+ if( !priv->rbuffer ) return -EINVAL;
+
+ pos = *offset;
+ for( i = 0; i+pos < priv->readlen && i < len; i++ ) {
+ if (put_user( priv->rbuffer[i+pos], buffer+i ))
+ return -EFAULT;
+ }
+ *offset += i;
+ return i;
+}
+
+/*
+ * The write routine is generic; it stores the data in the preallocated
+ * wbuffer, which is processed by the on_close handler.
+ */
+static ssize_t proc_write( struct file *file,
+ const char *buffer,
+ size_t len,
+ loff_t *offset )
+{
+ int i;
+ int pos;
+ struct proc_data *priv = (struct proc_data*)file->private_data;
+
+ if ( !priv->wbuffer ) {
+ return -EINVAL;
+ }
+
+ pos = *offset;
+
+ for( i = 0; i + pos < priv->maxwritelen &&
+ i < len; i++ ) {
+ if (get_user( priv->wbuffer[i+pos], buffer + i ))
+ return -EFAULT;
+ }
+	if ( i+pos > priv->writelen ) priv->writelen = i+pos;
+ *offset += i;
+ return i;
+}
+
+static int proc_status_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *apriv = (struct airo_info *)dev->priv;
+ CapabilityRid cap_rid;
+ StatusRid status_rid;
+
+ MOD_INC_USE_COUNT;
+
+ dp = inode->u.generic_ip;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+
+ readStatusRid(apriv, &status_rid);
+ readCapabilityRid(apriv, &cap_rid);
+
+ sprintf( data->rbuffer, "Mode: %x\n"
+ "Signal Strength: %d\n"
+ "Signal Quality: %d\n"
+ "SSID: %-.*s\n"
+ "AP: %-.16s\n"
+ "Freq: %d\n"
+		 "BitRate: %d Mbps\n"
+ "Driver Version: %s\n"
+ "Device: %s\nManufacturer: %s\nFirmware Version: %s\n"
+ "Radio type: %x\nCountry: %x\nHardware Version: %x\n"
+ "Software Version: %x\nSoftware Subversion: %x\n"
+ "Boot block version: %x\n",
+ (int)status_rid.mode,
+ (int)status_rid.normalizedSignalStrength,
+ (int)status_rid.signalQuality,
+ (int)status_rid.SSIDlen,
+ status_rid.SSID,
+ status_rid.apName,
+ (int)status_rid.channel,
+ (int)status_rid.currentXmitRate/2,
+ version,
+ cap_rid.prodName,
+ cap_rid.manName,
+ cap_rid.prodVer,
+ cap_rid.radioType,
+ cap_rid.country,
+ cap_rid.hardVer,
+ (int)cap_rid.softVer,
+ (int)cap_rid.softSubVer,
+ (int)cap_rid.bootBlockVer );
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static int proc_stats_rid_open(struct inode*, struct file*, u16);
+static int proc_statsdelta_open( struct inode *inode,
+ struct file *file ) {
+ if (file->f_mode&FMODE_WRITE) {
+ return proc_stats_rid_open(inode, file, RID_STATSDELTACLEAR);
+ }
+ return proc_stats_rid_open(inode, file, RID_STATSDELTA);
+}
+
+static int proc_stats_open( struct inode *inode, struct file *file ) {
+ return proc_stats_rid_open(inode, file, RID_STATS);
+}
+
+static int proc_stats_rid_open( struct inode *inode,
+ struct file *file,
+ u16 rid ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *apriv = (struct airo_info *)dev->priv;
+ StatsRid stats;
+ int i, j;
+ int *vals = stats.vals;
+ MOD_INC_USE_COUNT;
+
+
+ dp = inode->u.generic_ip;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+
+ readStatsRid(apriv, &stats, rid);
+
+ j = 0;
+ for(i=0; (int)statsLabels[i]!=-1 &&
+ i*4<stats.len; i++){
+ if (!statsLabels[i]) continue;
+ if (j+strlen(statsLabels[i])+16>4096) {
+ printk(KERN_WARNING
+			       "airo: Potentially disastrous buffer overflow averted!\n");
+ break;
+ }
+ j+=sprintf(data->rbuffer+j, "%s: %d\n", statsLabels[i], vals[i]);
+ }
+ if (i*4>=stats.len){
+ printk(KERN_WARNING
+ "airo: Got a short rid\n");
+ }
+ data->readlen = j;
+ return 0;
+}
+
+static int get_dec_u16( char *buffer, int *start, int limit ) {
+ u16 value;
+ int valid = 0;
+	for( value = 0; *start < limit &&
+			buffer[*start] >= '0' &&
+			buffer[*start] <= '9'; (*start)++ ) {
+ valid = 1;
+ value *= 10;
+ value += buffer[*start] - '0';
+ }
+ if ( !valid ) return -1;
+ return value;
+}
+
+static void checkThrottle(ConfigRid *config) {
+ int i;
+/* Old hardware had a limit on encryption speed */
+ if (config->authType != AUTH_OPEN && maxencrypt) {
+ for(i=0; i<8; i++) {
+ if (config->rates[i] > maxencrypt) {
+ config->rates[i] = 0;
+ }
+ }
+ }
+}
+
+static void proc_config_on_close( struct inode *inode, struct file *file ) {
+ struct proc_data *data = file->private_data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ ConfigRid config;
+ Resp rsp;
+ char *line;
+ int need_reset = 0;
+
+ if ( !data->writelen ) return;
+ dp = (struct proc_dir_entry *) inode->u.generic_ip;
+
+ disable_MAC(ai);
+ readConfigRid(ai, &config);
+
+ line = data->wbuffer;
+ while( line[0] ) {
+/*** Mode processing */
+ if ( !strncmp( line, "Mode: ", 6 ) ) {
+ line += 6;
+ config.rmode &= 0xfe00;
+ if ( line[0] == 'a' ) {
+ config.opmode = 0;
+ } else {
+ config.opmode = 1;
+ if ( line[0] == 'r' )
+ config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
+ else if ( line[0] == 'y' )
+ config.rmode |= RXMODE_RFMON_ANYBSS | RXMODE_DISABLE_802_3_HEADER;
+ }
+ if (config.rmode & RXMODE_DISABLE_802_3_HEADER) {
+ dev->type = ARPHRD_IEEE80211;
+ dev->hard_header_parse = wll_header_parse;
+ } else if (dev->type == ARPHRD_IEEE80211) {
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_parse = ai->header_parse;
+ need_reset = 1;
+ }
+ }
+
+/*** Radio status */
+ else if (!strncmp(line,"Radio: ", 7)) {
+ line += 7;
+ if (!strncmp(line,"off",3)) {
+ ai->flags |= FLAG_RADIO_OFF;
+ } else {
+ ai->flags &= ~FLAG_RADIO_OFF;
+ }
+ }
+/*** NodeName processing */
+ else if ( !strncmp( line, "NodeName: ", 10 ) ) {
+ int j;
+
+ line += 10;
+ memset( config.nodeName, 0, 16 );
+/* Do the name, assume a space between the mode and node name */
+ for( j = 0; j < 16 && line[j] != '\n'; j++ ) {
+ config.nodeName[j] = line[j];
+ }
+ }
+
+/*** PowerMode processing */
+ else if ( !strncmp( line, "PowerMode: ", 11 ) ) {
+ line += 11;
+ if ( !strncmp( line, "PSPCAM", 6 ) ) {
+ config.powerSaveMode = POWERSAVE_PSPCAM;
+ } else if ( !strncmp( line, "PSP", 3 ) ) {
+ config.powerSaveMode = POWERSAVE_PSP;
+ } else {
+ config.powerSaveMode = POWERSAVE_CAM;
+ }
+ } else if ( !strncmp( line, "DataRates: ", 11 ) ) {
+ int v, i = 0, k = 0; /* i is index into line,
+ k is index to rates */
+
+ line += 11;
+ while((v = get_dec_u16(line, &i, 3))!=-1) {
+ config.rates[k++] = (u8)v;
+ line += i + 1;
+ i = 0;
+ }
+ } else if ( !strncmp( line, "Channel: ", 9 ) ) {
+ int v, i = 0;
+ line += 9;
+ v = get_dec_u16(line, &i, i+3);
+ if ( v != -1 )
+ config.channelSet = (u16)v;
+ } else if ( !strncmp( line, "XmitPower: ", 11 ) ) {
+ int v, i = 0;
+ line += 11;
+ v = get_dec_u16(line, &i, i+3);
+ if ( v != -1 ) config.txPower = (u16)v;
+ } else if ( !strncmp( line, "WEP: ", 5 ) ) {
+ line += 5;
+ switch( line[0] ) {
+ case 's':
+ config.authType = (u16)AUTH_SHAREDKEY;
+ break;
+ case 'e':
+ config.authType = (u16)AUTH_ENCRYPT;
+ break;
+ default:
+ config.authType = (u16)AUTH_OPEN;
+ break;
+ }
+ } else if ( !strncmp( line, "LongRetryLimit: ", 16 ) ) {
+ int v, i = 0;
+
+ line += 16;
+ v = get_dec_u16(line, &i, 3);
+ v = (v<0) ? 0 : ((v>255) ? 255 : v);
+ config.longRetryLimit = (u16)v;
+ } else if ( !strncmp( line, "ShortRetryLimit: ", 17 ) ) {
+ int v, i = 0;
+
+ line += 17;
+ v = get_dec_u16(line, &i, 3);
+ v = (v<0) ? 0 : ((v>255) ? 255 : v);
+ config.shortRetryLimit = (u16)v;
+ } else if ( !strncmp( line, "RTSThreshold: ", 14 ) ) {
+ int v, i = 0;
+
+ line += 14;
+ v = get_dec_u16(line, &i, 4);
+ v = (v<0) ? 0 : ((v>2312) ? 2312 : v);
+ config.rtsThres = (u16)v;
+ } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) {
+ int v, i = 0;
+
+ line += 16;
+ v = get_dec_u16(line, &i, 5);
+ v = (v<0) ? 0 : v;
+ config.txLifetime = (u16)v;
+ } else if ( !strncmp( line, "RXMSDULifetime: ", 16 ) ) {
+ int v, i = 0;
+
+ line += 16;
+ v = get_dec_u16(line, &i, 5);
+ v = (v<0) ? 0 : v;
+ config.rxLifetime = (u16)v;
+ } else if ( !strncmp( line, "TXDiversity: ", 13 ) ) {
+ config.txDiversity =
+ (line[13]=='l') ? 1 :
+ ((line[13]=='r')? 2: 3);
+ } else if ( !strncmp( line, "RXDiversity: ", 13 ) ) {
+ config.rxDiversity =
+ (line[13]=='l') ? 1 :
+ ((line[13]=='r')? 2: 3);
+ } else if ( !strncmp( line, "FragThreshold: ", 15 ) ) {
+ int v, i = 0;
+
+ line += 15;
+ v = get_dec_u16(line, &i, 4);
+ v = (v<256) ? 256 : ((v>2312) ? 2312 : v);
+			v = v & 0xfffe; /* Make sure it's even */
+ config.fragThresh = (u16)v;
+ } else if (!strncmp(line, "Modulation: ", 12)) {
+ line += 12;
+ switch(*line) {
+ case 'd': config.modulation=MOD_DEFAULT; break;
+ case 'c': config.modulation=MOD_CCK; break;
+ case 'm': config.modulation=MOD_MOK; break;
+ default:
+ printk( KERN_WARNING "airo: Unknown modulation\n" );
+ }
+ } else {
+			printk( KERN_WARNING "airo: Couldn't figure out %s\n", line );
+ }
+ while( line[0] && line[0] != '\n' ) line++;
+ if ( line[0] ) line++;
+ }
+ checkThrottle(&config);
+ ai->config = config;
+ if (need_reset) {
+ APListRid APList_rid;
+ SsidRid SSID_rid;
+
+ readAPListRid(ai, &APList_rid);
+ readSsidRid(ai, &SSID_rid);
+ reset_airo_card(dev);
+ writeSsidRid(ai, &SSID_rid);
+ writeAPListRid(ai, &APList_rid);
+ }
+ writeConfigRid(ai, &config);
+ enable_MAC(ai, &rsp);
+}
+
+static int proc_config_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ ConfigRid config;
+ int i;
+
+ MOD_INC_USE_COUNT;
+
+ dp = (struct proc_dir_entry *) inode->u.generic_ip;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ if ((data->wbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ kfree (data->rbuffer);
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset( data->wbuffer, 0, 2048 );
+ data->maxwritelen = 2048;
+ data->on_close = proc_config_on_close;
+
+ readConfigRid(ai, &config);
+
+ i = sprintf( data->rbuffer,
+ "Mode: %s\n"
+ "Radio: %s\n"
+ "NodeName: %-16s\n"
+ "PowerMode: %s\n"
+ "DataRates: %d %d %d %d %d %d %d %d\n"
+ "Channel: %d\n"
+ "XmitPower: %d\n",
+ config.opmode == 0 ? "adhoc" :
+ config.opmode == 1 ? "ESS" :
+ config.opmode == 2 ? "AP" :
+ config.opmode == 3 ? "AP RPTR" : "Error",
+ ai->flags&FLAG_RADIO_OFF ? "off" : "on",
+ config.nodeName,
+ config.powerSaveMode == 0 ? "CAM" :
+ config.powerSaveMode == 1 ? "PSP" :
+ config.powerSaveMode == 2 ? "PSPCAM" : "Error",
+ (int)config.rates[0],
+ (int)config.rates[1],
+ (int)config.rates[2],
+ (int)config.rates[3],
+ (int)config.rates[4],
+ (int)config.rates[5],
+ (int)config.rates[6],
+ (int)config.rates[7],
+ (int)config.channelSet,
+ (int)config.txPower
+ );
+ sprintf( data->rbuffer + i,
+ "LongRetryLimit: %d\n"
+ "ShortRetryLimit: %d\n"
+ "RTSThreshold: %d\n"
+ "TXMSDULifetime: %d\n"
+ "RXMSDULifetime: %d\n"
+ "TXDiversity: %s\n"
+ "RXDiversity: %s\n"
+ "FragThreshold: %d\n"
+ "WEP: %s\n"
+ "Modulation: %s\n",
+ (int)config.longRetryLimit,
+ (int)config.shortRetryLimit,
+ (int)config.rtsThres,
+ (int)config.txLifetime,
+ (int)config.rxLifetime,
+ config.txDiversity == 1 ? "left" :
+ config.txDiversity == 2 ? "right" : "both",
+ config.rxDiversity == 1 ? "left" :
+ config.rxDiversity == 2 ? "right" : "both",
+ (int)config.fragThresh,
+ config.authType == AUTH_ENCRYPT ? "encrypt" :
+ config.authType == AUTH_SHAREDKEY ? "shared" : "open",
+ config.modulation == 0 ? "default" :
+ config.modulation == MOD_CCK ? "cck" :
+ config.modulation == MOD_MOK ? "mok" : "error"
+ );
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static void proc_SSID_on_close( struct inode *inode, struct file *file ) {
+ struct proc_data *data = (struct proc_data *)file->private_data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ SsidRid SSID_rid;
+ int i;
+ int offset = 0;
+
+ if ( !data->writelen ) return;
+
+ memset( &SSID_rid, 0, sizeof( SSID_rid ) );
+
+ for( i = 0; i < 3; i++ ) {
+ int j;
+ for( j = 0; j+offset < data->writelen && j < 32 &&
+ data->wbuffer[offset+j] != '\n'; j++ ) {
+ SSID_rid.ssids[i].ssid[j] = data->wbuffer[offset+j];
+ }
+ if ( j == 0 ) break;
+ SSID_rid.ssids[i].len = j;
+ offset += j;
+ while( data->wbuffer[offset] != '\n' &&
+ offset < data->writelen ) offset++;
+ offset++;
+ }
+ writeSsidRid(ai, &SSID_rid);
+}
+
+static inline u8 hexVal(char c) {
+	if (c>='0' && c<='9') return c - '0';
+	if (c>='a' && c<='f') return c - 'a' + 10;
+	if (c>='A' && c<='F') return c - 'A' + 10;
+	return 0;
+}
+
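+/*
+ * The APList proc file expects up to four MAC addresses written in the
+ * same form it prints them, e.g. "00:40:96:12:34:56", one per line;
+ * every third character is treated as a separator and ignored.
+ */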
+static void proc_APList_on_close( struct inode *inode, struct file *file ) {
+ struct proc_data *data = (struct proc_data *)file->private_data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ APListRid APList_rid;
+ int i;
+
+ if ( !data->writelen ) return;
+
+ memset( &APList_rid, 0, sizeof(APList_rid) );
+ APList_rid.len = sizeof(APList_rid);
+
+ for( i = 0; i < 4 && data->writelen >= (i+1)*6*3; i++ ) {
+ int j;
+ for( j = 0; j < 6*3 && data->wbuffer[j+i*6*3]; j++ ) {
+ switch(j%3) {
+ case 0:
+ APList_rid.ap[i][j/3]=
+ hexVal(data->wbuffer[j+i*6*3])<<4;
+ break;
+ case 1:
+ APList_rid.ap[i][j/3]|=
+ hexVal(data->wbuffer[j+i*6*3]);
+ break;
+ }
+ }
+ }
+ writeAPListRid(ai, &APList_rid);
+}
+
+/* This function wraps PC4500_writerid with a MAC disable */
+static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
+ int len ) {
+ int rc;
+ Resp rsp;
+
+ disable_MAC(ai);
+ rc = PC4500_writerid(ai, rid, rid_data, len);
+ enable_MAC(ai, &rsp);
+ return rc;
+}
+
+/* Returns the length of the key at the index. If index == 0xffff
+ * the index of the transmit key is returned. If the key doesn't exist,
+ * -1 will be returned.
+ */
+static int get_wep_key(struct airo_info *ai, u16 index) {
+ WepKeyRid wkr;
+ int rc;
+ u16 lastindex;
+
+ rc = readWepKeyRid(ai, &wkr, 1);
+ if (rc == SUCCESS) do {
+ lastindex = wkr.kindex;
+ if (wkr.kindex == index) {
+ if (index == 0xffff) {
+ return wkr.mac[0];
+ }
+ return wkr.klen;
+ }
+ readWepKeyRid(ai, &wkr, 0);
+ } while(lastindex != wkr.kindex);
+ return -1;
+}
+
+static int set_wep_key(struct airo_info *ai, u16 index,
+ const char *key, u16 keylen, int perm ) {
+ static const unsigned char macaddr[6] = { 0x01, 0, 0, 0, 0, 0 };
+ WepKeyRid wkr;
+
+ memset(&wkr, 0, sizeof(wkr));
+ if (keylen == 0) {
+// We are selecting which key to use
+ wkr.len = sizeof(wkr);
+ wkr.kindex = 0xffff;
+ wkr.mac[0] = (char)index;
+		if (perm) printk(KERN_INFO "airo: Setting transmit key to %d\n", index);
+ if (perm) ai->defindex = (char)index;
+ } else {
+// We are actually setting the key
+ wkr.len = sizeof(wkr);
+ wkr.kindex = index;
+ wkr.klen = keylen;
+ memcpy( wkr.key, key, keylen );
+ memcpy( wkr.mac, macaddr, 6 );
+		printk(KERN_INFO "airo: Setting key %d\n", index);
+ }
+
+ writeWepKeyRid(ai, &wkr, perm);
+ return 0;
+}
+
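+/*
+ * Writes to the WepKey proc file are expected to look like
+ * "<index> <key in hex>", e.g. "1 01:23:45:67:89" to program key 1,
+ * or just "<index>\n" to select the transmit key; as in the APList
+ * parser, every third character of the key is skipped as a separator.
+ */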
+static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ int i;
+ char key[16];
+ u16 index = 0;
+ int j = 0;
+
+ memset(key, 0, sizeof(key));
+
+ dp = (struct proc_dir_entry *) inode->u.generic_ip;
+ data = (struct proc_data *)file->private_data;
+ if ( !data->writelen ) return;
+
+ if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' &&
+ (data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
+ index = data->wbuffer[0] - '0';
+ if (data->wbuffer[1] == '\n') {
+ set_wep_key(ai, index, 0, 0, 1);
+ return;
+ }
+ j = 2;
+ } else {
+ printk(KERN_ERR "airo: WepKey passed invalid key index\n");
+ return;
+ }
+
+ for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
+ switch(i%3) {
+ case 0:
+ key[i/3] = hexVal(data->wbuffer[i+j])<<4;
+ break;
+ case 1:
+ key[i/3] |= hexVal(data->wbuffer[i+j]);
+ break;
+ }
+ }
+ set_wep_key(ai, index, key, i/3, 1);
+}
+
+static int proc_wepkey_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ char *ptr;
+ WepKeyRid wkr;
+ u16 lastindex;
+ int j=0;
+ int rc;
+
+ MOD_INC_USE_COUNT;
+
+ dp = (struct proc_dir_entry *) inode->u.generic_ip;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ memset(&wkr, 0, sizeof(wkr));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 180, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset(data->rbuffer, 0, 180);
+ data->writelen = 0;
+ data->maxwritelen = 80;
+ if ((data->wbuffer = kmalloc( 80, GFP_KERNEL )) == NULL) {
+ kfree (data->rbuffer);
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset( data->wbuffer, 0, 80 );
+ data->on_close = proc_wepkey_on_close;
+
+ ptr = data->rbuffer;
+ strcpy(ptr, "No wep keys\n");
+ rc = readWepKeyRid(ai, &wkr, 1);
+ if (rc == SUCCESS) do {
+ lastindex = wkr.kindex;
+ if (wkr.kindex == 0xffff) {
+ j += sprintf(ptr+j, "Tx key = %d\n",
+ (int)wkr.mac[0]);
+ } else {
+ j += sprintf(ptr+j, "Key %d set with length = %d\n",
+ (int)wkr.kindex, (int)wkr.klen);
+ }
+ readWepKeyRid(ai, &wkr, 0);
+ } while((lastindex != wkr.kindex) && (j < 180-30));
+
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static int proc_SSID_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ int i;
+ char *ptr;
+ SsidRid SSID_rid;
+
+ MOD_INC_USE_COUNT;
+
+ dp = (struct proc_dir_entry *) inode->u.generic_ip;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ data->writelen = 0;
+ data->maxwritelen = 33*3;
+ if ((data->wbuffer = kmalloc( 33*3, GFP_KERNEL )) == NULL) {
+ kfree (data->rbuffer);
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset( data->wbuffer, 0, 33*3 );
+ data->on_close = proc_SSID_on_close;
+
+ readSsidRid(ai, &SSID_rid);
+ ptr = data->rbuffer;
+ for( i = 0; i < 3; i++ ) {
+ int j;
+ if ( !SSID_rid.ssids[i].len ) break;
+ for( j = 0; j < 32 &&
+ j < SSID_rid.ssids[i].len &&
+ SSID_rid.ssids[i].ssid[j]; j++ ) {
+ *ptr++ = SSID_rid.ssids[i].ssid[j];
+ }
+ *ptr++ = '\n';
+ }
+ *ptr = '\0';
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static int proc_APList_open( struct inode *inode, struct file *file ) {
+ struct proc_data *data;
+ struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct net_device *dev = dp->data;
+ struct airo_info *ai = (struct airo_info*)dev->priv;
+ int i;
+ char *ptr;
+ APListRid APList_rid;
+
+ MOD_INC_USE_COUNT;
+
+ dp = (struct proc_dir_entry *) inode->u.generic_ip;
+
+ if ((file->private_data = kmalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ memset(file->private_data, 0, sizeof(struct proc_data));
+ data = (struct proc_data *)file->private_data;
+ if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ data->writelen = 0;
+ data->maxwritelen = 4*6*3;
+ if ((data->wbuffer = kmalloc( data->maxwritelen, GFP_KERNEL )) == NULL) {
+ kfree (data->rbuffer);
+ kfree (file->private_data);
+ return -ENOMEM;
+ }
+ memset( data->wbuffer, 0, data->maxwritelen );
+ data->on_close = proc_APList_on_close;
+
+ readAPListRid(ai, &APList_rid);
+ ptr = data->rbuffer;
+ for( i = 0; i < 4; i++ ) {
+// We end when we find a zero MAC
+ if ( !*(int*)APList_rid.ap[i] &&
+ !*(int*)&APList_rid.ap[i][2]) break;
+ ptr += sprintf(ptr, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ (int)APList_rid.ap[i][0],
+ (int)APList_rid.ap[i][1],
+ (int)APList_rid.ap[i][2],
+ (int)APList_rid.ap[i][3],
+ (int)APList_rid.ap[i][4],
+ (int)APList_rid.ap[i][5]);
+ }
+ if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");
+
+ *ptr = '\0';
+ data->readlen = strlen( data->rbuffer );
+ return 0;
+}
+
+static int proc_close( struct inode *inode, struct file *file )
+{
+ struct proc_data *data = (struct proc_data *)file->private_data;
+ if ( data->on_close != NULL ) data->on_close( inode, file );
+ MOD_DEC_USE_COUNT;
+ if ( data->rbuffer ) kfree( data->rbuffer );
+ if ( data->wbuffer ) kfree( data->wbuffer );
+ kfree( data );
+ return 0;
+}
+
+static struct net_device_list {
+ struct net_device *dev;
+ struct net_device_list *next;
+} *airo_devices = 0;
+
+/* Since the card doesn't automatically switch to the right WEP mode,
+   we will make it do it. If the card isn't associated, every few
+   seconds we will switch WEP modes to see if that will help. If the
+   card is associated we will check every minute to see if anything
+   has changed. */
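+/*
+ * Each pass of the rotation below goes roughly OPEN -> SHAREDKEY
+ * (trying each of the first auto_wep key indexes in turn) ->
+ * ENCRYPT -> OPEN again, re-arming the timer until a link comes up.
+ */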
+static void timer_func( u_long data ) {
+ struct net_device *dev = (struct net_device*)data;
+ struct airo_info *apriv = (struct airo_info *)dev->priv;
+ u16 linkstat = IN4500(apriv, LINKSTAT);
+
+ if (linkstat != 0x400 ) {
+/* We don't have a link so try changing the authtype */
+ ConfigRid config = apriv->config;
+
+ switch(apriv->authtype) {
+ case AUTH_ENCRYPT:
+/* So drop to OPEN */
+ config.authType = AUTH_OPEN;
+ apriv->authtype = AUTH_OPEN;
+ break;
+ case AUTH_SHAREDKEY:
+ if (apriv->keyindex < auto_wep) {
+ set_wep_key(apriv, apriv->keyindex, 0, 0, 0);
+ config.authType = AUTH_SHAREDKEY;
+ apriv->authtype = AUTH_SHAREDKEY;
+ apriv->keyindex++;
+ } else {
+ /* Drop to ENCRYPT */
+ apriv->keyindex = 0;
+ set_wep_key(apriv, apriv->defindex, 0, 0, 0);
+ config.authType = AUTH_ENCRYPT;
+ apriv->authtype = AUTH_ENCRYPT;
+ }
+ break;
+ default: /* We'll escalate to SHAREDKEY */
+ config.authType = AUTH_SHAREDKEY;
+ apriv->authtype = AUTH_SHAREDKEY;
+ }
+ checkThrottle(&config);
+ writeConfigRid(apriv, &config);
+
+/* Schedule check to see if the change worked */
+ apriv->timer.expires = RUN_AT(HZ*3);
+ add_timer(&apriv->timer);
+ }
+}
+
+static int add_airo_dev( struct net_device *dev ) {
+ struct net_device_list *node = kmalloc( sizeof( *node ), GFP_KERNEL );
+ if ( !node )
+ return -ENOMEM;
+
+ if ( auto_wep ) {
+ struct airo_info *apriv=dev->priv;
+ struct timer_list *timer = &apriv->timer;
+
+ timer->function = timer_func;
+ timer->data = (u_long)dev;
+ init_timer(timer);
+ apriv->authtype = AUTH_SHAREDKEY;
+ }
+
+ node->dev = dev;
+ node->next = airo_devices;
+ airo_devices = node;
+
+ return 0;
+}
+
+static void del_airo_dev( struct net_device *dev ) {
+ struct net_device_list **p = &airo_devices;
+ while( *p && ( (*p)->dev != dev ) )
+ p = &(*p)->next;
+ if ( *p && (*p)->dev == dev )
+ *p = (*p)->next;
+}
+
+#ifdef CONFIG_PCI
+static int __devinit airo_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pent)
+{
+ pdev->driver_data = init_airo_card(pdev->irq,
+ pdev->resource[2].start, 0);
+ if (!pdev->driver_data) {
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void __devexit airo_pci_remove(struct pci_dev *pdev)
+{
+ stop_airo_card(pdev->driver_data, 1);
+}
+
+#endif
+
+static int __init airo_init_module( void )
+{
+ int i, rc = 0, have_isa_dev = 0;
+
+ airo_entry = create_proc_entry("aironet",
+ S_IFDIR | airo_perm,
+ proc_root_driver);
+ airo_entry->uid = proc_uid;
+ airo_entry->gid = proc_gid;
+
+ for( i = 0; i < 4 && io[i] && irq[i]; i++ ) {
+ printk( KERN_INFO
+ "airo: Trying to configure ISA adapter at irq=%d io=0x%x\n",
+ irq[i], io[i] );
+ if (init_airo_card( irq[i], io[i], 0 ))
+ have_isa_dev = 1;
+ }
+
+#ifdef CONFIG_PCI
+ printk( KERN_INFO "airo: Probing for PCI adapters\n" );
+ rc = pci_module_init(&airo_driver);
+ printk( KERN_INFO "airo: Finished probing for PCI adapters\n" );
+#endif
+
+ /* arguably, we should clean up and error exit if pci_module_init
+ * fails with an error other than -ENODEV, instead of proceeding,
+ * if ISA devs are present.
+ */
+ if (have_isa_dev)
+ return 0;
+ if (rc && (rc != -ENODEV))
+ return rc;
+ return 0;
+}
+
+static void __exit airo_cleanup_module( void )
+{
+ while( airo_devices ) {
+ printk( KERN_INFO "airo: Unregistering %s\n", airo_devices->dev->name );
+ stop_airo_card( airo_devices->dev, 1 );
+ }
+ remove_proc_entry("aironet", proc_root_driver);
+}
+
+#ifdef WIRELESS_EXT
+/*
+ * Initial Wireless Extension code for Aironet driver by :
+ * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
+ */
+#ifndef IW_ENCODE_NOKEY
+#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
+#define IW_ENCODE_MODE (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN)
+#endif /* IW_ENCODE_NOKEY */
+#endif /* WIRELESS_EXT */
+
+/*
+ * This defines the configuration part of the Wireless Extensions
+ * Note : irq and spinlock protection will occur in the subroutines
+ *
+ * TODO :
+ * o Check input value more carefully and fill correct values in range
+ * o Implement : POWER, SPY, APLIST
+ * o Optimise when adapter is closed (aggregate changes, commit later)
+ * o Test and shakeout the bugs (if any)
+ *
+ * Jean II
+ *
+ * Javier Achirica did a great job of merging code from the unnamed CISCO
+ * developer that added support for flashing the card.
+ */
+static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ int rc = 0;
+#ifdef WIRELESS_EXT
+ struct airo_info *local = (struct airo_info*) dev->priv;
+ struct iwreq *wrq = (struct iwreq *) rq;
+ ConfigRid config; /* Configuration info */
+ CapabilityRid cap_rid; /* Card capability info */
+ StatusRid status_rid; /* Card status info */
+ int i;
+
+#ifdef CISCO_EXT
+ if (cmd != SIOCGIWPRIV && cmd != AIROIOCTL && cmd != AIROIDIFC)
+#endif /* CISCO_EXT */
+ {
+ /* If the command read some stuff, we better get it out of
+ * the card first... */
+ if(IW_IS_GET(cmd))
+ readStatusRid(local, &status_rid);
+ if(IW_IS_GET(cmd) || (cmd == SIOCSIWRATE) || (cmd == SIOCSIWENCODE))
+ readCapabilityRid(local, &cap_rid);
+ /* Get config in all cases, because SET will just modify it */
+ readConfigRid(local, &config);
+ }
+#endif /* WIRELESS_EXT */
+
+ switch (cmd) {
+#ifdef WIRELESS_EXT
+ // Get name
+ case SIOCGIWNAME:
+ strcpy(wrq->u.name, "IEEE 802.11-DS");
+ break;
+
+ // Set frequency/channel
+ case SIOCSIWFREQ:
+ /* If setting by frequency, convert to a channel */
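+		/* Worked example: m = 241200000 with e = 1 is 2.412 GHz; f below
+		 * becomes 2412 (MHz), which maps to channel 1 assuming the usual
+		 * 2.4 GHz table where frequency_list[0] == 2412. */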
+ if((wrq->u.freq.e == 1) &&
+ (wrq->u.freq.m >= (int) 2.412e8) &&
+ (wrq->u.freq.m <= (int) 2.487e8)) {
+ int f = wrq->u.freq.m / 100000;
+ int c = 0;
+ while((c < 14) && (f != frequency_list[c]))
+ c++;
+ /* Hack to fall through... */
+ wrq->u.freq.e = 0;
+ wrq->u.freq.m = c + 1;
+ }
+ /* Setting by channel number */
+ if((wrq->u.freq.m > 1000) || (wrq->u.freq.e > 0))
+ rc = -EOPNOTSUPP;
+ else {
+ int channel = wrq->u.freq.m;
+ /* We should do a better check than that,
+ * based on the card capability !!! */
+ if((channel < 1) || (channel > 16)) {
+ printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, wrq->u.freq.m);
+ rc = -EINVAL;
+ } else {
+ /* Yes ! We can set it !!! */
+ config.channelSet = (u16)(channel - 1);
+ local->need_commit = 1;
+ }
+ }
+ break;
+
+ // Get frequency/channel
+ case SIOCGIWFREQ:
+#ifdef WEXT_USECHANNELS
+ wrq->u.freq.m = ((int)status_rid.channel) + 1;
+ wrq->u.freq.e = 0;
+#else
+ {
+ int f = (int)status_rid.channel;
+ wrq->u.freq.m = frequency_list[f] * 100000;
+ wrq->u.freq.e = 1;
+ }
+#endif
+ break;
+
+ // Set desired network name (ESSID)
+ case SIOCSIWESSID:
+ if (wrq->u.data.pointer) {
+ char essid[IW_ESSID_MAX_SIZE + 1];
+ SsidRid SSID_rid; /* SSIDs */
+
+ /* Reload the list of current SSID */
+ readSsidRid(local, &SSID_rid);
+
+ /* Check if we asked for `any' */
+ if(wrq->u.data.flags == 0) {
+ /* Just send an empty SSID list */
+ memset(&SSID_rid, 0, sizeof(SSID_rid));
+ } else {
+ int index = (wrq->u.data.flags &
+ IW_ENCODE_INDEX) - 1;
+
+ /* Check the size of the string */
+ if(wrq->u.data.length > IW_ESSID_MAX_SIZE+1) {
+ rc = -E2BIG;
+ break;
+ }
+ /* Check if index is valid */
+ if((index < 0) || (index >= 4)) {
+ rc = -EINVAL;
+ break;
+ }
+
+ /* Set the SSID */
+ memset(essid, 0, sizeof(essid));
+ copy_from_user(essid,
+ wrq->u.data.pointer,
+ wrq->u.data.length);
+ memcpy(SSID_rid.ssids[index].ssid, essid,
+ sizeof(essid) - 1);
+ SSID_rid.ssids[index].len = wrq->u.data.length - 1;
+ }
+ /* Write it to the card */
+ writeSsidRid(local, &SSID_rid);
+ }
+ break;
+
+ // Get current network name (ESSID)
+ case SIOCGIWESSID:
+ if (wrq->u.data.pointer) {
+ char essid[IW_ESSID_MAX_SIZE + 1];
+
+ /* Note : if wrq->u.data.flags != 0, we should
+ * get the relevant SSID from the SSID list... */
+
+ /* Get the current SSID */
+ memcpy(essid, status_rid.SSID, status_rid.SSIDlen);
+ essid[status_rid.SSIDlen] = '\0';
+ /* If none, we may want to get the one that was set */
+
+ /* Push it out ! */
+ wrq->u.data.length = strlen(essid) + 1;
+ wrq->u.data.flags = 1; /* active */
+ if (copy_to_user(wrq->u.data.pointer, essid, sizeof(essid)))
+ rc = -EFAULT;
+ }
+ break;
+
+ case SIOCSIWAP:
+ if (wrq->u.ap_addr.sa_family != ARPHRD_ETHER)
+ rc = -EINVAL;
+ else {
+ APListRid APList_rid;
+
+ memset(&APList_rid, 0, sizeof(APList_rid));
+ APList_rid.len = sizeof(APList_rid);
+ memcpy(APList_rid.ap[0], wrq->u.ap_addr.sa_data, 6);
+ writeAPListRid(local, &APList_rid);
+ local->need_commit = 1;
+ }
+ break;
+
+ // Get current Access Point (BSSID)
+ case SIOCGIWAP:
+ /* Tentative. This seems to work, wow, I'm lucky !!! */
+ memcpy(wrq->u.ap_addr.sa_data, status_rid.bssid[0], 6);
+ wrq->u.ap_addr.sa_family = ARPHRD_ETHER;
+ break;
+
+ // Set desired station name
+ case SIOCSIWNICKN:
+ if (wrq->u.data.pointer) {
+ char name[16 + 1];
+
+ /* Check the size of the string */
+ if(wrq->u.data.length > 16 + 1) {
+ rc = -E2BIG;
+ break;
+ }
+ memset(name, 0, sizeof(name));
+ copy_from_user(name, wrq->u.data.pointer, wrq->u.data.length);
+ memcpy(config.nodeName, name, 16);
+ local->need_commit = 1;
+ }
+ break;
+
+ // Get current station name
+ case SIOCGIWNICKN:
+ if (wrq->u.data.pointer) {
+ char name[IW_ESSID_MAX_SIZE + 1];
+
+ strncpy(name, config.nodeName, 16);
+ name[16] = '\0';
+ wrq->u.data.length = strlen(name) + 1;
+ if (copy_to_user(wrq->u.data.pointer, name, sizeof(name)))
+ rc = -EFAULT;
+ }
+ break;
+
+ // Set the desired bit-rate
+ case SIOCSIWRATE:
+ {
+ /* First : get a valid bit rate value */
+ u8 brate = 0;
+ int i;
+
+ /* Which type of value ? */
+ if((wrq->u.bitrate.value < 8) &&
+ (wrq->u.bitrate.value >= 0)) {
+ /* Setting by rate index */
+ /* Find value in the magic rate table */
+ brate = cap_rid.supportedRates[wrq->u.bitrate.value];
+ } else {
+			/* Setting by rate value (in bit/s) */
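+			/* e.g. a requested 11000000 bit/s becomes 11000000/500000 = 22,
+			 * i.e. 11 Mb/s in the card's 500 kb/s units (worked example) */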
+ u8 normvalue = (u8) (wrq->u.bitrate.value/500000);
+
+ /* Check if rate is valid */
+ for(i = 0 ; i < 8 ; i++) {
+ if(normvalue == cap_rid.supportedRates[i]) {
+ brate = normvalue;
+ break;
+ }
+ }
+ }
+		/* -1 designates the max rate (mostly auto mode) */
+ if(wrq->u.bitrate.value == -1) {
+ /* Get the highest available rate */
+ for(i = 0 ; i < 8 ; i++) {
+ if(cap_rid.supportedRates[i] == 0)
+ break;
+ }
+ if(i != 0)
+ brate = cap_rid.supportedRates[i - 1];
+ }
+ /* Check that it is valid */
+ if(brate == 0) {
+ rc = -EINVAL;
+ break;
+ }
+
+ /* Now, check if we want a fixed or auto value */
+ if(wrq->u.bitrate.fixed == 0) {
+ /* Fill all the rates up to this max rate */
+ memset(config.rates, 0, 8);
+ for(i = 0 ; i < 8 ; i++) {
+ config.rates[i] = cap_rid.supportedRates[i];
+ if(config.rates[i] == brate)
+ break;
+ }
+ local->need_commit = 1;
+ } else {
+ /* Fixed mode */
+ /* One rate, fixed */
+ memset(config.rates, 0, 8);
+ config.rates[0] = brate;
+ local->need_commit = 1;
+ }
+ break;
+ }
+
+ // Get the current bit-rate
+ case SIOCGIWRATE:
+ {
+ int brate = status_rid.currentXmitRate;
+ wrq->u.bitrate.value = brate * 500000;
+ /* If more than one rate, set auto */
+		wrq->u.bitrate.fixed = (config.rates[1] == 0);
+ }
+ break;
+
+ // Set the desired RTS threshold
+ case SIOCSIWRTS:
+ {
+ int rthr = wrq->u.rts.value;
+ if(wrq->u.rts.disabled)
+ rthr = 2312;
+ if((rthr < 0) || (rthr > 2312)) {
+ rc = -EINVAL;
+ } else {
+ config.rtsThres = rthr;
+ local->need_commit = 1;
+ }
+ }
+ break;
+
+ // Get the current RTS threshold
+ case SIOCGIWRTS:
+ wrq->u.rts.value = config.rtsThres;
+ wrq->u.rts.disabled = (wrq->u.rts.value >= 2312);
+ wrq->u.rts.fixed = 1;
+ break;
+
+ // Set the desired fragmentation threshold
+ case SIOCSIWFRAG:
+ {
+ int fthr = wrq->u.frag.value;
+ if(wrq->u.frag.disabled)
+ fthr = 2312;
+ if((fthr < 256) || (fthr > 2312)) {
+ rc = -EINVAL;
+ } else {
+ fthr &= ~0x1; /* Get an even value */
+ config.fragThresh = (u16)fthr;
+ local->need_commit = 1;
+ }
+ }
+ break;
+
+ // Get the current fragmentation threshold
+ case SIOCGIWFRAG:
+ wrq->u.frag.value = config.fragThresh;
+ wrq->u.frag.disabled = (wrq->u.frag.value >= 2312);
+ wrq->u.frag.fixed = 1;
+ break;
+
+ // Set mode of operation
+ case SIOCSIWMODE:
+ switch(wrq->u.mode) {
+ case IW_MODE_ADHOC:
+ config.opmode = MODE_STA_IBSS;
+ local->need_commit = 1;
+ break;
+ case IW_MODE_INFRA:
+ config.opmode = MODE_STA_ESS;
+ local->need_commit = 1;
+ break;
+ case IW_MODE_MASTER:
+ config.opmode = MODE_AP;
+ local->need_commit = 1;
+ break;
+ case IW_MODE_REPEAT:
+ config.opmode = MODE_AP_RPTR;
+ local->need_commit = 1;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ break;
+
+ // Get mode of operation
+ case SIOCGIWMODE:
+ /* If not managed, assume it's ad-hoc */
+ switch (config.opmode & 0xFF) {
+ case MODE_STA_ESS:
+ wrq->u.mode = IW_MODE_INFRA;
+ break;
+ case MODE_AP:
+ wrq->u.mode = IW_MODE_MASTER;
+ break;
+ case MODE_AP_RPTR:
+ wrq->u.mode = IW_MODE_REPEAT;
+ break;
+ default:
+ wrq->u.mode = IW_MODE_ADHOC;
+ }
+ break;
+
+ // Set WEP keys and mode
+ case SIOCSIWENCODE:
+ /* Is WEP supported ? */
+ /* Older firmware doesn't support this...
+ if(!(cap_rid.softCap & 2)) {
+ rc = -EOPNOTSUPP;
+ break;
+ } */
+ /* Basic checking: do we have a key to set ? */
+ if (wrq->u.encoding.pointer != (caddr_t) 0) {
+ wep_key_t key;
+ int index = (wrq->u.encoding.flags & IW_ENCODE_INDEX) - 1;
+ int current_index = get_wep_key(local, 0xffff);
+ /* Check the size of the key */
+ if (wrq->u.encoding.length > MAX_KEY_SIZE) {
+ rc = -EINVAL;
+ break;
+ }
+ /* Check the index (none -> use current) */
+ if ((index < 0) || (index >= MAX_KEYS))
+ index = current_index;
+ /* Set the length */
+ if (wrq->u.encoding.length > MIN_KEY_SIZE)
+ key.len = MAX_KEY_SIZE;
+ else
+ if (wrq->u.encoding.length > 0)
+ key.len = MIN_KEY_SIZE;
+ else
+ /* Disable the key */
+ key.len = 0;
+ /* Check if the key is not marked as invalid */
+ if(!(wrq->u.encoding.flags & IW_ENCODE_NOKEY)) {
+ /* Cleanup */
+ memset(key.key, 0, MAX_KEY_SIZE);
+ /* Copy the key in the driver */
+ if(copy_from_user(key.key,
+ wrq->u.encoding.pointer,
+ wrq->u.encoding.length)) {
+ key.len = 0;
+ rc = -EFAULT;
+ break;
+ }
+ /* Send the key to the card */
+ set_wep_key(local, index, key.key,
+ key.len, 1);
+ }
+ /* WE specify that if a valid key is set, encryption
+ * should be enabled (user may turn it off later)
+ * This is also how "iwconfig ethX key on" works */
+ if((index == current_index) && (key.len > 0) &&
+ (config.authType == AUTH_OPEN)) {
+ config.authType = AUTH_ENCRYPT;
+ local->need_commit = 1;
+ }
+ } else {
+ /* Do we want to just set the transmit key index ? */
+ int index = (wrq->u.encoding.flags & IW_ENCODE_INDEX) - 1;
+ if ((index >= 0) && (index < MAX_KEYS)) {
+ set_wep_key(local, index, 0, 0, 1);
+ } else
+				/* Don't complain if we only change the mode */
+				if(!(wrq->u.encoding.flags & IW_ENCODE_MODE)) {
+ rc = -EINVAL;
+ break;
+ }
+ }
+ /* Read the flags */
+ if(wrq->u.encoding.flags & IW_ENCODE_DISABLED)
+ config.authType = AUTH_OPEN; // disable encryption
+ if(wrq->u.encoding.flags & IW_ENCODE_RESTRICTED)
+ config.authType = AUTH_SHAREDKEY; // Only Both
+ if(wrq->u.encoding.flags & IW_ENCODE_OPEN)
+ config.authType = AUTH_ENCRYPT; // Only Wep
+ /* Commit the changes if needed */
+ if(wrq->u.encoding.flags & IW_ENCODE_MODE)
+ local->need_commit = 1;
+ break;
+
+ // Get the WEP keys and mode
+ case SIOCGIWENCODE:
+ /* Is it supported ? */
+ if(!(cap_rid.softCap & 2)) {
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ // Only super-user can see WEP key
+ if (!capable(CAP_NET_ADMIN)) {
+ rc = -EPERM;
+ break;
+ }
+
+ // Basic checking...
+ if (wrq->u.encoding.pointer != (caddr_t) 0) {
+ char zeros[16];
+ int index = (wrq->u.encoding.flags & IW_ENCODE_INDEX) - 1;
+
+ memset(zeros,0, sizeof(zeros));
+ /* Check encryption mode */
+ wrq->u.encoding.flags = IW_ENCODE_NOKEY;
+ /* Is WEP enabled ??? */
+ switch(config.authType) {
+ case AUTH_ENCRYPT:
+ wrq->u.encoding.flags |= IW_ENCODE_OPEN;
+ break;
+ case AUTH_SHAREDKEY:
+ wrq->u.encoding.flags |= IW_ENCODE_RESTRICTED;
+ break;
+ default:
+ case AUTH_OPEN:
+ wrq->u.encoding.flags |= IW_ENCODE_DISABLED;
+ break;
+ }
+
+ /* Which key do we want ? -1 -> tx index */
+ if((index < 0) || (index >= MAX_KEYS))
+ index = get_wep_key(local, 0xffff);
+ wrq->u.encoding.flags |= index + 1;
+ /* Copy the key to the user buffer */
+ wrq->u.encoding.length = get_wep_key(local, index);
+ if (wrq->u.encoding.length > 16) {
+ wrq->u.encoding.length=0;
+ }
+
+ if(copy_to_user(wrq->u.encoding.pointer, zeros,
+ wrq->u.encoding.length))
+ rc = -EFAULT;
+ }
+ break;
+
+#if WIRELESS_EXT > 9
+ // Get the current Tx-Power
+ case SIOCGIWTXPOW:
+ wrq->u.txpower.value = config.txPower;
+ wrq->u.txpower.fixed = 1; /* No power control */
+ wrq->u.txpower.disabled = (local->flags & FLAG_RADIO_OFF);
+ wrq->u.txpower.flags = IW_TXPOW_MWATT;
+ break;
+ case SIOCSIWTXPOW:
+ if (wrq->u.txpower.disabled) {
+ local->flags |= FLAG_RADIO_OFF;
+ local->need_commit = 1;
+ break;
+ }
+ if (wrq->u.txpower.flags != IW_TXPOW_MWATT) {
+ rc = -EINVAL;
+ break;
+ }
+ local->flags &= ~FLAG_RADIO_OFF;
+ rc = -EINVAL;
+		for (i = 0; (i < 8) && cap_rid.txPowerLevels[i]; i++)
+ if ((wrq->u.txpower.value==cap_rid.txPowerLevels[i])) {
+ config.txPower = wrq->u.txpower.value;
+ local->need_commit = 1;
+ rc = 0;
+ break;
+ }
+ break;
+#endif /* WIRELESS_EXT > 9 */
+
+#if WIRELESS_EXT > 10
+ case SIOCGIWRETRY:
+ wrq->u.retry.disabled = 0;
+ if ((wrq->u.retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+ wrq->u.retry.value = (int)config.txLifetime * 1024;
+ else {
+ wrq->u.retry.value = (int)config.shortRetryLimit;
+ wrq->u.retry.flags = IW_RETRY_LIMIT;
+ }
+ break;
+
+ case SIOCSIWRETRY:
+ if (wrq->u.retry.disabled) {
+ config.shortRetryLimit = 0;
+ config.longRetryLimit = 0;
+ config.txLifetime = 0;
+ local->need_commit = 1;
+ break;
+ }
+ if ((wrq->u.retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ config.txLifetime = (wrq->u.retry.value + 500) / 1024;
+ local->need_commit = 1;
+ } else if ((wrq->u.retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIMIT) {
+ config.shortRetryLimit = config.longRetryLimit = wrq->u.retry.value;
+ local->need_commit = 1;
+ }
+ break;
+#endif /* WIRELESS_EXT > 10 */
+
+ // Get range of parameters
+ case SIOCGIWRANGE:
+ if (wrq->u.data.pointer) {
+ struct iw_range range;
+ int i;
+ int k;
+
+ wrq->u.data.length = sizeof(range);
+ /* Should adapt depending on max rate */
+ range.throughput = 1.6 * 1024 * 1024;
+ range.min_nwid = 0x0000;
+ range.max_nwid = 0x0000;
+ range.num_channels = 14;
+ /* Should be based on cap_rid.country to give only
+ * what the current card support */
+ k = 0;
+ for(i = 0; i < 14; i++) {
+ range.freq[k].i = i + 1; /* List index */
+ range.freq[k].m = frequency_list[i] * 100000;
+ range.freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
+ }
+ range.num_frequency = k;
+
+ /* Hum... Should put the right values there */
+ range.max_qual.qual = 10;
+ range.max_qual.level = 100;
+ range.max_qual.noise = 0;
+ range.sensitivity = 65535;
+
+ for(i = 0 ; i < 8 ; i++) {
+ range.bitrate[i] = cap_rid.supportedRates[i] * 500000;
+ if(range.bitrate[i] == 0)
+ break;
+ }
+ range.num_bitrates = i;
+
+ range.min_rts = 0;
+ range.max_rts = 2312;
+ range.min_frag = 256;
+ range.max_frag = 2312;
+
+ if(cap_rid.softCap & 2) {
+ // WEP: RC4 40 bits
+ range.encoding_size[0] = 5;
+ // RC4 ~128 bits
+ if (cap_rid.softCap & 0x100) {
+ range.encoding_size[1] = 13;
+ range.num_encoding_sizes = 2;
+ } else
+ range.num_encoding_sizes = 1;
+ range.max_encoding_tokens = 4; // 4 keys
+ } else {
+ range.num_encoding_sizes = 0;
+ range.max_encoding_tokens = 0;
+ }
+#if WIRELESS_EXT > 9
+ range.min_pmp = 0;
+ range.max_pmp = 5000000; /* 5 secs */
+ range.min_pmt = 0;
+ range.max_pmt = 65535 * 1024; /* ??? */
+ range.pmp_flags = IW_POWER_PERIOD;
+ range.pmt_flags = IW_POWER_TIMEOUT;
+ range.pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
+
+ /* Transmit Power - values are in mW */
+ for(i = 0 ; i < 8 ; i++) {
+ range.txpower[i] = cap_rid.txPowerLevels[i];
+ if(range.txpower[i] == 0)
+ break;
+ }
+ range.num_txpower = i;
+ range.txpower_capa = IW_TXPOW_MWATT;
+#endif /* WIRELESS_EXT > 9 */
+#if WIRELESS_EXT > 10
+ range.we_version_source = 11;
+ range.we_version_compiled = WIRELESS_EXT;
+ range.retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
+ range.retry_flags = IW_RETRY_LIMIT;
+ range.r_time_flags = IW_RETRY_LIFETIME;
+ range.min_retry = 1;
+ range.max_retry = 65535;
+ range.min_r_time = 1024;
+ range.max_r_time = 65535 * 1024;
+#endif /* WIRELESS_EXT > 10 */
+
+ if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
+ rc = -EFAULT;
+ }
+ break;
+
+ case SIOCGIWPOWER:
+ {
+ int mode = config.powerSaveMode;
+ if ((wrq->u.power.disabled = (mode == POWERSAVE_CAM)))
+ break;
+ if ((wrq->u.power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+ wrq->u.power.value = (int)config.fastListenDelay * 1024;
+ wrq->u.power.flags = IW_POWER_TIMEOUT;
+ } else {
+ wrq->u.power.value = (int)config.fastListenInterval * 1024;
+ wrq->u.power.flags = IW_POWER_PERIOD;
+ }
+ if ((config.rmode & 0xFF) == RXMODE_ADDR)
+ wrq->u.power.flags |= IW_POWER_UNICAST_R;
+ else
+ wrq->u.power.flags |= IW_POWER_ALL_R;
+ }
+ break;
+
+ case SIOCSIWPOWER:
+ if (wrq->u.power.disabled) {
+ if ((config.rmode & 0xFF) >= RXMODE_RFMON) {
+ rc = -EINVAL;
+ break;
+ }
+ config.powerSaveMode = POWERSAVE_CAM;
+ config.rmode &= 0xFF00;
+ config.rmode |= RXMODE_BC_MC_ADDR;
+ local->need_commit = 1;
+ break;
+ }
+ if ((wrq->u.power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+ config.fastListenDelay = (wrq->u.power.value + 500) / 1024;
+ config.powerSaveMode = POWERSAVE_PSPCAM;
+ local->need_commit = 1;
+ } else if ((wrq->u.power.flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
+ config.fastListenInterval = config.listenInterval = (wrq->u.power.value + 500) / 1024;
+ config.powerSaveMode = POWERSAVE_PSPCAM;
+ local->need_commit = 1;
+ }
+ switch (wrq->u.power.flags & IW_POWER_MODE) {
+ case IW_POWER_UNICAST_R:
+ if ((config.rmode & 0xFF) >= RXMODE_RFMON) {
+ rc = -EINVAL;
+ break;
+ }
+ config.rmode &= 0xFF00;
+ config.rmode |= RXMODE_ADDR;
+ local->need_commit = 1;
+ break;
+ case IW_POWER_ALL_R:
+ if ((config.rmode & 0xFF) >= RXMODE_RFMON) {
+ rc = -EINVAL;
+ break;
+ }
+ config.rmode &= 0xFF00;
+ config.rmode |= RXMODE_BC_MC_ADDR;
+ local->need_commit = 1;
+ case IW_POWER_ON:
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ break;
+
+ case SIOCGIWSENS:
+ wrq->u.sens.value = config.rssiThreshold;
+ wrq->u.sens.disabled = (wrq->u.sens.value == 0);
+ wrq->u.sens.fixed = 1;
+ break;
+
+ case SIOCSIWSENS:
+ config.rssiThreshold = wrq->u.sens.disabled ? RSSI_DEFAULT : wrq->u.sens.value;
+ local->need_commit = 1;
+ break;
+
+ case SIOCGIWAPLIST:
+ if (wrq->u.data.pointer) {
+ int i;
+ struct sockaddr s[4];
+
+ for (i = 0; i < 4; i++) {
+ memcpy(s[i].sa_data, status_rid.bssid[i], 6);
+ s[i].sa_family = ARPHRD_ETHER;
+ }
+ wrq->u.data.length = 4;
+ if (copy_to_user(wrq->u.data.pointer, &s, sizeof(s)))
+ rc = -EFAULT;
+ }
+ break;
+
+#ifdef WIRELESS_SPY
+ // Set the spy list
+ case SIOCSIWSPY:
+ if (wrq->u.data.length > IW_MAX_SPY)
+ {
+ rc = -E2BIG;
+ break;
+ }
+ local->spy_number = wrq->u.data.length;
+ if (local->spy_number > 0)
+ {
+ struct sockaddr address[IW_MAX_SPY];
+ int i;
+
+ if (copy_from_user(address, wrq->u.data.pointer,
+ sizeof(struct sockaddr) * local->spy_number)) {
+ rc = -EFAULT;
+ break;
+ }
+ for (i=0; i<local->spy_number; i++)
+ memcpy(local->spy_address[i], address[i].sa_data, 6);
+ memset(local->spy_stat, 0, sizeof(struct iw_quality) * IW_MAX_SPY);
+ }
+ break;
+
+ // Get the spy list
+ case SIOCGIWSPY:
+ wrq->u.data.length = local->spy_number;
+ if ((local->spy_number > 0) && (wrq->u.data.pointer))
+ {
+ struct sockaddr address[IW_MAX_SPY];
+ int i;
+ rc = verify_area(VERIFY_WRITE, wrq->u.data.pointer, (sizeof(struct iw_quality)+sizeof(struct sockaddr)) * IW_MAX_SPY);
+ if (rc)
+ break;
+ for (i=0; i<local->spy_number; i++)
+ {
+ memcpy(address[i].sa_data, local->spy_address[i], 6);
+ address[i].sa_family = AF_UNIX;
+ }
+ copy_to_user(wrq->u.data.pointer, address, sizeof(struct sockaddr) * local->spy_number);
+ copy_to_user(wrq->u.data.pointer + (sizeof(struct sockaddr)*local->spy_number), local->spy_stat, sizeof(struct iw_quality) * local->spy_number);
+ for (i=0; i<local->spy_number; i++)
+ local->spy_stat[i].updated = 0;
+ }
+ break;
+#endif /* WIRELESS_SPY */
+
+#ifdef CISCO_EXT
+ case SIOCGIWPRIV:
+ if(wrq->u.data.pointer)
+ {
+ struct iw_priv_args priv[] =
+ { /* cmd, set_args, get_args, name */
+ { AIROIOCTL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl), IW_PRIV_TYPE_BYTE | 2047, "airoioctl" },
+ { AIROIDIFC, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl), IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "airoidifc" },
+ };
+
+ /* Set the number of ioctl available */
+ wrq->u.data.length = 2;
+
+ /* Copy structure to the user buffer */
+ if(copy_to_user(wrq->u.data.pointer, (u_char *) priv,
+ sizeof(priv)))
+ rc = -EFAULT;
+ }
+ break;
+#endif /* CISCO_EXT */
+#endif /* WIRELESS_EXT */
+
+#ifdef CISCO_EXT
+ case AIROIDIFC:
+ {
+ int val = AIROMAGIC;
+ aironet_ioctl com;
+ if (copy_from_user(&com,rq->ifr_data,sizeof(com)))
+ rc = -EFAULT;
+ else if (copy_to_user(com.data,(char *)&val,sizeof(val)))
+ rc = -EFAULT;
+ }
+ break;
+
+ case AIROIOCTL:
+ /* Get the command struct and hand it off for evaluation by
+ * the proper subfunction
+ */
+ {
+ aironet_ioctl com;
+ copy_from_user(&com,rq->ifr_data,sizeof(com));
+
+		/* Separate R/W functions bracket legality here
+ */
+ if ( com.command <= AIROGSTATSD32 )
+ rc = readrids(dev,&com);
+ else if ( com.command >= AIROPCAP && com.command <= AIROPLEAPUSR )
+ rc = writerids(dev,&com);
+ else if ( com.command >= AIROFLSHRST && com.command <= AIRORESTART )
+ rc = flashcard(dev,&com);
+ else
+ rc = -EINVAL; /* Bad command in ioctl */
+ }
+ break;
+#endif /* CISCO_EXT */
+
+ // All other calls are currently unsupported
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+#ifdef WIRELESS_EXT
+ /* Some of the "SET" function may have modified some of the
+ * parameters. It's now time to commit them in the card */
+ if(local->need_commit) {
+ /* A classical optimisation here is to not commit any change
+ * if the card is not "opened". This is what we do in
+ * wvlan_cs (see for details).
+ * For that, we would need to have the config RID saved in
+ * the airo_info struct and make sure to not re-read it if
+ * local->need_commit != 0. Then, you need to patch "open"
+ * to do the final commit of all parameters...
+ * Jean II */
+ Resp rsp;
+
+ disable_MAC(local);
+ local->config = config; /* ???? config is local !!! */
+ checkThrottle(&config);
+ writeConfigRid(local, &config);
+ enable_MAC(local, &rsp);
+
+ local->need_commit = 0;
+ }
+#endif /* WIRELESS_EXT */
+
+ return(rc);
+}
+
+#ifdef WIRELESS_EXT
+/*
+ * Get the Wireless stats out of the driver
+ * Note : irq and spinlock protection will occur in the subroutines
+ *
+ * TODO :
+ * o Check if it works in Ad-Hoc mode (otherwise, use SPY, as in wvlan_cs)
+ * o Find the noise level
+ * o Convert values to dBm
+ * o Fill out discard.misc with something interesting
+ *
+ * Jean
+ */
+struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
+{
+ struct airo_info *local = (struct airo_info*) dev->priv;
+ StatusRid status_rid;
+ StatsRid stats_rid;
+ int *vals = stats_rid.vals;
+
+ /* Get stats out of the card */
+ readStatusRid(local, &status_rid);
+ readStatsRid(local, &stats_rid, RID_STATS);
+
+ /* The status */
+ local->wstats.status = status_rid.mode;
+
+ /* Signal quality and co. But where is the noise level ??? */
+ local->wstats.qual.qual = status_rid.signalQuality;
+ local->wstats.qual.level = status_rid.normalizedSignalStrength;
+ local->wstats.qual.noise = 0;
+ local->wstats.qual.updated = 3;
+
+ /* Packets discarded in the wireless adapter due to wireless
+ * specific problems */
+ local->wstats.discard.nwid = vals[56] + vals[57] + vals[58];/* SSID Mismatch */
+ local->wstats.discard.code = vals[6];/* RxWepErr */
+ local->wstats.discard.misc = vals[1] + vals[2] + vals[3] + vals[4] + vals[30] + vals[32];
+ return (&local->wstats);
+}
+#endif /* WIRELESS_EXT */
+
+#ifdef CISCO_EXT
+/*
+ * This just translates from driver IOCTL codes to the command codes to
+ * feed to the radio's host interface. Things can be added/deleted
+ * as needed. This represents the READ side of control I/O to
+ * the card
+ */
+static int readrids(struct net_device *dev, aironet_ioctl *comp) {
+ unsigned short ridcode;
+ unsigned char iobuf[2048];
+
+ switch(comp->command)
+ {
+ case AIROGCAP: ridcode = RID_CAPABILITIES; break;
+ case AIROGCFG: ridcode = RID_CONFIG; break;
+ case AIROGSLIST: ridcode = RID_SSID; break;
+ case AIROGVLIST: ridcode = RID_APLIST; break;
+ case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
+ case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
+ case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ break;
+ case AIROGWEPKNV: ridcode = RID_WEP_PERM;
+ /* Only super-user can read WEP keys */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ break;
+ case AIROGSTAT: ridcode = RID_STATUS; break;
+ case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
+ case AIROGSTATSC32: ridcode = RID_STATS; break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ PC4500_readrid((struct airo_info *)dev->priv,ridcode,iobuf,sizeof(iobuf));
+	/* The RID docs say the first two bytes hold the byte count of the RID;
+	 * return the data to the user.
+	 * 9/22/2000: honor the user-given length
+ */
+
+ if (copy_to_user(comp->data, iobuf, min (comp->len, sizeof(iobuf))))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * Danger Will Robinson write the rids here
+ */
+
+static int writerids(struct net_device *dev, aironet_ioctl *comp) {
+ int ridcode;
+ Resp rsp;
+ static int (* writer)(struct airo_info *, u16 rid, const void *, int);
+ unsigned char iobuf[2048];
+
+ /* Only super-user can write RIDs */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ ridcode = 0;
+ writer = do_writerid;
+
+ switch(comp->command)
+ {
+ case AIROPSIDS: ridcode = RID_SSID; break;
+ case AIROPCAP: ridcode = RID_CAPABILITIES; break;
+ case AIROPAPLIST: ridcode = RID_APLIST; break;
+ case AIROPCFG: ridcode = RID_CONFIG; break;
+ case AIROPWEPKEYNV: ridcode = RID_WEP_PERM; break;
+ case AIROPLEAPUSR: ridcode = RID_LEAPUSERNAME; break;
+ case AIROPLEAPPWD: ridcode = RID_LEAPPASSWORD; break;
+ case AIROPWEPKEY: ridcode = RID_WEP_TEMP; writer = PC4500_writerid;
+ break;
+
+	/* This is not really a RID but a command given to the card;
+	 * the same goes for MAC off below.
+ */
+ case AIROPMACON:
+ if (enable_MAC(dev->priv, &rsp) != 0)
+ return -EIO;
+ return 0;
+
+ /*
+ * Evidently this code in the airo driver does not get a symbol
+	 * for disable_MAC; it's probably so short the compiler does not generate one.
+ */
+ case AIROPMACOFF:
+ disable_MAC(dev->priv);
+ return 0;
+
+	/* This command merely clears the counts; it does not store any data,
+	 * it only reads a RID. But as it changes the card's state, I put it in the
+ * writerid routines.
+ */
+ case AIROPSTCLR:
+ ridcode = RID_STATSDELTACLEAR;
+
+ PC4500_readrid(dev->priv,ridcode,iobuf,sizeof(iobuf));
+
+ if (copy_to_user(comp->data,iobuf,min(comp->len,sizeof(iobuf))))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP; /* Blarg! */
+ }
+ if(comp->len > sizeof(iobuf))
+ return -EINVAL;
+
+ copy_from_user(iobuf,comp->data,comp->len);
+ if((*writer)((struct airo_info *)dev->priv, ridcode, iobuf,comp->len))
+ return -EIO;
+ return 0;
+}
+
+/*****************************************************************************
+ * Ancillary flash / mod functions; much black magic lurks here             *
+ *****************************************************************************
+ */
+
+/*
+ * Flash command switch table
+ */
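+/*
+ * Roughly, user space drives a flash update through AIROIOCTL in this
+ * order: AIROFLSHRST (cmdreset), AIROFLSHSTFL (setflashmode),
+ * AIROFLSHPCHR/AIROFLSHGCHR handshaking, AIROFLPUTBUF to push the
+ * firmware image, and finally AIRORESTART (flashrestart).
+ */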
+
+int flashcard(struct net_device *dev, aironet_ioctl *comp) {
+ int z;
+ int cmdreset(struct airo_info *);
+ int setflashmode(struct airo_info *);
+ int flashgchar(struct airo_info *,int,int);
+ int flashpchar(struct airo_info *,int,int);
+ int flashputbuf(struct airo_info *, unsigned short *);
+ int flashrestart(struct airo_info *,struct net_device *);
+ unsigned short * flashbuffer;
+
+ /* Only super-user can modify flash */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch(comp->command)
+ {
+ case AIROFLSHRST:
+ return cmdreset((struct airo_info *)dev->priv);
+
+ case AIROFLSHSTFL:
+ return setflashmode((struct airo_info *)dev->priv);
+
+ case AIROFLSHGCHR: /* Get char from aux */
+ if(comp->len != sizeof(int))
+ return -EINVAL;
+ copy_from_user(&z,comp->data,comp->len);
+ return flashgchar((struct airo_info *)dev->priv,z,8000);
+
+ case AIROFLSHPCHR: /* Send char to card. */
+ if(comp->len != sizeof(int))
+ return -EINVAL;
+ copy_from_user(&z,comp->data,comp->len);
+ return flashpchar((struct airo_info *)dev->priv,z,8000);
+
+ case AIROFLPUTBUF: /* Send 32k to card */
+ if(comp->len > FLASHSIZE)
+ return -EINVAL;
+ if ((flashbuffer = kmalloc (FLASHSIZE, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+ if(copy_from_user(flashbuffer,comp->data,comp->len)) {
+ kfree (flashbuffer);
+ return -EINVAL;
+ }
+
+ flashputbuf((struct airo_info *)dev->priv,flashbuffer);
+ kfree (flashbuffer);
+ return 0;
+
+ case AIRORESTART:
+ if(flashrestart((struct airo_info *)dev->priv,dev))
+ return -EIO;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+#define FLASH_COMMAND 0x7e7e
+
+/*
+ * STEP 1)
+ * Disable MAC and do soft reset on
+ * card.
+ */
+
+int cmdreset(struct airo_info *ai) {
+	unsigned long flags;
+
+ disable_MAC(ai);
+
+ spin_lock_irqsave(&ai->cmd_lock, flags);
+	if(!waitbusy (ai)){
+		printk(KERN_INFO "Waitbusy hang before RESET\n");
+		spin_unlock_irqrestore(&ai->cmd_lock, flags);
+		return -EBUSY;
+	}
+
+ OUT4500(ai,COMMAND,CMD_SOFTRESET);
+
+ set_current_state (TASK_UNINTERRUPTIBLE);
+ schedule_timeout (HZ); /* WAS 600 12/7/00 */
+
+	if(!waitbusy (ai)){
+		printk(KERN_INFO "Waitbusy hang AFTER RESET\n");
+		spin_unlock_irqrestore(&ai->cmd_lock, flags);
+		return -EBUSY;
+	}
+ spin_unlock_irqrestore(&ai->cmd_lock, flags);
+ return 0;
+}
+
+/* STEP 2)
+ * Put the card in legendary flash
+ * mode
+ */
+
+int setflashmode (struct airo_info *ai) {
+	unsigned long flags;
+
+ spin_lock_irqsave(&ai->cmd_lock, flags);
+ OUT4500(ai, SWS0, FLASH_COMMAND);
+ OUT4500(ai, SWS1, FLASH_COMMAND);
+ OUT4500(ai, SWS0, FLASH_COMMAND);
+ OUT4500(ai, COMMAND,0x10);
+ set_current_state (TASK_UNINTERRUPTIBLE);
+ schedule_timeout (HZ/2); /* 500ms delay */
+ spin_unlock_irqrestore(&ai->cmd_lock, flags);
+
+ if(!waitbusy(ai)) {
+ printk(KERN_INFO "Waitbusy hang after setflash mode\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Write a character to SWS0 and wait up to dwelltime
+ * x 50us for it to echo back.
+ */
+
+int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
+ int echo;
+ int waittime;
+
+ byte |= 0x8000;
+
+ if(dwelltime == 0 )
+ dwelltime = 200;
+
+ waittime=dwelltime;
+
+ /* Wait for busy bit d15 to go false indicating buffer empty */
+ while ((IN4500 (ai, SWS0) & 0x8000) && waittime > 0) {
+ udelay (50);
+ waittime -= 50;
+ }
+
+ /* timeout for busy clear wait */
+ if(waittime <= 0 ){
+ printk(KERN_INFO "flash putchar busywait timeout! \n");
+ return -EBUSY;
+ }
+
+ /* Port is clear now write byte and wait for it to echo back */
+ do {
+ OUT4500(ai,SWS0,byte);
+ udelay(50);
+ dwelltime -= 50;
+ echo = IN4500(ai,SWS1);
+ } while (dwelltime >= 0 && echo != byte);
+
+ OUT4500(ai,SWS1,0);
+
+ return (echo == byte) ? 0 : -EIO;
+}
+
+/*
+ * Get a character from the card matching matchbyte
+ * Step 3)
+ */
+int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
+ int rchar;
+ unsigned char rbyte=0;
+
+ do {
+ rchar = IN4500(ai,SWS1);
+
+ if(dwelltime && !(0x8000 & rchar)){
+ dwelltime -= 10;
+ mdelay(10);
+ continue;
+ }
+ rbyte = 0xff & rchar;
+
+ if( (rbyte == matchbyte) && (0x8000 & rchar) ){
+ OUT4500(ai,SWS1,0);
+ return 0;
+ }
+ if( rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
+ break;
+ OUT4500(ai,SWS1,0);
+
+ }while(dwelltime > 0);
+ return -EIO;
+}
+
+/*
+ * Transfer 32k of firmware data from user buffer to our buffer and
+ * send to the card
+ */
+
+int flashputbuf(struct airo_info *ai, unsigned short *bufp){
+ int nwords;
+
+ /* Write stuff */
+ OUT4500(ai,AUXPAGE,0x100);
+ OUT4500(ai,AUXOFF,0);
+
+ for(nwords=0;nwords != FLASHSIZE / 2;nwords++){
+ OUT4500(ai,AUXDATA,bufp[nwords] & 0xffff);
+ }
+
+ OUT4500(ai,SWS0,0x8000);
+
+ return 0;
+}
+
+/*
+ * Restart the card after flashing: re-run card setup and reallocate
+ * the transmit FIDs.
+ */
+int flashrestart(struct airo_info *ai,struct net_device *dev){
+ int i,status;
+
+ set_current_state (TASK_UNINTERRUPTIBLE);
+ schedule_timeout (HZ); /* Added 12/7/00 */
+ status = setup_card(ai, dev->dev_addr,&((struct airo_info*)dev->priv)->config);
+
+ for( i = 0; i < MAX_FIDS; i++ ) {
+ ai->fids[i] = transmit_allocate( ai, 2312 );
+ }
+
+ set_current_state (TASK_UNINTERRUPTIBLE);
+ schedule_timeout (HZ); /* Added 12/7/00 */
+ return status;
+}
+#endif /* CISCO_EXT */
+
+/*
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ In addition:
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+module_init(airo_init_module);
+module_exit(airo_cleanup_module);
--- /dev/null
+/*======================================================================
+
+ Aironet driver for 4500 and 4800 series cards
+
+ This code is released under both the GPL version 2 and BSD licenses.
+ Either license may be used. The respective licenses are found at
+ the end of this file.
+
+ This code was developed by Benjamin Reed <breed@users.sourceforge.net>
+ including portions of which come from the Aironet PC4500
+ Developer's Reference Manual and used with permission. Copyright
+ (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
+ code in the Developer's manual was granted for this driver by
+ Aironet.
+
+ In addition this module was derived from dummy_cs.
+ The initial developer of dummy_cs is David A. Hinds
+ <dhinds@hyper.stanford.edu>. Portions created by David A. Hinds
+ are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+
+======================================================================*/
+
+#include <linux/config.h>
+#ifdef __IN_PCMCIA_PACKAGE__
+#include <pcmcia/k_compat.h>
+#endif
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/netdevice.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+/*
+ All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
+ you do not define PCMCIA_DEBUG at all, all the debug code will be
+ left out. If you compile with PCMCIA_DEBUG=0, the debug code will
+ be present but disabled -- but it can then be enabled for specific
+ modules at load time with a 'pc_debug=#' option to insmod.
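+
+  For example (illustrative invocation; assumes the module was built with
+  PCMCIA_DEBUG defined):
+
+      insmod airo_cs.o pc_debug=1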
+*/
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+MODULE_PARM(pc_debug, "i");
+static char *version = "$Revision: 1.1.18.1 $";
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
+#else
+#define DEBUG(n, args...)
+#endif
+
+/*====================================================================*/
+
+/* Parameters that can be set with 'insmod' */
+
+/* The old way: bit map of interrupts to choose from */
+/* This means pick from 15, 14, 12, 11, 10, 9, 7, 5, 4, and 3 */
+static u_int irq_mask = 0xdeb8;
+/* Newer, simpler way of listing specific interrupts */
+static int irq_list[4] = { -1 };
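+/* Illustrative load-time examples (the values are only examples):
+ *     insmod airo_cs.o irq_list=5,9,10
+ *     insmod airo_cs.o irq_mask=0x0228
+ */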
+
+MODULE_AUTHOR("Benjamin Reed");
+MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet \
+ cards. This is the module that links the PCMCIA card \
+ with the airo module.");
+MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards");
+MODULE_PARM(irq_mask, "i");
+MODULE_PARM(irq_list, "1-4i");
+
+/*====================================================================*/
+
+/*
+ The event() function is this driver's Card Services event handler.
+ It will be called by Card Services when an appropriate card status
+ event is received. The config() and release() entry points are
+ used to configure or release a socket, in response to card
+ insertion and ejection events. They are invoked from the airo_cs
+ event handler.
+*/
+
+struct net_device *init_airo_card( int, int, int );
+void stop_airo_card( struct net_device *, int );
+int reset_airo_card( struct net_device * );
+
+static void airo_config(dev_link_t *link);
+static void airo_release(u_long arg);
+static int airo_event(event_t event, int priority,
+ event_callback_args_t *args);
+
+/*
+ The attach() and detach() entry points are used to create and destroy
+ "instances" of the driver, where each instance represents everything
+ needed to manage one actual PCMCIA card.
+*/
+
+static dev_link_t *airo_attach(void);
+static void airo_detach(dev_link_t *);
+
+/*
+ You'll also need to prototype all the functions that will actually
+ be used to talk to your device. See 'pcmem_cs' for a good example
+ of a fully self-sufficient driver; the other drivers rely more or
+ less on other parts of the kernel.
+*/
+
+/*
+ The dev_info variable is the "key" that is used to match up this
+ device driver with appropriate cards, through the card configuration
+ database.
+*/
+
+static dev_info_t dev_info = "airo_cs";
+
+/*
+ A linked list of "instances" of the aironet device. Each actual
+ PCMCIA card corresponds to one device instance, and is described
+ by one dev_link_t structure (defined in ds.h).
+
+ You may not want to use a linked list for this -- for example, the
+ memory card driver uses an array of dev_link_t pointers, where minor
+ device numbers are used to derive the corresponding array index.
+*/
+
+static dev_link_t *dev_list = NULL;
+
+/*
+ A dev_link_t structure has fields for most things that are needed
+ to keep track of a socket, but there will usually be some device
+ specific information that also needs to be kept track of. The
+ 'priv' pointer in a dev_link_t structure can be used to point to
+ a device-specific private data structure, like this.
+
+ A driver needs to provide a dev_node_t structure for each device
+ on a card. In some cases, there is only one device per card (for
+ example, ethernet cards, modems). In other cases, there may be
+ many actual or logical devices (SCSI adapters, memory cards with
+ multiple partitions). The dev_node_t structures need to be kept
+ in a linked list starting at the 'dev' field of a dev_link_t
+ structure. We allocate them in the card's private data structure,
+ because they generally shouldn't be allocated dynamically.
+
+ In this case, we also provide a flag to indicate if a device is
+ "stopped" due to a power management event, or card ejection. The
+ device IO routines can use a flag like this to throttle IO to a
+ card that is not ready to accept it.
+*/
+
+typedef struct local_info_t {
+ dev_node_t node;
+ struct net_device *eth_dev;
+} local_info_t;
+
+/*====================================================================*/
+
+static void cs_error(client_handle_t handle, int func, int ret)
+{
+ error_info_t err = { func, ret };
+ CardServices(ReportError, handle, &err);
+}
+
+/*======================================================================
+
+ This bit of code is used to avoid unregistering network devices
+ at inappropriate times. 2.2 and later kernels are fairly picky
+ about when this can happen.
+
+ ======================================================================*/
+
+static void flush_stale_links(void)
+{
+ dev_link_t *link, *next;
+ for (link = dev_list; link; link = next) {
+ next = link->next;
+ if (link->state & DEV_STALE_LINK)
+ airo_detach(link);
+ }
+}
+
+/*======================================================================
+
+ airo_attach() creates an "instance" of the driver, allocating
+ local data structures for one device. The device is registered
+ with Card Services.
+
+ The dev_link structure is initialized, but we don't actually
+ configure the card at this point -- we wait until we receive a
+ card insertion event.
+
+ ======================================================================*/
+
+static dev_link_t *airo_attach(void)
+{
+ client_reg_t client_reg;
+ dev_link_t *link;
+ local_info_t *local;
+ int ret, i;
+
+ DEBUG(0, "airo_attach()\n");
+ flush_stale_links();
+
+ /* Initialize the dev_link_t structure */
+ link = kmalloc(sizeof(struct dev_link_t), GFP_KERNEL);
+ if (!link) {
+ printk(KERN_ERR "airo_cs: no memory for new device\n");
+ return NULL;
+ }
+ memset(link, 0, sizeof(struct dev_link_t));
+ link->release.function = &airo_release;
+ link->release.data = (u_long)link;
+
+ /* Interrupt setup */
+ link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
+ if (irq_list[0] == -1)
+ link->irq.IRQInfo2 = irq_mask;
+ else
+ for (i = 0; i < 4; i++)
+ link->irq.IRQInfo2 |= 1 << irq_list[i];
+ link->irq.Handler = NULL;
+
+ /*
+ General socket configuration defaults can go here. In this
+ client, we assume very little, and rely on the CIS for almost
+ everything. In most clients, many details (i.e., number, sizes,
+ and attributes of IO windows) are fixed by the nature of the
+ device, and can be hard-wired here.
+ */
+ link->conf.Attributes = 0;
+ link->conf.Vcc = 50;
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ /* Allocate space for private device-specific data */
+	local = kmalloc(sizeof(local_info_t), GFP_KERNEL);
+	if (!local) {
+		printk(KERN_ERR "airo_cs: no memory for device data\n");
+		kfree(link);
+		return NULL;
+	}
+	memset(local, 0, sizeof(local_info_t));
+ link->priv = local;
+
+ /* Register with Card Services */
+ link->next = dev_list;
+ dev_list = link;
+ client_reg.dev_info = &dev_info;
+ client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+ client_reg.EventMask =
+ CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+ CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
+ CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+ client_reg.event_handler = &airo_event;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ ret = CardServices(RegisterClient, &link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ airo_detach(link);
+ return NULL;
+ }
+
+ return link;
+} /* airo_attach */
+
+/*======================================================================
+
+ This deletes a driver "instance". The device is de-registered
+ with Card Services. If it has been released, all local data
+ structures are freed. Otherwise, the structures will be freed
+ when the device is released.
+
+ ======================================================================*/
+
+static void airo_detach(dev_link_t *link)
+{
+ dev_link_t **linkp;
+
+ DEBUG(0, "airo_detach(0x%p)\n", link);
+
+ /* Locate device structure */
+ for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+ if (*linkp == link) break;
+ if (*linkp == NULL)
+ return;
+
+ del_timer(&link->release);
+ if ( link->state & DEV_CONFIG ) {
+		airo_release( (u_long)link );
+ if ( link->state & DEV_STALE_CONFIG ) {
+ link->state |= DEV_STALE_LINK;
+ return;
+ }
+ }
+
+ if ( ((local_info_t*)link->priv)->eth_dev ) {
+ stop_airo_card( ((local_info_t*)link->priv)->eth_dev, 0 );
+ }
+ ((local_info_t*)link->priv)->eth_dev = 0;
+
+ /* Break the link with Card Services */
+ if (link->handle)
+ CardServices(DeregisterClient, link->handle);
+
+ /* Unlink device structure, free pieces */
+ *linkp = link->next;
+ if (link->priv) {
+ kfree(link->priv);
+ }
+ kfree(link);
+
+} /* airo_detach */
+
+/*======================================================================
+
+ airo_config() is scheduled to run after a CARD_INSERTION event
+ is received, to configure the PCMCIA socket, and to make the
+ device available to the system.
+
+ ======================================================================*/
+
+#define CS_CHECK(fn, args...) \
+while ((last_ret=CardServices(last_fn=(fn),args))!=0) goto cs_failed
+
+#define CFG_CHECK(fn, args...) \
+if (CardServices(fn, args) != 0) goto next_entry
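+/* CS_CHECK records the failing Card Services call in last_fn/last_ret and
+ * jumps to cs_failed; CFG_CHECK just skips to the next CIS entry on failure. */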
+
+static void airo_config(dev_link_t *link)
+{
+ client_handle_t handle;
+ tuple_t tuple;
+ cisparse_t parse;
+ local_info_t *dev;
+ int last_fn, last_ret;
+ u_char buf[64];
+ win_req_t req;
+ memreq_t map;
+
+ handle = link->handle;
+ dev = link->priv;
+
+ DEBUG(0, "airo_config(0x%p)\n", link);
+
+ /*
+ This reads the card's CONFIG tuple to find its configuration
+ registers.
+ */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+ tuple.Attributes = 0;
+ tuple.TupleData = buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ CS_CHECK(GetTupleData, handle, &tuple);
+ CS_CHECK(ParseTuple, handle, &tuple, &parse);
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ /*
+ In this loop, we scan the CIS for configuration table entries,
+ each of which describes a valid card configuration, including
+ voltage, IO window, memory window, and interrupt settings.
+
+ We make no assumptions about the card to be configured: we use
+ just the information available in the CIS. In an ideal world,
+ this would work for any PCMCIA card, but it requires a complete
+ and accurate CIS. In practice, a driver usually "knows" most of
+ these things without consulting the CIS, and most client drivers
+ will only use the CIS to fill in implementation-defined details.
+ */
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+ CS_CHECK(GetFirstTuple, handle, &tuple);
+ while (1) {
+ cistpl_cftable_entry_t dflt = { 0 };
+ cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
+ CFG_CHECK(GetTupleData, handle, &tuple);
+ CFG_CHECK(ParseTuple, handle, &tuple, &parse);
+
+ if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
+ if (cfg->index == 0) goto next_entry;
+ link->conf.ConfigIndex = cfg->index;
+
+ /* Does this card need audio output? */
+ if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
+ link->conf.Attributes |= CONF_ENABLE_SPKR;
+ link->conf.Status = CCSR_AUDIO_ENA;
+ }
+
+ /* Use power settings for Vcc and Vpp if present */
+ /* Note that the CIS values need to be rescaled */
+ if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vcc = cfg->vcc.param[CISTPL_POWER_VNOM]/10000;
+ else if (dflt.vcc.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vcc = dflt.vcc.param[CISTPL_POWER_VNOM]/10000;
+
+ if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
+ else if (dflt.vpp1.present & (1<<CISTPL_POWER_VNOM))
+ link->conf.Vpp1 = link->conf.Vpp2 =
+ dflt.vpp1.param[CISTPL_POWER_VNOM]/10000;
+
+ /* Do we need to allocate an interrupt? */
+ if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
+ link->conf.Attributes |= CONF_ENABLE_IRQ;
+
+ /* IO window settings */
+ link->io.NumPorts1 = link->io.NumPorts2 = 0;
+ if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
+ cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ if (!(io->flags & CISTPL_IO_8BIT))
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+ if (!(io->flags & CISTPL_IO_16BIT))
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+ link->io.BasePort1 = io->win[0].base;
+ link->io.NumPorts1 = io->win[0].len;
+ if (io->nwin > 1) {
+ link->io.Attributes2 = link->io.Attributes1;
+ link->io.BasePort2 = io->win[1].base;
+ link->io.NumPorts2 = io->win[1].len;
+ }
+ }
+
+ /* This reserves IO space but doesn't actually enable it */
+ CFG_CHECK(RequestIO, link->handle, &link->io);
+
+ /*
+ Now set up a common memory window, if needed. There is room
+ in the dev_link_t structure for one memory window handle,
+ but if the base addresses need to be saved, or if multiple
+ windows are needed, the info should go in the private data
+ structure for this device.
+
+ Note that the memory window base is a physical address, and
+ needs to be mapped to virtual space with ioremap() before it
+ is used.
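+
+	  A sketch only (this driver does not map the window itself; "virt" and
+	  "offset" are illustrative names):
+
+	      virt = ioremap(req.Base, req.Size);
+	      ... readb(virt + offset) ...
+	      iounmap(virt);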
+ */
+ if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) {
+ cistpl_mem_t *mem =
+ (cfg->mem.nwin) ? &cfg->mem : &dflt.mem;
+ req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
+ req.Base = mem->win[0].host_addr;
+ req.Size = mem->win[0].len;
+ req.AccessSpeed = 0;
+ link->win = (window_handle_t)link->handle;
+ CFG_CHECK(RequestWindow, &link->win, &req);
+ map.Page = 0; map.CardOffset = mem->win[0].card_addr;
+ CFG_CHECK(MapMemPage, link->win, &map);
+ }
+ /* If we got this far, we're cool! */
+ break;
+
+ next_entry:
+ CS_CHECK(GetNextTuple, handle, &tuple);
+ }
+
+ /*
+ Allocate an interrupt line. Note that this does not assign a
+ handler to the interrupt, unless the 'Handler' member of the
+ irq structure is initialized.
+ */
+ if (link->conf.Attributes & CONF_ENABLE_IRQ)
+ CS_CHECK(RequestIRQ, link->handle, &link->irq);
+
+ /*
+ This actually configures the PCMCIA socket -- setting up
+ the I/O windows and the interrupt mapping, and putting the
+ card and host interface into "Memory and IO" mode.
+ */
+ CS_CHECK(RequestConfiguration, link->handle, &link->conf);
+ ((local_info_t*)link->priv)->eth_dev =
+ init_airo_card( link->irq.AssignedIRQ,
+ link->io.BasePort1, 1 );
+ if (!((local_info_t*)link->priv)->eth_dev) goto cs_failed;
+
+ /*
+ At this point, the dev_node_t structure(s) need to be
+ initialized and arranged in a linked list at link->dev.
+ */
+ strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
+ dev->node.major = dev->node.minor = 0;
+ link->dev = &dev->node;
+
+ /* Finally, report what we've done */
+ printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
+ dev->node.dev_name, link->conf.ConfigIndex,
+ link->conf.Vcc/10, link->conf.Vcc%10);
+ if (link->conf.Vpp1)
+ printk(", Vpp %d.%d", link->conf.Vpp1/10, link->conf.Vpp1%10);
+ if (link->conf.Attributes & CONF_ENABLE_IRQ)
+ printk(", irq %d", link->irq.AssignedIRQ);
+ if (link->io.NumPorts1)
+ printk(", io 0x%04x-0x%04x", link->io.BasePort1,
+ link->io.BasePort1+link->io.NumPorts1-1);
+ if (link->io.NumPorts2)
+ printk(" & 0x%04x-0x%04x", link->io.BasePort2,
+ link->io.BasePort2+link->io.NumPorts2-1);
+ if (link->win)
+ printk(", mem 0x%06lx-0x%06lx", req.Base,
+ req.Base+req.Size-1);
+ printk("\n");
+
+ link->state &= ~DEV_CONFIG_PENDING;
+ return;
+
+ cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+ airo_release((u_long)link);
+
+} /* airo_config */
+
+/*======================================================================
+
+ After a card is removed, airo_release() will unregister the
+ device, and release the PCMCIA configuration. If the device is
+ still open, this will be postponed until it is closed.
+
+ ======================================================================*/
+
+static void airo_release(u_long arg)
+{
+ dev_link_t *link = (dev_link_t *)arg;
+
+ DEBUG(0, "airo_release(0x%p)\n", link);
+
+ /*
+ If the device is currently in use, we won't release until it
+ is actually closed, because until then, we can't be sure that
+ no one will try to access the device or its data structures.
+ */
+ if (link->open) {
+ DEBUG(1, "airo_cs: release postponed, '%s' still open\n",
+ link->dev->dev_name);
+ link->state |= DEV_STALE_CONFIG;
+ return;
+ }
+
+ /* Unlink the device chain */
+ link->dev = NULL;
+
+ /*
+ In a normal driver, additional code may be needed to release
+ other kernel data structures associated with this device.
+ */
+
+ /* Don't bother checking to see if these succeed or not */
+ if (link->win)
+ CardServices(ReleaseWindow, link->win);
+ CardServices(ReleaseConfiguration, link->handle);
+ if (link->io.NumPorts1)
+ CardServices(ReleaseIO, link->handle, &link->io);
+ if (link->irq.AssignedIRQ)
+ CardServices(ReleaseIRQ, link->handle, &link->irq);
+ link->state &= ~DEV_CONFIG;
+
+} /* airo_release */
+
+/*======================================================================
+
+ The card status event handler. Mostly, this schedules other
+ stuff to run after an event is received.
+
+ When a CARD_REMOVAL event is received, we immediately set a
+ private flag to block future accesses to this device. All the
+ functions that actually access the device should check this flag
+ to make sure the card is still present.
+
+ ======================================================================*/
+
+static int airo_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+ local_info_t *local = link->priv;
+
+ DEBUG(1, "airo_event(0x%06x)\n", event);
+
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(local->eth_dev);
+ mod_timer(&link->release, jiffies + HZ/20);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ airo_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ if (link->state & DEV_CONFIG) {
+ netif_device_detach(local->eth_dev);
+ CardServices(ReleaseConfiguration, link->handle);
+ }
+ break;
+ case CS_EVENT_PM_RESUME:
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ if (link->state & DEV_CONFIG) {
+ CardServices(RequestConfiguration, link->handle, &link->conf);
+ reset_airo_card(local->eth_dev);
+ netif_device_attach(local->eth_dev);
+ }
+ break;
+ }
+ return 0;
+} /* airo_event */
+
+/*====================================================================*/
+
+static int airo_cs_init(void)
+{
+ servinfo_t serv;
+ DEBUG(0, "%s\n", version);
+ CardServices(GetCardServicesInfo, &serv);
+ if (serv.Revision != CS_RELEASE_CODE) {
+ printk(KERN_NOTICE "airo_cs: Card Services release "
+ "does not match!\n");
+ return -1;
+ }
+ register_pcmcia_driver(&dev_info, &airo_attach, &airo_detach);
+ return 0;
+}
+
+static void airo_cs_cleanup(void)
+{
+ DEBUG(0, "airo_cs: unloading\n");
+ unregister_pcmcia_driver(&dev_info);
+ while (dev_list != NULL) {
+ if (dev_list->state & DEV_CONFIG)
+ airo_release((u_long)dev_list);
+ airo_detach(dev_list);
+ }
+}
+
+/*
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ In addition:
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+*/
+
+module_init(airo_cs_init);
+module_exit(airo_cs_cleanup);
struct dldwd_priv priv;
} dldwd_card_t;
-static char *version = "orinoco_cs.c 0.06 (David Gibson <hermes@gibson.dropbear.id.au> and others)";
+static char version[] __initdata =
+"orinoco_cs.c 0.06 (David Gibson <hermes@gibson.dropbear.id.au> and others)";
/*====================================================================*/
TRACE_ENTER("dldwd");
- printk(KERN_INFO "dldwd: David's Less Dodgy WaveLAN/IEEE Driver\n");
-
- DEBUG(0, "%s\n", version);
+ printk(KERN_INFO "dldwd: David's Less Dodgy WaveLAN/IEEE Driver\n"
+ KERN_INFO "%s\n", version);
CardServices(GetCardServicesInfo, &serv);
if (serv.Revision != CS_RELEASE_CODE) {
*/
+#define DRV_NAME "yellowfin"
+#define DRV_VERSION "1.05+LK1.1.3"
+#define DRV_RELDATE "May 10, 2001"
+
+#define PFX DRV_NAME ": "
+
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/bitops.h>
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
-KERN_INFO "yellowfin.c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
+KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
-KERN_INFO " (unofficial 2.4.x port, LK1.1.3, May 10, 2001)\n";
+KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
/* Condensed operations for readability. */
#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(gx_fix, "i");
+MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
+MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
+MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
+MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
/*
Theory of Operation
static int read_eeprom(long ioaddr, int location);
static int mdio_read(long ioaddr, int phy_id, int location);
static void mdio_write(long ioaddr, int phy_id, int location, int value);
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
dev = alloc_etherdev(sizeof(*np));
if (!dev) {
- printk (KERN_ERR "yellowfin: cannot allocate ethernet device\n");
+ printk (KERN_ERR PFX "cannot allocate ethernet device\n");
return -ENOMEM;
}
SET_MODULE_OWNER(dev);
dev->stop = &yellowfin_close;
dev->get_stats = &yellowfin_get_stats;
dev->set_multicast_list = &set_rx_mode;
- dev->do_ioctl = &mii_ioctl;
+ dev->do_ioctl = &netdev_ioctl;
dev->tx_timeout = yellowfin_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
outw(cfg_value | 0x1000, ioaddr + Cnfg);
}
-static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
+{
+ struct yellowfin_private *np = dev->priv;
+ u32 ethcmd;
+
+ if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GDRVINFO: {
+ struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
+ strcpy(info.driver, DRV_NAME);
+ strcpy(info.version, DRV_VERSION);
+ strcpy(info.bus_info, np->pci_dev->slot_name);
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct yellowfin_private *np = dev->priv;
long ioaddr = dev->base_addr;
u16 *data = (u16 *)&rq->ifr_data;
switch(cmd) {
+ case SIOCETHTOOL:
+ return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
data[0] = np->phys[0] & 0x1f;
/* Fall Through */
static struct pci_driver yellowfin_driver = {
- name: "yellowfin",
+ name: DRV_NAME,
id_table: yellowfin_pci_tbl,
probe: yellowfin_init_one,
remove: yellowfin_remove_one,
11d4 0048 SoundMAX Integrated Digital Audio
2426 82801AB AC'97 Modem
2428 82801AB PCI Bridge
- 2440 82820 820 (Camino 2) Chipset ISA Bridge (ICH2)
- 2442 82820 820 (Camino 2) Chipset USB (Hub A)
- 2443 82820 820 (Camino 2) Chipset SMBus
- 2444 82820 820 (Camino 2) Chipset USB (Hub B)
- 2449 82820 820 (Camino 2) Chipset Ethernet
- 244b 82820 820 (Camino 2) Chipset IDE U100
- 244e 82820 820 (Camino 2) Chipset PCI
+ 2440 82801BA ISA Bridge (ICH2)
+ 2442 82801BA(M) USB (Hub A)
+ 2443 82801BA(M) SMBus
+ 2444 82801BA(M) USB (Hub B)
+ 2445 82801BA(M) AC'97 Audio
+ 2446 82801BA(M) AC'97 Modem
+ 2448 82801BA PCI
+ 2449 82801BA(M) Ethernet
+ 244a 82801BAM IDE U100
+ 244b 82801BA IDE U100
+ 244c 82801BAM ISA Bridge (ICH2)
+ 244e 82801BAM PCI
2500 82820 820 (Camino) Chipset Host Bridge (MCH)
1043 801c P3C-2000 system chipset
2501 82820 820 (Camino) Chipset Host Bridge (MCH)
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/delay.h>
#undef DEBUG
/*
* VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
* devices to the external APIC.
+ *
+ * TODO: When we have device-specific interrupt routers,
+ * this code will go away from quirks.
*/
static void __init quirk_via_ioapic(struct pci_dev *dev)
{
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
+ printk(KERN_INFO "PCI: %sbling Via external APIC routing\n",
+ tmp == 0 ? "Disa" : "Ena");
+
/* Offset 0x58: External APIC IRQ output control */
pci_write_config_byte (dev, 0x58, tmp);
}
#endif /* CONFIG_X86_IO_APIC */
+/*
+ * Via 686A/B: The PCI_INTERRUPT_LINE register for the on-chip
+ * devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature:
+ * when written, it makes an internal connection to the PIC.
+ * For these devices, this register is defined to be 4 bits wide.
+ * Normally this is fine. However for IO-APIC motherboards, or
+ * non-x86 architectures (yes Via exists on PPC among other places),
+ * we must mask the PCI_INTERRUPT_LINE value with 0xf to get
+ * interrupts delivered properly.
+ *
+ * TODO: When we have device-specific interrupt routers,
+ * quirk_via_irqpic will go away from quirks.
+ */
+
+/*
+ * FIXME: it is questionable that quirk_via_acpi
+ * is needed. It shows up as an ISA bridge, and does not
+ * support the PCI_INTERRUPT_LINE register at all. Therefore
+ * it seems like setting the pci_dev's 'irq' to the
+ * value of the ACPI SCI interrupt is only done for convenience.
+ * -jgarzik
+ */
+static void __init quirk_via_acpi(struct pci_dev *d)
+{
+ /*
+ * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
+ */
+ u8 irq;
+ pci_read_config_byte(d, 0x42, &irq);
+ irq &= 0xf;
+ if (irq && (irq != 2))
+ d->irq = irq;
+}
+
+static void __init quirk_via_irqpic(struct pci_dev *dev)
+{
+ u8 irq, new_irq = dev->irq & 0xf;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+
+ if (new_irq != irq) {
+ printk(KERN_INFO "PCI: Via IRQ fixup for %s, from %d to %d\n",
+ dev->slot_name, irq, new_irq);
+
+ udelay(15);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
+ }
+}
+
+
/*
* PIIX3 USB: We have to disable USB interrupts that are
* hardwired to PIRQD# and may be shared with an
#ifdef CONFIG_X86_IO_APIC
{ PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic },
#endif
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi },
+ { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irqpic },
+ { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irqpic },
+ { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_6, quirk_via_irqpic },
{ 0 }
};
/*======================================================================
- PCMCIA Card Services -- core services
+ Kernel Card Services -- core services
cs.c 1.271 2000/10/02 20:27:49
#define OPTIONS PCI_OPT CB_OPT PM_OPT
#endif
-static const char *release = "Linux PCMCIA Card Services " CS_RELEASE;
+static const char *release = "Linux Kernel Card Services " CS_RELEASE;
static const char *options = "options: " OPTIONS;
MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
-MODULE_DESCRIPTION("Linux PCMCIA Card Services " CS_RELEASE
+MODULE_DESCRIPTION("Linux Kernel Card Services " CS_RELEASE
"\n options:" OPTIONS);
/*====================================================================*/
static void __exit exit_pcmcia_cs(void)
{
- printk(KERN_INFO "unloading PCMCIA Card Services\n");
+ printk(KERN_INFO "unloading Kernel Card Services\n");
#ifdef CONFIG_PROC_FS
if (proc_pccard) {
remove_proc_entry("pccard", proc_bus);
/* MAGIC NUMBERS! Fixme */
config_writeb(socket, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES / 4);
config_writeb(socket, PCI_LATENCY_TIMER, 168);
- config_writeb(socket, PCI_SEC_LATENCY_TIMER, 176);
- config_writeb(socket, PCI_PRIMARY_BUS, dev->bus->number);
- config_writeb(socket, PCI_SECONDARY_BUS, dev->subordinate->number);
- config_writeb(socket, PCI_SUBORDINATE_BUS, dev->subordinate->number);
+ config_writel(socket, PCI_PRIMARY_BUS,
+ (176 << 24) | /* sec. latency timer */
+ (dev->subordinate->subordinate << 16) | /* subordinate bus */
+ (dev->subordinate->secondary << 8) | /* secondary bus */
+ dev->subordinate->primary); /* primary bus */
/*
* Set up the bridging state:
-/* $Id: aurora.h,v 1.5 1999/12/02 09:55:16 davem Exp $
+/* $Id: aurora.h,v 1.6 2001/06/05 12:23:38 davem Exp $
* linux/drivers/sbus/char/aurora.h -- Aurora multiport driver
*
* Copyright (c) 1999 by Oliver Aldulea (oli@bv.ro)
struct tty_struct * tty;
int count;
int blocked_open;
- int event;
+ long event;
int timeout;
int close_delay;
long session;
#ifdef MODULE
-Scsi_Host_Template driver_template = CPQFCTS;
+static Scsi_Host_Template driver_template = CPQFCTS;
#include "scsi_module.c"
#else
-Scsi_Host_Template driver_template = GDTH;
+static Scsi_Host_Template driver_template = GDTH;
#include "scsi_module.c"
#ifndef MODULE
__setup("gdth=", option_setup);
#define DEFAULT_LOOP_COUNT 1000000
-#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
-
/* End Configuration section *************************************************/
#include <linux/module.h>
}
#ifndef NOT_CS4281_PM
-void printpm(struct cs4281_state *s)
+static void printpm(struct cs4281_state *s)
{
CS_DBGOUT(CS_PM, 9, printk("pm struct:\n"));
CS_DBGOUT(CS_PM, 9, printk("flags:0x%x u32CLKCR1_SAVE: 0%x u32SSPMValue: 0x%x\n",
s->pm.u32MIDCR_Save));
}
-void printpipe(struct cs4281_pipeline *pl)
+static void printpipe(struct cs4281_pipeline *pl)
{
CS_DBGOUT(CS_PM, 9, printk("pm struct:\n"));
CS_DBGOUT(CS_PM, 9, printk("u32FPDRnValue: 0x%x u32FPDRnAddress: 0x%x\n",
pl->u32FPDRnValue,pl->u32FPDRnAddress));
}
-void printpipelines(struct cs4281_state *s)
+static void printpipelines(struct cs4281_state *s)
{
int i;
for(i=0;i<CS4281_NUMBER_OF_PIPELINES;i++)
return 0;
}
-void printpm(struct cs_card *s)
+static void printpm(struct cs_card *s)
{
CS_DBGOUT(CS_PM, 9, printk("pm struct:\n"));
CS_DBGOUT(CS_PM, 9, printk("flags:0x%x u32CLKCR1_SAVE: 0%x u32SSPMValue: 0x%x\n",
static void cs461x_reset(struct cs_card *card);
static void cs461x_proc_stop(struct cs_card *card);
-int cs46xx_suspend(struct cs_card *card)
+static int cs46xx_suspend(struct cs_card *card)
{
unsigned int tmp;
CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
return 0;
}
-int cs46xx_resume(struct cs_card *card)
+static int cs46xx_resume(struct cs_card *card)
{
int i;
{
int rc;
struct via_info *card;
- u8 tmp;
static int printed_version = 0;
DPRINTK ("ENTER\n");
goto err_out_have_proc;
}
- pci_read_config_byte (pdev, 0x3C, &tmp);
- if ((tmp & 0x0F) != pdev->irq) {
- printk (KERN_WARNING PFX "IRQ fixup, 0x3C==0x%02X\n", tmp);
- udelay (15);
- tmp &= 0xF0;
- tmp |= pdev->irq;
- pci_write_config_byte (pdev, 0x3C, tmp);
- DPRINTK ("new 0x3c==0x%02x\n", tmp);
- } else {
- DPRINTK ("IRQ reg 0x3c==0x%02x, irq==%d\n",
- tmp, tmp & 0x0F);
- }
-
printk (KERN_INFO PFX "board #%d at 0x%04lX, IRQ %d\n",
card->card_num + 1, card->baseaddr, pdev->irq);
return (((DWORD) wHi << 16) | wLo);
}
-#ifndef CONFIG_PCMCIA
#ifndef CONFIG_ISAPNP
-static int dspio[IXJMAX + 1] =
-{
- 0,
-};
-static int xio[IXJMAX + 1] =
-{
- 0,
-};
+static int dspio[IXJMAX + 1];
+static int xio[IXJMAX + 1];
MODULE_PARM(dspio, "1-" __MODULE_STRING(IXJMAX) "i");
MODULE_PARM(xio, "1-" __MODULE_STRING(IXJMAX) "i");
#endif
-#endif
void ixj_exit(void)
{
dep_tristate 'SMB file system support (to mount Windows shares etc.)' CONFIG_SMB_FS $CONFIG_INET
if [ "$CONFIG_SMB_FS" != "n" ]; then
- bool ' Use a default NLS' CONFIG_SMB_NLS_DEFAULT
+ bool ' Use a default NLS' CONFIG_SMB_NLS_DEFAULT
if [ "$CONFIG_SMB_NLS_DEFAULT" = "y" ]; then
- string ' Default Remote NLS Option' CONFIG_SMB_NLS_REMOTE "cp437"
+ string ' Default Remote NLS Option' CONFIG_SMB_NLS_REMOTE "cp437"
fi
fi
if [ "$CONFIG_IPX" != "n" -o "$CONFIG_INET" != "n" ]; then
#define AUTOFS_FIRST_SYMLINK 2
#define AUTOFS_FIRST_DIR_INO (AUTOFS_FIRST_SYMLINK+AUTOFS_MAX_SYMLINKS)
-#define AUTOFS_SYMLINK_BITMAP_LEN ((AUTOFS_MAX_SYMLINKS+31)/32)
+#define AUTOFS_SYMLINK_BITMAP_LEN \
+ ((AUTOFS_MAX_SYMLINKS+((sizeof(long)*1)-1))/(sizeof(long)*8))
#define AUTOFS_SBI_MAGIC 0x6d4a556d
struct autofs_wait_queue *queues; /* Wait queue pointer */
struct autofs_dirhash dirhash; /* Root directory hash */
struct autofs_symlink symlink[AUTOFS_MAX_SYMLINKS];
- u32 symlink_bitmap[AUTOFS_SYMLINK_BITMAP_LEN];
+ unsigned long symlink_bitmap[AUTOFS_SYMLINK_BITMAP_LEN];
};
extern inline struct autofs_sb_info *autofs_sbi(struct super_block *sb)
sbi->oz_pgrp = current->pgrp;
autofs_initialize_hash(&sbi->dirhash);
sbi->queues = NULL;
- memset(sbi->symlink_bitmap, 0, sizeof(u32)*AUTOFS_SYMLINK_BITMAP_LEN);
+ memset(sbi->symlink_bitmap, 0, sizeof(long)*AUTOFS_SYMLINK_BITMAP_LEN);
sbi->next_dir_ino = AUTOFS_FIRST_DIR_INO;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
20010604 Richard Gooch <rgooch@atnf.csiro.au>
Adjusted <try_modload> to account for <devfs_generate_path> fix.
v0.105
+ 20010617 Richard Gooch <rgooch@atnf.csiro.au>
+ Answered question posed by Al Viro and removed his comments.
+ Moved setting of registered flag after other fields are changed.
+ Fixed race between <devfsd_close> and <devfsd_notify_one>.
+ Global VFS changes added bogus BKL to <devfsd_close>: removed.
+ Widened locking in <devfs_readlink> and <devfs_follow_link>.
+ Replaced <devfsd_read> stack usage with <devfsd_ioctl> kmalloc.
+ Simplified locking in <devfsd_ioctl> and fixed memory leak.
+ v0.106
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
-#define DEVFS_VERSION "0.105 (20010604)"
+#define DEVFS_VERSION "0.106 (20010617)"
#define DEVFS_NAME "devfs"
gid_t gid;
};
-struct fs_info /* This structure is for each mounted devfs */
+struct fs_info /* This structure is for the mounted devfs */
{
unsigned int num_inodes; /* Number of inodes created */
unsigned int table_size; /* Size of the inode pointer table */
struct devfs_entry **table;
struct super_block *sb;
volatile struct devfsd_buf_entry *devfsd_buffer;
+ spinlock_t devfsd_buffer_lock;
volatile unsigned int devfsd_buf_in;
volatile unsigned int devfsd_buf_out;
volatile int devfsd_sleeping;
- volatile int devfsd_buffer_in_use;
volatile struct task_struct *devfsd_task;
volatile struct file *devfsd_file;
+ struct devfsd_notify_struct *devfsd_info;
volatile unsigned long devfsd_event_mask;
atomic_t devfsd_overrun_count;
wait_queue_head_t devfsd_wait_queue;
wait_queue_head_t revalidate_wait_queue;
};
-static struct fs_info fs_info;
+static struct fs_info fs_info = {devfsd_buffer_lock: SPIN_LOCK_UNLOCKED};
static unsigned int next_devnum_char = MIN_DEVNUM;
static unsigned int next_devnum_block = MIN_DEVNUM;
static const int devfsd_buf_size = PAGE_SIZE / sizeof(struct devfsd_buf_entry);
/* Always ensure the root is created */
if (root_entry != NULL) return root_entry;
if ( ( root_entry = create_entry (NULL, NULL, 0) ) == NULL ) return NULL;
- root_entry->registered = TRUE;
root_entry->mode = S_IFDIR;
/* Force an inode update, because lookup() is never done for the root */
update_devfs_inode_from_entry (root_entry);
+ root_entry->registered = TRUE;
/* And create the entry for ".devfsd" */
if ( ( new = create_entry (root_entry, ".devfsd", 0) ) == NULL )
return NULL;
- new->registered = TRUE;
new->u.fcb.u.device.major = next_devnum_char >> 8;
new->u.fcb.u.device.minor = next_devnum_char & 0xff;
++next_devnum_char;
new->u.fcb.default_uid = 0;
new->u.fcb.default_gid = 0;
new->u.fcb.ops = &devfsd_fops;
+ new->registered = TRUE;
return root_entry;
} /* End Function get_root_entry */
return NULL;
}
/* Ensure an unregistered entry is re-registered and visible */
- entry->registered = TRUE;
entry->hide = FALSE;
+ entry->registered = TRUE;
subname = ptr + 1;
dir = entry;
}
return find_by_dev (root_entry, major, minor, type);
} /* End Function find_entry */
-static struct devfs_entry *get_devfs_entry_from_vfs_inode (struct inode *inode)
+static struct devfs_entry *get_devfs_entry_from_vfs_inode (struct inode *inode,
+ int do_check)
{
+ struct devfs_entry *de;
struct fs_info *fs_info;
if (inode == NULL) return NULL;
fs_info = inode->i_sb->u.generic_sbp;
if (fs_info == NULL) return NULL;
if (inode->i_ino - FIRST_INODE >= fs_info->num_inodes) return NULL;
- return fs_info->table[inode->i_ino - FIRST_INODE];
+ de = fs_info->table[inode->i_ino - FIRST_INODE];
+ if (do_check && de && !de->registered) de = NULL;
+ return de;
} /* End Function get_devfs_entry_from_vfs_inode */
{
struct dentry *dentry;
- spin_lock(&dcache_lock);
+ spin_lock (&dcache_lock);
dentry = de->inode.dentry;
if (dentry != NULL)
{
dget_locked (dentry);
de->inode.dentry = NULL;
- spin_unlock(&dcache_lock);
+ spin_unlock (&dcache_lock);
/* Forcefully remove the inode */
if (dentry->d_inode != NULL) dentry->d_inode->i_nlink = 0;
d_drop (dentry);
dput (dentry);
- } else
- spin_unlock(&dcache_lock);
+ }
+ else spin_unlock (&dcache_lock);
} /* End Function free_dentries */
add_wait_queue (&fs_info->revalidate_wait_queue, &wait);
current->state = TASK_UNINTERRUPTIBLE;
if (!devfsd_queue_empty (fs_info) || !fs_info->devfsd_sleeping)
- if (fs_info->devfsd_task) schedule();
+ if (fs_info->devfsd_task) schedule ();
remove_wait_queue (&fs_info->revalidate_wait_queue, &wait);
current->state = TASK_RUNNING;
return (TRUE);
unsigned int next_pos;
unsigned long flags;
struct devfsd_buf_entry *entry;
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
if ( !( fs_info->devfsd_event_mask & (1 << type) ) ) return (FALSE);
next_pos = fs_info->devfsd_buf_in + 1;
atomic_inc (&fs_info->devfsd_overrun_count);
return (FALSE);
}
- spin_lock_irqsave (&lock, flags);
- fs_info->devfsd_buffer_in_use = TRUE;
+ spin_lock_irqsave (&fs_info->devfsd_buffer_lock, flags);
next_pos = fs_info->devfsd_buf_in + 1;
if (next_pos >= devfsd_buf_size) next_pos = 0;
entry = (struct devfsd_buf_entry *) fs_info->devfsd_buffer +
entry->uid = uid;
entry->gid = gid;
fs_info->devfsd_buf_in = next_pos;
- fs_info->devfsd_buffer_in_use = FALSE;
- spin_unlock_irqrestore (&lock, flags);
+ spin_unlock_irqrestore (&fs_info->devfsd_buffer_lock, flags);
wake_up_interruptible (&fs_info->devfsd_wait_queue);
return (TRUE);
} /* End Function devfsd_notify_one */
return NULL;
}
}
- de->registered = TRUE;
if ( S_ISCHR (mode) || S_ISBLK (mode) )
{
de->u.fcb.u.device.major = major;
de->u.fcb.default_uid = 0;
de->u.fcb.default_gid = 0;
}
- de->registered = TRUE;
de->u.fcb.ops = ops;
de->u.fcb.auto_owner = (flags & DEVFS_FL_AUTO_OWNER) ? TRUE : FALSE;
de->u.fcb.aopen_notify = (flags & DEVFS_FL_AOPEN_NOTIFY) ? TRUE : FALSE;
|| (flags & DEVFS_FL_SHOW_UNREG) ) ? TRUE : FALSE;
de->hide = (flags & DEVFS_FL_HIDE) ? TRUE : FALSE;
de->no_persistence = (flags & DEVFS_FL_NO_PERSISTENCE) ? TRUE : FALSE;
+ de->registered = TRUE;
devfsd_notify (de, DEVFSD_NOTIFY_REGISTERED, flags & DEVFS_FL_WAIT);
return de;
} /* End Function devfs_register */
de->mode = S_IFDIR | S_IRUGO | S_IXUGO;
de->info = info;
if (!de->registered) de->u.dir.num_removable = 0;
- de->registered = TRUE;
de->show_unreg = (boot_options & OPTION_SHOW) ? TRUE : FALSE;
de->hide = FALSE;
+ de->registered = TRUE;
return de;
} /* End Function devfs_mk_dir */
{
if (!inode || !inode->i_sb) return NULL;
if (inode->i_sb->s_magic != DEVFS_SUPER_MAGIC) return NULL;
- return get_devfs_entry_from_vfs_inode (inode);
+ return get_devfs_entry_from_vfs_inode (inode, TRUE);
} /* End Function devfs_get_handle_from_inode */
{
struct devfs_entry *de;
- de = get_devfs_entry_from_vfs_inode (inode);
+ de = get_devfs_entry_from_vfs_inode (inode, TRUE);
if (de == NULL)
{
printk ("%s: read_inode(%d): VFS inode: %p NO devfs_entry\n",
if (inode->i_ino < FIRST_INODE) return;
index = inode->i_ino - FIRST_INODE;
- lock_kernel();
+ lock_kernel ();
if (index >= fs_info->num_inodes)
{
printk ("%s: writing inode: %lu for which there is no entry!\n",
DEVFS_NAME, inode->i_ino);
- unlock_kernel();
+ unlock_kernel ();
return;
}
de = fs_info->table[index];
de->inode.atime = inode->i_atime;
de->inode.mtime = inode->i_mtime;
de->inode.ctime = inode->i_ctime;
- unlock_kernel();
+ unlock_kernel ();
} /* End Function devfs_write_inode */
static int devfs_notify_change (struct dentry *dentry, struct iattr *iattr)
struct inode *inode = dentry->d_inode;
struct fs_info *fs_info = inode->i_sb->u.generic_sbp;
- de = get_devfs_entry_from_vfs_inode (inode);
+ de = get_devfs_entry_from_vfs_inode (inode, TRUE);
if (de == NULL) return -ENODEV;
retval = inode_change_ok (inode, iattr);
if (retval != 0) return retval;
struct inode *inode = file->f_dentry->d_inode;
fs_info = inode->i_sb->u.generic_sbp;
- parent = get_devfs_entry_from_vfs_inode (file->f_dentry->d_inode);
+ parent = get_devfs_entry_from_vfs_inode (file->f_dentry->d_inode, TRUE);
if ( (long) file->f_pos < 0 ) return -EINVAL;
#ifdef CONFIG_DEVFS_DEBUG
if (devfs_debug & DEBUG_F_READDIR)
struct devfs_entry *de;
struct fs_info *fs_info = inode->i_sb->u.generic_sbp;
- lock_kernel();
- de = get_devfs_entry_from_vfs_inode (inode);
+ lock_kernel ();
+ de = get_devfs_entry_from_vfs_inode (inode, TRUE);
err = -ENODEV;
if (de == NULL)
goto out;
if ( S_ISDIR (de->mode) )
goto out;
df = &de->u.fcb;
- err = -ENODEV;
- if (!de->registered)
- goto out;
file->private_data = de->info;
if ( S_ISBLK (inode->i_mode) )
{
file->f_op = &def_blk_fops;
if (df->ops) inode->i_bdev->bd_op = df->ops;
}
- else file->f_op = fops_get((struct file_operations*)df->ops);
+ else file->f_op = fops_get ( (struct file_operations*) df->ops );
if (file->f_op)
err = file->f_op->open ? (*file->f_op->open) (inode, file) : 0;
else
{
/* Fallback to legacy scheme */
- /*
- * Do we need it? Richard, could you verify it?
- * It can legitimately happen if
- * it is a character device and
- * df->ops == NULL and
- * de->registered is true,
- * but AFAICS it can't happen - in devfs_register() we never set
- * ->ops to NULL, in unregister() we set ->registered to false,
- * in devfs_mknod() we set it to NULL only if ->register is false.
- *
- * Looks like this fallback is not needed at all.
- * AV
- */
if ( S_ISCHR (inode->i_mode) ) err = chrdev_open (inode, file);
else err = -ENODEV;
}
devfsd_notify_one (de, DEVFSD_NOTIFY_ASYNC_OPEN, inode->i_mode,
current->euid, current->egid, fs_info);
out:
- unlock_kernel();
+ unlock_kernel ();
return err;
} /* End Function devfs_open */
struct devfs_entry *de;
lock_kernel ();
- de = get_devfs_entry_from_vfs_inode (inode);
+ de = get_devfs_entry_from_vfs_inode (inode, FALSE);
#ifdef CONFIG_DEVFS_DEBUG
if (devfs_debug & DEBUG_D_IPUT)
printk ("%s: d_iput(): dentry: %p inode: %p de: %p de->dentry: %p\n",
return 1;
}
fs_info = inode->i_sb->u.generic_sbp;
- de = get_devfs_entry_from_vfs_inode (inode);
+ de = get_devfs_entry_from_vfs_inode (inode, TRUE);
#ifdef CONFIG_DEVFS_DEBUG
if (devfs_debug & DEBUG_D_DELETE)
printk ("%s: d_delete(): dentry: %p inode: %p devfs_entry: %p\n",
{
devfs_handle_t parent;
- parent = get_devfs_entry_from_vfs_inode (dir);
+ parent = get_devfs_entry_from_vfs_inode (dir, TRUE);
de = search_for_entry_in_dir (parent, dentry->d_name.name,
dentry->d_name.len, FALSE);
}
#endif
fs_info = dir->i_sb->u.generic_sbp;
/* First try to get the devfs entry for this directory */
- parent = get_devfs_entry_from_vfs_inode (dir);
- if (parent == NULL) return ERR_PTR (-EINVAL);
- if (!parent->registered) return ERR_PTR (-ENOENT);
+ parent = get_devfs_entry_from_vfs_inode (dir, TRUE);
+ if (parent == NULL) return ERR_PTR (-ENOENT);
/* Try to reclaim an existing devfs entry */
de = search_for_entry_in_dir (parent,
dentry->d_name.name, dentry->d_name.len,
}
#endif
- de = get_devfs_entry_from_vfs_inode (dentry->d_inode);
+ de = get_devfs_entry_from_vfs_inode (dentry->d_inode, TRUE);
if (de == NULL) return -ENOENT;
- if (!de->registered) return -ENOENT;
de->registered = FALSE;
de->hide = TRUE;
if ( S_ISLNK (de->mode) ) kfree (de->u.symlink.linkname);
fs_info = dir->i_sb->u.generic_sbp;
/* First try to get the devfs entry for this directory */
- parent = get_devfs_entry_from_vfs_inode (dir);
- if (parent == NULL) return -EINVAL;
- if (!parent->registered) return -ENOENT;
+ parent = get_devfs_entry_from_vfs_inode (dir, TRUE);
+ if (parent == NULL) return -ENOENT;
err = devfs_do_symlink (parent, dentry->d_name.name, DEVFS_FL_NONE,
symname, &de, NULL);
#ifdef CONFIG_DEVFS_DEBUG
mode = (mode & ~S_IFMT) | S_IFDIR;
fs_info = dir->i_sb->u.generic_sbp;
/* First try to get the devfs entry for this directory */
- parent = get_devfs_entry_from_vfs_inode (dir);
- if (parent == NULL) return -EINVAL;
- if (!parent->registered) return -ENOENT;
+ parent = get_devfs_entry_from_vfs_inode (dir, TRUE);
+ if (parent == NULL) return -ENOENT;
/* Try to reclaim an existing devfs entry, create if there isn't one */
de = search_for_entry (parent, dentry->d_name.name, dentry->d_name.len,
FALSE, TRUE, &is_new, FALSE);
printk ("%s: mkdir(): existing entry\n", DEVFS_NAME);
return -EEXIST;
}
- de->registered = TRUE;
de->hide = FALSE;
if (!S_ISDIR (de->mode) && !is_new)
{
de->inode.atime = CURRENT_TIME;
de->inode.mtime = CURRENT_TIME;
de->inode.ctime = CURRENT_TIME;
+ de->registered = TRUE;
if ( ( inode = get_vfs_inode (dir->i_sb, de, dentry) ) == NULL )
return -ENOMEM;
#ifdef CONFIG_DEVFS_DEBUG
if (dir->i_sb->u.generic_sbp != inode->i_sb->u.generic_sbp) return -EINVAL;
fs_info = dir->i_sb->u.generic_sbp;
- de = get_devfs_entry_from_vfs_inode (inode);
+ de = get_devfs_entry_from_vfs_inode (inode, TRUE);
if (de == NULL) return -ENOENT;
- if (!de->registered) return -ENOENT;
if ( !S_ISDIR (de->mode) ) return -ENOTDIR;
for (child = de->u.dir.first; child != NULL; child = child->next)
{
}
}
if (has_children) return -ENOTEMPTY;
- de->registered = FALSE;
de->hide = TRUE;
+ de->registered = FALSE;
free_dentries (de);
return 0;
} /* End Function devfs_rmdir */
fs_info = dir->i_sb->u.generic_sbp;
/* First try to get the devfs entry for this directory */
- parent = get_devfs_entry_from_vfs_inode (dir);
- if (parent == NULL) return -EINVAL;
- if (!parent->registered) return -ENOENT;
+ parent = get_devfs_entry_from_vfs_inode (dir, TRUE);
+ if (parent == NULL) return -ENOENT;
/* Try to reclaim an existing devfs entry, create if there isn't one */
de = search_for_entry (parent, dentry->d_name.name, dentry->d_name.len,
FALSE, TRUE, &is_new, FALSE);
de->u.fifo.gid = current->egid;
}
}
- de->registered = TRUE;
de->show_unreg = FALSE;
de->hide = FALSE;
de->inode.mode = mode;
de->inode.atime = CURRENT_TIME;
de->inode.mtime = CURRENT_TIME;
de->inode.ctime = CURRENT_TIME;
+ de->registered = TRUE;
if ( ( inode = get_vfs_inode (dir->i_sb, de, dentry) ) == NULL )
return -ENOMEM;
#ifdef CONFIG_DEVFS_DEBUG
static int devfs_readlink (struct dentry *dentry, char *buffer, int buflen)
{
+ int err;
struct devfs_entry *de;
lock_kernel ();
- de = get_devfs_entry_from_vfs_inode (dentry->d_inode);
+ de = get_devfs_entry_from_vfs_inode (dentry->d_inode, TRUE);
+ err = de ? vfs_readlink (dentry, buffer, buflen,
+ de->u.symlink.linkname) : -ENODEV;
unlock_kernel ();
- return vfs_readlink (dentry, buffer, buflen, de->u.symlink.linkname);
+ return err;
} /* End Function devfs_readlink */
static int devfs_follow_link (struct dentry *dentry, struct nameidata *nd)
{
+ int err;
struct devfs_entry *de;
lock_kernel ();
- de = get_devfs_entry_from_vfs_inode (dentry->d_inode);
+ de = get_devfs_entry_from_vfs_inode (dentry->d_inode, TRUE);
+ err = de ? vfs_follow_link (nd, de->u.symlink.linkname) : -ENODEV;
unlock_kernel ();
- return vfs_follow_link (nd, de->u.symlink.linkname);
+ return err;
} /* End Function devfs_follow_link */
static struct inode_operations devfs_iops =
int done = FALSE;
int ival;
loff_t pos, devname_offset, tlen, rpos;
- struct devfsd_notify_struct info;
struct devfsd_buf_entry *entry;
struct fs_info *fs_info = file->f_dentry->d_inode->i_sb->u.generic_sbp;
+ struct devfsd_notify_struct *info = fs_info->devfsd_info;
DECLARE_WAITQUEUE (wait, current);
/* Can't seek (pread) on this device */
if (ppos != &file->f_pos) return -ESPIPE;
/* Verify the task has grabbed the queue */
if (fs_info->devfsd_task != current) return -EPERM;
- info.major = 0;
- info.minor = 0;
+ info->major = 0;
+ info->minor = 0;
/* Block for a new entry */
add_wait_queue (&fs_info->devfsd_wait_queue, &wait);
current->state = TASK_INTERRUPTIBLE;
/* Now play with the data */
ival = atomic_read (&fs_info->devfsd_overrun_count);
if (ival > 0) atomic_sub (ival, &fs_info->devfsd_overrun_count);
- info.overrun_count = ival;
+ info->overrun_count = ival;
entry = (struct devfsd_buf_entry *) fs_info->devfsd_buffer +
fs_info->devfsd_buf_out;
- info.type = entry->type;
- info.mode = entry->mode;
- info.uid = entry->uid;
- info.gid = entry->gid;
+ info->type = entry->type;
+ info->mode = entry->mode;
+ info->uid = entry->uid;
+ info->gid = entry->gid;
if (entry->type == DEVFSD_NOTIFY_LOOKUP)
{
- info.namelen = strlen (entry->data);
+ info->namelen = strlen (entry->data);
pos = 0;
- memcpy (info.devname, entry->data, info.namelen + 1);
+ memcpy (info->devname, entry->data, info->namelen + 1);
}
else
{
if ( S_ISCHR (de->mode) || S_ISBLK (de->mode) || S_ISREG (de->mode) )
{
- info.major = de->u.fcb.u.device.major;
- info.minor = de->u.fcb.u.device.minor;
+ info->major = de->u.fcb.u.device.major;
+ info->minor = de->u.fcb.u.device.minor;
}
- pos = devfs_generate_path (de, info.devname, DEVFS_PATHLEN);
+ pos = devfs_generate_path (de, info->devname, DEVFS_PATHLEN);
if (pos < 0) return pos;
- info.namelen = DEVFS_PATHLEN - pos - 1;
- if (info.mode == 0) info.mode = de->mode;
+ info->namelen = DEVFS_PATHLEN - pos - 1;
+ if (info->mode == 0) info->mode = de->mode;
}
- devname_offset = info.devname - (char *) &info;
+ devname_offset = info->devname - (char *) info;
rpos = *ppos;
if (rpos < devname_offset)
{
/* Copy parts of the header */
tlen = devname_offset - rpos;
if (tlen > len) tlen = len;
- if ( copy_to_user (buf, (char *) &info + rpos, tlen) )
+ if ( copy_to_user (buf, (char *) info + rpos, tlen) )
{
return -EFAULT;
}
if ( (rpos >= devname_offset) && (len > 0) )
{
/* Copy the name */
- tlen = info.namelen + 1;
+ tlen = info->namelen + 1;
if (tlen > len) tlen = len;
else done = TRUE;
- if ( copy_to_user (buf, info.devname + pos + rpos - devname_offset,
+ if ( copy_to_user (buf, info->devname + pos + rpos - devname_offset,
tlen) )
{
return -EFAULT;
{
int ival;
struct fs_info *fs_info = inode->i_sb->u.generic_sbp;
+ static spinlock_t lock = SPIN_LOCK_UNLOCKED;
switch (cmd)
{
doesn't matter who gets in first, as long as only one gets it */
if (fs_info->devfsd_task == NULL)
{
-#ifdef CONFIG_SMP
- /* Looks like no-one has it: check again and grab, with interrupts
- disabled */
- __cli ();
- if (fs_info->devfsd_task == NULL)
-#endif
+ if ( !spin_trylock (&lock) ) return -EBUSY;
+ fs_info->devfsd_task = current;
+ spin_unlock (&lock);
+ fs_info->devfsd_file = file;
+ fs_info->devfsd_buffer = (void *) __get_free_page (GFP_KERNEL);
+ fs_info->devfsd_info = kmalloc (sizeof *fs_info->devfsd_info,
+ GFP_KERNEL);
+ if (!fs_info->devfsd_buffer || !fs_info->devfsd_info)
{
- fs_info->devfsd_event_mask = 0; /* Temporary disable */
- fs_info->devfsd_task = current;
+ devfsd_close (inode, file);
+ return -ENOMEM;
}
-#ifdef CONFIG_SMP
- __sti ();
-#endif
+ fs_info->devfsd_buf_out = fs_info->devfsd_buf_in;
}
- /* Verify the task has grabbed the queue */
- if (fs_info->devfsd_task != current) return -EBUSY;
- fs_info->devfsd_file = file;
- fs_info->devfsd_buffer = (void *) __get_free_page (GFP_KERNEL);
- if (fs_info->devfsd_buffer == NULL)
- {
- devfsd_close (inode, file);
- return -ENOMEM;
- }
- fs_info->devfsd_buf_out = fs_info->devfsd_buf_in;
+ else if (fs_info->devfsd_task != current) return -EBUSY;
fs_info->devfsd_event_mask = arg; /* Let the masses come forth */
break;
case DEVFSDIOC_RELEASE_EVENT_QUEUE:
static int devfsd_close (struct inode *inode, struct file *file)
{
+ unsigned long flags;
struct fs_info *fs_info = inode->i_sb->u.generic_sbp;
- lock_kernel();
- if (fs_info->devfsd_file != file)
- {
- unlock_kernel();
- return 0;
- }
+ if (fs_info->devfsd_file != file) return 0;
fs_info->devfsd_event_mask = 0;
fs_info->devfsd_file = NULL;
+ spin_lock_irqsave (&fs_info->devfsd_buffer_lock, flags);
if (fs_info->devfsd_buffer)
{
- while (fs_info->devfsd_buffer_in_use) schedule ();
free_page ( (unsigned long) fs_info->devfsd_buffer );
+ fs_info->devfsd_buffer = NULL;
+ }
+ if (fs_info->devfsd_info)
+ {
+ kfree (fs_info->devfsd_info);
+ fs_info->devfsd_info = NULL;
}
- fs_info->devfsd_buffer = NULL;
+ spin_unlock_irqrestore (&fs_info->devfsd_buffer_lock, flags);
fs_info->devfsd_task = NULL;
wake_up (&fs_info->revalidate_wait_queue);
- unlock_kernel();
return 0;
} /* End Function devfsd_close */
static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
{
- struct inode *dir = (struct inode *)page->mapping->host;
+ struct inode *dir = page->mapping->host;
int err = 0;
dir->i_version = ++event;
page->mapping->a_ops->commit_write(NULL, page, from, to);
static void ext2_check_page(struct page *page)
{
- struct inode *dir = (struct inode *)page->mapping->host;
+ struct inode *dir = page->mapping->host;
struct super_block *sb = dir->i_sb;
unsigned chunk_size = ext2_chunk_size(dir);
- char *kaddr = (char*)page_address(page);
+ char *kaddr = page_address(page);
u32 max_inumber = le32_to_cpu(sb->u.ext2_sb.s_es->s_inodes_count);
unsigned offs, rec_len;
unsigned limit = PAGE_CACHE_SIZE;
if (IS_ERR(page))
continue;
- kaddr = (char *)page_address(page);
+ kaddr = page_address(page);
if (need_revalidate) {
offset = ext2_validate_entry(kaddr, offset, chunk_mask);
need_revalidate = 0;
if (IS_ERR(page))
continue;
- kaddr = (char*)page_address(page);
+ kaddr = page_address(page);
de = (ext2_dirent *) kaddr;
kaddr += PAGE_CACHE_SIZE - reclen;
for ( ; (char *) de <= kaddr ; de = ext2_next_entry(de))
err = PTR_ERR(page);
if (IS_ERR(page))
goto out;
- kaddr = (char*)page_address(page);
+ kaddr = page_address(page);
de = (ext2_dirent *)kaddr;
kaddr += PAGE_CACHE_SIZE - reclen;
while ((char *)de <= kaddr) {
int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
{
struct address_space *mapping = page->mapping;
- struct inode *inode = (struct inode*)mapping->host;
- char *kaddr = (char*)page_address(page);
+ struct inode *inode = mapping->host;
+ char *kaddr = page_address(page);
unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len);
ext2_dirent * pde = NULL;
if (err)
goto fail;
- base = (char*)page_address(page);
+ base = page_address(page);
de = (struct ext2_dir_entry_2 *) base;
de->name_len = 1;
if (IS_ERR(page))
continue;
- kaddr = (char *)page_address(page);
+ kaddr = page_address(page);
de = (ext2_dirent *)kaddr;
kaddr += PAGE_CACHE_SIZE-EXT2_DIR_REC_LEN(1);
new_fl2 = locks_alloc_lock(0);
error = -ENOLCK; /* "no luck" */
if (!(new_fl && new_fl2))
- goto out;
+ goto out_nolock;
lock_kernel();
if (caller->fl_type != F_UNLCK) {
}
out:
unlock_kernel();
+out_nolock:
/*
* Free any unused locks.
*/
break;
branch[n].key = cpu_to_block(nr);
bh = getblk(inode->i_dev, parent, BLOCK_SIZE);
- if (!buffer_uptodate(bh))
- wait_on_buffer(bh);
+ lock_buffer(bh);
memset(bh->b_data, 0, BLOCK_SIZE);
branch[n].bh = bh;
branch[n].p = (block_t*) bh->b_data + offsets[n];
*branch[n].p = branch[n].key;
mark_buffer_uptodate(bh, 1);
+ unlock_buffer(bh);
mark_buffer_dirty(bh);
parent = nr;
}
#define GET_BLOCK_CREATE 1 /* add anything you need to find block */
#define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */
#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
+#define GET_BLOCK_NO_ISEM 8 /* i_sem is not held, don't preallocate */
//
// initially this function was derived from minix or ext2's analog and
return retval ;
}
+static inline int _allocate_block(struct reiserfs_transaction_handle *th,
+ struct inode *inode,
+ b_blocknr_t *allocated_block_nr,
+ unsigned long tag,
+ int flags) {
+
+#ifdef REISERFS_PREALLOCATE
+ if (!(flags & GET_BLOCK_NO_ISEM)) {
+ return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr, tag);
+ }
+#endif
+ return reiserfs_new_unf_blocknrs (th, allocated_block_nr, tag);
+}
//
// initially this function was derived from ext2's analog and evolved
// as the prototype did. You'll need to look at the ext2 version to
goto research ;
}
-#ifdef REISERFS_PREALLOCATE
- repeat = reiserfs_new_unf_blocknrs2 (&th, inode, &allocated_block_nr, tag);
-#else
- repeat = reiserfs_new_unf_blocknrs (&th, &allocated_block_nr, tag);
-#endif
+ repeat = _allocate_block(&th, inode, &allocated_block_nr, tag, create);
if (repeat == NO_DISK_SPACE) {
/* restart the transaction to give the journal a chance to free
** research if we succeed on the second try
*/
restart_transaction(&th, inode, &path) ;
-#ifdef REISERFS_PREALLOCATE
- repeat = reiserfs_new_unf_blocknrs2 (&th, inode, &allocated_block_nr, tag);
-#else
- repeat = reiserfs_new_unf_blocknrs (&th, &allocated_block_nr, tag);
-#endif
+ repeat = _allocate_block(&th, inode,&allocated_block_nr,tag,create);
if (repeat != NO_DISK_SPACE) {
goto research ;
retval = reiserfs_insert_item (&th, &path, &tmp_key, &tmp_ih, (char *)&unp);
if (retval) {
reiserfs_free_block (&th, allocated_block_nr);
-
-#ifdef REISERFS_PREALLOCATE
- reiserfs_discard_prealloc (&th, inode);
-#endif
goto failure; // retval == -ENOSPC or -EIO or -EEXIST
}
if (unp)
mark_buffer_uptodate (unbh, 1);
if (retval) {
reiserfs_free_block (&th, allocated_block_nr);
-
-#ifdef REISERFS_PREALLOCATE
- reiserfs_discard_prealloc (&th, inode);
-#endif
goto failure;
}
/* we've converted the tail, so we must
retval = reiserfs_paste_into_item (&th, &path, &tmp_key, (char *)&un, UNFM_P_SIZE);
if (retval) {
reiserfs_free_block (&th, allocated_block_nr);
-
-#ifdef REISERFS_PREALLOCATE
- reiserfs_discard_prealloc (&th, inode);
-#endif
goto failure;
}
if (un.unfm_nodenum)
reiserfs_warning ("vs-: reiserfs_get_block: "
"%k should not be found", &key);
retval = -EEXIST;
+ if (allocated_block_nr)
+ reiserfs_free_block (&th, allocated_block_nr);
pathrelse(&path) ;
goto failure;
}
inode->i_generation = INODE_PKEY (inode)->k_dir_id;
inode->i_blksize = PAGE_SIZE;
+ INIT_LIST_HEAD(&inode->u.reiserfs_i.i_prealloc_list) ;
+
if (stat_data_v1 (ih)) {
struct stat_data_v1 * sd = (struct stat_data_v1 *)B_I_PITEM (bh, ih);
unsigned long blocks;
inode->u.reiserfs_i.i_first_direct_byte = S_ISLNK(mode) ? 1 :
U32_MAX/*NO_BYTES_IN_DIRECT_ITEM*/;
+ INIT_LIST_HEAD(&inode->u.reiserfs_i.i_prealloc_list) ;
+
if (old_format_only (sb))
inode2sd_v1 (&sd, inode);
else
/* this is where we fill in holes in the file. */
if (use_get_block) {
kmap(bh_result->b_page) ;
- retval = reiserfs_get_block(inode, block, bh_result, 1) ;
+ retval = reiserfs_get_block(inode, block, bh_result,
+ GET_BLOCK_CREATE | GET_BLOCK_NO_ISEM) ;
kunmap(bh_result->b_page) ;
if (!retval) {
if (!buffer_mapped(bh_result) || bh_result->b_blocknr == 0) {
ChangeLog for smbfs.
+2001-06-12 Urban Widmark <urban@teststation.com>
+
+ * proc.c: replace the win95-flush fix with smb_seek, when needed.
+ * proc.c: readdir 'lastname' bug (NetApp dir listing fix)
+
2001-05-08 Urban Widmark <urban@teststation.com>
* inode.c: Fix for changes on the server side not being detected
* Update the inode now rather than waiting for a refresh.
*/
inode->i_mtime = inode->i_atime = CURRENT_TIME;
+ inode->u.smbfs_i.flags |= SMB_F_LOCALWRITE;
if (offset > inode->i_size)
inode->i_size = offset;
} while (count);
inode->u.smbfs_i.oldmtime = jiffies;
if (inode->i_mtime != last_time || inode->i_size != last_sz) {
- VERBOSE("%s/%s changed, old=%ld, new=%ld, oz=%ld, nz=%ld\n",
- DENTRY_PATH(dentry),
+ VERBOSE("%ld changed, old=%ld, new=%ld, oz=%ld, nz=%ld\n",
+ inode->i_ino,
(long) last_time, (long) inode->i_mtime,
(long) last_sz, (long) inode->i_size);
SMB_HEADER_LEN + 2 * SMB_WCT(packet) - 2 + bcc);
}
+/*
+ * Called with the server locked
+ */
+static int
+smb_proc_seek(struct smb_sb_info *server, __u16 fileid,
+ __u16 mode, off_t offset)
+{
+ int result;
+
+ smb_setup_header(server, SMBlseek, 4, 0);
+ WSET(server->packet, smb_vwv0, fileid);
+ WSET(server->packet, smb_vwv1, mode);
+ DSET(server->packet, smb_vwv2, offset);
+
+ result = smb_request_ok(server, SMBlseek, 2, 0);
+ if (result < 0) {
+ result = 0;
+ goto out;
+ }
+
+ result = DVAL(server->packet, smb_vwv0);
+out:
+ return result;
+}
+
/*
* We're called with the server locked, and we leave it that way.
*/
if (result >= 0)
result = WVAL(server->packet, smb_vwv0);
- /* flush to disk, to trigger win9x to update its filesize */
- /* FIXME: this will be rather costly, won't it? */
- if (server->mnt->flags & SMB_MOUNT_WIN95)
- smb_proc_flush(server, fileid);
-
smb_unlock_server(server);
return result;
}
result = mask_len;
goto unlock_return;
}
+ mask_len--; /* mask_len is strlen, not #bytes */
first = 1;
VERBOSE("starting mask_len=%d, mask=%s\n", mask_len, mask);
* Note that some servers (win95?) point to the filename and
* others (NT4, Samba using NT1) to the dir entry. We assume
* here that those who do not point to a filename do not need
- * this info to continue the listing. OS/2 needs this, but it
- * talks "infolevel 1"
+ * this info to continue the listing.
+ *
+ * OS/2 needs this and talks infolevel 1
+ * NetApps want lastname with infolevel 260
+ *
+ * Both are happy if we return the data they point to. So we do.
*/
mask_len = 0;
- if (info_level == 1 && ff_lastname > 0 &&
- ff_lastname < resp_data_len) {
+ if (ff_lastname > 0 && ff_lastname < resp_data_len) {
lastname = resp_data + ff_lastname;
- /* lastname points to a length byte */
- mask_len = *lastname++;
- if (ff_lastname + 1 + mask_len > resp_data_len)
- mask_len = resp_data_len-ff_lastname-1;
+ switch (info_level) {
+ case 260:
+ mask_len = resp_data_len - ff_lastname;
+ break;
+ case 1:
+ /* lastname points to a length byte */
+ mask_len = *lastname++;
+ if (ff_lastname + 1 + mask_len > resp_data_len)
+ mask_len = resp_data_len - ff_lastname - 1;
+ break;
+ }
/*
* Update the mask string for the next message.
DSET(param, 8, 0);
result = smb_trans2_request(server, TRANSACT2_FINDFIRST,
- 0, NULL, 12 + mask_len + 1, param,
+ 0, NULL, 12 + mask_len, param,
&resp_data_len, &resp_data,
&resp_param_len, &resp_param);
if (result < 0)
struct smb_fattr *fattr)
{
int result;
+ struct inode *inode = dir->d_inode;
smb_init_dirent(server, fattr);
result = smb_proc_getattr_trans2(server, dir, fattr);
}
+ /*
+ * None of the getattr versions here can make win9x return the right
+ * filesize if there are changes made to an open file.
+ * A seek-to-end does return the right size, but we only need to do
+ * that on files we have written.
+ */
+ if (server->mnt->flags & SMB_MOUNT_WIN95 &&
+ inode &&
+ inode->u.smbfs_i.flags & SMB_F_LOCALWRITE &&
+ smb_is_open(inode))
+ {
+ __u16 fileid = inode->u.smbfs_i.fileid;
+ fattr->f_size = smb_proc_seek(server, fileid, 2, 0);
+ }
+
smb_finish_dirent(server, fattr);
return result;
}
* filesystems which don't use real block-devices. -- jrs
*/
-static unsigned int unnamed_dev_in_use[256/(8*sizeof(unsigned int))];
+static unsigned long unnamed_dev_in_use[256/(8*sizeof(unsigned long))];
kdev_t get_unnamed_dev(void)
{
#ifndef __ALPHA_DELAY_H
#define __ALPHA_DELAY_H
-#include <linux/config.h>
-#include <asm/param.h>
-#include <asm/smp.h>
-
-/*
- * Copyright (C) 1993, 2000 Linus Torvalds
- *
- * Delay routines, using a pre-computed "loops_per_jiffy" value.
- */
-
-/*
- * Use only for very small delays (< 1 msec).
- *
- * The active part of our cycle counter is only 32-bits wide, and
- * we're treating the difference between two marks as signed. On
- * a 1GHz box, that's about 2 seconds.
- */
-
-extern __inline__ void
-__delay(int loops)
-{
- int tmp;
- __asm__ __volatile__(
- " rpcc %0\n"
- " addl %1,%0,%1\n"
- "1: rpcc %0\n"
- " subl %1,%0,%0\n"
- " bgt %0,1b"
- : "=&r" (tmp), "=r" (loops) : "1"(loops));
-}
-
-extern __inline__ void
-__udelay(unsigned long usecs, unsigned long lpj)
-{
- usecs *= (((unsigned long)HZ << 32) / 1000000) * lpj;
- __delay((long)usecs >> 32);
-}
-
-#ifdef CONFIG_SMP
-#include <linux/sched.h> /* for smp_processor_id */
-#define udelay(u) __udelay((u), cpu_data[smp_processor_id()].loops_per_jiffy)
-#else
-#define udelay(u) __udelay((u), loops_per_jiffy)
-#endif
+extern void __delay(int loops);
+extern void __udelay(unsigned long usecs, unsigned long lpj);
+extern void udelay(unsigned long usecs);
#endif /* defined(__ALPHA_DELAY_H) */
*/
#include <linux/config.h>
+#include <asm/atomic.h>
#include <asm/irq.h>
/*
extern void send_IPI(int dest, int vector);
extern unsigned long io_apic_irqs;
-extern volatile unsigned long irq_err_count;
+
+extern atomic_t irq_err_count;
+extern atomic_t irq_mis_count;
extern char _stext, _etext;
#ifdef CONFIG_X86_IO_APIC
+#define APIC_MISMATCH_DEBUG
+
#define IO_APIC_BASE(idx) \
((volatile int *)__fix_to_virt(FIX_IO_APIC_BASE_0 + idx))
#ifndef __i386_PCI_H
#define __i386_PCI_H
+#include <linux/config.h>
+
#ifdef __KERNEL__
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
+#ifdef CONFIG_PCI
+extern unsigned int pcibios_assign_all_busses(void);
+#else
#define pcibios_assign_all_busses() 0
+#endif
extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO 0x1000
+#ifndef __LINUX_IBMTR_H__
+#define __LINUX_IBMTR_H__
+
/* Definitions for an IBM Token Ring card. */
/* This file is distributed under the GNU GPL */
unsigned char funct_address[4];
};
+#endif /* __LINUX_IBMTR_H__ */
#define ARPHRD_FCFABRIC 787 /* Fibrechannel fabric */
/* 787->799 reserved for fibrechannel media types */
#define ARPHRD_IEEE802_TR 800 /* Magic type ident for TR */
+#define ARPHRD_IEEE80211 801 /* IEEE 802.11 */
#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
extern hw_irq_controller no_irq_type; /* needed in every arch ? */
extern void no_action(int cpl, void *dev_id, struct pt_regs *regs);
-extern volatile unsigned long irq_err_count;
-
#endif /* __asm_h */
-
* library, the executable area etc).
*/
struct vm_area_struct {
- struct mm_struct * vm_mm; /* VM area parameters */
- unsigned long vm_start;
- unsigned long vm_end;
+ struct mm_struct * vm_mm; /* The address space we belong to. */
+ unsigned long vm_start; /* Our start address within vm_mm. */
+ unsigned long vm_end; /* Our end address within vm_mm. */
/* linked list of VM areas per task, sorted by address */
struct vm_area_struct *vm_next;
- pgprot_t vm_page_prot;
- unsigned long vm_flags;
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+ unsigned long vm_flags; /* Flags, listed below. */
/* AVL tree of VM areas per task, sorted by address */
short vm_avl_height;
struct vm_area_struct * vm_avl_left;
struct vm_area_struct * vm_avl_right;
- /* For areas with an address space and backing store,
+ /*
+ * For areas with an address space and backing store,
* one of the address_space->i_mmap{,shared} lists,
* for shm areas, the list of attaches, otherwise unused.
*/
struct vm_area_struct *vm_next_share;
struct vm_area_struct **vm_pprev_share;
+ /* Function pointers to deal with this struct. */
struct vm_operations_struct * vm_ops;
- unsigned long vm_pgoff; /* offset in PAGE_SIZE units, *not* PAGE_CACHE_SIZE */
- struct file * vm_file;
- unsigned long vm_raend;
+
+ /* Information about our backing store: */
+ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
+ units, *not* PAGE_CACHE_SIZE */
+ struct file * vm_file; /* File we map to (can be NULL). */
+ unsigned long vm_raend; /* XXX: put full readahead info here. */
void * vm_private_data; /* was vm_pte (shared mem) */
};
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
+ /* Used by sys_madvise() */
#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
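A brief userspace illustration (not part of the patch) of how these two bits come to be set: sys_madvise(), mentioned in the comment above, maps the MADV_SEQUENTIAL and MADV_RANDOM hints onto them.

#include <sys/mman.h>

/* Illustrative only. */
void advise_mapping(void *addr, size_t len)
{
	madvise(addr, len, MADV_SEQUENTIAL);	/* kernel sets VM_SEQ_READ on the VMA */
	/* madvise(addr, len, MADV_RANDOM) would set VM_RAND_READ instead */
}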
};
/*
+ * Each physical page in the system has a struct page associated with
+ * it to keep track of whatever it is we are using the page for at the
+ * moment. Note that we have no way to track which tasks are using
+ * a page.
+ *
* Try to keep the most commonly accessed fields in single cache lines
* here (16 bytes or greater). This ordering should be particularly
* beneficial on 32-bit processors.
*
* The first line is data used in page cache lookup, the second line
* is used for linear searches (eg. clock algorithm scans).
+ *
+ * TODO: make this structure smaller, it could be as small as 32 bytes.
*/
typedef struct page {
- struct list_head list;
- struct address_space *mapping;
- unsigned long index;
- struct page *next_hash;
- atomic_t count;
- unsigned long flags; /* atomic flags, some possibly updated asynchronously */
- struct list_head lru;
- unsigned long age;
- wait_queue_head_t wait;
- struct page **pprev_hash;
- struct buffer_head * buffers;
- void *virtual; /* non-NULL if kmapped */
- struct zone_struct *zone;
+ struct list_head list; /* ->mapping has some page lists. */
+ struct address_space *mapping; /* The inode (or ...) we belong to. */
+ unsigned long index; /* Our offset within mapping. */
+ struct page *next_hash; /* Next page sharing our hash bucket in
+ the pagecache hash table. */
+ atomic_t count; /* Usage count, see below. */
+ unsigned long flags; /* atomic flags, some possibly
+ updated asynchronously */
+ struct list_head lru; /* Pageout list, eg. active_list;
+ protected by pagemap_lru_lock !! */
+ unsigned long age; /* Page aging counter. */
+ wait_queue_head_t wait; /* Page locked? Stand in line... */
+ struct page **pprev_hash; /* Complement to *next_hash. */
+ struct buffer_head * buffers; /* Buffer maps us to a disk block. */
+ void *virtual; /* Kernel virtual address (NULL if
+ not kmapped, ie. highmem) */
+ struct zone_struct *zone; /* Memory zone we are in. */
} mem_map_t;
+/*
+ * Methods to modify the page usage count.
+ *
+ * What counts for a page usage:
+ * - cache mapping (page->mapping)
+ * - disk mapping (page->buffers)
+ * - page mapped in a task's page tables, each mapping
+ * is counted separately
+ *
+ * Also, many kernel routines increase the page count before a critical
+ * routine so they can be sure the page doesn't go away from under them.
+ */
#define get_page(p) atomic_inc(&(p)->count)
#define put_page(p) __free_page(p)
#define put_page_testzero(p) atomic_dec_and_test(&(p)->count)
#define page_count(p) atomic_read(&(p)->count)
#define set_page_count(p,v) atomic_set(&(p)->count, v)
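As a small illustration of the usage-count rules described above (a sketch, not part of the patch; the function name is made up), code that must keep a page alive across a blocking operation pins it with the helpers just defined:

/* Illustrative only. */
static void example_pin_page(struct page *page)
{
	get_page(page);		/* count++: the page cannot be freed under us */
	/* ... work that may sleep or drop other locks ... */
	put_page(page);		/* count--: the page is freed once it reaches zero */
}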
-/* Page flag bit values */
-#define PG_locked 0
+/*
+ * Various page->flags bits:
+ *
+ * PG_reserved is set for special pages, which can never be swapped
+ * out. Some of them might not even exist (eg. empty_bad_page)...
+ *
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page->count denotes a reference count.
+ * page->count == 0 means the page is free.
+ * page->count == 1 means the page is used for exactly one purpose
+ * (e.g. a private data page of one process).
+ *
+ * A page may be used for kmalloc() or anyone else who does a
+ * __get_free_page(). In this case the page->count is at least 1, and
+ * all other fields are unused but should be 0 or NULL. The
+ * management of this page is the responsibility of the one who uses
+ * it.
+ *
+ * The other pages (we may call them "process pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A page may belong to an inode's memory mapping. In this case,
+ * page->mapping is the pointer to the inode, and page->offset is the
+ * file offset of the page (not necessarily a multiple of PAGE_SIZE).
+ *
+ * A page may have buffers allocated to it. In this case,
+ * page->buffers is a circular list of these buffer heads. Else,
+ * page->buffers == NULL.
+ *
+ * For pages belonging to inodes, the page->count is the number of
+ * attaches, plus 1 if buffers are allocated to the page, plus one
+ * for the page cache itself.
+ *
+ * All pages belonging to an inode are in these doubly linked lists:
+ * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
+ * using the page->list list_head. These fields are also used for
+ * freelist management (when page->count==0).
+ *
+ * There is also a hash table mapping (inode,offset) to the page
+ * in memory if present. The lists for this hash table use the fields
+ * page->next_hash and page->pprev_hash.
+ *
+ * All process pages can do I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written to disk,
+ * - private pages which have been modified may need to be swapped out
+ * to swap space and (later) to be read back into memory.
+ * During disk I/O, PG_locked is used. This bit is set before I/O
+ * and reset when I/O completes. page->wait is a wait queue of all
+ * tasks waiting for the I/O on this page to complete.
+ * PG_uptodate tells whether the page's contents are valid.
+ * When a read completes, the page becomes uptodate, unless a disk I/O
+ * error happened.
+ *
+ * For choosing which pages to swap out, inode pages carry a
+ * PG_referenced bit, which is set any time the system accesses
+ * that page through the (inode,offset) hash table. This referenced
+ * bit, together with the referenced bit in the page tables, is used
+ * to manipulate page->age and move the page across the active,
+ * inactive_dirty and inactive_clean lists.
+ *
+ * Note that the referenced bit, the page->lru list_head and the
+ * active, inactive_dirty and inactive_clean lists are protected by
+ * the pagemap_lru_lock, and *NOT* by the usual PG_locked bit!
+ *
+ * PG_skip is used on sparc/sparc64 architectures to "skip" certain
+ * parts of the address space.
+ *
+ * PG_error is set to indicate that an I/O error occurred on this page.
+ *
+ * PG_arch_1 is an architecture specific page state bit. The generic
+ * code guarantees that this bit is cleared for a page when it first
+ * is entered into the page cache.
+ *
+ * PG_highmem pages are not permanently mapped into the kernel virtual
+ * address space; they need to be kmapped separately for doing I/O on
+ * them.  The struct page itself (which carries these bits and the
+ * other management information) is always mapped into kernel address
+ * space...
+ */
+#define PG_locked 0 /* Page is locked. Don't touch. */
#define PG_error 1
#define PG_referenced 2
#define PG_uptodate 3
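As a quick illustration of how these bit numbers are meant to be used (a minimal sketch only, with hypothetical helper names; the kernel itself wraps equivalent tests in its Page*() style macros):

/* Minimal sketch, not part of this patch: the PG_* values are bit
 * indices into page->flags, so the generic atomic bitops apply. */
static inline int my_page_locked(struct page *page)
{
	return test_bit(PG_locked, &page->flags);
}

static inline void my_set_page_uptodate(struct page *page)
{
	set_bit(PG_uptodate, &page->flags);
}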
#define NOPAGE_SIGBUS (NULL)
#define NOPAGE_OOM ((struct page *) (-1))
-
-/*
- * Various page->flags bits:
- *
- * PG_reserved is set for a page which must never be accessed (which
- * may not even be present).
- *
- * PG_DMA has been removed, page->zone now tells exactly wether the
- * page is suited to do DMAing into.
- *
- * Multiple processes may "see" the same page. E.g. for untouched
- * mappings of /dev/null, all processes see the same page full of
- * zeroes, and text pages of executables and shared libraries have
- * only one copy in memory, at most, normally.
- *
- * For the non-reserved pages, page->count denotes a reference count.
- * page->count == 0 means the page is free.
- * page->count == 1 means the page is used for exactly one purpose
- * (e.g. a private data page of one process).
- *
- * A page may be used for kmalloc() or anyone else who does a
- * __get_free_page(). In this case the page->count is at least 1, and
- * all other fields are unused but should be 0 or NULL. The
- * management of this page is the responsibility of the one who uses
- * it.
- *
- * The other pages (we may call them "process pages") are completely
- * managed by the Linux memory manager: I/O, buffers, swapping etc.
- * The following discussion applies only to them.
- *
- * A page may belong to an inode's memory mapping. In this case,
- * page->inode is the pointer to the inode, and page->offset is the
- * file offset of the page (not necessarily a multiple of PAGE_SIZE).
- *
- * A page may have buffers allocated to it. In this case,
- * page->buffers is a circular list of these buffer heads. Else,
- * page->buffers == NULL.
- *
- * For pages belonging to inodes, the page->count is the number of
- * attaches, plus 1 if buffers are allocated to the page.
- *
- * All pages belonging to an inode make up a doubly linked list
- * inode->i_pages, using the fields page->next and page->prev. (These
- * fields are also used for freelist management when page->count==0.)
- * There is also a hash table mapping (inode,offset) to the page
- * in memory if present. The lists for this hash table use the fields
- * page->next_hash and page->pprev_hash.
- *
- * All process pages can do I/O:
- * - inode pages may need to be read from disk,
- * - inode pages which have been modified and are MAP_SHARED may need
- * to be written to disk,
- * - private pages which have been modified may need to be swapped out
- * to swap space and (later) to be read back into memory.
- * During disk I/O, PG_locked is used. This bit is set before I/O
- * and reset when I/O completes. page->wait is a wait queue of all
- * tasks waiting for the I/O on this page to complete.
- * PG_uptodate tells whether the page's contents is valid.
- * When a read completes, the page becomes uptodate, unless a disk I/O
- * error happened.
- *
- * For choosing which pages to swap out, inode pages carry a
- * PG_referenced bit, which is set any time the system accesses
- * that page through the (inode,offset) hash table.
- *
- * PG_skip is used on sparc/sparc64 architectures to "skip" certain
- * parts of the address space.
- *
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
- * PG_arch_1 is an architecture specific page state bit. The generic
- * code guarentees that this bit is cleared for a page when it first
- * is entered into the page cache.
- */
-
+/* The array of struct pages */
extern mem_map_t * mem_map;
/*
extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
-#define buffer_under_min() (atomic_read(&buffermem_pages) * 100 < \
- buffer_mem.min_percent * num_physpages)
-#define pgcache_under_min() (atomic_read(&page_cache_size) * 100 < \
- page_cache.min_percent * num_physpages)
-
#endif /* __KERNEL__ */
#endif
#define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425
#define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426
#define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428
-#define PCI_DEVICE_ID_INTEL_82820FW_0 0x2440
-#define PCI_DEVICE_ID_INTEL_82820FW_1 0x2442
-#define PCI_DEVICE_ID_INTEL_82820FW_2 0x2443
-#define PCI_DEVICE_ID_INTEL_82820FW_3 0x2444
-#define PCI_DEVICE_ID_INTEL_82820FW_4 0x2449
-#define PCI_DEVICE_ID_INTEL_82820FW_5 0x244b
-#define PCI_DEVICE_ID_INTEL_82820FW_6 0x244e
+#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440
+#define PCI_DEVICE_ID_INTEL_82801BA_1 0x2442
+#define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443
+#define PCI_DEVICE_ID_INTEL_82801BA_3 0x2444
+#define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445
+#define PCI_DEVICE_ID_INTEL_82801BA_5 0x2446
+#define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448
+#define PCI_DEVICE_ID_INTEL_82801BA_7 0x2449
+#define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a
+#define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b
+#define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c
+#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e
#define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120
#define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121
#define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122
#endif /* DEBUG_SMB_MALLOC */
+/*
+ * Flags for the in-memory inode
+ */
+#define SMB_F_LOCALWRITE 0x02 /* file modified locally */
+
/* NT1 protocol capability bits */
#define SMB_CAP_RAW_MODE 0x0001
__u16 attr; /* Attribute fields, DOS value */
__u16 access; /* Access mode */
+ __u16 flags;
unsigned long oldmtime; /* last time refreshed */
unsigned long closed; /* timestamp when closed */
unsigned openers; /* number of fileid users */
struct rpc_wait_queue reconn; /* waiting for reconnect */
struct rpc_rqst * free; /* free slots */
struct rpc_rqst slot[RPC_MAXREQS];
- unsigned int sockstate; /* Socket state */
+ unsigned long sockstate; /* Socket state */
unsigned char shutdown : 1, /* being shut down */
nocong : 1, /* no congestion control */
stream : 1, /* TCP */
+++ /dev/null
-/* Separate to keep compilation of protocols.c simpler */
-extern void irda_proto_init(struct net_proto *pro);
#endif
#ifdef CONFIG_IRDA
-#include <net/irda/irda_device.h>
+extern int irda_proto_init(void);
+extern int irda_device_init(void);
#endif
#ifdef CONFIG_X86_IO_APIC
= { 12, 12, ICMP_NOT_ERROR, 0, 0 } };
/* Can't do anything if it's a fragment. */
- if (!offset)
+ if (offset)
return 1;
/* Must cover type and code. */
/* If not embedded. */
if (!embedded) {
- /* Bad checksum? Don't print, just drop. */
+ /* Bad checksum? Don't print, just ignore. */
if (!more_frags
&& ip_compute_csum((unsigned char *) icmph, datalen) != 0)
return 0;
length of iph + 8 bytes. */
struct iphdr *inner = (void *)icmph + 8;
+ /* datalen > 8 since all ICMP_IS_ERROR types
+ have min length > 8 */
if (datalen - 8 < sizeof(struct iphdr)) {
limpk("ICMP error internal way too short\n");
return 0;
u_int32_t arg = ntohl(icmph->un.gateway);
if (icmph->code == 0) {
+ /* Code 0 means that the upper 8 bits are a
+ pointer to the problem. */
if ((arg >> 24) >= iph->ihl*4) {
limpk("ICMP PARAMETERPROB ptr = %u\n",
ntohl(icmph->un.gateway) >> 24);
int embedded)
{
/* Can't do anything if it's a fragment. */
- if (!offset)
+ if (offset)
return 1;
/* CHECK: Must cover UDP header. */
return 0;
}
- /* Bad checksum? Don't print, just drop. */
+ /* Bad checksum? Don't print, just say it's unclean. */
/* FIXME: SRC ROUTE packets won't match checksum --RR */
if (!more_frags && !embedded
&& csum_tcpudp_magic(iph->saddr, iph->daddr, datalen, IPPROTO_UDP,
int more_frags,
int embedded)
{
- u_int8_t *opt = (u_int8_t *)(tcph + 1);
+ u_int8_t *opt = (u_int8_t *)tcph;
u_int8_t tcpflags;
int end_of_options = 0;
size_t i;
/* In fact, this is caught below (offset < 516). */
/* Can't do anything if it's a fragment. */
- if (!offset)
+ if (offset)
return 1;
/* CHECK: Smaller than minimal TCP hdr. */
limpk("Packet length %u < TCP header.\n", datalen);
return 0;
}
- /* Must have ports available (datalen >= 8). */
+ /* Must have ports available (datalen >= 8); guaranteed
+ by check_icmp(), which sets embedded = 1 for us. */
/* CHECK: TCP ports inside ICMP error */
if (!tcph->source || !tcph->dest) {
limpk("Zero TCP ports %u/%u.\n",
return 1;
}
- /* Bad checksum? Don't print, just drop. */
+ /* Bad checksum? Don't print, just say it's unclean. */
/* FIXME: SRC ROUTE packets won't match checksum --RR */
if (!more_frags && !embedded
&& csum_tcpudp_magic(iph->saddr, iph->daddr, datalen, IPPROTO_TCP,
(unsigned int) opt[i], i);
return 0;
}
+ /* Move to next option */
+ i += opt[i+1];
}
}
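The two hunks above rebase opt on the start of the TCP header and step through the options by each option's own length byte instead of scanning one byte at a time. A self-contained sketch of that walk (hypothetical helper, assuming struct tcphdr from <linux/tcp.h>; not the module's exact loop, which also validates individual option kinds):

static int walk_tcp_options(const struct tcphdr *tcph)
{
	const u_int8_t *opt = (const u_int8_t *)tcph;
	size_t i = sizeof(struct tcphdr);	/* options start after the 20-byte header */
	size_t end = tcph->doff * 4;		/* ... and run to the end of the header */

	while (i < end) {
		if (opt[i] == 0)		/* EOL: end of option list */
			break;
		if (opt[i] == 1) {		/* NOP: single byte, no length field */
			i++;
			continue;
		}
		/* Every other option carries a length byte covering itself. */
		if (i + 1 >= end || opt[i+1] < 2 || i + opt[i+1] > end)
			return 0;		/* malformed */
		i += opt[i+1];			/* move to next option */
	}
	return 1;
}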
static int
check_ip(struct iphdr *iph, size_t length, int embedded)
{
- u_int8_t *opt = (u_int8_t *)(iph + 1);
+ u_int8_t *opt = (u_int8_t *)iph;
int end_of_options = 0;
void *protoh;
size_t datalen;
opt[i]);
return 0;
}
- /* CHECK: zero-length options. */
- else if (opt[i+1] == 0) {
- limpk("IP option %u 0 len\n",
- opt[i]);
+ /* CHECK: zero-length or one-length options. */
+ else if (opt[i+1] < 2) {
+ limpk("IP option %u %u len\n",
+ opt[i], opt[i+1]);
return 0;
}
/* CHECK: oversize options. */
- else if (opt[i+1] + i >= iph->ihl * 4) {
+ else if (opt[i+1] + i > iph->ihl * 4) {
limpk("IP option %u at %u too long\n",
opt[i], i);
return 0;
}
+ /* Move to next option */
+ i += opt[i+1];
}
}
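Two smaller corrections ride along in the IP option loop: lengths below 2 are now rejected (a length of 0 or 1 would make the new "i += opt[i+1]" step stall or land inside the option's own bytes), and the bounds test is relaxed from >= to > so an option ending exactly on the header boundary is accepted. For example:

/* Illustration: iph->ihl == 6, i.e. a 24-byte header with 4 option
 * bytes.  A 4-byte option starting at i == 20 ends exactly at byte 24:
 *   old test: 4 + 20 >= 24  -> wrongly flagged as oversize
 *   new test: 4 + 20 >  24  -> accepted
 */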
return 0;
}
- /* CHECK: Min offset of frag = 128 - 60 (max IP hdr len). */
- if (offset && offset * 8 < MIN_LIKELY_MTU - 60) {
+ /* CHECK: Min offset of frag = 128 - IP hdr len. */
+ if (offset && offset * 8 < MIN_LIKELY_MTU - iph->ihl * 4) {
limpk("Fragment starts at %u < %u\n", offset * 8,
- MIN_LIKELY_MTU-60);
+ MIN_LIKELY_MTU - iph->ihl * 4);
return 0;
}
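The fragment-offset bound likewise now uses the real header length instead of the worst-case 60 bytes. Roughly:

/* Illustration, with the MIN_LIKELY_MTU of 128 from the old comment
 * and a plain 20-byte header (iph->ihl == 5):
 *   old bound: offset * 8 < 128 - 60 = 68   -> fragments starting at
 *              72..104 bytes were let through
 *   new bound: offset * 8 < 128 - 20 = 108  -> the first non-initial
 *              fragment must begin at byte 112 (offset field >= 14)
 */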
*
* RAW - implementation of IP "raw" sockets.
*
- * Version: $Id: raw.c,v 1.61 2001/05/03 20:56:04 davem Exp $
+ * Version: $Id: raw.c,v 1.62 2001/06/05 10:52:15 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
int type;
type = skb->h.icmph->type;
- if (type < 32)
- return test_bit(type, &sk->tp_pinfo.tp_raw4.filter);
+ if (type < 32) {
+ __u32 data = sk->tp_pinfo.tp_raw4.filter.data;
+
+ return ((1 << type) & data) != 0;
+ }
/* Do not block unknown ICMP types */
return 0;
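The open-coded mask presumably replaces test_bit() here because filter.data is a single __u32, while test_bit() operates on unsigned long words (wider than 32 bits on 64-bit platforms). For reference, the corresponding userspace knob is the ICMP_FILTER socket option on raw ICMP sockets, where a set bit blocks that type (see raw(7)); a hedged sketch with a made-up helper name:

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/icmp.h>

/* Hypothetical helper: on a raw ICMP socket, block every ICMP type
 * except echo replies. */
int block_all_but_echo_reply(int fd)
{
	struct icmp_filter filt;

	filt.data = ~(1U << ICMP_ECHOREPLY);
	return setsockopt(fd, SOL_RAW, ICMP_FILTER, &filt, sizeof(filt));
}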
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_fib.c,v 1.23 2001/03/19 20:31:17 davem Exp $
+ * $Id: ip6_fib.c,v 1.24 2001/06/05 11:36:55 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
xb = ntohl(xb);
- while (test_bit(j, &xb) == 0)
+ while ((xb & (1 << j)) == 0)
j--;
return (i * 32 + 31 - j);
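Same theme as the raw.c change: xb is a 32-bit XOR of two address words held in a local variable, so the explicit shift-and-mask is safer than test_bit() on its address. A standalone sketch of the search (hypothetical helper; xb must be non-zero):

/* Return how many leading bits of a non-zero 32-bit word are clear,
 * i.e. the position of the first set bit counted from the MSB. */
static int first_set_bit_from_msb(u_int32_t xb)
{
	int j = 31;

	while ((xb & (1 << j)) == 0)
		j--;
	return 31 - j;		/* 0 if bit 31 is set, 31 if only bit 0 is */
}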
*
* Adapted from linux/net/ipv4/raw.c
*
- * $Id: raw.c,v 1.45 2001/02/18 09:10:42 davem Exp $
+ * $Id: raw.c,v 1.46 2001/06/05 11:36:55 davem Exp $
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
opt = &sk->tp_pinfo.tp_raw;
if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
+ __u32 *data = &opt->filter.data[0];
+ int bit_nr;
+
icmph = (struct icmp6hdr *) skb->data;
- return test_bit(icmph->icmp6_type, &opt->filter);
+ bit_nr = icmph->icmp6_type;
+
+ return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
}
return 0;
}
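The ICMPv6 filter is a 256-bit bitmap held as an array of __u32, so the type is split into a word index (type >> 5) and a bit within that word (type & 31). Userspace fills the same bitmap through the ICMP6_FILTER socket option and the RFC 3542 macros; a hedged sketch with a made-up helper name:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>

/* Hypothetical helper: deliver only ICMPv6 echo replies on a raw
 * ICMPv6 socket; every other type is filtered out. */
int pass_only_echo_reply(int fd)
{
	struct icmp6_filter filt;

	ICMP6_FILTER_SETBLOCKALL(&filt);
	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
	return setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER,
			  &filt, sizeof(filt));
}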