say M here and read Documentation/modules.txt. This is recommended.
The module will be called sis900.o.
-Packet Engines Yellowfin Gigabit-NIC support
+Packet Engines Yellowfin Gigabit-NIC / Symbios 53c885 support
CONFIG_YELLOWFIN
Say Y here if you have a Packet Engines G-NIC PCI Gigabit Ethernet
- adapter. This adapter is used by the Beowulf Linux cluster project.
- See http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html for
- more information about this driver in particular and Beowulf in
- general.
+ adapter or the SYM53C885 Ethernet controller. The Gigabit adapter is
+ used by the Beowulf Linux cluster project. See
+ http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html for more
+ information about this driver in particular and Beowulf in general.
If you want to compile this driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
whenever you want). If you want to compile it as a module, say M
here and read Documentation/modules.txt.
-Symbios 53c885 (Synergy ethernet) support
-CONFIG_NCR885E
- This is and Ethernet driver for the dual-function NCR 53C885
- SCSI/Ethernet controller.
-
- This driver is also available as a module called ncr885e.o ( = code
- which can be inserted in and removed from the running kernel
- whenever you want). If you want to compile it as a module, say M
- here and read Documentation/modules.txt.
-
National DP83902AV (Oak ethernet) support
CONFIG_OAKNET
Say Y if your machine has this type of Ethernet network card.
10mbps_fd 10Mbps full duplex.
100mbps_hd 100Mbps half duplex.
100mbps_fd 100Mbps full duplex.
- 1000mbps_fd 1000Mbps full duplex.
- 1000mbps_hd 1000Mbps half duplex.
0 Autosensing active media.
1 10Mbps half duplex.
2 10Mbps full duplex.
3 100Mbps half duplex.
4 100Mbps full duplex.
- 5 1000Mbps full duplex.
- 6 1000Mbps half duplex.
By default, the NIC operates at autosense.
vlan=x - Specifies the VLAN ID. If vlan=0, the
jumbo=x - Specifies the jumbo frame support. If jumbo=1,
the NIC accept jumbo frames. By default, this
function is disabled.
-
+                       Jumbo frames usually improve performance
+                       in gigabit networks.
+
+int_count   - Rx frame count per interrupt.
+int_timeout - Rx DMA wait time for an interrupt. Proper
+              values of int_count and int_timeout bring
+              a noticeable performance improvement on
+              fast machines. For P4 1.5GHz systems, a
+              setting of int_count=5 and int_timeout=750
+              is recommended.
Configuration Script Sample
===========================
Here is a sample of a simple configuration script:
Troubleshooting
===============
-Q1. Source files contain behind every line.
+Q1. Source files contain ^M behind every line.
Make sure all files are Unix file format (no LF). Try the following
shell command to convert files.
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 10
-EXTRAVERSION =-pre12
+EXTRAVERSION =-pre13
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long totalram_pages;
unsigned int nmi_watchdog = NMI_NONE;
static unsigned int nmi_hz = HZ;
unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
+extern void show_registers(struct pt_regs *regs);
#define K7_EVNTSEL_ENABLE (1 << 22)
#define K7_EVNTSEL_INT (1 << 20)
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
+#include <asm/tlb.h>
+mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long highstart_pfn, highend_pfn;
static unsigned long totalram_pages;
static unsigned long totalhigh_pages;
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
/* References to section boundaries: */
extern char _stext, _etext, _edata, __init_begin, __init_end;
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long totalram_pages = 0;
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
static unsigned long totalram_pages;
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
static unsigned long totalram_pages;
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
/*
* Cache of MMU context last used.
-/* $Id: setup.c,v 1.124 2001/04/14 21:13:46 davem Exp $
+/* $Id: setup.c,v 1.125 2001/09/20 00:35:30 davem Exp $
* linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
breakpoint();
}
- /* Due to stack alignment restrictions and assumptions... */
- init_mm.mmap->vm_page_prot = PAGE_SHARED;
- init_mm.mmap->vm_start = PAGE_OFFSET;
- init_mm.mmap->vm_end = PAGE_OFFSET + highest_paddr;
init_mm.context = (unsigned long) NO_CONTEXT;
init_task.thread.kregs = &fake_swapper_regs;
-/* $Id: debuglocks.c,v 1.10 1999/09/10 10:40:36 davem Exp $
+/* $Id: debuglocks.c,v 1.11 2001/09/20 00:35:31 davem Exp $
* debuglocks.c: Debugging versions of SMP locking primitives.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long *sparc_valid_addr_bitmap;
-/* $Id: srmmu.c,v 1.230 2001/07/17 16:17:33 anton Exp $
+/* $Id: srmmu.c,v 1.231 2001/09/20 00:35:31 davem Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
}
- init_mm.mmap->vm_start = PAGE_OFFSET;
BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
}
-# $Id: config.in,v 1.149 2001/08/09 17:47:51 davem Exp $
+# $Id: config.in,v 1.150 2001/09/18 00:36:03 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
fi
dep_tristate 'Qlogic ISP SCSI support' CONFIG_SCSI_QLOGIC_ISP $CONFIG_SCSI
dep_tristate 'Qlogic ISP FC SCSI support' CONFIG_SCSI_QLOGIC_FC $CONFIG_SCSI
+ if [ "$CONFIG_SCSI_QLOGIC_FC" != "n" ]; then
+ define_bool CONFIG_SCSI_QLOGIC_FC_FIRMWARE y
+ fi
fi
endmenu
# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
CONFIG_FB_PM2_PCI=y
# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_PVR2 is not set
-# CONFIG_FB_PVR2_DEBUG is not set
-# CONFIG_FB_E1355 is not set
# CONFIG_FB_MATROX is not set
CONFIG_FB_ATY=y
# CONFIG_FB_ATY_GX is not set
CONFIG_FB_ATY_CT=y
+# CONFIG_FB_ATY_CT_VAIO_LCD is not set
+# CONFIG_FB_RADEON is not set
# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_3DFX is not set
# CONFIG_FB_SIS is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
CONFIG_FB_SBUS=y
CONFIG_FB_CREATOR=y
CONFIG_FB_CGSIX=y
# CONFIG_MD_RAID0 is not set
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
+# CONFIG_MD_MULTIPATH is not set
# CONFIG_BLK_DEV_LVM is not set
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_BLK_DEV_INITRD is not set
# CONFIG_IDEDMA_IVB is not set
# CONFIG_DMA_NONPCI is not set
CONFIG_BLK_DEV_IDE_MODES=y
+CONFIG_BLK_DEV_ATARAID=m
+CONFIG_BLK_DEV_ATARAID_PDC=m
+CONFIG_BLK_DEV_ATARAID_HPT=m
#
# SCSI support
# CONFIG_SCSI_NCR53C8XX_SYMBIOS_COMPAT is not set
CONFIG_SCSI_QLOGIC_ISP=m
CONFIG_SCSI_QLOGIC_FC=y
+CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
#
# Fibre Channel support
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
-# CONFIG_ARM_AM79C961A is not set
CONFIG_SUNLANCE=y
CONFIG_HAPPYMEAL=y
CONFIG_SUNBMAC=m
# CONFIG_TLAN is not set
CONFIG_VIA_RHINE=m
CONFIG_WINBOND_840=m
+# CONFIG_LAN_SAA9730 is not set
# CONFIG_NET_POCKET is not set
#
# CONFIG_ACENIC_OMIT_TIGON_I is not set
CONFIG_DL2K=m
CONFIG_MYRI_SBUS=m
+CONFIG_NS83820=m
CONFIG_HAMACHI=m
CONFIG_YELLOWFIN=m
CONFIG_SK98LIN=m
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
#
#
CONFIG_BLUEZ_HCIUSB=m
CONFIG_BLUEZ_HCIUART=m
-CONFIG_BLUEZ_HCIEMU=m
+CONFIG_BLUEZ_HCIVHCI=m
#
# Watchdog
-/* $Id: dtlb_backend.S,v 1.13 2001/08/17 04:55:09 kanoj Exp $
+/* $Id: dtlb_backend.S,v 1.14 2001/09/07 18:26:17 kanoj Exp $
* dtlb_backend.S: Back end to DTLB miss replacement strategy.
* This is included directly into the trap table.
*
#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
#define VPTE_SHIFT (PAGE_SHIFT - 3)
#define TLB_PMD_SHIFT (PAGE_SHIFT - 3 + 3)
-#define TLB_PGD_SHIFT (PAGE_SHIFT - 2 + PAGE_SHIFT - 3 + 3)
+#define TLB_PGD_SHIFT (PMD_BITS + PAGE_SHIFT - 3 + 3)
#define TLB_PMD_MASK (((1 << PMD_BITS) - 1) << 1)
#define TLB_PGD_MASK (((1 << (VA_BITS - PAGE_SHIFT - (PAGE_SHIFT - 3) - PMD_BITS)) - 1) << 2)
-/* $Id: dtlb_base.S,v 1.12 2001/08/17 04:55:09 kanoj Exp $
+/* $Id: dtlb_base.S,v 1.14 2001/09/11 02:20:23 kanoj Exp $
* dtlb_base.S: Front end to DTLB miss replacement strategy.
* This is included directly into the trap table.
*
* %g7 __pa(current->mm->pgd)
*
* The VPTE base value is completely magic, but note that
- * nothing else in the kernel other than these TLB miss
+ * few places in the kernel other than these TLB miss
* handlers know anything about the VPTE mechanism or
- * how it works. Consider the 44-bit VADDR Ultra-I/II
- * case as an example:
+ * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD).
+ * Consider the 44-bit VADDR Ultra-I/II case as an example:
*
* VA[0 : (1<<43)] produce VPTE index [%g3 : 0]
* VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
* If you're paying attention you'll notice that this means half of
* the VPTE table is above %g3 and half is below, low VA addresses
* map progressively upwards from %g3, and high VA addresses map
- * progressively downwards from %g3. This trick was needed to make
+ * progressively upwards towards %g3. This trick was needed to make
* the same 8 instruction handler work both for Spitfire/Blackbird's
* peculiar VA space hole configuration and the full 64-bit VA space
* one of Cheetah at the same time.
-/* $Id: etrap.S,v 1.44 2001/03/22 00:51:25 davem Exp $
+/* $Id: etrap.S,v 1.45 2001/09/07 21:04:40 kanoj Exp $
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
* Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/head.h>
+#include <asm/processor.h>
-#define TASK_REGOFF ((PAGE_SIZE<<1)-TRACEREG_SZ-REGWIN_SZ)
+#define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-REGWIN_SZ)
#define ETRAP_PSTATE1 (PSTATE_RMO | PSTATE_PRIV)
#define ETRAP_PSTATE2 (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
-/* $Id: head.S,v 1.78 2001/08/30 03:22:00 kanoj Exp $
+/* $Id: head.S,v 1.81 2001/09/07 23:00:15 kanoj Exp $
* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
wr %g0, ASI_P, %asi
mov 1, %g5
- sllx %g5, (PAGE_SHIFT + 1), %g5
+ sllx %g5, THREAD_SHIFT, %g5
sub %g5, (REGWIN_SZ + STACK_BIAS), %g5
add %g6, %g5, %sp
mov 0, %fp
#include <asm/pgtable.h>
#include <asm/uaccess.h>
+#include <asm/processor.h>
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
*/
__asm__ (".text");
union task_union init_task_union = { INIT_TASK(init_task_union.task) };
+
+/*
+ * This is to make the init_task+stack of the right size for >8k pagesize.
+ * The definition of task_union in sched.h makes it 16k wide.
+ */
+#if PAGE_SHIFT != 13
+char init_task_stack[THREAD_SIZE - INIT_TASK_SIZE] = { 0 };
+#endif
-/* $Id: ioctl32.c,v 1.123 2001/09/02 03:52:07 davem Exp $
+/* $Id: ioctl32.c,v 1.125 2001/09/18 22:29:05 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
+#include <linux/nbd.h>
/* Use this to get at 32-bit user passed pointers.
See sys_sparc32.c for description about these. */
COMPATIBLE_IOCTL(BLKSSZGET)
COMPATIBLE_IOCTL(BLKBSZGET)
COMPATIBLE_IOCTL(BLKBSZSET)
+COMPATIBLE_IOCTL(BLKGETSIZE64)
/* RAID */
COMPATIBLE_IOCTL(RAID_VERSION)
COMPATIBLE_IOCTL(MEMUNLOCK)
COMPATIBLE_IOCTL(MEMGETREGIONCOUNT)
COMPATIBLE_IOCTL(MEMGETREGIONINFO)
+/* NBD */
+COMPATIBLE_IOCTL(NBD_SET_SOCK)
+COMPATIBLE_IOCTL(NBD_SET_BLKSIZE)
+COMPATIBLE_IOCTL(NBD_SET_SIZE)
+COMPATIBLE_IOCTL(NBD_DO_IT)
+COMPATIBLE_IOCTL(NBD_CLEAR_SOCK)
+COMPATIBLE_IOCTL(NBD_CLEAR_QUE)
+COMPATIBLE_IOCTL(NBD_PRINT_DEBUG)
+COMPATIBLE_IOCTL(NBD_SET_SIZE_BLOCKS)
+COMPATIBLE_IOCTL(NBD_DISCONNECT)
/* And these ioctls need translation */
HANDLE_IOCTL(MEMREADOOB32, mtd_rw_oob)
HANDLE_IOCTL(MEMWRITEOOB32, mtd_rw_oob)
-/* $Id: process.c,v 1.118 2001/06/03 13:41:13 ecd Exp $
+/* $Id: process.c,v 1.119 2001/09/07 21:04:40 kanoj Exp $
* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
char *child_trap_frame;
/* Calculate offset to stack_frame & pt_regs */
- child_trap_frame = ((char *)p) + ((PAGE_SIZE << 1) - (TRACEREG_SZ+REGWIN_SZ));
+ child_trap_frame = ((char *)p) + (THREAD_SIZE - (TRACEREG_SZ+REGWIN_SZ));
memcpy(child_trap_frame, (((struct reg_window *)regs)-1), (TRACEREG_SZ+REGWIN_SZ));
t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
t->flags |= SPARC_FLAG_NEWCHILD;
-/* $Id: setup.c,v 1.65 2001/06/03 13:41:13 ecd Exp $
+/* $Id: setup.c,v 1.66 2001/09/20 00:35:31 davem Exp $
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
- /* Due to stack alignment restrictions and assumptions... */
- init_mm.mmap->vm_page_prot = PAGE_SHARED;
- init_mm.mmap->vm_start = PAGE_OFFSET;
- init_mm.mmap->vm_end = PAGE_OFFSET + highest_paddr;
init_task.thread.kregs = &fake_swapper_regs;
#ifdef CONFIG_IP_PNP
}
static void smp_setup_percpu_timer(void);
-static void smp_tune_scheduling(void);
static volatile unsigned long callin_flag = 0;
printk("Entering UltraSMPenguin Mode...\n");
__sti();
smp_store_cpu_info(boot_cpu_id);
- smp_tune_scheduling();
init_idle();
if (linux_num_cpus == 1)
return base;
}
-cycles_t cacheflush_time;
-
-extern unsigned long cheetah_tune_scheduling(void);
-
-static void __init smp_tune_scheduling (void)
-{
- unsigned long orig_flush_base, flush_base, flags, *p;
- unsigned int ecache_size, order;
- cycles_t tick1, tick2, raw;
-
- /* Approximate heuristic for SMP scheduling. It is an
- * estimation of the time it takes to flush the L2 cache
- * on the local processor.
- *
- * The ia32 chooses to use the L1 cache flush time instead,
- * and I consider this complete nonsense. The Ultra can service
- * a miss to the L1 with a hit to the L2 in 7 or 8 cycles, and
- * L2 misses are what create extra bus traffic (ie. the "cost"
- * of moving a process from one cpu to another).
- */
- printk("SMP: Calibrating ecache flush... ");
- if (tlb_type == cheetah) {
- cacheflush_time = cheetah_tune_scheduling();
- goto report;
- }
-
- ecache_size = prom_getintdefault(linux_cpus[0].prom_node,
- "ecache-size", (512 * 1024));
- if (ecache_size > (4 * 1024 * 1024))
- ecache_size = (4 * 1024 * 1024);
- orig_flush_base = flush_base =
- __get_free_pages(GFP_KERNEL, order = get_order(ecache_size));
-
- if (flush_base != 0UL) {
- __save_and_cli(flags);
-
- /* Scan twice the size once just to get the TLB entries
- * loaded and make sure the second scan measures pure misses.
- */
- for (p = (unsigned long *)flush_base;
- ((unsigned long)p) < (flush_base + (ecache_size<<1));
- p += (64 / sizeof(unsigned long)))
- *((volatile unsigned long *)p);
-
- /* Now the real measurement. */
- __asm__ __volatile__("
- b,pt %%xcc, 1f
- rd %%tick, %0
-
- .align 64
-1: ldx [%2 + 0x000], %%g1
- ldx [%2 + 0x040], %%g2
- ldx [%2 + 0x080], %%g3
- ldx [%2 + 0x0c0], %%g5
- add %2, 0x100, %2
- cmp %2, %4
- bne,pt %%xcc, 1b
- nop
-
- rd %%tick, %1"
- : "=&r" (tick1), "=&r" (tick2), "=&r" (flush_base)
- : "2" (flush_base), "r" (flush_base + ecache_size)
- : "g1", "g2", "g3", "g5");
-
- __restore_flags(flags);
-
- raw = (tick2 - tick1);
-
- /* Dampen it a little, considering two processes
- * sharing the cache and fitting.
- */
- cacheflush_time = (raw - (raw >> 2));
-
- free_pages(orig_flush_base, order);
- } else {
- cacheflush_time = ((ecache_size << 2) +
- (ecache_size << 1));
- }
-report:
- printk("Using heuristic of %d cycles.\n",
- (int) cacheflush_time);
-}
-
/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
-/* $Id: time.c,v 1.39 2001/06/08 02:33:37 davem Exp $
+/* $Id: time.c,v 1.40 2001/09/06 02:44:28 davem Exp $
* time.c: UltraSparc timer and TOD clock support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
if (strcmp(model, "mk48t02") &&
strcmp(model, "mk48t08") &&
strcmp(model, "mk48t59") &&
+ strcmp(model, "m5819") &&
strcmp(model, "ds1287")) {
if (node)
node = prom_getsibling(node);
if (edev->prom_node == node)
break;
if (edev == NULL) {
+ if (isa_chain != NULL)
+ goto try_isa_clock;
prom_printf("%s: Mostek not probed by EBUS\n",
__FUNCTION__);
prom_halt();
}
- if (!strcmp(model, "ds1287")) {
+ if (!strcmp(model, "ds1287") ||
+ !strcmp(model, "m5819")) {
ds1287_regs = edev->resource[0].start;
} else {
mstk48t59_regs = edev->resource[0].start;
} else if (isa_chain != NULL) {
struct isa_device *isadev;
+try_isa_clock:
for_each_isadev(isadev, isa_br)
if (isadev->prom_node == node)
break;
prom_printf("%s: Mostek not probed by ISA\n");
prom_halt();
}
- if (!strcmp(model, "ds1287")) {
+ if (!strcmp(model, "ds1287") ||
+ !strcmp(model, "m5819")) {
ds1287_regs = isadev->resource.start;
} else {
mstk48t59_regs = isadev->resource.start;
mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
}
+ break;
}
#endif
else {
-/* $Id: trampoline.S,v 1.21 2001/04/05 12:44:34 davem Exp $
+/* $Id: trampoline.S,v 1.22 2001/09/07 21:04:40 kanoj Exp $
* trampoline.S: Jump start slave processors on sparc64.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
+#include <asm/processor.h>
#include <asm/asm_offsets.h>
.data
membar #Sync
mov 1, %g5
- sllx %g5, (PAGE_SHIFT + 1), %g5
+ sllx %g5, THREAD_SHIFT, %g5
sub %g5, (REGWIN_SZ + STACK_BIAS), %g5
add %g6, %g5, %sp
mov 0, %fp
-/* $Id: traps.c,v 1.76 2001/04/03 13:46:31 davem Exp $
+/* $Id: traps.c,v 1.78 2001/09/14 19:49:32 kanoj Exp $
* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/psrcompat.h>
+#include <asm/processor.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
"i" (ASI_PHYS_USE_EC));
}
-#ifdef CONFIG_SMP
-unsigned long cheetah_tune_scheduling(void)
-{
- unsigned long tick1, tick2, raw;
-
- __asm__ __volatile__("rd %%tick, %0" : "=r" (tick1));
- cheetah_flush_ecache();
- __asm__ __volatile__("rd %%tick, %0" : "=r" (tick2));
-
- raw = (tick2 - tick1);
-
- return (raw - (raw >> 2));
-}
-#endif
-
/* Unfortunately, the diagnostic access to the I-cache tags we need to
* use to clear the thing interferes with I-cache coherency transactions.
*
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct task_struct)) ||
- fp >= (task_base + (2 * PAGE_SIZE)))
+ fp >= (task_base + THREAD_SIZE))
break;
rw = (struct reg_window *)fp;
pc = rw->ins[7];
-/* $Id: blockops.S,v 1.34 2001/09/03 01:34:18 kanoj Exp $
+/* $Id: blockops.S,v 1.35 2001/09/04 16:39:53 kanoj Exp $
* blockops.S: UltraSparc block zero optimized routines.
*
* Copyright (C) 1996, 1998, 1999, 2000 David S. Miller (davem@redhat.com)
#define TLBTEMP_ENT2 (62 << 3)
#define TLBTEMP_ENTSZ (1 << 3)
+#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
+#define PAGE_SIZE_REM 0x80
+#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 21)
+#define PAGE_SIZE_REM 0x100
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
.text
.align 32
add %o1, 0x40, %o1
ldda [%o1] ASI_BLK_P, %f16
add %o1, 0x40, %o1
- sethi %hi(8192), %o2
+ sethi %hi(PAGE_SIZE), %o2
1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
ldda [%o1] ASI_BLK_P, %f32
stda %f48, [%o0] ASI_BLK_P
stda %f48, [%o0] ASI_BLK_P
sub %o2, 0x40, %o2
add %o1, 0x40, %o1
- cmp %o2, 0x80
+ cmp %o2, PAGE_SIZE_REM
bne,pt %xcc, 1b
add %o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 21)
+ TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ membar #Sync
+ stda %f32, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ stda %f0, [%o0] ASI_BLK_P
+#else
membar #Sync
stda %f0, [%o0] ASI_BLK_P
add %o0, 0x40, %o0
stda %f16, [%o0] ASI_BLK_P
+#endif
membar #Sync
VISExit
retl
sub %o0, %g4, %g1
and %o2, %g3, %o0
sethi %hi(TLBTEMP_BASE), %o3
- sethi %uhi(_PAGE_VALID), %g3
+ sethi %uhi(_PAGE_VALID | _PAGE_SZBITS), %g3
sub %o1, %g4, %g2
sllx %g3, 32, %g3
mov TLB_TAG_ACCESS, %o2
add %o1, 0x40, %o1
ldda [%o1] ASI_BLK_P, %f16
add %o1, 0x40, %o1
- sethi %hi(8192), %o2
+ sethi %hi(PAGE_SIZE), %o2
1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
ldda [%o1] ASI_BLK_P, %f32
stda %f48, [%o0] ASI_BLK_P
stda %f48, [%o0] ASI_BLK_P
sub %o2, 0x40, %o2
add %o1, 0x40, %o1
- cmp %o2, 0x80
+ cmp %o2, PAGE_SIZE_REM
bne,pt %xcc, 1b
add %o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 21)
+ TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ membar #Sync
+ stda %f32, [%o0] ASI_BLK_P
+ add %o0, 0x40, %o0
+ stda %f0, [%o0] ASI_BLK_P
+#else
membar #Sync
stda %f0, [%o0] ASI_BLK_P
add %o0, 0x40, %o0
stda %f16, [%o0] ASI_BLK_P
+#endif
copy_user_page_continue:
membar #Sync
VISExit
add %o1, 0x40, %o1
ldda [%o1] ASI_BLK_P, %f16
add %o1, 0x40, %o1
- sethi %hi(8192), %o2
+ sethi %hi(PAGE_SIZE), %o2
1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
ldda [%o1] ASI_BLK_P, %f32
stda %f48, [%o0] ASI_BLK_COMMIT_P
stda %f48, [%o0] ASI_BLK_COMMIT_P
sub %o2, 0x40, %o2
add %o1, 0x40, %o1
- cmp %o2, 0x80
+ cmp %o2, PAGE_SIZE_REM
bne,pt %xcc, 1b
add %o0, 0x40, %o0
+#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 21)
+ TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
+ ldda [%o1] ASI_BLK_P, %f32
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ TOUCH(f16, f18, f20, f22, f24, f26, f28, f30)
+ ldda [%o1] ASI_BLK_P, %f0
+ stda %f48, [%o0] ASI_BLK_COMMIT_P
+ add %o1, 0x40, %o1
+ sub %o2, 0x40, %o2
+ add %o0, 0x40, %o0
+ membar #Sync
+ stda %f32, [%o0] ASI_BLK_COMMIT_P
+ add %o0, 0x40, %o0
+ ba,pt %xcc, copy_user_page_continue
+ stda %f0, [%o0] ASI_BLK_COMMIT_P
+#else
membar #Sync
stda %f0, [%o0] ASI_BLK_COMMIT_P
add %o0, 0x40, %o0
ba,pt %xcc, copy_user_page_continue
stda %f16, [%o0] ASI_BLK_COMMIT_P
+#endif
.align 32
.globl _clear_page
sub %o0, %g4, %g1
and %o1, %g3, %o0
mov TLB_TAG_ACCESS, %o2
- sethi %uhi(_PAGE_VALID), %g3
+ sethi %uhi(_PAGE_VALID | _PAGE_SZBITS), %g3
sethi %hi(TLBTEMP_BASE), %o3
sllx %g3, 32, %g3
or %g3, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), %g3
clear_page_common:
membar #StoreLoad | #StoreStore | #LoadStore ! LSU Group
fzero %f0 ! FPA Group
- mov 32, %o1 ! IEU0
+ mov PAGE_SIZE/256, %o1 ! IEU0
fzero %f2 ! FPA Group
faddd %f0, %f2, %f4 ! FPA Group
fmuld %f0, %f2, %f6 ! FPM
#undef FIX_INSN1
#undef FIX_INSN2
+#undef PAGE_SIZE_REM
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/starfire.h>
+#include <asm/tlb.h>
+
+mmu_gather_t mmu_gathers[NR_CPUS];
extern void device_scan(void);
-/* $Id: ultra.S,v 1.56 2001/08/30 10:10:32 davem Exp $
+/* $Id: ultra.S,v 1.57 2001/09/06 19:27:17 kanoj Exp $
* ultra.S: Don't expand these all over the place...
*
* Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
+#include <asm/mmu_context.h>
/* Basically, all this madness has to do with the
* fact that Cheetah does not support IMMU flushes
.text
.align 32
.globl __flush_tlb_page, __flush_tlb_mm, __flush_tlb_range
-__flush_tlb_page: /* %o0=(ctx & 0x3ff), %o1=page&PAGE_MASK, %o2=SECONDARY_CONTEXT */
+__flush_tlb_page: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=page&PAGE_MASK, %o2=SECONDARY_CONTEXT */
/*IC1*/ BRANCH_IF_CHEETAH(g2, g3, __cheetah_flush_tlb_page)
__spitfire_flush_tlb_page:
/*IC2*/ ldxa [%o2] ASI_DMMU, %g2
wrpr %g5, 0x0, %pstate
nop
nop
-__flush_tlb_mm: /* %o0=(ctx & 0x3ff), %o1=SECONDARY_CONTEXT */
+__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
/*IC5*/ BRANCH_IF_CHEETAH(g2, g3, __cheetah_flush_tlb_mm)
__spitfire_flush_tlb_mm:
/*IC6*/ ldxa [%o1] ASI_DMMU, %g2
retl
wrpr %g5, 0x0, %pstate
nop
-__flush_tlb_range: /* %o0=(ctx&0x3ff), %o1=start&PAGE_MASK, %o2=SECONDARY_CONTEXT,
+__flush_tlb_range: /* %o0=(ctx&TAG_CONTEXT_BITS), %o1=start&PAGE_MASK, %o2=SECONDARY_CONTEXT,
* %o3=end&PAGE_MASK, %o4=PAGE_SIZE, %o5=(end - start)
*/
/*IC9*/ BRANCH_IF_CHEETAH(g2, g3, __cheetah_flush_tlb_range)
#define TLB_MAGIC 207 /* Students, do you know how I calculated this? -DaveM */
/*IC10*/cmp %o5, %o4
bleu,pt %xcc, __flush_tlb_page
- srlx %o5, 13, %g5
+ srlx %o5, PAGE_SHIFT, %g5
cmp %g5, TLB_MAGIC
bgeu,pn %icc, __spitfire_flush_tlb_range_constant_time
or %o1, 0x10, %g5
flush %g6
1: ldxa [%g2] ASI_ITLB_TAG_READ, %o4
- and %o4, 0x3ff, %o5
+ and %o4, TAG_CONTEXT_BITS, %o5
cmp %o5, %o0
bne,pt %icc, 2f
-/*IC13*/ andn %o4, 0x3ff, %o4
+/*IC13*/ andn %o4, TAG_CONTEXT_BITS, %o4
cmp %o4, %o1
blu,pt %xcc, 2f
cmp %o4, %o3
blu,pn %xcc, 4f
2: ldxa [%g2] ASI_DTLB_TAG_READ, %o4
- and %o4, 0x3ff, %o5
+ and %o4, TAG_CONTEXT_BITS, %o5
cmp %o5, %o0
-/*IC14*/andn %o4, 0x3ff, %o4
+/*IC14*/andn %o4, TAG_CONTEXT_BITS, %o4
bne,pt %icc, 3f
cmp %o4, %o1
blu,pt %xcc, 3f
.globl __update_mmu_cache
__update_mmu_cache: /* %o0=vma, %o1=address, %o2=pte */
ldub [%g6 + AOFF_task_thread + AOFF_thread_fault_code], %o3
- srlx %o1, 13, %o1
+ srlx %o1, PAGE_SHIFT, %o1
ldx [%o0 + 0x0], %o4 /* XXX vma->vm_mm */
brz,pn %o3, 1f
- sllx %o1, 13, %o0
+ sllx %o1, PAGE_SHIFT, %o0
ldx [%o4 + AOFF_mm_context], %o5
andcc %o3, FAULT_CODE_DTLB, %g0
mov %o2, %o1
- and %o5, 0x3ff, %o5
+ and %o5, TAG_CONTEXT_BITS, %o5
bne,pt %xcc, __prefill_dtlb
or %o0, %o5, %o0
ba,a,pt %xcc, __prefill_itlb
-/* $Id: fs.c,v 1.24 2001/02/13 01:16:44 davem Exp $
+/* $Id: fs.c,v 1.25 2001/09/19 00:04:30 davem Exp $
* fs.c: fs related syscall emulation for Solaris
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
-/* $Id: misc.c,v 1.32 2001/03/24 09:36:11 davem Exp $
+/* $Id: misc.c,v 1.33 2001/09/18 22:29:06 davem Exp $
* misc.c: Miscelaneous syscall emulation for Solaris
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
extern long solaris_to_linux_signals[], linux_to_solaris_signals[];
struct exec_domain solaris_exec_domain = {
- "Solaris",
- NULL,
- 1, 1, /* PER_SVR4 personality */
- solaris_to_linux_signals,
- linux_to_solaris_signals,
- THIS_MODULE,
- NULL
+ name: "Solaris",
+ handler: NULL,
+ pers_low: 1, /* PER_SVR4 personality */
+ pers_high: 1,
+ signal_map: solaris_to_linux_signals,
+ signal_invmap: linux_to_solaris_signals,
+ module: THIS_MODULE,
+ next: NULL
};
extern int init_socksys(void);
-/* $Id: timod.c,v 1.15 2001/08/13 18:56:10 davem Exp $
+/* $Id: timod.c,v 1.16 2001/09/18 22:29:06 davem Exp $
* timod.c: timod emulation.
*
* Copyright (C) 1998 Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
#include <asm/io.h>
#include <linux/blk.h>
#include <linux/highmem.h>
-#include <linux/raid/md.h>
-
+#include <linux/slab.h>
#include <linux/module.h>
/*
/* bdev->bd_sem is held by caller */
bdev->bd_openers++;
bdev->bd_cache_openers++;
- bdev->bd_inode = inode;
}
}
#include <asm/ptrace.h>
-extern void wakeup_bdflush(int);
extern void reset_vc(unsigned int);
extern struct list_head super_blocks;
static void sysrq_handle_sync(int key, struct pt_regs *pt_regs,
struct kbd_struct *kbd, struct tty_struct *tty) {
emergency_sync_scheduled = EMERG_SYNC;
- wakeup_bdflush(0);
+ wakeup_bdflush();
}
static struct sysrq_key_op sysrq_sync_op = {
handler: sysrq_handle_sync,
static void sysrq_handle_mountro(int key, struct pt_regs *pt_regs,
struct kbd_struct *kbd, struct tty_struct *tty) {
emergency_sync_scheduled = EMERG_REMOUNT;
- wakeup_bdflush(0);
+ wakeup_bdflush();
}
static struct sysrq_key_op sysrq_mountro_op = {
handler: sysrq_handle_mountro,
return 0;
}
-static void __exit sco_cleanup(void)
+static void __exit soc_cleanup(void)
{
struct soc *s;
int irq;
return -EFAULT;
nr = info.number;
- if (nr >= mddev->sb->raid_disks+mddev->sb->spare_disks)
+ if (nr >= MD_SB_DISKS)
return -EINVAL;
SET_FROM_SB(major);
fi
tristate ' BMAC (G3 ethernet) support' CONFIG_BMAC
tristate ' GMAC (G4/iBook ethernet) support' CONFIG_GMAC
- tristate ' Symbios 53c885 (Synergy ethernet) support' CONFIG_NCR885E
tristate ' National DP83902AV (Oak ethernet) support' CONFIG_OAKNET
fi
if [ "$CONFIG_ZORRO" = "y" ]; then
obj-$(CONFIG_MACE) += mace.o
obj-$(CONFIG_BMAC) += bmac.o
obj-$(CONFIG_GMAC) += gmac.o
-obj-$(CONFIG_NCR885E) += ncr885e.o
obj-$(CONFIG_OAKNET) += oaknet.o 8390.o
0.01 2001/05/03 Create DL2000-based linux driver
0.02 2001/05/21 Add VLAN and hardware checksum support.
1.00 2001/06/26 Add jumbo frame support.
+ 1.01 2001/08/21 Add two parameters, int_count and int_timeout.
*/
#include "dl2k.h"
-
static char version[] __devinitdata =
-KERN_INFO "D-Link DL2000-based linux driver v1.00 2001/06/26\n";
-
-
+ KERN_INFO "D-Link DL2000-based linux driver v1.01 2001/08/30\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int copy_thresh;
+static int int_count; /* Rx frame count each interrupt */
+static int int_timeout; /* Rx DMA wait time in 64ns increments */
MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_PARM (vlan, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (jumbo, "1-" __MODULE_STRING (MAX_UNITS) "i");
MODULE_PARM (copy_thresh, "i");
+MODULE_PARM (int_count, "i");
+MODULE_PARM (int_timeout, "i");
/* Enable the default interrupts */
#define EnableInt() \
-writew(RxComplete| RxDMAComplete | HostError | IntRequested | TxComplete| \
- TxDMAComplete| UpdateStats | LinkEvent, ioaddr + IntEnable)
-static int max_intrloop = 25;
+writew(RxDMAComplete | HostError | IntRequested | TxComplete| \
+ UpdateStats | LinkEvent, ioaddr + IntEnable)
+
+static int max_intrloop = 50;
static int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
}
np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
vlan[card_idx] : 0;
+ if (int_count != 0 && int_timeout != 0) {
+ np->int_count = int_count;
+ np->int_timeout = int_timeout;
+ np->coalesce = 1;
+ }
}
dev->open = &rio_open;
dev->hard_start_xmit = &start_xmit;
#endif
pci_set_drvdata (pdev, dev);
- ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_iounmap;
- np->tx_ring = (struct netdev_desc *)ring_space;
+ np->tx_ring = (struct netdev_desc *) ring_space;
np->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- np->rx_ring = (struct netdev_desc *)ring_space;
+ np->rx_ring = (struct netdev_desc *) ring_space;
np->rx_ring_dma = ring_dma;
/* Parse eeprom data */
printk (KERN_INFO "%s: %s, %2x:%2x:%2x:%2x:%2x:%2x, IRQ %d\n",
dev->name, np->name,
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5],
- irq);
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq);
return 0;
-err_out_unmap_rx:
- pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
-err_out_unmap_tx:
- pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
-err_out_iounmap:
+ err_out_unmap_rx:
+ pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ err_out_unmap_tx:
+ pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ err_out_iounmap:
#ifndef USE_IO_OPS
iounmap ((void *) ioaddr);
-err_out_dev:
+ err_out_dev:
#endif
kfree (dev);
-err_out_res:
+ err_out_res:
pci_release_regions (pdev);
-err_out_disable:
+ err_out_disable:
pci_disable_device (pdev);
return err;
}
writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i);
set_multicast (dev);
-
+ if (np->coalesce) {
+ writel (np->int_count | np->int_timeout << 16,
+ ioaddr + RxDMAIntCtrl);
+ }
/* Set RIO to poll every N*320nsec. */
writeb (0xff, ioaddr + RxDMAPollPeriod);
writeb (0xff, ioaddr + TxDMAPollPeriod);
netif_start_queue (dev);
writel (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl);
-
/* VLAN supported */
if (np->vlan) {
/* priority field in RxDMAIntCtrl */
- writel (0x7 << 10, ioaddr + RxDMAIntCtrl);
+ writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10,
+ ioaddr + RxDMAIntCtrl);
/* VLANId */
writew (np->vlan, ioaddr + VLANId);
/* Length/Type should be 0x8100 */
/* Initialize Rx descriptors */
for (i = 0; i < RX_RING_SIZE; i++) {
- np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
- ((i+1)%RX_RING_SIZE)*sizeof(struct netdev_desc));
+ np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
+ ((i +
+ 1) % RX_RING_SIZE) *
+ sizeof (struct
+ netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
np->rx_skbuff[i] = 0;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve (skb, 2); /* 16 byte align the IP header. */
/* Rubicon now supports 40 bits of addressing space. */
- np->rx_ring[i].fraginfo = cpu_to_le64(pci_map_single(
- np->pdev, skb->tail, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ np->rx_ring[i].fraginfo =
+ cpu_to_le64 (pci_map_single
+ (np->pdev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
}
#endif
if (np->vlan) {
txdesc->status |=
- cpu_to_le64 (VLANTagInsert) |
- (cpu_to_le64 (np->vlan) << 32) |
- (cpu_to_le64 (skb->priority) << 45);
+ cpu_to_le64 (VLANTagInsert) |
+ (cpu_to_le64 (np->vlan) << 32) |
+ (cpu_to_le64 (skb->priority) << 45);
}
/* Send one packet each time at 10Mbps mode */
+		/* Tx coalescing: do not let the loop exceed 8 descriptors */
if (entry % 0x08 == 0 || np->speed == 10)
txdesc->status |= cpu_to_le64 (TxIndicate);
- txdesc->fraginfo = cpu_to_le64 (pci_map_single(np->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE));
+ txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
+ skb->len,
+ PCI_DMA_TODEVICE));
txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;
/* Chain the last descriptor's pointer to this one */
if (np->last_tx)
np->last_tx->next_desc = cpu_to_le64 (np->tx_ring_dma +
- entry*sizeof(struct netdev_desc));
+ entry *
+ sizeof (struct
+ netdev_desc));
np->last_tx = txdesc;
/* Clear TFDDone, then TxDMA start to send this descriptor */
/* TxDMAPollNow */
writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
np->cur_tx++;
-
if (np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1 && np->speed != 10) {
/* do nothing */
} else {
/* The first TFDListPtr */
if (readl (dev->base_addr + TFDListPtr0) == 0) {
- writel (np->tx_ring_dma + entry*sizeof(struct netdev_desc),
+ writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
dev->base_addr + TFDListPtr0);
writel (0, dev->base_addr + TFDListPtr1);
}
-
+
spin_lock_irqsave (&np->lock, flags);
if (np->old_tx > TX_RING_SIZE) {
tx_shift = TX_RING_SIZE;
np->old_tx -= tx_shift;
np->cur_tx -= tx_shift;
}
- spin_unlock_irqrestore (&np->lock, flags);
-
+ spin_unlock_irqrestore (&np->lock, flags);
+
/* NETDEV WATCHDOG timer */
dev->trans_start = jiffies;
return 0;
np = dev->priv;
spin_lock (&np->lock);
while (1) {
- int_status = readw (ioaddr + IntStatus);
+ int_status = readw (ioaddr + IntStatus) &
+ (HostError | TxComplete | IntRequested |
+ UpdateStats | LinkEvent | RxDMAComplete);
writew (int_status & (HostError | TxComplete | RxComplete |
IntRequested | UpdateStats | LinkEvent |
TxDMAComplete | RxDMAComplete | RFDListEnd
if (int_status == 0)
break;
/* Processing received packets */
- receive_packet (dev);
+ if (int_status & RxDMAComplete)
+ receive_packet (dev);
/* TxComplete interrupt */
if (int_status & TxComplete) {
- int cnt = 20;
int tx_status = readl (ioaddr + TxStatus);
- while (tx_status & 0x80) { /* TxComplete */
- /* Handle TxError */
- if (tx_status & 0x01)
- tx_error (dev, tx_status);
- tx_status = readl (ioaddr + TxStatus);
- /* too much TxError */
- if (--cnt < 0)
- break;
- }
+ if (tx_status & 0x01)
+ tx_error (dev, tx_status);
/* Send one packet each time at 10Mbps mode */
if (np->speed == 10) {
np->tx_full = 0;
netif_wake_queue (dev);
}
- }
- /* Free used tx skbuffs */
- for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
- int entry = np->old_tx % TX_RING_SIZE;
- struct sk_buff *skb;
- if (!(np->tx_ring[entry].status & TFDDone))
- break;
- skb = np->tx_skbuff[entry];
- pci_unmap_single(np->pdev, np->tx_ring[entry].fraginfo,
- skb->len, PCI_DMA_TODEVICE);
- dev_kfree_skb_irq (skb);
- np->tx_skbuff[entry] = 0;
+ /* Free used tx skbuffs */
+ for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
+ int entry = np->old_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+
+ if (!(np->tx_ring[entry].status & TFDDone))
+ break;
+ skb = np->tx_skbuff[entry];
+ pci_unmap_single (np->pdev,
+ np->tx_ring[entry].fraginfo,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq (skb);
+ np->tx_skbuff[entry] = 0;
+ }
}
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
np->tx_full = 0;
netif_wake_queue (dev);
}
-
/* Handle uncommon events */
if (int_status &
(IntRequested | HostError | LinkEvent | UpdateStats))
be caught by rio_error() to recovery the interrupts */
if (--cnt < 0) {
get_stats (dev);
- writel (1000, ioaddr + CountDown);
+ writel (1, ioaddr + CountDown);
writew (IntRequested, ioaddr + IntEnable);
break;
}
/* Ttransmit Underrun */
if (tx_status & 0x10) {
np->stats.tx_fifo_errors++;
+ writew (readw (ioaddr + TxStartThresh) + 0x10,
+ ioaddr + TxStartThresh);
/* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
- writew (TxReset | DMAReset | FIFOReset, ioaddr + ASICCtrl + 2);
+ writew (TxReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASICCtrl + 2);
/* Wait for ResetBusy bit clear */
for (i = 50; i > 0; i--) {
if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
break;
mdelay (1);
}
+ /* Free completed descriptors */
+ for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
+ int entry = np->old_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ if (!(np->tx_ring[entry].status & TFDDone))
+ break;
+
+ skb = np->tx_skbuff[entry];
+ pci_unmap_single (np->pdev, np->tx_ring[entry].fraginfo,
+ skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq (skb);
+ np->tx_skbuff[entry] = 0;
+ }
+
/* Reset TFDListPtr */
- writel (np->tx_ring_dma + frame_id*sizeof(struct netdev_desc),
+ writel (np->tx_ring_dma +
+ np->old_tx * sizeof (struct netdev_desc),
dev->base_addr + TFDListPtr0);
writel (0, dev->base_addr + TFDListPtr1);
int entry = np->cur_rx % RX_RING_SIZE;
int cnt = np->old_rx + RX_RING_SIZE - np->cur_rx;
int rx_shift;
-
if (np->old_rx > RX_RING_SIZE) {
rx_shift = RX_RING_SIZE;
np->old_rx -= rx_shift;
while (1) {
struct netdev_desc *desc = &np->rx_ring[entry];
int pkt_len;
- u64 frame_status;
+ u64 frame_status;
- if (!(desc->status & RFDDone) ||
- !(desc->status & FrameStart) ||
- !(desc->status & FrameEnd))
+ if (!(desc->status & RFDDone) ||
+ !(desc->status & FrameStart) || !(desc->status & FrameEnd))
break;
/* Chip omits the CRC. */
- pkt_len = le64_to_cpu (desc->status & 0xffff);
+ pkt_len = le64_to_cpu (desc->status & 0xffff);
frame_status = le64_to_cpu (desc->status);
if (--cnt < 0)
break;
DEBUG_PKT_DUMP (np, pkt_len);
- pci_dma_sync_single(np->pdev, desc->fraginfo, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ pci_dma_sync_single (np->pdev, desc->fraginfo, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
/* Update rx error statistics, drop packet. */
if (frame_status & 0x003f0000) {
np->stats.rx_errors++;
- if (frame_status & 0x00100000)
+ if (frame_status & 0x00300000)
np->stats.rx_length_errors++;
if (frame_status & 0x00010000)
np->stats.rx_fifo_errors++;
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
- pci_unmap_single(np->pdev, desc->fraginfo,
- np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ pci_unmap_single (np->pdev, desc->fraginfo,
+ np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
dev->last_rx = jiffies;
}
entry = (++np->cur_rx) % RX_RING_SIZE;
- }
+ }
/* Re-allocate skbuffs to fill the descriptor ring */
for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
struct sk_buff *skb;
skb->dev = dev;
/* 16 byte align the IP header */
skb_reserve (skb, 2);
- np->rx_ring[entry].fraginfo = cpu_to_le64(pci_map_single(
- np->pdev, skb->tail, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ np->rx_ring[entry].fraginfo =
+ cpu_to_le64 (pci_map_single
+ (np->pdev, skb->tail, np->rx_buf_sz,
+ PCI_DMA_FROMDEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64 (np->rx_buf_sz) << 48;
np->rx_ring[entry].status = 0;
}
+
/* RxDMAPollNow */
writel (readl (dev->base_addr + DMACtrl) | 0x00000010,
dev->base_addr + DMACtrl);
/* Stop the down counter and recovery the interrupt */
if (int_status & IntRequested) {
-
writew (0, ioaddr + IntEnable);
writel (0, ioaddr + CountDown);
/* Enable default interrupts */
if (dev->flags & IFF_PROMISC) {
/* Receive all frames promiscuously. */
rx_mode |= ReceiveAllFrames;
- } else
- if (((dev->flags & IFF_MULTICAST)
- && (dev->mc_count > multicast_filter_limit))
- || (dev->flags & IFF_ALLMULTI)) {
+ } else if (((dev->flags & IFF_MULTICAST)
+ && (dev->mc_count > multicast_filter_limit))
+ || (dev->flags & IFF_ALLMULTI)) {
/* Receive broadcast and multicast frames */
rx_mode |= ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if ((dev->flags & IFF_MULTICAST) & (dev->mc_count > 0)) {
netif_wake_queue (dev);
break;
case SIOCDEVPRIVATE + 7:
- printk ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
- np->tx_full, np->cur_tx, np->old_tx,
- np->cur_rx, np->old_rx);
+ printk
+ ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
+ np->tx_full, np->cur_tx, np->old_tx, np->cur_rx,
+ np->old_rx);
break;
case SIOCDEVPRIVATE + 8:
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
printk
- ("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
- (u32) (np->tx_ring_dma + i*sizeof(*desc)),
- (u32) desc->next_desc,
- (u32) desc->status, (u32) (desc->fraginfo >> 32),
- (u32) desc->fraginfo);
+ ("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
+ (u32) (np->tx_ring_dma + i * sizeof (*desc)),
+ (u32) desc->next_desc,
+ (u32) desc->status, (u32) (desc->fraginfo >> 32),
+ (u32) desc->fraginfo);
printk ("\n");
}
printk ("\n");
}
return 0;
-invalid_cmd:
+ invalid_cmd:
return -1;
}
#endif
mii_getbit (dev);
return (retval >> 1) & 0xffff;
-err_out:
+ err_out:
return 0;
}
static int
/* Preamble */
mii_send_bits (dev, 0xffffffff, 32);
/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
- /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x0502 for write */
+ /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
mii_send_bits (dev, cmd, 32);
/* End cycle */
np->rx_ring[i].fraginfo = 0;
skb = np->rx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev, np->rx_ring[i].fraginfo,
- skb->len, PCI_DMA_FROMDEVICE);
+ pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
+ skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
np->rx_skbuff[i] = 0;
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev, np->tx_ring[i].fraginfo,
- skb->len, PCI_DMA_TODEVICE);
+ pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
+ skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
np->tx_skbuff[i] = 0;
}
}
+
return 0;
}
struct netdev_private *np = dev->priv;
unregister_netdev (dev);
- pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
- np->rx_ring_dma);
- pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
- np->tx_ring_dma);
+ pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
#ifndef USE_IO_OPS
iounmap ((char *) (dev->base_addr));
#endif
}
static struct pci_driver rio_driver = {
- name: "dl2k",
- id_table: rio_pci_tbl,
- probe: rio_probe1,
- remove: rio_remove1,
+ name:"dl2k",
+ id_table:rio_pci_tbl,
+ probe:rio_probe1,
+ remove:rio_remove1,
};
static int __init
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
-
-#define TX_RING_SIZE 16
-#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
-#define RX_RING_SIZE 16
+#include <linux/time.h>
+#define TX_RING_SIZE 128
+#define TX_QUEUE_LEN 96 /* Limit ring entries actually used. */
+#define RX_RING_SIZE 128
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
/* Bits in MACCtrl. */
enum MACCtrl_bits {
DuplexSelect = 0x20,
+ TxFlowControlEnable = 0x80,
+ RxFlowControlEnable = 0x0100,
RcvFCS = 0x200,
AutoVLANtagging = 0x1000,
AutoVLANuntagging = 0x2000,
struct sk_buff *tx_skbuff[TX_RING_SIZE];
dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma;
- struct pci_dev * pdev;
+ struct pci_dev *pdev;
spinlock_t lock;
struct net_device_stats stats;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
unsigned int an_enable; /* Auto-Negotiated Enable */
unsigned int chip_id; /* PCI table chip id */
unsigned int jumbo;
+ unsigned int int_count;
+ unsigned int int_timeout;
+ unsigned int coalesce:1;
struct netdev_desc *last_tx; /* Last Tx descriptor used. */
unsigned long cur_rx, old_rx; /* Producer/consumer ring indices */
unsigned long cur_tx, old_tx;
for (i = 0; i < TX_RING_SIZE; i++) {
desc = &np->tx_ring[i];
printk
- ("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
- (u32) np->tx_ring_dma + i*sizeof(*desc),
- (u32) desc->next_desc, (u32) desc->status,
- (u32) (desc->fraginfo >> 32),
- (u32) desc->fraginfo);
+ ("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
+ (u32) np->tx_ring_dma + i * sizeof (*desc),
+ (u32) desc->next_desc, (u32) desc->status,
+ (u32) (desc->fraginfo >> 32),
+ (u32) desc->fraginfo);
printk ("\n");
}
printk ("\n");
for (i = 0; i < RX_RING_SIZE; i++) {
desc = &np->rx_ring[i];
printk
- ("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
- (u32) np->rx_ring_dma + i*sizeof(*desc),
- (u32) desc->next_desc, (u32) desc->status,
- (u32) (desc->fraginfo >> 32),
- (u32) desc->fraginfo);
+ ("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
+ (u32) np->rx_ring_dma + i * sizeof (*desc),
+ (u32) desc->next_desc, (u32) desc->status,
+ (u32) (desc->fraginfo >> 32),
+ (u32) desc->fraginfo);
printk ("\n");
}
printk ("\n");
(u32) (frame_status >> 32), (u32) frame_status);
}
if (np->rx_debug == 7) {
-
- phead = bus_to_virt(le64_to_cpu(desc->fraginfo & 0xffffffffff));
+
+ phead =
+ bus_to_virt (le64_to_cpu (desc->fraginfo & 0xffffffffff));
for (pchar = phead, i = 0; i < pkt_len; i++, pchar++) {
printk ("%02x ", *pchar);
if ((i + 1) % 20 == 0)
#define DEBUG_PRINT() {}
#endif
-#endif /* __DL2K_H__ */
+#endif /* __DL2K_H__ */
+++ /dev/null
-#ifndef _H_NCR885_DEBUG
-#define _H_NCR885_DEBUG
-
-struct ncr885e_regs {
- unsigned long tx_status;
- unsigned long rx_status;
- unsigned long mac_config;
- unsigned long tx_control;
- unsigned long rx_control;
- unsigned long tx_cmd_ptr;
- unsigned long rx_cmd_ptr;
- unsigned long int_status;
-};
-
-#ifndef __KERNEL__
-
-struct ncr885e_private {
-
- struct dbdma_cmd *head;
- struct dbdma_cmd *tx_cmds;
- struct dbdma_cmd *rx_cmds;
- struct dbdma_cmd *stop_cmd;
-
- struct sk_buff *tx_skbufs[NR_TX_RING];
- struct sk_buff *rx_skbufs[NR_RX_RING];
-
- int rx_current;
- int rx_dirty;
-
- int tx_dirty;
- int tx_current;
-
- unsigned short tx_status[NR_TX_RING];
-
- unsigned char tx_fullup;
- unsigned char tx_active;
-
- struct net_device_stats stats;
-
- struct device *dev;
-
- struct timer_list tx_timeout;
- int timeout_active;
-
- spinlock_t lock;
-};
-
-#endif /* __KERNEL__ */
-
-
-#define NCR885E_GET_PRIV _IOR('N',1,sizeof( struct ncr885e_private ))
-#define NCR885E_GET_REGS _IOR('N',2,sizeof( struct ncr885e_regs ))
-
-#endif
+++ /dev/null
-/*
- * An Ethernet driver for the dual-function NCR 53C885 SCSI/Ethernet
- * controller.
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-static const char *version =
-"ncr885e.c:v1.0 02/10/00 dan@synergymicro.com, cort@fsmlabs.com\n";
-
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <asm/dbdma.h>
-#include <asm/uaccess.h>
-
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include "ncr885e.h"
-#include "ncr885_debug.h"
-
-static const char *chipname = "ncr885e";
-
-#define NCR885E_DEBUG 0
-
-/* The 885's Ethernet PCI device id. */
-#ifndef PCI_DEVICE_ID_NCR_53C885_ETHERNET
-#define PCI_DEVICE_ID_NCR_53C885_ETHERNET 0x0701
-#endif
-
-#define NR_RX_RING 8
-#define NR_TX_RING 8
-#define MAX_TX_ACTIVE (NR_TX_RING-1)
-#define NCMDS_TX NR_TX_RING
-
-#define RX_BUFLEN (ETH_FRAME_LEN + 8)
-#define TX_TIMEOUT 5*HZ
-
-#define NCR885E_TOTAL_SIZE 0xe0
-
-#define TXSR (1<<6) /* tx: xfer status written */
-#define TXABORT (1<<7) /* tx: abort */
-#define EOP (1<<7) /* rx: end of packet written to buffer */
-
-int ncr885e_debug = NCR885E_DEBUG;
-static int print_version;
-
-struct ncr885e_private {
-
- /* preserve a 1-1 marking with buffs */
- struct dbdma_cmd *head;
- struct dbdma_cmd *tx_cmds;
- struct dbdma_cmd *rx_cmds;
- struct dbdma_cmd *stop_cmd;
-
- struct sk_buff *tx_skbufs[NR_TX_RING];
- struct sk_buff *rx_skbufs[NR_RX_RING];
-
- int rx_current;
- int rx_dirty;
-
- int tx_dirty;
- int tx_current;
-
- unsigned short tx_status[NR_TX_RING];
-
- unsigned char tx_fullup;
- unsigned char tx_active;
-
- struct net_device_stats stats;
-
- struct net_device *dev;
-
- struct timer_list tx_timeout;
- int timeout_active;
-
- spinlock_t lock;
-};
-
-static struct net_device *root_dev;
-
-static int ncr885e_open( struct net_device *dev );
-static int ncr885e_close( struct net_device *dev );
-static void ncr885e_rx( struct net_device *dev );
-static void ncr885e_tx( struct net_device *dev );
-static int ncr885e_probe1( unsigned long ioaddr, unsigned char irq );
-static int ncr885e_xmit_start( struct sk_buff *skb, struct net_device *dev );
-static struct net_device_stats *ncr885e_stats( struct net_device *dev );
-static void ncr885e_set_multicast( struct net_device *dev );
-static void ncr885e_config( struct net_device *dev );
-static int ncr885e_set_address( struct net_device *dev, void *addr );
-static void ncr885e_interrupt( int irq, void *dev_id, struct pt_regs *regs );
-static void show_dbdma_cmd( volatile struct dbdma_cmd *cmd );
-#if 0
-static int read_eeprom( unsigned int ioadddr, int location );
-#endif
-
-#ifdef NCR885E_DEBUG_MII
-static void show_mii( unsigned long ioaddr );
-static int read_mii( unsigned long ioaddr, int reg );
-static void write_mii( unsigned long ioaddr, int reg, int data );
-#endif /* NCR885E_DEBUG_MII */
-
-#define TX_RESET_FLAGS (TX_CHANNEL_RUN|TX_CHANNEL_PAUSE|TX_CHANNEL_WAKE)
-#define RX_RESET_FLAGS (RX_CHANNEL_RUN|RX_CHANNEL_PAUSE|RX_CHANNEL_WAKE)
-
-
-static struct pci_device_id ncr885e_pci_tbl[] __initdata = {
- { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C885_ETHERNET, PCI_ANY_ID, PCI_ANY_ID, },
- { } /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(pci, ncr885e_pci_tbl);
-
-#if 0
-static int
-debug_ioctl( struct net_device *dev, struct ifreq *req, int cmd )
-{
- unsigned long ioaddr = dev->base_addr;
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- struct ncr885e_private *data;
- struct ncr885e_regs *regs;
- unsigned long flags;
-
- union {
- struct ncr885e_regs dump;
- struct ncr885e_private priv;
- } temp;
-
- switch( cmd ) {
-
- /* dump the rx ring status */
- case NCR885E_GET_PRIV:
-
- data = (struct ncr885e_private *) &req->ifr_data;
-
- if ( verify_area(VERIFY_WRITE, &req->ifr_data,
- sizeof( struct ncr885e_private )))
- return -EFAULT;
-
- memcpy((char *) &temp.priv, sp, sizeof( struct ncr885e_private ));
- copy_to_user( data, (char *) &temp.priv, sizeof( struct ncr885e_private));
- break;
-
- case NCR885E_GET_REGS:
-
- regs = (struct ncr885e_regs *) &req->ifr_data;
-
- if ( verify_area( VERIFY_WRITE, &req->ifr_data,
- sizeof( struct ncr885e_regs )))
- return -EFAULT;
-
- spin_lock_irqsave( &sp->lock, flags );
-
- temp.dump.tx_status = inl( ioaddr + TX_CHANNEL_STATUS );
- temp.dump.rx_status = inl( ioaddr + RX_CHANNEL_STATUS );
- temp.dump.mac_config = inl( ioaddr + MAC_CONFIG );
- temp.dump.tx_control = inl( ioaddr + TX_CHANNEL_CONTROL );
- temp.dump.rx_control = inl( ioaddr + RX_CHANNEL_CONTROL );
- temp.dump.tx_cmd_ptr = inl( ioaddr + TX_CMD_PTR_LO );
- temp.dump.rx_cmd_ptr = inl( ioaddr + RX_CMD_PTR_LO );
- temp.dump.int_status = inl( ioaddr + INTERRUPT_STATUS_REG );
-
- spin_unlock_irqrestore( &sp->lock, flags );
- copy_to_user( regs, (char *) &temp.dump, sizeof( struct ncr885e_regs ));
-
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-#endif
-
-/* Enable interrupts on the 53C885 */
-static inline void
-ncr885e_enable( struct net_device *dev )
-
-{
- unsigned long ioaddr = dev->base_addr;
- unsigned short reg;
-
- reg = inw(ioaddr + INTERRUPT_ENABLE);
- outw(reg | INTERRUPT_INTE, ioaddr + INTERRUPT_ENABLE);
-}
-
-/* Disable interrupts on the 53c885 */
-static inline void
-ncr885e_disable( struct net_device *dev )
-
-{
- unsigned long ioaddr = dev->base_addr;
- unsigned short reg;
-
- reg = inw( ioaddr + INTERRUPT_ENABLE );
- outw( reg & ~INTERRUPT_INTE, ioaddr + INTERRUPT_ENABLE );
-}
-
-
-static inline void
-ncr885e_reset( struct net_device *dev )
-
-{
- unsigned short reg;
- unsigned long cntl;
- int i;
- unsigned long ioaddr = dev->base_addr;
-
- if (ncr885e_debug > 1)
- printk( KERN_INFO "%s: Resetting 53C885...\n", dev->name );
-
- /* disable interrupts on the 53C885 */
- ncr885e_disable( dev );
-
- /* disable rx in the MAC */
- reg = inw( ioaddr + MAC_CONFIG );
- outw( reg & ~MAC_CONFIG_RXEN, ioaddr + MAC_CONFIG );
-
- for( i=0; i < 100; i++ ) {
-
- if ( !(inw( ioaddr + MAC_CONFIG ) & MAC_CONFIG_RXEN ))
- break;
- udelay( 10 );
- }
-
- reg = inw( ioaddr + MAC_CONFIG );
- outw( reg | MAC_CONFIG_SRST, ioaddr + MAC_CONFIG );
- outw( reg, ioaddr + MAC_CONFIG );
-
- /* disable both rx and tx DBDMA channels */
- outl( TX_DBDMA_ENABLE << 16, ioaddr + TX_CHANNEL_CONTROL );
- outl( RX_DBDMA_ENABLE << 16, ioaddr + RX_CHANNEL_CONTROL );
-
- for( i=0; i < 100; i++ ) {
-
- if ( !(inw( ioaddr + TX_CHANNEL_STATUS ) & TX_DBDMA_ENABLE ) &&
- !(inw( ioaddr + RX_CHANNEL_STATUS ) & RX_DBDMA_ENABLE ))
- break;
- udelay( 10 );
- }
-
- /* perform a "software reset" */
- cntl = inl( ioaddr + DBDMA_CONTROL );
- outl( cntl | DBDMA_SRST, ioaddr + DBDMA_CONTROL );
-
- for( i=0; i < 100; i++ ) {
-
- if ( !(inl( ioaddr + DBDMA_CONTROL ) & DBDMA_SRST ))
- break;
- udelay( 10 );
- }
-
- /* books says that a software reset should be done to the MAC, as
- well. This true??? */
-
- if (ncr885e_debug > 3)
- printk( KERN_INFO "%s: reset complete\n", dev->name );
-
-}
-
-
-/* configure the 53C885 chip.
-
- The DBDMA command descriptors on the 53C885 can be programmed to
- branch, interrupt or pause conditionally or always by using the
- interrupt, branch and wait select registers. */
-
-static void
-ncr885e_config( struct net_device *dev )
-
-{
- unsigned long ioaddr = dev->base_addr;
-
- if (ncr885e_debug > 3)
- printk( KERN_INFO "%s: Configuring 53C885.\n", dev->name );
-
- ncr885e_reset( dev );
-
- /* The 53C885 can be programmed to perform conditional DBDMA
- branches, interrupts or waits.
-
- Neither channel makes use of "wait", as it requires that the
- DBDMA engine to be restarted. Don't go there. The rx channel
- will branch upon the successful reception of a packet ('EOP' in
- the xfer_status field). The branch address is to the STOP
- DBDMA command descriptor, which shuts down the rx channel until
- the interrupt is serviced. */
-
- /* cause tx channel to stop after "status received" */
- outl( 0, ioaddr + TX_INT_SELECT );
- outl( (TX_WAIT_STAT_RECV << 16) | TX_WAIT_STAT_RECV,
- ioaddr + TX_WAIT_SELECT );
- outl( 0, ioaddr + TX_BRANCH_SELECT );
-
- /* cause rx channel to branch to the STOP descriptor on "End-of-Packet" */
-#if 0
- outl( (RX_INT_SELECT_EOP << 16) | RX_INT_SELECT_EOP,
- ioaddr + RX_INT_SELECT );
-#else
- outl( 0, ioaddr + RX_INT_SELECT );
-#endif
-#if 0
- outl( 0, ioaddr + RX_WAIT_SELECT );
-#else
- outl( (RX_WAIT_SELECT_EOP << 16) | RX_WAIT_SELECT_EOP,
- ioaddr + RX_WAIT_SELECT );
-#endif
-#if 1
- outl( 0, ioaddr + RX_BRANCH_SELECT );
-#else
- outl( (RX_BRANCH_SELECT_EOP << 16) | RX_BRANCH_SELECT_EOP,
- ioaddr + RX_BRANCH_SELECT );
-#endif
-
- /* configure DBDMA */
- outl( (DBDMA_BE | DBDMA_DPMRLE | DBDMA_TDPCE |
- DBDMA_DDPE | DBDMA_TDPE |
- (DBDMA_BURST_4 << DBDMA_TX_BST_SHIFT) |
- (DBDMA_BURST_4 << DBDMA_RX_BST_SHIFT) |
- (DBDMA_TX_ARBITRATION_DEFAULT) |
- (DBDMA_RX_ARBITRATION_DEFAULT)), ioaddr + DBDMA_CONTROL );
-
- outl( 0, ioaddr + TX_THRESHOLD );
-
- /* disable MAC loopback */
- outl( (MAC_CONFIG_ITXA | MAC_CONFIG_RXEN | MAC_CONFIG_RETRYL |
- MAC_CONFIG_PADEN | (0x18 << 16)),
- ioaddr + MAC_CONFIG );
-
- /* configure MAC */
- outl( (MAC_CONFIG_ITXA | MAC_CONFIG_RXEN | MAC_CONFIG_RETRYL |
- MAC_CONFIG_PADEN | ( 0x18 << 16)), ioaddr + MAC_CONFIG );
-
- outw( (0x1018), ioaddr + NBTOB_INTP_GAP );
-
- /* clear and enable interrupts */
- inw( ioaddr + INTERRUPT_CLEAR );
- ncr885e_enable( dev );
-
- /* and enable them in the chip */
- outl( (INTERRUPT_INTE|INTERRUPT_TX_MASK|INTERRUPT_RX_MASK)<<16,
- ioaddr + INTERRUPT_ENABLE - 2);
-
- if (ncr885e_debug > 3)
- printk( KERN_INFO "%s: 53C885 config complete.\n", dev->name );
-
- return;
-}
-
-
-
-/*
- transmit interrupt */
-
-static void
-ncr885e_tx( struct net_device *dev )
-
-{
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- volatile struct dbdma_cmd *cp, *dp;
- unsigned short txbits, xfer;
- int i;
-
- del_timer( &sp->tx_timeout );
-
- if (ncr885e_debug > 3)
- printk( KERN_INFO "%s: ncr885e_tx: active=%d, dirty=%d, current=%d\n",
- dev->name, sp->tx_active, sp->tx_dirty, sp->tx_current );
-
- sp->timeout_active = 0;
-
- i = sp->tx_dirty;
- cp = sp->tx_cmds + (i*3);
- dp = cp+1;
- sp->tx_active--;
-
- xfer = inw( &dp->xfer_status );
- txbits = inw( &sp->tx_status[i] );
-
- if (ncr885e_debug > 4) {
- show_dbdma_cmd( cp );
- show_dbdma_cmd( dp );
- }
-
- /* get xmit result */
- txbits = inw( &sp->tx_status[i] );
-
- if (ncr885e_debug > 3)
- printk( KERN_INFO "%s: tx xfer=%04x, txbits=%04x\n", dev->name,
- xfer, txbits );
-
- /* look for any channel status (?) */
- if ( xfer ) {
-
- dev_kfree_skb_irq( sp->tx_skbufs[i] );
-
- if ( txbits & TX_STATUS_TXOK ) {
- sp->stats.tx_packets++;
- sp->stats.tx_bytes += inw( &cp->req_count );
- }
-
- /* dropped packets */
- if ( txbits & (TX_STATUS_TDLC|TX_STATUS_TDEC) ) {
- sp->stats.tx_dropped++;
- }
-
- /* add the collisions */
- sp->stats.collisions += ( txbits & 0x04 );
-
- }
-
- netif_start_queue(dev);
-
- return;
-}
-
-/* rx interrupt handling */
-static void
-ncr885e_rx( struct net_device *dev )
-
-{
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- volatile struct dbdma_cmd *cp;
- struct sk_buff *skb;
- int i, nb;
- unsigned short status;
- unsigned char *data, *stats;
- unsigned long rxbits, ioaddr = dev->base_addr;
-
- i = sp->rx_current;
- cp = sp->rx_cmds + (i*2);
-
- if (ncr885e_debug > 3)
- printk( KERN_INFO "%s: ncr885e_rx dirty=%d, current=%d (cp@%p)\n",
- dev->name, sp->rx_dirty, sp->rx_current, cp );
-
- nb = inw( &cp->req_count ) - inw( &cp->res_count );
- status = inw( &cp->xfer_status );
-
- if (ncr885e_debug > 3)
- printk( KERN_INFO "%s: (rx %d) bytes=%d, xfer_status=%04x\n",
- dev->name, i, nb, status );
-
- if ( status ) {
-
- skb = sp->rx_skbufs[i];
- data = skb->data;
- stats = data + nb - 3;
- rxbits = (stats[0]|stats[1]<<8|stats[2]<<16);
-
- if (ncr885e_debug > 3)
- printk( KERN_INFO " rx_bits=%06lx\n", rxbits );
-
- skb->dev = dev;
- skb_put( skb, nb-3 );
- skb->protocol = eth_type_trans( skb, dev );
- netif_rx( skb );
- sp->rx_skbufs[i] = 0;
-
- if ( rxbits & RX_STATUS_RXOK ) {
- sp->stats.rx_packets++;
- sp->stats.rx_bytes += nb;
- }
-
- if ( rxbits & RX_STATUS_MCAST )
- sp->stats.multicast++;
-
- }
-
- sp->rx_dirty = sp->rx_current;
-
- if ( ++sp->rx_current >= NR_RX_RING )
- sp->rx_current = 0;
-
- /* fix up the one we just trashed */
- cp = sp->rx_cmds + (sp->rx_dirty * 2);
-
- skb = dev_alloc_skb( RX_BUFLEN + 2 );
- if ( skb != 0 ) {
- skb_reserve( skb, 2 );
- sp->rx_skbufs[sp->rx_dirty] = skb;
- }
-
- if (ncr885e_debug > 2)
- printk( KERN_INFO "%s: ncr885e_rx: using ring index %d, filling cp @ %p\n",
- dev->name, sp->rx_current, cp );
-
- outw( RX_BUFLEN, &cp->req_count );
- outw( 0, &cp->res_count );
- data = skb->data;
- outl( virt_to_bus( data ), &cp->phy_addr );
- outw( 0, &cp->xfer_status );
-
- cp = sp->rx_cmds + (sp->rx_current * 2);
-
- /* restart rx DMA */
- outl( virt_to_bus( cp ), ioaddr + RX_CMD_PTR_LO );
- outl( (RX_DBDMA_ENABLE << 16)|RX_CHANNEL_RUN,
- ioaddr + RX_CHANNEL_CONTROL );
-
- return;
-}
-
-static void
-ncr885e_misc_ints( struct net_device *dev, unsigned short status )
-
-{
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- struct dbdma_cmd *cp;
- unsigned long ioaddr = dev->base_addr;
-
- if (ncr885e_debug > 1)
- printk( KERN_INFO "miscellaneous interrupt handled; status=%02x\n",
- status );
-
- /* various transmit errors */
- if ( status &
- (INTERRUPT_PPET | INTERRUPT_PBFT | INTERRUPT_IIDT) ) {
-
- /* illegal instruction in tx dma */
- if ( status & INTERRUPT_IIDT ) {
-
- cp = (struct dbdma_cmd *) bus_to_virt( inl( ioaddr + TX_CMD_PTR_LO ));
- printk( KERN_INFO "%s: tx illegal insn:\n", dev->name );
- printk( KERN_INFO " tx DBDMA - cmd = %p, status = %04x\n",
- cp, inw( ioaddr + TX_CHANNEL_STATUS ));
- printk( KERN_INFO " command = %04x, phy_addr=%08x, req_count=%04x\n",
- inw( &cp->command ), inw( &cp->phy_addr ), inw( &cp->req_count ));
- }
-
- if ( status & INTERRUPT_PPET )
- printk( KERN_INFO "%s: tx PCI parity error\n", dev->name );
-
- if ( status & INTERRUPT_PBFT )
- printk( KERN_INFO "%s: tx PCI bus fault\n", dev->name );
- }
-
- /* look for rx errors */
- if ( status &
- (INTERRUPT_PPER | INTERRUPT_PBFR | INTERRUPT_IIDR)) {
-
- /* illegal instruction in rx dma */
- if ( status & INTERRUPT_IIDR ) {
-#if 0
- cmd = inl( ioaddr + RX_CMD_PTR_LO );
-#endif
- printk( KERN_ERR "%s: rx illegal DMA instruction:\n", dev->name );
- printk( KERN_ERR " channel status=%04x,\n",
- inl( ioaddr + RX_CHANNEL_STATUS ));
-#if 0
- show_dbdma_cmd( bus_to_virt( inl( ioaddr + RX_CMD_PTR_LO )));
- printk( KERN_ERR " instr (%08x) %08x %08x %08x\n",
- (int) cmd, cmd[0], cmd[1], cmd[2] );
-#endif
- }
-
- /* PCI parity error */
- if ( status & INTERRUPT_PPER )
- printk( KERN_INFO "%s: rx PCI parity error\n", dev->name );
-
- if ( status & INTERRUPT_PBFR )
- printk( KERN_INFO "%s: rx PCI bus fault\n", dev->name );
-
- sp->stats.rx_errors++;
- }
-
- if ( status & INTERRUPT_WI ) {
- printk( KERN_INFO "%s: link pulse\n", dev->name );
- }
-
- /* bump any counters */
-
-
- return;
-}
-
-static void
-ncr885e_interrupt( int irq, void *dev_id, struct pt_regs *regs )
-
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct ncr885e_private *sp;
- unsigned short status;
- int ioaddr;
-
- if ( dev == NULL ) {
- printk( KERN_ERR "symba: Interrupt IRQ %d for unknown device\n", irq );
- return;
- }
-
- ioaddr = dev->base_addr;
- sp = (struct ncr885e_private *) dev->priv;
- spin_lock( &sp->lock );
-
- status = inw( ioaddr + INTERRUPT_CLEAR );
-
- if (ncr885e_debug > 2)
- printk( KERN_INFO "%s: 53C885 interrupt 0x%02x\n", dev->name, status );
-
- /* handle non-tx and rx interrupts first */
- if ( status & ~(INTERRUPT_DIT|INTERRUPT_DIR))
- ncr885e_misc_ints( dev, status );
-
- /* look for tx interrupt: more to transmit, DBDMA stopped, or tx done */
- if ( ( status & INTERRUPT_DIT ) ) {
-
- if (ncr885e_debug > 2)
- printk( KERN_INFO "%s: tx int; int=%02x, chan stat=%02x\n",
- dev->name, status, inw( ioaddr + TX_CHANNEL_STATUS ));
-
- /* turn off timer */
- del_timer( &sp->tx_timeout );
- sp->timeout_active = 0;
-
- /* stop DMA */
- outl( TX_DBDMA_ENABLE << 16, ioaddr + TX_CHANNEL_CONTROL );
-
- ncr885e_tx( dev );
- }
-
- if ( status & INTERRUPT_DIR ) {
-
- if ( ncr885e_debug > 2 )
- printk( KERN_INFO "%s: rx interrupt; int=%02x, rx channel stat=%02x\n",
- dev->name, status, inw( ioaddr + RX_CHANNEL_STATUS ));
-
- /* stop DMA */
- outl( RX_DBDMA_ENABLE << 16, ioaddr + RX_CHANNEL_CONTROL );
-
- /* and handle the interrupt */
- ncr885e_rx( dev );
- }
-
- spin_unlock( &sp->lock );
-
- return;
-}
-
-
-/* doesn't set the address permanently, however... */
-static int
-ncr885e_set_address( struct net_device *dev, void *addr )
-
-{
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- struct sockaddr *saddr = addr;
- unsigned long flags;
- unsigned short reg[3];
- unsigned char *ioaddr, *p;
- int i;
-
- memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
-
- p = (unsigned char *) dev->dev_addr;
- printk( KERN_INFO "%s: setting new MAC address - ", dev->name );
-#if 0
- for( p = (unsigned char *) dev->dev_addr, i=0; i < 6; i++, p++ )
- printk("%c%2.2x", i ? ':' : ' ', *p );
-#endif
-
-
-	p = (unsigned char *) &reg;
- for( i=0; i < 6; i++ )
- p[i] = dev->dev_addr[i];
-
-#if 0
- printk("%s: Setting new mac address - ", dev->name );
- for( i=0; i < 6; i++ ) {
- printk("%02x", i ? ':' : ' ', p[i] );
- }
-
- printk("\n");
-#endif
-
- /* stop rx for the change */
- outl( RX_DBDMA_ENABLE << 16, ioaddr + RX_CHANNEL_CONTROL );
-
- spin_lock_irqsave( &sp->lock, flags );
-
- ioaddr = (unsigned char *) dev->base_addr;
-
- for( i = 0; i < 3; i++ ) {
- reg[i] = ((reg[i] & 0xff) << 8) | ((reg[i] >> 8) & 0xff);
- printk("%04x ", reg[i] );
- outw( reg[i], ioaddr + STATION_ADDRESS_0 + (i*2));
- }
- printk("\n");
-
- spin_unlock_irqrestore( &sp->lock, flags );
-
- /* restart rx */
- outl((RX_DBDMA_ENABLE << 16)|RX_CHANNEL_RUN,
- ioaddr + RX_CHANNEL_CONTROL );
-
- return 0;
-}
-
-static void
-ncr885e_tx_timeout( unsigned long data )
-
-{
- struct net_device *dev = (struct net_device *) data;
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- unsigned long flags, ioaddr;
- int i;
-
- save_flags( flags );
- cli();
-
- ioaddr = dev->base_addr;
- sp->timeout_active = 0;
- i = sp->tx_dirty;
-
- /* if we weren't active, bail... */
- if ( sp->tx_active == 0 ) {
- printk( KERN_INFO "%s: ncr885e_timeout...tx not active!\n", dev->name );
- goto out;
- }
-
- printk( KERN_ERR "%s: 53C885 timed out. Resetting...\n", dev->name );
-
- /* disable rx and tx DMA */
- outl( (TX_DBDMA_ENABLE << 16), ioaddr + TX_CHANNEL_CONTROL );
- outl( (RX_DBDMA_ENABLE << 16), ioaddr + RX_CHANNEL_CONTROL );
-
- /* reset the chip */
- ncr885e_config( dev );
- ncr885e_enable( dev );
-
- /* clear the wedged skb in the tx ring */
- sp->tx_active = 0;
- ++sp->stats.tx_errors;
-
- if ( sp->tx_skbufs[i] ) {
- dev_kfree_skb( sp->tx_skbufs[i] );
- sp->tx_skbufs[i] = 0;
- }
-
- /* start anew from the beginning of the ring buffer (why not?) */
- sp->tx_current = 0;
- netif_wake_queue(dev);
-
- /* restart rx dma */
- outl( (RX_DBDMA_ENABLE << 16) | RX_CHANNEL_RUN,
- ioaddr + RX_CHANNEL_CONTROL );
- out:
-
- restore_flags( flags );
-}
-
-static inline void
-ncr885e_set_timeout( struct net_device *dev )
-
-{
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- unsigned long flags;
-
- save_flags(flags);
- cli();
-
- if ( sp->timeout_active )
- del_timer( &sp->tx_timeout );
-
- sp->tx_timeout.expires = jiffies + TX_TIMEOUT;
- sp->tx_timeout.function = ncr885e_tx_timeout;
- sp->tx_timeout.data = (unsigned long) dev;
- add_timer( &sp->tx_timeout );
- sp->timeout_active = 1;
- restore_flags( flags );
-}
-
-
-/*
- * The goal is to set up DBDMA such that the rx ring contains only
- * one DMA descriptor per ring element and the tx ring has two (using
- * the cool features of branch- and wait-select. However, I'm not sure
- * if it's possible. For now, we plod through it with 3 descriptors
- * for tx, and two for rx.
- */
-
-static int
-ncr885e_open( struct net_device *dev )
-
-{
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- unsigned long ioaddr = dev->base_addr;
- struct sk_buff *skb;
- int i, size;
- char *data;
- struct dbdma_cmd *cp;
- unsigned long flags;
-
- /* allocate enough space for the tx and rx rings and a STOP descriptor */
- size = (sizeof( struct dbdma_cmd ) *
- ((NR_TX_RING * 3) + (NR_RX_RING * 2) + 1));
-
- cp = kmalloc( size, GFP_KERNEL );
-
- if ( cp == 0 ) {
- printk( KERN_ERR "Insufficient memory (%d bytes) for DBDMA\n", size );
- return -ENOMEM;
- }
-
- spin_lock_init( &sp->lock );
- spin_lock_irqsave( &sp->lock, flags );
-
- memset((char *) cp, 0, size );
- sp->head = cp;
-
- sp->stop_cmd = cp;
- outl( DBDMA_STOP, &cp->command );
-
- sp->rx_cmds = ++cp;
-
- for( i = 0; i < NR_RX_RING; i++ ) {
-
- cp = sp->rx_cmds + (i*2);
- skb = dev_alloc_skb( RX_BUFLEN + 2 );
-
- /* if there is insufficient memory, make this last ring use a
- static buffer and leave the loop with that skb as final one */
- if ( skb == 0 ) {
- printk( KERN_ERR "%s: insufficient memory for rx ring buffer\n",
- dev->name );
- break;
- }
-
- skb_reserve( skb, 2 );
- sp->rx_skbufs[i] = skb;
- data = skb->data;
-
- /* The DMA commands here are done such that an EOP is the only
- way that we should get an interrupt. This means that we could
- fill more than one skbuff before getting the interrupt at EOP. */
-
- /* Handle rx DMA such that it always interrupts.... */
- outw( (INPUT_MORE|INTR_ALWAYS), &cp->command );
- outw( RX_BUFLEN, &cp->req_count );
- outw( 0, &cp->res_count );
- outl( virt_to_bus( data ), &cp->phy_addr );
- outl( virt_to_bus( sp->stop_cmd ), &cp->cmd_dep );
- outw( 0, &cp->xfer_status );
-#if 0
- printk( KERN_INFO "rx at %p\n", cp );
- show_dbdma_cmd( cp );
-#endif
- ++cp;
-
- outw( DBDMA_STOP, &cp->command );
-
- }
-
- /* initialize to all rx buffers are available, fill limit is the end */
- sp->rx_dirty = 0;
- sp->rx_current = 0;
-
- /* fill the tx ring */
- sp->tx_cmds = cp+1;
-
- for( i = 0; i < NR_TX_RING; i++ ) {
-
- /* minimal setup for tx command */
- cp = sp->tx_cmds + (i*3);
- outw( OUTPUT_LAST, &cp->command );
- if (ncr885e_debug > 3) {
- printk( KERN_INFO "tx OUTPUT_LAST at %p\n", cp );
- show_dbdma_cmd( cp );
- }
-
- /* full setup for the status cmd */
- cp++;
- outw( INPUT_LAST|INTR_ALWAYS|WAIT_IFCLR, &cp->command );
- outl( virt_to_bus( &sp->tx_status[i] ), &cp->phy_addr );
- outw( 2, &cp->req_count );
- if ( ncr885e_debug > 3) {
- printk( KERN_INFO "tx INPUT_LAST cmd at %p\n", cp );
- show_dbdma_cmd( cp );
- }
-
- ++cp;
- outw( DBDMA_STOP, &cp->command );
-
- }
-#if 0
- /* chain the last tx DMA command to the STOP cmd */
- outw((INPUT_LAST|INTR_ALWAYS|BR_ALWAYS), &cp->command );
- outl( virt_to_bus( sp->stop_cmd ), &cp->cmd_dep );
-#endif
- sp->tx_active = 0;
- sp->tx_current = 0;
- sp->tx_dirty = 0;
-
- spin_unlock_irqrestore( &sp->lock, flags );
-
- /* the order seems important here for some reason. If the MPIC isn't
- enabled before the ethernet chip is enabled, shrapnel from the
- bootloader causes us to receive interrupts even though we've not
- yet enabled the tx channel. Go figure. It'd be better to configure
- the chip in the probe1() routine, but then we don't see interrupts
- at all. Everything looks all right on the logic analyzer, but... */
-
- ncr885e_config( dev );
-
- /* enable ethernet interrupts */
- if ( request_irq( dev->irq, &ncr885e_interrupt, SA_SHIRQ, chipname, dev )) {
- printk( KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq );
- return -EAGAIN;
- }
-
- (void) inw( ioaddr + INTERRUPT_CLEAR );
-
- ncr885e_enable( dev );
-
- /* start rx DBDMA */
- outl( virt_to_bus( sp->rx_cmds ), ioaddr + RX_CMD_PTR_LO );
- outl( (RX_DBDMA_ENABLE << 16)|RX_CHANNEL_RUN,
- ioaddr + RX_CHANNEL_CONTROL );
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int
-ncr885e_xmit_start( struct sk_buff *skb, struct net_device *dev )
-
-{
- struct ncr885e_private *sp = (struct ncr885e_private *) dev->priv;
- volatile struct dbdma_cmd *cp, *dp;
- unsigned long flags, ioaddr = dev->base_addr;
- int len, next, fill, entry;
-
- if ( ncr885e_debug > 3)
- printk( KERN_INFO "%s: xmit_start len=%d, dirty=%d, current=%d, active=%d\n",
- dev->name, skb->len, sp->tx_dirty, sp->tx_current, sp->tx_active );
-
- spin_lock_irqsave( &sp->lock, flags );
-
- /* find the free slot in the ring buffer */
- fill = sp->tx_current;
- next = fill + 1;
-
- if ( next >= NR_TX_RING )
- next = 0;
-#if 0
- /* mark ourselves as busy, even if we have too many packets waiting */
- netif_stop_queue(dev);
-#endif
-
- /* see if it's necessary to defer this packet */
- if ( sp->tx_active >= MAX_TX_ACTIVE ) {
- spin_unlock_irqrestore( &sp->lock, flags );
- return -1;
- }
-
- sp->tx_active++; /* bump "active tx" count */
- sp->tx_current = next; /* and show that we've used this buffer */
- sp->tx_dirty = fill; /* and mark this one to get picked up */
-
- len = skb->len;
-
- if ( len > ETH_FRAME_LEN ) {
- printk( KERN_DEBUG "%s: xmit frame too long (%d)\n", dev->name, len );
- len = ETH_FRAME_LEN;
- }
-
- /* get index into the tx DBDMA chain */
- entry = fill * 3;
- sp->tx_skbufs[fill] = skb;
- cp = sp->tx_cmds + entry;
- dp = cp + 1;
-
- /* update the rest of the OUTPUT_MORE descriptor */
- outw( len, &cp->req_count );
- outl( virt_to_bus( skb->data ), &cp->phy_addr );
- outw( 0, &cp->xfer_status );
- outw( 0, &cp->res_count );
-
- /* and finish off the INPUT_MORE */
- outw( 0, &dp->xfer_status );
- outw( 0, &dp->res_count );
- sp->tx_status[fill] = 0;
- outl( virt_to_bus( &sp->tx_status[fill] ), &dp->phy_addr );
-
- if ( ncr885e_debug > 2 )
- printk(KERN_INFO "%s: xmit_start: active %d, tx_current %d, tx_dirty %d\n",
- dev->name, sp->tx_active, sp->tx_current, sp->tx_dirty );
-
- if ( ncr885e_debug > 4 ) {
- show_dbdma_cmd( cp );
- show_dbdma_cmd( dp );
- }
-
-
- /* restart the tx DMA engine */
- outl( virt_to_bus( cp ), ioaddr + TX_CMD_PTR_LO );
- outl( (TX_DBDMA_ENABLE << 16)|TX_CHANNEL_RUN,
- ioaddr + TX_CHANNEL_CONTROL );
-
- ncr885e_set_timeout( dev );
-
- spin_unlock_irqrestore( &sp->lock, flags );
- dev->trans_start = jiffies;
-
- return 0;
-}
-
-static int
-ncr885e_close(struct net_device *dev)
-
-{
- int i;
- struct ncr885e_private *np = (struct ncr885e_private *) dev->priv;
- unsigned long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- spin_lock( &np->lock );
-
- printk(KERN_INFO "%s: NCR885E Ethernet closing...\n", dev->name );
-
- if (ncr885e_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down Ethernet chip\n", dev->name);
-
- ncr885e_disable(dev);
-
- del_timer(&np->tx_timeout);
-
- /* flip off rx and tx */
- outl( (RX_DBDMA_ENABLE << 16), ioaddr + RX_CHANNEL_CONTROL );
- outl( (TX_DBDMA_ENABLE << 16), ioaddr + TX_CHANNEL_CONTROL );
-
- /* free up the IRQ */
- free_irq( dev->irq, dev );
-
- for( i = 0; i < NR_RX_RING; i++ ) {
- if (np->rx_skbufs[i])
- dev_kfree_skb( np->rx_skbufs[i] );
- np->rx_skbufs[i] = 0;
- }
-#if 0
- for (i = 0; i < NR_TX_RING; i++) {
- if (np->tx_skbufs[i])
- dev_kfree_skb(np->tx_skbufs[i]);
- np->tx_skbufs[i] = 0;
- }
-#endif
- spin_unlock( &np->lock );
-
- kfree( np->head );
-
- return 0;
-}
-
-
-/*
- * multicast promiscuous mode isn't used here. Allow code in the
- * IP stack to determine which multicast packets are good or bad....
- * (this avoids having to use the hash table registers)
- */
-static void
-ncr885e_set_multicast( struct net_device *dev )
-
-{
- int ioaddr = dev->base_addr;
-
- if ( ncr885e_debug > 3 )
- printk("%s: set_multicast: dev->flags = %x, AF=%04x\n",
- dev->name, dev->flags, inw( ioaddr + ADDRESS_FILTER ));
-
- if ( dev->flags & IFF_PROMISC ) {
- printk( KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name );
- outw( ADDRESS_RPPRO, ioaddr + ADDRESS_FILTER );
- }
-
- /* accept all multicast packets without checking the mc_list. */
- else if ( dev->flags & IFF_ALLMULTI ) {
- printk( KERN_INFO "%s: Enabling all multicast packets.\n",
- dev->name );
- outw( ADDRESS_RPPRM, ioaddr + ADDRESS_FILTER );
- }
-
- /* enable broadcast rx */
- else {
- outw( ADDRESS_RPABC, ioaddr + ADDRESS_FILTER );
- }
-}
-
-static struct net_device_stats *
-ncr885e_stats( struct net_device *dev )
-
-{
- struct ncr885e_private *np = (struct ncr885e_private *) dev->priv;
-
- return &np->stats;
-}
-
-/* By this function, we're certain that we have a 885 Ethernet controller
- * so we finish setting it up and wrap up all the required Linux ethernet
- * configuration.
- */
-
-static int __init ncr885e_probe1(unsigned long ioaddr, unsigned char irq )
-
-{
- struct net_device *dev;
- struct ncr885e_private *sp;
- unsigned short station_addr[3], val;
- unsigned char *p;
- int i;
-
- dev = init_etherdev( NULL, sizeof( struct ncr885e_private ) );
- if (!dev)
- return -ENOMEM;
- SET_MODULE_OWNER(dev);
-
- sp = dev->priv;
-
- /* snag the station address and display it */
- for( i = 0; i < 3; i++ ) {
- val = inw( ioaddr + STATION_ADDRESS_0 + (i*2));
- station_addr[i] = ((val >> 8) & 0xff) | ((val << 8) & 0xff00);
- }
-
- printk( KERN_INFO "%s: %s at %08lx,", dev->name, chipname, ioaddr );
-
- p = (unsigned char *) &station_addr;
-
- for( i=0; i < 6; i++ ) {
- dev->dev_addr[i] = *p;
- printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i] );
- p++;
- }
-
- printk(", IRQ %d.\n", irq );
-
- /* set up a timer */
- init_timer( &sp->tx_timeout );
- sp->timeout_active = 0;
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- ether_setup( dev );
-
- /* everything else */
- dev->open = ncr885e_open;
- dev->stop = ncr885e_close;
- dev->get_stats = ncr885e_stats;
- dev->hard_start_xmit = ncr885e_xmit_start;
- dev->set_multicast_list = ncr885e_set_multicast;
- dev->set_mac_address = ncr885e_set_address;
-
- root_dev = dev;
-
- return 0;
-}
-
-/* Since the NCR 53C885 is a multi-function chip, I'm not worrying about
- * trying to get the the device(s) in slot order. For our (Synergy's)
- * purpose, there's just a single 53C885 on the board and we don't
- * worry about the rest.
- */
-
-static int __init ncr885e_probe(void)
-{
- struct pci_dev *pdev = NULL;
- unsigned int ioaddr, ret;
- unsigned char irq;
-
- /* use 'if' not 'while' where because driver only supports one device */
- if (( pdev = pci_find_device( PCI_VENDOR_ID_NCR,
- PCI_DEVICE_ID_NCR_53C885_ETHERNET,
- pdev )) != NULL ) {
-
- if ( !print_version ) {
- print_version++;
- printk( KERN_INFO "%s", version );
- }
-
- if (pci_enable_device(pdev))
- return -ENODEV;
-
- /* Use I/O space */
- ioaddr = pci_resource_start (pdev, 0);
- irq = pdev->irq;
-
- if ( !request_region( ioaddr, NCR885E_TOTAL_SIZE, "ncr885e" ))
- return -ENOMEM;
-
- /* finish off the probe */
- ret = ncr885e_probe1(ioaddr, irq);
- if (ret)
- release_region(ioaddr, NCR885E_TOTAL_SIZE);
- else
- pci_set_master(pdev);
- }
-
- return ret;
-}
-
-/* debugging to peek at dma descriptors */
-static void
-show_dbdma_cmd( volatile struct dbdma_cmd *cmd )
-
-{
- printk( KERN_INFO " cmd %04x, physaddr %08x, req_count %04x\n",
- inw( &cmd->command ), inl( &cmd->phy_addr ), inw( &cmd->req_count ));
- printk( KERN_INFO " res_count %04x, xfer_status %04x, branch %08x\n",
- inw( &cmd->res_count ), inw( &cmd->xfer_status ),inl( &cmd->cmd_dep ));
-}
-
-#if 0
-static int
-read_eeprom( unsigned int ioaddr, int location )
-
-{
- int loop;
- unsigned char val;
-
- outb( (location & 0xff), ioaddr + EE_WORD_ADDR );
-
- /* take spillover from location in control reg */
- outb(EE_CONTROL_RND_READB | (location & (0x7<<8)), ioaddr + EE_CONTROL);
-
- loop = 1000;
- while( (inb( ioaddr + EE_STATUS) & EE_SEB) &&
- (loop > 0) ) {
- udelay( 10 );
- loop--;
- }
-
- if ( inb( ioaddr + EE_STATUS ) & EE_SEE ) {
- printk("%s: Serial EEPROM read error\n", chipname);
- val = 0xff;
- }
-
- else
- val = inb( ioaddr + EE_READ_DATA );
-
- return (int) val;
-}
-#endif
-
-#ifdef NCR885E_DEBUG_MII
-static void
-show_mii( unsigned long ioaddr )
-
-{
- int phyctrl, phystat, phyadvert, phypartner, phyexpan;
-
- phyctrl = read_mii( ioaddr, MII_AUTO_NEGOTIATION_CONTROL );
- phystat = read_mii( ioaddr, MII_AUTO_NEGOTIATION_STATUS );
- phyadvert = read_mii( ioaddr, MII_AUTO_NEGOTIATION_ADVERTISEMENT );
- phypartner = read_mii( ioaddr, MII_AUTO_NEGOTIATION_LINK_PARTNER );
- phyexpan = read_mii( ioaddr, MII_AUTO_NEGOTIATION_EXPANSION );
-
- printk( KERN_INFO "PHY: advert=%d %s, partner=%s %s, link=%d, %s%s\n",
- (phyadvert & MANATECH_100BASETX_FULL_DUPLEX ? 100 : 10),
- (phyctrl & MANC_AUTO_NEGOTIATION_ENABLE ? "auto" : "fixed"),
- (phypartner & MANLP_ACKNOWLEDGE ?
- (phypartner & MANATECH_100BASETX_FULL_DUPLEX ? "100" : "10") :
- "?"),
- (phyexpan & MANE_LINK_PARTNER_AUTO_ABLE ? "auto" : "fixed"),
- (phyctrl & MANC_PHY_SPEED_100 ? 100 : 10),
- (phystat & MANS_LINK_STATUS ? "up" : "down"),
- (phyexpan & MANE_PARALLEL_DETECTION_FAULT ? " PD-fault" : "" ));
- return;
-}
-
-
-static int
-read_mii( unsigned long ioaddr, int reg )
-
-{
- int timeout;
-
-
- timeout = 100000;
-
- while( inw( ioaddr + MII_INDICATOR ) & MII_BUSY ) {
-
- if ( timeout-- < 0 ) {
- printk( KERN_INFO "Timed out waiting for MII\n" );
- return -1;
- }
- }
-
- outw( (1<<8) + reg, ioaddr + MII_ADDRESS );
- outw( MIIM_RSTAT, ioaddr + MIIM_COMMAND );
-
- timeout = 100000;
- while( inw( ioaddr + MII_INDICATOR ) & MII_BUSY ) {
- if ( timeout-- < 0 ) {
- printk( KERN_INFO "Timed out waiting for MII\n" );
- return -1;
- }
- }
-
- return( inw( ioaddr + MII_READ_DATA ));
-}
-
-static void
-write_mii( unsigned long ioaddr, int reg, int data )
-
-{
- int timeout=100000;
-
- printk( KERN_INFO "MII indicator: %02x\n", inw( ioaddr + MII_INDICATOR ));
-
- while( inw( ioaddr + MII_INDICATOR ) & MII_BUSY ) {
- if ( timeout-- <= 0 ) {
- printk( KERN_INFO "Timeout waiting to write to MII\n" );
- return;
- }
- udelay( 10 );
- }
-
- outw( (1<<8) + reg, ioaddr + MII_ADDRESS );
- outw( data, ioaddr + MII_WRITE_DATA );
-
- return;
-}
-
-#endif /* NCR885E_DEBUG_MII */
-
-static void __exit ncr885e_cleanup(void)
-{
- if ( root_dev ) {
- unregister_netdev( root_dev );
- release_region( root_dev->base_addr, NCR885E_TOTAL_SIZE );
- kfree( root_dev );
- root_dev = NULL;
- }
-}
-
-module_init(ncr885e_probe);
-module_exit(ncr885e_cleanup);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+++ /dev/null
-#ifndef _NET_H_SYMBA
-#define _NET_H_SYMBA
-
-/* transmit status bit definitions */
-#define TX_STATUS_TXOK (1<<13) /* success */
-#define TX_STATUS_TDLC (1<<12) /* dropped for late colls */
-#define TX_STATUS_TCXSDFR (1<<11) /* excessive deferral */
-#define TX_STATUS_TDEC (1<<10) /* excessive collisions */
-#define TX_STATUS_TAUR (1<<9) /* abort on underrun/"jumbo" */
-#define TX_STATUS_PDFRD (1<<8) /* packet deferred */
-#define TX_STATUS_BCAST (1<<7) /* broadcast ok */
-#define TX_STATUS_MCAST (1<<6) /* multicast ok */
-#define TX_STATUS_CRCERR (1<<5) /* CRC error */
-#define TX_STATUS_LC (1<<4) /* late collision */
-#define TX_STATUS_CCNT_MASK 0xf /* collision count */
-
-#define T_TXOK (1<<13)
-#define T_TDLC (1<<12)
-#define T_TCXSDFR (1<<11)
-#define T_TDEC (1<<10)
-#define T_TAUR (1<<9)
-#define T_PDFRD (1<<8)
-#define T_BCAST (1<<7)
-#define T_MCAST (1<<6)
-#define T_LC (1<<4)
-#define T_CCNT_MASK 0xf
-
-/* receive status bit definitions */
-#define RX_STATUS_RXOVRN (1<<23) /* overrun */
-#define RX_STATUS_CEPS (1<<22) /* carrier event already seen */
-#define RX_STATUS_RXOK (1<<21) /* success */
-#define RX_STATUS_BCAST (1<<20) /* broadcast ok */
-#define RX_STATUS_MCAST (1<<19) /* multicast ok */
-#define RX_STATUS_CRCERR (1<<18) /* CRC error */
-#define RX_STATUS_DR (1<<17) /* dribble nibble */
-#define RX_STATUS_RCV (1<<16) /* rx code violation */
-#define RX_STATUS_PTL (1<<15) /* pkt > 1518 bytes */
-#define RX_STATUS_PTS (1<<14) /* pkt < 64 bytes */
-#define RX_STATUS_LEN_MASK 0x1fff /* length mask */
-
-#define EEPROM_LENGTH 100
-
-
-/* Serial EEPROM interface */
-#define EE_STATUS 0xf0
-#define EE_CONTROL 0xf1
-#define EE_WORD_ADDR 0xf2
-#define EE_READ_DATA 0xf3
-#define EE_WRITE_DATA 0xf4
-#define EE_FEATURE_ENB 0xf5
-
-/* Use on EE_STATUS */
-#define EE_SEB (1<<8)
-#define EE_SEE 1
-
-/* Serial EEPROM commands */
-#define EE_CONTROL_SEQ_READB (1<<4)
-#define EE_CONTROL_RND_WRITEB (1<<5)
-#define EE_CONTROL_RND_READB ((1<<4)|(1<<5))
-
-/* Enable writing to serial EEPROM */
-#define EE_WRITE_ENB 1
-
-/* The 885 configuration register */
-#define MAC_CONFIG 0xa0
-#define MAC_CONFIG_SRST 1<<15
-#define MAC_CONFIG_ITXA 1<<13
-#define MAC_CONFIG_RXEN 1<<12
-#define MAC_CONFIG_INTLB 1<<10
-#define MAC_CONFIG_MODE_MASK (1<<8|1<<9)
-#define MAC_CONFIG_MODE_TP 1<<8
-#define MAC_CONFIG_HUGEN 1<<5
-#define MAC_CONFIG_RETRYL 1<<4
-#define MAC_CONFIG_CRCEN 1<<3
-#define MAC_CONFIG_PADEN 1<<2
-#define MAC_CONFIG_FULLD 1<<1
-#define MAC_CONFIG_NOCFR 1<<0
-
-
-
-
-
-#define TX_WAIT_SELECT 0x18
-#define RX_CHANNEL_CONTROL 0x40
-
-/* Tx channel status */
-#define TX_DBDMA_REG 0x00
-#define TX_CHANNEL_CONTROL 0x00
-#define TX_CHANNEL_STATUS 0x04
-#define TX_STATUS_RUN 1<<15
-#define TX_STATUS_PAUSE 1<<14
-#define TX_STATUS_WAKE 1<<12
-#define TX_STATUS_DEAD 1<<11
-#define TX_STATUS_ACTIVE 1<<10
-#define TX_STATUS_BT 1<<8
-#define TX_STATUS_TXABORT 1<<7
-#define TX_STATUS_TXSR 1<<6
-
-#define TX_CHANNEL_RUN TX_STATUS_RUN
-#define TX_CHANNEL_PAUSE TX_STATUS_PAUSE
-#define TX_CHANNEL_WAKE TX_STATUS_WAKE
-#define TX_CHANNEL_DEAD TX_STATUS_DEAD
-#define TX_CHANNEL_ACTIVE TX_STATUS_ACTIVE
-#define TX_CHANNEL_BT TX_STATUS_BT
-#define TX_CHANNEL_TXABORT TX_STATUS_TXABORT
-#define TX_CHANNEL_TXSR TX_STATUS_TXSR
-
-#define TX_DBDMA_ENABLE (TX_CHANNEL_WAKE | TX_CHANNEL_PAUSE | \
- TX_CHANNEL_RUN )
-
-/* Transmit command ptr lo register */
-#define TX_CMD_PTR_LO 0x0c
-
-/* Transmit interrupt select register */
-#define TX_INT_SELECT 0x10
-
-/* Transmit branch select register */
-#define TX_BRANCH_SELECT 0x14
-
-/* Transmit wait select register */
-#define TX_WAIT_SELECT 0x18
-#define TX_WAIT_STAT_RECV 0x40
-
-/* Rx channel status */
-#define RX_DBDMA_REG 0x40
-#define RX_CHANNEL_CONTROL 0x40
-#define RX_CHANNEL_STATUS 0x44
-#define RX_STATUS_RUN 1<<15
-#define RX_STATUS_PAUSE 1<<14
-#define RX_STATUS_WAKE 1<<12
-#define RX_STATUS_DEAD 1<<11
-#define RX_STATUS_ACTIVE 1<<10
-#define RX_STATUS_BT 1<<8
-#define RX_STATUS_EOP 1<<6
-
-#define RX_CHANNEL_RUN RX_STATUS_RUN
-#define RX_CHANNEL_PAUSE RX_STATUS_PAUSE
-#define RX_CHANNEL_WAKE RX_STATUS_WAKE
-#define RX_CHANNEL_DEAD RX_STATUS_DEAD
-#define RX_CHANNEL_ACTIVE RX_STATUS_ACTIVE
-#define RX_CHANNEL_BT RX_STATUS_BT
-#define RX_CHANNEL_EOP RX_STATUS_EOP
-
-#define RX_DBDMA_ENABLE (RX_CHANNEL_WAKE | RX_CHANNEL_PAUSE | \
- RX_CHANNEL_RUN)
-
-/* Receive command ptr lo */
-#define RX_CMD_PTR_LO 0x4c
-
-/* Receive interrupt select register */
-#define RX_INT_SELECT 0x50
-#define RX_INT_SELECT_EOP 0x40
-
-/* Receive branch select */
-#define RX_BRANCH_SELECT 0x54
-#define RX_BRANCH_SELECT_EOP 0x40
-
-/* Receive wait select */
-#define RX_WAIT_SELECT 0x58
-#define RX_WAIT_SELECT_EOP 0x40
-
-/* Event status register */
-#define EVENT_STATUS 0x80
-#define EVENT_TXSR 1<<2
-#define EVENT_EOP 1<<1
-#define EVENT_TXABORT 1<<0
-
-/* Interrupt enable register */
-#define INTERRUPT_ENABLE 0x82
-
-/* Interrupt clear register */
-#define INTERRUPT_CLEAR 0x84
-
-/* Interrupt status register */
-#define INTERRUPT_STATUS_REG 0x86
-
-/* bits for the above three interrupt registers */
-#define INTERRUPT_INTE 1<<15 /* interrupt enable */
-#define INTERRUPT_WI 1<<9 /* wakeup interrupt */
-#define INTERRUPT_ERI 1<<8 /* early receive interrupt */
-#define INTERRUPT_PPET 1<<7 /* PCI Tx parity error */
-#define INTERRUPT_PBFT 1<<6 /* PCI Tx bus fault */
-#define INTERRUPT_IIDT 1<<5 /* illegal instruction Tx */
-#define INTERRUPT_DIT 1<<4 /* DBDMA Tx interrupt */
-#define INTERRUPT_PPER 1<<3 /* PCI Rx parity error */
-#define INTERRUPT_PBFR 1<<2 /* PCI Rx bus fault */
-#define INTERRUPT_IIDR 1<<1 /* illegal instruction Rx */
-#define INTERRUPT_DIR 1<<0 /* DBDMA Rx interrupt */
-
-#define INTERRUPT_TX_MASK (INTERRUPT_PBFT|INTERRUPT_IIDT| \
- INTERRUPT_PPET|INTERRUPT_DIT)
-#define INTERRUPT_RX_MASK (INTERRUPT_PBFR|INTERRUPT_IIDR| \
- INTERRUPT_PPER|INTERRUPT_DIR)
-
-/* chip revision register */
-#define CHIP_REVISION_REG 0x8c
-#define CHIP_PCIREV_MASK (0xf<<16)
-#define CHIP_PCIDEV_MASK 0xff
-
-/* Tx threshold register */
-#define TX_THRESHOLD 0x94
-
-/* General purpose register */
-#define GEN_PURPOSE_REG 0x9e
-
-/* General purpose pin control reg */
-#define GEN_PIN_CONTROL_REG 0x9f
-
-/* DBDMA control register */
-#define DBDMA_CONTROL 0x90
-#define DBDMA_SRST 1<<31
-#define DBDMA_TDPCE 1<<23
-#define DBDMA_BE 1<<22
-#define DBDMA_TAP_MASK (1<<19|1<<20|1<<21)
-#define DBDMA_RAP_MASK (1<<16|1<<17|1<<18)
-#define DBDMA_DPMRLE 1<<15
-#define DBDMA_WIE 1<<14
-#define DBDMA_MP 1<<13
-#define DBDMA_SME 1<<12
-#define DBDMA_CME 1<<11
-#define DBDMA_DDPE 1<<10
-#define DBDMA_TDPE 1<<9
-#define DBDMA_EXTE 1<<8
-#define DBDMA_BST_MASK (1<<4|1<<5|1<<6)
-#define DBDMA_BSR_MASK (1<<0|1<<1|1<<2)
-
-#define DBDMA_BURST_1 (0x00)
-#define DBDMA_BURST_2 (0x01)
-#define DBDMA_BURST_4 (0x02)
-#define DBDMA_BURST_8 (0x03)
-#define DBDMA_BURST_16 (0x04)
-#define DBDMA_BURST_32 (0x05)
-#define DBDMA_BURST_64 (0x06)
-#define DBDMA_BURST_128 (0x07)
-
-#define DBDMA_TX_BST_SHIFT (4)
-#define DBDMA_RX_BST_SHIFT (0)
-
-#define DBDMA_TX_ARBITRATION_DEFAULT ( 1 << 19 )
-#define DBDMA_RX_ARBITRATION_DEFAULT ( 2 << 16 )
-
-
-/* Back-to-back interpacket gap register */
-#define BTOB_INTP_GAP 0xa2
-#define BTOB_INTP_DEFAULT 0x18
-
-/* Non-back-to-back interpacket gap register */
-#define NBTOB_INTP_GAP 0xa4
-
-/* MIIM command register */
-#define MIIM_COMMAND 0xa6
-#define MIIM_SCAN 1<<1
-#define MIIM_RSTAT 1<<0
-
-/* MII address register */
-#define MII_ADDRESS 0xa8
-#define MII_FIAD_MASK (1<<8|1<<9|1<<10|1<<11|1<<12)
-#define MII_RGAD_MASK (1<<0|1<<1|1<<2|1<<3|1<<4)
-
-#define TPPMD_CONTROL_REG 0xa8
-#define TPPMD_FO 1<<1
-#define TPPMD_LB 1<<0
-
-/* MII read and write registers */
-#define MII_WRITE_DATA 0xaa
-#define MII_READ_DATA 0xac
-
-/* MII indicators */
-#define MII_INDICATOR 0xae
-#define MII_NVALID 1<<2
-#define MII_SCAN 1<<1
-#define MII_BUSY 1<<0
-
-/* Address filter */
-#define ADDRESS_FILTER 0xd0
-#define ADDRESS_RPPRM 1<<3 /* multicast promis. mode */
-#define ADDRESS_RPPRO 1<<2 /* promiscuous mode */
-#define ADDRESS_RPAMC 1<<1 /* accept multicasts */
-#define ADDRESS_RPABC 1<<0 /* accept broadcasts */
-
-/* Station addresses
-
- Note that if the serial EEPROM is disabled, these values are all
- zero. If, like us, you get the chips when they're fresh, they're
- also zero and you have to initialize the address */
-#define STATION_ADDRESS_0 0xd2
-#define STATION_ADDRESS_1 0xd4
-#define STATION_ADDRESS_2 0xd6
-
-/* Hash tables */
-#define HASH_TABLE_0 0xd8
-#define HASH_TABLE_1 0xda
-#define HASH_TABLE_2 0xdc
-#define HASH_TABLE_3 0xde
-
-/* PHY indentifiers */
-#define PHY_IDENTIFIER_0 0xe4
-#define PHY_IDENTIFIER_1 0xe6
-
-/* MII Auto-negotiation register definitions */
-
-#define MII_AUTO_NEGOTIATION_CONTROL (0x0000)
-#define MANC_PHY_RESET (0x8000)
-#define MANC_PHY_LOOPBACK_ENABLE (0x4000)
-#define MANC_PHY_LOOPBACK_DISABLE (0x0000)
-#define MANC_PHY_SPEED_100 (0x2000)
-#define MANC_PHY_SPEED_10 (0x0000)
-#define MANC_AUTO_NEGOTIATION_ENABLE (0x1000)
-#define MANC_AUTO_NEGOTIATION_DISABLE (0x0000)
-#define MANC_PHY_POWER_DOWN (0x0800)
-#define MANC_PHY_POWER_UP (0x0000)
-#define MANC_ISOLATE_ENABLE (0x0400)
-#define MANC_ISOLATE_DISABLE (0x0000)
-#define MANC_RESTART_AUTO_NEGOTIATION (0x0200)
-#define MANC_FULL_DUPLEX (0x0100)
-#define MANC_HALF_DUPLEX (0x0000)
-
-#define MII_AUTO_NEGOTIATION_STATUS (0x0001)
-#define MANS_100BASE_T4_HALF_DUPLEX (0x8000)
-#define MANS_100BASE_X_FULL_DUPLEX (0x4000)
-#define MANS_100BASE_X_HALF_DUPLEX (0x2000)
-#define MANS_10MBS_FULL_DUPLEX (0x1000)
-#define MANS_10MBS_HALF_DUPLEX (0x0800)
-#define MANS_AUTO_NEGOTIATION_COMPLETE (0x0020)
-#define MANS_REMOTE_FAULT (0x0010)
-#define MANS_AUTO_NEGOTIATION_ABILITY (0x0008)
-#define MANS_LINK_STATUS (0x0004)
-#define MANS_JABBER_DETECT (0x0002)
-#define MANS_EXTENDED_CAPABILITY (0x0001)
-
-#define MII_PHY_IDENTIFIER_1 (0x0002)
-#define MII_PHY_IDENTIFIER_2 (0x0003)
-
-#define MII_AUTO_NEGOTIATION_ADVERTISEMENT (0x0004)
-#define MANA_NEXT_PAGE (0x8000)
-#define MANA_REMOTE_FAULT (0x2000)
-#define MANA_TECHNOLOGY_ABILITY_MASK (0x1FE0)
-#define MANATECH_10BASET_HALF_DUPLEX (0x0020)
-#define MANATECH_10BASET_FULL_DUPLEX (0x0040)
-#define MANATECH_100BASETX_HALF_DUPLEX (0x0080)
-#define MANATECH_100BASETX_FULL_DUPLEX (0x0100)
-#define MANATECH_100BASET4 (0x0200)
-#define MANA_SELECTOR_MASK (0x001F)
-#define MANASELECTOR_802_3 (0x0001)
-
-#define MII_AUTO_NEGOTIATION_LINK_PARTNER (0x0005)
-#define MANLP_NEXT_PAGE (0x8000)
-#define MANLP_ACKNOWLEDGE (0x4000)
-#define MANLP_REMOTE_FAULT (0x2000)
-#define MANLP_TECHNOLOGY_ABILITY_MASK (0x1FE0)
-#define MANLP_SELECTOR_MASK (0x001F)
-
-#define MII_AUTO_NEGOTIATION_EXPANSION (0x0006)
-#define MANE_PARALLEL_DETECTION_FAULT (0x0010)
-#define MANE_LINK_PARTNER_NEXT_PAGE_ABLE (0x0008)
-#define MANE_NEXT_PAGE_ABLE (0x0004)
-#define MANE_PAGE_RECEIVED (0x0002)
-#define MANE_LINK_PARTNER_AUTO_ABLE (0x0001)
-
-#define MII_AUTO_NEGOTIATION_NEXT_PAGE_TRANSMIT (0x0007)
-#define MANNPT_NEXT_PAGE (0x8000)
-#define MANNPT_MESSAGE_PAGE (0x2000)
-#define MANNPT_ACKNOWLEDGE_2 (0x1000)
-#define MANNPT_TOGGLE (0x0800)
-#define MANNPT_MESSAGE_FIELD_MASK (0x07FF)
-
-#endif
#include <linux/compiler.h>
//#include <linux/skbrefill.h>
+#include <asm/io.h>
+
/* Dprintk is used for more interesting debug events */
#undef Dprintk
#define Dprintk dprintk
#ifdef ZEROCOPY
printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming enabled.\n",
- dev->name,
+ dev->name);
#else /* not ZEROCOPY */
printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming disabled.\n",
dev->name);
-/* $Id: sungem.c,v 1.19 2001/08/13 14:40:07 davem Exp $
+/* $Id: sungem.c,v 1.20 2001/09/19 00:04:32 davem Exp $
* sungem.c: Sun GEM ethernet driver.
*
* Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com)
if (tulip_debug > 3)
printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pdev->slot_name);
- tp->csr0 = 0;
+ tp->csr0 = csr0 = 0;
/* check for sane cache line size. from acenic.c. */
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
* Various cleanups
* Update yellowfin_timer to correctly calculate duplex.
(suggested by Manfred Spraul)
+
+ LK1.1.4 (val@nmt.edu):
+ * Fix three endian-ness bugs
+ * Support dual function SYM53C885E ethernet chip
*/
PCI_IOTYPE, YELLOWFIN_SIZE,
FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug},
{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
- PCI_IOTYPE, YELLOWFIN_SIZE, HasMII },
+ PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | IsGigabit | FullTxStatus },
{0,},
};
};
struct tx_status_words {
-#if defined(__powerpc__)
+#ifdef __BIG_ENDIAN
u16 tx_errs;
u16 tx_cnt;
u16 paused;
u16 tx_errs;
u16 total_tx_cnt;
u16 paused;
-#endif
+#endif /* __BIG_ENDIAN */
};
/* Bits in yellowfin_desc.cmd */
break;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- yp->rx_ring[i].addr = pci_map_single(yp->pci_dev, skb->tail,
- yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
buf_addr = rx_skb->tail;
data_size = (le32_to_cpu(desc->dbdma_cmd) -
le32_to_cpu(desc->result_status)) & 0xffff;
- frame_status = get_unaligned((s16*)&(buf_addr[data_size - 2]));
+ frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
if (yellowfin_debug > 4)
printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
frame_status);
yp->rx_skbuff[entry] = skb;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- yp->rx_ring[entry].addr = pci_map_single(yp->pci_dev,
- skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
+ skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
#include <linux/poll.h>
#include <linux/sound.h>
#include <linux/soundcard.h>
+#include <linux/slab.h>
#include <asm/uaccess.h>
+#include <asm/io.h>
/* mmio access */
hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
if (!hub->descriptor) {
- err("Unable to kmalloc %d bytes for hub descriptor", sizeof(*hub->descriptor));
+ err("Unable to kmalloc %Zd bytes for hub descriptor", sizeof(*hub->descriptor));
return -1;
}
-/* $Id: bwtwofb.c,v 1.14 2001/02/13 01:17:14 davem Exp $
+/* $Id: bwtwofb.c,v 1.15 2001/09/19 00:04:33 davem Exp $
* bwtwofb.c: BWtwo frame buffer driver
*
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
-/* $Id: cgfourteenfb.c,v 1.10 2001/07/27 09:44:00 davem Exp $
+/* $Id: cgfourteenfb.c,v 1.11 2001/09/19 00:04:33 davem Exp $
* cgfourteenfb.c: CGfourteen frame buffer driver
*
* Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
-/* $Id: cgsixfb.c,v 1.24 2001/02/13 01:17:14 davem Exp $
+/* $Id: cgsixfb.c,v 1.25 2001/09/19 00:04:33 davem Exp $
* cgsixfb.c: CGsix (GX,GXplus) frame buffer driver
*
* Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
-/* $Id: cgthreefb.c,v 1.10 2001/02/13 01:17:14 davem Exp $
+/* $Id: cgthreefb.c,v 1.11 2001/09/19 00:04:33 davem Exp $
* cgthreefb.c: CGthree frame buffer driver
*
* Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
-/* $Id: creatorfb.c,v 1.35 2001/06/08 21:48:37 davem Exp $
+/* $Id: creatorfb.c,v 1.36 2001/09/19 00:04:33 davem Exp $
* creatorfb.c: Creator/Creator3D frame buffer driver
*
* Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz)
-/* $Id: leofb.c,v 1.12 2001/02/13 01:17:15 davem Exp $
+/* $Id: leofb.c,v 1.13 2001/09/19 00:04:33 davem Exp $
* leofb.c: Leo (ZX) 24/8bit frame buffer driver
*
* Copyright (C) 1996-1999 Jakub Jelinek (jj@ultra.linux.cz)
-/* $Id: tcxfb.c,v 1.12 2001/02/13 01:17:15 davem Exp $
+/* $Id: tcxfb.c,v 1.13 2001/09/19 00:04:33 davem Exp $
* tcxfb.c: TCX 24/8bit frame buffer driver
*
* Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
#include <linux/iobuf.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
return __block_fsync(inode);
}
+/*
+ * pseudo-fs
+ */
+
+static struct super_block *bd_read_super(struct super_block *sb, void *data, int silent)
+{
+ static struct super_operations sops = {};
+ struct inode *root = new_inode(sb);
+ if (!root)
+ return NULL;
+ root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
+ root->i_uid = root->i_gid = 0;
+ root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+ sb->s_blocksize = 1024;
+ sb->s_blocksize_bits = 10;
+ sb->s_magic = 0x62646576;
+ sb->s_op = &sops;
+ sb->s_root = d_alloc(NULL, &(const struct qstr) { "bdev:", 5, 0 });
+ if (!sb->s_root) {
+ iput(root);
+ return NULL;
+ }
+ sb->s_root->d_sb = sb;
+ sb->s_root->d_parent = sb->s_root;
+ d_instantiate(sb->s_root, root);
+ return sb;
+}
+
+static DECLARE_FSTYPE(bd_type, "bdev", bd_read_super, FS_NOMOUNT);
+
+static struct vfsmount *bd_mnt;
+
+static int get_inode(struct block_device *bdev)
+{
+ if (!bdev->bd_inode) {
+ struct inode *inode = new_inode(bd_mnt->mnt_sb);
+ if (!inode)
+ return -ENOMEM;
+ inode->i_rdev = to_kdev_t(bdev->bd_dev);
+ atomic_inc(&bdev->bd_count); /* will go away */
+ inode->i_bdev = bdev;
+ inode->i_data.a_ops = &def_blk_aops;
+ bdev->bd_inode = inode;
+ }
+ return 0;
+}
+
/*
* bdev cache handling - shamelessly stolen from inode.c
* We use smaller hashtable, though.
void __init bdev_cache_init(void)
{
- int i;
+ int i, err;
struct list_head *head = bdev_hashtable;
i = HASH_SIZE;
NULL);
if (!bdev_cachep)
panic("Cannot create bdev_cache SLAB cache");
+ err = register_filesystem(&bd_type);
+ if (err)
+ panic("Cannot register bdev pseudo-fs");
+ bd_mnt = kern_mount(&bd_type);
+ err = PTR_ERR(bd_mnt);
+ if (IS_ERR(bd_mnt))
+ panic("Cannot create bdev pseudo-fs");
}
/*
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
{
- struct inode inode_fake;
int res;
mm_segment_t old_fs = get_fs();
if (!bdev->bd_op->ioctl)
return -EINVAL;
- memset(&inode_fake, 0, sizeof(inode_fake));
- inode_fake.i_rdev = to_kdev_t(bdev->bd_dev);
- inode_fake.i_bdev = bdev;
- init_waitqueue_head(&inode_fake.i_wait);
set_fs(KERNEL_DS);
- res = bdev->bd_op->ioctl(&inode_fake, NULL, cmd, arg);
+ res = bdev->bd_op->ioctl(bdev->bd_inode, NULL, cmd, arg);
set_fs(old_fs);
return res;
}
int ret = -ENODEV;
kdev_t rdev = to_kdev_t(bdev->bd_dev); /* this should become bdev */
down(&bdev->bd_sem);
+
+ if (get_inode(bdev)) {
+ up(&bdev->bd_sem);
+ return -ENOMEM;
+ }
+
lock_kernel();
if (!bdev->bd_op)
bdev->bd_op = get_blkfops(MAJOR(rdev));
*/
struct file fake_file = {};
struct dentry fake_dentry = {};
- struct inode *fake_inode = get_empty_inode();
ret = -ENOMEM;
- if (fake_inode) {
- fake_file.f_mode = mode;
- fake_file.f_flags = flags;
- fake_file.f_dentry = &fake_dentry;
- fake_dentry.d_inode = fake_inode;
- fake_inode->i_rdev = rdev;
- ret = 0;
- if (bdev->bd_op->open)
- ret = bdev->bd_op->open(fake_inode, &fake_file);
- if (!ret) {
- bdev->bd_openers++;
- atomic_inc(&bdev->bd_count);
- } else if (!bdev->bd_openers)
- bdev->bd_op = NULL;
- iput(fake_inode);
+ fake_file.f_mode = mode;
+ fake_file.f_flags = flags;
+ fake_file.f_dentry = &fake_dentry;
+ fake_dentry.d_inode = bdev->bd_inode;
+ ret = 0;
+ if (bdev->bd_op->open)
+ ret = bdev->bd_op->open(bdev->bd_inode, &fake_file);
+ if (!ret) {
+ bdev->bd_openers++;
+ atomic_inc(&bdev->bd_count);
+ } else if (!bdev->bd_openers) {
+ struct inode *bd_inode = bdev->bd_inode;
+ bdev->bd_op = NULL;
+ bdev->bd_inode = NULL;
+ iput(bd_inode);
}
}
unlock_kernel();
filp->f_flags |= O_LARGEFILE;
down(&bdev->bd_sem);
+
+ if (get_inode(bdev)) {
+ up(&bdev->bd_sem);
+ return -ENOMEM;
+ }
+
lock_kernel();
if (!bdev->bd_op)
bdev->bd_op = get_blkfops(MAJOR(inode->i_rdev));
ret = bdev->bd_op->open(inode,filp);
if (!ret) {
bdev->bd_openers++;
- if (!bdev->bd_cache_openers && bdev->bd_inode)
- BUG();
- if (bdev->bd_cache_openers && !bdev->bd_inode)
- BUG();
- if (!bdev->bd_cache_openers++)
- bdev->bd_inode = inode;
- else {
- if (bdev->bd_inode != inode && !inode->i_mapping_overload++) {
- inode->i_mapping = bdev->bd_inode->i_mapping;
- atomic_inc(&bdev->bd_inode->i_count);
- }
- }
- } else if (!bdev->bd_openers)
+ bdev->bd_cache_openers++;
+ inode->i_mapping = bdev->bd_inode->i_mapping;
+ inode->i_mapping_overload++;
+ } else if (!bdev->bd_openers) {
+ struct inode *bd_inode = bdev->bd_inode;
bdev->bd_op = NULL;
+ bdev->bd_inode = NULL;
+ iput(bd_inode);
+ }
}
unlock_kernel();
up(&bdev->bd_sem);
{
int ret = 0;
kdev_t rdev = to_kdev_t(bdev->bd_dev); /* this should become bdev */
+ struct inode *bd_inode = bdev->bd_inode;
+
down(&bdev->bd_sem);
lock_kernel();
if (kind == BDEV_FILE)
- fsync_dev(rdev);
+ __block_fsync(bd_inode);
else if (kind == BDEV_FS)
fsync_no_super(rdev);
/* only filesystems uses buffer cache for the metadata these days */
if (kind == BDEV_FS)
invalidate_buffers(rdev);
- if (bdev->bd_op->release) {
- struct inode * fake_inode = get_empty_inode();
- ret = -ENOMEM;
- if (fake_inode) {
- fake_inode->i_rdev = rdev;
- ret = bdev->bd_op->release(fake_inode, NULL);
- iput(fake_inode);
- } else
- printk(KERN_WARNING "blkdev_put: ->release couldn't be run due -ENOMEM\n");
+ if (bdev->bd_op->release)
+ ret = bdev->bd_op->release(bd_inode, NULL);
+ if (!--bdev->bd_openers) {
+ bdev->bd_op = NULL;
+ bdev->bd_inode = NULL;
+ iput(bd_inode);
}
- if (!--bdev->bd_openers)
- bdev->bd_op = NULL; /* we can't rely on driver being */
- /* kind to stay around. */
unlock_kernel();
up(&bdev->bd_sem);
bdput(bdev);
int ret = 0;
struct inode * bd_inode = bdev->bd_inode;
- if (bd_inode->i_mapping != inode->i_mapping)
- BUG();
down(&bdev->bd_sem);
lock_kernel();
/* cache coherency protocol */
struct super_block * sb;
/* flush the pagecache to disk */
- __block_fsync(inode);
+ __block_fsync(bd_inode);
/* drop the pagecache, uptodate info is on disk by now */
truncate_inode_pages(inode->i_mapping, 0);
- /* forget the bdev pagecache address space */
- bdev->bd_inode = NULL;
/* if the fs was mounted ro just throw away most of its caches */
sb = get_super(inode->i_rdev);
drop_super(sb);
}
}
- if (inode != bd_inode && !--inode->i_mapping_overload) {
+ if (!--inode->i_mapping_overload)
inode->i_mapping = &inode->i_data;
- iput(bd_inode);
- }
/* release the device driver */
if (bdev->bd_op->release)
ret = bdev->bd_op->release(inode, NULL);
- if (!--bdev->bd_openers)
+ if (!--bdev->bd_openers) {
bdev->bd_op = NULL;
+ bdev->bd_inode = NULL;
+ iput(bd_inode);
+ }
unlock_kernel();
up(&bdev->bd_sem);
schedule();
}
-/*
- * We used to try various strange things. Let's not.
- * We'll just try to balance dirty buffers, and possibly
- * launder some pages and do our best to make more memory
- * available.
- */
-static void refill_freelist(int size)
-{
- if (!grow_buffers(size))
- free_more_memory();
-}
-
void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
bh->b_list = BUF_CLEAN;
*/
write_unlock(&hash_table_lock);
spin_unlock(&lru_list_lock);
- refill_freelist(size);
+
+ if (!grow_buffers(size))
+ free_more_memory();
+
/* FIXME: getblk should fail if there's no enough memory */
goto repeat;
}
inode->i_cdev = cdget(rdev);
} else if (S_ISBLK(mode)) {
inode->i_fop = &def_blk_fops;
- inode->i_mapping->a_ops = &def_blk_aops;
inode->i_rdev = to_kdev_t(rdev);
inode->i_bdev = bdget(rdev);
} else if (S_ISFIFO(mode))
struct file_lock *fl = &lock->fl;
s32 start, len, end;
- if (!(p = xdr_decode_string(p, &lock->caller, &len, NLM_MAXSTRLEN))
+ if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+ &lock->len,
+ NLM_MAXSTRLEN))
|| !(p = nlm_decode_fh(p, &lock->fh))
|| !(p = nlm_decode_oh(p, &lock->oh)))
return NULL;
nlmsvc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;
- int len;
memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
lock->fl.fl_pid = ~(u32) 0;
if (!(p = nlm_decode_cookie(p, &argp->cookie))
- || !(p = xdr_decode_string(p, &lock->caller, &len, NLM_MAXSTRLEN))
+ || !(p = xdr_decode_string_inplace(p, &lock->caller,
+ &lock->len, NLM_MAXSTRLEN))
|| !(p = nlm_decode_fh(p, &lock->fh))
|| !(p = nlm_decode_oh(p, &lock->oh)))
return 0;
nlmsvc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;
- int len;
- if (!(p = xdr_decode_string(p, &lock->caller, &len, NLM_MAXSTRLEN)))
+ if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+ &lock->len, NLM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
return xdr_argsize_check(rqstp, p);
int
nlmsvc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
{
- if (!(p = xdr_decode_string(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
+ if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
argp->addr = ntohl(*p++);
{
struct file_lock *fl = &lock->fl;
__s64 len, start, end;
- int tmp;
- if (!(p = xdr_decode_string(p, &lock->caller, &tmp, NLM_MAXSTRLEN))
+ if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+ &lock->len, NLM_MAXSTRLEN))
|| !(p = nlm4_decode_fh(p, &lock->fh))
|| !(p = nlm4_decode_oh(p, &lock->oh)))
return NULL;
nlm4svc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;
- int len;
memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
lock->fl.fl_pid = ~(u32) 0;
if (!(p = nlm4_decode_cookie(p, &argp->cookie))
- || !(p = xdr_decode_string(p, &lock->caller, &len, NLM_MAXSTRLEN))
+ || !(p = xdr_decode_string_inplace(p, &lock->caller,
+ &lock->len, NLM_MAXSTRLEN))
|| !(p = nlm4_decode_fh(p, &lock->fh))
|| !(p = nlm4_decode_oh(p, &lock->oh)))
return 0;
nlm4svc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;
- int len;
- if (!(p = xdr_decode_string(p, &lock->caller, &len, NLM_MAXSTRLEN)))
+ if (!(p = xdr_decode_string_inplace(p, &lock->caller,
+ &lock->len, NLM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
return xdr_argsize_check(rqstp, p);
int
nlm4svc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
{
- if (!(p = xdr_decode_string(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
+ if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
argp->addr = ntohl(*p++);
{
int nfserr;
- dprintk("nfsd: LOOKUP(3) %s %s\n",
+ dprintk("nfsd: LOOKUP(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
+ argp->len,
argp->name);
fh_copy(&resp->dirfh, &argp->fh);
struct iattr *attr;
u32 nfserr;
- dprintk("nfsd: CREATE(3) %s %s\n",
+ dprintk("nfsd: CREATE(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
+ argp->len,
argp->name);
dirfhp = fh_copy(&resp->dirfh, &argp->fh);
{
int nfserr;
- dprintk("nfsd: MKDIR(3) %s %s\n",
+ dprintk("nfsd: MKDIR(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
+ argp->len,
argp->name);
argp->attrs.ia_valid &= ~ATTR_SIZE;
{
int nfserr;
- dprintk("nfsd: SYMLINK(3) %s %s -> %s\n",
+ dprintk("nfsd: SYMLINK(3) %s %.*s -> %.*s\n",
SVCFH_fmt(&argp->ffh),
- argp->fname, argp->tname);
+ argp->flen, argp->fname,
+ argp->tlen, argp->tname);
fh_copy(&resp->dirfh, &argp->ffh);
fh_init(&resp->fh, NFS3_FHSIZE);
int nfserr, type;
dev_t rdev = 0;
- dprintk("nfsd: MKNOD(3) %s %s\n",
+ dprintk("nfsd: MKNOD(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
+ argp->len,
argp->name);
fh_copy(&resp->dirfh, &argp->fh);
{
int nfserr;
- dprintk("nfsd: REMOVE(3) %s %s\n",
+ dprintk("nfsd: REMOVE(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
+ argp->len,
argp->name);
/* Unlink. -S_IFDIR means file must not be a directory */
{
int nfserr;
- dprintk("nfsd: RMDIR(3) %s %s\n",
+ dprintk("nfsd: RMDIR(3) %s %.*s\n",
SVCFH_fmt(&argp->fh),
+ argp->len,
argp->name);
fh_copy(&resp->fh, &argp->fh);
{
int nfserr;
- dprintk("nfsd: RENAME(3) %s %s ->\n",
+ dprintk("nfsd: RENAME(3) %s %.*s ->\n",
SVCFH_fmt(&argp->ffh),
+ argp->flen,
argp->fname);
- dprintk("nfsd: -> %s %s\n",
+ dprintk("nfsd: -> %s %.*s\n",
SVCFH_fmt(&argp->tfh),
+ argp->tlen,
argp->tname);
fh_copy(&resp->ffh, &argp->ffh);
dprintk("nfsd: LINK(3) %s ->\n",
SVCFH_fmt(&argp->ffh));
- dprintk("nfsd: -> %s %s\n",
+ dprintk("nfsd: -> %s %.*s\n",
SVCFH_fmt(&argp->tfh),
+ argp->tlen,
argp->tname);
fh_copy(&resp->fh, &argp->ffh);
char *name;
int i;
- if ((p = xdr_decode_string(p, namp, lenp, NFS3_MAXNAMLEN)) != NULL) {
+ if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS3_MAXNAMLEN)) != NULL) {
for (i = 0, name = *namp; i < *lenp; i++, name++) {
if (*name == '\0' || *name == '/')
return NULL;
}
- *name = '\0';
}
return p;
char *name;
int i;
- if ((p = xdr_decode_string(p, namp, lenp, NFS3_MAXPATHLEN)) != NULL) {
+ if ((p = xdr_decode_string_inplace(p, namp, lenp, NFS3_MAXPATHLEN)) != NULL) {
for (i = 0, name = *namp; i < *lenp; i++, name++) {
if (*name == '\0')
return NULL;
}
- *name = '\0';
}
return p;
struct nfsd_getdents_callback {
- struct qstr *name; /* name that was found. name->name already points to a buffer */
+ char *name; /* name that was found. It already points to a buffer of size NAME_MAX+1 */
unsigned long ino; /* the inum we are looking for */
int found; /* inode matched? */
int sequence; /* sequence counter */
loff_t pos, ino_t ino, unsigned int d_type)
{
struct nfsd_getdents_callback *buf = __buf;
- struct qstr *qs = buf->name;
- char *nbuf = (char*)qs->name; /* cast is to get rid of "const" */
int result = 0;
buf->sequence++;
dprintk("filldir_one: seq=%d, ino=%ld, name=%s\n", buf->sequence, ino, name);
#endif
if (buf->ino == ino) {
- qs->len = len;
- memcpy(nbuf, name, len);
- nbuf[len] = '\0';
+ memcpy(buf->name, name, len);
+ buf->name[len] = '\0';
buf->found = 1;
result = -1;
}
return result;
}
-/*
- * Read a directory and return the name of the specified entry.
- * i_sem is already down().
- * The whole thing is a total BS. It should not be done via readdir(), damnit!
- * Oh, well, as soon as it will be in filesystems...
+/**
+ * nfsd_get_name - default nfsd_operations->get_name function
+ * @dentry: the directory in which to find a name
+ * @name: a pointer to a %NAME_MAX+1 char buffer to store the name
+ * @child: the dentry for the child directory.
+ *
+ * calls readdir on the parent until it finds an entry with
+ * the same inode number as the child, and returns that.
*/
-static int get_ino_name(struct dentry *dentry, struct qstr *name, unsigned long ino)
+static int nfsd_get_name(struct dentry *dentry, char *name,
+ struct dentry *child)
{
struct inode *dir = dentry->d_inode;
int error;
goto out_close;
buffer.name = name;
- buffer.ino = ino;
+ buffer.ino = child->d_inode->i_ino;
buffer.found = 0;
buffer.sequence = 0;
while (1) {
int old_seq = buffer.sequence;
- error = file.f_op->readdir(&file, &buffer, filldir_one);
+
+ error = vfs_readdir(&file, filldir_one, &buffer);
+
if (error < 0)
break;
if (!(target->d_flags & DCACHE_NFSD_DISCONNECTED))
printk("nfsd: d_splice with non-DISCONNECTED target: %s/%s\n", parent->d_name.name, name->name);
#endif
- name->hash = full_name_hash(name->name, name->len);
tdentry = d_alloc(parent, name);
if (tdentry == NULL)
return -ENOMEM;
static struct dentry *splice(struct dentry *child, struct dentry *parent)
{
- int err = 0;
+ int err = 0, nerr;
struct qstr qs;
char namebuf[256];
struct list_head *lp;
- struct dentry *tmp;
/* child is an IS_ROOT (anonymous) dentry, but it is hypothesised that
* it should be a child of parent.
* We see if we can find a name and, if we can - splice it in.
- * We hold the i_sem on the parent the whole time to try to follow locking protocols.
+ * We lookup the name before locking (i_sem) the directory as namelookup
+ * also claims i_sem. If the name gets changed then we will loop around
+ * and try again in find_fh_dentry.
*/
- qs.name = namebuf;
+
+ nerr = nfsd_get_name(parent, namebuf, child);
+
+ /*
+ * We now claim the parent i_sem so that no-one else tries to create
+ * a dentry in the parent while we are.
+ */
+
down(&parent->d_inode->i_sem);
/* Now, things might have changed while we waited.
* to a lookup (though nobody does this yet). In this case, just succeed.
*/
if (child->d_parent == parent) goto out;
+
/* Possibly a new dentry has been made for this child->d_inode in
- * parent by a lookup. In this case return that dentry. caller must
+ * parent by a lookup. In this case return that dentry. Caller must
* notice and act accordingly
*/
spin_lock(&dcache_lock);
- for (lp = child->d_inode->i_dentry.next; lp != &child->d_inode->i_dentry ; lp=lp->next) {
- tmp = list_entry(lp,struct dentry, d_alias);
- if (tmp->d_parent == parent) {
+ list_for_each(lp, &child->d_inode->i_dentry) {
+ struct dentry *tmp = list_entry(lp,struct dentry, d_alias);
+ if (!list_empty(&tmp->d_hash) &&
+ tmp->d_parent == parent) {
child = dget_locked(tmp);
spin_unlock(&dcache_lock);
goto out;
}
}
spin_unlock(&dcache_lock);
- /* well, if we can find a name for child in parent, it should be safe to splice it in */
- err = get_ino_name(parent, &qs, child->d_inode->i_ino);
- if (err)
+
+ /* now we need that name. If there was an error getting it, now is the
+ * time to bail out.
+ */
+ if ((err = nerr))
goto out;
- tmp = d_lookup(parent, &qs);
- if (tmp) {
+ qs.name = namebuf;
+ qs.len = strlen(namebuf);
+ if (find_inode_number(parent, &qs) != 0) {
/* Now that IS odd. I wonder what it means... */
err = -EEXIST;
printk("nfsd-fh: found a name that I didn't expect: %s/%s\n", parent->d_name.name, qs.name);
- dput(tmp);
goto out;
}
err = d_splice(child, parent, &qs);
if (!error) {
error = nfsd_permission(exp, dentry, access);
}
-#ifdef NFSD_PARANOIA
+#ifdef NFSD_PARANOIA_EXTREME
if (error) {
printk("fh_verify: %s/%s permission failure, acc=%x, error=%d\n",
dentry->d_parent->d_name.name, dentry->d_name.name, access, (error >> 24));
#define NFSDDBG_FACILITY NFSDDBG_SVC
#define NFSD_BUFSIZE (1024 + NFSSVC_MAXBLKSIZE)
+/* these signals will be delivered to an nfsd thread
+ * when handling a request
+ */
#define ALLOWED_SIGS (sigmask(SIGKILL))
-#define SHUTDOWN_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGQUIT))
+/* these signals will be delivered to an nfsd thread
+ * when not handling a request. i.e. when waiting
+ */
+#define SHUTDOWN_SIGS (sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT))
+/* if the last thread dies with SIGHUP, then the exports table is
+ * left unchanged ( like 2.4-{0-9} ). Any other signal will clear
+ * the exports table (like 2.2).
+ */
+#define SIG_NOCLEAN SIGHUP
extern struct svc_program nfsd_program;
static void nfsd(struct svc_rqst *rqstp);
struct nfsd_list *nl =
list_entry(victim,struct nfsd_list, list);
victim = victim->next;
- send_sig(SIGKILL, nl->task, 1);
+ send_sig(SIG_NOCLEAN, nl->task, 1);
nrservs++;
}
failure:
if (sigismember(&current->pending.signal, signo) &&
!sigismember(&current->blocked, signo))
break;
- printk(KERN_WARNING "nfsd: terminating on signal %d\n", signo);
+ err = signo;
}
/* Release lockd */
/* Check if this is last thread */
if (serv->sv_nrthreads==1) {
+
+ printk(KERN_WARNING "nfsd: last server has exited\n");
+ if (err != SIG_NOCLEAN) {
+ printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
+ nfsd_export_shutdown();
+ }
nfsd_serv = NULL;
nfsd_racache_shutdown(); /* release read-ahead cache */
}
struct dentry *dentry;
int err;
- dprintk("nfsd: nfsd_lookup(fh %s, %*.*s)\n", SVCFH_fmt(fhp), len,len,name);
+ dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name);
/* Obtain dentry and export. */
err = fh_verify(rqstp, fhp, S_IFDIR, MAY_EXEC);
*/
int
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
- char *fname, int len, struct svc_fh *tfhp)
+ char *name, int len, struct svc_fh *tfhp)
{
struct dentry *ddir, *dnew, *dold;
struct inode *dirp, *dest;
if (!len)
goto out;
err = nfserr_exist;
- if (isdotent(fname, len))
+ if (isdotent(name, len))
goto out;
fh_lock(ffhp);
ddir = ffhp->fh_dentry;
dirp = ddir->d_inode;
- dnew = lookup_one_len(fname, ddir, len);
+ dnew = lookup_one_len(name, ddir, len);
err = PTR_ERR(dnew);
if (IS_ERR(dnew))
goto out_nfserr;
/*
* For the /proc/<pid>/maps file, we use fixed length records, each containing
* a single line.
- */
-#define MAPS_LINE_LENGTH 4096
-#define MAPS_LINE_SHIFT 12
-/*
- * f_pos = (number of the vma in the task->mm->mmap list) * MAPS_LINE_LENGTH
+ *
+ * f_pos = (number of the vma in the task->mm->mmap list) * PAGE_SIZE
* + (index into the line)
*/
/* for systems with sizeof(void*) == 4: */
#define MAPS_LINE_FORMAT8 "%016lx-%016lx %s %016lx %s %lu"
#define MAPS_LINE_MAX8 73 /* sum of 16 1 16 1 4 1 16 1 5 1 10 1 */
-#define MAPS_LINE_MAX MAPS_LINE_MAX8
+#define MAPS_LINE_FORMAT (sizeof(void*) == 4 ? MAPS_LINE_FORMAT4 : MAPS_LINE_FORMAT8)
+#define MAPS_LINE_MAX (sizeof(void*) == 4 ? MAPS_LINE_MAX4 : MAPS_LINE_MAX8)
+static int proc_pid_maps_get_line (char *buf, struct vm_area_struct *map)
+{
+ /* produce the next line */
+ char *line;
+ char str[5];
+ int flags;
+ kdev_t dev;
+ unsigned long ino;
+ int len;
+
+ flags = map->vm_flags;
+
+ str[0] = flags & VM_READ ? 'r' : '-';
+ str[1] = flags & VM_WRITE ? 'w' : '-';
+ str[2] = flags & VM_EXEC ? 'x' : '-';
+ str[3] = flags & VM_MAYSHARE ? 's' : 'p';
+ str[4] = 0;
+
+ dev = 0;
+ ino = 0;
+ if (map->vm_file != NULL) {
+ dev = map->vm_file->f_dentry->d_inode->i_dev;
+ ino = map->vm_file->f_dentry->d_inode->i_ino;
+ line = d_path(map->vm_file->f_dentry,
+ map->vm_file->f_vfsmnt,
+ buf, PAGE_SIZE);
+ buf[PAGE_SIZE-1] = '\n';
+ line -= MAPS_LINE_MAX;
+ if(line < buf)
+ line = buf;
+ } else
+ line = buf;
+
+ len = sprintf(line,
+ MAPS_LINE_FORMAT,
+ map->vm_start, map->vm_end, str, map->vm_pgoff << PAGE_SHIFT,
+ kdevname(dev), ino);
+
+ if(map->vm_file) {
+ int i;
+ for(i = len; i < MAPS_LINE_MAX; i++)
+ line[i] = ' ';
+ len = buf + PAGE_SIZE - line;
+ memmove(buf, line, len);
+ } else
+ line[len++] = '\n';
+ return len;
+}
ssize_t proc_pid_read_maps (struct task_struct *task, struct file * file, char * buf,
size_t count, loff_t *ppos)
{
struct mm_struct *mm;
- struct vm_area_struct * map, * next;
- char * destptr = buf, * buffer;
- loff_t lineno;
- ssize_t column, i;
- int volatile_task;
+ struct vm_area_struct * map;
+ char *tmp, *kbuf;
long retval;
+ int off, lineno, loff;
+ /* reject calls with out of range parameters immediately */
+ retval = 0;
+ if (*ppos > LONG_MAX)
+ goto out;
+ if (count == 0)
+ goto out;
+ off = (long)*ppos;
/*
* We might sleep getting the page, so get it first.
*/
retval = -ENOMEM;
- buffer = (char*)__get_free_page(GFP_KERNEL);
- if (!buffer)
+ kbuf = (char*)__get_free_page(GFP_KERNEL);
+ if (!kbuf)
goto out;
- if (count == 0)
- goto getlen_out;
+ tmp = (char*)__get_free_page(GFP_KERNEL);
+ if (!tmp)
+ goto out_free1;
+
task_lock(task);
mm = task->mm;
if (mm)
atomic_inc(&mm->mm_users);
task_unlock(task);
+ retval = 0;
if (!mm)
- goto getlen_out;
-
- /* Check whether the mmaps could change if we sleep */
- volatile_task = (task != current || atomic_read(&mm->mm_users) > 2);
-
- /* decode f_pos */
- lineno = *ppos >> MAPS_LINE_SHIFT;
- column = *ppos & (MAPS_LINE_LENGTH-1);
+ goto out_free2;
- /* quickly go to line lineno */
down_read(&mm->mmap_sem);
- for (map = mm->mmap, i = 0; map && (i < lineno); map = map->vm_next, i++)
- continue;
-
- for ( ; map ; map = next ) {
- /* produce the next line */
- char *line;
- char str[5], *cp = str;
- int flags;
- kdev_t dev;
- unsigned long ino;
- int maxlen = (sizeof(void*) == 4) ?
- MAPS_LINE_MAX4 : MAPS_LINE_MAX8;
+ map = mm->mmap;
+ lineno = 0;
+ loff = 0;
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+ while (map) {
int len;
-
- /*
- * Get the next vma now (but it won't be used if we sleep).
- */
- next = map->vm_next;
- flags = map->vm_flags;
-
- *cp++ = flags & VM_READ ? 'r' : '-';
- *cp++ = flags & VM_WRITE ? 'w' : '-';
- *cp++ = flags & VM_EXEC ? 'x' : '-';
- *cp++ = flags & VM_MAYSHARE ? 's' : 'p';
- *cp++ = 0;
-
- dev = 0;
- ino = 0;
- if (map->vm_file != NULL) {
- dev = map->vm_file->f_dentry->d_inode->i_dev;
- ino = map->vm_file->f_dentry->d_inode->i_ino;
- line = d_path(map->vm_file->f_dentry,
- map->vm_file->f_vfsmnt,
- buffer, PAGE_SIZE);
- buffer[PAGE_SIZE-1] = '\n';
- line -= maxlen;
- if(line < buffer)
- line = buffer;
- } else
- line = buffer;
-
- len = sprintf(line,
- sizeof(void*) == 4 ? MAPS_LINE_FORMAT4 : MAPS_LINE_FORMAT8,
- map->vm_start, map->vm_end, str, map->vm_pgoff << PAGE_SHIFT,
- kdevname(dev), ino);
-
- if(map->vm_file) {
- for(i = len; i < maxlen; i++)
- line[i] = ' ';
- len = buffer + PAGE_SIZE - line;
- } else
- line[len++] = '\n';
- if (column >= len) {
- column = 0; /* continue with next line at column 0 */
- lineno++;
- continue; /* we haven't slept */
+ if (off > PAGE_SIZE) {
+ off -= PAGE_SIZE;
+ goto next;
}
-
- i = len-column;
- if (i > count)
- i = count;
- copy_to_user(destptr, line+column, i); /* may have slept */
- destptr += i;
- count -= i;
- column += i;
- if (column >= len) {
- column = 0; /* next time: next line at column 0 */
- lineno++;
+ len = proc_pid_maps_get_line(tmp, map);
+ len -= off;
+ if (len > 0) {
+ if (retval+len > count) {
+ /* only partial line transfer possible */
+ len = count - retval;
+ /* save the offset where the next read
+ * must start */
+ loff = len+off;
+ }
+ memcpy(kbuf+retval, tmp+off, len);
+ retval += len;
}
-
- /* done? */
- if (count == 0)
- break;
-
- /* By writing to user space, we might have slept.
- * Stop the loop, to avoid a race condition.
- */
- if (volatile_task)
+ off = 0;
+next:
+ if (!loff)
+ lineno++;
+ if (retval >= count)
break;
+ if (loff) BUG();
+ map = map->vm_next;
}
up_read(&mm->mmap_sem);
-
- /* encode f_pos */
- *ppos = (lineno << MAPS_LINE_SHIFT) + column;
mmput(mm);
-getlen_out:
- retval = destptr - buf;
- free_page((unsigned long)buffer);
+ if (retval > count) BUG();
+ if (copy_to_user(buf, kbuf, retval))
+ retval = -EFAULT;
+ else
+ *ppos = (lineno << PAGE_SHIFT) + loff;
+
+out_free2:
+ free_page((unsigned long)tmp);
+out_free1:
+ free_page((unsigned long)kbuf);
out:
return retval;
}
--- /dev/null
+#include <asm-generic/tlb.h>
--- /dev/null
+#include <asm-generic/tlb.h>
--- /dev/null
+/* asm-generic/tlb.h
+ *
+ * Generic TLB shootdown code
+ *
+ * Copyright 2001 Red Hat, Inc.
+ * Based on code from mm/memory.c Copyright Linus Torvalds and others.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC__TLB_H
+#define _ASM_GENERIC__TLB_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+/* aim for something that fits in the L1 cache */
+#define FREE_PTE_NR 508
+
+/* mmu_gather_t is an opaque type used by the mm code for passing around any
+ * data needed by arch specific code for tlb_remove_page. This structure can
+ * be per-CPU or per-MM as the page table lock is held for the duration of TLB
+ * shootdown.
+ */
+typedef struct free_pte_ctx {
+ struct mm_struct *mm;
+ unsigned long nr; /* set to ~0UL means fast mode */
+ unsigned long start_addr, end_addr;
+ pte_t ptes[FREE_PTE_NR];
+} mmu_gather_t;
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+extern mmu_gather_t mmu_gathers[NR_CPUS];
+
+/* tlb_gather_mmu
+ * Return a pointer to an initialized mmu_gather_t.
+ */
+static inline mmu_gather_t *tlb_gather_mmu(struct mm_struct *mm)
+{
+ mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
+
+ tlb->mm = mm;
+ /* Use fast mode if there is only one user of this mm (this process) */
+ tlb->nr = (atomic_read(&(mm)->mm_users) == 1) ? ~0UL : 0UL;
+ return tlb;
+}
+
+/* void tlb_remove_page(mmu_gather_t *tlb, pte_t *ptep, unsigned long addr)
+ * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
+ * handling the additional races in SMP caused by other CPUs caching valid
+ * mappings in their TLBs.
+ */
+#define tlb_remove_page(ctxp, pte, addr) do {\
+ /* Handle the common case fast, first. */\
+ if ((ctxp)->nr == ~0UL) {\
+ __free_pte(*(pte));\
+ pte_clear((pte));\
+ break;\
+ }\
+ if (!(ctxp)->nr) \
+ (ctxp)->start_addr = (addr);\
+ (ctxp)->ptes[(ctxp)->nr++] = ptep_get_and_clear(pte);\
+ (ctxp)->end_addr = (addr) + PAGE_SIZE;\
+ if ((ctxp)->nr >= FREE_PTE_NR)\
+ tlb_finish_mmu((ctxp), 0, 0);\
+ } while (0)
+
+/* tlb_finish_mmu
+ * Called at the end of the shootdown operation to free up any resources
+ * that were required. The page table lock is still held at this point.
+ */
+static inline void tlb_finish_mmu(struct free_pte_ctx *ctx, unsigned long start, unsigned long end)
+{
+ unsigned long i, nr;
+
+ /* Handle the fast case first. */
+ if (ctx->nr == ~0UL) {
+ flush_tlb_range(ctx->mm, start, end);
+ return;
+ }
+ nr = ctx->nr;
+ ctx->nr = 0;
+ if (nr)
+ flush_tlb_range(ctx->mm, ctx->start_addr, ctx->end_addr);
+ for (i=0; i < nr; i++) {
+ pte_t pte = ctx->ptes[i];
+ __free_pte(pte);
+ }
+}
+
+#else
+
+/* The uniprocessor functions are quite simple and are inline macros in an
+ * attempt to get gcc to generate optimal code since this code is run on each
+ * page in a process at exit.
+ */
+typedef struct mm_struct mmu_gather_t;
+
+#define tlb_gather_mmu(mm) (mm)
+#define tlb_finish_mmu(tlb, start, end) flush_tlb_range(tlb, start, end)
+#define tlb_remove_page(tlb, ptep, addr) do {\
+ pte_t __pte = *(ptep);\
+ pte_clear(ptep);\
+ __free_pte(__pte);\
+ } while (0)
+
+#endif
+
+
+#endif /* _ASM_GENERIC__TLB_H */
+
--- /dev/null
+#include <asm-generic/tlb.h>
--- /dev/null
+#include <asm-generic/tlb.h>
--- /dev/null
+#include <asm-generic/tlb.h>
--- /dev/null
+#include <asm-generic/tlb.h>
--- /dev/null
+#include <asm-generic/tlb.h>
--- /dev/null
+#include <asm-generic/tlb.h>
-/* $Id: fcntl.h,v 1.15 2000/09/23 02:09:21 davem Exp $ */
+/* $Id: fcntl.h,v 1.16 2001/09/20 00:35:33 davem Exp $ */
#ifndef _SPARC_FCNTL_H
#define _SPARC_FCNTL_H
-/* $Id: processor.h,v 1.81 2001/03/27 02:36:37 davem Exp $
+/* $Id: processor.h,v 1.82 2001/09/20 00:35:34 davem Exp $
* include/asm-sparc/processor.h
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
--- /dev/null
+#include <asm-generic/tlb.h>
-/* $Id: fcntl.h,v 1.11 2000/09/23 02:09:21 davem Exp $ */
+/* $Id: fcntl.h,v 1.12 2001/09/20 00:35:34 davem Exp $ */
#ifndef _SPARC64_FCNTL_H
#define _SPARC64_FCNTL_H
-/* $Id: pgtable.h,v 1.145 2001/08/30 03:22:00 kanoj Exp $
+/* $Id: pgtable.h,v 1.146 2001/09/11 02:20:23 kanoj Exp $
* pgtable.h: SpitFire page table operations.
*
* Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/page.h>
+#include <asm/processor.h>
/* XXX All of this needs to be rethought so we can take advantage
* XXX cheetah's full 64-bit virtual address space, ie. no more hole
* long). Finally, the higher few bits determine pgde#.
*/
-#define VA_BITS 44
-
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PTRS_PER_PMD ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
(1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : (REAL_PTRS_PER_PMD)))
-/* We cannot use the top 16G because VPTE table lives there. */
-#define PTRS_PER_PGD ((1UL << (VA_BITS - PAGE_SHIFT - (PAGE_SHIFT-3) - PMD_BITS))-1)
+/*
+ * We cannot use the top address range because VPTE table lives there. This
+ * formula finds the total legal virtual space in the processor, subtracts the
+ * vpte size, then aligns it to the number of bytes mapped by one pgde, and
+ * thus calculates the number of pgdes needed.
+ */
+#define PTRS_PER_PGD (((1UL << VA_BITS) - VPTE_SIZE + (1UL << (PAGE_SHIFT + \
+ (PAGE_SHIFT-3) + PMD_BITS)) - 1) / (1UL << (PAGE_SHIFT + \
+ (PAGE_SHIFT-3) + PMD_BITS)))
/* Kernel has a separate 44bit address space. */
#define USER_PTRS_PER_PGD ((const int)((current->thread.flags & SPARC_FLAG_32BIT) ? \
-/* $Id: processor.h,v 1.70 2001/03/27 02:36:38 davem Exp $
+/* $Id: processor.h,v 1.75 2001/09/20 00:35:34 davem Exp $
* include/asm-sparc64/processor.h
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/segment.h>
+#include <asm/page.h>
/* Bus types */
#define EISA_bus 0
#define wp_works_ok 1
#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-/* User lives in his very own context, and cannot reference us. */
-#ifndef __ASSEMBLY__
-#define TASK_SIZE ((unsigned long)-PGDIR_SIZE)
-#else
-#define TASK_SIZE 0xfffffffc00000000
-#endif
+/*
+ * User lives in his very own context, and cannot reference us. Note
+ * that TASK_SIZE is a misnomer, it really gives maximum user virtual
+ * address that the kernel will allocate out.
+ */
+#define VA_BITS 44
+#define VPTE_SIZE (1UL << (VA_BITS - PAGE_SHIFT + 3))
+#define TASK_SIZE ((unsigned long)-VPTE_SIZE)
#ifndef __ASSEMBLY__
0, 0, 0, 0, 0, \
}
+#ifdef __KERNEL__
+#if PAGE_SHIFT == 13
+#define THREAD_SIZE (2*PAGE_SIZE)
+#define THREAD_SHIFT (PAGE_SHIFT + 1)
+#else /* PAGE_SHIFT == 13 */
+#define THREAD_SIZE PAGE_SIZE
+#define THREAD_SHIFT PAGE_SHIFT
+#endif /* PAGE_SHIFT == 13 */
+#endif /* __KERNEL__ */
+
#ifndef __ASSEMBLY__
/* Return saved PC of a blocked thread. */
do { \
/* Bogus frame pointer? */ \
if (fp < (task_base + sizeof(struct task_struct)) || \
- fp >= (task_base + (2 * PAGE_SIZE))) \
+ fp >= (task_base + THREAD_SIZE)) \
break; \
rw = (struct reg_window *) fp; \
pc = rw->ins[7]; \
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
#ifdef __KERNEL__
-#define THREAD_SIZE (2*PAGE_SIZE)
/* Allocation and freeing of task_struct and kernel stack. */
+#if PAGE_SHIFT == 13
#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL, 1))
#define free_task_struct(tsk) free_pages((unsigned long)(tsk),1)
+#else /* PAGE_SHIFT == 13 */
+#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL, 0))
+#define free_task_struct(tsk) free_pages((unsigned long)(tsk),0)
+#endif /* PAGE_SHIFT == 13 */
#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
/* Getting on the cycle counter on sparc64. */
typedef unsigned long cycles_t;
-extern cycles_t cacheflush_time;
#define get_cycles() \
({ cycles_t ret; \
__asm__("rd %%tick, %0" : "=r" (ret)); \
--- /dev/null
+#include <asm-generic/tlb.h>
/* credit winbond-840.c
*/
+#include <asm/io.h>
struct eeprom_ops {
void (*set_cs)(void *ee);
void (*clear_cs)(void *ee);
/* Lock info passed via NLM */
struct nlm_lock {
char * caller;
+ int len; /* length of "caller" */
struct nfs_fh fh;
struct xdr_netobj oh;
struct file_lock fl;
return count == 3; /* =3: total */
}
+extern void __free_pte(pte_t);
+
/* mmap.c */
extern void lock_vma_mappings(struct vm_area_struct *);
extern void unlock_vma_mappings(struct vm_area_struct *);
#define SONYPI_EVENT_FNKEY_S 29
#define SONYPI_EVENT_FNKEY_B 30
#define SONYPI_EVENT_BLUETOOTH_PRESSED 31
+#define SONYPI_EVENT_PKEY_P1 32
+#define SONYPI_EVENT_PKEY_P2 33
+#define SONYPI_EVENT_PKEY_P3 34
+
/* brightness etc. ioctls */
#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
u32 * xdr_encode_array(u32 *p, const char *s, unsigned int len);
u32 * xdr_encode_string(u32 *p, const char *s);
u32 * xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen);
+u32 * xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen);
u32 * xdr_encode_netobj(u32 *p, const struct xdr_netobj *);
u32 * xdr_decode_netobj(u32 *p, struct xdr_netobj *);
u32 * xdr_decode_netobj_fixed(u32 *p, void *obj, unsigned int len);
#include <linux/utsname.h>
#include <linux/ioport.h>
#include <linux/init.h>
-#include <linux/raid/md.h>
#include <linux/smp_lock.h>
#include <linux/blk.h>
#include <linux/hdreg.h>
#ifdef CONFIG_X86_IO_APIC
static void __init smp_init(void)
{
- IO_APIC_init_uniprocessor();
+ APIC_init_uniprocessor();
}
#else
#define smp_init() do { } while (0)
if (vma->vm_flags & VM_LOCKED)
return -EINVAL;
- flush_cache_range(vma->vm_mm, start, end);
zap_page_range(vma->vm_mm, start, end - start);
- flush_tlb_range(vma->vm_mm, start, end);
return 0;
}
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
+#include <asm/tlb.h>
unsigned long max_mapnr;
unsigned long num_physpages;
mem_map_t * mem_map;
+/*
+ * Called by TLB shootdown
+ */
+void __free_pte(pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ if ((!VALID_PAGE(page)) || PageReserved(page))
+ return;
+ /*
+ * free_page() used to be able to clear swap cache
+ * entries. We may now have to do it manually.
+ */
+ if (pte_dirty(pte) && page->mapping)
+ set_page_dirty(page);
+ free_page_and_swap_cache(page);
+}
+
+
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
/*
* Return indicates whether a page was freed so caller can adjust rss
*/
-static inline int free_pte(pte_t pte)
-{
- if (pte_present(pte)) {
- struct page *page = pte_page(pte);
- if ((!VALID_PAGE(page)) || PageReserved(page))
- return 0;
- /*
- * free_page() used to be able to clear swap cache
- * entries. We may now have to do it manually.
- */
- if (pte_dirty(pte) && page->mapping)
- set_page_dirty(page);
- free_page_and_swap_cache(page);
- return 1;
- }
- swap_free(pte_to_swp_entry(pte));
- return 0;
-}
-
static inline void forget_pte(pte_t page)
{
if (!pte_none(page)) {
printk("forget_pte: old mapping existed!\n");
- free_pte(page);
+ BUG();
}
}
-static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size)
+static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
{
- pte_t * pte;
- int freed;
+ unsigned long offset;
+ pte_t * ptep;
+ int freed = 0;
if (pmd_none(*pmd))
return 0;
pmd_clear(pmd);
return 0;
}
- pte = pte_offset(pmd, address);
- address &= ~PMD_MASK;
- if (address + size > PMD_SIZE)
- size = PMD_SIZE - address;
- size >>= PAGE_SHIFT;
- freed = 0;
- for (;;) {
- pte_t page;
- if (!size)
- break;
- page = ptep_get_and_clear(pte);
- pte++;
- size--;
- if (pte_none(page))
+ ptep = pte_offset(pmd, address);
+ offset = address & ~PMD_MASK;
+ if (offset + size > PMD_SIZE)
+ size = PMD_SIZE - offset;
+ size &= PAGE_MASK;
+ for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
+ pte_t pte = *ptep;
+ if (pte_none(pte))
continue;
- freed += free_pte(page);
+ if (pte_present(pte)) {
+ freed ++;
+ /* This will eventually call __free_pte on the pte. */
+ tlb_remove_page(tlb, ptep, address + offset);
+ } else {
+ swap_free(pte_to_swp_entry(pte));
+ pte_clear(ptep);
+ }
}
+
return freed;
}
-static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
+static inline int zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, unsigned long address, unsigned long size)
{
pmd_t * pmd;
unsigned long end;
return 0;
}
pmd = pmd_offset(dir, address);
- address &= ~PGDIR_MASK;
end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
+ if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
+ end = ((address + PGDIR_SIZE) & PGDIR_MASK);
freed = 0;
do {
- freed += zap_pte_range(mm, pmd, address, end - address);
+ freed += zap_pte_range(tlb, pmd, address, end - address);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
*/
void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
+ mmu_gather_t *tlb;
pgd_t * dir;
- unsigned long end = address + size;
+ unsigned long start = address, end = address + size;
int freed = 0;
dir = pgd_offset(mm, address);
if (address >= end)
BUG();
spin_lock(&mm->page_table_lock);
+ flush_cache_range(mm, address, end);
+ tlb = tlb_gather_mmu(mm);
+
do {
- freed += zap_pmd_range(mm, dir, address, end - address);
+ freed += zap_pmd_range(tlb, dir, address, end - address);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
+
+ /* this will flush any remaining tlb entries */
+ tlb_finish_mmu(tlb, start, end);
+
/*
* Update rss for the mm_struct (not necessarily current->mm)
* Notice that rss is an unsigned long.
/* mapping wholly truncated? */
if (mpnt->vm_pgoff >= pgoff) {
- flush_cache_range(mm, start, end);
zap_page_range(mm, start, len);
- flush_tlb_range(mm, start, end);
continue;
}
/* Ok, partially affected.. */
start += diff << PAGE_SHIFT;
len = (len - diff) << PAGE_SHIFT;
- flush_cache_range(mm, start, end);
zap_page_range(mm, start, len);
- flush_tlb_range(mm, start, end);
} while ((mpnt = mpnt->vm_next_share) != NULL);
}
pte = mk_pte(page, vma->vm_page_prot);
swap_free(entry);
- if (exclusive_swap_page(page)) {
-#if 0
- if (write_access)
- pte = pte_mkwrite(pte_mkdirty(pte));
-#else
+ if (exclusive_swap_page(page)) {
+ if (vma->vm_flags & VM_WRITE)
+ pte = pte_mkwrite(pte);
+ pte = pte_mkdirty(pte);
delete_from_swap_cache_nolock(page);
- pte = pte_mkwrite(pte_mkdirty(pte));
-#endif
}
UnlockPage(page);
atomic_inc(&file->f_dentry->d_inode->i_writecount);
vma->vm_file = NULL;
fput(file);
+
/* Undo any partial mapping done by a device driver. */
- flush_cache_range(mm, vma->vm_start, vma->vm_end);
zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
- flush_tlb_range(mm, vma->vm_start, vma->vm_end);
free_vma:
kmem_cache_free(vm_area_cachep, vma);
return error;
remove_shared_vm_struct(mpnt);
mm->map_count--;
- flush_cache_range(mm, st, end);
zap_page_range(mm, st, size);
- flush_tlb_range(mm, st, end);
/*
* Fix the mapping, and free the old area if it wasn't reused.
while ((offset += PAGE_SIZE) < len)
move_one_page(mm, new_addr + offset, old_addr + offset);
zap_page_range(mm, new_addr, len);
- flush_tlb_range(mm, new_addr, new_addr + len);
return -1;
}
BUG();
if (PageInactive(page))
BUG();
- if (PageDirty(page))
- BUG();
+ page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));
if (current->flags & PF_FREE_PAGES)
goto local_freelist;
return NULL;
}
+ rebalance:
page = balance_classzone(classzone, gfp_mask, order, &freed);
if (page)
return page;
if (!z)
break;
- page = rmqueue(z, order);
- if (page)
- return page;
+ if (zone_free_pages(z, order) > z->pages_min) {
+ page = rmqueue(z, order);
+ if (page)
+ return page;
+ }
}
+ goto rebalance;
} else {
/*
* Check that no other task is been killed meanwhile,
if (!wait) {
SetPageDecrAfter(page);
atomic_inc(&nr_async_pages);
- } else
- /*
- * Must hold a reference until after wait_on_page()
- * returned or it could be freed by the VM after
- * I/O is completed and the page is been unlocked.
- * The asynchronous path is fine since it never
- * references the page after brw_page().
- */
- page_cache_get(page);
+ }
/* block_size == PAGE_SIZE/zones_used */
brw_page(rw, page, dev, zones, block_size);
/* This shouldn't happen, but check to be sure. */
if (page_count(page) == 0)
printk(KERN_ERR "rw_swap_page: page unused while waiting!\n");
- page_cache_release(page);
return 1;
}
error = -ENOMEM;
if (!swap.val) {
activate_page(page);
+ SetPageDirty(page);
goto out;
}
info->swapped++;
spin_unlock(&info->lock);
-out:
set_page_dirty(page);
+out:
UnlockPage(page);
return error;
}
* cache and swap cache. We need to recheck the page cache
* under the protection of the info->lock spinlock. */
- page = __find_get_page(mapping, idx, page_hash(mapping, idx));
+ page = find_get_page(mapping, idx);
if (page) {
if (TryLockPage(page))
goto wait_retry;
unsigned long flags;
/* Look it up and read it in.. */
- page = __find_get_page(&swapper_space, entry->val,
- page_hash(&swapper_space, entry->val));
+ page = find_get_page(&swapper_space, entry->val);
if (!page) {
+ swp_entry_t swap = *entry;
spin_unlock (&info->lock);
lock_kernel();
swapin_readahead(*entry);
page = read_swap_cache_async(*entry);
unlock_kernel();
- if (!page)
+ if (!page) {
+ if (entry->val != swap.val)
+ goto repeat;
return ERR_PTR(-ENOMEM);
+ }
wait_on_page(page);
- if (!Page_Uptodate(page)) {
+ if (!Page_Uptodate(page) && entry->val == swap.val) {
page_cache_release(page);
return ERR_PTR(-EIO);
}
return 0;
found:
add_to_page_cache(page, inode->i_mapping, offset + idx);
- set_page_dirty(page);
+ SetPageDirty(page);
SetPageUptodate(page);
UnlockPage(page);
info->swapped--;
*/
static int swap_writepage(struct page *page)
{
+ /* One for the page cache, one for this user, one for page->buffers */
+ if (page_count(page) > 2 + !!page->buffers)
+ goto in_use;
+ if (swap_count(page) > 1)
+ goto in_use;
+
+ delete_from_swap_cache_nolock(page);
+ UnlockPage(page);
+ return 0;
+
+in_use:
rw_swap_page(WRITE, page);
return 0;
}
for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
if (si->swap_map[offset])
continue;
+ si->lowest_bit = offset+1;
got_page:
if (offset == si->lowest_bit)
si->lowest_bit++;
if (offset == si->highest_bit)
si->highest_bit--;
+ if (si->lowest_bit > si->highest_bit) {
+ si->lowest_bit = si->max;
+ si->highest_bit = 0;
+ }
si->swap_map[offset] = count;
nr_swap_pages--;
si->cluster_next = offset+1;
return offset;
}
+ si->lowest_bit = si->max;
+ si->highest_bit = 0;
return 0;
}
lock_page(page);
if (PageSwapCache(page))
delete_from_swap_cache_nolock(page);
+ SetPageDirty(page);
UnlockPage(page);
flush_page_to_ram(page);
mmput(start_mm);
start_mm = new_start_mm;
}
- ClearPageDirty(page);
page_cache_release(page);
/*
asmlinkage long sys_swapoff(const char * specialfile)
{
struct swap_info_struct * p = NULL;
+ unsigned short *swap_map;
struct nameidata nd;
int i, type, prev;
int err;
blkdev_put(nd.dentry->d_inode->i_bdev, BDEV_SWAP);
path_release(&nd);
+ swap_list_lock();
nd.dentry = p->swap_file;
p->swap_file = NULL;
nd.mnt = p->swap_vfsmnt;
p->swap_vfsmnt = NULL;
p->swap_device = 0;
p->max = 0;
- vfree(p->swap_map);
+ swap_map = p->swap_map;
p->swap_map = NULL;
p->flags = 0;
+ swap_list_unlock();
+ vfree(swap_map);
err = 0;
out_dput:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
+ swap_list_lock();
p = swap_info;
for (type = 0 ; type < nr_swapfiles ; type++,p++)
if (!(p->flags & SWP_USED))
break;
error = -EPERM;
- if (type >= MAX_SWAPFILES)
+ if (type >= MAX_SWAPFILES) {
+ swap_list_unlock();
goto out;
+ }
if (type >= nr_swapfiles)
nr_swapfiles = type+1;
p->flags = SWP_USED;
} else {
p->prio = --least_priority;
}
+ swap_list_unlock();
error = user_path_walk(specialfile, &nd);
if (error)
goto bad_swap_2;
}
p->lowest_bit = 1;
- p->highest_bit = swap_header->info.last_page - 1;
maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
if (maxpages > swap_header->info.last_page)
maxpages = swap_header->info.last_page;
+ p->highest_bit = maxpages - 1;
error = -EINVAL;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
goto bad_swap;
}
p->swap_map[0] = SWAP_MAP_BAD;
+ swap_list_lock();
p->max = maxpages;
p->flags = SWP_WRITEOK;
p->pages = nr_good_pages;
- swap_list_lock();
nr_swap_pages += nr_good_pages;
total_swap_pages += nr_good_pages;
printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
vfree(p->swap_map);
nd.mnt = p->swap_vfsmnt;
nd.dentry = p->swap_file;
+ swap_list_lock();
p->swap_device = 0;
p->swap_file = NULL;
p->swap_vfsmnt = NULL;
p->flags = 0;
if (!(swap_flags & SWAP_FLAG_PREFER))
++least_priority;
+ swap_list_unlock();
path_release(&nd);
out:
if (swap_header)
void si_swapinfo(struct sysinfo *val)
{
unsigned int i;
- unsigned long freeswap = 0;
- unsigned long totalswap = 0;
+ unsigned long nr_to_be_unused = 0;
+ swap_list_lock();
for (i = 0; i < nr_swapfiles; i++) {
unsigned int j;
- if ((swap_info[i].flags & SWP_WRITEOK) != SWP_WRITEOK)
+ if (swap_info[i].flags != SWP_USED)
continue;
for (j = 0; j < swap_info[i].max; ++j) {
switch (swap_info[i].swap_map[j]) {
+ case 0:
case SWAP_MAP_BAD:
continue;
- case 0:
- freeswap++;
default:
- totalswap++;
+ nr_to_be_unused++;
}
}
}
- val->freeswap = freeswap;
- val->totalswap = totalswap;
- return;
+ val->freeswap = nr_swap_pages + nr_to_be_unused;
+ val->totalswap = total_swap_pages + nr_to_be_unused;
+ swap_list_unlock();
}
/*
int (*writepage)(struct page *);
writepage = page->mapping->a_ops->writepage;
- if (gfp_mask & __GFP_FS && writepage) {
+ if ((gfp_mask & __GFP_FS) && writepage) {
+ ClearPageDirty(page);
+ page_cache_get(page);
spin_unlock(&pagemap_lru_lock);
- ClearPageDirty(page);
writepage(page);
+ page_cache_release(page);
spin_lock(&pagemap_lru_lock);
continue;
if (try_to_free_buffers(page, gfp_mask)) {
if (!page->mapping) {
- UnlockPage(page);
-
/*
* Account we successfully freed a page
* of buffer cache.
*/
atomic_dec(&buffermem_pages);
+ /*
+ * We must not allow an anon page
+ * with no buffers to be visible on
+ * the LRU, so we unlock the page after
+ * taking the lru lock
+ */
spin_lock(&pagemap_lru_lock);
+ UnlockPage(page);
__lru_cache_del(page);
/* effectively free the page here */
return nr_pages;
}
-static void refill_inactive(int nr_pages)
+/*
+ * This moves pages from the active list to
+ * the inactive list.
+ *
+ * We move them the other way when we see the
+ * reference bit on the page.
+ */
+static void balance_inactive(int nr_pages)
{
struct list_head * entry;
+ /* If we have more inactive pages than active don't do anything */
+ if (nr_active_pages < nr_inactive_pages)
+ return;
+
spin_lock(&pagemap_lru_lock);
entry = active_list.prev;
while (nr_pages-- && entry != &active_list) {
page = list_entry(entry, struct page, lru);
entry = entry->prev;
- if (!page->buffers && page_count(page) != 1)
- continue;
-
del_page_from_active_list(page);
add_page_to_inactive_list(page);
}
if (nr_pages <= 0)
return 0;
- refill_inactive(nr_pages / 2);
+ balance_inactive(nr_pages);
nr_pages = shrink_cache(&inactive_list, &max_scan, nr_inactive_pages, nr_pages, classzone, gfp_mask);
if (nr_pages <= 0)
return 0;
dev->iflink = -1;
/* Init, if this function is available */
- if (dev->init && dev->init(dev) != 0)
+ if (dev->init && dev->init(dev) != 0) {
+#ifdef CONFIG_NET_DIVERT
+ free_divert_blk(dev);
+#endif
return -EIO;
+ }
dev->ifindex = dev_new_index();
if (dev->iflink == -1)
/* Check for existence, and append to tail of chain */
for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
if (d == dev || strcmp(d->name, dev->name) == 0) {
+#ifdef CONFIG_NET_DIVERT
+ free_divert_blk(dev);
+#endif
return -EEXIST;
}
}
*
* The IP to API glue.
*
- * Version: $Id: ip_sockglue.c,v 1.59 2001/08/13 18:56:12 davem Exp $
+ * Version: $Id: ip_sockglue.c,v 1.60 2001/09/18 22:29:09 davem Exp $
*
* Authors: see ip.c
*
/*
* Linux NET3: IP/IP protocol decoder.
*
- * Version: $Id: ipip.c,v 1.46 2001/05/17 04:12:18 davem Exp $
+ * Version: $Id: ipip.c,v 1.47 2001/09/18 00:36:07 davem Exp $
*
* Authors:
* Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: ipmr.c,v 1.63 2001/08/13 18:56:12 davem Exp $
+ * Version: $Id: ipmr.c,v 1.64 2001/09/18 22:29:09 davem Exp $
*
* Fixes:
* Michael Chastain : Incorrect size of copying.
* license in recognition of the original copyright.
* -- Alan Cox.
*
- * $Id: ipfwadm_core.c,v 1.8 2001/08/13 18:56:12 davem Exp $
+ * $Id: ipfwadm_core.c,v 1.9 2001/09/18 22:29:10 davem Exp $
*
* Ported from BSD to Linux,
* Alan Cox 22/Nov/1994.
*
* ROUTE - implementation of the IP router.
*
- * Version: $Id: route.c,v 1.98 2001/08/13 18:56:12 davem Exp $
+ * Version: $Id: route.c,v 1.99 2001/09/18 22:29:09 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp.c,v 1.209 2001/08/28 00:31:04 davem Exp $
+ * Version: $Id: tcp.c,v 1.211 2001/09/20 00:35:35 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.235 2001/08/13 18:56:12 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.236 2001/09/18 22:29:09 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_minisocks.c,v 1.12 2001/08/13 18:56:13 davem Exp $
+ * Version: $Id: tcp_minisocks.c,v 1.13 2001/09/18 22:29:10 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_output.c,v 1.140 2001/08/13 18:56:12 davem Exp $
+ * Version: $Id: tcp_output.c,v 1.141 2001/09/18 22:29:10 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_timer.c,v 1.85 2001/08/13 18:56:12 davem Exp $
+ * Version: $Id: tcp_timer.c,v 1.86 2001/09/18 22:29:10 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: icmp.c,v 1.36 2001/09/01 00:31:50 davem Exp $
+ * $Id: icmp.c,v 1.37 2001/09/18 22:29:10 davem Exp $
*
* Based on net/ipv4/icmp.c
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: ip6_output.c,v 1.32 2001/09/01 00:31:50 davem Exp $
+ * $Id: ip6_output.c,v 1.33 2001/09/20 00:35:35 davem Exp $
*
* Based on linux/net/ipv4/ip_output.c
*
*
* Based on linux/net/ipv4/ip_sockglue.c
*
- * $Id: ipv6_sockglue.c,v 1.39 2001/08/13 18:56:13 davem Exp $
+ * $Id: ipv6_sockglue.c,v 1.40 2001/09/18 22:29:10 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
*
* Adapted from linux/net/ipv4/raw.c
*
- * $Id: raw.c,v 1.49 2001/08/13 18:56:13 davem Exp $
+ * $Id: raw.c,v 1.50 2001/09/18 22:29:10 davem Exp $
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: route.c,v 1.54 2001/08/13 18:56:13 davem Exp $
+ * $Id: route.c,v 1.55 2001/09/18 22:29:10 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
EXPORT_SYMBOL(xdr_encode_array);
EXPORT_SYMBOL(xdr_encode_string);
EXPORT_SYMBOL(xdr_decode_string);
+EXPORT_SYMBOL(xdr_decode_string_inplace);
EXPORT_SYMBOL(xdr_decode_netobj);
EXPORT_SYMBOL(xdr_encode_netobj);
EXPORT_SYMBOL(xdr_shift_iovec);
return p + XDR_QUADLEN(len);
}
+u32 *
+xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
+{
+ unsigned int len;
+
+ if ((len = ntohl(*p++)) > maxlen)
+ return NULL;
+ *lenp = len;
+	*sp = (char *) p;
+ return p + XDR_QUADLEN(len);
+}
+
+
/*
* Realign the iovec if the server missed out some reply elements
* (such as post-op attributes,...)
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version: $Id: af_unix.c,v 1.121 2001/08/13 18:56:13 davem Exp $
+ * Version: $Id: af_unix.c,v 1.123 2001/09/19 04:50:32 davem Exp $
*
* Fixes:
* Linus Torvalds : Assorted bug cures.