Changes for patch v193
- Went back to global rwsem for symlinks (refcount scheme no good)
+===============================================================================
+Changes for patch v194
+
+- Fixed overrun in <devfs_link> by removing function (not needed)
+
+- Updated README from master HTML file
+===============================================================================
+Changes for patch v195
+
+- Fixed buffer underrun in <try_modload>
+
+- Moved down_read() from <search_for_entry_in_dir> to <find_entry>
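The symlink locking change above (global rwsem, with down_read() hoisted from <search_for_entry_in_dir> into <find_entry>) amounts to taking the reader lock once in the outer lookup routine and letting the helper assume it is already held. A minimal sketch of that pattern, using hypothetical names rather than the real devfs structures:

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(symlink_rwsem);

	struct my_dir;
	struct my_entry;
	extern struct my_entry *do_scan(struct my_dir *dir, const char *name);

	/* helper: caller must already hold symlink_rwsem for reading */
	static struct my_entry *search_dir(struct my_dir *dir, const char *name)
	{
		return do_scan(dir, name);
	}

	static struct my_entry *find_entry(struct my_dir *dir, const char *name)
	{
		struct my_entry *e;

		down_read(&symlink_rwsem);	/* lock taken here, not in the helper */
		e = search_dir(dir, name);
		up_read(&symlink_rwsem);
		return e;
	}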
Linux Devfs (Device File System) FAQ
Richard Gooch
-23-AUG-2001
+29-SEP-2001
-----------------------------------------------------------------------------
Douglas Gilbert has written yet another useful document at
-http://www.torque.net/scsi/linux_scsi_24/ which
+http://www.torque.net/scsi/SCSI-2.4-HOWTO/ which
discusses the Linux SCSI subsystem in 2.4.
VERSION = 2
PATCHLEVEL = 4
-SUBLEVEL = 11
+SUBLEVEL = 12
EXTRAVERSION =
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
$(TOPDIR)/include/linux/compile.h: include/linux/compile.h
newversion:
- . scripts/mkversion > .version
+ . scripts/mkversion > .tmpversion
+ @mv -f .tmpversion .version
include/linux/compile.h: $(CONFIGURATION) include/linux/version.h newversion
@echo -n \#define UTS_VERSION \"\#`cat .version` > .ver
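With the SUBLEVEL bump above, KERNELRELEASE expands to 2.4.12. The reworked newversion rule writes the build counter to .tmpversion and then mv's it over .version, so a concurrent reader can never see a half-written file. The same write-then-rename idea in C, purely as an illustration of why the two-step update is safer (this is not kbuild code):

	#include <stdio.h>

	/* write text to tmp, then atomically replace final with it */
	static int update_version(const char *tmp, const char *final,
				  const char *text)
	{
		FILE *f = fopen(tmp, "w");

		if (f == NULL)
			return -1;
		fputs(text, f);
		if (fclose(f) != 0)
			return -1;
		return rename(tmp, final);	/* atomic on POSIX filesystems */
	}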
-/* $Id: process.c,v 1.155 2001/02/13 01:16:43 davem Exp $
+/* $Id: process.c,v 1.156 2001/10/02 02:22:26 davem Exp $
* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: systbls.S,v 1.99 2000/08/12 20:49:49 jj Exp $
+/* $Id: systbls.S,v 1.100 2001/10/09 10:54:38 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
/*125*/ .long sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
/*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
-/*140*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getrlimit
+/*140*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_gettid, sys_getrlimit
/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .long sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
/*155*/ .long sys_fcntl64, sys_nis_syscall, sys_statfs, sys_fstatfs, sys_oldumount
CONFIG_VFAT_FS=m
CONFIG_EFS_FS=m
# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
CONFIG_CRAMFS=m
# CONFIG_TMPFS is not set
CONFIG_RAMFS=m
CONFIG_USB_MDC800=m
CONFIG_USB_SCANNER=m
CONFIG_USB_MICROTEK=m
+CONFIG_USB_HPUSBSCSI=m
#
# USB Multimedia devices
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
CONFIG_USB_SERIAL_VISOR=m
+# CONFIG_USB_SERIAL_IR is not set
CONFIG_USB_SERIAL_EDGEPORT=m
CONFIG_USB_SERIAL_KEYSPAN_PDA=m
CONFIG_USB_SERIAL_KEYSPAN=m
# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
+# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
-/* $Id: dtlb_backend.S,v 1.15 2001/09/24 21:54:09 davem Exp $
+/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $
* dtlb_backend.S: Back end to DTLB miss replacement strategy.
* This is included directly into the trap table.
*
ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS
add %g3, %g3, %g5 ! Compute VPTE base
cmp %g4, %g5 ! VPTE miss?
- blu,pn %xcc, from_tl1_trap ! Fall to tl0 miss
+ bgeu,pt %xcc, 1f ! Continue here
andcc %g4, TAG_CONTEXT_BITS, %g5 ! From Nucleus? (for tl0 miss)
- sllx %g6, VPTE_SHIFT, %g4 ! Position TAG_ACCESS
- or %g4, %g5, %g4 ! Prepare TAG_ACCESS
- mov TSB_REG, %g1 ! Grab TSB reg
+ ba,pt %xcc, from_tl1_trap ! Fall to tl0 miss
+ rdpr %tl, %g5 ! For tl0 miss TL==3 test
+1: sllx %g6, VPTE_SHIFT, %g4 ! Position TAG_ACCESS
/* TLB1 ** ICACHE line 2: Quick VPTE miss */
+ or %g4, %g5, %g4 ! Prepare TAG_ACCESS
+ mov TSB_REG, %g1 ! Grab TSB reg
ldxa [%g1] ASI_DMMU, %g5 ! Doing PGD caching?
srlx %g6, (TLB_PMD_SHIFT - 1), %g1 ! Position PMD offset
be,pn %xcc, sparc64_vpte_nucleus ! Is it from Nucleus?
and %g1, TLB_PMD_MASK, %g1 ! Mask PMD offset bits
brnz,pt %g5, sparc64_vpte_continue ! Yep, go like smoke
add %g1, %g1, %g1 ! Position PMD offset some more
- srlx %g6, (TLB_PGD_SHIFT - 2), %g5 ! Position PGD offset
- and %g5, TLB_PGD_MASK, %g5 ! Mask PGD offset
/* TLB1 ** ICACHE line 3: Quick VPTE miss */
+ srlx %g6, (TLB_PGD_SHIFT - 2), %g5 ! Position PGD offset
+ and %g5, TLB_PGD_MASK, %g5 ! Mask PGD offset
lduwa [%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
brz,pn %g5, vpte_noent ! Valid?
sparc64_kpte_continue:
lduwa [%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
sllx %g5, 11, %g5 ! Shift into place
brz,pn %g5, vpte_noent ! Valid?
+
+/* TLB1 ** ICACHE line 4: Quick VPTE miss */
FILL_VALID_SZ_BITS1(%g1) ! Put _PAGE_VALID into %g1
FILL_VALID_SZ_BITS2(%g1) ! Put _PAGE_VALID into %g1
or %g5, VPTE_BITS, %g5 ! Prepare VPTE data
-
-/* TLB1 ** ICACHE line 4: Quick VPTE miss */
or %g5, %g1, %g5 ! ...
mov TLB_SFSR, %g1 ! Restore %g1 value
stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load VPTE into TLB
stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS
retry ! Load PTE once again
- nop
- nop
FILL_VALID_SZ_BITS_NOP
#undef VPTE_SHIFT
-/* $Id: dtlb_base.S,v 1.14 2001/09/11 02:20:23 kanoj Exp $
+/* $Id: dtlb_base.S,v 1.16 2001/10/09 04:02:11 davem Exp $
* dtlb_base.S: Front end to DTLB miss replacement strategy.
* This is included directly into the trap table.
*
/* DTLB ** ICACHE line 1: Quick user TLB misses */
ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS
andcc %g4, TAG_CONTEXT_BITS, %g0 ! From Nucleus?
+ mov 1, %g5 ! For TL==3 test
from_tl1_trap:
CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
be,pn %xcc, 3f ! Yep, special processing
CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
+ cmp %g5, 3 ! Last trap level?
+ be,a,pn %xcc, 1f ! Yep, use non-faulting load
+ ldxa [%g3 + %g6] ASI_SNF, %g5 ! Load VPTE (no-VPTE-fault)
+
+/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */
ldxa [%g3 + %g6] ASI_S, %g5 ! Load VPTE
-1: brlz,pt %g5, 9f ! Valid, load into TLB
+1: brgez,pn %g5, longpath ! Invalid, branch out
nop ! Delay-slot
- ba,a,pt %xcc, longpath ! Invalid, branch out
-
-/* DTLB ** ICACHE line 2: Quick kernel TLB misses */
-3: brlz,pt %g4, 9f ! Kernel virtual map?
- xor %g2, %g4, %g5 ! Finish bit twiddles
- ba,pt %xcc, kvmap ! Yep, go check for obp/vmalloc
- nop
- nop
9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
retry ! Trap return
-longpath:
- rdpr %pstate, %g5 ! Move into alternate globals
+3: brlz,pt %g4, 9b ! Kernel virtual map?
+ xor %g2, %g4, %g5 ! Finish bit twiddles
+ ba,a,pt %xcc, kvmap ! Yep, go check for obp/vmalloc
/* DTLB ** ICACHE line 3: winfixups+real_faults */
+longpath:
+ rdpr %pstate, %g5 ! Move into alternate globals
wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate
rdpr %tl, %g4 ! See where we came from.
cmp %g4, 1 ! Is etrap/rtrap window fault?
ldxa [%g4] ASI_DMMU, %g5 ! Load faulting VA page
be,pt %xcc, sparc64_realfault_common ! Jump to normal fault handling
mov FAULT_CODE_DTLB, %g4 ! It was read from DTLB
- ba,a,pt %xcc, winfix_trampoline ! Call window fixup code
/* DTLB ** ICACHE line 4: Unused... */
- nop
+ ba,a,pt %xcc, winfix_trampoline ! Call window fixup code
nop
nop
nop
-/* $Id: pci.c,v 1.35 2001/06/13 06:34:30 davem Exp $
+/* $Id: pci.c,v 1.36 2001/10/06 00:38:25 davem Exp $
* pci.c: UltraSparc PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
{
struct pcidev_cookie *pcp = pdev->sysdata;
struct pci_pbm_info *pbm;
+ struct pci_controller_info *p;
unsigned long space_size, user_offset, user_size;
if (!pcp)
if (!pbm)
return -ENXIO;
- if (mmap_state == pci_mmap_io) {
- space_size = (pbm->io_space.end -
- pbm->io_space.start) + 1;
+ p = pbm->parent;
+ if (p->pbms_same_domain) {
+ unsigned long lowest, highest;
+
+ lowest = ~0UL; highest = 0UL;
+ if (mmap_state == pci_mmap_io) {
+ if (p->pbm_A.io_space.flags) {
+ lowest = p->pbm_A.io_space.start;
+ highest = p->pbm_A.io_space.end + 1;
+ }
+ if (p->pbm_B.io_space.flags) {
+ if (lowest > p->pbm_B.io_space.start)
+ lowest = p->pbm_B.io_space.start;
+ if (highest < p->pbm_B.io_space.end + 1)
+ highest = p->pbm_B.io_space.end + 1;
+ }
+ space_size = highest - lowest;
+ } else {
+ if (p->pbm_A.mem_space.flags) {
+ lowest = p->pbm_A.mem_space.start;
+ highest = p->pbm_A.mem_space.end + 1;
+ }
+ if (p->pbm_B.mem_space.flags) {
+ if (lowest > p->pbm_B.mem_space.start)
+ lowest = p->pbm_B.mem_space.start;
+ if (highest < p->pbm_B.mem_space.end + 1)
+ highest = p->pbm_B.mem_space.end + 1;
+ }
+ space_size = highest - lowest;
+ }
} else {
- space_size = (pbm->mem_space.end -
- pbm->mem_space.start) + 1;
+ if (mmap_state == pci_mmap_io) {
+ space_size = (pbm->io_space.end -
+ pbm->io_space.start) + 1;
+ } else {
+ space_size = (pbm->mem_space.end -
+ pbm->mem_space.start) + 1;
+ }
}
/* Make sure the request is in range. */
(user_offset + user_size) > space_size)
return -EINVAL;
- if (mmap_state == pci_mmap_io) {
- vma->vm_pgoff = (pbm->io_space.start +
- user_offset) >> PAGE_SHIFT;
+ if (p->pbms_same_domain) {
+ unsigned long lowest = ~0UL;
+
+ if (mmap_state == pci_mmap_io) {
+ if (p->pbm_A.io_space.flags)
+ lowest = p->pbm_A.io_space.start;
+ if (p->pbm_B.io_space.flags &&
+ lowest > p->pbm_B.io_space.start)
+ lowest = p->pbm_B.io_space.start;
+ } else {
+ if (p->pbm_A.mem_space.flags)
+ lowest = p->pbm_A.mem_space.start;
+ if (p->pbm_B.mem_space.flags &&
+ lowest > p->pbm_B.mem_space.start)
+ lowest = p->pbm_B.mem_space.start;
+ }
+ vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
} else {
- vma->vm_pgoff = (pbm->mem_space.start +
- user_offset) >> PAGE_SHIFT;
+ if (mmap_state == pci_mmap_io) {
+ vma->vm_pgoff = (pbm->io_space.start +
+ user_offset) >> PAGE_SHIFT;
+ } else {
+ vma->vm_pgoff = (pbm->mem_space.start +
+ user_offset) >> PAGE_SHIFT;
+ }
}
return 0;
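In the same-domain case above, the mapping is sized against the combined I/O (or MEM) apertures of both PBMs and offset from the lowest base of the pair. A condensed, illustrative helper (not part of the patch) showing the lowest/highest computation; it assumes, as the code does, that an aperture is valid whenever its resource flags field is non-zero:

	#include <linux/ioport.h>

	/* extent covered by the union of two apertures */
	static unsigned long union_extent(struct resource *a, struct resource *b)
	{
		unsigned long lowest = ~0UL, highest = 0UL;

		if (a->flags) {
			lowest = a->start;
			highest = a->end + 1;
		}
		if (b->flags) {
			if (lowest > b->start)
				lowest = b->start;
			if (highest < b->end + 1)
				highest = b->end + 1;
		}
		return highest - lowest;	/* size spanned by both apertures */
	}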
-/* $Id: process.c,v 1.119 2001/09/07 21:04:40 kanoj Exp $
+/* $Id: process.c,v 1.120 2001/10/02 02:22:26 davem Exp $
* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: systbls.S,v 1.77 2000/08/22 10:09:10 jj Exp $
+/* $Id: systbls.S,v 1.78 2001/10/09 10:54:38 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
.word sys_nis_syscall, sys32_setreuid16, sys32_setregid16, sys_rename, sys_truncate
/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_mkdir, sys_rmdir, sys32_utimes, sys_stat64
-/*140*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_getrlimit
+/*140*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_gettid, sys32_getrlimit
.word sys32_setrlimit, sys_pivot_root, sys32_prctl, sys32_pciconfig_read, sys32_pciconfig_write
/*150*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
.word sys32_fcntl64, sys_nis_syscall, sys32_statfs, sys32_fstatfs, sys_oldumount
.word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
/*130*/ .word sys_ftruncate, sys_flock, sys_nis_syscall, sys_sendto, sys_shutdown
.word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_nis_syscall
-/*140*/ .word sys_nis_syscall, sys_getpeername, sys_nis_syscall, sys_nis_syscall, sys_getrlimit
+/*140*/ .word sys_nis_syscall, sys_getpeername, sys_nis_syscall, sys_gettid, sys_getrlimit
.word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .word sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
.word sys_nis_syscall, sys_nis_syscall, sys_statfs, sys_fstatfs, sys_oldumount
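All three tables above wire sys_gettid into slot 143 (previously sys_nis_syscall). A minimal userspace check, assuming the installed kernel headers export __NR_gettid for this slot; on a single-threaded task the value simply equals getpid():

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		long tid = syscall(__NR_gettid);	/* newly wired-up syscall */

		printf("tid=%ld pid=%ld\n", tid, (long) getpid());
		return 0;
	}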
* acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
* and other Tigon based cards.
*
- * Copyright 1998-2001 by Jes Sorensen, <jes@linuxcare.com>.
+ * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
*
* Thanks to Alteon and 3Com for providing hardware and documentation
* enabling me to write this driver.
#endif
+#ifndef MODULE_LICENSE
+#define MODULE_LICENSE(a)
+#endif
+
#ifndef wmb
#define wmb() mb()
#endif
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
-#if (BITS_PER_LONG == 64)
+#if (BITS_PER_LONG == 64) || defined(CONFIG_HIGHMEM)
#define ACE_64BIT_PTR 1
#endif
#define pci_set_dma_mask(dev, mask) dev->dma_mask = mask;
#endif
-
#if (LINUX_VERSION_CODE >= 0x02031b)
#define NEW_NETINIT
#define ACE_PROBE_ARG void
#define ACE_PROBE_ARG struct net_device *dev
#endif
+#ifndef min_t
+#define min_t(type,a,b) (((a)<(b))?(a):(b))
+#endif
+
+#ifndef ARCH_HAS_PREFETCHW
+#ifndef prefetchw
+#define prefetchw(x) {do{} while(0);}
+#endif
+#endif
+
#define ACE_MAX_MOD_PARMS 8
#define BOARD_IDX_STATIC 0
#define BOARD_IDX_OVERFLOW -1
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};
static char version[] __initdata =
- "acenic.c: v0.81 04/20/2001 Jes Sorensen, linux-acenic@SunSITE.dk\n"
+ "acenic.c: v0.83 09/30/2001 Jes Sorensen, linux-acenic@SunSITE.dk\n"
" http://home.cern.ch/~jes/gige/acenic.html\n";
static struct net_device *root_dev;
#ifdef MODULE
MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
-MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
-
+MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
MODULE_PARM(link, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(trace, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(tx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_tx_desc, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_coal_tick, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(max_rx_desc, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM_DESC(link, "Acenic/3C985/NetGear link state");
+MODULE_PARM_DESC(trace, "Acenic/3C985/NetGear firmware trace level");
+MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
+MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
+MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
+MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
#endif
struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
if (skb) {
-#ifndef DUMMY_PCI_UNMAP
dma_addr_t mapping;
mapping = ap->skb->rx_std_skbuff[i].mapping;
pci_unmap_single(ap->pdev, mapping,
ACE_STD_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
-#endif
ap->rx_std_ring[i].size = 0;
ap->skb->rx_std_skbuff[i].skb = NULL;
struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
if (skb) {
-#ifndef DUMMY_PCI_UNMAP
dma_addr_t mapping;
mapping = ap->skb->rx_mini_skbuff[i].mapping;
pci_unmap_single(ap->pdev, mapping,
ACE_MINI_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
-#endif
+
ap->rx_mini_ring[i].size = 0;
ap->skb->rx_mini_skbuff[i].skb = NULL;
dev_kfree_skb(skb);
for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
if (skb) {
-#ifndef DUMMY_PCI_UNMAP
dma_addr_t mapping;
mapping = ap->skb->rx_jumbo_skbuff[i].mapping;
pci_unmap_single(ap->pdev, mapping,
ACE_JUMBO_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
-#endif
ap->rx_jumbo_ring[i].size = 0;
ap->skb->rx_jumbo_skbuff[i].skb = NULL;
regs = ap->regs;
+ prefetchw(&ap->cur_rx_bufs);
+
idx = ap->rx_std_skbprd;
for (i = 0; i < nr_bufs; i++) {
ACE_STD_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_std_skbuff[idx].skb = skb;
-#ifndef DUMMY_PCI_UNMAP
ap->skb->rx_std_skbuff[idx].mapping = mapping;
-#endif
rd = &ap->rx_std_ring[idx];
set_aceaddr(&rd->addr, mapping);
regs = ap->regs;
+ prefetchw(&ap->cur_mini_bufs);
+
idx = ap->rx_mini_skbprd;
for (i = 0; i < nr_bufs; i++) {
struct sk_buff *skb;
ACE_MINI_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_mini_skbuff[idx].skb = skb;
-#ifndef DUMMY_PCI_UNMAP
ap->skb->rx_mini_skbuff[idx].mapping = mapping;
-#endif
rd = &ap->rx_mini_ring[idx];
set_aceaddr(&rd->addr, mapping);
ACE_JUMBO_BUFSIZE - (2 + 16),
PCI_DMA_FROMDEVICE);
ap->skb->rx_jumbo_skbuff[idx].skb = skb;
-#ifndef DUMMY_PCI_UNMAP
ap->skb->rx_jumbo_skbuff[idx].mapping = mapping;
-#endif
rd = &ap->rx_jumbo_ring[idx];
set_aceaddr(&rd->addr, mapping);
idx = rxretcsm;
+ prefetchw(&ap->cur_rx_bufs);
+ prefetchw(&ap->cur_mini_bufs);
+
while (idx != rxretprd) {
struct ring_info *rip;
struct sk_buff *skb;
skb = rip->skb;
rip->skb = NULL;
-#ifndef DUMMY_PCI_UNMAP
pci_unmap_single(ap->pdev, rip->mapping, mapsize,
PCI_DMA_FROMDEVICE);
-#endif
skb_put(skb, retdesc->size);
-#if 0
- /* unncessary */
- rxdesc->size = 0;
-#endif
/*
* Fly baby, fly!
do {
struct sk_buff *skb;
-#ifndef DUMMY_PCI_UNMAP
dma_addr_t mapping;
-#endif
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + idx;
skb = info->skb;
-#ifndef DUMMY_PCI_UNMAP
mapping = info->mapping;
if (mapping) {
PCI_DMA_TODEVICE);
info->mapping = 0;
}
-#endif
+
if (skb) {
ap->stats.tx_packets++;
ap->stats.tx_bytes += skb->len;
for (i = 0; i < TX_RING_ENTRIES; i++) {
struct sk_buff *skb;
-#ifndef DUMMY_PCI_UNMAP
dma_addr_t mapping;
-#endif
struct tx_ring_info *info;
info = ap->skb->tx_skbuff + i;
skb = info->skb;
-#ifndef DUMMY_PCI_UNMAP
mapping = info->mapping;
if (mapping) {
PCI_DMA_TODEVICE);
info->mapping = 0;
}
-#endif
+
if (skb) {
dev_kfree_skb(skb);
info->skb = NULL;
* For now, let it stay here.
*/
#if defined(CONFIG_HIGHMEM) && MAX_SKB_FRAGS
-#ifndef DUMMY_PCI_UNMAP
-#error Sorry, cannot DMA from high memory on this architecture.
-#endif
#if defined(CONFIG_X86)
#define DMAADDR_OFFSET 0
info = ap->skb->tx_skbuff + idx;
info->skb = tail;
-#ifndef DUMMY_PCI_UNMAP
info->mapping = addr;
info->maplen = skb->len;
-#endif
+
return addr;
}
} else {
info->skb = NULL;
}
-#ifndef DUMMY_PCI_UNMAP
info->mapping = phys;
info->maplen = frag->size;
-#endif
+
ace_load_tx_bd(desc, phys, flagsize);
}
}
while (size > 0) {
tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
- min_t(u32, size, ACE_WINDOW_SIZE));
+ min_t(u32, size, ACE_WINDOW_SIZE));
tdest = (unsigned long)®s->Window +
(dest & (ACE_WINDOW_SIZE - 1));
writel(dest & ~(ACE_WINDOW_SIZE - 1), ®s->WinBase);
aceaddr stats2_ptr;
};
-#if defined(CONFIG_X86) || defined(CONFIG_PPC)
-/* Intel has null pci_unmap_single, no reasons to remember mapping. */
-#define DUMMY_PCI_UNMAP
-#endif
struct ring_info {
struct sk_buff *skb;
-#ifndef DUMMY_PCI_UNMAP
dma_addr_t mapping;
-#endif
};
-/* Funny... As soon as we add maplen on alpha, it starts to work
+/*
+ * Funny... As soon as we add maplen on alpha, it starts to work
* much slower. Hmm... is it because struct does not fit to one cacheline?
* So, split tx_ring_info.
*/
struct tx_ring_info {
struct sk_buff *skb;
-#ifndef DUMMY_PCI_UNMAP
dma_addr_t mapping;
int maplen;
-#endif
};
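The comment above speculates about cache line fit; rough size arithmetic, assuming an alpha-like 64-bit layout (8-byte pointer, 8-byte dma_addr_t, 4-byte int, 8-byte alignment), gives:

	sizeof(struct ring_info)    = 8 + 8          = 16 bytes
	sizeof(struct tx_ring_info) = 8 + 8 + 4 (+4) = 24 bytes

so tx entries no longer divide a 32- or 64-byte cache line evenly once maplen is present, which is consistent with the slowdown the comment describes.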
/*
extern int awc_start_xmit(struct sk_buff *, struct net_device *);
extern void awc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
extern struct net_device_stats * awc_get_stats(struct net_device *dev);
-extern int awc_rx(struct net_device *dev, struct awc_fid * rx_fid);
extern void awc_set_multicast_list(struct net_device *dev);
extern int awc_change_mtu(struct net_device *dev, int new_mtu);
extern int awc_close(struct net_device *dev);
extern int at1700_probe(struct net_device *dev);
static int at1700_probe1(struct net_device *dev, int ioaddr);
-static int read_eeprom(int ioaddr, int location);
+static int read_eeprom(long ioaddr, int location);
static int net_open(struct net_device *dev);
static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
static void net_interrupt(int irq, void *dev_id, struct pt_regs *regs);
if ((lp->rba = (char *)
kmalloc(SONIC_NUM_RRS * SONIC_RBSIZE, GFP_KERNEL | GFP_DMA)) == NULL) {
printk(KERN_ERR "%s: couldn't allocate receive buffers\n", dev->name);
+ kfree(lp->sonic_desc);
+ lp->sonic_desc = NULL;
return -ENOMEM;
}
/* methinks this will always be true but better safe than sorry */
if (dev->priv == NULL) {
dev->priv = kmalloc(sizeof(struct sonic_local), GFP_KERNEL);
- if (!dev->priv) /* FIXME: kfree dev if necessary */
+ if (!dev->priv)
return -ENOMEM;
}
} else {
if (dev) {
dev = init_etherdev(dev, sizeof(struct sonic_local));
+ if (!dev)
+ return -ENOMEM;
/* methinks this will always be true but better safe than sorry */
- if (dev->priv == NULL)
+ if (dev->priv == NULL) {
dev->priv = kmalloc(sizeof(struct sonic_local), GFP_KERNEL);
+ if (!dev->priv) /* FIXME: kfree dev if necessary */
+ return -ENOMEM;
+ }
} else {
dev = init_etherdev(NULL, sizeof(struct sonic_local));
}
* <linux/string.h>
* <linux/errno.h>
* <linux/ioport.h>
- * <linux/malloc.h>
+ * <linux/slab.h>
* <linux/interrupt.h>
* <linux/pci.h>
* <asm/byteorder.h>
*/
static int sonic_open(struct net_device *dev)
{
- if (sonic_debug > 2)
- printk("sonic_open: initializing sonic driver.\n");
-
- /*
- * We don't need to deal with auto-irq stuff since we
- * hardwire the sonic interrupt.
- */
+ if (sonic_debug > 2)
+ printk("sonic_open: initializing sonic driver.\n");
+
+ /*
+ * We don't need to deal with auto-irq stuff since we
+ * hardwire the sonic interrupt.
+ */
/*
* XXX Horrible work around: We install sonic_interrupt as fast interrupt.
* This means that during execution of the handler interrupt are disabled
* this glue works ok under all situations.
*/
// if (sonic_request_irq(dev->irq, &sonic_interrupt, 0, "sonic", dev)) {
- if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT, "sonic", dev)) {
- printk ("\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
- return -EAGAIN;
- }
-
- /*
- * Initialize the SONIC
- */
- sonic_init(dev);
-
- dev->tbusy = 0;
- dev->interrupt = 0;
- dev->start = 1;
-
- if (sonic_debug > 2)
- printk("sonic_open: Initialization done.\n");
-
- return 0;
+ if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT,
+ "sonic", dev)) {
+ printk("\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+
+ /*
+ * Initialize the SONIC
+ */
+ sonic_init(dev);
+
+ netif_start_queue(dev);
+
+ if (sonic_debug > 2)
+ printk("sonic_open: Initialization done.\n");
+
+ return 0;
}
/*
* Close the SONIC device
*/
-static int
-sonic_close(struct net_device *dev)
+static int sonic_close(struct net_device *dev)
{
- unsigned int base_addr = dev->base_addr;
-
- if (sonic_debug > 2)
- printk ("sonic_close\n");
-
- dev->tbusy = 1;
- dev->start = 0;
-
- /*
- * stop the SONIC, disable interrupts
- */
- SONIC_WRITE(SONIC_ISR,0x7fff);
- SONIC_WRITE(SONIC_IMR,0);
- SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
-
- sonic_free_irq(dev->irq, dev); /* release the IRQ */
-
- return 0;
+ unsigned int base_addr = dev->base_addr;
+
+ if (sonic_debug > 2)
+ printk("sonic_close\n");
+
+ netif_stop_queue(dev);
+
+ /*
+ * stop the SONIC, disable interrupts
+ */
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+
+ sonic_free_irq(dev->irq, dev); /* release the IRQ */
+
+ return 0;
}
+static void sonic_tx_timeout(struct net_device *dev)
+{
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ printk("%s: transmit timed out.\n", dev->name);
+
+ /* Try to restart the adaptor. */
+ sonic_init(dev);
+ lp->stats.tx_errors++;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
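These sonic.c hunks replace the old dev->tbusy/dev->start handling with the 2.4 netif queue calls, and the timeout logic moves out of sonic_send_packet() into the sonic_tx_timeout() handler above. For that handler to run, the probe code (not shown in this excerpt) is expected to hook it into the netdev watchdog, roughly as follows (the timeout value is illustrative):

	dev->tx_timeout = sonic_tx_timeout;
	dev->watchdog_timeo = 3 * HZ;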
/*
* transmit packet
*/
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
- struct sonic_local *lp = (struct sonic_local *)dev->priv;
- unsigned int base_addr = dev->base_addr;
- unsigned int laddr;
- int entry,length;
-
- if (sonic_debug > 2)
- printk("sonic_send_packet: skb=%p, dev=%p\n",skb,dev);
-
- if (dev->tbusy) {
- int tickssofar = jiffies - dev->trans_start;
-
- /* If we get here, some higher level has decided we are broken.
- There should really be a "kick me" function call instead. */
-
- if (sonic_debug > 1)
- printk("sonic_send_packet: called with dev->tbusy = 1 !\n");
-
- if (tickssofar < 5)
- return 1;
-
- printk("%s: transmit timed out.\n", dev->name);
-
- /* Try to restart the adaptor. */
- sonic_init(dev);
- dev->tbusy=0;
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ unsigned int base_addr = dev->base_addr;
+ unsigned int laddr;
+ int entry, length;
+
+ netif_stop_queue(dev);
+
+ if (sonic_debug > 2)
+ printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);
+
+ /*
+ * Block a timer-based transmit from overlapping. This could better be
+ * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
+ */
+ if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
+ printk("%s: Transmitter access conflict.\n", dev->name);
+ return 1;
+ }
+
+ /*
+ * Map the packet data into the logical DMA address space
+ */
+ if ((laddr = vdma_alloc(PHYSADDR(skb->data), skb->len)) == ~0UL) {
+ printk("%s: no VDMA entry for transmit available.\n",
+ dev->name);
+ dev_kfree_skb(skb);
+ netif_start_queue(dev);
+ return 1;
+ }
+ entry = lp->cur_tx & SONIC_TDS_MASK;
+ lp->tx_laddr[entry] = laddr;
+ lp->tx_skb[entry] = skb;
+
+ length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+ flush_cache_all();
+
+ /*
+ * Setup the transmit descriptor and issue the transmit command.
+ */
+ lp->tda[entry].tx_status = 0; /* clear status */
+ lp->tda[entry].tx_frag_count = 1; /* single fragment */
+ lp->tda[entry].tx_pktsize = length; /* length of packet */
+ lp->tda[entry].tx_frag_ptr_l = laddr & 0xffff;
+ lp->tda[entry].tx_frag_ptr_h = laddr >> 16;
+ lp->tda[entry].tx_frag_size = length;
+ lp->cur_tx++;
+ lp->stats.tx_bytes += length;
+
+ if (sonic_debug > 2)
+		printk("sonic_send_packet: issuing Tx command\n");
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+
dev->trans_start = jiffies;
- }
-
- /*
- * Block a timer-based transmit from overlapping. This could better be
- * done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
- */
- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
- printk("%s: Transmitter access conflict.\n", dev->name);
- return 1;
- }
-
- /*
- * Map the packet data into the logical DMA address space
- */
- if ((laddr = vdma_alloc(PHYSADDR(skb->data),skb->len)) == ~0UL) {
- printk("%s: no VDMA entry for transmit available.\n",dev->name);
- dev_kfree_skb(skb);
- dev->tbusy = 0;
- return 1;
- }
- entry = lp->cur_tx & SONIC_TDS_MASK;
- lp->tx_laddr[entry] = laddr;
- lp->tx_skb[entry] = skb;
-
- length = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
- flush_cache_all();
-
- /*
- * Setup the transmit descriptor and issue the transmit command.
- */
- lp->tda[entry].tx_status = 0; /* clear status */
- lp->tda[entry].tx_frag_count = 1; /* single fragment */
- lp->tda[entry].tx_pktsize = length; /* length of packet */
- lp->tda[entry].tx_frag_ptr_l = laddr & 0xffff;
- lp->tda[entry].tx_frag_ptr_h = laddr >> 16;
- lp->tda[entry].tx_frag_size = length;
- lp->cur_tx++;
- lp->stats.tx_bytes += length;
-
- if (sonic_debug > 2)
- printk("sonic_send_packet: issueing Tx command\n");
-
- SONIC_WRITE(SONIC_CMD,SONIC_CR_TXP);
-
- dev->trans_start = jiffies;
-
- if (lp->cur_tx < lp->dirty_tx + SONIC_NUM_TDS)
- dev->tbusy = 0;
- else
- lp->tx_full = 1;
-
- return 0;
+
+ if (lp->cur_tx < lp->dirty_tx + SONIC_NUM_TDS)
+ netif_start_queue(dev);
+ else
+ lp->tx_full = 1;
+
+ return 0;
}
-\f
/*
* The typical workload of the driver:
* Handle the network interface interrupts.
*/
-static void
-sonic_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+static void sonic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- struct net_device *dev = (struct net_device *)dev_id;
- unsigned int base_addr = dev->base_addr;
- struct sonic_local *lp;
- int status;
-
- if (dev == NULL) {
- printk ("sonic_interrupt: irq %d for unknown device.\n", irq);
- return;
- }
- dev->interrupt = 1;
- lp = (struct sonic_local *)dev->priv;
-
- status = SONIC_READ(SONIC_ISR);
- SONIC_WRITE(SONIC_ISR,0x7fff); /* clear all bits */
-
- if (sonic_debug > 2)
- printk("sonic_interrupt: ISR=%x\n",status);
-
- if (status & SONIC_INT_PKTRX) {
- sonic_rx(dev); /* got packet(s) */
- }
-
- if (status & SONIC_INT_TXDN) {
- int dirty_tx = lp->dirty_tx;
-
- while (dirty_tx < lp->cur_tx) {
- int entry = dirty_tx & SONIC_TDS_MASK;
- int status = lp->tda[entry].tx_status;
-
- if (sonic_debug > 3)
- printk ("sonic_interrupt: status %d, cur_tx %d, dirty_tx %d\n",
- status,lp->cur_tx,lp->dirty_tx);
-
- if (status == 0) {
- /* It still hasn't been Txed, kick the sonic again */
- SONIC_WRITE(SONIC_CMD,SONIC_CR_TXP);
- break;
- }
-
- /* put back EOL and free descriptor */
- lp->tda[entry].tx_frag_count = 0;
- lp->tda[entry].tx_status = 0;
-
- if (status & 0x0001)
- lp->stats.tx_packets++;
- else {
- lp->stats.tx_errors++;
- if (status & 0x0642) lp->stats.tx_aborted_errors++;
- if (status & 0x0180) lp->stats.tx_carrier_errors++;
- if (status & 0x0020) lp->stats.tx_window_errors++;
- if (status & 0x0004) lp->stats.tx_fifo_errors++;
- }
-
- /* We must free the original skb */
- if (lp->tx_skb[entry]) {
- dev_kfree_skb(lp->tx_skb[entry]);
- lp->tx_skb[entry] = 0;
- }
- /* and the VDMA address */
- vdma_free(lp->tx_laddr[entry]);
- dirty_tx++;
+ struct net_device *dev = (struct net_device *) dev_id;
+ unsigned int base_addr = dev->base_addr;
+ struct sonic_local *lp;
+ int status;
+
+ if (dev == NULL) {
+ printk("sonic_interrupt: irq %d for unknown device.\n", irq);
+ return;
+ }
+
+ lp = (struct sonic_local *) dev->priv;
+
+ status = SONIC_READ(SONIC_ISR);
+ SONIC_WRITE(SONIC_ISR, 0x7fff); /* clear all bits */
+
+ if (sonic_debug > 2)
+ printk("sonic_interrupt: ISR=%x\n", status);
+
+ if (status & SONIC_INT_PKTRX) {
+ sonic_rx(dev); /* got packet(s) */
}
-
- if (lp->tx_full && dev->tbusy
- && dirty_tx + SONIC_NUM_TDS > lp->cur_tx + 2) {
- /* The ring is no longer full, clear tbusy. */
- lp->tx_full = 0;
- dev->tbusy = 0;
- mark_bh(NET_BH);
+
+ if (status & SONIC_INT_TXDN) {
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & SONIC_TDS_MASK;
+ int status = lp->tda[entry].tx_status;
+
+ if (sonic_debug > 3)
+ printk
+ ("sonic_interrupt: status %d, cur_tx %d, dirty_tx %d\n",
+ status, lp->cur_tx, lp->dirty_tx);
+
+ if (status == 0) {
+ /* It still hasn't been Txed, kick the sonic again */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+ break;
+ }
+
+ /* put back EOL and free descriptor */
+ lp->tda[entry].tx_frag_count = 0;
+ lp->tda[entry].tx_status = 0;
+
+ if (status & 0x0001)
+ lp->stats.tx_packets++;
+ else {
+ lp->stats.tx_errors++;
+ if (status & 0x0642)
+ lp->stats.tx_aborted_errors++;
+ if (status & 0x0180)
+ lp->stats.tx_carrier_errors++;
+ if (status & 0x0020)
+ lp->stats.tx_window_errors++;
+ if (status & 0x0004)
+ lp->stats.tx_fifo_errors++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skb[entry]) {
+ dev_kfree_skb(lp->tx_skb[entry]);
+ lp->tx_skb[entry] = 0;
+ }
+ /* and the VDMA address */
+ vdma_free(lp->tx_laddr[entry]);
+ dirty_tx++;
+ }
+
+ if (lp->tx_full
+ && dirty_tx + SONIC_NUM_TDS > lp->cur_tx + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /*
+ * check error conditions
+ */
+ if (status & SONIC_INT_RFO) {
+ printk("%s: receive fifo underrun\n", dev->name);
+ lp->stats.rx_fifo_errors++;
+ }
+ if (status & SONIC_INT_RDE) {
+ printk("%s: receive descriptors exhausted\n", dev->name);
+ lp->stats.rx_dropped++;
+ }
+ if (status & SONIC_INT_RBE) {
+ printk("%s: receive buffer exhausted\n", dev->name);
+ lp->stats.rx_dropped++;
+ }
+ if (status & SONIC_INT_RBAE) {
+ printk("%s: receive buffer area exhausted\n", dev->name);
+ lp->stats.rx_dropped++;
}
-
- lp->dirty_tx = dirty_tx;
- }
-
- /*
- * check error conditions
- */
- if (status & SONIC_INT_RFO) {
- printk ("%s: receive fifo underrun\n",dev->name);
- lp->stats.rx_fifo_errors++;
- }
- if (status & SONIC_INT_RDE) {
- printk ("%s: receive descriptors exhausted\n",dev->name);
- lp->stats.rx_dropped++;
- }
- if (status & SONIC_INT_RBE) {
- printk ("%s: receive buffer exhausted\n",dev->name);
- lp->stats.rx_dropped++;
- }
- if (status & SONIC_INT_RBAE) {
- printk ("%s: receive buffer area exhausted\n",dev->name);
- lp->stats.rx_dropped++;
- }
-
- /* counter overruns; all counters are 16bit wide */
- if (status & SONIC_INT_FAE)
- lp->stats.rx_frame_errors += 65536;
- if (status & SONIC_INT_CRC)
- lp->stats.rx_crc_errors += 65536;
- if (status & SONIC_INT_MP)
- lp->stats.rx_missed_errors += 65536;
-
- /* transmit error */
- if (status & SONIC_INT_TXER)
- lp->stats.tx_errors++;
-
- /*
- * clear interrupt bits and return
- */
- SONIC_WRITE(SONIC_ISR,status);
- dev->interrupt = 0;
- return;
+
+ /* counter overruns; all counters are 16bit wide */
+ if (status & SONIC_INT_FAE)
+ lp->stats.rx_frame_errors += 65536;
+ if (status & SONIC_INT_CRC)
+ lp->stats.rx_crc_errors += 65536;
+ if (status & SONIC_INT_MP)
+ lp->stats.rx_missed_errors += 65536;
+
+ /* transmit error */
+ if (status & SONIC_INT_TXER)
+ lp->stats.tx_errors++;
+
+ /*
+ * clear interrupt bits and return
+ */
+ SONIC_WRITE(SONIC_ISR, status);
}
/*
* We have a good packet(s), get it/them out of the buffers.
*/
-static void
-sonic_rx(struct net_device *dev)
+static void sonic_rx(struct net_device *dev)
{
- unsigned int base_addr = dev->base_addr;
- struct sonic_local *lp = (struct sonic_local *)dev->priv;
- sonic_rd_t *rd = &lp->rda[lp->cur_rx & SONIC_RDS_MASK];
- int status;
-
- while (rd->in_use == 0) {
- struct sk_buff *skb;
- int pkt_len;
- unsigned char *pkt_ptr;
-
- status = rd->rx_status;
- if (sonic_debug > 3)
- printk ("status %x, cur_rx %d, cur_rra %x\n",status,lp->cur_rx,lp->cur_rra);
- if (status & SONIC_RCR_PRX) {
- pkt_len = rd->rx_pktlen;
- pkt_ptr = (char *)sonic_chiptomem((rd->rx_pktptr_h << 16) +
- rd->rx_pktptr_l);
-
- if (sonic_debug > 3)
- printk ("pktptr %p (rba %p) h:%x l:%x, bsize h:%x l:%x\n", pkt_ptr,lp->rba,
- rd->rx_pktptr_h,rd->rx_pktptr_l,
- SONIC_READ(SONIC_RBWC1),SONIC_READ(SONIC_RBWC0));
-
- /* Malloc up new buffer. */
- skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet.\n", dev->name);
- lp->stats.rx_dropped++;
- break;
- }
- skb->dev = dev;
- skb_reserve(skb,2); /* 16 byte align */
- skb_put(skb,pkt_len); /* Make room */
- eth_copy_and_sum(skb, pkt_ptr, pkt_len, 0);
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb); /* pass the packet to upper layers */
- dev->last_rx = jiffies;
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
-
- } else {
- /* This should only happen, if we enable accepting broken packets. */
- lp->stats.rx_errors++;
- if (status & SONIC_RCR_FAER) lp->stats.rx_frame_errors++;
- if (status & SONIC_RCR_CRCR) lp->stats.rx_crc_errors++;
+ unsigned int base_addr = dev->base_addr;
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ sonic_rd_t *rd = &lp->rda[lp->cur_rx & SONIC_RDS_MASK];
+ int status;
+
+ while (rd->in_use == 0) {
+ struct sk_buff *skb;
+ int pkt_len;
+ unsigned char *pkt_ptr;
+
+ status = rd->rx_status;
+ if (sonic_debug > 3)
+ printk("status %x, cur_rx %d, cur_rra %x\n",
+ status, lp->cur_rx, lp->cur_rra);
+ if (status & SONIC_RCR_PRX) {
+ pkt_len = rd->rx_pktlen;
+ pkt_ptr =
+ (char *)
+ sonic_chiptomem((rd->rx_pktptr_h << 16) +
+ rd->rx_pktptr_l);
+
+ if (sonic_debug > 3)
+ printk
+ ("pktptr %p (rba %p) h:%x l:%x, bsize h:%x l:%x\n",
+ pkt_ptr, lp->rba, rd->rx_pktptr_h,
+ rd->rx_pktptr_l,
+ SONIC_READ(SONIC_RBWC1),
+ SONIC_READ(SONIC_RBWC0));
+
+ /* Malloc up new buffer. */
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ printk
+ ("%s: Memory squeeze, dropping packet.\n",
+ dev->name);
+ lp->stats.rx_dropped++;
+ break;
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, pkt_len); /* Make room */
+ eth_copy_and_sum(skb, pkt_ptr, pkt_len, 0);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb); /* pass the packet to upper layers */
+ dev->last_rx = jiffies;
+ lp->stats.rx_packets++;
+ lp->stats.rx_bytes += pkt_len;
+
+ } else {
+ /* This should only happen, if we enable accepting broken packets. */
+ lp->stats.rx_errors++;
+ if (status & SONIC_RCR_FAER)
+ lp->stats.rx_frame_errors++;
+ if (status & SONIC_RCR_CRCR)
+ lp->stats.rx_crc_errors++;
+ }
+
+ rd->in_use = 1;
+ rd = &lp->rda[(++lp->cur_rx) & SONIC_RDS_MASK];
+ /* now give back the buffer to the receive buffer area */
+ if (status & SONIC_RCR_LPKT) {
+ /*
+			 * this was the last packet out of the current receive buffer
+ * give the buffer back to the SONIC
+ */
+ lp->cur_rra += sizeof(sonic_rr_t);
+ if (lp->cur_rra >
+ (lp->rra_laddr +
+ (SONIC_NUM_RRS -
+ 1) * sizeof(sonic_rr_t))) lp->cur_rra =
+ lp->rra_laddr;
+ SONIC_WRITE(SONIC_RWP, lp->cur_rra & 0xffff);
+ } else
+ printk
+ ("%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
+ dev->name);
}
-
- rd->in_use = 1;
- rd = &lp->rda[(++lp->cur_rx) & SONIC_RDS_MASK];
- /* now give back the buffer to the receive buffer area */
- if (status & SONIC_RCR_LPKT) {
- /*
- * this was the last packet out of the current receice buffer
- * give the buffer back to the SONIC
- */
- lp->cur_rra += sizeof(sonic_rr_t);
- if (lp->cur_rra > (lp->rra_laddr + (SONIC_NUM_RRS-1) * sizeof(sonic_rr_t)))
- lp->cur_rra = lp->rra_laddr;
- SONIC_WRITE(SONIC_RWP, lp->cur_rra & 0xffff);
- } else
- printk ("%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",dev->name);
- }
- /*
- * If any worth-while packets have been received, dev_rint()
- * has done a mark_bh(NET_BH) for us and will work on them
- * when we get to the bottom-half routine.
- */
- return;
+ /*
+ * If any worth-while packets have been received, dev_rint()
+ * has done a mark_bh(NET_BH) for us and will work on them
+ * when we get to the bottom-half routine.
+ */
}
* Get the current statistics.
* This may be called with the device open or closed.
*/
-static struct net_device_stats *
-sonic_get_stats(struct net_device *dev)
+static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
- struct sonic_local *lp = (struct sonic_local *)dev->priv;
- unsigned int base_addr = dev->base_addr;
-
- /* read the tally counter from the SONIC and reset them */
- lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
- SONIC_WRITE(SONIC_CRCT,0xffff);
- lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
- SONIC_WRITE(SONIC_FAET,0xffff);
- lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
- SONIC_WRITE(SONIC_MPT,0xffff);
-
- return &lp->stats;
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ unsigned int base_addr = dev->base_addr;
+
+ /* read the tally counter from the SONIC and reset them */
+ lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
+ SONIC_WRITE(SONIC_CRCT, 0xffff);
+ lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
+ SONIC_WRITE(SONIC_FAET, 0xffff);
+ lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
+ SONIC_WRITE(SONIC_MPT, 0xffff);
+
+ return &lp->stats;
}
/*
* Set or clear the multicast filter for this adaptor.
*/
-static void
-sonic_multicast_list(struct net_device *dev)
+static void sonic_multicast_list(struct net_device *dev)
{
- struct sonic_local *lp = (struct sonic_local *)dev->priv;
- unsigned int base_addr = dev->base_addr;
- unsigned int rcr;
- struct dev_mc_list *dmi = dev->mc_list;
- unsigned char *addr;
- int i;
-
- rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
- rcr |= SONIC_RCR_BRD; /* accept broadcast packets */
-
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- rcr |= SONIC_RCR_PRO;
- } else {
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
- rcr |= SONIC_RCR_AMC;
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ unsigned int base_addr = dev->base_addr;
+ unsigned int rcr;
+ struct dev_mc_list *dmi = dev->mc_list;
+ unsigned char *addr;
+ int i;
+
+ rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
+ rcr |= SONIC_RCR_BRD; /* accept broadcast packets */
+
+ if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
+ rcr |= SONIC_RCR_PRO;
} else {
- if (sonic_debug > 2)
- printk ("sonic_multicast_list: mc_count %d\n",dev->mc_count);
- lp->cda.cam_enable = 1; /* always enable our own address */
- for (i = 1; i <= dev->mc_count; i++) {
- addr = dmi->dmi_addr;
- dmi = dmi->next;
- lp->cda.cam_desc[i].cam_cap0 = addr[1] << 8 | addr[0];
- lp->cda.cam_desc[i].cam_cap1 = addr[3] << 8 | addr[2];
- lp->cda.cam_desc[i].cam_cap2 = addr[5] << 8 | addr[4];
- lp->cda.cam_enable |= (1 << i);
- }
- SONIC_WRITE(SONIC_CDC,16);
- /* issue Load CAM command */
- SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
- SONIC_WRITE(SONIC_CMD,SONIC_CR_LCAM);
+ if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
+ rcr |= SONIC_RCR_AMC;
+ } else {
+ if (sonic_debug > 2)
+ printk
+ ("sonic_multicast_list: mc_count %d\n",
+ dev->mc_count);
+ lp->cda.cam_enable = 1; /* always enable our own address */
+ for (i = 1; i <= dev->mc_count; i++) {
+ addr = dmi->dmi_addr;
+ dmi = dmi->next;
+ lp->cda.cam_desc[i].cam_cap0 =
+ addr[1] << 8 | addr[0];
+ lp->cda.cam_desc[i].cam_cap1 =
+ addr[3] << 8 | addr[2];
+ lp->cda.cam_desc[i].cam_cap2 =
+ addr[5] << 8 | addr[4];
+ lp->cda.cam_enable |= (1 << i);
+ }
+ SONIC_WRITE(SONIC_CDC, 16);
+ /* issue Load CAM command */
+ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+ }
}
- }
-
- if (sonic_debug > 2)
- printk("sonic_multicast_list: setting RCR=%x\n",rcr);
-
- SONIC_WRITE(SONIC_RCR,rcr);
+
+ if (sonic_debug > 2)
+ printk("sonic_multicast_list: setting RCR=%x\n", rcr);
+
+ SONIC_WRITE(SONIC_RCR, rcr);
}
*/
static int sonic_init(struct net_device *dev)
{
- unsigned int base_addr = dev->base_addr;
- unsigned int cmd;
- struct sonic_local *lp = (struct sonic_local *)dev->priv;
- unsigned int rra_start;
- unsigned int rra_end;
- int i;
-
- /*
- * put the Sonic into software-reset mode and
- * disable all interrupts
- */
- SONIC_WRITE(SONIC_ISR,0x7fff);
- SONIC_WRITE(SONIC_IMR,0);
- SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
-
- /*
- * clear software reset flag, disable receiver, clear and
- * enable interrupts, then completely initialize the SONIC
- */
- SONIC_WRITE(SONIC_CMD,0);
- SONIC_WRITE(SONIC_CMD,SONIC_CR_RXDIS);
-
- /*
- * initialize the receive resource area
- */
- if (sonic_debug > 2)
- printk ("sonic_init: initialize receive resource area\n");
-
- rra_start = lp->rra_laddr & 0xffff;
- rra_end = (rra_start + (SONIC_NUM_RRS * sizeof(sonic_rr_t))) & 0xffff;
-
- for (i = 0; i < SONIC_NUM_RRS; i++) {
- lp->rra[i].rx_bufadr_l = (lp->rba_laddr + i * SONIC_RBSIZE) & 0xffff;
- lp->rra[i].rx_bufadr_h = (lp->rba_laddr + i * SONIC_RBSIZE) >> 16;
- lp->rra[i].rx_bufsize_l = SONIC_RBSIZE >> 1;
- lp->rra[i].rx_bufsize_h = 0;
- }
-
- /* initialize all RRA registers */
- SONIC_WRITE(SONIC_RSA,rra_start);
- SONIC_WRITE(SONIC_REA,rra_end);
- SONIC_WRITE(SONIC_RRP,rra_start);
- SONIC_WRITE(SONIC_RWP,rra_end);
- SONIC_WRITE(SONIC_URRA,lp->rra_laddr >> 16);
- SONIC_WRITE(SONIC_EOBC,(SONIC_RBSIZE-2) >> 1);
-
- lp->cur_rra = lp->rra_laddr + (SONIC_NUM_RRS-1) * sizeof(sonic_rr_t);
-
- /* load the resource pointers */
- if (sonic_debug > 3)
- printk("sonic_init: issueing RRRA command\n");
-
- SONIC_WRITE(SONIC_CMD,SONIC_CR_RRRA);
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
- break;
- }
-
- if (sonic_debug > 2)
- printk("sonic_init: status=%x\n",SONIC_READ(SONIC_CMD));
-
- /*
- * Initialize the receive descriptors so that they
- * become a circular linked list, ie. let the last
- * descriptor point to the first again.
- */
- if (sonic_debug > 2)
- printk ("sonic_init: initialize receive descriptors\n");
- for (i=0; i<SONIC_NUM_RDS; i++) {
- lp->rda[i].rx_status = 0;
- lp->rda[i].rx_pktlen = 0;
- lp->rda[i].rx_pktptr_l = 0;
- lp->rda[i].rx_pktptr_h = 0;
- lp->rda[i].rx_seqno = 0;
- lp->rda[i].in_use = 1;
- lp->rda[i].link = lp->rda_laddr + (i+1) * sizeof (sonic_rd_t);
- }
- /* fix last descriptor */
- lp->rda[SONIC_NUM_RDS-1].link = lp->rda_laddr;
- lp->cur_rx = 0;
- SONIC_WRITE(SONIC_URDA,lp->rda_laddr >> 16);
- SONIC_WRITE(SONIC_CRDA,lp->rda_laddr & 0xffff);
-
- /*
- * initialize transmit descriptors
- */
- if (sonic_debug > 2)
- printk ("sonic_init: initialize transmit descriptors\n");
- for (i = 0; i < SONIC_NUM_TDS; i++) {
- lp->tda[i].tx_status = 0;
- lp->tda[i].tx_config = 0;
- lp->tda[i].tx_pktsize = 0;
- lp->tda[i].tx_frag_count = 0;
- lp->tda[i].link = (lp->tda_laddr + (i+1) * sizeof (sonic_td_t)) | SONIC_END_OF_LINKS;
- }
- lp->tda[SONIC_NUM_TDS-1].link = (lp->tda_laddr & 0xffff) | SONIC_END_OF_LINKS;
-
- SONIC_WRITE(SONIC_UTDA,lp->tda_laddr >> 16);
- SONIC_WRITE(SONIC_CTDA,lp->tda_laddr & 0xffff);
- lp->cur_tx = lp->dirty_tx = 0;
-
- /*
- * put our own address to CAM desc[0]
- */
- lp->cda.cam_desc[0].cam_cap0 = dev->dev_addr[1] << 8 | dev->dev_addr[0];
- lp->cda.cam_desc[0].cam_cap1 = dev->dev_addr[3] << 8 | dev->dev_addr[2];
- lp->cda.cam_desc[0].cam_cap2 = dev->dev_addr[5] << 8 | dev->dev_addr[4];
- lp->cda.cam_enable = 1;
-
- for (i=0; i < 16; i++)
- lp->cda.cam_desc[i].cam_entry_pointer = i;
-
- /*
- * initialize CAM registers
- */
- SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
- SONIC_WRITE(SONIC_CDC,16);
-
- /*
- * load the CAM
- */
- SONIC_WRITE(SONIC_CMD,SONIC_CR_LCAM);
-
- i = 0;
- while (i++ < 100) {
- if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
- break;
- }
- if (sonic_debug > 2) {
- printk("sonic_init: CMD=%x, ISR=%x\n",
- SONIC_READ(SONIC_CMD),
- SONIC_READ(SONIC_ISR));
- }
-
- /*
- * enable receiver, disable loopback
- * and enable all interrupts
- */
- SONIC_WRITE(SONIC_CMD,SONIC_CR_RXEN | SONIC_CR_STP);
- SONIC_WRITE(SONIC_RCR,SONIC_RCR_DEFAULT);
- SONIC_WRITE(SONIC_TCR,SONIC_TCR_DEFAULT);
- SONIC_WRITE(SONIC_ISR,0x7fff);
- SONIC_WRITE(SONIC_IMR,SONIC_IMR_DEFAULT);
-
- cmd = SONIC_READ(SONIC_CMD);
- if ((cmd & SONIC_CR_RXEN) == 0 ||
- (cmd & SONIC_CR_STP) == 0)
- printk("sonic_init: failed, status=%x\n",cmd);
-
- if (sonic_debug > 2)
- printk("sonic_init: new status=%x\n",SONIC_READ(SONIC_CMD));
-
- return(0);
-}
+ unsigned int base_addr = dev->base_addr;
+ unsigned int cmd;
+ struct sonic_local *lp = (struct sonic_local *) dev->priv;
+ unsigned int rra_start;
+ unsigned int rra_end;
+ int i;
+
+ /*
+ * put the Sonic into software-reset mode and
+ * disable all interrupts
+ */
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_IMR, 0);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
+
+ /*
+ * clear software reset flag, disable receiver, clear and
+ * enable interrupts, then completely initialize the SONIC
+ */
+ SONIC_WRITE(SONIC_CMD, 0);
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+
+ /*
+ * initialize the receive resource area
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize receive resource area\n");
+
+ rra_start = lp->rra_laddr & 0xffff;
+ rra_end =
+ (rra_start + (SONIC_NUM_RRS * sizeof(sonic_rr_t))) & 0xffff;
+
+ for (i = 0; i < SONIC_NUM_RRS; i++) {
+ lp->rra[i].rx_bufadr_l =
+ (lp->rba_laddr + i * SONIC_RBSIZE) & 0xffff;
+ lp->rra[i].rx_bufadr_h =
+ (lp->rba_laddr + i * SONIC_RBSIZE) >> 16;
+ lp->rra[i].rx_bufsize_l = SONIC_RBSIZE >> 1;
+ lp->rra[i].rx_bufsize_h = 0;
+ }
-\f
-/*
- * Local variables:
- * compile-command: "mipsel-linux-gcc -D__KERNEL__ -D__mips64 -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O2 -mcpu=r4000 -c sonic.c"
- * version-control: t
- * kept-new-versions: 5
- * tab-width: 4
- * End:
- */
+ /* initialize all RRA registers */
+ SONIC_WRITE(SONIC_RSA, rra_start);
+ SONIC_WRITE(SONIC_REA, rra_end);
+ SONIC_WRITE(SONIC_RRP, rra_start);
+ SONIC_WRITE(SONIC_RWP, rra_end);
+ SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
+ SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE - 2) >> 1);
+
+ lp->cur_rra =
+ lp->rra_laddr + (SONIC_NUM_RRS - 1) * sizeof(sonic_rr_t);
+
+ /* load the resource pointers */
+ if (sonic_debug > 3)
+		printk("sonic_init: issuing RRRA command\n");
+
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
+ i = 0;
+ while (i++ < 100) {
+ if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
+ break;
+ }
+
+ if (sonic_debug > 2)
+ printk("sonic_init: status=%x\n", SONIC_READ(SONIC_CMD));
+
+ /*
+ * Initialize the receive descriptors so that they
+ * become a circular linked list, ie. let the last
+ * descriptor point to the first again.
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize receive descriptors\n");
+ for (i = 0; i < SONIC_NUM_RDS; i++) {
+ lp->rda[i].rx_status = 0;
+ lp->rda[i].rx_pktlen = 0;
+ lp->rda[i].rx_pktptr_l = 0;
+ lp->rda[i].rx_pktptr_h = 0;
+ lp->rda[i].rx_seqno = 0;
+ lp->rda[i].in_use = 1;
+ lp->rda[i].link =
+ lp->rda_laddr + (i + 1) * sizeof(sonic_rd_t);
+ }
+ /* fix last descriptor */
+ lp->rda[SONIC_NUM_RDS - 1].link = lp->rda_laddr;
+ lp->cur_rx = 0;
+ SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
+ SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
+
+ /*
+ * initialize transmit descriptors
+ */
+ if (sonic_debug > 2)
+ printk("sonic_init: initialize transmit descriptors\n");
+ for (i = 0; i < SONIC_NUM_TDS; i++) {
+ lp->tda[i].tx_status = 0;
+ lp->tda[i].tx_config = 0;
+ lp->tda[i].tx_pktsize = 0;
+ lp->tda[i].tx_frag_count = 0;
+ lp->tda[i].link =
+ (lp->tda_laddr +
+ (i + 1) * sizeof(sonic_td_t)) | SONIC_END_OF_LINKS;
+ }
+ lp->tda[SONIC_NUM_TDS - 1].link =
+ (lp->tda_laddr & 0xffff) | SONIC_END_OF_LINKS;
+
+ SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
+ SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
+ lp->cur_tx = lp->dirty_tx = 0;
+
+ /*
+ * put our own address to CAM desc[0]
+ */
+ lp->cda.cam_desc[0].cam_cap0 =
+ dev->dev_addr[1] << 8 | dev->dev_addr[0];
+ lp->cda.cam_desc[0].cam_cap1 =
+ dev->dev_addr[3] << 8 | dev->dev_addr[2];
+ lp->cda.cam_desc[0].cam_cap2 =
+ dev->dev_addr[5] << 8 | dev->dev_addr[4];
+ lp->cda.cam_enable = 1;
+
+ for (i = 0; i < 16; i++)
+ lp->cda.cam_desc[i].cam_entry_pointer = i;
+
+ /*
+ * initialize CAM registers
+ */
+ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+ SONIC_WRITE(SONIC_CDC, 16);
+
+ /*
+ * load the CAM
+ */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+
+ i = 0;
+ while (i++ < 100) {
+ if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
+ break;
+ }
+ if (sonic_debug > 2) {
+ printk("sonic_init: CMD=%x, ISR=%x\n",
+ SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR));
+ }
+
+ /*
+ * enable receiver, disable loopback
+ * and enable all interrupts
+ */
+ SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
+ SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
+ SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
+ SONIC_WRITE(SONIC_ISR, 0x7fff);
+ SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
+
+ cmd = SONIC_READ(SONIC_CMD);
+ if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
+ printk("sonic_init: failed, status=%x\n", cmd);
+
+ if (sonic_debug > 2)
+ printk("sonic_init: new status=%x\n",
+ SONIC_READ(SONIC_CMD));
+
+ return 0;
+}
* see CONFIG_MACSONIC branch below.
*
*/
-
#ifndef SONIC_H
#define SONIC_H
/*
* SONIC register offsets
*/
-
+
#define SONIC_CMD 0x00
#define SONIC_DCR 0x01
#define SONIC_RCR 0x02
#ifdef CONFIG_MACSONIC
-/* Big endian like structures on Mac
- * (680x0)
+/*
+ * Big endian like structures on 680x0 Macs
*/
typedef struct {
- u32 rx_bufadr_l; /* receive buffer ptr */
- u32 rx_bufadr_h;
+ u32 rx_bufadr_l; /* receive buffer ptr */
+ u32 rx_bufadr_h;
- u32 rx_bufsize_l; /* no. of words in the receive buffer */
- u32 rx_bufsize_h;
+ u32 rx_bufsize_l; /* no. of words in the receive buffer */
+ u32 rx_bufsize_h;
} sonic_rr_t;
/*
*/
typedef struct {
- SREGS_PAD(pad0);
- u16 rx_status; /* status after reception of a packet */
- SREGS_PAD(pad1);
- u16 rx_pktlen; /* length of the packet incl. CRC */
-
- /*
- * Pointers to the location in the receive buffer area (RBA)
- * where the packet resides. A packet is always received into
- * a contiguous piece of memory.
- */
- SREGS_PAD(pad2);
- u16 rx_pktptr_l;
- SREGS_PAD(pad3);
- u16 rx_pktptr_h;
-
- SREGS_PAD(pad4);
- u16 rx_seqno; /* sequence no. */
-
- SREGS_PAD(pad5);
- u16 link; /* link to next RDD (end if EOL bit set) */
-
- /*
- * Owner of this descriptor, 0= driver, 1=sonic
- */
-
- SREGS_PAD(pad6);
- u16 in_use;
-
- caddr_t rda_next; /* pointer to next RD */
+ SREGS_PAD(pad0);
+ u16 rx_status; /* status after reception of a packet */
+ SREGS_PAD(pad1);
+ u16 rx_pktlen; /* length of the packet incl. CRC */
+
+ /*
+ * Pointers to the location in the receive buffer area (RBA)
+ * where the packet resides. A packet is always received into
+ * a contiguous piece of memory.
+ */
+ SREGS_PAD(pad2);
+ u16 rx_pktptr_l;
+ SREGS_PAD(pad3);
+ u16 rx_pktptr_h;
+
+ SREGS_PAD(pad4);
+ u16 rx_seqno; /* sequence no. */
+
+ SREGS_PAD(pad5);
+ u16 link; /* link to next RDD (end if EOL bit set) */
+
+ /*
+ * Owner of this descriptor, 0= driver, 1=sonic
+ */
+
+ SREGS_PAD(pad6);
+ u16 in_use;
+
+ caddr_t rda_next; /* pointer to next RD */
} sonic_rd_t;
* Describes a Transmit Descriptor
*/
typedef struct {
- SREGS_PAD(pad0);
- u16 tx_status; /* status after transmission of a packet */
- SREGS_PAD(pad1);
- u16 tx_config; /* transmit configuration for this packet */
- SREGS_PAD(pad2);
- u16 tx_pktsize; /* size of the packet to be transmitted */
- SREGS_PAD(pad3);
- u16 tx_frag_count; /* no. of fragments */
-
- SREGS_PAD(pad4);
- u16 tx_frag_ptr_l;
- SREGS_PAD(pad5);
- u16 tx_frag_ptr_h;
- SREGS_PAD(pad6);
- u16 tx_frag_size;
-
- SREGS_PAD(pad7);
- u16 link; /* ptr to next descriptor */
+ SREGS_PAD(pad0);
+ u16 tx_status; /* status after transmission of a packet */
+ SREGS_PAD(pad1);
+ u16 tx_config; /* transmit configuration for this packet */
+ SREGS_PAD(pad2);
+ u16 tx_pktsize; /* size of the packet to be transmitted */
+ SREGS_PAD(pad3);
+ u16 tx_frag_count; /* no. of fragments */
+
+ SREGS_PAD(pad4);
+ u16 tx_frag_ptr_l;
+ SREGS_PAD(pad5);
+ u16 tx_frag_ptr_h;
+ SREGS_PAD(pad6);
+ u16 tx_frag_size;
+
+ SREGS_PAD(pad7);
+ u16 link; /* ptr to next descriptor */
} sonic_td_t;
*/
typedef struct {
- SREGS_PAD(pad0);
- u16 cam_entry_pointer;
- SREGS_PAD(pad1);
- u16 cam_cap0;
- SREGS_PAD(pad2);
- u16 cam_cap1;
- SREGS_PAD(pad3);
- u16 cam_cap2;
+ SREGS_PAD(pad0);
+ u16 cam_entry_pointer;
+ SREGS_PAD(pad1);
+ u16 cam_cap0;
+ SREGS_PAD(pad2);
+ u16 cam_cap1;
+ SREGS_PAD(pad3);
+ u16 cam_cap2;
} sonic_cd_t;
#define CAM_DESCRIPTORS 16
typedef struct {
- sonic_cd_t cam_desc[CAM_DESCRIPTORS];
- SREGS_PAD(pad);
- u16 cam_enable;
+ sonic_cd_t cam_desc[CAM_DESCRIPTORS];
+ SREGS_PAD(pad);
+ u16 cam_enable;
} sonic_cda_t;
-#else /* original declarations, little endian 32 bit */
+#else /* original declarations, little endian 32 bit */
/*
* structure definitions
*/
typedef struct {
- u32 rx_bufadr_l; /* receive buffer ptr */
- u32 rx_bufadr_h;
+ u32 rx_bufadr_l; /* receive buffer ptr */
+ u32 rx_bufadr_h;
- u32 rx_bufsize_l; /* no. of words in the receive buffer */
- u32 rx_bufsize_h;
+ u32 rx_bufsize_l; /* no. of words in the receive buffer */
+ u32 rx_bufsize_h;
} sonic_rr_t;
/*
*/
typedef struct {
- u16 rx_status; /* status after reception of a packet */
- SREGS_PAD(pad0);
- u16 rx_pktlen; /* length of the packet incl. CRC */
- SREGS_PAD(pad1);
-
- /*
- * Pointers to the location in the receive buffer area (RBA)
- * where the packet resides. A packet is always received into
- * a contiguous piece of memory.
- */
- u16 rx_pktptr_l;
- SREGS_PAD(pad2);
- u16 rx_pktptr_h;
- SREGS_PAD(pad3);
-
- u16 rx_seqno; /* sequence no. */
- SREGS_PAD(pad4);
-
- u16 link; /* link to next RDD (end if EOL bit set) */
- SREGS_PAD(pad5);
-
- /*
- * Owner of this descriptor, 0= driver, 1=sonic
- */
-
- u16 in_use;
- SREGS_PAD(pad6);
-
- caddr_t rda_next; /* pointer to next RD */
+ u16 rx_status; /* status after reception of a packet */
+ SREGS_PAD(pad0);
+ u16 rx_pktlen; /* length of the packet incl. CRC */
+ SREGS_PAD(pad1);
+
+ /*
+ * Pointers to the location in the receive buffer area (RBA)
+ * where the packet resides. A packet is always received into
+ * a contiguous piece of memory.
+ */
+ u16 rx_pktptr_l;
+ SREGS_PAD(pad2);
+ u16 rx_pktptr_h;
+ SREGS_PAD(pad3);
+
+ u16 rx_seqno; /* sequence no. */
+ SREGS_PAD(pad4);
+
+ u16 link; /* link to next RDD (end if EOL bit set) */
+ SREGS_PAD(pad5);
+
+ /*
+ * Owner of this descriptor, 0= driver, 1=sonic
+ */
+
+ u16 in_use;
+ SREGS_PAD(pad6);
+
+ caddr_t rda_next; /* pointer to next RD */
} sonic_rd_t;
* Describes a Transmit Descriptor
*/
typedef struct {
- u16 tx_status; /* status after transmission of a packet */
- SREGS_PAD(pad0);
- u16 tx_config; /* transmit configuration for this packet */
- SREGS_PAD(pad1);
- u16 tx_pktsize; /* size of the packet to be transmitted */
- SREGS_PAD(pad2);
- u16 tx_frag_count; /* no. of fragments */
- SREGS_PAD(pad3);
-
- u16 tx_frag_ptr_l;
- SREGS_PAD(pad4);
- u16 tx_frag_ptr_h;
- SREGS_PAD(pad5);
- u16 tx_frag_size;
- SREGS_PAD(pad6);
-
- u16 link; /* ptr to next descriptor */
- SREGS_PAD(pad7);
+ u16 tx_status; /* status after transmission of a packet */
+ SREGS_PAD(pad0);
+ u16 tx_config; /* transmit configuration for this packet */
+ SREGS_PAD(pad1);
+ u16 tx_pktsize; /* size of the packet to be transmitted */
+ SREGS_PAD(pad2);
+ u16 tx_frag_count; /* no. of fragments */
+ SREGS_PAD(pad3);
+
+ u16 tx_frag_ptr_l;
+ SREGS_PAD(pad4);
+ u16 tx_frag_ptr_h;
+ SREGS_PAD(pad5);
+ u16 tx_frag_size;
+ SREGS_PAD(pad6);
+
+ u16 link; /* ptr to next descriptor */
+ SREGS_PAD(pad7);
} sonic_td_t;
*/
typedef struct {
- u16 cam_entry_pointer;
- SREGS_PAD(pad0);
- u16 cam_cap0;
- SREGS_PAD(pad1);
- u16 cam_cap1;
- SREGS_PAD(pad2);
- u16 cam_cap2;
- SREGS_PAD(pad3);
+ u16 cam_entry_pointer;
+ SREGS_PAD(pad0);
+ u16 cam_cap0;
+ SREGS_PAD(pad1);
+ u16 cam_cap1;
+ SREGS_PAD(pad2);
+ u16 cam_cap2;
+ SREGS_PAD(pad3);
} sonic_cd_t;
#define CAM_DESCRIPTORS 16
typedef struct {
- sonic_cd_t cam_desc[CAM_DESCRIPTORS];
- u16 cam_enable;
- SREGS_PAD(pad);
+ sonic_cd_t cam_desc[CAM_DESCRIPTORS];
+ u16 cam_enable;
+ SREGS_PAD(pad);
} sonic_cda_t;
-#endif /* endianness */
+#endif /* endianness */
/*
* Some tunables for the buffer areas. Power of 2 is required
* MSch: use more buffer space for the slow m68k Macs!
*/
#ifdef CONFIG_MACSONIC
-#define SONIC_NUM_RRS 32 /* number of receive resources */
-#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
-#define SONIC_NUM_TDS 32 /* number of transmit descriptors */
+#define SONIC_NUM_RRS 32 /* number of receive resources */
+#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
+#define SONIC_NUM_TDS 32 /* number of transmit descriptors */
#else
-#define SONIC_NUM_RRS 16 /* number of receive resources */
-#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
-#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
+#define SONIC_NUM_RRS 16 /* number of receive resources */
+#define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */
+#define SONIC_NUM_TDS 16 /* number of transmit descriptors */
#endif
-#define SONIC_RBSIZE 1520 /* size of one resource buffer */
+#define SONIC_RBSIZE 1520 /* size of one resource buffer */
#define SONIC_RDS_MASK (SONIC_NUM_RDS-1)
#define SONIC_TDS_MASK (SONIC_NUM_TDS-1)
/* Information that need to be kept for each board. */
struct sonic_local {
- sonic_cda_t cda; /* virtual CPU address of CDA */
- sonic_td_t tda[SONIC_NUM_TDS]; /* transmit descriptor area */
- sonic_rr_t rra[SONIC_NUM_RRS]; /* receive resource area */
- sonic_rd_t rda[SONIC_NUM_RDS]; /* receive descriptor area */
- struct sk_buff* tx_skb[SONIC_NUM_TDS]; /* skbuffs for packets to transmit */
- unsigned int tx_laddr[SONIC_NUM_TDS]; /* logical DMA address fro skbuffs */
- unsigned char *rba; /* start of receive buffer areas */
- unsigned int cda_laddr; /* logical DMA address of CDA */
- unsigned int tda_laddr; /* logical DMA address of TDA */
- unsigned int rra_laddr; /* logical DMA address of RRA */
- unsigned int rda_laddr; /* logical DMA address of RDA */
- unsigned int rba_laddr; /* logical DMA address of RBA */
- unsigned int cur_rra; /* current indexes to resource areas */
- unsigned int cur_rx;
- unsigned int cur_tx;
- unsigned int dirty_tx; /* last unacked transmit packet */
- char tx_full;
- struct net_device_stats stats;
+ sonic_cda_t cda; /* virtual CPU address of CDA */
+ sonic_td_t tda[SONIC_NUM_TDS]; /* transmit descriptor area */
+ sonic_rr_t rra[SONIC_NUM_RRS]; /* receive resource area */
+ sonic_rd_t rda[SONIC_NUM_RDS]; /* receive descriptor area */
+ struct sk_buff *tx_skb[SONIC_NUM_TDS]; /* skbuffs for packets to transmit */
+	unsigned int tx_laddr[SONIC_NUM_TDS];	/* logical DMA address for skbuffs */
+ unsigned char *rba; /* start of receive buffer areas */
+ unsigned int cda_laddr; /* logical DMA address of CDA */
+ unsigned int tda_laddr; /* logical DMA address of TDA */
+ unsigned int rra_laddr; /* logical DMA address of RRA */
+ unsigned int rda_laddr; /* logical DMA address of RDA */
+ unsigned int rba_laddr; /* logical DMA address of RBA */
+ unsigned int cur_rra; /* current indexes to resource areas */
+ unsigned int cur_rx;
+ unsigned int cur_tx;
+ unsigned int dirty_tx; /* last unacked transmit packet */
+ char tx_full;
+ struct net_device_stats stats;
};
+#define TX_TIMEOUT 6
+
/* Index to functions, as function prototypes. */
static int sonic_open(struct net_device *dev);
static struct net_device_stats *sonic_get_stats(struct net_device *dev);
static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
+static void sonic_tx_timeout(struct net_device *dev);
static const char *version =
- "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
+ "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
#endif /* SONIC_H */
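The TX_TIMEOUT constant and the sonic_tx_timeout() prototype added above tie the driver into the 2.4 netdev watchdog. A minimal sketch of what such a handler typically does, assuming <linux/netdevice.h> and the sonic_local definition above; the hardware reset step is only hinted at, and the probe-time wiring is an assumption, not something shown in this hunk:

/* Hypothetical sketch only; the real handler lives in drivers/net/sonic.c. */
static void sonic_tx_timeout(struct net_device *dev)
{
	struct sonic_local *lp = (struct sonic_local *) dev->priv;

	/* record the stall; the real driver would also reset the chip here */
	lp->stats.tx_errors++;

	/* restart the transmit queue so the stack can retry */
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

The probe routine would then typically hook this up with dev->tx_timeout = sonic_tx_timeout and dev->watchdog_timeo = TX_TIMEOUT (assumed here, not taken from sonic.c).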
+2001-10-10 Tim Waugh <twaugh@redhat.com>
+
+ * parport_pc.c: Support for OX16PCI954 PCI card.
+
+2001-10-10 Tim Waugh <twaugh@redhat.com>
+
+ * parport_pc.c: Support for OX12PCI840 PCI card (reported by
+ mk@daveg.com). Lock-ups diagnosed by Ronnie Arosa (and now we
+ just don't trust its ECR).
+
+2001-10-10 Gunther Mayer <gunther.mayer@braunschweig.okersurf.de>
+
+ * parport_pc.c: Support for AVLAB cards.
+
+2001-10-10 Tim Waugh <twaugh@redhat.com>
+
+ * ieee1284_ops.c (ecp_forward_to_reverse, ecp_reverse_to_forward):
+ Remember to retry direction switch if it fails. Patch from David
+ Lambert.
+
2001-10-08 David C. Hansen <haveblue@us.ibm.com>
* share.c: Make driverlist_lock and parportlist_lock static.
DPRINTK (KERN_DEBUG "%s: ECP direction: reverse\n",
port->name);
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+ } else {
+ DPRINTK (KERN_DEBUG "%s: ECP direction: failed to reverse\n",
+ port->name);
+		port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
return retval;
DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
+ } else {
+ DPRINTK (KERN_DEBUG
+ "%s: ECP direction: failed to switch forward\n",
+ port->name);
+		port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
+
return retval;
}
lava_parallel_dual_b,
boca_ioppar,
plx_9050,
- afavlab_tk9902,
timedia_4078a,
timedia_4079h,
timedia_4085h,
syba_1p_ecp,
titan_010l,
titan_1284p2,
+ avlab_1p,
+ avlab_2p,
+ oxsemi_954,
+ oxsemi_840,
};
/* lava_parallel_dual_b */ { 1, { { 0, -1 }, } },
/* boca_ioppar */ { 1, { { 0, -1 }, } },
/* plx_9050 */ { 2, { { 4, -1 }, { 5, -1 }, } },
- /* afavlab_tk9902 */ { 1, { { 0, 1 }, } },
/* timedia_4078a */ { 1, { { 2, -1 }, } },
/* timedia_4079h */ { 1, { { 2, 3 }, } },
/* timedia_4085h */ { 2, { { 2, -1 }, { 4, -1 }, } },
/* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
/* titan_010l */ { 1, { { 3, -1 }, } },
/* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
+ /* avlab_1p */ { 1, { { 0, 1}, } },
+ /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
+ /* The Oxford Semi cards are unusual: 954 doesn't support ECP,
+ * and 840 locks up if you write 1 to bit 2! */
+ /* oxsemi_954 */ { 1, { { 0, -1 }, } },
+ /* oxsemi_840 */ { 1, { { 0, -1 }, } },
};
static struct pci_device_id parport_pc_pci_tbl[] __devinitdata = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, boca_ioppar },
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0,0, plx_9050 },
- { PCI_VENDOR_ID_AFAVLAB, PCI_DEVICE_ID_AFAVLAB_TK9902,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, afavlab_tk9902 },
/* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
{ 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a },
{ 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
{ 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
+ /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
+ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */
+ { 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954PP,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci,parport_pc_pci_tbl);
-/* $Id: audio.c,v 1.61 2001/08/13 14:40:12 davem Exp $
+/* $Id: audio.c,v 1.62 2001/10/08 22:19:50 davem Exp $
* drivers/sbus/audio/audio.c
*
* Copyright 1996 Thomas K. Dyas (tdyas@noc.rutgers.edu)
devfs_unregister (devfs_handle);
}
-module_init(sparcaudio_init)
-module_exit(sparcaudio_exit)
+module_init(sparcaudio_init);
+module_exit(sparcaudio_exit);
+MODULE_LICENSE("GPL");
/*
* Code from Linux Streams, Copyright 1995 by
-/* $Id: cs4231.c,v 1.46 2001/05/21 01:25:22 davem Exp $
+/* $Id: cs4231.c,v 1.47 2001/10/08 22:19:50 davem Exp $
* drivers/sbus/audio/cs4231.c
*
* Copyright 1996, 1997, 1998, 1999 Derrick J Brashear (shadow@andrew.cmu.edu)
module_init(cs4231_init);
module_exit(cs4231_exit);
+MODULE_LICENSE("GPL");
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
-/* $Id: dbri.c,v 1.26 2001/05/21 01:25:22 davem Exp $
+/* $Id: dbri.c,v 1.27 2001/10/08 22:19:50 davem Exp $
* drivers/sbus/audio/dbri.c
*
* Copyright (C) 1997 Rudolf Koenig (rfkoenig@immd4.informatik.uni-erlangen.de)
module_init(dbri_init);
module_exit(dbri_exit);
+MODULE_LICENSE("GPL");
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
-/* $Id: dmy.c,v 1.9 2001/05/22 23:16:10 davem Exp $
+/* $Id: dmy.c,v 1.10 2001/10/08 22:19:50 davem Exp $
* drivers/sbus/audio/dummy.c
*
* Copyright 1998 Derrick J Brashear (shadow@andrew.cmu.edu)
module_init(dummy_init);
module_exit(dummy_exit);
+MODULE_LICENSE("GPL");
+
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
-/* $Id: aurora.c,v 1.15 2001/08/13 14:40:08 davem Exp $
+/* $Id: aurora.c,v 1.16 2001/10/08 22:19:51 davem Exp $
* linux/drivers/sbus/char/aurora.c -- Aurora multiport driver
*
* Copyright (c) 1999 by Oliver Aldulea (oli at bv dot ro)
module_init(aurora_init);
module_exit(aurora_cleanup);
+MODULE_LICENSE("GPL");
module_init(bpp_init);
module_exit(bpp_cleanup);
+MODULE_LICENSE("GPL");
("Eric Brower <ebrower@usa.net>");
MODULE_DESCRIPTION
("Hardware watchdog driver for Sun Microsystems CP1400/1500");
+MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE
("watchdog");
#endif /* ifdef MODULE */
-/* $Id: display7seg.c,v 1.4 2000/11/08 05:08:23 davem Exp $
+/* $Id: display7seg.c,v 1.5 2001/10/08 22:19:51 davem Exp $
*
* display7seg - Driver implementation for the 7-segment display
* present on Sun Microsystems CP1400 and CP1500
("Eric Brower <ebrower@usa.net>");
MODULE_DESCRIPTION
("7-Segment Display driver for Sun Microsystems CP1400/1500");
+MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE
("d7s");
#endif /* ifdef MODULE */
-/* $Id: envctrl.c,v 1.23 2001/08/09 23:42:09 davem Exp $
+/* $Id: envctrl.c,v 1.24 2001/10/08 22:19:51 davem Exp $
* envctrl.c: Temperature and Fan monitoring on Machines providing it.
*
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
module_init(envctrl_init);
module_exit(envctrl_cleanup);
+MODULE_LICENSE("GPL");
-/* $Id: flash.c,v 1.23 2001/03/02 06:32:40 davem Exp $
+/* $Id: flash.c,v 1.24 2001/10/08 22:19:51 davem Exp $
* flash.c: Allow mmap access to the OBP Flash, for OBP updates.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
module_init(flash_init);
module_exit(flash_cleanup);
+MODULE_LICENSE("GPL");
}
#ifdef MODULE
+MODULE_LICENSE("GPL");
int init_module(void) {
int rc;
module_init(openprom_init);
module_exit(openprom_cleanup);
+MODULE_LICENSE("GPL");
-/* $Id: riowatchdog.c,v 1.2 2001/03/26 23:47:18 davem Exp $
+/* $Id: riowatchdog.c,v 1.3 2001/10/08 22:19:51 davem Exp $
* riowatchdog.c - driver for hw watchdog inside Super I/O of RIO
*
* Copyright (C) 2001 David S. Miller (davem@redhat.com)
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("Hardware watchdog driver for Sun RIO");
MODULE_SUPPORTED_DEVICE("watchdog");
+MODULE_LICENSE("GPL");
#define RIOWD_NAME "pmc"
#define RIOWD_MINOR 215
-/* $Id: rtc.c,v 1.27 2001/08/13 14:40:08 davem Exp $
+/* $Id: rtc.c,v 1.28 2001/10/08 22:19:51 davem Exp $
*
* Linux/SPARC Real Time Clock Driver
* Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
module_init(rtc_sun_init);
module_exit(rtc_sun_cleanup);
+MODULE_LICENSE("GPL");
-/* $Id: sab82532.c,v 1.63 2001/06/29 21:23:44 davem Exp $
+/* $Id: sab82532.c,v 1.64 2001/10/08 22:19:51 davem Exp $
* sab82532.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
static inline void __init show_serial_version(void)
{
- char *revision = "$Revision: 1.63 $";
+ char *revision = "$Revision: 1.64 $";
char *version, *p;
version = strchr(revision, ' ');
}
#ifdef MODULE
+MODULE_LICENSE("GPL");
+
int init_module(void)
{
if (get_sab82532(0))
-/* $Id: uctrl.c,v 1.11 2001/08/13 14:40:08 davem Exp $
+/* $Id: uctrl.c,v 1.12 2001/10/08 22:19:51 davem Exp $
* uctrl.c: TS102 Microcontroller interface on Tadpole Sparcbook 3
*
* Copyright 1999 Derrick J Brashear (shadow@dementia.org)
module_init(ts102_uctrl_init);
module_exit(ts102_uctrl_cleanup);
+MODULE_LICENSE("GPL");
* #define AIC7XXX_VERBOSE_DEBUGGING
*/
-#if defined(MODULE) || defined(PCMCIA)
#include <linux/module.h>
-#endif
#if defined(PCMCIA)
# undef MODULE
* framework in, but haven't analyzed the "tty_flip" interface yet.
* -- Add support for flush commands
* -- Add everything that is missing :)
- *
- * (30-May-2001 gkh
+ *
+ * 30-May-2001 gkh
* switched from using spinlock to a semaphore, which fixes lots of problems.
*
* 08-Apr-2001 gb
static void belkin_sa_close (struct usb_serial_port *port, struct file *filp)
{
+ struct usb_serial *serial;
+
+ if (port_paranoia_check (port, __FUNCTION__))
+ return;
+
+ serial = get_usb_serial (port, __FUNCTION__);
+ if (!serial)
+ return;
+
dbg(__FUNCTION__" port %d", port->number);
down (&port->sem);
--port->open_count;
if (port->open_count <= 0) {
- /* shutdown our bulk reads and writes */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->read_urb);
- usb_unlink_urb (port->interrupt_in_urb); /* wgg - do I need this? I think so. */
+ if (serial->dev) {
+ /* shutdown our bulk reads and writes */
+ usb_unlink_urb (port->write_urb);
+ usb_unlink_urb (port->read_urb);
+ usb_unlink_urb (port->interrupt_in_urb);
+ }
port->active = 0;
}
usb_serial_register (&belkin_old_device);
usb_serial_register (&peracom_device);
usb_serial_register (&gocom232_device);
- info(DRIVER_VERSION ":" DRIVER_DESC);
+ info(DRIVER_DESC " " DRIVER_VERSION);
return 0;
}
--port->open_count;
if (port->open_count <= 0) {
- /* shutdown any bulk reads that might be going on */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->read_urb);
- usb_unlink_urb (port->interrupt_in_urb);
+ if (port->serial->dev) {
+ /* shutdown any bulk reads that might be going on */
+ usb_unlink_urb (port->write_urb);
+ usb_unlink_urb (port->read_urb);
+ usb_unlink_urb (port->interrupt_in_urb);
+ }
port->active = 0;
port->open_count = 0;
if( tty->ldisc.flush_buffer )
tty->ldisc.flush_buffer( tty );
- /* wait for transmit idle */
- if( (filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0 ) {
- digi_transmit_idle( port, DIGI_CLOSE_TIMEOUT );
- }
-
- /* drop DTR and RTS */
- digi_set_modem_signals( port, 0, 0 );
-
- /* disable input flow control */
- buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
- buf[1] = priv->dp_port_num;
- buf[2] = DIGI_DISABLE;
- buf[3] = 0;
-
- /* disable output flow control */
- buf[4] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
- buf[5] = priv->dp_port_num;
- buf[6] = DIGI_DISABLE;
- buf[7] = 0;
-
- /* disable reading modem signals automatically */
- buf[8] = DIGI_CMD_READ_INPUT_SIGNALS;
- buf[9] = priv->dp_port_num;
- buf[10] = DIGI_DISABLE;
- buf[11] = 0;
-
- /* disable receive */
- buf[12] = DIGI_CMD_RECEIVE_ENABLE;
- buf[13] = priv->dp_port_num;
- buf[14] = DIGI_DISABLE;
- buf[15] = 0;
-
- /* flush fifos */
- buf[16] = DIGI_CMD_IFLUSH_FIFO;
- buf[17] = priv->dp_port_num;
- buf[18] = DIGI_FLUSH_TX | DIGI_FLUSH_RX;
- buf[19] = 0;
-
- if( (ret=digi_write_oob_command( port, buf, 20, 0 )) != 0 )
- dbg( "digi_close: write oob failed, ret=%d", ret );
-
- /* wait for final commands on oob port to complete */
- interruptible_sleep_on_timeout( &priv->dp_flush_wait,
- DIGI_CLOSE_TIMEOUT );
+ if (port->serial->dev) {
+ /* wait for transmit idle */
+ if( (filp->f_flags&(O_NDELAY|O_NONBLOCK)) == 0 ) {
+ digi_transmit_idle( port, DIGI_CLOSE_TIMEOUT );
+ }
- /* shutdown any outstanding bulk writes */
- usb_unlink_urb (port->write_urb);
+ /* drop DTR and RTS */
+ digi_set_modem_signals( port, 0, 0 );
+
+ /* disable input flow control */
+ buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
+ buf[1] = priv->dp_port_num;
+ buf[2] = DIGI_DISABLE;
+ buf[3] = 0;
+
+ /* disable output flow control */
+ buf[4] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
+ buf[5] = priv->dp_port_num;
+ buf[6] = DIGI_DISABLE;
+ buf[7] = 0;
+
+ /* disable reading modem signals automatically */
+ buf[8] = DIGI_CMD_READ_INPUT_SIGNALS;
+ buf[9] = priv->dp_port_num;
+ buf[10] = DIGI_DISABLE;
+ buf[11] = 0;
+
+ /* disable receive */
+ buf[12] = DIGI_CMD_RECEIVE_ENABLE;
+ buf[13] = priv->dp_port_num;
+ buf[14] = DIGI_DISABLE;
+ buf[15] = 0;
+
+ /* flush fifos */
+ buf[16] = DIGI_CMD_IFLUSH_FIFO;
+ buf[17] = priv->dp_port_num;
+ buf[18] = DIGI_FLUSH_TX | DIGI_FLUSH_RX;
+ buf[19] = 0;
+
+ if( (ret=digi_write_oob_command( port, buf, 20, 0 )) != 0 )
+ dbg( "digi_close: write oob failed, ret=%d", ret );
+
+ /* wait for final commands on oob port to complete */
+ interruptible_sleep_on_timeout( &priv->dp_flush_wait,
+ DIGI_CLOSE_TIMEOUT );
+
+ /* shutdown any outstanding bulk writes */
+ usb_unlink_urb (port->write_urb);
+ }
tty->closing = 0;
--port->open_count;
if (port->open_count <= 0) {
- /* shutdown our bulk read */
- usb_unlink_urb (port->read_urb);
+ if (serial->dev) {
+ /* shutdown our bulk read */
+ usb_unlink_urb (port->read_urb);
+ }
port->active = 0;
port->open_count = 0;
}
--port->open_count;
if (port->open_count <= 0) {
- if (c_cflag & HUPCL){
- /* Disable flow control */
- if (usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- FTDI_SIO_SET_FLOW_CTRL_REQUEST,
- FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, 0,
- buf, 0, WDR_TIMEOUT) < 0) {
- err("error from flowcontrol urb");
- }
-
- /* drop DTR */
- if (set_dtr(serial->dev, usb_sndctrlpipe(serial->dev, 0), LOW) < 0){
- err("Error from DTR LOW urb");
- }
- /* drop RTS */
- if (set_rts(serial->dev, usb_sndctrlpipe(serial->dev, 0),LOW) < 0) {
- err("Error from RTS LOW urb");
- }
- } /* Note change no line is hupcl is off */
-
- /* shutdown our bulk reads and writes */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->read_urb);
+ if (serial->dev) {
+ if (c_cflag & HUPCL){
+ /* Disable flow control */
+ if (usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST,
+ FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
+ 0, 0, buf, 0, WDR_TIMEOUT) < 0) {
+ err("error from flowcontrol urb");
+ }
+
+ /* drop DTR */
+ if (set_dtr(serial->dev, usb_sndctrlpipe(serial->dev, 0), LOW) < 0){
+ err("Error from DTR LOW urb");
+ }
+ /* drop RTS */
+ if (set_rts(serial->dev, usb_sndctrlpipe(serial->dev, 0),LOW) < 0) {
+ err("Error from RTS LOW urb");
+ }
+			} /* Note change no line if hupcl is off */
+
+ /* shutdown our bulk reads and writes */
+ usb_unlink_urb (port->write_urb);
+ usb_unlink_urb (port->read_urb);
+ }
port->active = 0;
port->open_count = 0;
} else {
--port->open_count;
if (port->open_count <= 0) {
- // block until tx is empty
- block_until_tx_empty(edge_port);
-
- edge_port->closePending = TRUE;
-
- /* flush and chase */
- edge_port->chaseResponsePending = TRUE;
-
- dbg(__FUNCTION__" - Sending IOSP_CMD_CHASE_PORT");
- status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0);
- if (status == 0) {
- // block until chase finished
- block_until_chase_response(edge_port);
- } else {
- edge_port->chaseResponsePending = FALSE;
+ if (serial->dev) {
+ // block until tx is empty
+ block_until_tx_empty(edge_port);
+
+ edge_port->closePending = TRUE;
+
+ /* flush and chase */
+ edge_port->chaseResponsePending = TRUE;
+
+ dbg(__FUNCTION__" - Sending IOSP_CMD_CHASE_PORT");
+ status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0);
+ if (status == 0) {
+ // block until chase finished
+ block_until_chase_response(edge_port);
+ } else {
+ edge_port->chaseResponsePending = FALSE;
+ }
+
+ /* close the port */
+ dbg(__FUNCTION__" - Sending IOSP_CMD_CLOSE_PORT");
+ send_iosp_ext_cmd (edge_port, IOSP_CMD_CLOSE_PORT, 0);
+
+ //port->close = TRUE;
+ edge_port->closePending = FALSE;
+ edge_port->open = FALSE;
+ edge_port->openPending = FALSE;
+
+ if (edge_port->write_urb) {
+ usb_unlink_urb (edge_port->write_urb);
+ }
}
- /* close the port */
- dbg(__FUNCTION__" - Sending IOSP_CMD_CLOSE_PORT");
- send_iosp_ext_cmd (edge_port, IOSP_CMD_CLOSE_PORT, 0);
-
- //port->close = TRUE;
- edge_port->closePending = FALSE;
- edge_port->open = FALSE;
- edge_port->openPending = FALSE;
-
if (edge_port->write_urb) {
/* if this urb had a transfer buffer already (old transfer) free it */
if (edge_port->write_urb->transfer_buffer != NULL) {
kfree(edge_port->write_urb->transfer_buffer);
}
-
- usb_unlink_urb (edge_port->write_urb);
usb_free_urb (edge_port->write_urb);
}
-
if (edge_port->txfifo.fifo) {
kfree(edge_port->txfifo.fifo);
}
* please use the usb-irda driver, as it contains the proper error checking and
* other goodness of a full IrDA stack.
*
+ * Portions of this driver were taken from drivers/net/irda/irda-usb.c, which
+ * was written by Roman Weissgaerber <weissg@vienna.at>, Dag Brattli
+ * <dag@brattli.net>, and Jean Tourrilhes <jt@hpl.hp.com>
+ *
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
* 2001_Oct_07 greg kh
* offer to us, describing their IrDA characteristics. We will use that in
* irda_usb_init_qos()
*/
-static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_device *dev, unsigned int ifnum)
+static struct irda_class_desc *irda_usb_find_class_desc(struct usb_device *dev, unsigned int ifnum)
{
struct usb_interface_descriptor *interface;
struct irda_class_desc *desc;
--port->open_count;
if (port->open_count <= 0) {
- /* shutdown our bulk read */
- usb_unlink_urb (port->read_urb);
+ if (serial->dev) {
+ /* shutdown our bulk read */
+ usb_unlink_urb (port->read_urb);
+ }
port->active = 0;
port->open_count = 0;
static void keyspan_close(struct usb_serial_port *port, struct file *filp)
{
int i;
- struct usb_serial *serial = port->serial; /* FIXME should so sanity check */
+ struct usb_serial *serial;
struct keyspan_serial_private *s_priv;
struct keyspan_port_private *p_priv;
+ serial = get_usb_serial (port, __FUNCTION__);
+ if (!serial)
+ return;
+
dbg("keyspan_close called\n");
s_priv = (struct keyspan_serial_private *)(serial->private);
p_priv = (struct keyspan_port_private *)(port->private);
p_priv->rts_state = 0;
p_priv->dtr_state = 0;
- keyspan_send_setup(port, 1);
+ if (serial->dev)
+ keyspan_send_setup(port, 1);
/*while (p_priv->outcont_urb->status == -EINPROGRESS) {
dbg("close - urb in progress\n");
if (--port->open_count <= 0) {
if (port->active) {
- /* Stop reading/writing urbs */
- stop_urb(p_priv->inack_urb);
- stop_urb(p_priv->outcont_urb);
- for (i = 0; i < 2; i++) {
- stop_urb(p_priv->in_urbs[i]);
- stop_urb(p_priv->out_urbs[i]);
+ if (serial->dev) {
+ /* Stop reading/writing urbs */
+ stop_urb(p_priv->inack_urb);
+ stop_urb(p_priv->outcont_urb);
+ for (i = 0; i < 2; i++) {
+ stop_urb(p_priv->in_urbs[i]);
+ stop_urb(p_priv->out_urbs[i]);
+ }
}
- /* Now done in shutdown
- if (atomic_dec_return(&s_priv->active_count) <= 0) {
- stop_urb(s_priv->instat_urb);
- stop_urb(s_priv->glocont_urb);
- } */
}
port->active = 0;
port->open_count = 0;
--port->open_count;
if (port->open_count <= 0) {
- /* the normal serial device seems to always shut off DTR and RTS now */
- if (port->tty->termios->c_cflag & HUPCL)
- keyspan_pda_set_modem_info(serial, 0);
-
- /* shutdown our bulk reads and writes */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->interrupt_in_urb);
+ if (serial->dev) {
+ /* the normal serial device seems to always shut off DTR and RTS now */
+ if (port->tty->termios->c_cflag & HUPCL)
+ keyspan_pda_set_modem_info(serial, 0);
+
+ /* shutdown our bulk reads and writes */
+ usb_unlink_urb (port->write_urb);
+ usb_unlink_urb (port->interrupt_in_urb);
+ }
port->active = 0;
port->open_count = 0;
}
--port->open_count;
if (port->open_count <= 0) {
- /* shutdown our bulk reads and writes */
- usb_unlink_urb (port->write_urb);
- usb_unlink_urb (port->read_urb);
- /* wgg - do I need this? I think so. */
- usb_unlink_urb (port->interrupt_in_urb);
+ if (port->serial->dev) {
+ /* shutdown our urbs */
+ usb_unlink_urb (port->write_urb);
+ usb_unlink_urb (port->read_urb);
+ usb_unlink_urb (port->interrupt_in_urb);
+ }
port->active = 0;
}
--port->open_count;
if (port->open_count <= 0) {
- od = (struct omninet_data *)port->private;
- wport = &serial->port[1];
-
- usb_unlink_urb (wport->write_urb);
- usb_unlink_urb (port->read_urb);
+ if (serial->dev) {
+ wport = &serial->port[1];
+ usb_unlink_urb (wport->write_urb);
+ usb_unlink_urb (port->read_urb);
+ }
port->active = 0;
port->open_count = 0;
+ od = (struct omninet_data *)port->private;
if (od)
kfree(od);
}
static void pl2303_close (struct usb_serial_port *port, struct file *filp)
{
+ struct usb_serial *serial;
struct pl2303_private *priv;
unsigned int c_cflag;
int result;
if (port_paranoia_check (port, __FUNCTION__))
return;
-
+ serial = get_usb_serial (port, __FUNCTION__);
+ if (!serial)
+ return;
+
dbg (__FUNCTION__ " - port %d", port->number);
down (&port->sem);
--port->open_count;
if (port->open_count <= 0) {
- c_cflag = port->tty->termios->c_cflag;
- if (c_cflag & HUPCL) {
- /* drop DTR and RTS */
- priv = port->private;
- priv->line_control = 0;
- set_control_lines (port->serial->dev, priv->line_control);
- }
+ if (serial->dev) {
+ c_cflag = port->tty->termios->c_cflag;
+ if (c_cflag & HUPCL) {
+ /* drop DTR and RTS */
+ priv = port->private;
+ priv->line_control = 0;
+ set_control_lines (port->serial->dev,
+ priv->line_control);
+ }
- /* shutdown our urbs */
- dbg (__FUNCTION__ " - shutting down urbs");
- result = usb_unlink_urb (port->write_urb);
- if (result)
- dbg (__FUNCTION__ " - usb_unlink_urb (write_urb) failed with reason: %d", result);
+ /* shutdown our urbs */
+ dbg (__FUNCTION__ " - shutting down urbs");
+ result = usb_unlink_urb (port->write_urb);
+ if (result)
+ dbg (__FUNCTION__ " - usb_unlink_urb "
+ "(write_urb) failed with reason: %d",
+ result);
- result = usb_unlink_urb (port->read_urb);
- if (result)
- dbg (__FUNCTION__ " - usb_unlink_urb (read_urb) failed with reason: %d", result);
+ result = usb_unlink_urb (port->read_urb);
+ if (result)
+ dbg (__FUNCTION__ " - usb_unlink_urb "
+ "(read_urb) failed with reason: %d",
+ result);
- result = usb_unlink_urb (port->interrupt_in_urb);
- if (result)
- dbg (__FUNCTION__ " - usb_unlink_urb (interrupt_in_urb) failed with reason: %d", result);
+ result = usb_unlink_urb (port->interrupt_in_urb);
+ if (result)
+ dbg (__FUNCTION__ " - usb_unlink_urb "
+ "(interrupt_in_urb) failed with reason: %d",
+ result);
+ }
port->active = 0;
port->open_count = 0;
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
+ * (10/10/2001) gkh
+ * added vendor and product to serial structure. Needed to determine device
+ * owner when the device is disconnected.
+ *
* (05/30/2001) gkh
* added sem to port structure and removed port_lock
*
char num_interrupt_in; /* number of interrupt in endpoints we have */
char num_bulk_in; /* number of bulk in endpoints we have */
char num_bulk_out; /* number of bulk out endpoints we have */
+ __u16 vendor; /* vendor id of this device */
+ __u16 product; /* product id of this device */
struct usb_serial_port port[MAX_NUM_PORTS];
void * private; /* data private to the specific driver */
* based on a driver by Brad Keryan)
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
- *
+ *
+ * (10/10/2001) gkh
+ *	usb_serial_disconnect() now sets the serial->dev pointer to NULL to
+ * help prevent child drivers from accessing the device since it is now
+ * gone.
+ *
* (09/13/2001) gkh
* Moved generic driver initialize after we have registered with the USB
* core. Thanks to Randy Dunlap for pointing this problem out.
shutdown: generic_shutdown,
};
-#define if_generic_do(x) \
- if ((serial->dev->descriptor.idVendor == vendor) && \
- (serial->dev->descriptor.idProduct == product)) \
+#define if_generic_do(x) \
+ if ((serial->vendor == vendor) && \
+ (serial->product == product)) \
x
#else
#define if_generic_do(x)
int ezusb_writememory (struct usb_serial *serial, int address, unsigned char *data, int length, __u8 bRequest)
{
int result;
- unsigned char *transfer_buffer = kmalloc (length, GFP_KERNEL);
+ unsigned char *transfer_buffer;
-// dbg("ezusb_writememory %x, %d", address, length);
+ /* dbg("ezusb_writememory %x, %d", address, length); */
+ if (!serial->dev) {
+ dbg(__FUNCTION__ " - no physical device present, failing.");
+ return -ENODEV;
+ }
+ transfer_buffer = kmalloc (length, GFP_KERNEL);
if (!transfer_buffer) {
err(__FUNCTION__ " - kmalloc(%d) failed.", length);
return -ENOMEM;
--port->open_count;
if (port->open_count <= 0) {
- /* shutdown any bulk reads that might be going on */
- if (serial->num_bulk_out)
- usb_unlink_urb (port->write_urb);
- if (serial->num_bulk_in)
- usb_unlink_urb (port->read_urb);
+ if (serial->dev) {
+ /* shutdown any bulk reads that might be going on */
+ if (serial->num_bulk_out)
+ usb_unlink_urb (port->write_urb);
+ if (serial->num_bulk_in)
+ usb_unlink_urb (port->read_urb);
+ }
port->active = 0;
port->open_count = 0;
serial->num_bulk_in = num_bulk_in;
serial->num_bulk_out = num_bulk_out;
serial->num_interrupt_in = num_interrupt_in;
+ serial->vendor = dev->descriptor.idVendor;
+ serial->product = dev->descriptor.idProduct;
/* if this device type has a startup function, call it */
if (type->startup) {
serial->port[i].tty->driver_data = NULL;
}
+ serial->dev = NULL;
serial_shutdown (serial);
for (i = 0; i < serial->num_ports; ++i)
--port->open_count;
if (port->open_count <= 0) {
- transfer_buffer = kmalloc (0x12, GFP_KERNEL);
- if (!transfer_buffer) {
- err(__FUNCTION__ " - kmalloc(%d) failed.", 0x12);
- } else {
- /* send a shutdown message to the device */
- usb_control_msg (serial->dev, usb_rcvctrlpipe(serial->dev, 0), VISOR_CLOSE_NOTIFICATION,
- 0xc2, 0x0000, 0x0000, transfer_buffer, 0x12, 300);
- kfree (transfer_buffer);
+ if (serial->dev) {
+ /* only send a shutdown message if the
+ * device is still here */
+ transfer_buffer = kmalloc (0x12, GFP_KERNEL);
+ if (!transfer_buffer) {
+ err(__FUNCTION__ " - kmalloc(%d) failed.", 0x12);
+ } else {
+ /* send a shutdown message to the device */
+ usb_control_msg (serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ VISOR_CLOSE_NOTIFICATION, 0xc2,
+ 0x0000, 0x0000,
+ transfer_buffer, 0x12, 300);
+ kfree (transfer_buffer);
+ }
+ /* shutdown our bulk read */
+ usb_unlink_urb (port->read_urb);
}
-
- /* shutdown our bulk read */
- usb_unlink_urb (port->read_urb);
port->active = 0;
port->open_count = 0;
-
}
up (&port->sem);
}
uhci_unlink_generic(uhci, urb);
- uhci_destroy_urb_priv(urb);
-
- usb_dec_dev_use(urb->dev);
+ uhci_call_completion(urb);
return ret;
}
return ret;
}
-unsigned long usbvideo_uvirt_to_bus(unsigned long adr)
-{
- unsigned long kva, ret;
-
- kva = usbvideo_uvirt_to_kva(pgd_offset(current->mm, adr), adr);
- ret = virt_to_bus((void *)kva);
- MDEBUG(printk("uv2b(%lx-->%lx)", adr, ret));
- return ret;
-}
-
-unsigned long usbvideo_kvirt_to_bus(unsigned long adr)
-{
- unsigned long va, kva, ret;
-
- va = VMALLOC_VMADDR(adr);
- kva = usbvideo_uvirt_to_kva(pgd_offset_k(va), va);
- ret = virt_to_bus((void *)kva);
- MDEBUG(printk("kv2b(%lx-->%lx)", adr, ret));
- return ret;
-}
-
/*
* Here we want the physical address of the memory.
* This is used when initializing the contents of the
/* Memory allocation routines */
unsigned long usbvideo_uvirt_to_kva(pgd_t *pgd, unsigned long adr);
-unsigned long usbvideo_uvirt_to_bus(unsigned long adr);
-unsigned long usbvideo_kvirt_to_bus(unsigned long adr);
unsigned long usbvideo_kvirt_to_pa(unsigned long adr);
void *usbvideo_rvmalloc(unsigned long size);
void usbvideo_rvfree(void *mem, unsigned long size);
extern int e1355fb_setup(char*);
extern int pvr2fb_init(void);
extern int pvr2fb_setup(char*);
+extern int sstfb_init(void);
+extern int sstfb_setup(char*);
static struct {
const char *name;
#ifdef CONFIG_FB_PVR2
{ "pvr2", pvr2fb_init, pvr2fb_setup },
#endif
-
+#ifdef CONFIG_FB_VOODOO1
+ { "sst", sstfb_init, sstfb_setup },
+#endif
/*
* Generic drivers that don't use resource management (yet)
*/
20010927 Richard Gooch <rgooch@atnf.csiro.au>
Went back to global rwsem for symlinks (refcount scheme no good)
v0.117
+ 20011008 Richard Gooch <rgooch@atnf.csiro.au>
+ Fixed overrun in <devfs_link> by removing function (not needed).
+ v0.118
+ 20011009 Richard Gooch <rgooch@atnf.csiro.au>
+ Fixed buffer underrun in <try_modload>.
+ Moved down_read() from <search_for_entry_in_dir> to <find_entry>
+ v0.119
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
-#define DEVFS_VERSION "0.117 (20010927)"
+#define DEVFS_VERSION "0.119 (20011009)"
#define DEVFS_NAME "devfs"
if (curr == NULL) return NULL;
if (!S_ISLNK (curr->mode) || !traverse_symlink) return curr;
/* Need to follow the link: this is a stack chomper */
- down_read (&symlink_rwsem);
retval = curr->registered ?
search_for_entry (parent, curr->u.symlink.linkname,
curr->u.symlink.length, FALSE, FALSE, NULL,
TRUE) : NULL;
- up_read (&symlink_rwsem);
return retval;
} /* End Function search_for_entry_in_dir */
++name;
--namelen;
}
+ if (traverse_symlink) down_read (&symlink_rwsem);
entry = search_for_entry (dir, name, namelen, FALSE, FALSE, NULL,
traverse_symlink);
+ if (traverse_symlink) up_read (&symlink_rwsem);
if (entry != NULL) return entry;
}
/* Have to search by major and minor: slow */
devfs_handle_t de;
if ( (name != NULL) && (name[0] == '\0') ) name = NULL;
- de = find_entry (dir, name, 0, major, minor, type,
- traverse_symlinks);
+ de = find_entry (dir, name, 0, major, minor, type, traverse_symlinks);
if (de == NULL) return NULL;
if (!de->registered) return NULL;
return de;
if ( !( fs_info->devfsd_event_mask & (1 << DEVFSD_NOTIFY_LOOKUP) ) )
return -ENOENT;
if ( is_devfsd_or_child (fs_info) ) return -ENOENT;
- if (namelen >= STRING_LENGTH) return -ENAMETOOLONG;
+ if (namelen >= STRING_LENGTH - 1) return -ENAMETOOLONG;
memcpy (buf + pos, name, namelen);
buf[STRING_LENGTH - 1] = '\0';
if (parent->parent != NULL) pos = devfs_generate_path (parent, buf, pos);
return NULL;
} /* End Function devfs_lookup */
-static int devfs_link (struct dentry *old_dentry, struct inode *dir,
- struct dentry *dentry)
-{
- /*struct inode *inode = old_dentry->d_inode;*/
- char txt[STRING_LENGTH];
-
- memset (txt, 0, STRING_LENGTH);
- memcpy (txt, old_dentry->d_name.name, old_dentry->d_name.len);
- txt[STRING_LENGTH - 1] = '\0';
- printk ("%s: link of \"%s\"\n", DEVFS_NAME, txt);
- return -EPERM;
-} /* End Function devfs_link */
-
static int devfs_unlink (struct inode *dir, struct dentry *dentry)
{
struct devfs_entry *de;
static struct inode_operations devfs_dir_iops =
{
lookup: devfs_lookup,
- link: devfs_link,
unlink: devfs_unlink,
symlink: devfs_symlink,
mkdir: devfs_mkdir,
{
return ext2_update_inode (inode, 1);
}
-
-int ext2_notify_change(struct dentry *dentry, struct iattr *iattr)
-{
- struct inode *inode = dentry->d_inode;
- int retval;
- unsigned int flags;
-
- retval = -EPERM;
- if (iattr->ia_valid & ATTR_ATTR_FLAG &&
- ((!(iattr->ia_attr_flags & ATTR_FLAG_APPEND) !=
- !(inode->u.ext2_i.i_flags & EXT2_APPEND_FL)) ||
- (!(iattr->ia_attr_flags & ATTR_FLAG_IMMUTABLE) !=
- !(inode->u.ext2_i.i_flags & EXT2_IMMUTABLE_FL)))) {
- if (!capable(CAP_LINUX_IMMUTABLE))
- goto out;
- } else if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
- goto out;
-
- retval = inode_change_ok(inode, iattr);
- if (retval != 0 || (((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
- (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) &&
- DQUOT_TRANSFER(inode, iattr)))
- goto out;
-
- inode_setattr(inode, iattr);
-
- flags = iattr->ia_attr_flags;
- if (flags & ATTR_FLAG_SYNCRONOUS) {
- inode->i_flags |= S_SYNC;
- inode->u.ext2_i.i_flags |= EXT2_SYNC_FL;
- } else {
- inode->i_flags &= ~S_SYNC;
- inode->u.ext2_i.i_flags &= ~EXT2_SYNC_FL;
- }
- if (flags & ATTR_FLAG_NOATIME) {
- inode->i_flags |= S_NOATIME;
- inode->u.ext2_i.i_flags |= EXT2_NOATIME_FL;
- } else {
- inode->i_flags &= ~S_NOATIME;
- inode->u.ext2_i.i_flags &= ~EXT2_NOATIME_FL;
- }
- if (flags & ATTR_FLAG_APPEND) {
- inode->i_flags |= S_APPEND;
- inode->u.ext2_i.i_flags |= EXT2_APPEND_FL;
- } else {
- inode->i_flags &= ~S_APPEND;
- inode->u.ext2_i.i_flags &= ~EXT2_APPEND_FL;
- }
- if (flags & ATTR_FLAG_IMMUTABLE) {
- inode->i_flags |= S_IMMUTABLE;
- inode->u.ext2_i.i_flags |= EXT2_IMMUTABLE_FL;
- } else {
- inode->i_flags &= ~S_IMMUTABLE;
- inode->u.ext2_i.i_flags &= ~EXT2_IMMUTABLE_FL;
- }
- mark_inode_dirty(inode);
-out:
- return retval;
-}
-
}
/*
- * Yes, this really increments the link_count by 5, and
- * decrements it by 4. Together with checking against 40,
- * this limits recursive symlink follows to 8, while
+ * This limits recursive symlink follows to 8, while
* limiting consecutive symlinks to 40.
*
* Without that kind of total limit, nasty chains of consecutive
static inline int do_follow_link(struct dentry *dentry, struct nameidata *nd)
{
int err;
- if (current->link_count >= 40)
+ if (current->link_count >= 5)
+ goto loop;
+ if (current->total_link_count >= 40)
goto loop;
if (current->need_resched) {
current->state = TASK_RUNNING;
schedule();
}
- current->link_count += 5;
+ current->link_count++;
+ current->total_link_count++;
UPDATE_ATIME(dentry->d_inode);
err = dentry->d_inode->i_op->follow_link(dentry, nd);
- current->link_count -= 4;
+ current->link_count--;
return err;
loop:
path_release(nd);
int path_walk(const char * name, struct nameidata *nd)
{
- current->link_count = 0;
+ current->total_link_count = 0;
return link_path_walk(name, nd);
}
struct vfsmount *p;
LIST_HEAD(kill);
- if (list_empty(&mnt->mnt_list))
- return;
-
for (p = mnt; p; p = next_mnt(p, mnt)) {
list_del(&p->mnt_list);
list_add(&p->mnt_list, &kill);
}
retval = -EBUSY;
if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
- umount_tree(mnt);
+ if (!list_empty(&mnt->mnt_list))
+ umount_tree(mnt);
retval = 0;
}
spin_unlock(&dcache_lock);
if (mnt) {
err = graft_tree(mnt, nd);
- if (err && recurse)
+ if (err)
umount_tree(mnt);
- mntput(mnt);
+ else
+ mntput(mnt);
}
up(&mount_sem);
/* permission checks */
-static int standard_permission(struct inode *inode, int mask)
-{
- int mode = inode->i_mode;
-
- if ((mask & S_IWOTH) && IS_RDONLY(inode) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS; /* Nobody gets write access to a read-only fs */
- else if ((mask & S_IWOTH) && IS_IMMUTABLE(inode))
- return -EACCES; /* Nobody gets write access to an immutable file */
- else if (current->fsuid == inode->i_uid)
- mode >>= 6;
- else if (in_group_p(inode->i_gid))
- mode >>= 3;
- if (((mode & mask & S_IRWXO) == mask) || capable(CAP_DAC_OVERRIDE))
- return 0;
- /* read and search access */
- if ((mask == S_IROTH) ||
- (S_ISDIR(mode) && !(mask & ~(S_IROTH | S_IXOTH))))
- if (capable(CAP_DAC_READ_SEARCH))
- return 0;
- return -EACCES;
-}
-
static int proc_check_root(struct inode *inode)
{
struct dentry *de, *base, *root;
static int proc_permission(struct inode *inode, int mask)
{
- if (standard_permission(inode, mask) != 0)
+ if (vfs_permission(inode, mask) != 0)
return -EACCES;
return proc_check_root(inode);
}
static struct super_block * read_super(kdev_t dev, struct block_device *bdev,
struct file_system_type *type, int flags,
- void *data, int silent)
+ void *data)
{
struct super_block * s;
s = alloc_super();
spin_unlock(&sb_lock);
down_write(&s->s_umount);
lock_super(s);
- if (!type->read_super(s, data, silent))
+ if (!type->read_super(s, data, flags & MS_VERBOSE ? 1 : 0))
goto out_fail;
unlock_super(s);
/* tell bdcache that we are going to keep this one */
error = -EINVAL;
lock_super(s);
- if (!fs_type->read_super(s, data, 0))
+ if (!fs_type->read_super(s, data, flags & MS_VERBOSE ? 1 : 0))
goto out_fail;
unlock_super(s);
get_filesystem(fs_type);
if (dev) {
struct super_block * sb;
error = -EINVAL;
- sb = read_super(dev, NULL, fs_type, flags, data, 0);
+ sb = read_super(dev, NULL, fs_type, flags, data);
if (sb) {
get_filesystem(fs_type);
return sb;
s->s_count += S_BIAS;
spin_unlock(&sb_lock);
lock_super(s);
- if (!fs_type->read_super(s, data, 0))
+ if (!fs_type->read_super(s, data, flags & MS_VERBOSE ? 1 : 0))
goto out_fail;
unlock_super(s);
get_filesystem(fs_type);
return do_kern_mount((char *)type->name, 0, (char *)type->name, NULL);
}
+static char * __initdata root_fs_names;
+static int __init fs_names_setup(char *str)
+{
+ root_fs_names = str;
+ return 0;
+}
+
+__setup("rootfstype=", fs_names_setup);
+
+static void __init get_fs_names(char *page)
+{
+ char *s = page;
+
+ if (root_fs_names) {
+ strcpy(page, root_fs_names);
+ while (*s++) {
+ if (s[-1] == ',')
+ s[-1] = '\0';
+ }
+ } else {
+ int len = get_filesystem_list(page);
+ char *p, *next;
+
+ page[len] = '\0';
+ for (p = page-1; p; p = next) {
+ next = strchr(++p, '\n');
+ if (*p++ != '\t')
+ continue;
+ while ((*s++ = *p++) != '\n')
+ ;
+ s[-1] = '\0';
+ }
+ }
+ *s = '\0';
+}
+
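get_fs_names() turns the new rootfstype= boot option into a NUL-separated list of filesystem names, so mount_root() below only tries the types the user asked for; with no option it falls back to every registered filesystem that requires a block device. A hypothetical command line (device name made up for illustration):

    root=/dev/hda1 rootfstype=ext2,minix

Here only ext2 and then minix would be attempted, in that order, assuming both drivers are built into the kernel.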
void __init mount_root(void)
{
struct nameidata root_nd;
- struct file_system_type * fs_type;
struct super_block * sb;
struct vfsmount *vfsmnt;
struct block_device *bdev = NULL;
char path[64];
int path_start = -1;
char *name = "/dev/root";
-
+ char *fs_names, *p;
#ifdef CONFIG_ROOT_NFS
void *data;
+#endif
+ root_mountflags |= MS_VERBOSE;
+
+#ifdef CONFIG_ROOT_NFS
if (MAJOR(ROOT_DEV) != UNNAMED_MAJOR)
goto skip_nfs;
- fs_type = get_fs_type("nfs");
- if (!fs_type)
- goto no_nfs;
- ROOT_DEV = get_unnamed_dev();
- if (!ROOT_DEV)
- /*
- * Your /linuxrc sucks worse than MSExchange - that's the
- * only way you could run out of anon devices at that point.
- */
- goto no_anon;
data = nfs_root_data();
if (!data)
- goto no_server;
- sb = read_super(ROOT_DEV, NULL, fs_type, root_mountflags, data, 1);
- if (sb)
- /*
- * We _can_ fail there, but if that will happen we have no
- * chance anyway (no memory for vfsmnt and we _will_ need it,
- * no matter which fs we try to mount).
- */
- goto mount_it;
-no_server:
- put_unnamed_dev(ROOT_DEV);
-no_anon:
- put_filesystem(fs_type);
+ goto no_nfs;
+ vfsmnt = do_kern_mount("nfs", root_mountflags, "/dev/root", data);
+ if (!IS_ERR(vfsmnt)) {
+ printk ("VFS: Mounted root (%s filesystem).\n", "nfs");
+ ROOT_DEV = vfsmnt->mnt_sb->s_dev;
+ goto attach_it;
+ }
no_nfs:
printk(KERN_ERR "VFS: Unable to mount root fs via NFS, trying floppy.\n");
ROOT_DEV = MKDEV(FLOPPY_MAJOR, 0);
}
#endif
+ fs_names = __getname();
+ get_fs_names(fs_names);
+
devfs_make_root (root_device_name);
handle = devfs_find_handle (NULL, ROOT_DEVICE_NAME,
MAJOR (ROOT_DEV), MINOR (ROOT_DEV),
sb = get_super(ROOT_DEV);
if (sb) {
/* FIXME */
- fs_type = sb->s_type;
+ p = (char *)sb->s_type->name;
atomic_inc(&sb->s_active);
up_read(&sb->s_umount);
down_write(&sb->s_umount);
goto mount_it;
}
- read_lock(&file_systems_lock);
- for (fs_type = file_systems ; fs_type ; fs_type = fs_type->next) {
- if (!(fs_type->fs_flags & FS_REQUIRES_DEV))
+ for (p = fs_names; *p; p += strlen(p)+1) {
+ struct file_system_type * fs_type = get_fs_type(p);
+ if (!fs_type)
continue;
- if (!try_inc_mod_count(fs_type->owner))
- continue;
- read_unlock(&file_systems_lock);
- sb = read_super(ROOT_DEV,bdev,fs_type,root_mountflags,NULL,1);
+ sb = read_super(ROOT_DEV,bdev,fs_type,root_mountflags,NULL);
if (sb)
goto mount_it;
- read_lock(&file_systems_lock);
put_filesystem(fs_type);
}
- read_unlock(&file_systems_lock);
panic("VFS: Unable to mount root fs on %s", kdevname(ROOT_DEV));
mount_it:
/* FIXME */
up_write(&sb->s_umount);
- printk ("VFS: Mounted root (%s filesystem)%s.\n",
- fs_type->name,
+ printk ("VFS: Mounted root (%s filesystem)%s.\n", p,
(sb->s_flags & MS_RDONLY) ? " readonly" : "");
+ putname(fs_names);
if (path_start >= 0) {
name = path + path_start;
devfs_mk_symlink (NULL, "root", DEVFS_FL_DEFAULT,
set_devname(vfsmnt, name);
vfsmnt->mnt_sb = sb;
vfsmnt->mnt_root = dget(sb->s_root);
+ bdput(bdev); /* sb holds a reference */
+attach_it:
root_nd.mnt = root_vfsmnt;
root_nd.dentry = root_vfsmnt->mnt_sb->s_root;
graft_tree(vfsmnt, &root_nd);
- mntput(vfsmnt);
- /* FIXME: if something will try to umount us right now... */
- if (vfsmnt) {
- set_fs_root(current->fs, vfsmnt, sb->s_root);
- set_fs_pwd(current->fs, vfsmnt, sb->s_root);
- if (bdev)
- bdput(bdev); /* sb holds a reference */
- return;
- }
+ set_fs_root(current->fs, vfsmnt, vfsmnt->mnt_root);
+ set_fs_pwd(current->fs, vfsmnt, vfsmnt->mnt_root);
+
+ mntput(vfsmnt);
}
# define INT_DELIVERY_MODE 1 /* logical delivery broadcast to all procs */
# endif
#else
-# define INT_DELIVERY_MODE 0 /* physical delivery on LOCAL quad */
+# define INT_DELIVERY_MODE 1 /* logical delivery */
# define TARGET_CPUS 0x01
#endif
-/* $Id: processor.h,v 1.82 2001/09/20 00:35:34 davem Exp $
+/* $Id: processor.h,v 1.83 2001/10/08 09:32:13 davem Exp $
* include/asm-sparc/processor.h
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: unistd.h,v 1.70 2000/08/14 05:39:07 jj Exp $ */
+/* $Id: unistd.h,v 1.71 2001/10/09 10:54:39 davem Exp $ */
#ifndef _SPARC_UNISTD_H
#define _SPARC_UNISTD_H
/* #define __NR_adjtime 140 SunOS Specific */
#define __NR_getpeername 141 /* Common */
/* #define __NR_gethostid 142 SunOS Specific */
-/* #define __NR_ni_syscall 143 ENOSYS under SunOS */
+#define __NR_gettid 143 /* ENOSYS under SunOS */
#define __NR_getrlimit 144 /* Common */
#define __NR_setrlimit 145 /* Common */
#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
-/* $Id: processor.h,v 1.75 2001/09/20 00:35:34 davem Exp $
+/* $Id: processor.h,v 1.76 2001/10/08 09:32:13 davem Exp $
* include/asm-sparc64/processor.h
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
-/* $Id: unistd.h,v 1.47 2000/08/14 05:39:07 jj Exp $ */
+/* $Id: unistd.h,v 1.48 2001/10/09 10:54:39 davem Exp $ */
#ifndef _SPARC64_UNISTD_H
#define _SPARC64_UNISTD_H
/* #define __NR_adjtime 140 SunOS Specific */
#define __NR_getpeername 141 /* Common */
/* #define __NR_gethostid 142 SunOS Specific */
-/* #define __NR_ni_syscall 143 ENOSYS under SunOS */
+#define __NR_gettid 143 /* ENOSYS under SunOS */
#define __NR_getrlimit 144 /* Common */
#define __NR_setrlimit 145 /* Common */
#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
# define ATTRIB_NORET __attribute__((noreturn))
# define NORET_AND noreturn,
-/* acl.c */
-extern int ext2_permission (struct inode *, int);
-
/* balloc.c */
extern int ext2_bg_has_super(struct super_block *sb, int group);
extern unsigned long ext2_bg_num_gdb(struct super_block *sb, int group);
#define MS_NODIRATIME 2048 /* Do not update directory access times */
#define MS_BIND 4096
#define MS_REC 16384
+#define MS_VERBOSE 32768
#define MS_NOUSER (1<<31)
/*
}
}
+/*
+ * Network interface message level settings
+ */
+#define HAVE_NETIF_MSG 1
+
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+};
+
+#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
+
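These macros assume the driver keeps a NETIF_MSG_* bitmask called msg_enable in its private structure. A minimal sketch of the intended usage, with a made-up private struct and message (assumes <linux/netdevice.h> and <linux/kernel.h>):

struct example_priv {			/* hypothetical driver private data */
	u32 msg_enable;			/* NETIF_MSG_* bits */
	/* ... device state ... */
};

static void example_report_link(struct net_device *dev, int up)
{
	struct example_priv *ep = (struct example_priv *) dev->priv;

	if (netif_msg_link(ep))		/* log only if NETIF_MSG_LINK is set */
		printk(KERN_INFO "%s: link %s\n",
		       dev->name, up ? "up" : "down");
}

A driver would normally let the user tune msg_enable via a module parameter, for example.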
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);
IEEE1284_PH_REV_DATA,
IEEE1284_PH_ECP_SETUP,
IEEE1284_PH_ECP_FWD_TO_REV,
- IEEE1284_PH_ECP_REV_TO_FWD
+ IEEE1284_PH_ECP_REV_TO_FWD,
+ IEEE1284_PH_ECP_DIR_UNKNOWN,
};
struct ieee1284_info {
int mode;
#define PCI_DEVICE_ID_TIMEDIA_1889 0x7168
#define PCI_VENDOR_ID_OXSEMI 0x1415
+#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x950A
#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
+#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
#define PCI_VENDOR_ID_AIRONET 0x14b9
#define PCI_DEVICE_ID_AIRONET_4800_1 0x0001
#define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400
#define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402
-#define PCI_VENDOR_ID_AFAVLAB 0x14db
-#define PCI_DEVICE_ID_AFAVLAB_TK9902 0x2120
-
#define PCI_VENDOR_ID_BROADCOM 0x14e4
#define PCI_DEVICE_ID_TIGON3 0x1644
unsigned short used_math;
char comm[16];
/* file system info */
- int link_count;
+ int link_count, total_link_count;
struct tty_struct *tty; /* NULL if no tty */
unsigned int locks; /* How many file locks are being held */
/* ipc stuff */
unsigned long val;
} swp_entry_t;
+extern atomic_t shmem_nrpages;
+
struct shmem_inode_info {
- spinlock_t lock;
- unsigned long max_index;
- swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* for the first blocks */
- swp_entry_t **i_indirect; /* doubly indirect blocks */
- unsigned long swapped;
- int locked; /* into memory */
+ spinlock_t lock;
+ struct semaphore sem;
+ unsigned long next_index;
+ swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* for the first blocks */
+ void **i_indirect; /* indirect blocks */
+ unsigned long swapped;
+ int locked; /* into memory */
struct list_head list;
+ struct inode *inode;
};
struct shmem_sb_info {
spinlock_t stat_lock;
};
+#define SHMEM_I(inode) (&inode->u.shmem_i)
+
#endif
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/locks.h>
-#include <asm/smplock.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC 0x01021994
-#define ENTRIES_PER_PAGE (PAGE_SIZE/sizeof(unsigned long))
-#define SHMEM_MAX_BLOCKS (SHMEM_NR_DIRECT + ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
+#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
+
+#define SHMEM_SB(sb) (&sb->u.shmem_sb)
static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct inode_operations shmem_inode_operations;
static struct file_operations shmem_dir_operations;
static struct inode_operations shmem_dir_inode_operations;
-static struct inode_operations shmem_symlink_inode_operations;
static struct vm_operations_struct shmem_vm_ops;
LIST_HEAD (shmem_inodes);
static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED;
+atomic_t shmem_nrpages = ATOMIC_INIT(0); /* Not used right now */
-#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
+#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
/*
* shmem_recalc_inode - recalculate the size of an inode
*
* @inode: inode to recalc
+ * @swap: additional swap pages freed externally
*
* We have to calculate the free blocks since the mm can drop pages
* behind our back
*
* So the mm freed
* inodes->i_blocks/BLOCKS_PER_PAGE -
- * (inode->i_mapping->nrpages + info->swapped)
+ * (inode->i_mapping->nrpages + info->swapped)
*
* It has to be called with the spinlock held.
*/
unsigned long freed;
freed = (inode->i_blocks/BLOCKS_PER_PAGE) -
- (inode->i_mapping->nrpages + inode->u.shmem_i.swapped);
+ (inode->i_mapping->nrpages + SHMEM_I(inode)->swapped);
if (freed){
- struct shmem_sb_info * info = &inode->i_sb->u.shmem_sb;
+ struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb);
inode->i_blocks -= freed*BLOCKS_PER_PAGE;
- spin_lock (&info->stat_lock);
- info->free_blocks += freed;
- spin_unlock (&info->stat_lock);
+ spin_lock (&sbinfo->stat_lock);
+ sbinfo->free_blocks += freed;
+ spin_unlock (&sbinfo->stat_lock);
}
}
-static swp_entry_t * shmem_swp_entry (struct shmem_inode_info *info, unsigned long index)
+/*
+ * shmem_swp_entry - find the swap vector position in the info structure
+ *
+ * @info: info structure for the inode
+ * @index: index of the page to find
+ * @page: optional page to add to the structure. Has to be preset to
+ * all zeros
+ *
+ * If there is no space allocated yet it will return -ENOMEM when
+ * page == 0 else it will use the page for the needed block.
+ *
+ * returns -EFBIG if the index is too big.
+ *
+ *
+ * The swap vector is organized the following way:
+ *
+ * There are SHMEM_NR_DIRECT entries directly stored in the
+ * shmem_inode_info structure. So small files do not need an addional
+ * shmem_inode_info structure. So small files do not need an additional
+ *
+ * For pages with index > SHMEM_NR_DIRECT there is the pointer
+ * i_indirect which points to a page which holds in the first half
+ * doubly indirect blocks, in the second half triple indirect blocks:
+ *
+ * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
+ * following layout (for SHMEM_NR_DIRECT == 16):
+ *
+ * i_indirect -> dir --> 16-19
+ * | +-> 20-23
+ * |
+ * +-->dir2 --> 24-27
+ * | +-> 28-31
+ * | +-> 32-35
+ * | +-> 36-39
+ * |
+ * +-->dir3 --> 40-43
+ * +-> 44-47
+ * +-> 48-51
+ * +-> 52-55
+ */
+
+#define SHMEM_MAX_BLOCKS (SHMEM_NR_DIRECT + ENTRIES_PER_PAGE * ENTRIES_PER_PAGE/2*(ENTRIES_PER_PAGE+1))
+
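As a worked example of the limit this encodes: on a 32-bit machine with 4 KiB pages, ENTRIES_PER_PAGE is 4096/4 = 1024, so with the SHMEM_NR_DIRECT of 16 used in the layout sketch above, SHMEM_MAX_BLOCKS = 16 + 1024 * 512 * 1025 = 537,395,216 pages, i.e. roughly 2 TB of data per tmpfs file.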
+static swp_entry_t * shmem_swp_entry (struct shmem_inode_info *info, unsigned long index, unsigned long page)
{
unsigned long offset;
+ void **dir;
if (index < SHMEM_NR_DIRECT)
return info->i_direct+index;
offset = index % ENTRIES_PER_PAGE;
index /= ENTRIES_PER_PAGE;
- if (index >= ENTRIES_PER_PAGE)
- return ERR_PTR(-EFBIG);
-
if (!info->i_indirect) {
- info->i_indirect = (swp_entry_t **) get_zeroed_page(GFP_USER);
- if (!info->i_indirect)
+ info->i_indirect = (void *) page;
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dir = info->i_indirect + index;
+ if (index >= ENTRIES_PER_PAGE/2) {
+ index -= ENTRIES_PER_PAGE/2;
+ dir = info->i_indirect + ENTRIES_PER_PAGE/2
+ + index/ENTRIES_PER_PAGE;
+ index %= ENTRIES_PER_PAGE;
+
+ if(!*dir) {
+ *dir = (void *) page;
+ /* We return since we will need another page
+ in the next step */
return ERR_PTR(-ENOMEM);
+ }
+ dir = ((void **)*dir) + index;
}
- if(!(info->i_indirect[index])) {
- info->i_indirect[index] = (swp_entry_t *) get_zeroed_page(GFP_USER);
- if (!info->i_indirect[index])
+ if (!*dir) {
+ if (!page)
return ERR_PTR(-ENOMEM);
+ *dir = (void *)page;
}
-
- return info->i_indirect[index]+offset;
+ return ((swp_entry_t *)*dir) + offset;
+}
+
+/*
+ * shmem_alloc_entry - get the position of the swap entry for the
+ * page. If it does not exist allocate the entry
+ *
+ * @info: info structure for the inode
+ * @index: index of the page to find
+ */
+static inline swp_entry_t * shmem_alloc_entry (struct shmem_inode_info *info, unsigned long index)
+{
+ unsigned long page = 0;
+ swp_entry_t * res;
+
+ if (index >= SHMEM_MAX_BLOCKS)
+ return ERR_PTR(-EFBIG);
+
+ if (info->next_index <= index)
+ info->next_index = index + 1;
+
+ while ((res = shmem_swp_entry(info,index,page)) == ERR_PTR(-ENOMEM)) {
+ page = get_zeroed_page(GFP_USER);
+ if (!page)
+ break;
+ }
+ return res;
}
+/*
+ * shmem_free_swp - free some swap entries in a directory
+ *
+ * @dir: pointer to the directory
+ * @count: number of entries to scan
+ */
static int shmem_free_swp(swp_entry_t *dir, unsigned int count)
{
swp_entry_t *ptr, entry;
}
/*
- * shmem_truncate_part - free a bunch of swap entries
- *
- * @dir: pointer to swp_entries
- * @size: number of entries in dir
- * @start: offset to start from
- * @freed: counter for freed pages
+ * shmem_truncate_direct - free the swap entries of a whole doubly
+ * indirect block
*
- * It frees the swap entries from dir+start til dir+size
+ * @dir: pointer to the pointer to the block
+ * @start: offset to start from (in pages)
+ * @len: how many pages are stored in this block
*
- * returns 0 if it truncated something, else (offset-size)
+ * Returns the number of freed swap entries.
*/
-static unsigned long
-shmem_truncate_part (swp_entry_t * dir, unsigned long size,
- unsigned long start, unsigned long *freed) {
- if (start > size)
- return start - size;
- if (dir)
- *freed += shmem_free_swp (dir+start, size-start);
+static inline unsigned long
+shmem_truncate_direct(swp_entry_t *** dir, unsigned long start, unsigned long len) {
+ swp_entry_t **last, **ptr;
+ unsigned long off, freed = 0;
+
+ if (!*dir)
+ return 0;
+
+ last = *dir + (len + ENTRIES_PER_PAGE-1) / ENTRIES_PER_PAGE;
+ off = start % ENTRIES_PER_PAGE;
+
+ for (ptr = *dir + start/ENTRIES_PER_PAGE; ptr < last; ptr++) {
+ if (!*ptr) {
+ off = 0;
+ continue;
+ }
+
+ if (!off) {
+ freed += shmem_free_swp(*ptr, ENTRIES_PER_PAGE);
+ free_page ((unsigned long) *ptr);
+ *ptr = 0;
+ } else {
+ freed += shmem_free_swp(*ptr+off,ENTRIES_PER_PAGE-off);
+ off = 0;
+ }
+ }
- return 0;
+ if (!start) {
+ free_page((unsigned long) *dir);
+ *dir = 0;
+ }
+ return freed;
+}
+
+/*
+ * shmem_truncate_indirect - truncate an inode
+ *
+ * @info: the info structure of the inode
+ * @index: the index to truncate
+ *
+ * This function locates the last doubly indirect block and then
+ * calls shmem_truncate_direct to do the real work.
+ */
+static inline unsigned long
+shmem_truncate_indirect(struct shmem_inode_info *info, unsigned long index)
+{
+ swp_entry_t ***base;
+ unsigned long baseidx, len, start;
+ unsigned long max = info->next_index-1;
+
+ if (max < SHMEM_NR_DIRECT) {
+ info->next_index = index;
+ return shmem_free_swp(info->i_direct + index,
+ SHMEM_NR_DIRECT - index);
+ }
+
+ if (max < ENTRIES_PER_PAGE * ENTRIES_PER_PAGE/2 + SHMEM_NR_DIRECT) {
+ max -= SHMEM_NR_DIRECT;
+ base = (swp_entry_t ***) &info->i_indirect;
+ baseidx = SHMEM_NR_DIRECT;
+ len = max+1;
+ } else {
+ max -= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2+SHMEM_NR_DIRECT;
+ if (max >= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2)
+ BUG();
+
+ baseidx = max & ~(ENTRIES_PER_PAGE*ENTRIES_PER_PAGE-1);
+ base = (swp_entry_t ***) info->i_indirect + ENTRIES_PER_PAGE/2 + baseidx/ENTRIES_PER_PAGE/ENTRIES_PER_PAGE ;
+ len = max - baseidx + 1;
+ baseidx += ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2+SHMEM_NR_DIRECT;
+ }
+
+ if (index > baseidx) {
+ info->next_index = index;
+ start = index - baseidx;
+ } else {
+ info->next_index = baseidx;
+ start = 0;
+ }
+ return shmem_truncate_direct(base, start, len);
}
static void shmem_truncate (struct inode * inode)
{
- int clear_base;
- unsigned long index, start;
+ unsigned long index;
unsigned long freed = 0;
- swp_entry_t **base, **ptr, **last;
- struct shmem_inode_info * info = &inode->u.shmem_i;
+ struct shmem_inode_info * info = SHMEM_I(inode);
+ down(&info->sem);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
spin_lock (&info->lock);
index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (index > info->max_index)
- goto out;
- start = shmem_truncate_part (info->i_direct, SHMEM_NR_DIRECT, index, &freed);
+ while (index < info->next_index)
+ freed += shmem_truncate_indirect(info, index);
- if (!(base = info->i_indirect))
- goto out;
+ info->swapped -= freed;
+ shmem_recalc_inode(inode);
+ spin_unlock (&info->lock);
+ up(&info->sem);
+}
- clear_base = 1;
- last = base + ((info->max_index - SHMEM_NR_DIRECT + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE);
- for (ptr = base; ptr < last; ptr++) {
- if (!start) {
- if (!*ptr)
- continue;
- freed += shmem_free_swp (*ptr, ENTRIES_PER_PAGE);
- free_page ((unsigned long) *ptr);
- *ptr = 0;
- continue;
- }
- clear_base = 0;
- start = shmem_truncate_part (*ptr, ENTRIES_PER_PAGE, start, &freed);
+static void shmem_delete_inode(struct inode * inode)
+{
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+ inode->i_size = 0;
+ if (inode->i_op->truncate == shmem_truncate){
+ spin_lock (&shmem_ilock);
+ list_del (&SHMEM_I(inode)->list);
+ spin_unlock (&shmem_ilock);
+ shmem_truncate (inode);
}
+ spin_lock (&sbinfo->stat_lock);
+ sbinfo->free_inodes++;
+ spin_unlock (&sbinfo->stat_lock);
+ clear_inode(inode);
+}
- if (clear_base) {
- free_page ((unsigned long)base);
- info->i_indirect = 0;
+static int shmem_clear_swp (swp_entry_t entry, swp_entry_t *ptr, int size) {
+ swp_entry_t *test;
+
+ for (test = ptr; test < ptr + size; test++) {
+ if (test->val == entry.val) {
+ swap_free (entry);
+ *test = (swp_entry_t) {0};
+ return test - ptr;
+ }
}
+ return -1;
+}
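A small standalone illustration of the return convention used by shmem_clear_swp() above: the offset of the matching entry if one was found and cleared, -1 otherwise. The swp_entry_t typedef and clear_swp() here are userspace stand-ins, and the kernel-side swap_free() call is omitted.

#include <assert.h>

typedef struct { unsigned long val; } swp_entry_t;	/* userspace stand-in */

/* Same linear scan as shmem_clear_swp(), minus swap_free(). */
static int clear_swp(swp_entry_t entry, swp_entry_t *ptr, int size)
{
	swp_entry_t *test;

	for (test = ptr; test < ptr + size; test++) {
		if (test->val == entry.val) {
			*test = (swp_entry_t) {0};
			return test - ptr;
		}
	}
	return -1;
}

int main(void)
{
	swp_entry_t dir[4] = { {0}, {42}, {0}, {7} };
	swp_entry_t entry = { 42 };

	assert(clear_swp(entry, dir, 4) == 1);	/* found at offset 1 ... */
	assert(dir[1].val == 0);		/* ... and cleared in place */
	entry.val = 99;
	assert(clear_swp(entry, dir, 4) == -1);	/* not present */
	return 0;
}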
-out:
- /*
- * We have no chance to give an error, so we limit it to max
- * size here and the application will fail later
- */
- if (index > SHMEM_MAX_BLOCKS)
- info->max_index = SHMEM_MAX_BLOCKS;
- else
- info->max_index = index;
- info->swapped -= freed;
- shmem_recalc_inode(inode);
+static int shmem_unuse_inode (struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
+{
+ swp_entry_t *ptr;
+ unsigned long idx;
+ int offset;
+
+ idx = 0;
+ spin_lock (&info->lock);
+ offset = shmem_clear_swp (entry, info->i_direct, SHMEM_NR_DIRECT);
+ if (offset >= 0)
+ goto found;
+
+ for (idx = SHMEM_NR_DIRECT; idx < info->next_index;
+ idx += ENTRIES_PER_PAGE) {
+ ptr = shmem_swp_entry(info, idx, 0);
+ if (IS_ERR(ptr))
+ continue;
+ offset = shmem_clear_swp (entry, ptr, ENTRIES_PER_PAGE);
+ if (offset >= 0)
+ goto found;
+ }
spin_unlock (&info->lock);
+ return 0;
+found:
+ add_to_page_cache(page, info->inode->i_mapping, offset + idx);
+ SetPageDirty(page);
+ SetPageUptodate(page);
+ UnlockPage(page);
+ info->swapped--;
+ spin_unlock(&info->lock);
+ return 1;
}
-static void shmem_delete_inode(struct inode * inode)
+/*
+ * shmem_unuse() - search for a swapped-out shmem page and, if found,
+ * move it back into the page cache.
+ */
+void shmem_unuse(swp_entry_t entry, struct page *page)
{
- struct shmem_sb_info *info = &inode->i_sb->u.shmem_sb;
+ struct list_head *p;
+ struct shmem_inode_info * info;
spin_lock (&shmem_ilock);
- list_del (&inode->u.shmem_i.list);
+ list_for_each(p, &shmem_inodes) {
+ info = list_entry(p, struct shmem_inode_info, list);
+
+ if (shmem_unuse_inode(info, entry, page))
+ break;
+ }
spin_unlock (&shmem_ilock);
- inode->i_size = 0;
- shmem_truncate (inode);
- spin_lock (&info->stat_lock);
- info->free_inodes++;
- spin_unlock (&info->stat_lock);
- clear_inode(inode);
}
/*
mapping = page->mapping;
index = page->index;
inode = mapping->host;
- info = &inode->u.shmem_i;
+ info = SHMEM_I(inode);
getswap:
swap = get_swap_page();
if (!swap.val) {
}
spin_lock(&info->lock);
- entry = shmem_swp_entry(info, index);
+ entry = shmem_swp_entry(info, index, 0);
if (IS_ERR(entry)) /* this had been allocated on page allocation */
BUG();
shmem_recalc_inode(inode);
* still need to guard against racing with shm_writepage(), which might
* be trying to move the page to the swap cache as we run.
*/
-static struct page * shmem_getpage_locked(struct inode * inode, unsigned long idx)
+static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct inode * inode, unsigned long idx)
{
struct address_space * mapping = inode->i_mapping;
- struct shmem_inode_info *info;
+ struct shmem_sb_info *sbinfo;
struct page * page;
swp_entry_t *entry;
- info = &inode->u.shmem_i;
-
repeat:
page = find_lock_page(mapping, idx);
if (page)
return page;
- entry = shmem_swp_entry (info, idx);
+ entry = shmem_alloc_entry (info, idx);
if (IS_ERR(entry))
return (void *)entry;
spin_lock (&info->lock);
- /* The shmem_swp_entry() call may have blocked, and
+ /* The shmem_alloc_entry() call may have blocked, and
* shmem_writepage may have been moving a page between the page
* cache and swap cache. We need to recheck the page cache
* under the protection of the info->lock spinlock. */
swap_free(*entry);
*entry = (swp_entry_t) {0};
delete_from_swap_cache(page);
- flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_referenced | 1 << PG_arch_1);
+ flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_referenced) | (1 << PG_arch_1));
page->flags = flags | (1 << PG_dirty);
add_to_page_cache_locked(page, mapping, idx);
info->swapped--;
spin_unlock (&info->lock);
} else {
+ sbinfo = SHMEM_SB(inode->i_sb);
spin_unlock (&info->lock);
- spin_lock (&inode->i_sb->u.shmem_sb.stat_lock);
- if (inode->i_sb->u.shmem_sb.free_blocks == 0)
+ spin_lock (&sbinfo->stat_lock);
+ if (sbinfo->free_blocks == 0)
goto no_space;
- inode->i_sb->u.shmem_sb.free_blocks--;
- spin_unlock (&inode->i_sb->u.shmem_sb.stat_lock);
+ sbinfo->free_blocks--;
+ spin_unlock (&sbinfo->stat_lock);
/* Ok, get a new page. We don't have to worry about the
* info->lock spinlock here: we cannot race against
page_cache_get(page);
return page;
no_space:
- spin_unlock (&inode->i_sb->u.shmem_sb.stat_lock);
+ spin_unlock (&sbinfo->stat_lock);
return ERR_PTR(-ENOSPC);
wait_retry:
static int shmem_getpage(struct inode * inode, unsigned long idx, struct page **ptr)
{
+ struct shmem_inode_info *info = SHMEM_I(inode);
int error;
- down (&inode->i_sem);
+ down (&info->sem);
+ *ptr = ERR_PTR(-EFAULT);
if (inode->i_size <= (loff_t) idx * PAGE_CACHE_SIZE)
- goto sigbus;
- *ptr = shmem_getpage_locked(inode, idx);
+ goto failed;
+
+ *ptr = shmem_getpage_locked(info, inode, idx);
if (IS_ERR (*ptr))
goto failed;
+
UnlockPage(*ptr);
- up (&inode->i_sem);
+ up (&info->sem);
return 0;
failed:
- up (&inode->i_sem);
+ up (&info->sem);
error = PTR_ERR(*ptr);
- *ptr = NOPAGE_OOM;
- if (error != -EFBIG)
- *ptr = NOPAGE_SIGBUS;
- return error;
-sigbus:
- up (&inode->i_sem);
*ptr = NOPAGE_SIGBUS;
- return -EFAULT;
+ if (error == -ENOMEM)
+ *ptr = NOPAGE_OOM;
+ return error;
}
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int no_share)
unsigned int idx;
struct inode * inode = vma->vm_file->f_dentry->d_inode;
- idx = (address - vma->vm_start) >> PAGE_SHIFT;
+ idx = (address - vma->vm_start) >> PAGE_CACHE_SHIFT;
idx += vma->vm_pgoff;
if (shmem_getpage(inode, idx, &page))
void shmem_lock(struct file * file, int lock)
{
struct inode * inode = file->f_dentry->d_inode;
- struct shmem_inode_info * info = &inode->u.shmem_i;
+ struct shmem_inode_info * info = SHMEM_I(inode);
struct page * page;
unsigned long idx, size;
- if (info->locked == lock)
- return;
- down(&inode->i_sem);
+ down(&info->sem);
+ if (info->locked == lock)
+ goto out;
info->locked = lock;
size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
for (idx = 0; idx < size; idx++) {
}
UnlockPage(page);
}
- up(&inode->i_sem);
+out:
+ up(&info->sem);
}
static int shmem_mmap(struct file * file, struct vm_area_struct * vma)
struct inode *shmem_get_inode(struct super_block *sb, int mode, int dev)
{
struct inode * inode;
+ struct shmem_inode_info *info;
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- spin_lock (&sb->u.shmem_sb.stat_lock);
- if (!sb->u.shmem_sb.free_inodes) {
- spin_unlock (&sb->u.shmem_sb.stat_lock);
+ spin_lock (&sbinfo->stat_lock);
+ if (!sbinfo->free_inodes) {
+ spin_unlock (&sbinfo->stat_lock);
return NULL;
}
- sb->u.shmem_sb.free_inodes--;
- spin_unlock (&sb->u.shmem_sb.stat_lock);
+ sbinfo->free_inodes--;
+ spin_unlock (&sbinfo->stat_lock);
inode = new_inode(sb);
if (inode) {
inode->i_rdev = NODEV;
inode->i_mapping->a_ops = &shmem_aops;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- spin_lock_init (&inode->u.shmem_i.lock);
+ info = SHMEM_I(inode);
+ info->inode = inode;
+ spin_lock_init (&info->lock);
+ sema_init (&info->sem, 1);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
case S_IFREG:
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
+ spin_lock (&shmem_ilock);
+ list_add (&SHMEM_I(inode)->list, &shmem_inodes);
+ spin_unlock (&shmem_ilock);
break;
case S_IFDIR:
inode->i_nlink++;
inode->i_fop = &shmem_dir_operations;
break;
case S_IFLNK:
- inode->i_op = &shmem_symlink_inode_operations;
break;
}
- spin_lock (&shmem_ilock);
- list_add (&inode->u.shmem_i.list, &shmem_inodes);
- spin_unlock (&shmem_ilock);
}
return inode;
}
}
#ifdef CONFIG_TMPFS
+
+static struct inode_operations shmem_symlink_inode_operations;
+static struct inode_operations shmem_symlink_inline_operations;
+
static ssize_t
shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
+ struct shmem_inode_info *info;
unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
loff_t pos;
struct page *page;
__get_user(dummy, buf+bytes-1);
}
- page = shmem_getpage_locked(inode, index);
+ info = SHMEM_I(inode);
+ down (&info->sem);
+ page = shmem_getpage_locked(info, inode, index);
+ up (&info->sem);
+
status = PTR_ERR(page);
if (IS_ERR(page))
break;
}
kaddr = kmap(page);
-// can this do a truncated write? cr
status = copy_from_user(kaddr+offset, buf, bytes);
kunmap(page);
if (status)
buf += bytes;
if (pos > inode->i_size)
inode->i_size = pos;
- if (inode->u.shmem_i.max_index <= index)
- inode->u.shmem_i.max_index = index+1;
-
}
unlock:
/* Mark it unlocked again and drop the page.. */
static int shmem_statfs(struct super_block *sb, struct statfs *buf)
{
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+
buf->f_type = TMPFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
- spin_lock (&sb->u.shmem_sb.stat_lock);
- buf->f_blocks = sb->u.shmem_sb.max_blocks;
- buf->f_bavail = buf->f_bfree = sb->u.shmem_sb.free_blocks;
- buf->f_files = sb->u.shmem_sb.max_inodes;
- buf->f_ffree = sb->u.shmem_sb.free_inodes;
- spin_unlock (&sb->u.shmem_sb.stat_lock);
+ spin_lock (&sbinfo->stat_lock);
+ buf->f_blocks = sbinfo->max_blocks;
+ buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
+ buf->f_files = sbinfo->max_inodes;
+ buf->f_ffree = sbinfo->free_inodes;
+ spin_unlock (&sbinfo->stat_lock);
buf->f_namelen = 255;
return 0;
}
struct inode *inode;
struct page *page;
char *kaddr;
+ struct shmem_inode_info * info;
error = shmem_mknod(dir, dentry, S_IFLNK | S_IRWXUGO, 0);
if (error)
return error;
- len = strlen(symname);
- if (len > PAGE_SIZE)
+ len = strlen(symname) + 1;
+ if (len > PAGE_CACHE_SIZE)
return -ENAMETOOLONG;
inode = dentry->d_inode;
- down(&inode->i_sem);
- page = shmem_getpage_locked(inode, 0);
- if (IS_ERR(page))
- goto fail;
- kaddr = kmap(page);
- memcpy(kaddr, symname, len);
- kunmap(page);
+ info = SHMEM_I(inode);
inode->i_size = len;
- SetPageDirty(page);
- UnlockPage(page);
- page_cache_release(page);
- up(&inode->i_sem);
+ if (len <= sizeof(struct shmem_inode_info)) {
+ /* do it inline */
+ memcpy(info, symname, len);
+ inode->i_op = &shmem_symlink_inline_operations;
+ } else {
+ spin_lock (&shmem_ilock);
+ list_add (&info->list, &shmem_inodes);
+ spin_unlock (&shmem_ilock);
+ down(&info->sem);
+ page = shmem_getpage_locked(info, inode, 0);
+ if (IS_ERR(page)) {
+ up(&info->sem);
+ return PTR_ERR(page);
+ }
+ kaddr = kmap(page);
+ memcpy(kaddr, symname, len);
+ kunmap(page);
+ SetPageDirty(page);
+ UnlockPage(page);
+ page_cache_release(page);
+ up(&info->sem);
+ inode->i_op = &shmem_symlink_inode_operations;
+ }
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
return 0;
-fail:
- up(&inode->i_sem);
- return PTR_ERR(page);
+}
+
+static int shmem_readlink_inline(struct dentry *dentry, char *buffer, int buflen)
+{
+ return vfs_readlink(dentry,buffer,buflen, (const char *)SHMEM_I(dentry->d_inode));
+}
+
+static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
+{
+ return vfs_follow_link(nd, (const char *)SHMEM_I(dentry->d_inode));
}
static int shmem_readlink(struct dentry *dentry, char *buffer, int buflen)
return res;
}
+static struct inode_operations shmem_symlink_inline_operations = {
+ readlink: shmem_readlink_inline,
+ follow_link: shmem_follow_link_inline,
+};
+
+static struct inode_operations shmem_symlink_inode_operations = {
+ truncate: shmem_truncate,
+ readlink: shmem_readlink,
+ follow_link: shmem_follow_link,
+};
+
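A rough sketch of the decision shmem_symlink() makes above: a target short enough to fit in the in-inode info area (len includes the trailing NUL) is stored inline and served by the *_inline operations, anything longer is written to page 0 of the mapping and uses the paged readlink/follow_link. The 64-byte struct below is an assumed stand-in for sizeof(struct shmem_inode_info), not the real layout.

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Assumed stand-in; only its size matters for this illustration. */
struct fake_shmem_info { char space[64]; };

enum link_storage { LINK_INLINE, LINK_PAGED };

/* Mirrors the test in shmem_symlink(): len counts the terminating NUL
 * (strlen() + 1), so an inline target stays NUL-terminated and can be
 * handed straight to vfs_readlink()/vfs_follow_link(). */
static enum link_storage classify_symlink(const char *symname)
{
	size_t len = strlen(symname) + 1;

	return len <= sizeof(struct fake_shmem_info) ? LINK_INLINE : LINK_PAGED;
}

int main(void)
{
	assert(classify_symlink("/tmp/short-target") == LINK_INLINE);
	return 0;
}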
static int shmem_parse_options(char *options, int *mode, unsigned long * blocks, unsigned long *inodes)
{
char *this_char, *value;
static int shmem_remount_fs (struct super_block *sb, int *flags, char *data)
{
- struct shmem_sb_info *info = &sb->u.shmem_sb;
- unsigned long max_blocks = info->max_blocks;
- unsigned long max_inodes = info->max_inodes;
+ struct shmem_sb_info *sbinfo = &sb->u.shmem_sb;
+ unsigned long max_blocks = sbinfo->max_blocks;
+ unsigned long max_inodes = sbinfo->max_inodes;
if (shmem_parse_options (data, NULL, &max_blocks, &max_inodes))
return -EINVAL;
- return shmem_set_size(info, max_blocks, max_inodes);
+ return shmem_set_size(sbinfo, max_blocks, max_inodes);
}
int shmem_sync_file(struct file * file, struct dentry *dentry, int datasync)
struct dentry * root;
unsigned long blocks, inodes;
int mode = S_IRWXUGO | S_ISVTX;
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
struct sysinfo si;
/*
}
#endif
- spin_lock_init (&sb->u.shmem_sb.stat_lock);
- sb->u.shmem_sb.max_blocks = blocks;
- sb->u.shmem_sb.free_blocks = blocks;
- sb->u.shmem_sb.max_inodes = inodes;
- sb->u.shmem_sb.free_inodes = inodes;
- sb->s_maxbytes = (unsigned long long)SHMEM_MAX_BLOCKS << PAGE_CACHE_SHIFT;
+ spin_lock_init (&sbinfo->stat_lock);
+ sbinfo->max_blocks = blocks;
+ sbinfo->free_blocks = blocks;
+ sbinfo->max_inodes = inodes;
+ sbinfo->free_inodes = inodes;
+ sb->s_maxbytes = (unsigned long long) SHMEM_MAX_BLOCKS << PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
static struct address_space_operations shmem_aops = {
- writepage: shmem_writepage
+ writepage: shmem_writepage,
};
static struct file_operations shmem_file_operations = {
truncate: shmem_truncate,
};
-static struct inode_operations shmem_symlink_inode_operations = {
- truncate: shmem_truncate,
-#ifdef CONFIG_TMPFS
- readlink: shmem_readlink,
- follow_link: shmem_follow_link,
-#endif
-};
-
static struct file_operations shmem_dir_operations = {
read: generic_read_dir,
readdir: dcache_readdir,
shm_mnt = res;
/* The internal instance should not do size checking */
- if ((error = shmem_set_size(&res->mnt_sb->u.shmem_sb, ULONG_MAX, ULONG_MAX)))
+ if ((error = shmem_set_size(SHMEM_SB(res->mnt_sb), ULONG_MAX, ULONG_MAX)))
printk (KERN_ERR "could not set limits on internal tmpfs\n");
return 0;
module_init(init_shmem_fs)
module_exit(exit_shmem_fs)
-static int shmem_clear_swp (swp_entry_t entry, swp_entry_t *ptr, int size) {
- swp_entry_t *test;
-
- for (test = ptr; test < ptr + size; test++) {
- if (test->val == entry.val) {
- swap_free (entry);
- *test = (swp_entry_t) {0};
- return test - ptr;
- }
- }
- return -1;
-}
-
-static int shmem_unuse_inode (struct inode *inode, swp_entry_t entry, struct page *page)
-{
- swp_entry_t **base, **ptr;
- unsigned long idx;
- int offset;
- struct shmem_inode_info *info = &inode->u.shmem_i;
-
- idx = 0;
- spin_lock (&info->lock);
- if ((offset = shmem_clear_swp (entry,info->i_direct, SHMEM_NR_DIRECT)) >= 0)
- goto found;
-
- idx = SHMEM_NR_DIRECT;
- if (!(base = info->i_indirect))
- goto out;
-
- for (ptr = base; ptr < base + ENTRIES_PER_PAGE; ptr++) {
- if (*ptr &&
- (offset = shmem_clear_swp (entry, *ptr, ENTRIES_PER_PAGE)) >= 0)
- goto found;
- idx += ENTRIES_PER_PAGE;
- }
-out:
- spin_unlock (&info->lock);
- return 0;
-found:
- add_to_page_cache(page, inode->i_mapping, offset + idx);
- SetPageDirty(page);
- SetPageUptodate(page);
- UnlockPage(page);
- info->swapped--;
- spin_unlock(&info->lock);
- return 1;
-}
-
-/*
- * unuse_shmem() search for an eventually swapped out shmem page.
- */
-void shmem_unuse(swp_entry_t entry, struct page *page)
-{
- struct list_head *p;
- struct inode * inode;
-
- spin_lock (&shmem_ilock);
- list_for_each(p, &shmem_inodes) {
- inode = list_entry(p, struct inode, u.shmem_i.list);
-
- if (shmem_unuse_inode(inode, entry, page))
- break;
- }
- spin_unlock (&shmem_ilock);
-}
-
-
/*
* shmem_file_setup - get an unlinked file living in shmem fs
*
if (size > (unsigned long long) SHMEM_MAX_BLOCKS << PAGE_CACHE_SHIFT)
return ERR_PTR(-EINVAL);
- if (!vm_enough_memory((size) >> PAGE_SHIFT))
+ if (!vm_enough_memory((size) >> PAGE_CACHE_SHIFT))
return ERR_PTR(-ENOMEM);
this.name = name;
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp.c,v 1.212 2001/09/21 21:27:34 davem Exp $
+ * Version: $Id: tcp.c,v 1.213 2001/10/10 23:54:50 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
page = pages[poffset/PAGE_SIZE];
offset = poffset % PAGE_SIZE;
- size = min(psize, PAGE_SIZE-offset);
+ size = min_t(size_t, psize, PAGE_SIZE-offset);
if (tp->send_head==NULL || (copy = mss_now - skb->len) <= 0) {
new_segment: