alias char-major-10-200 tun
Run:
- modprobe -a
+ depmod -a
The driver will be loaded automatically when an application accesses /dev/net/tun.
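For context, a minimal user-space sketch (not part of the patch; the helper name tun_open() is ours) of the open()/TUNSETIFF sequence that triggers that autoload, using the interface from <linux/if_tun.h>:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/if.h>
	#include <linux/if_tun.h>

	int tun_open(void)
	{
		struct ifreq ifr;
		int fd = open("/dev/net/tun", O_RDWR);	/* kmod autoloads tun here */

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_flags = IFF_TUN;		/* point-to-point IP frames */
		strncpy(ifr.ifr_name, "tun%d", IFNAMSIZ);
		if (ioctl(fd, TUNSETIFF, (void *) &ifr) < 0) {	/* attach an interface */
			close(fd);
			return -1;
		}
		return fd;	/* read()/write() packets on fd from here on */
	}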
M: fizban@tin.it
S: Maintained
+AFFS FILE SYSTEM
+P: Roman Zippel
+M: zippel@linux-m68k.org
+S: Maintained
+
AHA152X SCSI DRIVER
P: Juergen E. Fischer
M: Juergen Fischer <fischer@norbit.de>
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 4
-EXTRAVERSION =-pre6
+EXTRAVERSION =-pre7
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
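With these values, KERNELRELEASE expands to 2.4.4-pre7.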
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
EXPORT_SYMBOL_NOVERS(__up_wakeup);
-#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
-EXPORT_SYMBOL_NOVERS(__rwsem_down_write_failed);
-EXPORT_SYMBOL_NOVERS(__rwsem_down_read_failed);
-EXPORT_SYMBOL_NOVERS(__rwsem_wake);
-#endif
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_generic);
/* Delay loops */
obj-y = checksum.o old-checksum.o delay.o \
usercopy.o getuser.o putuser.o \
- memcpy.o strstr.o rwsem.o
+ memcpy.o strstr.o
obj-$(CONFIG_X86_USE_3DNOW) += mmx.o
obj-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
+++ /dev/null
-/* rwsem.S: R/W semaphores, register saving wrapper function stubs
- *
- * Written by David Howells (dhowells@redhat.com).
- * Derived from arch/i386/kernel/semaphore.c
- */
-
-.text
-.align 4
-.globl __rwsem_down_read_failed
-__rwsem_down_read_failed:
- pushl %edx
- pushl %ecx
- call rwsem_down_read_failed
- popl %ecx
- popl %edx
- ret
-
-.align 4
-.globl __rwsem_down_write_failed
-__rwsem_down_write_failed:
- pushl %edx
- pushl %ecx
- call rwsem_down_write_failed
- popl %ecx
- popl %edx
- ret
-
-.align 4
-.globl __rwsem_wake
-__rwsem_wake:
- pushl %edx
- pushl %ecx
- call rwsem_wake
- popl %ecx
- popl %edx
- ret
#include <linux/kbd_kern.h>
#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(__alpha__) || \
- defined(__mips__) || defined(CONFIG_SPARC64) || defined(CONFIG_SUPERH)
+ defined(__mips__) || defined(CONFIG_SPARC64) || defined(CONFIG_SUPERH) || \
+ defined(CONFIG_PPC) || defined(__mc68000__)
static int x86_sysrq_alt = 0;
#ifdef CONFIG_SPARC64
308,310,313,314,315,317,318,319,320,321,322,323,324,325,326,330,
332,340,341,342,343,344,345,346,356,359,365,368,369,370,371,372 };
+#ifdef CONFIG_MAC_EMUMOUSEBTN
+extern int mac_hid_mouse_emulate_buttons(int, int, int);
+#endif /* CONFIG_MAC_EMUMOUSEBTN */
+#ifdef CONFIG_MAC_ADBKEYCODES
+extern int mac_hid_keyboard_sends_linux_keycodes(void);
+#else
+#define mac_hid_keyboard_sends_linux_keycodes() 0
+#endif /* CONFIG_MAC_ADBKEYCODES */
+#if defined(CONFIG_MAC_ADBKEYCODES) || defined(CONFIG_ADB_KEYBOARD)
+static unsigned char mac_keycodes[256] = {
+ 0, 53, 18, 19, 20, 21, 23, 22, 26, 28, 25, 29, 27, 24, 51, 48,
+ 12, 13, 14, 15, 17, 16, 32, 34, 31, 35, 33, 30, 36, 54,128, 1,
+ 2, 3, 5, 4, 38, 40, 37, 41, 39, 50, 56, 42, 6, 7, 8, 9,
+ 11, 45, 46, 43, 47, 44,123, 67, 58, 49, 57,122,120, 99,118, 96,
+ 97, 98,100,101,109, 71,107, 89, 91, 92, 78, 86, 87, 88, 69, 83,
+ 84, 85, 82, 65, 42, 0, 10,103,111, 0, 0, 0, 0, 0, 0, 0,
+ 76,125, 75,105,124,110,115, 62,116, 59, 60,119, 61,121,114,117,
+ 0, 0, 0, 0,127, 81, 0,113, 0, 0, 0, 0, 95, 55, 55, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 94, 0, 93, 0, 0, 0, 0, 0, 0,104,102 };
+#endif /* CONFIG_MAC_ADBKEYCODES || CONFIG_ADB_KEYBOARD */
+
static int emulate_raw(unsigned int keycode, int down)
{
+#ifdef CONFIG_MAC_EMUMOUSEBTN
+ if (mac_hid_mouse_emulate_buttons(1, keycode, down))
+ return 0;
+#endif /* CONFIG_MAC_EMUMOUSEBTN */
+#if defined(CONFIG_MAC_ADBKEYCODES) || defined(CONFIG_ADB_KEYBOARD)
+ if (!mac_hid_keyboard_sends_linux_keycodes()) {
+ if (keycode > 255 || !mac_keycodes[keycode])
+ return -1;
+
+ handle_scancode((mac_keycodes[keycode] & 0x7f), down);
+ return 0;
+ }
+#endif /* CONFIG_MAC_ADBKEYCODES || CONFIG_ADB_KEYBOARD */
+
if (keycode > 255 || !x86_keycodes[keycode])
return -1;
if (keycode == KEY_SYSRQ && x86_sysrq_alt) {
handle_scancode(0x54, down);
+
return 0;
}
return 0;
}
-#elif defined(CONFIG_ADB_KEYBOARD)
-
-static unsigned char mac_keycodes[128] =
- { 0, 53, 18, 19, 20, 21, 23, 22, 26, 28, 25, 29, 27, 24, 51, 48,
- 12, 13, 14, 15, 17, 16, 32, 34, 31, 35, 33, 30, 36, 54,128, 1,
- 2, 3, 5, 4, 38, 40, 37, 41, 39, 50, 56, 42, 6, 7, 8, 9,
- 11, 45, 46, 43, 47, 44,123, 67, 58, 49, 57,122,120, 99,118, 96,
- 97, 98,100,101,109, 71,107, 89, 91, 92, 78, 86, 87, 88, 69, 83,
- 84, 85, 82, 65, 42, 0, 10,103,111, 0, 0, 0, 0, 0, 0, 0,
- 76,125, 75,105,124, 0,115, 62,116, 59, 60,119, 61,121,114,117,
- 0, 0, 0, 0,127, 81, 0,113, 0, 0, 0, 0, 0, 55, 55 };
-
-static int emulate_raw(unsigned int keycode, int down)
-{
- if (keycode > 127 || !mac_keycodes[keycode])
- return -1;
-
- handle_scancode(mac_keycodes[keycode] & 0x7f, down);
-
- return 0;
-}
-
-#endif
+#endif /* CONFIG_X86 || CONFIG_IA64 || __alpha__ || __mips__ || CONFIG_SPARC64 || CONFIG_SUPERH || CONFIG_PPC || __mc68000__ */
static struct input_handler keybdev_handler;
tristate 'PPP (point-to-point protocol) support' CONFIG_PPP
if [ ! "$CONFIG_PPP" = "n" ]; then
dep_bool ' PPP multilink support (EXPERIMENTAL)' CONFIG_PPP_MULTILINK $CONFIG_EXPERIMENTAL
- bool ' PPP filtering' CONFIG_PPP_FILTER
+ dep_bool ' PPP filtering' CONFIG_PPP_FILTER $CONFIG_FILTER
dep_tristate ' PPP support for async serial ports' CONFIG_PPP_ASYNC $CONFIG_PPP
dep_tristate ' PPP support for sync tty ports' CONFIG_PPP_SYNC_TTY $CONFIG_PPP
dep_tristate ' PPP Deflate compression' CONFIG_PPP_DEFLATE $CONFIG_PPP
for (i = 0; i < 64; i++)
((u16 *) db->srom)[i] = read_srom_word(pci_iobase, i);
- printk(KERN_INFO "%s: Davicom DM%04lx at 0x%lx,",
- dev->name,
- ent->driver_data >> 16,
- pci_iobase);
-
/* Set Node address */
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < 6; i++)
dev->dev_addr[i] = db->srom[20 + i];
- printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
- }
-
- printk(", IRQ %d\n", pci_irqline);
i = register_netdev(dev);
if (i)
goto err_out_res;
+ printk(KERN_INFO "%s: Davicom DM%04lx at 0x%lx,",
+ dev->name,
+ ent->driver_data >> 16,
+ pci_iobase);
+ for (i = 0; i < 6; i++)
+ printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+ printk(", IRQ %d\n", pci_irqline);
+
return 0;
err_out_res:
struct tx_desc *txptr;
struct dmfe_board_info *db;
u32 ioaddr;
+ unsigned long flags;
if (!dev) {
DMFE_DBUG(1, "dmfe_interrupt() without device arg", 0);
DMFE_DBUG(0, "dmfe_interrupt()", 0);
- spin_lock_irq(&db->lock);
+ spin_lock_irqsave(&db->lock, flags);
/* Disable all interrupt in CR7 to solve the interrupt edge problem */
outl(0, ioaddr + DCR7);
netif_stop_queue(dev);
db->wait_reset = 1; /* Need to RESET */
outl(0, ioaddr + DCR7); /* disable all interrupt */
- spin_unlock_irq(&db->lock);
+ spin_unlock_irqrestore(&db->lock, flags);
return;
}
db->cr7_data = 0x1a2cd;
outl(db->cr7_data, ioaddr + DCR7);
- spin_unlock_irq(&db->lock);
+ spin_unlock_irqrestore(&db->lock, flags);
}
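For reference, the locking idiom the dmfe hunks above and below adopt (a generic sketch, not an additional hunk): spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the caller had them disabled already; the irqsave/irqrestore pair saves and restores the actual interrupt state:

	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);	/* save IRQ state, then disable */
	/* ... touch state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&db->lock, flags);	/* restore the saved state */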
/*
static void dmfe_set_filter_mode(struct net_device *dev)
{
struct dmfe_board_info *db = dev->priv;
+ unsigned long flags;
DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
+ spin_lock_irqsave(&db->lock, flags);
if (dev->flags & IFF_PROMISC) {
DMFE_DBUG(0, "Enable PROM Mode", 0);
db->cr6_data |= CR6_PM | CR6_PBF;
update_cr6(db->cr6_data, db->ioaddr);
+ spin_unlock_irqrestore(&db->lock, flags);
return;
}
if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
+ spin_unlock_irqrestore(&db->lock, flags);
return;
}
DMFE_DBUG(0, "Set multicast address", dev->mc_count);
dm9132_id_table(dev, dev->mc_count); /* DM9132 */
else
send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ spin_unlock_irqrestore(&db->lock, flags);
}
/*
unsigned char tmp_cr12;
struct net_device *dev = (struct net_device *) data;
struct dmfe_board_info *db = (struct dmfe_board_info *) dev->priv;
+ unsigned long flags;
DMFE_DBUG(0, "dmfe_timer()", 0);
+ spin_lock_irqsave(&db->lock, flags);
/* Do reset now */
- if (db->in_reset_state)
+ if (db->in_reset_state) {
+ spin_unlock_irqrestore(&db->lock, flags);
return;
+ }
/* Operating Mode Check */
if ((db->dm910x_chk_mode & 0x1) && (db->stats.rx_packets > MAX_CHECK_PACKET)) {
dmfe_dynamic_reset(dev);
db->timer.expires = jiffies + DMFE_TIMER_WUT;
add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
return;
}
	db->rx_error_cnt = 0;	/* Clear previous counter */
/* Timer active again */
db->timer.expires = jiffies + DMFE_TIMER_WUT;
add_timer(&db->timer);
+ spin_unlock_irqrestore(&db->lock, flags);
}
/*
struct dmfe_board_info *db = dev->priv;
struct dev_mc_list *mcptr;
struct tx_desc *txptr;
- unsigned long flags;
u16 *addrptr;
u32 *suptr;
int i;
DMFE_DBUG(0, "send_filter_frame()", 0);
- spin_lock_irqsave(&db->lock, flags);
txptr = db->tx_insert_ptr;
suptr = (u32 *) txptr->tx_buf_ptr;
if (txptr->tdes0 & 0x80000000) {
- spin_unlock_irqrestore(&db->lock, flags);
printk(KERN_WARNING "%s: Too busy to send filter frame\n",
dev->name);
return;
update_cr6(db->cr6_data | 0x2000, dev->base_addr);
outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling command */
update_cr6(db->cr6_data, dev->base_addr);
- spin_unlock_irqrestore(&db->lock, flags);
}
/*
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/threads.h>
#include <linux/smp.h>
#include <asm/io.h>
#include <linux/ptrace.h>
/*
* Limits and constants
*/
-#define HERMES_ALLOC_LEN_MIN ((uint16_t)4)
-#define HERMES_ALLOC_LEN_MAX ((uint16_t)2400)
+#define HERMES_ALLOC_LEN_MIN (4)
+#define HERMES_ALLOC_LEN_MAX (2400)
#define HERMES_LTV_LEN_MAX (34)
-#define HERMES_BAP_DATALEN_MAX ((uint16_t)4096)
-#define HERMES_BAP_OFFSET_MAX ((uint16_t)4096)
-#define HERMES_PORTID_MAX ((uint16_t)7)
-#define HERMES_NUMPORTS_MAX ((uint16_t)(HERMES_PORTID_MAX+1))
-#define HERMES_PDR_LEN_MAX ((uint16_t)260) /* in bytes, from EK */
-#define HERMES_PDA_RECS_MAX ((uint16_t)200) /* a guess */
-#define HERMES_PDA_LEN_MAX ((uint16_t)1024) /* in bytes, from EK */
-#define HERMES_SCANRESULT_MAX ((uint16_t)35)
-#define HERMES_CHINFORESULT_MAX ((uint16_t)8)
+#define HERMES_BAP_DATALEN_MAX (4096)
+#define HERMES_BAP_OFFSET_MAX (4096)
+#define HERMES_PORTID_MAX (7)
+#define HERMES_NUMPORTS_MAX (HERMES_PORTID_MAX+1)
+#define HERMES_PDR_LEN_MAX (260) /* in bytes, from EK */
+#define HERMES_PDA_RECS_MAX (200) /* a guess */
+#define HERMES_PDA_LEN_MAX (1024) /* in bytes, from EK */
+#define HERMES_SCANRESULT_MAX (35)
+#define HERMES_CHINFORESULT_MAX (8)
#define HERMES_FRAME_LEN_MAX (2304)
#define HERMES_MAX_MULTICAST (16)
#define HERMES_MAGIC (0x7d1f)
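A plausible motivation for dropping the ((uint16_t)...) casts throughout hermes.h (our inference; the patch states no rationale): a cast expression cannot be evaluated by the preprocessor, so the cast form makes the constants unusable in #if tests:

	#define PLAIN_BUSY	(0x8000)
	#define CAST_BUSY	((uint16_t)0x8000)

	#if PLAIN_BUSY == 0x8000	/* fine: a plain integer constant */
	#endif
	/* #if CAST_BUSY == 0x8000 -- preprocessor error: '#if' cannot parse a cast */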
/*
* CMD register bitmasks
*/
-#define HERMES_CMD_BUSY ((uint16_t)0x8000)
-#define HERMES_CMD_AINFO ((uint16_t)0x7f00)
-#define HERMES_CMD_MACPORT ((uint16_t)0x0700)
-#define HERMES_CMD_RECL ((uint16_t)0x0100)
-#define HERMES_CMD_WRITE ((uint16_t)0x0100)
-#define HERMES_CMD_PROGMODE ((uint16_t)0x0300)
-#define HERMES_CMD_CMDCODE ((uint16_t)0x003f)
+#define HERMES_CMD_BUSY (0x8000)
+#define HERMES_CMD_AINFO (0x7f00)
+#define HERMES_CMD_MACPORT (0x0700)
+#define HERMES_CMD_RECL (0x0100)
+#define HERMES_CMD_WRITE (0x0100)
+#define HERMES_CMD_PROGMODE (0x0300)
+#define HERMES_CMD_CMDCODE (0x003f)
/*
* STATUS register bitmasks
*/
-#define HERMES_STATUS_RESULT ((uint16_t)0x7f00)
-#define HERMES_STATUS_CMDCODE ((uint16_t)0x003f)
+#define HERMES_STATUS_RESULT (0x7f00)
+#define HERMES_STATUS_CMDCODE (0x003f)
/*
 * OFFSET register bitmasks
*/
-#define HERMES_OFFSET_BUSY ((uint16_t)0x8000)
-#define HERMES_OFFSET_ERR ((uint16_t)0x4000)
-#define HERMES_OFFSET_DATAOFF ((uint16_t)0x0ffe)
+#define HERMES_OFFSET_BUSY (0x8000)
+#define HERMES_OFFSET_ERR (0x4000)
+#define HERMES_OFFSET_DATAOFF (0x0ffe)
/*
* Event register bitmasks (INTEN, EVSTAT, EVACK)
*/
-#define HERMES_EV_TICK ((uint16_t)0x8000)
-#define HERMES_EV_WTERR ((uint16_t)0x4000)
-#define HERMES_EV_INFDROP ((uint16_t)0x2000)
-#define HERMES_EV_INFO ((uint16_t)0x0080)
-#define HERMES_EV_DTIM ((uint16_t)0x0020)
-#define HERMES_EV_CMD ((uint16_t)0x0010)
-#define HERMES_EV_ALLOC ((uint16_t)0x0008)
-#define HERMES_EV_TXEXC ((uint16_t)0x0004)
-#define HERMES_EV_TX ((uint16_t)0x0002)
-#define HERMES_EV_RX ((uint16_t)0x0001)
+#define HERMES_EV_TICK (0x8000)
+#define HERMES_EV_WTERR (0x4000)
+#define HERMES_EV_INFDROP (0x2000)
+#define HERMES_EV_INFO (0x0080)
+#define HERMES_EV_DTIM (0x0020)
+#define HERMES_EV_CMD (0x0010)
+#define HERMES_EV_ALLOC (0x0008)
+#define HERMES_EV_TXEXC (0x0004)
+#define HERMES_EV_TX (0x0002)
+#define HERMES_EV_RX (0x0001)
/*
* Command codes
*/
/*--- Controller Commands --------------------------*/
-#define HERMES_CMD_INIT ((uint16_t)0x00)
-#define HERMES_CMD_ENABLE ((uint16_t)0x01)
-#define HERMES_CMD_DISABLE ((uint16_t)0x02)
-#define HERMES_CMD_DIAG ((uint16_t)0x03)
+#define HERMES_CMD_INIT (0x0000)
+#define HERMES_CMD_ENABLE (0x0001)
+#define HERMES_CMD_DISABLE (0x0002)
+#define HERMES_CMD_DIAG (0x0003)
/*--- Buffer Mgmt Commands --------------------------*/
-#define HERMES_CMD_ALLOC ((uint16_t)0x0A)
-#define HERMES_CMD_TX ((uint16_t)0x0B)
-#define HERMES_CMD_CLRPRST ((uint16_t)0x12)
+#define HERMES_CMD_ALLOC (0x000A)
+#define HERMES_CMD_TX (0x000B)
+#define HERMES_CMD_CLRPRST (0x0012)
/*--- Regulate Commands --------------------------*/
-#define HERMES_CMD_NOTIFY ((uint16_t)0x10)
-#define HERMES_CMD_INQ ((uint16_t)0x11)
+#define HERMES_CMD_NOTIFY (0x0010)
+#define HERMES_CMD_INQ (0x0011)
/*--- Configure Commands --------------------------*/
-#define HERMES_CMD_ACCESS ((uint16_t)0x21)
-#define HERMES_CMD_DOWNLD ((uint16_t)0x22)
+#define HERMES_CMD_ACCESS (0x0021)
+#define HERMES_CMD_DOWNLD (0x0022)
/*--- Debugging Commands -----------------------------*/
-#define HERMES_CMD_MONITOR ((uint16_t)(0x38))
-#define HERMES_MONITOR_ENABLE ((uint16_t)(0x0b))
-#define HERMES_MONITOR_DISABLE ((uint16_t)(0x0f))
+#define HERMES_CMD_MONITOR (0x0038)
+#define HERMES_MONITOR_ENABLE (0x000b)
+#define HERMES_MONITOR_DISABLE (0x000f)
/*
* Configuration RIDs
*/
-#define HERMES_RID_CNF_PORTTYPE ((uint16_t)0xfc00)
-#define HERMES_RID_CNF_MACADDR ((uint16_t)0xfc01)
-#define HERMES_RID_CNF_DESIRED_SSID ((uint16_t)0xfc02)
-#define HERMES_RID_CNF_CHANNEL ((uint16_t)0xfc03)
-#define HERMES_RID_CNF_OWN_SSID ((uint16_t)0xfc04)
-#define HERMES_RID_CNF_SYSTEM_SCALE ((uint16_t)0xfc06)
-#define HERMES_RID_CNF_MAX_DATA_LEN ((uint16_t)0xfc07)
-#define HERMES_RID_CNF_PM_ENABLE ((uint16_t)0xfc09)
-#define HERMES_RID_CNF_PM_MCAST_RX ((uint16_t)0xfc0b)
-#define HERMES_RID_CNF_PM_PERIOD ((uint16_t)0xfc0c)
-#define HERMES_RID_CNF_PM_HOLDOVER ((uint16_t)0xfc0d)
-#define HERMES_RID_CNF_NICKNAME ((uint16_t)0xfc0e)
-#define HERMES_RID_CNF_WEP_ON ((uint16_t)0xfc20)
-#define HERMES_RID_CNF_MWO_ROBUST ((uint16_t)0xfc25)
-#define HERMES_RID_CNF_PRISM2_WEP_ON ((uint16_t)0xfc28)
-#define HERMES_RID_CNF_MULTICAST_LIST ((uint16_t)0xfc80)
-#define HERMES_RID_CNF_CREATEIBSS ((uint16_t)0xfc81)
-#define HERMES_RID_CNF_FRAG_THRESH ((uint16_t)0xfc82)
-#define HERMES_RID_CNF_RTS_THRESH ((uint16_t)0xfc83)
-#define HERMES_RID_CNF_TX_RATE_CTRL ((uint16_t)0xfc84)
-#define HERMES_RID_CNF_PROMISCUOUS ((uint16_t)0xfc85)
-#define HERMES_RID_CNF_KEYS ((uint16_t)0xfcb0)
-#define HERMES_RID_CNF_TX_KEY ((uint16_t)0xfcb1)
-#define HERMES_RID_CNF_TICKTIME ((uint16_t)0xfce0)
-
-#define HERMES_RID_CNF_PRISM2_TX_KEY ((uint16_t)0xfc23)
-#define HERMES_RID_CNF_PRISM2_KEY0 ((uint16_t)0xfc24)
-#define HERMES_RID_CNF_PRISM2_KEY1 ((uint16_t)0xfc25)
-#define HERMES_RID_CNF_PRISM2_KEY2 ((uint16_t)0xfc26)
-#define HERMES_RID_CNF_PRISM2_KEY3 ((uint16_t)0xfc27)
-#define HERMES_RID_CNF_SYMBOL_AUTH_TYPE ((uint16_t)0xfc2A)
-/* This one is read only */
-#define HERMES_RID_CNF_SYMBOL_KEY_LENGTH ((uint16_t)0xfc2B)
-#define HERMES_RID_CNF_SYMBOL_BASIC_RATES ((uint16_t)0xfc8A)
+#define HERMES_RID_CNF_PORTTYPE (0xfc00)
+#define HERMES_RID_CNF_MACADDR (0xfc01)
+#define HERMES_RID_CNF_DESIRED_SSID (0xfc02)
+#define HERMES_RID_CNF_CHANNEL (0xfc03)
+#define HERMES_RID_CNF_OWN_SSID (0xfc04)
+#define HERMES_RID_CNF_SYSTEM_SCALE (0xfc06)
+#define HERMES_RID_CNF_MAX_DATA_LEN (0xfc07)
+#define HERMES_RID_CNF_PM_ENABLE (0xfc09)
+#define HERMES_RID_CNF_PM_MCAST_RX (0xfc0b)
+#define HERMES_RID_CNF_PM_PERIOD (0xfc0c)
+#define HERMES_RID_CNF_PM_HOLDOVER (0xfc0d)
+#define HERMES_RID_CNF_NICKNAME (0xfc0e)
+#define HERMES_RID_CNF_WEP_ON (0xfc20)
+#define HERMES_RID_CNF_MWO_ROBUST (0xfc25)
+#define HERMES_RID_CNF_PRISM2_WEP_ON (0xfc28)
+#define HERMES_RID_CNF_MULTICAST_LIST (0xfc80)
+#define HERMES_RID_CNF_CREATEIBSS (0xfc81)
+#define HERMES_RID_CNF_FRAG_THRESH (0xfc82)
+#define HERMES_RID_CNF_RTS_THRESH (0xfc83)
+#define HERMES_RID_CNF_TX_RATE_CTRL (0xfc84)
+#define HERMES_RID_CNF_PROMISCUOUS (0xfc85)
+#define HERMES_RID_CNF_KEYS (0xfcb0)
+#define HERMES_RID_CNF_TX_KEY (0xfcb1)
+#define HERMES_RID_CNF_TICKTIME (0xfce0)
+
+#define HERMES_RID_CNF_PRISM2_TX_KEY (0xfc23)
+#define HERMES_RID_CNF_PRISM2_KEY0 (0xfc24)
+#define HERMES_RID_CNF_PRISM2_KEY1 (0xfc25)
+#define HERMES_RID_CNF_PRISM2_KEY2 (0xfc26)
+#define HERMES_RID_CNF_PRISM2_KEY3 (0xfc27)
+#define HERMES_RID_CNF_SYMBOL_MANDATORY_BSSID (0xfc21)
+#define HERMES_RID_CNF_SYMBOL_AUTH_TYPE (0xfc2A)
+#define HERMES_RID_CNF_SYMBOL_BASIC_RATES (0xfc8A)
+#define HERMES_RID_CNF_SYMBOL_PREAMBLE (0xfc8C)
/*
* Information RIDs
*/
-#define HERMES_RID_CHANNEL_LIST ((uint16_t)0xfd10)
-#define HERMES_RID_STAIDENTITY ((uint16_t)0xfd20)
-#define HERMES_RID_CURRENT_SSID ((uint16_t)0xfd41)
-#define HERMES_RID_CURRENT_BSSID ((uint16_t)0xfd42)
-#define HERMES_RID_COMMSQUALITY ((uint16_t)0xfd43)
-#define HERMES_RID_CURRENT_TX_RATE ((uint16_t)0xfd44)
-#define HERMES_RID_WEP_AVAIL ((uint16_t)0xfd4f)
-#define HERMES_RID_CURRENT_CHANNEL ((uint16_t)0xfdc1)
-#define HERMES_RID_DATARATES ((uint16_t)0xfdc6)
+#define HERMES_RID_CHANNEL_LIST (0xfd10)
+#define HERMES_RID_STAIDENTITY (0xfd20)
+#define HERMES_RID_CURRENT_SSID (0xfd41)
+#define HERMES_RID_CURRENT_BSSID (0xfd42)
+#define HERMES_RID_COMMSQUALITY (0xfd43)
+#define HERMES_RID_CURRENT_TX_RATE (0xfd44)
+#define HERMES_RID_SHORT_RETRY_LIMIT (0xfd48)
+#define HERMES_RID_LONG_RETRY_LIMIT (0xfd49)
+#define HERMES_RID_MAX_TX_LIFETIME (0xfd4A)
+#define HERMES_RID_WEP_AVAIL (0xfd4f)
+#define HERMES_RID_CURRENT_CHANNEL (0xfdc1)
+#define HERMES_RID_DATARATES (0xfdc6)
+#define HERMES_RID_SYMBOL_PRIMARY_VER (0xfd03)
+#define HERMES_RID_SYMBOL_SECONDARY_VER (0xfd21)
+#define HERMES_RID_SYMBOL_KEY_LENGTH (0xfc2B)
/*
* Frame structures and constants
uint16_t tx_ctl; /* 0xC */
} __attribute__ ((packed)) hermes_frame_desc_t;
-#define HERMES_RXSTAT_ERR ((uint16_t)0x0003)
-#define HERMES_RXSTAT_MACPORT ((uint16_t)0x0700)
-#define HERMES_RXSTAT_MSGTYPE ((uint16_t)0xE000)
+#define HERMES_RXSTAT_ERR (0x0003)
+#define HERMES_RXSTAT_MACPORT (0x0700)
+#define HERMES_RXSTAT_MSGTYPE (0xE000)
-#define HERMES_RXSTAT_BADCRC ((uint16_t)0x0001)
-#define HERMES_RXSTAT_UNDECRYPTABLE ((uint16_t)0x0002)
+#define HERMES_RXSTAT_BADCRC (0x0001)
+#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002)
/* RFC-1042 encoded frame */
-#define HERMES_RXSTAT_1042 ((uint16_t)0x2000)
+#define HERMES_RXSTAT_1042 (0x2000)
/* Bridge-tunnel encoded frame */
-#define HERMES_RXSTAT_TUNNEL ((uint16_t)0x4000)
+#define HERMES_RXSTAT_TUNNEL (0x4000)
/* Wavelan-II Management Protocol frame */
-#define HERMES_RXSTAT_WMP ((uint16_t)0x6000)
+#define HERMES_RXSTAT_WMP (0x6000)
#ifdef __KERNEL__
(hermes_read_ltv((hw),(bap),(rid), sizeof(*buf), NULL, (buf)))
#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
(hermes_write_ltv((hw),(bap),(rid),HERMES_BYTES_TO_RECLEN(sizeof(*buf)),(buf)))
-#define HERMES_WRITE_RECORD_LEN(hw, bap, rid, buf, len) \
- (hermes_write_ltv((hw),(bap),(rid),HERMES_BYTES_TO_RECLEN(len),(buf)))
static inline int hermes_read_wordrec(hermes_t *hw, int bap, uint16_t rid, uint16_t *word)
{
if(wrq->u.data.pointer != (caddr_t) 0) {
struct iw_range range;
- /* Set the length (useless : its constant...) */
+ /* Set the length (very important for backward compatibility) */
wrq->u.data.length = sizeof(struct iw_range);
+
+ /* Set all the info we don't care or don't know about to zero */
+ memset(&range, 0, sizeof(range));
+
+#if WIRELESS_EXT > 10
+ /* Set the Wireless Extension versions */
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 9; /* Nothing for us in v10 and v11 */
+#endif /* WIRELESS_EXT > 10 */
/* Set information in the range struct */
range.throughput = 450 * 1000; /* don't argue on this ! */
-/* orinoco_cs.c 0.03 - (formerly known as dldwd_cs.c)
+/* orinoco_cs.c 0.04 - (formerly known as dldwd_cs.c)
*
* A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
* as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
* o Finish external renaming to orinoco...
* o Testing with various Wavelan firmwares
*
+ * v0.03 -> v0.04 - 30/3/2001 - Jean II
+ * o Update to Wireless 11 -> add retry limit/lifetime support
+ * o Tested with a D-Link DWL 650 card, fill in firmware support
+ * o Warning on Vcc mismatch (D-Link 3.3v card in Lucent 5v only slot)
+ *	o Fixed the Prism2 WEP bugs that I introduced in v0.03 :-(
+ *	  It works on D-Link *only* after a tcpdump. Weird...
+ *	  And still doesn't work on the Intel card. Grrrr...
+ * o Update the mode after a setport3
+ * o Add preamble setting for Symbol cards (not yet enabled)
+ * o Don't complain as much about Symbol cards...
+ *
+ * v0.04 -> v0.04b - 22/4/2001 - David Gibson
+ * o Removed the 'eth' parameter - always use ethXX as the
+ * interface name instead of dldwdXX. The other was racy
+ * anyway.
+ * o Clean up RID definitions in hermes.h, other cleanups
+ *
+ * v0.04b -> v0.04c - 24/4/2001 - Jean II
+ * o Tim Hurley <timster@seiki.bliztech.com> reported a D-Link card
+ * with vendor 02 and firmware 0.08. Added in the capabilities...
+ * o Tested Lucent firmware 7.28, everything works...
+ *
* TODO - Jean II
 * o inline functions (lots of candidates, need to reorder code)
* o Separate Pcmcia specific code to help Airport/Mini PCI driver
#ifdef PCMCIA_DEBUG
static int pc_debug = PCMCIA_DEBUG;
-static char *version = "orinoco_cs.c 0.03 (David Gibson <hermes@gibson.dropbear.id.au>)";
+static char *version = "orinoco_cs.c 0.04 (David Gibson <hermes@gibson.dropbear.id.au>)";
MODULE_PARM(pc_debug, "i");
#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
#define DEBUGMORE(n, args...) do { if (pc_debug>(n)) printk(args); } while (0)
static uint irq_mask = 0xdeb8;
/* Newer, simpler way of listing specific interrupts */
static int irq_list[4] = { -1 };
-/* Control device name allocation. 0 -> dldwdX ; 1 -> ethX */
-static int eth = 1;
+/* Do a Pcmcia soft reset (may help some cards) */
+static int reset_cor = 0;
MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-4i");
-MODULE_PARM(eth, "i");
+MODULE_PARM(reset_cor, "i");
/*====================================================================*/
int has_wep, has_big_wep;
int has_mwo;
int has_pm;
+ int has_retry;
+ int has_preamble;
int broken_reset, broken_allocate;
uint16_t channel_mask;
/* Current configuration */
uint32_t iw_mode;
int port_type, allow_ibss;
- uint16_t wep_on, wep_auth, tx_key;
+ uint16_t wep_on, wep_restrict, tx_key;
dldwd_keys_t keys;
char nick[IW_ESSID_MAX_SIZE+1];
char desired_essid[IW_ESSID_MAX_SIZE+1];
uint16_t ap_density, rts_thresh;
uint16_t tx_rate_ctrl;
uint16_t pm_on, pm_mcast, pm_period, pm_timeout;
+ uint16_t retry_short, retry_long, retry_time;
+ uint16_t preamble;
int promiscuous, allmulti, mc_count;
goto out;
}
+	/* Set retry settings - will fail on lots of firmwares */
+ if (priv->has_retry) {
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_SHORT_RETRY_LIMIT,
+ priv->retry_short);
+ if (err) {
+ printk(KERN_WARNING "%s: Can't set retry limit!\n", dev->name);
+ goto out;
+ }
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_LONG_RETRY_LIMIT,
+ priv->retry_long);
+ if (err)
+ goto out;
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_MAX_TX_LIFETIME,
+ priv->retry_time);
+ if (err)
+ goto out;
+ }
+
+ /* Set preamble - only for Symbol so far... */
+ if (priv->has_preamble) {
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNF_SYMBOL_PREAMBLE,
+ priv->preamble);
+ if (err) {
+ printk(KERN_WARNING "%s: Can't set preamble!\n", dev->name);
+ goto out;
+ }
+ }
+
/* Set promiscuity / multicast*/
priv->promiscuous = 0;
priv->allmulti = 0;
{
hermes_t *hw = &priv->hw;
int err = 0;
-
+ int extra_wep_flag = 0;
+
switch (priv->firmware_type) {
case FIRMWARE_TYPE_LUCENT: /* Lucent style WEP */
if (priv->wep_on) {
int keylen;
int i;
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNF_PRISM2_TX_KEY,
- priv->tx_key);
- if (err)
- return err;
-
- keybuf[LARGE_KEY_SIZE] = '\0';
-
/* Write all 4 keys */
for(i = 0; i < MAX_KEYS; i++) {
keylen = priv->keys[i].len;
- keybuf[SMALL_KEY_SIZE] = '\0';
+ keybuf[keylen] = '\0';
memcpy(keybuf, priv->keys[i].data, keylen);
- err = HERMES_WRITE_RECORD_LEN(hw, USER_BAP, HERMES_RID_CNF_PRISM2_KEY0, &keybuf, keylen);
+ err = hermes_write_ltv(hw, USER_BAP,
+ HERMES_RID_CNF_PRISM2_KEY0 + i,
+ HERMES_BYTES_TO_RECLEN(keylen + 1),
+ &keybuf);
if (err)
return err;
}
- /* Symbol cards : set the authentication :
- * 0 -> no encryption, 1 -> open,
- * 2 -> shared key, 3 -> shared key 128bit only */
+
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNF_PRISM2_TX_KEY,
+ priv->tx_key);
+ if (err)
+ return err;
+
+ /* Authentication is where Prism2 and Symbol
+ * firmware differ... */
if (priv->firmware_type == FIRMWARE_TYPE_SYMBOL) {
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNF_SYMBOL_AUTH_TYPE, priv->wep_auth);
+ /* Symbol cards : set the authentication :
+ * 0 -> no encryption, 1 -> open,
+ * 2 -> shared key, 3 -> shared key 128bit */
+ if(priv->wep_restrict) {
+ if(priv->keys[priv->tx_key].len >
+ SMALL_KEY_SIZE)
+ extra_wep_flag = 3;
+ else
+ extra_wep_flag = 2;
+ } else
+ extra_wep_flag = 1;
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNF_SYMBOL_AUTH_TYPE, priv->wep_restrict);
if (err)
return err;
+ } else {
+ /* Prism2 card : we need to modify master
+ * WEP setting */
+ if(priv->wep_restrict)
+ extra_wep_flag = 2;
+ else
+ extra_wep_flag = 0;
}
}
- err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNF_PRISM2_WEP_ON, priv->wep_on);
+ /* Master WEP setting : on/off */
+ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNF_PRISM2_WEP_ON, (priv->wep_on | extra_wep_flag));
if (err)
return err;
break;
}
firmver = ((uint32_t)priv->firmware_info.major << 16) | priv->firmware_info.minor;
+ DEBUG(2, "%s: firmver = 0x%X\n", dev->name, firmver);
/* Determine capabilities from the firmware version */
priv->firmware_type = FIRMWARE_TYPE_LUCENT;
priv->broken_reset = 0;
priv->broken_allocate = 0;
- priv->has_port3 = 1;
+ priv->has_port3 = 1; /* Still works in 7.28 */
priv->has_ibss = (firmver >= 0x60006);
priv->has_ibss_any = (firmver >= 0x60010);
priv->has_wep = (firmver >= 0x40020);
Gold cards from the others? */
priv->has_mwo = (firmver >= 0x60000);
priv->has_pm = (firmver >= 0x40020);
+ priv->has_retry = 0;
+ priv->has_preamble = 0;
/* Tested with Lucent firmware :
- * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 => Jean II
+ * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
* Tested CableTron firmware : 4.32 => Anton */
break;
case 0x2:
vendor_str = "Generic Prism II";
- /* Note : my Intel card report this value, but I can't do
- * much with it, so I guess it's broken - Jean II */
+ /* Some D-Link cards report vendor 0x02... */
priv->firmware_type = FIRMWARE_TYPE_PRISM2;
priv->broken_reset = 0;
- priv->broken_allocate = (firmver <= 0x10001);
+ priv->broken_allocate = 0;
priv->has_port3 = 1;
- priv->has_ibss = 0; /* FIXME: no idea if this is right */
- priv->has_wep = (firmver >= 0x20000);
- priv->has_big_wep = 1;
+ priv->has_ibss = (firmver >= 0x00007); /* FIXME */
+ priv->has_wep = (firmver >= 0x00007); /* FIXME */
+ priv->has_big_wep = 0;
priv->has_mwo = 0;
- priv->has_pm = (firmver >= 0x20000);
- /* Tested with Intel firmware : 1.01 => Jean II */
- /* Note : firmware 1.01 is *seriously* broken */
+ priv->has_pm = (firmver >= 0x00007); /* FIXME */
+ priv->has_retry = 0;
+ priv->has_preamble = 0;
+
+ /* Tim Hurley -> D-Link card, vendor 02, firmware 0.08 */
+
+ /* Special case for Symbol cards */
+ if(firmver == 0x10001) {
+ /* Symbol , 3Com AirConnect, Intel, Ericsson WLAN */
+ vendor_str = "Symbol";
+ /* Intel MAC : 00:02:B3:* */
+ /* 3Com MAC : 00:50:DA:* */
+
+ /* FIXME : probably need to use SYMBOL_***ARY_VER
+ * to get proper firmware version */
+ priv->firmware_type = FIRMWARE_TYPE_SYMBOL;
+ priv->broken_reset = 0;
+ priv->broken_allocate = 1;
+ priv->has_port3 = 1;
+ priv->has_ibss = 1; /* FIXME */
+ priv->has_wep = 1; /* FIXME */
+ priv->has_big_wep = 1; /* RID_SYMBOL_KEY_LENGTH */
+ priv->has_mwo = 0;
+ priv->has_pm = 1; /* FIXME */
+ priv->has_retry = 0;
+ priv->has_preamble = 0; /* FIXME */
+ /* Tested with Intel firmware : v15 => Jean II */
+ }
break;
case 0x3:
vendor_str = "Samsung";
priv->has_big_wep = 0; /* FIXME */
priv->has_mwo = 0;
priv->has_pm = (firmver >= 0x20000); /* FIXME */
+ priv->has_retry = 0;
+ priv->has_preamble = 0;
break;
case 0x6:
+ /* D-Link DWL 650, ... */
vendor_str = "LinkSys/D-Link";
- /* To check */
+ /* D-Link MAC : 00:40:05:* */
priv->firmware_type = FIRMWARE_TYPE_PRISM2;
priv->broken_reset = 0;
priv->broken_allocate = 0;
priv->has_port3 = 1;
- priv->has_ibss = 0; /* FIXME: available in later firmwares */
- priv->has_wep = (firmver >= 0x20000); /* FIXME */
+ priv->has_ibss = (firmver >= 0x00007); /* FIXME */
+ priv->has_wep = (firmver >= 0x00007); /* FIXME */
priv->has_big_wep = 0;
priv->has_mwo = 0;
- priv->has_pm = (firmver >= 0x20000); /* FIXME */
+ priv->has_pm = (firmver >= 0x00007); /* FIXME */
+ priv->has_retry = 0;
+ priv->has_preamble = 0;
+ /* Tested with D-Link firmware 0.07 => Jean II */
+	/* Note : with 0.07, IBSS to a Lucent card seems flaky */
break;
-#if 0
- case 0x???: /* Could someone help here ??? */
- vendor_str = "Symbol";
- /* Symbol , 3Com AirConnect, Ericsson WLAN */
-
- priv->firmware_type = FIRMWARE_TYPE_SYMBOL;
- priv->broken_reset = 0;
- priv->broken_allocate = 0;
- priv->has_port3 = 1;
- priv->has_ibss = 0; /* FIXME: available in later firmwares */
- priv->has_wep = (firmver >= 0x20000); /* FIXME */
- priv->has_big_wep = 1; /* Probably RID_SYMBOL_KEY_LENGTH */
- priv->has_mwo = 0;
- priv->has_pm = (firmver >= 0x20000);
- break;
-#endif
default:
vendor_str = "UNKNOWN";
priv->has_big_wep = 0;
priv->has_mwo = 0;
priv->has_pm = 0;
+ priv->has_retry = 0;
+ priv->has_preamble = 0;
}
printk(KERN_INFO "%s: Firmware ID %02X vendor 0x%x (%s) version %d.%02d\n",
dev->name, priv->firmware_info.id, priv->firmware_info.vendor,
vendor_str, priv->firmware_info.major, priv->firmware_info.minor);
- if ((priv->broken_reset) || (priv->broken_allocate))
- printk(KERN_INFO "%s: Buggy firmware, please upgrade ASAP.\n", dev->name);
if (priv->has_port3)
printk(KERN_INFO "%s: Ad-hoc demo mode supported.\n", dev->name);
if (priv->has_ibss)
if (priv->has_big_wep)
printk("\"128\"-bit key.\n");
else
- printk("40-bit key.");
+ printk("40-bit key.\n");
}
/* Get the MAC address */
goto out;
}
}
+
+ /* Retry setup */
+ if (priv->has_retry) {
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORT_RETRY_LIMIT, &priv->retry_short);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONG_RETRY_LIMIT, &priv->retry_long);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAX_TX_LIFETIME, &priv->retry_time);
+ if (err)
+ goto out;
+ }
+
+ /* Preamble setup */
+ if (priv->has_preamble) {
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNF_SYMBOL_PREAMBLE, &priv->preamble);
+ if (err)
+ goto out;
+ }
/* Set up the default configuration */
priv->iw_mode = IW_MODE_INFRA;
/* Much of this shamelessly taken from wvlan_cs.c. No idea
* what it all means -dgibson */
+#if WIRELESS_EXT > 10
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 11;
+#endif /* WIRELESS_EXT > 10 */
+
range.min_nwid = range.max_nwid = 0; /* We don't use nwids */
/* Set available channels/frequencies */
range.txpower[0] = 15; /* 15dBm */
range.txpower_capa = IW_TXPOW_DBM;
+#if WIRELESS_EXT > 10
+ range.retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
+ range.retry_flags = IW_RETRY_LIMIT;
+ range.r_time_flags = IW_RETRY_LIFETIME;
+ range.min_retry = 0;
+ range.max_retry = 65535; /* ??? */
+ range.min_r_time = 0;
+ range.max_r_time = 65535 * 1000; /* ??? */
+#endif /* WIRELESS_EXT > 10 */
+
if (copy_to_user(rrq->pointer, &range, sizeof(range)))
return -EFAULT;
int index = (erq->flags & IW_ENCODE_INDEX) - 1;
int setindex = priv->tx_key;
int enable = priv->wep_on;
- int auth = priv->wep_auth;
+ int restricted = priv->wep_restrict;
uint16_t xlen = 0;
int err = 0;
char keybuf[MAX_KEY_SIZE];
if (erq->flags & IW_ENCODE_DISABLED)
enable = 0;
- /* Only for symbol cards (so far) - Jean II */
+ /* Only for Prism2 & Symbol cards (so far) - Jean II */
if (erq->flags & IW_ENCODE_OPEN)
- auth = 1;
+ restricted = 0;
if (erq->flags & IW_ENCODE_RESTRICTED)
- auth = 2; /* If all key are 128 -> should be 3 ??? */
- /* Agree with master wep setting */
- if (enable == 0)
- auth = 0;
- else if(auth == 0)
- auth = 1; /* Encryption require some authentication */
+ restricted = 1;
if (erq->pointer) {
priv->keys[index].len = cpu_to_le16(xlen);
}
priv->tx_key = setindex;
priv->wep_on = enable;
- priv->wep_auth = auth;
+ priv->wep_restrict = restricted;
out:
dldwd_unlock(priv);
erq->flags |= index + 1;
/* Only for symbol cards - Jean II */
- if (priv->firmware_type == FIRMWARE_TYPE_SYMBOL) {
- switch(priv->wep_auth) {
- case 1:
- erq->flags |= IW_ENCODE_OPEN;
- break;
- case 2:
- case 3:
+ if (priv->firmware_type != FIRMWARE_TYPE_LUCENT) {
+ if(priv->wep_restrict)
erq->flags |= IW_ENCODE_RESTRICTED;
- break;
- case 0:
- default:
- break;
- }
+ else
+ erq->flags |= IW_ENCODE_OPEN;
}
xlen = le16_to_cpu(priv->keys[index].len);
return err;
}
+#if WIRELESS_EXT > 10
+static int dldwd_ioctl_setretry(struct net_device *dev, struct iw_param *rrq)
+{
+ dldwd_priv_t *priv = dev->priv;
+ int err = 0;
+
+ dldwd_lock(priv);
+
+ if ((rrq->disabled) || (!priv->has_retry)){
+ err = -EOPNOTSUPP;
+ goto out;
+ } else {
+ if (rrq->flags & IW_RETRY_LIMIT) {
+ if (rrq->flags & IW_RETRY_MAX)
+ priv->retry_long = rrq->value;
+ else if (rrq->flags & IW_RETRY_MIN)
+ priv->retry_short = rrq->value;
+ else {
+ /* No modifier : set both */
+ priv->retry_long = rrq->value;
+ priv->retry_short = rrq->value;
+ }
+ }
+ if (rrq->flags & IW_RETRY_LIFETIME) {
+ priv->retry_time = rrq->value / 1000;
+ }
+ if ((rrq->flags & IW_RETRY_TYPE) == 0) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ out:
+ dldwd_unlock(priv);
+
+ return err;
+}
+
+static int dldwd_ioctl_getretry(struct net_device *dev, struct iw_param *rrq)
+{
+ dldwd_priv_t *priv = dev->priv;
+ hermes_t *hw = &priv->hw;
+ int err = 0;
+ uint16_t short_limit, long_limit, lifetime;
+
+ dldwd_lock(priv);
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORT_RETRY_LIMIT, &short_limit);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONG_RETRY_LIMIT, &long_limit);
+ if (err)
+ goto out;
+
+ err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAX_TX_LIFETIME, &lifetime);
+ if (err)
+ goto out;
+
+ rrq->disabled = 0; /* Can't be disabled */
+
+ /* Note : by default, display the retry number */
+ if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ rrq->flags = IW_RETRY_LIFETIME;
+ rrq->value = lifetime * 1000; /* ??? */
+ } else {
+ /* By default, display the min number */
+ if ((rrq->flags & IW_RETRY_MAX)) {
+ rrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+ rrq->value = long_limit;
+ } else {
+ rrq->flags = IW_RETRY_LIMIT;
+ rrq->value = short_limit;
+ if(short_limit != long_limit)
+ rrq->flags |= IW_RETRY_MIN;
+ }
+ }
+
+ out:
+ dldwd_unlock(priv);
+
+ return err;
+}
+#endif /* WIRELESS_EXT > 10 */
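As a usage illustration (a hedged user-space sketch; the helper name, the datagram socket and the interface-name parameter are our choices, not part of the driver), dldwd_ioctl_setretry() is reached via SIOCSIWRETRY with a struct iw_param:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/wireless.h>

	int set_retry_limit(const char *ifname, int value)
	{
		struct iwreq wrq;
		int sock = socket(AF_INET, SOCK_DGRAM, 0);
		int err;

		if (sock < 0)
			return -1;
		memset(&wrq, 0, sizeof(wrq));
		strncpy(wrq.ifr_name, ifname, IFNAMSIZ);
		wrq.u.retry.flags = IW_RETRY_LIMIT;	/* no MIN/MAX modifier: sets both limits */
		wrq.u.retry.value = value;
		wrq.u.retry.disabled = 0;
		err = ioctl(sock, SIOCSIWRETRY, &wrq);	/* ends up in dldwd_ioctl_setretry() */
		close(sock);
		return err;
	}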
+
static int dldwd_ioctl_setport3(struct net_device *dev, struct iwreq *wrq)
{
dldwd_priv_t *priv = dev->priv;
err = -EINVAL;
}
+ if (! err)
+ /* Actually update the mode we are using */
+ set_port_type(priv);
+
dldwd_unlock(priv);
return err;
wrq->u.txpower.flags = IW_TXPOW_DBM;
break;
+#if WIRELESS_EXT > 10
+ case SIOCSIWRETRY:
+ DEBUG(1, "%s: SIOCSIWRETRY\n", dev->name);
+ err = dldwd_ioctl_setretry(dev, &wrq->u.retry);
+ if (! err)
+ changed = 1;
+ break;
+
+ case SIOCGIWRETRY:
+ DEBUG(1, "%s: SIOCGIWRETRY\n", dev->name);
+ err = dldwd_ioctl_getretry(dev, &wrq->u.retry);
+ break;
+#endif /* WIRELESS_EXT > 10 */
+
case SIOCSIWSPY:
DEBUG(1, "%s: SIOCSIWSPY\n", dev->name);
0, "set_port3" },
{ SIOCDEVPRIVATE + 0x3, 0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
- "get_port3" }
+ "get_port3" },
+ { SIOCDEVPRIVATE + 0x4,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ 0, "set_preamble" },
+ { SIOCDEVPRIVATE + 0x5, 0,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+ "get_preamble" }
};
err = verify_area(VERIFY_WRITE, wrq->u.data.pointer, sizeof(privtab));
err = dldwd_ioctl_getport3(dev, wrq);
break;
+ case SIOCDEVPRIVATE + 0x4: /* set_preamble */
+ DEBUG(1, "%s: SIOCDEVPRIVATE + 0x4 (set_preamble)\n",
+ dev->name);
+ if (! capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+
+		/* 802.11b has recently defined a short preamble.
+		 * Basically, the PHY header has been reduced in size.
+		 * This increases performance, especially at high rates
+		 * (the preamble is transmitted at 1Mb/s), but unfortunately
+		 * it gives compatibility trouble... - Jean II */
+ if(priv->has_preamble) {
+ int val = *( (int *) wrq->u.name );
+
+ dldwd_lock(priv);
+ if(val)
+ priv->preamble = 1;
+ else
+ priv->preamble = 0;
+ dldwd_unlock(priv);
+ changed = 1;
+ } else
+ err = -EOPNOTSUPP;
+ break;
+
+ case SIOCDEVPRIVATE + 0x5: /* get_preamble */
+ DEBUG(1, "%s: SIOCDEVPRIVATE + 0x5 (get_preamble)\n",
+ dev->name);
+ if(priv->has_preamble) {
+ int *val = (int *)wrq->u.name;
+
+ dldwd_lock(priv);
+ *val = priv->preamble;
+ dldwd_unlock(priv);
+ } else
+ err = -EOPNOTSUPP;
+ break;
+
default:
err = -EOPNOTSUPP;
}
TRACE_EXIT("dldwd");
} /* dldwd_detach */
+/*
+ * Do a soft reset of the Pcmcia card using the Configuration Option Register
+ * Can't do any harm, and actually may do some good on some cards...
+ */
+static int dldwd_cor_reset(dev_link_t *link)
+{
+ conf_reg_t reg;
+ u_long default_cor;
+
+ /* Save original COR value */
+ reg.Function = 0;
+ reg.Action = CS_READ;
+ reg.Offset = CISREG_COR;
+ reg.Value = 0;
+	CardServices(AccessConfigurationRegister, link->handle, &reg);
+ default_cor = reg.Value;
+
+ DEBUG(2, "dldwd : dldwd_cor_reset() : cor=0x%lX\n", default_cor);
+
+ /* Soft-Reset card */
+ reg.Action = CS_WRITE;
+ reg.Offset = CISREG_COR;
+ reg.Value = (default_cor | COR_SOFT_RESET);
+	CardServices(AccessConfigurationRegister, link->handle, &reg);
+
+ /* Wait until the card has acknowledged our reset */
+ mdelay(1);
+
+ /* Restore original COR configuration index */
+ reg.Value = (default_cor & COR_CONFIG_MASK);
+	CardServices(AccessConfigurationRegister, link->handle, &reg);
+
+ /* Wait until the card has finished restarting */
+ mdelay(1);
+
+ return(0);
+}
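Note that this soft reset is opt-in: it only happens when the driver is loaded with the new reset_cor module parameter set (e.g. something like 'insmod orinoco_cs.o reset_cor=1'), which makes dldwd_config() call dldwd_cor_reset() before the netdevice is registered, as a later hunk shows.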
+
/*======================================================================
dldwd_config() is scheduled to run after a CARD_INSERTION event
is received, to configure the PCMCIA socket, and to make the
int last_fn, last_ret;
u_char buf[64];
config_info_t conf;
+ cistpl_cftable_entry_t dflt = { 0 };
cisinfo_t info;
TRACE_ENTER("dldwd");
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
CS_CHECK(GetFirstTuple, handle, &tuple);
while (1) {
- cistpl_cftable_entry_t dflt = { 0 };
cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
CFG_CHECK(GetTupleData, handle, &tuple);
CFG_CHECK(ParseTuple, handle, &tuple, &parse);
/* Note that the CIS values need to be rescaled */
if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
if (conf.Vcc !=
- cfg->vcc.param[CISTPL_POWER_VNOM] /
- 10000) goto next_entry;
+ cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
+ DEBUG(2, "dldwd_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
+ goto next_entry;
+ }
} else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
if (conf.Vcc !=
- dflt.vcc.param[CISTPL_POWER_VNOM] /
- 10000) goto next_entry;
+ dflt.vcc.param[CISTPL_POWER_VNOM] / 10000) {
+ DEBUG(2, "dldwd_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
+ goto next_entry;
+ }
}
if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
ndev->base_addr = link->io.BasePort1;
ndev->irq = link->irq.AssignedIRQ;
- /* Instance name : by default, use hermesX, on demand use the
- * regular ethX (less risky) - Jean II */
- if(!eth)
- sprintf(ndev->name, "hermes%d", priv->instance);
- else
- ndev->name[0] = '\0';
+ /* Do a Pcmcia soft reset of the card (optional) */
+ if(reset_cor)
+ dldwd_cor_reset(link);
+
+ /* register_netdev will give us an ethX name */
+ ndev->name[0] = '\0';
/* Tell the stack we exist */
if (register_netdev(ndev) != 0) {
printk(KERN_ERR "orinoco_cs: register_netdev() failed\n");
struct iw_range range;
memset((char *) &range, 0, sizeof(struct iw_range));
- /* Set the length (useless : its constant...) */
+ /* Set the length (very important for backward compatibility) */
wrq->u.data.length = sizeof(struct iw_range);
+#if WIRELESS_EXT > 10
+ /* Set the Wireless Extension versions */
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 9;
+#endif /* WIRELESS_EXT > 10 */
+
/* Set information in the range struct */
range.throughput = 1.1 * 1000 * 1000; /* Put the right number here */
range.num_channels = hop_pattern_length[(int)country];
{
struct iw_range range;
- /* Set the length (useless : its constant...) */
+ /* Set the length (very important for backward compatibility) */
wrq->u.data.length = sizeof(struct iw_range);
+ /* Set all the info we don't care or don't know about to zero */
+ memset(&range, 0, sizeof(range));
+
+ /* Set the Wireless Extension versions */
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 9; /* Nothing for us in v10 and v11 */
+
/* Set information in the range struct */
range.throughput = 1.4 * 1000 * 1000; /* don't argue on this ! */
range.min_nwid = 0x0000;
switch (CCP_CODE(dp)) {
case CCP_CONFREQ:
+
+ /* A ConfReq starts negotiation of compression
+ * in one direction of transmission,
+ * and hence brings it down...but which way?
+ *
+ * Remember:
+ * A ConfReq indicates what the sender would like to receive
+ */
+ if(inbound)
+ /* He is proposing what I should send */
+ ppp->xstate &= ~SC_COMP_RUN;
+ else
+		/* I am proposing what he should send */
+ ppp->rstate &= ~SC_DECOMP_RUN;
+
+ break;
+
case CCP_TERMREQ:
case CCP_TERMACK:
/*
- * CCP is going down - disable compression.
+ * CCP is going down, both directions of transmission
*/
- if (inbound)
- ppp->rstate &= ~SC_DECOMP_RUN;
- else
- ppp->xstate &= ~SC_COMP_RUN;
+ ppp->rstate &= ~SC_DECOMP_RUN;
+ ppp->xstate &= ~SC_COMP_RUN;
break;
case CCP_CONFACK:
if (wrq->u.data.pointer != (caddr_t) 0) {
struct iw_range range;
- /* Set the length (useless: it's constant). */
+ /* Set the length (very important for backward
+ * compatibility) */
wrq->u.data.length = sizeof(struct iw_range);
+ /* Set all the info we don't care or don't know
+ * about to zero */
+ memset(&range, 0, sizeof(range));
+
+ /* Set the Wireless Extension versions */
+ range.we_version_compiled = WIRELESS_EXT;
+ range.we_version_source = 9;
+
/* Set information in the range struct. */
range.throughput = 1.6 * 1000 * 1000; /* don't argue on this ! */
range.min_nwid = 0x0000;
const unsigned char scsi_command_size[8] =
{
6, 10, 10, 12,
- 12, 12, 10, 10
+ 16, 12, 10, 10
};
static unsigned long serial_number;
static Scsi_Cmnd *scsi_bh_queue_head;
SRpnt->sr_done = done;
SRpnt->sr_timeout_per_command = timeout;
- memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
- sizeof(SRpnt->sr_cmnd));
-
if (SRpnt->sr_cmd_len == 0)
SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
/* move this volume setup to mixer */
ymfpci_writel(codec, YDSXGR_NATIVEDACOUTVOL, 0x3fff3fff);
- ymfpci_writel(codec, YDSXGR_BUF441OUTVOL, 0x3fff3fff);
+ ymfpci_writel(codec, YDSXGR_BUF441OUTVOL, 0);
ymfpci_writel(codec, YDSXGR_NATIVEADCINVOL, 0x3fff3fff);
ymfpci_writel(codec, YDSXGR_NATIVEDACINVOL, 0x3fff3fff);
static void hub_put(struct usb_hub *hub)
{
if (atomic_dec_and_test(&hub->refcnt)) {
- if (hub->urb) {
- usb_unlink_urb(hub->urb);
- usb_free_urb(hub->urb);
- hub->urb = NULL;
- }
-
if (hub->descriptor) {
kfree(hub->descriptor);
hub->descriptor = NULL;
struct usb_hub *hub = (struct usb_hub *)ptr;
unsigned long flags;
+ if (hub->urb) {
+ usb_unlink_urb(hub->urb);
+ usb_free_urb(hub->urb);
+ hub->urb = NULL;
+ }
+
spin_lock_irqsave(&hub_event_lock, flags);
/* Delete it and then reset it */
* visible pointers are surrounded in ()'s
*
* (C) Copyright 1999 Linus Torvalds
- * (C) Copyright 1999 Johannes Erdfelt
+ * (C) Copyright 1999-2001 Johannes Erdfelt
*/
#include <linux/kernel.h>
+#include <linux/proc_fs.h>
#include <asm/io.h>
#include "uhci.h"
-void uhci_show_td(struct uhci_td *td)
+/* Handle REALLY large printk's so we don't overflow buffers */
+static inline void lprintk(char *buf)
{
+ char *p;
+
+ /* Just write one line at a time */
+ while (buf) {
+ p = strchr(buf, '\n');
+ if (p)
+ *p = 0;
+ printk("%s\n", buf);
+ buf = p;
+ if (buf)
+ buf++;
+ }
+}
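A quick usage note (hypothetical buffer contents): lprintk() rewrites each '\n' to '\0' as it walks the buffer, so it must be handed a writable buffer, never a pointer to a string literal:

	char msg[] = "line one\nline two\nline three";	/* writable copy */
	lprintk(msg);	/* three printk() calls, one line each */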
+
+static inline int uhci_is_skeleton_td(struct uhci *uhci, struct uhci_td *td)
+{
+ int i;
+
+ for (i = 0; i < UHCI_NUM_SKELTD; i++)
+ if (td == uhci->skeltd[i])
+ return 1;
+
+ return 0;
+}
+
+static inline int uhci_is_skeleton_qh(struct uhci *uhci, struct uhci_qh *qh)
+{
+ int i;
+
+ for (i = 0; i < UHCI_NUM_SKELQH; i++)
+ if (qh == uhci->skelqh[i])
+ return 1;
+
+ return 0;
+}
+
+static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
+{
+ char *out = buf;
char *spid;
- printk("%08x ", td->link);
- printk("e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
+ /* Try to make sure there's enough memory */
+ if (len < 160)
+ return 0;
+
+ out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td, td->link);
+ out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
((td->status >> 27) & 3),
(td->status & TD_CTRL_SPD) ? "SPD " : "",
(td->status & TD_CTRL_LS) ? "LS " : "",
break;
}
- printk("MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ",
+ out += sprintf(out, "MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ",
td->info >> 21,
((td->info >> 19) & 1),
(td->info >> 15) & 15,
(td->info >> 8) & 127,
(td->info & 0xff),
spid);
- printk("(buf=%08x)\n", td->buffer);
+ out += sprintf(out, "(buf=%08x)\n", td->buffer);
+
+ return out - buf;
}
-static void uhci_show_sc(int port, unsigned short status)
+static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
{
- printk(" stat%d = %04x %s%s%s%s%s%s%s%s\n",
+ char *out = buf;
+
+ /* Try to make sure there's enough memory */
+ if (len < 80)
+ return 0;
+
+ out += sprintf(out, " stat%d = %04x %s%s%s%s%s%s%s%s\n",
port,
status,
(status & USBPORTSC_SUSP) ? "PortSuspend " : "",
(status & USBPORTSC_PE) ? "PortEnabled " : "",
(status & USBPORTSC_CSC) ? "ConnectChange " : "",
(status & USBPORTSC_CCS) ? "PortConnected " : "");
+
+ return out - buf;
}
-void uhci_show_status(struct uhci *uhci)
+static int uhci_show_status(struct uhci *uhci, char *buf, int len)
{
+ char *out = buf;
unsigned int io_addr = uhci->io_addr;
unsigned short usbcmd, usbstat, usbint, usbfrnum;
unsigned int flbaseadd;
unsigned char sof;
unsigned short portsc1, portsc2;
+ /* Try to make sure there's enough memory */
+ if (len < 80 * 6)
+ return 0;
+
usbcmd = inw(io_addr + 0);
usbstat = inw(io_addr + 2);
usbint = inw(io_addr + 4);
portsc1 = inw(io_addr + 16);
portsc2 = inw(io_addr + 18);
- printk(" usbcmd = %04x %s%s%s%s%s%s%s%s\n",
+ out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n",
usbcmd,
(usbcmd & USBCMD_MAXP) ? "Maxp64 " : "Maxp32 ",
(usbcmd & USBCMD_CF) ? "CF " : "",
(usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
(usbcmd & USBCMD_RS) ? "RS " : "");
- printk(" usbstat = %04x %s%s%s%s%s%s\n",
+ out += sprintf(out, " usbstat = %04x %s%s%s%s%s%s\n",
usbstat,
(usbstat & USBSTS_HCH) ? "HCHalted " : "",
(usbstat & USBSTS_HCPE) ? "HostControllerProcessError " : "",
(usbstat & USBSTS_ERROR) ? "USBError " : "",
(usbstat & USBSTS_USBINT) ? "USBINT " : "");
- printk(" usbint = %04x\n", usbint);
- printk(" usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1,
+ out += sprintf(out, " usbint = %04x\n", usbint);
+ out += sprintf(out, " usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1,
0xfff & (4*(unsigned int)usbfrnum));
- printk(" flbaseadd = %08x\n", flbaseadd);
- printk(" sof = %02x\n", sof);
- uhci_show_sc(1, portsc1);
- uhci_show_sc(2, portsc2);
-}
+ out += sprintf(out, " flbaseadd = %08x\n", flbaseadd);
+ out += sprintf(out, " sof = %02x\n", sof);
+ out += uhci_show_sc(1, portsc1, out, len - (out - buf));
+ out += uhci_show_sc(2, portsc2, out, len - (out - buf));
-#define uhci_link_to_qh(x) ((struct uhci_qh *) uhci_link_to_td(x))
-
-struct uhci_td *uhci_link_to_td(unsigned int link)
-{
- if (link & UHCI_PTR_TERM)
- return NULL;
-
- return bus_to_virt(link & ~UHCI_PTR_BITS);
+ return out - buf;
}
-void uhci_show_urb_queue(struct urb *urb)
+static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
{
- struct urb_priv *urbp = urb->hcpriv;
+ char *out = buf;
+ struct urb_priv *urbp;
struct list_head *head, *tmp;
- int i, checked = 0, prevactive = 0;
+ struct uhci_td *td;
+ int i = 0, checked = 0, prevactive = 0;
- printk(" URB [%p] urbp [%p]\n", urb, urbp);
+ /* Try to make sure there's enough memory */
+ if (len < 80 * 6)
+ return 0;
- if (urbp->qh)
- printk(" QH [%p]\n", urbp->qh);
- else
- printk(" QH [%p] element (%08x) link (%08x)\n", urbp->qh,
- urbp->qh->element, urbp->qh->link);
+ out += sprintf(out, "%*s[%p] link (%08x) element (%08x)\n", space, "",
+ qh, qh->link, qh->element);
+
+ if (qh->element & UHCI_PTR_QH)
+ out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");
+
+ if (qh->element & UHCI_PTR_DEPTH)
+ out += sprintf(out, "%*s Depth traverse\n", space, "");
- i = 0;
+ if (qh->element & 8)
+ out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, "");
- head = &urbp->list;
+ if (!(qh->element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH)))
+ out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");
+
+ if (!qh->urbp) {
+ out += sprintf(out, "%*s urbp == NULL\n", space, "");
+ goto out;
+ }
+
+ urbp = qh->urbp;
+
+ head = &urbp->td_list;
tmp = head->next;
+
+ td = list_entry(tmp, struct uhci_td, list);
+
+ if (td->dma_handle != (qh->element & ~UHCI_PTR_BITS))
+ out += sprintf(out, "%*s Element != First TD\n", space, "");
+
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
- printk(" td %d: [%p]\n", i++, td);
- printk(" ");
- uhci_show_td(td);
+ out += sprintf(out, "%*s%d: ", space + 2, "", i++);
+ out += uhci_show_td(td, out, len - (out - buf), 0);
- if (i > 10 && !checked && prevactive && tmp != head) {
+ if (i > 10 && !checked && prevactive && tmp != head &&
+ debug <= 2) {
struct list_head *ntmp = tmp;
struct uhci_td *ntd = td;
int active = 1, ni = i;
}
if (active && ni > i) {
- printk(" [skipped %d active TD's]\n", ni - i);
+ out += sprintf(out, "%*s[skipped %d active TD's]\n", space, "", ni - i);
tmp = ntmp;
td = ntd;
i = ni;
prevactive = td->status & TD_CTRL_ACTIVE;
}
-}
-void uhci_show_queue(struct uhci_qh *qh)
-{
- struct uhci_td *td, *first;
- int i = 0, count = 1000;
+ if (list_empty(&urbp->queue_list) || urbp->queued)
+ goto out;
- if (qh->element & UHCI_PTR_QH)
- printk(" Element points to QH (bug?)\n");
+ out += sprintf(out, "%*sQueued QH's:\n", -space, "--");
- if (qh->element & UHCI_PTR_DEPTH)
- printk(" Depth traverse\n");
+ head = &urbp->queue_list;
+ tmp = head->next;
- if (qh->element & UHCI_PTR_TERM)
- printk(" Terminate\n");
+ while (tmp != head) {
+ struct urb_priv *nurbp = list_entry(tmp, struct urb_priv,
+ queue_list);
+ tmp = tmp->next;
- if (!(qh->element & ~UHCI_PTR_BITS)) {
- printk(" td 0: [NULL]\n");
- return;
+ out += uhci_show_qh(nurbp->qh, out, len - (out - buf), space);
}
- first = uhci_link_to_td(qh->element);
+out:
+ return out - buf;
+}
+
+static const char *td_names[] = {"skel_int1_td", "skel_int2_td",
+ "skel_int4_td", "skel_int8_td",
+ "skel_int16_td", "skel_int32_td",
+ "skel_int64_td", "skel_int128_td",
+ "skel_int256_td", "skel_term_td" };
+static const char *qh_names[] = { "skel_ls_control_qh", "skel_hs_control_qh",
+ "skel_bulk_qh", "skel_term_qh" };
+
+#define show_frame_num() \
+ if (!shown) { \
+ shown = 1; \
+ out += sprintf(out, "- Frame %d\n", i); \
+ }
- /* Make sure it doesn't runaway */
- for (td = first; td && count > 0;
- td = uhci_link_to_td(td->link), --count) {
- printk(" td %d: [%p]\n", i++, td);
- printk(" ");
- uhci_show_td(td);
+#define show_td_name() \
+ if (!shown) { \
+ shown = 1; \
+ out += sprintf(out, "- %s\n", td_names[i]); \
+ }
- if (td == uhci_link_to_td(td->link)) {
- printk(KERN_ERR "td links to itself!\n");
- break;
- }
+#define show_qh_name() \
+ if (!shown) { \
+ shown = 1; \
+ out += sprintf(out, "- %s\n", qh_names[i]); \
}
-}
-static int uhci_is_skeleton_td(struct uhci *uhci, struct uhci_td *td)
+static int uhci_sprint_schedule(struct uhci *uhci, char *buf, int len)
{
- int j;
+ char *out = buf;
+ int i;
+ struct uhci_qh *qh;
+ struct uhci_td *td;
+ struct list_head *tmp, *head;
- for (j = 0; j < UHCI_NUM_SKELTD; j++)
- if (td == uhci->skeltd + j)
- return 1;
+ out += sprintf(out, "HC status\n");
+ out += uhci_show_status(uhci, out, len - (out - buf));
- return 0;
-}
+ out += sprintf(out, "Frame List\n");
+ for (i = 0; i < UHCI_NUMFRAMES; ++i) {
+ int shown = 0;
+ td = uhci->fl->frame_cpu[i];
+ if (!td)
+ continue;
-static int uhci_is_skeleton_qh(struct uhci *uhci, struct uhci_qh *qh)
-{
- int j;
+ if (td->dma_handle != (dma_addr_t)uhci->fl->frame[i]) {
+ show_frame_num();
+ out += sprintf(out, " frame list does not match td->dma_handle!\n");
+ }
+ if (uhci_is_skeleton_td(uhci, td))
+ continue;
+ show_frame_num();
+
+ head = &td->fl_list;
+ tmp = head;
+ do {
+ td = list_entry(tmp, struct uhci_td, fl_list);
+ tmp = tmp->next;
+ out += uhci_show_td(td, out, len - (out - buf), 4);
+ } while (tmp != head);
+ }
- for (j = 0; j < UHCI_NUM_SKELQH; j++)
- if (qh == uhci->skelqh + j)
- return 1;
+ out += sprintf(out, "Skeleton TD's\n");
+ for (i = UHCI_NUM_SKELTD - 1; i >= 0; i--) {
+ int shown = 0;
- return 0;
-}
+ td = uhci->skeltd[i];
-static const char *td_names[] = {"interrupt1", "interrupt2", "interrupt4",
- "interrupt8", "interrupt16", "interrupt32",
- "interrupt64", "interrupt128", "interrupt256" };
-static const char *qh_names[] = { "control", "bulk" };
+ if (debug > 1) {
+ show_td_name();
+ out += uhci_show_td(td, out, len - (out - buf), 4);
+ }
-void uhci_show_queues(struct uhci *uhci)
-{
- int i, isqh = 0;
- struct uhci_qh *qh;
- struct uhci_td *td;
+ if (list_empty(&td->fl_list)) {
+ /* TD 0 is the int1 TD and links to control_ls_qh */
+ if (!i) {
+ if (td->link !=
+ (uhci->skel_ls_control_qh->dma_handle | UHCI_PTR_QH)) {
+ show_td_name();
+ out += sprintf(out, " skeleton TD not linked to ls_control QH!\n");
+ }
+ } else if (i < 9) {
+ if (td->link != uhci->skeltd[i - 1]->dma_handle) {
+ show_td_name();
+ out += sprintf(out, " skeleton TD not linked to next skeleton TD!\n");
+ }
+ } else {
+ show_td_name();
+
+ if (td->link != td->dma_handle)
+ out += sprintf(out, " skel_term_td does not link to self\n");
+
+ out += uhci_show_td(td, out, len - (out - buf), 4);
+ }
- for (i = 0; i < UHCI_NUMFRAMES; ++i) {
- int shown = 0;
+ continue;
+ }
- td = uhci_link_to_td(uhci->fl->frame[i]);
- if (td)
- isqh = uhci->fl->frame[i] & UHCI_PTR_QH;
- while (td && !isqh) {
- if (uhci_is_skeleton_td(uhci, td))
- break;
+ show_td_name();
- if (!shown) {
- printk(" Frame %d\n", i);
- shown = 1;
- }
+ head = &td->fl_list;
+ tmp = head->next;
- printk("[%p] ", td);
+ while (tmp != head) {
+ td = list_entry(tmp, struct uhci_td, fl_list);
- uhci_show_td(td);
- td = uhci_link_to_td(td->link);
- if (td)
- isqh = td->link & UHCI_PTR_QH;
+ tmp = tmp->next;
+
+ out += uhci_show_td(td, out, len - (out - buf), 4);
}
- }
- for (i = 0; i < UHCI_NUM_SKELTD; ++i) {
- printk(" %s: [%p] (%08x)\n", td_names[i],
- &uhci->skeltd[i],
- uhci->skeltd[i].link);
-
- td = uhci_link_to_td(uhci->skeltd[i].link);
- if (td)
- isqh = uhci->skeltd[i].link & UHCI_PTR_QH;
- while (td && !isqh) {
- if (uhci_is_skeleton_td(uhci, td))
- break;
-
- printk("[%p] ", td);
-
- uhci_show_td(td);
- td = uhci_link_to_td(td->link);
- if (td)
- isqh = td->link & UHCI_PTR_QH;
+
+ if (!i) {
+ if (td->link !=
+ (uhci->skel_ls_control_qh->dma_handle | UHCI_PTR_QH))
+ out += sprintf(out, " last TD not linked to ls_control QH!\n");
+ } else if (i < 9) {
+ if (td->link != uhci->skeltd[i - 1]->dma_handle)
+ out += sprintf(out, " last TD not linked to next skeleton!\n");
}
}
+
+ out += sprintf(out, "Skeleton QH's\n");
+
for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
- printk(" %s: [%p] (%08x) (%08x)\n", qh_names[i],
- &uhci->skelqh[i],
- uhci->skelqh[i].link, uhci->skelqh[i].element);
+ int shown = 0;
+
+ qh = uhci->skelqh[i];
+
+ if (debug > 1) {
+ show_qh_name();
+ out += uhci_show_qh(qh, out, len - (out - buf), 4);
+ }
+
+ /* QH 3 is the terminating QH, so it's handled differently */
+ if (i == 3) {
+ if (qh->link != UHCI_PTR_TERM) {
+ show_qh_name();
+ out += sprintf(out, " bandwidth reclamation on!\n");
+ }
+
+ if (qh->element != uhci->skel_term_td->dma_handle) {
+ show_qh_name();
+ out += sprintf(out, " skel_term_qh element is not set to skel_term_td\n");
+ }
+ }
+
+ if (list_empty(&qh->list)) {
+ if (i < 3) {
+ if (qh->link !=
+ (uhci->skelqh[i + 1]->dma_handle | UHCI_PTR_QH)) {
+ show_qh_name();
+ out += sprintf(out, " skeleton QH not linked to next skeleton QH!\n");
+ }
+ }
+
+ continue;
+ }
+
+ show_qh_name();
+
+ head = &qh->list;
+ tmp = head->next;
- qh = uhci_link_to_qh(uhci->skelqh[i].link);
- for (; qh; qh = uhci_link_to_qh(qh->link)) {
- if (uhci_is_skeleton_qh(uhci, qh))
- break;
+ while (tmp != head) {
+ qh = list_entry(tmp, struct uhci_qh, list);
- printk(" [%p] (%08x) (%08x)\n",
- qh, qh->link, qh->element);
+ tmp = tmp->next;
- uhci_show_queue(qh);
+ out += uhci_show_qh(qh, out, len - (out - buf), 4);
}
+
+ if (i < 3) {
+ if (qh->link !=
+ (uhci->skelqh[i + 1]->dma_handle | UHCI_PTR_QH))
+ out += sprintf(out, " last QH not linked to next skeleton!\n");
+ }
+ }
+
+ return out - buf;
+}
+
+#ifdef CONFIG_PROC_FS
+#define MAX_OUTPUT (PAGE_SIZE * 8)
+
+static struct proc_dir_entry *uhci_proc_root = NULL;
+
+struct uhci_proc {
+ int size;
+ char *data;
+ struct uhci *uhci;
+};
+
+static int uhci_proc_open(struct inode *inode, struct file *file)
+{
+ const struct proc_dir_entry *dp = inode->u.generic_ip;
+ struct uhci *uhci = dp->data;
+ struct uhci_proc *up;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ lock_kernel();
+ up = kmalloc(sizeof(*up), GFP_KERNEL);
+ if (!up)
+ goto out;
+
+ up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
+ if (!up->data) {
+ kfree(up);
+ goto out;
+ }
+
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
+ up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
+
+ file->private_data = up;
+
+ ret = 0;
+out:
+ unlock_kernel();
+ return ret;
+}
+
+static loff_t uhci_proc_lseek(struct file *file, loff_t off, int whence)
+{
+ struct uhci_proc *up = file->private_data;
+ loff_t new;
+
+ switch (whence) {
+ case 0:
+ new = off;
+ break;
+ case 1:
+ new = file->f_pos + off;
+ break;
+ case 2:
+ default:
+ return -EINVAL;
}
+ if (new < 0 || new > up->size)
+ return -EINVAL;
+ return (file->f_pos = new);
+}
+
+static ssize_t uhci_proc_read(struct file *file, char *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct uhci_proc *up = file->private_data;
+ unsigned int pos;
+ unsigned int size;
+
+ pos = *ppos;
+ size = up->size;
+ if (pos >= size)
+ return 0;
+ if (nbytes >= size)
+ nbytes = size;
+ if (pos + nbytes > size)
+ nbytes = size - pos;
+
+ if (!access_ok(VERIFY_WRITE, buf, nbytes))
+ return -EFAULT;
+
+ if (copy_to_user(buf, up->data + pos, nbytes))
+ return -EFAULT;
+
+ *ppos += nbytes;
+
+ return nbytes;
+}
+
+static int uhci_proc_release(struct inode *inode, struct file *file)
+{
+ struct uhci_proc *up = file->private_data;
+
+ kfree(up->data);
+ kfree(up);
+
+ return 0;
}
+static struct file_operations uhci_proc_operations = {
+ open: uhci_proc_open,
+ llseek: uhci_proc_lseek,
+ read: uhci_proc_read,
+// write: uhci_proc_write,
+ release: uhci_proc_release,
+};
+#endif
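The debug interface above deliberately decouples rendering from reading: uhci_proc_open() renders the whole schedule into a private buffer once, under frame_list_lock, and uhci_proc_read()/uhci_proc_lseek() then serve byte ranges out of that frozen snapshot, so a slow or repeated reader never holds the HC's locks. Below is a minimal user-space model of the same snapshot-then-serve pattern; all names are illustrative, with plain malloc standing in for kmalloc and the bounds clamping mirroring uhci_proc_read.

/* Snapshot-style read model: "open" renders state once, "read" serves
 * clamped byte ranges from the frozen buffer. Illustrative names only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct snap {
    size_t size;
    char *data;
};

/* Stands in for uhci_sprint_schedule(): render state into buf. */
static size_t render_state(char *buf, size_t len)
{
    return snprintf(buf, len, "Frame List\nSkeleton TD's\nSkeleton QH's\n");
}

static struct snap *snap_open(size_t max)
{
    struct snap *s = malloc(sizeof(*s));
    if (!s)
        return NULL;
    s->data = malloc(max);
    if (!s->data) {
        free(s);
        return NULL;
    }
    s->size = render_state(s->data, max);   /* one-shot snapshot */
    return s;
}

/* Same bounds logic as uhci_proc_read: clamp to the snapshot size. */
static size_t snap_read(struct snap *s, size_t pos, char *buf, size_t n)
{
    if (pos >= s->size)
        return 0;
    if (pos + n > s->size)
        n = s->size - pos;
    memcpy(buf, s->data + pos, n);
    return n;
}

int main(void)
{
    char buf[8];
    size_t pos = 0, n;
    struct snap *s = snap_open(256);
    if (!s)
        return 1;
    while ((n = snap_read(s, pos, buf, sizeof(buf))) > 0) {
        fwrite(buf, 1, n, stdout);
        pos += n;
    }
    free(s->data);
    free(s);
    return 0;
}

The real driver caps the snapshot at MAX_OUTPUT (8 pages) for the same reason the model takes a max argument: the render is done blind into a fixed buffer.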
+
/*
* Universal Host Controller Interface driver for USB.
*
+ * Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
+ *
* (C) Copyright 1999 Linus Torvalds
- * (C) Copyright 1999-2000 Johannes Erdfelt, johannes@erdfelt.com
+ * (C) Copyright 1999-2001 Johannes Erdfelt, johannes@erdfelt.com
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, acher@in.tum.de
* (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
* support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
*
- *
* Intel documents this fairly well, and as far as I know there
* are no royalties or anything like that, but even so there are
* people who decided that they want to do the same thing in a
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#ifdef CONFIG_USB_DEBUG
#define DEBUG
+#else
+#undef DEBUG
+#endif
#include <linux/usb.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include "uhci.h"
-#include "uhci-debug.h"
#include <linux/pm.h>
+/*
+ * debug = 0, no debugging messages
+ * debug = 1, dump failed URB's except for stalls
+ * debug = 2, dump all failed URB's (including stalls)
+ * show all queues in /proc/uhci/hc*
+ * debug = 3, show all TD's in URB's when dumping
+ */
+#ifdef DEBUG
static int debug = 1;
+#else
+static int debug = 0;
+#endif
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "Debug level");
+static char *errbuf;
+#define ERRBUF_LEN (PAGE_SIZE * 8)
+
+#include "uhci-debug.h"
-static kmem_cache_t *uhci_td_cachep;
-static kmem_cache_t *uhci_qh_cachep;
static kmem_cache_t *uhci_up_cachep; /* urb_priv */
static int rh_submit_urb(struct urb *urb);
static int rh_unlink_urb(struct urb *urb);
static int uhci_get_current_frame_number(struct usb_device *dev);
-static int uhci_unlink_generic(struct urb *urb);
static int uhci_unlink_urb(struct urb *urb);
+static void uhci_unlink_generic(struct uhci *uhci, struct urb *urb);
+static void uhci_call_completion(struct urb *urb);
static int ports_active(struct uhci *uhci);
static void suspend_hc(struct uhci *uhci);
/* If a transfer is still active after this much time, turn off FSBR */
#define IDLE_TIMEOUT (HZ / 20) /* 50 ms */
+#define MAX_URB_LOOP 2048 /* Maximum number of linked URB's */
+
/*
* Only the USB core should call uhci_alloc_dev and uhci_free_dev
*/
static int uhci_free_dev(struct usb_device *dev)
{
struct uhci *uhci = (struct uhci *)dev->bus->hcpriv;
- struct list_head *tmp, *head = &uhci->urb_list;
+ struct list_head list, *tmp, *head;
unsigned long flags;
/* Walk through the entire URB list and forcefully remove any */
/* URBs that are still active for that device */
- nested_lock(&uhci->urblist_lock, flags);
+
+ /* Two stage unlink so we don't deadlock on urb_list_lock */
+ INIT_LIST_HEAD(&list);
+
+ spin_lock_irqsave(&uhci->urb_list_lock, flags);
+ head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
- struct urb *u = list_entry(tmp, struct urb, urb_list);
+ struct urb *urb = list_entry(tmp, struct urb, urb_list);
tmp = tmp->next;
- if (u->dev == dev)
- uhci_unlink_urb(u);
+ if (urb->dev == dev) {
+ list_del(&urb->urb_list);
+ list_add(&urb->urb_list, &list);
+ }
}
- nested_unlock(&uhci->urblist_lock, flags);
+ spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
- return 0;
-}
+ head = &list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb *urb = list_entry(tmp, struct urb, urb_list);
+ tmp = tmp->next;
-static void uhci_add_urb_list(struct uhci *uhci, struct urb *urb)
-{
- unsigned long flags;
+ /* Make sure we block waiting on these to die */
+ urb->transfer_flags &= ~USB_ASYNC_UNLINK;
+
+ /* uhci_unlink_urb will unlink from the temp list */
+ uhci_unlink_urb(urb);
+ }
- nested_lock(&uhci->urblist_lock, flags);
- list_add(&urb->urb_list, &uhci->urb_list);
- nested_unlock(&uhci->urblist_lock, flags);
+ return 0;
}
-static void uhci_remove_urb_list(struct uhci *uhci, struct urb *urb)
+static inline void uhci_set_next_interrupt(struct uhci *uhci)
{
unsigned long flags;
- nested_lock(&uhci->urblist_lock, flags);
- if (!list_empty(&urb->urb_list)) {
- list_del(&urb->urb_list);
- INIT_LIST_HEAD(&urb->urb_list);
- }
- nested_unlock(&uhci->urblist_lock, flags);
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
+ uhci->skel_term_td->status |= TD_CTRL_IOC;
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
-void uhci_set_next_interrupt(struct uhci *uhci)
+static inline void uhci_clear_next_interrupt(struct uhci *uhci)
{
unsigned long flags;
- spin_lock_irqsave(&uhci->framelist_lock, flags);
- uhci->skel_term_td.status |= TD_CTRL_IOC;
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
+ uhci->skel_term_td->status &= ~TD_CTRL_IOC;
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
-void uhci_clear_next_interrupt(struct uhci *uhci)
+static inline void uhci_add_complete(struct urb *urb)
{
+ struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
unsigned long flags;
- spin_lock_irqsave(&uhci->framelist_lock, flags);
- uhci->skel_term_td.status &= ~TD_CTRL_IOC;
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
+ spin_lock_irqsave(&uhci->complete_list_lock, flags);
+ list_add(&urbp->complete_list, &uhci->complete_list);
+ spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
-static struct uhci_td *uhci_alloc_td(struct usb_device *dev)
+static struct uhci_td *uhci_alloc_td(struct uhci *uhci, struct usb_device *dev)
{
+ dma_addr_t dma_handle;
struct uhci_td *td;
- td = kmem_cache_alloc(uhci_td_cachep, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
+ td = pci_pool_alloc(uhci->td_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
if (!td)
return NULL;
+ td->dma_handle = dma_handle;
+
td->link = UHCI_PTR_TERM;
td->buffer = 0;
- td->frameptr = NULL;
- td->nexttd = td->prevtd = NULL;
+ td->frame = -1;
td->dev = dev;
+
INIT_LIST_HEAD(&td->list);
+ INIT_LIST_HEAD(&td->fl_list);
usb_inc_dev_use(dev);
static void uhci_insert_td(struct uhci *uhci, struct uhci_td *skeltd, struct uhci_td *td)
{
unsigned long flags;
+ struct uhci_td *ltd;
+
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
- spin_lock_irqsave(&uhci->framelist_lock, flags);
+ ltd = list_entry(skeltd->fl_list.prev, struct uhci_td, fl_list);
- /* Fix the linked list pointers */
- td->nexttd = skeltd->nexttd;
- td->prevtd = skeltd;
- if (skeltd->nexttd)
- skeltd->nexttd->prevtd = td;
- skeltd->nexttd = td;
+ td->link = ltd->link;
+ mb();
+ ltd->link = td->dma_handle;
- td->link = skeltd->link;
- skeltd->link = virt_to_bus(td);
+ list_add_tail(&td->fl_list, &skeltd->fl_list);
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
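The mb() in uhci_insert_td() is the whole trick: the new TD's hardware link is made valid before the previous TD is pointed at it, so the controller, which walks these links asynchronously, can never fetch a half-initialized entry. A self-contained model of that publish-then-link ordering follows; names are illustrative and __sync_synchronize() stands in for mb().

/* Publish-then-link: step 1 makes the new element fully valid, step 3
 * makes it reachable; the barrier keeps them in that order. */
#include <stdio.h>

#define PTR_TERM 1u

struct td {
    unsigned int link;      /* what the "hardware" follows */
    unsigned int handle;    /* stands in for td->dma_handle */
};

static void insert_after(struct td *prev, struct td *new)
{
    new->link = prev->link;     /* 1: new element is fully valid */
    __sync_synchronize();       /* 2: stands in for mb() */
    prev->link = new->handle;   /* 3: publish it to the walker */
}

int main(void)
{
    struct td a = { PTR_TERM, 2 }, b = { 0, 4 };

    insert_after(&a, &b);
    printf("a.link=%u b.link=%u\n", a.link, b.link); /* 4, then TERM */
    return 0;
}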
/*
* frame list pointer -> iso td's (if any) ->
* periodic interrupt td (if frame 0) -> irq td's -> control qh -> bulk qh
*/
-
static void uhci_insert_td_frame_list(struct uhci *uhci, struct uhci_td *td, unsigned framenum)
{
unsigned long flags;
- struct uhci_td *nexttd;
framenum %= UHCI_NUMFRAMES;
- spin_lock_irqsave(&uhci->framelist_lock, flags);
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
+
+ td->frame = framenum;
+
+ /* Is there a TD already mapped there? */
+ if (uhci->fl->frame_cpu[framenum]) {
+ struct uhci_td *ftd, *ltd;
+
+ ftd = uhci->fl->frame_cpu[framenum];
+ ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
+
+ list_add_tail(&td->fl_list, &ftd->fl_list);
- td->frameptr = &uhci->fl->frame[framenum];
- td->link = uhci->fl->frame[framenum];
- if (!(td->link & (UHCI_PTR_TERM | UHCI_PTR_QH))) {
- nexttd = (struct uhci_td *)uhci_ptr_to_virt(td->link);
- td->nexttd = nexttd;
- nexttd->prevtd = td;
- nexttd->frameptr = NULL;
+ td->link = ltd->link;
+ mb();
+ ltd->link = td->dma_handle;
+ } else {
+ td->link = uhci->fl->frame[framenum];
+ mb();
+ uhci->fl->frame[framenum] = td->dma_handle;
+ uhci->fl->frame_cpu[framenum] = td;
}
- uhci->fl->frame[framenum] = virt_to_bus(td);
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_remove_td(struct uhci *uhci, struct uhci_td *td)
unsigned long flags;
/* If it's not inserted, don't remove it */
- if (!td->frameptr && !td->prevtd && !td->nexttd)
+ if (td->frame == -1 && list_empty(&td->fl_list))
return;
- spin_lock_irqsave(&uhci->framelist_lock, flags);
- if (td->frameptr) {
- *(td->frameptr) = td->link;
- if (td->nexttd) {
- td->nexttd->frameptr = td->frameptr;
- td->nexttd->prevtd = NULL;
- td->nexttd = NULL;
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
+ if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
+ if (list_empty(&td->fl_list)) {
+ uhci->fl->frame[td->frame] = td->link;
+ uhci->fl->frame_cpu[td->frame] = NULL;
+ } else {
+ struct uhci_td *ntd;
+
+ ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
+ uhci->fl->frame[td->frame] = ntd->dma_handle;
+ uhci->fl->frame_cpu[td->frame] = ntd;
}
- td->frameptr = NULL;
} else {
- if (td->prevtd) {
- td->prevtd->nexttd = td->nexttd;
- td->prevtd->link = td->link;
- }
- if (td->nexttd)
- td->nexttd->prevtd = td->prevtd;
- td->prevtd = td->nexttd = NULL;
+ struct uhci_td *ptd;
+
+ ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
+ ptd->link = td->link;
}
+
+ mb();
td->link = UHCI_PTR_TERM;
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
+
+ list_del(&td->fl_list);
+ INIT_LIST_HEAD(&td->fl_list);
+ td->frame = -1;
+
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
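Note the two parallel views being kept in sync here: uhci->fl->frame[] holds only bus addresses for the controller, while uhci->fl->frame_cpu[] shadows it with CPU pointers so removal never has to translate a DMA handle back into a structure. A tiny model of the shadow-array idea, with illustrative names and types:

/* Shadow-array model: the hardware-visible slot and the driver-side
 * pointer are always updated together. */
#include <assert.h>
#include <stddef.h>

#define NUMFRAMES 8

struct td { unsigned int handle; };

static unsigned int frame[NUMFRAMES];       /* seen by "hardware" */
static struct td *frame_cpu[NUMFRAMES];     /* driver's shadow */

static void frame_set(int i, struct td *td)
{
    frame[i] = td ? td->handle : 0;
    frame_cpu[i] = td;
}

int main(void)
{
    struct td td = { 0x1234 };

    frame_set(3, &td);
    assert(frame[3] == td.handle && frame_cpu[3] == &td);
    frame_set(3, NULL);     /* removal: both views updated */
    assert(frame[3] == 0 && frame_cpu[3] == NULL);
    return 0;
}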
/*
{
struct list_head *tmp, *head;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
- struct uhci_td *td, *prevtd;
+ struct uhci_td *td, *ptd;
- if (!urbp)
+ if (list_empty(&urbp->td_list))
return;
- head = &urbp->list;
+ head = &urbp->td_list;
tmp = head->next;
- if (head == tmp)
- return;
+ /* Ordering isn't important here since the QH hasn't been */
+ /* inserted into the schedule yet */
td = list_entry(tmp, struct uhci_td, list);
/* Add the first TD to the QH element pointer */
- qh->element = virt_to_bus(td) | (breadth ? 0 : UHCI_PTR_DEPTH);
+ qh->element = td->dma_handle | (breadth ? 0 : UHCI_PTR_DEPTH);
- prevtd = td;
+ ptd = td;
/* Then link the rest of the TD's */
tmp = tmp->next;
tmp = tmp->next;
- prevtd->link = virt_to_bus(td) | (breadth ? 0 : UHCI_PTR_DEPTH);
+ ptd->link = td->dma_handle | (breadth ? 0 : UHCI_PTR_DEPTH);
- prevtd = td;
+ ptd = td;
}
- prevtd->link = UHCI_PTR_TERM;
+ ptd->link = UHCI_PTR_TERM;
}
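The `breadth` flag above works by setting or clearing the depth-select bit in every link pointer: with the depth bit set the controller keeps descending this queue before moving on, and without it the controller services one TD per queue per pass. A small sketch of how the flag maps onto the link word; the constants mirror the UHCI ones but are restated here:

/* Link-word sketch: breadth-first leaves the handle bare, depth-first
 * ORs in the depth-select bit. */
#include <stdio.h>

#define PTR_DEPTH 0x4u
#define PTR_TERM  0x1u

static unsigned int link_to(unsigned int handle, int breadth)
{
    /* breadth-first: plain handle; depth-first: handle | DEPTH bit */
    return handle | (breadth ? 0 : PTR_DEPTH);
}

int main(void)
{
    printf("breadth: %#x\n", link_to(0x1000, 1)); /* 0x1000 */
    printf("depth:   %#x\n", link_to(0x1000, 0)); /* 0x1004 */
    return 0;
}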
-static void uhci_free_td(struct uhci_td *td)
+static void uhci_free_td(struct uhci *uhci, struct uhci_td *td)
{
- if (!list_empty(&td->list))
+ if (!list_empty(&td->list) || !list_empty(&td->fl_list))
dbg("td is still in URB list!");
if (td->dev)
usb_dec_dev_use(td->dev);
- kmem_cache_free(uhci_td_cachep, td);
+ pci_pool_free(uhci->td_pool, td, td->dma_handle);
}
-static struct uhci_qh *uhci_alloc_qh(struct usb_device *dev)
+static struct uhci_qh *uhci_alloc_qh(struct uhci *uhci, struct usb_device *dev)
{
+ dma_addr_t dma_handle;
struct uhci_qh *qh;
- qh = kmem_cache_alloc(uhci_qh_cachep, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
+ qh = pci_pool_alloc(uhci->qh_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
if (!qh)
return NULL;
+ qh->dma_handle = dma_handle;
+
qh->element = UHCI_PTR_TERM;
qh->link = UHCI_PTR_TERM;
qh->dev = dev;
- qh->prevqh = qh->nextqh = NULL;
+ INIT_LIST_HEAD(&qh->list);
INIT_LIST_HEAD(&qh->remove_list);
usb_inc_dev_use(dev);
return qh;
}
-static void uhci_free_qh(struct uhci_qh *qh)
+static void uhci_free_qh(struct uhci *uhci, struct uhci_qh *qh)
{
+ if (!list_empty(&qh->list))
+ dbg("qh list not empty!");
+ if (!list_empty(&qh->remove_list))
+ dbg("qh still in remove_list!");
+
if (qh->dev)
usb_dec_dev_use(qh->dev);
- kmem_cache_free(uhci_qh_cachep, qh);
+ pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
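The switch from kmem_cache to pci_pool in these allocators changes the contract: every object now comes back as a (CPU pointer, bus handle) pair, and pci_pool_free() wants both identities again, which is why td->dma_handle and qh->dma_handle are stored in the structures themselves. A toy model of that pairing, with fake handles and illustrative names:

/* Pool-pairing model: allocation yields both a pointer and a handle,
 * and free() must present both again. */
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>

struct pool_obj {
    uint32_t dma_handle;    /* what the hardware is given */
    char payload[32];
};

static struct pool_obj *pool_alloc(uint32_t *handle)
{
    struct pool_obj *o = calloc(1, sizeof(*o));

    if (!o)
        return NULL;
    o->dma_handle = (uint32_t)(uintptr_t)o;     /* fake bus address */
    *handle = o->dma_handle;
    return o;
}

static void pool_free(struct pool_obj *o, uint32_t handle)
{
    assert(o->dma_handle == handle);    /* both names must match */
    free(o);
}

int main(void)
{
    uint32_t h;
    struct pool_obj *o = pool_alloc(&h);

    if (o)
        pool_free(o, h);
    return 0;
}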
static void uhci_insert_qh(struct uhci *uhci, struct uhci_qh *skelqh, struct uhci_qh *qh)
{
+ struct uhci_qh *lqh;
unsigned long flags;
- spin_lock_irqsave(&uhci->framelist_lock, flags);
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
+
+ /* Grab the last QH */
+ lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
- /* Fix the linked list pointers */
- qh->nextqh = skelqh->nextqh;
- qh->prevqh = skelqh;
- if (skelqh->nextqh)
- skelqh->nextqh->prevqh = qh;
- skelqh->nextqh = qh;
+ qh->link = lqh->link;
+ mb(); /* Ordering is important */
+ lqh->link = qh->dma_handle | UHCI_PTR_QH;
- qh->link = skelqh->link;
- skelqh->link = virt_to_bus(qh) | UHCI_PTR_QH;
+ list_add_tail(&qh->list, &skelqh->list);
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_remove_qh(struct uhci *uhci, struct uhci_qh *qh)
{
unsigned long flags;
- int delayed;
+ struct uhci_qh *prevqh;
- /* If the QH isn't queued, then we don't need to delay unlink it */
- delayed = (qh->prevqh || qh->nextqh);
-
- spin_lock_irqsave(&uhci->framelist_lock, flags);
- if (qh->prevqh) {
- qh->prevqh->nextqh = qh->nextqh;
- qh->prevqh->link = qh->link;
+ /* Only go through the hoops if it's actually linked in */
+ if (list_empty(&qh->list)) {
+ uhci_free_qh(uhci, qh);
+ return;
}
- if (qh->nextqh)
- qh->nextqh->prevqh = qh->prevqh;
- qh->prevqh = qh->nextqh = NULL;
+
+ qh->urbp = NULL;
+
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
+
+ prevqh = list_entry(qh->list.prev, struct uhci_qh, list);
+
+ prevqh->link = qh->link;
+ mb();
qh->element = qh->link = UHCI_PTR_TERM;
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
- if (delayed) {
- spin_lock_irqsave(&uhci->qh_remove_lock, flags);
+ list_del(&qh->list);
+ INIT_LIST_HEAD(&qh->list);
- /* Check to see if the remove list is empty */
- /* Set the IOC bit to force an interrupt so we can remove the QH */
- if (list_empty(&uhci->qh_remove_list))
- uhci_set_next_interrupt(uhci);
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
- /* Add it */
- list_add(&qh->remove_list, &uhci->qh_remove_list);
+ spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
- spin_unlock_irqrestore(&uhci->qh_remove_lock, flags);
- } else
- uhci_free_qh(qh);
+ /* Check to see if the remove list is empty. Set the IOC bit */
+ /* to force an interrupt so we can remove the QH */
+ if (list_empty(&uhci->qh_remove_list))
+ uhci_set_next_interrupt(uhci);
+
+ list_add(&qh->remove_list, &uhci->qh_remove_list);
+
+ spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}
-static spinlock_t uhci_append_urb_lock = SPIN_LOCK_UNLOCKED;
+static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
+{
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+ struct list_head *head, *tmp;
+
+ head = &urbp->td_list;
+ tmp = head->next;
+ while (head != tmp) {
+ struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
+
+ tmp = tmp->next;
+
+ td->info &= ~(1 << TD_TOKEN_TOGGLE);
+ if (toggle)
+ td->info |= (1 << TD_TOKEN_TOGGLE);
+
+ toggle ^= 1;
+ }
+
+ return toggle;
+}
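uhci_fixup_toggle() rewrites the DATA0/DATA1 toggle of every TD in an URB so the sequence alternates from a given starting value, and returns the toggle the next queued URB must start with; that return value is what uhci_delete_queued_urb() threads through the rest of the queue. The same walk in a self-contained model, with an array in place of the TD list and bit 0 as the toggle:

/* Toggle-repair model: alternate from `toggle` across the queue and
 * return the first toggle for whatever follows. */
#include <assert.h>

static unsigned int fixup_toggle(unsigned int *info, int n, unsigned int toggle)
{
    int i;

    for (i = 0; i < n; i++) {
        info[i] = (info[i] & ~1u) | toggle;     /* bit 0 = toggle */
        toggle ^= 1;
    }
    return toggle;  /* first toggle for the next transfer */
}

int main(void)
{
    unsigned int info[3] = { 0, 0, 0 };
    unsigned int next = fixup_toggle(info, 3, 1);

    assert(info[0] == 1 && info[1] == 0 && info[2] == 1);
    assert(next == 0);
    return 0;
}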
/* This function will append one URB's QH to another URB's QH. This is for */
-/* USB_QUEUE_BULK support */
+/* USB_QUEUE_BULK support for bulk transfers and soon implicitly for */
+/* control transfers */
static void uhci_append_queued_urb(struct uhci *uhci, struct urb *eurb, struct urb *urb)
{
struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
struct list_head *tmp;
- struct uhci_td *td, *ltd;
+ struct uhci_td *ftd, *lltd;
unsigned long flags;
eurbp = eurb->hcpriv;
urbp = urb->hcpriv;
- spin_lock_irqsave(&uhci_append_urb_lock, flags);
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
- /* Find the beginning URB in the queue */
+ /* Find the first URB in the queue */
if (eurbp->queued) {
- struct list_head *head = &eurbp->urb_queue_list;
+ struct list_head *head = &eurbp->queue_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *turbp =
- list_entry(tmp, struct urb_priv, urb_queue_list);
-
- tmp = tmp->next;
+ list_entry(tmp, struct urb_priv, queue_list);
if (!turbp->queued)
break;
+
+ tmp = tmp->next;
}
} else
- tmp = &eurbp->urb_queue_list;
+ tmp = &eurbp->queue_list;
- furbp = list_entry(tmp, struct urb_priv, urb_queue_list);
+ furbp = list_entry(tmp, struct urb_priv, queue_list);
+ lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
- tmp = furbp->urb_queue_list.prev;
- lurbp = list_entry(tmp, struct urb_priv, urb_queue_list);
+ lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
+ ftd = list_entry(urbp->td_list.next, struct uhci_td, list);
- /* Add this one to the end */
- list_add_tail(&urbp->urb_queue_list, &furbp->urb_queue_list);
+ uhci_fixup_toggle(urb, uhci_toggle(lltd->info) ^ 1);
- /* Grab the last TD from the last URB */
- ltd = list_entry(lurbp->list.prev, struct uhci_td, list);
+ mb(); /* Make sure we flush everything */
+ /* Only support bulk right now, so no depth */
+ lltd->link = ftd->dma_handle;
- /* Grab the first TD from the first URB */
- td = list_entry(urbp->list.next, struct uhci_td, list);
+ list_add_tail(&urbp->queue_list, &furbp->queue_list);
- /* No breadth since this will only be called for bulk transfers */
- ltd->link = virt_to_bus(td);
+ urbp->queued = 1;
- spin_unlock_irqrestore(&uhci_append_urb_lock, flags);
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_delete_queued_urb(struct uhci *uhci, struct urb *urb)
{
struct urb_priv *urbp, *nurbp;
+ struct list_head *head, *tmp;
+ struct urb_priv *purbp;
+ struct uhci_td *pltd;
+ unsigned int toggle;
unsigned long flags;
urbp = urb->hcpriv;
- spin_lock_irqsave(&uhci_append_urb_lock, flags);
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
+
+ if (list_empty(&urbp->queue_list))
+ goto out;
- nurbp = list_entry(urbp->urb_queue_list.next, struct urb_priv,
- urb_queue_list);
+ nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
+
+ /* Fix up the toggle for the next URB's */
+ if (!urbp->queued)
+ toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
+ else {
+ /* If we're in the middle of the queue, grab the toggle */
+ /* from the TD previous to us */
+ purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
+ queue_list);
+
+ pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
+
+ toggle = uhci_toggle(pltd->info) ^ 1;
+ }
+
+ head = &urbp->queue_list;
+ tmp = head->next;
+ while (head != tmp) {
+ struct urb_priv *turbp;
+
+ turbp = list_entry(tmp, struct urb_priv, queue_list);
+
+ tmp = tmp->next;
+
+ if (!turbp->queued)
+ break;
+
+ toggle = uhci_fixup_toggle(turbp->urb, toggle);
+ }
+
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe), toggle);
if (!urbp->queued) {
- /* We're the head, so just insert the QH for the next URB */
- uhci_insert_qh(uhci, &uhci->skel_bulk_qh, nurbp->qh);
+ int status;
+
+ /* The HC may continue using the current QH if it finished */
+ /* all of the TD's in this URB and may have started on the */
+ /* next URB's TD's already, so we'll take over ownership */
+ /* of this QH and use it instead. Don't forget to delete */
+ /* the old QH first */
+ uhci_free_qh(uhci, nurbp->qh);
+
+ nurbp->qh = urbp->qh;
+ nurbp->qh->urbp = nurbp;
+ urbp->qh = NULL;
+
+ /* If the last TD from the first (this) urb didn't */
+ /* complete, reset qh->element to the first TD in the */
+ /* next urb */
+ pltd = list_entry(urbp->td_list.prev, struct uhci_td, list);
+ status = uhci_status_bits(pltd->status);
+ if ((status & TD_CTRL_ACTIVE) || uhci_actual_length(pltd->status) < uhci_expected_length(pltd->info)) {
+ struct uhci_td *ftd = list_entry(nurbp->td_list.next, struct uhci_td, list);
+ nurbp->qh->element = ftd->dma_handle;
+ }
+
nurbp->queued = 0;
} else {
- struct urb_priv *purbp;
- struct uhci_td *ptd;
-
/* We're somewhere in the middle (or end). A bit trickier */
/* than the head scenario */
- purbp = list_entry(urbp->urb_queue_list.prev, struct urb_priv,
- urb_queue_list);
+ purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
+ queue_list);
+
+ pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
+ if (nurbp->queued) {
+ struct uhci_td *nftd;
- ptd = list_entry(purbp->list.prev, struct uhci_td, list);
- if (nurbp->queued)
/* Close the gap between the two */
- ptd->link = virt_to_bus(list_entry(nurbp->list.next,
- struct uhci_td, list));
- else
+ nftd = list_entry(nurbp->td_list.next, struct uhci_td,
+ list);
+ pltd->link = nftd->dma_handle;
+ } else
+ /* The next URB happens to be the beginning, so */
+ /* we're the last; end the chain */
- ptd->link = UHCI_PTR_TERM;
-
+ pltd->link = UHCI_PTR_TERM;
}
- list_del(&urbp->urb_queue_list);
+ list_del(&urbp->queue_list);
+ INIT_LIST_HEAD(&urbp->queue_list);
- spin_unlock_irqrestore(&uhci_append_urb_lock, flags);
+out:
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
-struct urb_priv *uhci_alloc_urb_priv(struct urb *urb)
+static struct urb_priv *uhci_alloc_urb_priv(struct uhci *uhci, struct urb *urb)
{
struct urb_priv *urbp;
urbp = kmem_cache_alloc(uhci_up_cachep, in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL);
- if (!urbp)
+ if (!urbp) {
+ err("uhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n");
return NULL;
+ }
memset((void *)urbp, 0, sizeof(*urbp));
urbp->inserttime = jiffies;
urbp->urb = urb;
+ urbp->dev = urb->dev;
- INIT_LIST_HEAD(&urbp->list);
- INIT_LIST_HEAD(&urbp->urb_queue_list);
+ INIT_LIST_HEAD(&urbp->td_list);
+ INIT_LIST_HEAD(&urbp->queue_list);
+ INIT_LIST_HEAD(&urbp->complete_list);
urb->hcpriv = urbp;
+ if (urb->transfer_buffer_length) {
+ urbp->transfer_buffer_dma_handle = pci_map_single(uhci->dev,
+ urb->transfer_buffer, urb->transfer_buffer_length,
+ usb_pipein(urb->pipe) ? PCI_DMA_FROMDEVICE :
+ PCI_DMA_TODEVICE);
+ if (!urbp->transfer_buffer_dma_handle)
+ return NULL;
+ }
+
+ if (usb_pipetype(urb->pipe) == PIPE_CONTROL && urb->setup_packet) {
+ urbp->setup_packet_dma_handle = pci_map_single(uhci->dev,
+ urb->setup_packet, sizeof(devrequest),
+ PCI_DMA_TODEVICE);
+ if (!urbp->setup_packet_dma_handle)
+ return NULL;
+ }
+
return urbp;
}
td->urb = urb;
- list_add_tail(&td->list, &urbp->list);
+ list_add_tail(&td->list, &urbp->td_list);
}
-static void uhci_remove_td_from_urb(struct urb *urb, struct uhci_td *td)
+static void uhci_remove_td_from_urb(struct uhci_td *td)
{
- urb = NULL; /* No warnings */
-
if (list_empty(&td->list))
return;
static void uhci_destroy_urb_priv(struct urb *urb)
{
- struct list_head *tmp, *head;
+ struct list_head *head, *tmp;
struct urb_priv *urbp;
struct uhci *uhci;
- struct uhci_td *td;
unsigned long flags;
spin_lock_irqsave(&urb->lock, flags);
urbp = (struct urb_priv *)urb->hcpriv;
if (!urbp)
- goto unlock;
+ goto out;
- if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
- goto unlock;
+ if (!urbp->dev || !urbp->dev->bus || !urbp->dev->bus->hcpriv) {
+ warn("uhci_destroy_urb_priv: urb %p belongs to disconnected device or bus?", urb);
+ goto out;
+ }
+
+ if (!list_empty(&urb->urb_list))
+ warn("uhci_destroy_urb_priv: urb %p still on uhci->urb_list or uhci->remove_list", urb);
- uhci = urb->dev->bus->hcpriv;
+ if (!list_empty(&urbp->complete_list))
+ warn("uhci_destroy_urb_priv: urb %p still on uhci->complete_list", urb);
- head = &urbp->list;
+ uhci = urbp->dev->bus->hcpriv;
+
+ head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
- td = list_entry(tmp, struct uhci_td, list);
+ struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
- uhci_remove_td_from_urb(urb, td);
-
+ uhci_remove_td_from_urb(td);
uhci_remove_td(uhci, td);
-
- uhci_free_td(td);
+ uhci_free_td(uhci, td);
}
+ if (urb->setup_packet)
+ pci_unmap_single(uhci->dev, urbp->setup_packet_dma_handle,
+ sizeof(devrequest), PCI_DMA_TODEVICE);
+
+ if (urb->transfer_buffer_length)
+ pci_unmap_single(uhci->dev, urbp->transfer_buffer_dma_handle,
+ urb->transfer_buffer_length, usb_pipein(urb->pipe) ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+
urb->hcpriv = NULL;
kmem_cache_free(uhci_up_cachep, urbp);
-unlock:
+out:
spin_unlock_irqrestore(&urb->lock, flags);
}
unsigned long flags;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
- if (!urbp)
- return;
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
- spin_lock_irqsave(&uhci->framelist_lock, flags);
-
- if ((!(urb->transfer_flags & USB_NO_FSBR)) && (!urbp->fsbr)) {
+ if ((!(urb->transfer_flags & USB_NO_FSBR)) && !urbp->fsbr) {
urbp->fsbr = 1;
if (!uhci->fsbr++)
- uhci->skel_term_qh.link = virt_to_bus(&uhci->skel_hs_control_qh) | UHCI_PTR_QH;
+ uhci->skel_term_qh->link = uhci->skel_hs_control_qh->dma_handle | UHCI_PTR_QH;
}
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_dec_fsbr(struct uhci *uhci, struct urb *urb)
unsigned long flags;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
- if (!urbp)
- return;
-
- spin_lock_irqsave(&uhci->framelist_lock, flags);
+ spin_lock_irqsave(&uhci->frame_list_lock, flags);
if ((!(urb->transfer_flags & USB_NO_FSBR)) && urbp->fsbr) {
urbp->fsbr = 0;
if (!--uhci->fsbr)
- uhci->skel_term_qh.link = UHCI_PTR_TERM;
+ uhci->skel_term_qh->link = UHCI_PTR_TERM;
}
- spin_unlock_irqrestore(&uhci->framelist_lock, flags);
+ spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
unsigned long destination, status;
int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
int len = urb->transfer_buffer_length;
- unsigned char *data = urb->transfer_buffer;
+ dma_addr_t data = urbp->transfer_buffer_dma_handle;
/* The "pipe" thing contains the destination in bits 8--18 */
destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
/*
* Build the TD for the control request
*/
- td = uhci_alloc_td(urb->dev);
+ td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination | (7 << 21),
- virt_to_bus(urb->setup_packet));
+ urbp->setup_packet_dma_handle);
/*
* If direction is "send", change the frame from SETUP (0x2D)
if (pktsze > maxsze)
pktsze = maxsze;
- td = uhci_alloc_td(urb->dev);
+ td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination | ((pktsze - 1) << 21),
- virt_to_bus(data));
+ data);
data += pktsze;
len -= pktsze;
/*
* Build the final TD for control status
*/
- td = uhci_alloc_td(urb->dev);
+ td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_fill_td(td, status | TD_CTRL_IOC,
destination | (UHCI_NULL_DATA_SIZE << 21), 0);
- qh = uhci_alloc_qh(urb->dev);
+ qh = uhci_alloc_qh(uhci, urb->dev);
if (!qh)
return -ENOMEM;
/* Low speed or small transfers gets a different queue and treatment */
if (urb->pipe & TD_CTRL_LS) {
uhci_insert_tds_in_qh(qh, urb, 0);
- uhci_insert_qh(uhci, &uhci->skel_ls_control_qh, qh);
+ uhci_insert_qh(uhci, uhci->skel_ls_control_qh, qh);
} else {
uhci_insert_tds_in_qh(qh, urb, 1);
- uhci_insert_qh(uhci, &uhci->skel_hs_control_qh, qh);
+ uhci_insert_qh(uhci, uhci->skel_hs_control_qh, qh);
uhci_inc_fsbr(uhci, urb);
}
urbp->qh = qh;
-
- uhci_add_urb_list(uhci, urb);
+ qh->urbp = urbp;
return -EINPROGRESS;
}
unsigned int status;
int ret = 0;
- if (!urbp)
+ if (list_empty(&urbp->td_list))
return -EINVAL;
- head = &urbp->list;
- if (head->next == head)
- return -EINVAL;
+ head = &urbp->td_list;
if (urbp->short_control_packet) {
tmp = head->prev;
uhci_packetout(td->info));
err:
- if (debug && ret != -EPIPE) {
+ if ((debug == 1 && ret != -EPIPE) || debug > 1) {
/* Some debugging code */
dbg("uhci_result_control() failed with status %x", status);
- /* Print the chain for debugging purposes */
- uhci_show_urb_queue(urb);
+ if (errbuf) {
+ /* Print the chain for debugging purposes */
+ uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
+
+ lprintk(errbuf);
+ }
}
return ret;
uhci_remove_qh(uhci, urbp->qh);
/* Delete all of the TD's except for the status TD at the end */
- head = &urbp->list;
+ head = &urbp->td_list;
tmp = head->next;
while (tmp != head && tmp->next != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
- uhci_remove_td_from_urb(urb, td);
-
+ uhci_remove_td_from_urb(td);
uhci_remove_td(uhci, td);
-
- uhci_free_td(td);
+ uhci_free_td(uhci, td);
}
- urbp->qh = uhci_alloc_qh(urb->dev);
+ urbp->qh = uhci_alloc_qh(uhci, urb->dev);
if (!urbp->qh) {
err("unable to allocate new QH for control retrigger");
return -ENOMEM;
}
+ urbp->qh->urbp = urbp;
+
/* One TD, who cares about Breadth first? */
uhci_insert_tds_in_qh(urbp->qh, urb, 0);
/* Low speed or small transfers gets a different queue and treatment */
if (urb->pipe & TD_CTRL_LS)
- uhci_insert_qh(uhci, &uhci->skel_ls_control_qh, urbp->qh);
+ uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urbp->qh);
else
- uhci_insert_qh(uhci, &uhci->skel_hs_control_qh, urbp->qh);
+ uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urbp->qh);
return -EINPROGRESS;
}
struct uhci_td *td;
unsigned long destination, status;
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
if (urb->transfer_buffer_length > usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))
return -EINVAL;
status = (urb->pipe & TD_CTRL_LS) | TD_CTRL_ACTIVE | TD_CTRL_IOC;
- td = uhci_alloc_td(urb->dev);
+ td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
uhci_add_td_to_urb(urb, td);
- uhci_fill_td(td, status, destination,
- virt_to_bus(urb->transfer_buffer));
-
- uhci_insert_td(uhci, &uhci->skeltd[__interval_to_skel(urb->interval)], td);
+ uhci_fill_td(td, status, destination, urbp->transfer_buffer_dma_handle);
- uhci_add_urb_list(uhci, urb);
+ uhci_insert_td(uhci, uhci->skeltd[__interval_to_skel(urb->interval)], td);
return -EINPROGRESS;
}
unsigned int status;
int ret = 0;
- if (!urbp)
- return -EINVAL;
-
urb->actual_length = 0;
- head = &urbp->list;
+ head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
td = list_entry(tmp, struct uhci_td, list);
uhci_packetout(td->info));
err:
- if (debug && ret != -EPIPE) {
+ if ((debug == 1 && ret != -EPIPE) || debug > 1) {
/* Some debugging code */
dbg("uhci_result_interrupt/bulk() failed with status %x",
status);
- /* Print the chain for debugging purposes */
- if (urbp->qh)
- uhci_show_urb_queue(urb);
- else
- uhci_show_td(td);
+ if (errbuf) {
+ /* Print the chain for debugging purposes */
+ if (urbp->qh)
+ uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
+ else
+ uhci_show_td(td, errbuf, ERRBUF_LEN, 0);
+
+ lprintk(errbuf);
+ }
}
return ret;
static void uhci_reset_interrupt(struct urb *urb)
{
- struct list_head *tmp;
+ struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
struct uhci_td *td;
+ unsigned long flags;
- if (!urbp)
- return;
+ spin_lock_irqsave(&urb->lock, flags);
- tmp = urbp->list.next;
- td = list_entry(tmp, struct uhci_td, list);
- if (!td)
- return;
+ /* Root hub is special */
+ if (urb->dev == uhci->rh.dev)
+ goto out;
+
+ td = list_entry(urbp->td_list.next, struct uhci_td, list);
td->status = (td->status & 0x2F000000) | TD_CTRL_ACTIVE | TD_CTRL_IOC;
td->info &= ~(1 << TD_TOKEN_TOGGLE);
td->info |= (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE);
usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
+out:
urb->status = -EINPROGRESS;
+
+ spin_unlock_irqrestore(&urb->lock, flags);
}
/*
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
int len = urb->transfer_buffer_length;
- unsigned char *data = urb->transfer_buffer;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+ dma_addr_t data = urbp->transfer_buffer_dma_handle;
if (len < 0)
return -EINVAL;
if (pktsze > maxsze)
pktsze = maxsze;
- td = uhci_alloc_td(urb->dev);
+ td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_fill_td(td, status, destination | ((pktsze - 1) << 21) |
(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE),
- virt_to_bus(data));
+ data);
data += pktsze;
len -= maxsze;
usb_pipeout(urb->pipe));
} while (len > 0);
- qh = uhci_alloc_qh(urb->dev);
+ qh = uhci_alloc_qh(uhci, urb->dev);
if (!qh)
return -ENOMEM;
urbp->qh = qh;
+ qh->urbp = urbp;
- /* Always assume depth first */
+ /* Always assume breadth first */
uhci_insert_tds_in_qh(qh, urb, 1);
- if (urb->transfer_flags & USB_QUEUE_BULK && eurb) {
- urbp->queued = 1;
+ if (urb->transfer_flags & USB_QUEUE_BULK && eurb)
uhci_append_queued_urb(uhci, eurb, urb);
- } else
- uhci_insert_qh(uhci, &uhci->skel_bulk_qh, qh);
-
- uhci_add_urb_list(uhci, urb);
+ else
+ uhci_insert_qh(uhci, uhci->skel_bulk_qh, qh);
uhci_inc_fsbr(uhci, urb);
{
struct urb *last_urb = NULL;
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
- struct list_head *tmp, *head = &uhci->urb_list;
+ struct list_head *tmp, *head;
int ret = 0;
unsigned long flags;
- nested_lock(&uhci->urblist_lock, flags);
+ spin_lock_irqsave(&uhci->urb_list_lock, flags);
+ head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
struct urb *u = list_entry(tmp, struct urb, urb_list);
} else
ret = -1; /* no previous urb found */
- nested_unlock(&uhci->urblist_lock, flags);
+ spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
return ret;
}
return 0;
}
+/*
+ * Isochronous transfers
+ */
static int uhci_submit_isochronous(struct urb *urb)
{
struct uhci_td *td;
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
int i, ret, framenum;
int status, destination;
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
if (!urb->iso_frame_desc[i].length)
continue;
- td = uhci_alloc_td(urb->dev);
+ td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination | ((urb->iso_frame_desc[i].length - 1) << 21),
- virt_to_bus(urb->transfer_buffer + urb->iso_frame_desc[i].offset));
+ urbp->transfer_buffer_dma_handle + urb->iso_frame_desc[i].offset);
if (i + 1 >= urb->number_of_packets)
td->status |= TD_CTRL_IOC;
uhci_insert_td_frame_list(uhci, td, framenum);
}
- uhci_add_urb_list(uhci, urb);
-
return -EINPROGRESS;
}
int status;
int i, ret = 0;
- if (!urbp)
- return -EINVAL;
-
urb->actual_length = 0;
i = 0;
- head = &urbp->list;
+ head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
status = uhci_map_status(uhci_status_bits(td->status), usb_pipeout(urb->pipe));
urb->iso_frame_desc[i].status = status;
- if (status != 0) {
+ if (status) {
urb->error_count++;
ret = status;
}
static struct urb *uhci_find_urb_ep(struct uhci *uhci, struct urb *urb)
{
- struct list_head *tmp, *head = &uhci->urb_list;
+ struct list_head *tmp, *head;
unsigned long flags;
struct urb *u = NULL;
+ /* We don't match Isoc transfers since they are special */
if (usb_pipeisoc(urb->pipe))
return NULL;
- nested_lock(&uhci->urblist_lock, flags);
+ spin_lock_irqsave(&uhci->urb_list_lock, flags);
+ head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
u = list_entry(tmp, struct urb, urb_list);
tmp = tmp->next;
- if (u->dev == urb->dev &&
- u->pipe == urb->pipe)
- goto found;
+ if (u->dev == urb->dev && u->pipe == urb->pipe &&
+ u->status == -EINPROGRESS)
+ goto out;
}
u = NULL;
-found:
- nested_unlock(&uhci->urblist_lock, flags);
+out:
+ spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
return u;
}
int ret = -EINVAL;
struct uhci *uhci;
unsigned long flags;
- struct urb *u;
+ struct urb *eurb;
int bustime;
if (!urb)
return -EINVAL;
- if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
+ if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv) {
+ warn("uhci_submit_urb: urb %p belongs to disconnected device or bus?", urb);
return -ENODEV;
+ }
uhci = (struct uhci *)urb->dev->bus->hcpriv;
- /* Short circuit the virtual root hub */
- if (usb_pipedevice(urb->pipe) == uhci->rh.devnum)
- return rh_submit_urb(urb);
-
- u = uhci_find_urb_ep(uhci, urb);
- if (u && !(urb->transfer_flags & USB_QUEUE_BULK))
- return -ENXIO;
-
+ INIT_LIST_HEAD(&urb->urb_list);
usb_inc_dev_use(urb->dev);
+
spin_lock_irqsave(&urb->lock, flags);
- if (!uhci_alloc_urb_priv(urb)) {
+ if (urb->status == -EINPROGRESS || urb->status == -ECONNRESET ||
+ urb->status == -ECONNABORTED) {
+ dbg("uhci_submit_urb: urb not available to submit (status = %d)", urb->status);
+ /* Can't use the out path here: it would overwrite urb->status */
spin_unlock_irqrestore(&urb->lock, flags);
usb_dec_dev_use(urb->dev);
- return -ENOMEM;
+ return ret;
+ }
+
+ if (!uhci_alloc_urb_priv(uhci, urb)) {
+ ret = -ENOMEM;
+
+ goto out;
+ }
+
+ eurb = uhci_find_urb_ep(uhci, urb);
+ if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) {
+ ret = -ENXIO;
+
+ goto out;
+ }
+
+ /* Short circuit the virtual root hub */
+ if (urb->dev == uhci->rh.dev) {
+ ret = rh_submit_urb(urb);
+
+ goto out;
}
switch (usb_pipetype(urb->pipe)) {
ret = uhci_submit_interrupt(urb);
break;
case PIPE_BULK:
- ret = uhci_submit_bulk(urb, u);
+ ret = uhci_submit_bulk(urb, eurb);
break;
case PIPE_ISOCHRONOUS:
if (urb->bandwidth == 0) { /* not yet checked/allocated */
break;
}
+out:
urb->status = ret;
spin_unlock_irqrestore(&urb->lock, flags);
- if (ret == -EINPROGRESS)
- ret = 0;
- else {
- uhci_unlink_generic(urb);
- usb_dec_dev_use(urb->dev);
+ if (ret == -EINPROGRESS) {
+ spin_lock_irqsave(&uhci->urb_list_lock, flags);
+ /* We use _tail to make find_urb_ep more efficient */
+ list_add_tail(&urb->urb_list, &uhci->urb_list);
+ spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
+
+ return 0;
}
+ uhci_unlink_generic(uhci, urb);
+ uhci_destroy_urb_priv(urb);
+
+ usb_dec_dev_use(urb->dev);
+
return ret;
}
/*
* Return the result of a transfer
*
- * Must be called with urblist_lock acquired
+ * Must be called with urb_list_lock acquired
*/
-static void uhci_transfer_result(struct urb *urb)
+static void uhci_transfer_result(struct uhci *uhci, struct urb *urb)
{
- struct usb_device *dev = urb->dev;
- struct urb *turb;
- int proceed = 0, is_ring = 0;
int ret = -EINVAL;
unsigned long flags;
+ struct urb_priv *urbp;
+
+ /* The root hub is special */
+ if (urb->dev == uhci->rh.dev)
+ return;
spin_lock_irqsave(&urb->lock, flags);
+ urbp = (struct urb_priv *)urb->hcpriv;
+
+ if (urb->status != -EINPROGRESS) {
+ info("uhci_transfer_result: called for URB %p not in flight?", urb);
+ spin_unlock_irqrestore(&urb->lock, flags);
+ return;
+ }
+
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
ret = uhci_result_control(urb);
break;
}
- urb->status = ret;
+ urbp->status = ret;
spin_unlock_irqrestore(&urb->lock, flags);
/* Spinlock needed ? */
if (urb->bandwidth)
usb_release_bandwidth(urb->dev, urb, 1);
- uhci_unlink_generic(urb);
+ uhci_unlink_generic(uhci, urb);
break;
case PIPE_INTERRUPT:
/* Interrupts are an exception */
if (urb->interval) {
- urb->complete(urb);
- uhci_reset_interrupt(urb);
- return;
+ uhci_add_complete(urb);
+ return; /* <-- note return */
}
/* Release bandwidth for Interrupt or Isoc. transfers */
/* Spinlock needed ? */
if (urb->bandwidth)
usb_release_bandwidth(urb->dev, urb, 0);
- uhci_unlink_generic(urb);
+ uhci_unlink_generic(uhci, urb);
break;
+ default:
+ info("uhci_transfer_result: unknown pipe type %d for urb %p\n",
+ usb_pipetype(urb->pipe), urb);
}
- if (urb->next) {
- turb = urb->next;
- do {
- if (turb->status != -EINPROGRESS) {
- proceed = 1;
- break;
- }
-
- turb = turb->next;
- } while (turb && turb != urb && turb != urb->next);
-
- if (turb == urb || turb == urb->next)
- is_ring = 1;
- }
-
- if (urb->complete && !proceed) {
- urb->complete(urb);
- if (!proceed && is_ring)
- uhci_submit_urb(urb);
- }
-
- if (proceed && urb->next) {
- turb = urb->next;
- do {
- if (turb->status != -EINPROGRESS &&
- uhci_submit_urb(turb) != 0)
+ list_del(&urb->urb_list);
+ INIT_LIST_HEAD(&urb->urb_list);
- turb = turb->next;
- } while (turb && turb != urb->next);
-
- if (urb->complete)
- urb->complete(urb);
- }
-
- /* We decrement the usage count after we're done with everything */
- usb_dec_dev_use(dev);
+ uhci_add_complete(urb);
}
-static int uhci_unlink_generic(struct urb *urb)
+static void uhci_unlink_generic(struct uhci *uhci, struct urb *urb)
{
struct urb_priv *urbp = urb->hcpriv;
- struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
+ /* We can get called when urbp allocation fails, so check */
if (!urbp)
- return -EINVAL;
+ return;
uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
- uhci_remove_urb_list(uhci, urb);
+ uhci_delete_queued_urb(uhci, urb);
if (urbp->qh)
/* The interrupt loop will reclaim the QH's */
uhci_remove_qh(uhci, urbp->qh);
-
- if (!list_empty(&urbp->urb_queue_list))
- uhci_delete_queued_urb(uhci, urb);
-
- uhci_destroy_urb_priv(urb);
-
- urb->dev = NULL;
-
- return 0;
}
+/* FIXME: If we forcefully unlink an urb, we should reset the toggle for */
+/* that pipe to match what actually completed */
static int uhci_unlink_urb(struct urb *urb)
{
struct uhci *uhci;
- int ret = 0;
unsigned long flags;
+ struct urb_priv *urbp = urb->hcpriv;
if (!urb)
return -EINVAL;
- if (!urb->dev || !urb->dev->bus)
+ if (!urb->dev || !urb->dev->bus || !urb->dev->bus->hcpriv)
return -ENODEV;
uhci = (struct uhci *)urb->dev->bus->hcpriv;
- /* Short circuit the virtual root hub */
- if (usb_pipedevice(urb->pipe) == uhci->rh.devnum)
- return rh_unlink_urb(urb);
-
/* Release bandwidth for Interrupt or Isoc. transfers */
/* Spinlock needed ? */
if (urb->bandwidth) {
}
}
- if (urb->status == -EINPROGRESS) {
- uhci_unlink_generic(urb);
+ if (urb->status != -EINPROGRESS)
+ return 0;
+
+ spin_lock_irqsave(&uhci->urb_list_lock, flags);
+ list_del(&urb->urb_list);
+ INIT_LIST_HEAD(&urb->urb_list);
+ spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
+
+ uhci_unlink_generic(uhci, urb);
+ /* Short circuit the virtual root hub */
+ if (urb->dev == uhci->rh.dev) {
+ rh_unlink_urb(urb);
+ uhci_call_completion(urb);
+ } else {
if (urb->transfer_flags & USB_ASYNC_UNLINK) {
- urb->status = -ECONNABORTED;
+ /* urb_list is available now since we called */
+ /* uhci_unlink_generic already */
- spin_lock_irqsave(&uhci->urb_remove_lock, flags);
+ urbp->status = urb->status = -ECONNABORTED;
+
+ spin_lock_irqsave(&uhci->urb_remove_list_lock, flags);
/* Check to see if the remove list is empty */
if (list_empty(&uhci->urb_remove_list))
list_add(&urb->urb_list, &uhci->urb_remove_list);
- spin_unlock_irqrestore(&uhci->urb_remove_lock, flags);
+ spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags);
} else {
urb->status = -ENOENT;
} else
schedule_timeout(1+1*HZ/1000);
- if (urb->complete)
- urb->complete(urb);
+ uhci_call_completion(urb);
}
}
- return ret;
+ return 0;
}
static int uhci_fsbr_timeout(struct uhci *uhci, struct urb *urb)
/* and we'd be turning on FSBR next frame anyway, so it's a wash */
urbp->fsbr_timeout = 1;
- head = &urbp->list;
+ head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
uhci_unlink_urb
};
-/* -------------------------------------------------------------------
- Virtual Root Hub
- ------------------------------------------------------------------- */
+/* Virtual Root Hub */
static __u8 root_hub_dev_des[] =
{
0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
};
-/*-------------------------------------------------------------------------*/
/* prepare Interrupt pipe transaction data; HUB INTERRUPT ENDPOINT */
static int rh_send_irq(struct urb *urb)
{
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
unsigned int io_addr = uhci->io_addr;
__u16 data = 0;
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
for (i = 0; i < uhci->rh.numports; i++) {
data |= ((inw(io_addr + USBPORTSC1 + i * 2) & 0xa) > 0 ? (1 << (i + 1)) : 0);
*(__u16 *) urb->transfer_buffer = cpu_to_le16(data);
urb->actual_length = len;
- urb->status = USB_ST_NOERROR;
+ urbp->status = 0;
if ((data > 0) && (uhci->rh.send != 0)) {
dbg("root-hub INT complete: port1: %x port2: %x data: %x",
inw(io_addr + USBPORTSC1), inw(io_addr + USBPORTSC2), data);
- urb->complete(urb);
+ uhci_call_completion(urb);
}
- return USB_ST_NOERROR;
+ return 0;
}
-/*-------------------------------------------------------------------------*/
/* Virtual Root Hub INTs are polled by this timer every "interval" ms */
static int rh_init_int_timer(struct urb *urb);
{
struct urb *urb = (struct urb *)ptr;
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
- struct list_head *tmp, *head = &uhci->urb_list;
- struct urb_priv *urbp;
- int len;
+ struct list_head list, *tmp, *head;
unsigned long flags;
- if (uhci->rh.send) {
- len = rh_send_irq(urb);
- if (len > 0) {
- urb->actual_length = len;
- if (urb->complete)
- urb->complete(urb);
- }
- }
+ if (uhci->rh.send)
+ rh_send_irq(urb);
+
+ INIT_LIST_HEAD(&list);
+
+ spin_lock_irqsave(&uhci->urb_list_lock, flags);
+ head = &uhci->urb_list;
- nested_lock(&uhci->urblist_lock, flags);
tmp = head->next;
while (tmp != head) {
- struct urb *u = list_entry(tmp, urb_t, urb_list);
+ struct urb *u = list_entry(tmp, struct urb, urb_list);
+ struct urb_priv *urbp = (struct urb_priv *)u->hcpriv;
tmp = tmp->next;
- urbp = (struct urb_priv *)u->hcpriv;
- if (urbp) {
- /* Check if the FSBR timed out */
- if (urbp->fsbr && time_after_eq(jiffies, urbp->inserttime + IDLE_TIMEOUT))
- uhci_fsbr_timeout(uhci, u);
+ /* Check if the FSBR timed out */
+ if (urbp->fsbr && time_after_eq(jiffies, urbp->inserttime + IDLE_TIMEOUT))
+ uhci_fsbr_timeout(uhci, u);
- /* Check if the URB timed out */
- if (u->timeout && time_after_eq(jiffies, u->timeout)) {
- u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
- uhci_unlink_urb(u);
- }
+ /* Check if the URB timed out */
+ if (u->timeout && time_after_eq(jiffies, u->timeout)) {
+ list_del(&u->urb_list);
+ list_add_tail(&u->urb_list, &list);
}
}
- nested_unlock(&uhci->urblist_lock, flags);
+ spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
+
+ head = &list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb *u = list_entry(tmp, struct urb, urb_list);
+
+ tmp = tmp->next;
+
+ u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
+ uhci_unlink_urb(u);
+ }
/* enter global suspend if nothing connected */
if (!uhci->is_suspended && !ports_active(uhci))
rh_init_int_timer(urb);
}
-/*-------------------------------------------------------------------------*/
/* Root Hub INTs are polled by this timer */
static int rh_init_int_timer(struct urb *urb)
{
return 0;
}
-/*-------------------------------------------------------------------------*/
#define OK(x) len = (x); break
#define CLR_RH_PORTSTAT(x) \
outw(status, io_addr + USBPORTSC1 + 2 * (wIndex-1))
-/*-------------------------------------------------------------------------*/
-/*************************
- ** Root Hub Control Pipe
- *************************/
-
+/* Root Hub Control Pipe */
static int rh_submit_urb(struct urb *urb)
{
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
int leni = urb->transfer_buffer_length;
int len = 0;
int status = 0;
- int stat = USB_ST_NOERROR;
+ int stat = 0;
int i;
unsigned int io_addr = uhci->io_addr;
__u16 cstatus;
uhci->rh.interval = urb->interval;
rh_init_int_timer(urb);
- return USB_ST_NOERROR;
+ return -EINPROGRESS;
}
bmRType_bReq = cmd->requesttype | cmd->request << 8;
}
urb->actual_length = len;
- urb->status = stat;
- if (urb->complete)
- urb->complete(urb);
- return USB_ST_NOERROR;
+ return stat;
}
-/*-------------------------------------------------------------------------*/
static int rh_unlink_urb(struct urb *urb)
{
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
if (uhci->rh.urb == urb) {
+ urb->status = -ENOENT;
uhci->rh.send = 0;
+ uhci->rh.urb = NULL;
del_timer(&uhci->rh.rh_int_timer);
}
return 0;
}
-/*-------------------------------------------------------------------*/
-void uhci_free_pending_qhs(struct uhci *uhci)
+static void uhci_free_pending_qhs(struct uhci *uhci)
{
struct list_head *tmp, *head;
unsigned long flags;
- /* Free any pending QH's */
- spin_lock_irqsave(&uhci->qh_remove_lock, flags);
+ spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
head = &uhci->qh_remove_list;
tmp = head->next;
while (tmp != head) {
tmp = tmp->next;
list_del(&qh->remove_list);
+ INIT_LIST_HEAD(&qh->remove_list);
+
+ uhci_free_qh(uhci, qh);
+ }
+ spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
+}
+
+static void uhci_call_completion(struct urb *urb)
+{
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+ struct usb_device *dev = urb->dev;
+ struct uhci *uhci = (struct uhci *)dev->bus->hcpriv;
+ int is_ring = 0, killed, resubmit_interrupt, status;
+ struct urb *nurb;
+
+ killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED ||
+ urb->status == -ECONNRESET);
+ resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT &&
+ urb->interval && !killed);
+
+ nurb = urb->next;
+ if (nurb && !killed) {
+ int count = 0;
+
+ while (nurb && nurb != urb && count < MAX_URB_LOOP) {
+ if (nurb->status == -ENOENT ||
+ nurb->status == -ECONNABORTED ||
+ nurb->status == -ECONNRESET) {
+ killed = 1;
+ break;
+ }
+
+ nurb = nurb->next;
+ count++;
+ }
+
+ if (count == MAX_URB_LOOP)
+ err("uhci_call_completion: too many linked URB's, loop? (first loop)");
+
+ /* Check to see if chain is a ring */
+ is_ring = (nurb == urb);
+ }
+
+ status = urbp->status;
+ if (!resubmit_interrupt)
+ /* We don't need urb_priv anymore */
+ uhci_destroy_urb_priv(urb);
+
+ if (!killed)
+ urb->status = status;
+
+ if (urb->transfer_buffer_length)
+ pci_dma_sync_single(uhci->dev, urbp->transfer_buffer_dma_handle,
+ urb->transfer_buffer_length, usb_pipein(urb->pipe) ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+
+ if (usb_pipetype(urb->pipe) == PIPE_CONTROL && urb->setup_packet)
+ pci_dma_sync_single(uhci->dev, urbp->setup_packet_dma_handle,
+ sizeof(devrequest), PCI_DMA_TODEVICE);
+
+ urb->dev = NULL;
+ if (urb->complete)
+ urb->complete(urb);
+
+ if (resubmit_interrupt) {
+ urb->dev = dev;
+ uhci_reset_interrupt(urb);
+ } else {
+ if (is_ring && !killed) {
+ urb->dev = dev;
+ uhci_submit_urb(urb);
+ } else {
+ /* We decrement the usage count after we're done */
+ /* with everything */
+ usb_dec_dev_use(dev);
+ }
+ }
+}
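The first half of uhci_call_completion() is a bounded walk over urb->next: follow the chain at most MAX_URB_LOOP hops, and if it arrives back at the starting URB the chain is a ring that should be resubmitted after the completion handler runs. The same logic in a self-contained sketch, with an illustrative struct:

/* Bounded ring walk: terminate on NULL, on returning to the start, or
 * after MAX_URB_LOOP hops (suspected corrupt chain). */
#include <assert.h>
#include <stddef.h>

#define MAX_URB_LOOP 2048

struct urb { struct urb *next; };

static int is_ring(struct urb *urb)
{
    struct urb *n = urb->next;
    int count = 0;

    while (n && n != urb && count < MAX_URB_LOOP) {
        n = n->next;
        count++;
    }
    return n == urb;    /* came back to where we started */
}

int main(void)
{
    struct urb a, b;

    a.next = &b;
    b.next = &a;        /* two-entry ring */
    assert(is_ring(&a));

    b.next = NULL;      /* plain chain */
    assert(!is_ring(&a));
    return 0;
}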
+
+static void uhci_finish_completion(struct uhci *uhci)
+{
+ struct list_head *tmp, *head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&uhci->complete_list_lock, flags);
+ head = &uhci->complete_list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb_priv *urbp = list_entry(tmp, struct urb_priv, complete_list);
+ struct urb *urb = urbp->urb;
+
+ tmp = tmp->next;
+
+ list_del(&urbp->complete_list);
+ INIT_LIST_HEAD(&urbp->complete_list);
+
+ uhci_call_completion(urb);
+ }
+ spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
+}
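uhci_add_complete() and uhci_finish_completion() together form a classic deferral pair: interrupt-time code only queues finished URBs on complete_list, and the callbacks run later from uhci_finish_completion() once the schedule walk is done, so completion handlers can resubmit URBs without deadlocking on the driver's locks. A single-threaded model of the split, with illustrative names and an array in place of the locked list:

/* Deferred-completion model: the "IRQ side" only records work, and the
 * callbacks run in a separate pass after the list is detached. */
#include <stdio.h>

#define MAX_PENDING 16

typedef void (*complete_fn)(int id);

static int pending[MAX_PENDING];
static int npending;

static void add_complete(int id)        /* IRQ side: just queue */
{
    pending[npending++] = id;
}

static void finish_completion(complete_fn cb)   /* later: run callbacks */
{
    int i, n = npending;

    npending = 0;       /* detach before calling out */
    for (i = 0; i < n; i++)
        cb(pending[i]);
}

static void urb_done(int id)
{
    printf("urb %d complete\n", id);
}

int main(void)
{
    add_complete(1);
    add_complete(2);
    finish_completion(urb_done);
    return 0;
}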
+
+static void uhci_remove_pending_qhs(struct uhci *uhci)
+{
+ struct list_head *tmp, *head;
+ unsigned long flags;
- uhci_free_qh(qh);
+ spin_lock_irqsave(&uhci->urb_remove_list_lock, flags);
+ head = &uhci->urb_remove_list;
+ tmp = head->next;
+ while (tmp != head) {
+ struct urb *urb = list_entry(tmp, struct urb, urb_list);
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+
+ tmp = tmp->next;
+
+ list_del(&urb->urb_list);
+ INIT_LIST_HEAD(&urb->urb_list);
+
+ urbp->status = urb->status = -ECONNRESET;
+ uhci_call_completion(urb);
}
- spin_unlock_irqrestore(&uhci->qh_remove_lock, flags);
+ spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags);
}
static void uhci_interrupt(int irq, void *__uhci, struct pt_regs *regs)
struct uhci *uhci = __uhci;
unsigned int io_addr = uhci->io_addr;
unsigned short status;
- unsigned long flags;
struct list_head *tmp, *head;
/*
status = inw(io_addr + USBSTS);
if (!status) /* shared interrupt, not mine */
return;
- outw(status, io_addr + USBSTS);
+ outw(status, io_addr + USBSTS); /* Clear it */
if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
if (status & USBSTS_HSE)
}
}
- if (status & USBSTS_RD) {
+ if (status & USBSTS_RD)
wakeup_hc(uhci);
- }
uhci_free_pending_qhs(uhci);
- spin_lock(&uhci->urb_remove_lock);
- head = &uhci->urb_remove_list;
- tmp = head->next;
- while (tmp != head) {
- struct urb *urb = list_entry(tmp, struct urb, urb_list);
-
- tmp = tmp->next;
-
- list_del(&urb->urb_list);
-
- if (urb->complete)
- urb->complete(urb);
- }
- spin_unlock(&uhci->urb_remove_lock);
+ uhci_remove_pending_qhs(uhci);
uhci_clear_next_interrupt(uhci);
- /* Walk the list of pending TD's to see which ones completed */
- nested_lock(&uhci->urblist_lock, flags);
+ /* Walk the list of pending URB's to see which ones completed */
+ spin_lock(&uhci->urb_list_lock);
head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
tmp = tmp->next;
/* Checks the status and does all of the magic necessary */
- uhci_transfer_result(urb);
+ uhci_transfer_result(uhci, urb);
}
- nested_unlock(&uhci->urblist_lock, flags);
+ spin_unlock(&uhci->urb_list_lock);
+
+ uhci_finish_completion(uhci);
}
static void reset_hc(struct uhci *uhci)
/* Start at frame 0 */
outw(0, io_addr + USBFRNUM);
- outl(virt_to_bus(uhci->fl), io_addr + USBFLBASEADD);
+ outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
/* Run and mark it configured with a 64-byte max packet */
outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
}
+static int uhci_alloc_root_hub(struct uhci *uhci)
+{
+ struct usb_device *dev;
+
+ dev = usb_alloc_dev(NULL, uhci->bus);
+ if (!dev)
+ return -1;
+
+ uhci->bus->root_hub = dev;
+ uhci->rh.dev = dev;
+
+ return 0;
+}
+
+static int uhci_start_root_hub(struct uhci *uhci)
+{
+ usb_connect(uhci->rh.dev);
+
+ if (usb_new_device(uhci->rh.dev) != 0) {
+ usb_free_dev(uhci->rh.dev);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PROC_FS
+static int uhci_num = 0;
+#endif
+
/*
* Allocate a frame list, and then setup the skeleton
*
* - The second queue is the "control queue", split into low and high speed
* - The third queue is "bulk data".
*/
-static struct uhci *alloc_uhci(unsigned int io_addr, unsigned int io_size)
+static struct uhci *alloc_uhci(struct pci_dev *dev, unsigned int io_addr, unsigned int io_size)
{
int i, port;
struct uhci *uhci;
struct usb_bus *bus;
+ dma_addr_t dma_handle;
uhci = kmalloc(sizeof(*uhci), GFP_KERNEL);
if (!uhci)
memset(uhci, 0, sizeof(*uhci));
+ uhci->dev = dev;
uhci->irq = -1;
uhci->io_addr = io_addr;
uhci->io_size = io_size;
- spin_lock_init(&uhci->qh_remove_lock);
+ spin_lock_init(&uhci->qh_remove_list_lock);
INIT_LIST_HEAD(&uhci->qh_remove_list);
- spin_lock_init(&uhci->urb_remove_lock);
+ spin_lock_init(&uhci->urb_remove_list_lock);
INIT_LIST_HEAD(&uhci->urb_remove_list);
- nested_init(&uhci->urblist_lock);
+ spin_lock_init(&uhci->urb_list_lock);
INIT_LIST_HEAD(&uhci->urb_list);
- spin_lock_init(&uhci->framelist_lock);
+ spin_lock_init(&uhci->complete_list_lock);
+ INIT_LIST_HEAD(&uhci->complete_list);
+
+ spin_lock_init(&uhci->frame_list_lock);
/* We need exactly one page (per UHCI specs), how convenient */
 /* We assume that one page is at least 4k (1024 frames * 4 bytes) */
- uhci->fl = (void *)__get_free_page(GFP_KERNEL);
- if (!uhci->fl)
- goto au_free_uhci;
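+ /* The frame list now comes from the PCI DMA API: the CPU uses the
+ * pointer returned here while the controller is handed dma_handle,
+ * replacing the old virt_to_bus() translation */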
+ uhci->fl = pci_alloc_consistent(uhci->dev, sizeof(*uhci->fl), &dma_handle);
+ if (!uhci->fl) {
+ printk(KERN_ERR "Unable to allocate consistent memory for frame list\n");
+ goto free_uhci;
+ }
+
+ memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
+
+ uhci->fl->dma_handle = dma_handle;
+
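+ /* Per the UHCI spec, TDs and QHs must be aligned on 16-byte
+ * boundaries; the pci_pools below guarantee that (the '16' argument)
+ * and return a DMA address along with each allocation */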
+ uhci->td_pool = pci_pool_create("uhci_td", uhci->dev,
+ sizeof(struct uhci_td), 16, 0, GFP_DMA | GFP_ATOMIC);
+ if (!uhci->td_pool) {
+ printk(KERN_ERR "Unable to create td pci_pool\n");
+ goto free_fl;
+ }
+
+ uhci->qh_pool = pci_pool_create("uhci_qh", uhci->dev,
+ sizeof(struct uhci_qh), 16, 0, GFP_DMA | GFP_ATOMIC);
+ if (!uhci->qh_pool) {
+ printk(KERN_ERR "Unable to create qh pci_pool\n");
+ goto free_td_pool;
+ }
bus = usb_alloc_bus(&uhci_device_operations);
if (!bus)
- goto au_free_fl;
+ goto free_qh_pool;
uhci->bus = bus;
bus->hcpriv = uhci;
uhci->rh.numports = port;
+ if (uhci_alloc_root_hub(uhci)) {
+ err("unable to allocate root hub");
+ goto free_qh_pool;
+ }
+
+ uhci->skeltd[0] = uhci_alloc_td(uhci, uhci->rh.dev);
+ if (!uhci->skeltd[0]) {
+ err("unable to allocate TD 0");
+ goto free_qh_pool;
+ }
+
/*
* 9 Interrupt queues; link int2 to int1, int4 to int2, etc
* then link int1 to control and control to bulk
*/
for (i = 1; i < 9; i++) {
- struct uhci_td *td = &uhci->skeltd[i];
+ struct uhci_td *td;
+
+ td = uhci->skeltd[i] = uhci_alloc_td(uhci, uhci->rh.dev);
+ if (!td) {
+ err("unable to allocate TD %d", i);
+ goto free_tds;
+ }
uhci_fill_td(td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
- td->link = virt_to_bus(&uhci->skeltd[i - 1]);
+ td->link = uhci->skeltd[i - 1]->dma_handle;
}
+ uhci->skel_term_td = uhci_alloc_td(uhci, uhci->rh.dev);
+ if (!uhci->skel_term_td) {
+ err("unable to allocate TD 0");
+ goto free_fl;
+ }
+
+ for (i = 0; i < UHCI_NUM_SKELQH; i++) {
+ uhci->skelqh[i] = uhci_alloc_qh(uhci, uhci->rh.dev);
+ if (!uhci->skelqh[i]) {
+ err("unable to allocate QH %d", i);
+ goto free_qhs;
+ }
+ }
- uhci_fill_td(&uhci->skel_int1_td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
- uhci->skel_int1_td.link = virt_to_bus(&uhci->skel_ls_control_qh) | UHCI_PTR_QH;
+ uhci_fill_td(uhci->skel_int1_td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
+ uhci->skel_int1_td->link = uhci->skel_ls_control_qh->dma_handle | UHCI_PTR_QH;
- uhci->skel_ls_control_qh.link = virt_to_bus(&uhci->skel_hs_control_qh) | UHCI_PTR_QH;
- uhci->skel_ls_control_qh.element = UHCI_PTR_TERM;
+ uhci->skel_ls_control_qh->link = uhci->skel_hs_control_qh->dma_handle | UHCI_PTR_QH;
+ uhci->skel_ls_control_qh->element = UHCI_PTR_TERM;
- uhci->skel_hs_control_qh.link = virt_to_bus(&uhci->skel_bulk_qh) | UHCI_PTR_QH;
- uhci->skel_hs_control_qh.element = UHCI_PTR_TERM;
+ uhci->skel_hs_control_qh->link = uhci->skel_bulk_qh->dma_handle | UHCI_PTR_QH;
+ uhci->skel_hs_control_qh->element = UHCI_PTR_TERM;
- uhci->skel_bulk_qh.link = virt_to_bus(&uhci->skel_term_qh) | UHCI_PTR_QH;
- uhci->skel_bulk_qh.element = UHCI_PTR_TERM;
+ uhci->skel_bulk_qh->link = uhci->skel_term_qh->dma_handle | UHCI_PTR_QH;
+ uhci->skel_bulk_qh->element = UHCI_PTR_TERM;
/* This dummy TD is to work around a bug in Intel PIIX controllers */
- uhci_fill_td(&uhci->skel_term_td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
- uhci->skel_term_td.link = UHCI_PTR_TERM;
+ uhci_fill_td(uhci->skel_term_td, 0, (UHCI_NULL_DATA_SIZE << 21) | (0x7f << 8) | USB_PID_IN, 0);
+ uhci->skel_term_td->link = uhci->skel_term_td->dma_handle;
- uhci->skel_term_qh.link = UHCI_PTR_TERM;
- uhci->skel_term_qh.element = virt_to_bus(&uhci->skel_term_td);
+ uhci->skel_term_qh->link = UHCI_PTR_TERM;
+ uhci->skel_term_qh->element = uhci->skel_term_td->dma_handle;
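+ /* The resulting schedule for every frame is: one of the int queues ->
+ * ... -> int1 -> ls_control -> hs_control -> bulk -> term, where the
+ * terminating QH holds only the PIIX workaround dummy TD */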
/*
* Fill the frame list: make all entries point to
* scatter the interrupt queues in a way that gives
* us a reasonable dynamic range for irq latencies.
*/
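+ /* The (i & 1), (i & 2), ... tests below make 'irq' count the trailing
+ * one-bits of the frame number (capped at the deepest queue): frame 0
+ * enters at int1, frame 1 at int2, frame 3 at int4, and so on; since
+ * each intN TD links down to intN/2, queue intN runs once every N
+ * frames */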
- for (i = 0; i < 1024; i++) {
- struct uhci_td *irq = &uhci->skel_int1_td;
+ for (i = 0; i < UHCI_NUMFRAMES; i++) {
+ int irq = 0;
if (i & 1) {
irq++;
}
/* Only place we don't use the frame list routines */
- uhci->fl->frame[i] = virt_to_bus(irq);
+ uhci->fl->frame[i] = uhci->skeltd[irq]->dma_handle;
}
return uhci;
/*
* error exits:
*/
-au_free_fl:
- free_page((unsigned long)uhci->fl);
-au_free_uhci:
+free_qhs:
+ for (i = 0; i < UHCI_NUM_SKELQH; i++)
+ if (uhci->skelqh[i]) {
+ uhci_free_qh(uhci, uhci->skelqh[i]);
+ uhci->skelqh[i] = NULL;
+ }
+
+free_tds:
+ for (i = 0; i < UHCI_NUM_SKELTD; i++)
+ if (uhci->skeltd[i]) {
+ uhci_free_td(uhci, uhci->skeltd[i]);
+ uhci->skeltd[i] = NULL;
+ }
+
+free_qh_pool:
+ pci_pool_destroy(uhci->qh_pool);
+
+free_td_pool:
+ pci_pool_destroy(uhci->td_pool);
+
+free_fl:
+ pci_free_consistent(uhci->dev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
+
+free_uhci:
kfree(uhci);
return NULL;
*/
static void release_uhci(struct uhci *uhci)
{
+ int i;
+#ifdef CONFIG_PROC_FS
+ char buf[8];
+#endif
+
if (uhci->irq >= 0) {
free_irq(uhci->irq, uhci);
uhci->irq = -1;
}
+ for (i = 0; i < UHCI_NUM_SKELQH; i++)
+ if (uhci->skelqh[i]) {
+ uhci_free_qh(uhci, uhci->skelqh[i]);
+ uhci->skelqh[i] = NULL;
+ }
+
+ for (i = 0; i < UHCI_NUM_SKELTD; i++)
+ if (uhci->skeltd[i]) {
+ uhci_free_td(uhci, uhci->skeltd[i]);
+ uhci->skeltd[i] = NULL;
+ }
+
+ if (uhci->qh_pool) {
+ pci_pool_destroy(uhci->qh_pool);
+ uhci->qh_pool = NULL;
+ }
+
+ if (uhci->td_pool) {
+ pci_pool_destroy(uhci->td_pool);
+ uhci->td_pool = NULL;
+ }
+
if (uhci->fl) {
- free_page((unsigned long)uhci->fl);
+ pci_free_consistent(uhci->dev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
uhci->fl = NULL;
}
usb_free_bus(uhci->bus);
- kfree(uhci);
-}
-int uhci_start_root_hub(struct uhci *uhci)
-{
- struct usb_device *dev;
-
- dev = usb_alloc_dev(NULL, uhci->bus);
- if (!dev)
- return -1;
-
- uhci->bus->root_hub = dev;
- usb_connect(dev);
-
- if (usb_new_device(dev) != 0) {
- usb_free_dev(dev);
+#ifdef CONFIG_PROC_FS
+ sprintf(buf, "hc%d", uhci->num);
- return -1;
- }
+ remove_proc_entry(buf, uhci_proc_root);
+ uhci->proc_entry = NULL;
+#endif
- return 0;
+ kfree(uhci);
}
/*
- * If we've successfully found a UHCI, now is the time to increment the
- * module usage count, and return success..
+ * If we've successfully found a UHCI, now is the time to return success..
*/
static int setup_uhci(struct pci_dev *dev, int irq, unsigned int io_addr, unsigned int io_size)
{
int retval;
struct uhci *uhci;
char buf[8], *bufp = buf;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *ent;
+#endif
#ifndef __sparc__
sprintf(buf, "%d", irq);
printk(KERN_INFO __FILE__ ": USB UHCI at I/O 0x%x, IRQ %s\n",
io_addr, bufp);
- uhci = alloc_uhci(io_addr, io_size);
+ uhci = alloc_uhci(dev, io_addr, io_size);
if (!uhci)
return -ENOMEM;
+
dev->driver_data = uhci;
+#ifdef CONFIG_PROC_FS
+ uhci->num = uhci_num++;
+
+ sprintf(buf, "hc%d", uhci->num);
+
+ ent = create_proc_entry(buf, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
+ if (!ent)
+ return -ENOMEM;
+
+ ent->data = uhci;
+ ent->proc_fops = &uhci_proc_operations;
+ ent->size = 0;
+ uhci->proc_entry = ent;
+#endif
+
request_region(uhci->io_addr, io_size, "usb-uhci");
reset_hc(uhci);
{
int i;
+ if (!pci_dma_supported(dev, 0xFFFFFFFF)) {
+ err("PCI subsystem doesn't support 32 bit addressing?");
+ return -ENODEV;
+ }
+ dev->dma_mask = 0xFFFFFFFF;
+
/* disable legacy emulation */
pci_write_config_word(dev, USBLEGSUP, 0);
usb_deregister_bus(uhci->bus);
+ /*
+ * At this point, we're guaranteed that no new connects can be made
+ * to this bus since there are no more parents
+ */
+ uhci_free_pending_qhs(uhci);
+ uhci_remove_pending_qhs(uhci);
+
reset_hc(uhci);
release_region(uhci->io_addr, uhci->io_size);
start_hc((struct uhci *) dev->driver_data);
}
-/*-------------------------------------------------------------------------*/
-
-static const struct pci_device_id __devinitdata uhci_pci_ids [] = { {
+static const struct pci_device_id __devinitdata uhci_pci_ids[] = { {
/* handle any USB UHCI controller */
class: ((PCI_CLASS_SERIAL_USB << 8) | 0x00),
static int __init uhci_hcd_init(void)
{
- int retval;
-
- retval = -ENOMEM;
+ int retval = -ENOMEM;
- /* We throw all of the TD's and QH's into a kmem cache */
- /* TD's and QH's need to be 16 byte aligned and SLAB_HWCACHE_ALIGN */
- /* does this for us */
- uhci_td_cachep = kmem_cache_create("uhci_td",
- sizeof(struct uhci_td), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
-
- if (!uhci_td_cachep)
- goto td_failed;
-
- uhci_qh_cachep = kmem_cache_create("uhci_qh",
- sizeof(struct uhci_qh), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (debug) {
+ errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
+ if (!errbuf)
+ goto errbuf_failed;
+ }
- if (!uhci_qh_cachep)
- goto qh_failed;
+#ifdef CONFIG_PROC_FS
+ uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
+ if (!uhci_proc_root)
+ goto proc_failed;
+#endif
uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
sizeof(struct urb_priv), 0, 0, NULL, NULL);
-
if (!uhci_up_cachep)
goto up_failed;
- retval = pci_module_init (&uhci_pci_driver);
+ retval = pci_module_init(&uhci_pci_driver);
if (retval)
goto init_failed;
printk(KERN_INFO "uhci: not all urb_priv's were freed\n");
up_failed:
- if (kmem_cache_destroy(uhci_qh_cachep))
- printk(KERN_INFO "uhci: not all QH's were freed\n");
-qh_failed:
- if (kmem_cache_destroy(uhci_td_cachep))
- printk(KERN_INFO "uhci: not all TD's were freed\n");
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("uhci", 0);
+
+proc_failed:
+#endif
+ if (errbuf)
+ kfree(errbuf);
+
+errbuf_failed:
-td_failed:
return retval;
}
static void __exit uhci_hcd_cleanup (void)
{
- pci_unregister_driver (&uhci_pci_driver);
+ pci_unregister_driver(&uhci_pci_driver);
if (kmem_cache_destroy(uhci_up_cachep))
printk(KERN_INFO "uhci: not all urb_priv's were freed\n");
- if (kmem_cache_destroy(uhci_qh_cachep))
- printk(KERN_INFO "uhci: not all QH's were freed\n");
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("uhci", 0);
+#endif
- if (kmem_cache_destroy(uhci_td_cachep))
- printk(KERN_INFO "uhci: not all TD's were freed\n");
+ if (errbuf)
+ kfree(errbuf);
}
module_init(uhci_hcd_init);
#include <linux/list.h>
#include <linux/usb.h>
-/*
- * This nested spinlock code is courtesy of Davide Libenzi <dlibenzi@maticad.it>
- */
-struct s_nested_lock {
- spinlock_t lock;
- void *uniq;
- short int count;
-};
-
-#define nested_init(snl) \
- spin_lock_init(&(snl)->lock); \
- (snl)->uniq = NULL; \
- (snl)->count = 0;
-
-#define nested_lock(snl, flags) \
- if ((snl)->uniq == current) { \
- (snl)->count++; \
- flags = 0; /* No warnings */ \
- } else { \
- spin_lock_irqsave(&(snl)->lock, flags); \
- (snl)->count++; \
- (snl)->uniq = current; \
- }
-
-#define nested_unlock(snl, flags) \
- if (!--(snl)->count) { \
- (snl)->uniq = NULL; \
- spin_unlock_irqrestore(&(snl)->lock, flags); \
- }
-
/*
* Universal Host Controller Interface data structures and defines
*/
#define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */
#define CAN_SCHEDULE_FRAMES 1000 /* how far future frames can be scheduled */
-struct uhci_framelist {
+struct uhci_frame_list {
__u32 frame[UHCI_NUMFRAMES];
-} __attribute__((aligned(4096)));
-struct uhci_td;
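+ /* frame[] holds the bus addresses the controller follows; frame_cpu[]
+ * keeps the matching kernel pointers now that bus_to_virt() is gone */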
+ void *frame_cpu[UHCI_NUMFRAMES];
+
+ dma_addr_t dma_handle;
+};
+
+struct urb_priv;
struct uhci_qh {
/* Hardware fields */
- __u32 link; /* Next queue */
- __u32 element; /* Queue element pointer */
+ __u32 link; /* Next queue */
+ __u32 element; /* Queue element pointer */
/* Software fields */
- /* Can't use list_head since we want a specific order */
- struct usb_device *dev; /* The owning device */
+ dma_addr_t dma_handle;
- struct uhci_qh *prevqh, *nextqh;
+ struct usb_device *dev;
+ struct urb_priv *urbp;
- struct list_head remove_list;
+ struct list_head list; /* P: uhci->frame_list_lock */
+ struct list_head remove_list; /* P: uhci->remove_list_lock */
} __attribute__((aligned(16)));
/*
#define uhci_status_bits(ctrl_sts) (ctrl_sts & 0xFE0000)
#define uhci_actual_length(ctrl_sts) ((ctrl_sts + 1) & TD_CTRL_ACTLEN_MASK) /* 1-based */
-#define uhci_ptr_to_virt(x) bus_to_virt(x & ~UHCI_PTR_BITS)
-
/*
* for TD <info>: (a.k.a. Token)
*/
* On 64-bit machines we probably want to take advantage of the fact that
* hw doesn't really care about the size of the sw-only area.
*
- * Alas, not anymore, we have more than 4 words for software, woops
+ * Alas, not anymore, we have more than 4 words for software, woops.
+ * Everything still works tho, surprise! -jerdfelt
*/
struct uhci_td {
/* Hardware fields */
__u32 buffer;
/* Software fields */
- unsigned int *frameptr; /* Frame list pointer */
- struct uhci_td *prevtd, *nexttd; /* Previous and next TD in queue */
+ dma_addr_t dma_handle;
struct usb_device *dev;
- struct urb *urb; /* URB this TD belongs to */
+ struct urb *urb;
- struct list_head list;
+ struct list_head list; /* P: urb->lock */
+
+ int frame;
+ struct list_head fl_list; /* P: frame_list_lock */
} __attribute__((aligned(16)));
/*
}
struct virt_root_hub {
+ struct usb_device *dev;
int devnum; /* Address of Root Hub endpoint */
- void *urb;
+ struct urb *urb;
void *int_addr;
int send;
int interval;
* a subset of what the full implementation needs.
*/
struct uhci {
+ struct pci_dev *dev;
+
+ /* procfs */
+ int num;
+ struct proc_dir_entry *proc_entry;
+
/* Grabbed from PCI */
int irq;
unsigned int io_addr;
struct list_head uhci_list;
+ struct pci_pool *qh_pool;
+ struct pci_pool *td_pool;
+
struct usb_bus *bus;
- struct uhci_td skeltd[UHCI_NUM_SKELTD]; /* Skeleton TD's */
- struct uhci_qh skelqh[UHCI_NUM_SKELQH]; /* Skeleton QH's */
+ struct uhci_td *skeltd[UHCI_NUM_SKELTD]; /* Skeleton TD's */
+ struct uhci_qh *skelqh[UHCI_NUM_SKELQH]; /* Skeleton QH's */
- spinlock_t framelist_lock;
- struct uhci_framelist *fl; /* Frame list */
+ spinlock_t frame_list_lock;
+ struct uhci_frame_list *fl; /* Frame list */
int fsbr; /* Full speed bandwidth reclamation */
int is_suspended;
- spinlock_t qh_remove_lock;
+ spinlock_t qh_remove_list_lock;
struct list_head qh_remove_list;
- spinlock_t urb_remove_lock;
+ spinlock_t urb_remove_list_lock;
struct list_head urb_remove_list;
- struct s_nested_lock urblist_lock;
+ spinlock_t urb_list_lock;
struct list_head urb_list;
+ spinlock_t complete_list_lock;
+ struct list_head complete_list;
+
struct virt_root_hub rh; /* private data of the virtual root hub */
};
struct urb_priv {
struct urb *urb;
+ struct usb_device *dev;
+
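+ /* PCI DMA mappings of urb->setup_packet and urb->transfer_buffer */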
+ dma_addr_t setup_packet_dma_handle;
+ dma_addr_t transfer_buffer_dma_handle;
struct uhci_qh *qh; /* QH for this URB */
+ struct list_head td_list;
int fsbr : 1; /* URB turned on FSBR */
int fsbr_timeout : 1; /* URB timed out on FSBR */
/* a control transfer, retrigger */
/* the status phase */
- unsigned long inserttime; /* In jiffies */
+ int status; /* Final status */
- struct list_head list;
+ unsigned long inserttime; /* In jiffies */
- struct list_head urb_queue_list; /* URB's linked together */
+ struct list_head queue_list;
+ struct list_head complete_list;
};
/* -------------------------------------------------------------------------
#define RH_REQ_ERR -1
#define RH_NACK 0x00
-/* needed for the debugging code */
-struct uhci_td *uhci_link_to_td(unsigned int element);
-
-/* Debugging code */
-void uhci_show_td(struct uhci_td *td);
-void uhci_show_status(struct uhci *uhci);
-void uhci_show_urb_queue(struct urb *urb);
-void uhci_show_queue(struct uhci_qh *qh);
-void uhci_show_queues(struct uhci *uhci);
-
#endif
static LIST_HEAD (ohci_hcd_list);
static spinlock_t usb_ed_lock = SPIN_LOCK_UNLOCKED;
+
+/*-------------------------------------------------------------------------*/
+
+/* AMD-756 (D2 rev) reports corrupt register contents in some cases.
+ * The erratum (#4) description is incorrect. AMD's workaround waits
+ * till some bits (mostly reserved) are clear; ok for all revs.
+ */
+#define read_roothub(hc, register, mask) ({ \
+ u32 temp = readl (&hc->regs->roothub.register); \
+ if (hc->flags & OHCI_QUIRK_AMD756) \
+ while (temp & mask) \
+ temp = readl (&hc->regs->roothub.register); \
+ temp; })
+
+static u32 roothub_a (struct ohci *hc)
+ { return read_roothub (hc, a, 0xfc0fe000); }
+static inline u32 roothub_b (struct ohci *hc)
+ { return readl (&hc->regs->roothub.b); }
+static inline u32 roothub_status (struct ohci *hc)
+ { return readl (&hc->regs->roothub.status); }
+static u32 roothub_portstatus (struct ohci *hc, int i)
+ { return read_roothub (hc, portstatus [i], 0xffe0fce0); }
+
+
/*-------------------------------------------------------------------------*
* URB support functions
*-------------------------------------------------------------------------*/
if (urb_priv) {
urb->hcpriv = NULL;
+#ifdef DO_TIMEOUTS
if (urb->timeout) {
list_del (&urb->urb_list);
urb->timeout -= jiffies;
}
+#endif
/* Release int/iso bandwidth */
if (urb->bandwidth) {
static void ohci_dump_roothub (ohci_t *controller, int verbose)
{
- struct ohci_regs *regs = controller->regs;
__u32 temp, ndp, i;
- temp = readl (®s->roothub.a);
+ temp = roothub_a (controller);
ndp = (temp & RH_A_NDP);
if (verbose) {
(temp & RH_A_PSM) ? " PSM" : "",
ndp
);
- temp = readl (®s->roothub.b);
+ temp = roothub_b (controller);
dbg ("roothub.b: %08x PPCM=%04x DR=%04x",
temp,
(temp & RH_B_PPCM) >> 16,
(temp & RH_B_DR)
);
- temp = readl (®s->roothub.status);
+ temp = roothub_status (controller);
dbg ("roothub.status: %08x%s%s%s%s%s%s",
temp,
(temp & RH_HS_CRWE) ? " CRWE" : "",
}
for (i = 0; i < ndp; i++) {
- temp = readl (®s->roothub.portstatus [i]);
+ temp = roothub_portstatus (controller, i);
dbg ("roothub.portstatus [%d] = 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s",
i,
temp,
return bustime;
}
usb_claim_bandwidth (urb->dev, urb, bustime, usb_pipeisoc (urb->pipe));
+#ifdef DO_TIMEOUTS
urb->timeout = 0;
+#endif
}
spin_lock_irqsave (&usb_ed_lock, flags);
/* fill the TDs and link it to the ed */
td_submit_urb (urb);
+#ifdef DO_TIMEOUTS
/* maybe add to ordered list of timeouts */
if (urb->timeout) {
struct list_head *entry;
/* drive timeouts by SF (messy, but works) */
writel (OHCI_INTR_SF, &ohci->regs->intrenable);
}
+#endif
spin_unlock_irqrestore (&usb_ed_lock, flags);
__u8 data[8];
- num_ports = readl (&ohci->regs->roothub.a) & RH_A_NDP;
+ num_ports = roothub_a (ohci) & RH_A_NDP;
if (num_ports > MAX_ROOT_PORTS) {
err ("bogus NDP=%d for OHCI usb-%s", num_ports,
ohci->ohci_dev->slot_name);
/* retry later; "should not happen" */
return 0;
}
- *(__u8 *) data = (readl (&ohci->regs->roothub.status) & (RH_HS_LPSC | RH_HS_OCIC))
+ *(__u8 *) data = (roothub_status (ohci) & (RH_HS_LPSC | RH_HS_OCIC))
? 1: 0;
ret = *(__u8 *) data;
for ( i = 0; i < num_ports; i++) {
*(__u8 *) (data + (i + 1) / 8) |=
- ((readl (&ohci->regs->roothub.portstatus[i]) &
+ ((roothub_portstatus (ohci, i) &
(RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC | RH_PS_PRSC))
? 1: 0) << ((i + 1) % 8);
ret += *(__u8 *) (data + (i + 1) / 8);
#define OK(x) len = (x); break
#define WR_RH_STAT(x) writel((x), &ohci->regs->roothub.status)
#define WR_RH_PORTSTAT(x) writel((x), &ohci->regs->roothub.portstatus[wIndex-1])
-#define RD_RH_STAT readl(&ohci->regs->roothub.status)
-#define RD_RH_PORTSTAT readl(&ohci->regs->roothub.portstatus[wIndex-1])
+#define RD_RH_STAT roothub_status(ohci)
+#define RD_RH_PORTSTAT roothub_portstatus(ohci,wIndex-1)
/* request to virtual root hub */
case RH_GET_DESCRIPTOR | RH_CLASS:
{
- __u32 temp = readl (&ohci->regs->roothub.a);
+ __u32 temp = roothub_a (ohci);
data_buf [0] = 9; // min length;
data_buf [1] = 0x29;
datab [1] = 0;
data_buf [5] = (temp & RH_A_POTPGT) >> 24;
- temp = readl (&ohci->regs->roothub.b);
+ temp = roothub_b (ohci);
data_buf [7] = temp & RH_B_DR;
if (data_buf [2] < 7) {
data_buf [8] = 0xff;
writel (mask, &ohci->regs->intrstatus);
#ifdef OHCI_USE_NPS
- writel ((readl(&ohci->regs->roothub.a) | RH_A_NPS) & ~RH_A_PSM,
+ /* required for AMD-756 and some Mac platforms */
+ writel ((roothub_a (ohci) | RH_A_NPS) & ~RH_A_PSM,
&ohci->regs->roothub.a);
writel (RH_HS_LPSC, &ohci->regs->roothub.status);
#endif /* OHCI_USE_NPS */
// POTPGT delay is bits 24-31, in 2 ms units.
- mdelay ((readl(&ohci->regs->roothub.a) >> 23) & 0x1fe);
+ mdelay ((roothub_a (ohci) >> 23) & 0x1fe);
/* connect the virtual root hub */
ohci->rh.devnum = 0;
static struct pci_driver ohci_pci_driver;
static int __devinit
-hc_found_ohci (struct pci_dev *dev, int irq, void * mem_base)
+hc_found_ohci (struct pci_dev *dev, int irq,
+ void *mem_base, const struct pci_device_id *id)
{
ohci_t * ohci;
u8 latency, limit;
hc_release_ohci (ohci);
return ret;
}
+ ohci->flags = id->driver_data;
+ if (ohci->flags & OHCI_QUIRK_AMD756)
+ printk (KERN_INFO __FILE__ ": AMD756 erratum 4 workaround\n");
/* bad pci latencies can contribute to overruns */
pci_read_config_byte (dev, PCI_LATENCY_TIMER, &latency);
unsigned long mem_resource, mem_len;
void *mem_base;
- /* blacklisted hardware? */
- if (id->driver_data) {
- info ("%s (%s): %s", dev->slot_name,
- dev->name, (char *) id->driver_data);
- return -ENODEV;
- }
-
if (pci_enable_device(dev) < 0)
return -ENODEV;
/* controller writes into our memory */
pci_set_master (dev);
- return hc_found_ohci (dev, dev->irq, mem_base);
+ return hc_found_ohci (dev, dev->irq, mem_base, id);
}
/*-------------------------------------------------------------------------*/
/*
* AMD-756 [Viper] USB has a serious erratum when used with
- * lowspeed devices like mice; oopses have been seen. The
- * vendor workaround needs an NDA ... for now, blacklist it.
+ * lowspeed devices like mice.
*/
vendor: 0x1022,
device: 0x740c,
subvendor: PCI_ANY_ID,
subdevice: PCI_ANY_ID,
- driver_data: (unsigned long) "blacklisted, erratum #4",
+ driver_data: OHCI_QUIRK_AMD756,
} , {
int irq;
int disabled; /* e.g. got a UE, we're hung */
atomic_t resume_count; /* defending against multiple resumes */
+ unsigned long flags; /* for HC bugs */
+#define OHCI_QUIRK_AMD756 0x01 /* erratum #4 */
struct ohci_regs * regs; /* OHCI controller's memory */
struct list_head ohci_hcd_list; /* list of all ohci_hcd */
* drivers/usb/usb.c
*
* (C) Copyright Linus Torvalds 1999
- * (C) Copyright Johannes Erdfelt 1999
+ * (C) Copyright Johannes Erdfelt 1999-2001
* (C) Copyright Andreas Gal 1999
* (C) Copyright Gregory P. Smith 1999
* (C) Copyright Deti Fliegl 1999 (new USB architecture)
* Think of this as a "USB library" rather than anything else.
* It should be considered a slave, with no callbacks. Callbacks
* are evil.
- *
- * $Id: usb.c,v 1.53 2000/01/14 16:19:09 acher Exp $
*/
#include <linux/config.h>
urb->bandwidth = 0;
}
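+/*
+ * Bus lifetime is reference counted: usb_alloc_bus() starts the count at
+ * one, bus registration and every device on the bus each take another
+ * reference, and the structure is freed by the final usb_bus_put().
+ */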
+static void usb_bus_get(struct usb_bus *bus)
+{
+ atomic_inc(&bus->refcnt);
+}
+
+static void usb_bus_put(struct usb_bus *bus)
+{
+ if (atomic_dec_and_test(&bus->refcnt))
+ kfree(bus);
+}
+
/**
* usb_alloc_bus - creates a new USB host controller structure
* @op: pointer to a struct usb_operations that this bus structure should use
INIT_LIST_HEAD(&bus->bus_list);
INIT_LIST_HEAD(&bus->inodes);
+ atomic_set(&bus->refcnt, 1);
+
return bus;
}
if (!bus)
return;
- kfree(bus);
+ usb_bus_put(bus);
}
/**
} else
warn("too many buses");
+ usb_bus_get(bus);
+
/* Add it to the list of buses */
list_add(&bus->bus_list, &usb_bus_list);
usbdevfs_remove_bus(bus);
clear_bit(bus->busnum, busmap.busmap);
+
+ usb_bus_put(bus);
}
/*
memset(dev, 0, sizeof(*dev));
+ usb_bus_get(bus);
+
dev->bus = bus;
dev->parent = parent;
atomic_set(&dev->refcnt, 1);
if (atomic_dec_and_test(&dev->refcnt)) {
dev->bus->op->deallocate(dev);
usb_destroy_configuration(dev);
+
+ usb_bus_put(dev->bus);
+
kfree(dev);
}
}
#ifdef CONFIG_FB_SIS
{ "sisfb", sisfb_init, sisfb_setup },
#endif
-#ifdef CONFIG_FB_E1355
- { "e1355fb", e1355fb_init, e1355fb_setup },
-#endif
/*
* Generic drivers that are used as fallbacks
#ifdef CONFIG_FB_HIT
{ "hitfb", hitfb_init, NULL },
#endif
+#ifdef CONFIG_FB_E1355
+ { "e1355fb", e1355fb_init, e1355fb_setup },
+#endif
#ifdef CONFIG_FB_DC
{ "dcfb", dcfb_init, NULL },
#endif
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/locks.h>
+#include <linux/bitops.h>
#include <linux/amigaffs.h>
-#include <asm/bitops.h>
-
/* This is, of course, shamelessly stolen from fs/minix */
static int nibblemap[] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4 };
goto out;
}
- error = inode_setattr(inode, attr);
+ inode_setattr(inode, attr);
if (!error && (attr->ia_valid & ATTR_MODE))
mode_to_prot(inode);
out:
u32 ino = bh->b_blocknr;
/* store the real header ino in d_fsdata for faster lookups */
- dentry->d_fsdata = (void *)ino;
+ dentry->d_fsdata = (void *)(long)ino;
switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) {
case ST_LINKDIR:
case ST_LINKFILE:
/*
* Cache operations for Coda.
* For Linux 2.1: (C) 1997 Carnegie Mellon University
+ * For Linux 2.3: (C) 2000 Carnegie Mellon University
*
* Carnegie Mellon encourages users of this code to contribute improvements
- * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
+ * to the Coda project http://www.coda.cs.cmu.edu/ <coda@cs.cmu.edu>.
*/
#include <linux/types.h>
list_for_each(tmp, &sbi->sbi_cihead)
{
cii = list_entry(tmp, struct coda_inode_info, c_cilist);
- if ( cii->c_magic != CODA_CNODE_MAGIC ) BUG();
-
if (!cred || coda_cred_eq(cred, &cii->c_cached_cred))
cii->c_cached_perm = 0;
}
flag is C_VATTR
*/
-/*
- Some of this is pretty scary: what can disappear underneath us?
- - shrink_dcache_parent calls on purge_one_dentry which is safe:
- it only purges children.
- - dput is evil since it may recurse up the dentry tree
- */
-
-void coda_purge_dentries(struct inode *inode)
-{
- if (!inode)
- return ;
-
- /* better safe than sorry: dput could kill us */
- iget(inode->i_sb, inode->i_ino);
- /* catch the dentries later if some are still busy */
- coda_flag_inode(inode, C_PURGE);
- d_prune_aliases(inode);
- iput(inode);
-}
-
/* this won't do any harm: just flag all children */
static void coda_flag_children(struct dentry *parent, int flag)
{
extern int coda_debug;
extern int coda_print_entry;
-static ViceFid NullFID = { 0, 0, 0 };
-
inline int coda_fideq(ViceFid *fid1, ViceFid *fid2)
{
if (fid1->Vnode != fid2->Vnode) return 0;
return 1;
}
+inline int coda_isnullfid(ViceFid *fid)
+{
+ if (fid->Vnode || fid->Volume || fid->Unique) return 0;
+ return 1;
+}
+
static int coda_inocmp(struct inode *inode, unsigned long ino, void *opaque)
{
return (coda_fideq((ViceFid *)opaque, &(ITOC(inode)->c_fid)));
{
struct inode *inode;
struct coda_inode_info *cii;
- ino_t ino = attr->va_fileid;
+ ino_t ino = coda_f2i(fid);
inode = iget4(sb, ino, coda_inocmp, fid);
/* check if the inode is already initialized */
cii = ITOC(inode);
- if (coda_fideq(&cii->c_fid, &NullFID)) {
+ if (coda_isnullfid(&cii->c_fid))
/* new, empty inode found... initializing */
- cii->c_fid = *fid;
- cii->c_vnode = inode;
- }
+ cii->c_fid = *fid;
 /* we shouldn't see inode collisions anymore */
- if ( !coda_fideq(fid, &cii->c_fid) ) BUG();
+ if (!coda_fideq(fid, &cii->c_fid)) BUG();
/* always replace the attributes, type might have changed */
coda_fill_inode(inode, attr);
CDEBUG(D_DOWNCALL, "Done making inode: ino %ld, count %d with %s\n",
(*inode)->i_ino, atomic_read(&(*inode)->i_count),
- coda_f2s(&(*inode)->u.coda_i.c_fid));
+ coda_f2s(&ITOC(*inode)->c_fid));
EXIT;
return 0;
}
cii = ITOC(inode);
- if ( ! coda_fideq(&cii->c_fid, oldfid) )
- printk("What? oldfid != cii->c_fid. Call 911.\n");
+ if (!coda_fideq(&cii->c_fid, oldfid))
+ BUG();
+ /* replace fid and rehash inode */
+ /* XXX we probably need to hold some lock here! */
+ remove_inode_hash(inode);
cii->c_fid = *newfid;
+ inode->i_ino = coda_f2i(newfid);
+ insert_inode_hash(inode);
}
-
-
-
-/* convert a fid to an inode. Mostly we can compute
- the inode number from the FID, but not for volume
- mount points: those are in a list */
+/* convert a fid to an inode. */
struct inode *coda_fid_to_inode(ViceFid *fid, struct super_block *sb)
{
ino_t nr;
CDEBUG(D_INODE, "%s\n", coda_f2s(fid));
-
- /* weird fids cannot be hashed, have to look for them the hard way */
- if ( coda_fid_is_weird(fid) ) {
- struct coda_sb_info *sbi = coda_sbp(sb);
- struct list_head *le;
-
- list_for_each(le, &sbi->sbi_cihead)
- {
- cii = list_entry(le, struct coda_inode_info, c_cilist);
- if ( cii->c_magic != CODA_CNODE_MAGIC ) BUG();
-
- CDEBUG(D_DOWNCALL, "iterating, now doing %s, ino %ld\n",
- coda_f2s(&cii->c_fid), cii->c_vnode->i_ino);
-
- if ( coda_fideq(&cii->c_fid, fid) ) {
- inode = cii->c_vnode;
- CDEBUG(D_INODE, "volume root, found %ld\n", inode->i_ino);
- iget4(sb, inode->i_ino, coda_inocmp, fid);
- return inode;
- }
- }
- return NULL;
- }
-
- /* fid is not weird: ino should be computable */
nr = coda_f2i(fid);
inode = iget4(sb, nr, coda_inocmp, fid);
if ( !inode ) {
cii = ITOC(inode);
- /* The inode might already be purged due to memory pressure */
- if ( coda_fideq(&cii->c_fid, &NullFID) ) {
+ /* The inode could already be purged due to memory pressure */
+ if (coda_isnullfid(&cii->c_fid)) {
+ inode->i_nlink = 0;
iput(inode);
return NULL;
}
- /* we shouldn't have inode collisions anymore */
+ /* we shouldn't see inode collisions anymore */
if ( !coda_fideq(fid, &cii->c_fid) ) BUG();
CDEBUG(D_INODE, "found %ld\n", inode->i_ino);
#include <linux/coda_fs_i.h>
/* initialize the debugging variables */
-int coda_debug = 0;
-int coda_print_entry = 0;
+int coda_debug;
+int coda_print_entry;
int coda_access_cache = 1;
+int coda_fake_statfs;
/* print a fid */
char * coda_f2s(ViceFid *f)
/* recognize special .CONTROL name */
int coda_iscontrol(const char *name, size_t length)
{
- if ((CODA_CONTROLLEN == length) &&
- (strncmp(name, CODA_CONTROL, CODA_CONTROLLEN) == 0))
- return 1;
- return 0;
+ return ((CODA_CONTROLLEN == length) &&
+ (strncmp(name, CODA_CONTROL, CODA_CONTROLLEN) == 0));
}
/* recognize /coda inode */
int coda_isroot(struct inode *i)
{
- if ( i->i_sb->s_root->d_inode == i ) {
- return 1;
- } else {
- return 0;
- }
+ return ( i->i_sb->s_root->d_inode == i );
}
-int coda_fid_is_weird(struct ViceFid *fid)
-{
- /* volume roots */
- if ( (fid->Vnode == 1) && (fid->Unique == 1 ) )
- return 1;
- /* tmpfid unique (simulate.cc) */
- if ( fid->Unique == 0xffffffff )
- return 1;
- /* LocalFakeVnode (local.h) */
- if ( fid->Vnode == 0xfffffffd )
- return 1;
- /* LocalFileVnode (venus.private.h) */
- if ( fid->Vnode == 0xfffffffe )
- return 1;
- /* local fake vid (local.h) */
- if ( fid->Volume == 0xffffffff )
- return 1;
- /* local DirVnode (venus.private.h) */
- if ( fid->Vnode == 0xffffffff )
- return 1;
- /* FakeVnode (venus.private.h) */
- if ( fid->Vnode == 0xfffffffc )
- return 1;
-
- return 0;
-}
-
-
-
/* put the current process credentials in the cred */
void coda_load_creds(struct coda_cred *cred)
{
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
+#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/locks.h>
static int coda_dentry_delete(struct dentry *);
/* support routines */
-static void coda_prepare_fakefile(struct inode *coda_inode,
- struct file *coda_file,
- struct inode *open_inode,
- struct file *open_file,
- struct dentry *open_dentry);
+static void coda_prepare_fakefile(struct file *coda_file,
+ struct dentry *open_dentry,
+ struct file *open_file);
static int coda_venus_readdir(struct file *filp, void *dirent,
filldir_t filldir);
int coda_fsync(struct file *, struct dentry *dentry, int datasync);
read: generic_read_dir,
readdir: coda_readdir,
open: coda_open,
+ flush: coda_flush,
release: coda_release,
fsync: coda_fsync,
};
error = coda_cnode_make(&res_inode, &resfid, dir->i_sb);
if (error) return ERR_PTR(error);
-
- /* make sure we drop unexpected weird fid's */
- if (coda_f2i(&resfid) != res_inode->i_ino &&
- !coda_fid_is_weird(&resfid))
- dropme = 1;
} else if (error != -ENOENT) {
CDEBUG(D_INODE, "error for %s(%*s)%d\n",
coda_i2s(dir), (int)length, name, error);
int coda_readdir(struct file *file, void *dirent, filldir_t filldir)
{
int result = 0;
- struct file open_file;
- struct dentry open_dentry;
- struct inode *inode=file->f_dentry->d_inode, *container;
+ struct dentry *cdentry;
+ struct inode *cinode, *inode = file->f_dentry->d_inode;
+ struct file *cfile, fakefile;
+ struct coda_inode_info *cii = ITOC(inode);
ENTRY;
coda_vfs_stat.readdir++;
- if ( inode->i_mapping == &inode->i_data ) {
- CDEBUG(D_FILE, "no container inode.\n");
- return -EIO;
- }
+ cfile = cii->c_container;
+ if (!cfile) BUG();
- container = inode->i_mapping->host;
+ cinode = cii->c_container->f_dentry->d_inode;
+ if ( S_ISREG(cinode->i_mode) ) {
+ /* Venus: we must read Venus dirents from the file */
+ cdentry = cii->c_container->f_dentry;
+ coda_prepare_fakefile(file, cdentry, &fakefile);
- coda_prepare_fakefile(inode, file, container, &open_file, &open_dentry);
+ result = coda_venus_readdir(&fakefile, dirent, filldir);
- if ( S_ISREG(container->i_mode) ) {
- /* Venus: we must read Venus dirents from the file */
- result = coda_venus_readdir(&open_file, dirent, filldir);
+ file->f_pos = fakefile.f_pos;
+ file->f_version = fakefile.f_version;
} else {
/* potemkin case: we are handed a directory inode */
- result = vfs_readdir(&open_file, filldir, dirent);
+ result = vfs_readdir(file, filldir, dirent);
}
- /* we only have to restore the file position (and f_version?) */
- file->f_pos = open_file.f_pos;
- file->f_version = open_file.f_version;
-
EXIT;
return result;
}
-/* grab the ext2 inode of the container file */
-static int coda_inode_grab(dev_t dev, ino_t ino, struct inode **ind)
-{
- struct super_block *sbptr;
-
- sbptr = get_super(dev);
-
- if ( !sbptr ) {
- printk("coda_inode_grab: coda_find_super returns NULL.\n");
- return -ENXIO;
- }
-
- *ind = NULL;
- *ind = iget(sbptr, ino);
-
- if ( *ind == NULL ) {
- printk("coda_inode_grab: iget(dev: %d, ino: %ld) "
- "returns NULL.\n", dev, (long)ino);
- return -ENOENT;
- }
- CDEBUG(D_FILE, "ino: %ld, ops at %p\n", (long)ino, (*ind)->i_op);
- return 0;
-}
-
-/* ask venus to cache the file and return the inode of the container file,
- put this inode pointer in the cnode for future read/writes */
-int coda_open(struct inode *i, struct file *f)
-{
- ino_t ino;
- dev_t dev;
- int error = 0;
- struct inode *cont_inode = NULL, *old_container;
- unsigned short flags = f->f_flags & (~O_EXCL);
- unsigned short coda_flags = coda_flags_to_cflags(flags);
- struct coda_cred *cred;
- struct coda_inode_info *cii;
-
- lock_kernel();
- ENTRY;
- coda_vfs_stat.open++;
-
- CDEBUG(D_SPECIAL, "OPEN inode number: %ld, count %d, flags %o.\n",
- f->f_dentry->d_inode->i_ino, atomic_read(&f->f_dentry->d_count), flags);
-
- error = venus_open(i->i_sb, coda_i2f(i), coda_flags, &ino, &dev);
- if (error) {
- CDEBUG(D_FILE, "venus: dev %d, inode %ld, out->result %d\n",
- dev, (long)ino, error);
- unlock_kernel();
- return error;
- }
-
- /* coda_upcall returns ino number of cached object, get inode */
- CDEBUG(D_FILE, "cache file dev %d, ino %ld\n", dev, (long)ino);
- error = coda_inode_grab(dev, ino, &cont_inode);
-
- if ( error || !cont_inode ){
- printk("coda_open: coda_inode_grab error %d.", error);
- if (cont_inode)
- iput(cont_inode);
- unlock_kernel();
- return error;
- }
-
- CODA_ALLOC(cred, struct coda_cred *, sizeof(*cred));
- coda_load_creds(cred);
- f->private_data = cred;
-
- if ( i->i_mapping != &i->i_data ) {
- old_container = i->i_mapping->host;
- i->i_mapping = &i->i_data;
- iput(old_container);
- }
- i->i_mapping = cont_inode->i_mapping;
-
- cii = ITOC(i);
- cii->c_contcount++;
-
- CDEBUG(D_FILE, "result %d, coda i->i_count is %d, cii->contcount is %d for ino %ld\n",
- error, atomic_read(&i->i_count), cii->c_contcount, i->i_ino);
- CDEBUG(D_FILE, "cache ino: %ld, count %d, ops %p\n",
- cont_inode->i_ino, atomic_read(&cont_inode->i_count),
- cont_inode->i_op);
- EXIT;
- unlock_kernel();
- return 0;
-}
-
-int coda_release(struct inode *i, struct file *f)
-{
- struct inode *container = NULL;
- int error = 0;
- unsigned short flags = (f->f_flags) & (~O_EXCL);
- unsigned short cflags = coda_flags_to_cflags(flags);
- struct coda_cred *cred;
- struct coda_inode_info *cii;
-
- lock_kernel();
- ENTRY;
- coda_vfs_stat.release++;
-
- cred = (struct coda_cred *)f->private_data;
-
- if (i->i_mapping != &i->i_data)
- container = i->i_mapping->host;
-
- cii = ITOC(i);
- CDEBUG(D_FILE, "RELEASE coda (ino %ld, ct %d, cc %d) cache (ino %ld, ct %d)\n",
- i->i_ino, atomic_read(&i->i_count), cii->c_contcount,
- (container ? container->i_ino : 0),
- (container ? atomic_read(&container->i_count) : -99));
-
- if (--cii->c_contcount == 0 && container) {
- i->i_mapping = &i->i_data;
- iput(container);
- }
-
- error = venus_release(i->i_sb, coda_i2f(i), cflags, cred);
-
- f->private_data = NULL;
- if (cred)
- CODA_FREE(cred, sizeof(*cred));
-
- CDEBUG(D_FILE, "coda_release: result: %d\n", error);
- unlock_kernel();
- return error;
-}
-
/* support routines */
-/* instantiate a fake file and dentry to pass to coda_venus_readdir */
-static void coda_prepare_fakefile(struct inode *i, struct file *coda_file,
- struct inode *cont_inode,
- struct file *cont_file,
- struct dentry *cont_dentry)
+/* instantiate a fake file to pass to coda_venus_readdir */
+static void coda_prepare_fakefile(struct file *coda_file,
+ struct dentry *cont_dentry,
+ struct file *fake_file)
{
- cont_file->f_dentry = cont_dentry;
- cont_file->f_dentry->d_inode = cont_inode;
- cont_file->f_pos = coda_file->f_pos;
- cont_file->f_version = coda_file->f_version;
- cont_file->f_op = cont_inode->i_fop;
+ fake_file->f_dentry = cont_dentry;
+ fake_file->f_pos = coda_file->f_pos;
+ fake_file->f_version = coda_file->f_version;
+ fake_file->f_op = cont_dentry->d_inode->i_fop;
return ;
}
}
pos += (unsigned int) vdirent->d_reclen;
i++;
- }
+ }
- if ( i >= 1024 ) {
- printk("Repeating too much in readdir %ld\n",
- filp->f_dentry->d_inode->i_ino);
- result = -EINVAL;
- }
+ if ( i >= 1024 ) {
+ printk("Repeating too much in readdir %ld\n",
+ filp->f_dentry->d_inode->i_ino);
+ result = -EINVAL;
+ }
exit:
CODA_FREE(buff, DIR_BUFSIZE);
goto bad;
cii = ITOC(de->d_inode);
- if (! (cii->c_flags & (C_PURGE | C_FLUSH)) )
+ if (cii->c_flags & (C_PURGE | C_FLUSH))
goto out;
shrink_dcache_parent(de);
int error = 0;
int old_mode;
ino_t old_ino;
- struct inode *inode = dentry->d_inode, *container;
+ struct inode *inode = dentry->d_inode;
struct coda_inode_info *cii = ITOC(inode);
ENTRY;
dentry->d_parent->d_name.len, dentry->d_parent->d_name.name);
lock_kernel();
- if ( cii->c_flags == 0 )
+ if ( !cii->c_flags )
goto ok;
if (cii->c_flags & (C_VATTR | C_PURGE | C_FLUSH)) {
return 0;
return_bad_inode:
- if ( inode->i_mapping != &inode->i_data ) {
- container = inode->i_mapping->host;
- inode->i_mapping = &inode->i_data;
- iput(container);
+ inode->i_mapping = &inode->i_data;
+ if (cii->c_container) {
+ fput(cii->c_container);
+ cii->c_container = NULL;
}
make_bad_inode(inode);
unlock_kernel();
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/file.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/coda_psdev.h>
#include <linux/coda_proc.h>
+/* if CODA_STORE fails with EOPNOTSUPP, venus clearly doesn't support
+ * CODA_STORE/CODA_RELEASE and we fall back on using the CODA_CLOSE upcall */
+int use_coda_close;
+
static ssize_t
coda_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct inode *container = inode->i_mapping->host;
+ struct file *cfile;
+ struct inode *cinode, *inode = file->f_dentry->d_inode;
+ struct coda_inode_info *cii = ITOC(inode);
ssize_t n;
- down(&container->i_sem);
+ cfile = cii->c_container;
+ if (!cfile) BUG();
+
+ if (!cfile->f_op || cfile->f_op->write != generic_file_write)
+ BUG();
+
+ cinode = cfile->f_dentry->d_inode;
+ down(&cinode->i_sem);
n = generic_file_write(file, buf, count, ppos);
- inode->i_size = container->i_size;
+ inode->i_size = cinode->i_size;
- up(&container->i_sem);
+ up(&cinode->i_sem);
return n;
}
-/* exported from this file (used for dirs) */
-int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
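+/* ask venus to cache the object and hand back an open struct file for the
+ * container (the CODA_OPEN_BY_FD upcall); we keep that file in
+ * cii->c_container and borrow its address space for paging */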
+int coda_open(struct inode *i, struct file *f)
{
- struct inode *inode = coda_dentry->d_inode;
- struct dentry cont_dentry;
- int result = 0;
+ struct file *fh = NULL;
+ int error = 0;
+ unsigned short flags = f->f_flags & (~O_EXCL);
+ unsigned short coda_flags = coda_flags_to_cflags(flags);
+ struct coda_cred *cred;
+ struct coda_inode_info *cii;
+
+ lock_kernel();
+ ENTRY;
+ coda_vfs_stat.open++;
+
+ CDEBUG(D_SPECIAL, "OPEN inode number: %ld, count %d, flags %o.\n",
+ f->f_dentry->d_inode->i_ino, atomic_read(&f->f_dentry->d_count), flags);
+
+ error = venus_open(i->i_sb, coda_i2f(i), coda_flags, &fh);
+ if (error || !fh) {
+ CDEBUG(D_FILE, "coda_open: venus_open result %d\n", error);
+ unlock_kernel();
+ return error;
+ }
+
+ /* coda_upcall returns filehandle of container file object */
+ cii = ITOC(i);
+ if (cii->c_container)
+ fput(cii->c_container);
+
+ cii->c_contcount++;
+ cii->c_container = fh;
+ i->i_mapping = &cii->c_container->f_dentry->d_inode->i_data;
+
+ cred = kmalloc(sizeof(struct coda_cred), GFP_KERNEL);
+
+ /* If the allocation failed, we'll just muddle on. This actually works
+ * fine for normal cases. (i.e. when open credentials are the same as
+ * close credentials) */
+ if (cred) {
+ coda_load_creds(cred);
+ f->private_data = cred;
+ }
+
+ CDEBUG(D_FILE, "result %d, coda i->i_count is %d, cii->contcount is %d for ino %ld\n",
+ error, atomic_read(&i->i_count), cii->c_contcount, i->i_ino);
+ CDEBUG(D_FILE, "cache ino: %ld, count %d, ops %p\n",
+ fh->f_dentry->d_inode->i_ino,
+ atomic_read(&fh->f_dentry->d_inode->i_count),
+ fh->f_dentry->d_inode->i_op);
+ EXIT;
+ unlock_kernel();
+ return 0;
+}
+
+
+int coda_flush(struct file *file)
+{
+ unsigned short flags = (file->f_flags) & (~O_EXCL);
+ unsigned short cflags;
+ struct coda_inode_info *cii;
+ struct file *cfile;
+ struct inode *cinode, *inode;
+ int err = 0, fcnt;
+
+ ENTRY;
+ coda_vfs_stat.flush++;
+
+ /* No need to make an upcall when we have not made any modifications
+ * to the file */
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return 0;
+
+ if (use_coda_close)
+ return 0;
+
+ fcnt = file_count(file);
+ if (fcnt > 1) return 0;
+
+ cflags = coda_flags_to_cflags(flags);
+
+ inode = file->f_dentry->d_inode;
+ cii = ITOC(inode);
+ cfile = cii->c_container;
+ if (!cfile) BUG();
+
+ cinode = cfile->f_dentry->d_inode;
+
+ CDEBUG(D_FILE, "FLUSH coda (file %p ct %d)\n", file, fcnt);
+
+ err = venus_store(inode->i_sb, coda_i2f(inode), cflags,
+ (struct coda_cred *)file->private_data);
+ if (err == -EOPNOTSUPP) {
+ use_coda_close = 1;
+ err = 0;
+ }
+
+ CDEBUG(D_FILE, "coda_flush: result: %d\n", err);
+ return err;
+}
+
+int coda_release(struct inode *i, struct file *f)
+{
+ unsigned short flags = (f->f_flags) & (~O_EXCL);
+ unsigned short cflags = coda_flags_to_cflags(flags);
+ struct coda_inode_info *cii;
+ struct file *cfile;
+ int err = 0;
+
+ lock_kernel();
+ ENTRY;
+ coda_vfs_stat.release++;
+
+ if (!use_coda_close) {
+ err = venus_release(i->i_sb, coda_i2f(i), cflags);
+ if (err == -EOPNOTSUPP) {
+ use_coda_close = 1;
+ err = 0;
+ }
+ }
+
+ if (use_coda_close)
+ err = venus_close(i->i_sb, coda_i2f(i), cflags,
+ (struct coda_cred *)f->private_data);
+
+ cii = ITOC(i);
+ cfile = cii->c_container;
+ if (!cfile) BUG();
+
+ if (--cii->c_contcount) {
+ unlock_kernel();
+ return err;
+ }
+
+ i->i_mapping = &i->i_data;
+ fput(cfile);
+ cii->c_container = NULL;
+
+ if (f->private_data) {
+ kfree(f->private_data);
+ f->private_data = NULL;
+ }
+
+ unlock_kernel();
+ return err;
+}
+
+int coda_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ struct file *cfile;
+ struct dentry *cdentry;
+ struct inode *cinode, *inode = dentry->d_inode;
+ struct coda_inode_info *cii = ITOC(inode);
+ int err = 0;
ENTRY;
- coda_vfs_stat.fsync++;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
return -EINVAL;
- if ( inode->i_mapping == &inode->i_data ) {
- printk("coda_fsync: no container inode!\n");
- return -1;
- }
+ cfile = cii->c_container;
+ if (!cfile) BUG();
- cont_dentry.d_inode = inode->i_mapping->host;
-
- down(&cont_dentry.d_inode->i_sem);
- result = file_fsync(NULL, &cont_dentry, datasync);
- up(&cont_dentry.d_inode->i_sem);
+ coda_vfs_stat.fsync++;
+
+ if (cfile->f_op && cfile->f_op->fsync) {
+ cdentry = cfile->f_dentry;
+ cinode = cdentry->d_inode;
+ down(&cinode->i_sem);
+ err = cfile->f_op->fsync(cfile, cdentry, datasync);
+ up(&cinode->i_sem);
+ }
- if ( result == 0 && datasync == 0 ) {
+ if ( !err && !datasync ) {
lock_kernel();
- result = venus_fsync(inode->i_sb, coda_i2f(inode));
+ err = venus_fsync(inode->i_sb, coda_i2f(inode));
unlock_kernel();
}
- return result;
+ return err;
}
struct file_operations coda_file_operations = {
write: coda_file_write,
mmap: generic_file_mmap,
open: coda_open,
+ flush: coda_flush,
release: coda_release,
fsync: coda_fsync,
};
if(vc)
vc->vc_sb = NULL;
}
- if (root) {
+ if (root)
iput(root);
- }
+
return NULL;
}
if (!sbi) BUG();
+#if 0
+ /* check if the inode is already initialized */
+ if (inode->u.generic_ip) {
+ printk("coda_read_inode: initialized inode");
+ return;
+ }
+
+ inode->u.generic_ip = cii_alloc();
+ if (!inode->u.generic_ip) {
+ CDEBUG(D_CNODE, "coda_read_inode: failed to allocate inode info\n");
+ make_bad_inode(inode);
+ return;
+ }
+ memset(inode->u.generic_ip, 0, sizeof(struct coda_inode_info));
+#endif
+
cii = ITOC(inode);
- if (cii->c_magic == CODA_CNODE_MAGIC) {
+ if (!coda_isnullfid(&cii->c_fid)) {
printk("coda_read_inode: initialized inode");
return;
}
- memset(cii, 0, sizeof(struct coda_inode_info));
list_add(&cii->c_cilist, &sbi->sbi_cihead);
- cii->c_magic = CODA_CNODE_MAGIC;
}
static void coda_clear_inode(struct inode *inode)
{
struct coda_inode_info *cii = ITOC(inode);
- struct inode *open_inode;
ENTRY;
CDEBUG(D_SUPER, " inode->ino: %ld, count: %d\n",
inode->i_ino, atomic_read(&inode->i_count));
+ CDEBUG(D_DOWNCALL, "clearing inode: %ld, %x\n", inode->i_ino, cii->c_flags);
- if ( cii->c_magic != CODA_CNODE_MAGIC )
- return;
-
+ if (cii->c_container) BUG();
+
list_del_init(&cii->c_cilist);
+ inode->i_mapping = &inode->i_data;
+ coda_cache_clear_inode(inode);
- if ( inode->i_ino == CTL_INO )
- goto out;
+#if 0
+ cii_free(inode->u.generic_ip);
+ inode->u.generic_ip = NULL;
+#endif
- if ( inode->i_mapping != &inode->i_data ) {
- open_inode = inode->i_mapping->host;
- CDEBUG(D_SUPER, "DELINO cached file: ino %ld count %d.\n",
- open_inode->i_ino, atomic_read(&open_inode->i_count));
- inode->i_mapping = &inode->i_data;
- iput(open_inode);
- }
-
- CDEBUG(D_DOWNCALL, "clearing inode: %ld, %x\n", inode->i_ino, cii->c_flags);
- coda_cache_clear_inode(inode);
-out:
- inode->u.coda_i.c_magic = 0;
- memset(&inode->u.coda_i.c_fid, 0, sizeof(struct ViceFid));
EXIT;
}
#include <linux/devfs_fs_kernel.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/coda_psdev.h>
#include <linux/coda_proc.h>
+#define upc_free(r) kfree(r)
+
/*
* Coda stuff
*/
struct venus_comm coda_comms[MAX_CODADEVS];
+kmem_cache_t *cii_cache, *cred_cache, *upc_cache;
/*
* Device operations
}
count = nbytes;
goto out;
- }
+ }
- /* Look for the message on the processing queue. */
- lock_kernel();
- lh = &vcp->vc_processing;
- while ( (lh = lh->next) != &vcp->vc_processing ) {
+ /* Look for the message on the processing queue. */
+ lock_kernel();
+ list_for_each(lh, &vcp->vc_processing) {
tmp = list_entry(lh, struct upc_req , uc_chain);
- if (tmp->uc_unique == hdr.unique) {
+ if (tmp->uc_unique == hdr.unique) {
req = tmp;
list_del(&req->uc_chain);
- CDEBUG(D_PSDEV,"Eureka: uniq %ld on queue!\n",
- hdr.unique);
break;
}
}
- unlock_kernel();
+ unlock_kernel();
- if (!req) {
- printk("psdev_write: msg (%ld, %ld) not found\n",
- hdr.opcode, hdr.unique);
+ if (!req) {
+ printk("psdev_write: msg (%ld, %ld) not found\n",
+ hdr.opcode, hdr.unique);
retval = -ESRCH;
goto out;
- }
+ }
+
+ CDEBUG(D_PSDEV,"Eureka: uniq %ld on queue!\n", hdr.unique);
/* move data into response buffer. */
if (req->uc_outSize < nbytes) {
req->uc_flags |= REQ_WRITE;
count = nbytes;
+ /* Convert filedescriptor into a file handle */
+ if (req->uc_opcode == CODA_OPEN_BY_FD) {
+ struct coda_open_by_fd_out *outp =
+ (struct coda_open_by_fd_out *)req->uc_data;
+ outp->fh = fget(outp->fd);
+ }
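+ /* fget() runs here, in venus' process context, where the returned
+ * descriptor is valid; the reference it takes is the one coda_open()
+ * stores in cii->c_container and coda_release() finally fput()s */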
+
CDEBUG(D_PSDEV,
"Found! Count %ld for (opc,uniq)=(%ld,%ld), upc_req at %p\n",
(long)count, hdr.opcode, hdr.unique, &req);
count = nbytes;
}
- if (copy_to_user(buf, req->uc_data, count)) {
+ if (copy_to_user(buf, req->uc_data, count))
retval = -EFAULT;
- goto free_out;
- }
/* If request was not a signal, enqueue and don't free */
- if (req->uc_opcode != CODA_SIGNAL) {
+ if (!(req->uc_flags & REQ_ASYNC)) {
req->uc_flags |= REQ_READ;
list_add(&(req->uc_chain), vcp->vc_processing.prev);
goto out;
CDEBUG(D_PSDEV, "vcread: signal msg (%d, %d)\n",
req->uc_opcode, req->uc_unique);
-free_out:
CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
- CODA_FREE(req, sizeof(struct upc_req));
+ upc_free(req);
out:
unlock_kernel();
return (count ? count : retval);
lock_kernel();
idx = MINOR(inode->i_rdev);
- if(idx >= MAX_CODADEVS)
+ if(idx >= MAX_CODADEVS) {
+ unlock_kernel();
return -ENODEV;
+ }
vcp = &coda_comms[idx];
- if(vcp->vc_inuse)
+ if(vcp->vc_inuse) {
+ unlock_kernel();
return -EBUSY;
+ }
if (!vcp->vc_inuse++) {
INIT_LIST_HEAD(&vcp->vc_pending);
/* Async requests need to be freed here */
if (req->uc_flags & REQ_ASYNC) {
CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr));
- CODA_FREE(req, (u_int)sizeof(struct upc_req));
+ upc_free(req);
continue;
}
req->uc_flags |= REQ_ABORT;
static int __init init_coda(void)
{
int status;
- printk(KERN_INFO "Coda Kernel/Venus communications, v5.3.9, coda@cs.cmu.edu\n");
+ printk(KERN_INFO "Coda Kernel/Venus communications, v5.3.14, coda@cs.cmu.edu\n");
-
status = init_coda_psdev();
if ( status ) {
printk("Problem (%d) in init_coda_psdev\n", status);
status = register_filesystem(&coda_fs_type);
if (status) {
- printk("coda: failed in init_coda_fs!\n");
+ printk("coda: failed to register filesystem!\n");
+ devfs_unregister(devfs_handle);
+ devfs_unregister_chrdev(CODA_PSDEV_MAJOR,"coda_psdev");
+ coda_sysctl_clean();
}
return status;
}
if ( err != 0 ) {
printk("coda: failed to unregister filesystem\n");
}
- devfs_unregister (devfs_handle);
- devfs_unregister_chrdev(CODA_PSDEV_MAJOR,"coda_psdev");
+ devfs_unregister(devfs_handle);
+ devfs_unregister_chrdev(CODA_PSDEV_MAJOR, "coda_psdev");
coda_sysctl_clean();
}
module_init(init_coda);
module_exit(exit_coda);
+
{
struct inode *inode = page->mapping->host;
int error;
- struct coda_inode_info *cnp;
+ struct coda_inode_info *cii;
unsigned int len = PAGE_SIZE;
char *p = kmap(page);
lock_kernel();
- cnp = ITOC(inode);
+ cii = ITOC(inode);
coda_vfs_stat.follow_link++;
- error = venus_readlink(inode->i_sb, &(cnp->c_fid), p, &len);
+ error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
unlock_kernel();
if (error)
goto fail;
#define CODA_UPCALL 7 /* upcall statistics */
#define CODA_PERMISSION 8 /* permission statistics */
#define CODA_CACHE_INV 9 /* cache invalidation statistics */
+#define CODA_FAKE_STATFS 10 /* don't query venus for actual cache usage */
static ctl_table coda_table[] = {
{CODA_DEBUG, "debug", &coda_debug, sizeof(int), 0644, NULL, &proc_dointvec},
{CODA_UPCALL, "upcall_stats", NULL, 0, 0644, NULL, &do_reset_coda_upcall_stats},
{CODA_PERMISSION, "permission_stats", NULL, 0, 0644, NULL, &do_reset_coda_permission_stats},
{CODA_CACHE_INV, "cache_inv_stats", NULL, 0, 0644, NULL, &do_reset_coda_cache_inv_stats},
+ {CODA_FAKE_STATFS, "fake_statfs", &coda_fake_statfs, sizeof(int), 0600, NULL, &proc_dointvec},
{ 0 }
};
"open_by_path", /* 31 */
"resolve ", /* 32 */
"reintegrate ", /* 33 */
- "statfs " /* 34 */
+ "statfs ", /* 34 */
+ "store ", /* 35 */
+ "release " /* 36 */
};
"===================\n\n"
"File Operations:\n"
"\topen\t\t%9d\n"
- "\trelase\t\t%9d\n"
+ "\tflush\t\t%9d\n"
+ "\trelease\t\t%9d\n"
"\tfsync\t\t%9d\n\n"
"Dir Operations:\n"
"\treaddir\t\t%9d\n\n"
/* file operations */
ps->open,
+ ps->flush,
ps->release,
ps->fsync,
#ifdef CONFIG_PROC_FS
proc_fs_coda = proc_mkdir("coda", proc_root_fs);
- proc_fs_coda->owner = THIS_MODULE;
- coda_proc_create("vfs_stats", coda_vfs_stats_get_info);
- coda_proc_create("upcall_stats", coda_upcall_stats_get_info);
- coda_proc_create("permission_stats", coda_permission_stats_get_info);
- coda_proc_create("cache_inv_stats", coda_cache_inv_stats_get_info);
+ if (proc_fs_coda) {
+ proc_fs_coda->owner = THIS_MODULE;
+ coda_proc_create("vfs_stats", coda_vfs_stats_get_info);
+ coda_proc_create("upcall_stats", coda_upcall_stats_get_info);
+ coda_proc_create("permission_stats", coda_permission_stats_get_info);
+ coda_proc_create("cache_inv_stats", coda_cache_inv_stats_get_info);
+ }
#endif
#ifdef CONFIG_SYSCTL
#include <linux/coda_cache.h>
#include <linux/coda_proc.h>
+#define upc_alloc() kmalloc(sizeof(struct upc_req), GFP_KERNEL)
+#define upc_free(r) kfree(r)
static int coda_upcall(struct coda_sb_info *mntinfo, int inSize, int *outSize,
union inputArgs *buffer);
return error;
}
-int venus_setattr(struct super_block *sb, struct ViceFid *fid,
- struct coda_vattr *vattr)
+int venus_setattr(struct super_block *sb, struct ViceFid *fid,
+ struct coda_vattr *vattr)
{
union inputArgs *inp;
union outputArgs *outp;
return error;
}
+int venus_store(struct super_block *sb, struct ViceFid *fid, int flags,
+ struct coda_cred *cred)
+{
+ union inputArgs *inp;
+ union outputArgs *outp;
+ int insize, outsize, error;
+
+ insize = SIZE(store);
+ UPARG(CODA_STORE);
+
+ if ( cred ) {
+ memcpy(&(inp->ih.cred), cred, sizeof(*cred));
+ } else
+ printk("CODA: store without valid file creds.\n");
+
+ inp->coda_store.VFid = *fid;
+ inp->coda_store.flags = flags;
-int venus_release(struct super_block *sb, struct ViceFid *fid, int flags,
- struct coda_cred *cred)
+ error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+ CODA_FREE(inp, insize);
+ return error;
+}
+
+int venus_release(struct super_block *sb, struct ViceFid *fid, int flags)
{
union inputArgs *inp;
union outputArgs *outp;
int insize, outsize, error;
- insize = SIZE(close);
+ insize = SIZE(release);
+ UPARG(CODA_RELEASE);
+
+ inp->coda_release.VFid = *fid;
+ inp->coda_release.flags = flags;
+
+ error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+
+ CODA_FREE(inp, insize);
+ return error;
+}
+
+int venus_close(struct super_block *sb, struct ViceFid *fid, int flags,
+ struct coda_cred *cred)
+{
+ union inputArgs *inp;
+ union outputArgs *outp;
+ int insize, outsize, error;
+
+ insize = SIZE(release);
UPARG(CODA_CLOSE);
if ( cred ) {
}
int venus_open(struct super_block *sb, struct ViceFid *fid,
- int flags, ino_t *ino, dev_t *dev)
+ int flags, struct file **fh)
{
union inputArgs *inp;
union outputArgs *outp;
int insize, outsize, error;
- insize = SIZE(open);
- UPARG(CODA_OPEN);
+ insize = SIZE(open_by_fd);
+ UPARG(CODA_OPEN_BY_FD);
inp->coda_open.VFid = *fid;
inp->coda_open.flags = flags;
error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
- *ino = outp->coda_open.inode;
- *dev = outp->coda_open.dev;
+ *fh = outp->coda_open_by_fd.fh;
CODA_FREE(inp, insize);
return error;
/* build packet for Venus */
if (data->vi.in_size > VC_MAXDATASIZE) {
- error = -EINVAL;
+ error = -EINVAL;
goto exit;
}
/* get the data out of user space */
if ( copy_from_user((char*)inp + (long)inp->coda_ioctl.data,
data->vi.in, data->vi.in_size) ) {
- error = -EINVAL;
+ error = -EINVAL;
goto exit;
}
CDEBUG(D_FILE, "return len %d <= request len %d\n",
outp->coda_ioctl.len,
data->vi.out_size);
- error = -EINVAL;
+ error = -EINVAL;
} else {
error = verify_area(VERIFY_WRITE, data->vi.out,
data->vi.out_size);
if (copy_to_user(data->vi.out,
(char *)outp + (long)outp->coda_ioctl.data,
data->vi.out_size)) {
- error = -EINVAL;
+ error = -EINVAL;
goto exit;
}
}
insize = max(INSIZE(statfs), OUTSIZE(statfs));
UPARG(CODA_STATFS);
- error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
+ error = coda_upcall(coda_sbp(sb), insize, &outsize, inp);
if (!error) {
sfs->f_blocks = outp->coda_statfs.stat.f_blocks;
}
/* Format the request message. */
- CODA_ALLOC(req,struct upc_req *,sizeof(struct upc_req));
+ req = upc_alloc();
+ if (!req) {
+ printk("Failed to allocate upc_req structure\n");
+ return -ENOMEM;
+ }
req->uc_data = (void *)buffer;
req->uc_flags = 0;
req->uc_inSize = inSize;
req->uc_opcode, req->uc_unique, req->uc_flags);
list_del(&(req->uc_chain));
- error = -EINTR;
- CODA_ALLOC(sig_req, struct upc_req *, sizeof (struct upc_req));
+ error = -ENOMEM;
+ sig_req = upc_alloc();
+ if (!sig_req) goto exit;
+
CODA_ALLOC((sig_req->uc_data), char *, sizeof(struct coda_in_hdr));
+ if (!sig_req->uc_data) {
+ upc_free(sig_req);
+ goto exit;
+ }
+ error = -EINTR;
sig_inputArgs = (union inputArgs *)sig_req->uc_data;
sig_inputArgs->ih.opcode = CODA_SIGNAL;
sig_inputArgs->ih.unique = req->uc_unique;
}
exit:
- CODA_FREE(req, sizeof(struct upc_req));
+ upc_free(req);
if (error)
badclstats();
return error;
clstats(CODA_PURGEFID);
inode = coda_fid_to_inode(fid, sb);
if ( inode ) {
- CDEBUG(D_DOWNCALL, "purgefid: inode = %ld\n",
- inode->i_ino);
- coda_flag_inode_children(inode, C_PURGE);
- coda_purge_dentries(inode);
- iput(inode);
+ CDEBUG(D_DOWNCALL, "purgefid: inode = %ld\n",
+ inode->i_ino);
+ coda_flag_inode_children(inode, C_PURGE);
+
+ /* catch the dentries later if some are still busy */
+ coda_flag_inode(inode, C_PURGE);
+ d_prune_aliases(inode);
+
+ iput(inode);
} else
- CDEBUG(D_DOWNCALL, "purgefid: no inode\n");
+ CDEBUG(D_DOWNCALL, "purgefid: no inode\n");
return 0;
}
static inline void write_inode(struct inode *inode, int sync)
{
- if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
+ if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
inode->i_sb->s_op->write_inode(inode, sync);
}
if (server->namelen == 0 || server->namelen > maxlen)
server->namelen = maxlen;
- if(version > 2)
- sb->s_maxbytes = ~0ULL; /* Unlimited on NFSv3 */
+ sb->s_maxbytes = fsinfo.maxfilesize;
/* Fire up the writeback cache */
if (nfs_reqlist_alloc(server) < 0) {
if (sb->s_magic == 0x4d44 /* MSDOS_SUPER_MAGIC */) {
resp->f_properties = NFS3_FSF_BILLYBOY;
}
+ resp->f_maxfilesize = sb->s_maxbytes;
}
fh_put(&argp->fh);
char *opt, *val;
uopt->novrs = 0;
- uopt->blocksize = 512;
+ uopt->blocksize = 2048;
uopt->partition = 0xFFFF;
uopt->session = 0xFFFFFFFF;
uopt->lastblock = 0xFFFFFFFF;
__asm__("stw %1,%0" : "=m"(mem) : "r"(val))
#endif
-/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
- a mechanism by which the user can annotate likely branch directions and
- expect the blocks to be reordered appropriately. Define __builtin_expect
- to nothing for earlier compilers. */
-
-#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
-#define __builtin_expect(x, expected_value) (x)
-#endif
-
#endif /* __ALPHA_COMPILER_H */
#define pte_free(pte) pte_free_fast(pte)
#define pmd_free(pmd) pmd_free_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
extern int do_check_pgt_cache(int, int);
#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
-#include <asm/compiler.h> /* __builtin_expect */
+#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
/* pgd handling */
#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
/* other stuff */
#define pte_free(pte) pte_free_slow(pte)
#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* Written by David Howells (dhowells@redhat.com).
*
* Derived from asm-i386/semaphore.h
+ *
+ *
+ * The MSW of the count is the negated number of active writers and waiting
+ * lockers, and the LSW is the total number of active locks
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
+ * uncontended lock. This can be determined because XADD returns the old value.
+ * Readers increment by 1 and see a positive value when uncontended, negative
+ * if there are writers (and maybe readers) waiting (in which case it goes to
+ * sleep).
+ *
+ * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
+ * be extended to 65534 by manually checking the whole MSW rather than relying
+ * on the S flag.
+ *
+ * The value of ACTIVE_BIAS supports up to 65535 active processes.
+ *
+ * This should be totally fair - if anything is waiting, a process that wants a
+ * lock will go to the back of the queue. When the currently active lock is
+ * released, if there's a writer at the front of the queue, then that and only
+ * that will be woken up; if there's a bunch of consequtive readers at the
+ * front, then they'll all be woken up, but no other readers will be.
*/
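
To make the bias arithmetic above concrete, here is a minimal user-space
sketch (an editorial illustration, not kernel code) that replays the count
transitions with the same constants; xadd() stands in for the atomic XADD
instruction:

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE    0x00000000
#define RWSEM_ACTIVE_BIAS       0x00000001
#define RWSEM_WAITING_BIAS      (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static int count = RWSEM_UNLOCKED_VALUE;

static int xadd(int delta)
{
        int old = count;        /* XADD hands back the old value */
        count += delta;
        return old;
}

int main(void)
{
        /* uncontended down_write: the old value was 0, so the lock is ours */
        int old = xadd(RWSEM_ACTIVE_WRITE_BIAS);
        printf("down_write: old=%08x count=%08x\n",
               (unsigned)old, (unsigned)count);  /* count is now 0xffff0001 */

        /* a reader arriving now drives the count negative and must sleep */
        old = xadd(RWSEM_ACTIVE_READ_BIAS);
        printf("down_read:  old=%08x count=%08x (negative => sleep)\n",
               (unsigned)old, (unsigned)count);  /* 0xffff0002 */
        return 0;
}
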
#ifndef _I386_RWSEM_H
#include <linux/list.h>
#include <linux/spinlock.h>
-/* we use FASTCALL convention for the helpers */
-extern struct rw_semaphore *FASTCALL(__rwsem_down_read_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(__rwsem_down_write_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(__rwsem_wake(struct rw_semaphore *sem));
-
struct rwsem_waiter;
+extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
+extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
+extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
+
/*
* the semaphore definition
*/
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
spinlock_t wait_lock;
- struct rwsem_waiter *wait_front;
- struct rwsem_waiter **wait_back;
+ struct list_head wait_list;
#if RWSEM_DEBUG
int debug;
#endif
-#if RWSEM_DEBUG_MAGIC
- long __magic;
- atomic_t readers;
- atomic_t writers;
-#endif
};
/*
#else
#define __RWSEM_DEBUG_INIT /* */
#endif
-#if RWSEM_DEBUG_MAGIC
-#define __RWSEM_DEBUG_MINIT(name) , (int)&(name).__magic, ATOMIC_INIT(0), ATOMIC_INIT(0)
-#else
-#define __RWSEM_DEBUG_MINIT(name) /* */
-#endif
#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, NULL, &(name).wait_front \
- __RWSEM_DEBUG_INIT __RWSEM_DEBUG_MINIT(name) }
+{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEBUG_INIT }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
- sem->wait_front = NULL;
- sem->wait_back = &sem->wait_front;
+ INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
sem->debug = 0;
#endif
-#if RWSEM_DEBUG_MAGIC
- sem->__magic = (long)&sem->__magic;
- atomic_set(&sem->readers, 0);
- atomic_set(&sem->writers, 0);
-#endif
}
/*
"1:\n\t"
".section .text.lock,\"ax\"\n"
"2:\n\t"
- " call __rwsem_down_read_failed\n\t"
+ " pushl %%ecx\n\t"
+ " pushl %%edx\n\t"
+ " call rwsem_down_read_failed\n\t"
+ " popl %%edx\n\t"
+ " popl %%ecx\n\t"
" jmp 1b\n"
".previous"
"# ending down_read\n\t"
- : "=m"(sem->count)
- : "a"(sem), "m"(sem->count)
- : "memory");
+ : "+m"(sem->count)
+ : "a"(sem)
+ : "memory", "cc");
}
/*
tmp = RWSEM_ACTIVE_WRITE_BIAS;
__asm__ __volatile__(
"# beginning down_write\n\t"
-LOCK_PREFIX " xadd %0,(%%eax)\n\t" /* subtract 0x00010001, returns the old value */
+LOCK_PREFIX " xadd %0,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
" testl %0,%0\n\t" /* was the count 0 before? */
" jnz 2f\n\t" /* jump if we weren't granted the lock */
"1:\n\t"
".section .text.lock,\"ax\"\n"
"2:\n\t"
- " call __rwsem_down_write_failed\n\t"
+ " pushl %%ecx\n\t"
+ " call rwsem_down_write_failed\n\t"
+ " popl %%ecx\n\t"
" jmp 1b\n"
".previous\n"
"# ending down_write"
- : "+r"(tmp), "=m"(sem->count)
- : "a"(sem), "m"(sem->count)
- : "memory");
+ : "+d"(tmp), "+m"(sem->count)
+ : "a"(sem)
+ : "memory", "cc");
}
/*
*/
static inline void __up_read(struct rw_semaphore *sem)
{
- int tmp;
-
- tmp = -RWSEM_ACTIVE_READ_BIAS;
+ __s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
__asm__ __volatile__(
"# beginning __up_read\n\t"
-LOCK_PREFIX " xadd %0,(%%eax)\n\t" /* subtracts 1, returns the old value */
+LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
" js 2f\n\t" /* jump if the lock is being waited upon */
"1:\n\t"
".section .text.lock,\"ax\"\n"
"2:\n\t"
- " decl %0\n\t" /* xadd gave us the old count */
- " testl %3,%0\n\t" /* do nothing if still outstanding active readers */
+ " decw %%dx\n\t" /* do nothing if still outstanding active readers */
" jnz 1b\n\t"
- " call __rwsem_wake\n\t"
+ " pushl %%ecx\n\t"
+ " call rwsem_wake\n\t"
+ " popl %%ecx\n\t"
" jmp 1b\n"
".previous\n"
"# ending __up_read\n"
- : "+r"(tmp), "=m"(sem->count)
- : "a"(sem), "i"(RWSEM_ACTIVE_MASK), "m"(sem->count)
- : "memory");
+ : "+m"(sem->count), "+d"(tmp)
+ : "a"(sem)
+ : "memory", "cc");
}
/*
{
__asm__ __volatile__(
"# beginning __up_write\n\t"
-LOCK_PREFIX " addl %2,(%%eax)\n\t" /* adds 0x0000ffff */
- " js 2f\n\t" /* jump if the lock is being waited upon */
+ " movl %2,%%edx\n\t"
+LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
+ " jnz 2f\n\t" /* jump if the lock is being waited upon */
"1:\n\t"
".section .text.lock,\"ax\"\n"
"2:\n\t"
- " call __rwsem_wake\n\t"
+ " decw %%dx\n\t" /* did the active count reduce to 0? */
+ " jnz 1b\n\t" /* jump back if not */
+ " pushl %%ecx\n\t"
+ " call rwsem_wake\n\t"
+ " popl %%ecx\n\t"
" jmp 1b\n"
".previous\n"
"# ending __up_write\n"
- : "=m"(sem->count)
- : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
- : "memory");
+ : "+m"(sem->count)
+ : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS)
+ : "memory", "cc", "edx");
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+{
+ __asm__ __volatile__(
+LOCK_PREFIX "addl %1,%0"
+ :"=m"(sem->count)
+ :"ir"(delta), "m"(sem->count));
}
/*
int tmp = delta;
__asm__ __volatile__(
- LOCK_PREFIX "xadd %0,(%1)"
- : "+r"(tmp)
- : "r"(sem)
+LOCK_PREFIX "xadd %0,(%2)"
+ : "+r"(tmp), "=m"(sem->count)
+ : "r"(sem), "m"(sem->count)
: "memory");
return tmp+delta;
}
-/*
- * implement compare and exchange functionality on the rw-semaphore count LSW
- */
-static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new)
-{
- return cmpxchg((__u16*)&sem->count,0,RWSEM_ACTIVE_BIAS);
-}
-
#endif /* __KERNEL__ */
#endif /* _I386_RWSEM_H */
}
static inline pgd_t*
-pgd_alloc (void)
+pgd_alloc (struct mm_struct *mm)
{
/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
pgd_t *pgd = pgd_alloc_one_fast();
free_pmd_fast((pmd_t *)pgd);
}
-extern inline pgd_t *pgd_alloc(void)
+extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd = (pgd_t *)get_pmd_fast();
if (!pgd)
free_page((unsigned long) pgd);
}
-extern inline pgd_t * pgd_alloc(void)
+extern inline pgd_t * pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd;
#define pte_free_kernel(pte) free_pte_fast(pte)
#define pte_free(pte) free_pte_fast(pte)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
#define pte_free(pte) free_pte_fast(pte)
#define pmd_free(pte) free_pmd_fast(pte)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
#define pte_free(pte) free_pte_fast(pte)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
extern void __bad_pmd(pmd_t *pmd);
}
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
/*
* We don't have any real pmd's, and this code never triggers because
return (pgd_t *)ret;
}
-extern __inline__ pgd_t *pgd_alloc(void)
+extern __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
return (pgd_t *) ret;
}
-extern __inline__ pgd_t *pgd_alloc (void)
+extern __inline__ pgd_t *pgd_alloc (struct mm_struct *mm)
{
pgd_t *pgd;
#define pte_free(pte) pte_free_slow(pte)
#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
BTFIXUPDEF_CALL(pgd_t *, pgd_alloc, void)
#define pgd_free(pgd) BTFIXUP_CALL(pgd_free)(pgd)
-#define pgd_alloc() BTFIXUP_CALL(pgd_alloc)()
+#define pgd_alloc(mm) BTFIXUP_CALL(pgd_alloc)()
#endif /* _SPARC64_PGALLOC_H */
#define pte_free(pte) free_pte_fast(pte)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
-#define pgd_alloc() get_pgd_fast()
+#define pgd_alloc(mm) get_pgd_fast()
extern int do_check_pgt_cache(int, int);
* rwsem.h: R/W semaphores implemented using CAS
*
* Written by David S. Miller (davem@redhat.com), 2001.
- * Derived from asm-i386/rwsem-xadd.h
+ * Derived from asm-i386/rwsem.h
*/
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
+ " mov %%g7, %%o0\n\t"
"call %1\n\t"
- " mov %%g5, %%o0\n\t"
+ " mov %%g5, %%o1\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __up_read"
- : : "r" (sem), "i" (rwsem_wake),
+ : : "r" (sem), "i" (rwsem_up_read_wake),
"i" (RWSEM_ACTIVE_MASK)
: "g1", "g5", "g7", "memory", "cc");
}
"! beginning __up_write\n\t"
"sethi %%hi(%2), %%g1\n\t"
"or %%g1, %%lo(%2), %%g1\n"
- "1:\tlduw [%0], %%g5\n\t"
- "sub %%g5, %%g1, %%g7\n\t"
- "cas [%0], %%g5, %%g7\n\t"
- "cmp %%g5, %%g7\n\t"
- "bne,pn %%icc, 1b\n\t"
- " sub %%g7, %%g1, %%g7\n\t"
- "cmp %%g7, 0\n\t"
- "bl,pn %%icc, 3f\n\t"
+ "sub %%g5, %%g5, %%g5\n\t"
+ "cas [%0], %%g1, %%g5\n\t"
+ "cmp %%g1, %%g5\n\t"
+ "bne,pn %%icc, 1f\n\t"
" membar #StoreStore\n"
"2:\n\t"
".subsection 2\n"
- "3:\tmov %0, %%g5\n\t"
+ "3:\tmov %0, %%g1\n\t"
"save %%sp, -160, %%sp\n\t"
"mov %%g2, %%l2\n\t"
"mov %%g3, %%l3\n\t"
+ "mov %%g1, %%o0\n\t"
"call %1\n\t"
- " mov %%g5, %%o0\n\t"
+ " mov %%g5, %%o1\n\t"
"mov %%l2, %%g2\n\t"
"ba,pt %%xcc, 2b\n\t"
" restore %%l3, %%g0, %%g3\n\t"
".previous\n\t"
"! ending __up_write"
- : : "r" (sem), "i" (rwsem_wake),
+ : : "r" (sem), "i" (rwsem_up_write_wake),
"i" (RWSEM_ACTIVE_WRITE_BIAS)
- : "g1", "g5", "g7", "memory", "cc");
+ : "g1", "g5", "memory", "cc");
}
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
return tmp + delta;
}
+#define rwsem_atomic_add rwsem_atomic_update
+
static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new)
{
u32 old = (sem->count & 0xffff0000) | (u32) __old;
return prev & 0xffff;
}
+static inline signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new)
+{
+ return cmpxchg(&sem->count,old,new);
+}
+
#endif /* __KERNEL__ */
#endif /* _SPARC64_RWSEM_H */
#define CODA_RESOLVE 32
#define CODA_REINTEGRATE 33
#define CODA_STATFS 34
-#define CODA_NCALLS 35
+#define CODA_STORE 35
+#define CODA_RELEASE 36
+#define CODA_NCALLS 37
#define DOWNCALL(opcode) (opcode >= CODA_REPLACE && opcode <= CODA_PURGEFID)
};
+/* coda_store: */
+struct coda_store_in {
+ struct coda_in_hdr ih;
+ ViceFid VFid;
+ int flags;
+};
+
+struct coda_store_out {
+ struct coda_out_hdr out;
+};
+
+/* coda_release: */
+struct coda_release_in {
+ struct coda_in_hdr ih;
+ ViceFid VFid;
+ int flags;
+};
+
+struct coda_release_out {
+ struct coda_out_hdr out;
+};
+
/* coda_close: */
struct coda_close_in {
struct coda_in_hdr ih;
struct coda_open_by_fd_out {
struct coda_out_hdr oh;
int fd;
+
+#ifdef __KERNEL__
+ struct file *fh; /* not passed from userspace but used in-kernel only */
+#endif
};
/* coda_open_by_path: */
union inputArgs {
struct coda_in_hdr ih; /* NB: every struct below begins with an ih */
struct coda_open_in coda_open;
+ struct coda_store_in coda_store;
+ struct coda_release_in coda_release;
struct coda_close_in coda_close;
struct coda_ioctl_in coda_ioctl;
struct coda_getattr_in coda_getattr;
#include <linux/list.h>
#include <linux/coda.h>
-#define CODA_CNODE_MAGIC 0x47114711
/*
* coda fs inode data
*/
struct coda_inode_info {
struct ViceFid c_fid; /* Coda identifier */
u_short c_flags; /* flags (see below) */
- struct list_head c_volrootlist; /* list of volroot cnoddes */
struct list_head c_cilist; /* list of all coda inodes */
- struct inode *c_vnode; /* inode associated with cnode */
- unsigned int c_contcount; /* refcount for container inode */
+ struct file *c_container; /* container file for this cnode */
+ unsigned int c_contcount; /* refcount for container file */
struct coda_cred c_cached_cred; /* credentials of cached perms */
unsigned int c_cached_perm; /* cached access permissions */
- int c_magic; /* to verify the data structure */
};
/* flags */
/* operations shared over more than one file */
int coda_open(struct inode *i, struct file *f);
+int coda_flush(struct file *f);
int coda_release(struct inode *i, struct file *f);
int coda_permission(struct inode *inode, int mask);
int coda_revalidate_inode(struct dentry *);
int coda_notify_change(struct dentry *, struct iattr *);
+int coda_isnullfid(ViceFid *fid);
/* global variables */
extern int coda_debug;
extern int coda_print_entry;
extern int coda_access_cache;
+extern int coda_fake_statfs;
/* this file: helpers */
static __inline__ struct ViceFid *coda_i2f(struct inode *);
char *coda_f2s(ViceFid *f);
char *coda_f2s2(ViceFid *f);
int coda_isroot(struct inode *i);
-int coda_fid_is_volroot(struct ViceFid *);
-int coda_fid_is_weird(struct ViceFid *fid);
int coda_iscontrol(const char *name, size_t length);
void coda_load_creds(struct coda_cred *cred);
int coda_cred_ok(struct coda_cred *cred);
int coda_cred_eq(struct coda_cred *cred1, struct coda_cred *cred2);
-/* cache.c */
-void coda_purge_children(struct inode *, int);
-void coda_purge_dentries(struct inode *);
-
/* sysctl.h */
void coda_sysctl_init(void);
void coda_sysctl_clean(void);
/* inode to cnode access functions */
+#define ITOC(inode) (&((inode)->u.coda_i))
+
static __inline__ struct ViceFid *coda_i2f(struct inode *inode)
{
- return &(inode->u.coda_i.c_fid);
+ return &(ITOC(inode)->c_fid);
}
static __inline__ char *coda_i2s(struct inode *inode)
{
- return coda_f2s(&(inode->u.coda_i.c_fid));
+ return coda_f2s(&(ITOC(inode)->c_fid));
}
/* this will not zap the inode away */
static __inline__ void coda_flag_inode(struct inode *inode, int flag)
{
- inode->u.coda_i.c_flags |= flag;
+ ITOC(inode)->c_flags |= flag;
}
-#define ITOC(inode) (&((inode)->u.coda_i))
-
#endif
{
/* file operations */
int open;
+ int flush;
int release;
int fsync;
int venus_lookup(struct super_block *sb, struct ViceFid *fid,
const char *name, int length, int *type,
struct ViceFid *resfid);
-int venus_release(struct super_block *sb, struct ViceFid *fid, int flags,
- struct coda_cred *);
+int venus_store(struct super_block *sb, struct ViceFid *fid, int flags,
+ struct coda_cred *);
+int venus_release(struct super_block *sb, struct ViceFid *fid, int flags);
+int venus_close(struct super_block *sb, struct ViceFid *fid, int flags,
+ struct coda_cred *);
int venus_open(struct super_block *sb, struct ViceFid *fid,
- int flags, ino_t *ino, dev_t *dev);
+ int flags, struct file **f);
int venus_mkdir(struct super_block *sb, struct ViceFid *dirfid,
const char *name, int length,
struct ViceFid *newfid, struct coda_vattr *attrs);
--- /dev/null
+#ifndef __LINUX_COMPILER_H
+#define __LINUX_COMPILER_H
+
+/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
+ a mechanism by which the user can annotate likely branch directions and
+ expect the blocks to be reordered appropriately. Define __builtin_expect
+ to nothing for earlier compilers. */
+
+#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
+#define __builtin_expect(x, expected_value) (x)
+#endif
+
+#endif /* __LINUX_COMPILER_H */
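
As a usage note, the annotation is written with the expected value of the
expression as the second argument; a minimal sketch (not from this patch):

/* Sketch: tell GCC >= 2.96 that the error path is unlikely, so it can
 * move that block out of the straight-line code. On older compilers the
 * #define above reduces this to a plain test. */
static inline int process(const char *buf)
{
        if (__builtin_expect(buf == 0, 0))      /* NULL expected to be rare */
                return -1;
        /* hot path continues here */
        return 0;
}
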
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*readv) (struct file *, const struct iovec *, unsigned long, loff_t *);
ssize_t (*writev) (struct file *, const struct iovec *, unsigned long, loff_t *);
- ssize_t (*writepage) (struct file *, struct page *, int, size_t, loff_t *, int);
+ ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
};
#include <linux/netfilter_ipv4/ip_nat.h>
#endif
-#if defined(CONFIG_IP_NF_FTP) || defined(CONFIG_IP_NF_FTP_MODULE)
#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
-#ifdef CONFIG_IP_NF_NAT_NEEDED
-#include <linux/netfilter_ipv4/ip_nat_ftp.h>
-#endif
-#endif
struct ip_conntrack
{
} proto;
union {
-#if defined(CONFIG_IP_NF_FTP) || defined(CONFIG_IP_NF_FTP_MODULE)
struct ip_ct_ftp ct_ftp_info;
-#endif
} help;
#ifdef CONFIG_IP_NF_NAT_NEEDED
struct {
struct ip_nat_info info;
union {
-#if defined(CONFIG_IP_NF_FTP) || defined(CONFIG_IP_NF_FTP_MODULE)
- struct ip_nat_ftp_info ftp_info[IP_CT_DIR_MAX];
-#endif
+ /* insert nat helper private data here */
} help;
#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
enum ip_ct_ftp_type
{
/* PORT command from client */
- IP_CT_FTP_PORT = IP_CT_DIR_ORIGINAL,
+ IP_CT_FTP_PORT,
/* PASV response from server */
- IP_CT_FTP_PASV = IP_CT_DIR_REPLY
+ IP_CT_FTP_PASV,
+ /* EPRT command from client */
+ IP_CT_FTP_EPRT,
+ /* EPSV response from server */
+ IP_CT_FTP_EPSV,
};
/* We record seq number and length of ftp ip/port text here: all in
/* Used internally by get_unique_tuple(). */
#define IP_NAT_RANGE_FULL 4
+/* NAT sequence number modifications */
+struct ip_nat_seq {
+ /* position of the last TCP sequence number
+ * modification (if any) */
+ u_int32_t correction_pos;
+ /* sequence number offset before and after last modification */
+ int32_t offset_before, offset_after;
+};
+
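What the three fields mean in practice (a hedged sketch; the real adjustment
is performed by ip_nat_seq_adjust(), declared further below): a packet whose
sequence number falls after the last mangling point gets the newer offset. A
real implementation would use the wraparound-safe after() comparison instead
of a plain greater-than:

/* Editorial sketch only: select the sequence-number offset to apply to a
 * packet, given the per-direction ip_nat_seq record above. */
static int32_t nat_seq_offset(const struct ip_nat_seq *seq, u_int32_t pkt_seq)
{
        /* note: should be the wraparound-safe after() in real code */
        return pkt_seq > seq->correction_pos ? seq->offset_after
                                             : seq->offset_before;
}
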
/* Single range specification. */
struct ip_nat_range
{
/* Helper (NULL if none). */
struct ip_nat_helper *helper;
+
+ struct ip_nat_seq seq[IP_CT_DIR_MAX];
};
/* Set up the info structure to map into this range. */
+++ /dev/null
-#ifndef _IP_NAT_FTP_H
-#define _IP_NAT_FTP_H
-/* FTP extension for TCP NAT alteration. */
-
-#ifndef __KERNEL__
-#error Only in kernel.
-#endif
-
-/* Protects ftp part of conntracks */
-DECLARE_LOCK_EXTERN(ip_ftp_lock);
-
-/* We keep track of where the last SYN correction was, and the SYN
- offsets before and after that correction. Two of these (indexed by
- direction). */
-struct ip_nat_ftp_info
-{
- u_int32_t syn_correction_pos;
- int32_t syn_offset_before, syn_offset_after;
-};
-
-#endif /* _IP_NAT_FTP_H */
const char *name;
};
+extern struct list_head helpers;
+
extern int ip_nat_helper_register(struct ip_nat_helper *me);
extern void ip_nat_helper_unregister(struct ip_nat_helper *me);
+extern int ip_nat_mangle_tcp_packet(struct sk_buff **skb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int match_offset,
+ unsigned int match_len,
+ char *rep_buffer,
+ unsigned int rep_len);
+extern int ip_nat_seq_adjust(struct sk_buff *skb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo);
+extern void ip_nat_delete_sack(struct sk_buff *skb, struct tcphdr *tcph);
#endif
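
To show how a helper is expected to use the new generic mangler, here is a
hedged sketch; the function and its parameters are illustrative, patterned on
what an FTP helper needs, and newip is assumed to be in network byte order,
as NIPQUAD expects:

/* Illustrative sketch, not from the patch: rewrite a matched
 * "h1,h2,h3,h4,p1,p2" string in the payload with a new address/port.
 * ip_nat_mangle_tcp_packet() fixes the checksums and records any length
 * change in the per-direction ip_nat_seq information. */
static int mangle_rfc959(struct sk_buff **skb, struct ip_conntrack *ct,
                         enum ip_conntrack_info ctinfo,
                         u_int32_t newip, u_int16_t port,
                         unsigned int match_offset, unsigned int match_len)
{
        char buf[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];

        sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(newip),
                port >> 8, port & 0xff);
        return ip_nat_mangle_tcp_packet(skb, ct, ctinfo,
                                        match_offset, match_len,
                                        buf, strlen(buf));
}
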
unsigned long index, struct page **hash);
extern void lock_page(struct page *page);
#define find_lock_page(mapping, index) \
- __find_lock_page(mapping, index, page_hash(mapping, index))
+ __find_lock_page(mapping, index, page_hash(mapping, index))
+
+extern struct page * __find_get_swapcache_page (struct address_space * mapping,
+ unsigned long index, struct page **hash);
+#define find_get_swapcache_page(mapping, index) \
+ __find_get_swapcache_page(mapping, index, page_hash(mapping, index))
extern void __add_page_to_hash_queue(struct page * page, struct page **p);
struct pipe_inode_info {
wait_queue_head_t wait;
char *base;
+ unsigned int len;
unsigned int start;
unsigned int readers;
unsigned int writers;
#define PIPE_WAIT(inode) (&(inode).i_pipe->wait)
#define PIPE_BASE(inode) ((inode).i_pipe->base)
#define PIPE_START(inode) ((inode).i_pipe->start)
-#define PIPE_LEN(inode) ((inode).i_size)
+#define PIPE_LEN(inode) ((inode).i_pipe->len)
#define PIPE_READERS(inode) ((inode).i_pipe->readers)
#define PIPE_WRITERS(inode) ((inode).i_pipe->writers)
#define PIPE_WAITING_READERS(inode) ((inode).i_pipe->waiting_readers)
/* rwsem-spinlock.h: fallback C implementation
*
* Copyright (c) 2001 David Howells (dhowells@redhat.com).
+ * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
+ * - Derived also from comments by Linus
*/
#ifndef _LINUX_RWSEM_SPINLOCK_H
#endif
#include <linux/spinlock.h>
+#include <linux/list.h>
#ifdef __KERNEL__
struct rwsem_waiter;
/*
- * the semaphore definition
+ * the rw-semaphore definition
+ * - if activity is 0 then there are no active readers or writers
+ * - if activity is +ve then that is the number of active readers
+ * - if activity is -1 then there is one active writer
+ * - if wait_list is not empty, then there are processes waiting for the semaphore
*/
struct rw_semaphore {
- signed long count;
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ __s32 activity;
spinlock_t wait_lock;
- struct rwsem_waiter *wait_front;
- struct rwsem_waiter **wait_back;
+ struct list_head wait_list;
#if RWSEM_DEBUG
int debug;
#endif
-#if RWSEM_DEBUG_MAGIC
- long __magic;
- atomic_t readers;
- atomic_t writers;
-#endif
};
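
A user-space illustration of the activity encoding just described (blocking
is replaced by assertions so the invariants stand out; the real code also
defers to anyone already queued on wait_list):

#include <assert.h>

static int activity;    /* mirrors rw_semaphore.activity */

static void down_read(void)  { assert(activity >= 0); activity++; }
static void up_read(void)    { assert(activity > 0);  activity--; }
static void down_write(void) { assert(activity == 0); activity = -1; }
static void up_write(void)   { assert(activity == -1); activity = 0; }

int main(void)
{
        down_read(); down_read();  /* activity == 2: two active readers */
        up_read();   up_read();    /* activity == 0: idle */
        down_write();              /* activity == -1: one active writer */
        up_write();
        return 0;
}
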
/*
#else
#define __RWSEM_DEBUG_INIT /* */
#endif
-#if RWSEM_DEBUG_MAGIC
-#define __RWSEM_DEBUG_MINIT(name) , (int)&(name).__magic, ATOMIC_INIT(0), ATOMIC_INIT(0)
-#else
-#define __RWSEM_DEBUG_MINIT(name) /* */
-#endif
#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, NULL, &(name).wait_front \
- __RWSEM_DEBUG_INIT __RWSEM_DEBUG_MINIT(name) }
+{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
- sem->count = RWSEM_UNLOCKED_VALUE;
- spin_lock_init(&sem->wait_lock);
- sem->wait_front = NULL;
- sem->wait_back = &sem->wait_front;
-#if RWSEM_DEBUG
- sem->debug = 0;
-#endif
-#if RWSEM_DEBUG_MAGIC
- sem->__magic = (long)&sem->__magic;
- atomic_set(&sem->readers, 0);
- atomic_set(&sem->writers, 0);
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- int count;
- spin_lock(&sem->wait_lock);
- sem->count += RWSEM_ACTIVE_READ_BIAS;
- count = sem->count;
- spin_unlock(&sem->wait_lock);
- if (count<0)
- rwsem_down_read_failed(sem);
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
- int count;
- spin_lock(&sem->wait_lock);
- count = sem->count;
- sem->count += RWSEM_ACTIVE_WRITE_BIAS;
- spin_unlock(&sem->wait_lock);
- if (count)
- rwsem_down_write_failed(sem);
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- int count;
- spin_lock(&sem->wait_lock);
- count = sem->count;
- sem->count -= RWSEM_ACTIVE_READ_BIAS;
- spin_unlock(&sem->wait_lock);
- if (count<0 && !((count-RWSEM_ACTIVE_READ_BIAS)&RWSEM_ACTIVE_MASK))
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- int count;
- spin_lock(&sem->wait_lock);
- sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
- count = sem->count;
- spin_unlock(&sem->wait_lock);
- if (count<0)
- rwsem_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- * - only called when spinlock is already held
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
- int count;
-
- sem->count += delta;
- count = sem->count;
-
- return count;
-}
-
-/*
- * implement compare and exchange functionality on the rw-semaphore count LSW
- * - only called by __rwsem_do_wake(), so spinlock is already held when called
- */
-static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new)
-{
- __u16 prev;
-
- prev = sem->count & RWSEM_ACTIVE_MASK;
- if (prev==old)
- sem->count = (sem->count & ~RWSEM_ACTIVE_MASK) | new;
-
- return prev;
-}
+extern void FASTCALL(init_rwsem(struct rw_semaphore *sem));
+extern void FASTCALL(__down_read(struct rw_semaphore *sem));
+extern void FASTCALL(__down_write(struct rw_semaphore *sem));
+extern void FASTCALL(__up_read(struct rw_semaphore *sem));
+extern void FASTCALL(__up_write(struct rw_semaphore *sem));
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
*
* Written by David Howells (dhowells@redhat.com).
* Derived from asm-i386/semaphore.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
- * front, then they'll all be woken up, but no other readers will be.
*/
#ifndef _LINUX_RWSEM_H
#include <linux/linkage.h>
#define RWSEM_DEBUG 0
-#define RWSEM_DEBUG_MAGIC 0
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/atomic.h>
struct rw_semaphore;
-/* defined contention handler functions for the generic case
- * - these are also used for the exchange-and-add based algorithm
- */
-#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK) || defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
-/* we use FASTCALL convention for the helpers */
-extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *sem));
-#endif
-
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#include <linux/rwsem-spinlock.h> /* use a generic implementation */
#else
static inline void down_read(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering down_read");
-
-#if RWSEM_DEBUG_MAGIC
- if (sem->__magic != (long)&sem->__magic)
- BUG();
-#endif
-
__down_read(sem);
-
-#if RWSEM_DEBUG_MAGIC
- if (atomic_read(&sem->writers))
- BUG();
- atomic_inc(&sem->readers);
-#endif
-
rwsemtrace(sem,"Leaving down_read");
}
static inline void down_write(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering down_write");
-
-#if RWSEM_DEBUG_MAGIC
- if (sem->__magic != (long)&sem->__magic)
- BUG();
-#endif
-
__down_write(sem);
-
-#if RWSEM_DEBUG_MAGIC
- if (atomic_read(&sem->writers))
- BUG();
- if (atomic_read(&sem->readers))
- BUG();
- atomic_inc(&sem->writers);
-#endif
-
rwsemtrace(sem,"Leaving down_write");
}
static inline void up_read(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering up_read");
-
-#if RWSEM_DEBUG_MAGIC
- if (atomic_read(&sem->writers))
- BUG();
- atomic_dec(&sem->readers);
-#endif
__up_read(sem);
-
rwsemtrace(sem,"Leaving up_read");
}
static inline void up_write(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering up_write");
-
-#if RWSEM_DEBUG_MAGIC
- if (atomic_read(&sem->readers))
- BUG();
- if (atomic_read(&sem->writers) != 1)
- BUG();
- atomic_dec(&sem->writers);
-#endif
__up_write(sem);
-
rwsemtrace(sem,"Leaving up_write");
}
extern int try_to_free_pages(unsigned int gfp_mask);
/* linux/mm/page_io.c */
-extern void rw_swap_page(int, struct page *, int);
-extern void rw_swap_page_nolock(int, swp_entry_t, char *, int);
+extern void rw_swap_page(int, struct page *);
+extern void rw_swap_page_nolock(int, swp_entry_t, char *);
/* linux/mm/page_alloc.c */
extern void add_to_swap_cache(struct page *, swp_entry_t);
extern int swap_check_entry(unsigned long);
extern struct page * lookup_swap_cache(swp_entry_t);
-extern struct page * read_swap_cache_async(swp_entry_t, int);
-#define read_swap_cache(entry) read_swap_cache_async(entry, 1);
+extern struct page * read_swap_cache_async(swp_entry_t);
/* linux/mm/oom_kill.c */
extern int out_of_memory(void);
/* usbdevfs inode list */
struct list_head inodes;
+
+ atomic_t refcnt;
};
#define USB_MAXCHILDREN (16) /* This is arbitrary */
bsdism;
unsigned char debug;
unsigned char rcvtstamp;
+ unsigned char use_write_queue;
unsigned char userlocks;
+ /* Hole of 3 bytes. Try to pack. */
int route_caps;
int proc;
unsigned long lingertime;
atomic_set(&mm->mm_count, 1);
init_rwsem(&mm->mmap_sem);
mm->page_table_lock = SPIN_LOCK_UNLOCKED;
- mm->pgd = pgd_alloc();
+ mm->pgd = pgd_alloc(mm);
if (mm->pgd)
return mm;
free_mm(mm);
L_TARGET := lib.a
-export-objs := cmdline.o
+export-objs := cmdline.o rwsem-spinlock.o rwsem.o
obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o
-ifneq ($(CONFIG_RWSEM_GENERIC_SPINLOCK)$(CONFIG_RWSEM_XCHGADD_ALGORITHM),nn)
-export-objs += rwsem.o
-obj-y += rwsem.o
-endif
+obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
obj-y += dec_and_lock.o
--- /dev/null
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
+ * implementation
+ *
+ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
+ * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
+ * - Derived also from comments by Linus
+ */
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+struct rwsem_waiter {
+ struct list_head list;
+ struct task_struct *task;
+ unsigned int flags;
+#define RWSEM_WAITING_FOR_READ 0x00000001
+#define RWSEM_WAITING_FOR_WRITE 0x00000002
+};
+
+#if RWSEM_DEBUG
+void rwsemtrace(struct rw_semaphore *sem, const char *str)
+{
+ if (sem->debug)
+ printk("[%d] %s({%d,%d})\n",
+ current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
+}
+#endif
+
+/*
+ * initialise the semaphore
+ */
+void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->activity = 0;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+ sem->debug = 0;
+#endif
+}
+
+/*
+ * handle the lock being released whilst there are processes blocked on it that can now run
+ * - if we come here, then:
+ * - the 'active count' _reached_ zero
+ * - the 'waiting count' is non-zero
+ * - the spinlock must be held by the caller
+ * - woken process blocks are discarded from the list after having flags zeroised
+ */
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+{
+ struct rwsem_waiter *waiter;
+ int woken;
+
+ rwsemtrace(sem,"Entering __rwsem_do_wake");
+
+ waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+
+ /* try to grant a single write lock if there's a writer at the front of the queue
+ * - we leave the 'waiting count' incremented to signify potential contention
+ */
+ if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
+ sem->activity = -1;
+ list_del(&waiter->list);
+ waiter->flags = 0;
+ wake_up_process(waiter->task);
+ goto out;
+ }
+
+ /* grant an infinite number of read locks to the readers at the front of the queue */
+ woken = 0;
+ do {
+ list_del(&waiter->list);
+ waiter->flags = 0;
+ wake_up_process(waiter->task);
+ woken++;
+ if (list_empty(&sem->wait_list))
+ break;
+ waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+ } while (waiter->flags&RWSEM_WAITING_FOR_READ);
+
+ sem->activity += woken;
+
+ out:
+ rwsemtrace(sem,"Leaving __rwsem_do_wake");
+ return sem;
+}
+
+/*
+ * wake a single writer
+ */
+static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
+{
+ struct rwsem_waiter *waiter;
+
+ sem->activity = -1;
+
+ waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+ list_del(&waiter->list);
+
+ waiter->flags = 0;
+ wake_up_process(waiter->task);
+ return sem;
+}
+
+/*
+ * get a read lock on the semaphore
+ */
+void __down_read(struct rw_semaphore *sem)
+{
+ struct rwsem_waiter waiter;
+ struct task_struct *tsk;
+
+ rwsemtrace(sem,"Entering __down_read");
+
+ spin_lock(&sem->wait_lock);
+
+ if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+ /* granted */
+ sem->activity++;
+ spin_unlock(&sem->wait_lock);
+ goto out;
+ }
+
+ tsk = current;
+ set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+
+ /* set up my own style of waitqueue */
+ waiter.task = tsk;
+ waiter.flags = RWSEM_WAITING_FOR_READ;
+
+ list_add_tail(&waiter.list,&sem->wait_list);
+
+ /* we don't need to touch the semaphore struct anymore */
+ spin_unlock(&sem->wait_lock);
+
+ /* wait to be given the lock */
+ for (;;) {
+ if (!waiter.flags)
+ break;
+ schedule();
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ }
+
+ tsk->state = TASK_RUNNING;
+
+ out:
+ rwsemtrace(sem,"Leaving __down_read");
+}
+
+/*
+ * get a write lock on the semaphore
+ * - note that we increment the waiting count anyway to indicate an exclusive lock
+ */
+void __down_write(struct rw_semaphore *sem)
+{
+ struct rwsem_waiter waiter;
+ struct task_struct *tsk;
+
+ rwsemtrace(sem,"Entering __down_write");
+
+ spin_lock(&sem->wait_lock);
+
+ if (sem->activity==0 && list_empty(&sem->wait_list)) {
+ /* granted */
+ sem->activity = -1;
+ spin_unlock(&sem->wait_lock);
+ goto out;
+ }
+
+ tsk = current;
+ set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+
+ /* set up my own style of waitqueue */
+ waiter.task = tsk;
+ waiter.flags = RWSEM_WAITING_FOR_WRITE;
+
+ list_add_tail(&waiter.list,&sem->wait_list);
+
+ /* we don't need to touch the semaphore struct anymore */
+ spin_unlock(&sem->wait_lock);
+
+ /* wait to be given the lock */
+ for (;;) {
+ if (!waiter.flags)
+ break;
+ schedule();
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ }
+
+ tsk->state = TASK_RUNNING;
+
+ out:
+ rwsemtrace(sem,"Leaving __down_write");
+}
+
+/*
+ * release a read lock on the semaphore
+ */
+void __up_read(struct rw_semaphore *sem)
+{
+ rwsemtrace(sem,"Entering __up_read");
+
+ spin_lock(&sem->wait_lock);
+
+ if (--sem->activity==0 && !list_empty(&sem->wait_list))
+ sem = __rwsem_wake_one_writer(sem);
+
+ spin_unlock(&sem->wait_lock);
+
+ rwsemtrace(sem,"Leaving __up_read");
+}
+
+/*
+ * release a write lock on the semaphore
+ */
+void __up_write(struct rw_semaphore *sem)
+{
+ rwsemtrace(sem,"Entering __up_write");
+
+ spin_lock(&sem->wait_lock);
+
+ sem->activity = 0;
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem);
+
+ spin_unlock(&sem->wait_lock);
+
+ rwsemtrace(sem,"Leaving __up_write");
+}
+
+EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__up_read);
+EXPORT_SYMBOL(__up_write);
+#if RWSEM_DEBUG
+EXPORT_SYMBOL(rwsemtrace);
+#endif
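
For orientation, a typical caller never touches these handlers directly; it
goes through the arch-independent wrappers in <linux/rwsem.h>. A brief sketch
with a hypothetical semaphore:

static DECLARE_RWSEM(my_sem);           /* hypothetical example semaphore */

static void reader_side(void)
{
        down_read(&my_sem);
        /* any number of readers may hold the lock here concurrently */
        up_read(&my_sem);
}

static void writer_side(void)
{
        down_write(&my_sem);
        /* exclusive access */
        up_write(&my_sem);
}
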
#include <linux/module.h>
struct rwsem_waiter {
- struct rwsem_waiter *next;
+ struct list_head list;
struct task_struct *task;
unsigned int flags;
#define RWSEM_WAITING_FOR_READ 0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
-#define RWSEM_WAITER_MAGIC 0x52575345
-
-static struct rw_semaphore *FASTCALL(__rwsem_do_wake(struct rw_semaphore *sem));
#if RWSEM_DEBUG
+#undef rwsemtrace
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
+ printk("sem=%p\n",sem);
+ printk("(sem)=%08lx\n",sem->count);
if (sem->debug)
- printk("[%d] %s(count=%08lx)\n",current->pid,str,sem->count);
+ printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
}
#endif
/*
* handle the lock being released whilst there are processes blocked on it that can now run
* - if we come here, then:
- * - the 'active part' of the count (&0x0000ffff) reached zero (but may no longer be zero)
+ * - the 'active part' of the count (&0x0000ffff) reached zero but has been re-incremented
* - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
- * - the spinlock must be held before entry
- * - woken process blocks are discarded from the list after having flags zeroised
+ * - there must be someone on the queue
+ * - the spinlock must be held by the caller
+ * - woken process blocks are discarded from the list after having flags zeroised
*/
-static struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
+static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
- struct rwsem_waiter *waiter, *next;
+ struct rwsem_waiter *waiter;
+ struct list_head *next;
+ signed long oldcount;
int woken, loop;
rwsemtrace(sem,"Entering __rwsem_do_wake");
- /* try to grab an 'activity' marker
- * - need to make sure two copies of rwsem_wake() don't do this for two separate processes
- * simultaneously
- * - be horribly naughty, and only deal with the LSW of the atomic counter
- */
- if (rwsem_cmpxchgw(sem,0,RWSEM_ACTIVE_BIAS)!=0) {
- rwsemtrace(sem,"__rwsem_do_wake: abort wakeup due to renewed activity");
- goto out;
- }
+ /* only wake someone up if we can transition the active part of the count from 0 -> 1 */
+ try_again:
+ oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
+ if (oldcount & RWSEM_ACTIVE_MASK)
+ goto undo;
- /* check the wait queue is populated */
- waiter = sem->wait_front;
-
- if (__builtin_expect(!waiter,0)) {
- printk("__rwsem_do_wake(): wait_list unexpectedly empty\n");
- BUG();
- goto out;
- }
-
- if (__builtin_expect(!waiter->flags,0)) {
- printk("__rwsem_do_wake(): wait_list front apparently not waiting\n");
- BUG();
- goto out;
- }
-
- next = NULL;
+ waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
/* try to grant a single write lock if there's a writer at the front of the queue
* - note we leave the 'active part' of the count incremented by 1 and the waiting part
* incremented by 0x00010000
*/
- if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
- next = waiter->next;
- waiter->flags = 0;
- wake_up_process(waiter->task);
- goto discard_woken_processes;
- }
+ if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
+ goto readers_only;
+
+ list_del(&waiter->list);
+ waiter->flags = 0;
+ wake_up_process(waiter->task);
+ goto out;
/* grant an infinite number of read locks to the readers at the front of the queue
* - note we increment the 'active part' of the count by the number of readers (less one
* for the activity decrement we've already done) before waking any processes up
*/
+ readers_only:
woken = 0;
do {
woken++;
- waiter = waiter->next;
- } while (waiter && waiter->flags&RWSEM_WAITING_FOR_READ);
+
+ if (waiter->list.next==&sem->wait_list)
+ break;
+
+ waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);
+
+ } while (waiter->flags & RWSEM_WAITING_FOR_READ);
loop = woken;
woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS;
woken -= RWSEM_ACTIVE_BIAS;
- rwsem_atomic_update(woken,sem);
+ rwsem_atomic_add(woken,sem);
- waiter = sem->wait_front;
+ next = sem->wait_list.next;
for (; loop>0; loop--) {
- next = waiter->next;
+ waiter = list_entry(next,struct rwsem_waiter,list);
+ next = waiter->list.next;
waiter->flags = 0;
wake_up_process(waiter->task);
- waiter = next;
}
- discard_woken_processes:
- sem->wait_front = next;
- if (!next) sem->wait_back = &sem->wait_front;
+ sem->wait_list.next = next;
+ next->prev = &sem->wait_list;
out:
rwsemtrace(sem,"Leaving __rwsem_do_wake");
return sem;
+
+ /* undo the change to count, but check for a transition 1->0 */
+ undo:
+ if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
+ goto out;
+ goto try_again;
}
/*
- * wait for the read lock to be granted
- * - need to repeal the increment made inline by the caller
- * - need to throw a write-lock style spanner into the works (sub 0x00010000 from count)
+ * wait for a lock to be granted
*/
-struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
+static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
+ struct rwsem_waiter *waiter,
+ __s32 adjustment)
{
- struct rwsem_waiter waiter;
struct task_struct *tsk = current;
signed long count;
- rwsemtrace(sem,"Entering rwsem_down_read_failed");
-
set_task_state(tsk,TASK_UNINTERRUPTIBLE);
/* set up my own style of waitqueue */
- waiter.next = NULL;
- waiter.task = tsk;
- waiter.flags = RWSEM_WAITING_FOR_READ;
-
spin_lock(&sem->wait_lock);
+ waiter->task = tsk;
- *sem->wait_back = &waiter; /* add to back of queue */
- sem->wait_back = &waiter.next;
+ list_add_tail(&waiter->list,&sem->wait_list);
/* note that we're now waiting on the lock, but no longer actively read-locking */
- count = rwsem_atomic_update(RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS,sem);
+ count = rwsem_atomic_update(adjustment,sem);
/* if there are no longer active locks, wake the front queued process(es) up
* - it might even be this process, since the waker takes a more active part
*/
if (!(count & RWSEM_ACTIVE_MASK))
- __rwsem_do_wake(sem);
+ sem = __rwsem_do_wake(sem);
spin_unlock(&sem->wait_lock);
/* wait to be given the lock */
for (;;) {
- if (!waiter.flags)
+ if (!waiter->flags)
break;
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
tsk->state = TASK_RUNNING;
+ return sem;
+}
+
+/*
+ * wait for the read lock to be granted
+ */
+struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+ struct rwsem_waiter waiter;
+
+ rwsemtrace(sem,"Entering rwsem_down_read_failed");
+
+ waiter.flags = RWSEM_WAITING_FOR_READ;
+ rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);
+
rwsemtrace(sem,"Leaving rwsem_down_read_failed");
return sem;
}
struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
- struct task_struct *tsk = current;
- signed long count;
rwsemtrace(sem,"Entering rwsem_down_write_failed");
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
- /* set up my own style of waitqueue */
- waiter.next = NULL;
- waiter.task = tsk;
waiter.flags = RWSEM_WAITING_FOR_WRITE;
-
- spin_lock(&sem->wait_lock);
-
- *sem->wait_back = &waiter; /* add to back of queue */
- sem->wait_back = &waiter.next;
-
- /* note that we're waiting on the lock, but no longer actively locking */
- count = rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem);
-
- /* if there are no longer active locks, wake the front queued process(es) up
- * - it might even be this process, since the waker takes a more active part
- */
- if (!(count & RWSEM_ACTIVE_MASK))
- __rwsem_do_wake(sem);
-
- spin_unlock(&sem->wait_lock);
-
- /* wait to be given the lock */
- for (;;) {
- if (!waiter.flags)
- break;
- schedule();
- set_task_state(tsk, TASK_UNINTERRUPTIBLE);
- }
-
- tsk->state = TASK_RUNNING;
+ rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);
rwsemtrace(sem,"Leaving rwsem_down_write_failed");
return sem;
}
/*
- * spinlock grabbing wrapper for __rwsem_do_wake()
+ * handle waking up a waiter on the semaphore
+ * - up_read has decremented the active part of the count if we come here
*/
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
spin_lock(&sem->wait_lock);
- sem = __rwsem_do_wake(sem);
+ /* do nothing if list empty */
+ if (!list_empty(&sem->wait_list))
+ sem = __rwsem_do_wake(sem);
spin_unlock(&sem->wait_lock);
rwsemtrace(sem,"Leaving rwsem_wake");
+
return sem;
}
return page;
}
+/*
+ * Find a swapcache page (and get a reference) or return NULL.
+ * The SwapCache check is protected by the pagecache lock.
+ */
+struct page * __find_get_swapcache_page(struct address_space *mapping,
+ unsigned long offset, struct page **hash)
+{
+ struct page *page;
+
+ /*
+ * We need the LRU lock to protect against page_launder().
+ */
+
+ spin_lock(&pagecache_lock);
+ page = __find_page_nolock(mapping, offset, *hash);
+ if (page) {
+ spin_lock(&pagemap_lru_lock);
+ if (PageSwapCache(page))
+ page_cache_get(page);
+ else
+ page = NULL;
+ spin_unlock(&pagemap_lru_lock);
+ }
+ spin_unlock(&pagecache_lock);
+
+ return page;
+}
+
/*
* Same as the above, but lock the page too, verifying that
* it's still valid once we own it.
if (size > count)
size = count;
- if (file->f_op->writepage) {
- written = file->f_op->writepage(file, page, offset,
- size, &file->f_pos, size<count);
+ if (file->f_op->sendpage) {
+ written = file->f_op->sendpage(file, page, offset,
+ size, &file->f_pos, size<count);
} else {
char *kaddr;
mm_segment_t old_fs;
break;
}
/* Ok, do the async read-ahead now */
- new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset), 0);
+ new_page = read_swap_cache_async(SWP_ENTRY(SWP_TYPE(entry), offset));
if (new_page != NULL)
page_cache_release(new_page);
swap_free(SWP_ENTRY(SWP_TYPE(entry), offset));
if (!page) {
lock_kernel();
swapin_readahead(entry);
- page = read_swap_cache(entry);
+ page = read_swap_cache_async(entry);
unlock_kernel();
if (!page) {
spin_lock(&mm->page_table_lock);
return -1;
}
-
+ wait_on_page(page);
flush_page_to_ram(page);
flush_icache_page(vma, page);
}
if (gfp_mask & __GFP_WAIT) {
memory_pressure++;
try_to_free_pages(gfp_mask);
- wakeup_bdflush(0);
goto try_again;
}
}
* that shared pages stay shared while being swapped.
*/
-static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page, int wait)
+static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page)
{
unsigned long offset;
int zones[PAGE_SIZE/512];
kdev_t dev = 0;
int block_size;
struct inode *swapf = 0;
+ int wait = 0;
/* Don't allow too many pending pages in flight.. */
if ((rw == WRITE) && atomic_read(&nr_async_pages) >
* - it's marked as being swap-cache
* - it's associated with the swap inode
*/
-void rw_swap_page(int rw, struct page *page, int wait)
+void rw_swap_page(int rw, struct page *page)
{
swp_entry_t entry;
PAGE_BUG(page);
if (page->mapping != &swapper_space)
PAGE_BUG(page);
- if (!rw_swap_page_base(rw, entry, page, wait))
+ if (!rw_swap_page_base(rw, entry, page))
UnlockPage(page);
}
* Therefore we can't use it. Later when we can remove the need for the
* lock map and we can reduce the number of functions exported.
*/
-void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf, int wait)
+void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf)
{
struct page *page = virt_to_page(buf);
PAGE_BUG(page);
/* needs sync_page to wait for I/O completion */
page->mapping = &swapper_space;
- if (!rw_swap_page_base(rw, entry, page, wait))
+ if (!rw_swap_page_base(rw, entry, page))
UnlockPage(page);
+ wait_on_page(page);
page->mapping = NULL;
}
static int shmem_free_swp(swp_entry_t *dir, unsigned int count)
{
swp_entry_t *ptr, entry;
- struct page * page;
int freed = 0;
for (ptr = dir; ptr < dir + count; ptr++) {
entry = *ptr;
*ptr = (swp_entry_t){0};
freed++;
- if ((page = lookup_swap_cache(entry)) != NULL) {
- delete_from_swap_cache(page);
- page_cache_release(page);
- }
+
+ /* vmscan will do the actual page freeing later.. */
swap_free (entry);
}
return freed;
inode = page->mapping->host;
info = &inode->u.shmem_i;
swap = __get_swap_page(2);
- if (!swap.val) {
- set_page_dirty(page);
- UnlockPage(page);
- return -ENOMEM;
- }
+ error = -ENOMEM;
+ if (!swap.val)
+ goto out;
spin_lock(&info->lock);
entry = shmem_swp_entry(info, page->index);
/* Add it to the swap cache */
add_to_swap_cache(page, swap);
page_cache_release(page);
- set_page_dirty(page);
info->swapped++;
spin_unlock(&info->lock);
out:
+ set_page_dirty(page);
UnlockPage(page);
return error;
}
spin_unlock (&info->lock);
lock_kernel();
swapin_readahead(*entry);
- page = read_swap_cache(*entry);
+ page = read_swap_cache_async(*entry);
unlock_kernel();
if (!page)
return ERR_PTR(-ENOMEM);
+ wait_on_page(page);
if (!Page_Uptodate(page)) {
page_cache_release(page);
return ERR_PTR(-EIO);
if (TryLockPage(page))
goto wait_retry;
- if (swap_count(page) > 2)
- BUG();
-
swap_free(*entry);
*entry = (swp_entry_t) {0};
delete_from_swap_cache_nolock(page);
*ptr = NOPAGE_SIGBUS;
return error;
sigbus:
+ up (&inode->i_sem);
*ptr = NOPAGE_SIGBUS;
return -EFAULT;
}
return 0;
in_use:
- rw_swap_page(WRITE, page, 0);
+ rw_swap_page(WRITE, page);
return 0;
}
/*
* Right now the pagecache is 32-bit only. But it's a 32 bit index. =)
*/
-repeat:
- found = find_lock_page(&swapper_space, entry.val);
+ found = find_get_swapcache_page(&swapper_space, entry.val);
if (!found)
return 0;
- /*
- * Though the "found" page was in the swap cache an instant
- * earlier, it might have been removed by refill_inactive etc.
- * Re search ... Since find_lock_page grabs a reference on
- * the page, it can not be reused for anything else, namely
- * it can not be associated with another swaphandle, so it
- * is enough to check whether the page is still in the scache.
- */
- if (!PageSwapCache(found)) {
- UnlockPage(found);
- page_cache_release(found);
- goto repeat;
- }
+ if (!PageSwapCache(found))
+ BUG();
if (found->mapping != &swapper_space)
- goto out_bad;
+ BUG();
#ifdef SWAP_CACHE_INFO
swap_cache_find_success++;
#endif
- UnlockPage(found);
return found;
}
-
-out_bad:
- printk (KERN_ERR "VM: Found a non-swapper swap page!\n");
- UnlockPage(found);
- page_cache_release(found);
- return 0;
}
/*
* the swap entry is no longer in use.
*/
-struct page * read_swap_cache_async(swp_entry_t entry, int wait)
+struct page * read_swap_cache_async(swp_entry_t entry)
{
struct page *found_page = 0, *new_page;
unsigned long new_page_addr;
*/
lock_page(new_page);
add_to_swap_cache(new_page, entry);
- rw_swap_page(READ, new_page, wait);
+ rw_swap_page(READ, new_page);
return new_page;
out_free_page:
/* Get a page for the entry, using the existing swap
cache page if there is one. Otherwise, get a clean
page and read the swap into it. */
- page = read_swap_cache(entry);
+ page = read_swap_cache_async(entry);
if (!page) {
swap_free(entry);
return -ENOMEM;
}
+ lock_page(page);
if (PageSwapCache(page))
- delete_from_swap_cache(page);
+ delete_from_swap_cache_nolock(page);
+ UnlockPage(page);
read_lock(&tasklist_lock);
for_each_task(p)
unuse_process(p->mm, entry, page);
}
lock_page(virt_to_page(swap_header));
- rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header, 1);
+ rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);
if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
swap_header_version = 1;
* handler for protocols to use and generic option handler.
*
*
- * Version: $Id: sock.c,v 1.109 2001/03/03 01:20:10 davem Exp $
+ * Version: $Id: sock.c,v 1.110 2001/04/20 20:46:19 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
/* In case it might be waiting for more memory. */
atomic_sub(skb->truesize, &sk->wmem_alloc);
- sk->write_space(sk);
+ if (!sk->use_write_queue)
+ sk->write_space(sk);
sock_put(sk);
}
/*
* Linux NET3: IP/IP protocol decoder.
*
- * Version: $Id: ipip.c,v 1.44 2001/03/29 06:29:09 davem Exp $
+ * Version: $Id: ipip.c,v 1.45 2001/04/19 22:32:55 davem Exp $
*
* Authors:
* Sam Lantinga (slouken@cs.ucdavis.edu) 02/01/95
# objects for the standalone - connection tracking / NAT
ip_conntrack-objs := ip_conntrack_standalone.o $(ip_nf_conntrack-objs)
-iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o $(ip_nf_nat-objs)
+iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o ip_nat_helper.o $(ip_nf_nat-objs)
# objects for backwards compatibility mode
ip_nf_compat-objs := ip_fw_compat.o ip_fw_compat_redir.o ip_fw_compat_masq.o $(ip_nf_conntrack-objs) $(ip_nf_nat-objs)
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/ip.h>
+#include <linux/ctype.h>
#include <net/checksum.h>
#include <net/tcp.h>
DECLARE_LOCK(ip_ftp_lock);
struct module *ip_conntrack_ftp = THIS_MODULE;
-#define SERVER_STRING "227 Entering Passive Mode ("
-#define CLIENT_STRING "PORT "
+#define MAX_PORTS 8
+static int ports[MAX_PORTS];
+static int ports_c;
+#ifdef MODULE_PARM
+MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
+#endif
+
+static int loose = 0;
+MODULE_PARM(loose, "i");
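+/* Usage note (illustrative, not part of this patch): the tracked ports
+   can be given at load time, e.g.
+	insmod ip_conntrack_ftp ports=21,2121
+   If no ports are given, init() falls back to port 21.  Setting
+   loose=1 also creates expectations when the address in the command
+   payload does not match the connection's source address, which is
+   otherwise refused (see the check in help() below). */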
#if 0
#define DEBUGP printk
#define DEBUGP(format, args...)
#endif
-static struct {
+static int try_rfc959(const char *, size_t, u_int32_t [], char);
+static int try_eprt(const char *, size_t, u_int32_t [], char);
+static int try_epsv_response(const char *, size_t, u_int32_t [], char);
+
+static struct ftp_search {
+ enum ip_conntrack_dir dir;
const char *pattern;
size_t plen;
+ char skip;
char term;
-} search[2] = {
- [IP_CT_FTP_PORT] { CLIENT_STRING, sizeof(CLIENT_STRING) - 1, '\r' },
- [IP_CT_FTP_PASV] { SERVER_STRING, sizeof(SERVER_STRING) - 1, ')' }
+ enum ip_ct_ftp_type ftptype;
+ int (*getnum)(const char *, size_t, u_int32_t[], char);
+} search[] = {
+ {
+ IP_CT_DIR_ORIGINAL,
+ "PORT", sizeof("PORT") - 1, ' ', '\r',
+ IP_CT_FTP_PORT,
+ try_rfc959,
+ },
+ {
+ IP_CT_DIR_REPLY,
+ "227 ", sizeof("227 ") - 1, '(', ')',
+ IP_CT_FTP_PASV,
+ try_rfc959,
+ },
+ {
+ IP_CT_DIR_ORIGINAL,
+ "EPRT", sizeof("EPRT") - 1, ' ', '\r',
+ IP_CT_FTP_EPRT,
+ try_eprt,
+ },
+ {
+ IP_CT_DIR_REPLY,
+ "229 ", sizeof("229 ") - 1, '(', ')',
+ IP_CT_FTP_EPSV,
+ try_epsv_response,
+ },
};
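+/* How a search entry is used: for packets in the matching direction,
+   find_pattern() checks for `pattern' at the start of the data, skips
+   ahead to the `skip' character, and hands the remainder to `getnum',
+   which must fill array[] and stop at `term'. */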
-/* Returns 0, or length of numbers */
-static int try_number(const char *data, size_t dlen, u_int32_t array[6],
- char term)
+static int try_number(const char *data, size_t dlen, u_int32_t array[],
+ int array_size, char sep, char term)
{
u_int32_t i, len;
+ memset(array, 0, sizeof(array[0])*array_size);
+
/* Keep data pointing at next char. */
- for (i = 0, len = 0; len < dlen; len++, data++) {
+ for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) {
if (*data >= '0' && *data <= '9') {
array[i] = array[i]*10 + *data - '0';
}
- else if (*data == ',')
+ else if (*data == sep)
i++;
else {
/* Unexpected character; true if it's the
terminator and we're finished. */
- if (*data == term && i == 5)
+ if (*data == term && i == array_size - 1)
return len;
DEBUGP("Char %u (got %u nums) `%u' unexpected\n",
return 0;
}
}
+ DEBUGP("Failed to fill %u numbers separated by %c\n", array_size, sep);
return 0;
}
+/* Returns 0, or length of numbers: 192,168,1,1,5,6 */
+static int try_rfc959(const char *data, size_t dlen, u_int32_t array[6],
+ char term)
+{
+ return try_number(data, dlen, array, 6, ',', term);
+}
+
+/* Grab port: number up to delimiter */
+static int get_port(const char *data, int start, size_t dlen, char delim,
+ u_int32_t array[2])
+{
+ u_int16_t port = 0;
+ int i;
+
+ for (i = start; i < dlen; i++) {
+ /* Finished? */
+ if (data[i] == delim) {
+ if (port == 0)
+ break;
+ array[0] = port >> 8;
+ array[1] = port & 0xFF;
+ return i + 1;
+ }
+ else if (data[i] >= '0' && data[i] <= '9')
+ port = port*10 + data[i] - '0';
+ else /* Some other crap */
+ break;
+ }
+ return 0;
+}
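+/* The port is stored split across array[4] (high byte) and array[5]
+   (low byte) so that EPRT/EPSV results share the six-number layout
+   produced by try_rfc959(); the caller recombines them with
+   array[4] << 8 | array[5]. */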
+
+/* Returns 0, or length of numbers: |1|132.235.1.2|6275| */
+static int try_eprt(const char *data, size_t dlen, u_int32_t array[6],
+ char term)
+{
+ char delim;
+ int length;
+
+ /* First character is delimiter, then "1" for IPv4, then
+ delimiter again. */
+ if (dlen <= 3) return 0;
+ delim = data[0];
+ if (isdigit(delim) || delim < 33 || delim > 126
+ || data[1] != '1' || data[2] != delim)
+ return 0;
+
+ DEBUGP("EPRT: Got |1|!\n");
+ /* Now we have IP address. */
+ length = try_number(data + 3, dlen - 3, array, 4, '.', delim);
+ if (length == 0)
+ return 0;
+
+ DEBUGP("EPRT: Got IP address!\n");
+ /* Start offset includes initial "|1|", and trailing delimiter */
+ return get_port(data, 3 + length + 1, dlen, delim, array+4);
+}
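+/* Worked example: for "|1|132.235.1.2|6275|" the delimiter is '|';
+   try_number() fills array[0..3] with 132,235,1,2 and get_port()
+   stores 6275 in array[4]/array[5]. */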
+
+/* Returns 0, or length of numbers: |||6446| */
+static int try_epsv_response(const char *data, size_t dlen, u_int32_t array[6],
+ char term)
+{
+ char delim;
+
+ /* Three delimiters. */
+ if (dlen <= 3) return 0;
+ delim = data[0];
+ if (isdigit(delim) || delim < 33 || delim > 126
+ || data[1] != delim || data[2] != delim)
+ return 0;
+
+ return get_port(data, 3, dlen, delim, array+4);
+}
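+/* An EPSV response (RFC 2428) carries only the port number; the caller
+   pre-loads array[0..3] with the expected address, since the response
+   does not mention it (see the initialization in help() below). */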
+
/* Return 1 for match, 0 for accept, -1 for partial. */
static int find_pattern(const char *data, size_t dlen,
const char *pattern, size_t plen,
- char term,
+ char skip, char term,
unsigned int *numoff,
unsigned int *numlen,
- u_int32_t array[6])
+ u_int32_t array[6],
+ int (*getnum)(const char *, size_t, u_int32_t[], char))
{
+ size_t i;
+
+ DEBUGP("find_pattern `%s': dlen = %u\n", pattern, dlen);
if (dlen == 0)
return 0;
- if (dlen < plen) {
+ if (dlen <= plen) {
/* Short packet: try for partial? */
if (strnicmp(data, pattern, dlen) == 0)
return -1;
return 0;
}
- *numoff = plen;
- *numlen = try_number(data + plen, dlen - plen, array, term);
+ DEBUGP("Pattern matches!\n");
+ /* Now we've found the constant string, try to skip
+ to the 'skip' character */
+ for (i = plen; data[i] != skip; i++)
+ if (i == dlen - 1) return -1;
+
+ /* Skip over the last character */
+ i++;
+
+ DEBUGP("Skipped up to `%c'!\n", skip);
+
+ *numoff = i;
+ *numlen = getnum(data + i, dlen - i, array, term);
if (!*numlen)
return -1;
+ DEBUGP("Match succeeded!\n");
return 1;
}
unsigned int matchlen, matchoff;
struct ip_conntrack_tuple t, mask;
struct ip_ct_ftp *info = &ct->help.ct_ftp_info;
+ unsigned int i;
+ int found = 0;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED
return NF_ACCEPT;
}
- switch (find_pattern(data, datalen,
- search[dir].pattern,
- search[dir].plen, search[dir].term,
- &matchoff, &matchlen,
- array)) {
- case -1: /* partial */
+ /* Initialize IP array to expected address (it's not mentioned
+ in EPSV responses) */
+ array[0] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 24) & 0xFF;
+ array[1] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 16) & 0xFF;
+ array[2] = (ntohl(ct->tuplehash[dir].tuple.src.ip) >> 8) & 0xFF;
+ array[3] = ntohl(ct->tuplehash[dir].tuple.src.ip) & 0xFF;
+
+ for (i = 0; i < sizeof(search) / sizeof(search[0]); i++) {
+ if (search[i].dir != dir) continue;
+
+ found = find_pattern(data, datalen,
+ search[i].pattern,
+ search[i].plen,
+ search[i].skip,
+ search[i].term,
+ &matchoff, &matchlen,
+ array,
+ search[i].getnum);
+ if (found) break;
+ }
+ if (found == -1) {
/* We don't usually drop packets. After all, this is
connection tracking, not packet filtering.
However, it is necessary for accurate tracking in
this case. */
if (net_ratelimit())
- printk("conntrack_ftp: partial %u+%u\n",
+ printk("conntrack_ftp: partial %s %u+%u\n",
+ search[i].pattern,
ntohl(tcph->seq), datalen);
return NF_DROP;
-
- case 0: /* no match */
- DEBUGP("ip_conntrack_ftp_help: no match\n");
+ } else if (found == 0) /* No match */
return NF_ACCEPT;
- }
DEBUGP("conntrack_ftp: match `%.*s' (%u bytes at %u)\n",
(int)matchlen, data + matchoff,
info->is_ftp = 1;
info->seq = ntohl(tcph->seq) + matchoff;
info->len = matchlen;
- info->ftptype = dir;
+ info->ftptype = search[i].ftptype;
info->port = array[4] << 8 | array[5];
} else {
/* Enrico Scholz's passive FTP to partially RNAT'd ftp
DEBUGP("conntrack_ftp: NOT RECORDING: %u,%u,%u,%u != %u.%u.%u.%u\n",
array[0], array[1], array[2], array[3],
NIPQUAD(ct->tuplehash[dir].tuple.src.ip));
+
+ /* Thanks to Cristiano Lincoln Mattos
+ <lincoln@cesar.org.br> for reporting this potential
+ problem (DMZ machines opening holes to internal
+ networks, or the packet filter itself). */
+ if (!loose) goto out;
}
t = ((struct ip_conntrack_tuple)
{ 0xFFFFFFFF, { 0xFFFF }, 0xFFFF }});
/* Ignore failure; should only happen with NAT */
ip_conntrack_expect_related(ct, &t, &mask, NULL);
+ out:
UNLOCK_BH(&ip_ftp_lock);
return NF_ACCEPT;
}
-static struct ip_conntrack_helper ftp = { { NULL, NULL },
- { { 0, { __constant_htons(21) } },
- { 0, { 0 }, IPPROTO_TCP } },
- { { 0, { 0xFFFF } },
- { 0, { 0 }, 0xFFFF } },
- help };
+static struct ip_conntrack_helper ftp[MAX_PORTS];
-static int __init init(void)
+/* Not __exit: called from init() */
+static void fini(void)
{
- return ip_conntrack_helper_register(&ftp);
+ int i;
+ for (i = 0; (i < MAX_PORTS) && ports[i]; i++) {
+ DEBUGP("ip_ct_ftp: unregistering helper for port %d\n",
+ ports[i]);
+ ip_conntrack_helper_unregister(&ftp[i]);
+ }
}
-static void __exit fini(void)
+static int __init init(void)
{
- ip_conntrack_helper_unregister(&ftp);
+ int i, ret;
+
+ if (ports[0] == 0)
+ ports[0] = 21;
+
+ for (i = 0; (i < MAX_PORTS) && ports[i]; i++) {
+ memset(&ftp[i], 0, sizeof(struct ip_conntrack_helper));
+ ftp[i].tuple.src.u.tcp.port = htons(ports[i]);
+ ftp[i].tuple.dst.protonum = IPPROTO_TCP;
+ ftp[i].mask.src.u.tcp.port = 0xFFFF;
+ ftp[i].mask.dst.protonum = 0xFFFF;
+ ftp[i].help = help;
+ DEBUGP("ip_ct_ftp: registering helper for port %d\n",
+ ports[i]);
+ ret = ip_conntrack_helper_register(&ftp[i]);
+
+ if (ret) {
+ fini();
+ return ret;
+ }
+ ports_c++;
+ }
+ return 0;
}
+
EXPORT_SYMBOL(ip_ftp_lock);
EXPORT_SYMBOL(ip_conntrack_ftp);
static struct list_head bysource[IP_NAT_HTABLE_SIZE];
static struct list_head byipsproto[IP_NAT_HTABLE_SIZE];
LIST_HEAD(protos);
-static LIST_HEAD(helpers);
extern struct ip_nat_protocol unknown_nat_protocol;
return NF_ACCEPT;
}
-int ip_nat_helper_register(struct ip_nat_helper *me)
-{
- int ret = 0;
-
- WRITE_LOCK(&ip_nat_lock);
- if (LIST_FIND(&helpers, helper_cmp, struct ip_nat_helper *,&me->tuple))
- ret = -EBUSY;
- else {
- list_prepend(&helpers, me);
- MOD_INC_USE_COUNT;
- }
- WRITE_UNLOCK(&ip_nat_lock);
-
- return ret;
-}
-
-static int
-kill_helper(const struct ip_conntrack *i, void *helper)
-{
- int ret;
-
- READ_LOCK(&ip_nat_lock);
- ret = (i->nat.info.helper == helper);
- READ_UNLOCK(&ip_nat_lock);
-
- return ret;
-}
-
-void ip_nat_helper_unregister(struct ip_nat_helper *me)
-{
- WRITE_LOCK(&ip_nat_lock);
- LIST_DELETE(&helpers, me);
- WRITE_UNLOCK(&ip_nat_lock);
-
- /* Someone could be still looking at the helper in a bh. */
- br_write_lock_bh(BR_NETPROTO_LOCK);
- br_write_unlock_bh(BR_NETPROTO_LOCK);
-
- /* Find anything using it, and umm, kill them. We can't turn
- them into normal connections: if we've adjusted SYNs, then
- they'll ackstorm. So we just drop it. We used to just
- bump module count when a connection existed, but that
- forces admins to gen fake RSTs or bounce box, either of
- which is just a long-winded way of making things
- worse. --RR */
- ip_ct_selective_cleanup(kill_helper, me);
-
- MOD_DEC_USE_COUNT;
-}
-
int __init ip_nat_init(void)
{
size_t i;
#include <linux/netfilter_ipv4/ip_nat.h>
#include <linux/netfilter_ipv4/ip_nat_helper.h>
#include <linux/netfilter_ipv4/ip_nat_rule.h>
-#include <linux/netfilter_ipv4/ip_nat_ftp.h>
#include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
#define DEBUGP(format, args...)
#endif
+#define MAX_PORTS 8
+static int ports[MAX_PORTS];
+static int ports_c = 0;
+
+#ifdef MODULE_PARM
+MODULE_PARM(ports, "1-" __MODULE_STRING(MAX_PORTS) "i");
+#endif
+
+DECLARE_LOCK_EXTERN(ip_ftp_lock);
+
/* FIXME: Time out? --RR */
static int
return 0;
}
- if (ftpinfo->ftptype == IP_CT_FTP_PORT) {
+ if (ftpinfo->ftptype == IP_CT_FTP_PORT
+ || ftpinfo->ftptype == IP_CT_FTP_EPRT) {
/* PORT command: make connection go to the client. */
newdstip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
newsrcip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
return 1;
}
-/* This is interesting. We simply use the port given us by the client
- or server. In practice it's extremely unlikely to clash; if it
- does, the rule won't be able to get a unique tuple and will drop
- the packets. */
static int
-mangle_packet(struct sk_buff **pskb,
- u_int32_t newip,
- u_int16_t port,
- unsigned int matchoff,
- unsigned int matchlen,
- struct ip_nat_ftp_info *this_way,
- struct ip_nat_ftp_info *other_way)
+mangle_rfc959_packet(struct sk_buff **pskb,
+ u_int32_t newip,
+ u_int16_t port,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo)
{
- struct iphdr *iph = (*pskb)->nh.iph;
- struct tcphdr *tcph;
- unsigned char *data;
- unsigned int tcplen, newlen, newtcplen;
char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
MUST_BE_LOCKED(&ip_ftp_lock);
+
sprintf(buffer, "%u,%u,%u,%u,%u,%u",
NIPQUAD(newip), port>>8, port&0xFF);
- tcplen = (*pskb)->len - iph->ihl * 4;
- newtcplen = tcplen - matchlen + strlen(buffer);
- newlen = iph->ihl*4 + newtcplen;
-
- /* So there I am, in the middle of my `netfilter-is-wonderful'
- talk in Sydney, and someone asks `What happens if you try
- to enlarge a 64k packet here?'. I think I said something
- eloquent like `fuck'. */
- if (newlen > 65535) {
- if (net_ratelimit())
- printk("nat_ftp cheat: %u.%u.%u.%u->%u.%u.%u.%u %u\n",
- NIPQUAD((*pskb)->nh.iph->saddr),
- NIPQUAD((*pskb)->nh.iph->daddr),
- (*pskb)->nh.iph->protocol);
- return 0;
- }
+ DEBUGP("calling ip_nat_mangle_tcp_packet\n");
- if (newlen > (*pskb)->len + skb_tailroom(*pskb)) {
- struct sk_buff *newskb;
- newskb = skb_copy_expand(*pskb, skb_headroom(*pskb),
- newlen - (*pskb)->len,
- GFP_ATOMIC);
- if (!newskb) {
- DEBUGP("ftp: oom\n");
- return 0;
- } else {
- kfree_skb(*pskb);
- *pskb = newskb;
- iph = (*pskb)->nh.iph;
- }
- }
+ return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
+ matchlen, buffer, strlen(buffer));
+}
- tcph = (void *)iph + iph->ihl*4;
- data = (void *)tcph + tcph->doff*4;
-
- DEBUGP("Mapping `%.*s' [%u %u %u] to new `%s' [%u]\n",
- (int)matchlen, data+matchoff,
- data[matchoff], data[matchoff+1],
- matchlen, buffer, strlen(buffer));
-
- /* SYN adjust. If it's uninitialized, or this is after last
- correction, record it: we don't handle more than one
- adjustment in the window, but do deal with common case of a
- retransmit. */
- if (this_way->syn_offset_before == this_way->syn_offset_after
- || before(this_way->syn_correction_pos, ntohl(tcph->seq))) {
- this_way->syn_correction_pos = ntohl(tcph->seq);
- this_way->syn_offset_before = this_way->syn_offset_after;
- this_way->syn_offset_after = (int32_t)
- this_way->syn_offset_before + newlen - (*pskb)->len;
- }
+/* |1|132.235.1.2|6275| */
+static int
+mangle_eprt_packet(struct sk_buff **pskb,
+ u_int32_t newip,
+ u_int16_t port,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ char buffer[sizeof("|1|255.255.255.255|65535|")];
- /* Move post-replacement */
- memmove(data + matchoff + strlen(buffer),
- data + matchoff + matchlen,
- (*pskb)->tail - (data + matchoff + matchlen));
- memcpy(data + matchoff, buffer, strlen(buffer));
-
- /* Resize packet. */
- if (newlen > (*pskb)->len) {
- DEBUGP("ip_nat_ftp: Extending packet by %u to %u bytes\n",
- newlen - (*pskb)->len, newlen);
- skb_put(*pskb, newlen - (*pskb)->len);
- } else {
- DEBUGP("ip_nat_ftp: Shrinking packet from %u to %u bytes\n",
- (*pskb)->len, newlen);
- skb_trim(*pskb, newlen);
- }
+ MUST_BE_LOCKED(&ip_ftp_lock);
- /* Fix checksums */
- iph->tot_len = htons(newlen);
- (*pskb)->csum = csum_partial((char *)tcph + tcph->doff*4,
- newtcplen - tcph->doff*4, 0);
- tcph->check = 0;
- tcph->check = tcp_v4_check(tcph, newtcplen, iph->saddr, iph->daddr,
- csum_partial((char *)tcph, tcph->doff*4,
- (*pskb)->csum));
- ip_send_check(iph);
- return 1;
+ sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
+
+ DEBUGP("calling ip_nat_mangle_tcp_packet\n");
+
+ return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
+ matchlen, buffer, strlen(buffer));
}
-/* Grrr... SACK. Fuck me even harder. Don't want to fix it on the
- fly, so blow it away. */
-static void
-delete_sack(struct sk_buff *skb, struct tcphdr *tcph)
+/* |||6446| */
+static int
+mangle_epsv_packet(struct sk_buff **pskb,
+ u_int32_t newip,
+ u_int16_t port,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo)
{
- unsigned int i;
- u_int8_t *opt = (u_int8_t *)tcph;
-
- DEBUGP("Seeking SACKPERM in SYN packet (doff = %u).\n",
- tcph->doff * 4);
- for (i = sizeof(struct tcphdr); i < tcph->doff * 4;) {
- DEBUGP("%u ", opt[i]);
- switch (opt[i]) {
- case TCPOPT_NOP:
- case TCPOPT_EOL:
- i++;
- break;
+ char buffer[sizeof("|||65535|")];
- case TCPOPT_SACK_PERM:
- goto found_opt;
+ MUST_BE_LOCKED(&ip_ftp_lock);
- default:
- /* Worst that can happen: it will take us over. */
- i += opt[i+1] ?: 1;
- }
- }
- DEBUGP("\n");
- return;
-
- found_opt:
- DEBUGP("\n");
- DEBUGP("Found SACKPERM at offset %u.\n", i);
-
- /* Must be within TCP header, and valid SACK perm. */
- if (i + opt[i+1] <= tcph->doff*4 && opt[i+1] == 2) {
- /* Replace with NOPs. */
- tcph->check
- = ip_nat_cheat_check(*((u_int16_t *)(opt + i))^0xFFFF,
- 0, tcph->check);
- opt[i] = opt[i+1] = 0;
- }
- else DEBUGP("Something wrong with SACK_PERM.\n");
+ sprintf(buffer, "|||%u|", port);
+
+ DEBUGP("calling ip_nat_mangle_tcp_packet\n");
+
+ return ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
+ matchlen, buffer, strlen(buffer));
}
+static int (*mangle[])(struct sk_buff **, u_int32_t, u_int16_t,
+ unsigned int,
+ unsigned int,
+ struct ip_conntrack *,
+ enum ip_conntrack_info)
+= { [IP_CT_FTP_PORT] mangle_rfc959_packet,
+ [IP_CT_FTP_PASV] mangle_rfc959_packet,
+ [IP_CT_FTP_EPRT] mangle_eprt_packet,
+ [IP_CT_FTP_EPSV] mangle_epsv_packet
+};
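+/* Dispatch table: ftp_data_fixup() indexes this array by the
+   ip_ct_ftp_type the conntrack helper recorded, so each command or
+   response format is rewritten by a routine that knows its syntax. */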
+
static int ftp_data_fixup(const struct ip_ct_ftp *ct_ftp_info,
struct ip_conntrack *ct,
- struct ip_nat_ftp_info *ftp,
unsigned int datalen,
- struct sk_buff **pskb)
+ struct sk_buff **pskb,
+ enum ip_conntrack_info ctinfo)
{
u_int32_t newip;
struct iphdr *iph = (*pskb)->nh.iph;
/* Change address inside packet to match way we're mapping
this connection. */
- if (ct_ftp_info->ftptype == IP_CT_FTP_PASV) {
- /* PASV response: must be where client thinks server
+ if (ct_ftp_info->ftptype == IP_CT_FTP_PASV
+ || ct_ftp_info->ftptype == IP_CT_FTP_EPSV) {
+ /* PASV/EPSV response: must be where client thinks server
is */
newip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
/* Expect something from client->server */
if (port == 0)
return 0;
- if (!mangle_packet(pskb, newip, port,
- ct_ftp_info->seq - ntohl(tcph->seq),
- ct_ftp_info->len,
- &ftp[ct_ftp_info->ftptype],
- &ftp[!ct_ftp_info->ftptype]))
+ if (!mangle[ct_ftp_info->ftptype](pskb, newip, port,
+ ct_ftp_info->seq - ntohl(tcph->seq),
+ ct_ftp_info->len, ct, ctinfo))
return 0;
return 1;
{
struct iphdr *iph = (*pskb)->nh.iph;
struct tcphdr *tcph = (void *)iph + iph->ihl*4;
- u_int32_t newseq, newack;
unsigned int datalen;
int dir;
int score;
struct ip_ct_ftp *ct_ftp_info
= &ct->help.ct_ftp_info;
- struct ip_nat_ftp_info *ftp
- = &ct->nat.help.ftp_info[0];
/* Delete SACK_OK on initial TCP SYNs. */
if (tcph->syn && !tcph->ack)
- delete_sack(*pskb, tcph);
+ ip_nat_delete_sack(*pskb, tcph);
/* Only mangle things once: original direction in POST_ROUTING
and reply direction on PRE_ROUTING. */
UNLOCK_BH(&ip_ftp_lock);
return NF_DROP;
} else if (score == 2) {
- if (!ftp_data_fixup(ct_ftp_info, ct, ftp, datalen,
- pskb)) {
+ if (!ftp_data_fixup(ct_ftp_info, ct, datalen,
+ pskb, ctinfo)) {
UNLOCK_BH(&ip_ftp_lock);
return NF_DROP;
}
-
/* skb may have been reallocated */
iph = (*pskb)->nh.iph;
tcph = (void *)iph + iph->ihl*4;
}
}
- /* Sequence adjust */
- if (after(ntohl(tcph->seq), ftp[dir].syn_correction_pos))
- newseq = ntohl(tcph->seq) + ftp[dir].syn_offset_after;
- else
- newseq = ntohl(tcph->seq) + ftp[dir].syn_offset_before;
- newseq = htonl(newseq);
-
- /* Ack adjust: other dir sees offset seq numbers */
- if (after(ntohl(tcph->ack_seq) - ftp[!dir].syn_offset_before,
- ftp[!dir].syn_correction_pos))
- newack = ntohl(tcph->ack_seq) - ftp[!dir].syn_offset_after;
- else
- newack = ntohl(tcph->ack_seq) - ftp[!dir].syn_offset_before;
- newack = htonl(newack);
UNLOCK_BH(&ip_ftp_lock);
- tcph->check = ip_nat_cheat_check(~tcph->seq, newseq,
- ip_nat_cheat_check(~tcph->ack_seq,
- newack,
- tcph->check));
- tcph->seq = newseq;
- tcph->ack_seq = newack;
+ ip_nat_seq_adjust(*pskb, ct, ctinfo);
return NF_ACCEPT;
}
-static struct ip_nat_helper ftp = { { NULL, NULL },
- { { 0, { __constant_htons(21) } },
- { 0, { 0 }, IPPROTO_TCP } },
- { { 0, { 0xFFFF } },
- { 0, { 0 }, 0xFFFF } },
- help, "ftp" };
+static struct ip_nat_helper ftp[MAX_PORTS];
+static char ftp_names[MAX_PORTS][6];
+
static struct ip_nat_expect ftp_expect
= { { NULL, NULL }, ftp_nat_expected };
+/* Not __exit: called from init() */
+static void fini(void)
+{
+ int i;
+
+ for (i = 0; (i < MAX_PORTS) && ports[i]; i++) {
+ DEBUGP("ip_nat_ftp: unregistering port %d\n", ports[i]);
+ ip_nat_helper_unregister(&ftp[i]);
+ }
+
+ ip_nat_expect_unregister(&ftp_expect);
+}
+
static int __init init(void)
{
- int ret;
+ int i, ret;
+ char *tmpname;
ret = ip_nat_expect_register(&ftp_expect);
if (ret == 0) {
- ret = ip_nat_helper_register(&ftp);
+ if (ports[0] == 0)
+ ports[0] = 21;
- if (ret != 0)
- ip_nat_expect_unregister(&ftp_expect);
+ for (i = 0; (i < MAX_PORTS) && ports[i]; i++) {
+
+ memset(&ftp[i], 0, sizeof(struct ip_nat_helper));
+
+ ftp[i].tuple.dst.protonum = IPPROTO_TCP;
+ ftp[i].tuple.src.u.tcp.port = htons(ports[i]);
+ ftp[i].mask.dst.protonum = 0xFFFF;
+ ftp[i].mask.src.u.tcp.port = 0xFFFF;
+ ftp[i].help = help;
+
+ tmpname = &ftp_names[i][0];
+ sprintf(tmpname, "ftp%2.2d", i);
+ ftp[i].name = tmpname;
+
+ DEBUGP("ip_nat_ftp: Trying to register for port %d\n",
+ ports[i]);
+ ret = ip_nat_helper_register(&ftp[i]);
+
+ if (ret) {
+ printk("ip_nat_ftp: error registering helper for port %d\n", ports[i]);
+ fini();
+ return ret;
+ }
+ ports_c++;
+ }
+
+ } else {
+ ip_nat_expect_unregister(&ftp_expect);
}
return ret;
}
-static void __exit fini(void)
-{
- ip_nat_helper_unregister(&ftp);
- ip_nat_expect_unregister(&ftp_expect);
-}
-
module_init(init);
module_exit(fini);
--- /dev/null
+/* ip_nat_mangle.c - generic support functions for NAT helpers
+ *
+ * (C) 2000 by Harald Welte <laforge@gnumonks.org>
+ *
+ * distributed under the terms of GNU GPL
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/brlock.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+
+#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_nat_lock)
+#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_nat_lock)
+
+#include <linux/netfilter_ipv4/ip_nat.h>
+#include <linux/netfilter_ipv4/ip_nat_protocol.h>
+#include <linux/netfilter_ipv4/ip_nat_core.h>
+#include <linux/netfilter_ipv4/ip_nat_helper.h>
+#include <linux/netfilter_ipv4/listhelp.h>
+
+#if 0
+#define DEBUGP printk
+#define DUMP_OFFSET(x) \
+	printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
+	       x->offset_before, x->offset_after, x->correction_pos);
+#else
+#define DEBUGP(format, args...)
+#define DUMP_OFFSET(x)
+#endif
+
+DECLARE_LOCK(ip_nat_seqofs_lock);
+LIST_HEAD(helpers);
+
+static inline int
+ip_nat_resize_packet(struct sk_buff **skb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ int new_size)
+{
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ void *data;
+ int dir;
+ struct ip_nat_seq *this_way, *other_way;
+
+ DEBUGP("ip_nat_resize_packet: old_size = %u, new_size = %u\n",
+ (*skb)->len, new_size);
+
+ iph = (*skb)->nh.iph;
+ tcph = (void *)iph + iph->ihl*4;
+ data = (void *)tcph + tcph->doff*4;
+
+ dir = CTINFO2DIR(ctinfo);
+
+ this_way = &ct->nat.info.seq[dir];
+ other_way = &ct->nat.info.seq[!dir];
+
+ if (new_size > (*skb)->len + skb_tailroom(*skb)) {
+ struct sk_buff *newskb;
+ newskb = skb_copy_expand(*skb, skb_headroom(*skb),
+ new_size - (*skb)->len,
+ GFP_ATOMIC);
+
+ if (!newskb) {
+ printk("ip_nat_resize_packet: oom\n");
+ return 0;
+ } else {
+ kfree_skb(*skb);
+ *skb = newskb;
+ }
+ }
+
+ iph = (*skb)->nh.iph;
+ tcph = (void *)iph + iph->ihl*4;
+ data = (void *)tcph + tcph->doff*4;
+
+ DEBUGP("ip_nat_resize_packet: Seq_offset before: ");
+ DUMP_OFFSET(this_way);
+
+ LOCK_BH(&ip_nat_seqofs_lock);
+
+ /* SYN adjust. If it's uninitialized, or this is after last
+ * correction, record it: we don't handle more than one
+ * adjustment in the window, but do deal with common case of a
+ * retransmit */
+ if (this_way->offset_before == this_way->offset_after
+ || before(this_way->correction_pos, ntohl(tcph->seq))) {
+ this_way->correction_pos = ntohl(tcph->seq);
+ this_way->offset_before = this_way->offset_after;
+ this_way->offset_after = (int32_t)
+ this_way->offset_before + new_size - (*skb)->len;
+ }
+
+ UNLOCK_BH(&ip_nat_seqofs_lock);
+
+ DEBUGP("ip_nat_resize_packet: Seq_offset after: ");
+ DUMP_OFFSET(this_way);
+
+ return 1;
+}
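+/* Besides (possibly) growing the skb, ip_nat_resize_packet() records
+   the size delta in the per-direction ip_nat_seq data, which
+   ip_nat_seq_adjust() later uses to fix up sequence numbers on
+   subsequent packets. */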
+
+
+/* Generic function for mangling variable-length address changes inside
+ * NATed connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX command in FTP).
+ *
+ * Takes care of all the nasty sequence number changes, checksumming,
+ * skb enlargement, etc.
+ */
+int
+ip_nat_mangle_tcp_packet(struct sk_buff **skb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int match_offset,
+ unsigned int match_len,
+ char *rep_buffer,
+ unsigned int rep_len)
+{
+ struct iphdr *iph = (*skb)->nh.iph;
+ struct tcphdr *tcph;
+ unsigned char *data;
+ u_int32_t tcplen, newlen, newtcplen;
+
+ tcplen = (*skb)->len - iph->ihl*4;
+ newtcplen = tcplen - match_len + rep_len;
+ newlen = iph->ihl*4 + newtcplen;
+
+ if (newlen > 65535) {
+ if (net_ratelimit())
+ printk("ip_nat_mangle_tcp_packet: nat'ed packet "
+ "exceeds maximum packet size\n");
+ return 0;
+ }
+
+ if ((*skb)->len != newlen) {
+ if (!ip_nat_resize_packet(skb, ct, ctinfo, newlen)) {
+ printk("resize_packet failed!!\n");
+ return 0;
+ }
+ }
+
+ /* skb may be copied !! */
+ iph = (*skb)->nh.iph;
+ tcph = (void *)iph + iph->ihl*4;
+ data = (void *)tcph + tcph->doff*4;
+
+ /* move post-replacement */
+ memmove(data + match_offset + rep_len,
+ data + match_offset + match_len,
+ (*skb)->tail - (data + match_offset + match_len));
+
+ /* insert data from buffer */
+ memcpy(data + match_offset, rep_buffer, rep_len);
+
+ /* update skb info */
+ if (newlen > (*skb)->len) {
+ DEBUGP("ip_nat_mangle_tcp_packet: Extending packet by "
+ "%u to %u bytes\n", newlen - (*skb)->len, newlen);
+ skb_put(*skb, newlen - (*skb)->len);
+ } else {
+ DEBUGP("ip_nat_mangle_tcp_packet: Shrinking packet from "
+ "%u to %u bytes\n", (*skb)->len, newlen);
+ skb_trim(*skb, newlen);
+ }
+
+ /* fix checksum information */
+
+ iph->tot_len = htons(newlen);
+ (*skb)->csum = csum_partial((char *)tcph + tcph->doff*4,
+ newtcplen - tcph->doff*4, 0);
+
+ tcph->check = 0;
+ tcph->check = tcp_v4_check(tcph, newtcplen, iph->saddr, iph->daddr,
+ csum_partial((char *)tcph, tcph->doff*4,
+ (*skb)->csum));
+ ip_send_check(iph);
+
+ return 1;
+}
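+/* Typical use by a NAT helper (as in ip_nat_ftp.c above): the
+   conntrack helper records the offset and length of the text to be
+   replaced, the NAT helper formats the replacement into a buffer and
+   calls ip_nat_mangle_tcp_packet(), and ip_nat_seq_adjust() is then
+   run on every packet of the connection to keep seq/ack consistent. */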
+
+/* TCP sequence number adjustment */
+int
+ip_nat_seq_adjust(struct sk_buff *skb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ int dir;
+ u_int32_t newseq, newack;
+ struct ip_nat_seq *this_way, *other_way;
+
+ iph = skb->nh.iph;
+ tcph = (void *)iph + iph->ihl*4;
+
+ dir = CTINFO2DIR(ctinfo);
+
+ this_way = &ct->nat.info.seq[dir];
+ other_way = &ct->nat.info.seq[!dir];
+
+ if (after(ntohl(tcph->seq), this_way->correction_pos))
+ newseq = ntohl(tcph->seq) + this_way->offset_after;
+ else
+ newseq = ntohl(tcph->seq) + this_way->offset_before;
+ newseq = htonl(newseq);
+
+ if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
+ other_way->correction_pos))
+ newack = ntohl(tcph->ack_seq) - other_way->offset_after;
+ else
+ newack = ntohl(tcph->ack_seq) - other_way->offset_before;
+ newack = htonl(newack);
+
+ tcph->check = ip_nat_cheat_check(~tcph->seq, newseq,
+ ip_nat_cheat_check(~tcph->ack_seq,
+ newack,
+ tcph->check));
+
+ DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
+ ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
+ ntohl(newack));
+
+ tcph->seq = newseq;
+ tcph->ack_seq = newack;
+
+ return 0;
+}
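+/* Example of the correction logic: if mangling grew a segment that
+   started at sequence 1000 by 4 bytes, correction_pos is 1000 and
+   offset_after = offset_before + 4.  A later segment at seq 1200 is
+   shifted by offset_after, while a retransmit at seq 900 still gets
+   offset_before. */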
+
+/* Grrr... SACK. Fuck me even harder. Don't want to fix it on the
+ fly, so blow it away. */
+void
+ip_nat_delete_sack(struct sk_buff *skb, struct tcphdr *tcph)
+{
+ unsigned int i;
+ u_int8_t *opt = (u_int8_t *)tcph;
+
+ DEBUGP("Seeking SACKPERM in SYN packet (doff = %u).\n",
+ tcph->doff * 4);
+ for (i = sizeof(struct tcphdr); i < tcph->doff * 4;) {
+ DEBUGP("%u ", opt[i]);
+ switch (opt[i]) {
+ case TCPOPT_NOP:
+ case TCPOPT_EOL:
+ i++;
+ break;
+
+ case TCPOPT_SACK_PERM:
+ goto found_opt;
+
+ default:
+ /* Worst that can happen: it will take us over. */
+ i += opt[i+1] ?: 1;
+ }
+ }
+ DEBUGP("\n");
+ return;
+
+ found_opt:
+ DEBUGP("\n");
+ DEBUGP("Found SACKPERM at offset %u.\n", i);
+
+ /* Must be within TCP header, and valid SACK perm. */
+ if (i + opt[i+1] <= tcph->doff*4 && opt[i+1] == 2) {
+ /* Blot the option out with zero bytes (TCPOPT_EOL). */
+ tcph->check
+ = ip_nat_cheat_check(*((u_int16_t *)(opt + i))^0xFFFF,
+ 0, tcph->check);
+ opt[i] = opt[i+1] = 0;
+ }
+ else DEBUGP("Something wrong with SACK_PERM.\n");
+}
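+/* Rather than rewriting SACK blocks once sequence numbers have been
+   adjusted, SACK is disabled up front: blotting out the SACK-permitted
+   option on the initial SYN stops the peers from negotiating SACK at
+   all. */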
+
+static inline int
+helper_cmp(const struct ip_nat_helper *helper,
+ const struct ip_conntrack_tuple *tuple)
+{
+ return ip_ct_tuple_mask_cmp(tuple, &helper->tuple, &helper->mask);
+}
+
+int ip_nat_helper_register(struct ip_nat_helper *me)
+{
+ int ret = 0;
+
+ WRITE_LOCK(&ip_nat_lock);
+ if (LIST_FIND(&helpers, helper_cmp, struct ip_nat_helper *,&me->tuple))
+ ret = -EBUSY;
+ else {
+ list_prepend(&helpers, me);
+ MOD_INC_USE_COUNT;
+ }
+ WRITE_UNLOCK(&ip_nat_lock);
+
+ return ret;
+}
+
+static int
+kill_helper(const struct ip_conntrack *i, void *helper)
+{
+ int ret;
+
+ READ_LOCK(&ip_nat_lock);
+ ret = (i->nat.info.helper == helper);
+ READ_UNLOCK(&ip_nat_lock);
+
+ return ret;
+}
+
+void ip_nat_helper_unregister(struct ip_nat_helper *me)
+{
+ WRITE_LOCK(&ip_nat_lock);
+ LIST_DELETE(&helpers, me);
+ WRITE_UNLOCK(&ip_nat_lock);
+
+ /* Someone could be still looking at the helper in a bh. */
+ br_write_lock_bh(BR_NETPROTO_LOCK);
+ br_write_unlock_bh(BR_NETPROTO_LOCK);
+
+ /* Find anything using it, and umm, kill them. We can't turn
+ them into normal connections: if we've adjusted SYNs, then
+ they'll ackstorm. So we just drop it. We used to just
+ bump module count when a connection existed, but that
+ forces admins to gen fake RSTs or bounce box, either of
+ which is just a long-winded way of making things
+ worse. --RR */
+ ip_ct_selective_cleanup(kill_helper, me);
+
+ MOD_DEC_USE_COUNT;
+}
EXPORT_SYMBOL(ip_nat_expect_register);
EXPORT_SYMBOL(ip_nat_expect_unregister);
EXPORT_SYMBOL(ip_nat_cheat_check);
+EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
+EXPORT_SYMBOL(ip_nat_seq_adjust);
+EXPORT_SYMBOL(ip_nat_delete_sack);
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp.c,v 1.201 2001/03/06 22:42:56 davem Exp $
+ * Version: $Id: tcp.c,v 1.202 2001/04/20 20:46:19 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
}
/*
- * TCP socket write_space callback. Not used.
+ * TCP socket write_space callback.
*/
void tcp_write_space(struct sock *sk)
{
+ struct socket *sock = sk->socket;
+
+ if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
+ clear_bit(SOCK_NOSPACE, &sock->flags);
+
+ if (sk->sleep && waitqueue_active(sk->sleep))
+ wake_up_interruptible(sk->sleep);
+
+ if (sock->fasync_list && !(sk->shutdown&SEND_SHUTDOWN))
+ sock_wake_async(sock, 2, POLL_OUT);
+ }
}
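+/* With sk->use_write_queue set (see the tcp_ipv4.c and tcp_ipv6.c
+ * hunks below), sock_wfree() no longer calls write_space() for every
+ * freed skb; TCP invokes this callback itself from tcp_new_space() in
+ * tcp_input.c once window space actually opens. */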
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_input.c,v 1.226 2001/03/07 22:00:57 davem Exp $
+ * Version: $Id: tcp_input.c,v 1.228 2001/04/20 20:46:19 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
tp->snd_cwnd_stamp = tcp_time_stamp;
}
- /* Wakeup users. */
- if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
- struct socket *sock = sk->socket;
-
- clear_bit(SOCK_NOSPACE, &sock->flags);
-
- if (sk->sleep && waitqueue_active(sk->sleep))
- wake_up_interruptible(sk->sleep);
-
- if (sock->fasync_list && !(sk->shutdown&SEND_SHUTDOWN))
- sock_wake_async(sock, 2, POLL_OUT);
-
- /* Satisfy those who hook write_space() callback. */
- if (sk->write_space != tcp_write_space)
- sk->write_space(sk);
- }
+ sk->write_space(sk);
}
static inline void tcp_check_space(struct sock *sk)
tp->copied_seq = tp->rcv_nxt;
mb();
tcp_set_state(sk, TCP_ESTABLISHED);
+ sk->state_change(sk);
/* Note, that this wakeup is only for marginal
* crossed SYN case. Passively open sockets
* and sk->socket == NULL.
*/
if (sk->socket) {
- sk->state_change(sk);
sk_wake_async(sk,0,POLL_OUT);
}
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_ipv4.c,v 1.228 2001/04/06 18:41:36 davem Exp $
+ * Version: $Id: tcp_ipv4.c,v 1.229 2001/04/20 20:46:19 davem Exp $
*
* IPv4 specific functions
*
sk->state = TCP_CLOSE;
- sk->write_space = tcp_write_space;
+ sk->write_space = tcp_write_space;
+ sk->use_write_queue = 1;
sk->tp_pinfo.af_tcp.af_specific = &ipv4_specific;
return 0;
case NDISC_NEIGHBOUR_ADVERTISEMENT:
- if ((ipv6_addr_type(saddr)&IPV6_ADDR_MULTICAST) &&
+ if ((ipv6_addr_type(daddr)&IPV6_ADDR_MULTICAST) &&
msg->icmph.icmp6_solicited) {
ND_PRINTK0("NDISC: solicited NA is multicasted\n");
return 0;
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
- * $Id: sit.c,v 1.49 2001/03/19 20:31:17 davem Exp $
+ * $Id: sit.c,v 1.50 2001/04/19 22:32:55 davem Exp $
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
- * $Id: tcp_ipv6.c,v 1.135 2001/04/06 18:41:36 davem Exp $
+ * $Id: tcp_ipv6.c,v 1.136 2001/04/20 20:46:19 davem Exp $
*
* Based on:
* linux/net/ipv4/tcp.c
sk->tp_pinfo.af_tcp.af_specific = &ipv6_specific;
sk->write_space = tcp_write_space;
+ sk->use_write_queue = 1;
sk->sndbuf = sysctl_tcp_wmem[1];
sk->rcvbuf = sysctl_tcp_rmem[1];
#ifdef CONFIG_FILTER
EXPORT_SYMBOL(sk_run_filter);
+EXPORT_SYMBOL(sk_chk_filter);
#endif
EXPORT_SYMBOL(neigh_table_init);
unsigned long count, loff_t *ppos);
static ssize_t sock_writev(struct file *file, const struct iovec *vector,
unsigned long count, loff_t *ppos);
-static ssize_t sock_writepage(struct file *file, struct page *page,
- int offset, size_t size, loff_t *ppos, int more);
+static ssize_t sock_sendpage(struct file *file, struct page *page,
+ int offset, size_t size, loff_t *ppos, int more);
/*
fasync: sock_fasync,
readv: sock_readv,
writev: sock_writev,
- writepage: sock_writepage
+ sendpage: sock_sendpage
};
/*
return sock_sendmsg(sock, &msg, size);
}
-ssize_t sock_writepage(struct file *file, struct page *page,
- int offset, size_t size, loff_t *ppos, int more)
+ssize_t sock_sendpage(struct file *file, struct page *page,
+ int offset, size_t size, loff_t *ppos, int more)
{
struct socket *sock;
int flags;
my $output_mode = "man";
my %highlights = %highlights_man;
my $blankline = $blankline_man;
-my $modulename = "API Documentation";
+my $modulename = "Kernel API";
my $function_only = 0;
# Essentially these are globals
my ($parameter, $section);
my $count;
- print ".TH \"$args{'module'}\" 4 \"$args{'function'}\" \"25 May 1998\" \"API Manual\" LINUX\n";
+ print ".TH \"$args{'module'}\" 9 \"$args{'function'}\" \"April 2001\" \"API Manual\" LINUX\n";
print ".SH NAME\n";
print $args{'function'}." \\- ".$args{'purpose'}."\n";
my ($parameter, $section);
my $count;
- print ".TH \"$args{'module'}\" 4 \"$args{'module'}\" \"25 May 1998\" \"API Manual\" LINUX\n";
+ print ".TH \"$args{'module'}\" 9 \"$args{'module'}\" \"April 2001\" \"API Manual\" LINUX\n";
foreach $section (@{$args{'sectionlist'}}) {
print ".SH \"$section\"\n";