S: Bellevue, Washington 98007
S: USA
+N: Christopher L. Cheney
+E: ccheney@debian.org
+E: ccheney@cheney.cx
+W: http://www.cheney.cx
+P: 1024D/8E384AF2 2D31 1927 87D7 1F24 9FF9 1BC5 D106 5AB3 8E38 4AF2
+D: Vista Imaging usb webcam driver
+S: 314 Prince of Wales
+S: Conroe, TX 77304
+S: USA
+
N: Stuart Cheshire
E: cheshire@cs.stanford.edu
D: Author of Starmode Radio IP (STRIP) driver
S: Germany
N: Mark W. McClelland
-E: mwm@i.am
+E: mmcclell@bigfoot.com
E: mark@alpha.dyndns.org
W: http://alpha.dyndns.org/ov511/
+P: 1024D/357375CC 317C 58AC 1B39 2AB0 AB96 EB38 0B6F 731F 3573 75CC
D: OV511 driver
S: (address available on request)
S: USA
S: Fullarton 5063
S: South Australia
+N: Wolfgang Muees
+E: wmues@nexgo.de
+D: Auerswald USB driver
+
N: Ian A. Murdock
E: imurdock@gnu.ai.mit.edu
D: Creator of Debian distribution
The module will be called rio500.o. If you want to compile it as
a module, say M here and read <file:Documentation/modules.txt>.
+USB Auerswald ISDN device support
+CONFIG_USB_AUERSWALD
+ Say Y here if you want to connect an Auerswald USB ISDN Device
+ to your computer's USB port.
+
+ This code is also available as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want).
+ The module will be called auerswald.o. If you want to compile it as
+ a module, say M here and read <file:Documentation/modules.txt>.
+
D-Link DSB-R100 FM radio support
CONFIG_USB_DSBR
Say Y here if you want to connect this type of radio to your
--- /dev/null
+ Auerswald USB kernel driver
+ ===========================
+
+What is it? What can I do with it?
+==================================
+The Auerswald USB kernel driver connects your Linux 2.4.x
+system to Auerswald USB-enabled devices.
+
+There are two types of Auerswald USB devices:
+a) small PBX systems (ISDN)
+b) COMfort system telephones (ISDN)
+
+The driver installation creates the devices
+/dev/usb/auer0..15. These devices carry a vendor-
+specific protocol; you may run all Auerswald Java
+software on top of them. The Java software needs a
+native library "libAuerUsbJNINative.so" installed on
+your system. This library is available from
+Auerswald and is shipped as part of the Java software.
+
+You may create the devices with:
+ mknod -m 666 /dev/usb/auer0 c 180 80
+ ...
+ mknod -m 666 /dev/usb/auer15 c 180 95
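+
+For convenience, all sixteen nodes can also be created with a
+small shell loop (a sketch assuming a POSIX shell and the seq
+utility; minor numbers 80..95 correspond to the list above):
+
+  for i in `seq 0 15`; do
+      mknod -m 666 /dev/usb/auer$i c 180 $((80 + $i))
+  done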
+
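+Loading the driver
+==================
+If the driver was compiled as a module (named auerswald.o, as
+described in the Config.help entry above), it can be loaded by
+hand; a minimal sketch:
+
+  modprobe auerswald
+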
+Future plans
+============
+- Connection to ISDN4LINUX (the hisax interface)
+
+The maintainer of this driver is wmues@nexgo.de
USB OV511 DRIVER
P: Mark McClelland
-M: mwm@i.am
+M: mmcclell@bigfoot.com
L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
W: http://alpha.dyndns.org/ov511/
S: Supported
+USB AUERSWALD DRIVER
+P: Wolfgang Muees
+M: wmues@nexgo.de
+L: linux-usb-users@lists.sourceforge.net
+L: linux-usb-devel@lists.sourceforge.net
+S: Maintained
+
USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
P: Gary Brubaker
M: xavyer@ix.netcom.com
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 2
-EXTRAVERSION =-pre9
+EXTRAVERSION =-pre10
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
+++ /dev/null
-/*
- * linux/arch/arm/kernel/debug-armv.S
- *
- * Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * 32-bit debugging code
- */
-#include <linux/config.h>
-#include <linux/linkage.h>
-#include <asm/hardware.h>
-
- .text
-
-/*
- * Some debugging routines (useful if you've got MM problems and
- * printk isn't working). For DEBUGGING ONLY!!! Do not leave
- * references to these in a production kernel!
- */
-#if defined(CONFIG_ARCH_RPC)
- .macro addruart,rx
- mov \rx, #0xe0000000
- orr \rx, \rx, #0x00010000
- orr \rx, \rx, #0x00000fe0
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx]
- .endm
-
- .macro busyuart,rd,rx
-1001: ldrb \rd, [\rx, #0x14]
- and \rd, \rd, #0x60
- teq \rd, #0x60
- bne 1001b
- .endm
-
- .macro waituart,rd,rx
-1001: ldrb \rd, [\rx, #0x18]
- tst \rd, #0x10
- beq 1001b
- .endm
-
-#elif defined(CONFIG_ARCH_EBSA110)
- .macro addruart,rx
- mov \rx, #0xf0000000
- orr \rx, \rx, #0x00000be0
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldrb \rd, [\rx, #0x14]
- and \rd, \rd, #0x60
- teq \rd, #0x60
- bne 1002b
- .endm
-
- .macro waituart,rd,rx
-1001: ldrb \rd, [\rx, #0x18]
- tst \rd, #0x10
- beq 1001b
- .endm
-
-#elif defined(CONFIG_ARCH_SHARK)
- .macro addruart,rx
- mov \rx, #0xe0000000
- orr \rx, \rx, #0x000003f8
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx]
- .endm
-
- .macro busyuart,rd,rx
- mov \rd, #0
-1001: add \rd, \rd, #1
- teq \rd, #0x10000
- bne 1001b
- .endm
-
- .macro waituart,rd,rx
- .endm
-
-#elif defined(CONFIG_FOOTBRIDGE)
-
-#include <asm/hardware/dec21285.h>
-
-#ifndef CONFIG_DEBUG_DC21285_PORT
- /* For NetWinder debugging */
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- moveq \rx, #0x7c000000 @ physical
- movne \rx, #0xff000000 @ virtual
- orr \rx, \rx, #0x000003f8
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldrb \rd, [\rx, #0x5]
- and \rd, \rd, #0x60
- teq \rd, #0x60
- bne 1002b
- .endm
-
- .macro waituart,rd,rx
-1001: ldrb \rd, [\rx, #0x6]
- tst \rd, #0x10
- beq 1001b
- .endm
-#else
- /* For EBSA285 debugging */
- .equ dc21285_high, ARMCSR_BASE & 0xff000000
- .equ dc21285_low, ARMCSR_BASE & 0x00ffffff
-
- .macro addruart,rx
- mov \rx, #dc21285_high
- .if dc21285_low
- orr \rx, \rx, #dc21285_low
- .endif
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #0x160] @ UARTDR
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #0x178] @ UARTFLG
- tst \rd, #1 << 3
- bne 1001b
- .endm
-
- .macro waituart,rd,rx
- .endm
-#endif
-#elif defined(CONFIG_ARCH_FTVPCI)
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- movne \rx, #0xe0000000
- moveq \rx, #0x10000000
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #0xc]
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #0x4]
- tst \rd, #1 << 2
- beq 1001b
- .endm
-
- .macro waituart,rd,rx
- .endm
-
-#elif defined(CONFIG_ARCH_SA1100)
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- moveq \rx, #0x80000000 @ physical base address
- movne \rx, #0xf8000000 @ virtual address
- @add \rx, \rx, #0x00050000 @ Ser3
- add \rx, \rx, #0x00010000 @ Ser1
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #0x14] @ UTDR
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #0x20] @ UTSR1
- tst \rd, #1 << 2 @ UTSR1_TNF
- beq 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #0x20] @ UTSR1
- tst \rd, #1 << 0 @ UTSR1_TBY
- bne 1001b
- .endm
-
-#elif defined(CONFIG_ARCH_CLPS7500)
- .macro addruart,rx
- mov \rx, #0xe0000000
- orr \rx, \rx, #0x00010000
- orr \rx, \rx, #0x00000be0
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx]
- .endm
-
- .macro busyuart,rd,rx
- .endm
-
- .macro waituart,rd,rx
-1001: ldrb \rd, [\rx, #0x14]
- tst \rd, #0x20
- beq 1001b
- .endm
-
-#elif defined(CONFIG_ARCH_L7200)
-
- .equ io_virt, IO_BASE
- .equ io_phys, IO_START
-
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- moveq \rx, #io_phys @ physical base address
- movne \rx, #io_virt @ virtual address
- add \rx, \rx, #0x00044000 @ UART1
-@ add \rx, \rx, #0x00045000 @ UART2
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #0x0] @ UARTDR
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #0x18] @ UARTFLG
- tst \rd, #1 << 5 @ UARTFLGUTXFF - 1 when full
- bne 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #0x18] @ UARTFLG
- tst \rd, #1 << 3 @ UARTFLGUBUSY - 1 when busy
- bne 1001b
- .endm
-
-#elif defined(CONFIG_ARCH_INTEGRATOR)
-
-#include <asm/hardware/serial_amba.h>
-
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- moveq \rx, #0x16000000 @ physical base address
- movne \rx, #0xf0000000 @ virtual base
- addne \rx, \rx, #0x16000000 >> 4
- .endm
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #AMBA_UARTDR]
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #0x18] @ UARTFLG
- tst \rd, #1 << 5 @ UARTFLGUTXFF - 1 when full
- bne 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #0x18] @ UARTFLG
- tst \rd, #1 << 3 @ UARTFLGUBUSY - 1 when busy
- bne 1001b
- .endm
-
-#elif defined(CONFIG_ARCH_CLPS711X)
-
-#include <asm/hardware/clps7111.h>
-
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- moveq \rx, #CLPS7111_PHYS_BASE
- movne \rx, #CLPS7111_VIRT_BASE
-#ifndef CONFIG_DEBUG_CLPS711X_UART2
- add \rx, \rx, #0x0000 @ UART1
-#else
- add \rx, \rx, #0x1000 @ UART2
-#endif
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #0x0480] @ UARTDR
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #0x0140] @ SYSFLGx
- tst \rd, #1 << 11 @ UBUSYx
- bne 1001b
- .endm
-
- .macro busyuart,rd,rx
- tst \rx, #0x1000 @ UART2 does not have CTS here
- bne 1002f
-1001: ldr \rd, [\rx, #0x0140] @ SYSFLGx
- tst \rd, #1 << 8 @ CTS
- bne 1001b
-1002:
- .endm
-
-#elif defined(CONFIG_ARCH_ANAKIN)
-
-//#//include <asm/arch/serial_reg.h>
-
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- moveq \rx, #IO_START
- movne \rx, #IO_BASE
- add \rx, \rx, #UART0
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #0x14] @ tx
- ldr \rd, [\rx, #0x18]
- orr \rd, \rd, #SENDREQUEST
- str \rd, [\rx, #0x18]
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #0x10]
- tst \rd, #TXEMPTY
- beq 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #0x10]
- tst \rd, #CTS
- bne 1001b
- .endm
-
-#elif defined(CONFIG_ARCH_CAMELOT)
-
-#include <asm/arch/excalibur.h>
-#define UART00_TYPE
-#include <asm/arch/uart00.h>
-
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- ldr \rx, =EXC_UART00_BASE @ physical base address
- orrne \rx, \rx, #0xff000000 @ virtual base
- orrne \rx, \rx, #0x00f00000
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #UART_TD(0)]
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #UART_TSR(0)]
- and \rd, \rd, #UART_TSR_TX_LEVEL_MSK
- cmp \rd, #15
- beq 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #UART_TSR(0)]
- ands \rd, \rd, #UART_TSR_TX_LEVEL_MSK
- bne 1001b
- .endm
-
-#else
-#error Unknown architecture
-#endif
-
-/*
- * Useful debugging routines
- */
-ENTRY(printhex8)
- mov r1, #8
- b printhex
-
-ENTRY(printhex4)
- mov r1, #4
- b printhex
-
-ENTRY(printhex2)
- mov r1, #2
-printhex: adr r2, hexbuf
- add r3, r2, r1
- mov r1, #0
- strb r1, [r3]
-1: and r1, r0, #15
- mov r0, r0, lsr #4
- cmp r1, #10
- addlt r1, r1, #'0'
- addge r1, r1, #'a' - 10
- strb r1, [r3, #-1]!
- teq r3, r2
- bne 1b
- mov r0, r2
- b printascii
-
- .ltorg
-
-ENTRY(printascii)
- addruart r3
- b 2f
-1: waituart r2, r3
- senduart r1, r3
- busyuart r2, r3
- teq r1, #'\n'
- moveq r1, #'\r'
- beq 1b
-2: teq r0, #0
- ldrneb r1, [r0], #1
- teqne r1, #0
- bne 1b
- mov pc, lr
-
-ENTRY(printch)
- addruart r3
- mov r1, r0
- mov r0, #0
- b 1b
-
-hexbuf: .space 16
--- /dev/null
+/*
+ * linux/arch/arm/kernel/debug-armv.S
+ *
+ * Copyright (C) 1994-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 32-bit debugging code
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/hardware.h>
+
+ .text
+
+/*
+ * Some debugging routines (useful if you've got MM problems and
+ * printk isn't working). For DEBUGGING ONLY!!! Do not leave
+ * references to these in a production kernel!
+ */
+#if defined(CONFIG_ARCH_RPC)
+ .macro addruart,rx
+ mov \rx, #0xe0000000
+ orr \rx, \rx, #0x00010000
+ orr \rx, \rx, #0x00000fe0
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx]
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldrb \rd, [\rx, #0x14]
+ and \rd, \rd, #0x60
+ teq \rd, #0x60
+ bne 1001b
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldrb \rd, [\rx, #0x18]
+ tst \rd, #0x10
+ beq 1001b
+ .endm
+
+#elif defined(CONFIG_ARCH_EBSA110)
+ .macro addruart,rx
+ mov \rx, #0xf0000000
+ orr \rx, \rx, #0x00000be0
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx]
+ .endm
+
+ .macro busyuart,rd,rx
+1002: ldrb \rd, [\rx, #0x14]
+ and \rd, \rd, #0x60
+ teq \rd, #0x60
+ bne 1002b
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldrb \rd, [\rx, #0x18]
+ tst \rd, #0x10
+ beq 1001b
+ .endm
+
+#elif defined(CONFIG_ARCH_SHARK)
+ .macro addruart,rx
+ mov \rx, #0xe0000000
+ orr \rx, \rx, #0x000003f8
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx]
+ .endm
+
+ .macro busyuart,rd,rx
+ mov \rd, #0
+1001: add \rd, \rd, #1
+ teq \rd, #0x10000
+ bne 1001b
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
+
+#elif defined(CONFIG_FOOTBRIDGE)
+
+#include <asm/hardware/dec21285.h>
+
+#ifndef CONFIG_DEBUG_DC21285_PORT
+ /* For NetWinder debugging */
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0x7c000000 @ physical
+ movne \rx, #0xff000000 @ virtual
+ orr \rx, \rx, #0x000003f8
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx]
+ .endm
+
+ .macro busyuart,rd,rx
+1002: ldrb \rd, [\rx, #0x5]
+ and \rd, \rd, #0x60
+ teq \rd, #0x60
+ bne 1002b
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldrb \rd, [\rx, #0x6]
+ tst \rd, #0x10
+ beq 1001b
+ .endm
+#else
+ /* For EBSA285 debugging */
+ .equ dc21285_high, ARMCSR_BASE & 0xff000000
+ .equ dc21285_low, ARMCSR_BASE & 0x00ffffff
+
+ .macro addruart,rx
+ mov \rx, #dc21285_high
+ .if dc21285_low
+ orr \rx, \rx, #dc21285_low
+ .endif
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #0x160] @ UARTDR
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #0x178] @ UARTFLG
+ tst \rd, #1 << 3
+ bne 1001b
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
+#endif
+#elif defined(CONFIG_ARCH_FTVPCI)
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ movne \rx, #0xe0000000
+ moveq \rx, #0x10000000
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #0xc]
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #0x4]
+ tst \rd, #1 << 2
+ beq 1001b
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
+
+#elif defined(CONFIG_ARCH_SA1100)
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0x80000000 @ physical base address
+ movne \rx, #0xf8000000 @ virtual address
+ @add \rx, \rx, #0x00050000 @ Ser3
+ add \rx, \rx, #0x00010000 @ Ser1
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #0x14] @ UTDR
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #0x20] @ UTSR1
+ tst \rd, #1 << 2 @ UTSR1_TNF
+ beq 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #0x20] @ UTSR1
+ tst \rd, #1 << 0 @ UTSR1_TBY
+ bne 1001b
+ .endm
+
+#elif defined(CONFIG_ARCH_CLPS7500)
+ .macro addruart,rx
+ mov \rx, #0xe0000000
+ orr \rx, \rx, #0x00010000
+ orr \rx, \rx, #0x00000be0
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx]
+ .endm
+
+ .macro busyuart,rd,rx
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldrb \rd, [\rx, #0x14]
+ tst \rd, #0x20
+ beq 1001b
+ .endm
+
+#elif defined(CONFIG_ARCH_L7200)
+
+ .equ io_virt, IO_BASE
+ .equ io_phys, IO_START
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #io_phys @ physical base address
+ movne \rx, #io_virt @ virtual address
+ add \rx, \rx, #0x00044000 @ UART1
+@ add \rx, \rx, #0x00045000 @ UART2
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #0x0] @ UARTDR
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #0x18] @ UARTFLG
+ tst \rd, #1 << 5 @ UARTFLGUTXFF - 1 when full
+ bne 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #0x18] @ UARTFLG
+ tst \rd, #1 << 3 @ UARTFLGUBUSY - 1 when busy
+ bne 1001b
+ .endm
+
+#elif defined(CONFIG_ARCH_INTEGRATOR)
+
+#include <asm/hardware/serial_amba.h>
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0x16000000 @ physical base address
+ movne \rx, #0xf0000000 @ virtual base
+ addne \rx, \rx, #0x16000000 >> 4
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx, #AMBA_UARTDR]
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #0x18] @ UARTFLG
+ tst \rd, #1 << 5 @ UARTFLGUTXFF - 1 when full
+ bne 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #0x18] @ UARTFLG
+ tst \rd, #1 << 3 @ UARTFLGUBUSY - 1 when busy
+ bne 1001b
+ .endm
+
+#elif defined(CONFIG_ARCH_CLPS711X)
+
+#include <asm/hardware/clps7111.h>
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #CLPS7111_PHYS_BASE
+ movne \rx, #CLPS7111_VIRT_BASE
+#ifndef CONFIG_DEBUG_CLPS711X_UART2
+ add \rx, \rx, #0x0000 @ UART1
+#else
+ add \rx, \rx, #0x1000 @ UART2
+#endif
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #0x0480] @ UARTDR
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #0x0140] @ SYSFLGx
+ tst \rd, #1 << 11 @ UBUSYx
+ bne 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+ tst \rx, #0x1000 @ UART2 does not have CTS here
+ bne 1002f
+1001: ldr \rd, [\rx, #0x0140] @ SYSFLGx
+ tst \rd, #1 << 8 @ CTS
+ bne 1001b
+1002:
+ .endm
+
+#elif defined(CONFIG_ARCH_ANAKIN)
+
+//#//include <asm/arch/serial_reg.h>
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #IO_START
+ movne \rx, #IO_BASE
+ add \rx, \rx, #UART0
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #0x14] @ tx
+ ldr \rd, [\rx, #0x18]
+ orr \rd, \rd, #SENDREQUEST
+ str \rd, [\rx, #0x18]
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #0x10]
+ tst \rd, #TXEMPTY
+ beq 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #0x10]
+ tst \rd, #CTS
+ bne 1001b
+ .endm
+
+#elif defined(CONFIG_ARCH_CAMELOT)
+
+#include <asm/arch/excalibur.h>
+#define UART00_TYPE
+#include <asm/arch/uart00.h>
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ ldr \rx, =EXC_UART00_BASE @ physical base address
+ orrne \rx, \rx, #0xff000000 @ virtual base
+ orrne \rx, \rx, #0x00f00000
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #UART_TD(0)]
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #UART_TSR(0)]
+ and \rd, \rd, #UART_TSR_TX_LEVEL_MSK
+ cmp \rd, #15
+ beq 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #UART_TSR(0)]
+ ands \rd, \rd, #UART_TSR_TX_LEVEL_MSK
+ bne 1001b
+ .endm
+
+#else
+#error Unknown architecture
+#endif
+
+/*
+ * Useful debugging routines
+ */
+ENTRY(printhex8)
+ mov r1, #8
+ b printhex
+
+ENTRY(printhex4)
+ mov r1, #4
+ b printhex
+
+ENTRY(printhex2)
+ mov r1, #2
+printhex: adr r2, hexbuf
+ add r3, r2, r1
+ mov r1, #0
+ strb r1, [r3]
+1: and r1, r0, #15
+ mov r0, r0, lsr #4
+ cmp r1, #10
+ addlt r1, r1, #'0'
+ addge r1, r1, #'a' - 10
+ strb r1, [r3, #-1]!
+ teq r3, r2
+ bne 1b
+ mov r0, r2
+ b printascii
+
+ .ltorg
+
+ENTRY(printascii)
+ addruart r3
+ b 2f
+1: waituart r2, r3
+ senduart r1, r3
+ busyuart r2, r3
+ teq r1, #'\n'
+ moveq r1, #'\r'
+ beq 1b
+2: teq r0, #0
+ ldrneb r1, [r0], #1
+ teqne r1, #0
+ bne 1b
+ mov pc, lr
+
+ENTRY(printch)
+ addruart r3
+ mov r1, r0
+ mov r0, #0
+ b 1b
+
+hexbuf: .space 16
+++ /dev/null
-/*
- * linux/arch/arm/kernel/dec21285.c: PCI functions for DC21285
- *
- * Copyright (C) 1998-2000 Russell King, Phil Blundell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ptrace.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/mach/pci.h>
-#include <asm/hardware/dec21285.h>
-
-#define MAX_SLOTS 21
-
-#define PCICMD_ERROR_BITS ((PCI_STATUS_DETECTED_PARITY | \
- PCI_STATUS_REC_MASTER_ABORT | \
- PCI_STATUS_REC_TARGET_ABORT | \
- PCI_STATUS_PARITY) << 16)
-
-extern int setup_arm_irq(int, struct irqaction *);
-extern void pcibios_report_status(u_int status_mask, int warn);
-extern void register_isa_ports(unsigned int, unsigned int, unsigned int);
-
-static unsigned long
-dc21285_base_address(struct pci_dev *dev)
-{
- unsigned long addr = 0;
- unsigned int devfn = dev->devfn;
-
- if (dev->bus->number == 0) {
- if (PCI_SLOT(devfn) == 0)
- /*
- * For devfn 0, point at the 21285
- */
- addr = ARMCSR_BASE;
- else {
- devfn -= 1 << 3;
-
- if (devfn < PCI_DEVFN(MAX_SLOTS, 0))
- addr = PCICFG0_BASE | 0xc00000 | (devfn << 8);
- }
- } else
- addr = PCICFG1_BASE | (dev->bus->number << 16) | (devfn << 8);
-
- return addr;
-}
-
-static int
-dc21285_read_config_byte(struct pci_dev *dev, int where, u8 *value)
-{
- unsigned long addr = dc21285_base_address(dev);
- u8 v;
-
- if (addr)
- asm("ldr%?b %0, [%1, %2]"
- : "=r" (v) : "r" (addr), "r" (where));
- else
- v = 0xff;
-
- *value = v;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-dc21285_read_config_word(struct pci_dev *dev, int where, u16 *value)
-{
- unsigned long addr = dc21285_base_address(dev);
- u16 v;
-
- if (addr)
- asm("ldr%?h %0, [%1, %2]"
- : "=r" (v) : "r" (addr), "r" (where));
- else
- v = 0xffff;
-
- *value = v;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-dc21285_read_config_dword(struct pci_dev *dev, int where, u32 *value)
-{
- unsigned long addr = dc21285_base_address(dev);
- u32 v;
-
- if (addr)
- asm("ldr%? %0, [%1, %2]"
- : "=r" (v) : "r" (addr), "r" (where));
- else
- v = 0xffffffff;
-
- *value = v;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-dc21285_write_config_byte(struct pci_dev *dev, int where, u8 value)
-{
- unsigned long addr = dc21285_base_address(dev);
-
- if (addr)
- asm("str%?b %0, [%1, %2]"
- : : "r" (value), "r" (addr), "r" (where));
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-dc21285_write_config_word(struct pci_dev *dev, int where, u16 value)
-{
- unsigned long addr = dc21285_base_address(dev);
-
- if (addr)
- asm("str%?h %0, [%1, %2]"
- : : "r" (value), "r" (addr), "r" (where));
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-dc21285_write_config_dword(struct pci_dev *dev, int where, u32 value)
-{
- unsigned long addr = dc21285_base_address(dev);
-
- if (addr)
- asm("str%? %0, [%1, %2]"
- : : "r" (value), "r" (addr), "r" (where));
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops dc21285_ops = {
- dc21285_read_config_byte,
- dc21285_read_config_word,
- dc21285_read_config_dword,
- dc21285_write_config_byte,
- dc21285_write_config_word,
- dc21285_write_config_dword,
-};
-
-static struct timer_list serr_timer;
-static struct timer_list perr_timer;
-
-static void dc21285_enable_error(unsigned long __data)
-{
- switch (__data) {
- case IRQ_PCI_SERR:
- del_timer(&serr_timer);
- break;
-
- case IRQ_PCI_PERR:
- del_timer(&perr_timer);
- break;
- }
-
- enable_irq(__data);
-}
-
-/*
- * Warn on PCI errors.
- */
-static void dc21285_abort_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
- unsigned int cmd;
- unsigned int status;
-
- cmd = *CSR_PCICMD;
- status = cmd >> 16;
- cmd = cmd & 0xffff;
-
- if (status & PCI_STATUS_REC_MASTER_ABORT) {
- printk(KERN_DEBUG "PCI: master abort: ");
- pcibios_report_status(PCI_STATUS_REC_MASTER_ABORT, 1);
- printk("\n");
-
- cmd |= PCI_STATUS_REC_MASTER_ABORT << 16;
- }
-
- if (status & PCI_STATUS_REC_TARGET_ABORT) {
- printk(KERN_DEBUG "PCI: target abort: ");
- pcibios_report_status(PCI_STATUS_SIG_TARGET_ABORT, 1);
- printk("\n");
-
- cmd |= PCI_STATUS_REC_TARGET_ABORT << 16;
- }
-
- *CSR_PCICMD = cmd;
-}
-
-static void dc21285_serr_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct timer_list *timer = dev_id;
- unsigned int cntl;
-
- printk(KERN_DEBUG "PCI: system error received: ");
- pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1);
- printk("\n");
-
- cntl = *CSR_SA110_CNTL & 0xffffdf07;
- *CSR_SA110_CNTL = cntl | SA110_CNTL_RXSERR;
-
- /*
- * back off this interrupt
- */
- disable_irq(irq);
- timer->expires = jiffies + HZ;
- add_timer(timer);
-}
-
-static void dc21285_discard_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
- printk(KERN_DEBUG "PCI: discard timer expired\n");
- *CSR_SA110_CNTL &= 0xffffde07;
-}
-
-static void dc21285_dparity_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
- unsigned int cmd;
-
- printk(KERN_DEBUG "PCI: data parity error detected: ");
- pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1);
- printk("\n");
-
- cmd = *CSR_PCICMD & 0xffff;
- *CSR_PCICMD = cmd | 1 << 24;
-}
-
-static void dc21285_parity_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct timer_list *timer = dev_id;
- unsigned int cmd;
-
- printk(KERN_DEBUG "PCI: parity error detected: ");
- pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1);
- printk("\n");
-
- cmd = *CSR_PCICMD & 0xffff;
- *CSR_PCICMD = cmd | 1 << 31;
-
- /*
- * back off this interrupt
- */
- disable_irq(irq);
- timer->expires = jiffies + HZ;
- add_timer(timer);
-}
-
-void __init dc21285_setup_resources(struct resource **resource)
-{
- struct resource *busmem, *busmempf;
-
- busmem = kmalloc(sizeof(*busmem), GFP_KERNEL);
- busmempf = kmalloc(sizeof(*busmempf), GFP_KERNEL);
- memset(busmem, 0, sizeof(*busmem));
- memset(busmempf, 0, sizeof(*busmempf));
-
- busmem->flags = IORESOURCE_MEM;
- busmem->name = "Footbridge non-prefetch";
- busmempf->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- busmempf->name = "Footbridge prefetch";
-
- allocate_resource(&iomem_resource, busmempf, 0x20000000,
- 0x80000000, 0xffffffff, 0x20000000, NULL, NULL);
- allocate_resource(&iomem_resource, busmem, 0x40000000,
- 0x80000000, 0xffffffff, 0x40000000, NULL, NULL);
-
- resource[0] = &ioport_resource;
- resource[1] = busmem;
- resource[2] = busmempf;
-}
-
-void __init dc21285_init(void *sysdata)
-{
- unsigned int mem_size, mem_mask;
- int cfn_mode;
-
- mem_size = (unsigned int)high_memory - PAGE_OFFSET;
- for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
- if (mem_mask >= mem_size)
- break;
-
- /*
- * These registers need to be set up whether we're the
- * central function or not.
- */
- *CSR_SDRAMBASEMASK = (mem_mask - 1) & 0x0ffc0000;
- *CSR_SDRAMBASEOFFSET = 0;
- *CSR_ROMBASEMASK = 0x80000000;
- *CSR_CSRBASEMASK = 0;
- *CSR_CSRBASEOFFSET = 0;
- *CSR_PCIADDR_EXTN = 0;
-
- cfn_mode = __footbridge_cfn_mode();
-
- printk(KERN_INFO "PCI: DC21285 footbridge, revision %02lX, in "
- "%s mode\n", *CSR_CLASSREV & 0xff, cfn_mode ?
- "central function" : "addin");
-
- if (cfn_mode) {
- static struct resource csrmem, csrio;
-
- csrio.flags = IORESOURCE_IO;
- csrio.name = "Footbridge";
- csrmem.flags = IORESOURCE_MEM;
- csrmem.name = "Footbridge";
-
- allocate_resource(&ioport_resource, &csrio, 128,
- 0xff00, 0xffff, 128, NULL, NULL);
- allocate_resource(&iomem_resource, &csrmem, 128,
- 0xf4000000, 0xf8000000, 128, NULL, NULL);
-
- /*
- * Map our SDRAM at a known address in PCI space, just in case
- * the firmware had other ideas. Using a nonzero base is
- * necessary, since some VGA cards forcefully use PCI addresses
- * in the range 0x000a0000 to 0x000c0000. (eg, S3 cards).
- */
- *CSR_PCICSRBASE = csrmem.start;
- *CSR_PCICSRIOBASE = csrio.start;
- *CSR_PCISDRAMBASE = __virt_to_bus(PAGE_OFFSET);
- *CSR_PCIROMBASE = 0;
- *CSR_PCICMD = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
- PCI_COMMAND_INVALIDATE | PCICMD_ERROR_BITS;
-
- pci_scan_bus(0, &dc21285_ops, sysdata);
-
- /*
- * Clear any existing errors - we aren't
- * interested in historical data...
- */
- *CSR_SA110_CNTL = (*CSR_SA110_CNTL & 0xffffde07) |
- SA110_CNTL_RXSERR;
- *CSR_PCICMD = (*CSR_PCICMD & 0xffff) | PCICMD_ERROR_BITS;
- } else if (footbridge_cfn_mode() != 0) {
- /*
- * If we are not compiled to accept "add-in" mode, then
- * we are using a constant virt_to_bus translation which
- * can not hope to cater for the way the host BIOS has
- * set up the machine.
- */
- panic("PCI: this kernel is compiled for central "
- "function mode only");
- }
-
- /*
- * Initialise PCI error IRQ after we've finished probing
- */
- request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, SA_INTERRUPT, "PCI abort", NULL);
- request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, SA_INTERRUPT, "Discard timer", NULL);
- request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, SA_INTERRUPT, "PCI data parity", NULL);
-
- init_timer(&serr_timer);
- init_timer(&perr_timer);
-
- serr_timer.data = IRQ_PCI_SERR;
- serr_timer.function = dc21285_enable_error;
- perr_timer.data = IRQ_PCI_PERR;
- perr_timer.function = dc21285_enable_error;
-
- request_irq(IRQ_PCI_SERR, dc21285_serr_irq, SA_INTERRUPT,
- "PCI system error", &serr_timer);
- request_irq(IRQ_PCI_PERR, dc21285_parity_irq, SA_INTERRUPT,
- "PCI parity error", &perr_timer);
-
- register_isa_ports(DC21285_PCI_MEM, DC21285_PCI_IO, 0);
-}
+++ /dev/null
-/*
- * linux/arch/arm/kernel/dma-ebsa285.c
- *
- * Copyright (C) 1998 Phil Blundell
- *
- * DMA functions specific to EBSA-285/CATS architectures
- *
- * Changelog:
- * 09-Nov-1998 RMK Split out ISA DMA functions to dma-isa.c
- * 17-Mar-1999 RMK Allow any EBSA285-like architecture to have
- * ISA DMA controllers.
- */
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-
-#include <asm/mach/dma.h>
-#include <asm/hardware/dec21285.h>
-
-#if 0
-static int fb_dma_request(dmach_t channel, dma_t *dma)
-{
- return -EINVAL;
-}
-
-static void fb_dma_enable(dmach_t channel, dma_t *dma)
-{
-}
-
-static void fb_dma_disable(dmach_t channel, dma_t *dma)
-{
-}
-
-static struct dma_ops fb_dma_ops = {
- type: "fb",
- request: fb_dma_request,
- enable: fb_dma_enable,
- disable: fb_dma_disable,
-};
-#endif
-
-void __init arch_dma_init(dma_t *dma)
-{
-#if 0
- dma[_DC21285_DMA(0)].d_ops = &fb_dma_ops;
- dma[_DC21285_DMA(1)].d_ops = &fb_dma_ops;
-#endif
-#ifdef CONFIG_ISA_DMA
- if (footbridge_cfn_mode())
- isa_init_dma(dma + _ISA_DMA(0));
-#endif
-}
+++ /dev/null
-/*
- * linux/arch/arm/kernel/dma-rpc.c
- *
- * Copyright (C) 1998 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * DMA functions specific to RiscPC architecture
- */
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mman.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-
-#include <asm/page.h>
-#include <asm/dma.h>
-#include <asm/fiq.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/hardware.h>
-#include <asm/uaccess.h>
-
-#include <asm/mach/dma.h>
-#include <asm/hardware/iomd.h>
-
-#if 0
-typedef enum {
- dma_size_8 = 1,
- dma_size_16 = 2,
- dma_size_32 = 4,
- dma_size_128 = 16
-} dma_size_t;
-
-typedef struct {
- dma_size_t transfersize;
-} dma_t;
-#endif
-
-#define TRANSFER_SIZE 2
-
-#define CURA (0)
-#define ENDA (IOMD_IO0ENDA - IOMD_IO0CURA)
-#define CURB (IOMD_IO0CURB - IOMD_IO0CURA)
-#define ENDB (IOMD_IO0ENDB - IOMD_IO0CURA)
-#define CR (IOMD_IO0CR - IOMD_IO0CURA)
-#define ST (IOMD_IO0ST - IOMD_IO0CURA)
-
-#define state_prog_a 0
-#define state_wait_a 1
-#define state_wait_b 2
-
-static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
-{
- unsigned long end, offset, flags = 0;
-
- if (dma->sg) {
- sg->dma_address = dma->sg->dma_address;
- offset = sg->dma_address & ~PAGE_MASK;
-
- end = offset + dma->sg->length;
-
- if (end > PAGE_SIZE)
- end = PAGE_SIZE;
-
- if (offset + (int) TRANSFER_SIZE > end)
- flags |= DMA_END_L;
-
- sg->length = end - TRANSFER_SIZE;
-
- dma->sg->length -= end - offset;
- dma->sg->dma_address += end - offset;
-
- if (dma->sg->length == 0) {
- if (dma->sgcount > 1) {
- dma->sg++;
- dma->sgcount--;
- } else {
- dma->sg = NULL;
- flags |= DMA_END_S;
- }
- }
- } else {
- flags = DMA_END_S | DMA_END_L;
- sg->dma_address = 0;
- sg->length = 0;
- }
-
- sg->length |= flags;
-}
-
-static inline void iomd_setup_dma_a(struct scatterlist *sg, dma_t *dma)
-{
- iomd_writel(sg->dma_address, dma->dma_base + CURA);
- iomd_writel(sg->length, dma->dma_base + ENDA);
-}
-
-static inline void iomd_setup_dma_b(struct scatterlist *sg, dma_t *dma)
-{
- iomd_writel(sg->dma_address, dma->dma_base + CURB);
- iomd_writel(sg->length, dma->dma_base + ENDB);
-}
-
-static void iomd_dma_handle(int irq, void *dev_id, struct pt_regs *regs)
-{
- dma_t *dma = (dma_t *)dev_id;
- unsigned int status = 0, no_buffer = dma->sg == NULL;
-
- do {
- switch (dma->state) {
- case state_prog_a:
- iomd_get_next_sg(&dma->cur_sg, dma);
- iomd_setup_dma_a(&dma->cur_sg, dma);
- dma->state = state_wait_a;
-
- case state_wait_a:
- status = iomd_readb(dma->dma_base + ST);
- switch (status & (DMA_ST_OFL|DMA_ST_INT|DMA_ST_AB)) {
- case DMA_ST_OFL|DMA_ST_INT:
- iomd_get_next_sg(&dma->cur_sg, dma);
- iomd_setup_dma_a(&dma->cur_sg, dma);
- break;
-
- case DMA_ST_INT:
- iomd_get_next_sg(&dma->cur_sg, dma);
- iomd_setup_dma_b(&dma->cur_sg, dma);
- dma->state = state_wait_b;
- break;
-
- case DMA_ST_OFL|DMA_ST_INT|DMA_ST_AB:
- iomd_setup_dma_b(&dma->cur_sg, dma);
- dma->state = state_wait_b;
- break;
- }
- break;
-
- case state_wait_b:
- status = iomd_readb(dma->dma_base + ST);
- switch (status & (DMA_ST_OFL|DMA_ST_INT|DMA_ST_AB)) {
- case DMA_ST_OFL|DMA_ST_INT|DMA_ST_AB:
- iomd_get_next_sg(&dma->cur_sg, dma);
- iomd_setup_dma_b(&dma->cur_sg, dma);
- break;
-
- case DMA_ST_INT|DMA_ST_AB:
- iomd_get_next_sg(&dma->cur_sg, dma);
- iomd_setup_dma_a(&dma->cur_sg, dma);
- dma->state = state_wait_a;
- break;
-
- case DMA_ST_OFL|DMA_ST_INT:
- iomd_setup_dma_a(&dma->cur_sg, dma);
- dma->state = state_wait_a;
- break;
- }
- break;
- }
- } while (dma->sg && (status & DMA_ST_INT));
-
- if (no_buffer)
- disable_irq(irq);
-}
-
-static int iomd_request_dma(dmach_t channel, dma_t *dma)
-{
- return request_irq(dma->dma_irq, iomd_dma_handle,
- SA_INTERRUPT, dma->device_id, dma);
-}
-
-static void iomd_free_dma(dmach_t channel, dma_t *dma)
-{
- free_irq(dma->dma_irq, dma);
-}
-
-static void iomd_enable_dma(dmach_t channel, dma_t *dma)
-{
- unsigned long dma_base = dma->dma_base;
- unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;
-
- if (dma->invalid) {
- dma->invalid = 0;
-
- /*
- * Cope with ISA-style drivers which expect cache
- * coherence.
- */
- if (!dma->using_sg) {
- dma->buf.dma_address = pci_map_single(NULL,
- dma->buf.address, dma->buf.length,
- dma->dma_mode == DMA_MODE_READ ?
- PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
- }
-
- iomd_writeb(DMA_CR_C, dma_base + CR);
- dma->state = state_prog_a;
- }
-
- if (dma->dma_mode == DMA_MODE_READ)
- ctrl |= DMA_CR_D;
-
- iomd_writeb(ctrl, dma_base + CR);
- enable_irq(dma->dma_irq);
-}
-
-static void iomd_disable_dma(dmach_t channel, dma_t *dma)
-{
- unsigned long dma_base = dma->dma_base;
- unsigned int ctrl;
-
- disable_irq(dma->dma_irq);
- ctrl = iomd_readb(dma_base + CR);
- iomd_writeb(ctrl & ~DMA_CR_E, dma_base + CR);
-}
-
-static int iomd_set_dma_speed(dmach_t channel, dma_t *dma, int cycle)
-{
- int tcr, speed;
-
- if (cycle < 188)
- speed = 3;
- else if (cycle <= 250)
- speed = 2;
- else if (cycle < 438)
- speed = 1;
- else
- speed = 0;
-
- tcr = iomd_readb(IOMD_DMATCR);
- speed &= 3;
-
- switch (channel) {
- case DMA_0:
- tcr = (tcr & ~0x03) | speed;
- break;
-
- case DMA_1:
- tcr = (tcr & ~0x0c) | (speed << 2);
- break;
-
- case DMA_2:
- tcr = (tcr & ~0x30) | (speed << 4);
- break;
-
- case DMA_3:
- tcr = (tcr & ~0xc0) | (speed << 6);
- break;
-
- default:
- break;
- }
-
- iomd_writeb(tcr, IOMD_DMATCR);
-
- return speed;
-}
-
-static struct dma_ops iomd_dma_ops = {
- type: "IOMD",
- request: iomd_request_dma,
- free: iomd_free_dma,
- enable: iomd_enable_dma,
- disable: iomd_disable_dma,
- setspeed: iomd_set_dma_speed,
-};
-
-static struct fiq_handler fh = {
- name: "floppydma"
-};
-
-static void floppy_enable_dma(dmach_t channel, dma_t *dma)
-{
- void *fiqhandler_start;
- unsigned int fiqhandler_length;
- struct pt_regs regs;
-
- if (dma->dma_mode == DMA_MODE_READ) {
- extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
- fiqhandler_start = &floppy_fiqin_start;
- fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
- } else {
- extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
- fiqhandler_start = &floppy_fiqout_start;
- fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
- }
-
- regs.ARM_r9 = dma->buf.length;
- regs.ARM_r10 = (unsigned long)dma->buf.address;
- regs.ARM_fp = FLOPPYDMA_BASE;
-
- if (claim_fiq(&fh)) {
- printk("floppydma: couldn't claim FIQ.\n");
- return;
- }
-
- set_fiq_handler(fiqhandler_start, fiqhandler_length);
-	set_fiq_regs(&regs);
- enable_fiq(dma->dma_irq);
-}
-
-static void floppy_disable_dma(dmach_t channel, dma_t *dma)
-{
- disable_fiq(dma->dma_irq);
- release_fiq(&fh);
-}
-
-static int floppy_get_residue(dmach_t channel, dma_t *dma)
-{
- struct pt_regs regs;
-	get_fiq_regs(&regs);
- return regs.ARM_r9;
-}
-
-static struct dma_ops floppy_dma_ops = {
- type: "FIQDMA",
- enable: floppy_enable_dma,
- disable: floppy_disable_dma,
- residue: floppy_get_residue,
-};
-
-/*
- * This is virtual DMA - we don't need anything here.
- */
-static void sound_enable_disable_dma(dmach_t channel, dma_t *dma)
-{
-}
-
-static struct dma_ops sound_dma_ops = {
- type: "VIRTUAL",
- enable: sound_enable_disable_dma,
- disable: sound_enable_disable_dma,
-};
-
-void __init arch_dma_init(dma_t *dma)
-{
- iomd_writeb(0, IOMD_IO0CR);
- iomd_writeb(0, IOMD_IO1CR);
- iomd_writeb(0, IOMD_IO2CR);
- iomd_writeb(0, IOMD_IO3CR);
-
- iomd_writeb(0xa0, IOMD_DMATCR);
-
- dma[DMA_0].dma_base = IOMD_IO0CURA;
- dma[DMA_0].dma_irq = IRQ_DMA0;
- dma[DMA_0].d_ops = &iomd_dma_ops;
- dma[DMA_1].dma_base = IOMD_IO1CURA;
- dma[DMA_1].dma_irq = IRQ_DMA1;
- dma[DMA_1].d_ops = &iomd_dma_ops;
- dma[DMA_2].dma_base = IOMD_IO2CURA;
- dma[DMA_2].dma_irq = IRQ_DMA2;
- dma[DMA_2].d_ops = &iomd_dma_ops;
- dma[DMA_3].dma_base = IOMD_IO3CURA;
- dma[DMA_3].dma_irq = IRQ_DMA3;
- dma[DMA_3].d_ops = &iomd_dma_ops;
- dma[DMA_S0].dma_base = IOMD_SD0CURA;
- dma[DMA_S0].dma_irq = IRQ_DMAS0;
- dma[DMA_S0].d_ops = &iomd_dma_ops;
- dma[DMA_S1].dma_base = IOMD_SD1CURA;
- dma[DMA_S1].dma_irq = IRQ_DMAS1;
- dma[DMA_S1].d_ops = &iomd_dma_ops;
- dma[DMA_VIRTUAL_FLOPPY].dma_irq = FIQ_FLOPPYDATA;
- dma[DMA_VIRTUAL_FLOPPY].d_ops = &floppy_dma_ops;
- dma[DMA_VIRTUAL_SOUND].d_ops = &sound_dma_ops;
-
- /*
- * Setup DMA channels 2,3 to be for podules
- * and channels 0,1 for internal devices
- */
- iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
-}
+++ /dev/null
-/*
- * linux/arch/arm/kernel/ftv-pci.c
- *
- * PCI bios-type initialisation for PCI machines
- *
- * Bits taken from various places.
- */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-
-#include <asm/irq.h>
-#include <asm/mach/pci.h>
-
-/*
- * Owing to a PCB cockup, issue A backplanes are wired thus:
- *
- * Slot 1 2 3 4 5 Bridge S1 S2 S3 S4
- * IRQ D C B A A C B A D
- * A D C B B D C B A
- * B A D C C A D C B
- * C B A D D B A D C
- *
- * ID A31 A30 A29 A28 A27 A26 DEV4 DEV5 DEV6 DEV7
- *
- * Actually, this isn't too bad, because with the processor card
- * in slot 5 on the primary bus, the IRQs rotate on both sides
- * as you'd expect.
- */
-
-static int irqmap_ftv[] __initdata = { IRQ_PCI_D, IRQ_PCI_C, IRQ_PCI_B, IRQ_PCI_A };
-
-static int __init ftv_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
-{
- if (slot > 0x10)
- slot--;
- return irqmap_ftv[(slot - pin) & 3];
-}
-
-static u8 __init ftv_swizzle(struct pci_dev *dev, u8 *pin)
-{
- return PCI_SLOT(dev->devfn);
-}
-
-/* ftv host-specific stuff */
-struct hw_pci ftv_pci __initdata = {
- init: plx90x0_init,
- swizzle: ftv_swizzle,
- map_irq: ftv_map_irq,
-};
-
+++ /dev/null
-/*
- * linux/arch/arm/kernel/head-armv.S
- *
- * Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * 32-bit kernel startup code for all architectures
- */
-#include <linux/config.h>
-#include <linux/linkage.h>
-
-#include <asm/assembler.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#define K(a,b,c) ((a) << 24 | (b) << 12 | (c))
-
-/*
- * We place the page tables 16K below TEXTADDR. Therefore, we must make sure
- * that TEXTADDR is correctly set. Currently, we expect the least significant
- * "short" to be 0x8000, but we could probably relax this restriction to
- * TEXTADDR > PAGE_OFFSET + 0x4000
- *
- * Note that swapper_pg_dir is the virtual address of the page tables, and
- * pgtbl gives us a position-independent reference to these tables. We can
- * do this because stext == TEXTADDR
- *
- * swapper_pg_dir, pgtbl and krnladr are all closely related.
- */
-#if (TEXTADDR & 0xffff) != 0x8000
-#error TEXTADDR must start at 0xXXXX8000
-#endif
-
- .globl SYMBOL_NAME(swapper_pg_dir)
- .equ SYMBOL_NAME(swapper_pg_dir), TEXTADDR - 0x4000
-
- .macro pgtbl, reg, rambase
- adr \reg, stext
- sub \reg, \reg, #0x4000
- .endm
-
-/*
- * Since the page table is closely related to the kernel start address, we
- * can convert the page table base address to the base address of the section
- * containing both.
- */
- .macro krnladr, rd, pgtable, rambase
- bic \rd, \pgtable, #0x000ff000
- .endm
-
-/*
- * Kernel startup entry point.
- *
- * The rules are:
- * r0 - should be 0
- * r1 - unique architecture number
- * MMU - off
- * I-cache - on or off
- * D-cache - off
- *
- * See linux/arch/arm/tools/mach-types for the complete list of numbers
- * for r1.
- */
- .section ".text.init",#alloc,#execinstr
- .type stext, #function
-ENTRY(stext)
- mov r12, r0
-/*
- * NOTE! Any code which is placed here should be done for one of
- * the following reasons:
- *
- * 1. Compatibility with old production boot firmware (ie, users
- * actually have and are booting the kernel with the old firmware)
- * and therefore will be eventually removed.
- * 2. Cover the case when there is no boot firmware. This is not
- * ideal, but in this case, it should ONLY set r0 and r1 to the
- * appropriate value.
- */
-#if defined(CONFIG_ARCH_NETWINDER)
-/*
- * Compatibility cruft for old NetWinder NeTTroms. This
- * code is currently scheduled for destruction in 2.5.xx
- */
- .rept 8
- mov r0, r0
- .endr
-
- adr r2, 1f
- ldmdb r2, {r7, r8}
- and r3, r2, #0xc000
- teq r3, #0x8000
- beq __entry
- bic r3, r2, #0xc000
- orr r3, r3, #0x8000
- mov r0, r3
- mov r4, #64
- sub r5, r8, r7
- b 1f
-
- .word _stext
- .word __bss_start
-
-1:
- .rept 4
- ldmia r2!, {r6, r7, r8, r9}
- stmia r3!, {r6, r7, r8, r9}
- .endr
- subs r4, r4, #64
- bcs 1b
- movs r4, r5
- mov r5, #0
- movne pc, r0
-
- mov r1, #MACH_TYPE_NETWINDER @ (will go in 2.5)
- mov r12, #2 << 24 @ scheduled for removal in 2.5.xx
- orr r12, r12, #5 << 12
-__entry:
-#endif
-#if defined(CONFIG_ARCH_L7200)
-/*
- * FIXME - No bootloader, so manually set 'r1' with our architecture number.
- */
- mov r1, #MACH_TYPE_L7200
-#endif
-
- mov r0, #F_BIT | I_BIT | MODE_SVC @ make sure svc mode
- msr cpsr_c, r0 @ and all irqs disabled
- bl __lookup_processor_type
- teq r10, #0 @ invalid processor?
- moveq r0, #'p' @ yes, error 'p'
- beq __error
- bl __lookup_architecture_type
- teq r7, #0 @ invalid architecture?
- moveq r0, #'a' @ yes, error 'a'
- beq __error
- bl __create_page_tables
- adr lr, __ret @ return address
- add pc, r10, #12 @ initialise processor
- @ (return control reg)
-
- .type __switch_data, %object
-__switch_data: .long __mmap_switched
- .long SYMBOL_NAME(compat)
- .long SYMBOL_NAME(__bss_start)
- .long SYMBOL_NAME(_end)
- .long SYMBOL_NAME(processor_id)
- .long SYMBOL_NAME(__machine_arch_type)
- .long SYMBOL_NAME(cr_alignment)
- .long SYMBOL_NAME(init_task_union)+8192
-
- .type __ret, %function
-__ret: ldr lr, __switch_data
- mcr p15, 0, r0, c1, c0
- mov r0, r0
- mov r0, r0
- mov r0, r0
- mov pc, lr
-
- /*
- * This code follows on after the page
- * table switch and jump above.
- *
- * r0 = processor control register
- * r1 = machine ID
- * r9 = processor ID
- */
- .align 5
-__mmap_switched:
- adr r3, __switch_data + 4
- ldmia r3, {r2, r4, r5, r6, r7, r8, sp}@ r2 = compat
- @ sp = stack pointer
- str r12, [r2]
-
- mov fp, #0 @ Clear BSS (and zero fp)
-1: cmp r4, r5
- strcc fp, [r4],#4
- bcc 1b
-
- str r9, [r6] @ Save processor ID
- str r1, [r7] @ Save machine type
-#ifdef CONFIG_ALIGNMENT_TRAP
- orr r0, r0, #2 @ ...........A.
-#endif
- bic r2, r0, #2 @ Clear 'A' bit
- stmia r8, {r0, r2} @ Save control register values
- b SYMBOL_NAME(start_kernel)
-
-
-
-/*
- * Set up the initial page tables. We only set up the barest
- * amount required to get the kernel running, which
- * generally means mapping in the kernel code.
- *
- * We only map in 4MB of RAM, which should be sufficient in
- * all cases.
- *
- * r5 = physical address of start of RAM
- * r6 = physical IO address
- * r7 = byte offset into page tables for IO
- * r8 = page table flags
- */
-__create_page_tables:
- pgtbl r4, r5 @ page table address
-
- /*
- * Clear the 16K level 1 swapper page table
- */
- mov r0, r4
- mov r3, #0
- add r2, r0, #0x4000
-1: str r3, [r0], #4
- str r3, [r0], #4
- str r3, [r0], #4
- str r3, [r0], #4
- teq r0, r2
- bne 1b
-
- /*
- * Create identity mapping for first MB of kernel to
- * cater for the MMU enable. This identity mapping
- * will be removed by paging_init()
- */
- krnladr r2, r4, r5 @ start of kernel
- add r3, r8, r2 @ flags + kernel base
- str r3, [r4, r2, lsr #18] @ identity mapping
-
- /*
- * Now setup the pagetables for our kernel direct
- * mapped region. We round TEXTADDR down to the
- * nearest megabyte boundary.
- */
- add r0, r4, #(TEXTADDR & 0xff000000) >> 18 @ start of kernel
- bic r2, r3, #0x00f00000
- str r2, [r0] @ PAGE_OFFSET + 0MB
- add r0, r0, #(TEXTADDR & 0x00f00000) >> 18
- str r3, [r0], #4 @ KERNEL + 0MB
- add r3, r3, #1 << 20
- str r3, [r0], #4 @ KERNEL + 1MB
- add r3, r3, #1 << 20
- str r3, [r0], #4 @ KERNEL + 2MB
- add r3, r3, #1 << 20
- str r3, [r0], #4 @ KERNEL + 3MB
-
- /*
- * Ensure that the first section of RAM is present.
- * we assume that:
- * 1. the RAM is aligned to a 32MB boundary
- * 2. the kernel is executing in the same 32MB chunk
- * as the start of RAM.
- */
- bic r0, r0, #0x01f00000 >> 18 @ round down
- and r2, r5, #0xfe000000 @ round down
- add r3, r8, r2 @ flags + rambase
- str r3, [r0]
-
- bic r8, r8, #0x0c @ turn off cacheable
- @ and bufferable bits
-#ifdef CONFIG_DEBUG_LL
- /*
- * Map in IO space for serial debugging.
- * This allows debug messages to be output
- * via a serial console before paging_init.
- */
- add r0, r4, r7
- rsb r3, r7, #0x4000 @ PTRS_PER_PGD*sizeof(long)
- cmp r3, #0x0800
- addge r2, r0, #0x0800
- addlt r2, r0, r3
- orr r3, r6, r8
-1: str r3, [r0], #4
- add r3, r3, #1 << 20
- teq r0, r2
- bne 1b
-#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
- /*
- * If we're using the NetWinder, we need to map in
- * the 16550-type serial port for the debug messages
- */
- teq r1, #MACH_TYPE_NETWINDER
- teqne r1, #MACH_TYPE_CATS
- bne 1f
- add r0, r4, #0x3fc0
- mov r3, #0x7c000000
- orr r3, r3, r8
- str r3, [r0], #4
- add r3, r3, #1 << 20
- str r3, [r0], #4
-1:
-#endif
-#endif
-#ifdef CONFIG_ARCH_RPC
- /*
- * Map in screen at 0x02000000 & SCREEN2_BASE
- * Similar reasons here - for debug. This is
- * only for Acorn RiscPC architectures.
- */
- add r0, r4, #0x80 @ 02000000
- mov r3, #0x02000000
- orr r3, r3, r8
- str r3, [r0]
- add r0, r4, #0x3600 @ d8000000
- str r3, [r0]
-#endif
- mov pc, lr
-
-
-
-/*
- * Exception handling. Something went wrong and we can't
- * proceed. We ought to tell the user, but since we
- * don't have any guarantee that we're even running on
- * the right architecture, we do virtually nothing.
- * r0 = ascii error character:
- * a = invalid architecture
- * p = invalid processor
- * i = invalid calling convention
- *
- * Generally, only serious errors cause this.
- */
-__error:
-#ifdef CONFIG_DEBUG_LL
- mov r8, r0 @ preserve r0
- adr r0, err_str
- bl printascii
- mov r0, r8
- bl printch
-#endif
-#ifdef CONFIG_ARCH_RPC
-/*
- * Turn the screen red on an error - RiscPC only.
- */
- mov r0, #0x02000000
- mov r3, #0x11
- orr r3, r3, r3, lsl #8
- orr r3, r3, r3, lsl #16
- str r3, [r0], #4
- str r3, [r0], #4
- str r3, [r0], #4
- str r3, [r0], #4
-#endif
-1: mov r0, r0
- b 1b
-
-#ifdef CONFIG_DEBUG_LL
-err_str: .asciz "\nError: "
- .align
-#endif
-
-/*
- * Read processor ID register (CP#15, CR0), and look up in the linker-built
- * supported processor list. Note that we can't use the absolute addresses
- * for the __proc_info lists since we aren't running with the MMU on
- * (and therefore, we are not in the correct address space). We have to
- * calculate the offset.
- *
- * Returns:
- * r5, r6, r7 corrupted
- * r8 = page table flags
- * r9 = processor ID
- * r10 = pointer to processor structure
- */
-__lookup_processor_type:
- adr r5, 2f
- ldmia r5, {r7, r9, r10}
- sub r5, r5, r10 @ convert addresses
- add r7, r7, r5 @ to our address space
- add r10, r9, r5
- mrc p15, 0, r9, c0, c0 @ get processor id
-1: ldmia r10, {r5, r6, r8} @ value, mask, mmuflags
- and r6, r6, r9 @ mask wanted bits
- teq r5, r6
- moveq pc, lr
- add r10, r10, #36 @ sizeof(proc_info_list)
- cmp r10, r7
- blt 1b
- mov r10, #0 @ unknown processor
- mov pc, lr
-
-/*
- * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
- * more information about the __proc_info and __arch_info structures.
- */
-2: .long __proc_info_end
- .long __proc_info_begin
- .long 2b
- .long __arch_info_begin
- .long __arch_info_end
-
-/*
- * Look up the machine architecture in the linker-built list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space). We have to calculate the offset.
- *
- * r1 = machine architecture number
- * Returns:
- * r2, r3, r4 corrupted
- * r5 = physical start address of RAM
- * r6 = physical address of IO
- * r7 = byte offset into page tables for IO
- */
-__lookup_architecture_type:
- adr r4, 2b
- ldmia r4, {r2, r3, r5, r6, r7} @ throw away r2, r3
- sub r5, r4, r5 @ convert addresses
- add r4, r6, r5 @ to our address space
- add r7, r7, r5
-1: ldr r5, [r4] @ get machine type
- teq r5, r1
- beq 2f
- add r4, r4, #SIZEOF_MACHINE_DESC
- cmp r4, r7
- blt 1b
- mov r7, #0 @ unknown architecture
- mov pc, lr
-2: ldmib r4, {r5, r6, r7} @ found, get results
- mov pc, lr
--- /dev/null
+/*
+ * linux/arch/arm/kernel/head-armv.S
+ *
+ * Copyright (C) 1994-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 32-bit kernel startup code for all architectures
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#define K(a,b,c) ((a) << 24 | (b) << 12 | (c))
+
+/*
+ * We place the page tables 16K below TEXTADDR. Therefore, we must make sure
+ * that TEXTADDR is correctly set. Currently, we expect the least significant
+ * "short" to be 0x8000, but we could probably relax this restriction to
+ * TEXTADDR > PAGE_OFFSET + 0x4000
+ *
+ * Note that swapper_pg_dir is the virtual address of the page tables, and
+ * pgtbl gives us a position-independent reference to these tables. We can
+ * do this because stext == TEXTADDR
+ *
+ * swapper_pg_dir, pgtbl and krnladr are all closely related.
+ */
+#if (TEXTADDR & 0xffff) != 0x8000
+#error TEXTADDR must start at 0xXXXX8000
+#endif
+
+ .globl SYMBOL_NAME(swapper_pg_dir)
+ .equ SYMBOL_NAME(swapper_pg_dir), TEXTADDR - 0x4000
+
+ .macro pgtbl, reg, rambase
+ adr \reg, stext
+ sub \reg, \reg, #0x4000
+ .endm
+
+/*
+ * Since the page table is closely related to the kernel start address, we
+ * can convert the page table base address to the base address of the section
+ * containing both.
+ */
+ .macro krnladr, rd, pgtable, rambase
+ bic \rd, \pgtable, #0x000ff000
+ .endm
+
+/*
+ * Kernel startup entry point.
+ *
+ * The rules are:
+ * r0 - should be 0
+ * r1 - unique architecture number
+ * MMU - off
+ * I-cache - on or off
+ * D-cache - off
+ *
+ * See linux/arch/arm/tools/mach-types for the complete list of numbers
+ * for r1.
+ */
+ .section ".text.init",#alloc,#execinstr
+ .type stext, #function
+ENTRY(stext)
+ mov r12, r0
+/*
+ * NOTE! Any code which is placed here should be done for one of
+ * the following reasons:
+ *
+ * 1. Compatibility with old production boot firmware (ie, users
+ * actually have and are booting the kernel with the old firmware)
+ * and therefore will be eventually removed.
+ * 2. Cover the case when there is no boot firmware. This is not
+ * ideal, but in this case, it should ONLY set r0 and r1 to the
+ * appropriate value.
+ */
+#if defined(CONFIG_ARCH_NETWINDER)
+/*
+ * Compatibility cruft for old NetWinder NeTTroms. This
+ * code is currently scheduled for destruction in 2.5.xx
+ */
+ .rept 8
+ mov r0, r0
+ .endr
+
+ adr r2, 1f
+ ldmdb r2, {r7, r8}
+ and r3, r2, #0xc000
+ teq r3, #0x8000
+ beq __entry
+ bic r3, r2, #0xc000
+ orr r3, r3, #0x8000
+ mov r0, r3
+ mov r4, #64
+ sub r5, r8, r7
+ b 1f
+
+ .word _stext
+ .word __bss_start
+
+1:
+ .rept 4
+ ldmia r2!, {r6, r7, r8, r9}
+ stmia r3!, {r6, r7, r8, r9}
+ .endr
+ subs r4, r4, #64
+ bcs 1b
+ movs r4, r5
+ mov r5, #0
+ movne pc, r0
+
+ mov r1, #MACH_TYPE_NETWINDER @ (will go in 2.5)
+ mov r12, #2 << 24 @ scheduled for removal in 2.5.xx
+ orr r12, r12, #5 << 12
+__entry:
+#endif
+#if defined(CONFIG_ARCH_L7200)
+/*
+ * FIXME - No bootloader, so manually set 'r1' with our architecture number.
+ */
+ mov r1, #MACH_TYPE_L7200
+#endif
+
+ mov r0, #F_BIT | I_BIT | MODE_SVC @ make sure svc mode
+ msr cpsr_c, r0 @ and all irqs disabled
+ bl __lookup_processor_type
+ teq r10, #0 @ invalid processor?
+ moveq r0, #'p' @ yes, error 'p'
+ beq __error
+ bl __lookup_architecture_type
+ teq r7, #0 @ invalid architecture?
+ moveq r0, #'a' @ yes, error 'a'
+ beq __error
+ bl __create_page_tables
+ adr lr, __ret @ return address
+ add pc, r10, #12 @ initialise processor
+ @ (return control reg)
+
+ .type __switch_data, %object
+__switch_data: .long __mmap_switched
+ .long SYMBOL_NAME(compat)
+ .long SYMBOL_NAME(__bss_start)
+ .long SYMBOL_NAME(_end)
+ .long SYMBOL_NAME(processor_id)
+ .long SYMBOL_NAME(__machine_arch_type)
+ .long SYMBOL_NAME(cr_alignment)
+ .long SYMBOL_NAME(init_task_union)+8192
+
+ .type __ret, %function
+__ret: ldr lr, __switch_data
+ mcr p15, 0, r0, c1, c0
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+ mov pc, lr
+
+ /*
+ * This code follows on after the page
+ * table switch and jump above.
+ *
+ * r0 = processor control register
+ * r1 = machine ID
+ * r9 = processor ID
+ */
+ .align 5
+__mmap_switched:
+ adr r3, __switch_data + 4
+ ldmia r3, {r2, r4, r5, r6, r7, r8, sp}@ r2 = compat
+ @ sp = stack pointer
+ str r12, [r2]
+
+ mov fp, #0 @ Clear BSS (and zero fp)
+1: cmp r4, r5
+ strcc fp, [r4],#4
+ bcc 1b
+
+ str r9, [r6] @ Save processor ID
+ str r1, [r7] @ Save machine type
+#ifdef CONFIG_ALIGNMENT_TRAP
+ orr r0, r0, #2 @ ...........A.
+#endif
+ bic r2, r0, #2 @ Clear 'A' bit
+ stmia r8, {r0, r2} @ Save control register values
+ b SYMBOL_NAME(start_kernel)
+
+
+
+/*
+ * Set up the initial page tables. We only set up the barest
+ * amount required to get the kernel running, which
+ * generally means mapping in the kernel code.
+ *
+ * We only map in 4MB of RAM, which should be sufficient in
+ * all cases.
+ *
+ * r5 = physical address of start of RAM
+ * r6 = physical IO address
+ * r7 = byte offset into page tables for IO
+ * r8 = page table flags
+ */
+__create_page_tables:
+ pgtbl r4, r5 @ page table address
+
+ /*
+ * Clear the 16K level 1 swapper page table
+ */
+ mov r0, r4
+ mov r3, #0
+ add r2, r0, #0x4000
+1: str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+ teq r0, r2
+ bne 1b
+
+ /*
+ * Create identity mapping for first MB of kernel to
+ * cater for the MMU enable. This identity mapping
+ * will be removed by paging_init()
+ */
+ krnladr r2, r4, r5 @ start of kernel
+ add r3, r8, r2 @ flags + kernel base
+ str r3, [r4, r2, lsr #18] @ identity mapping
+
+ /*
+ * Now setup the pagetables for our kernel direct
+ * mapped region. We round TEXTADDR down to the
+ * nearest megabyte boundary.
+ */
+ add r0, r4, #(TEXTADDR & 0xff000000) >> 18 @ start of kernel
+ bic r2, r3, #0x00f00000
+ str r2, [r0] @ PAGE_OFFSET + 0MB
+ add r0, r0, #(TEXTADDR & 0x00f00000) >> 18
+ str r3, [r0], #4 @ KERNEL + 0MB
+ add r3, r3, #1 << 20
+ str r3, [r0], #4 @ KERNEL + 1MB
+ add r3, r3, #1 << 20
+ str r3, [r0], #4 @ KERNEL + 2MB
+ add r3, r3, #1 << 20
+ str r3, [r0], #4 @ KERNEL + 3MB
+
+ /*
+ * Ensure that the first section of RAM is present.
+ * we assume that:
+ * 1. the RAM is aligned to a 32MB boundary
+ * 2. the kernel is executing in the same 32MB chunk
+ * as the start of RAM.
+ */
+ bic r0, r0, #0x01f00000 >> 18 @ round down
+ and r2, r5, #0xfe000000 @ round down
+ add r3, r8, r2 @ flags + rambase
+ str r3, [r0]
+
+ bic r8, r8, #0x0c @ turn off cacheable
+ @ and bufferable bits
+#ifdef CONFIG_DEBUG_LL
+ /*
+ * Map in IO space for serial debugging.
+ * This allows debug messages to be output
+ * via a serial console before paging_init.
+ */
+ add r0, r4, r7
+ rsb r3, r7, #0x4000 @ PTRS_PER_PGD*sizeof(long)
+ cmp r3, #0x0800
+ addge r2, r0, #0x0800
+ addlt r2, r0, r3
+ orr r3, r6, r8
+1: str r3, [r0], #4
+ add r3, r3, #1 << 20
+ teq r0, r2
+ bne 1b
+#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
+ /*
+ * If we're using the NetWinder, we need to map in
+ * the 16550-type serial port for the debug messages
+ */
+ teq r1, #MACH_TYPE_NETWINDER
+ teqne r1, #MACH_TYPE_CATS
+ bne 1f
+ add r0, r4, #0x3fc0
+ mov r3, #0x7c000000
+ orr r3, r3, r8
+ str r3, [r0], #4
+ add r3, r3, #1 << 20
+ str r3, [r0], #4
+1:
+#endif
+#endif
+#ifdef CONFIG_ARCH_RPC
+ /*
+ * Map in screen at 0x02000000 & SCREEN2_BASE
+ * Similar reasons here - for debug. This is
+ * only for Acorn RiscPC architectures.
+ */
+ add r0, r4, #0x80 @ 02000000
+ mov r3, #0x02000000
+ orr r3, r3, r8
+ str r3, [r0]
+ add r0, r4, #0x3600 @ d8000000
+ str r3, [r0]
+#endif
+ mov pc, lr
+
+
+
+/*
+ * Exception handling. Something went wrong and we can't
+ * proceed. We ought to tell the user, but since we
+ * don't have any guarantee that we're even running on
+ * the right architecture, we do virtually nothing.
+ * r0 = ascii error character:
+ * a = invalid architecture
+ * p = invalid processor
+ * i = invalid calling convention
+ *
+ * Generally, only serious errors cause this.
+ */
+__error:
+#ifdef CONFIG_DEBUG_LL
+ mov r8, r0 @ preserve r0
+ adr r0, err_str
+ bl printascii
+ mov r0, r8
+ bl printch
+#endif
+#ifdef CONFIG_ARCH_RPC
+/*
+ * Turn the screen red on an error - RiscPC only.
+ */
+ mov r0, #0x02000000
+ mov r3, #0x11
+ orr r3, r3, r3, lsl #8
+ orr r3, r3, r3, lsl #16
+ str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+ str r3, [r0], #4
+#endif
+1: mov r0, r0
+ b 1b
+
+#ifdef CONFIG_DEBUG_LL
+err_str: .asciz "\nError: "
+ .align
+#endif
+
+/*
+ * Read processor ID register (CP#15, CR0), and look up in the linker-built
+ * supported processor list. Note that we can't use the absolute addresses
+ * for the __proc_info lists since we aren't running with the MMU on
+ * (and therefore, we are not in the correct address space). We have to
+ * calculate the offset.
+ *
+ * Returns:
+ * r5, r6, r7 corrupted
+ * r8 = page table flags
+ * r9 = processor ID
+ * r10 = pointer to processor structure
+ */
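+
+/*
+ * How the address fixup works: "adr r5, 2f" yields the run-time
+ * address of the literal words at 2f, while the third word stored
+ * there ("2b") is the link-time address of the same spot, so
+ * r5 - r10 is the offset to add to any link-time address.
+ */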
+__lookup_processor_type:
+ adr r5, 2f
+ ldmia r5, {r7, r9, r10}
+ sub r5, r5, r10 @ convert addresses
+ add r7, r7, r5 @ to our address space
+ add r10, r9, r5
+ mrc p15, 0, r9, c0, c0 @ get processor id
+1: ldmia r10, {r5, r6, r8} @ value, mask, mmuflags
+ and r6, r6, r9 @ mask wanted bits
+ teq r5, r6
+ moveq pc, lr
+ add r10, r10, #36 @ sizeof(proc_info_list)
+ cmp r10, r7
+ blt 1b
+ mov r10, #0 @ unknown processor
+ mov pc, lr
+
+/*
+ * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
+ * more information about the __proc_info and __arch_info structures.
+ */
+2: .long __proc_info_end
+ .long __proc_info_begin
+ .long 2b
+ .long __arch_info_begin
+ .long __arch_info_end
+
+/*
+ * Look up the machine architecture in the linker-built list of architectures.
+ * Note that we can't use the absolute addresses for the __arch_info
+ * lists since we aren't running with the MMU on (and therefore, we are
+ * not in the correct address space). We have to calculate the offset.
+ *
+ * r1 = machine architecture number
+ * Returns:
+ * r2, r3, r4 corrupted
+ * r5 = physical start address of RAM
+ * r6 = physical address of IO
+ * r7 = byte offset into page tables for IO
+ */
+__lookup_architecture_type:
+ adr r4, 2b
+ ldmia r4, {r2, r3, r5, r6, r7} @ throw away r2, r3
+ sub r5, r4, r5 @ convert addresses
+ add r4, r6, r5 @ to our address space
+ add r7, r7, r5
+1: ldr r5, [r4] @ get machine type
+ teq r5, r1
+ beq 2f
+ add r4, r4, #SIZEOF_MACHINE_DESC
+ cmp r4, r7
+ blt 1b
+ mov r7, #0 @ unknown architecture
+ mov pc, lr
+2: ldmib r4, {r5, r6, r7} @ found, get results
+ mov pc, lr
+++ /dev/null
-/*
- * linux/arch/arm/kernel/irq-arch.c
- *
- * Copyright (C) 1995-2000 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * We contain the architecture-specific parts of interrupt handling
- * in this file. In 2.5, it will move into the various arch/arm/mach-*
- * directories.
- */
-#include <linux/ptrace.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-
-#include <asm/hardware.h>
-#include <asm/io.h>
-#include <asm/system.h>
-
-#include <asm/mach/irq.h>
-
-/*
- * Get architecture specific interrupt handlers
- * and interrupt initialisation.
- */
-#include <asm/arch/irq.h>
-
-void __init genarch_init_irq(void)
-{
- irq_init_irq();
-}
-
+++ /dev/null
-/*
- * linux/arch/arm/kernel/leds-ftvpci.c
- *
- * Copyright (C) 1999 FutureTV Labs Ltd
- */
-
-#include <linux/module.h>
-
-#include <asm/hardware.h>
-#include <asm/leds.h>
-#include <asm/system.h>
-#include <asm/io.h>
-
-static void ftvpci_leds_event(led_event_t ledevt)
-{
- static int led_state = 0;
-
- switch(ledevt) {
- case led_timer:
- led_state ^= 1;
- raw_writeb(0x1a | led_state, INTCONT_BASE);
- break;
-
- default:
- break;
- }
-}
-
-void (*leds_event)(led_event_t) = ftvpci_leds_event;
-
-EXPORT_SYMBOL(leds_event);
--- /dev/null
+/*
+ * linux/arch/arm/mm/small_page.c
+ *
+ * Copyright (C) 1996 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Changelog:
+ * 26/01/1996 RMK Cleaned up various areas to make a little more generic
+ * 07/02/1999 RMK Support added for 16K and 32K page sizes
+ * containing 8K blocks
+ */
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+
+#include <asm/bitops.h>
+#include <asm/pgtable.h>
+
+#define PEDANTIC
+
+/*
+ * Requirement:
+ * We need to be able to allocate naturally aligned memory of finer
+ * granularity than the page size. This is typically used for the
+ * second level page tables on 32-bit ARMs.
+ *
+ * Theory:
+ * We "misuse" the Linux memory management system. We use alloc_page
+ * to allocate a page and then mark it as reserved. The Linux memory
+ * management system will then ignore the "offset", "next_hash" and
+ * "pprev_hash" entries in the mem_map for this page.
+ *
+ * We then use a bitstring in the "offset" field to mark which segments
+ * of the page are in use, and manipulate this as required during the
+ * allocation and freeing of these small pages.
+ *
+ * We also maintain a queue of pages being used for this purpose using
+ * the "next_hash" and "pprev_hash" entries of mem_map;
+ */
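+
+/*
+ * Worked example: with PAGE_SIZE == 4096 and 2K blocks (orders[0]
+ * below), each page carries a two-bit map and all_used == 0x3.
+ * An allocation claims the first zero bit (ffz) and returns
+ * page_address(page) + (bit << 11).
+ */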
+
+struct order {
+ struct page *queue;
+ unsigned int mask; /* (1 << shift) - 1 */
+ unsigned int shift; /* (1 << shift) = small page size */
+ unsigned int block_mask; /* nr_blocks - 1 */
+ unsigned int all_used; /* (1 << nr_blocks) - 1 */
+};
+
+
+static struct order orders[] = {
+#if PAGE_SIZE == 4096
+ { NULL, 2047, 11, 1, 0x00000003 }
+#elif PAGE_SIZE == 32768
+ { NULL, 2047, 11, 15, 0x0000ffff },
+ { NULL, 8191, 13, 3, 0x0000000f }
+#else
+#error unsupported page size
+#endif
+};
+
+#define USED_MAP(pg) ((pg)->index)
+#define TEST_AND_CLEAR_USED(pg,off) (test_and_clear_bit(off, &USED_MAP(pg)))
+#define SET_USED(pg,off) (set_bit(off, &USED_MAP(pg)))
+
+static spinlock_t small_page_lock = SPIN_LOCK_UNLOCKED;
+
+static void add_page_to_queue(struct page *page, struct page **p)
+{
+#ifdef PEDANTIC
+ if (page->pprev_hash)
+ PAGE_BUG(page);
+#endif
+ page->next_hash = *p;
+ if (*p)
+ (*p)->pprev_hash = &page->next_hash;
+ *p = page;
+ page->pprev_hash = p;
+}
+
+static void remove_page_from_queue(struct page *page)
+{
+ if (page->pprev_hash) {
+ if (page->next_hash)
+ page->next_hash->pprev_hash = page->pprev_hash;
+ *page->pprev_hash = page->next_hash;
+ page->pprev_hash = NULL;
+ }
+}
+
+static unsigned long __get_small_page(int priority, struct order *order)
+{
+ unsigned long flags;
+ struct page *page;
+ int offset;
+
+ if (!order->queue)
+ goto need_new_page;
+
+ spin_lock_irqsave(&small_page_lock, flags);
+ page = order->queue;
+again:
+#ifdef PEDANTIC
+ if (USED_MAP(page) & ~order->all_used)
+ PAGE_BUG(page);
+#endif
+ offset = ffz(USED_MAP(page));
+ SET_USED(page, offset);
+ if (USED_MAP(page) == order->all_used)
+ remove_page_from_queue(page);
+ spin_unlock_irqrestore(&small_page_lock, flags);
+
+ return (unsigned long) page_address(page) + (offset << order->shift);
+
+need_new_page:
+ page = alloc_page(priority);
+
+ spin_lock_irqsave(&small_page_lock, flags);
+ if (!order->queue) {
+ if (!page)
+ goto no_page;
+ SetPageReserved(page);
+ USED_MAP(page) = 0;
+ cli();
+ add_page_to_queue(page, &order->queue);
+ } else {
+ __free_page(page);
+ cli();
+ page = order->queue;
+ }
+ goto again;
+
+no_page:
+ spin_unlock_irqrestore(&small_page_lock, flags);
+ return 0;
+}
+
+static void __free_small_page(unsigned long spage, struct order *order)
+{
+ unsigned long flags;
+ struct page *page;
+
+ page = virt_to_page(spage);
+ if (VALID_PAGE(page)) {
+
+ /*
+ * The container-page must be marked Reserved
+ */
+ if (!PageReserved(page) || spage & order->mask)
+ goto non_small;
+
+#ifdef PEDANTIC
+ if (USED_MAP(page) & ~order->all_used)
+ PAGE_BUG(page);
+#endif
+
+ spage = spage >> order->shift;
+ spage &= order->block_mask;
+
+ /*
+ * the following must be atomic wrt get_page
+ */
+ spin_lock_irqsave(&small_page_lock, flags);
+
+ if (USED_MAP(page) == order->all_used)
+ add_page_to_queue(page, &order->queue);
+
+ if (!TEST_AND_CLEAR_USED(page, spage))
+ goto already_free;
+
+ if (USED_MAP(page) == 0)
+ goto free_page;
+
+ spin_unlock_irqrestore(&small_page_lock, flags);
+ }
+ return;
+
+free_page:
+ /*
+ * unlink the page from the small page queue and free it
+ */
+ remove_page_from_queue(page);
+ spin_unlock_irqrestore(&small_page_lock, flags);
+ ClearPageReserved(page);
+ __free_page(page);
+ return;
+
+non_small:
+ printk("Trying to free non-small page from %p\n", __builtin_return_address(0));
+ return;
+already_free:
+ printk("Trying to free free small page from %p\n", __builtin_return_address(0));
+}
+
+unsigned long get_page_8k(int priority)
+{
+ return __get_small_page(priority, orders+1);
+}
+
+void free_page_8k(unsigned long spage)
+{
+ __free_small_page(spage, orders+1);
+}
--- /dev/null
+/*
+ * linux/arch/arm/kernel/dec21285.c: PCI functions for DC21285
+ *
+ * Copyright (C) 1998-2000 Russell King, Phil Blundell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/mach/pci.h>
+#include <asm/hardware/dec21285.h>
+
+#define MAX_SLOTS 21
+
+#define PCICMD_ERROR_BITS ((PCI_STATUS_DETECTED_PARITY | \
+ PCI_STATUS_REC_MASTER_ABORT | \
+ PCI_STATUS_REC_TARGET_ABORT | \
+ PCI_STATUS_PARITY) << 16)
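+
+/*
+ * PCI status bits are write-one-to-clear, so or-ing PCICMD_ERROR_BITS
+ * into the top half of CSR_PCICMD acknowledges any latched errors.
+ */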
+
+extern int setup_arm_irq(int, struct irqaction *);
+extern void pcibios_report_status(u_int status_mask, int warn);
+extern void register_isa_ports(unsigned int, unsigned int, unsigned int);
+
+static unsigned long
+dc21285_base_address(struct pci_dev *dev)
+{
+ unsigned long addr = 0;
+ unsigned int devfn = dev->devfn;
+
+ if (dev->bus->number == 0) {
+ if (PCI_SLOT(devfn) == 0)
+ /*
+ * For devfn 0, point at the 21285
+ */
+ addr = ARMCSR_BASE;
+ else {
+ devfn -= 1 << 3;
+
+ if (devfn < PCI_DEVFN(MAX_SLOTS, 0))
+ addr = PCICFG0_BASE | 0xc00000 | (devfn << 8);
+ }
+ } else
+ addr = PCICFG1_BASE | (dev->bus->number << 16) | (devfn << 8);
+
+ return addr;
+}
+
+static int
+dc21285_read_config_byte(struct pci_dev *dev, int where, u8 *value)
+{
+ unsigned long addr = dc21285_base_address(dev);
+ u8 v;
+
+ if (addr)
+ asm("ldr%?b %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where));
+ else
+ v = 0xff;
+
+ *value = v;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+dc21285_read_config_word(struct pci_dev *dev, int where, u16 *value)
+{
+ unsigned long addr = dc21285_base_address(dev);
+ u16 v;
+
+ if (addr)
+ asm("ldr%?h %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where));
+ else
+ v = 0xffff;
+
+ *value = v;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+dc21285_read_config_dword(struct pci_dev *dev, int where, u32 *value)
+{
+ unsigned long addr = dc21285_base_address(dev);
+ u32 v;
+
+ if (addr)
+ asm("ldr%? %0, [%1, %2]"
+ : "=r" (v) : "r" (addr), "r" (where));
+ else
+ v = 0xffffffff;
+
+ *value = v;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+dc21285_write_config_byte(struct pci_dev *dev, int where, u8 value)
+{
+ unsigned long addr = dc21285_base_address(dev);
+
+ if (addr)
+ asm("str%?b %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where));
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+dc21285_write_config_word(struct pci_dev *dev, int where, u16 value)
+{
+ unsigned long addr = dc21285_base_address(dev);
+
+ if (addr)
+ asm("str%?h %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where));
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+dc21285_write_config_dword(struct pci_dev *dev, int where, u32 value)
+{
+ unsigned long addr = dc21285_base_address(dev);
+
+ if (addr)
+ asm("str%? %0, [%1, %2]"
+ : : "r" (value), "r" (addr), "r" (where));
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops dc21285_ops = {
+ dc21285_read_config_byte,
+ dc21285_read_config_word,
+ dc21285_read_config_dword,
+ dc21285_write_config_byte,
+ dc21285_write_config_word,
+ dc21285_write_config_dword,
+};
+
+static struct timer_list serr_timer;
+static struct timer_list perr_timer;
+
+static void dc21285_enable_error(unsigned long __data)
+{
+ switch (__data) {
+ case IRQ_PCI_SERR:
+ del_timer(&serr_timer);
+ break;
+
+ case IRQ_PCI_PERR:
+ del_timer(&perr_timer);
+ break;
+ }
+
+ enable_irq(__data);
+}
+
+/*
+ * Warn on PCI errors.
+ */
+static void dc21285_abort_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned int cmd;
+ unsigned int status;
+
+ cmd = *CSR_PCICMD;
+ status = cmd >> 16;
+ cmd = cmd & 0xffff;
+
+ if (status & PCI_STATUS_REC_MASTER_ABORT) {
+ printk(KERN_DEBUG "PCI: master abort: ");
+ pcibios_report_status(PCI_STATUS_REC_MASTER_ABORT, 1);
+ printk("\n");
+
+ cmd |= PCI_STATUS_REC_MASTER_ABORT << 16;
+ }
+
+ if (status & PCI_STATUS_REC_TARGET_ABORT) {
+ printk(KERN_DEBUG "PCI: target abort: ");
+ pcibios_report_status(PCI_STATUS_SIG_TARGET_ABORT, 1);
+ printk("\n");
+
+ cmd |= PCI_STATUS_REC_TARGET_ABORT << 16;
+ }
+
+ *CSR_PCICMD = cmd;
+}
+
+static void dc21285_serr_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct timer_list *timer = dev_id;
+ unsigned int cntl;
+
+ printk(KERN_DEBUG "PCI: system error received: ");
+ pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1);
+ printk("\n");
+
+ cntl = *CSR_SA110_CNTL & 0xffffdf07;
+ *CSR_SA110_CNTL = cntl | SA110_CNTL_RXSERR;
+
+ /*
+ * back off this interrupt
+ */
+ disable_irq(irq);
+ timer->expires = jiffies + HZ;
+ add_timer(timer);
+}
+
+static void dc21285_discard_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ printk(KERN_DEBUG "PCI: discard timer expired\n");
+ *CSR_SA110_CNTL &= 0xffffde07;
+}
+
+static void dc21285_dparity_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned int cmd;
+
+ printk(KERN_DEBUG "PCI: data parity error detected: ");
+ pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1);
+ printk("\n");
+
+ cmd = *CSR_PCICMD & 0xffff;
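+ /* PCI_STATUS_PARITY << 16 == 1 << 24: write the bit back to clear it */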
+ *CSR_PCICMD = cmd | 1 << 24;
+}
+
+static void dc21285_parity_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct timer_list *timer = dev_id;
+ unsigned int cmd;
+
+ printk(KERN_DEBUG "PCI: parity error detected: ");
+ pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1);
+ printk("\n");
+
+ cmd = *CSR_PCICMD & 0xffff;
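+ /* PCI_STATUS_DETECTED_PARITY << 16 == 1 << 31: write the bit back to clear it */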
+ *CSR_PCICMD = cmd | 1 << 31;
+
+ /*
+ * back off this interrupt
+ */
+ disable_irq(irq);
+ timer->expires = jiffies + HZ;
+ add_timer(timer);
+}
+
+void __init dc21285_setup_resources(struct resource **resource)
+{
+ struct resource *busmem, *busmempf;
+
+ busmem = kmalloc(sizeof(*busmem), GFP_KERNEL);
+ busmempf = kmalloc(sizeof(*busmempf), GFP_KERNEL);
+ memset(busmem, 0, sizeof(*busmem));
+ memset(busmempf, 0, sizeof(*busmempf));
+
+ busmem->flags = IORESOURCE_MEM;
+ busmem->name = "Footbridge non-prefetch";
+ busmempf->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ busmempf->name = "Footbridge prefetch";
+
+ allocate_resource(&iomem_resource, busmempf, 0x20000000,
+ 0x80000000, 0xffffffff, 0x20000000, NULL, NULL);
+ allocate_resource(&iomem_resource, busmem, 0x40000000,
+ 0x80000000, 0xffffffff, 0x40000000, NULL, NULL);
+
+ resource[0] = &ioport_resource;
+ resource[1] = busmem;
+ resource[2] = busmempf;
+}
+
+void __init dc21285_init(void *sysdata)
+{
+ unsigned int mem_size, mem_mask;
+ int cfn_mode;
+
+ mem_size = (unsigned int)high_memory - PAGE_OFFSET;
+ for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1)
+ if (mem_mask >= mem_size)
+ break;
+
+ /*
+ * These registers need to be set up whether we're the
+ * central function or not.
+ */
+ *CSR_SDRAMBASEMASK = (mem_mask - 1) & 0x0ffc0000;
+ *CSR_SDRAMBASEOFFSET = 0;
+ *CSR_ROMBASEMASK = 0x80000000;
+ *CSR_CSRBASEMASK = 0;
+ *CSR_CSRBASEOFFSET = 0;
+ *CSR_PCIADDR_EXTN = 0;
+
+ cfn_mode = __footbridge_cfn_mode();
+
+ printk(KERN_INFO "PCI: DC21285 footbridge, revision %02lX, in "
+ "%s mode\n", *CSR_CLASSREV & 0xff, cfn_mode ?
+ "central function" : "addin");
+
+ if (cfn_mode) {
+ static struct resource csrmem, csrio;
+
+ csrio.flags = IORESOURCE_IO;
+ csrio.name = "Footbridge";
+ csrmem.flags = IORESOURCE_MEM;
+ csrmem.name = "Footbridge";
+
+ allocate_resource(&ioport_resource, &csrio, 128,
+ 0xff00, 0xffff, 128, NULL, NULL);
+ allocate_resource(&iomem_resource, &csrmem, 128,
+ 0xf4000000, 0xf8000000, 128, NULL, NULL);
+
+ /*
+ * Map our SDRAM at a known address in PCI space, just in case
+ * the firmware had other ideas. Using a nonzero base is
+ * necessary, since some VGA cards forcefully use PCI addresses
+ * in the range 0x000a0000 to 0x000c0000 (e.g. S3 cards).
+ */
+ *CSR_PCICSRBASE = csrmem.start;
+ *CSR_PCICSRIOBASE = csrio.start;
+ *CSR_PCISDRAMBASE = __virt_to_bus(PAGE_OFFSET);
+ *CSR_PCIROMBASE = 0;
+ *CSR_PCICMD = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INVALIDATE | PCICMD_ERROR_BITS;
+
+ pci_scan_bus(0, &dc21285_ops, sysdata);
+
+ /*
+ * Clear any existing errors - we aren't
+ * interested in historical data...
+ */
+ *CSR_SA110_CNTL = (*CSR_SA110_CNTL & 0xffffde07) |
+ SA110_CNTL_RXSERR;
+ *CSR_PCICMD = (*CSR_PCICMD & 0xffff) | PCICMD_ERROR_BITS;
+ } else if (footbridge_cfn_mode() != 0) {
+ /*
+ * If we are not compiled to accept "add-in" mode, then
+ * we are using a constant virt_to_bus translation which
+ * cannot hope to cater for the way the host BIOS has
+ * set up the machine.
+ */
+ panic("PCI: this kernel is compiled for central "
+ "function mode only");
+ }
+
+ /*
+ * Initialise PCI error IRQ after we've finished probing
+ */
+ request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, SA_INTERRUPT, "PCI abort", NULL);
+ request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, SA_INTERRUPT, "Discard timer", NULL);
+ request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, SA_INTERRUPT, "PCI data parity", NULL);
+
+ init_timer(&serr_timer);
+ init_timer(&perr_timer);
+
+ serr_timer.data = IRQ_PCI_SERR;
+ serr_timer.function = dc21285_enable_error;
+ perr_timer.data = IRQ_PCI_PERR;
+ perr_timer.function = dc21285_enable_error;
+
+ request_irq(IRQ_PCI_SERR, dc21285_serr_irq, SA_INTERRUPT,
+ "PCI system error", &serr_timer);
+ request_irq(IRQ_PCI_PERR, dc21285_parity_irq, SA_INTERRUPT,
+ "PCI parity error", &perr_timer);
+
+ register_isa_ports(DC21285_PCI_MEM, DC21285_PCI_IO, 0);
+}
--- /dev/null
+/*
+ * linux/arch/arm/kernel/dma-ebsa285.c
+ *
+ * Copyright (C) 1998 Phil Blundell
+ *
+ * DMA functions specific to EBSA-285/CATS architectures
+ *
+ * Changelog:
+ * 09-Nov-1998 RMK Split out ISA DMA functions to dma-isa.c
+ * 17-Mar-1999 RMK Allow any EBSA285-like architecture to have
+ * ISA DMA controllers.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include <asm/mach/dma.h>
+#include <asm/hardware/dec21285.h>
+
+#if 0
+static int fb_dma_request(dmach_t channel, dma_t *dma)
+{
+ return -EINVAL;
+}
+
+static void fb_dma_enable(dmach_t channel, dma_t *dma)
+{
+}
+
+static void fb_dma_disable(dmach_t channel, dma_t *dma)
+{
+}
+
+static struct dma_ops fb_dma_ops = {
+ type: "fb",
+ request: fb_dma_request,
+ enable: fb_dma_enable,
+ disable: fb_dma_disable,
+};
+#endif
+
+void __init arch_dma_init(dma_t *dma)
+{
+#if 0
+ dma[_DC21285_DMA(0)].d_ops = &fb_dma_ops;
+ dma[_DC21285_DMA(1)].d_ops = &fb_dma_ops;
+#endif
+#ifdef CONFIG_ISA_DMA
+ if (footbridge_cfn_mode())
+ isa_init_dma(dma + _ISA_DMA(0));
+#endif
+}
--- /dev/null
+/*
+ * linux/arch/arm/kernel/leds-ftvpci.c
+ *
+ * Copyright (C) 1999 FutureTV Labs Ltd
+ */
+
+#include <linux/module.h>
+
+#include <asm/hardware.h>
+#include <asm/leds.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+static void ftvpci_leds_event(led_event_t ledevt)
+{
+ static int led_state = 0;
+
+ switch(ledevt) {
+ case led_timer:
+ led_state ^= 1;
+ raw_writeb(0x1a | led_state, INTCONT_BASE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void (*leds_event)(led_event_t) = ftvpci_leds_event;
+
+EXPORT_SYMBOL(leds_event);
--- /dev/null
+/*
+ * linux/arch/arm/kernel/ftv-pci.c
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ * Bits taken from various places.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/irq.h>
+#include <asm/mach/pci.h>
+
+/*
+ * Owing to a PCB cockup, issue A backplanes are wired thus:
+ *
+ * Slot 1 2 3 4 5 Bridge S1 S2 S3 S4
+ * IRQ D C B A A C B A D
+ * A D C B B D C B A
+ * B A D C C A D C B
+ * C B A D D B A D C
+ *
+ * ID A31 A30 A29 A28 A27 A26 DEV4 DEV5 DEV6 DEV7
+ *
+ * Actually, this isn't too bad, because with the processor card
+ * in slot 5 on the primary bus, the IRQs rotate on both sides
+ * as you'd expect.
+ */
+
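+/*
+ * Worked example: slot 2, INTA (pin 1) yields irqmap_ftv[(2 - 1) & 3],
+ * i.e. IRQ_PCI_C, matching the table above.
+ */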
+static int irqmap_ftv[] __initdata = { IRQ_PCI_D, IRQ_PCI_C, IRQ_PCI_B, IRQ_PCI_A };
+
+static int __init ftv_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (slot > 0x10)
+ slot--;
+ return irqmap_ftv[(slot - pin) & 3];
+}
+
+static u8 __init ftv_swizzle(struct pci_dev *dev, u8 *pin)
+{
+ return PCI_SLOT(dev->devfn);
+}
+
+/* ftv host-specific stuff */
+struct hw_pci ftv_pci __initdata = {
+ init: plx90x0_init,
+ swizzle: ftv_swizzle,
+ map_irq: ftv_map_irq,
+};
+
--- /dev/null
+/*
+ * linux/arch/arm/kernel/dma-rpc.c
+ *
+ * Copyright (C) 1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * DMA functions specific to RiscPC architecture
+ */
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mman.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/page.h>
+#include <asm/dma.h>
+#include <asm/fiq.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/uaccess.h>
+
+#include <asm/mach/dma.h>
+#include <asm/hardware/iomd.h>
+
+#if 0
+typedef enum {
+ dma_size_8 = 1,
+ dma_size_16 = 2,
+ dma_size_32 = 4,
+ dma_size_128 = 16
+} dma_size_t;
+
+typedef struct {
+ dma_size_t transfersize;
+} dma_t;
+#endif
+
+#define TRANSFER_SIZE 2
+
+#define CURA (0)
+#define ENDA (IOMD_IO0ENDA - IOMD_IO0CURA)
+#define CURB (IOMD_IO0CURB - IOMD_IO0CURA)
+#define ENDB (IOMD_IO0ENDB - IOMD_IO0CURA)
+#define CR (IOMD_IO0CR - IOMD_IO0CURA)
+#define ST (IOMD_IO0ST - IOMD_IO0CURA)
+
+#define state_prog_a 0
+#define state_wait_a 1
+#define state_wait_b 2
+
+static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
+{
+ unsigned long end, offset, flags = 0;
+
+ if (dma->sg) {
+ sg->dma_address = dma->sg->dma_address;
+ offset = sg->dma_address & ~PAGE_MASK;
+
+ end = offset + dma->sg->length;
+
+ if (end > PAGE_SIZE)
+ end = PAGE_SIZE;
+
+ if (offset + (int) TRANSFER_SIZE > end)
+ flags |= DMA_END_L;
+
+ sg->length = end - TRANSFER_SIZE;
+
+ dma->sg->length -= end - offset;
+ dma->sg->dma_address += end - offset;
+
+ if (dma->sg->length == 0) {
+ if (dma->sgcount > 1) {
+ dma->sg++;
+ dma->sgcount--;
+ } else {
+ dma->sg = NULL;
+ flags |= DMA_END_S;
+ }
+ }
+ } else {
+ flags = DMA_END_S | DMA_END_L;
+ sg->dma_address = 0;
+ sg->length = 0;
+ }
+
+ sg->length |= flags;
+}
+
+static inline void iomd_setup_dma_a(struct scatterlist *sg, dma_t *dma)
+{
+ iomd_writel(sg->dma_address, dma->dma_base + CURA);
+ iomd_writel(sg->length, dma->dma_base + ENDA);
+}
+
+static inline void iomd_setup_dma_b(struct scatterlist *sg, dma_t *dma)
+{
+ iomd_writel(sg->dma_address, dma->dma_base + CURB);
+ iomd_writel(sg->length, dma->dma_base + ENDB);
+}
+
+static void iomd_dma_handle(int irq, void *dev_id, struct pt_regs *regs)
+{
+ dma_t *dma = (dma_t *)dev_id;
+ unsigned int status = 0, no_buffer = dma->sg == NULL;
+
+ do {
+ switch (dma->state) {
+ case state_prog_a:
+ iomd_get_next_sg(&dma->cur_sg, dma);
+ iomd_setup_dma_a(&dma->cur_sg, dma);
+ dma->state = state_wait_a;
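+ /* fall through: check buffer A's status immediately */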
+
+ case state_wait_a:
+ status = iomd_readb(dma->dma_base + ST);
+ switch (status & (DMA_ST_OFL|DMA_ST_INT|DMA_ST_AB)) {
+ case DMA_ST_OFL|DMA_ST_INT:
+ iomd_get_next_sg(&dma->cur_sg, dma);
+ iomd_setup_dma_a(&dma->cur_sg, dma);
+ break;
+
+ case DMA_ST_INT:
+ iomd_get_next_sg(&dma->cur_sg, dma);
+ iomd_setup_dma_b(&dma->cur_sg, dma);
+ dma->state = state_wait_b;
+ break;
+
+ case DMA_ST_OFL|DMA_ST_INT|DMA_ST_AB:
+ iomd_setup_dma_b(&dma->cur_sg, dma);
+ dma->state = state_wait_b;
+ break;
+ }
+ break;
+
+ case state_wait_b:
+ status = iomd_readb(dma->dma_base + ST);
+ switch (status & (DMA_ST_OFL|DMA_ST_INT|DMA_ST_AB)) {
+ case DMA_ST_OFL|DMA_ST_INT|DMA_ST_AB:
+ iomd_get_next_sg(&dma->cur_sg, dma);
+ iomd_setup_dma_b(&dma->cur_sg, dma);
+ break;
+
+ case DMA_ST_INT|DMA_ST_AB:
+ iomd_get_next_sg(&dma->cur_sg, dma);
+ iomd_setup_dma_a(&dma->cur_sg, dma);
+ dma->state = state_wait_a;
+ break;
+
+ case DMA_ST_OFL|DMA_ST_INT:
+ iomd_setup_dma_a(&dma->cur_sg, dma);
+ dma->state = state_wait_a;
+ break;
+ }
+ break;
+ }
+ } while (dma->sg && (status & DMA_ST_INT));
+
+ if (no_buffer)
+ disable_irq(irq);
+}
+
+static int iomd_request_dma(dmach_t channel, dma_t *dma)
+{
+ return request_irq(dma->dma_irq, iomd_dma_handle,
+ SA_INTERRUPT, dma->device_id, dma);
+}
+
+static void iomd_free_dma(dmach_t channel, dma_t *dma)
+{
+ free_irq(dma->dma_irq, dma);
+}
+
+static void iomd_enable_dma(dmach_t channel, dma_t *dma)
+{
+ unsigned long dma_base = dma->dma_base;
+ unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;
+
+ if (dma->invalid) {
+ dma->invalid = 0;
+
+ /*
+ * Cope with ISA-style drivers which expect cache
+ * coherence.
+ */
+ if (!dma->using_sg) {
+ dma->buf.dma_address = pci_map_single(NULL,
+ dma->buf.address, dma->buf.length,
+ dma->dma_mode == DMA_MODE_READ ?
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ }
+
+ iomd_writeb(DMA_CR_C, dma_base + CR);
+ dma->state = state_prog_a;
+ }
+
+ if (dma->dma_mode == DMA_MODE_READ)
+ ctrl |= DMA_CR_D;
+
+ iomd_writeb(ctrl, dma_base + CR);
+ enable_irq(dma->dma_irq);
+}
+
+static void iomd_disable_dma(dmach_t channel, dma_t *dma)
+{
+ unsigned long dma_base = dma->dma_base;
+ unsigned int ctrl;
+
+ disable_irq(dma->dma_irq);
+ ctrl = iomd_readb(dma_base + CR);
+ iomd_writeb(ctrl & ~DMA_CR_E, dma_base + CR);
+}
+
+static int iomd_set_dma_speed(dmach_t channel, dma_t *dma, int cycle)
+{
+ int tcr, speed;
+
+ if (cycle < 188)
+ speed = 3;
+ else if (cycle <= 250)
+ speed = 2;
+ else if (cycle < 438)
+ speed = 1;
+ else
+ speed = 0;
+
+ tcr = iomd_readb(IOMD_DMATCR);
+ speed &= 3;
+
+ switch (channel) {
+ case DMA_0:
+ tcr = (tcr & ~0x03) | speed;
+ break;
+
+ case DMA_1:
+ tcr = (tcr & ~0x0c) | (speed << 2);
+ break;
+
+ case DMA_2:
+ tcr = (tcr & ~0x30) | (speed << 4);
+ break;
+
+ case DMA_3:
+ tcr = (tcr & ~0xc0) | (speed << 6);
+ break;
+
+ default:
+ break;
+ }
+
+ iomd_writeb(tcr, IOMD_DMATCR);
+
+ return speed;
+}
+
+static struct dma_ops iomd_dma_ops = {
+ type: "IOMD",
+ request: iomd_request_dma,
+ free: iomd_free_dma,
+ enable: iomd_enable_dma,
+ disable: iomd_disable_dma,
+ setspeed: iomd_set_dma_speed,
+};
+
+static struct fiq_handler fh = {
+ name: "floppydma"
+};
+
+static void floppy_enable_dma(dmach_t channel, dma_t *dma)
+{
+ void *fiqhandler_start;
+ unsigned int fiqhandler_length;
+ struct pt_regs regs;
+
+ if (dma->dma_mode == DMA_MODE_READ) {
+ extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
+ fiqhandler_start = &floppy_fiqin_start;
+ fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
+ } else {
+ extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
+ fiqhandler_start = &floppy_fiqout_start;
+ fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
+ }
+
+ regs.ARM_r9 = dma->buf.length;
+ regs.ARM_r10 = (unsigned long)dma->buf.address;
+ regs.ARM_fp = FLOPPYDMA_BASE;
+
+ if (claim_fiq(&fh)) {
+ printk("floppydma: couldn't claim FIQ.\n");
+ return;
+ }
+
+ set_fiq_handler(fiqhandler_start, fiqhandler_length);
+ set_fiq_regs(&regs);
+ enable_fiq(dma->dma_irq);
+}
+
+static void floppy_disable_dma(dmach_t channel, dma_t *dma)
+{
+ disable_fiq(dma->dma_irq);
+ release_fiq(&fh);
+}
+
+static int floppy_get_residue(dmach_t channel, dma_t *dma)
+{
+ struct pt_regs regs;
+ get_fiq_regs(&regs);
+ return regs.ARM_r9;
+}
+
+static struct dma_ops floppy_dma_ops = {
+ type: "FIQDMA",
+ enable: floppy_enable_dma,
+ disable: floppy_disable_dma,
+ residue: floppy_get_residue,
+};
+
+/*
+ * This is virtual DMA - we don't need anything here.
+ */
+static void sound_enable_disable_dma(dmach_t channel, dma_t *dma)
+{
+}
+
+static struct dma_ops sound_dma_ops = {
+ type: "VIRTUAL",
+ enable: sound_enable_disable_dma,
+ disable: sound_enable_disable_dma,
+};
+
+void __init arch_dma_init(dma_t *dma)
+{
+ iomd_writeb(0, IOMD_IO0CR);
+ iomd_writeb(0, IOMD_IO1CR);
+ iomd_writeb(0, IOMD_IO2CR);
+ iomd_writeb(0, IOMD_IO3CR);
+
+ iomd_writeb(0xa0, IOMD_DMATCR);
+
+ dma[DMA_0].dma_base = IOMD_IO0CURA;
+ dma[DMA_0].dma_irq = IRQ_DMA0;
+ dma[DMA_0].d_ops = &iomd_dma_ops;
+ dma[DMA_1].dma_base = IOMD_IO1CURA;
+ dma[DMA_1].dma_irq = IRQ_DMA1;
+ dma[DMA_1].d_ops = &iomd_dma_ops;
+ dma[DMA_2].dma_base = IOMD_IO2CURA;
+ dma[DMA_2].dma_irq = IRQ_DMA2;
+ dma[DMA_2].d_ops = &iomd_dma_ops;
+ dma[DMA_3].dma_base = IOMD_IO3CURA;
+ dma[DMA_3].dma_irq = IRQ_DMA3;
+ dma[DMA_3].d_ops = &iomd_dma_ops;
+ dma[DMA_S0].dma_base = IOMD_SD0CURA;
+ dma[DMA_S0].dma_irq = IRQ_DMAS0;
+ dma[DMA_S0].d_ops = &iomd_dma_ops;
+ dma[DMA_S1].dma_base = IOMD_SD1CURA;
+ dma[DMA_S1].dma_irq = IRQ_DMAS1;
+ dma[DMA_S1].d_ops = &iomd_dma_ops;
+ dma[DMA_VIRTUAL_FLOPPY].dma_irq = FIQ_FLOPPYDATA;
+ dma[DMA_VIRTUAL_FLOPPY].d_ops = &floppy_dma_ops;
+ dma[DMA_VIRTUAL_SOUND].d_ops = &sound_dma_ops;
+
+ /*
+ * Setup DMA channels 2,3 to be for podules
+ * and channels 0,1 for internal devices
+ */
+ iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
+}
+++ /dev/null
-/*
- * linux/arch/arm/mm/small_page.c
- *
- * Copyright (C) 1996 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Changelog:
- * 26/01/1996 RMK Cleaned up various areas to make little more generic
- * 07/02/1999 RMK Support added for 16K and 32K page sizes
- * containing 8K blocks
- */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/smp.h>
-
-#include <asm/bitops.h>
-#include <asm/pgtable.h>
-
-#define PEDANTIC
-
-/*
- * Requirement:
- * We need to be able to allocate naturally aligned memory of finer
- * granularity than the page size. This is typically used for the
- * second level page tables on 32-bit ARMs.
- *
- * Theory:
- * We "misuse" the Linux memory management system. We use alloc_page
- * to allocate a page and then mark it as reserved. The Linux memory
- * management system will then ignore the "offset", "next_hash" and
- * "pprev_hash" entries in the mem_map for this page.
- *
- * We then use a bitstring in the "offset" field to mark which segments
- * of the page are in use, and manipulate this as required during the
- * allocation and freeing of these small pages.
- *
- * We also maintain a queue of pages being used for this purpose using
- * the "next_hash" and "pprev_hash" entries of mem_map;
- */
-
-struct order {
- struct page *queue;
- unsigned int mask; /* (1 << shift) - 1 */
- unsigned int shift; /* (1 << shift) size of page */
- unsigned int block_mask; /* nr_blocks - 1 */
- unsigned int all_used; /* (1 << nr_blocks) - 1 */
-};
-
-
-static struct order orders[] = {
-#if PAGE_SIZE == 4096
- { NULL, 2047, 11, 1, 0x00000003 }
-#elif PAGE_SIZE == 32768
- { NULL, 2047, 11, 15, 0x0000ffff },
- { NULL, 8191, 13, 3, 0x0000000f }
-#else
-#error unsupported page size
-#endif
-};
-
-#define USED_MAP(pg) ((pg)->index)
-#define TEST_AND_CLEAR_USED(pg,off) (test_and_clear_bit(off, &USED_MAP(pg)))
-#define SET_USED(pg,off) (set_bit(off, &USED_MAP(pg)))
-
-static spinlock_t small_page_lock = SPIN_LOCK_UNLOCKED;
-
-static void add_page_to_queue(struct page *page, struct page **p)
-{
-#ifdef PEDANTIC
- if (page->pprev_hash)
- PAGE_BUG(page);
-#endif
- page->next_hash = *p;
- if (*p)
- (*p)->pprev_hash = &page->next_hash;
- *p = page;
- page->pprev_hash = p;
-}
-
-static void remove_page_from_queue(struct page *page)
-{
- if (page->pprev_hash) {
- if (page->next_hash)
- page->next_hash->pprev_hash = page->pprev_hash;
- *page->pprev_hash = page->next_hash;
- page->pprev_hash = NULL;
- }
-}
-
-static unsigned long __get_small_page(int priority, struct order *order)
-{
- unsigned long flags;
- struct page *page;
- int offset;
-
- if (!order->queue)
- goto need_new_page;
-
- spin_lock_irqsave(&small_page_lock, flags);
- page = order->queue;
-again:
-#ifdef PEDANTIC
- if (USED_MAP(page) & ~order->all_used)
- PAGE_BUG(page);
-#endif
- offset = ffz(USED_MAP(page));
- SET_USED(page, offset);
- if (USED_MAP(page) == order->all_used)
- remove_page_from_queue(page);
- spin_unlock_irqrestore(&small_page_lock, flags);
-
- return (unsigned long) page_address(page) + (offset << order->shift);
-
-need_new_page:
- page = alloc_page(priority);
-
- spin_lock_irqsave(&small_page_lock, flags);
- if (!order->queue) {
- if (!page)
- goto no_page;
- SetPageReserved(page);
- USED_MAP(page) = 0;
- cli();
- add_page_to_queue(page, &order->queue);
- } else {
- __free_page(page);
- cli();
- page = order->queue;
- }
- goto again;
-
-no_page:
- spin_unlock_irqrestore(&small_page_lock, flags);
- return 0;
-}
-
-static void __free_small_page(unsigned long spage, struct order *order)
-{
- unsigned long flags;
- struct page *page;
-
- page = virt_to_page(spage);
- if (VALID_PAGE(page)) {
-
- /*
- * The container-page must be marked Reserved
- */
- if (!PageReserved(page) || spage & order->mask)
- goto non_small;
-
-#ifdef PEDANTIC
- if (USED_MAP(page) & ~order->all_used)
- PAGE_BUG(page);
-#endif
-
- spage = spage >> order->shift;
- spage &= order->block_mask;
-
- /*
- * the following must be atomic wrt get_page
- */
- spin_lock_irqsave(&small_page_lock, flags);
-
- if (USED_MAP(page) == order->all_used)
- add_page_to_queue(page, &order->queue);
-
- if (!TEST_AND_CLEAR_USED(page, spage))
- goto already_free;
-
- if (USED_MAP(page) == 0)
- goto free_page;
-
- spin_unlock_irqrestore(&small_page_lock, flags);
- }
- return;
-
-free_page:
- /*
- * unlink the page from the small page queue and free it
- */
- remove_page_from_queue(page);
- spin_unlock_irqrestore(&small_page_lock, flags);
- ClearPageReserved(page);
- __free_page(page);
- return;
-
-non_small:
- printk("Trying to free non-small page from %p\n", __builtin_return_address(0));
- return;
-already_free:
- printk("Trying to free free small page from %p\n", __builtin_return_address(0));
-}
-
-unsigned long get_page_8k(int priority)
-{
- return __get_small_page(priority, orders+1);
-}
-
-void free_page_8k(unsigned long spage)
-{
- __free_small_page(spage, orders+1);
-}
struct usb_device *usb_dev = urb->dev;
etrax_hc_t *hc = usb_dev->bus->hcpriv;
unsigned int pipe = urb->pipe;
- devrequest *cmd = (devrequest *) urb->setup_packet;
+ struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
void *data = urb->transfer_buffer;
int leni = urb->transfer_buffer_length;
int len = 0;
return 0;
}
- bmRType_bReq = cmd->requesttype | cmd->request << 8;
- wValue = le16_to_cpu(cmd->value);
- wIndex = le16_to_cpu(cmd->index);
- wLength = le16_to_cpu(cmd->length);
+ bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
+ wValue = le16_to_cpu(cmd->wValue);
+ wIndex = le16_to_cpu(cmd->wIndex);
+ wLength = le16_to_cpu(cmd->wLength);
dbg_rh("bmRType_bReq : 0x%04X (%d)", bmRType_bReq, bmRType_bReq);
dbg_rh("wValue : 0x%04X (%d)", wValue, wValue);
# USB Miscellaneous drivers
#
# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_AUERSWALD is not set
#
# Kernel hacking
*/
slice = clocks / (smp_num_cpus+1);
- printk("cpu: %d, clocks: %d, slice: %d\n",
- smp_processor_id(), clocks, slice);
+ printk("cpu: %d, clocks: %d, slice: %d\n", smp_processor_id(), clocks, slice);
/*
* Wait for IRQ0's slice:
__setup_APIC_LVTT(clocks);
- printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n",
- smp_processor_id(), t0, t1, delta, slice, clocks);
+ printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n", smp_processor_id(), t0, t1, delta, slice, clocks);
__restore_flags(flags);
}
* to get a message out.
*/
bust_spinlocks(1);
- printk("NMI Watchdog detected LOCKUP on CPU%d, registers:\n", cpu);
+ printk("NMI Watchdog detected LOCKUP on CPU%d, eip %08lx, registers:\n", cpu, regs->eip);
show_registers(regs);
printk("console shuts up ...\n");
console_silent();
void cpu_idle (void)
{
/* endless idle loop with no priority at all */
- init_idle();
- current->nice = 20;
-
while (1) {
void (*idle)(void) = pm_idle;
if (!idle)
/* The 'big kernel lock' */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};
+struct tlb_state cpu_tlbstate[NR_CPUS] __cacheline_aligned = {[0 ... NR_CPUS-1] = { &init_mm, 0, }};
/*
* the following functions deal with sending IPIs between CPUs.
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
-
void smp_send_reschedule(int cpu)
{
send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}
+/*
+ * This function sends a reschedule IPI to all (other) CPUs.
+ * It should only be used if some 'global' task became runnable,
+ * such as an RT task that must be handled now. The first CPU
+ * that manages to grab the task will run it.
+ */
+void smp_send_reschedule_all(void)
+{
+ send_IPI_allbutself(RESCHEDULE_VECTOR);
+}
+
/*
* Structure and data for smp_call_function(). This is designed to minimise
* static memory requirements. It also looks cleaner.
if (tsc_values[i] < avg)
realdelta = -realdelta;
- printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
- i, realdelta);
+ printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n", i, realdelta);
}
sum += delta;
}
if (!buggy)
printk("passed.\n");
}
static void __init synchronize_tsc_ap (void)
* (This works even if the APIC is not enabled.)
*/
phys_id = GET_APIC_ID(apic_read(APIC_ID));
- cpuid = current->processor;
+ cpuid = smp_processor_id();
if (test_and_set_bit(cpuid, &cpu_online_map)) {
printk("huh, phys CPU#%d, CPU#%d already present??\n",
phys_id, cpuid);
*/
local_flush_tlb();
+ init_idle();
return cpu_idle();
}
if (!idle)
panic("No idle process for CPU %d", cpu);
- idle->processor = cpu;
- idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
+ idle->cpu = cpu;
map_cpu_to_boot_apicid(cpu, apicid);
idle->thread.eip = (unsigned long) start_secondary;
- del_from_runqueue(idle);
unhash_process(idle);
- init_tasks[cpu] = idle;
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline();
map_cpu_to_boot_apicid(0, boot_cpu_apicid);
global_irq_holder = 0;
- current->processor = 0;
- init_idle();
+ current->cpu = 0;
smp_tune_scheduling();
/*
out_of_memory:
if (current->pid == 1) {
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
goto survive;
}
goto bad_area;
out_of_memory:
up_read(&mm->mmap_sem);
if (tsk->pid == 1) {
- tsk->policy |= SCHED_YIELD;
- schedule();
+ yield();
down_read(&mm->mmap_sem);
goto survive;
}
ret = do_bio_blockbacked(lo, bio, rbh);
- bio_endio(rbh, !ret, bio_sectors(bio));
+ bio_endio(rbh, !ret, bio_sectors(rbh));
loop_put_buffer(bio);
}
}
flush_signals(current);
spin_unlock_irq(&current->sigmask_lock);
- current->policy = SCHED_OTHER;
- current->nice = -20;
+ set_user_nice(current, -20);
spin_lock_irq(&lo->lo_lock);
lo->lo_state = Lo_bound;
static int hci_usb_ctrl_msg(struct hci_usb *husb, struct sk_buff *skb)
{
struct urb *urb = husb->ctrl_urb;
- devrequest *dr = &husb->dev_req;
+ struct usb_ctrlrequest *dr = &husb->dev_req;
int pipe, status;
DBG("%s len %d", husb->hdev.name, skb->len);
pipe = usb_sndctrlpipe(husb->udev, 0);
- dr->requesttype = HCI_CTRL_REQ;
- dr->request = 0;
- dr->index = 0;
- dr->value = 0;
- dr->length = cpu_to_le16(skb->len);
+ dr->bRequestType = HCI_CTRL_REQ;
+ dr->bRequest = 0;
+ dr->wIndex = 0;
+ dr->wValue = 0;
+ dr->wLength = cpu_to_le16(skb->len);
FILL_CONTROL_URB(urb, husb->udev, pipe, (void*)dr, skb->data, skb->len,
hci_usb_ctrl, skb);
#ifdef IFORCE_USB
struct usb_device *usbdev; /* USB transfer */
struct urb irq, out, ctrl;
- devrequest dr;
+ struct usb_ctrlrequest dr;
#endif
/* Force Feedback */
wait_queue_head_t wait;
#ifdef IFORCE_USB
case IFORCE_USB:
- iforce->dr.request = packet[0];
+ iforce->dr.bRequest = packet[0];
iforce->ctrl.dev = iforce->usbdev;
set_current_state(TASK_INTERRUPTIBLE);
iforce->bus = IFORCE_USB;
iforce->usbdev = dev;
- iforce->dr.requesttype = USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE;
- iforce->dr.index = 0;
- iforce->dr.length = 16;
+ iforce->dr.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE;
+ iforce->dr.wIndex = 0;
+ iforce->dr.wLength = 16;
FILL_INT_URB(&iforce->irq, dev, usb_rcvintpipe(dev, epirq->bEndpointAddress),
iforce->data, 16, iforce_usb_irq, iforce, epirq->bInterval);
pDrvData->IPCs[ipcnum].bIsHere = FALSE;
pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
- current->nice = -20; /* boost to provide priority timing */
#else
current->priority = 0x28; /* boost to provide priority timing */
#endif
void *ptr = NULL;
while (!ptr) {
ptr=kmalloc(sizeof(struct buffer_head),GFP_NOIO);
- if (!ptr) {
- __set_current_state(TASK_RUNNING);
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ if (!ptr)
+ yield();
}
return ptr;
}
void *ptr = NULL;
while (!ptr) {
ptr=kmalloc(sizeof(struct ataraid_bh_private),GFP_NOIO);
- if (!ptr) {
- __set_current_state(TASK_RUNNING);
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ if (!ptr)
+ yield();
}
return ptr;
}
/* ------------------------------------------------------------- */
-static struct pci_device_id b1pci_pci_tbl[] __initdata = {
+static struct pci_device_id b1pci_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
{ } /* Terminating entry */
};
static int ncards = 0;
-static int add_card(struct pci_dev *dev)
+static int __devinit b1pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
struct capi_driver *driver = &b1pci_driver;
struct capicardparams param;
return retval;
}
+static struct pci_driver b1pci_pci_driver = {
+ name: "b1pci",
+ id_table: b1pci_pci_tbl,
+ probe: b1pci_probe,
+};
+
static int __init b1pci_init(void)
{
struct capi_driver *driver = &b1pci_driver;
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
struct capi_driver *driverv4 = &b1pciv4_driver;
#endif
- struct pci_dev *dev = NULL;
char *p;
MOD_INC_USE_COUNT;
}
#endif
- while ((dev = pci_find_device(PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, dev))) {
- if (add_card(dev) == 0)
- ncards++;
- }
+ ncards = pci_register_driver(&b1pci_pci_driver);
if (ncards) {
printk(KERN_INFO "%s: %d B1-PCI card(s) detected\n",
driver->name, ncards);
return 0;
}
printk(KERN_ERR "%s: NO B1-PCI card detected\n", driver->name);
+ pci_unregister_driver(&b1pci_pci_driver);
detach_capi_driver(driver);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
detach_capi_driver(driverv4);
static void __exit b1pci_exit(void)
{
- detach_capi_driver(&b1pci_driver);
+ pci_unregister_driver(&b1pci_pci_driver);
+ detach_capi_driver(&b1pci_driver);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
- detach_capi_driver(&b1pciv4_driver);
+ detach_capi_driver(&b1pciv4_driver);
#endif
}
static int suppress_pollack;
-static struct pci_device_id c4_pci_tbl[] __initdata = {
- { PCI_VENDOR_ID_DEC,PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4 },
- { PCI_VENDOR_ID_DEC,PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2 },
+static struct pci_device_id c4_pci_tbl[] __devinitdata = {
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 4 },
+ { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 2 },
{ } /* Terminating entry */
};
add_card: 0, /* no add_card function */
};
-static int ncards = 0;
-
static int c4_attach_driver (struct capi_driver * driver)
{
char *p;
return 0;
}
-static int __init search_cards(struct capi_driver * driver,
- int pci_id, int nr)
+static int __devinit c4_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
- struct pci_dev * dev = NULL;
- int retval = 0;
-
- while ((dev = pci_find_subsys(
- PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285,
- PCI_VENDOR_ID_AVM, pci_id, dev))) {
- struct capicardparams param;
-
- if (pci_enable_device(dev) < 0) {
- printk(KERN_ERR "%s: failed to enable AVM-C%d\n",
- driver->name, nr);
- continue;
- }
- pci_set_master(dev);
-
- param.port = pci_resource_start(dev, 1);
- param.irq = dev->irq;
- param.membase = pci_resource_start(dev, 0);
-
- printk(KERN_INFO
- "%s: PCI BIOS reports AVM-C%d at i/o %#x, irq %d, mem %#x\n",
- driver->name, nr, param.port, param.irq, param.membase);
- retval = c4_add_card(driver, &param, dev, nr);
- if (retval != 0) {
- printk(KERN_ERR
- "%s: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
- driver->name, nr, param.port, param.irq, param.membase);
- continue;
- }
- ncards++;
+ int nr = ent->driver_data;
+ struct capi_driver *driver = (nr == 2) ? &c2_driver : &c4_driver;
+ int retval = 0;
+ struct capicardparams param;
+
+ if (pci_enable_device(dev) < 0) {
+ printk(KERN_ERR "%s: failed to enable AVM-C%d\n",
+ driver->name, nr);
+ return -ENODEV;
}
- return retval;
+ pci_set_master(dev);
+
+ param.port = pci_resource_start(dev, 1);
+ param.irq = dev->irq;
+ param.membase = pci_resource_start(dev, 0);
+
+ printk(KERN_INFO
+ "%s: PCI BIOS reports AVM-C%d at i/o %#x, irq %d, mem %#x\n",
+ driver->name, nr, param.port, param.irq, param.membase);
+
+ retval = c4_add_card(driver, &param, dev, nr);
+ if (retval != 0) {
+ printk(KERN_ERR
+ "%s: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
+ driver->name, nr, param.port, param.irq, param.membase);
+ return -ENODEV;
+ }
+ return 0;
}
+static struct pci_driver c4_pci_driver = {
+ name: "c4",
+ id_table: c4_pci_tbl,
+ probe: c4_probe,
+};
+
static int __init c4_init(void)
{
int retval;
+ int ncards;
MOD_INC_USE_COUNT;
return retval;
}
- retval = search_cards(&c4_driver, PCI_DEVICE_ID_AVM_C4, 4);
- if (retval && ncards == 0) {
- detach_capi_driver(&c2_driver);
- detach_capi_driver(&c4_driver);
- MOD_DEC_USE_COUNT;
- return retval;
- }
- retval = search_cards(&c2_driver, PCI_DEVICE_ID_AVM_C2, 2);
- if (retval && ncards == 0) {
- detach_capi_driver(&c2_driver);
- detach_capi_driver(&c4_driver);
- MOD_DEC_USE_COUNT;
- return retval;
- }
-
+ ncards = pci_register_driver(&c4_pci_driver);
if (ncards) {
printk(KERN_INFO "%s: %d C4/C2 card(s) detected\n",
c4_driver.name, ncards);
return 0;
}
printk(KERN_ERR "%s: NO C4/C2 card detected\n", c4_driver.name);
+ pci_unregister_driver(&c4_pci_driver);
detach_capi_driver(&c4_driver);
detach_capi_driver(&c2_driver);
MOD_DEC_USE_COUNT;
static void __exit c4_exit(void)
{
- detach_capi_driver(&c2_driver);
- detach_capi_driver(&c4_driver);
+ pci_unregister_driver(&c4_pci_driver);
+ detach_capi_driver(&c2_driver);
+ detach_capi_driver(&c4_driver);
}
module_init(c4_init);
-/* $Id: capi.c,v 1.44.6.15 2001/09/28 08:05:29 kai Exp $
+/* $Id: capi.c,v 1.1.4.1.2.2 2001/12/21 15:00:17 kai Exp $
*
* CAPI 2.0 Interface for Linux
*
#include <linux/netdevice.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
-#undef CAPI_PPP_ON_RAW_DEVICE
#endif /* CONFIG_PPP */
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
#include <linux/skbuff.h>
#include "capifs.h"
#endif
-static char *revision = "$Revision: 1.44.6.15 $";
+static char *revision = "$Revision: 1.1.4.1.2.2 $";
MODULE_DESCRIPTION("CAPI4Linux: Userspace /dev/capi20 interface");
MODULE_AUTHOR("Carsten Paeth");
int capi_major = 68; /* allocated */
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-int capi_rawmajor = 190;
int capi_ttymajor = 191;
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
MODULE_PARM(capi_major, "i");
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-MODULE_PARM(capi_rawmajor, "i");
MODULE_PARM(capi_ttymajor, "i");
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
struct capiminor;
struct capiminor {
- struct capiminor *next;
+ struct list_head list;
struct capincci *nccip;
unsigned int minor;
u16 datahandle;
u16 msgid;
- struct file *file;
struct tty_struct *tty;
int ttyinstop;
int ttyoutstop;
struct sk_buff_head outqueue;
int outbytes;
- /* for raw device */
- struct sk_buff_head recvqueue;
- wait_queue_head_t recvwait;
- wait_queue_head_t sendwait;
-
/* transmit path */
struct datahandle_queue {
struct datahandle_queue *next;
- u16 datahandle;
+ u16 datahandle;
} *ackqueue;
int nack;
/* -------- global variables ---------------------------------------- */
static struct capi_interface *capifuncs;
-static rwlock_t capidev_list_lock;
+
+static rwlock_t capidev_list_lock = RW_LOCK_UNLOCKED;
static LIST_HEAD(capidev_list);
+
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-static struct capiminor *minors;
+static rwlock_t capiminor_list_lock = RW_LOCK_UNLOCKED;
+static LIST_HEAD(capiminor_list);
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
static kmem_cache_t *capidev_cachep;
static struct capiminor *capiminor_alloc(u16 applid, u32 ncci)
{
- struct capiminor *mp, **pp;
- unsigned int minor = 0;
-
- MOD_INC_USE_COUNT;
- mp = (struct capiminor *)kmem_cache_alloc(capiminor_cachep, GFP_ATOMIC);
- if (!mp) {
- MOD_DEC_USE_COUNT;
- printk(KERN_ERR "capi: can't alloc capiminor\n");
+ struct capiminor *mp, *p;
+ struct list_head *l;
+ unsigned int minor = 0;
+ unsigned long flags;
+
+ MOD_INC_USE_COUNT;
+ mp = kmem_cache_alloc(capiminor_cachep, GFP_ATOMIC);
+ if (!mp) {
+ MOD_DEC_USE_COUNT;
+ printk(KERN_ERR "capi: can't alloc capiminor\n");
return 0;
}
#ifdef _DEBUG_REFCOUNT
skb_queue_head_init(&mp->inqueue);
skb_queue_head_init(&mp->outqueue);
- skb_queue_head_init(&mp->recvqueue);
- init_waitqueue_head(&mp->recvwait);
- init_waitqueue_head(&mp->sendwait);
-
- for (pp = &minors; *pp; pp = &(*pp)->next) {
- if ((*pp)->minor < minor)
- continue;
- if ((*pp)->minor > minor)
+ write_lock_irqsave(&capiminor_list_lock, flags);
+ list_for_each(l, &capiminor_list) {
+ p = list_entry(l, struct capiminor, list);
+ if (p->minor > minor) {
+ mp->minor = minor;
+ list_add_tail(&mp->list, &p->list);
break;
+ }
minor++;
}
- mp->minor = minor;
- mp->next = *pp;
- *pp = mp;
+ if (l == &capiminor_list) {
+ /* no free minor below an existing one: append at the tail */
+ mp->minor = minor;
+ list_add_tail(&mp->list, &capiminor_list);
+ }
+ write_unlock_irqrestore(&capiminor_list_lock, flags);
return mp;
}
static void capiminor_free(struct capiminor *mp)
{
- struct capiminor **pp;
+ unsigned long flags;
- pp = &minors;
- while (*pp) {
- if (*pp == mp) {
- *pp = (*pp)->next;
- if (mp->ttyskb) kfree_skb(mp->ttyskb);
- mp->ttyskb = 0;
- skb_queue_purge(&mp->recvqueue);
- skb_queue_purge(&mp->inqueue);
- skb_queue_purge(&mp->outqueue);
- capiminor_del_all_ack(mp);
- kmem_cache_free(capiminor_cachep, mp);
- MOD_DEC_USE_COUNT;
+ write_lock_irqsave(&capiminor_list_lock, flags);
+ list_del(&mp->list);
+ write_unlock_irqrestore(&capiminor_list_lock, flags);
+
+ if (mp->ttyskb) kfree_skb(mp->ttyskb);
+ mp->ttyskb = 0;
+ skb_queue_purge(&mp->inqueue);
+ skb_queue_purge(&mp->outqueue);
+ capiminor_del_all_ack(mp);
+ kmem_cache_free(capiminor_cachep, mp);
+ MOD_DEC_USE_COUNT;
#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capiminor_free %d\n", GET_USE_COUNT(THIS_MODULE));
+ printk(KERN_DEBUG "capiminor_free %d\n", GET_USE_COUNT(THIS_MODULE));
#endif
- return;
- } else {
- pp = &(*pp)->next;
- }
- }
}
-static struct capiminor *capiminor_find(unsigned int minor)
+struct capiminor *capiminor_find(unsigned int minor)
{
- struct capiminor *p;
- for (p = minors; p && p->minor != minor; p = p->next)
- ;
+ struct list_head *l;
+ struct capiminor *p = NULL;
+
+ read_lock(&capiminor_list_lock);
+ list_for_each(l, &capiminor_list) {
+ p = list_entry(l, struct capiminor, list);
+ if (p->minor == minor)
+ break;
+ }
+ read_unlock(&capiminor_list_lock);
+ if (l == &capiminor_list)
+ return NULL;
+
return p;
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
printk(KERN_DEBUG "set mp->nccip\n");
#endif
#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
- kdev = mk_kdev(capi_rawmajor, mp->minor);
- capifs_new_ncci('r', mp->minor, kdev);
kdev = mk_kdev(capi_ttymajor, mp->minor);
capifs_new_ncci(0, mp->minor, kdev);
#endif
printk(KERN_DEBUG "reset mp->nccip\n");
#endif
tty_hangup(mp->tty);
- } else if (mp->file) {
- mp->nccip = 0;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "reset mp->nccip\n");
-#endif
- wake_up_interruptible(&mp->recvwait);
- wake_up_interruptible(&mp->sendwait);
} else {
capiminor_free(mp);
}
/* -------- struct capidev ------------------------------------------ */
-static struct capidev *capidev_alloc(struct file *file)
+static struct capidev *capidev_alloc(void)
{
struct capidev *cdev;
unsigned long flags;
- cdev = (struct capidev *)kmem_cache_alloc(capidev_cachep, GFP_KERNEL);
+ cdev = kmem_cache_alloc(capidev_cachep, GFP_KERNEL);
if (!cdev)
return 0;
memset(cdev, 0, sizeof(struct capidev));
if (cdev->applid)
(*capifuncs->capi_release) (cdev->applid);
cdev->applid = 0;
+
skb_queue_purge(&cdev->recvqueue);
+
write_lock_irqsave(&capidev_list_lock, flags);
list_del(&cdev->list);
write_unlock_irqrestore(&capidev_list_lock, flags);
kfree_skb(skb);
return 0;
- } else if (mp->file) {
- if (skb_queue_len(&mp->recvqueue) > CAPINC_MAX_RECVQUEUE) {
-#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
- printk(KERN_DEBUG "capi: no room in raw queue\n");
-#endif
- return -1;
- }
- if ((nskb = gen_data_b3_resp_for(mp, skb)) == 0) {
- printk(KERN_ERR "capi: gen_data_b3_resp failed\n");
- return -1;
- }
- datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4);
- errcode = (*capifuncs->capi_put_message)(mp->applid, nskb);
- if (errcode != CAPI_NOERROR) {
- printk(KERN_ERR "capi: send DATA_B3_RESP failed=%x\n",
- errcode);
- kfree_skb(nskb);
- return -1;
- }
- (void)skb_pull(skb, CAPIMSG_LEN(skb->data));
-#ifdef _DEBUG_DATAFLOW
- printk(KERN_DEBUG "capi: DATA_B3_RESP %u len=%d => raw\n",
- datahandle, skb->len);
-#endif
- skb_queue_tail(&mp->recvqueue, skb);
- wake_up_interruptible(&mp->recvwait);
- return 0;
}
#ifdef _DEBUG_DATAFLOW
printk(KERN_DEBUG "capi: currently no receiver\n");
mp->outbytes -= len;
kfree_skb(skb);
}
- if (count)
- wake_up_interruptible(&mp->sendwait);
return count;
}
if (mp->tty) {
if (mp->tty->ldisc.write_wakeup)
mp->tty->ldisc.write_wakeup(mp->tty);
- } else {
- wake_up_interruptible(&mp->sendwait);
}
(void)handle_minor_send(mp);
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
if ((mp = nccip->minorp) != 0) {
count += atomic_read(&mp->ttyopencount);
- if (mp->file)
- count++;
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
return count;
if (file->private_data)
return -EEXIST;
- if ((file->private_data = capidev_alloc(file)) == 0)
+ if ((file->private_data = capidev_alloc()) == 0)
return -ENOMEM;
- MOD_INC_USE_COUNT;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capi_open %d\n", GET_USE_COUNT(THIS_MODULE));
-#endif
return 0;
}
capidev_free(cdev);
file->private_data = NULL;
- MOD_DEC_USE_COUNT;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capi_release %d\n", GET_USE_COUNT(THIS_MODULE));
-#endif
return 0;
}
};
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-/* -------- file_operations for capincci ---------------------------- */
-
-static int
-capinc_raw_open(struct inode *inode, struct file *file)
-{
- struct capiminor *mp;
-
- if (file->private_data)
- return -EEXIST;
- if ((mp = capiminor_find(minor(file->f_dentry->d_inode->i_rdev))) == 0)
- return -ENXIO;
- if (mp->nccip == 0)
- return -ENXIO;
- if (mp->file)
- return -EBUSY;
-
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capi_raw_open %d\n", GET_USE_COUNT(THIS_MODULE));
-#endif
-
- mp->datahandle = 0;
- mp->file = file;
- file->private_data = (void *)mp;
- handle_minor_recv(mp);
- return 0;
-}
-
-static ssize_t
-capinc_raw_read(struct file *file, char *buf, size_t count, loff_t *ppos)
-{
- struct capiminor *mp = (struct capiminor *)file->private_data;
- struct sk_buff *skb;
- int retval;
- size_t copied = 0;
-
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
- if (!mp || !mp->nccip)
- return -EINVAL;
-
- if ((skb = skb_dequeue(&mp->recvqueue)) == 0) {
-
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- for (;;) {
- interruptible_sleep_on(&mp->recvwait);
- if (mp->nccip == 0)
- return 0;
- if ((skb = skb_dequeue(&mp->recvqueue)) != 0)
- break;
- if (signal_pending(current))
- break;
- }
- if (skb == 0)
- return -ERESTARTNOHAND;
- }
- do {
- if (count < skb->len) {
- retval = copy_to_user(buf, skb->data, count);
- if (retval) {
- skb_queue_head(&mp->recvqueue, skb);
- return retval;
- }
- skb_pull(skb, count);
- skb_queue_head(&mp->recvqueue, skb);
- copied += count;
- return copied;
- } else {
- retval = copy_to_user(buf, skb->data, skb->len);
- if (retval) {
- skb_queue_head(&mp->recvqueue, skb);
- return copied;
- }
- copied += skb->len;
- count -= skb->len;
- buf += skb->len;
- kfree_skb(skb);
- }
- } while ((skb = skb_dequeue(&mp->recvqueue)) != 0);
-
- return copied;
-}
-
-static ssize_t
-capinc_raw_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
-{
- struct capiminor *mp = (struct capiminor *)file->private_data;
- struct sk_buff *skb;
- int retval;
-
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
- if (!mp || !mp->nccip)
- return -EINVAL;
-
- skb = alloc_skb(CAPI_DATA_B3_REQ_LEN+count, GFP_USER);
- if (!skb)
- return -ENOMEM;
-
- skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
- if ((retval = copy_from_user(skb_put(skb, count), buf, count))) {
- kfree_skb(skb);
- return -EFAULT;
- }
-
- while (skb_queue_len(&mp->outqueue) > CAPINC_MAX_SENDQUEUE) {
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
- interruptible_sleep_on(&mp->sendwait);
- if (mp->nccip == 0) {
- kfree_skb(skb);
- return -EIO;
- }
- if (signal_pending(current))
- return -ERESTARTNOHAND;
- }
- skb_queue_tail(&mp->outqueue, skb);
- mp->outbytes += skb->len;
- (void)handle_minor_send(mp);
- return count;
-}
-
-static unsigned int
-capinc_raw_poll(struct file *file, poll_table * wait)
-{
- struct capiminor *mp = (struct capiminor *)file->private_data;
- unsigned int mask = 0;
-
- if (!mp || !mp->nccip)
- return POLLERR|POLLHUP;
-
- poll_wait(file, &(mp->recvwait), wait);
- if (!skb_queue_empty(&mp->recvqueue))
- mask |= POLLIN | POLLRDNORM;
- poll_wait(file, &(mp->sendwait), wait);
- if (skb_queue_len(&mp->outqueue) > CAPINC_MAX_SENDQUEUE)
- mask = POLLOUT | POLLWRNORM;
- return mask;
-}
-
-static int
-capinc_raw_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct capiminor *mp = (struct capiminor *)file->private_data;
- if (!mp || !mp->nccip)
- return -EINVAL;
-
- switch (cmd) {
- }
- return -EINVAL;
-}
-
-static int
-capinc_raw_release(struct inode *inode, struct file *file)
-{
- struct capiminor *mp = (struct capiminor *)file->private_data;
-
- if (mp) {
- mp->file = 0;
- if (mp->nccip == 0) {
- capiminor_free(mp);
- file->private_data = NULL;
- }
- }
-
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_raw_release %d\n", GET_USE_COUNT(THIS_MODULE));
-#endif
- return 0;
-}
-
-static struct file_operations capinc_raw_fops =
-{
- owner: THIS_MODULE,
- llseek: no_llseek,
- read: capinc_raw_read,
- write: capinc_raw_write,
- poll: capinc_raw_poll,
- ioctl: capinc_raw_ioctl,
- open: capinc_raw_open,
- release: capinc_raw_release,
-};
-
/* -------- tty_operations for capincci ----------------------------- */
static int capinc_tty_open(struct tty_struct * tty, struct file * file)
return -ENXIO;
if (mp->nccip == 0)
return -ENXIO;
- if (mp->file)
- return -EBUSY;
- skb_queue_head_init(&mp->recvqueue);
- init_waitqueue_head(&mp->recvwait);
- init_waitqueue_head(&mp->sendwait);
tty->driver_data = (void *)mp;
#ifdef _DEBUG_REFCOUNT
printk(KERN_DEBUG "capi_tty_open %d\n", GET_USE_COUNT(THIS_MODULE));
return room;
}
-static int capinc_tty_chars_in_buffer(struct tty_struct *tty)
+int capinc_tty_chars_in_buffer(struct tty_struct *tty)
{
struct capiminor *mp = (struct capiminor *)tty->driver_data;
if (!mp || !mp->nccip) {
list_for_each(l, &capidev_list) {
cdev = list_entry(l, struct capidev, list);
for (np=cdev->nccis; np; np = np->next) {
- len += sprintf(page+len, "%d 0x%x%s\n",
- cdev->applid,
- np->ncci,
-#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
- "");
-#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- np->minorp && np->minorp->file ? " open" : "");
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+ len += sprintf(page+len, "%d 0x%x\n",
+ cdev->applid,
+ np->ncci);
if (len <= off) {
off -= len;
len = 0;
return -EIO;
}
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- if (devfs_register_chrdev(capi_rawmajor, "capi/r%d", &capinc_raw_fops)) {
- devfs_unregister_chrdev(capi_major, "capi20");
- printk(KERN_ERR "capi20: unable to get major %d\n", capi_rawmajor);
- MOD_DEC_USE_COUNT;
- return -EIO;
- }
- devfs_register_series (NULL, "capi/r%u", CAPINC_NR_PORTS,
- DEVFS_FL_DEFAULT,
- capi_rawmajor, 0,
- S_IFCHR | S_IRUSR | S_IWUSR,
- &capinc_raw_fops, NULL);
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
devfs_register (NULL, "isdn/capi20", DEVFS_FL_DEFAULT,
capi_major, 0, S_IFCHR | S_IRUSR | S_IWUSR,
&capi_fops, NULL);
MOD_DEC_USE_COUNT;
devfs_unregister_chrdev(capi_major, "capi20");
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- devfs_unregister_chrdev(capi_rawmajor, "capi/r%d");
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
devfs_unregister(devfs_find_handle(NULL, "capi20",
capi_major, 0,
DEVFS_SPECIAL_CHR, 0));
if (capinc_tty_init() < 0) {
(void) detach_capi_interface(&cuser);
devfs_unregister_chrdev(capi_major, "capi20");
- devfs_unregister_chrdev(capi_rawmajor, "capi/r%d");
MOD_DEC_USE_COUNT;
return -ENOMEM;
}
if (alloc_init() < 0) {
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- unsigned int j;
- devfs_unregister_chrdev(capi_rawmajor, "capi/r%d");
- for (j = 0; j < CAPINC_NR_PORTS; j++) {
- char devname[32];
- sprintf(devname, "capi/r%u", j);
- devfs_unregister(devfs_find_handle(NULL, devname, capi_rawmajor, j, DEVFS_SPECIAL_CHR, 0));
- }
capinc_tty_exit();
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
(void) detach_capi_interface(&cuser);
static void __exit capi_exit(void)
{
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- unsigned int j;
-#endif
alloc_exit();
(void)proc_exit();
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
capinc_tty_exit();
- devfs_unregister_chrdev(capi_rawmajor, "capi/r%d");
- for (j = 0; j < CAPINC_NR_PORTS; j++) {
- char devname[32];
- sprintf(devname, "capi/r%u", j);
- devfs_unregister(devfs_find_handle(NULL, devname, capi_rawmajor, j, DEVFS_SPECIAL_CHR, 0));
- }
#endif
(void) detach_capi_interface(&cuser);
printk(KERN_NOTICE "capi: Rev %s: unloaded\n", rev);
/* ------------------------------------------------------------- */
-static struct pci_device_id t1pci_pci_tbl[] __initdata = {
+static struct pci_device_id t1pci_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, PCI_ANY_ID, PCI_ANY_ID },
{ } /* Terminating entry */
};
add_card: 0, /* no add_card function */
};
-static int ncards = 0;
+/* ------------------------------------------------------------- */
+
+static int __devinit t1pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ struct capi_driver *driver = &t1pci_driver;
+ struct capicardparams param;
+ int retval;
+
+ if (pci_enable_device(dev) < 0) {
+ printk(KERN_ERR "%s: failed to enable AVM-T1-PCI\n",
+ driver->name);
+ return -ENODEV;
+ }
+ pci_set_master(dev);
+
+ param.port = pci_resource_start(dev, 1);
+ param.irq = dev->irq;
+ param.membase = pci_resource_start(dev, 0);
+
+ printk(KERN_INFO
+ "%s: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
+ driver->name, param.port, param.irq, param.membase);
+
+	retval = t1pci_add_card(driver, &param, dev);
+ if (retval != 0) {
+ printk(KERN_ERR
+ "%s: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
+ driver->name, param.port, param.irq, param.membase);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static struct pci_driver t1pci_pci_driver = {
+ name: "t1pci",
+ id_table: t1pci_pci_tbl,
+ probe: t1pci_probe,
+};
static int __init t1pci_init(void)
{
struct capi_driver *driver = &t1pci_driver;
- struct pci_dev *dev = NULL;
char *p;
- int retval;
+ int ncards;
MOD_INC_USE_COUNT;
if ((p = strchr(revision, ':')) != 0 && p[1]) {
- strncpy(driver->revision, p + 2, sizeof(driver->revision));
- driver->revision[sizeof(driver->revision)-1] = 0;
+ strncpy(driver->revision, p + 2, sizeof(driver->revision) - 1);
if ((p = strchr(driver->revision, '$')) != 0 && p > driver->revision)
*(p-1) = 0;
}
printk(KERN_INFO "%s: revision %s\n", driver->name, driver->revision);
- di = attach_capi_driver(driver);
+ di = attach_capi_driver(&t1pci_driver);
if (!di) {
printk(KERN_ERR "%s: failed to attach capi_driver\n",
driver->name);
return -EIO;
}
- while ((dev = pci_find_device(PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, dev))) {
- struct capicardparams param;
-
- if (pci_enable_device(dev) < 0) {
- printk(KERN_ERR "%s: failed to enable AVM-T1-PCI\n",
- driver->name);
- continue;
- }
- pci_set_master(dev);
-
- param.port = pci_resource_start(dev, 1);
- param.irq = dev->irq;
- param.membase = pci_resource_start(dev, 0);
-
- printk(KERN_INFO
- "%s: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
- driver->name, param.port, param.irq, param.membase);
-		retval = t1pci_add_card(driver, &param, dev);
- if (retval != 0) {
- printk(KERN_ERR
- "%s: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
- driver->name, param.port, param.irq, param.membase);
- continue;
- }
- ncards++;
- }
+ ncards = pci_register_driver(&t1pci_pci_driver);
if (ncards) {
printk(KERN_INFO "%s: %d T1-PCI card(s) detected\n",
driver->name, ncards);
return 0;
}
printk(KERN_ERR "%s: NO T1-PCI card detected\n", driver->name);
+ pci_unregister_driver(&t1pci_pci_driver);
detach_capi_driver(&t1pci_driver);
MOD_DEC_USE_COUNT;
return -ENODEV;
static void __exit t1pci_exit(void)
{
- detach_capi_driver(&t1pci_driver);
+ pci_unregister_driver(&t1pci_pci_driver);
+ detach_capi_driver(&t1pci_driver);
}
module_init(t1pci_init);
typedef void (*ctrl_complete_t)(void *);
typedef struct ctrl_msg {
- devrequest dr;
+ struct usb_ctrlrequest dr;
ctrl_complete_t complete;
void *context;
} ctrl_msg;
(unsigned char *)&ctrl->msg_fifo.data[r_index];
DBG(1,"request=0x%02x,value=0x%04x,index=%x",
- ((struct ctrl_msg *)urb->setup_packet)->dr.request,
- ((struct ctrl_msg *)urb->setup_packet)->dr.value,
- ((struct ctrl_msg *)urb->setup_packet)->dr.index);
+ ((struct ctrl_msg *)urb->setup_packet)->dr.bRequest,
+ ((struct ctrl_msg *)urb->setup_packet)->dr.wValue,
+ ((struct ctrl_msg *)urb->setup_packet)->dr.wIndex);
// Prepare the URB
urb->dev = adapter->usb_dev;
}
ctrl_msg = &ctrl->msg_fifo.data[w_index];
- ctrl_msg->dr.requesttype = requesttype;
- ctrl_msg->dr.request = request;
- ctrl_msg->dr.value = cpu_to_le16p(&value);
- ctrl_msg->dr.index = cpu_to_le16p(&index);
- ctrl_msg->dr.length = 0;
+ ctrl_msg->dr.bRequestType = requesttype;
+ ctrl_msg->dr.bRequest = request;
+ ctrl_msg->dr.wValue = cpu_to_le16p(&value);
+ ctrl_msg->dr.wIndex = cpu_to_le16p(&index);
+ ctrl_msg->dr.wLength = 0;
ctrl_msg->complete = complete;
ctrl_msg->context = context;
ctrl_msg = (struct ctrl_msg *)urb->setup_packet;
- if (ctrl_msg->dr.request == USB_REQ_CLEAR_FEATURE) {
+ if (ctrl_msg->dr.bRequest == USB_REQ_CLEAR_FEATURE) {
/* Special case handling for pipe reset */
- le16_to_cpus(&ctrl_msg->dr.index);
+ le16_to_cpus(&ctrl_msg->dr.wIndex);
usb_endpoint_running(adapter->usb_dev,
- ctrl_msg->dr.index & ~USB_DIR_IN,
- (ctrl_msg->dr.index & USB_DIR_IN) == 0);
+ ctrl_msg->dr.wIndex & ~USB_DIR_IN,
+ (ctrl_msg->dr.wIndex & USB_DIR_IN) == 0);
/* toggle is reset on clear */
usb_settoggle(adapter->usb_dev,
- ctrl_msg->dr.index & ~USB_DIR_IN,
- (ctrl_msg->dr.index & USB_DIR_IN) == 0,
+ ctrl_msg->dr.wIndex & ~USB_DIR_IN,
+ (ctrl_msg->dr.wIndex & USB_DIR_IN) == 0,
0);
*/
int st5481_isoc_flatten(struct urb *urb)
{
- piso_packet_descriptor_t pipd,pend;
+ iso_packet_descriptor_t *pipd,*pend;
unsigned char *src,*dst;
unsigned int len;
* bdflush, otherwise bdflush will deadlock if there are too
* many dirty RAID5 blocks.
*/
- current->policy = SCHED_OTHER;
- current->nice = -20;
unlock_kernel();
complete(thread->event);
"(but not more than %d KB/sec) for reconstruction.\n",
sysctl_speed_limit_max);
- /*
- * Resync has low priority.
- */
- current->nice = 19;
-
is_mddev_idle(mddev); /* this also initializes IO event counters */
for (m = 0; m < SYNC_MARKS; m++) {
mark[m] = jiffies;
currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > sysctl_speed_limit_min) {
- current->nice = 19;
-
if ((currspeed > sysctl_speed_limit_max) ||
!is_mddev_idle(mddev)) {
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(HZ/4);
goto repeat;
}
- } else
- current->nice = -20;
+ }
}
printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev));
err = 0;
/* First of all: check for active disciplines and hangup them.
*/
do {
- if (busy) {
- current->time_slice = 0;
- schedule();
- }
+ if (busy)
+ yield();
busy = 0;
local_bh_disable();
unsigned char *scsi_bios_ptable(kdev_t dev)
{
+ struct block_device *bdev;
unsigned char *res = kmalloc(66, GFP_KERNEL);
kdev_t rdev = mk_kdev(major(dev), minor(dev) & ~0x0f);
if (res) {
- struct buffer_head *bh = bread(rdev, 0, block_size(rdev));
- if (bh) {
- memcpy(res, bh->b_data + 0x1be, 66);
- } else {
- kfree(res);
- res = NULL;
- }
+ struct buffer_head *bh;
+ int err;
+
+ bdev = bdget(kdev_t_to_nr(rdev));
+ if (!bdev)
+ goto fail;
+ err = blkdev_get(bdev, FMODE_READ, 0, BDEV_FILE);
+ if (err)
+ goto fail;
+ bh = __bread(bdev, 0, block_size(rdev));
+ if (!bh)
+ goto fail2;
+ memcpy(res, bh->b_data + 0x1be, 66);
+ brelse(bh);
+ blkdev_put(bdev, BDEV_FILE);
}
return res;
+fail2:
+ blkdev_put(bdev, BDEV_FILE);
+fail:
+ kfree(res);
+ return NULL;
}
/*
comment 'USB Device Class drivers'
dep_tristate ' USB Audio support' CONFIG_USB_AUDIO $CONFIG_USB $CONFIG_SOUND
dep_tristate ' USB Bluetooth support (EXPERIMENTAL)' CONFIG_USB_BLUETOOTH $CONFIG_USB $CONFIG_EXPERIMENTAL
+if [ "$CONFIG_SCSI" = "n" ]; then
+ comment ' SCSI support is needed for USB Storage'
+fi
dep_tristate ' USB Mass Storage support' CONFIG_USB_STORAGE $CONFIG_USB $CONFIG_SCSI
dep_mbool ' USB Mass Storage verbose debug' CONFIG_USB_STORAGE_DEBUG $CONFIG_USB_STORAGE
dep_mbool ' Datafab MDCFE-B Compact Flash Reader support' CONFIG_USB_STORAGE_DATAFAB $CONFIG_USB_STORAGE $CONFIG_EXPERIMENTAL
comment 'USB Miscellaneous drivers'
dep_tristate ' USB Diamond Rio500 support (EXPERIMENTAL)' CONFIG_USB_RIO500 $CONFIG_USB $CONFIG_EXPERIMENTAL
+dep_tristate ' USB Auerswald ISDN support (EXPERIMENTAL)' CONFIG_USB_AUERSWALD $CONFIG_USB $CONFIG_EXPERIMENTAL
endmenu
# Each configuration option enables a list of files.
obj-$(CONFIG_USB) += usbcore.o
+
+# EHCI needs to be linked before the other HCD drivers
+ifeq ($(CONFIG_USB_EHCI_HCD),y)
+ obj-y += hcd/ehci-hcd.o
+endif
+
obj-$(CONFIG_USB_UHCI) += usb-uhci.o
obj-$(CONFIG_USB_UHCI_ALT) += uhci.o
obj-$(CONFIG_USB_OHCI) += usb-ohci.o
obj-$(CONFIG_USB_HPUSBSCSI) += hpusbscsi.o
obj-$(CONFIG_USB_BLUETOOTH) += bluetooth.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
+obj-$(CONFIG_USB_AUERSWALD) += auerswald.o
# Object files in subdirectories
mod-subdirs := serial hcd
subdir-$(CONFIG_USB_SERIAL) += serial
subdir-$(CONFIG_USB_STORAGE) += storage
-ifeq ($(CONFIG_USB_EHCI_HCD),y)
- obj-y += hcd/ehci-hcd.o
-endif
-
ifeq ($(CONFIG_USB_SERIAL),y)
obj-y += serial/usb-serial.o
endif
static void acm_ctrl_irq(struct urb *urb)
{
struct acm *acm = urb->context;
- devrequest *dr = urb->transfer_buffer;
+ struct usb_ctrlrequest *dr = urb->transfer_buffer;
unsigned char *data = (unsigned char *)(dr + 1);
int newctrl;
return;
}
- switch (dr->request) {
+ switch (dr->bRequest) {
case ACM_IRQ_NETWORK:
default:
dbg("unknown control event received: request %d index %d len %d data0 %d data1 %d",
- dr->request, dr->index, dr->length, data[0], data[1]);
+ dr->bRequest, dr->wIndex, dr->wLength, data[0], data[1]);
return;
}
}
--- /dev/null
+/*****************************************************************************/
+/*
+ * auerswald.c -- Auerswald PBX/System Telephone usb driver.
+ *
+ * Copyright (C) 2001 Wolfgang Mües (wmues@nexgo.de)
+ *
+ * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl)
+ * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+ /*****************************************************************************/
+
+/* Standard Linux module include files */
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/devfs_fs_kernel.h>
+#undef DEBUG /* include debug macros until it's done */
+#include <linux/usb.h>
+
+/*-------------------------------------------------------------------*/
+/* Debug support */
+#ifdef DEBUG
+#define dump( adr, len) \
+do { \
+ unsigned int u; \
+ printk (KERN_DEBUG); \
+ for (u = 0; u < len; u++) \
+ printk (" %02X", adr[u] & 0xFF); \
+ printk ("\n"); \
+} while (0)
+#else
+#define dump( adr, len)
+#endif
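+
+/* usage sketch (with DEBUG defined): dump (bp->bufp, bp->len);
+   prints the buffer contents as hex bytes at KERN_DEBUG level */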
+
+/*-------------------------------------------------------------------*/
+/* Version Information */
+#define DRIVER_VERSION "0.9.9"
+#define DRIVER_AUTHOR "Wolfgang Mües <wmues@nexgo.de>"
+#define DRIVER_DESC "Auerswald PBX/System Telephone usb driver"
+
+/*-------------------------------------------------------------------*/
+/* Private declarations for Auerswald USB driver */
+
+/* Auerswald Vendor ID */
+#define ID_AUERSWALD 0x09BF
+
+#ifndef AUER_MINOR_BASE /* allow external override */
+#define AUER_MINOR_BASE 80 /* auerswald driver minor number */
+#endif
+
+/* we can have up to this number of devices plugged in at once */
+#define AUER_MAX_DEVICES 16
+
+/* prefix for the device descriptors in /dev/usb */
+#define AU_PREFIX "auer"
+
+/* Number of read buffers for each device */
+#define AU_RBUFFERS 10
+
+/* Number of chain elements for each control chain */
+#define AUCH_ELEMENTS 20
+
+/* Number of retries in communication */
+#define AU_RETRIES 10
+
+/*-------------------------------------------------------------------*/
+/* vendor specific protocol */
+/* Header Byte */
+#define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */
+#define AUH_DIRECT 0x00 /* data is for USB device */
+#define AUH_INDIRECT 0x80 /* USB device is relay */
+
+#define AUH_SPLITMASK 0x40 /* mask for split bit */
+#define AUH_UNSPLIT 0x00 /* data block is full-size */
+#define AUH_SPLIT 0x40 /* data block is part of a larger one,
+ split-byte follows */
+
+#define AUH_TYPEMASK 0x3F /* mask for type of data transfer */
+#define AUH_TYPESIZE 0x40 /* different types */
+#define AUH_DCHANNEL 0x00 /* D channel data */
+#define AUH_B1CHANNEL 0x01 /* B1 channel transparent */
+#define AUH_B2CHANNEL 0x02 /* B2 channel transparent */
+/* 0x03..0x0F reserved for driver internal use */
+#define AUH_COMMAND 0x10 /* Command channel */
+#define AUH_BPROT 0x11 /* Configuration block protocol */
+#define AUH_DPROTANA 0x12 /* D channel protocol analyzer */
+#define AUH_TAPI 0x13 /* telephone api data (ATD) */
+/* 0x14..0x3F reserved for other protocols */
+#define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */
+#define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */
+
+#define AUH_SIZE 1 /* Size of Header Byte */
+
+/* Split Byte. Only present if the split bit in the header byte is set. */
+#define AUS_STARTMASK 0x80 /* mask for first block of split frame */
+#define AUS_FIRST 0x80 /* first block */
+#define AUS_FOLLOW 0x00 /* following block */
+
+#define AUS_ENDMASK 0x40 /* mask for last block of split frame */
+#define AUS_END 0x40 /* last block */
+#define AUS_NOEND 0x00 /* not the last block */
+
+#define AUS_LENMASK 0x3F /* mask for block length information */
+
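+/* Illustrative sketch (not used by the driver): decoding the header and
+   split bytes with the masks above. "buf" is assumed to point to a
+   received block of at least two bytes. */
+#if 0
+static void auer_decode_example (unsigned char *buf)
+{
+	unsigned char header = buf[0];
+	unsigned int service = header & AUH_TYPEMASK;
+	if ((header & AUH_SPLITMASK) == AUH_SPLIT) {
+		/* a split byte follows the header byte */
+		unsigned char split = buf[1];
+		dbg ("service %u: split block, first=%d last=%d len=%u",
+		     service,
+		     (split & AUS_STARTMASK) == AUS_FIRST,
+		     (split & AUS_ENDMASK) == AUS_END,
+		     split & AUS_LENMASK);
+	} else {
+		dbg ("service %u: unsplit block", service);
+	}
+}
+#endif
+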
+/* Request types */
+#define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */
+#define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */
+
+/* Vendor Requests */
+#define AUV_GETINFO 0x00 /* GetDeviceInfo */
+#define AUV_WBLOCK 0x01 /* Write Block */
+#define AUV_RBLOCK 0x02 /* Read Block */
+#define AUV_CHANNELCTL 0x03 /* Channel Control */
+#define AUV_DUMMY 0x04 /* Dummy Out for retry */
+
+/* Device Info Types */
+#define AUDI_NUMBCH 0x0000 /* Number of supported B channels */
+#define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */
+#define AUDI_MBCTRANS 0x0002 /* max. Blocklength of control transfer */
+
+/* Interrupt endpoint definitions */
+#define AU_IRQENDP 1 /* Endpoint number */
+#define AU_IRQCMDID 16 /* Command-block ID */
+#define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */
+#define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */
+
+/* Device String Descriptors */
+#define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */
+#define AUSI_DEVICE 2 /* Name of the Device */
+#define AUSI_SERIALNR 3 /* Serial Number */
+#define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */
+
+#define AUSI_DLEN 100 /* Max. Length of Device Description */
+
+#define AUV_RETRY 0x101 /* First Firmware version which can do control retries */
+
+/*-------------------------------------------------------------------*/
+/* External data structures / Interface */
+typedef struct
+{
+ char *buf; /* return buffer for string contents */
+ unsigned int bsize; /* size of return buffer */
+} audevinfo_t,*paudevinfo_t;
+
+/* IO controls */
+#define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */
+#define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */
+#define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */
+#define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */
+#define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */
+#define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */
+#define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmit channel ready to send */
+/* 'U' 0xF7..0xFF reserved */
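+
+/* Illustrative user-space sketch (an assumption, not shipped code; the
+   exact argument conventions are defined by the driver's ioctl handler):
+
+	int fd = open ("/dev/usb/auer0", O_RDWR);
+	int buflen;
+	ioctl (fd, IOCTL_AU_BUFLEN, &buflen);	// max. transfer size
+	ioctl (fd, IOCTL_AU_SERVREQ, &service);	// bind to a service channel
+	// read()/write() then carry the vendor protocol payload
+*/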
+
+/*-------------------------------------------------------------------*/
+/* Internal data structures */
+
+/* ..................................................................*/
+/* urb chain element */
+struct auerchain; /* forward for circular reference */
+typedef struct
+{
+ struct auerchain *chain; /* pointer to the chain to which this element belongs */
+ urb_t * urbp; /* pointer to attached urb */
+ void *context; /* saved URB context */
+ usb_complete_t complete; /* saved URB completion function */
+ struct list_head list; /* to include element into a list */
+} auerchainelement_t,*pauerchainelement_t;
+
+/* urb chain */
+typedef struct auerchain
+{
+ pauerchainelement_t active; /* element which is submitted to urb */
+ spinlock_t lock; /* protection against interrupts */
+ struct list_head waiting_list; /* list of waiting elements */
+ struct list_head free_list; /* list of available elements */
+} auerchain_t,*pauerchain_t;
+
+/* ...................................................................*/
+/* buffer element */
+struct auerbufctl; /* forward */
+typedef struct
+{
+ char *bufp; /* reference to allocated data buffer */
+ unsigned int len; /* number of characters in data buffer */
+ unsigned int retries; /* for urb retries */
+ struct usb_ctrlrequest *dr; /* for setup data in control messages */
+ urb_t * urbp; /* USB urb */
+ struct auerbufctl *list; /* pointer to list */
+ struct list_head buff_list; /* reference to next buffer in list */
+} auerbuf_t,*pauerbuf_t;
+
+/* buffer list control block */
+typedef struct auerbufctl
+{
+ spinlock_t lock; /* protection in interrupt */
+ struct list_head free_buff_list;/* free buffers */
+ struct list_head rec_buff_list; /* buffers with receive data */
+} auerbufctl_t,*pauerbufctl_t;
+
+/* ...................................................................*/
+/* service context */
+struct auerscon; /* forward */
+typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t);
+typedef void (*auer_disconn_t) (struct auerscon*);
+typedef struct auerscon
+{
+ unsigned int id; /* protocol service id AUH_xxxx */
+ auer_dispatch_t dispatch; /* dispatch read buffer */
+ auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */
+} auerscon_t,*pauerscon_t;
+
+/* ...................................................................*/
+/* USB device context */
+typedef struct
+{
+ struct semaphore mutex; /* protection in user context */
+ char name[16]; /* name of the /dev/usb entry */
+ unsigned int dtindex; /* index in the device table */
+ devfs_handle_t devfs; /* devfs device node */
+ struct usb_device * usbdev; /* USB device handle */
+ int open_count; /* count the number of open character channels */
+ char dev_desc[AUSI_DLEN];/* for storing a textual description */
+ unsigned int maxControlLength; /* max. Length of control packet (without header) */
+ urb_t * inturbp; /* interrupt urb */
+ char * intbufp; /* data buffer for interrupt urb */
+ unsigned int irqsize; /* size of interrupt endpoint 1 */
+ struct auerchain controlchain; /* for chaining of control messages */
+ auerbufctl_t bufctl; /* Buffer control for control transfers */
+ pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */
+ unsigned int version; /* Version of the device */
+ wait_queue_head_t bufferwait; /* wait for a control buffer */
+} auerswald_t,*pauerswald_t;
+
+/* the global usb devfs handle */
+extern devfs_handle_t usb_devfs_handle;
+
+/* array of pointers to our devices that are currently connected */
+static pauerswald_t dev_table[AUER_MAX_DEVICES];
+
+/* lock to protect the dev_table structure */
+static struct semaphore dev_table_mutex;
+
+/* ................................................................... */
+/* character device context */
+typedef struct
+{
+ struct semaphore mutex; /* protection in user context */
+ pauerswald_t auerdev; /* context pointer of assigned device */
+ auerbufctl_t bufctl; /* controls the buffer chain */
+ auerscon_t scontext; /* service context */
+ wait_queue_head_t readwait; /* for synchronous reading */
+ struct semaphore readmutex; /* protection against multiple reads */
+ pauerbuf_t readbuf; /* buffer held for partial reading */
+ unsigned int readoffset; /* current offset in readbuf */
+ unsigned int removed; /* is != 0 if device is removed */
+} auerchar_t,*pauerchar_t;
+
+
+/*-------------------------------------------------------------------*/
+/* Forwards */
+static void auerswald_ctrlread_complete (urb_t * urb);
+static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp);
+
+
+/*-------------------------------------------------------------------*/
+/* USB chain helper functions */
+/* -------------------------- */
+
+/* completion function for chained urbs */
+static void auerchain_complete (urb_t * urb)
+{
+ unsigned long flags;
+ int result;
+
+ /* get pointer to element and to chain */
+ pauerchainelement_t acep = (pauerchainelement_t) urb->context;
+ pauerchain_t acp = acep->chain;
+
+ /* restore original entries in urb */
+ urb->context = acep->context;
+ urb->complete = acep->complete;
+
+ dbg ("auerchain_complete called");
+
+ /* call original completion function
+ NOTE: this function may lead to more urbs submitted into the chain.
+ (no chain lock at calling complete()!)
+ acp->active != NULL is protecting us against recursion.*/
+ urb->complete (urb);
+
+ /* detach element from chain data structure */
+ spin_lock_irqsave (&acp->lock, flags);
+ if (acp->active != acep) /* paranoia debug check */
+ dbg ("auerchain_complete: completion on non-active element called!");
+ else
+ acp->active = NULL;
+
+ /* add the used chain element to the list of free elements */
+ list_add_tail (&acep->list, &acp->free_list);
+ acep = NULL;
+
+ /* is there a new element waiting in the chain? */
+ if (!acp->active && !list_empty (&acp->waiting_list)) {
+ /* yes: get the entry */
+ struct list_head *tmp = acp->waiting_list.next;
+ list_del (tmp);
+ acep = list_entry (tmp, auerchainelement_t, list);
+ acp->active = acep;
+ }
+ spin_unlock_irqrestore (&acp->lock, flags);
+
+ /* submit the new urb */
+ if (acep) {
+ urb = acep->urbp;
+ dbg ("auerchain_complete: submitting next urb from chain");
+ urb->status = 0; /* needed! */
+ result = usb_submit_urb( urb);
+
+ /* check for submit errors */
+ if (result) {
+ urb->status = result;
+ dbg("auerchain_complete: usb_submit_urb with error code %d", result);
+ /* and do error handling via *this* completion function (recursive) */
+ auerchain_complete( urb);
+ }
+ } else {
+ /* simple return without submitting a new urb.
+ The empty chain is detected with acp->active == NULL. */
+ }
+}
+
+
+/* submit function for chained urbs
+ this function may be called from completion context or from user space!
+ early = 1 -> submit in front of chain
+*/
+static int auerchain_submit_urb_list (pauerchain_t acp, urb_t * urb, int early)
+{
+ int result;
+ unsigned long flags;
+ pauerchainelement_t acep = NULL;
+
+ dbg ("auerchain_submit_urb called");
+
+ /* try to get a chain element */
+ spin_lock_irqsave (&acp->lock, flags);
+ if (!list_empty (&acp->free_list)) {
+ /* yes: get the entry */
+ struct list_head *tmp = acp->free_list.next;
+ list_del (tmp);
+ acep = list_entry (tmp, auerchainelement_t, list);
+ }
+ spin_unlock_irqrestore (&acp->lock, flags);
+
+ /* if no chain element available: return with error */
+ if (!acep) {
+ return -ENOMEM;
+ }
+
+ /* fill in the new chain element values */
+ acep->chain = acp;
+ acep->context = urb->context;
+ acep->complete = urb->complete;
+ acep->urbp = urb;
+ INIT_LIST_HEAD (&acep->list);
+
+ /* modify urb */
+ urb->context = acep;
+ urb->complete = auerchain_complete;
+ urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */
+
+ /* add element to chain - or start it immediately */
+ spin_lock_irqsave (&acp->lock, flags);
+ if (acp->active) {
+ /* there is traffic in the chain, simple add element to chain */
+ if (early) {
+ dbg ("adding new urb to head of chain");
+ list_add (&acep->list, &acp->waiting_list);
+ } else {
+ dbg ("adding new urb to end of chain");
+ list_add_tail (&acep->list, &acp->waiting_list);
+ }
+ acep = NULL;
+ } else {
+ /* the chain is empty. Prepare restart */
+ acp->active = acep;
+ }
+ /* Spinlock has to be released before usb_submit_urb! */
+ spin_unlock_irqrestore (&acp->lock, flags);
+
+ /* Submit urb if immediate restart */
+ if (acep) {
+ dbg("submitting urb immediate");
+ urb->status = 0; /* needed! */
+ result = usb_submit_urb( urb);
+ /* check for submit errors */
+ if (result) {
+ urb->status = result;
+ dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result);
+ /* and do error handling via completion function */
+ auerchain_complete( urb);
+ }
+ }
+
+ return 0;
+}
+
+/* submit function for chained urbs
+ this function may be called from completion context or from user space!
+*/
+static int auerchain_submit_urb (pauerchain_t acp, urb_t * urb)
+{
+ return auerchain_submit_urb_list (acp, urb, 0);
+}
+
+/* cancel an urb which is submitted to the chain
+ the result is 0 if the urb is cancelled, or -EINPROGRESS if
+ USB_ASYNC_UNLINK is set and the function is successfully started.
+*/
+static int auerchain_unlink_urb (pauerchain_t acp, urb_t * urb)
+{
+ unsigned long flags;
+ urb_t * urbp;
+ pauerchainelement_t acep;
+ struct list_head *tmp;
+
+ dbg ("auerchain_unlink_urb called");
+
+ /* search the chain of waiting elements */
+ spin_lock_irqsave (&acp->lock, flags);
+ list_for_each (tmp, &acp->waiting_list) {
+ acep = list_entry (tmp, auerchainelement_t, list);
+ if (acep->urbp == urb) {
+ list_del (tmp);
+ urb->context = acep->context;
+ urb->complete = acep->complete;
+ list_add_tail (&acep->list, &acp->free_list);
+ spin_unlock_irqrestore (&acp->lock, flags);
+ dbg ("unlink waiting urb");
+ urb->status = -ENOENT;
+ urb->complete (urb);
+ return 0;
+ }
+ }
+ /* not found. */
+ spin_unlock_irqrestore (&acp->lock, flags);
+
+ /* get the active urb */
+ acep = acp->active;
+ if (acep) {
+ urbp = acep->urbp;
+
+ /* check if we have to cancel the active urb */
+ if (urbp == urb) {
+ /* note that there is a race condition between the check above
+ and the unlink() call because of no lock. This race is harmless,
+ because the usb module will detect the unlink() after completion.
+ We can't use the acp->lock here because the completion function
+ wants to grab it.
+ */
+ dbg ("unlink active urb");
+ return usb_unlink_urb (urbp);
+ }
+ }
+
+ /* not found anyway
+ ... is some kind of success
+ */
+ dbg ("urb to unlink not found in chain");
+ return 0;
+}
+
+/* cancel all urbs which are in the chain.
+ this function must not be called from interrupt or completion handler.
+*/
+static void auerchain_unlink_all (pauerchain_t acp)
+{
+ unsigned long flags;
+ urb_t * urbp;
+ pauerchainelement_t acep;
+
+ dbg ("auerchain_unlink_all called");
+
+ /* clear the chain of waiting elements */
+ spin_lock_irqsave (&acp->lock, flags);
+ while (!list_empty (&acp->waiting_list)) {
+ /* get the next entry */
+ struct list_head *tmp = acp->waiting_list.next;
+ list_del (tmp);
+ acep = list_entry (tmp, auerchainelement_t, list);
+ urbp = acep->urbp;
+ urbp->context = acep->context;
+ urbp->complete = acep->complete;
+ list_add_tail (&acep->list, &acp->free_list);
+ spin_unlock_irqrestore (&acp->lock, flags);
+ dbg ("unlink waiting urb");
+ urbp->status = -ENOENT;
+ urbp->complete (urbp);
+ spin_lock_irqsave (&acp->lock, flags);
+ }
+ spin_unlock_irqrestore (&acp->lock, flags);
+
+ /* clear the active urb */
+ acep = acp->active;
+ if (acep) {
+ urbp = acep->urbp;
+ urbp->transfer_flags &= ~USB_ASYNC_UNLINK;
+ dbg ("unlink active urb");
+ usb_unlink_urb (urbp);
+ }
+}
+
+
+/* free the chain.
+ this function must not be called from interrupt or completion handler.
+*/
+static void auerchain_free (pauerchain_t acp)
+{
+ unsigned long flags;
+ pauerchainelement_t acep;
+
+ dbg ("auerchain_free called");
+
+ /* first, cancel all pending urbs */
+ auerchain_unlink_all (acp);
+
+ /* free the elements */
+ spin_lock_irqsave (&acp->lock, flags);
+ while (!list_empty (&acp->free_list)) {
+ /* get the next entry */
+ struct list_head *tmp = acp->free_list.next;
+ list_del (tmp);
+ spin_unlock_irqrestore (&acp->lock, flags);
+ acep = list_entry (tmp, auerchainelement_t, list);
+ kfree (acep);
+ spin_lock_irqsave (&acp->lock, flags);
+ }
+ spin_unlock_irqrestore (&acp->lock, flags);
+}
+
+
+/* Init the chain control structure */
+static void auerchain_init (pauerchain_t acp)
+{
+ /* init the chain data structure */
+ acp->active = NULL;
+ spin_lock_init (&acp->lock);
+ INIT_LIST_HEAD (&acp->waiting_list);
+ INIT_LIST_HEAD (&acp->free_list);
+}
+
+/* setup a chain.
+ It is assumed that there is no concurrency while setting up the chain
+ requirement: auerchain_init()
+*/
+static int auerchain_setup (pauerchain_t acp, unsigned int numElements)
+{
+ pauerchainelement_t acep;
+
+ dbg ("auerchain_setup called with %d elements", numElements);
+
+ /* fill the list of free elements */
+ for (;numElements; numElements--) {
+ acep = (pauerchainelement_t) kmalloc (sizeof (auerchainelement_t), GFP_KERNEL);
+ if (!acep) goto ac_fail;
+ memset (acep, 0, sizeof (auerchainelement_t));
+ INIT_LIST_HEAD (&acep->list);
+ list_add_tail (&acep->list, &acp->free_list);
+ }
+ return 0;
+
+ac_fail:/* free the elements */
+ while (!list_empty (&acp->free_list)) {
+ /* get the next entry */
+ struct list_head *tmp = acp->free_list.next;
+ list_del (tmp);
+ acep = list_entry (tmp, auerchainelement_t, list);
+ kfree (acep);
+ }
+ return -ENOMEM;
+}
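+
+/* Typical chain lifecycle (sketch; assumed order of the helper calls):
+
+	auerchain_init (&chain);			// prepare lists and lock
+	if (auerchain_setup (&chain, AUCH_ELEMENTS))	// preallocate elements
+		return -ENOMEM;
+	auerchain_submit_urb (&chain, urb);	// submissions are serialized
+	...
+	auerchain_free (&chain);		// cancels and frees everything
+*/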
+
+
+/* completion handler for synchronous chained URBs */
+static void auerchain_blocking_completion (urb_t *urb)
+{
+ wait_queue_head_t *wakeup = (wait_queue_head_t *)urb->context;
+ wake_up (wakeup);
+}
+
+
+/* Starts chained urb and waits for completion or timeout */
+static int auerchain_start_wait_urb (pauerchain_t acp, urb_t *urb, int timeout, int* actual_length)
+{
+ DECLARE_WAITQUEUE (wait, current);
+ DECLARE_WAIT_QUEUE_HEAD (wqh);
+ int status;
+
+ dbg ("auerchain_start_wait_urb called");
+ init_waitqueue_head (&wqh);
+ current->state = TASK_INTERRUPTIBLE;
+ add_wait_queue (&wqh, &wait);
+ urb->context = &wqh;
+ status = auerchain_submit_urb ( acp, urb);
+ if (status) {
+ /* something went wrong */
+ current->state = TASK_RUNNING;
+ remove_wait_queue (&wqh, &wait);
+ return status;
+ }
+
+ if (urb->status == -EINPROGRESS) {
+ while (timeout && urb->status == -EINPROGRESS)
+ status = timeout = schedule_timeout (timeout);
+ } else
+ status = 1;
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue (&wqh, &wait);
+
+ if (!status) {
+ /* timeout */
+ dbg ("auerchain_start_wait_urb: timeout");
+ auerchain_unlink_urb (acp, urb); /* remove urb safely */
+ status = -ETIMEDOUT;
+ } else
+ status = urb->status;
+
+ if (actual_length)
+ *actual_length = urb->actual_length;
+
+ return status;
+}
+
+
+/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion
+ acp: pointer to the auerchain
+ dev: pointer to the usb device to send the message to
+ pipe: endpoint "pipe" to send the message to
+ request: USB message request value
+ requesttype: USB message request type value
+ value: USB message value
+ index: USB message index value
+ data: pointer to the data to send
+ size: length in bytes of the data to send
+ timeout: time to wait for the message to complete before timing out (if 0 the wait is forever)
+
+ This function sends a simple control message to a specified endpoint
+ and waits for the message to complete, or timeout.
+
+ If successful, it returns the transferred length, otherwise a negative error number.
+
+ Don't use this function from within an interrupt context, like a
+ bottom half handler. If you need an asynchronous message, or need to send
+ a message from within interrupt context, use auerchain_submit_urb()
+*/
+static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
+ __u16 value, __u16 index, void *data, __u16 size, int timeout)
+{
+ int ret;
+ struct usb_ctrlrequest *dr;
+ urb_t *urb;
+ int length;
+
+ dbg ("auerchain_control_msg");
+ dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+ urb = usb_alloc_urb (0);
+ if (!urb) {
+ kfree (dr);
+ return -ENOMEM;
+ }
+
+ dr->bRequestType = requesttype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16 (value);
+ dr->wIndex = cpu_to_le16 (index);
+ dr->wLength = cpu_to_le16 (size);
+
+ FILL_CONTROL_URB (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */
+ (usb_complete_t)auerchain_blocking_completion,0);
+ ret = auerchain_start_wait_urb (acp, urb, timeout, &length);
+
+ usb_free_urb (urb);
+ kfree (dr);
+
+ if (ret < 0)
+ return ret;
+ else
+ return length;
+}
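+
+/* usage sketch, mirroring the channel-open call in auerswald_addservice()
+   further below:
+
+	ret = auerchain_control_msg (&cp->controlchain, cp->usbdev,
+			usb_sndctrlpipe (cp->usbdev, 0),
+			AUV_CHANNELCTL, AUT_WREQ, 0x01, scp->id,
+			NULL, 0, HZ * 2);
+*/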
+
+
+/*-------------------------------------------------------------------*/
+/* Buffer List helper functions */
+
+/* free a single auerbuf */
+static void auerbuf_free (pauerbuf_t bp)
+{
+ if (bp->bufp) {
+ kfree (bp->bufp);
+ }
+ if (bp->dr) {
+ kfree (bp->dr);
+ }
+ if (bp->urbp) {
+ usb_free_urb (bp->urbp);
+ }
+ kfree (bp);
+}
+
+/* free the buffers from an auerbuf list */
+static void auerbuf_free_list (struct list_head *q)
+{
+ struct list_head *tmp;
+ struct list_head *p;
+ pauerbuf_t bp;
+
+ dbg ("auerbuf_free_list");
+ for (p = q->next; p != q;) {
+ bp = list_entry (p, auerbuf_t, buff_list);
+ tmp = p->next;
+ list_del (p);
+ p = tmp;
+ auerbuf_free (bp);
+ }
+}
+
+/* init the members of a list control block */
+static void auerbuf_init (pauerbufctl_t bcp)
+{
+ dbg ("auerbuf_init");
+ spin_lock_init (&bcp->lock);
+ INIT_LIST_HEAD (&bcp->free_buff_list);
+ INIT_LIST_HEAD (&bcp->rec_buff_list);
+}
+
+/* free all buffers from an auerbuf chain */
+static void auerbuf_free_buffers (pauerbufctl_t bcp)
+{
+ unsigned long flags;
+ dbg ("auerbuf_free_buffers");
+
+ spin_lock_irqsave (&bcp->lock, flags);
+
+ auerbuf_free_list (&bcp->free_buff_list);
+ auerbuf_free_list (&bcp->rec_buff_list);
+
+ spin_unlock_irqrestore (&bcp->lock, flags);
+}
+
+/* setup a list of buffers */
+/* requirement: auerbuf_init() */
+static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize)
+{
+ pauerbuf_t bep;
+
+ dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize);
+
+ /* fill the list of free elements */
+ for (;numElements; numElements--) {
+ bep = (pauerbuf_t) kmalloc (sizeof (auerbuf_t), GFP_KERNEL);
+ if (!bep) goto bl_fail;
+ memset (bep, 0, sizeof (auerbuf_t));
+ bep->list = bcp;
+ INIT_LIST_HEAD (&bep->buff_list);
+ bep->bufp = (char *) kmalloc (bufsize, GFP_KERNEL);
+ if (!bep->bufp) goto bl_fail;
+ bep->dr = (struct usb_ctrlrequest *) kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
+ if (!bep->dr) goto bl_fail;
+ bep->urbp = usb_alloc_urb (0);
+ if (!bep->urbp) goto bl_fail;
+ list_add_tail (&bep->buff_list, &bcp->free_buff_list);
+ }
+ return 0;
+
+bl_fail:/* not enough memory. Free allocated elements */
+ dbg ("auerbuf_setup: no more memory");
+ auerbuf_free_buffers (bcp);
+ return -ENOMEM;
+}
+
+/* insert a used buffer into the free list */
+static void auerbuf_releasebuf( pauerbuf_t bp)
+{
+ unsigned long flags;
+ pauerbufctl_t bcp = bp->list;
+ bp->retries = 0;
+
+ dbg ("auerbuf_releasebuf called");
+ spin_lock_irqsave (&bcp->lock, flags);
+ list_add_tail (&bp->buff_list, &bcp->free_buff_list);
+ spin_unlock_irqrestore (&bcp->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------*/
+/* Completion handlers */
+
+/* Values of urb->status or results of usb_submit_urb():
+0 Initial, OK
+-EINPROGRESS during submission until end
+-ENOENT if urb is unlinked
+-ETIMEDOUT Transfer timed out, NAK
+-ENOMEM Memory Overflow
+-ENODEV Specified USB-device or bus doesn't exist
+-ENXIO URB already queued
+-EINVAL a) Invalid transfer type specified (or not supported)
+ b) Invalid interrupt interval (0 < n < 256)
+-EAGAIN a) Specified ISO start frame too early
+ b) (using ISO-ASAP) Too much scheduled for the future; wait some time and try again.
+-EFBIG Too many ISO frames requested (currently uhci: 900)
+-EPIPE Specified pipe-handle/Endpoint is already stalled
+-EMSGSIZE Endpoint message size is zero, do interface/alternate setting
+-EPROTO a) Bitstuff error
+ b) Unknown USB error
+-EILSEQ CRC mismatch
+-ENOSR Buffer error
+-EREMOTEIO Short packet detected
+-EXDEV ISO transfer only partially completed; look at individual frame status for details
+-EINVAL ISO madness, if this happens: Log off and go home
+-EOVERFLOW babble
+*/
+
+/* check if a status code allows a retry */
+static int auerswald_status_retry (int status)
+{
+ switch (status) {
+ case 0:
+ case -ETIMEDOUT:
+ case -EOVERFLOW:
+ case -EAGAIN:
+ case -EPIPE:
+ case -EPROTO:
+ case -EILSEQ:
+ case -ENOSR:
+ case -EREMOTEIO:
+ return 1; /* do a retry */
+ }
+ return 0; /* no retry possible */
+}
+
+/* Completion of asynchronous write block */
+static void auerchar_ctrlwrite_complete (urb_t * urb)
+{
+ pauerbuf_t bp = (pauerbuf_t) urb->context;
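+	/* recover the enclosing auerswald_t from its bufctl member
+	   (open-coded container_of) */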
+ pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
+ dbg ("auerchar_ctrlwrite_complete called");
+
+ /* reuse the buffer */
+ auerbuf_releasebuf (bp);
+ /* Wake up all processes waiting for a buffer */
+ wake_up (&cp->bufferwait);
+}
+
+/* Completion handler for dummy retry packet */
+static void auerswald_ctrlread_wretcomplete (urb_t * urb)
+{
+ pauerbuf_t bp = (pauerbuf_t) urb->context;
+ pauerswald_t cp;
+ int ret;
+ dbg ("auerswald_ctrlread_wretcomplete called");
+ dbg ("complete with status: %d", urb->status);
+ cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
+
+ /* check if it is possible to advance */
+ if (!auerswald_status_retry (urb->status) || !cp->usbdev) {
+ /* reuse the buffer */
+ err ("control dummy: transmission error %d, can not retry", urb->status);
+ auerbuf_releasebuf (bp);
+ /* Wake up all processes waiting for a buffer */
+ wake_up (&cp->bufferwait);
+ return;
+ }
+
+ /* fill the control message */
+	bp->dr->bRequestType = AUT_RREQ;
+	bp->dr->bRequest = AUV_RBLOCK;
+	bp->dr->wLength = bp->dr->wValue;	/* temporarily stored */
+	bp->dr->wValue = cpu_to_le16 (1);	/* Retry Flag */
+	/* bp->dr->wIndex = channel id; remains */
+	FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
+		(unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
+ (usb_complete_t)auerswald_ctrlread_complete,bp);
+
+ /* submit the control msg as next packet */
+ ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
+ if (ret) {
+ dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
+ bp->urbp->status = ret;
+ auerswald_ctrlread_complete (bp->urbp);
+ }
+}
+
+/* completion handler for receiving of control messages */
+static void auerswald_ctrlread_complete (urb_t * urb)
+{
+ unsigned int serviceid;
+ pauerswald_t cp;
+ pauerscon_t scp;
+ pauerbuf_t bp = (pauerbuf_t) urb->context;
+ int ret;
+ dbg ("auerswald_ctrlread_complete called");
+
+ cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
+
+ /* check if there is valid data in this urb */
+ if (urb->status) {
+ dbg ("complete with non-zero status: %d", urb->status);
+ /* should we do a retry? */
+ if (!auerswald_status_retry (urb->status)
+ || !cp->usbdev
+ || (cp->version < AUV_RETRY)
+ || (bp->retries >= AU_RETRIES)) {
+ /* reuse the buffer */
+ err ("control read: transmission error %d, can not retry", urb->status);
+ auerbuf_releasebuf (bp);
+ return;
+ }
+ bp->retries++;
+ dbg ("Retry count = %d", bp->retries);
+ /* send a long dummy control-write-message to allow device firmware to react */
+	bp->dr->bRequestType = AUT_WREQ;
+	bp->dr->bRequest = AUV_DUMMY;
+	bp->dr->wValue = bp->dr->wLength;	/* temporary storage */
+	// bp->dr->wIndex channel ID remains
+	bp->dr->wLength = cpu_to_le16 (32);	/* >= 8 bytes */
+ FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
+ (unsigned char*)bp->dr, bp->bufp, 32,
+ (usb_complete_t)auerswald_ctrlread_wretcomplete,bp);
+
+ /* submit the control msg as next packet */
+ ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
+ if (ret) {
+ dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
+ bp->urbp->status = ret;
+ auerswald_ctrlread_wretcomplete (bp->urbp);
+ }
+ return;
+ }
+
+ /* get the actual bytecount (incl. headerbyte) */
+ bp->len = urb->actual_length;
+ serviceid = bp->bufp[0] & AUH_TYPEMASK;
+ dbg ("Paket with serviceid %d and %d bytes received", serviceid, bp->len);
+
+ /* dispatch the packet */
+ scp = cp->services[serviceid];
+ if (scp) {
+ /* look, Ma, a listener! */
+ scp->dispatch (scp, bp);
+ }
+
+ /* release the packet */
+ auerbuf_releasebuf (bp);
+ /* Wake up all processes waiting for a buffer */
+ wake_up (&cp->bufferwait);
+}
+
+/*-------------------------------------------------------------------*/
+/* Handling of Interrupt Endpoint */
+/* This interrupt Endpoint is used to inform the host about waiting
+ messages from the USB device.
+*/
+/* int completion handler. */
+static void auerswald_int_complete (urb_t * urb)
+{
+ unsigned long flags;
+ unsigned int channelid;
+ unsigned int bytecount;
+ int ret;
+ pauerbuf_t bp = NULL;
+ pauerswald_t cp = (pauerswald_t) urb->context;
+
+ dbg ("auerswald_int_complete called");
+
+ /* do not respond to an error condition */
+ if (urb->status != 0) {
+ dbg ("nonzero URB status = %d", urb->status);
+ return;
+ }
+
+ /* check if all needed data was received */
+ if (urb->actual_length < AU_IRQMINSIZE) {
+ dbg ("invalid data length received: %d bytes", urb->actual_length);
+ return;
+ }
+
+ /* check the command code */
+ if (cp->intbufp[0] != AU_IRQCMDID) {
+ dbg ("invalid command received: %d", cp->intbufp[0]);
+ return;
+ }
+
+ /* check the command type */
+ if (cp->intbufp[1] != AU_BLOCKRDY) {
+ dbg ("invalid command type received: %d", cp->intbufp[1]);
+ return;
+ }
+
+ /* now extract the information */
+ channelid = cp->intbufp[2];
+ bytecount = le16_to_cpup ((u16 *)&cp->intbufp[3]);
+
+ /* check the channel id */
+ if (channelid >= AUH_TYPESIZE) {
+ dbg ("invalid channel id received: %d", channelid);
+ return;
+ }
+
+ /* check the byte count */
+ if (bytecount > (cp->maxControlLength+AUH_SIZE)) {
+ dbg ("invalid byte count received: %d", bytecount);
+ return;
+ }
+ dbg ("Service Channel = %d", channelid);
+ dbg ("Byte Count = %d", bytecount);
+
+ /* get a buffer for the next data packet */
+ spin_lock_irqsave (&cp->bufctl.lock, flags);
+ if (!list_empty (&cp->bufctl.free_buff_list)) {
+ /* yes: get the entry */
+ struct list_head *tmp = cp->bufctl.free_buff_list.next;
+ list_del (tmp);
+ bp = list_entry (tmp, auerbuf_t, buff_list);
+ }
+ spin_unlock_irqrestore (&cp->bufctl.lock, flags);
+
+ /* if no buffer available: skip it */
+ if (!bp) {
+ dbg ("auerswald_int_complete: no data buffer available");
+ /* can we do something more?
+ This is a big problem: if this int packet is ignored, the
+ device will wait forever and not signal any more data.
+ The only real solution is: having enough buffers!
+ Or perhaps temporarily disabling the int endpoint?
+ */
+ return;
+ }
+
+ /* fill the control message */
+	bp->dr->bRequestType = AUT_RREQ;
+	bp->dr->bRequest = AUV_RBLOCK;
+	bp->dr->wValue = cpu_to_le16 (0);
+	bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
+	bp->dr->wLength = cpu_to_le16 (bytecount);
+ FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
+ (unsigned char*)bp->dr, bp->bufp, bytecount,
+ (usb_complete_t)auerswald_ctrlread_complete,bp);
+
+ /* submit the control msg */
+ ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
+ if (ret) {
+ dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret);
+ bp->urbp->status = ret;
+ auerswald_ctrlread_complete( bp->urbp);
+ /* here applies the same problem as above: device locking! */
+ }
+}
+
+/* int memory deallocation
+ NOTE: no mutex please!
+*/
+static void auerswald_int_free (pauerswald_t cp)
+{
+ if (cp->inturbp) {
+ usb_free_urb (cp->inturbp);
+ cp->inturbp = NULL;
+ }
+ if (cp->intbufp) {
+ kfree (cp->intbufp);
+ cp->intbufp = NULL;
+ }
+}
+
+/* This function is called to activate the interrupt
+ endpoint. This function returns 0 if successful or an error code.
+ NOTE: no mutex please!
+*/
+static int auerswald_int_open (pauerswald_t cp)
+{
+ int ret;
+ struct usb_endpoint_descriptor *ep;
+ int irqsize;
+ dbg ("auerswald_int_open");
+
+ ep = usb_epnum_to_ep_desc (cp->usbdev, USB_DIR_IN | AU_IRQENDP);
+ if (!ep) {
+ ret = -EFAULT;
+ goto intoend;
+ }
+ irqsize = ep->wMaxPacketSize;
+ cp->irqsize = irqsize;
+
+ /* allocate the urb and data buffer */
+ if (!cp->inturbp) {
+ cp->inturbp = usb_alloc_urb (0);
+ if (!cp->inturbp) {
+ ret = -ENOMEM;
+ goto intoend;
+ }
+ }
+ if (!cp->intbufp) {
+ cp->intbufp = (char *) kmalloc (irqsize, GFP_KERNEL);
+ if (!cp->intbufp) {
+ ret = -ENOMEM;
+ goto intoend;
+ }
+ }
+ /* setup urb */
+ FILL_INT_URB (cp->inturbp, cp->usbdev, usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp, irqsize, auerswald_int_complete, cp, ep->bInterval);
+ /* start the urb */
+ cp->inturbp->status = 0; /* needed! */
+ ret = usb_submit_urb (cp->inturbp);
+
+intoend:
+ if (ret < 0) {
+ /* activation of interrupt endpoint has failed. Now clean up. */
+ dbg ("auerswald_int_open: activation of int endpoint failed");
+
+ /* deallocate memory */
+ auerswald_int_free (cp);
+ }
+ return ret;
+}
+
+/* This function is called to deactivate the interrupt
+ endpoint. This function returns 0 if successful or an error code.
+ NOTE: no mutex please!
+*/
+static int auerswald_int_release (pauerswald_t cp)
+{
+ int ret = 0;
+ dbg ("auerswald_int_release");
+
+ /* stop the int endpoint */
+ if (cp->inturbp) {
+ ret = usb_unlink_urb (cp->inturbp);
+ if (ret)
+ dbg ("nonzero int unlink result received: %d", ret);
+ }
+
+ /* deallocate memory */
+ auerswald_int_free (cp);
+
+ return ret;
+}
+
+/* --------------------------------------------------------------------- */
+/* Helper functions */
+
+/* wake up waiting readers */
+static void auerchar_disconnect (pauerscon_t scp)
+{
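+	/* recover the enclosing auerchar_t from its scontext member
+	   (open-coded container_of) */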
+ pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
+ dbg ("auerchar_disconnect called");
+ ccp->removed = 1;
+ wake_up (&ccp->readwait);
+}
+
+
+/* dispatch a read packet to a waiting character device */
+static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp)
+{
+ unsigned long flags;
+ pauerchar_t ccp;
+ pauerbuf_t newbp = NULL;
+ char * charp;
+ dbg ("auerchar_ctrlread_dispatch called");
+ ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
+
+ /* get a read buffer from character device context */
+ spin_lock_irqsave (&ccp->bufctl.lock, flags);
+ if (!list_empty (&ccp->bufctl.free_buff_list)) {
+ /* yes: get the entry */
+ struct list_head *tmp = ccp->bufctl.free_buff_list.next;
+ list_del (tmp);
+ newbp = list_entry (tmp, auerbuf_t, buff_list);
+ }
+ spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
+
+ if (!newbp) {
+ dbg ("No read buffer available, discard paket!");
+ return; /* no buffer, no dispatch */
+ }
+
+ /* copy information to new buffer element
+ (all buffers have the same length) */
+ charp = newbp->bufp;
+ newbp->bufp = bp->bufp;
+ bp->bufp = charp;
+ newbp->len = bp->len;
+
+ /* insert new buffer in read list */
+ spin_lock_irqsave (&ccp->bufctl.lock, flags);
+ list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list);
+ spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
+ dbg ("read buffer appended to rec_list");
+
+ /* wake up pending synchronous reads */
+ wake_up (&ccp->readwait);
+}
+
+
+/* Delete an auerswald driver context */
+static void auerswald_delete( pauerswald_t cp)
+{
+ dbg( "auerswald_delete");
+ if (cp == NULL) return;
+
+ /* Wake up all processes waiting for a buffer */
+ wake_up (&cp->bufferwait);
+
+ /* Cleaning up */
+ auerswald_int_release (cp);
+ auerchain_free (&cp->controlchain);
+ auerbuf_free_buffers (&cp->bufctl);
+
+ /* release the memory */
+ kfree( cp);
+}
+
+
+/* Delete an auerswald character context */
+static void auerchar_delete( pauerchar_t ccp)
+{
+ dbg ("auerchar_delete");
+ if (ccp == NULL) return;
+
+ /* wake up pending synchronous reads */
+ ccp->removed = 1;
+ wake_up (&ccp->readwait);
+
+ /* remove the read buffer */
+ if (ccp->readbuf) {
+ auerbuf_releasebuf (ccp->readbuf);
+ ccp->readbuf = NULL;
+ }
+
+ /* remove the character buffers */
+ auerbuf_free_buffers (&ccp->bufctl);
+
+ /* release the memory */
+ kfree( ccp);
+}
+
+
+/* add a new service to the device
+ scp->id must be set!
+ return: 0 if OK, else error code
+*/
+static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
+{
+ int ret;
+
+ /* is the device available? */
+ if (!cp->usbdev) {
+ dbg ("usbdev == NULL");
+ return -EIO; /* no: cannot add a service, sorry */
+ }
+
+ /* is the service slot still free? */
+ if (cp->services[scp->id]) {
+ dbg ("service is busy");
+ return -EBUSY;
+ }
+
+ /* device is available, service is free */
+ cp->services[scp->id] = scp;
+
+ /* register service in device */
+ ret = auerchain_control_msg(
+ &cp->controlchain, /* pointer to control chain */
+ cp->usbdev, /* pointer to device */
+ usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
+ AUV_CHANNELCTL, /* USB message request value */
+ AUT_WREQ, /* USB message request type value */
+ 0x01, /* open USB message value */
+ scp->id, /* USB message index value */
+ NULL, /* pointer to the data to send */
+ 0, /* length in bytes of the data to send */
+ HZ * 2); /* time to wait for the message to complete before timing out */
+ if (ret < 0) {
+ dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret);
+ /* undo above actions */
+ cp->services[scp->id] = NULL;
+ return ret;
+ }
+
+ dbg ("auerswald_addservice: channel open OK");
+ return 0;
+}
+
+
+/* remove a service from the device
+ scp->id must be set! */
+static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
+{
+ dbg ("auerswald_removeservice called");
+
+ /* check if we have a service allocated */
+ if (scp->id == AUH_UNASSIGNED) return;
+
+ /* If there is a device: close the channel */
+ if (cp->usbdev) {
+ /* Close the service channel inside the device */
+ int ret = auerchain_control_msg(
+ &cp->controlchain, /* pointer to control chain */
+ cp->usbdev, /* pointer to device */
+ usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
+ AUV_CHANNELCTL, /* USB message request value */
+ AUT_WREQ, /* USB message request type value */
+ 0x00, /* USB message value: close */
+ scp->id, /* USB message index value */
+ NULL, /* pointer to the data to send */
+ 0, /* length in bytes of the data to send */
+ HZ * 2); /* time to wait for the message to complete before timing out */
+ if (ret < 0) {
+ dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret);
+ }
+ else {
+ dbg ("auerswald_removeservice: channel close OK");
+ }
+ }
+
+ /* remove the service from the device */
+ cp->services[scp->id] = NULL;
+ scp->id = AUH_UNASSIGNED;
+}
+
+
+/* --------------------------------------------------------------------- */
+/* Char device functions */
+
+/* Open a new character device */
+static int auerchar_open (struct inode *inode, struct file *file)
+{
+ int dtindex = minor(inode->i_rdev) - AUER_MINOR_BASE;
+ pauerswald_t cp = NULL;
+ pauerchar_t ccp = NULL;
+ int ret;
+
+ /* minor number in range? */
+ if ((dtindex < 0) || (dtindex >= AUER_MAX_DEVICES)) {
+ return -ENODEV;
+ }
+ /* usb device available? */
+ if (down_interruptible (&dev_table_mutex)) {
+ return -ERESTARTSYS;
+ }
+ cp = dev_table[dtindex];
+ if (cp == NULL) {
+ up (&dev_table_mutex);
+ return -ENODEV;
+ }
+ if (down_interruptible (&cp->mutex)) {
+ up (&dev_table_mutex);
+ return -ERESTARTSYS;
+ }
+ up (&dev_table_mutex);
+
+ /* prevent module unloading */
+ MOD_INC_USE_COUNT;
+
+ /* we have access to the device. Now let's allocate memory */
+ ccp = (pauerchar_t) kmalloc(sizeof(auerchar_t), GFP_KERNEL);
+ if (ccp == NULL) {
+ err ("out of memory");
+ ret = -ENOMEM;
+ goto ofail;
+ }
+
+ /* Initialize device descriptor */
+ memset( ccp, 0, sizeof(auerchar_t));
+ init_MUTEX( &ccp->mutex);
+ init_MUTEX( &ccp->readmutex);
+ auerbuf_init (&ccp->bufctl);
+ ccp->scontext.id = AUH_UNASSIGNED;
+ ccp->scontext.dispatch = auerchar_ctrlread_dispatch;
+ ccp->scontext.disconnect = auerchar_disconnect;
+ init_waitqueue_head (&ccp->readwait);
+
+ ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE);
+ if (ret) {
+ goto ofail;
+ }
+
+ cp->open_count++;
+ ccp->auerdev = cp;
+ dbg("open %s as /dev/usb/%s", cp->dev_desc, cp->name);
+ up (&cp->mutex);
+
+ /* file IO stuff */
+ file->f_pos = 0;
+ file->private_data = ccp;
+ return 0;
+
+ /* Error exit */
+ofail: up (&cp->mutex);
+ auerchar_delete (ccp);
+ MOD_DEC_USE_COUNT;
+ return ret;
+}
+
+
+/* IOCTL functions */
+static int auerchar_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ pauerchar_t ccp = (pauerchar_t) file->private_data;
+ int ret = 0;
+ audevinfo_t devinfo;
+ pauerswald_t cp = NULL;
+ unsigned int u;
+ dbg ("ioctl");
+
+ /* get the mutexes */
+ if (down_interruptible (&ccp->mutex)) {
+ return -ERESTARTSYS;
+ }
+ cp = ccp->auerdev;
+ if (!cp) {
+ up (&ccp->mutex);
+ return -ENODEV;
+ }
+ if (down_interruptible (&cp->mutex)) {
+ up(&ccp->mutex);
+ return -ERESTARTSYS;
+ }
+
+ /* Check for removal */
+ if (!cp->usbdev) {
+ up(&cp->mutex);
+ up(&ccp->mutex);
+ return -ENODEV;
+ }
+
+ switch (cmd) {
+
+ /* return != 0 if the transmit channel is ready to send */
+ case IOCTL_AU_TXREADY:
+ dbg ("IOCTL_AU_TXREADY");
+ u = ccp->auerdev
+ && (ccp->scontext.id != AUH_UNASSIGNED)
+ && !list_empty (&cp->bufctl.free_buff_list);
+ ret = put_user (u, (unsigned int *) arg);
+ break;
+
+ /* return != 0 if connected to a service channel */
+ case IOCTL_AU_CONNECT:
+ dbg ("IOCTL_AU_CONNECT");
+ u = (ccp->scontext.id != AUH_UNASSIGNED);
+ ret = put_user (u, (unsigned int *) arg);
+ break;
+
+ /* return != 0 if receive data is available */
+ case IOCTL_AU_RXAVAIL:
+ dbg ("IOCTL_AU_RXAVAIL");
+ if (ccp->scontext.id == AUH_UNASSIGNED) {
+ ret = -EIO;
+ break;
+ }
+ u = 0; /* no data */
+ if (ccp->readbuf) {
+ int restlen = ccp->readbuf->len - ccp->readoffset;
+ if (restlen > 0) u = 1;
+ }
+ if (!u) {
+ if (!list_empty (&ccp->bufctl.rec_buff_list)) {
+ u = 1;
+ }
+ }
+ ret = put_user (u, (unsigned int *) arg);
+ break;
+
+ /* return the max. buffer length for the device */
+ case IOCTL_AU_BUFLEN:
+ dbg ("IOCTL_AU_BUFLEN");
+ u = cp->maxControlLength;
+ ret = put_user (u, (unsigned int *) arg);
+ break;
+
+ /* requesting a service channel */
+ case IOCTL_AU_SERVREQ:
+ dbg ("IOCTL_AU_SERVREQ");
+ /* requesting a service means: release the previous one first */
+ auerswald_removeservice (cp, &ccp->scontext);
+ /* get the channel number */
+ ret = get_user (u, (unsigned int *) arg);
+ if (ret) {
+ break;
+ }
+ if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) {
+ ret = -EIO;
+ break;
+ }
+ dbg ("auerchar service request parameters are ok");
+ ccp->scontext.id = u;
+
+ /* request the service now */
+ ret = auerswald_addservice (cp, &ccp->scontext);
+ if (ret) {
+ /* no: revert service entry */
+ ccp->scontext.id = AUH_UNASSIGNED;
+ }
+ break;
+
+ /* get a string descriptor for the device */
+ case IOCTL_AU_DEVINFO:
+ dbg ("IOCTL_AU_DEVINFO");
+ if (copy_from_user (&devinfo, (void *) arg, sizeof (audevinfo_t))) {
+ ret = -EFAULT;
+ break;
+ }
+ u = strlen(cp->dev_desc)+1;
+ if (u > devinfo.bsize) {
+ u = devinfo.bsize;
+ }
+ if (copy_to_user(devinfo.buf, cp->dev_desc, u))
+ ret = -EFAULT;
+ break;
+
+ /* get the max. string descriptor length */
+ case IOCTL_AU_SLEN:
+ dbg ("IOCTL_AU_SLEN");
+ u = AUSI_DLEN;
+ ret = put_user (u, (unsigned int *) arg);
+ break;
+
+ default:
+ dbg ("IOCTL_AU_UNKNOWN");
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ /* release the mutexes */
+ up(&cp->mutex);
+ up(&ccp->mutex);
+ return ret;
+}
+
+
+/* Seek is not supported */
+static loff_t auerchar_llseek (struct file *file, loff_t offset, int origin)
+{
+ dbg ("auerchar_seek");
+ return -ESPIPE;
+}
+
+
+/* Read data from the device */
+static ssize_t auerchar_read (struct file *file, char *buf, size_t count, loff_t * ppos)
+{
+ unsigned long flags;
+ pauerchar_t ccp = (pauerchar_t) file->private_data;
+ pauerbuf_t bp = NULL;
+ dbg ("auerchar_read");
+
+ /* Error checking */
+ if (!ccp)
+ return -EIO;
+ if (*ppos)
+ return -ESPIPE;
+ if (count == 0)
+ return 0;
+
+ /* get the mutex */
+ if (down_interruptible (&ccp->mutex))
+ return -ERESTARTSYS;
+
+ /* Can we expect to read something? */
+ if (ccp->scontext.id == AUH_UNASSIGNED) {
+ up (&ccp->mutex);
+ return -EIO;
+ }
+
+ /* only one reader per device allowed */
+ if (down_interruptible (&ccp->readmutex)) {
+ up (&ccp->mutex);
+ return -ERESTARTSYS;
+ }
+
+ /* read data from readbuf, if available */
+doreadbuf:
+ bp = ccp->readbuf;
+ if (bp) {
+ /* read the maximum bytes */
+ int restlen = bp->len - ccp->readoffset;
+ if (restlen < 0)
+ restlen = 0;
+ if (count > restlen)
+ count = restlen;
+ if (count) {
+ if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) {
+ dbg ("auerswald_read: copy_to_user failed");
+ up (&ccp->readmutex);
+ up (&ccp->mutex);
+ return -EFAULT;
+ }
+ }
+ /* advance the read offset */
+ ccp->readoffset += count;
+ restlen -= count;
+ /* fully consumed: return the buffer to the pool for reuse */
+ if (restlen <= 0) {
+ auerbuf_releasebuf (bp);
+ ccp->readbuf = NULL;
+ }
+ /* return with number of bytes read */
+ if (count) {
+ up (&ccp->readmutex);
+ up (&ccp->mutex);
+ return count;
+ }
+ }
+
+ /* a read buffer is not available. Try to get the next data block. */
+doreadlist:
+ bp = NULL;
+ spin_lock_irqsave (&ccp->bufctl.lock, flags);
+ if (!list_empty (&ccp->bufctl.rec_buff_list)) {
+ /* yes: get the entry */
+ struct list_head *tmp = ccp->bufctl.rec_buff_list.next;
+ list_del (tmp);
+ bp = list_entry (tmp, auerbuf_t, buff_list);
+ }
+ spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
+
+ /* have we got data? */
+ if (bp) {
+ ccp->readbuf = bp;
+ ccp->readoffset = AUH_SIZE; /* for headerbyte */
+ goto doreadbuf; /* now we can read! */
+ }
+
+ /* no data available. Should we wait? */
+ if (file->f_flags & O_NONBLOCK) {
+ dbg ("No read buffer available, returning -EAGAIN");
+ up (&ccp->readmutex);
+ up (&ccp->mutex);
+ return -EAGAIN; /* nonblocking, no data available */
+ }
+
+ /* yes, we should wait! */
+ up (&ccp->mutex); /* allow other operations while we wait */
+ interruptible_sleep_on (&ccp->readwait);
+ if (signal_pending (current)) {
+ /* woken up by a signal */
+ up (&ccp->readmutex);
+ return -ERESTARTSYS;
+ }
+
+ /* Anything left to read? */
+ if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) {
+ up (&ccp->readmutex);
+ return -EIO;
+ }
+
+ if (down_interruptible (&ccp->mutex)) {
+ up (&ccp->readmutex);
+ return -ERESTARTSYS;
+ }
+
+ /* try to read the incoming data again */
+ goto doreadlist;
+}
+
+
+/* Write a data block into the right service channel of the device */
+static ssize_t auerchar_write (struct file *file, const char *buf, size_t len, loff_t *ppos)
+{
+ pauerchar_t ccp = (pauerchar_t) file->private_data;
+ pauerswald_t cp = NULL;
+ pauerbuf_t bp;
+ unsigned long flags;
+ int ret;
+
+ dbg ("auerchar_write %d bytes", len);
+
+ /* Error checking */
+ if (!ccp)
+ return -EIO;
+ if (*ppos)
+ return -ESPIPE;
+ if (len == 0)
+ return 0;
+
+write_again:
+ /* get the mutex */
+ if (down_interruptible (&ccp->mutex))
+ return -ERESTARTSYS;
+
+ /* Can we expect to write something? */
+ if (ccp->scontext.id == AUH_UNASSIGNED) {
+ up (&ccp->mutex);
+ return -EIO;
+ }
+
+ cp = ccp->auerdev;
+ if (!cp) {
+ up (&ccp->mutex);
+ return -ERESTARTSYS;
+ }
+ if (down_interruptible (&cp->mutex)) {
+ up (&ccp->mutex);
+ return -ERESTARTSYS;
+ }
+ if (!cp->usbdev) {
+ up (&cp->mutex);
+ up (&ccp->mutex);
+ return -EIO;
+ }
+ /* Try to get a buffer from the device pool.
+ We can't use a buffer from ccp->bufctl because the write
+ command may last beyond a release() */
+ bp = NULL;
+ spin_lock_irqsave (&cp->bufctl.lock, flags);
+ if (!list_empty (&cp->bufctl.free_buff_list)) {
+ /* yes: get the entry */
+ struct list_head *tmp = cp->bufctl.free_buff_list.next;
+ list_del (tmp);
+ bp = list_entry (tmp, auerbuf_t, buff_list);
+ }
+ spin_unlock_irqrestore (&cp->bufctl.lock, flags);
+
+ /* are there any buffers left? */
+ if (!bp) {
+ up (&cp->mutex);
+ up (&ccp->mutex);
+
+ /* NONBLOCK: don't wait */
+ if (file->f_flags & O_NONBLOCK) {
+ return -EAGAIN;
+ }
+
+ /* BLOCKING: wait */
+ interruptible_sleep_on (&cp->bufferwait);
+ if (signal_pending (current)) {
+ /* woken up by a signal */
+ return -ERESTARTSYS;
+ }
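+ /* woken up normally: a buffer may have been freed, so retry from the start */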
+ goto write_again;
+ }
+
+ /* clamp overlong write requests to the device maximum */
+ if (len > cp->maxControlLength) len = cp->maxControlLength;
+
+ /* Fill the buffer */
+ if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) {
+ dbg ("copy_from_user failed");
+ auerbuf_releasebuf (bp);
+ up (&cp->mutex);
+ up (&ccp->mutex);
+ return -EIO;
+ }
+
+ /* set the header byte */
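+ /* the AUH_SIZE header byte(s) carry the service id plus direction/split flags;
+ the payload was copied in above at bufp+AUH_SIZE */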
+ *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT;
+
+ /* set the transfer parameters */
+ bp->len = len+AUH_SIZE;
+ bp->dr->requesttype = AUT_WREQ;
+ bp->dr->request = AUV_WBLOCK;
+ bp->dr->value = cpu_to_le16 (0);
+ bp->dr->index = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
+ bp->dr->length = cpu_to_le16 (len+AUH_SIZE);
+ FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
+ (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
+ auerchar_ctrlwrite_complete, bp);
+ /* up we go */
+ ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
+ up (&cp->mutex);
+ if (ret) {
+ dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret);
+ auerbuf_releasebuf (bp);
+ up (&ccp->mutex);
+ return -EIO;
+ }
+ else {
+ dbg ("auerchar_write: Write OK");
+ up (&ccp->mutex);
+ return len;
+ }
+}
+
+
+/* Close a character device */
+static int auerchar_release (struct inode *inode, struct file *file)
+{
+ pauerchar_t ccp = (pauerchar_t) file->private_data;
+ pauerswald_t cp;
+ dbg("release");
+
+ /* get the mutexes */
+ if (down_interruptible (&ccp->mutex)) {
+ return -ERESTARTSYS;
+ }
+ cp = ccp->auerdev;
+ if (cp) {
+ if (down_interruptible (&cp->mutex)) {
+ up (&ccp->mutex);
+ return -ERESTARTSYS;
+ }
+ /* remove an open service */
+ auerswald_removeservice (cp, &ccp->scontext);
+ /* detach from device */
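+ /* the last close after a disconnect must do the final cleanup */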
+ if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) {
+ /* usb device waits for removal */
+ up (&cp->mutex);
+ auerswald_delete (cp);
+ } else {
+ up (&cp->mutex);
+ }
+ cp = NULL;
+ ccp->auerdev = NULL;
+ }
+ up (&ccp->mutex);
+ auerchar_delete (ccp);
+
+ /* release the module */
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+
+/*----------------------------------------------------------------------*/
+/* File operation structure */
+static struct file_operations auerswald_fops =
+{
+ owner: THIS_MODULE,
+ llseek: auerchar_llseek,
+ read: auerchar_read,
+ write: auerchar_write,
+ ioctl: auerchar_ioctl,
+ open: auerchar_open,
+ release: auerchar_release,
+};
+
+
+/* --------------------------------------------------------------------- */
+/* Special USB driver functions */
+
+/* Probe whether this driver wants to serve a USB device
+
+ This entry point is called whenever a new device is attached to the bus.
+ Then the device driver has to create a new instance of its internal data
+ structures for the new device.
+
+ The dev argument specifies the device context, which contains pointers
+ to all USB descriptors. The interface argument specifies the interface
+ number. If a USB driver wants to bind itself to a particular device and
+ interface it has to return a pointer. This pointer normally references
+ the device driver's context structure.
+
+ Probing is normally done by checking the vendor and product identifications
+ or the class and subclass definitions. If they match, the interface number
+ is compared with the ones supported by the driver. When probing is done
+ class-based, it might be necessary to parse some more USB descriptors because
+ the device properties can differ widely.
+*/
+static void *auerswald_probe (struct usb_device *usbdev, unsigned int ifnum,
+ const struct usb_device_id *id)
+{
+ pauerswald_t cp = NULL;
+ DECLARE_WAIT_QUEUE_HEAD (wqh);
+ unsigned int dtindex;
+ unsigned int u = 0;
+ char *pbuf;
+ int ret;
+
+ dbg ("probe: vendor id 0x%x, device id 0x%x ifnum:%d",
+ usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, ifnum);
+
+ /* See if the device offered to us matches one that we can accept */
+ if (usbdev->descriptor.idVendor != ID_AUERSWALD) return NULL;
+
+ /* we use only the first -and only- interface */
+ if (ifnum != 0) return NULL;
+
+ /* prevent module unloading while sleeping */
+ MOD_INC_USE_COUNT;
+
+ /* allocate memory for our device and initialize it */
+ cp = kmalloc (sizeof(auerswald_t), GFP_KERNEL);
+ if (cp == NULL) {
+ err ("out of memory");
+ goto pfail;
+ }
+
+ /* Initialize device descriptor */
+ memset (cp, 0, sizeof(auerswald_t));
+ init_MUTEX (&cp->mutex);
+ cp->usbdev = usbdev;
+ auerchain_init (&cp->controlchain);
+ auerbuf_init (&cp->bufctl);
+ init_waitqueue_head (&cp->bufferwait);
+
+ /* find a free slot in the device table */
+ down (&dev_table_mutex);
+ for (dtindex = 0; dtindex < AUER_MAX_DEVICES; ++dtindex) {
+ if (dev_table[dtindex] == NULL)
+ break;
+ }
+ if ( dtindex >= AUER_MAX_DEVICES) {
+ err ("more than %d devices plugged in, can not handle this device", AUER_MAX_DEVICES);
+ up (&dev_table_mutex);
+ goto pfail;
+ }
+
+ /* Give the device a name */
+ sprintf (cp->name, AU_PREFIX "%d", dtindex);
+
+ /* Store the index */
+ cp->dtindex = dtindex;
+ dev_table[dtindex] = cp;
+ up (&dev_table_mutex);
+
+ /* initialize the devfs node for this device and register it */
+ cp->devfs = devfs_register (usb_devfs_handle, cp->name,
+ DEVFS_FL_DEFAULT, USB_MAJOR,
+ AUER_MINOR_BASE + dtindex,
+ S_IFCHR | S_IRUGO | S_IWUGO,
+ &auerswald_fops, NULL);
+
+ /* Get the usb version of the device */
+ cp->version = cp->usbdev->descriptor.bcdDevice;
+ dbg ("Version is %X", cp->version);
+
+ /* allow the device some time to settle */
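+ /* HZ/3 jiffies is a third of a second, independent of the HZ setting */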
+ sleep_on_timeout (&wqh, HZ / 3 );
+
+ /* Try to get a suitable textual description of the device */
+ /* Device name: */
+ ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1);
+ if (ret >= 0) {
+ u += ret;
+ /* Append Serial Number */
+ memcpy(&cp->dev_desc[u], ",Ser# ", 6);
+ u += 6;
+ ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1);
+ if (ret >= 0) {
+ u += ret;
+ /* Append subscriber number */
+ memcpy(&cp->dev_desc[u], ", ", 2);
+ u += 2;
+ ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1);
+ if (ret >= 0) {
+ u += ret;
+ }
+ }
+ }
+ cp->dev_desc[u] = '\0';
+ info("device is a %s", cp->dev_desc);
+
+ /* get the maximum allowed control transfer length */
+ pbuf = (char *) kmalloc (2, GFP_KERNEL); /* use a kmalloc'ed buffer because it is the target of a urb transfer */
+ if (!pbuf) {
+ err( "out of memory");
+ goto pfail;
+ }
+ ret = usb_control_msg(cp->usbdev, /* pointer to device */
+ usb_rcvctrlpipe( cp->usbdev, 0 ), /* pipe to control endpoint */
+ AUV_GETINFO, /* USB message request value */
+ AUT_RREQ, /* USB message request type value */
+ 0, /* USB message value */
+ AUDI_MBCTRANS, /* USB message index value */
+ pbuf, /* pointer to the receive buffer */
+ 2, /* length of the buffer */
+ HZ * 2); /* time to wait for the message to complete before timing out */
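+ /* ret == 2 means both bytes of the little-endian length value arrived */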
+ if (ret == 2) {
+ cp->maxControlLength = le16_to_cpup((u16 *) pbuf);
+ kfree(pbuf);
+ dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength);
+ } else {
+ kfree(pbuf);
+ err("setup: getting max. allowed control transfer length failed with error %d", ret);
+ goto pfail;
+ }
+
+ /* allocate a chain for the control messages */
+ if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) {
+ err ("out of memory");
+ goto pfail;
+ }
+
+ /* allocate buffers for control messages */
+ if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) {
+ err ("out of memory");
+ goto pfail;
+ }
+
+ /* start the interrupt endpoint */
+ if (auerswald_int_open (cp)) {
+ err ("int endpoint failed");
+ goto pfail;
+ }
+
+ /* all OK */
+ return cp;
+
+ /* Error exit: clean up the memory */
+pfail: auerswald_delete (cp);
+ MOD_DEC_USE_COUNT;
+ return NULL;
+}
+
+
+/* Disconnect driver from a served device
+
+ This function is called whenever a device which was served by this driver
+ is disconnected.
+
+ The argument usbdev specifies the device context; driver_context is the
+ pointer that the probe function previously returned for this device.
+ After the disconnect function returns, the USB framework completely
+ deallocates all data structures associated with this device. In
+ particular, the usb_device structure must no longer be used by the
+ usb driver.
+*/
+static void auerswald_disconnect (struct usb_device *usbdev, void *driver_context)
+{
+ pauerswald_t cp = (pauerswald_t) driver_context;
+ unsigned int u;
+
+ down (&cp->mutex);
+ info ("device /dev/usb/%s now disconnecting", cp->name);
+
+ /* remove from device table */
+ /* Nobody can open() this device any more */
+ down (&dev_table_mutex);
+ dev_table[cp->dtindex] = NULL;
+ up (&dev_table_mutex);
+
+ /* remove our devfs node */
+ /* Nobody can see this device any more */
+ devfs_unregister (cp->devfs);
+
+ /* Stop the interrupt endpoint */
+ auerswald_int_release (cp);
+
+ /* remove the control chain allocated in auerswald_probe.
+ This has two benefits:
+ a) all pending (a)synchronous urbs are unlinked
+ b) all buffers dealing with urbs are reclaimed
+ */
+ auerchain_free (&cp->controlchain);
+
+ if (cp->open_count == 0) {
+ /* nobody is using this device. So we can clean up now */
+ up (&cp->mutex);/* up() is possible here because no other task
+ can open the device (see above). I don't want
+ to kfree() a locked mutex. */
+ auerswald_delete (cp);
+ } else {
+ /* device is used. Remove the pointer to the
+ usb device (it's not valid any more). The last
+ release() will do the clean up */
+ cp->usbdev = NULL;
+ up (&cp->mutex);
+ /* Terminate waiting writers */
+ wake_up (&cp->bufferwait);
+ /* Inform all waiting readers */
+ for ( u = 0; u < AUH_TYPESIZE; u++) {
+ pauerscon_t scp = cp->services[u];
+ if (scp) scp->disconnect( scp);
+ }
+ }
+
+ /* drop the module use count held for this device */
+ MOD_DEC_USE_COUNT;
+}
+
+/* Descriptor for the devices which are served by this driver.
+ NOTE: this struct is parsed by the usbmanager install scripts.
+ Don't change it without care!
+*/
+static struct usb_device_id auerswald_ids [] = {
+ { USB_DEVICE (ID_AUERSWALD, 0x00C0) }, /* COMpact 2104 USB */
+ { USB_DEVICE (ID_AUERSWALD, 0x00DB) }, /* COMpact 4410/2206 USB */
+ { USB_DEVICE (ID_AUERSWALD, 0x00F1) }, /* Comfort 2000 System Telephone */
+ { USB_DEVICE (ID_AUERSWALD, 0x00F2) }, /* Comfort 1200 System Telephone */
+ { } /* Terminating entry */
+};
+
+/* Standard module device table */
+MODULE_DEVICE_TABLE (usb, auerswald_ids);
+
+/* Standard usb driver struct */
+static struct usb_driver auerswald_driver = {
+ name: "auerswald",
+ probe: auerswald_probe,
+ disconnect: auerswald_disconnect,
+ fops: &auerswald_fops,
+ minor: AUER_MINOR_BASE,
+ id_table: auerswald_ids,
+};
+
+
+/* --------------------------------------------------------------------- */
+/* Module loading/unloading */
+
+/* Driver initialisation. Called after module loading.
+ NOTE: there is no concurrency at _init
+*/
+static int __init auerswald_init (void)
+{
+ int result;
+ dbg ("init");
+
+ /* initialize the device table */
+ memset (&dev_table, 0, sizeof(dev_table));
+ init_MUTEX (&dev_table_mutex);
+
+ /* register driver at the USB subsystem */
+ result = usb_register (&auerswald_driver);
+ if (result < 0) {
+ err ("driver could not be registered");
+ return -1;
+ }
+ return 0;
+}
+
+/* Driver deinit. Called before module removal.
+ NOTE: there is no concurrency at _cleanup
+*/
+static void __exit auerswald_cleanup (void)
+{
+ dbg ("cleanup");
+ usb_deregister (&auerswald_driver);
+}
+
+/* --------------------------------------------------------------------- */
+/* Linux device driver module description */
+
+MODULE_AUTHOR (DRIVER_AUTHOR);
+MODULE_DESCRIPTION (DRIVER_DESC);
+
+module_init (auerswald_init);
+module_exit (auerswald_cleanup);
+
+/* --------------------------------------------------------------------- */
__u8 control_out_bInterfaceNum;
struct urb * control_urb_pool[NUM_CONTROL_URBS];
- devrequest dr[NUM_CONTROL_URBS];
+ struct usb_ctrlrequest dr[NUM_CONTROL_URBS];
unsigned char * interrupt_in_buffer;
struct urb * interrupt_in_urb;
static int bluetooth_ctrl_msg (struct usb_bluetooth *bluetooth, int request, int value, const unsigned char *buf, int len)
{
struct urb *urb = NULL;
- devrequest *dr = NULL;
+ struct usb_ctrlrequest *dr = NULL;
int i;
int status;
}
memcpy (urb->transfer_buffer, buf, len);
- dr->requesttype = BLUETOOTH_CONTROL_REQUEST_TYPE;
- dr->request = request;
- dr->value = cpu_to_le16((u16) value);
- dr->index = cpu_to_le16((u16) bluetooth->control_out_bInterfaceNum);
- dr->length = cpu_to_le16((u16) len);
+ dr->bRequestType= BLUETOOTH_CONTROL_REQUEST_TYPE;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16((u16) value);
+ dr->wIndex = cpu_to_le16((u16) bluetooth->control_out_bInterfaceNum);
+ dr->wLength = cpu_to_le16((u16) len);
FILL_CONTROL_URB (urb, bluetooth->dev, usb_sndctrlpipe(bluetooth->dev, 0),
(unsigned char*)dr, urb->transfer_buffer, len, bluetooth_ctrl_callback, bluetooth);
u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)];
u8 irq_buf[2];
u8 ctrl_buf[64];
- devrequest ctrl_dr;
+ struct usb_ctrlrequest ctrl_dr;
struct timer_list timer;
u8 stats_buf[8];
struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
struct usb_device *usbdev = catc->usbdev;
struct urb *urb = &catc->ctrl_urb;
- devrequest *dr = &catc->ctrl_dr;
+ struct usb_ctrlrequest *dr = &catc->ctrl_dr;
int status;
- dr->request = q->request;
- dr->requesttype = 0x40 | q->dir;
- dr->value = cpu_to_le16(q->value);
- dr->index = cpu_to_le16(q->index);
- dr->length = cpu_to_le16(q->len);
+ dr->bRequest = q->request;
+ dr->bRequestType = 0x40 | q->dir;
+ dr->wValue = cpu_to_le16(q->value);
+ dr->wIndex = cpu_to_le16(q->index);
+ dr->wLength = cpu_to_le16(q->len);
urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
urb->transfer_buffer_length = q->len;
if (copy_from_user(&ctrl, (void *)arg, sizeof(ctrl)))
return -EFAULT;
- if ((ret = check_ctrlrecip(ps, ctrl.requesttype, ctrl.index)))
+ if ((ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.wIndex)))
return ret;
- if (ctrl.length > PAGE_SIZE)
+ if (ctrl.wLength > PAGE_SIZE)
return -EINVAL;
if (!(tbuf = (unsigned char *)__get_free_page(GFP_KERNEL)))
return -ENOMEM;
tmo = (ctrl.timeout * HZ + 999) / 1000;
- if (ctrl.requesttype & 0x80) {
- if (ctrl.length && !access_ok(VERIFY_WRITE, ctrl.data, ctrl.length)) {
+ if (ctrl.bRequestType & 0x80) {
+ if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data, ctrl.wLength)) {
free_page((unsigned long)tbuf);
return -EINVAL;
}
- i = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ctrl.request, ctrl.requesttype,
- ctrl.value, ctrl.index, tbuf, ctrl.length, tmo);
- if ((i > 0) && ctrl.length) {
- if (copy_to_user(ctrl.data, tbuf, ctrl.length)) {
+ i = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ctrl.bRequest, ctrl.bRequestType,
+ ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo);
+ if ((i > 0) && ctrl.wLength) {
+ if (copy_to_user(ctrl.data, tbuf, ctrl.wLength)) {
free_page((unsigned long)tbuf);
return -EFAULT;
}
}
} else {
- if (ctrl.length) {
- if (copy_from_user(tbuf, ctrl.data, ctrl.length)) {
+ if (ctrl.wLength) {
+ if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
free_page((unsigned long)tbuf);
return -EFAULT;
}
}
- i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.request, ctrl.requesttype,
- ctrl.value, ctrl.index, tbuf, ctrl.length, tmo);
+ i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest, ctrl.bRequestType,
+ ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo);
}
free_page((unsigned long)tbuf);
if (i<0) {
printk(KERN_DEBUG "usbdevfs: USBDEVFS_CONTROL failed dev %d rqt %u rq %u len %u ret %d\n",
- dev->devnum, ctrl.requesttype, ctrl.request, ctrl.length, i);
+ dev->devnum, ctrl.bRequestType, ctrl.bRequest, ctrl.wLength, i);
}
return i;
}
struct usbdevfs_iso_packet_desc *isopkt = NULL;
struct usb_endpoint_descriptor *ep_desc;
struct async *as;
- devrequest *dr = NULL;
+ struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int ret;
/* min 8 byte setup packet, max arbitrary */
if (uurb.buffer_length < 8 || uurb.buffer_length > PAGE_SIZE)
return -EINVAL;
- if (!(dr = kmalloc(sizeof(devrequest), GFP_KERNEL)))
+ if (!(dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(dr, (unsigned char*)uurb.buffer, 8)) {
kfree(dr);
return -EFAULT;
}
- if (uurb.buffer_length < (le16_to_cpup(&dr->length) + 8)) {
+ if (uurb.buffer_length < (le16_to_cpup(&dr->wLength) + 8)) {
kfree(dr);
return -EINVAL;
}
- if ((ret = check_ctrlrecip(ps, dr->requesttype, le16_to_cpup(&dr->index)))) {
+ if ((ret = check_ctrlrecip(ps, dr->bRequestType, le16_to_cpup(&dr->wIndex)))) {
kfree(dr);
return ret;
}
- uurb.endpoint = (uurb.endpoint & ~USB_ENDPOINT_DIR_MASK) | (dr->requesttype & USB_ENDPOINT_DIR_MASK);
+ uurb.endpoint = (uurb.endpoint & ~USB_ENDPOINT_DIR_MASK) | (dr->bRequestType & USB_ENDPOINT_DIR_MASK);
uurb.number_of_packets = 0;
- uurb.buffer_length = le16_to_cpup(&dr->length);
+ uurb.buffer_length = le16_to_cpup(&dr->wLength);
uurb.buffer += 8;
if (!access_ok((uurb.endpoint & USB_DIR_IN) ? VERIFY_WRITE : VERIFY_READ, uurb.buffer, uurb.buffer_length)) {
kfree(dr);
/* Root hub control transfers execute synchronously */
static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
{
- devrequest *cmd = (devrequest *) urb->setup_packet;
+ struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
u16 typeReq, wValue, wIndex, wLength;
const u8 *bufp = 0;
u8 *ubuf = urb->transfer_buffer;
int len = 0;
- typeReq = (cmd->requesttype << 8) | cmd->request;
- wValue = le16_to_cpu (cmd->value);
- wIndex = le16_to_cpu (cmd->index);
- wLength = le16_to_cpu (cmd->length);
+ typeReq = (cmd->bRequestType << 8) | cmd->bRequest;
+ wValue = le16_to_cpu (cmd->wValue);
+ wIndex = le16_to_cpu (cmd->wIndex);
+ wLength = le16_to_cpu (cmd->wLength);
if (wLength > urb->transfer_buffer_length)
goto error;
/* SETUP for control urb? */
if (unlikely (QTD_PID (token) == 2))
pci_unmap_single (ehci->hcd.pdev,
- qtd->buf_dma, sizeof (devrequest),
+ qtd->buf_dma, sizeof (struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
/* another queued urb? */
qtd->buf_dma = pci_map_single (
ehci->hcd.pdev,
urb->setup_packet,
- sizeof (devrequest),
+ sizeof (struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
if (unlikely (!qtd->buf_dma))
goto cleanup;
/* SETUP pid */
- qtd_fill (qtd, qtd->buf_dma, sizeof (devrequest),
+ qtd_fill (qtd, qtd->buf_dma, sizeof (struct usb_ctrlrequest),
token | (2 /* "setup" */ << 8));
/* ... and always at least one more pid */
static int hid_submit_out(struct hid_device *hid)
{
- hid->urbout.transfer_buffer_length = le16_to_cpup(&hid->out[hid->outtail].dr.length);
+ hid->urbout.transfer_buffer_length = le16_to_cpup(&hid->out[hid->outtail].dr.wLength);
hid->urbout.transfer_buffer = hid->out[hid->outtail].buffer;
hid->urbout.setup_packet = (void *) &(hid->out[hid->outtail].dr);
hid->urbout.dev = hid->dev;
{
hid_output_report(report, hid->out[hid->outhead].buffer);
- hid->out[hid->outhead].dr.value = cpu_to_le16(0x200 | report->id);
- hid->out[hid->outhead].dr.length = cpu_to_le16((report->size + 7) >> 3);
+ hid->out[hid->outhead].dr.wValue = cpu_to_le16(0x200 | report->id);
+ hid->out[hid->outhead].dr.wLength = cpu_to_le16((report->size + 7) >> 3);
hid->outhead = (hid->outhead + 1) & (HID_CONTROL_FIFO_SIZE - 1);
hid->ifnum = interface->bInterfaceNumber;
for (n = 0; n < HID_CONTROL_FIFO_SIZE; n++) {
- hid->out[n].dr.requesttype = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
- hid->out[n].dr.request = HID_REQ_SET_REPORT;
- hid->out[n].dr.index = cpu_to_le16(hid->ifnum);
+ hid->out[n].dr.bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ hid->out[n].dr.bRequest = HID_REQ_SET_REPORT;
+ hid->out[n].dr.wIndex = cpu_to_le16(hid->ifnum);
}
hid->name[0] = 0;
#define HID_CONTROL_FIFO_SIZE 8
struct hid_control_fifo {
- devrequest dr;
+ struct usb_ctrlrequest dr;
char buffer[HID_BUFFER_SIZE];
};
);
static void kaweth_disconnect(struct usb_device *dev, void *ptr);
int kaweth_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe,
- devrequest *cmd, void *data, int len,
- int timeout);
+ struct usb_ctrlrequest *cmd, void *data,
+ int len, int timeout);
/****************************************************************
* usb_device_id
__u16 size,
int timeout)
{
- devrequest *dr;
+ struct usb_ctrlrequest *dr;
kaweth_dbg("kaweth_control()");
return -EBUSY;
}
- dr = kmalloc(sizeof(devrequest),
+ dr = kmalloc(sizeof(struct usb_ctrlrequest),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
- if(!dr)
- {
+ if (!dr) {
kaweth_dbg("kmalloc() failed");
return -ENOMEM;
}
- dr->requesttype = requesttype;
- dr->request = request;
- dr->value = cpu_to_le16p(&value);
- dr->index = cpu_to_le16p(&index);
- dr->length = cpu_to_le16p(&size);
+ dr->bRequestType= requesttype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16p(&value);
+ dr->wIndex = cpu_to_le16p(&index);
+ dr->wLength = cpu_to_le16p(&size);
return kaweth_internal_control_msg(kaweth->dev,
pipe,
/*-------------------------------------------------------------------*/
// returns status (negative) or length (positive)
int kaweth_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe,
- devrequest *cmd, void *data, int len, int timeout)
+ struct usb_ctrlrequest *cmd, void *data, int len,
+ int timeout)
{
urb_t *urb;
int retv;
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
- pegasus->dr.requesttype = PEGASUS_REQT_READ;
- pegasus->dr.request = PEGASUS_REQ_GET_REGS;
- pegasus->dr.value = cpu_to_le16 (0);
- pegasus->dr.index = cpu_to_le16p(&indx);
- pegasus->dr.length = cpu_to_le16p(&size);
+ pegasus->dr.bRequestType = PEGASUS_REQT_READ;
+ pegasus->dr.bRequest = PEGASUS_REQ_GET_REGS;
+ pegasus->dr.wValue = cpu_to_le16 (0);
+ pegasus->dr.wIndex = cpu_to_le16p(&indx);
+ pegasus->dr.wLength = cpu_to_le16p(&size);
pegasus->ctrl_urb.transfer_buffer_length = size;
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
- pegasus->dr.requesttype = PEGASUS_REQT_WRITE;
- pegasus->dr.request = PEGASUS_REQ_SET_REGS;
- pegasus->dr.value = cpu_to_le16 (0);
- pegasus->dr.index = cpu_to_le16p( &indx );
- pegasus->dr.length = cpu_to_le16p( &size );
+ pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
+ pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
+ pegasus->dr.wValue = cpu_to_le16 (0);
+ pegasus->dr.wIndex = cpu_to_le16p( &indx );
+ pegasus->dr.wLength = cpu_to_le16p( &size );
pegasus->ctrl_urb.transfer_buffer_length = size;
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
remove_wait_queue(&pegasus->ctrl_wait, &wait);
set_current_state(TASK_RUNNING);
- pegasus->dr.requesttype = PEGASUS_REQT_WRITE;
- pegasus->dr.request = PEGASUS_REQ_SET_REG;
- pegasus->dr.value = cpu_to_le16p( &dat);
- pegasus->dr.index = cpu_to_le16p( &indx );
- pegasus->dr.length = cpu_to_le16( 1 );
+ pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
+ pegasus->dr.bRequest = PEGASUS_REQ_SET_REG;
+ pegasus->dr.wValue = cpu_to_le16p( &dat);
+ pegasus->dr.wIndex = cpu_to_le16p( &indx );
+ pegasus->dr.wLength = cpu_to_le16( 1 );
pegasus->ctrl_urb.transfer_buffer_length = 1;
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
{
int ret;
- pegasus->dr.requesttype = PEGASUS_REQT_WRITE;
- pegasus->dr.request = PEGASUS_REQ_SET_REGS;
- pegasus->dr.value = 0;
- pegasus->dr.index = cpu_to_le16(EthCtrl0);
- pegasus->dr.length = cpu_to_le16(3);
+ pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
+ pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
+ pegasus->dr.wValue = 0;
+ pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
+ pegasus->dr.wLength = cpu_to_le16(3);
pegasus->ctrl_urb.transfer_buffer_length = 3;
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
int dev_index;
int intr_interval;
struct urb ctrl_urb, rx_urb, tx_urb, intr_urb;
- devrequest dr;
+ struct usb_ctrlrequest dr;
wait_queue_head_t ctrl_wait;
struct semaphore ctrl_sem;
unsigned char ALIGN(rx_buff[PEGASUS_MAX_MTU]);
* 0.4.7 11/28/2001
* - Fixed typo in Documentation/scanner.txt. Thanks to
* Karel <karel.vervaeke@pandora.be> for pointing it out.
- * - Added ID's for a Memorex 6136u. Thanks to =C1lvaro Gaspar de
+ * - Added ID's for a Memorex 6136u. Thanks to Álvaro Gaspar de
* Valenzuela" <agaspard@utsi.edu>.
* - Added ID's for Agfa e25. Thanks to Heinrich
* Rust <Heinrich.Rust@gmx.de>. Also reported to work with
case SCANNER_IOCTL_CTRLMSG:
{
struct ctrlmsg_ioctl {
- devrequest req;
- void *data;
+ struct usb_ctrlrequest req;
+ void *data;
} cmsg;
int pipe, nb, ret;
unsigned char buf[64];
if (copy_from_user(&cmsg, (void *)arg, sizeof(cmsg)))
return -EFAULT;
- nb = le16_to_cpup(&cmsg.req.length);
+ nb = le16_to_cpup(&cmsg.req.wLength);
if (nb > sizeof(buf))
return -EINVAL;
- if ((cmsg.req.requesttype & 0x80) == 0) {
+ if ((cmsg.req.bRequestType & 0x80) == 0) {
pipe = usb_sndctrlpipe(dev, 0);
if (nb > 0 && copy_from_user(buf, cmsg.data, nb))
return -EFAULT;
pipe = usb_rcvctrlpipe(dev, 0);
}
- ret = usb_control_msg(dev, pipe, cmsg.req.request,
- cmsg.req.requesttype,
- le16_to_cpup(&cmsg.req.value),
- le16_to_cpup(&cmsg.req.index),
+ ret = usb_control_msg(dev, pipe, cmsg.req.bRequest,
+ cmsg.req.bRequestType,
+ le16_to_cpup(&cmsg.req.wValue),
+ le16_to_cpup(&cmsg.req.wIndex),
buf, nb, HZ);
if (ret < 0) {
return -EIO;
}
- if (nb > 0 && (cmsg.req.requesttype & 0x80) && copy_to_user(cmsg.data, buf, nb))
+ if (nb > 0 && (cmsg.req.bRequestType & 0x80) && copy_to_user(cmsg.data, buf, nb))
return -EFAULT;
return 0;
#define SCANNER_IOCTL_VENDOR _IOR('U', 0x20, int)
#define SCANNER_IOCTL_PRODUCT _IOR('U', 0x21, int)
/* send/recv a control message to the scanner */
-#define SCANNER_IOCTL_CTRLMSG _IOWR('U', 0x22, devrequest )
+#define SCANNER_IOCTL_CTRLMSG _IOWR('U', 0x22, struct usb_ctrlrequest)
#define SCN_MAX_MNR 16 /* We're allocated 16 minors */
/* All of the device info needed for the serial converters */
static struct usb_serial_device_type belkin_device = {
+ owner: THIS_MODULE,
name: "Belkin / Peracom / GoHubs USB Serial Adapter",
id_table: id_table_combined,
num_interrupt_in: 1,
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/*Start reading from the device*/
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
} /* belkin_sa_close */
MODULE_DEVICE_TABLE (usb, id_table);
static struct usb_serial_device_type cyberjack_device = {
+ owner: THIS_MODULE,
name: "Reiner SCT Cyberjack USB card reader",
id_table: id_table,
num_interrupt_in: 1,
if (port_paranoia_check (port, __FUNCTION__))
return -ENODEV;
- MOD_INC_USE_COUNT;
-
dbg(__FUNCTION__ " - port %d", port->number);
down (&port->sem);
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
static int cyberjack_write (struct usb_serial_port *port, int from_user, const unsigned char *buf, int count)
/* device info needed for the Digi serial converter */
static struct usb_serial_device_type digi_acceleport_2_device = {
+ owner: THIS_MODULE,
name: "Digi USB",
id_table: id_table_2,
num_interrupt_in: 0,
};
static struct usb_serial_device_type digi_acceleport_4_device = {
+ owner: THIS_MODULE,
name: "Digi USB",
id_table: id_table_4,
num_interrupt_in: 0,
spin_lock_irqsave( &priv->dp_port_lock, flags );
digi_wakeup_write( port );
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
- MOD_DEC_USE_COUNT;
}
static void digi_wakeup_write( struct usb_serial_port *port )
/* also queue up a wakeup at scheduler time, in case we */
/* lost the race in write_chan(). */
- MOD_INC_USE_COUNT;
- if (schedule_task(&priv->dp_wakeup_task) == 0)
- MOD_DEC_USE_COUNT;
+ schedule_task(&priv->dp_wakeup_task);
spin_unlock( &priv->dp_port_lock );
/* inc module use count before sleeping to wait for closes */
++port->open_count;
- MOD_INC_USE_COUNT;
/* wait for a close in progress to finish */
while( priv->dp_in_close ) {
&priv->dp_port_lock, flags );
if( signal_pending(current) ) {
--port->open_count;
- MOD_DEC_USE_COUNT;
return( -EINTR );
}
spin_lock_irqsave( &priv->dp_port_lock, flags );
spin_lock_irqsave( &priv->dp_port_lock, flags );
if( port->open_count > 1 ) {
--port->open_count;
- MOD_DEC_USE_COUNT;
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
return;
} else if( port->open_count <= 0 ) {
priv->dp_write_urb_in_use = 0;
priv->dp_in_close = 0;
--port->open_count;
- MOD_DEC_USE_COUNT;
wake_up_interruptible( &priv->dp_close_wait );
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
priv = serial->port[i].private;
spin_lock_irqsave( &priv->dp_port_lock, flags );
while( serial->port[i].open_count > 0 ) {
- MOD_DEC_USE_COUNT;
--serial->port[i].open_count;
}
spin_unlock_irqrestore( &priv->dp_port_lock, flags );
MODULE_DEVICE_TABLE (usb, id_table);
static struct usb_serial_device_type empeg_device = {
+ owner: THIS_MODULE,
name: "Empeg",
id_table: id_table,
num_interrupt_in: 0,
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/* Uncomment the following line if you want to see some statistics in your syslog */
/* info ("Bytes In = %d Bytes Out = %d", bytes_in, bytes_out); */
-
- MOD_DEC_USE_COUNT;
}
which share common code */
static struct usb_serial_device_type ftdi_sio_device = {
+ owner: THIS_MODULE,
name: "FTDI SIO",
id_table: id_table_sio,
num_interrupt_in: 0,
down (&port->sem);
- MOD_INC_USE_COUNT;
++port->open_count;
if (port->open_count == 1){
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
} /* ftdi_sio_close */
return -ENODEV;
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/* force low_latency on so that our tty_push actually forces the data through,
edge_serial = (struct edgeport_serial *)serial->private;
if (edge_serial == NULL) {
port->open_count = 0;
- MOD_DEC_USE_COUNT;
return -ENODEV;
}
if (edge_serial->interrupt_in_buffer == NULL) {
err(__FUNCTION__" - error sending open port command");
edge_port->openPending = FALSE;
port->open_count = 0;
- MOD_DEC_USE_COUNT;
return -ENODEV;
}
dbg(__FUNCTION__" - open timedout");
edge_port->openPending = FALSE;
port->open_count = 0;
- MOD_DEC_USE_COUNT;
return -ENODEV;
}
port->open_count = 0;
}
- MOD_DEC_USE_COUNT;
dbg(__FUNCTION__" exited");
}
MODULE_DEVICE_TABLE (usb, id_table_combined);
static struct usb_serial_device_type edgeport_1port_device = {
+ owner: THIS_MODULE,
name: "Edgeport 1 port adapter",
id_table: edgeport_1port_id_table,
num_interrupt_in: 1,
};
static struct usb_serial_device_type edgeport_2port_device = {
+ owner: THIS_MODULE,
name: "Edgeport 2 port adapter",
id_table: edgeport_2port_id_table,
num_interrupt_in: 1,
};
static struct usb_serial_device_type edgeport_4port_device = {
+ owner: THIS_MODULE,
name: "Edgeport 4 port adapter",
id_table: edgeport_4port_id_table,
num_interrupt_in: 1,
};
static struct usb_serial_device_type edgeport_8port_device = {
+ owner: THIS_MODULE,
name: "Edgeport 8 port adapter",
id_table: edgeport_8port_id_table,
num_interrupt_in: 1,
struct usb_serial_device_type ir_device = {
+ owner: THIS_MODULE,
name: "IR Dongle",
id_table: id_table,
num_interrupt_in: 1,
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
if (buffer_size) {
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
static int ir_write (struct usb_serial_port *port, int from_user, const unsigned char *buf, int count)
dbg("keyspan_open called for port%d.\n", port->number);
- MOD_INC_USE_COUNT;
-
down (&port->sem);
already_active = port->open_count;
++port->open_count;
port->tty = 0;
}
up (&port->sem);
-
- MOD_DEC_USE_COUNT;
}
port = &serial->port[i];
while (port->open_count > 0) {
--port->open_count;
- MOD_DEC_USE_COUNT;
}
kfree(port->private);
}
/* Structs for the devices, pre and post renumeration. */
static struct usb_serial_device_type keyspan_usa18x_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA18X - (without firmware)",
id_table: keyspan_usa18x_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa19_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA19 - (without firmware)",
id_table: keyspan_usa19_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
static struct usb_serial_device_type keyspan_usa19w_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA19W - (without firmware)",
id_table: keyspan_usa19w_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
static struct usb_serial_device_type keyspan_usa28_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28 - (without firmware)",
id_table: keyspan_usa28_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa28x_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28X - (without firmware)",
id_table: keyspan_usa28x_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa28xa_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28XA - (without firmware)",
id_table: keyspan_usa28xa_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa28xb_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28XB - (without firmware)",
id_table: keyspan_usa28xb_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa49w_pre_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA49W - (without firmware)",
id_table: keyspan_usa49w_pre_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa18x_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA18X",
id_table: keyspan_usa18x_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa19_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA19",
id_table: keyspan_usa19_ids,
num_interrupt_in: NUM_DONT_CARE,
static struct usb_serial_device_type keyspan_usa19w_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA19W",
id_table: keyspan_usa19w_ids,
num_interrupt_in: NUM_DONT_CARE,
static struct usb_serial_device_type keyspan_usa28_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28",
id_table: keyspan_usa28_ids,
num_interrupt_in: NUM_DONT_CARE,
static struct usb_serial_device_type keyspan_usa28x_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28X/XB",
id_table: keyspan_usa28x_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa28xa_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA28XA",
id_table: keyspan_usa28xa_ids,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type keyspan_usa49w_device = {
+ owner: THIS_MODULE,
name: "Keyspan USA49W",
id_table: keyspan_usa49w_ids,
num_interrupt_in: NUM_DONT_CARE,
/* wake up other tty processes */
wake_up_interruptible( &tty->write_wait );
/* For 2.2.16 backport -- wake_up_interruptible( &tty->poll_wait ); */
- MOD_DEC_USE_COUNT;
}
static void keyspan_pda_request_unthrottle( struct usb_serial *serial )
NULL,
0,
2*HZ);
- MOD_DEC_USE_COUNT;
}
tty = serial->port[0].tty;
priv->tx_throttled = 0;
/* queue up a wakeup at scheduler time */
- MOD_INC_USE_COUNT;
- if (schedule_task(&priv->wakeup_task) == 0)
- MOD_DEC_USE_COUNT;
+ schedule_task(&priv->wakeup_task);
break;
default:
break;
if (request_unthrottle) {
priv->tx_throttled = 1; /* block writers */
- MOD_INC_USE_COUNT;
- if (schedule_task(&priv->unthrottle_task) == 0)
- MOD_DEC_USE_COUNT;
+ schedule_task(&priv->unthrottle_task);
}
rc = count;
}
/* queue up a wakeup at scheduler time */
- MOD_INC_USE_COUNT;
- if (schedule_task(&priv->wakeup_task) == 0)
- MOD_DEC_USE_COUNT;
+ schedule_task(&priv->wakeup_task);
}
down (&port->sem);
- MOD_INC_USE_COUNT;
++port->open_count;
if (port->open_count == 1) {
return rc;
error:
--port->open_count;
- MOD_DEC_USE_COUNT;
up (&port->sem);
return rc;
}
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
static struct usb_serial_device_type mct_u232_device = {
+ owner: THIS_MODULE,
name: "Magic Control Technology USB-RS232",
id_table: id_table_combined,
num_interrupt_in: 2,
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/* Compensate for a hardware bug: although the Sitecom U232-P25
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
} /* mct_u232_close */
static struct usb_serial_device_type zyxel_omninet_device = {
+ owner: THIS_MODULE,
name: "ZyXEL - omni.net lcd plus usb",
id_table: id_table,
num_interrupt_in: 1,
down (&port->sem);
- MOD_INC_USE_COUNT;
++port->open_count;
if (port->open_count == 1) {
err(__FUNCTION__"- kmalloc(%Zd) failed.", sizeof(struct omninet_data));
port->open_count = 0;
up (&port->sem);
- MOD_DEC_USE_COUNT;
return -ENOMEM;
}
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
/* All of the device info needed for the PL2303 SIO serial converter */
static struct usb_serial_device_type pl2303_device = {
+ owner: THIS_MODULE,
name: "PL-2303",
id_table: id_table,
num_interrupt_in: NUM_DONT_CARE,
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
#define FISH(a,b,c,d) \
}
up (&port->sem);
- MOD_DEC_USE_COUNT;
}
static int set_modem_info (struct usb_serial_port *port, unsigned int cmd, unsigned int *value)
static void whiteheat_real_shutdown (struct usb_serial *serial);
static struct usb_serial_device_type whiteheat_fake_device = {
+ owner: THIS_MODULE,
name: "Connect Tech - WhiteHEAT - (prerenumeration)",
id_table: id_table_prerenumeration,
num_interrupt_in: NUM_DONT_CARE,
};
static struct usb_serial_device_type whiteheat_device = {
+ owner: THIS_MODULE,
name: "Connect Tech - WhiteHEAT",
id_table: id_table_std,
num_interrupt_in: NUM_DONT_CARE,
down (&port->sem);
++port->open_count;
- MOD_INC_USE_COUNT;
if (port->open_count == 1) {
/* set up some stuff for our command port */
error_exit:
--port->open_count;
- MOD_DEC_USE_COUNT;
dbg(__FUNCTION__ " - error_exit");
up (&port->sem);
usb_unlink_urb (port->read_urb);
port->open_count = 0;
}
- MOD_DEC_USE_COUNT;
up (&port->sem);
}
{
struct completion urb_done;
int status;
- devrequest *dr;
+ struct usb_ctrlrequest *dr;
/* allocate the device request structure */
- dr = kmalloc(sizeof(devrequest), GFP_NOIO);
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
if (!dr)
return -ENOMEM;
/* fill in the structure */
- dr->requesttype = requesttype;
- dr->request = request;
- dr->value = cpu_to_le16(value);
- dr->index = cpu_to_le16(index);
- dr->length = cpu_to_le16(size);
+ dr->bRequestType = requesttype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16(value);
+ dr->wIndex = cpu_to_le16(index);
+ dr->wLength = cpu_to_le16(size);
/* set up data structures for the wakeup system */
init_completion(&urb_done);
if (usb_pipetype(urb->pipe) == PIPE_CONTROL && urb->setup_packet) {
urbp->setup_packet_dma_handle = pci_map_single(uhci->dev,
- urb->setup_packet, sizeof(devrequest),
+ urb->setup_packet, sizeof(struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
if (!urbp->setup_packet_dma_handle)
return NULL;
if (urbp->setup_packet_dma_handle)
pci_unmap_single(uhci->dev, urbp->setup_packet_dma_handle,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
if (urbp->transfer_buffer_dma_handle)
pci_unmap_single(uhci->dev, urbp->transfer_buffer_dma_handle,
{
struct uhci *uhci = (struct uhci *)urb->dev->bus->hcpriv;
unsigned int pipe = urb->pipe;
- devrequest *cmd = (devrequest *)urb->setup_packet;
+ struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *)urb->setup_packet;
void *data = urb->transfer_buffer;
int leni = urb->transfer_buffer_length;
int len = 0;
return -EINPROGRESS;
}
- bmRType_bReq = cmd->requesttype | cmd->request << 8;
- wValue = le16_to_cpu(cmd->value);
- wIndex = le16_to_cpu(cmd->index);
- wLength = le16_to_cpu(cmd->length);
+ bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
+ wValue = le16_to_cpu(cmd->wValue);
+ wIndex = le16_to_cpu(cmd->wIndex);
+ wLength = le16_to_cpu(cmd->wLength);
for (i = 0; i < 8; i++)
uhci->rh.c_p_r[i] = 0;
if (urbp->setup_packet_dma_handle)
pci_dma_sync_single(uhci->dev, urbp->setup_packet_dma_handle,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
urb->dev = NULL;
if (urb->complete)
struct usb_device * usb_dev = urb->dev;
ohci_t * ohci = usb_dev->bus->hcpriv;
unsigned int pipe = urb->pipe;
- devrequest * cmd = (devrequest *) urb->setup_packet;
+ struct usb_ctrlrequest * cmd = (struct usb_ctrlrequest *) urb->setup_packet;
void * data = urb->transfer_buffer;
int leni = urb->transfer_buffer_length;
int len = 0;
return 0;
}
- bmRType_bReq = cmd->requesttype | (cmd->request << 8);
- wValue = le16_to_cpu (cmd->value);
- wIndex = le16_to_cpu (cmd->index);
- wLength = le16_to_cpu (cmd->length);
+ bmRType_bReq = cmd->bRequestType | (cmd->bRequest << 8);
+ wValue = le16_to_cpu (cmd->wValue);
+ wIndex = le16_to_cpu (cmd->wIndex);
+ wLength = le16_to_cpu (cmd->wLength);
switch (bmRType_bReq) {
/* Request Destination:
/* lock to protect the minor_table structure */
static DECLARE_MUTEX (minor_table_mutex);
-/* file operations needed when we register this driver */
+/*
+ * File operations needed when we register this driver.
+ * This assumes that this driver NEEDS file operations,
+ * of course, which means that the driver is expected
+ * to have a node in the /dev directory. If the USB
+ * device were for a network interface then the driver
+ * would use "struct net_driver" instead, and a serial
+ * device would use "struct tty_driver".
+ */
static struct file_operations skel_fops = {
+ /*
+ * The owner field is part of the module-locking
+ * mechanism. The idea is that the kernel knows
+ * which module to increment the use-counter of
+ * BEFORE it calls the device's open() function.
+ * This also means that the kernel can decrement
+ * the use-counter again before calling release()
+ * or should the open() function fail.
+ *
+ * Not all device structures have an "owner" field
+ * yet. "struct file_operations" and "struct net_device"
+ * do, while "struct tty_driver" does not. If the struct
+ * has an "owner" field, then initialize it to the value
+ * THIS_MODULE and the kernel will handle all module
+ * locking for you automatically. Otherwise, you must
+ * increment the use-counter in the open() function
+ * and decrement it again in the release() function
+ * yourself.
+ */
owner: THIS_MODULE,
+
read: skel_read,
write: skel_write,
ioctl: skel_ioctl,
return -ENODEV;
}
- /* increment our usage count for the module */
+ /* Increment our usage count for the module.
+ * This is redundant here, because "struct file_operations"
+ * has an "owner" field. This line is included here soley as
+ * a reference for drivers using lesser structures... ;-)
+ */
MOD_INC_USE_COUNT;
/* lock our minor table and get our local data for this minor */
/* the device was unplugged before the file was released */
up (&dev->sem);
skel_delete (dev);
- MOD_DEC_USE_COUNT;
up (&minor_table_mutex);
+ MOD_DEC_USE_COUNT;
return 0;
}
{
if (urb_priv->setup_packet_dma)
pci_dma_sync_single(s->uhci_pci, urb_priv->setup_packet_dma,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
if (urb_priv->transfer_buffer_dma)
pci_dma_sync_single(s->uhci_pci, urb_priv->transfer_buffer_dma,
{
if (urb_priv->setup_packet_dma) {
pci_unmap_single(s->uhci_pci, urb_priv->setup_packet_dma,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
urb_priv->setup_packet_dma = 0;
}
if (urb_priv->transfer_buffer_dma) {
if (type == PIPE_CONTROL)
urb_priv->setup_packet_dma = pci_map_single(s->uhci_pci, urb->setup_packet,
- sizeof(devrequest), PCI_DMA_TODEVICE);
+ sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
if (urb->transfer_buffer_length)
urb_priv->transfer_buffer_dma = pci_map_single(s->uhci_pci,
struct usb_device *usb_dev = urb->dev;
uhci_t *uhci = usb_dev->bus->hcpriv;
unsigned int pipe = urb->pipe;
- devrequest *cmd = (devrequest *) urb->setup_packet;
+ struct usb_ctrlrequest *cmd = (struct usb_ctrlrequest *) urb->setup_packet;
void *data = urb->transfer_buffer;
int leni = urb->transfer_buffer_length;
int len = 0;
}
- bmRType_bReq = cmd->requesttype | cmd->request << 8;
- wValue = le16_to_cpu (cmd->value);
- wIndex = le16_to_cpu (cmd->index);
- wLength = le16_to_cpu (cmd->length);
+ bmRType_bReq = cmd->bRequestType | cmd->bRequest << 8;
+ wValue = le16_to_cpu (cmd->wValue);
+ wIndex = le16_to_cpu (cmd->wIndex);
+ wLength = le16_to_cpu (cmd->wLength);
for (i = 0; i < 8; i++)
uhci->rh.c_p_r[i] = 0;
/*-------------------------------------------------------------------*/
// returns status (negative) or length (positive)
int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe,
- devrequest *cmd, void *data, int len, int timeout)
+ struct usb_ctrlrequest *cmd, void *data, int len, int timeout)
{
urb_t *urb;
int retv;
int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
__u16 value, __u16 index, void *data, __u16 size, int timeout)
{
- devrequest *dr = kmalloc(sizeof(devrequest), GFP_KERNEL);
+ struct usb_ctrlrequest *dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
int ret;
if (!dr)
return -ENOMEM;
- dr->requesttype = requesttype;
- dr->request = request;
- dr->value = cpu_to_le16p(&value);
- dr->index = cpu_to_le16p(&index);
- dr->length = cpu_to_le16p(&size);
+ dr->bRequestType= requesttype;
+ dr->bRequest = request;
+ dr->wValue = cpu_to_le16p(&value);
+ dr->wIndex = cpu_to_le16p(&index);
+ dr->wLength = cpu_to_le16p(&size);
//dbg("usb_control_msg");
}
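
Callers of usb_control_msg() are unaffected by the rename, since the
struct usb_ctrlrequest is built internally from the raw arguments. A
hypothetical call reading the first 8 bytes of a device descriptor
(dev, buf and ret assumed to exist in the caller) would look like:

  ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
                        USB_REQ_GET_DESCRIPTOR,
                        USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
                        USB_DT_DEVICE << 8, 0, buf, 8, HZ);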
ifp = interface->altsetting + interface->num_altsetting;
+ ifp->endpoint = NULL;
+ ifp->extra = NULL;
+ ifp->extralen = 0;
interface->num_altsetting++;
memcpy(ifp, buffer, USB_DT_INTERFACE_SIZE);
/* Copy any unknown descriptors into a storage area for */
/* drivers to later parse */
len = (int)(buffer - begin);
- if (!len) {
- ifp->extra = NULL;
- ifp->extralen = 0;
- } else {
+ if (len) {
ifp->extra = kmalloc(len, GFP_KERNEL);
if (!ifp->extra) {
unsigned char new[8];
unsigned char old[8];
struct urb irq, led;
- devrequest dr;
+ struct usb_ctrlrequest dr;
unsigned char leds, newleds;
char name[128];
int open;
FILL_INT_URB(&kbd->irq, dev, pipe, kbd->new, maxp > 8 ? 8 : maxp,
usb_kbd_irq, kbd, endpoint->bInterval);
- kbd->dr.requesttype = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
- kbd->dr.request = HID_REQ_SET_REPORT;
- kbd->dr.value = 0x200;
- kbd->dr.index = interface->bInterfaceNumber;
- kbd->dr.length = 1;
+ kbd->dr.bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ kbd->dr.bRequest = HID_REQ_SET_REPORT;
+ kbd->dr.wValue = 0x200;
+ kbd->dr.wIndex = interface->bInterfaceNumber;
+ kbd->dr.wLength = 1;
kbd->dev.name = kbd->name;
kbd->dev.idbus = BUS_USB;
psinfo.pr_state = i;
psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
psinfo.pr_zomb = psinfo.pr_sname == 'Z';
- psinfo.pr_nice = current->nice;
+ psinfo.pr_nice = current->__nice;
psinfo.pr_flag = current->flags;
psinfo.pr_uid = NEW_TO_OLD_UID(current->uid);
psinfo.pr_gid = NEW_TO_OLD_GID(current->gid);
int blkdev_put(struct block_device *bdev, int kind)
{
int ret = 0;
- kdev_t rdev = to_kdev_t(bdev->bd_dev); /* this should become bdev */
struct inode *bd_inode = bdev->bd_inode;
down(&bdev->bd_sem);
if (kind == BDEV_FILE)
__block_fsync(bd_inode);
else if (kind == BDEV_FS)
- fsync_no_super(rdev);
+ fsync_no_super(bdev);
if (!--bdev->bd_openers)
kill_bdev(bdev);
if (bdev->bd_op->release)
return sync_buffers(dev, 1);
}
-int fsync_no_super(kdev_t dev)
+int fsync_no_super(struct block_device *bdev)
{
+ kdev_t dev = to_kdev_t(bdev->bd_dev);
sync_buffers(dev, 0);
return sync_buffers(dev, 1);
}
wakeup_bdflush();
try_to_free_pages(zone, GFP_NOFS, 0);
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- __set_current_state(TASK_RUNNING);
- schedule();
+ yield();
}
void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
* 14.02.92: changed it to sync dirty buffers a bit: better performance
* when the filesystem starts to get full of dirty blocks (I hope).
*/
-struct buffer_head * getblk(kdev_t dev, sector_t block, int size)
+struct buffer_head * __getblk(struct block_device *bdev, sector_t block, int size)
{
+ kdev_t dev = to_kdev_t(bdev->bd_dev);
for (;;) {
struct buffer_head * bh;
* Reads a specified block, and returns buffer head that
* contains it. It returns NULL if the block was unreadable.
*/
-struct buffer_head * bread(kdev_t dev, int block, int size)
+struct buffer_head * __bread(struct block_device *bdev, int block, int size)
{
- struct buffer_head * bh;
+ struct buffer_head * bh = __getblk(bdev, block, size);
- bh = getblk(dev, block, size);
touch_buffer(bh);
if (buffer_uptodate(bh))
return bh;
* amount of time. This is for crash/recovery testing.
*/
-static void make_rdonly(kdev_t dev, int *no_write)
+static void make_rdonly(struct block_device *bdev, int *no_write)
{
- if (kdev_val(dev)) {
+ if (bdev) {
printk(KERN_WARNING "Turning device %s read-only\n",
- bdevname(dev));
- *no_write = 0xdead0000 + kdev_val(dev);
+ bdevname(to_kdev_t(bdev->bd_dev)));
+ *no_write = 0xdead0000 + bdev->bd_dev;
}
}
{
struct super_block *sb = (struct super_block *)arg;
- make_rdonly(sb->s_dev, &journal_no_write[0]);
+ make_rdonly(sb->s_bdev, &journal_no_write[0]);
make_rdonly(EXT3_SB(sb)->s_journal->j_dev, &journal_no_write[1]);
wake_up(&EXT3_SB(sb)->ro_wait_queue);
}
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
struct ext3_super_block *es = sbi->s_es;
- kdev_t j_dev = sbi->s_journal->j_dev;
int i;
journal_destroy(sbi->s_journal);
dump_orphan_list(sb, sbi);
J_ASSERT(list_empty(&sbi->s_orphan));
- invalidate_buffers(sb->s_dev);
- if (!kdev_same(j_dev, sb->s_dev)) {
+ invalidate_bdev(sb->s_bdev, 0);
+ if (sbi->journal_bdev != sb->s_bdev) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
* hotswapped, and it breaks the `ro-after' testing code.
*/
- fsync_no_super(j_dev);
- invalidate_buffers(j_dev);
+ fsync_no_super(sbi->journal_bdev);
+ invalidate_bdev(sbi->journal_bdev, 0);
ext3_blkdev_remove(sbi);
}
clear_ro_after(sb);
sb->s_id);
if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
printk("external journal on %s\n",
- bdevname(EXT3_SB(sb)->s_journal->j_dev));
+ bdevname(to_kdev_t(EXT3_SB(sb)->s_journal->j_dev->bd_dev)));
} else {
printk("internal journal\n");
}
sb_block = EXT3_MIN_BLOCK_SIZE / blocksize;
offset = EXT3_MIN_BLOCK_SIZE % blocksize;
set_blocksize(j_dev, blocksize);
- if (!(bh = bread(j_dev, sb_block, blocksize))) {
+ if (!(bh = __bread(bdev, sb_block, blocksize))) {
printk(KERN_ERR "EXT3-fs: couldn't read superblock of "
"external journal\n");
goto out_bdev;
start = sb_block + 1;
brelse(bh); /* we're done with the superblock */
- journal = journal_init_dev(j_dev, sb->s_dev,
+ journal = journal_init_dev(bdev, sb->s_bdev,
start, len, blocksize);
if (!journal) {
printk(KERN_ERR "EXT3-fs: failed to create device journal\n");
printk (KERN_NOTICE __FUNCTION__
": ENOMEM at get_unused_buffer_head, "
"trying again.\n");
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
} while (!new_bh);
/* keep subsequent assertions sane */
new_jh->b_transaction = NULL;
new_bh->b_size = jh2bh(jh_in)->b_size;
- new_bh->b_dev = transaction->t_journal->j_dev;
+ new_bh->b_dev = to_kdev_t(transaction->t_journal->j_dev->bd_dev);
new_bh->b_blocknr = blocknr;
new_bh->b_state |= (1 << BH_Mapped) | (1 << BH_Dirty);
printk (KERN_ALERT __FUNCTION__
": journal block not found "
"at offset %lu on %s\n",
- blocknr, bdevname(journal->j_dev));
+ blocknr,
+ bdevname(to_kdev_t(journal->j_dev->bd_dev)));
err = -EIO;
__journal_abort_soft(journal, err);
}
if (err)
return NULL;
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
bh->b_state |= (1 << BH_Dirty);
BUFFER_TRACE(bh, "return this buffer");
return journal_add_journal_head(bh);
* must have all data blocks preallocated.
*/
-journal_t * journal_init_dev(kdev_t dev, kdev_t fs_dev,
+journal_t * journal_init_dev(struct block_device *bdev,
+ struct block_device *fs_dev,
int start, int len, int blocksize)
{
journal_t *journal = journal_init_common();
if (!journal)
return NULL;
- journal->j_dev = dev;
+ journal->j_dev = bdev;
journal->j_fs_dev = fs_dev;
journal->j_blk_offset = start;
journal->j_maxlen = len;
journal->j_blocksize = blocksize;
- bh = getblk(journal->j_dev, start, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, start, journal->j_blocksize);
J_ASSERT(bh != NULL);
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
if (!journal)
return NULL;
- journal->j_dev = inode->i_dev;
- journal->j_fs_dev = inode->i_dev;
+ journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
journal->j_inode = inode;
jbd_debug(1,
"journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
return NULL;
}
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
J_ASSERT(bh != NULL);
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
err = journal_bmap(journal, i, &blocknr);
if (err)
return err;
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
- wait_on_buffer(bh);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ lock_buffer(bh);
memset (bh->b_data, 0, journal->j_blocksize);
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
BUFFER_TRACE(bh, "marking uptodate");
mark_buffer_uptodate(bh, 1);
+ unlock_buffer(bh);
__brelse(bh);
}
- sync_dev(journal->j_dev);
+ fsync_dev(to_kdev_t(journal->j_dev->bd_dev));
jbd_debug(1, "JBD: journal cleared.\n");
/* OK, fill in the initial static fields in the new superblock */
const char * journal_dev_name(journal_t *journal)
{
- kdev_t dev;
+ struct block_device *bdev;
if (journal->j_inode)
- dev = journal->j_inode->i_dev;
+ bdev = journal->j_inode->i_sb->s_bdev;
else
- dev = journal->j_dev;
+ bdev = journal->j_dev;
- return bdevname(dev);
+ return bdevname(to_kdev_t(bdev->bd_dev));
}
/*
last_warning = jiffies;
}
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
}
last_warning = jiffies;
}
while (ret == 0) {
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
}
}
goto failed;
}
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
if (!bh) {
err = -ENOMEM;
goto failed;
return err;
}
- bh = getblk(journal->j_dev, blocknr, journal->j_blocksize);
+ bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
if (!bh)
return -ENOMEM;
/* Find a buffer for the new
* data being restored */
- nbh = getblk(journal->j_fs_dev, blocknr,
- journal->j_blocksize);
+ nbh = __getblk(journal->j_fs_dev,
+ blocknr,
+ journal->j_blocksize);
if (nbh == NULL) {
printk(KERN_ERR
"JBD: Out of memory "
goto failed;
}
+ lock_buffer(nbh);
memcpy(nbh->b_data, obh->b_data,
journal->j_blocksize);
if (flags & JFS_FLAG_ESCAPE) {
mark_buffer_uptodate(nbh, 1);
++info->nr_replays;
/* ll_rw_block(WRITE, 1, &nbh); */
+ unlock_buffer(nbh);
brelse(obh);
brelse(nbh);
}
if (!journal_oom_retry)
return -ENOMEM;
jbd_debug(1, "ENOMEM in " __FUNCTION__ ", retrying.\n");
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
goto repeat;
}
return -EINVAL;
}
- dev = journal->j_fs_dev;
+ dev = to_kdev_t(journal->j_fs_dev->bd_dev);
bh = bh_in;
if (!bh) {
if (handle->h_sync) {
do {
old_handle_count = transaction->t_handle_count;
- set_current_state(TASK_RUNNING);
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
} while (old_handle_count != transaction->t_handle_count);
}
sprintf(current->comm, "jffs2_gcd_mtd%d", c->mtd->index);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/* FIXME in the 2.2 backport */
current->nice = 10;
+#endif
for (;;) {
spin_lock_irq(¤t->sigmask_lock);
/* Let the blocked process remove waiter from the
* block list when it gets scheduled.
*/
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
} else {
/* Remove waiter from the block list, because by the
* time it wakes up blocker won't exist any more.
inode->i_blocks = fattr->du.nfs2.blocks;
inode->i_blksize = fattr->du.nfs2.blocksize;
}
- inode->i_rdev = NODEV;
- if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
- inode->i_rdev = to_kdev_t(fattr->rdev);
/* Update attrtimeo value */
if (!invalid && time_after(jiffies, NFS_ATTRTIMEO_UPDATE(inode)+NFS_ATTRTIMEO(inode))) {
continue;
if (signalled() && (server->flags & NFS_MOUNT_INTR))
return ERR_PTR(-ERESTARTSYS);
- current->policy = SCHED_YIELD;
- schedule();
+ yield();
}
/* Initialize the request struct. Initially, we assume a
/* scale priority and nice values from timeslices to -20..20 */
/* to make it look like a "normal" Unix priority/nice value */
- priority = task->dyn_prio;
- nice = task->nice;
+ priority = task->prio;
+ if (priority >= MAX_RT_PRIO)
+ priority -= MAX_RT_PRIO;
+ else
+ priority = priority-100;
+ nice = task->__nice;
read_lock(&tasklist_lock);
ppid = task->pid ? task->p_opptr->pid : 0;
task->nswap,
task->cnswap,
task->exit_signal,
- task->processor);
+ task->cpu);
if(mm)
mmput(mm);
return res;
a = avenrun[0] + (FIXED_1/200);
b = avenrun[1] + (FIXED_1/200);
c = avenrun[2] + (FIXED_1/200);
- len = sprintf(page,"%d.%02d %d.%02d %d.%02d %d/%d %d\n",
+ len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
LOAD_INT(a), LOAD_FRAC(a),
LOAD_INT(b), LOAD_FRAC(b),
LOAD_INT(c), LOAD_FRAC(c),
- nr_running, nr_threads, last_pid);
+ nr_running(), nr_threads, last_pid);
return proc_calc_metrics(page, start, off, count, eof, len);
}
int len;
uptime = jiffies;
- idle = init_tasks[0]->times.tms_utime + init_tasks[0]->times.tms_stime;
+ idle = init_task.times.tms_utime + init_task.times.tms_stime;
/* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but
that would overflow about every five days at HZ == 100.
}
len += sprintf(page + len,
- "\nctxt %u\n"
+ "\nctxt %lu\n"
"btime %lu\n"
"processes %lu\n",
- kstat.context_swtch,
+ nr_context_switches(),
xtime.tv_sec - jif / HZ,
total_forks);
buffer_journal_dirty(bh) ? ' ' : '!');
}
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- /* current->dyn_prio = 0; */
- schedule();
+ yield();
}
if (repeat_counter > 30000000) {
reiserfs_warning("vs-3051: done waiting, ignore vs-3050 messages for (%b)\n", bh) ;
struct buffer_head * reiserfs_bread (struct super_block *super, int n_block)
{
struct buffer_head *result;
- PROC_EXP( unsigned int ctx_switches = kstat.context_swtch );
+ PROC_EXP( unsigned int ctx_switches = nr_context_switches() );
result = sb_bread(super, n_block);
PROC_INFO_INC( super, breads );
- PROC_EXP( if( kstat.context_swtch != ctx_switches )
+ PROC_EXP( if( nr_context_switches() != ctx_switches )
PROC_INFO_INC( super, bread_miss ) );
return result;
}
if ( ! (++repeat_counter % 10000) )
printk("get_new_buffer(%u): counter(%d) too big", current->pid, repeat_counter);
#endif
-
- current->time_slice = 0;
- schedule();
+ yield();
}
#ifdef CONFIG_REISERFS_CHECK
}
bn = allocate_bitmap_node(p_s_sb) ;
if (!bn) {
- current->policy |= SCHED_YIELD ;
- schedule() ;
+ yield();
goto repeat ;
}
return bn ;
#if 0
// FIXME: do we need this? shouldn't we simply continue?
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- /*current->time_slice = 0;*/
- schedule();
+ yield();
#endif
continue;
}
#endif
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
/* This loop can be optimized. */
} while ( (*p_n_removed < n_unfm_number || need_research) &&
if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
ufs_sync_inode (inode);
run_task_queue(&tq_disk);
- current->policy |= SCHED_YIELD;
- schedule ();
-
-
+ yield();
}
offset = inode->i_size & uspi->s_fshift;
if (offset) {
:"=m" (ADDR)
:"Ir" (nr));
}
+
+static __inline__ void __clear_bit(int nr, volatile void * addr)
+{
+ __asm__ __volatile__(
+ "btrl %1,%0"
+ :"=m" (ADDR)
+ :"Ir" (nr));
+}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
#include <asm/atomic.h>
#include <asm/pgalloc.h>
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 168-bit bitmap where the first 128 bits are
+ * unlikely to be cleared. It's guaranteed that at least one
+ * of the 168 bits is cleared.
+ */
+#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
+# error update this function.
+#endif
+
+static inline int sched_find_first_zero_bit(char *bitmap)
+{
+ unsigned int *b = (unsigned int *)bitmap;
+ unsigned int rt;
+
+ rt = b[0] & b[1] & b[2] & b[3];
+ if (unlikely(rt != 0xffffffff))
+ return find_first_zero_bit(bitmap, MAX_RT_PRIO);
+
+ if (b[4] != ~0)
+ return ffz(b[4]) + MAX_RT_PRIO;
+ return ffz(b[5]) + 32 + MAX_RT_PRIO;
+}
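
In the zero-bit convention used here, a set bit marks an empty queue
and a clear bit a non-empty one, so the returned index is the best
(numerically lowest) runnable priority. A sketch of the intended use,
borrowing the prio_array/runqueue names introduced later in this patch
(rq is assumed to point at the current CPU's runqueue):

  prio_array_t *array = rq->active;
  int idx = sched_find_first_zero_bit(array->bitmap);
  list_t *queue = array->queue + idx;
  task_t *next = list_entry(queue->next, task_t, run_list);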
/*
* possibly do the LDT unload here?
*/
{
struct mm_struct *active_mm;
int state;
+ char __cacheline_padding[24];
};
extern struct tlb_state cpu_tlbstate[NR_CPUS];
extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_send_reschedule(int cpu);
+extern void smp_send_reschedule_all(void);
extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void);
* so this is correct in the x86 case.
*/
-#define smp_processor_id() (current->processor)
+#define smp_processor_id() (current->cpu)
static __inline int hard_smp_processor_id(void)
{
#define NO_PROC_ID 0xFF /* No processor magic marker */
-/*
- * This magic constant controls our willingness to transfer
- * a process across CPUs. Such a transfer incurs misses on the L1
- * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
- * gut feeling is this will vary by board in value. For a board
- * with separate L2 cache it probably depends also on the RSS, and
- * for a board with shared L2 cache it ought to decay fast as other
- * processes are run.
- */
-
-#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
-
#endif
#endif
/*
* Release global kernel lock and global interrupt lock
*/
-#define release_kernel_lock(task, cpu) \
-do { \
- if (task->lock_depth >= 0) \
- spin_unlock(&kernel_flag); \
- release_irqlock(cpu); \
- __sti(); \
+#define release_kernel_lock(task, cpu) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) { \
+ spin_unlock(&kernel_flag); \
+ release_irqlock(cpu); \
+ __sti(); \
+ } \
} while (0)
/*
* Re-acquire the kernel lock
*/
-#define reacquire_kernel_lock(task) \
-do { \
- if (task->lock_depth >= 0) \
- spin_lock(&kernel_flag); \
+#define reacquire_kernel_lock(task) \
+do { \
+ if (unlikely(task->lock_depth >= 0)) \
+ spin_lock(&kernel_flag); \
} while (0)
extern void sync_dev(kdev_t);
extern int fsync_dev(kdev_t);
extern int fsync_super(struct super_block *);
-extern int fsync_no_super(kdev_t);
+extern int fsync_no_super(struct block_device *);
extern void sync_inodes_sb(struct super_block *);
extern int osync_inode_buffers(struct inode *);
extern int osync_inode_data_buffers(struct inode *);
extern struct file * get_empty_filp(void);
extern void file_move(struct file *f, struct list_head *list);
extern struct buffer_head * get_hash_table(kdev_t, sector_t, int);
-extern struct buffer_head * getblk(kdev_t, sector_t, int);
+extern struct buffer_head * __getblk(struct block_device *, sector_t, int);
+static inline struct buffer_head * getblk(kdev_t dev, sector_t block, int size)
+{
+ struct block_device *bdev;
+ struct buffer_head *bh;
+ bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev) {
+ printk("No block device for %s\n", bdevname(dev));
+ BUG();
+ }
+ bh = __getblk(bdev, block, size);
+ atomic_dec(&bdev->bd_count);
+ return bh;
+}
extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int submit_bh(int, struct buffer_head *);
struct bio;
extern int set_blocksize(kdev_t, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
-extern struct buffer_head * bread(kdev_t, int, int);
+extern struct buffer_head * __bread(struct block_device *, int, int);
+static inline struct buffer_head * bread(kdev_t dev, int block, int size)
+{
+ struct block_device *bdev;
+ struct buffer_head *bh;
+ bdev = bdget(kdev_t_to_nr(dev));
+ if (!bdev) {
+ printk("No block device for %s\n", bdevname(dev));
+ BUG();
+ }
+ bh = __bread(bdev, block, size);
+ atomic_dec(&bdev->bd_count);
+ return bh;
+}
static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
{
- return bread(sb->s_dev, block, sb->s_blocksize);
+ return __bread(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
{
- return getblk(sb->s_dev, block, sb->s_blocksize);
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
}
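
The net effect of this hunk: converted callers (ext3, JBD, the
super_block helpers above) pass a struct block_device directly, while
unconverted callers keep the old kdev_t interface through inline
wrappers that pay an extra bdget() lookup. A sketch of the two call
styles, assuming a filesystem that holds a super_block sb:

  struct buffer_head *bh;

  bh = bread(sb->s_dev, 1, sb->s_blocksize);    /* legacy kdev_t path */
  if (bh)
      brelse(bh);

  bh = __bread(sb->s_bdev, 1, sb->s_blocksize); /* converted path */
  if (bh)
      brelse(bh);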
static inline struct buffer_head * sb_get_hash_table(struct super_block *sb, int block)
{
/* Device, blocksize and starting block offset for the location
* where we store the journal. */
- kdev_t j_dev;
+ struct block_device * j_dev;
int j_blocksize;
unsigned int j_blk_offset;
/* Device which holds the client fs. For internal journal this
* will be equal to j_dev. */
- kdev_t j_fs_dev;
+ struct block_device * j_fs_dev;
/* Total maximum capacity of the journal region on disk. */
unsigned int j_maxlen;
extern void journal_lock_updates (journal_t *);
extern void journal_unlock_updates (journal_t *);
-extern journal_t * journal_init_dev(kdev_t dev, kdev_t fs_dev,
+extern journal_t * journal_init_dev(struct block_device *bdev,
+ struct block_device *fs_dev,
int start, int len, int bsize);
extern journal_t * journal_init_inode (struct inode *);
extern int journal_update_format (journal_t *);
unsigned int ipackets, opackets;
unsigned int ierrors, oerrors;
unsigned int collisions;
- unsigned int context_swtch;
};
extern struct kernel_stat kstat;
+extern unsigned long nr_context_switches(void);
+
#if !defined(CONFIG_ARCH_S390)
/*
* Number of interrupts per specific IRQ source, since bootup
struct list_head *next, *prev;
};
+typedef struct list_head list_t;
+
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
#define CT_TO_SECS(x) ((x) / HZ)
#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
-extern int nr_running, nr_threads;
+extern int nr_threads;
extern int last_pid;
+extern unsigned long nr_running(void);
#include <linux/fs.h>
#include <linux/time.h>
#define SCHED_FIFO 1
#define SCHED_RR 2
-/*
- * This is an additional bit set when we want to
- * yield the CPU for one re-schedule..
- */
-#define SCHED_YIELD 0x10
-
struct sched_param {
int sched_priority;
};
* a separate lock).
*/
extern rwlock_t tasklist_lock;
-extern spinlock_t runqueue_lock;
extern spinlock_t mmlist_lock;
extern void sched_init(void);
extern void update_one_process(struct task_struct *p, unsigned long user,
unsigned long system, int cpu);
extern void expire_task(struct task_struct *p);
+extern void idle_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
extern struct user_struct root_user;
#define INIT_USER (&root_user)
+typedef struct task_struct task_t;
+typedef struct prio_array prio_array_t;
+
struct task_struct {
/*
* offsets of these are hardcoded elsewhere - touch with care
int lock_depth; /* Lock depth */
-/*
- * offset 32 begins here on 32-bit platforms. We keep
- * all fields in a single cacheline that are needed for
- * the goodness() loop in schedule().
- */
- unsigned long dyn_prio;
- long nice;
- unsigned long policy;
- struct mm_struct *mm;
- int processor;
/*
- * cpus_runnable is ~0 if the process is not running on any
- * CPU. It's (1 << cpu) if it's running on a CPU. This mask
- * is updated under the runqueue lock.
- *
- * To determine whether a process might run on a CPU, this
- * mask is AND-ed with cpus_allowed.
+ * offset 32 begins here on 32-bit platforms.
*/
- unsigned long cpus_runnable, cpus_allowed;
- /*
- * (only the 'next' pointer fits into the cacheline, but
- * that's just fine.)
- */
- struct list_head run_list;
- long time_slice;
- /* recalculation loop checkpoint */
- unsigned long rcl_last;
+ unsigned int cpu;
+ int prio;
+ long __nice;
+ list_t run_list;
+ prio_array_t *array;
+
+ unsigned int time_slice;
+ unsigned long sleep_timestamp, run_timestamp;
+
+ #define SLEEP_HIST_SIZE 4
+ int sleep_hist[SLEEP_HIST_SIZE];
+ int sleep_idx;
+
+ unsigned long policy;
+ unsigned long cpus_allowed;
struct task_struct *next_task, *prev_task;
- struct mm_struct *active_mm;
+
+ struct mm_struct *mm, *active_mm;
struct list_head local_pages;
+
unsigned int allocation_order, nr_local_pages;
/* task state */
*/
#define _STK_LIM (8*1024*1024)
-#define MAX_DYNPRIO 40
-#define DEF_TSLICE (6 * HZ / 100)
-#define MAX_TSLICE (20 * HZ / 100)
-#define DEF_NICE (0)
+/*
+ * RT priorities go from 0 to 99, but internally we max
+ * them out at 128 to make it easier to search the
+ * scheduler bitmap.
+ */
+#define MAX_RT_PRIO 128
+/*
+ * The lower the priority of a process, the more likely it is
+ * to run. Priority of a process goes from 0 to 167. The 0-99
+ * priority range is allocated to RT tasks, the 128-167 range
+ * is for SCHED_OTHER tasks.
+ */
+#define MAX_PRIO (MAX_RT_PRIO+40)
+#define DEF_USER_NICE 0
+
+/*
+ * Scales user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ 128 ... 167 (MAX_PRIO-1) ]
+ *
+ * User-nice value of -20 == static priority 128, and
+ * user-nice value 19 == static priority 167. The lower
+ * the priority value, the higher the task's priority.
+ *
+ * Note that while static priority cannot go below 128
+ * (MAX_RT_PRIO), the priority of an RT task can go as
+ * low as 0.
+ */
+#define NICE_TO_PRIO(n) (MAX_PRIO-1 + (n) - 19)
+
+#define DEF_PRIO NICE_TO_PRIO(DEF_USER_NICE)
+/*
+ * Default timeslice is 90 msecs, maximum is 150 msecs.
+ * Minimum timeslice is 20 msecs.
+ */
+#define MIN_TIMESLICE ( 20 * HZ / 1000)
+#define MAX_TIMESLICE (150 * HZ / 1000)
+
+#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
+#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+
+/*
+ * PRIO_TO_TIMESLICE scales priority values [ 128 ... 167 ]
+ * to initial time slice values from MAX_TIMESLICE (150 msec)
+ * down to MIN_TIMESLICE (20 msec).
+ *
+ * The higher a process's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority process gets MIN_TIMESLICE worth of execution time.
+ */
+#define PRIO_TO_TIMESLICE(p) \
+ ((( (MAX_USER_PRIO-1-USER_PRIO(p))*(MAX_TIMESLICE-MIN_TIMESLICE) + \
+ MAX_USER_PRIO-1) / MAX_USER_PRIO) + MIN_TIMESLICE)
+
+#define RT_PRIO_TO_TIMESLICE(p) \
+ ((( (MAX_RT_PRIO-(p)-1)*(MAX_TIMESLICE-MIN_TIMESLICE) + \
+ MAX_RT_PRIO-1) / MAX_RT_PRIO) + MIN_TIMESLICE)
+
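Plugging the default nice value through these macros (assuming
HZ == 100, so one tick is 10 msecs) reproduces the 90 msec default
quoted above:

  /*
   * NICE_TO_PRIO(0)        = 167 + 0 - 19   = 148 (DEF_PRIO)
   * USER_PRIO(148)         = 148 - 128      = 20
   * MIN_TIMESLICE          = 20 * 100/1000  = 2 ticks
   * MAX_TIMESLICE          = 150 * 100/1000 = 15 ticks
   * PRIO_TO_TIMESLICE(148) = ((40-1-20)*(15-2) + 39)/40 + 2
   *                        = (247 + 39)/40 + 2
   *                        = 9 ticks = 90 msecs
   */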
+extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
+extern void set_user_nice(task_t *p, long nice);
+asmlinkage long sys_sched_yield(void);
+#define yield() sys_sched_yield()
/*
* The default (Linux) execution domain.
addr_limit: KERNEL_DS, \
exec_domain: &default_exec_domain, \
lock_depth: -1, \
- dyn_prio: 0, \
- nice: DEF_NICE, \
+ __nice: DEF_USER_NICE, \
policy: SCHED_OTHER, \
+ cpus_allowed: -1, \
mm: NULL, \
active_mm: &init_mm, \
- cpus_runnable: -1, \
- cpus_allowed: -1, \
- run_list: { NULL, NULL }, \
- rcl_last: 0, \
- time_slice: DEF_TSLICE, \
+ run_list: LIST_HEAD_INIT(tsk.run_list), \
+ time_slice: PRIO_TO_TIMESLICE(DEF_PRIO), \
next_task: &tsk, \
prev_task: &tsk, \
p_opptr: &tsk, \
return p;
}
-#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)
-
-static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
-{
- tsk->processor = cpu;
- tsk->cpus_runnable = 1UL << cpu;
-}
-
-static inline void task_release_cpu(struct task_struct *tsk)
-{
- tsk->cpus_runnable = ~0UL;
-}
-
/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
extern void free_uid(struct user_struct *);
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
+extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
#define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
extern void reparent_to_init(void);
extern void daemonize(void);
+extern task_t *child_reaper;
extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
+extern void wait_task_inactive(task_t * p);
+extern void kick_if_running(task_t * p);
+
#define __wait_event(wq, condition) \
do { \
wait_queue_t __wait; \
#define next_thread(p) \
list_entry((p)->thread_group.next, struct task_struct, thread_group)
-static inline void del_from_runqueue(struct task_struct * p)
-{
- nr_running--;
- list_del(&p->run_list);
- p->run_list.next = NULL;
-}
-
-static inline int task_on_runqueue(struct task_struct *p)
-{
- return (p->run_list.next != NULL);
-}
-
static inline void unhash_process(struct task_struct *p)
{
- if (task_on_runqueue(p)) BUG();
write_lock_irq(&tasklist_lock);
nr_threads--;
unhash_pid(p);
#define cpu_number_map(cpu) 0
#define smp_call_function(func,info,retry,wait) ({ 0; })
#define cpu_online_map 1
+static inline void smp_send_reschedule(int cpu) { }
+static inline void smp_send_reschedule_all(void) { }
#endif
#endif
mdelay(ms);
}
-typedef struct {
- __u8 requesttype;
- __u8 request;
- __u16 value;
- __u16 index;
- __u16 length;
-} devrequest __attribute__ ((packed));
+/**
+ * struct usb_ctrlrequest - structure used to make USB device control requests easier to create and decode
+ * @bRequestType: matches the USB bmRequestType field
+ * @bRequest: matches the USB bRequest field
+ * @wValue: matches the USB wValue field
+ * @wIndex: matches the USB wIndex field
+ * @wLength: matches the USB wLength field
+ *
+ * This structure is used to send control requests to a USB device. It matches
+ * the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the
+ * USB spec for a fuller description of the different fields, and what they are
+ * used for.
+ */
+struct usb_ctrlrequest {
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+} __attribute__ ((packed));
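
For code that builds the setup packet by hand (as the HID keyboard
hunk above does), a hypothetical SET_CONFIGURATION request would be
filled in as follows; the wire format is little-endian, hence
cpu_to_le16() on the 16-bit fields:

  struct usb_ctrlrequest setup = {
      bRequestType: USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
      bRequest:     USB_REQ_SET_CONFIGURATION,
      wValue:       cpu_to_le16(1),   /* select configuration 1 */
      wIndex:       cpu_to_le16(0),
      wLength:      cpu_to_le16(0),   /* no data stage */
  };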
/*
* USB device number allocation bitmap. There's one bitmap
/* usbdevfs ioctl codes */
struct usbdevfs_ctrltransfer {
- __u8 requesttype;
- __u8 request;
- __u16 value;
- __u16 index;
- __u16 length;
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
__u32 timeout; /* in milliseconds */
void *data;
};
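
The usbdevfs structure is user-visible ABI, so the renamed fields show
up in user space as well. A hypothetical user-space control transfer
(fd is an open /proc/bus/usb device file, buf an 8-byte buffer):

  struct usbdevfs_ctrltransfer ctrl = {
      bRequestType: 0x80,      /* IN | standard | device */
      bRequest:     0x06,      /* GET_DESCRIPTOR */
      wValue:       0x0100,    /* device descriptor, index 0 */
      wIndex:       0,
      wLength:      8,
      timeout:      1000,      /* milliseconds */
      data:         buf,
  };

  if (ioctl(fd, USBDEVFS_CONTROL, &ctrl) < 0)
      perror("USBDEVFS_CONTROL");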
struct hci_usb {
struct usb_device *udev;
- devrequest dev_req;
+ struct usb_ctrlrequest dev_req;
struct urb *ctrl_urb;
struct urb *intr_urb;
struct urb *read_urb;
pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
if (pid > 0) {
- while (pid != wait(&i)) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ while (pid != wait(&i))
+ yield();
}
sys_mount("..", ".", NULL, MS_MOVE, NULL);
/* Get other processors into their bootup holding patterns. */
smp_boot_cpus();
wait_init_idle = cpu_online_map;
- clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! */
smp_threads_ready=1;
smp_commence();
-
- /* Wait for the other cpus to set up their idle processes */
- printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle);
- while (wait_init_idle) {
- cpu_relax();
- barrier();
- }
- printk("All processors have done init_idle\n");
}
#endif
{
kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
unlock_kernel();
- current->need_resched = 1;
+ init_idle(); /* This will also wait for all other CPUs */
cpu_idle();
}
#include <linux/mm.h>
#include <asm/uaccess.h>
+unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
+
kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
/* Note: never hold tasklist_lock while spinning for this one */
static void release_task(struct task_struct * p)
{
- if (p != current) {
+ unsigned long flags;
+
+ if (p == current)
+ BUG();
#ifdef CONFIG_SMP
- /*
- * Wait to make sure the process isn't on the
- * runqueue (active on some other CPU still)
- */
- for (;;) {
- task_lock(p);
- if (!task_has_cpu(p))
- break;
- task_unlock(p);
- do {
- cpu_relax();
- barrier();
- } while (task_has_cpu(p));
- }
- task_unlock(p);
+ wait_task_inactive(p);
#endif
- atomic_dec(&p->user->processes);
- free_uid(p->user);
- unhash_process(p);
-
- release_thread(p);
- current->cmin_flt += p->min_flt + p->cmin_flt;
- current->cmaj_flt += p->maj_flt + p->cmaj_flt;
- current->cnswap += p->nswap + p->cnswap;
- /*
- * Potentially available timeslices are retrieved
- * here - this way the parent does not get penalized
- * for creating too many processes.
- *
- * (this cannot be used to artificially 'generate'
- * timeslices, because any timeslice recovered here
- * was given away by the parent in the first place.)
- */
- current->time_slice += p->time_slice;
- if (current->time_slice > MAX_TSLICE)
- current->time_slice = MAX_TSLICE;
- p->pid = 0;
- free_task_struct(p);
- } else {
- printk("task releasing itself\n");
- }
+ atomic_dec(&p->user->processes);
+ free_uid(p->user);
+ unhash_process(p);
+
+ release_thread(p);
+ current->cmin_flt += p->min_flt + p->cmin_flt;
+ current->cmaj_flt += p->maj_flt + p->cmaj_flt;
+ current->cnswap += p->nswap + p->cnswap;
+ /*
+ * Potentially available timeslices are retrieved
+ * here - this way the parent does not get penalized
+ * for creating too many processes.
+ *
+ * (this cannot be used to artificially 'generate'
+ * timeslices, because any timeslice recovered here
+ * was given away by the parent in the first place.)
+ */
+ __save_flags(flags);
+ __cli();
+ current->time_slice += p->time_slice;
+ if (current->time_slice > MAX_TIMESLICE)
+ current->time_slice = MAX_TIMESLICE;
+ __restore_flags(flags);
+
+ p->pid = 0;
+ free_task_struct(p);
}
/*
return retval;
}
+/**
+ * reparent_to_init() - Reparent the calling kernel thread to the init task.
+ *
+ * If a kernel thread is launched as a result of a system call, or if
+ * it ever exits, it should generally reparent itself to init so that
+ * it is correctly cleaned up on exit.
+ *
+ * The various task state such as scheduling policy and priority may have
+ * been inherited from a user process, so we reset them to sane values here.
+ *
+ * NOTE that reparent_to_init() gives the caller full capabilities.
+ */
+void reparent_to_init(void)
+{
+ write_lock_irq(&tasklist_lock);
+
+ /* Reparent to init */
+ REMOVE_LINKS(current);
+ current->p_pptr = child_reaper;
+ current->p_opptr = child_reaper;
+ SET_LINKS(current);
+
+ /* Set the exit signal to SIGCHLD so we signal init on exit */
+ current->exit_signal = SIGCHLD;
+
+ current->ptrace = 0;
+ if ((current->policy == SCHED_OTHER) &&
+ (current->__nice < DEF_USER_NICE))
+ set_user_nice(current, DEF_USER_NICE);
+ /* cpus_allowed? */
+ /* rt_priority? */
+ /* signals? */
+ current->cap_effective = CAP_INIT_EFF_SET;
+ current->cap_inheritable = CAP_INIT_INH_SET;
+ current->cap_permitted = CAP_FULL_SET;
+ current->keep_capabilities = 0;
+ memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
+ current->user = INIT_USER;
+
+ write_unlock_irq(&tasklist_lock);
+}
+
+/*
+ * Put all the gunge required to become a kernel thread without
+ * attached user resources in one place where it belongs.
+ */
+
+void daemonize(void)
+{
+ struct fs_struct *fs;
+
+
+ /*
+ * If we were started as result of loading a module, close all of the
+ * user space pages. We don't need them, and if we didn't close them
+ * they would be locked into memory.
+ */
+ exit_mm(current);
+
+ current->session = 1;
+ current->pgrp = 1;
+ current->tty = NULL;
+
+ /* Become as one with the init task */
+
+ exit_fs(current); /* current->fs->count--; */
+ fs = init_task.fs;
+ current->fs = fs;
+ atomic_inc(&fs->count);
+ exit_files(current);
+ current->files = init_task.files;
+ atomic_inc(¤t->files->count);
+}
+
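A typical kernel-thread prologue built from these two helpers might
read as follows (a hypothetical thread; the name string is made up):

  static int my_thread(void *unused)
  {
      daemonize();             /* shed user-space resources */
      reparent_to_init();      /* let init reap us on exit */
      sprintf(current->comm, "my_thread");

      for (;;) {
          /* ... do the actual work, sleep in between ... */
      }
      return 0;
  }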
/*
* When we die, we re-parent all our children.
* Try to give them to another thread in our process
/* The idle threads do not count.. */
int nr_threads;
-int nr_running;
int max_threads;
unsigned long total_forks; /* Handle normal Linux uptimes. */
struct task_struct *pidhash[PIDHASH_SZ];
+rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
+
void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
struct pt_regs *regs, unsigned long stack_size)
{
int retval;
+ unsigned long flags;
struct task_struct *p;
struct completion vfork;
copy_flags(clone_flags, p);
p->pid = get_pid(clone_flags);
- p->run_list.next = NULL;
- p->run_list.prev = NULL;
+ INIT_LIST_HEAD(&p->run_list);
p->p_cptr = NULL;
init_waitqueue_head(&p->wait_chldexit);
#ifdef CONFIG_SMP
{
int i;
- p->cpus_runnable = ~0UL;
- p->processor = current->processor;
+
+ p->cpu = smp_processor_id();
+
/* ?? should we just memset this ?? */
for(i = 0; i < smp_num_cpus; i++)
p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0;
spin_lock_init(&p->sigmask_lock);
}
#endif
+ p->array = NULL;
p->lock_depth = -1; /* -1 = no lock */
p->start_time = jiffies;
p->pdeath_signal = 0;
/*
- * "share" dynamic priority between parent and child, thus the
- * total amount of dynamic priorities in the system doesnt change,
- * more scheduling fairness. This is only important in the first
- * timeslice, on the long run the scheduling behaviour is unchanged.
+ * Share the timeslice between parent and child, thus the
+ * total amount of pending timeslices in the system doesn't change,
+ * resulting in more scheduling fairness.
*/
+ __save_flags(flags);
+ __cli();
p->time_slice = (current->time_slice + 1) >> 1;
current->time_slice >>= 1;
- if (!current->time_slice)
- current->need_resched = 1;
+ if (!current->time_slice) {
+ /*
+ * This case is rare, it happens when the parent has only
+ * a single jiffy left from its timeslice. Taking the
+ * runqueue lock is not a problem.
+ */
+ current->time_slice = 1;
+ expire_task(current);
+ }
+ p->sleep_timestamp = p->run_timestamp = jiffies;
+ memset(p->sleep_hist, 0, sizeof(p->sleep_hist[0])*SLEEP_HIST_SIZE);
+ p->sleep_idx = 0;
+ __restore_flags(flags);
/*
* Ok, add it to the run-queues and make it
if (p->ptrace & PT_PTRACED)
send_sig(SIGSTOP, p, 1);
- wake_up_process(p); /* do this last */
+#define RUN_CHILD_FIRST 1
+#if RUN_CHILD_FIRST
+ wake_up_forked_process(p); /* do this last */
+#else
+ wake_up_process(p); /* do this last */
+#endif
++total_forks;
if (clone_flags & CLONE_VFORK)
wait_for_completion(&vfork);
+#if RUN_CHILD_FIRST
+ else
+ /*
+ * Let the child process run first, to avoid most of the
+ * COW overhead when the child exec()s afterwards.
+ */
+ current->need_resched = 1;
+#endif
fork_out:
return retval;
EXPORT_SYMBOL(set_blocksize);
EXPORT_SYMBOL(sb_set_blocksize);
EXPORT_SYMBOL(sb_min_blocksize);
-EXPORT_SYMBOL(getblk);
+EXPORT_SYMBOL(__getblk);
EXPORT_SYMBOL(cdget);
EXPORT_SYMBOL(cdput);
EXPORT_SYMBOL(bdget);
EXPORT_SYMBOL(bdput);
-EXPORT_SYMBOL(bread);
+EXPORT_SYMBOL(__bread);
EXPORT_SYMBOL(__brelse);
EXPORT_SYMBOL(__bforget);
EXPORT_SYMBOL(ll_rw_block);
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
EXPORT_SYMBOL(schedule);
EXPORT_SYMBOL(schedule_timeout);
+EXPORT_SYMBOL(sys_sched_yield);
+EXPORT_SYMBOL(set_user_nice);
EXPORT_SYMBOL(jiffies);
EXPORT_SYMBOL(xtime);
EXPORT_SYMBOL(do_gettimeofday);
EXPORT_SYMBOL(kstat);
EXPORT_SYMBOL(nr_running);
+EXPORT_SYMBOL(nr_context_switches);
/* misc */
EXPORT_SYMBOL(panic);
#include <linux/module.h>
#include <linux/interrupt.h> /* For in_interrupt() */
#include <linux/config.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
if (child->state != TASK_STOPPED)
return -ESRCH;
#ifdef CONFIG_SMP
- /* Make sure the child gets off its CPU.. */
- for (;;) {
- task_lock(child);
- if (!task_has_cpu(child))
- break;
- task_unlock(child);
- do {
- if (child->state != TASK_STOPPED)
- return -ESRCH;
- barrier();
- cpu_relax();
- } while (task_has_cpu(child));
- }
- task_unlock(child);
+ wait_task_inactive(child);
#endif
}
* 1998-12-28 Implemented better SMP scheduling by Ingo Molnar
*/
-/*
- * 'sched.c' is the main kernel file. It contains scheduling primitives
- * (sleep_on, wakeup, schedule etc) as well as a number of simple system
- * call functions (type getpid()), which just extract a field from
- * current-task
- */
-
-#include <linux/config.h>
#include <linux/mm.h>
+#include <linux/nmi.h>
#include <linux/init.h>
+#include <asm/uaccess.h>
#include <linux/smp_lock.h>
-#include <linux/nmi.h>
#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/completion.h>
-#include <linux/prefetch.h>
-#include <linux/compiler.h>
-
-#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-extern void timer_bh(void);
-extern void tqueue_bh(void);
-extern void immediate_bh(void);
-
-/*
- * scheduler variables
- */
+#define BITMAP_SIZE ((MAX_PRIO+7)/8)
-unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
+typedef struct runqueue runqueue_t;
-extern void mem_use(void);
+struct prio_array {
+ int nr_active;
+ spinlock_t *lock;
+ runqueue_t *rq;
+ char bitmap[BITMAP_SIZE];
+ list_t queue[MAX_PRIO];
+};
/*
- * Scheduling quanta.
+ * This is the main, per-CPU runqueue data structure.
*
- * NOTE! The unix "nice" value influences how long a process
- * gets. The nice value ranges from -20 to +19, where a -20
- * is a "high-priority" task, and a "+10" is a low-priority
- * task. The default time slice for zero-nice tasks will be 37ms.
- */
-#define NICE_RANGE 40
-#define MIN_NICE_TSLICE 10000
-#define MAX_NICE_TSLICE 90000
-#define TASK_TIMESLICE(p) ((int) ts_table[19 - (p)->nice])
-
-static unsigned char ts_table[NICE_RANGE];
-
-#define MM_AFFINITY_BONUS 1
-
-/*
- * Init task must be ok at boot for the ix86 as we will check its signals
- * via the SMP irq return path.
- */
-
-struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
-
-/*
- * The tasklist_lock protects the linked list of processes.
+ * Locking rule: code that needs to lock multiple runqueues
+ * (such as the load balancing or the process migration code)
+ * must order its lock acquisitions by rq->cpu.
*
- * The runqueue_lock locks the parts that actually access
- * and change the run-queues, and have to be interrupt-safe.
- *
- * If both locks are to be concurrently held, the runqueue_lock
- * nests inside the tasklist_lock.
- *
- * task->alloc_lock nests inside tasklist_lock.
+ * The RT event id is used to avoid calling into the RT scheduler
+ * if there is a RT task active in an SMP system but there is no
+ * RT scheduling activity otherwise.
*/
-spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */
-rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
-
-static LIST_HEAD(runqueue_head);
-
-static unsigned long rcl_curr;
+static struct runqueue {
+ int cpu;
+ spinlock_t lock;
+ unsigned long nr_running, nr_switches, last_rt_event;
+ task_t *curr, *idle;
+ prio_array_t *active, *expired, arrays[2];
+ char __pad [SMP_CACHE_BYTES];
+} runqueues [NR_CPUS] __cacheline_aligned;
+
+#define this_rq() (runqueues + smp_processor_id())
+#define task_rq(p) (runqueues + (p)->cpu)
+#define cpu_rq(cpu) (runqueues + (cpu))
+#define cpu_curr(cpu) (runqueues[(cpu)].curr)
+#define rt_task(p) ((p)->policy != SCHED_OTHER)
+
+#define lock_task_rq(rq,p,flags) \
+do { \
+repeat_lock_task: \
+ rq = task_rq(p); \
+ spin_lock_irqsave(&rq->lock, flags); \
+ if (unlikely((rq)->cpu != (p)->cpu)) { \
+ spin_unlock_irqrestore(&rq->lock, flags); \
+ goto repeat_lock_task; \
+ } \
+} while (0)
+
+#define unlock_task_rq(rq,p,flags) \
+ spin_unlock_irqrestore(&rq->lock, flags)
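
The ordering rule from the comment above (acquisitions ordered by
rq->cpu) is not spelled out in code in this hunk; a sketch of what a
two-runqueue lock would have to look like to honor it:

  static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
  {
      if (rq1 == rq2)
          spin_lock_irq(&rq1->lock);
      else if (rq1->cpu < rq2->cpu) {
          spin_lock_irq(&rq1->lock);
          spin_lock(&rq2->lock);
      } else {
          spin_lock_irq(&rq2->lock);
          spin_lock(&rq1->lock);
      }
  }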
/*
- * We align per-CPU scheduling data on cacheline boundaries,
- * to prevent cacheline ping-pong.
+ * Adding/removing a task to/from a priority array:
*/
-static union {
- struct schedule_data {
- struct task_struct * curr;
- cycles_t last_schedule;
- } schedule_data;
- char __pad [SMP_CACHE_BYTES];
-} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
-
-#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
-#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule
-
-struct kernel_stat kstat;
-extern struct task_struct *child_reaper;
-
-#ifdef CONFIG_SMP
-
-#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
-#define can_schedule(p,cpu) \
- ((p)->cpus_runnable & (p)->cpus_allowed & (1 << cpu))
-
-#else
-
-#define idle_task(cpu) (&init_task)
-#define can_schedule(p,cpu) (1)
-
-#endif
+static inline void dequeue_task(struct task_struct *p, prio_array_t *array)
+{
+ array->nr_active--;
+ list_del_init(&p->run_list);
+ if (list_empty(array->queue + p->prio))
+ __set_bit(p->prio, array->bitmap);
+}
-void scheduling_functions_start_here(void) { }
+static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
+{
+ list_add_tail(&p->run_list, array->queue + p->prio);
+ __clear_bit(p->prio, array->bitmap);
+ array->nr_active++;
+ p->array = array;
+}
/*
- * This is the function that decides how desirable a process is..
- * You can weigh different processes against each other depending
- * on what CPU they've run on lately etc to try to handle cache
- * and TLB miss penalties.
+ * This is the per-process load estimator. Processes that generate
+ * more load than the system can handle get a priority penalty.
*
- * Return values:
- * -1000: never select this
- * 0: out of time, recalculate counters (but it might still be
- * selected)
- * +ve: "goodness" value (the larger, the better)
- * +1000: realtime process, select this.
+ * The estimator uses a 4-entry load-history ringbuffer which is
+ * updated whenever a task is moved to/from the runqueue. The load
+ * estimate is also updated from the timer tick to get an accurate
+ * estimation of currently executing tasks as well.
*/
+#define NEXT_IDX(idx) (((idx) + 1) % SLEEP_HIST_SIZE)
-static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
+static inline void update_sleep_avg_deactivate(task_t *p)
{
- int weight;
-
- /*
- * select the current process after every other
- * runnable process, but before the idle thread.
- * Also, dont trigger a counter recalculation.
- */
- weight = -1;
- if (p->policy & SCHED_YIELD)
- goto out;
-
- /*
- * Non-RT process - normal case first.
- */
- if (p->policy == SCHED_OTHER) {
- /*
- * Give the process a first-approximation goodness value
- * according to the number of clock-ticks it has left.
- *
- * Don't do any other calculations if the time slice is
- * over..
- */
- if (!p->time_slice)
- return 0;
-
- weight = p->dyn_prio + 1;
+ unsigned int idx;
+ unsigned long j = jiffies, last_sample = p->run_timestamp / HZ,
+ curr_sample = j / HZ, delta = curr_sample - last_sample;
+
+ if (unlikely(delta)) {
+ if (delta < SLEEP_HIST_SIZE) {
+ for (idx = 0; idx < delta; idx++) {
+ p->sleep_idx++;
+ p->sleep_idx %= SLEEP_HIST_SIZE;
+ p->sleep_hist[p->sleep_idx] = 0;
+ }
+ } else {
+ for (idx = 0; idx < SLEEP_HIST_SIZE; idx++)
+ p->sleep_hist[idx] = 0;
+ p->sleep_idx = 0;
+ }
+ }
+ p->sleep_timestamp = j;
+}
-#ifdef CONFIG_SMP
- /* Give a largish advantage to the same processor... */
- /* (this is equivalent to penalizing other processors) */
- if (p->processor == this_cpu)
- weight += PROC_CHANGE_PENALTY;
+#if SLEEP_HIST_SIZE != 4
+# error update this code.
#endif
- /* .. and a slight advantage to the current MM */
- if (p->mm == this_mm || !p->mm)
- weight += MM_AFFINITY_BONUS;
- weight += 20 - p->nice;
- goto out;
- }
+static inline unsigned int get_sleep_avg(task_t *p, unsigned long j)
+{
+ unsigned int sum;
- /*
- * Realtime process, select the first one on the
- * runqueue (taking priorities within processes
- * into account).
- */
- weight = 1000 + p->rt_priority;
-out:
- return weight;
+ sum = p->sleep_hist[0];
+ sum += p->sleep_hist[1];
+ sum += p->sleep_hist[2];
+ sum += p->sleep_hist[3];
+
+ return sum * HZ / ((SLEEP_HIST_SIZE-1)*HZ + (j % HZ));
}
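
The denominator is the three completed seconds of history plus the
fraction of the current second, so the average is normalized to the
0..HZ range. A worked example, assuming HZ == 100:

  /*
   * sleep_hist = { 100, 100, 100, 0 }, queried at j % HZ == 50:
   *
   *   sum = 300
   *   avg = 300 * 100 / (3*100 + 50) = 85
   *
   * i.e. the task is rated as an ~85% sleeper.
   */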
-/*
- * the 'goodness value' of replacing a process on a given CPU.
- * positive value means 'replace', zero or negative means 'dont'.
- */
-static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
+static inline void update_sleep_avg_activate(task_t *p, unsigned long j)
{
- return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
+ unsigned int idx;
+ unsigned long delta_ticks, last_sample = p->sleep_timestamp / HZ,
+ curr_sample = j / HZ, delta = curr_sample - last_sample;
+
+ if (unlikely(delta)) {
+ if (delta < SLEEP_HIST_SIZE) {
+ p->sleep_hist[p->sleep_idx] += HZ - (p->sleep_timestamp % HZ);
+ p->sleep_idx++;
+ p->sleep_idx %= SLEEP_HIST_SIZE;
+
+ for (idx = 1; idx < delta; idx++) {
+ p->sleep_idx++;
+ p->sleep_idx %= SLEEP_HIST_SIZE;
+ p->sleep_hist[p->sleep_idx] = HZ;
+ }
+ } else {
+ for (idx = 0; idx < SLEEP_HIST_SIZE; idx++)
+ p->sleep_hist[idx] = HZ;
+ p->sleep_idx = 0;
+ }
+ p->sleep_hist[p->sleep_idx] = 0;
+ delta_ticks = j % HZ;
+ } else
+ delta_ticks = j - p->sleep_timestamp;
+ p->sleep_hist[p->sleep_idx] += delta_ticks;
+ p->run_timestamp = j;
}
-/*
- * This is ugly, but reschedule_idle() is very timing-critical.
- * We are called with the runqueue spinlock held and we must
- * not claim the tasklist_lock.
- */
-static FASTCALL(void reschedule_idle(struct task_struct * p));
-
-static void reschedule_idle(struct task_struct * p)
+static inline void activate_task(task_t *p, runqueue_t *rq)
{
-#ifdef CONFIG_SMP
- int this_cpu = smp_processor_id();
- struct task_struct *tsk, *target_tsk;
- int cpu, best_cpu, i, max_prio;
- cycles_t oldest_idle;
+ prio_array_t *array = rq->active;
+ unsigned long j = jiffies;
+ unsigned int sleep, load;
+ int penalty;
+ if (likely(p->run_timestamp == j))
+ goto enqueue;
/*
- * shortcut if the woken up task's last CPU is
- * idle now.
+ * Give the process a priority penalty if it has not slept often
+ * enough in the past. We scale the priority penalty according
+ * to the current load of the runqueue, and the 'load history'
+ * this process has. E.g. if the CPU has 3 processes running
+ * right now then a process that has slept more than two-thirds
+ * of the time is considered to be 'interactive'. The higher
+ * the load of the CPU, the easier it is for a process to
+ * get a non-interactivity penalty.
*/
- best_cpu = p->processor;
- if (can_schedule(p, best_cpu)) {
- tsk = idle_task(best_cpu);
- if (cpu_curr(best_cpu) == tsk) {
- int need_resched;
-send_now_idle:
- /*
- * If need_resched == -1 then we can skip sending
- * the IPI altogether, tsk->need_resched is
- * actively watched by the idle thread.
- */
- need_resched = tsk->need_resched;
- tsk->need_resched = 1;
- if ((best_cpu != this_cpu) && !need_resched)
- smp_send_reschedule(best_cpu);
- return;
- }
+#define MAX_PENALTY (MAX_USER_PRIO/3)
+ update_sleep_avg_activate(p, j);
+ sleep = get_sleep_avg(p, j);
+ load = HZ - sleep;
+ penalty = (MAX_PENALTY * load)/HZ;
+ if (!rt_task(p)) {
+ p->prio = NICE_TO_PRIO(p->__nice) + penalty;
+ if (p->prio > MAX_PRIO-1)
+ p->prio = MAX_PRIO-1;
}
+enqueue:
+ enqueue_task(p, array);
+ rq->nr_running++;
+}
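
Continuing that example, the penalty picked up at wakeup follows
directly (MAX_PENALTY is MAX_USER_PRIO/3 = 13):

  /*
   * sleep avg 85  ->  load    = 100 - 85 = 15
   *                   penalty = (13 * 15) / 100 = 1
   *
   * A task that never sleeps (load = 100) gets the full penalty of
   * 13, moving a nice-0 task from prio 148 to 161.
   */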
- /*
- * We know that the preferred CPU has a cache-affine current
- * process, lets try to find a new idle CPU for the woken-up
- * process. Select the least recently active idle CPU. (that
- * one will have the least active cache context.) Also find
- * the executing process which has the least priority.
- */
- oldest_idle = (cycles_t) -1;
- target_tsk = NULL;
- max_prio = 0;
-
- for (i = 0; i < smp_num_cpus; i++) {
- cpu = cpu_logical_map(i);
- if (!can_schedule(p, cpu))
- continue;
- tsk = cpu_curr(cpu);
- /*
- * We use the first available idle CPU. This creates
- * a priority list between idle CPUs, but this is not
- * a problem.
- */
- if (tsk == idle_task(cpu)) {
-#if defined(__i386__) && defined(CONFIG_SMP)
- /*
- * Check if two siblings are idle in the same
- * physical package. Use them if found.
- */
- if (smp_num_siblings == 2) {
- if (cpu_curr(cpu_sibling_map[cpu]) ==
- idle_task(cpu_sibling_map[cpu])) {
- oldest_idle = last_schedule(cpu);
- target_tsk = tsk;
- break;
- }
-
- }
-#endif
- if (last_schedule(cpu) < oldest_idle) {
- oldest_idle = last_schedule(cpu);
- target_tsk = tsk;
- }
- } else {
- if (oldest_idle == -1ULL) {
- int prio = preemption_goodness(tsk, p, cpu);
-
- if (prio > max_prio) {
- max_prio = prio;
- target_tsk = tsk;
- }
- }
- }
- }
- tsk = target_tsk;
- if (tsk) {
- if (oldest_idle != -1ULL) {
- best_cpu = tsk->processor;
- goto send_now_idle;
- }
- tsk->need_resched = 1;
- if (tsk->processor != this_cpu)
- smp_send_reschedule(tsk->processor);
- }
- return;
-
+static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
+{
+ rq->nr_running--;
+ dequeue_task(p, p->array);
+ p->array = NULL;
+ update_sleep_avg_deactivate(p);
+}
-#else /* UP */
- int this_cpu = smp_processor_id();
- struct task_struct *tsk;
+static inline void resched_task(task_t *p)
+{
+ int need_resched;
- tsk = cpu_curr(this_cpu);
- if (preemption_goodness(tsk, p, this_cpu) > 0)
- tsk->need_resched = 1;
-#endif
+ need_resched = p->need_resched;
+ wmb();
+ p->need_resched = 1;
+ if (!need_resched)
+ smp_send_reschedule(p->cpu);
}
+#ifdef CONFIG_SMP
+
/*
- * Careful!
- *
- * This has to add the process to the _beginning_ of the
- * run-queue, not the end. See the comment about "This is
- * subtle" in the scheduler proper..
+ * Wait for a process to unschedule. This is used by the exit() and
+ * ptrace() code.
*/
-static inline void add_to_runqueue(struct task_struct * p)
+void wait_task_inactive(task_t * p)
{
- p->dyn_prio += rcl_curr - p->rcl_last;
- p->rcl_last = rcl_curr;
- if (p->dyn_prio > MAX_DYNPRIO)
- p->dyn_prio = MAX_DYNPRIO;
- list_add(&p->run_list, &runqueue_head);
- nr_running++;
-}
+ unsigned long flags;
+ runqueue_t *rq;
-static inline void move_last_runqueue(struct task_struct * p)
-{
- list_del(&p->run_list);
- list_add_tail(&p->run_list, &runqueue_head);
+repeat:
+ rq = task_rq(p);
+ while (unlikely(rq->curr == p)) {
+ cpu_relax();
+ barrier();
+ }
+ lock_task_rq(rq, p, flags);
+ if (unlikely(rq->curr == p)) {
+ unlock_task_rq(rq, p, flags);
+ goto repeat;
+ }
+ unlock_task_rq(rq, p, flags);
}
-static inline void move_first_runqueue(struct task_struct * p)
+/*
+ * Kick the remote CPU if the task is running currently,
+ * this code is used by the signal code to signal tasks
+ * which are in user-mode as quickly as possible.
+ *
+ * (Note that we do this lockless - if the task does anything
+ * while the message is in flight then it will notice the
+ * sigpending condition anyway.)
+ */
+void kick_if_running(task_t * p)
{
- list_del(&p->run_list);
- list_add(&p->run_list, &runqueue_head);
+ if (p == task_rq(p)->curr)
+ resched_task(p);
}
+#endif
/*
* Wake up a process. Put it on the run-queue if it's not
* "current->state = TASK_RUNNING" to mark yourself runnable
* without the overhead of this.
*/
-static inline int try_to_wake_up(struct task_struct * p, int synchronous)
+static int try_to_wake_up(task_t * p, int synchronous)
{
unsigned long flags;
int success = 0;
+ runqueue_t *rq;
- /*
- * We want the common case fall through straight, thus the goto.
- */
- spin_lock_irqsave(&runqueue_lock, flags);
+ lock_task_rq(rq, p, flags);
p->state = TASK_RUNNING;
- if (task_on_runqueue(p))
- goto out;
- add_to_runqueue(p);
- if (!synchronous || !(p->cpus_allowed & (1 << smp_processor_id())))
- reschedule_idle(p);
- success = 1;
-out:
- spin_unlock_irqrestore(&runqueue_lock, flags);
+ if (!p->array) {
+ if (!rt_task(p) && synchronous && (smp_processor_id() < p->cpu)) {
+ spin_lock(&this_rq()->lock);
+ p->cpu = smp_processor_id();
+ activate_task(p, this_rq());
+ spin_unlock(&this_rq()->lock);
+ } else {
+ activate_task(p, rq);
+ if ((rq->curr == rq->idle) ||
+ (p->prio < rq->curr->prio))
+ resched_task(rq->curr);
+ }
+ success = 1;
+ }
+ unlock_task_rq(rq, p, flags);
return success;
}
-inline int wake_up_process(struct task_struct * p)
+inline int wake_up_process(task_t * p)
{
return try_to_wake_up(p, 0);
}
-static void process_timeout(unsigned long __data)
+void wake_up_forked_process(task_t * p)
{
- wake_up_process((struct task_struct *)__data);
-}
+ runqueue_t *rq = this_rq();
-/**
- * schedule_timeout - sleep until timeout
- * @timeout: timeout value in jiffies
- *
- * Make the current task sleep until @timeout jiffies have
- * elapsed. The routine will return immediately unless
- * the current task state has been set (see set_current_state()).
- *
- * You can set the task state as follows -
- *
- * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
- * pass before the routine returns. The routine will return 0
- *
- * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task. In this case the remaining time
- * in jiffies will be returned, or 0 if the timer expired in time
- *
- * The current task state is guaranteed to be TASK_RUNNING when this
- * routine returns.
- *
- * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
- * the CPU away without a bound on the timeout. In this case the return
- * value will be %MAX_SCHEDULE_TIMEOUT.
- *
- * In all cases the return value is guaranteed to be non-negative.
- */
-signed long schedule_timeout(signed long timeout)
-{
- struct timer_list timer;
- unsigned long expire;
-
- switch (timeout)
- {
- case MAX_SCHEDULE_TIMEOUT:
- /*
- * These two special cases are useful to be comfortable
- * in the caller. Nothing more. We could take
- * MAX_SCHEDULE_TIMEOUT from one of the negative value
- * but I' d like to return a valid offset (>=0) to allow
- * the caller to do everything it want with the retval.
- */
- schedule();
- goto out;
- default:
- /*
- * Another bit of PARANOID. Note that the retval will be
- * 0 since no piece of kernel is supposed to do a check
- * for a negative retval of schedule_timeout() (since it
- * should never happens anyway). You just have the printk()
- * that will tell you if something is gone wrong and where.
- */
- if (timeout < 0)
- {
- printk(KERN_ERR "schedule_timeout: wrong timeout "
- "value %lx from %p\n", timeout,
- __builtin_return_address(0));
- current->state = TASK_RUNNING;
- goto out;
- }
+ spin_lock_irq(&rq->lock);
+ p->state = TASK_RUNNING;
+ if (!rt_task(p)) {
+ p->prio += MAX_USER_PRIO/10;
+ if (p->prio > MAX_PRIO-1)
+ p->prio = MAX_PRIO-1;
}
+ activate_task(p, rq);
+ spin_unlock_irq(&rq->lock);
+}
- expire = timeout + jiffies;
-
- init_timer(&timer);
- timer.expires = expire;
- timer.data = (unsigned long) current;
- timer.function = process_timeout;
-
- add_timer(&timer);
- schedule();
- del_timer_sync(&timer);
-
- timeout = expire - jiffies;
-
- out:
- return timeout < 0 ? 0 : timeout;
+asmlinkage void schedule_tail(task_t *prev)
+{
+ spin_unlock_irq(&this_rq()->lock);
}
-/*
- * schedule_tail() is getting called from the fork return path. This
- * cleans up all remaining scheduler things, without impacting the
- * common case.
- */
-static inline void __schedule_tail(struct task_struct *prev)
+static inline void context_switch(task_t *prev, task_t *next)
{
-#ifdef CONFIG_SMP
- int policy;
+ struct mm_struct *mm = next->mm;
+ struct mm_struct *oldmm = prev->active_mm;
- /*
- * prev->policy can be written from here only before `prev'
- * can be scheduled (before setting prev->cpus_runnable to ~0UL).
- * Of course it must also be read before allowing prev
- * to be rescheduled, but since the write depends on the read
- * to complete, wmb() is enough. (the spin_lock() acquired
- * before setting cpus_runnable is not enough because the spin_lock()
- * common code semantics allows code outside the critical section
- * to enter inside the critical section)
- */
- policy = prev->policy;
- prev->policy = policy & ~SCHED_YIELD;
- wmb();
+ prepare_to_switch();
- /*
- * fast path falls through. We have to clear cpus_runnable before
- * checking prev->state to avoid a wakeup race. Protect against
- * the task exiting early.
- */
- task_lock(prev);
- task_release_cpu(prev);
- mb();
- if (prev->state == TASK_RUNNING)
- goto needs_resched;
+ if (!mm) {
+ next->active_mm = oldmm;
+ atomic_inc(&oldmm->mm_count);
+ enter_lazy_tlb(oldmm, next, smp_processor_id());
+ } else
+ switch_mm(oldmm, mm, next, smp_processor_id());
-out_unlock:
- task_unlock(prev); /* Synchronise here with release_task() if prev is TASK_ZOMBIE */
- return;
+ if (!prev->mm) {
+ prev->active_mm = NULL;
+ mmdrop(oldmm);
+ }
/*
- * Slow path - we 'push' the previous process and
- * reschedule_idle() will attempt to find a new
- * processor for it. (but it might preempt the
- * current process as well.) We must take the runqueue
- * lock and re-check prev->state to be correct. It might
- * still happen that this process has a preemption
- * 'in progress' already - but this is not a problem and
- * might happen in other circumstances as well.
+ * Here we just switch the register state and the stack. There are
+ * 3 processes affected by a context switch:
+ *
+ * prev ==> .... ==> (last => next)
+ *
+ * It's the 'much more previous' 'prev' that is on next's stack,
+ * but prev is set to (the just run) 'last' process by switch_to().
+ * This might sound slightly confusing but makes tons of sense.
*/
-needs_resched:
- {
- unsigned long flags;
+ switch_to(prev, next, prev);
+}
- /*
- * Avoid taking the runqueue lock in cases where
- * no preemption-check is necessery:
- */
- if ((prev == idle_task(smp_processor_id())) ||
- (policy & SCHED_YIELD))
- goto out_unlock;
+unsigned long nr_running(void)
+{
+ unsigned long i, sum = 0;
- spin_lock_irqsave(&runqueue_lock, flags);
- if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
- reschedule_idle(prev);
- spin_unlock_irqrestore(&runqueue_lock, flags);
- goto out_unlock;
- }
-#else
- prev->policy &= ~SCHED_YIELD;
-#endif /* CONFIG_SMP */
+ for (i = 0; i < smp_num_cpus; i++)
+ sum += cpu_rq(i)->nr_running;
+
+ return sum;
}
-asmlinkage void schedule_tail(struct task_struct *prev)
+unsigned long nr_context_switches(void)
{
- __schedule_tail(prev);
+ unsigned long i, sum = 0;
+
+ for (i = 0; i < smp_num_cpus; i++)
+ sum += cpu_rq(i)->nr_switches;
+
+ return sum;
}
-void expire_task(struct task_struct *p)
+static inline unsigned long max_rq_len(void)
{
- if (unlikely(!p->time_slice))
- goto need_resched;
+ unsigned long i, curr, max = 0;
- if (!--p->time_slice) {
- if (p->dyn_prio)
- p->dyn_prio--;
-need_resched:
- p->need_resched = 1;
+ for (i = 0; i < smp_num_cpus; i++) {
+ curr = cpu_rq(i)->nr_running;
+ if (curr > max)
+ max = curr;
}
+ return max;
}
/*
- * 'schedule()' is the scheduler function. It's a very simple and nice
- * scheduler: it's not perfect, but certainly works for most things.
- *
- * The goto is "interesting".
+ * Current runqueue is empty, try to find work on
+ * other runqueues.
*
- * NOTE!! Task 0 is the 'idle' task, which gets called when no other
- * tasks can run. It can not be killed, and it cannot sleep. The 'state'
- * information in task[0] is never used.
+ * We call this with the current runqueue locked,
+ * irqs disabled.
*/
-asmlinkage void schedule(void)
+static void load_balance(runqueue_t *this_rq)
{
- struct schedule_data * sched_data;
- struct task_struct *prev, *next, *p;
- struct list_head *tmp;
- int this_cpu, c;
-
-
- spin_lock_prefetch(&runqueue_lock);
-
- if (!current->active_mm) BUG();
-need_resched_back:
- prev = current;
- this_cpu = prev->processor;
-
- if (unlikely(in_interrupt())) {
- printk("Scheduling in interrupt\n");
- BUG();
- }
-
- release_kernel_lock(prev, this_cpu);
-
+ int nr_tasks, load, prev_max_load, max_load, idx, i;
+ task_t *next = this_rq->idle, *tmp;
+ runqueue_t *busiest, *rq_tmp;
+ prio_array_t *array;
+ list_t *head, *curr;
+
+ prev_max_load = max_rq_len();
+ nr_tasks = prev_max_load - this_rq->nr_running;
/*
- * 'sched_data' is protected by the fact that we can run
- * only one process per CPU.
+ * We need at least ~10% imbalance to trigger balancing:
*/
- sched_data = & aligned_data[this_cpu].schedule_data;
-
- spin_lock_irq(&runqueue_lock);
+ if (nr_tasks <= 1 + prev_max_load/8)
+ return;
+ prev_max_load++;
- /* move an exhausted RR process to be last.. */
- if (unlikely(prev->policy == SCHED_RR))
- if (!prev->time_slice) {
- prev->time_slice = TASK_TIMESLICE(prev);
- move_last_runqueue(prev);
- }
-
- switch (prev->state) {
- case TASK_INTERRUPTIBLE:
- if (signal_pending(prev)) {
- prev->state = TASK_RUNNING;
- break;
+repeat_search:
+ /*
+ * We search all runqueues to find the most busy one.
+ * We do this lockless to reduce cache-bouncing overhead;
+ * we re-check the source CPU with the lock held.
+ */
+ busiest = NULL;
+ max_load = 0;
+ for (i = 0; i < smp_num_cpus; i++) {
+ rq_tmp = cpu_rq(i);
+ load = rq_tmp->nr_running;
+ if ((load > max_load) && (load < prev_max_load) &&
+ (rq_tmp != this_rq)) {
+ busiest = rq_tmp;
+ max_load = load;
}
- default:
- del_from_runqueue(prev);
- case TASK_RUNNING:;
}
- prev->need_resched = 0;
+
+ if (likely(!busiest))
+ return;
+ if (max_load <= this_rq->nr_running)
+ return;
+ prev_max_load = max_load;
+ if (busiest->cpu < this_rq->cpu) {
+ spin_unlock(&this_rq->lock);
+ spin_lock(&busiest->lock);
+ spin_lock(&this_rq->lock);
+ } else
+ spin_lock(&busiest->lock);
+ if (busiest->nr_running <= this_rq->nr_running + 1)
+ goto out_unlock;
/*
- * this is the scheduler proper:
+ * We first consider expired tasks. Those will likely not run
+ * in the near future, thus switching CPUs has the least effect
+ * on them.
*/
+ if (busiest->expired->nr_active)
+ array = busiest->expired;
+ else
+ array = busiest->active;
-repeat_schedule:
+new_array:
/*
- * Default process to select..
+ * Load-balancing does not affect RT tasks, so we start the
+ * search at priority 128 (MAX_RT_PRIO).
*/
- next = idle_task(this_cpu);
- c = -1000;
- list_for_each(tmp, &runqueue_head) {
- p = list_entry(tmp, struct task_struct, run_list);
- if (can_schedule(p, this_cpu)) {
- int weight = goodness(p, this_cpu, prev->active_mm);
- if (weight > c)
- c = weight, next = p;
+ idx = MAX_RT_PRIO;
+skip_bitmap:
+ idx = find_next_zero_bit(array->bitmap, MAX_PRIO, idx);
+ if (idx == MAX_PRIO) {
+ if (array == busiest->expired) {
+ array = busiest->active;
+ goto new_array;
}
+ spin_unlock(&busiest->lock);
+ goto repeat_search;
}
- /* Do we need to re-calculate counters? */
- if (unlikely(!c)) {
- rcl_curr++;
- list_for_each(tmp, &runqueue_head) {
- p = list_entry(tmp, struct task_struct, run_list);
- p->time_slice = TASK_TIMESLICE(p);
- p->rcl_last = rcl_curr;
- }
- goto repeat_schedule;
+ head = array->queue + idx;
+ curr = head->next;
+skip_queue:
+ tmp = list_entry(curr, task_t, run_list);
+ if ((tmp == busiest->curr) || !(tmp->cpus_allowed & (1 << smp_processor_id()))) {
+ curr = curr->next;
+ if (curr != head)
+ goto skip_queue;
+ idx++;
+ goto skip_bitmap;
}
-
+ next = tmp;
/*
- * from this point on nothing can prevent us from
- * switching to the next task, save this fact in
- * sched_data.
+ * take the task out of the other runqueue and
+ * put it into this one:
*/
- sched_data->curr = next;
- task_set_cpu(next, this_cpu);
- spin_unlock_irq(&runqueue_lock);
-
- if (unlikely(prev == next)) {
- /* We won't go through the normal tail, so do this by hand */
- prev->policy &= ~SCHED_YIELD;
- goto same_process;
+ dequeue_task(next, array);
+ busiest->nr_running--;
+ next->cpu = smp_processor_id();
+ this_rq->nr_running++;
+ enqueue_task(next, this_rq->active);
+ if (next->prio < current->prio)
+ current->need_resched = 1;
+ if (--nr_tasks) {
+ if (array == busiest->expired) {
+ array = busiest->active;
+ goto new_array;
+ }
+ spin_unlock(&busiest->lock);
+ goto repeat_search;
}
+out_unlock:
+ spin_unlock(&busiest->lock);
+}
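
The trigger test at the top of load_balance() is coarse integer
arithmetic. A worked example of the same test, spelled out as a
stand-alone sketch (not part of the patch):

	static int imbalance_triggers(int max_load, int this_load)
	{
		int nr_tasks = max_load - this_load;

		/*
		 * E.g. max_load = 9, this_load = 6: nr_tasks = 3 and
		 * 1 + 9/8 = 2, so balancing proceeds; with this_load = 7
		 * (nr_tasks = 2) it would not.
		 */
		return nr_tasks > 1 + max_load/8;
	}
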
-#ifdef CONFIG_SMP
- /*
- * maintain the per-process 'last schedule' value.
- * (this has to be recalculated even if we reschedule to
- * the same process) Currently this is only used on SMP,
- * and it's approximate, so we do not have to maintain
- * it while holding the runqueue spinlock.
- */
- sched_data->last_schedule = get_cycles();
+#define REBALANCE_TICK (HZ/100)
- /*
- * We drop the scheduler lock early (it's a global spinlock),
- * thus we have to lock the previous process from getting
- * rescheduled during switch_to().
- */
+void idle_tick(void)
+{
+ unsigned long flags;
-#endif /* CONFIG_SMP */
+ if (!(jiffies % REBALANCE_TICK) && likely(this_rq()->curr != NULL)) {
+ spin_lock_irqsave(&this_rq()->lock, flags);
+ load_balance(this_rq());
+ spin_unlock_irqrestore(&this_rq()->lock, flags);
+ }
+}
+
+void expire_task(task_t *p)
+{
+ runqueue_t *rq = this_rq();
+ unsigned long flags;
- kstat.context_swtch++;
+ if (p->array != rq->active) {
+ p->need_resched = 1;
+ return;
+ }
/*
- * there are 3 processes which are affected by a context switch:
- *
- * prev == .... ==> (last => next)
- *
- * It's the 'much more previous' 'prev' that is on next's stack,
- * but prev is set to (the just run) 'last' process by switch_to().
- * This might sound slightly confusing but makes tons of sense.
+ * The task cannot change CPUs because it's the current task.
*/
- prepare_to_switch();
- {
- struct mm_struct *mm = next->mm;
- struct mm_struct *oldmm = prev->active_mm;
- if (!mm) {
- if (next->active_mm) BUG();
- next->active_mm = oldmm;
- atomic_inc(&oldmm->mm_count);
- enter_lazy_tlb(oldmm, next, this_cpu);
- } else {
- if (next->active_mm != mm) BUG();
- switch_mm(oldmm, mm, next, this_cpu);
- }
+ spin_lock_irqsave(&rq->lock, flags);
+ if ((p->policy != SCHED_FIFO) && !--p->time_slice) {
+ p->need_resched = 1;
+ if (rt_task(p))
+ p->time_slice = RT_PRIO_TO_TIMESLICE(p->prio);
+ else
+ p->time_slice = PRIO_TO_TIMESLICE(p->prio);
- if (!prev->mm) {
- prev->active_mm = NULL;
- mmdrop(oldmm);
+ /*
+ * Timeslice used up - discard any possible
+ * priority penalty:
+ */
+ dequeue_task(p, rq->active);
+ /*
+ * Tasks that have nice values of -20 ... -15 are put
+ * back into the active array. If they use up too much
+ * CPU time then they'll get a priority penalty anyway
+ * so this cannot starve other processes accidentally.
+ * Otherwise this is pretty handy for sysadmins ...
+ */
+ if (p->prio <= MAX_RT_PRIO + MAX_PENALTY/2)
+ enqueue_task(p, rq->active);
+ else
+ enqueue_task(p, rq->expired);
+ } else {
+ /*
+ * Deactivate + activate the task so that the
+ * load estimator gets updated properly:
+ */
+ if (!rt_task(p)) {
+ deactivate_task(p, rq);
+ activate_task(p, rq);
}
}
+ load_balance(rq);
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
- /*
- * This just switches the register state and the
- * stack.
- */
- switch_to(prev, next, prev);
- __schedule_tail(prev);
+void scheduling_functions_start_here(void) { }
+
+/*
+ * 'schedule()' is the main scheduler function.
+ */
+asmlinkage void schedule(void)
+{
+ task_t *prev, *next;
+ prio_array_t *array;
+ runqueue_t *rq;
+ list_t *queue;
+ int idx;
+
+ if (unlikely(in_interrupt()))
+ BUG();
+need_resched_back:
+ prev = current;
+ release_kernel_lock(prev, smp_processor_id());
+ rq = this_rq();
+ spin_lock_irq(&rq->lock);
+
+ switch (prev->state) {
+ case TASK_INTERRUPTIBLE:
+ if (unlikely(signal_pending(prev))) {
+ prev->state = TASK_RUNNING;
+ break;
+ }
+ default:
+ deactivate_task(prev, rq);
+ case TASK_RUNNING:;
+ }
+pick_next_task:
+ if (unlikely(!rq->nr_running)) {
+ load_balance(rq);
+ if (rq->nr_running)
+ goto pick_next_task;
+ next = rq->idle;
+ goto switch_tasks;
+ }
+
+ array = rq->active;
+ if (unlikely(!array->nr_active)) {
+ /*
+ * Switch the active and expired arrays.
+ */
+ rq->active = rq->expired;
+ rq->expired = array;
+ array = rq->active;
+ }
+
+ idx = sched_find_first_zero_bit(array->bitmap);
+ queue = array->queue + idx;
+ next = list_entry(queue->next, task_t, run_list);
+
+switch_tasks:
+ prev->need_resched = 0;
+
+ if (likely(prev != next)) {
+ rq->nr_switches++;
+ rq->curr = next;
+ next->cpu = prev->cpu;
+ context_switch(prev, next);
+ /*
+ * The runqueue pointer might be from another CPU
+ * if the new task was last running on a different
+ * CPU - thus re-load it.
+ */
+ barrier();
+ rq = this_rq();
+ }
+ spin_unlock_irq(&rq->lock);
-same_process:
reacquire_kernel_lock(current);
- if (current->need_resched)
+ if (unlikely(current->need_resched))
goto need_resched_back;
return;
}
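
The pick-next-task path above is the heart of the O(1) design: each
priority array keeps a bitmap with a zero bit for every priority
level that has queued tasks (all bits start out set, see sched_init()
further down), so selecting the highest-priority runnable task is one
bit search plus one list-head lookup, independent of nr_running. A
conceptual sketch using the generic find_next_zero_bit() in place of
the optimized sched_find_first_zero_bit():

	static inline task_t *pick_next_sketch(prio_array_t *array)
	{
		int idx;

		/* A zero bit means a non-empty queue at that priority. */
		idx = find_next_zero_bit(array->bitmap, MAX_PRIO, 0);
		if (idx == MAX_PRIO)
			return NULL;	/* no runnable task in this array */
		return list_entry(array->queue[idx].next, task_t, run_list);
	}
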
/*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
- * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
- * non-exclusive tasks and one exclusive task.
+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * number) then we wake all the non-exclusive tasks and one exclusive task.
*
* There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns zero
- * in this (rare) case, and we handle it by contonuing to scan the queue.
+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, const int sync)
{
struct list_head *tmp;
- struct task_struct *p;
+ task_t *p;
list_for_each(tmp,&q->task_list) {
unsigned int state;
return timeout;
}
+/*
+ * Change the current task's CPU affinity. Migrate the process to a
+ * proper CPU and schedule away if the current CPU is removed from
+ * the allowed bitmask.
+ */
+void set_cpus_allowed(task_t *p, unsigned long new_mask)
+{
+ runqueue_t *this_rq = this_rq(), *target_rq;
+ unsigned long this_mask = 1UL << smp_processor_id();
+ int target_cpu;
+
+ new_mask &= cpu_online_map;
+ p->cpus_allowed = new_mask;
+ /*
+ * Can the task run on the current CPU? If not then
+ * migrate the process off to a proper CPU.
+ */
+ if (new_mask & this_mask)
+ return;
+ target_cpu = ffz(~new_mask);
+ target_rq = cpu_rq(target_cpu);
+ if (target_cpu < smp_processor_id()) {
+ spin_lock_irq(&target_rq->lock);
+ spin_lock(&this_rq->lock);
+ } else {
+ spin_lock_irq(&this_rq->lock);
+ spin_lock(&target_rq->lock);
+ }
+ dequeue_task(p, p->array);
+ this_rq->nr_running--;
+ target_rq->nr_running++;
+ enqueue_task(p, target_rq->active);
+ target_rq->curr->need_resched = 1;
+ spin_unlock(&target_rq->lock);
+
+ /*
+ * The easiest solution is to context switch into
+ * the idle thread - which will pick the best task
+ * afterwards:
+ */
+ this_rq->nr_switches++;
+ this_rq->curr = this_rq->idle;
+ this_rq->idle->need_resched = 1;
+ context_switch(current, this_rq->idle);
+ barrier();
+ spin_unlock_irq(&this_rq()->lock);
+}
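
Callers need a single call now; for instance, the ksoftirqd hunk
further down in this patch replaces its open-coded migration loop
with:

	set_cpus_allowed(current, 1UL << cpu);
	if (smp_processor_id() != cpu)
		BUG();
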
+
void scheduling_functions_end_here(void) { }
+void set_user_nice(task_t *p, long nice)
+{
+ unsigned long flags;
+ prio_array_t *array;
+ runqueue_t *rq;
+ long old_nice;
+
+ if (p->__nice == nice)
+ return;
+ /*
+ * We have to be careful: if called from sys_setpriority(),
+ * the task might be in the middle of scheduling on another CPU.
+ */
+ lock_task_rq(rq, p, flags);
+ if (rt_task(p)) {
+ p->__nice = nice;
+ goto out_unlock;
+ }
+ array = p->array;
+ if (array)
+ dequeue_task(p, array);
+ old_nice = p->__nice;
+ p->__nice = nice;
+ p->prio = NICE_TO_PRIO(nice);
+ if (array) {
+ enqueue_task(p, array);
+ /*
+ * If the task raised its priority, or is the currently
+ * running task and lowered its priority, then reschedule
+ * its CPU (nice is compared against the old value here):
+ */
+ if ((nice < old_nice) ||
+ ((old_nice < nice) && (p == rq->curr)))
+ resched_task(rq->curr);
+ }
+out_unlock:
+ unlock_task_rq(rq, p, flags);
+}
+
#ifndef __alpha__
/*
asmlinkage long sys_nice(int increment)
{
- long newprio;
+ long nice;
/*
* Setpriority might change our priority at the same moment.
if (increment > 40)
increment = 40;
- newprio = current->nice + increment;
- if (newprio < -20)
- newprio = -20;
- if (newprio > 19)
- newprio = 19;
- current->nice = newprio;
+ nice = current->__nice + increment;
+ if (nice < -20)
+ nice = -20;
+ if (nice > 19)
+ nice = 19;
+ set_user_nice(current, nice);
return 0;
}
#endif
-static inline struct task_struct *find_process_by_pid(pid_t pid)
+static inline task_t *find_process_by_pid(pid_t pid)
{
return pid ? find_task_by_pid(pid) : current;
}
-static int setscheduler(pid_t pid, int policy,
- struct sched_param *param)
+static int setscheduler(pid_t pid, int policy, struct sched_param *param)
{
struct sched_param lp;
- struct task_struct *p;
+ prio_array_t *array;
+ unsigned long flags;
+ runqueue_t *rq;
int retval;
+ task_t *p;
retval = -EINVAL;
if (!param || pid < 0)
* We play safe to avoid deadlocks.
*/
read_lock_irq(&tasklist_lock);
- spin_lock(&runqueue_lock);
p = find_process_by_pid(pid);
retval = -ESRCH;
if (!p)
- goto out_unlock;
-
+ goto out_unlock_tasklist;
+
+ /*
+ * To be able to change p->policy safely, the appropriate
+ * runqueue lock must be held.
+ */
+ lock_task_rq(rq, p, flags);
+
if (policy < 0)
policy = p->policy;
else {
!capable(CAP_SYS_NICE))
goto out_unlock;
+ array = p->array;
+ if (array)
+ deactivate_task(p, task_rq(p));
retval = 0;
p->policy = policy;
p->rt_priority = lp.sched_priority;
- if (task_on_runqueue(p))
- move_first_runqueue(p);
-
- current->need_resched = 1;
+ if (rt_task(p))
+ p->prio = 99-p->rt_priority;
+ else
+ p->prio = NICE_TO_PRIO(p->__nice);
+ if (array)
+ activate_task(p, task_rq(p));
out_unlock:
- spin_unlock(&runqueue_lock);
+ unlock_task_rq(rq, p, flags);
+out_unlock_tasklist:
read_unlock_irq(&tasklist_lock);
out_nounlock:
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
- struct task_struct *p;
+ task_t *p;
int retval;
retval = -EINVAL;
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (p)
- retval = p->policy & ~SCHED_YIELD;
+ retval = p->policy;
read_unlock(&tasklist_lock);
out_nounlock:
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
{
- struct task_struct *p;
+ task_t *p;
struct sched_param lp;
int retval;
asmlinkage long sys_sched_yield(void)
{
+ runqueue_t *rq = this_rq();
+ prio_array_t *array;
+
/*
- * Trick. sched_yield() first counts the number of truly
- * 'pending' runnable processes, then returns if it's
- * only the current processes. (This test does not have
- * to be atomic.) In threaded applications this optimization
- * gets triggered quite often.
+ * Decrease the yielding task's priority by one, to avoid
+ * livelocks. This priority loss is temporary; it is recovered
+ * once the current timeslice expires.
+ *
+ * If the priority is already MAX_PRIO-1 then we still
+ * round-robin the task within its run list.
*/
+ spin_lock_irq(&rq->lock);
+ array = current->array;
+ dequeue_task(current, array);
+ if (likely(!rt_task(current)))
+ if (current->prio < MAX_PRIO-1)
+ current->prio++;
+ enqueue_task(current, array);
+ spin_unlock_irq(&rq->lock);
- int nr_pending = nr_running;
-
-#if CONFIG_SMP
- int i;
-
- // Subtract non-idle processes running on other CPUs.
- for (i = 0; i < smp_num_cpus; i++) {
- int cpu = cpu_logical_map(i);
- if (aligned_data[cpu].schedule_data.curr != idle_task(cpu))
- nr_pending--;
- }
-#else
- // on UP this process is on the runqueue as well
- nr_pending--;
-#endif
- if (nr_pending) {
- /*
- * This process can only be rescheduled by us,
- * so this is safe without any locking.
- */
- if (current->policy == SCHED_OTHER)
- current->policy |= SCHED_YIELD;
- current->need_resched = 1;
+ schedule();
- current->time_slice = 0;
- if (++current->dyn_prio > MAX_DYNPRIO)
- current->dyn_prio = MAX_DYNPRIO;
- }
return 0;
}
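
The yield() helper that later hunks substitute for the old
"current->policy |= SCHED_YIELD; schedule();" sequence is defined
elsewhere in this patch; conceptually it amounts to no more than
this sketch:

	static inline void yield_sketch(void)
	{
		current->state = TASK_RUNNING;
		sys_sched_yield();	/* requeues us, then schedules */
	}
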
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
struct timespec t;
- struct task_struct *p;
+ task_t *p;
int retval = -EINVAL;
if (pid < 0)
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
if (p)
- jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : TASK_TIMESLICE(p),
- &t);
+ jiffies_to_timespec(p->policy & SCHED_FIFO ?
+ 0 : RT_PRIO_TO_TIMESLICE(p->prio), &t);
read_unlock(&tasklist_lock);
if (p)
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
return retval;
}
-static void show_task(struct task_struct * p)
+static void show_task(task_t * p)
{
unsigned long free = 0;
int state;
printk(" (NOTLB)\n");
{
- extern void show_trace_task(struct task_struct *tsk);
+ extern void show_trace_task(task_t *tsk);
show_trace_task(p);
}
}
void show_state(void)
{
- struct task_struct *p;
+ task_t *p;
#if (BITS_PER_LONG == 32)
printk("\n"
read_unlock(&tasklist_lock);
}
-/**
- * reparent_to_init() - Reparent the calling kernel thread to the init task.
- *
- * If a kernel thread is launched as a result of a system call, or if
- * it ever exits, it should generally reparent itself to init so that
- * it is correctly cleaned up on exit.
- *
- * The various task state such as scheduling policy and priority may have
- * been inherited fro a user process, so we reset them to sane values here.
- *
- * NOTE that reparent_to_init() gives the caller full capabilities.
- */
-void reparent_to_init(void)
+extern unsigned long wait_init_idle;
+
+static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
{
- write_lock_irq(&tasklist_lock);
-
- /* Reparent to init */
- REMOVE_LINKS(current);
- current->p_pptr = child_reaper;
- current->p_opptr = child_reaper;
- SET_LINKS(current);
-
- /* Set the exit signal to SIGCHLD so we signal init on exit */
- current->exit_signal = SIGCHLD;
-
- /* We also take the runqueue_lock while altering task fields
- * which affect scheduling decisions */
- spin_lock(&runqueue_lock);
-
- current->ptrace = 0;
- current->nice = DEF_NICE;
- current->policy = SCHED_OTHER;
- /* cpus_allowed? */
- /* rt_priority? */
- /* signals? */
- current->cap_effective = CAP_INIT_EFF_SET;
- current->cap_inheritable = CAP_INIT_INH_SET;
- current->cap_permitted = CAP_FULL_SET;
- current->keep_capabilities = 0;
- memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
- current->user = INIT_USER;
-
- spin_unlock(&runqueue_lock);
- write_unlock_irq(&tasklist_lock);
+ if (rq1 == rq2)
+ spin_lock(&rq1->lock);
+ else {
+ if (rq1->cpu < rq2->cpu) {
+ spin_lock(&rq1->lock);
+ spin_lock(&rq2->lock);
+ } else {
+ spin_lock(&rq2->lock);
+ spin_lock(&rq1->lock);
+ }
+ }
}
-/*
- * Put all the gunge required to become a kernel thread without
- * attached user resources in one place where it belongs.
- */
-
-void daemonize(void)
+static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
{
- struct fs_struct *fs;
-
-
- /*
- * If we were started as result of loading a module, close all of the
- * user space pages. We don't need them, and if we didn't close them
- * they would be locked into memory.
- */
- exit_mm(current);
-
- current->session = 1;
- current->pgrp = 1;
- current->tty = NULL;
-
- /* Become as one with the init task */
-
- exit_fs(current); /* current->fs->count--; */
- fs = init_task.fs;
- current->fs = fs;
- atomic_inc(&fs->count);
- exit_files(current);
- current->files = init_task.files;
- atomic_inc(&current->files->count);
+ spin_unlock(&rq1->lock);
+ if (rq1 != rq2)
+ spin_unlock(&rq2->lock);
}
-extern unsigned long wait_init_idle;
-
void __init init_idle(void)
{
- struct schedule_data * sched_data;
- sched_data = &aligned_data[smp_processor_id()].schedule_data;
+ runqueue_t *this_rq = this_rq(), *rq = current->array->rq;
+ unsigned long flags;
- if (current != &init_task && task_on_runqueue(current)) {
- printk("UGH! (%d:%d) was on the runqueue, removing.\n",
- smp_processor_id(), current->pid);
- del_from_runqueue(current);
+ __save_flags(flags);
+ __cli();
+ double_rq_lock(this_rq, rq);
+
+ this_rq->curr = this_rq->idle = current;
+ deactivate_task(current, rq);
+ current->array = NULL;
+ current->prio = MAX_PRIO;
+ current->state = TASK_RUNNING;
+ clear_bit(smp_processor_id(), &wait_init_idle);
+ double_rq_unlock(this_rq, rq);
+ while (wait_init_idle) {
+ cpu_relax();
+ barrier();
}
- current->dyn_prio = 0;
- sched_data->curr = current;
- sched_data->last_schedule = get_cycles();
- clear_bit(current->processor, &wait_init_idle);
+ current->need_resched = 1;
+ __sti();
}
-extern void init_timervecs (void);
-
-static void fill_tslice_map(void)
-{
- int i;
-
- for (i = 0; i < NICE_RANGE; i++) {
- ts_table[i] = ((MIN_NICE_TSLICE +
- ((MAX_NICE_TSLICE -
- MIN_NICE_TSLICE) / (NICE_RANGE - 1)) * i) * HZ) / 1000000;
- if (!ts_table[i]) ts_table[i] = 1;
- }
-}
+extern void init_timervecs(void);
+extern void timer_bh(void);
+extern void tqueue_bh(void);
+extern void immediate_bh(void);
void __init sched_init(void)
{
+ runqueue_t *rq;
+ int i, j, k;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ runqueue_t *rq = cpu_rq(i);
+ prio_array_t *array;
+
+ rq->active = rq->arrays + 0;
+ rq->expired = rq->arrays + 1;
+ spin_lock_init(&rq->lock);
+ rq->cpu = i;
+
+ for (j = 0; j < 2; j++) {
+ array = rq->arrays + j;
+ array->rq = rq;
+ array->lock = &rq->lock;
+ for (k = 0; k < MAX_PRIO; k++)
+ INIT_LIST_HEAD(array->queue + k);
+ memset(array->bitmap, 0xff, BITMAP_SIZE);
+ /* zero delimiter for bitsearch */
+ clear_bit(MAX_PRIO, array->bitmap);
+ }
+ }
/*
* We have to do a little magic to get the first
* process right in SMP mode.
*/
- int cpu = smp_processor_id();
- int nr;
-
- init_task.processor = cpu;
+ rq = this_rq();
+ rq->curr = current;
+ rq->idle = NULL;
+ wake_up_process(current);
- for(nr = 0; nr < PIDHASH_SZ; nr++)
- pidhash[nr] = NULL;
-
- fill_tslice_map();
+ for (i = 0; i < PIDHASH_SZ; i++)
+ pidhash[i] = NULL;
init_timervecs();
-
init_bh(TIMER_BH, timer_bh);
init_bh(TQUEUE_BH, tqueue_bh);
init_bh(IMMEDIATE_BH, immediate_bh);
* The boot idle thread does lazy MMU switching as well:
*/
atomic_inc(&init_mm.mm_count);
- enter_lazy_tlb(&init_mm, current, cpu);
+ enter_lazy_tlb(&init_mm, current, smp_processor_id());
}
* process of changing - but no harm is done by that
* other than doing an extra (lightweight) IPI interrupt.
*/
- spin_lock(&runqueue_lock);
- if (task_has_cpu(t) && t->processor != smp_processor_id())
- smp_send_reschedule(t->processor);
- spin_unlock(&runqueue_lock);
-#endif /* CONFIG_SMP */
-
+ if ((t->state == TASK_RUNNING) && (t->cpu != smp_processor_id()))
+ kick_if_running(t);
+#endif
if (t->state & TASK_INTERRUPTIBLE) {
wake_up_process(t);
return;
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
current->state = TASK_RUNNING;
- do {
- current->policy |= SCHED_YIELD;
- schedule();
- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ do
+ yield();
+ while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
clear_bit(TASKLET_STATE_SCHED, &t->state);
int cpu = cpu_logical_map(bind_cpu);
daemonize();
- current->nice = 19;
+ set_user_nice(current, 19);
sigfillset(&current->blocked);
/* Migrate to the right CPU */
- current->cpus_allowed = 1UL << cpu;
- while (smp_processor_id() != cpu)
- schedule();
+ set_cpus_allowed(current, 1UL << cpu);
+ if (smp_processor_id() != cpu)
+ BUG();
sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);
{
int cpu;
- for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+ for (cpu = 0; cpu < smp_num_cpus; cpu++)
if (kernel_thread(ksoftirqd, (void *) (long) cpu,
CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
- else {
- while (!ksoftirqd_task(cpu_logical_map(cpu))) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
- }
- }
-
+ else
+ while (!ksoftirqd_task(cpu_logical_map(cpu)))
+ yield();
return 0;
}
}
if (error == -ESRCH)
error = 0;
- if (niceval < p->nice && !capable(CAP_SYS_NICE))
+ if (niceval < p->__nice && !capable(CAP_SYS_NICE))
error = -EACCES;
else
- p->nice = niceval;
+ set_user_nice(p, niceval);
}
read_unlock(&tasklist_lock);
long niceval;
if (!proc_sel(p, which, who))
continue;
- niceval = 20 - p->nice;
+ niceval = 20 - p->__nice;
if (niceval > retval)
retval = niceval;
}
#include <asm/uaccess.h>
+struct kernel_stat kstat;
+
/*
* Timekeeping variables
*/
update_one_process(p, user_tick, system, cpu);
if (p->pid) {
expire_task(p);
- if (p->nice > 0)
+ if (p->__nice > 0)
kstat.per_cpu_nice[cpu] += user_tick;
else
kstat.per_cpu_user[cpu] += user_tick;
kstat.per_cpu_system[cpu] += system;
- } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
- kstat.per_cpu_system[cpu] += system;
+ } else {
+ idle_tick();
+ if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
+ kstat.per_cpu_system[cpu] += system;
+ }
}
/*
#endif
+static void process_timeout(unsigned long __data)
+{
+ wake_up_process((task_t *)__data);
+}
+
+/**
+ * schedule_timeout - sleep until timeout
+ * @timeout: timeout value in jiffies
+ *
+ * Make the current task sleep until @timeout jiffies have
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * in jiffies will be returned, or 0 if the timer expired in time
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
+signed long schedule_timeout(signed long timeout)
+{
+ struct timer_list timer;
+ unsigned long expire;
+
+ switch (timeout)
+ {
+ case MAX_SCHEDULE_TIMEOUT:
+ /*
+ * These two special cases are useful to be comfortable
+ * in the caller. Nothing more. We could take
+ * MAX_SCHEDULE_TIMEOUT from one of the negative values,
+ * but I'd like to return a valid offset (>=0) to allow
+ * the caller to do everything it wants with the retval.
+ */
+ schedule();
+ goto out;
+ default:
+ /*
+ * Another bit of paranoia. Note that the retval will be
+ * 0, since no piece of the kernel is supposed to check
+ * for a negative retval of schedule_timeout() (it should
+ * never happen anyway). You just have the printk() that
+ * will tell you if something has gone wrong and where.
+ */
+ if (timeout < 0)
+ {
+ printk(KERN_ERR "schedule_timeout: wrong timeout "
+ "value %lx from %p\n", timeout,
+ __builtin_return_address(0));
+ current->state = TASK_RUNNING;
+ goto out;
+ }
+ }
+
+ expire = timeout + jiffies;
+
+ init_timer(&timer);
+ timer.expires = expire;
+ timer.data = (unsigned long) current;
+ timer.function = process_timeout;
+
+ add_timer(&timer);
+ schedule();
+ del_timer_sync(&timer);
+
+ timeout = expire - jiffies;
+
+ out:
+ return timeout < 0 ? 0 : timeout;
+}
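
A typical caller looks like this sketch: sleep interruptibly for up
to one second and learn how much of the interval was left at wakeup:

	static signed long sleep_up_to_one_second(void)
	{
		set_current_state(TASK_INTERRUPTIBLE);
		/* Returns 0 if the timer expired, else remaining jiffies. */
		return schedule_timeout(HZ);
	}
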
+
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
* Niced processes are most likely less important, so double
* their badness points.
*/
- if (p->nice > 0)
+ if (p->__nice > 0)
points *= 2;
/*
* all the memory it needs. That way it should be able to
* exit() and clear out its resources quickly...
*/
- p->time_slice = 2 * MAX_TSLICE;
- p->dyn_prio = MAX_DYNPRIO + 1;
+ p->time_slice = 2 * MAX_TIMESLICE;
p->flags |= PF_MEMALLOC | PF_MEMDIE;
/* This process has hardware access, be more careful. */
* killing itself before someone else gets the chance to ask
* for more memory.
*/
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
return;
}
return NULL;
/* Yield for kswapd, and try again */
- current->policy |= SCHED_YIELD;
- __set_current_state(TASK_RUNNING);
- schedule();
+ yield();
goto rebalance;
}
skb = alloc_skb(MAX_TCP_HEADER, GFP_KERNEL);
if (skb)
break;
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
/* Reserve space for headers and prepare control bits. */
dev_watchdog_down(dev);
- while (test_bit(__LINK_STATE_SCHED, &dev->state)) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ while (test_bit(__LINK_STATE_SCHED, &dev->state))
+ yield();
spin_unlock_wait(&dev->xmit_lock);
}
while (atomic_read(&net_family_lockct) != 0) {
spin_unlock(&net_family_lock);
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
spin_lock(&net_family_lock);
}
}
if (flags & RPC_TASK_ASYNC)
return NULL;
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
} while (!signalled());
return NULL;
__rpc_schedule();
if (all_tasks) {
dprintk("rpciod_killall: waiting for tasks to exit\n");
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
}
}
* wait briefly before checking the process id.
*/
current->sigpending = 0;
- current->policy |= SCHED_YIELD;
- schedule();
+ yield();
/*
* Display a message if we're going to wait longer.
*/
addr->hash)) {
write_unlock(&unix_table_lock);
/* Sanity yield. It is unusual case, but yet... */
- if (!(ordernum&0xFF)) {
- current->policy |= SCHED_YIELD;
- schedule();
- }
+ if (!(ordernum&0xFF))
+ yield();
goto retry;
}
addr->hash ^= sk->type;