TI PCILynx IEEE 1394 support
CONFIG_IEEE1394_PCILYNX
- Say Y here if you have a IEEE-1394 controller with the Texas
+ Say Y here if you have an IEEE-1394 controller with the Texas
Instruments PCILynx chip. Note: this driver is written for revision
2 of this chip and may not work with revision 0.
If unsure, say N.
-Adaptec AIC-5800 IEEE 1394 support
-CONFIG_IEEE1394_AIC5800
- Say Y here if you have a IEEE 1394 controller using the Adaptec
- AIC-5800 chip. All Adaptec host adapters (89xx series) use this
- chip, as well as miro's DV boards.
-
- If you want to compile this as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want),
- say M here and read Documentation/modules.txt. The module will be
- called aic5800.o.
-
-OHCI (Open Host Controller Interface) support
+OHCI-1394 support
CONFIG_IEEE1394_OHCI1394
- Say Y here if you have a IEEE 1394 controller based on OHCI.
- The current driver was only tested with OHCI chipsets made
- by Texas Instruments. However, most third-party vendors use
- TI chips.
+ Enable this driver if you have an IEEE 1394 controller based on the
+ OHCI-1394 specification. The current driver has only been tested with
+ OHCI chipsets made by Texas Instruments and NEC. Most third-party vendors
+ use one of these chipsets. It should work with any OHCI-1394 compliant
+ card, however.
If you want to compile this as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read Documentation/modules.txt. The module will be
called ohci1394.o.
+OHCI-1394 Video support
+CONFIG_IEEE1394_VIDEO1394
+ This option enables video device usage for OHCI-1394 cards. Enable this
+ option only if you have an IEEE 1394 video device connected to an
+ OHCI-1394 card.
+
+SBP-2 support (Harddisks etc.)
+CONFIG_IEEE1394_SBP2
+ This option enables you to use SBP-2 devices connected to your IEEE 1394
+ bus. SBP-2 devices include hard disks and DVD drives. Note that the
+ driver is layered on top of the SCSI subsystem, so SCSI support must be
+ enabled as well.
+
Raw IEEE 1394 I/O support
CONFIG_IEEE1394_RAWIO
Say Y here if you want support for the raw device. This is generally
mind the filesystem becoming unreadable to future kernels.
+For /usr/share/magic
+------------------
+
+0 long 0x28cd3d45 Linux cramfs
+>4 long x size %d
+>8 long x flags 0x%x
+>12 long x future 0x%x
+>16 string >\0 signature "%.16s"
+>32 long x fsid.crc 0x%x
+>36 long x fsid.edition %d
+>40 long x fsid.blocks %d
+>44 long x fsid.files %d
+>48 string >\0 name "%.16s"
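+
+The entries above let the file(1) utility identify cramfs images: once
+they are appended to /usr/share/magic, running file on an image created
+with mkcramfs should report "Linux cramfs" followed by the size, flags
+and fsid fields decoded as shown.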
+
+
Hacker Notes
------------
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 7
-EXTRAVERSION =-pre8
+EXTRAVERSION =-pre9
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
ret = -EPERM;
if (child == current)
goto out;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
(current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out_tsk;
rmb();
- if (!child->dumpable && !capable(CAP_SYS_PTRACE))
+ if (!child->mm->dumpable && !capable(CAP_SYS_PTRACE))
goto out_tsk;
/* the same process cannot be attached many times */
if (child->ptrace & PT_PTRACED)
/*
* In theory, we'd have to zap this state only to prevent leaking of
- * security sensitive state (e.g., if current->dumpable is zero). However,
+ * security sensitive state (e.g., if current->mm->dumpable is zero). However,
* this executes in less than 20 cycles even on Itanium, so it's not worth
* optimizing for...).
*/
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
-# BK Id: SCCS/s.Makefile 1.8 05/18/01 06:20:29 patch
+# BK Id: SCCS/s.Makefile 1.10 07/19/01 09:11:27 trini
#
# Makefile for making ELF bootable images for booting on CHRP
# using Open Firmware.
MSIZE=
endif
+CFLAGS += -fno-builtin
+
.c.o:
$(CC) $(CFLAGS) -DKERNELBASE=$(KERNELBASE) -c -o $*.o $<
.S.o:
-# BK Id: SCCS/s.Makefile 1.10 06/05/01 20:22:51 paulus
+# BK Id: SCCS/s.Makefile 1.12 07/19/01 09:11:28 trini
#
# Makefile for making XCOFF bootable images for booting on PowerMacs
# using Open Firmware.
CHRPOBJS = ../common/crt0.o $(COMMONOBJS) chrpmain.o
LIBS = $(TOPDIR)/lib/lib.a ../lib/zlib.a
+CFLAGS += -fno-builtin
+
MKNOTE := ../utils/mknote
SIZE := ../utils/size
OFFSET := ../utils/offset
-# BK Id: SCCS/s.Makefile 1.17 06/12/01 16:47:44 paulus
+# BK Id: SCCS/s.Makefile 1.19 07/19/01 09:11:28 trini
#
# arch/ppc/boot/Makefile
#
# modified by Cort (cort@cs.nmt.edu)
#
-.c.s:
- $(CC) $(CFLAGS) -S -o $*.s $<
-.s.o:
- $(AS) -o $*.o $<
-.c.o:
- $(CC) $(CFLAGS) -c -o $*.o $<
-.S.s:
- $(CPP) $(AFLAGS) -traditional -o $*.o $<
-.S.o:
- $(CC) $(AFLAGS) -traditional -c -o $*.o $<
+USE_STANDARD_AS_RULE := true
+
+CFLAGS += -fno-builtin
ifeq ($(CONFIG_SMP),y)
-TFTPIMAGE=/tftpboot/zImage.prep.smp
+TFTPIMAGE = /tftpboot/zImage.prep.smp
else
-TFTPIMAGE=/tftpboot/zImage.prep
+TFTPIMAGE = /tftpboot/zImage.prep
endif
-ZLINKFLAGS = -T $(TOPDIR)/arch/$(ARCH)/vmlinux.lds -Ttext 0x00800000
-OBJECTS := head.o misc.o ../common/misc-common.o \
- ../common/string.o of1275.o
-OBJCOPY_ARGS = -O elf32-powerpc
-LIBS = ../lib/zlib.a
-
-ifeq ($(CONFIG_SERIAL_CONSOLE),y)
-OBJECTS += ns16550.o
-endif
+ZLINKFLAGS = -T $(TOPDIR)/arch/$(ARCH)/vmlinux.lds \
+ -Ttext 0x00800000
+obj-y := head.o misc.o ../common/misc-common.o \
+ ../common/string.o of1275.o
+OBJCOPY_ARGS = -O elf32-powerpc
+LIBS = ../lib/zlib.a
-ifeq ($(CONFIG_VGA_CONSOLE),y)
-OBJECTS += vreset.o kbd.o
-endif
+obj-$(CONFIG_SERIAL_CONSOLE) += ns16550.o
+obj-$(CONFIG_VGA_CONSOLE) += vreset.o kbd.o
# Tools
-MKPREP := ../utils/mkprep
-SIZE := ../utils/size
-OFFSET := ../utils/offset
+MKPREP := ../utils/mkprep
+SIZE := ../utils/size
+OFFSET := ../utils/offset
all: zImage
$(CC) $(CFLAGS) -DIOOFFSET=0x80000000 -c -o $@ ../common/$*.c
zvmlinux.initrd: zvmlinux ../images/vmlinux.gz
- $(LD) $(ZLINKFLAGS) -o $@.tmp $(OBJECTS) $(LIBS)
+ $(LD) $(ZLINKFLAGS) -o $@.tmp $(obj-y) $(LIBS)
$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
--add-section=initrd=../images/ramdisk.image.gz \
--add-section=image=../images/vmlinux.gz \
-DZIMAGE_OFFSET=`sh $(OFFSET) $(OBJDUMP) $@ image` \
-DZIMAGE_SIZE=`sh $(SIZE) $(OBJDUMP) $@ image` \
-c -o misc.o misc.c
- $(LD) $(ZLINKFLAGS) -o $@.tmp $(OBJECTS) $(LIBS)
+ $(LD) $(ZLINKFLAGS) -o $@.tmp $(obj-y) $(LIBS)
$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
--add-section=initrd=../images/ramdisk.image.gz \
--add-section=image=../images/vmlinux.gz \
$(MKPREP) -pbp zvmlinux.initrd ../images/$@.prep
rm -f zvmlinux.initrd
-zvmlinux: $(OBJECTS) $(LIBS) ../images/vmlinux.gz
+zvmlinux: $(obj-y) $(LIBS) ../images/vmlinux.gz
#
# build the boot loader image and then compute the offset into it
# for the kernel image
#
- $(LD) $(ZLINKFLAGS) -o zvmlinux.tmp $(OBJECTS) $(LIBS)
+ $(LD) $(ZLINKFLAGS) -o zvmlinux.tmp $(obj-y) $(LIBS)
$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
--add-section=image=../images/vmlinux.gz zvmlinux.tmp $@
#
-DZIMAGE_OFFSET=`sh $(OFFSET) $(OBJDUMP) zvmlinux image` \
-DZIMAGE_SIZE=`sh $(SIZE) $(OBJDUMP) zvmlinux image` \
-c -o misc.o misc.c
- $(LD) $(ZLINKFLAGS) -o zvmlinux.tmp $(OBJECTS) $(LIBS)
+ $(LD) $(ZLINKFLAGS) -o zvmlinux.tmp $(obj-y) $(LIBS)
$(OBJCOPY) $(OBJCOPY_ARGS) -R .comment \
--add-section=image=../images/vmlinux.gz $@.tmp $@
rm $@.tmp
/*
- * BK Id: SCCS/s.entry.S 1.17 06/19/01 22:40:51 paulus
+ * BK Id: SCCS/s.entry.S 1.20 07/19/01 23:02:48 paulus
*/
/*
* PowerPC version
* state of one is saved on its kernel stack. Then the state
* of the other is restored from its kernel stack. The memory
* management hardware is updated to the second process's state.
- * Finally, we can return to the second process, via ret_from_except.
+ * Finally, we can return to the second process.
* On entry, r3 points to the THREAD for the current task, r4
* points to the THREAD for the new task.
*
* Note: there are two ways to get to the "going out" portion
* of this code; either by coming in via the entry (_switch)
* or via "fork" which must set up an environment equivalent
- * to the "_switch" path. If you change this (or in particular, the
- * SAVE_REGS macro), you'll have to change the fork code also.
+ * to the "_switch" path. If you change this , you'll have to
+ * change the fork code also.
*
* The code which creates the new task context is in 'copy_thread'
* in arch/ppc/kernel/process.c
oris r0,r0,MSR_VEC@h
#endif /* CONFIG_ALTIVEC */
andc r22,r22,r0
+ mtmsr r22
+ isync
stw r20,_NIP(r1)
stw r22,_MSR(r1)
stw r20,_LINK(r1)
li r0,0x0ff0
stw r0,TRAP(r1)
stw r1,KSP(r3) /* Set old stack pointer */
+
tophys(r0,r4)
CLR_TOP32(r0)
mtspr SPRG3,r0 /* Update current THREAD phys addr */
/* save the old current 'last' for return value */
mr r3,r2
addi r2,r4,-THREAD /* Update current */
- lwz r9,_MSR(r1) /* Returning to user mode? */
- andi. r9,r9,MSR_PR
- beq+ 10f /* if not, don't adjust kernel stack */
-8: addi r4,r1,INT_FRAME_SIZE /* size of frame */
- stw r4,THREAD+KSP(r2) /* save kernel stack pointer */
- tophys(r9,r1)
- CLR_TOP32(r9)
- mtspr SPRG2,r9 /* phys exception stack pointer */
-10: lwz r2,_CTR(r1)
- lwz r0,_LINK(r1)
- mtctr r2
- mtlr r0
- lwz r2,_XER(r1)
lwz r0,_CCR(r1)
- mtspr XER,r2
mtcrf 0xFF,r0
/* r3-r13 are destroyed -- Cort */
- REST_GPR(14, r1)
- REST_8GPRS(15, r1)
- REST_8GPRS(23, r1)
- REST_GPR(31, r1)
- lwz r2,_NIP(r1) /* Restore environment */
- /*
- * We need to hard disable here even if RTL is active since
- * being interrupted after here trashes SRR{0,1}
- * -- Cort
- */
- mfmsr r0 /* Get current interrupt state */
- rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
- SYNC /* Some chip revs have problems here... */
- mtmsr r0 /* Update machine state */
-
- lwz r0,_MSR(r1)
- mtspr SRR0,r2
- FIX_SRR1(r0,r2)
- mtspr SRR1,r0
- lwz r0,GPR0(r1)
- lwz r2,GPR2(r1)
- lwz r1,GPR1(r1)
- SYNC
- RFI
+ REST_2GPRS(14, r1)
+ REST_8GPRS(16, r1)
+ REST_8GPRS(24, r1)
+
+ lwz r4,_NIP(r1) /* Return to _switch caller in new task */
+ mtlr r4
+ addi r1,r1,INT_FRAME_SIZE
+ blr
.globl ret_from_fork
ret_from_fork:
bl schedule_tail
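+ /* if the child is being syscall-traced, call syscall_trace before returning */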
+ lwz r0,TASK_PTRACE(r2)
+ andi. r0,r0,PT_TRACESYS
+ bnel- syscall_trace
b ret_from_except
.globl ret_from_intercept
/*
- * BK Id: SCCS/s.head.S 1.23 06/28/01 15:50:16 paulus
+ * BK Id: SCCS/s.head.S 1.25 07/07/01 17:08:44 paulus
*/
/*
* PowerPC version
mtspr SPRG3,r4
li r3,0
mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
+ stw r3,PT_REGS(r4) /* set thread.regs to 0 for kernel thread */
/* enable MMU and jump to start_secondary */
li r4,MSR_KERNEL
/*
- * BK Id: SCCS/s.irq.c 1.28 06/28/01 16:15:56 paulus
+ * BK Id: SCCS/s.irq.c 1.30 07/19/01 16:51:32 paulus
*/
/*
* arch/ppc/kernel/irq.c
#include "local_irq.h"
-atomic_t ipi_recv;
-atomic_t ipi_sent;
+extern atomic_t ipi_recv;
+extern atomic_t ipi_sent;
void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);
/*
- * BK Id: SCCS/s.misc.S 1.19 06/15/01 13:56:56 paulus
+ * BK Id: SCCS/s.misc.S 1.21 07/07/01 17:00:08 paulus
*/
/*
* This file contains miscellaneous low-level functions.
bnelr /* return if parent */
li r0,0 /* clear out p->thread.regs */
stw r0,THREAD+PT_REGS(r2) /* since we don't have user ctx */
+ addi r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
+ stw r0,0(r1)
mtlr r6 /* fn addr in lr */
mr r3,r4 /* load arg and call fn */
blrl
/*
- * BK Id: SCCS/s.ppc_asm.h 1.10 05/17/01 18:14:21 cort
+ * BK Id: SCCS/s.ppc_asm.h 1.14 07/02/01 22:08:05 paulus
*/
/*
* arch/ppc/kernel/ppc_asm.h
#define SYNC
#endif
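+/*
+ * TLBSYNC is used after a tlbie: on SMP the tlbsync/sync pair makes sure
+ * the invalidation has been seen by all processors, on UP it is a no-op.
+ */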
+#ifndef CONFIG_SMP
+#define TLBSYNC
+#else /* CONFIG_SMP */
+#define TLBSYNC \
+ tlbsync; \
+ sync
+#endif /* CONFIG_SMP */
+
/*
* This instruction is not implemented on the PPC 603 or 601; however, on
* the 403GCX and 405GP tlbia IS defined and tlbie is not.
/*
- * BK Id: SCCS/s.process.c 1.19 06/15/01 13:56:56 paulus
+ * BK Id: SCCS/s.process.c 1.23 07/19/01 23:02:48 paulus
*/
/*
* linux/arch/ppc/kernel/process.c
char *sysmap = NULL;
unsigned long sysmap_size = 0;
-#undef SHOW_TASK_SWITCHES 1
-#undef CHECK_STACK 1
+#undef SHOW_TASK_SWITCHES
+#undef CHECK_STACK
#if defined(CHECK_STACK)
unsigned long
#else
giveup_altivec(last_task_used_altivec);
#endif /* __SMP__ */
- printk("MSR_VEC in enable_altivec_kernel\n");
}
#endif /* CONFIG_ALTIVEC */
check_stack(new);
#endif
-#ifdef SHOW_TASK_SWITCHES
- printk("%s/%d -> %s/%d NIP %08lx cpu %d root %x/%x\n",
- prev->comm,prev->pid,
- new->comm,new->pid,new->thread.regs->nip,new->processor,
- new->fs->root,prev->fs->root);
-#endif
#ifdef CONFIG_SMP
/* avoid complexity of lazy save/restore of fpu
* by just saving it every time we switch out if
/* Avoid the trap. On smp this never happens since
* we don't set last_task_used_altivec -- Cort
*/
- if ( last_task_used_altivec == new )
+ if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
new_thread = &new->thread;
old_thread = &current->thread;
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
- struct task_struct * p, struct pt_regs * regs)
+ struct task_struct *p, struct pt_regs *regs)
{
unsigned long msr;
- struct pt_regs * childregs, *kregs;
+ struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
-
+ unsigned long sp = (unsigned long)p + sizeof(union task_union);
+ unsigned long childframe;
+
/* Copy registers */
- childregs = ((struct pt_regs *)
- ((unsigned long)p + sizeof(union task_union)
- - STACK_FRAME_OVERHEAD)) - 2;
+ sp -= sizeof(struct pt_regs);
+ childregs = (struct pt_regs *) sp;
*childregs = *regs;
- if ((childregs->msr & MSR_PR) == 0)
- childregs->gpr[2] = (unsigned long) p; /* `current' in new task */
+ if ((childregs->msr & MSR_PR) == 0) {
+ /* for kernel thread, set `current' and stackptr in new task */
+ childregs->gpr[1] = sp + sizeof(struct pt_regs);
+ childregs->gpr[2] = (unsigned long) p;
+ }
childregs->gpr[3] = 0; /* Result from fork() */
p->thread.regs = childregs;
- p->thread.ksp = (unsigned long) childregs - STACK_FRAME_OVERHEAD;
- p->thread.ksp -= sizeof(struct pt_regs ) + STACK_FRAME_OVERHEAD;
- kregs = (struct pt_regs *)(p->thread.ksp + STACK_FRAME_OVERHEAD);
+ sp -= STACK_FRAME_OVERHEAD;
+ childframe = sp;
+
+ /*
+ * The way this works is that at some point in the future
+ * some task will call _switch to switch to the new task.
+ * That will pop off the stack frame created below and start
+ * the new task running at ret_from_fork. The new task will
+ * do some house keeping and then return from the fork or clone
+ * system call, using the stack frame created above.
+ */
+ sp -= sizeof(struct pt_regs);
+ kregs = (struct pt_regs *) sp;
+ sp -= STACK_FRAME_OVERHEAD;
+ p->thread.ksp = sp;
kregs->nip = (unsigned long)ret_from_fork;
- asm volatile("mfmsr %0" : "=r" (msr):);
- kregs->msr = msr;
- kregs->gpr[1] = (unsigned long)childregs - STACK_FRAME_OVERHEAD;
- kregs->gpr[2] = (unsigned long)p;
-
- if (usp >= (unsigned long) regs) {
- /* Stack is in kernel space - must adjust */
- childregs->gpr[1] = (unsigned long)(childregs + 1);
- } else {
- /* Provided stack is in user space */
- childregs->gpr[1] = usp;
- }
- p->thread.last_syscall = -1;
-
+
/*
* copy fpu info - assume lazy fpu switch now always
* -- Cort
*/
if (regs->msr & MSR_VEC)
giveup_altivec(current);
-
memcpy(&p->thread.vr, &current->thread.vr, sizeof(p->thread.vr));
p->thread.vscr = current->thread.vscr;
childregs->msr &= ~MSR_VEC;
#endif /* CONFIG_ALTIVEC */
+ p->thread.last_syscall = -1;
+
return 0;
}
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
set_fs(USER_DS);
+ memset(regs->gpr, 0, sizeof(regs->gpr));
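+ /* clears ctr and the next four fields of pt_regs (link, xer, ccr, mq) */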
+ memset(&regs->ctr, 0, 5 * sizeof(regs->ctr));
regs->nip = nip;
regs->gpr[1] = sp;
regs->msr = MSR_USER;
int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
- unsigned long clone_flags = p1;
- int res;
- lock_kernel();
- res = do_fork(clone_flags, regs->gpr[1], regs, 0);
-#ifdef CONFIG_SMP
- /* When we clone the idle task we keep the same pid but
- * the return value of 0 for both causes problems.
- * -- Cort
- */
- if ((current->pid == 0) && (current == &init_task))
- res = 1;
-#endif /* CONFIG_SMP */
- unlock_kernel();
- return res;
+ return do_fork(p1, regs->gpr[1], regs, 0);
}
int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
-
- int res;
-
- res = do_fork(SIGCHLD, regs->gpr[1], regs, 0);
-#ifdef CONFIG_SMP
- /* When we clone the idle task we keep the same pid but
- * the return value of 0 for both causes problems.
- * -- Cort
- */
- if ((current->pid == 0) && (current == &init_task))
- res = 1;
-#endif /* CONFIG_SMP */
- return res;
+ return do_fork(SIGCHLD, regs->gpr[1], regs, 0);
}
int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
/*
- * BK Id: SCCS/s.ptrace.c 1.5 05/17/01 18:14:22 cort
+ * BK Id: SCCS/s.ptrace.c 1.8 07/07/01 17:00:08 paulus
*/
/*
* linux/arch/ppc/kernel/ptrace.c
*/
static inline unsigned long get_reg(struct task_struct *task, int regno)
{
- if (regno < sizeof(struct pt_regs) / sizeof(unsigned long))
+ if (regno < sizeof(struct pt_regs) / sizeof(unsigned long)
+ && task->thread.regs != NULL)
return ((unsigned long *)task->thread.regs)[regno];
return (0);
}
static inline int put_reg(struct task_struct *task, int regno,
unsigned long data)
{
- if (regno <= PT_MQ) {
+ if (regno <= PT_MQ && task->thread.regs != NULL) {
if (regno == PT_MSR)
data = (data & MSR_DEBUGCHANGE)
| (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
set_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
- regs->msr |= MSR_SE;
+
+ if (regs != NULL)
+ regs->msr |= MSR_SE;
}
static inline void
clear_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
- regs->msr &= ~MSR_SE;
+
+ if (regs != NULL)
+ regs->msr &= ~MSR_SE;
}
int sys_ptrace(long request, long pid, long addr, long data)
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
if (index < PT_FPR0) {
tmp = get_reg(child, (int) index);
} else {
- if (child->thread.regs->msr & MSR_FP)
+ if (child->thread.regs != NULL
+ && child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
}
if (index < PT_FPR0) {
ret = put_reg(child, index, data);
} else {
- if (child->thread.regs->msr & MSR_FP)
+ if (child->thread.regs != NULL
+ && child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
ret = 0;
{
if (child == current)
goto out;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
{
if (child == current)
goto out;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
{
if (child == current)
goto out;
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
(tsk->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out_tsk;
rmb();
- if (!child->dumpable && !capable(CAP_SYS_PTRACE))
+ if (!child->mm->dumpable && !capable(CAP_SYS_PTRACE))
goto out_tsk;
/* the same process cannot be attached many times */
if (child->ptrace & PT_PTRACED)
CONFIG_SUN_CONSOLE=y
CONFIG_SUN_AUXIO=y
CONFIG_SUN_IO=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
# CONFIG_SUN4 is not set
# CONFIG_PCI is not set
CONFIG_SUN_OPENPROMFS=m
CONFIG_FB=y
CONFIG_DUMMY_CONSOLE=y
# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_E1355 is not set
CONFIG_FB_SBUS=y
CONFIG_FB_CGSIX=y
CONFIG_FB_BWTWO=y
CONFIG_BLK_DEV_SD=y
CONFIG_SD_EXTRA_DEVS=40
CONFIG_CHR_DEV_ST=y
-CONFIG_ST_EXTRA_DEVS=2
+# CONFIG_CHR_DEV_OSST is not set
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SR_EXTRA_DEVS=2
CONFIG_EFS_FS=m
# CONFIG_JFFS_FS is not set
# CONFIG_CRAMFS is not set
+# CONFIG_TMPFS is not set
# CONFIG_RAMFS is not set
CONFIG_ISO9660_FS=m
# CONFIG_JOLIET is not set
CONFIG_MINIX_FS=m
+# CONFIG_VXFS_FS is not set
# CONFIG_NTFS_FS is not set
# CONFIG_NTFS_RW is not set
CONFIG_HPFS_FS=m
CONFIG_ROMFS_FS=m
CONFIG_EXT2_FS=y
CONFIG_SYSV_FS=m
-# CONFIG_SYSV_FS_WRITE is not set
# CONFIG_UDF_FS is not set
# CONFIG_UDF_RW is not set
CONFIG_UFS_FS=m
# CONFIG_NLS_CODEPAGE_865 is not set
# CONFIG_NLS_CODEPAGE_866 is not set
# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_5 is not set
# CONFIG_NLS_ISO8859_6 is not set
# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
# CONFIG_NLS_ISO8859_15 is not set
# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
+#
+# Bluetooth support
+#
+# CONFIG_BLUEZ is not set
+
#
# Watchdog
#
-/* $Id: irq.c,v 1.112 2001/04/27 07:02:42 davem Exp $
+/* $Id: irq.c,v 1.113 2001/07/17 16:17:33 anton Exp $
* arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
* Sparc the IRQ's are basically 'cast in stone'
* and you are supposed to probe the prom's device
} while (action);
enable_pil_irq(irq);
irq_exit(cpu, irq);
+ if (softirq_pending(cpu))
+ do_softirq();
}
#ifdef CONFIG_BLK_DEV_FD
floppy_interrupt(irq, dev_id, regs);
irq_exit(cpu, irq);
enable_pil_irq(irq);
+ if (softirq_pending(cpu))
+ do_softirq();
}
#endif
pt_error_return(regs, EPERM);
goto out_tsk;
}
- if((!child->dumpable ||
+ if((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->uid) ||
(current->uid != child->suid) ||
-/* $Id: rtrap.S,v 1.56 2001/06/05 09:56:06 davem Exp $
+/* $Id: rtrap.S,v 1.57 2001/07/17 16:17:33 anton Exp $
* rtrap.S: Return from Sparc trap low-level code.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
.globl rtrap_patch3, rtrap_patch4, rtrap_patch5
.globl C_LABEL(ret_trap_lockless_ipi)
ret_trap_entry:
- ld [%curptr + AOFF_task_processor], %l3
- sll %l3, 5, %l3
- sethi %hi(C_LABEL(irq_stat)), %l4 ! &softirq_active
- add %l4, %l3, %l4
- ld [%l4 + %lo(C_LABEL(irq_stat))], %g5 ! softirq_pending
- cmp %g5, 0
- be C_LABEL(ret_trap_lockless_ipi)
- nop
- call C_LABEL(do_softirq)
- nop
-
C_LABEL(ret_trap_lockless_ipi):
andcc %t_psr, PSR_PS, %g0
be 1f
-/* $Id: sparc_ksyms.c,v 1.106 2001/01/11 15:07:09 davem Exp $
+/* $Id: sparc_ksyms.c,v 1.107 2001/07/17 16:17:33 anton Exp $
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__down_interruptible);
-/* rw semaphores */
-EXPORT_SYMBOL_NOVERS(___down_read);
-EXPORT_SYMBOL_NOVERS(___down_write);
-EXPORT_SYMBOL_NOVERS(___up_read);
-EXPORT_SYMBOL_NOVERS(___up_write);
EXPORT_SYMBOL(sparc_valid_addr_bitmap);
EXPORT_SYMBOL_PRIVATE(_set_bit);
EXPORT_SYMBOL_PRIVATE(_clear_bit);
EXPORT_SYMBOL_PRIVATE(_change_bit);
-EXPORT_SYMBOL_PRIVATE(_set_le_bit);
-EXPORT_SYMBOL_PRIVATE(_clear_le_bit);
#ifdef CONFIG_SMP
/* Kernel wide locking */
-/* $Id: sun4d_irq.c,v 1.27 2001/02/13 01:16:43 davem Exp $
+/* $Id: sun4d_irq.c,v 1.28 2001/07/17 16:17:33 anton Exp $
* arch/sparc/kernel/sun4d_irq.c:
* SS1000/SC2000 interrupt handling.
*
}
}
irq_exit(cpu, irq);
+ if (softirq_pending(cpu))
+ do_softirq();
}
unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq)
jmpl %o7, %g0
mov %g4, %o7
- /* Now the little endian versions. */
- .globl ___set_le_bit
-___set_le_bit:
- rd %psr, %g3
- nop; nop; nop
- or %g3, PSR_PIL, %g5
- wr %g5, 0x0, %psr
- nop; nop; nop
-#ifdef CONFIG_SMP
- set C_LABEL(bitops_spinlock), %g5
-2: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
- orcc %g7, 0x0, %g0 ! Did we get it?
- bne 2b ! Nope...
-#endif
- ldub [%g1], %g7
- or %g7, %g2, %g5
- and %g7, %g2, %g2
-#ifdef CONFIG_SMP
- stb %g5, [%g1]
- set C_LABEL(bitops_spinlock), %g5
- stb %g0, [%g5]
-#else
- stb %g5, [%g1]
-#endif
- wr %g3, 0x0, %psr
- nop; nop; nop
- jmpl %o7, %g0
- mov %g4, %o7
-
- .globl ___clear_le_bit
-___clear_le_bit:
- rd %psr, %g3
- nop; nop; nop
- or %g3, PSR_PIL, %g5
- wr %g5, 0x0, %psr
- nop; nop; nop
-#ifdef CONFIG_SMP
- set C_LABEL(bitops_spinlock), %g5
-2: ldstub [%g5], %g7 ! Spin on the byte lock for SMP.
- orcc %g7, 0x0, %g0 ! Did we get it?
- bne 2b ! Nope...
-#endif
- ldub [%g1], %g7
- andn %g7, %g2, %g5
- and %g7, %g2, %g2
-#ifdef CONFIG_SMP
- stb %g5, [%g1]
- set C_LABEL(bitops_spinlock), %g5
- stb %g0, [%g5]
-#else
- stb %g5, [%g1]
-#endif
- wr %g3, 0x0, %psr
- nop; nop; nop
- jmpl %o7, %g0
- mov %g4, %o7
-
.globl __bitops_end
__bitops_end:
-/* $Id: generic.c,v 1.12 2001/04/09 21:40:46 davem Exp $
+/* $Id: generic.c,v 1.13 2001/07/17 16:17:33 anton Exp $
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
+#include <linux/pagemap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
end = PGDIR_SIZE;
offset -= address;
do {
- pte_t * pte = pte_alloc(pmd, address);
+ pte_t * pte = pte_alloc(current->mm, pmd, address);
if (!pte)
return -ENOMEM;
- spin_lock(&current->mm->page_table_lock);
io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
- spin_unlock(&current->mm->page_table_lock);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
pgd_t * dir;
unsigned long beg = from;
unsigned long end = from + size;
+ struct mm_struct *mm = current->mm;
prot = __pgprot(pg_iobits);
offset -= from;
- dir = pgd_offset(current->mm, from);
- flush_cache_range(current->mm, beg, end);
+ dir = pgd_offset(mm, from);
+ flush_cache_range(mm, beg, end);
+
+ spin_lock(&mm->page_table_lock);
while (from < end) {
- pmd_t *pmd = pmd_alloc(dir, from);
+ pmd_t *pmd = pmd_alloc(current->mm, dir, from);
error = -ENOMEM;
if (!pmd)
break;
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
+ spin_unlock(&mm->page_table_lock);
+
flush_tlb_range(current->mm, beg, end);
return error;
}
-/* $Id: init.c,v 1.98 2001/04/14 21:13:45 davem Exp $
+/* $Id: init.c,v 1.99 2001/07/17 16:17:33 anton Exp $
* linux/arch/sparc/mm/init.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
unsigned long totalram_pages;
unsigned long totalhigh_pages;
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-pte_t *__bad_pagetable(void)
-{
- memset((void *) &empty_bad_page_table, 0, PAGE_SIZE);
- return (pte_t *) &empty_bad_page_table;
-}
-
-pte_t __bad_page(void)
-{
- memset((void *) &empty_bad_page, 0, PAGE_SIZE);
- return pte_mkdirty(mk_pte_phys((unsigned long)__pa(&empty_bad_page) + phys_base,
- PAGE_SHARED));
-}
-
pte_t *kmap_pte;
pgprot_t kmap_prot;
-/* $Id: srmmu.c,v 1.229 2001/04/14 21:13:45 davem Exp $
+/* $Id: srmmu.c,v 1.230 2001/07/17 16:17:33 anton Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
flush_tlb_all();
}
-static inline pgd_t *srmmu_pgd_alloc(void)
+static inline pgd_t *srmmu_get_pgd_fast(void)
{
pgd_t *pgd = NULL;
return pgd;
}
-static void srmmu_pgd_free(pgd_t *pgd)
+static void srmmu_free_pgd_fast(pgd_t *pgd)
{
srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
}
-pmd_t *empty_bad_pmd_table;
-pte_t *empty_bad_pte_table;
-
-/*
- * We init them before every return and make them writable-shared.
- * This guarantees we get out of the kernel in some more or less sane
- * way.
- */
-static pmd_t * get_bad_pmd_table(void)
-{
- int i;
-
- for (i = 0; i < PAGE_SIZE/sizeof(pmd_t); i++)
- srmmu_pmd_set(&(empty_bad_pmd_table[i]), empty_bad_pte_table);
-
- return empty_bad_pmd_table;
-}
-
-static pte_t * get_bad_pte_table(void)
+static pte_t *srmmu_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
- pte_t v;
- int i;
-
- memset((void *)&empty_bad_page, 0, PAGE_SIZE);
-
- v = srmmu_pte_mkdirty(srmmu_mk_pte_phys(__pa(&empty_bad_page) + phys_base, PAGE_SHARED));
-
- for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
- srmmu_set_pte(&(empty_bad_pte_table[i]), v);
-
- return empty_bad_pte_table;
+ return (pte_t *)srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
}
-void __handle_bad_pgd(pgd_t *pgd)
+static pte_t *srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pgd_ERROR(*pgd);
- srmmu_pgd_set(pgd, get_bad_pmd_table());
+ BUG();
}
-void __handle_bad_pmd(pmd_t *pmd)
-{
- pmd_ERROR(*pmd);
- srmmu_pmd_set(pmd, get_bad_pte_table());
-}
-
-static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
- if(srmmu_pmd_none(*pmd)) {
- pte_t *page = (pte_t *)srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
- if(page) {
- srmmu_pmd_set(pmd, page);
- return page + address;
- }
- srmmu_pmd_set(pmd, get_bad_pte_table());
- return NULL;
- }
- if(srmmu_pmd_bad(*pmd)) {
- __handle_bad_pmd(pmd);
- return NULL;
- }
- return ((pte_t *) pmd_page(*pmd)) + address;
-}
-
-static inline void srmmu_pte_free(pte_t *pte)
+static void srmmu_free_pte_fast(pte_t *pte)
{
srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_TABLE_SIZE);
}
-static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
+static pmd_t *srmmu_pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
- address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
- if(srmmu_pgd_none(*pgd)) {
- pmd_t *page = (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
- if(page) {
- srmmu_pgd_set(pgd, page);
- return page + address;
- }
- srmmu_pgd_set(pgd, get_bad_pmd_table());
- return NULL;
- }
- if(srmmu_pgd_bad(*pgd)) {
- __handle_bad_pgd(pgd);
- return NULL;
- }
- return (pmd_t *) srmmu_pgd_page(*pgd) + address;
+ return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
}
-static void srmmu_pmd_free(pmd_t * pmd)
+static void srmmu_free_pmd_fast(pmd_t * pmd)
{
srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
}
flush_cache_all();
flush_tlb_all();
- empty_bad_pmd_table = (pte_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
- empty_bad_pte_table = (pte_t *)srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
-
/*
* This does not logically belong here, but we need to
* call it at the moment we are able to use the bootmem
BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pte_free_kernel, srmmu_pte_free, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pmd_free_kernel, srmmu_pmd_free, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pte_alloc_kernel, srmmu_pte_alloc, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pmd_alloc_kernel, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pte_alloc, srmmu_pte_alloc, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pmd_free, srmmu_pmd_free, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pmd_alloc, srmmu_pmd_alloc, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pgd_free, srmmu_pgd_free, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pgd_alloc, srmmu_pgd_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_one_fast, srmmu_pte_alloc_one_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pmd_fast, srmmu_free_pmd_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pmd_alloc_one_fast, srmmu_pmd_alloc_one_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);
BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
-/* $Id: sun4c.c,v 1.206 2001/04/14 21:13:45 davem Exp $
+/* $Id: sun4c.c,v 1.207 2001/07/17 16:17:33 anton Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
}
/* to find an entry in a page-table-directory */
-extern inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
{
return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
}
return (pte_t *) sun4c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
}
-/* Please take special note on the foo_kernel() routines below, our
- * fast in window fault handler wants to get at the pte's for vmalloc
- * area with traps off, therefore they _MUST_ be locked down to prevent
- * a watchdog from happening. It only takes 4 pages of pte's to lock
- * down the maximum vmalloc space possible on sun4c so we statically
- * allocate these page table pieces in the kernel image. Therefore
- * we should never have to really allocate or free any kernel page
- * table information.
- */
-
-/* Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any, and marks the page tables reserved.
- */
-static void sun4c_pte_free_kernel(pte_t *pte)
-{
- /* This should never get called. */
- panic("sun4c_pte_free_kernel called, can't happen...");
-}
-
-static pte_t *sun4c_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
-{
- if (address >= SUN4C_LOCK_VADDR)
- return NULL;
- address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
- if (sun4c_pmd_none(*pmd))
- panic("sun4c_pmd_none for kernel pmd, can't happen...");
- if (sun4c_pmd_bad(*pmd)) {
- printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
- *pmd = __pmd(PGD_TABLE | (unsigned long) BAD_PAGETABLE);
- return NULL;
- }
- return (pte_t *) sun4c_pmd_page(*pmd) + address;
-}
-
static void sun4c_free_pte_slow(pte_t *pte)
{
free_page((unsigned long)pte);
free_page((unsigned long)pgd);
}
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-static void sun4c_pmd_free_kernel(pmd_t *pmd)
-{
-}
-
-static pmd_t *sun4c_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
-{
- return (pmd_t *) pgd;
-}
-
-extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
+static pgd_t *sun4c_get_pgd_fast(void)
{
unsigned long *ret;
return (pgd_t *)ret;
}
-extern __inline__ void sun4c_free_pgd_fast(pgd_t *pgd)
+static void sun4c_free_pgd_fast(pgd_t *pgd)
{
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
}
-extern __inline__ pte_t *sun4c_get_pte_fast(void)
+static pte_t *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
+ if (pte)
+ memset(pte, 0, PAGE_SIZE);
+ return pte;
+}
+
+pte_t *sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
unsigned long *ret;
return (pte_t *)ret;
}
-extern __inline__ void sun4c_free_pte_fast(pte_t *pte)
+static __inline__ void sun4c_free_pte_fast(pte_t *pte)
{
*(unsigned long *)pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
}
-static void sun4c_pte_free(pte_t *pte)
-{
- sun4c_free_pte_fast(pte);
-}
-
-static pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
- if (sun4c_pmd_none(*pmd)) {
- pte_t *page = (pte_t *) sun4c_get_pte_fast();
-
- if (page) {
- *pmd = __pmd(PGD_TABLE | (unsigned long) page);
- return page + address;
- }
- page = (pte_t *) get_free_page(GFP_KERNEL);
- if (sun4c_pmd_none(*pmd)) {
- if (page) {
- *pmd = __pmd(PGD_TABLE | (unsigned long) page);
- return page + address;
- }
- *pmd = __pmd(PGD_TABLE | (unsigned long) BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long) page);
- }
- if (sun4c_pmd_bad(*pmd)) {
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- *pmd = __pmd(PGD_TABLE | (unsigned long) BAD_PAGETABLE);
- return NULL;
- }
- return (pte_t *) sun4c_pmd_page(*pmd) + address;
-}
-
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
-static void sun4c_pmd_free(pmd_t * pmd)
-{
-}
-
-static pmd_t *sun4c_pmd_alloc(pgd_t * pgd, unsigned long address)
+static pmd_t *sun4c_pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
- return (pmd_t *) pgd;
+ BUG();
+ return NULL;
}
-static void sun4c_pgd_free(pgd_t *pgd)
+static void sun4c_free_pmd_fast(pmd_t * pmd)
{
- sun4c_free_pgd_fast(pgd);
-}
-
-static pgd_t *sun4c_pgd_alloc(void)
-{
- return sun4c_get_pgd_fast();
}
static int sun4c_check_pgt_cache(int low, int high)
do {
if (pgd_quicklist)
sun4c_free_pgd_slow(sun4c_get_pgd_fast()), freed++;
- /* Only two level page tables at the moment, sun4 3 level mmu is not supported - Anton */
-#if 0
- if (pmd_quicklist)
- sun4c_free_pmd_slow(sun4c_get_pmd_fast()), freed++;
-#endif
if (pte_quicklist)
- sun4c_free_pte_slow(sun4c_get_pte_fast()), freed++;
+ sun4c_free_pte_slow(sun4c_pte_alloc_one_fast(NULL, 0)), freed++;
} while (pgtable_cache_size > low);
}
return freed;
BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_offset, sun4c_pte_offset, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pte_free_kernel, sun4c_pte_free_kernel, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pmd_free_kernel, sun4c_pmd_free_kernel, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(pte_alloc_kernel, sun4c_pte_alloc_kernel, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pmd_alloc_kernel, sun4c_pmd_alloc_kernel, BTFIXUPCALL_RETO0);
- BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pte_alloc, sun4c_pte_alloc, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pmd_free, sun4c_pmd_free, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(pmd_alloc, sun4c_pmd_alloc, BTFIXUPCALL_RETO0);
- BTFIXUPSET_CALL(pgd_free, sun4c_pgd_free, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(pgd_alloc, sun4c_pgd_alloc, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pte_fast, sun4c_free_pte_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_one, sun4c_pte_alloc_one, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_alloc_one_fast, sun4c_pte_alloc_one_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(free_pmd_fast, sun4c_free_pmd_fast, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(pmd_alloc_one_fast, sun4c_pmd_alloc_one_fast, BTFIXUPCALL_RETO0);
+ BTFIXUPSET_CALL(free_pgd_fast, sun4c_free_pgd_fast, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(get_pgd_fast, sun4c_get_pgd_fast, BTFIXUPCALL_NORM);
BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE);
BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED);
-# $Id: config.in,v 1.146 2001/06/16 04:15:26 davem Exp $
+# $Id: config.in,v 1.147 2001/07/18 07:52:34 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
endmenu
source drivers/fc4/Config.in
-
-if [ "$CONFIG_PCI" = "y" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then
- mainmenu_option next_comment
- comment 'IEEE 1394 (FireWire) support'
-
- dep_tristate 'IEEE 1394 (FireWire) support (EXPERIMENTAL)' CONFIG_IEEE1394 $CONFIG_PCI
-
- if [ "$CONFIG_IEEE1394" != "n" ]; then
- dep_tristate 'Texas Instruments PCILynx support' CONFIG_IEEE1394_PCILYNX $CONFIG_IEEE1394
- if [ "$CONFIG_IEEE1394_PCILYNX" != "n" ]; then
- bool ' Use PCILynx local RAM' CONFIG_IEEE1394_PCILYNX_LOCALRAM
- bool ' Support for non-IEEE1394 local ports' CONFIG_IEEE1394_PCILYNX_PORTS
- fi
-
- dep_tristate 'OHCI (Open Host Controller Interface) support' CONFIG_IEEE1394_OHCI1394 $CONFIG_IEEE1394
-
- dep_tristate 'Raw IEEE1394 I/O support' CONFIG_IEEE1394_RAWIO $CONFIG_IEEE1394
-
- bool 'Excessive debugging output' CONFIG_IEEE1394_VERBOSEDEBUG
- fi
- endmenu
-fi
+source drivers/message/fusion/Config.in
+source drivers/ieee1394/Config.in
if [ "$CONFIG_NET" = "y" ]; then
mainmenu_option next_comment
#
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_CML1=m
+# CONFIG_PARPORT_SERIAL is not set
CONFIG_PARPORT_PC_FIFO=y
# CONFIG_PARPORT_PC_SUPERIO is not set
# CONFIG_PARPORT_AMIGA is not set
# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
CONFIG_FB_PM2_PCI=y
# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_PVR2 is not set
+# CONFIG_FB_PVR2_DEBUG is not set
# CONFIG_FB_E1355 is not set
# CONFIG_FB_MATROX is not set
CONFIG_FB_ATY=y
CONFIG_SCSI_PLUTO=m
CONFIG_SCSI_FCAL=m
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=m
+# CONFIG_FUSION_BOOT is not set
+
+#
+# (ability to boot linux kernel from Fusion device is DISABLED!)
+#
+CONFIG_FUSION_ISENSE=m
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LAN=m
+CONFIG_NET_FC=y
+
#
# IEEE 1394 (FireWire) support
#
# CONFIG_8139TOO_PIO is not set
# CONFIG_8139TOO_TUNE_TWISTER is not set
# CONFIG_8139TOO_8129 is not set
-# CONFIG_SIS900 is not set
+CONFIG_SIS900=m
CONFIG_EPIC100=m
-# CONFIG_SUNDANCE is not set
+CONFIG_SUNDANCE=m
# CONFIG_TLAN is not set
CONFIG_VIA_RHINE=m
CONFIG_WINBOND_840=m
#
CONFIG_ACENIC=m
# CONFIG_ACENIC_OMIT_TIGON_I is not set
+CONFIG_DL2K=m
CONFIG_MYRI_SBUS=m
-# CONFIG_HAMACHI is not set
-# CONFIG_YELLOWFIN is not set
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
CONFIG_SK98LIN=m
+# CONFIG_TIGON3 is not set
CONFIG_FDDI=y
# CONFIG_DEFXX is not set
CONFIG_SKFP=m
# Token Ring devices
#
# CONFIG_TR is not set
-# CONFIG_NET_FC is not set
+CONFIG_NET_FC=y
+# CONFIG_IPHASE5526 is not set
# CONFIG_RCPCI is not set
# CONFIG_SHAPER is not set
CONFIG_ISO9660_FS=m
CONFIG_JOLIET=y
CONFIG_MINIX_FS=m
-CONFIG_VXFS_FS=m
+# CONFIG_VXFS_FS is not set
# CONFIG_NTFS_FS is not set
# CONFIG_NTFS_RW is not set
CONFIG_HPFS_FS=m
CONFIG_ROMFS_FS=m
CONFIG_EXT2_FS=y
CONFIG_SYSV_FS=m
-CONFIG_SYSV_FS_WRITE=y
CONFIG_UDF_FS=m
CONFIG_UDF_RW=y
CONFIG_UFS_FS=m
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_VIA82CXXX is not set
+# CONFIG_MIDI_VIA82CXXX is not set
# CONFIG_SOUND_OSS is not set
# CONFIG_SOUND_TVMIXER is not set
* Currently only a stub-function.
*
* Note that setuid/setgid files won't make a core-dump if the uid/gid
- * changed due to the set[u|g]id. It's enforced by the "current->dumpable"
+ * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
* field, which also makes sure the core-dumps won't be recursive if the
* dumping of the process results in another error..
*/
pt_error_return(regs, EPERM);
goto out_tsk;
}
- if ((!child->dumpable ||
+ if ((!child->mm->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->uid) ||
(current->uid != child->suid) ||
if (__rq->elevator_sequence-- <= 0)
break;
- if (__rq->sem)
+ if (__rq->waiting)
continue;
if (__rq->rq_dev != bh->b_rdev)
continue;
continue;
if (__rq->nr_sectors + count > max_sectors)
continue;
- if (__rq->sem)
+ if (__rq->waiting)
continue;
if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
*req = __rq;
extern int atmdev_init(void);
extern int i2o_init(void);
extern int cpqarray_init(void);
-extern void ieee1394_init(void);
int __init device_init(void)
{
/* This has to be done before scsi_dev_init */
soc_probe();
#endif
-#ifdef CONFIG_IEEE1394
- ieee1394_init();
-#endif
#ifdef CONFIG_BLK_CPQ_DA
cpqarray_init();
#endif
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
+#include <linux/completion.h>
#include <asm/system.h>
#include <asm/io.h>
if (req->cmd != next->cmd
|| req->rq_dev != next->rq_dev
|| req->nr_sectors + next->nr_sectors > max_sectors
- || next->sem)
+ || next->waiting)
return;
/*
* If we are not allowed to merge these requests, then
req->nr_segments = 1; /* Always 1 for a new request. */
req->nr_hw_segments = 1; /* Always 1 for a new request. */
req->buffer = bh->b_data;
- req->sem = NULL;
+ req->waiting = NULL;
req->bh = bh;
req->bhtail = bh;
req->rq_dev = bh->b_rdev;
void end_that_request_last(struct request *req)
{
- if (req->sem != NULL)
- up(req->sem);
+ if (req->waiting != NULL)
+ complete(req->waiting);
blkdev_release_request(req);
}
*/
req->errors = 0;
blkdev_dequeue_request(req);
- req->sem = NULL;
+ req->waiting = NULL;
ireq = i2ob_queues[dev->unit]->i2ob_qhead;
i2ob_queues[dev->unit]->i2ob_qhead = ireq->next;
#include <linux/errno.h>
#include <linux/cdrom.h>
#include <linux/ide.h>
+#include <linux/completion.h>
#include <asm/irq.h>
#include <asm/io.h>
}
static void cdrom_queue_request_sense(ide_drive_t *drive,
- struct semaphore *sem,
+ struct completion *wait,
struct request_sense *sense,
struct packet_command *failed_command)
{
ide_init_drive_cmd(rq);
rq->cmd = REQUEST_SENSE_COMMAND;
rq->buffer = (char *) pc;
- rq->sem = sem;
+ rq->waiting = wait;
(void) ide_do_drive_cmd(drive, rq, ide_preempt);
}
} else if (rq->cmd == PACKET_COMMAND) {
/* All other functions, except for READ. */
- struct semaphore *sem = NULL;
+ struct completion *wait = NULL;
pc = (struct packet_command *) rq->buffer;
/* Check for tray open. */
command request to the request sense request. */
if ((stat & ERR_STAT) != 0) {
- sem = rq->sem;
- rq->sem = NULL;
+ wait = rq->waiting;
+ rq->waiting = NULL;
}
pc->stat = 1;
cdrom_end_request (1, drive);
if ((stat & ERR_STAT) != 0)
- cdrom_queue_request_sense(drive, sem, pc->sense, pc);
+ cdrom_queue_request_sense(drive, wait, pc->sense, pc);
} else {
/* Handle errors from READ and WRITE requests. */
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/devfs_fs_kernel.h>
+#include <linux/completion.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
ide_hwgroup_t *hwgroup = HWGROUP(drive);
unsigned int major = HWIF(drive)->major;
struct list_head *queue_head = &drive->queue.queue_head;
- DECLARE_MUTEX_LOCKED(sem);
+ DECLARE_COMPLETION(wait);
#ifdef CONFIG_BLK_DEV_PDC4030
if (HWIF(drive)->chipset == ide_pdc4030 && rq->buffer != NULL)
rq->rq_status = RQ_ACTIVE;
rq->rq_dev = MKDEV(major,(drive->select.b.unit)<<PARTN_BITS);
if (action == ide_wait)
- rq->sem = &sem;
+ rq->waiting = &wait;
spin_lock_irqsave(&io_request_lock, flags);
if (list_empty(queue_head) || action == ide_preempt) {
if (action == ide_preempt)
ide_do_request(hwgroup, 0);
spin_unlock_irqrestore(&io_request_lock, flags);
if (action == ide_wait) {
- down(&sem); /* wait for it to be serviced */
+ wait_for_completion(&wait); /* wait for it to be serviced */
return rq->errors ? -EIO : 0; /* return -EIO if errors */
}
return 0;
if [ "$CONFIG_PCI" = "y" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then
mainmenu_option next_comment
- comment 'IEEE 1394 (FireWire) support'
+ comment 'IEEE 1394 (FireWire) support (EXPERIMENTAL)'
dep_tristate 'IEEE 1394 (FireWire) support (EXPERIMENTAL)' CONFIG_IEEE1394 $CONFIG_PCI
if [ "$CONFIG_IEEE1394" != "n" ]; then
- dep_tristate 'Texas Instruments PCILynx support' CONFIG_IEEE1394_PCILYNX $CONFIG_IEEE1394
+ comment "Device Drivers"
+ dep_tristate ' Texas Instruments PCILynx support' CONFIG_IEEE1394_PCILYNX $CONFIG_IEEE1394
if [ "$CONFIG_IEEE1394_PCILYNX" != "n" ]; then
- bool ' Use PCILynx local RAM' CONFIG_IEEE1394_PCILYNX_LOCALRAM
- bool ' Support for non-IEEE1394 local ports' CONFIG_IEEE1394_PCILYNX_PORTS
+ bool ' Use PCILynx local RAM' CONFIG_IEEE1394_PCILYNX_LOCALRAM
+ bool ' Support for non-IEEE1394 local ports' CONFIG_IEEE1394_PCILYNX_PORTS
fi
+ dep_tristate ' OHCI-1394 support' CONFIG_IEEE1394_OHCI1394 $CONFIG_IEEE1394
-# this driver is unsupported now:
-# dep_tristate 'Adaptec AIC-5800 (AHA-89xx) support' CONFIG_IEEE1394_AIC5800 $CONFIG_IEEE1394
-
- dep_tristate 'OHCI (Open Host Controller Interface) support' CONFIG_IEEE1394_OHCI1394 $CONFIG_IEEE1394
- dep_tristate 'Video1394 support' CONFIG_IEEE1394_VIDEO1394 $CONFIG_IEEE1394_OHCI1394
-
- dep_tristate 'Raw IEEE1394 I/O support' CONFIG_IEEE1394_RAWIO $CONFIG_IEEE1394
+ comment "Protocol Drivers"
+ dep_tristate ' OHCI-1394 Video support' CONFIG_IEEE1394_VIDEO1394 $CONFIG_IEEE1394_OHCI1394
+ dep_tristate ' SBP-2 support (Harddisks etc.)' CONFIG_IEEE1394_SBP2 $CONFIG_SCSI $CONFIG_IEEE1394
+ dep_tristate ' Raw IEEE1394 I/O support' CONFIG_IEEE1394_RAWIO $CONFIG_IEEE1394
bool 'Excessive debugging output' CONFIG_IEEE1394_VERBOSEDEBUG
fi
#
# Makefile for the Linux IEEE 1394 implementation
#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (not a .c file).
-#
-# Note 2! The CFLAGS definitions are now in the main makefile.
-#
O_TARGET := ieee1394drv.o
obj-$(CONFIG_IEEE1394) += ieee1394.o
obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
-obj-$(CONFIG_IEEE1394_AIC5800) += aic5800.o
obj-$(CONFIG_IEEE1394_OHCI1394) += ohci1394.o
obj-$(CONFIG_IEEE1394_VIDEO1394) += video1394.o
obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
+obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
+obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
+obj-$(CONFIG_IEEE1394_IP1394) += ip1394.o
include $(TOPDIR)/Rules.make
+++ /dev/null
-/*
- * +++ THIS DRIVER IS ORPHANED AND UNSUPPORTED +++
- *
- * aic5800.c - Adaptec AIC-5800 PCI-IEEE1394 chip driver
- * Copyright (C)1999 Emanuel Pirker <epirker@edu.uni-klu.ac.at>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/delay.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-#include "ieee1394_types.h"
-#include "hosts.h"
-#include "ieee1394_core.h"
-#include "ieee1394.h"
-#include "aic5800.h"
-
-
-
-/// print general (card independent) information
-#define PRINT_G(level, fmt, args...) printk(level "aic5800: " fmt "\n" , ## args)
-/// print card specific information
-#define PRINT(level, card, fmt, args...) printk(level "aic5800-%d: " fmt "\n" , card , ## args)
-
-/// card array
-static struct aic5800 cards[MAX_AIC5800_CARDS];
-/// holds the number of installed aic5800 cards
-static int num_of_cards = 0;
-
-static int add_card(struct pci_dev *dev);
-static void remove_card(struct aic5800 *aic);
-static int init_driver(void);
-
-
-/*****************************************************************
- * Auxiliary functions needed to read the EEPROM
- * Daniel Minitti
- *****************************************************************/
-#define SEEPDOUT 0x1
-#define SEEPDIN 0x02
-#define SEEPSK 0x04
-#define SEEPCS 0x08
-#define SEEPCYC 0x10
-#define SEEPBUSY 0x20
-
-#define CLOCK_PULSE() {\
- int cnt=200;\
- while(cnt-->0 && reg_read(aic, misc_SEEPCTL) & SEEPBUSY);\
- if (reg_read(aic, misc_SEEPCTL) & SEEPBUSY) printk("BUSY ");\
- }
-
-static inline unsigned short read_seeprom_word(struct aic5800 *aic,
- int offset)
-{
- int i;
- unsigned char temp;
- unsigned char read_cmd[3] = {1,1,0};
- unsigned short rd;
-
- // send chip select for one clock cycle.
- reg_write(aic, misc_SEEPCTL, SEEPSK|SEEPCS);
- CLOCK_PULSE();
-
- // write start bit (1) & READ op-code (10b)
- for (i=0; i<sizeof(read_cmd); i++) {
- temp = SEEPCS | SEEPCYC | read_cmd[i];
- reg_write(aic, misc_SEEPCTL, temp);
- CLOCK_PULSE();
- temp = temp ^ SEEPSK;
- reg_write(aic, misc_SEEPCTL, temp);
- CLOCK_PULSE();
- }
- // write 8 bit address (MSB --> LSB)
- for (i=7; i>=0; i--) {
- temp = offset;
- temp = (temp >> i) & 1;
- temp = SEEPCS | SEEPCYC | temp;
- reg_write(aic, misc_SEEPCTL, temp);
- CLOCK_PULSE();
- temp = temp ^ SEEPSK;
- reg_write(aic, misc_SEEPCTL, temp);
- CLOCK_PULSE();
- }
- // read 16 bit (MSB --> LSB)
- rd = 0;
- for (i=0; i<=16; i++) {
- temp = SEEPCS | SEEPCYC;
- reg_write(aic, misc_SEEPCTL, temp);
- CLOCK_PULSE();
- temp = temp ^ SEEPSK;
- rd = (rd << 1) | (unsigned short)((reg_read(aic, misc_SEEPCTL)
-& SEEPDIN)>>1);
- reg_write(aic, misc_SEEPCTL, temp);
- CLOCK_PULSE();
- }
-
- // reset chip select for the next command cycle
- reg_write(aic, misc_SEEPCTL, SEEPCYC);
- CLOCK_PULSE();
- reg_write(aic, misc_SEEPCTL, SEEPCYC | SEEPSK);
- CLOCK_PULSE();
- reg_write(aic, misc_SEEPCTL, SEEPCYC);
- CLOCK_PULSE();
-
- reg_write(aic, misc_SEEPCTL, 0);
- CLOCK_PULSE();
-
- return rd;
-}
-
-#undef DEBUG_SEEPROM
-
-/** Read 64-bit GUID (Global Unique ID) from SEEPROM
- *
- * It works well on AHA-8945.
- * On AHA-8920 it works well only on first time, It returns ffff... on
- * the other times.
- *****************************************************************/
-static unsigned long long read_guid(struct aic5800 *aic)
-{
- int i;
- unsigned long long guid;
-
-#ifdef DEBUG_SEEPROM
- printk("\n");
- printk("SEEPCTL value = 0x%x\n", reg_read(aic, misc_SEEPCTL));
-#endif
-
- /* read GUID */
- guid = 0;
- for (i=0x10; i<0x14; i++)
- guid = (guid << 16) | read_seeprom_word(aic,i);
-
-#ifdef DEBUG_SEEPROM
- for (i=0; i<3; i++)
- printk("%x ", (unsigned int) read_seeprom_word(aic,i));
- printk("\nGUID = ");
- for (i=3; i>=0; i--)
- printk("%x ", (unsigned int)(guid>>(16*i))&0xffff);
-
- printk("\nSEEPCTL value = 0x%x\n", reg_read(aic, misc_SEEPCTL));
-#endif
- return guid;
-}
-
-#undef CLOCK_PULSE()
-
-static int aic_detect(struct hpsb_host_template *tmpl)
-{
- struct hpsb_host *host;
- int i;
-
- init_driver();
-
- for (i = 0; i < num_of_cards; i++) {
- host = hpsb_get_host(tmpl, 0);
- if (host == NULL) {
- /* simply don't init more after out of mem */
- return i;
- }
- host->hostdata = &cards[i];
- cards[i].host = host;
- }
-
- return num_of_cards;
-}
-
-static int aic_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
-{
- struct aic5800 *aic = host->hostdata;
- int retval = 0;
- unsigned long flags;
- struct hpsb_packet *packet, *lastpacket;
-
- switch (cmd) {
- case RESET_BUS:
- reg_write(aic, misc_PhyControl, 0x00004140 );
- break;
-
- case GET_CYCLE_COUNTER:
- arg = reg_read(aic, misc_CycleTimer);
- break;
-
- case SET_CYCLE_COUNTER:
- reg_write(aic, misc_CycleTimer, arg);
- break;
-
- case SET_BUS_ID:
- reg_clear_bits(aic, misc_NodeID, 0xFFC0);
- reg_set_bits(aic, misc_NodeID, (arg<<6));
- break;
-
- case ACT_CYCLE_MASTER:
- if (arg) {
- /* enable cycleMaster */
- reg_set_bits(aic, misc_Control, 0x20000);
- } else {
- /* disable cycleMaster */
- reg_clear_bits(aic, misc_Control, 0x20000);
- };
- break;
-
- case CANCEL_REQUESTS:
- spin_lock_irqsave(&aic->async_queue_lock, flags);
- /* stop any chip activity */
- reg_write( aic, AT_ChannelControl, 0x80000000);
- packet = aic->async_queue;
- aic->async_queue = NULL;
- spin_unlock_irqrestore(&aic->async_queue_lock, flags);
-
- while (packet != NULL) {
- lastpacket = packet;
- packet = packet->xnext;
- hpsb_packet_sent(host, lastpacket, ACKX_ABORTED);
- }
-
- break;
-
- case MODIFY_USAGE:
- if (arg) {
- MOD_INC_USE_COUNT;
- } else {
- MOD_DEC_USE_COUNT;
- }
- break;
-
-#if 0
- case DEBUG_DUMPINFO:
- PRINT(KERN_INFO, aic->id, AIC5800_DRIVER_NAME);
- PRINT(KERN_INFO, aic->id, " Register MMIO base: 0x%p\n",
- aic->registers);
- PRINT(KERN_INFO, aic->id, " NodeID: 0x%x\n",
- reg_read(aic, misc_NodeID) );
- PRINT(KERN_INFO,aic->id, " #Intr: %lu BusResets: %lu\n",
- aic->NumInterrupts, aic->NumBusResets);
- PRINT(KERN_INFO, aic->id, " TxPackets: %lu RxPackets: %lu\n",
- aic->TxPackets, aic->RxPackets);
- PRINT(KERN_INFO,aic->id, " TxRdy: %lu ATErr: %lu HdrErr: %lu TcodeErr: %lu SendRej: %lu\n",
- aic->TxRdy, aic->ATError, aic->HdrErr,
- aic->TCodeErr, aic->SendRej);
- break;
-#endif
-
- default:
- PRINT(KERN_ERR, aic->id, "unknown devctl command %d", cmd);
- retval = -1;
- }
-
- return retval;
-
-}
-
-/** Initialize the host adapter chip and corresponding data
- structures. We reset the chip, enable transmitter, receiver,
- the physical DMA units, cycle timer, cycle source, reception
- of selfid packets and initialize several other registers. */
-static int aic_initialize(struct hpsb_host *host)
-{
- int i;
- struct aic5800 *aic = host->hostdata;
-
- /* Reset data structures */
- aic->async_queue = NULL;
- spin_lock_init(&aic->async_queue_lock);
-
- /* Reset the chip */
- reg_write( aic, misc_Reset, 0x37);
- udelay(10); // FIXME
- reg_write( aic, misc_Reset, 0);
-
- /* Enable Transmitter/Receiver, enable physDMA,
- * enable CycleTimer, cycleSource */
- reg_write( aic, misc_Control, 0x82050003);
-
- /* Enable reception of SelfID packets */
- reg_set_bits(aic, misc_PacketControl, 0x20);
-
- reg_write(aic, AT_InterruptSelect, 0x00F0001);
- reg_write(aic, AT_BranchSelect, 0x0100010);
- reg_write(aic, AT_WaitSelect, 0x00F0001);
- reg_write(aic, misc_ATRetries, reg_read(aic, misc_ATRetries) | 0x7);
-
- /* initialize AR DMA */
-
- /* unset run bit */
- reg_write( aic, AR_ChannelControl, 0x80000000);
-
- /* here we should have 0 iterations because of the code
- in the DmaAR handler. However, to be sure we do it */
- i = 0;
- while (reg_read(aic, AR_ChannelStatus) & 0x400) {
- i++;
- if (i>100000) {
- PRINT(KERN_ERR, aic->id,
- "Huh! Can't set AR_ChannelControl... card can not receive!");
- break;
- }
- }
-
- (aic->AR_program)->control = ( DMA_CMD_INPUTLAST | DMA_KEY_STREAM0
- | DMA_INTR_ALWAYS | DMA_BRANCH_ALWAYS)
- + AIC5800_ARFIFO_SIZE;
- (aic->AR_program)->address = virt_to_bus(aic->rcv_page);
- (aic->AR_program)->branchAddress = virt_to_bus(aic->AR_program);
- (aic->AR_program)->status = AIC5800_ARFIFO_SIZE;
-
- (aic->AR_program+1)->control = DMA_CMD_STOP;
- (aic->AR_program+1)->address = 0;
- (aic->AR_program+1)->branchAddress = 0;
- (aic->AR_program+1)->status = 0;
-
- reg_write( aic, AR_CommandPtr, (u32) virt_to_bus(aic->AR_program));
- reg_write( aic, AR_ChannelControl, 0x80008000);
-
- /* Enable Interrupts */
- reg_write(aic, misc_InterruptClear, 0xFFFFFFFF);
- reg_write(aic, misc_InterruptMask, 0xFFFFFFFF);
- /*reg_write(aic, misc_InterruptMask, 0x00F1F03F);*/
-
- return 1;
-}
-
-static void aic_release(struct hpsb_host *host)
-{
- struct aic5800 *aic;
-
- if (host != NULL) {
- aic = host->hostdata;
- remove_card(aic);
- }
-}
-
-/* This must be called with the async_queue_lock held. */
-static void send_next_async(struct aic5800 *aic)
-{
- int i;
- struct hpsb_packet *packet = aic->async_queue;
-
- /* stop the channel program if it's still running */
- reg_write( aic, AT_ChannelControl, 0x80000000);
-
- /* re-format packet header for AIC-5800 chip */
- packet->header[1] = (packet->header[1] & 0xFFFF) |
- (packet->header[0] & 0xFFFF0000);
- packet->header[0] = (packet->header[0] & 0xFFFF);
-
-#ifndef __BIG_ENDIAN
- /* Packet must be byte-swapped in non-big-endian environments,
- * see AIC-5800 specification...
- */
- { u32 i;
- for ( i = 0 ; i < packet->header_size/sizeof(u32) ; i++ )
- packet->header[i] = cpu_to_be32( packet->header[i] );
- for ( i = 0 ; i < packet->data_size/sizeof(u32) ; i++ )
- packet->data[i] = cpu_to_be32( packet->data[i] );
- }
-
-#endif
-
- /* typically we use only a few iterations here */
- i = 0;
- while (reg_read(aic, AT_ChannelStatus) & 0x400) {
- i++;
- if (i>5000) {
- PRINT(KERN_ERR, aic->id,
- "runaway loop 1 in send_next_async() - bailing out...");
- break;
- };
- };
-
- /* set data buffer address and packet length */
- memset(aic->AT_program, 0, MAX_AT_PROGRAM_SIZE * sizeof(struct dma_cmd));
-
- if (packet->data_size) {
- aic->AT_program[0].control = ( DMA_CMD_OUTPUTMORE | DMA_KEY_STREAM0 ) +
- packet -> header_size;
- aic->AT_program[0].address = virt_to_bus( packet->header );
- aic->AT_program[1].control = ( DMA_CMD_OUTPUTLAST | DMA_KEY_STREAM0
- | DMA_INTR_ALWAYS )
- + packet -> data_size;
- aic->AT_program[1].address = virt_to_bus( packet->data );
-
- aic->AT_program[2].control = DMA_CMD_STOP;
-
- } else {
- aic->AT_program[0].control = ( DMA_CMD_OUTPUTLAST | DMA_INTR_ALWAYS |
- DMA_KEY_STREAM0 ) +
- packet -> header_size;
- aic->AT_program[0].address = virt_to_bus( packet->header );
-
- aic->AT_program[1].control = DMA_CMD_STOP;
- };
-
- /* set program start address */
- reg_write(aic, AT_CommandPtr, (unsigned int) virt_to_bus(aic->AT_program));
-
- /* typically we use only a few iterations here */
- i = 0;
- while (reg_read(aic, AT_CommandPtr) != (unsigned int)
- virt_to_bus(aic->AT_program)) {
- i++;
- if (i>5000) {
- PRINT(KERN_ERR, aic->id,
- "runaway loop 2 in send_next_async() - bailing out...");
- break;
- };
- };
-
- /* run program */
- reg_write( aic, AT_ChannelControl, 0x80008000);
-}
-
-
-static int aic_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
-{
- struct aic5800 *aic = host->hostdata;
- struct hpsb_packet *p;
- unsigned long flags;
-
- if (packet->data_size >= 4096) {
- PRINT(KERN_ERR, aic->id, "transmit packet data too big (%d)",
- packet->data_size);
- return 0;
- }
-
- packet->xnext = NULL;
-
- spin_lock_irqsave(&aic->async_queue_lock, flags);
-
- if (aic->async_queue == NULL) {
- aic->async_queue = packet;
- send_next_async(aic);
- } else {
- p = aic->async_queue;
- while (p->xnext != NULL) {
- p = p->xnext;
- }
-
- p->xnext = packet;
- }
-
- spin_unlock_irqrestore(&aic->async_queue_lock, flags);
-
- return 1;
-}
-
-static int get_phy_reg(struct aic5800 *aic, int addr)
-{
- int retval;
- int i = 0;
-
- /* sanity check */
- if (addr > 15) {
- PRINT(KERN_ERR, aic->id, __FUNCTION__
- ": PHY register address %d out of range", addr);
- return -1;
- }
-
- /* request data from PHY */
- reg_write(aic, misc_PhyControl, LINK_PHY_READ | LINK_PHY_ADDR(addr));
-
- /* read data from PhyControl register */
- /* note that we have to wait until the register is updated */
- do {
- retval = reg_read(aic, misc_PhyControl);
-
- if (i > 10000) {
- PRINT(KERN_ERR, aic->id, __FUNCTION__
- ": runaway loop, aborting");
- retval = -1;
- break;
- }
- i++;
- } while ((retval & 0xf000000) != LINK_PHY_RADDR(addr));
-
- /* we don't want a PhyInt interrupt */
- reg_write(aic, misc_InterruptClear, INT_PhyInt);
-
- if (retval != -1) {
- return ((retval & 0xff0000)>>16);
- } else {
- return -1;
- }
-}
-
-static quadlet_t generate_own_selfid(struct aic5800 *aic, int phyid)
-{
- quadlet_t lsid;
- char phyreg[7];
- int i;
-
- for (i = 1; i < 7; i++) {
- phyreg[i] = get_phy_reg(aic, i);
- }
-
- /* Standard PHY register map */
- lsid = 0x80400000 | (phyid << 24);
- lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
- lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
- lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dep) */
- lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
-
- for (i = 0; i < (phyreg[2] & 0x1f); i++) { /* ports */
- if (phyreg[3 + i] & 0x4) {
- lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
- << (6 - i*2);
- } else {
- lsid |= 1 << (6 - i*2);
- }
- }
-
- return lsid;
-};
-
-/* moved out to make interrupt routine more readable */
-inline static void handle_selfid(struct aic5800 *aic, struct hpsb_host *host,
- int phyid, int isroot, size_t size)
-{
- quadlet_t *q = aic->rcv_page;
- quadlet_t lsid;
-
- /* we need our own self-id packet */
- lsid = generate_own_selfid(aic, phyid);
-
- /* unconnected state? only begin and end marker in rcv_page */
- if (size==8) {
- hpsb_selfid_received(host, lsid);
- }
-
- /* process buffer... AIC's FIFO often contains some strangenesses */
- while (size > 0) {
- if (q[0] == 0xe0) {
- /* marker */
- q += 1;
- size -= 4;
- continue;
- };
- if (q[0] == 0x1) {
- /* marker */
- q += 1;
- size -= 4;
- break;
- };
-
- if (q[0] == ~q[1]) {
- /* correct self-id */
-
- if ((q[0] & 0x3f800000) == ((phyid + 1) << 24)) {
- /* its our turn now! */
- //PRINT(KERN_INFO,
- // aic->id, "selfid packet 0x%x included", lsid);
-
- hpsb_selfid_received(host, lsid);
- }
-
- //PRINT(KERN_INFO, aic->id, "selfid packet 0x%x rcvd", q[0]);
- hpsb_selfid_received(host, q[0]);
- q += 2;
- size -= 8;
- continue;
- };
- }
-
- /* if we are root, our self-id packet is last */
- if (isroot && phyid != 0) {
- hpsb_selfid_received(host, lsid);
- }
-
- hpsb_selfid_complete(host, phyid, isroot);
-}
-
-static void aic_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct aic5800 *aic = (struct aic5800 *)dev_id;
- struct hpsb_host *host = aic->host;
- quadlet_t *q = aic->rcv_page;
-
- int phyid = -1, isroot = 0;
-
- u32 interruptEvent = reg_read(aic, misc_InterruptEvents);
- reg_write(aic, misc_InterruptClear, interruptEvent);
-
- //printk("InterruptEvent 0x%x\n", interruptEvent);
- if ( (interruptEvent & 0x3f) == 0x3f ) {
- PRINT(KERN_INFO, aic->id, "Dma Engine Error");
- };
-
- if ( interruptEvent & INT_DmaAT ) {
- if (aic->AT_program[0].status & 0xFFFF)
- PRINT(KERN_INFO, aic->id, "AT: could not transfer %d bytes",
- aic->AT_program[0].status & 0xFFFF);
- };
-
- if ( interruptEvent & INT_PhyInt) {
- PRINT(KERN_INFO, aic->id, "PhyInt");
- };
-
- if ( interruptEvent & INT_DmaAR ) {
- int rcv_bytes;
- int i;
-
- /* we calculate the number of received bytes from the
- residual count field */
- rcv_bytes = AIC5800_ARFIFO_SIZE - (aic->AR_program->status & 0xFFFF);
-
- //PRINT(KERN_INFO, aic->id, "AR_status 0x%x, %d bytes read", aic->AR_program->status, rcv_bytes);
-
- if ((aic->AR_program->status & 0x84000000)
- && (aic->AR_program->status & 0xFFFF) >= 8 ) {
-
-#ifndef __BIG_ENDIAN
- /* we have to do byte-swapping on non-bigendian architectures */
- for (i=0; i< (rcv_bytes / sizeof(quadlet_t)); i++) {
- *q = be32_to_cpu(*q);
- q++;
- };
- q = aic->rcv_page;
-#endif
-
- if (*q == 0xe0) {
- phyid = reg_read(aic, misc_NodeID);
- isroot = phyid & 0x800000;
- phyid = phyid & 0x3F;
- handle_selfid(aic, host, phyid, isroot, rcv_bytes);
- } else {
- hpsb_packet_received(host, aic->rcv_page, rcv_bytes, 0);
- };
- } else {
- PRINT(KERN_ERR, aic->id,
- "AR DMA program status value 0x%x is incorrect!",
- aic->AR_program->status);
- };
- }
- if ( interruptEvent & INT_BusReset ) {
- PRINT(KERN_INFO, aic->id, "bus reset occurred");
- if (!host->in_bus_reset) {
- hpsb_bus_reset(host);
- }
- reg_set_bits(aic, misc_Control, 0x1);
- aic->NumBusResets++;
- };
-
- if (interruptEvent & INT_RcvData ) {
- aic->RxPackets++;
- };
-
- if (interruptEvent & INT_TxRdy) {
- /* async packet sent - transmitter ready */
- u32 ack;
- struct hpsb_packet *packet;
-
- if (aic->async_queue) {
-
- spin_lock(&aic->async_queue_lock);
-
-
- ack = reg_read(aic, AT_ChannelStatus) & 0xF;
-
- packet = aic->async_queue;
- aic->async_queue = packet->xnext;
-
- if (aic->async_queue != NULL) {
- send_next_async(aic);
- }
- spin_unlock(&aic->async_queue_lock);
- PRINT(KERN_INFO,aic->id,"packet sent with ack code %d",ack);
- hpsb_packet_sent(host, packet, ack);
- } // else
- //PRINT(KERN_INFO,aic->id,"packet sent without async_queue (self-id?)");
-
- aic->TxRdy++;
- };
- if (interruptEvent & INT_ATError ) {
- PRINT(KERN_INFO,aic->id,"ATError");
- aic->ATError++;
- };
- if (interruptEvent & INT_SendRej ) {
- aic->SendRej++;
- };
- if (interruptEvent & INT_HdrErr ) {
- aic->HdrErr++;
- };
- if (interruptEvent & INT_TCodeErr ) {
- PRINT(KERN_INFO,aic->id,"TCodeErr");
- aic->TCodeErr++;
- };
-
- aic->NumInterrupts++;
-
-}
-
-inline static void * quadquadalign(void *buf)
-{
- if ((unsigned int) buf % 0x10 != 0) {
- return (void *)(((unsigned int)buf + 0x10) & 0xFFFFFFF0);
- } else {
- return buf;
- };
-}
-
-static int add_card(struct pci_dev *dev)
-{
-#define FAIL(fmt, args...) do {\
- PRINT_G(KERN_ERR, fmt , ## args); \
- num_of_cards--; \
- remove_card(aic); \
- return 1; } while (0)
-
- struct aic5800 *aic; /* shortcut to currently handled device */
- unsigned long page;
-
- if (pci_enable_device(dev))
- return 1;
-
- if (num_of_cards == MAX_AIC5800_CARDS) {
- PRINT_G(KERN_WARNING, "cannot handle more than %d cards. "
- "Adjust MAX_AIC5800_CARDS in aic5800.h.",
- MAX_AIC5800_CARDS);
- return 1;
- }
-
- aic = &cards[num_of_cards++];
-
- aic->id = num_of_cards-1;
- aic->dev = dev;
-
- if (!request_irq(dev->irq, aic_irq_handler, SA_SHIRQ,
- AIC5800_DRIVER_NAME, aic)) {
- PRINT(KERN_INFO, aic->id, "allocated interrupt %d", dev->irq);
- } else {
- FAIL("failed to allocate shared interrupt %d", dev->irq);
- }
-
- page = get_free_page(GFP_KERNEL);
- if (page != 0) {
- aic->rcv_page = phys_to_virt(page);
- } else {
- FAIL("failed to allocate receive buffer");
- }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,13)
- aic->registers = ioremap_nocache(dev->base_address[0],
- AIC5800_REGSPACE_SIZE);
-#else
- aic->registers = ioremap_nocache(dev->resource[0].start,
- AIC5800_REGSPACE_SIZE);
-#endif
-
- if (aic->registers == NULL) {
- FAIL("failed to remap registers - card not accessible");
- }
-
- PRINT(KERN_INFO, aic->id, "remapped memory space reg 0x%p",
- aic->registers);
-
- aic->pbuf = kmalloc(AIC5800_PBUF_SIZE, GFP_KERNEL);
-
- if (!aic->pbuf) {
- FAIL("failed to allocate program buffer");
- }
-
- aic->AT_program = quadquadalign(aic->pbuf);
- aic->AT_program[2].control = DMA_CMD_STOP;
-
- aic->AR_program = aic->AT_program + MAX_AT_PROGRAM_SIZE *
- sizeof(struct dma_cmd);
-
- return 0;
-#undef FAIL
-}
-
-static void remove_card(struct aic5800 *aic)
-{
- /* Disable interrupts of this controller */
- reg_write(aic, misc_InterruptMask, 0);
- /* Free AR buffer */
- free_page(virt_to_phys(aic->rcv_page));
- /* Free channel program buffer */
- kfree(aic->pbuf);
- /* Free interrupt request */
- free_irq(aic->dev->irq, aic);
- /* Unmap register space */
- iounmap(aic->registers);
-}
-
-static int init_driver()
-{
- struct pci_dev *dev = NULL;
- int success = 0;
-
- if (num_of_cards) {
- PRINT_G(KERN_DEBUG, __PRETTY_FUNCTION__ " called again");
- return 0;
- }
-
- while ((dev = pci_find_device(PCI_VENDOR_ID_ADAPTEC,
- PCI_DEVICE_ID_ADAPTEC_5800, dev))
- != NULL) {
- if (add_card(dev) == 0) {
- success = 1;
- }
- }
-
- if (success == 0) {
- PRINT_G(KERN_WARNING, "no operable AIC-5800 based cards found");
- return -ENXIO;
- }
-
- return 0;
-}
-
-/** Prepare our local CSR ROM. This is done by using the software-stored
- ROM and inserting the GUID read from the EEPROM */
-static size_t get_aic_rom(struct hpsb_host *host, const quadlet_t **ptr)
-{
- struct aic5800 *aic = host -> hostdata;
- u64 guid;
-
- /* Read the GUID from the card's EEPROM and put it into the right
- place in the CONFIG ROM. */
- guid = read_guid(aic);
- aic5800_csr_rom[15] = (u32) (guid >> 32);
- aic5800_csr_rom[16] = (u32) (guid & 0xFFFF);
-
- *ptr = aic5800_csr_rom;
-
- return sizeof(aic5800_csr_rom);
-}
-
-struct hpsb_host_template *get_aic_template(void)
-{
- static struct hpsb_host_template tmpl;
- static int initialized = 0;
-
- if (!initialized) {
- /* Initialize by field names so that a template structure
- * reorganization does not influence this code. */
- tmpl.name = "aic5800";
-
- tmpl.detect_hosts = aic_detect;
- tmpl.initialize_host = aic_initialize;
- tmpl.release_host = aic_release;
- tmpl.get_rom = get_aic_rom;
- tmpl.transmit_packet = aic_transmit;
- tmpl.devctl = aic_devctl;
-
- initialized = 1;
- }
-
- return &tmpl;
-}
-
-#ifdef MODULE
-
-/* EXPORT_NO_SYMBOLS; */
-
-MODULE_AUTHOR("Emanuel Pirker <epirker@edu.uni-klu.ac.at>");
-MODULE_DESCRIPTION("Adaptec AIC-5800 PCI-to-IEEE1394 controller driver");
-MODULE_SUPPORTED_DEVICE("aic5800");
-
-void cleanup_module(void)
-{
- hpsb_unregister_lowlevel(get_aic_template());
- PRINT_G(KERN_INFO, "removed " AIC5800_DRIVER_NAME " module");
-}
-
-int init_module(void)
-{
- if (hpsb_register_lowlevel(get_aic_template())) {
- PRINT_G(KERN_ERR, "registering failed");
- return -ENXIO;
- } else {
- return 0;
- }
-}
-
-#endif /* MODULE */
+++ /dev/null
-/*
-** aic5800.h - Adaptec AIC-5800 PCI-IEEE1394 chip driver header file
-** Copyright (C)1999 Emanuel Pirker <epirker@edu.uni-klu.ac.at>
-**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with this program; if not, write to the Free Software
-** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-*/
-
-#ifndef AIC5800_H
-#define AIC5800_H
-
-#define AIC5800_DRIVER_NAME "aic5800"
-
-#define MAX_AIC5800_CARDS 4
-#define AIC5800_REGSPACE_SIZE 512
-#define AIC5800_PBUF_SIZE 512
-
-#define MAX_AT_PROGRAM_SIZE 10
-#define AIC5800_ARFIFO_SIZE 128
-
-struct dma_cmd {
- u32 control;
- u32 address;
- u32 branchAddress;
- u32 status;
-};
-
-struct aic5800 {
- int id; /* sequential card number */
-
- struct pci_dev *dev;
-
- /* remapped memory spaces */
- void *registers;
-
- struct hpsb_host *host;
-
- int phyid, isroot;
-
- void *rcv_page;
- void *pbuf;
-
- struct dma_cmd *AT_program;
- u32 *AT_status;
- struct dma_cmd *AR_program;
- u32 *AR_status;
- int AR_active;
-
- struct hpsb_packet *async_queue;
- spinlock_t async_queue_lock;
-
- unsigned long NumInterrupts, NumBusResets;
- unsigned long TxPackets, RxPackets;
- unsigned long TxErrors, RxErrors;
- unsigned long TxRdy, ATError, HdrErr, TCodeErr, SendRej;
-
-};
-
-
-/*
- * Register read and write helper functions.
- */
-inline static void reg_write(const struct aic5800 *aic, int offset, u32 data)
-{
- writel(data, aic->registers + offset);
-}
-
-inline static u32 reg_read(const struct aic5800 *aic, int offset)
-{
- return readl(aic->registers + offset);
-}
-
-inline static void reg_set_bits(const struct aic5800 *aic, int offset,
- u32 mask)
-{
- reg_write(aic, offset, (reg_read(aic, offset) | mask));
-}
-
-inline static void reg_clear_bits(const struct aic5800 *aic, int offset,
- u32 mask)
-{
- reg_write(aic, offset, (reg_read(aic, offset) & ~mask));
-}
-
-
-/* AIC-5800 Registers */
-
-#define AT_ChannelControl 0x0
-#define AT_ChannelStatus 0x4
-#define AT_CommandPtr 0xC
-#define AT_InterruptSelect 0x10
-#define AT_BranchSelect 0x14
-#define AT_WaitSelect 0x18
-
-/* Asynchronous receive */
-#define AR_ChannelControl 0x20
-#define AR_ChannelStatus 0x24
-#define AR_CommandPtr 0x2C
-
-/* ITA */
-#define ITA_ChannelControl 0x40
-#define ITA_ChannelStatus 0x44
-#define ITA_CommandPtr 0x4C
-
-/* ITB */
-#define ITB_ChannelControl 0x60
-#define ITB_ChannelStatus 0x64
-#define ITB_CommandPtr 0x6C
-
-/* IRA */
-#define IRA_ChannelControl 0x80
-#define IRA_ChannelStatus 0x84
-#define IRA_CommandPtr 0x8C
-
-/* IRB */
-#define IRB_ChannelControl 0xA0
-#define IRB_ChannelStatus 0xA4
-#define IRB_CommandPtr 0xAC
-
-/* miscellaneous */
-#define misc_Version 0x100
-#define misc_Control 0x104
-#define misc_NodeID 0x108
-#define misc_Reset 0x10C
-#define misc_PacketControl 0x110
-#define misc_Diagnostic 0x114
-#define misc_PhyControl 0x118
-#define misc_ATRetries 0x11C
-#define misc_SSNinterface 0x120
-#define misc_CycleTimer 0x124
-
-/* ITA */
-#define ITA_EventCycle 0x130
-#define ITA_Configuration 0x134
-#define ITA_Bandwidth 0x138
-
-/* ITB */
-#define ITB_EventCycle 0x140
-#define ITB_Configuration 0x144
-#define ITB_Bandwidth 0x148
-
-/* IRA */
-#define IRA_EventCycle 0x150
-#define IRA_Configuration 0x154
-
-/* IRB */
-#define IRB_EventCycle 0x160
-#define IRB_Configuration 0x164
-
-/* RSU */
-#define RSU_Enable 0x170
-#define RSU_Interrupt 0x174
-#define RSU_TablePtr 0x178
-#define RSU_InterruptSet 0x17C
-
-/* misc */
-#define misc_InterruptEvents 0x180
-#define misc_InterruptMask 0x184
-#define misc_InterruptClear 0x188
-#define misc_CardBusEvent 0x1E0
-#define misc_CardBusMask 0x1E4
-#define misc_CardBusState 0x1E8
-#define misc_CardBusForce 0x1EC
-#define misc_SEEPCTL 0x1F0
-
-/* Interrupts */
-#define INT_DmaAT 1
-#define INT_DmaAR (1<<1)
-#define INT_DmaITA (1<<2)
-#define INT_DmaITB (1<<3)
-#define INT_DmaIRA (1<<4)
-#define INT_DmaIRB (1<<5)
-#define INT_PERResponse (1<<7)
-#define INT_CycleEventITA (1<<8)
-#define INT_CycleEventITB (1<<9)
-#define INT_CycleEventIRA (1<<10)
-#define INT_CycleEventIRB (1<<11)
-#define INT_BusReset (1<<12)
-#define INT_CmdReset (1<<13)
-#define INT_PhyInt (1<<14)
-#define INT_RcvData (1<<15)
-#define INT_TxRdy (1<<16)
-#define INT_CycleStart (1<<17)
-#define INT_CycleSeconds (1<<18)
-#define INT_CycleLost (1<<19)
-#define INT_ATError (1<<20)
-#define INT_SendRej (1<<21)
-#define INT_HdrErr (1<<22)
-#define INT_TCodeErr (1<<23)
-#define INT_PRQUxferErr (1<<24)
-#define INT_PWQUxferErr (1<<25)
-#define INT_RSUxferErr (1<<26)
-#define INT_RSDone (1<<27)
-#define INT_PSOutOfRetries (1<<28)
-#define INT_cycleTooLong (1<<29)
-
-/* DB DMA constants */
-#define DMA_CMD_OUTPUTMORE 0
-#define DMA_CMD_OUTPUTLAST 0x10000000
-#define DMA_CMD_INPUTMORE 0x20000000
-#define DMA_CMD_INPUTLAST 0x30000000
-#define DMA_CMD_STOREQUAD 0x40000000
-#define DMA_CMD_LOADQUAD 0x50000000
-#define DMA_CMD_NOP 0x60000000
-#define DMA_CMD_STOP 0x70000000
-
-#define DMA_KEY_STREAM0 0
-#define DMA_KEY_STREAM1 (1<<24)
-#define DMA_KEY_STREAM2 (2<<24)
-#define DMA_KEY_STREAM3 (3<<24)
-#define DMA_KEY_REGS (5<<24)
-#define DMA_KEY_SYSTEM (6<<24)
-#define DMA_KEY_DEVICE (7<<24)
-
-#define DMA_INTR_NEVER 0
-#define DMA_INTR_TRUE (1<<20)
-#define DMA_INTR_FALSE (2<<20)
-#define DMA_INTR_ALWAYS (3<<20)
-#define DMA_WAIT_NEVER 0
-#define DMA_WAIT_TRUE (1<<16)
-#define DMA_WAIT_FALSE (2<<16)
-#define DMA_WAIT_ALWAYS (3<<16)
-#define DMA_BRANCH_NEVER 0
-#define DMA_BRANCH_TRUE (1<<18)
-#define DMA_BRANCH_FALSE (2<<18)
-#define DMA_BRANCH_ALWAYS (3<<18)
-
-#define DMA_SPEED_100 0
-#define DMA_SPEED_200 (1<<16)
-#define DMA_SPEED_400 (2<<16)
-
-/* PHY access */
-#define LINK_PHY_READ (1<<15)
-#define LINK_PHY_WRITE (1<<14)
-#define LINK_PHY_ADDR(addr) (addr<<8)
-#define LINK_PHY_WDATA(data) (data)
-#define LINK_PHY_RADDR(addr) (addr<<24)
-
-quadlet_t aic5800_csr_rom[] = {
- /* bus info block */
- 0x041ffb82, // length of bus info block, CRC
- 0x31333934, // 1394 designator
- 0xf005a000, // various capabilites
- 0x0000d189, // node_vendor_id, chip_id_hi
- 0x401010fc, // chip_id_lo
- /* root directory */
- 0x00040e54, // length of root directory, CRC
- 0x030000d1, // module_vendor_id
- 0x0c008000, // various capabilities
- 0x8d000006, // offset of node unique id leaf
- 0xd1000001, // offset of unit directory
- /* unit directory */
- 0x0003e60d, // length of unit directory, CRC
- 0x12000000, // unit_spec_id
- 0x13000000, // unit_sw_version
- 0xd4000004, // offset of unit dependent directory
- /* node unique id leaf */
- 0x00026ba7, // length of leaf, CRC
- 0x0000d189, // node_vendor_id, chip_id_hi
- 0x401010fc, // chip_id_lo
- /* unit dependent directory */
- 0x0002ae47, // length of directory, CRC
- 0x81000002, // offset of vendor name leaf
- 0x82000006, // offset of model name leaf
- /* vendor name leaf */
- 0x000486a3, // length of leaf, CRC
- 0x00000000,
- 0x00000000,
- 0x41444150, // ADAP
- 0x54454300, // TEC
- /* model name leaf */
- 0x0004f420, // length of leaf, CRC
- 0x00000000,
- 0x00000000,
- 0x4148412d, // AHA-
- 0x38393430 // 8940
-};
-
-#endif
-
int oldcycle;
quadlet_t ret;
- if ((csraddr | length) & 0x3) {
+ if ((csraddr | length) & 0x3)
return RCODE_TYPE_ERROR;
- }
length /= 4;
return RCODE_COMPLETE;
}
-static int write_regs(struct hpsb_host *host, int nodeid, quadlet_t *data,
- u64 addr, unsigned int length)
+static int write_regs(struct hpsb_host *host, int nodeid, int destid,
+ quadlet_t *data, u64 addr, unsigned int length)
{
int csraddr = addr - CSR_REGISTER_BASE;
- if ((csraddr | length) & 0x3) {
+ if ((csraddr | length) & 0x3)
return RCODE_TYPE_ERROR;
- }
length /= 4;
unsigned long flags;
quadlet_t *regptr = NULL;
- if (csraddr & 0x3) return RCODE_TYPE_ERROR;
+ if (csraddr & 0x3)
+ return RCODE_TYPE_ERROR;
if (csraddr < CSR_BUS_MANAGER_ID || csraddr > CSR_CHANNELS_AVAILABLE_LO
|| extcode != EXTCODE_COMPARE_SWAP)
}
}
-static int write_fcp(struct hpsb_host *host, int nodeid, quadlet_t *data,
- u64 addr, unsigned int length)
+static int write_fcp(struct hpsb_host *host, int nodeid, int dest,
+ quadlet_t *data, u64 addr, unsigned int length)
{
int csraddr = addr - CSR_REGISTER_BASE;
- if (length > 512) {
+ if (length > 512)
return RCODE_TYPE_ERROR;
- }
switch (csraddr) {
case CSR_FCP_COMMAND:
};
struct guid_req {
- struct hpsb_packet *pkt;
- struct tq_struct tq;
+ struct hpsb_packet *pkt;
+ int retry;
+ unsigned int hdr_size;
+ int hdr_ptr;
+ u32 bus_info[5];
+ struct tq_struct tq;
};
-
static struct guid_entry *create_guid_entry(void)
{
struct guid_entry *ge;
struct guid_entry *ge;
unsigned long flags;
- HPSB_DEBUG("node %d on host 0x%p has GUID 0x%08x%08x",
- nodeid & NODE_MASK, host, (unsigned int)(guid >> 32),
- (unsigned int)(guid & 0xffffffff));
+ HPSB_DEBUG("Node %d on %s host: GUID %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ nodeid & NODE_MASK, host->template->name, ((u8 *)&guid)[0],
+ ((u8 *)&guid)[1], ((u8 *)&guid)[2], ((u8 *)&guid)[3],
+ ((u8 *)&guid)[4], ((u8 *)&guid)[5], ((u8 *)&guid)[6],
+ ((u8 *)&guid)[7]);
read_lock_irqsave(&guid_lock, flags);
ge = find_entry(guid);
static void pkt_complete(struct guid_req *req)
{
struct hpsb_packet *pkt = req->pkt;
- int rcode = (pkt->header[1] >> 12) & 0xf;
-
- if (pkt->ack_code == ACK_PENDING && rcode == RCODE_COMPLETE) {
- if (*(char *)pkt->data > 1) {
- associate_guid(pkt->host, pkt->node_id,
- ((u64)be32_to_cpu(pkt->data[3]) << 32)
- | be32_to_cpu(pkt->data[4]));
- } else {
- HPSB_DEBUG("minimal ROM on node %d",
- pkt->node_id & NODE_MASK);
- }
- } else {
- HPSB_DEBUG("guid transaction error: ack %d, rcode %d",
- pkt->ack_code, rcode);
+ struct hpsb_host *host = pkt->host;
+ nodeid_t nodeid = pkt->node_id;
+
+ if (hpsb_packet_success (pkt)) {
+ HPSB_ERR("GUID quadlet transaction error for %d, retry: %d", nodeid,
+ req->retry);
+ req->retry++;
+ if (req->retry > 3)
+ goto finish;
+ else
+ goto retry;
}
- free_tlabel(pkt->host, pkt->node_id, pkt->tlabel);
- free_hpsb_packet(pkt);
- kfree(req);
-
- if (atomic_dec_and_test(&outstanding_requests)) {
- /* FIXME: free unreferenced and inactive GUID entries. */
- }
+ /* Copy our received quadlet */
+ req->bus_info[req->hdr_ptr++] = be32_to_cpu(pkt->header[3]);
+
+ /* First quadlet, let's get some info */
+ if (req->hdr_ptr == 1) {
+ /* Get the bus_info_length from first quadlet */
+ req->hdr_size = req->bus_info[0] >> 24;
+
+ /* Make sure this isn't one of those minimal proprietary
+ * ROMs. IMO, we should just barf all over them. We need
+		 * at least four bus_info quads to get our EUI-64. */
+ if (req->hdr_size < 4) {
+ HPSB_INFO("Node %d on %s host has non-standard ROM format (%d quads), "
+ "cannot parse", nodeid, host->template->name, req->hdr_size);
+ goto finish;
+ }
+
+ /* Make sure we don't overflow. We have one quad for this
+ * first bus info block, the other 4 should be part of the
+ * bus info itself. */
+ if (req->hdr_size > (sizeof (req->bus_info) >> 2) - 1)
+ req->hdr_size = (sizeof (req->bus_info) >> 2) - 1;
+ }
+
+ /* We've got all the info we need, so let's check the EUI-64, and
+ * add it to our list. */
+ if (req->hdr_ptr >= req->hdr_size + 1) {
+ associate_guid(pkt->host, pkt->node_id,
+ ((u64)req->bus_info[3] << 32) | req->bus_info[4]);
+ goto finish;
+ }
+
+retry:
+
+ /* Here, we either retry a failed retrieve, or we have incremented
+ * our counter, to get the next quad in our header. */
+ free_tlabel(pkt->host, pkt->node_id, pkt->tlabel);
+ free_hpsb_packet(pkt);
+ pkt = hpsb_make_readqpacket(host, nodeid, CSR_REGISTER_BASE +
+ CSR_CONFIG_ROM + (req->hdr_ptr<<2));
+ if (!pkt) {
+ kfree(req);
+ HPSB_ERR("Out of memory in GUID processing");
+ return;
+ }
+
+ req->pkt = pkt;
+ req->retry = 0;
+
+ queue_task(&req->tq, &pkt->complete_tq);
+ if (!hpsb_send_packet(pkt)) {
+ HPSB_NOTICE("Failed to send GUID request to node %d", nodeid);
+ goto finish;
+ }
+
+ return;
+
+finish:
+
+ free_tlabel(pkt->host, nodeid, pkt->tlabel);
+ free_hpsb_packet(pkt);
+ kfree(req);
+
+ if (atomic_dec_and_test(&outstanding_requests)) {
+ /* Do something useful */
+ }
+
+ return;
}
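/* Illustrative sketch: the bus_info quadlets gathered above normally look
 * like the bus info block of the (removed) aic5800 CSR ROM earlier in this
 * patch, so the EUI-64 handed to associate_guid() comes from quadlets 3
 * and 4.  The values below are examples only. */
static const quadlet_t example_bus_info[5] = {
	0x041ffb82,	/* bus_info_length = 4, crc_length, CRC */
	0x31333934,	/* "1394" designator */
	0xf005a000,	/* irmc/cmc/isc/bmc, cyc_clk_acc, max_rec */
	0x0000d189,	/* node_vendor_id, chip_id_hi */
	0x401010fc	/* chip_id_lo */
};
/* ... giving GUID ((u64)0x0000d189 << 32) | 0x401010fc. */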
nodeid_t nodeid = LOCAL_BUS;
for (; nodecount; nodecount--, nodeid++, sid++) {
- while (sid->extended) sid++;
- if (!sid->link_active) continue;
- if (nodeid == host->node_id) continue;
+ while (sid->extended)
+ sid++;
+ if (!sid->link_active)
+ continue;
+ if (nodeid == host->node_id)
+ continue;
greq = kmalloc(sizeof(struct guid_req), SLAB_ATOMIC);
if (!greq) {
- HPSB_ERR("out of memory in GUID processing");
+ HPSB_ERR("Out of memory in GUID processing");
return;
}
- pkt = hpsb_make_readbpacket(host, nodeid,
- CSR_REGISTER_BASE + CSR_CONFIG_ROM,
- 20);
+ pkt = hpsb_make_readqpacket(host, nodeid, CSR_REGISTER_BASE +
+ CSR_CONFIG_ROM);
+
if (!pkt) {
kfree(greq);
- HPSB_ERR("out of memory in GUID processing");
+ HPSB_ERR("Out of memory in GUID processing");
return;
}
- INIT_TQ_LINK(greq->tq);
- greq->tq.sync = 0;
- greq->tq.routine = (void (*)(void*))pkt_complete;
- greq->tq.data = greq;
+ INIT_TQUEUE(&greq->tq, (void (*)(void*))pkt_complete, greq);
+
+ greq->hdr_size = 4;
+ greq->hdr_ptr = 0;
+ greq->retry = 0;
greq->pkt = pkt;
queue_task(&greq->tq, &pkt->complete_tq);
free_tlabel(pkt->host, pkt->node_id, pkt->tlabel);
free_hpsb_packet(pkt);
kfree(greq);
- HPSB_NOTICE("failed to send packet in GUID processing");
+ HPSB_NOTICE("Failed to send GUID request to node %d", nodeid);
}
- HPSB_INFO("GUID request sent to node %d", nodeid & NODE_MASK);
+ HPSB_DEBUG("GUID request sent to node %d", nodeid & NODE_MASK);
+
atomic_inc(&outstanding_requests);
}
}
DEFINE_MULTIPLEXER(host_reset)
#undef DEFINE_MULTIPLEXER
+/* Add one host to our list */
+void highlevel_add_one_host (struct hpsb_host *host)
+{
+ if (host->template->initialize_host)
+ if (!host->template->initialize_host(host))
+ goto fail;
+ host->initialized = 1;
+ highlevel_add_host (host);
+ hpsb_reset_bus (host, LONG_RESET);
+fail:
+ host->template->number_of_hosts++;
+}
+
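/* Illustrative sketch of the intended caller: a PCI probe routine that
 * detects a single card and hands it to the core through the new
 * one-host-at-a-time path.  The function name and hostdata handling are
 * assumptions made for this example; hpsb_get_host(), host->pdev and
 * highlevel_add_one_host() are taken from this patch. */
static int example_pci_probe_one(struct pci_dev *pdev,
				 struct hpsb_host_template *tmpl,
				 void *hostdata)
{
	struct hpsb_host *host = hpsb_get_host(tmpl, 0);

	if (!host)
		return -ENOMEM;

	host->pdev = pdev;
	host->hostdata = hostdata;

	/* Runs the template's initialize_host(), registers the host with
	 * the highlevel drivers and issues the first (long) bus reset. */
	highlevel_add_one_host(host);

	return 0;
}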
void highlevel_iso_receive(struct hpsb_host *host, quadlet_t *data,
unsigned int length)
{
{
struct list_head *entry;
struct hpsb_highlevel *hl;
- int cts = data[0];
+ int cts = data[0] >> 4;
read_lock(&hl_drivers_lock);
entry = hl_drivers.next;
return rcode;
}
-int highlevel_write(struct hpsb_host *host, int nodeid, quadlet_t *data,
- u64 addr, unsigned int length)
+int highlevel_write(struct hpsb_host *host, int nodeid, int destid,
+ quadlet_t *data, u64 addr, unsigned int length)
{
struct hpsb_address_serve *as;
struct list_head *entry;
length);
if (as->op->write != NULL) {
- rcode = as->op->write(host, nodeid, data, addr,
- partlength);
+ rcode = as->op->write(host, nodeid, destid, data,
+ addr, partlength);
} else {
rcode = RCODE_TYPE_ERROR;
}
return rcode;
}
-
-
-#ifndef MODULE
-
-void register_builtin_highlevels(void)
-{
-#ifdef CONFIG_IEEE1394_RAWIO
- {
- int init_raw1394(void);
- init_raw1394();
- }
-#endif
-}
-
-#endif /* !MODULE */
-
-
void init_hpsb_highlevel(void)
{
INIT_LIST_HEAD(&dummy_zero_addr.as_list);
list_add_tail(&dummy_zero_addr.as_list, &addr_space);
list_add_tail(&dummy_max_addr.as_list, &addr_space);
-
-#ifndef MODULE
- register_builtin_highlevels();
-#endif
}
/* These functions have to implement block reads for themselves. */
int (*read) (struct hpsb_host *host, int nodeid, quadlet_t *buffer,
u64 addr, unsigned int length);
- int (*write) (struct hpsb_host *host, int nodeid, quadlet_t *data,
- u64 addr, unsigned int length);
+ int (*write) (struct hpsb_host *host, int nodeid, int destid,
+ quadlet_t *data, u64 addr, unsigned int length);
/* Lock transactions: write results of ext_tcode operation into
* *store. */
void init_hpsb_highlevel(void);
void highlevel_add_host(struct hpsb_host *host);
+void highlevel_add_one_host(struct hpsb_host *host);
void highlevel_remove_host(struct hpsb_host *host);
void highlevel_host_reset(struct hpsb_host *host);
int highlevel_read(struct hpsb_host *host, int nodeid, quadlet_t *buffer,
u64 addr, unsigned int length);
-int highlevel_write(struct hpsb_host *host, int nodeid, quadlet_t *data,
- u64 addr, unsigned int length);
+int highlevel_write(struct hpsb_host *host, int nodeid, int destid,
+ quadlet_t *data, u64 addr, unsigned int length);
int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store,
u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode);
int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store,
sema_init(&h->tlabel_count, 64);
spin_lock_init(&h->tlabel_lock);
- INIT_TQ_LINK(h->timeout_tq);
- h->timeout_tq.routine = (void (*)(void*))abort_timedouts;
- h->timeout_tq.data = h;
+ INIT_TQUEUE(&h->timeout_tq, (void (*)(void*))abort_timedouts, h);
h->topology_map = h->csr.topology_map + 3;
h->speed_map = (u8 *)(h->csr.speed_map + 2);
int count;
struct hpsb_host *host;
+ /* PCI cards should register one host at a time */
+ if (tmpl->detect_hosts == NULL)
+ return;
+
count = tmpl->detect_hosts(tmpl);
for (host = tmpl->hosts; host != NULL; host = host->next) {
host->initialized = 1;
highlevel_add_host(host);
- hpsb_reset_bus(host);
+ hpsb_reset_bus(host, LONG_RESET);
}
}
tmpl->number_of_hosts = count;
- HPSB_INFO("detected %d %s adapter%c", count, tmpl->name,
- (count != 1 ? 's' : ' '));
+ HPSB_INFO("detected %d %s adapter%s", count, tmpl->name,
+ (count != 1 ? "s" : ""));
}
static void shutdown_hosts(struct hpsb_host_template *tmpl)
int hpsb_register_lowlevel(struct hpsb_host_template *tmpl)
{
add_template(tmpl);
- HPSB_INFO("registered %s driver, initializing now", tmpl->name);
+ HPSB_DEBUG("Registered %s driver, initializing now", tmpl->name);
init_hosts(tmpl);
return 0;
HPSB_PANIC("remove_template failed on %s", tmpl->name);
}
}
-
-
-
-#ifndef MODULE
-
-/*
- * This is the init function for builtin lowlevel drivers. To add new drivers
- * put their setup code (get and register template) here. Module only
- * drivers don't need to touch this.
- */
-
-#define SETUP_TEMPLATE(name, visname) \
-do { \
- extern struct hpsb_host_template *get_ ## name ## _template(void); \
- t = get_ ## name ## _template(); \
- \
- if (t != NULL) { \
- if(!hpsb_register_lowlevel(t)) { \
- count++; \
- } \
- } else { \
- HPSB_WARN(visname " driver returned no host template"); \
- } \
-} while (0)
-
-void __init register_builtin_lowlevels()
-{
- struct hpsb_host_template *t;
- int count = 0;
-
- /* Touch t to avoid warning if no drivers are configured to
- * be built directly into the kernel. */
- t = NULL;
-
-#ifdef CONFIG_IEEE1394_PCILYNX
- SETUP_TEMPLATE(lynx, "Lynx");
-#endif
-
-#ifdef CONFIG_IEEE1394_AIC5800
- SETUP_TEMPLATE(aic, "AIC-5800");
-#endif
-
-#ifdef CONFIG_IEEE1394_OHCI1394
- SETUP_TEMPLATE(ohci, "OHCI-1394");
-#endif
-
- HPSB_INFO("%d host adapter%s initialized", count,
- (count != 1 ? "s" : ""));
-}
-
-#undef SETUP_TEMPLATE
-
-#endif /* !MODULE */
/* fields readable and writeable by the hosts */
void *hostdata;
+ struct pci_dev *pdev;
int embedded_hostdata[0];
};
ISO_UNLISTEN_CHANNEL
};
+enum reset_types {
+ /* 166 microsecond reset -- only type of reset available on
+ non-1394a capable IEEE 1394 controllers */
+ LONG_RESET,
+
+	/* Short (arbitrated) reset -- only available on 1394a-capable
+	   IEEE 1394 controllers */
+ SHORT_RESET
+};
+
struct hpsb_host_template {
struct hpsb_host_template *next;
* directory of the kernel sources for details.
*/
+#include <linux/module.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
struct hpsb_packet *alloc_hpsb_packet(size_t data_size)
{
struct hpsb_packet *packet = NULL;
- void *header = NULL, *data = NULL;
+ void *data = NULL;
int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
packet = kmalloc(sizeof(struct hpsb_packet), kmflags);
- header = kmalloc(5 * 4, kmflags);
- if (header == NULL || packet == NULL) {
- kfree(header);
- kfree(packet);
- return NULL;
- }
+ if (!packet) return NULL;
memset(packet, 0, sizeof(struct hpsb_packet));
- packet->header = header;
+ packet->header = packet->embedded_header;
if (data_size) {
data = kmalloc(data_size + 8, kmflags);
- if (data == NULL) {
- kfree(header);
+ if (!data) {
kfree(packet);
return NULL;
}
packet->data_size = data_size;
}
- INIT_TQ_HEAD(packet->complete_tq);
+ INIT_LIST_HEAD(&packet->complete_tq);
INIT_LIST_HEAD(&packet->list);
sema_init(&packet->state_change, 0);
packet->state = unused;
*/
void free_hpsb_packet(struct hpsb_packet *packet)
{
- if (packet == NULL) {
- return;
- }
+ if (!packet) return;
kfree(packet->data);
- kfree(packet->header);
kfree(packet);
}
-int hpsb_reset_bus(struct hpsb_host *host)
+int hpsb_reset_bus(struct hpsb_host *host, int type)
{
if (!host->initialized) {
return 1;
}
if (!hpsb_bus_reset(host)) {
- host->template->devctl(host, RESET_BUS, 0);
+ host->template->devctl(host, RESET_BUS, type);
return 0;
} else {
return 1;
}
}
+
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
{
if (host->in_bus_reset) {
- HPSB_DEBUG("including selfid 0x%x", sid);
+ HPSB_DEBUG("Including SelfID 0x%x", sid);
host->topology_map[host->selfid_count++] = sid;
} else {
/* FIXME - info on which host */
- HPSB_NOTICE("spurious selfid packet (0x%8.8x) received", sid);
+ HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from %s",
+ sid, host->template->name);
}
}
if (!host->node_count) {
if (host->reset_retries++ < 20) {
/* selfid stage did not complete without error */
- HPSB_NOTICE("error in SelfID stage - resetting");
- hpsb_reset_bus(host);
+ HPSB_NOTICE("Error in SelfID stage, resetting");
+ hpsb_reset_bus(host, LONG_RESET);
return;
} else {
- HPSB_NOTICE("stopping out-of-control reset loop");
- HPSB_NOTICE("warning - topology map and speed map will "
- "therefore not be valid");
+ HPSB_NOTICE("Stopping out-of-control reset loop");
+ HPSB_NOTICE("Warning - topology map and speed map will not be valid");
}
} else {
build_speed_map(host, host->node_count);
}
if (lh == &host->pending_packets) {
- HPSB_INFO("unsolicited response packet received - np");
+ HPSB_DEBUG("unsolicited response packet received - np");
dump_packet("contents:", data, 16);
spin_unlock_irqrestore(&host->pending_pkt_lock, flags);
return;
{
struct hpsb_packet *packet;
int length, rcode, extcode;
- int source = data[1] >> 16;
+ nodeid_t source = data[1] >> 16;
+ nodeid_t dest = data[0] >> 16;
u64 addr;
/* big FIXME - no error checking is done for an out of bounds length */
switch (tcode) {
case TCODE_WRITEQ:
addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_write(host, source, data+3, addr, 4);
+ rcode = highlevel_write(host, source, dest, data+3,
+ addr, 4);
if (!write_acked
&& ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
case TCODE_WRITEB:
addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_write(host, source, data+4, addr,
- data[3]>>16);
+ rcode = highlevel_write(host, source, dest, data+4,
+ addr, data[3]>>16);
if (!write_acked
&& ((data[0] >> 16) & NODE_MASK) != NODE_MASK) {
}
-#ifndef MODULE
-
-void __init ieee1394_init(void)
-{
- register_builtin_lowlevels();
- init_hpsb_highlevel();
- init_csr();
- init_ieee1394_guid();
-}
-
-#else
-
-int init_module(void)
+static int __init ieee1394_init(void)
{
init_hpsb_highlevel();
init_csr();
init_ieee1394_guid();
-
- return 0;
+ return 0;
}
-void cleanup_module(void)
+static void __exit ieee1394_cleanup(void)
{
cleanup_ieee1394_guid();
cleanup_csr();
}
-#endif
+module_init(ieee1394_init);
+module_exit(ieee1394_cleanup);
/* Store jiffies for implementing bus timeouts. */
unsigned long sendtime;
+
+ quadlet_t embedded_header[5];
};
/* Initiate bus reset on the given host. Returns 1 if bus reset already in
* progress, 0 otherwise. */
-int hpsb_reset_bus(struct hpsb_host *host);
+int hpsb_reset_bus(struct hpsb_host *host, int type);
/*
* The following functions are exported for host driver module usage. All of
EXPORT_SYMBOL(fill_async_lock);
EXPORT_SYMBOL(fill_async_lock_resp);
EXPORT_SYMBOL(fill_iso_packet);
+EXPORT_SYMBOL(fill_phy_packet);
EXPORT_SYMBOL(hpsb_make_readqpacket);
EXPORT_SYMBOL(hpsb_make_readbpacket);
EXPORT_SYMBOL(hpsb_make_writeqpacket);
EXPORT_SYMBOL(hpsb_make_writebpacket);
EXPORT_SYMBOL(hpsb_make_lockpacket);
+EXPORT_SYMBOL(hpsb_make_phypacket);
+EXPORT_SYMBOL(hpsb_packet_success);
+EXPORT_SYMBOL(hpsb_make_packet);
EXPORT_SYMBOL(hpsb_read);
EXPORT_SYMBOL(hpsb_write);
EXPORT_SYMBOL(hpsb_lock);
EXPORT_SYMBOL(highlevel_write);
EXPORT_SYMBOL(highlevel_lock);
EXPORT_SYMBOL(highlevel_lock64);
-
+EXPORT_SYMBOL(highlevel_add_host);
+EXPORT_SYMBOL(highlevel_remove_host);
+EXPORT_SYMBOL(highlevel_host_reset);
+EXPORT_SYMBOL(highlevel_add_one_host);
EXPORT_SYMBOL(hpsb_guid_get_handle);
EXPORT_SYMBOL(hpsb_guid_localhost);
EXPORT_SYMBOL(hpsb_guid_fill_packet);
packet->header_size = 4;
packet->data_size = length;
+ packet->type = iso;
packet->tcode = TCODE_ISO_DATA;
}
+void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
+{
+ packet->header[0] = data;
+ packet->header[1] = ~data;
+ packet->header_size = 8;
+ packet->data_size = 0;
+ packet->expect_response = 0;
+ packet->type = raw; /* No CRC added */
+ packet->speed_code = SPEED_100; /* Force speed to be 100Mbps */
+}
+
/**
* get_tlabel - allocate a transaction label
return p;
}
+struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
+ quadlet_t data)
+{
+ struct hpsb_packet *p;
+
+ p = alloc_hpsb_packet(0);
+ if (!p) return NULL;
+
+ p->host = host;
+ fill_phy_packet(p, data);
+
+ return p;
+}
+
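/* Illustrative sketch: building a PHY configuration packet with the new
 * helper.  The quadlet layout (root_ID in bits 29-24 guarded by the R bit,
 * gap_cnt in bits 21-16 guarded by the T bit) follows the IEEE 1394 PHY
 * configuration packet format and is an assumption of this example, not
 * something defined by this patch. */
static struct hpsb_packet *example_make_phy_config(struct hpsb_host *host,
						   int root_id, int gap_cnt)
{
	quadlet_t d = (root_id << 24) | (1 << 23)	/* R: force root_id to be root */
		    | (gap_cnt << 16) | (1 << 22);	/* T: adopt gap_cnt as gap count */

	/* fill_phy_packet() stores d and ~d as the two header quadlets,
	 * marks the packet raw (no CRC appended) and forces 100Mbps. */
	return hpsb_make_phypacket(host, d);
}
/* The caller sends it with hpsb_send_packet() and cleans up from a task
 * queued on packet->complete_tq, as the GUID code in this patch does. */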
/*
* FIXME - these functions should probably read from / write to user space to
* avoid in kernel buffers for user space callers
return retval;
}
-
-int hpsb_write(struct hpsb_host *host, nodeid_t node, u64 addr,
- quadlet_t *buffer, size_t length)
+struct hpsb_packet *hpsb_make_packet (struct hpsb_host *host, nodeid_t node,
+ u64 addr, quadlet_t *buffer, size_t length)
{
struct hpsb_packet *packet;
- int retval = 0;
- if (length == 0) {
- return -EINVAL;
- }
+ if (length == 0)
+ return NULL;
- if (host->node_id == node) {
- switch(highlevel_write(host, node, buffer, addr, length)) {
- case RCODE_COMPLETE:
- return 0;
- case RCODE_TYPE_ERROR:
- return -EACCES;
- case RCODE_ADDRESS_ERROR:
- default:
- return -EINVAL;
- }
- }
-
- if (length == 4) {
+ if (length == 4)
packet = hpsb_make_writeqpacket(host, node, addr, *buffer);
- } else {
+ else
packet = hpsb_make_writebpacket(host, node, addr, length);
- }
- if (!packet) {
- return -ENOMEM;
- }
+ if (!packet)
+ return NULL;
- if (length != 4) {
+ if (length != 4)
memcpy(packet->data, buffer, length);
- }
+
+ return packet;
+}
+
+int hpsb_write(struct hpsb_host *host, nodeid_t node, u64 addr,
+ quadlet_t *buffer, size_t length)
+{
+ struct hpsb_packet *packet;
+ int retval;
+
+ if (length == 0)
+ return -EINVAL;
+
+ if (host->node_id == node) {
+ switch(highlevel_write(host, node, node, buffer, addr, length)) {
+ case RCODE_COMPLETE:
+ return 0;
+ case RCODE_TYPE_ERROR:
+ return -EACCES;
+ case RCODE_ADDRESS_ERROR:
+ default:
+ return -EINVAL;
+ }
+ }
+
+ packet = hpsb_make_packet (host, node, addr, buffer, length);
+
+ if (!packet)
+ return -ENOMEM;
hpsb_send_packet(packet);
down(&packet->state_change);
int length);
void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
int tag, int sync);
+void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data);
/*
* Get and free transaction labels.
size_t length);
struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, int extcode);
+struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
+ quadlet_t data) ;
/*
int hpsb_lock(struct hpsb_host *host, nodeid_t node, u64 addr, int extcode,
quadlet_t *data, quadlet_t arg);
+/* Generic packet creation. Used by hpsb_write. Also useful for protocol
+ * drivers that want to implement their own hpsb_write replacement. */
+struct hpsb_packet *hpsb_make_packet (struct hpsb_host *host, nodeid_t node,
+ u64 addr, quadlet_t *buffer, size_t length);
+
#endif /* _IEEE1394_TRANSACTIONS_H */
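/* Illustrative sketch of the "own hpsb_write replacement" mentioned in the
 * comment above: a non-blocking write built on hpsb_make_packet().  The
 * context struct and function names are assumptions made for the example;
 * the call sequence mirrors the GUID code changed elsewhere in this patch. */
struct example_write_ctx {
	struct hpsb_packet *pkt;
	struct tq_struct tq;
};

static void example_write_complete(struct example_write_ctx *ctx)
{
	struct hpsb_packet *pkt = ctx->pkt;

	if (hpsb_packet_success(pkt))
		HPSB_ERR("example write to node %d failed", pkt->node_id);

	free_tlabel(pkt->host, pkt->node_id, pkt->tlabel);
	free_hpsb_packet(pkt);
	kfree(ctx);
}

static int example_write_async(struct hpsb_host *host, nodeid_t node,
			       u64 addr, quadlet_t *buffer, size_t length)
{
	struct example_write_ctx *ctx = kmalloc(sizeof(*ctx), SLAB_ATOMIC);

	if (!ctx)
		return -ENOMEM;

	ctx->pkt = hpsb_make_packet(host, node, addr, buffer, length);
	if (!ctx->pkt) {
		kfree(ctx);
		return -ENOMEM;
	}

	INIT_TQUEUE(&ctx->tq, (void (*)(void*))example_write_complete, ctx);
	queue_task(&ctx->tq, &ctx->pkt->complete_tq);

	if (!hpsb_send_packet(ctx->pkt)) {
		free_tlabel(host, node, ctx->pkt->tlabel);
		free_hpsb_packet(ctx->pkt);
		kfree(ctx);
		return -EIO;
	}

	return 0;
}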
#include <asm/byteorder.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
-#include "linux22compat.h"
-#else
-#define V22_COMPAT_MOD_INC_USE_COUNT do {} while (0)
-#define V22_COMPAT_MOD_DEC_USE_COUNT do {} while (0)
-#define OWNER_THIS_MODULE owner: THIS_MODULE,
-
-#define INIT_TQ_LINK(tq) INIT_LIST_HEAD(&(tq).list)
-#define INIT_TQ_HEAD(tq) INIT_LIST_HEAD(&(tq))
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,18)
-#include <asm/spinlock.h>
-#else
#include <linux/spinlock.h>
-#endif
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifdef __BIG_ENDIAN
-static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count)
+static __inline__ void *memcpy_le32(u32 *dest, const u32 *__src, size_t count)
{
void *tmp = dest;
+ u32 *src = (u32 *)__src;
count /= 4;
* ohci1394.c - driver for OHCI 1394 boards
* Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
* Gord Peters <GordPeters@smarttech.com>
+ * 2001 Ben Collins <bcollins@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* . Async Response Transmit
* . Iso Receive
* . DMA mmap for iso receive
+ * . Config ROM generation
+ *
+ * Things implemented, but still in test phase:
+ * . Iso Transmit
*
* Things not implemented:
- * . Iso Transmit
+ * . Async Stream Packets
* . DMA error recovery
*
* Things to be fixed:
- * . Config ROM
+ * . Latency problems on UltraSPARC
*
* Known bugs:
- * . Self-id are sometimes not received properly
+ * . SelfID packets are sometimes not received properly
* if card is initialized with no other nodes
* on the bus
* . Apple PowerBook detected but not working yet
*
* Adam J Richter <adam@yggdrasil.com>
* . Use of pci_class to find device
+ *
* Andreas Tobler <toa@pop.agri.ch>
* . Updated proc_fs calls
+ *
* Emilie Chung <emilie.chung@axis.com>
* . Tip on Async Request Filter
+ *
* Pascal Drolet <pascal.drolet@informission.ca>
 * . Various tips for optimization and functionalities
+ *
* Robert Ficklin <rficklin@westengineering.com>
* . Loop in irq_handler
+ *
* James Goodwin <jamesg@Filanet.com>
* . Various tips on initialization, self-id reception, etc.
+ *
* Albrecht Dress <ad@mpifr-bonn.mpg.de>
* . Apple PowerBook detection
+ *
* Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
* . Reset the board properly before leaving + misc cleanups
+ *
* Leon van Stuivenberg <leonvs@iae.nl>
* . Bug fixes
+ *
+ * Ben Collins <bcollins@debian.org>
+ * . Working big-endian support
+ * . Updated to 2.4.x module scheme (PCI as well)
+ * . Removed procfs support since it trashes random mem
+ * . Config ROM generation
*/
#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <linux/proc_fs.h>
#include <linux/tqueue.h>
#include <linux/delay.h>
+#include <linux/spinlock.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include "ieee1394_types.h"
#include "hosts.h"
#include "ieee1394_core.h"
+#include "highlevel.h"
#include "ohci1394.h"
-/* This structure is not properly initialized ... it is taken from
- the lynx_csr_rom written by Andreas ... Some fields in the root
- directory and the module dependent info needs to be modified
- I do not have the proper doc */
-quadlet_t ohci_csr_rom[] = {
- /* bus info block */
- 0x04040000, /* info/CRC length, CRC */
- 0x31333934, /* 1394 magic number */
- 0xf07da002, /* cyc_clk_acc = 125us, max_rec = 1024 */
- 0x00000000, /* vendor ID, chip ID high (written from card info) */
- 0x00000000, /* chip ID low (written from card info) */
- /* root directory - FIXME */
- 0x00090000, /* CRC length, CRC */
- 0x03080028, /* vendor ID (Texas Instr.) */
- 0x81000009, /* offset to textual ID */
- 0x0c000200, /* node capabilities */
- 0x8d00000e, /* offset to unique ID */
- 0xc7000010, /* offset to module independent info */
- 0x04000000, /* module hardware version */
- 0x81000026, /* offset to textual ID */
- 0x09000000, /* node hardware version */
- 0x81000026, /* offset to textual ID */
- /* module vendor ID textual */
- 0x00080000, /* CRC length, CRC */
- 0x00000000,
- 0x00000000,
- 0x54455841, /* "Texas Instruments" */
- 0x5320494e,
- 0x53545255,
- 0x4d454e54,
- 0x53000000,
- /* node unique ID leaf */
- 0x00020000, /* CRC length, CRC */
- 0x08002856, /* vendor ID, chip ID high */
- 0x0000083E, /* chip ID low */
- /* module dependent info - FIXME */
- 0x00060000, /* CRC length, CRC */
- 0xb8000006, /* ??? offset to module textual ID */
- 0x81000004, /* ??? textual descriptor */
- 0x00000000, /* SRAM size */
- 0x00000000, /* AUXRAM size */
- 0x00000000, /* AUX device */
- /* module textual ID */
- 0x00050000, /* CRC length, CRC */
- 0x00000000,
- 0x00000000,
- 0x54534231, /* "TSB12LV22" */
- 0x324c5632,
- 0x32000000,
- /* part number */
- 0x00060000, /* CRC length, CRC */
- 0x00000000,
- 0x00000000,
- 0x39383036, /* "9806000-0001" */
- 0x3030342d,
- 0x30303431,
- 0x20000001,
- /* module hardware version textual */
- 0x00050000, /* CRC length, CRC */
- 0x00000000,
- 0x00000000,
- 0x5453424b, /* "TSBKOHCI403" */
- 0x4f484349,
- 0x34303300,
- /* node hardware version textual */
- 0x00050000, /* CRC length, CRC */
- 0x00000000,
- 0x00000000,
- 0x54534234, /* "TSB41LV03" */
- 0x314c5630,
- 0x33000000
-};
-
-
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif
#define DBGMSG(card, fmt, args...)
#endif
+#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
+#define OHCI_DMA_ALLOC(fmt, args...) \
+ HPSB_ERR("ohci1394("__FUNCTION__")alloc(%d): "fmt, \
+ ++global_outstanding_dmas, ## args)
+#define OHCI_DMA_FREE(fmt, args...) \
+ HPSB_ERR("ohci1394("__FUNCTION__")free(%d): "fmt, \
+ --global_outstanding_dmas, ## args)
+u32 global_outstanding_dmas = 0;
+#else
+#define OHCI_DMA_ALLOC(fmt, args...)
+#define OHCI_DMA_FREE(fmt, args...)
+#endif
+
/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "ohci1394: " fmt "\n" , ## args)
#define PRINT(level, card, fmt, args...) \
printk(level "ohci1394_%d: " fmt "\n" , card , ## args)
-#define FAIL(fmt, args...) \
- PRINT_G(KERN_ERR, fmt , ## args); \
- num_of_cards--; \
- remove_card(ohci); \
- return 1;
-
-#if USE_DEVICE
-
-int supported_chips[][2] = {
- { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_OHCI1394_LV22 },
- { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_OHCI1394_LV23 },
- { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_OHCI1394_LV26 },
- { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_OHCI1394_PCI4450 },
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_OHCI1394 },
- { PCI_VENDOR_ID_SONY, PCI_DEVICE_ID_SONY_CXD3222 },
- { PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_UPD72862 },
- { PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_UPD72870 },
- { PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_UPD72871 },
- { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW },
- { PCI_VENDOR_ID_AL, PCI_DEVICE_ID_ALI_OHCI1394_M5251 },
- { PCI_VENDOR_ID_LUCENT, PCI_DEVICE_ID_LUCENT_FW323 },
- { -1, -1 }
-};
-
-#else
+#define FAIL(fmt, args...) \
+do { \
+ PRINT_G(KERN_ERR, fmt , ## args); \
+ remove_card(ohci); \
+ return 1; \
+} while(0)
#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
-static struct pci_device_id ohci1394_pci_tbl[] __initdata = {
+static struct pci_device_id ohci1394_pci_tbl[] __devinitdata = {
{
class: PCI_CLASS_FIREWIRE_OHCI,
class_mask: 0x00ffffff,
{ 0, },
};
MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
-#endif
-#endif /* USE_DEVICE */
+static char version[] __devinitdata =
+ "ohci1394.c:v0.50 15/Jul/01 Ben Collins <bcollins@debian.org>";
+/* Module Parameters */
MODULE_PARM(attempt_root,"i");
+MODULE_PARM_DESC(attempt_root, "Attempt to make the host root.");
static int attempt_root = 0;
-static struct ti_ohci cards[MAX_OHCI1394_CARDS];
-static int num_of_cards = 0;
+#ifdef __LITTLE_ENDIAN
+/* Don't waste cycles on same sex byte swaps */
+#define packet_swab(w,x,y,z)
+#define block_swab32(x,y)
+#else
+static void packet_swab(quadlet_t *data, char tcode, int len, int payload_swap);
+static __inline__ void block_swab32(quadlet_t *data, size_t size);
+#endif
+
+static unsigned int card_id_counter = 0;
-static int add_card(struct pci_dev *dev);
+static void dma_trm_tasklet(unsigned long data);
static void remove_card(struct ti_ohci *ohci);
-static int init_driver(void);
-static void dma_trm_bh(void *data);
-static void dma_rcv_bh(void *data);
static void dma_trm_reset(struct dma_trm_ctx *d);
/***********************************
* IEEE-1394 functionality section *
***********************************/
-#if 0 /* not needed at this time */
-static int get_phy_reg(struct ti_ohci *ohci, int addr)
+static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
- int timeout=10000;
- static quadlet_t r;
-
- if ((addr < 1) || (addr > 15)) {
- PRINT(KERN_ERR, ohci->id, __FUNCTION__
- ": PHY register address %d out of range", addr);
- return -EFAULT;
- }
-
- spin_lock(&ohci->phy_reg_lock);
+	int i;
+	unsigned long flags;
+ quadlet_t r;
- /* initiate read request */
- reg_write(ohci, OHCI1394_PhyControl,
- ((addr<<8)&0x00000f00) | 0x00008000);
+ spin_lock_irqsave (&ohci->phy_reg_lock, flags);
- /* wait */
- while (!(reg_read(ohci, OHCI1394_PhyControl)&0x80000000) && timeout)
- timeout--;
+ reg_write(ohci, OHCI1394_PhyControl, (((u16)addr << 8) & 0x00000f00) | 0x00008000);
-
- if (!timeout) {
- PRINT(KERN_ERR, ohci->id, "get_phy_reg timeout !!!\n");
- spin_unlock(&ohci->phy_reg_lock);
- return -EFAULT;
+ for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+ if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
+ break;
+ mdelay(1);
}
+
r = reg_read(ohci, OHCI1394_PhyControl);
+
+ if (i >= OHCI_LOOP_COUNT)
+ PRINT (KERN_ERR, ohci->id, "Get PHY Reg timeout [0x%08x/0x%08x/%d]\n",
+ r, r & 0x80000000, i);
- spin_unlock(&ohci->phy_reg_lock);
+ spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
- return (r&0x00ff0000)>>16;
+ return (r & 0x00ff0000) >> 16;
}
-static int set_phy_reg(struct ti_ohci *ohci, int addr, unsigned char data) {
- int timeout=10000;
+static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
+{
+	int i;
+	unsigned long flags;
u32 r;
- if ((addr < 1) || (addr > 15)) {
- PRINT(KERN_ERR, ohci->id, __FUNCTION__
- ": PHY register address %d out of range", addr);
- return -EFAULT;
- }
+ spin_lock_irqsave (&ohci->phy_reg_lock, flags);
- r = ((addr<<8)&0x00000f00) | 0x00004000 | ((u32)data & 0x000000ff);
+ reg_write(ohci, OHCI1394_PhyControl, 0x00004000 | (((u16)addr << 8) & 0x00000f00) | data);
- spin_lock(&ohci->phy_reg_lock);
+ for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+ r = reg_read(ohci, OHCI1394_PhyControl);
+ if (!(r & 0x00004000))
+ break;
+ mdelay(1);
+ }
- reg_write(ohci, OHCI1394_PhyControl, r);
+ if (i == OHCI_LOOP_COUNT)
+ PRINT (KERN_ERR, ohci->id, "Set PHY Reg timeout [0x%08x/0x%08x/%d]\n",
+ r, r & 0x00004000, i);
- /* wait */
- while (!(reg_read(ohci, OHCI1394_PhyControl)&0x80000000) && timeout)
- timeout--;
+ spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
- spin_unlock(&ohci->phy_reg_lock);
+ return;
+}
- if (!timeout) {
- PRINT(KERN_ERR, ohci->id, "set_phy_reg timeout !!!\n");
- return -EFAULT;
- }
+/* Or's our value into the current value */
+static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
+{
+ u8 old;
- return 0;
+ old = get_phy_reg (ohci, addr);
+ old |= data;
+ set_phy_reg (ohci, addr, old);
+
+ return;
}
-#endif /* unneeded functions */
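/* Illustrative sketch: a devctl RESET_BUS handler using set_phy_reg_mask()
 * together with the LONG_RESET/SHORT_RESET types added to hosts.h in this
 * patch.  PHY register 1 bit 0x40 (IBR) matches the self-id error path
 * below; register 5 bit 0x40 (ISBR, the 1394a arbitrated short reset) is
 * an assumption of this example. */
static void example_reset_bus(struct ti_ohci *ohci, int type)
{
	if (type == SHORT_RESET)
		set_phy_reg_mask(ohci, 5, 0x40);	/* ISBR: arbitrated short reset */
	else
		set_phy_reg_mask(ohci, 1, 0x40);	/* IBR: classic long reset */
}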
-inline static int handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
+static int handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
int phyid, int isroot)
{
quadlet_t *q = ohci->selfid_buf_cpu;
quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
size_t size;
- quadlet_t lsid;
+ quadlet_t q0, q1;
- /* Self-id handling seems much easier than for the aic5800 chip.
- All the self-id packets, including this device own self-id,
+ /* SelfID handling seems much easier than for the aic5800 chip.
+ All the self-id packets, including this device's own self-id,
should be correctly arranged in the selfid buffer at this
stage */
/* Check status of self-id reception */
- if ((self_id_count&0x80000000) ||
- ((self_id_count&0x00FF0000) != (q[0]&0x00FF0000))) {
+
+ if (ohci->selfid_swap)
+ q0 = le32_to_cpu(q[0]);
+ else
+ q0 = q[0];
+
+ if ((self_id_count & 0x80000000) ||
+ ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
PRINT(KERN_ERR, ohci->id,
- "Error in reception of self-id packets"
- "Self-id count: %08x q[0]: %08x",
- self_id_count, q[0]);
-
- /*
- * Tip by James Goodwin <jamesg@Filanet.com>:
- * We had an error, generate another bus reset in response.
- * TODO. Actually read the current value in the phy before
- * generating a bus reset (read modify write). This way
- * we don't stomp any current gap count settings, etc.
- */
+ "Error in reception of SelfID packets [0x%08x/0x%08x]",
+ self_id_count, q0);
+
+ /* Tip by James Goodwin <jamesg@Filanet.com>:
+ * We had an error, generate another bus reset in response. */
if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
- reg_write(ohci, OHCI1394_PhyControl, 0x000041ff);
+ set_phy_reg_mask (ohci, 1, 0x40);
ohci->self_id_errors++;
- }
- else {
+ } else {
PRINT(KERN_ERR, ohci->id,
- "Timeout on self-id error reception");
+ "Too many errors on SelfID error reception, giving up!");
}
return -1;
}
- size = ((self_id_count&0x00001FFC)>>2) - 1;
+ size = ((self_id_count & 0x00001FFC) >> 2) - 1;
q++;
while (size > 0) {
- if (q[0] == ~q[1]) {
- PRINT(KERN_INFO, ohci->id, "selfid packet 0x%x rcvd",
- q[0]);
- hpsb_selfid_received(host, cpu_to_be32(q[0]));
- if (((q[0]&0x3f000000)>>24)==phyid) {
- lsid=q[0];
- PRINT(KERN_INFO, ohci->id,
- "This node self-id is 0x%08x", lsid);
- }
+ if (ohci->selfid_swap) {
+ q0 = le32_to_cpu(q[0]);
+ q1 = le32_to_cpu(q[1]);
+ } else {
+ q0 = q[0];
+ q1 = q[1];
+ }
+
+ if (q0 == ~q1) {
+ PRINT(KERN_DEBUG, ohci->id, "SelfID packet 0x%x received", q0);
+ hpsb_selfid_received(host, cpu_to_be32(q0));
+ if (((q0 & 0x3f000000) >> 24) == phyid)
+ DBGMSG (ohci->id, "SelfID for this node is 0x%08x", q0);
} else {
PRINT(KERN_ERR, ohci->id,
- "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
+ "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
}
q += 2;
size -= 2;
}
- PRINT(KERN_INFO, ohci->id, "calling self-id complete");
+ PRINT(KERN_DEBUG, ohci->id, "SelfID complete");
hpsb_selfid_complete(host, phyid, isroot);
return 0;
}
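For reference, the per-packet check and phy_ID extraction done in the loop above can be condensed into a small helper; this is illustrative only, and assumes q0/q1 are already in CPU byte order (i.e. after the selfid_swap handling above):

/* Each self-ID quadlet in the buffer is followed by its bitwise
 * complement; the sending node's physical ID is the 6-bit field at
 * bits 29:24 (mask 0x3f000000).  Returns -1 on an inconsistent pair. */
static inline int selfid_phy_id(quadlet_t q0, quadlet_t q1)
{
	if (q0 != ~q1)
		return -1;
	return (q0 & 0x3f000000) >> 24;
}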
-static int ohci_detect(struct hpsb_host_template *tmpl)
-{
- struct hpsb_host *host;
- int i;
-
- init_driver();
-
- for (i = 0; i < num_of_cards; i++) {
- host = hpsb_get_host(tmpl, 0);
- if (host == NULL) {
- /* simply don't init more after out of mem */
- return i;
- }
- host->hostdata = &cards[i];
- cards[i].host = host;
- }
-
- return num_of_cards;
-}
-
static int ohci_soft_reset(struct ti_ohci *ohci) {
- int timeout=10000;
+ int i;
reg_write(ohci, OHCI1394_HCControlSet, 0x00010000);
- while ((reg_read(ohci, OHCI1394_HCControlSet)&0x00010000) && timeout)
- timeout--;
- if (!timeout) {
- PRINT(KERN_ERR, ohci->id, "soft reset timeout !!!");
- return -EFAULT;
+ for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+ if (reg_read(ohci, OHCI1394_HCControlSet) & 0x00010000)
+ break;
+ mdelay(10);
}
- else PRINT(KERN_INFO, ohci->id, "soft reset finished");
+
+ PRINT(KERN_DEBUG, ohci->id, "Soft reset finished");
+
return 0;
}
nodeId = reg_read(ohci, OHCI1394_NodeID);
if (!(nodeId&0x80000000)) {
PRINT(KERN_ERR, ohci->id,
- "Running dma failed because Node ID not valid");
+ "Running dma failed because Node ID is not valid");
return -1;
}
/* Run the dma context */
reg_write(ohci, reg, 0x8000);
- if (msg) PRINT(KERN_INFO, ohci->id, "%s", msg);
+ if (msg) PRINT(KERN_DEBUG, ohci->id, "%s", msg);
return 0;
}
for (i=0; i<d->num_desc; i++) {
- /* end of descriptor list? */
+ d->prg_cpu[i]->control =
+ cpu_to_le32((0x280C << 16) | d->buf_size);
+
+ /* End of descriptor list? */
if ((i+1) < d->num_desc) {
- d->prg_cpu[i]->control = (0x283C << 16) | d->buf_size;
d->prg_cpu[i]->branchAddress =
- (d->prg_bus[i+1] & 0xfffffff0) | 0x1;
+ cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
} else {
- d->prg_cpu[i]->control = (0x283C << 16) | d->buf_size;
d->prg_cpu[i]->branchAddress =
- d->prg_bus[0] & 0xfffffff0;
+ cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
}
- d->prg_cpu[i]->address = d->buf_bus[i];
- d->prg_cpu[i]->status = d->buf_size;
+ d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
+ d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
}
d->buf_ind = 0;
/* Run AR context */
reg_write(ohci, d->ctrlSet, 0x00008000);
- PRINT(KERN_INFO, ohci->id, "Receive DMA ctx=%d initialized", d->ctx);
+ DBGMSG(ohci->id, "Receive DMA ctx=%d initialized", d->ctx);
}
/* Initialize the dma transmit context */
d->pending_first = NULL;
d->pending_last = NULL;
- PRINT(KERN_INFO, ohci->id, "AT dma ctx=%d initialized", d->ctx);
+ DBGMSG(ohci->id, "Transmit dma ctx=%d initialized", d->ctx);
}
/* Count the number of available iso contexts */
int retval, i;
spin_lock_init(&ohci->phy_reg_lock);
+ spin_lock_init(&ohci->event_lock);
/*
* Tip by James Goodwin <jamesg@Filanet.com>:
*/
/* Soft reset */
- if ((retval=ohci_soft_reset(ohci))<0) return retval;
+ if ((retval = ohci_soft_reset(ohci)) < 0)
+ return retval;
/*
* Delay after soft reset to make sure everything has settled
* down (sanity)
*/
- mdelay(100);
+ mdelay(10);
/* Set Link Power Status (LPS) */
reg_write(ohci, OHCI1394_HCControlSet, 0x00080000);
* Delay after setting LPS in order to make sure link/phy
* communication is established
*/
- mdelay(100);
+ mdelay(10);
/* Set the bus number */
reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
ohci->max_packet_size =
1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);
- PRINT(KERN_INFO, ohci->id, "max packet size = %d bytes",
- ohci->max_packet_size);
+ PRINT(KERN_DEBUG, ohci->id, "Max packet size = %d bytes",
+ ohci->max_packet_size);
/* Don't accept phy packets into AR request context */
reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
/* Initialize IR dma */
ohci->nb_iso_rcv_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
- PRINT(KERN_INFO, ohci->id, "%d iso receive contexts available",
- ohci->nb_iso_rcv_ctx);
+ DBGMSG(ohci->id, "%d iso receive contexts available",
+ ohci->nb_iso_rcv_ctx);
for (i=0;i<ohci->nb_iso_rcv_ctx;i++) {
reg_write(ohci, OHCI1394_IsoRcvContextControlClear+32*i,
0xffffffff);
/* Initialize IT dma */
ohci->nb_iso_xmit_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
- PRINT(KERN_INFO, ohci->id, "%d iso transmit contexts available",
- ohci->nb_iso_xmit_ctx);
+ DBGMSG(ohci->id, "%d iso transmit contexts available",
+ ohci->nb_iso_xmit_ctx);
for (i=0;i<ohci->nb_iso_xmit_ctx;i++) {
reg_write(ohci, OHCI1394_IsoXmitContextControlClear+32*i,
0xffffffff);
/* Initialize IR dma */
initialize_dma_rcv_ctx(ohci->ir_context);
+ /* Initialize IT dma */
+ initialize_dma_trm_ctx(ohci->it_context);
+
/* Set up isoRecvIntMask to generate interrupts for context 0
(thanks to Michael Greger for seeing that I forgot this) */
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 0x00000001);
+ /* Set up isoXmitIntMask to generate interrupts for context 0 */
+ reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 0x00000001);
+
/*
* Accept AT requests from all nodes. This probably
* will have to be controlled from the subsystem
(OHCI1394_MAX_AT_RESP_RETRIES<<4) |
(OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
-#ifndef __BIG_ENDIAN
+ /* We don't want hardware swapping */
reg_write(ohci, OHCI1394_HCControlClear, 0x40000000);
-#else
- reg_write(ohci, OHCI1394_HCControlSet, 0x40000000);
-#endif
/* Enable interrupts */
reg_write(ohci, OHCI1394_IntMaskSet,
OHCI1394_selfIDComplete |
OHCI1394_RSPkt |
OHCI1394_RQPkt |
- OHCI1394_ARRS |
- OHCI1394_ARRQ |
OHCI1394_respTxComplete |
OHCI1394_reqTxComplete |
OHCI1394_isochRx |
- OHCI1394_isochTx
+ OHCI1394_isochTx |
+ OHCI1394_unrecoverableError
);
/* Enable link */
static void ohci_remove(struct hpsb_host *host)
{
struct ti_ohci *ohci;
-
+
if (host != NULL) {
ohci = host->hostdata;
remove_card(ohci);
u32 cycleTimer;
int idx = d->prg_ind;
+ DBGMSG(ohci->id, "Inserting packet for node %d, tlabel=%d, tcode=0x%x, speed=%d\n",
+ packet->node_id, packet->tlabel, packet->tcode, packet->speed_code);
+
d->prg_cpu[idx]->begin.address = 0;
d->prg_cpu[idx]->begin.branchAddress = 0;
if (d->ctx==1) {
* the 16 lower bits of the status... let's try 1 sec timeout
*/
cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- d->prg_cpu[idx]->begin.status =
+ d->prg_cpu[idx]->begin.status = cpu_to_le32(
(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
- ((cycleTimer&0x01fff000)>>12);
+ ((cycleTimer&0x01fff000)>>12));
DBGMSG(ohci->id, "cycleTimer: %08x timeStamp: %08x",
cycleTimer, d->prg_cpu[idx]->begin.status);
- }
- else
+ } else
d->prg_cpu[idx]->begin.status = 0;
- if (packet->type == raw) {
- d->prg_cpu[idx]->data[0] = OHCI1394_TCODE_PHY<<4;
- d->prg_cpu[idx]->data[1] = packet->header[0];
- d->prg_cpu[idx]->data[2] = packet->header[1];
- }
- else {
- d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
- (packet->header[0] & 0xFFFF);
- d->prg_cpu[idx]->data[1] = (packet->header[1] & 0xFFFF) |
- (packet->header[0] & 0xFFFF0000);
- d->prg_cpu[idx]->data[2] = packet->header[2];
- d->prg_cpu[idx]->data[3] = packet->header[3];
- }
+ if ( (packet->type == async) || (packet->type == raw) ) {
- if (packet->data_size) { /* block transmit */
- d->prg_cpu[idx]->begin.control = OUTPUT_MORE_IMMEDIATE | 0x10;
- d->prg_cpu[idx]->end.control = OUTPUT_LAST | packet->data_size;
- /*
- * FIXME: check that the packet data buffer
- * do not cross a page boundary
- */
- if (cross_bound((unsigned long)packet->data,
- packet->data_size)>0) {
- /* FIXME: do something about it */
- PRINT(KERN_ERR, ohci->id, __FUNCTION__
- ": packet data addr: %p size %d bytes "
- "cross page boundary",
- packet->data, packet->data_size);
- }
+ if (packet->type == raw) {
+ d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
+ d->prg_cpu[idx]->data[1] = packet->header[0];
+ d->prg_cpu[idx]->data[2] = packet->header[1];
+ } else {
+ d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
+ (packet->header[0] & 0xFFFF);
+ d->prg_cpu[idx]->data[1] =
+ (packet->header[1] & 0xFFFF) |
+ (packet->header[0] & 0xFFFF0000);
+ d->prg_cpu[idx]->data[2] = packet->header[2];
+ d->prg_cpu[idx]->data[3] = packet->header[3];
+ packet_swab(d->prg_cpu[idx]->data, packet->tcode,
+ packet->header_size>>2, ohci->payload_swap);
+ }
- d->prg_cpu[idx]->end.address =
- pci_map_single(ohci->dev, packet->data,
- packet->data_size, PCI_DMA_TODEVICE);
- d->prg_cpu[idx]->end.branchAddress = 0;
- d->prg_cpu[idx]->end.status = 0;
- if (d->branchAddrPtr)
- *(d->branchAddrPtr) = d->prg_bus[idx] | 0x3;
- d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
- }
- else { /* quadlet transmit */
- if (packet->type == raw)
+ if (packet->data_size) { /* block transmit */
d->prg_cpu[idx]->begin.control =
- OUTPUT_LAST_IMMEDIATE|(packet->header_size+4);
- else
- d->prg_cpu[idx]->begin.control =
- OUTPUT_LAST_IMMEDIATE|packet->header_size;
+ cpu_to_le32(OUTPUT_MORE_IMMEDIATE | 0x10);
+ d->prg_cpu[idx]->end.control =
+ cpu_to_le32(OUTPUT_LAST | packet->data_size);
+ /*
+ * Check that the packet data buffer
+ * does not cross a page boundary.
+ */
+ if (cross_bound((unsigned long)packet->data,
+ packet->data_size)>0) {
+ /* FIXME: do something about it */
+ PRINT(KERN_ERR, ohci->id, __FUNCTION__
+ ": packet data addr: %p size %Zd bytes "
+ "cross page boundary",
+ packet->data, packet->data_size);
+ }
+
+ d->prg_cpu[idx]->end.address = cpu_to_le32(
+ pci_map_single(ohci->dev, packet->data,
+ packet->data_size,
+ PCI_DMA_TODEVICE));
+ OHCI_DMA_ALLOC("single, block transmit packet");
+
+ if (ohci->payload_swap)
+ block_swab32(packet->data, packet->data_size>>2);
+
+ d->prg_cpu[idx]->end.branchAddress = 0;
+ d->prg_cpu[idx]->end.status = 0;
+ if (d->branchAddrPtr)
+ *(d->branchAddrPtr) =
+ cpu_to_le32(d->prg_bus[idx] | 0x3);
+ d->branchAddrPtr =
+ &(d->prg_cpu[idx]->end.branchAddress);
+ } else { /* quadlet transmit */
+ if (packet->type == raw)
+ d->prg_cpu[idx]->begin.control = cpu_to_le32(
+ OUTPUT_LAST_IMMEDIATE |
+ (packet->header_size+4));
+ else
+ d->prg_cpu[idx]->begin.control = cpu_to_le32(
+ OUTPUT_LAST_IMMEDIATE |
+ packet->header_size);
+
+ if (d->branchAddrPtr)
+ *(d->branchAddrPtr) =
+ cpu_to_le32(d->prg_bus[idx] | 0x2);
+ d->branchAddrPtr =
+ &(d->prg_cpu[idx]->begin.branchAddress);
+ }
- if (d->branchAddrPtr)
- *(d->branchAddrPtr) = d->prg_bus[idx] | 0x2;
- d->branchAddrPtr = &(d->prg_cpu[idx]->begin.branchAddress);
- }
+ } else { /* iso packet */
+ d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
+ (packet->header[0] & 0xFFFF);
+ d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
+ packet_swab(d->prg_cpu[idx]->data, packet->tcode, packet->header_size>>2,
+ ohci->payload_swap);
+
+ d->prg_cpu[idx]->begin.control = cpu_to_le32(OUTPUT_MORE_IMMEDIATE | 0x8);
+ d->prg_cpu[idx]->end.control = cpu_to_le32(
+ OUTPUT_LAST | 0x08000000 | packet->data_size);
+ d->prg_cpu[idx]->end.address = cpu_to_le32(
+ pci_map_single(ohci->dev, packet->data,
+ packet->data_size, PCI_DMA_TODEVICE));
+ OHCI_DMA_ALLOC("single, iso transmit packet");
+
+ if (ohci->payload_swap)
+ block_swab32(packet->data, packet->data_size>>2);
+
+ d->prg_cpu[idx]->end.branchAddress = 0;
+ d->prg_cpu[idx]->end.status = 0;
+ DBGMSG(ohci->id, "iso xmit context info: header[%08x %08x]\n"
+ " begin=%08x %08x %08x %08x\n"
+ " %08x %08x %08x %08x\n"
+ " end =%08x %08x %08x %08x",
+ d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
+ d->prg_cpu[idx]->begin.control,
+ d->prg_cpu[idx]->begin.address,
+ d->prg_cpu[idx]->begin.branchAddress,
+ d->prg_cpu[idx]->begin.status,
+ d->prg_cpu[idx]->data[0],
+ d->prg_cpu[idx]->data[1],
+ d->prg_cpu[idx]->data[2],
+ d->prg_cpu[idx]->data[3],
+ d->prg_cpu[idx]->end.control,
+ d->prg_cpu[idx]->end.address,
+ d->prg_cpu[idx]->end.branchAddress,
+ d->prg_cpu[idx]->end.status);
+ if (d->branchAddrPtr)
+ *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
+ d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
+ }
d->free_prgs--;
/* queue the packet in the appropriate context queue */
if (d->fifo_last) {
d->fifo_last->xnext = packet;
d->fifo_last = packet;
- }
- else {
+ } else {
d->fifo_first = packet;
d->fifo_last = packet;
}
/*
* This function fills the AT FIFO with the (eventual) pending packets
- * and runs or wake up the AT DMA prg if necessary.
+ * and runs or wakes up the AT DMA prg if necessary.
+ *
* The function MUST be called with the d->lock held.
*/
static int dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
d->pending_last = NULL;
else
PRINT(KERN_INFO, ohci->id,
- "AT DMA FIFO ctx=%d full... waiting",d->ctx);
+ "Transmit DMA FIFO ctx=%d is full... waiting",d->ctx);
/* Is the context running ? (should be unless it is
the first packet to be sent in this context) */
if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
- DBGMSG(ohci->id,"Starting AT DMA ctx=%d",d->ctx);
+ DBGMSG(ohci->id,"Starting transmit DMA ctx=%d",d->ctx);
reg_write(ohci, d->cmdPtr, d->prg_bus[idx]|z);
run_context(ohci, d->ctrlSet, NULL);
}
else {
- DBGMSG(ohci->id,"Waking AT DMA ctx=%d",d->ctx);
- /* wake up the dma context if necessary */
- if (!(reg_read(ohci, d->ctrlSet) & 0x400))
+ /* Wake up the dma context if necessary */
+ if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
+ DBGMSG(ohci->id,"Waking transmit DMA ctx=%d",d->ctx);
reg_write(ohci, d->ctrlSet, 0x1000);
+ }
}
return 1;
}
-/*
- * Transmission of an async packet
- */
+/* Transmission of an async packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
struct ti_ohci *ohci = host->hostdata;
if (packet->data_size > ohci->max_packet_size) {
PRINT(KERN_ERR, ohci->id,
- "transmit packet size = %d too big",
+ "Transmit packet size %Zd is too big",
packet->data_size);
return 0;
}
packet->xnext = NULL;
- /* Decide wether we have a request or a response packet */
+ /* Decide whether we have an iso, a request, or a response packet */
tcode = (packet->header[0]>>4)&0xf;
- if (tcode & 0x02) d = ohci->at_resp_context;
+ if (tcode == TCODE_ISO_DATA) d = ohci->it_context;
+ else if (tcode & 0x02) d = ohci->at_resp_context;
else d = ohci->at_req_context;
spin_lock_irqsave(&d->lock,flags);
- /* queue the packet for later insertion into to dma fifo */
+ /* queue the packet for later insertion into the dma fifo */
if (d->pending_last) {
d->pending_last->xnext = packet;
d->pending_last = packet;
switch (cmd) {
case RESET_BUS:
- /*
- * FIXME: this flag might be necessary in some case
- */
- PRINT(KERN_INFO, ohci->id, "resetting bus on request%s",
- ((host->attempt_root || attempt_root) ?
+ PRINT (KERN_DEBUG, ohci->id, "Resetting bus on request%s",
+ ((host->attempt_root || attempt_root) ?
" and attempting to become root" : ""));
- reg_write(ohci, OHCI1394_PhyControl,
- (host->attempt_root || attempt_root) ?
- 0x000041ff : 0x0000417f);
+ set_phy_reg_mask (ohci, 1, 0x40 | ((host->attempt_root || attempt_root) ?
+ 0x80 : 0));
break;
case GET_CYCLE_COUNTER:
if (arg<0 || arg>63) {
PRINT(KERN_ERR, ohci->id, __FUNCTION__
- "IS0_LISTEN_CHANNEL channel %d out of range",
+ "IS0 listne channel %d is out of range",
arg);
return -EFAULT;
}
if (ohci->ISO_channel_usage & mask) {
PRINT(KERN_ERR, ohci->id, __FUNCTION__
- "IS0_LISTEN_CHANNEL channel %d already used",
+ "IS0 listen channel %d is already used",
arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
return -EFAULT;
1<<arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
- DBGMSG(ohci->id, "listening enabled on channel %d", arg);
+ DBGMSG(ohci->id, "Listening enabled on channel %d", arg);
break;
}
case ISO_UNLISTEN_CHANNEL:
if (arg<0 || arg>63) {
PRINT(KERN_ERR, ohci->id, __FUNCTION__
- "IS0_UNLISTEN_CHANNEL channel %d out of range",
+ "IS0 unlisten channel %d is out of range",
arg);
return -EFAULT;
}
if (!(ohci->ISO_channel_usage & mask)) {
PRINT(KERN_ERR, ohci->id, __FUNCTION__
- "IS0_UNLISTEN_CHANNEL channel %d not used",
+ "IS0 unlisten channel %d is not used",
arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
return -EFAULT;
1<<arg);
spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
- DBGMSG(ohci->id, "listening disabled on channel %d", arg);
+ DBGMSG(ohci->id, "Listening disabled on channel %d", arg);
break;
}
default:
spin_lock_irqsave(&d->lock,flags);
- /* is there still any packet pending in the fifo ? */
+ /* Is there still any packet pending in the fifo ? */
while(d->fifo_first) {
PRINT(KERN_INFO, ohci->id,
"AT dma reset ctx=%d, aborting transmission",
static void ohci_irq_handler(int irq, void *dev_id,
struct pt_regs *regs_are_unused)
{
- quadlet_t event,node_id;
+ quadlet_t event, node_id;
struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
struct hpsb_host *host = ohci->host;
- int phyid = -1, isroot = 0;
- int timeout = 255;
+ int phyid = -1, isroot = 0;
+ unsigned long flags;
- do {
- /* read the interrupt event register */
- event=reg_read(ohci, OHCI1394_IntEventClear);
+ /* Read the interrupt event register */
+ spin_lock_irqsave(&ohci->event_lock, flags);
+ event = reg_read(ohci, OHCI1394_IntEventClear);
+ reg_write(ohci, OHCI1394_IntEventClear, event);
+ spin_unlock_irqrestore(&ohci->event_lock, flags);
- if (!event) return;
+ if (!event) return;
- DBGMSG(ohci->id, "IntEvent: %08x",event);
+ DBGMSG(ohci->id, "IntEvent: %08x", event);
- /* clear the interrupt event register */
- reg_write(ohci, OHCI1394_IntEventClear, event);
+ /* Die right here and now */
+ if (event & OHCI1394_unrecoverableError) {
+ PRINT(KERN_ERR, ohci->id, "Unrecoverable error, shutting down card!");
+ remove_card(ohci);
+ return;
+ }
- if (event & OHCI1394_busReset) {
- if (!host->in_bus_reset) {
- PRINT(KERN_INFO, ohci->id, "Bus reset");
-
- /* Wait for the AT fifo to be flushed */
- dma_trm_reset(ohci->at_req_context);
- dma_trm_reset(ohci->at_resp_context);
+ /* Someone wants a bus reset. Better watch what you wish for...
+ *
+ * XXX: Read 6.1.1 of the OHCI1394 spec. We need to take special
+ * care with the BusReset Interrupt, before and until the SelfID
+ * phase is over. This is why the SelfID phase sometimes fails for
+ * this driver. */
+ if (event & OHCI1394_busReset) {
+ if (!host->in_bus_reset) {
+ PRINT(KERN_DEBUG, ohci->id, "Bus reset requested");
+
+ /* Wait for the AT fifo to be flushed */
+ dma_trm_reset(ohci->at_req_context);
+ dma_trm_reset(ohci->at_resp_context);
- /* Subsystem call */
- hpsb_bus_reset(ohci->host);
-
- ohci->NumBusResets++;
- }
+ /* Subsystem call */
+ hpsb_bus_reset(ohci->host);
+
+ ohci->NumBusResets++;
}
- /*
- * Problem: How can I ensure that the AT bottom half will be
- * executed before the AR bottom half (both events may have
- * occurred within a single irq event)
- * Quick hack: just launch it within the IRQ handler
- */
- if (event & OHCI1394_reqTxComplete) {
- struct dma_trm_ctx *d = ohci->at_req_context;
- DBGMSG(ohci->id, "Got reqTxComplete interrupt "
- "status=0x%08X", reg_read(ohci, d->ctrlSet));
+ event &= ~OHCI1394_busReset;
+ }
+
+ /* XXX: We need a way to also queue the OHCI1394_reqTxComplete,
+ * but for right now we simply run it upon reception, to make sure
+ * we get sent acks before response packets. This sucks mainly
+ * because it halts the interrupt handler. */
+ if (event & OHCI1394_reqTxComplete) {
+ struct dma_trm_ctx *d = ohci->at_req_context;
+ DBGMSG(ohci->id, "Got reqTxComplete interrupt "
+ "status=0x%08X", reg_read(ohci, d->ctrlSet));
+ if (reg_read(ohci, d->ctrlSet) & 0x800)
+ ohci1394_stop_context(ohci, d->ctrlClear,
+ "reqTxComplete");
+ else
+ dma_trm_tasklet ((unsigned long)d);
+ event &= ~OHCI1394_reqTxComplete;
+ }
+ if (event & OHCI1394_respTxComplete) {
+ struct dma_trm_ctx *d = ohci->at_resp_context;
+ DBGMSG(ohci->id, "Got respTxComplete interrupt "
+ "status=0x%08X", reg_read(ohci, d->ctrlSet));
+ if (reg_read(ohci, d->ctrlSet) & 0x800)
+ ohci1394_stop_context(ohci, d->ctrlClear,
+ "respTxComplete");
+ else
+ tasklet_schedule(&d->task);
+ event &= ~OHCI1394_respTxComplete;
+ }
+ if (event & OHCI1394_RQPkt) {
+ struct dma_rcv_ctx *d = ohci->ar_req_context;
+ DBGMSG(ohci->id, "Got RQPkt interrupt status=0x%08X",
+ reg_read(ohci, d->ctrlSet));
+ if (reg_read(ohci, d->ctrlSet) & 0x800)
+ ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
+ else
+ tasklet_schedule(&d->task);
+ event &= ~OHCI1394_RQPkt;
+ }
+ if (event & OHCI1394_RSPkt) {
+ struct dma_rcv_ctx *d = ohci->ar_resp_context;
+ DBGMSG(ohci->id, "Got RSPkt interrupt status=0x%08X",
+ reg_read(ohci, d->ctrlSet));
+ if (reg_read(ohci, d->ctrlSet) & 0x800)
+ ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
+ else
+ tasklet_schedule(&d->task);
+ event &= ~OHCI1394_RSPkt;
+ }
+ if (event & OHCI1394_isochRx) {
+ quadlet_t isoRecvIntEvent;
+ struct dma_rcv_ctx *d = ohci->ir_context;
+ isoRecvIntEvent =
+ reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
+ reg_write(ohci, OHCI1394_IsoRecvIntEventClear,
+ isoRecvIntEvent);
+ DBGMSG(ohci->id, "Got isochRx interrupt "
+ "status=0x%08X isoRecvIntEvent=%08x",
+ reg_read(ohci, d->ctrlSet), isoRecvIntEvent);
+ if (isoRecvIntEvent & 0x1) {
if (reg_read(ohci, d->ctrlSet) & 0x800)
ohci1394_stop_context(ohci, d->ctrlClear,
- "reqTxComplete");
+ "isochRx");
else
- dma_trm_bh((void *)d);
+ tasklet_schedule(&d->task);
}
- if (event & OHCI1394_respTxComplete) {
- struct dma_trm_ctx *d = ohci->at_resp_context;
- DBGMSG(ohci->id, "Got respTxComplete interrupt "
- "status=0x%08X", reg_read(ohci, d->ctrlSet));
+ if (ohci->video_tmpl)
+ ohci->video_tmpl->irq_handler(ohci->id, isoRecvIntEvent,
+ 0);
+ event &= ~OHCI1394_isochRx;
+ }
+ if (event & OHCI1394_isochTx) {
+ quadlet_t isoXmitIntEvent;
+ struct dma_trm_ctx *d = ohci->it_context;
+ isoXmitIntEvent =
+ reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
+ reg_write(ohci, OHCI1394_IsoXmitIntEventClear,
+ isoXmitIntEvent);
+ DBGMSG(ohci->id, "Got isochTx interrupt "
+ "status=0x%08x isoXmitIntEvent=%08x",
+ reg_read(ohci, d->ctrlSet), isoXmitIntEvent);
+ if (ohci->video_tmpl)
+ ohci->video_tmpl->irq_handler(ohci->id, 0,
+ isoXmitIntEvent);
+ if (isoXmitIntEvent & 0x1) {
if (reg_read(ohci, d->ctrlSet) & 0x800)
- ohci1394_stop_context(ohci, d->ctrlClear,
- "respTxComplete");
+ ohci1394_stop_context(ohci, d->ctrlClear, "isochTx");
else
- dma_trm_bh((void *)d);
+ tasklet_schedule(&d->task);
}
- if (event & OHCI1394_RQPkt) {
- struct dma_rcv_ctx *d = ohci->ar_req_context;
- DBGMSG(ohci->id, "Got RQPkt interrupt status=0x%08X",
- reg_read(ohci, d->ctrlSet));
- if (reg_read(ohci, d->ctrlSet) & 0x800)
- ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
- else {
-#if IEEE1394_USE_BOTTOM_HALVES
- queue_task(&d->task, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
-#else
- dma_rcv_bh((void *)d);
-#endif
- }
- }
- if (event & OHCI1394_RSPkt) {
- struct dma_rcv_ctx *d = ohci->ar_resp_context;
- DBGMSG(ohci->id, "Got RSPkt interrupt status=0x%08X",
- reg_read(ohci, d->ctrlSet));
- if (reg_read(ohci, d->ctrlSet) & 0x800)
- ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
- else {
-#if IEEE1394_USE_BOTTOM_HALVES
- queue_task(&d->task, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
-#else
- dma_rcv_bh((void *)d);
-#endif
- }
- }
- if (event & OHCI1394_isochRx) {
- quadlet_t isoRecvIntEvent;
- struct dma_rcv_ctx *d = ohci->ir_context;
- isoRecvIntEvent =
- reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
- reg_write(ohci, OHCI1394_IsoRecvIntEventClear,
- isoRecvIntEvent);
- DBGMSG(ohci->id, "Got isochRx interrupt "
- "status=0x%08X isoRecvIntEvent=%08x",
- reg_read(ohci, d->ctrlSet), isoRecvIntEvent);
- if (isoRecvIntEvent & 0x1) {
- if (reg_read(ohci, d->ctrlSet) & 0x800)
- ohci1394_stop_context(ohci, d->ctrlClear,
- "isochRx");
- else {
-#if IEEE1394_USE_BOTTOM_HALVES
- queue_task(&d->task, &tq_immediate);
- mark_bh(IMMEDIATE_BH);
-#else
- dma_rcv_bh((void *)d);
-#endif
- }
- }
- if (ohci->video_tmpl)
- ohci->video_tmpl->irq_handler(ohci->id,
- isoRecvIntEvent,
- 0);
- }
- if (event & OHCI1394_isochTx) {
- quadlet_t isoXmitIntEvent;
- isoXmitIntEvent =
- reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
- reg_write(ohci, OHCI1394_IsoXmitIntEventClear,
- isoXmitIntEvent);
- DBGMSG(ohci->id, "Got isochTx interrupt");
- if (ohci->video_tmpl)
- ohci->video_tmpl->irq_handler(ohci->id, 0,
- isoXmitIntEvent);
- }
- if (event & OHCI1394_selfIDComplete) {
- if (host->in_bus_reset) {
- /*
- * Begin Fix (JSG): Check to make sure our
- * node id is valid
- */
- node_id = reg_read(ohci, OHCI1394_NodeID);
- if (!(node_id & 0x80000000)) {
- mdelay(1); /* phy is upset -
- * this happens once in
- * a while on hot-plugs...
- * give it a ms to recover
- */
- }
- /* End Fix (JSG) */
+ event &= ~OHCI1394_isochTx;
+ }
+ if (event & OHCI1394_selfIDComplete) {
+ if (host->in_bus_reset) {
+ node_id = reg_read(ohci, OHCI1394_NodeID);
+ /* If our nodeid is not valid, give a msec delay
+ * to let it settle in and try again. */
+ if (!(node_id & 0x80000000)) {
+ mdelay(1);
node_id = reg_read(ohci, OHCI1394_NodeID);
- if (node_id & 0x80000000) { /* NodeID valid */
- phyid = node_id & 0x0000003f;
- isroot = (node_id & 0x40000000) != 0;
-
- PRINT(KERN_INFO, ohci->id,
- "SelfID process finished "
- "(phyid %d, %s)", phyid,
- (isroot ? "root" : "not root"));
-
- handle_selfid(ohci, host,
- phyid, isroot);
- }
- else
- PRINT(KERN_ERR, ohci->id,
- "SelfID process finished but "
- "NodeID not valid: %08X",
- node_id);
-
- /* Accept Physical requests from all nodes. */
- reg_write(ohci,OHCI1394_AsReqFilterHiSet,
- 0xffffffff);
- reg_write(ohci,OHCI1394_AsReqFilterLoSet,
- 0xffffffff);
- /*
- * Tip by James Goodwin <jamesg@Filanet.com>
- * Turn on phys dma reception. We should
- * probably manage the filtering somehow,
- * instead of blindly turning it on.
- */
- reg_write(ohci,OHCI1394_PhyReqFilterHiSet,
- 0xffffffff);
- reg_write(ohci,OHCI1394_PhyReqFilterLoSet,
- 0xffffffff);
- reg_write(ohci,OHCI1394_PhyUpperBound,
- 0xffff0000);
- }
- else PRINT(KERN_ERR, ohci->id,
- "self-id received outside of bus reset"
- "sequence");
- }
- if (event & OHCI1394_phyRegRcvd) {
-#if 1
- if (host->in_bus_reset) {
- PRINT(KERN_INFO, ohci->id, "PhyControl: %08X",
- reg_read(ohci, OHCI1394_PhyControl));
}
- else PRINT(KERN_ERR, ohci->id,
- "phy reg received outside of bus reset"
- "sequence");
-#endif
- }
- } while (--timeout);
- PRINT(KERN_ERR, ohci->id, "irq_handler timeout event=0x%08x", event);
+ if (node_id & 0x80000000) { /* NodeID valid */
+ phyid = node_id & 0x0000003f;
+ isroot = (node_id & 0x40000000) != 0;
+
+ PRINT(KERN_DEBUG, ohci->id,
+ "SelfID interrupt received "
+ "(phyid %d, %s)", phyid,
+ (isroot ? "root" : "not root"));
+
+ handle_selfid(ohci, host,
+ phyid, isroot);
+ } else
+ PRINT(KERN_ERR, ohci->id,
+ "SelfID interrupt received, but "
+ "NodeID is not valid: %08X",
+ node_id);
+
+ /* Accept Physical requests from all nodes. */
+ reg_write(ohci,OHCI1394_AsReqFilterHiSet,
+ 0xffffffff);
+ reg_write(ohci,OHCI1394_AsReqFilterLoSet,
+ 0xffffffff);
+ /* Turn on phys dma reception. We should
+ * probably manage the filtering somehow,
+ * instead of blindly turning it on. */
+ reg_write(ohci,OHCI1394_PhyReqFilterHiSet,
+ 0xffffffff);
+ reg_write(ohci,OHCI1394_PhyReqFilterLoSet,
+ 0xffffffff);
+ reg_write(ohci,OHCI1394_PhyUpperBound,
+ 0xffff0000);
+ } else
+ PRINT(KERN_ERR, ohci->id,
+ "SelfID received outside of bus reset sequence");
+ event &= ~OHCI1394_selfIDComplete;
+ }
+ if (event & OHCI1394_phyRegRcvd) {
+ if (host->in_bus_reset) {
+ DBGMSG (ohci->id, "PhyControl: %08X",
+ reg_read(ohci, OHCI1394_PhyControl));
+ } else
+ PRINT(KERN_ERR, ohci->id,
+ "Physical register received outside of bus reset sequence");
+ event &= ~OHCI1394_phyRegRcvd;
+ }
+ if (event)
+ PRINT(KERN_ERR, ohci->id, "Unhandled interrupt(s) 0x%08x\n",
+ event);
}
/* Put the buffer back into the dma context */
struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
DBGMSG(ohci->id, "Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
- d->prg_cpu[idx]->status = d->buf_size;
- d->prg_cpu[idx]->branchAddress &= 0xfffffff0;
+ d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
+ d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
idx = (idx + d->num_desc - 1 ) % d->num_desc;
- d->prg_cpu[idx]->branchAddress |= 0x1;
+ d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
/* wake up the dma context if necessary */
if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
PRINT(KERN_INFO, ohci->id,
- "Waking dma cxt=%d ... processing is probably too slow",
+ "Waking dma ctx=%d ... processing is probably too slow",
d->ctx);
reg_write(ohci, d->ctrlSet, 0x1000);
}
-}
-
-static int block_length(struct dma_rcv_ctx *d, int idx,
- quadlet_t *buf_ptr, int offset)
-{
- int length=0;
-
- /* Where is the data length ? */
- if (offset+12>=d->buf_size)
- length = (d->buf_cpu[(idx+1)%d->num_desc]
- [3-(d->buf_size-offset)/4]>>16);
- else
- length = (buf_ptr[3]>>16);
- if (length % 4) length += 4 - (length % 4);
- return length;
}
-const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
+#define cond_le32_to_cpu(data, noswap) \
+ (noswap ? data : le32_to_cpu(data))
+
+static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
-1, 0, -1, 0, -1, -1, 16, -1};
/*
* Determine the length of a packet in the buffer
* Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
*/
-static int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
-int offset)
+static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
+ int offset, unsigned char tcode, int noswap)
{
- unsigned char tcode;
- int length = -1;
-
- /* Let's see what kind of packet is in there */
- tcode = (buf_ptr[0] >> 4) & 0xf;
+ int length = -1;
if (d->ctx < 2) { /* Async Receive Response/Request */
length = TCODE_SIZE[tcode];
- if (length == 0)
- length = block_length(d, idx, buf_ptr, offset) + 20;
- }
- else if (d->ctx==2) { /* Iso receive */
+ if (length == 0) {
+ if (offset + 12 >= d->buf_size) {
+ length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
+ [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
+ } else {
+ length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
+ }
+ length += 20;
+ }
+ } else if (d->ctx == 2) { /* Iso receive */
/* Assumption: buffer fill mode with header/trailer */
- length = (buf_ptr[0]>>16);
- if (length % 4) length += 4 - (length % 4);
- length+=8;
+ length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
}
+
+ if (length > 0 && length % 4)
+ length += 4 - (length % 4);
+
return length;
}
-/* Bottom half that processes dma receive buffers */
-static void dma_rcv_bh(void *data)
+/* Tasklet that processes dma receive buffers */
+static void dma_rcv_tasklet (unsigned long data)
{
struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
unsigned int split_left, idx, offset, rescount;
unsigned char tcode;
- int length, bytes_left, ack;
+ int length, bytes_left, ack;
+ unsigned long flags;
quadlet_t *buf_ptr;
char *split_ptr;
char msg[256];
- spin_lock(&d->lock);
+ spin_lock_irqsave(&d->lock, flags);
idx = d->buf_ind;
offset = d->buf_offset;
buf_ptr = d->buf_cpu[idx] + offset/4;
- rescount = d->prg_cpu[idx]->status&0xffff;
+ dma_cache_wback_inv(&(d->prg_cpu[idx]->status), sizeof(d->prg_cpu[idx]->status));
+ rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
+
bytes_left = d->buf_size - rescount - offset;
+ dma_cache_wback_inv(buf_ptr, bytes_left);
+
+ while (bytes_left > 0) {
+ tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->payload_swap) >> 4) & 0xf;
- while (bytes_left>0) {
- tcode = (buf_ptr[0]>>4)&0xf;
- length = packet_length(d, idx, buf_ptr, offset);
+ /* packet_length() will return < 4 for an error */
+ length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->payload_swap);
- if (length<4) { /* something is wrong */
- sprintf(msg,"unexpected tcode 0x%X in AR ctx=%d",
- tcode, d->ctx);
+ if (length < 4) { /* something is wrong */
+ sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
+ tcode, cond_le32_to_cpu(buf_ptr[0], ohci->payload_swap),
+ d->ctx, length);
ohci1394_stop_context(ohci, d->ctrlClear, msg);
- spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&d->lock, flags);
return;
}
- if ((offset+length)>d->buf_size) { /* Split packet */
- if (length>d->split_buf_size) {
+ /* The first case is where we have a packet that crosses
+ * over more than one descriptor. The next case is where
+ * it's all in the first descriptor. */
+ if ((offset + length) > d->buf_size) {
+ DBGMSG(ohci->id,"Split packet rcv'd\n");
+ if (length > d->split_buf_size) {
ohci1394_stop_context(ohci, d->ctrlClear,
- "split packet size exceeded");
+ "Split packet size exceeded");
d->buf_ind = idx;
d->buf_offset = offset;
- spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&d->lock, flags);
return;
}
- if (d->prg_cpu[(idx+1)%d->num_desc]->status
- ==d->buf_size) {
- /* other part of packet not written yet */
- /* this should never happen I think */
- /* anyway we'll get it on the next call */
+#if 0
+ if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
+ == d->buf_size) {
+ /* Other part of packet not written yet.
+ * This should never happen, I think;
+ * anyway, we'll get it on the next call. */
PRINT(KERN_INFO, ohci->id,
- "Got only half a packet !!!");
+ "Got only half a packet!");
d->buf_ind = idx;
d->buf_offset = offset;
- spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&d->lock, flags);
return;
}
+#endif
split_left = length;
split_ptr = (char *)d->spb;
memcpy(split_ptr,buf_ptr,d->buf_size-offset);
insert_dma_buffer(d, idx);
idx = (idx+1) % d->num_desc;
buf_ptr = d->buf_cpu[idx];
+ dma_cache_wback_inv(buf_ptr, d->buf_size);
offset=0;
+
while (split_left >= d->buf_size) {
memcpy(split_ptr,buf_ptr,d->buf_size);
split_ptr += d->buf_size;
insert_dma_buffer(d, idx);
idx = (idx+1) % d->num_desc;
buf_ptr = d->buf_cpu[idx];
+ dma_cache_wback_inv(buf_ptr, d->buf_size);
}
- if (split_left>0) {
+
+ if (split_left > 0) {
memcpy(split_ptr, buf_ptr, split_left);
offset = split_left;
buf_ptr += offset/4;
}
-
- /*
- * We get one phy packet for each bus reset.
- * we know that from now on the bus topology may
- * have changed. Just ignore it for the moment
- */
- if (tcode != 0xE) {
- DBGMSG(ohci->id, "Split packet received from"
- " node %d ack=0x%02X spd=%d tcode=0x%X"
- " length=%d data=0x%08x ctx=%d",
- (d->spb[1]>>16)&0x3f,
- (d->spb[length/4-1]>>16)&0x1f,
- (d->spb[length/4-1]>>21)&0x3,
- tcode, length, d->spb[3], d->ctx);
-
- ack = (((d->spb[length/4-1]>>16)&0x1f)
- == 0x11) ? 1 : 0;
-
- hpsb_packet_received(ohci->host, d->spb,
- length, ack);
- }
- else
- PRINT(KERN_INFO, ohci->id,
- "Got phy packet ctx=%d ... discarded",
- d->ctx);
- }
- else {
- /*
- * We get one phy packet for each bus reset.
- * we know that from now on the bus topology may
- * have changed. Just ignore it for the moment
- */
- if (tcode != 0xE) {
- DBGMSG(ohci->id, "Packet received from node"
- " %d ack=0x%02X spd=%d tcode=0x%X"
- " length=%d data=0x%08x ctx=%d",
- (buf_ptr[1]>>16)&0x3f,
- (buf_ptr[length/4-1]>>16)&0x1f,
- (buf_ptr[length/4-1]>>21)&0x3,
- tcode, length, buf_ptr[3], d->ctx);
-
- ack = (((buf_ptr[length/4-1]>>16)&0x1f)
- == 0x11) ? 1 : 0;
-
- hpsb_packet_received(ohci->host, buf_ptr,
- length, ack);
- }
- else
- PRINT(KERN_INFO, ohci->id,
- "Got phy packet ctx=%d ... discarded",
- d->ctx);
+ } else {
+ DBGMSG(ohci->id,"Single packet rcv'd\n");
+ memcpy(d->spb, buf_ptr, length);
offset += length;
buf_ptr += length/4;
if (offset==d->buf_size) {
offset=0;
}
}
- rescount = d->prg_cpu[idx]->status & 0xffff;
+
+ /* We get one phy packet to the async descriptor for each
+ * bus reset. We always ignore it. */
+ if (tcode != OHCI1394_TCODE_PHY) {
+ if (!ohci->payload_swap)
+ packet_swab(d->spb, tcode, (length - 4) >> 2, 0);
+
+ DBGMSG(ohci->id, "Packet received from node"
+ " %d ack=0x%02X spd=%d tcode=0x%X"
+ " length=%d ctx=%d tlabel=%d",
+ (d->spb[1]>>16)&0x3f,
+ (cond_le32_to_cpu(d->spb[length/4-1], ohci->payload_swap)>>16)&0x1f,
+ (cond_le32_to_cpu(d->spb[length/4-1], ohci->payload_swap)>>21)&0x3,
+ tcode, length, d->ctx,
+ (cond_le32_to_cpu(d->spb[length/4-1], ohci->payload_swap)>>10)&0x3f);
+
+ ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->payload_swap)>>16)&0x1f)
+ == 0x11) ? 1 : 0;
+
+ hpsb_packet_received(ohci->host, d->spb,
+ length-4, ack);
+ }
+#if OHCI1394_DEBUG
+ else
+ PRINT (KERN_DEBUG, ohci->id, "Got phy packet ctx=%d ... discarded",
+ d->ctx);
+#endif
+
+ dma_cache_wback_inv(&(d->prg_cpu[idx]->status),
+ sizeof(d->prg_cpu[idx]->status));
+ rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
+
bytes_left = d->buf_size - rescount - offset;
}
d->buf_ind = idx;
d->buf_offset = offset;
- spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&d->lock, flags);
}
-/* Bottom half that processes sent packets */
+/* Tasklet that processes sent packets */
-static void dma_trm_bh(void *data)
+static void dma_trm_tasklet (unsigned long data)
{
struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
spin_lock_irqsave(&d->lock, flags);
- if (d->fifo_first==NULL) {
+ if (d->fifo_first == NULL) {
#if 0
ohci1394_stop_context(ohci, d->ctrlClear,
"Packet sent ack received but queue is empty");
while (d->fifo_first) {
packet = d->fifo_first;
datasize = d->fifo_first->data_size;
- if (datasize)
- ack = d->prg_cpu[d->sent_ind]->end.status>>16;
+ if (datasize && packet->type != raw)
+ ack = le32_to_cpu(
+ d->prg_cpu[d->sent_ind]->end.status) >> 16;
else
- ack = d->prg_cpu[d->sent_ind]->begin.status>>16;
+ ack = le32_to_cpu(
+ d->prg_cpu[d->sent_ind]->begin.status) >> 16;
- if (ack==0)
+ if (ack == 0)
/* this packet hasn't been sent yet*/
break;
DBGMSG(ohci->id,
"Packet sent to node %d tcode=0x%X tLabel="
"0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
- (d->prg_cpu[d->sent_ind]->data[1]>>16)&0x3f,
- (d->prg_cpu[d->sent_ind]->data[0]>>4)&0xf,
- (d->prg_cpu[d->sent_ind]->data[0]>>10)&0x3f,
- ack&0x1f, (ack>>5)&0x3,
- d->prg_cpu[d->sent_ind]->data[3]>>16,
- d->ctx);
+ (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
+ >>16)&0x3f,
+ (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
+ >>4)&0xf,
+ (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
+ >>10)&0x3f,
+ ack&0x1f, (ack>>5)&0x3,
+ le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])
+ >>16,
+ d->ctx);
else
DBGMSG(ohci->id,
"Packet sent to node %d tcode=0x%X tLabel="
"0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
- (d->prg_cpu[d->sent_ind]->data[1]>>16)&0x3f,
- (d->prg_cpu[d->sent_ind]->data[0]>>4)&0xf,
- (d->prg_cpu[d->sent_ind]->data[0]>>10)&0x3f,
- ack&0x1f, (ack>>5)&0x3,
- d->prg_cpu[d->sent_ind]->data[3],
- d->ctx);
+ (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
+ >>16)&0x3f,
+ (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
+ >>4)&0xf,
+ (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
+ >>10)&0x3f,
+ ack&0x1f, (ack>>5)&0x3,
+ le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
+ d->ctx);
#endif
nextpacket = packet->xnext;
- hpsb_packet_sent(ohci->host, packet, ack&0xf);
+ hpsb_packet_sent(ohci->host, packet, ack & 0xf);
- if (datasize)
+ if (datasize) {
pci_unmap_single(ohci->dev,
- d->prg_cpu[d->sent_ind]->end.address,
+ cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
datasize, PCI_DMA_TODEVICE);
+ OHCI_DMA_FREE("single Xmit data packet");
+ }
d->sent_ind = (d->sent_ind+1)%d->num_desc;
d->free_prgs++;
d->fifo_first = nextpacket;
}
- if (d->fifo_first==NULL) d->fifo_last=NULL;
+ if (d->fifo_first == NULL)
+ d->fifo_last = NULL;
dma_trm_flush(ohci, d);
ohci1394_stop_context(ohci, (*d)->ctrlClear, NULL);
+ tasklet_kill(&(*d)->task);
+
if ((*d)->buf_cpu) {
for (i=0; i<(*d)->num_desc; i++)
- if ((*d)->buf_cpu[i] && (*d)->buf_bus[i])
+ if ((*d)->buf_cpu[i] && (*d)->buf_bus[i]) {
pci_free_consistent(
ohci->dev, (*d)->buf_size,
(*d)->buf_cpu[i], (*d)->buf_bus[i]);
+ OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
+ }
kfree((*d)->buf_cpu);
kfree((*d)->buf_bus);
}
if ((*d)->prg_cpu) {
for (i=0; i<(*d)->num_desc; i++)
- if ((*d)->prg_cpu[i] && (*d)->prg_bus[i])
+ if ((*d)->prg_cpu[i] && (*d)->prg_bus[i]) {
pci_free_consistent(
ohci->dev, sizeof(struct dma_cmd),
(*d)->prg_cpu[i], (*d)->prg_bus[i]);
+ OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
+ }
kfree((*d)->prg_cpu);
kfree((*d)->prg_bus);
}
if ((*d)->spb) kfree((*d)->spb);
-
+
kfree(*d);
*d = NULL;
d = (struct dma_rcv_ctx *)kmalloc(sizeof(struct dma_rcv_ctx),
GFP_KERNEL);
- if (d==NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate dma_rcv_ctx");
+ if (d == NULL) {
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate dma_rcv_ctx");
return NULL;
}
+ memset (d, 0, sizeof (struct dma_rcv_ctx));
+
d->ohci = (void *)ohci;
d->ctx = ctx;
d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
if (d->buf_cpu == NULL || d->buf_bus == NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate dma buffer");
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
free_dma_rcv_ctx(&d);
return NULL;
}
d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
if (d->prg_cpu == NULL || d->prg_bus == NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate dma prg");
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate dma prg");
free_dma_rcv_ctx(&d);
return NULL;
}
d->spb = kmalloc(d->split_buf_size, GFP_KERNEL);
if (d->spb == NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate split buffer");
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate split buffer");
free_dma_rcv_ctx(&d);
return NULL;
}
for (i=0; i<d->num_desc; i++) {
- d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
+ d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
d->buf_size,
d->buf_bus+i);
+ OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
- if (d->buf_cpu[i] != NULL) {
+ if (d->buf_cpu[i] != NULL) {
memset(d->buf_cpu[i], 0, d->buf_size);
} else {
PRINT(KERN_ERR, ohci->id,
- "failed to allocate dma buffer");
+ "Failed to allocate dma buffer");
free_dma_rcv_ctx(&d);
return NULL;
}
d->prg_cpu[i] = pci_alloc_consistent(ohci->dev,
sizeof(struct dma_cmd),
d->prg_bus+i);
+ OHCI_DMA_ALLOC("consistent dma_rcv prg[%d]", i);
if (d->prg_cpu[i] != NULL) {
memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
} else {
PRINT(KERN_ERR, ohci->id,
- "failed to allocate dma prg");
+ "Failed to allocate dma prg");
free_dma_rcv_ctx(&d);
return NULL;
}
spin_lock_init(&d->lock);
- /* initialize bottom handler */
- d->task.sync = 0;
- INIT_TQ_LINK(d->task);
- d->task.routine = dma_rcv_bh;
- d->task.data = (void*)d;
+ /* initialize tasklet */
+ tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long)d);
return d;
}
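The bottom-half to tasklet conversion in these allocation and teardown routines follows the usual 2.4 pattern; a condensed, purely descriptive sketch of the lifecycle as this driver uses it:

/* Tasklet lifecycle for the rcv/trm contexts:
 *
 *   tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long)d);
 *       - once, when the context is allocated (see above)
 *
 *   tasklet_schedule(&d->task);
 *       - from ohci_irq_handler() whenever the context raised an interrupt
 *
 *   tasklet_kill(&d->task);
 *       - in free_dma_*_ctx(), to wait for any scheduled run to finish
 *         before the context memory is released
 */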
ohci1394_stop_context(ohci, (*d)->ctrlClear, NULL);
+ tasklet_kill(&(*d)->task);
+
if ((*d)->prg_cpu) {
for (i=0; i<(*d)->num_desc; i++)
- if ((*d)->prg_cpu[i] && (*d)->prg_bus[i])
+ if ((*d)->prg_cpu[i] && (*d)->prg_bus[i]) {
pci_free_consistent(
ohci->dev, sizeof(struct at_dma_prg),
(*d)->prg_cpu[i], (*d)->prg_bus[i]);
+ OHCI_DMA_FREE("consistent dma_trm prg[%d]", i);
+ }
kfree((*d)->prg_cpu);
kfree((*d)->prg_bus);
}
GFP_KERNEL);
if (d==NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate dma_trm_ctx");
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate dma_trm_ctx");
return NULL;
}
+ memset (d, 0, sizeof (struct dma_trm_ctx));
+
d->ohci = (void *)ohci;
d->ctx = ctx;
d->num_desc = num_desc;
d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
if (d->prg_cpu == NULL || d->prg_bus == NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate at dma prg");
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate at dma prg");
free_dma_trm_ctx(&d);
return NULL;
}
d->prg_cpu[i] = pci_alloc_consistent(ohci->dev,
sizeof(struct at_dma_prg),
d->prg_bus+i);
+ OHCI_DMA_ALLOC("consistent dma_trm prg[%d]", i);
if (d->prg_cpu[i] != NULL) {
memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
} else {
PRINT(KERN_ERR, ohci->id,
- "failed to allocate at dma prg");
+ "Failed to allocate at dma prg");
free_dma_trm_ctx(&d);
return NULL;
}
spin_lock_init(&d->lock);
- /* initialize bottom handler */
+ /* initialize tasklet */
- INIT_TQ_LINK(d->task);
- d->task.routine = dma_trm_bh;
- d->task.data = (void*)d;
+ tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
return d;
}
-static u32 ohci_crc16(unsigned *data, int length)
+static u16 ohci_crc16 (u32 *ptr, int length)
{
- int check=0, i;
- int shift, sum, next=0;
+ int shift;
+ u32 crc, sum, data;
+
+ crc = 0;
+ for (; length > 0; length--) {
+ data = *ptr++;
+ for (shift = 28; shift >= 0; shift -= 4) {
+ sum = ((crc >> 12) ^ (data >> shift)) & 0x000f;
+ crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
+ }
+ crc &= 0xffff;
+ }
+ return crc;
+}
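The loop above computes the IEEE 1212 config ROM CRC-16 (polynomial x^16 + x^12 + x^5 + 1, initial value 0, most significant bit first) four bits at a time. A bit-serial equivalent is shown here only as a cross-check of the nibble-wise form; the function name is illustrative:

static u16 ohci_crc16_bitwise(u32 *ptr, int length)
{
	u32 crc = 0, data;
	int shift;

	for (; length > 0; length--) {
		data = *ptr++;
		for (shift = 31; shift >= 0; shift--) {
			crc <<= 1;
			/* feed one message bit, MSB first */
			if (((crc >> 16) ^ (data >> shift)) & 1)
				crc ^= 0x1021;	/* x^16 + x^12 + x^5 + 1 */
			crc &= 0xffff;
		}
	}
	return crc;
}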
- for (i = length; i; i--) {
- for (next = check, shift = 28; shift >= 0; shift -= 4 ) {
- sum = ((next >> 12) ^ (*data >> shift)) & 0xf;
- next = (next << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
- }
- check = next & 0xffff;
- data++;
- }
+/* Config ROM macro implementation influenced by NetBSD OHCI driver */
- return check;
-}
+struct config_rom_unit {
+ u32 *start;
+ u32 *refer;
+ int length;
+ int refunit;
+};
+
+struct config_rom_ptr {
+ u32 *data;
+ int unitnum;
+ struct config_rom_unit unitdir[10];
+};
+
+#define cf_put_1quad(cr, q) (((cr)->data++)[0] = cpu_to_be32(q))
+
+#define cf_put_4bytes(cr, b1, b2, b3, b4) \
+ (((cr)->data++)[0] = cpu_to_be32(((b1) << 24) | ((b2) << 16) | ((b3) << 8) | (b4)))
+
+#define cf_put_keyval(cr, key, val) (((cr)->data++)[0] = cpu_to_be32(((key) << 24) | (val)))
+
+#define cf_put_crc16(cr, unit) \
+ (*(cr)->unitdir[unit].start = cpu_to_be32(((cr)->unitdir[unit].length << 16) | \
+ ohci_crc16((cr)->unitdir[unit].start + 1, (cr)->unitdir[unit].length)))
+
+#define cf_unit_begin(cr, unit) \
+do { \
+ if ((cr)->unitdir[unit].refer != NULL) { \
+ *(cr)->unitdir[unit].refer |= \
+ (cr)->data - (cr)->unitdir[unit].refer; \
+ cf_put_crc16(cr, (cr)->unitdir[unit].refunit); \
+ } \
+ (cr)->unitnum = (unit); \
+ (cr)->unitdir[unit].start = (cr)->data++; \
+} while (0)
+
+#define cf_put_refer(cr, key, unit) \
+do { \
+ (cr)->unitdir[unit].refer = (cr)->data; \
+ (cr)->unitdir[unit].refunit = (cr)->unitnum; \
+ ((cr)->data++)[0] = cpu_to_be32((key) << 24); \
+} while(0)
+
+#define cf_unit_end(cr) \
+do { \
+ (cr)->unitdir[(cr)->unitnum].length = (cr)->data - \
+ ((cr)->unitdir[(cr)->unitnum].start + 1); \
+ cf_put_crc16((cr), (cr)->unitnum); \
+} while(0)
static void ohci_init_config_rom(struct ti_ohci *ohci)
+{
+ struct config_rom_ptr cr;
+
+ memset(&cr, 0, sizeof(cr));
+ memset (ohci->csr_config_rom_cpu, 0, sizeof (ohci->csr_config_rom_cpu));
+
+ cr.data = ohci->csr_config_rom_cpu;
+
+ /* Bus info block */
+ cf_unit_begin(&cr, 0);
+ cf_put_1quad(&cr, reg_read(ohci, OHCI1394_BusID));
+ cf_put_1quad(&cr, reg_read(ohci, OHCI1394_BusOptions));
+ cf_put_1quad(&cr, reg_read(ohci, OHCI1394_GUIDHi));
+ cf_put_1quad(&cr, reg_read(ohci, OHCI1394_GUIDLo));
+ cf_unit_end(&cr);
+
+ /* IEEE P1212 suggests the initial ROM header CRC should only
+ * cover the header itself (and not the entire ROM). Since we use
+ * this, we can make our bus_info_len the same as the CRC
+ * length. */
+ ohci->csr_config_rom_cpu[0] |= cpu_to_be32(
+ (be32_to_cpu(ohci->csr_config_rom_cpu[0]) & 0x00ff0000) << 8);
+ reg_write(ohci, OHCI1394_ConfigROMhdr,
+ be32_to_cpu(ohci->csr_config_rom_cpu[0]));
+
+ /* Root directory */
+ cf_unit_begin(&cr, 1);
+ cf_put_keyval(&cr, 0x03, 0x00005e); /* Vendor ID */
+ cf_put_refer(&cr, 0x81, 2); /* Textual description unit */
+ cf_put_keyval(&cr, 0x0c, 0x0083c0); /* Node capabilities */
+ cf_put_refer(&cr, 0xd1, 3); /* IPv4 unit directory */
+ cf_put_refer(&cr, 0xd1, 4); /* IPv6 unit directory */
+ /* NOTE: Add other unit referrers here, and append at bottom */
+ cf_unit_end(&cr);
+
+ /* Textual description - "Linux 1394" */
+ cf_unit_begin(&cr, 2);
+ cf_put_keyval(&cr, 0, 0);
+ cf_put_1quad(&cr, 0);
+ cf_put_4bytes(&cr, 'L', 'i', 'n', 'u');
+ cf_put_4bytes(&cr, 'x', ' ', '1', '3');
+ cf_put_4bytes(&cr, '9', '4', 0x0, 0x0);
+ cf_unit_end(&cr);
+
+ /* IPv4 unit directory, RFC 2734 */
+ cf_unit_begin(&cr, 3);
+ cf_put_keyval(&cr, 0x12, 0x00005e); /* Unit spec ID */
+ cf_put_refer(&cr, 0x81, 6); /* Textual description unit */
+ cf_put_keyval(&cr, 0x13, 0x000001); /* Unit software version */
+ cf_put_refer(&cr, 0x81, 7); /* Textual description unit */
+ cf_unit_end(&cr);
+
+ cf_unit_begin(&cr, 6);
+ cf_put_keyval(&cr, 0, 0);
+ cf_put_1quad(&cr, 0);
+ cf_put_4bytes(&cr, 'I', 'A', 'N', 'A');
+ cf_unit_end(&cr);
+
+ cf_unit_begin(&cr, 7);
+ cf_put_keyval(&cr, 0, 0);
+ cf_put_1quad(&cr, 0);
+ cf_put_4bytes(&cr, 'I', 'P', 'v', '4');
+ cf_unit_end(&cr);
+
+ /* IPv6 unit directory, draft-ietf-ipngwg-1394-01.txt */
+ cf_unit_begin(&cr, 4);
+ cf_put_keyval(&cr, 0x12, 0x00005e); /* Unit spec ID */
+ cf_put_refer(&cr, 0x81, 8); /* Textual description unit */
+ cf_put_keyval(&cr, 0x13, 0x000002); /* (Proposed) Unit software version */
+ cf_put_refer(&cr, 0x81, 9); /* Textual description unit */
+ cf_unit_end(&cr);
+
+ cf_unit_begin(&cr, 8);
+ cf_put_keyval(&cr, 0, 0);
+ cf_put_1quad(&cr, 0);
+ cf_put_4bytes(&cr, 'I', 'A', 'N', 'A');
+ cf_unit_end(&cr);
+
+ cf_unit_begin(&cr, 9);
+ cf_put_keyval(&cr, 0, 0);
+ cf_put_1quad(&cr, 0);
+ cf_put_4bytes(&cr, 'I', 'P', 'v', '6');
+ cf_unit_end(&cr);
+
+ return;
+}
+
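The bus_info_len fixup in ohci_init_config_rom() relies on the layout of the first config ROM quadlet, which packs info_length, crc_length and the CRC value; a small sketch of that layout, assuming the IEEE 1212 field order (helper name illustrative):

/* First config ROM quadlet, per IEEE 1212:
 *   bits 31:24  info_length (bus_info_len)
 *   bits 23:16  crc_length
 *   bits 15:0   rom_crc_value
 * The fixup above copies the crc_length byte up into info_length,
 * which is why the two end up equal. */
static quadlet_t rom_header_quadlet(u8 info_len, u8 crc_len, u16 crc)
{
	return ((quadlet_t)info_len << 24) | ((quadlet_t)crc_len << 16) | crc;
}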
+static size_t get_ohci_rom(struct hpsb_host *host, const quadlet_t **ptr)
+{
+ struct ti_ohci *ohci=host->hostdata;
+
+ DBGMSG(ohci->id, "request csr_rom address: %p",
+ ohci->csr_config_rom_cpu);
+
+ *ptr = ohci->csr_config_rom_cpu;
+
+ return sizeof(ohci->csr_config_rom_cpu);
+}
+
+int ohci_compare_swap(struct ti_ohci *ohci, quadlet_t *data,
+ quadlet_t compare, int sel)
{
int i;
+ reg_write(ohci, OHCI1394_CSRData, *data);
+ reg_write(ohci, OHCI1394_CSRCompareData, compare);
+ reg_write(ohci, OHCI1394_CSRControl, sel & 0x3);
- ohci_csr_rom[3] = reg_read(ohci, OHCI1394_GUIDHi);
- ohci_csr_rom[4] = reg_read(ohci, OHCI1394_GUIDLo);
-
- ohci_csr_rom[0] = 0x04040000 | ohci_crc16(ohci_csr_rom+1, 4);
+ for (i = 0; i < OHCI_LOOP_COUNT; i++) {
+ if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
+ break;
+ mdelay(10);
+ }
- for (i=0;i<sizeof(ohci_csr_rom)/4;i++)
- ohci->csr_config_rom_cpu[i] = cpu_to_be32(ohci_csr_rom[i]);
+ *data = reg_read(ohci, OHCI1394_CSRData);
+ return 0;
}
-static int add_card(struct pci_dev *dev)
+static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
+ quadlet_t data, quadlet_t compare)
{
- struct ti_ohci *ohci; /* shortcut to currently handled device */
+ struct ti_ohci *ohci=host->hostdata;
+
+ ohci_compare_swap (ohci, &data, compare, reg);
- if (num_of_cards == MAX_OHCI1394_CARDS) {
- PRINT_G(KERN_WARNING, "cannot handle more than %d cards. "
- "Adjust MAX_OHCI1394_CARDS in ti_ohci1394.h.",
- MAX_OHCI1394_CARDS);
- return 1;
+ return data;
+}
+
+struct hpsb_host_template *get_ohci_template(void)
+{
+ static struct hpsb_host_template tmpl;
+ static int initialized = 0;
+
+ if (!initialized) {
+ memset (&tmpl, 0, sizeof (struct hpsb_host_template));
+
+ /* Initialize by field names so that a template structure
+ * reorganization does not influence this code. */
+ tmpl.name = "ohci1394";
+
+ tmpl.initialize_host = ohci_initialize;
+ tmpl.release_host = ohci_remove;
+ tmpl.get_rom = get_ohci_rom;
+ tmpl.transmit_packet = ohci_transmit;
+ tmpl.devctl = ohci_devctl;
+ tmpl.hw_csr_reg = ohci_hw_csr_reg;
+ initialized = 1;
}
+ return &tmpl;
+}
+
+static int __devinit ohci1394_add_one(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ struct ti_ohci *ohci; /* shortcut to currently handled device */
+ struct hpsb_host *host;
+ unsigned long ohci_base, ohci_len;
+ static int version_printed = 0;
+
+ if (version_printed++ == 0)
+ PRINT_G(KERN_INFO, "%s", version);
+
if (pci_enable_device(dev)) {
- PRINT_G(KERN_NOTICE, "failed to enable OHCI hardware %d",
- num_of_cards);
- return 1;
+ /* Skip ID's that fail */
+ PRINT_G(KERN_NOTICE, "Failed to enable OHCI hardware %d",
+ card_id_counter++);
+ return -ENXIO;
}
pci_set_master(dev);
- ohci = &cards[num_of_cards++];
-
- ohci->id = num_of_cards-1;
+ host = hpsb_get_host(get_ohci_template(), sizeof (struct ti_ohci));
+ if (!host) {
+ PRINT_G(KERN_ERR, "Out of memory trying to allocate host structure");
+ return -ENOMEM;
+ }
+ ohci = host->hostdata;
+ ohci->host = host;
+ INIT_LIST_HEAD(&ohci->list);
+ ohci->id = card_id_counter++;
ohci->dev = dev;
-
- ohci->state = 0;
+ host->pdev = dev;
+ ohci->host = host;
+ pci_set_drvdata(dev, ohci);
+
+ PRINT(KERN_INFO, ohci->id, "OHCI (PCI) IEEE-1394 Controller");
+
+ /* We don't want hardware swapping */
+ pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
+
+ /* Some oddball Apple controllers do not order the selfid
+ * properly, so we make up for it here. */
+#ifndef __LITTLE_ENDIAN
+ /* XXX: Need a better way to check this. I'm wondering if we can
+ * read the values of the OHCI1394_PCI_HCI_Control and the
+ * noByteSwapData registers to see if they were not cleared to
+ * zero. Should this work? Obviously it's not defined what these
+ * registers will read when they aren't supported. Bleh! */
+ if (dev->vendor == PCI_VENDOR_ID_APPLE) {
+ ohci->payload_swap = 1;
+ if (dev->device != PCI_DEVICE_ID_APPLE_UNI_N_FW)
+ ohci->selfid_swap = 1;
+ } else
+ ohci->selfid_swap = 1;
+#endif
/* csr_config rom allocation */
ohci->csr_config_rom_cpu =
- pci_alloc_consistent(ohci->dev, sizeof(ohci_csr_rom),
+ pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
&ohci->csr_config_rom_bus);
- if (ohci->csr_config_rom_cpu == NULL) {
- FAIL("failed to allocate buffer config rom");
- }
+ OHCI_DMA_ALLOC("consistent csr_config_rom");
+ if (ohci->csr_config_rom_cpu == NULL)
+ FAIL("Failed to allocate buffer config rom");
/*
* self-id dma buffer allocation
- * FIXME: some early chips may need 8KB alignment for the
- * selfid buffer... if you have problems a temporary fic
- * is to allocate 8192 bytes instead of 2048
*/
ohci->selfid_buf_cpu =
- pci_alloc_consistent(ohci->dev, 8192, &ohci->selfid_buf_bus);
- if (ohci->selfid_buf_cpu == NULL) {
- FAIL("failed to allocate DMA buffer for self-id packets");
- }
+ pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
+ &ohci->selfid_buf_bus);
+ OHCI_DMA_ALLOC("consistent selfid_buf");
+ if (ohci->selfid_buf_cpu == NULL)
+ FAIL("Failed to allocate DMA buffer for self-id packets");
+
if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
- PRINT(KERN_INFO, ohci->id, "Selfid buffer %p not aligned on "
- "8Kb boundary... may cause pb on some CXD3222 chip",
+ PRINT(KERN_INFO, ohci->id, "SelfID buffer %p is not aligned on "
+ "8Kb boundary... may cause problems on some CXD3222 chip",
ohci->selfid_buf_cpu);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,13)
- ohci->registers = ioremap_nocache(dev->base_address[0],
- OHCI1394_REGISTER_SIZE);
-#else
- ohci->registers = ioremap_nocache(dev->resource[0].start,
- OHCI1394_REGISTER_SIZE);
-#endif
+ ohci->it_context =
+ alloc_dma_trm_ctx(ohci, 2, IT_NUM_DESC,
+ OHCI1394_IsoXmitContextControlSet,
+ OHCI1394_IsoXmitContextControlClear,
+ OHCI1394_IsoXmitCommandPtr);
- if (ohci->registers == NULL) {
- FAIL("failed to remap registers - card not accessible");
- }
+ if (ohci->it_context == NULL)
+ FAIL("Failed to allocate IT context");
+
+ ohci_base = pci_resource_start(dev, 0);
+ ohci_len = pci_resource_len(dev, 0);
- PRINT(KERN_INFO, ohci->id, "remapped memory spaces reg 0x%p",
+ if (!request_mem_region (ohci_base, ohci_len, host->template->name))
+ FAIL("MMIO resource (0x%lx@0x%lx) unavailable, aborting.",
+ ohci_base, ohci_len);
+
+ ohci->registers = ioremap(ohci_base, ohci_len);
+
+ if (ohci->registers == NULL)
+ FAIL("Failed to remap registers - card not accessible");
+
+ DBGMSG(ohci->id, "Remapped memory spaces reg 0x%p",
ohci->registers);
ohci->ar_req_context =
OHCI1394_AsReqRcvContextControlClear,
OHCI1394_AsReqRcvCommandPtr);
- if (ohci->ar_req_context == NULL) {
- FAIL("failed to allocate AR Req context");
- }
+ if (ohci->ar_req_context == NULL)
+ FAIL("Failed to allocate AR Req context");
ohci->ar_resp_context =
alloc_dma_rcv_ctx(ohci, 1, AR_RESP_NUM_DESC,
OHCI1394_AsRspRcvContextControlClear,
OHCI1394_AsRspRcvCommandPtr);
- if (ohci->ar_resp_context == NULL) {
- FAIL("failed to allocate AR Resp context");
- }
+ if (ohci->ar_resp_context == NULL)
+ FAIL("Failed to allocate AR Resp context");
ohci->at_req_context =
alloc_dma_trm_ctx(ohci, 0, AT_REQ_NUM_DESC,
OHCI1394_AsReqTrContextControlClear,
OHCI1394_AsReqTrCommandPtr);
- if (ohci->at_req_context == NULL) {
- FAIL("failed to allocate AT Req context");
- }
+ if (ohci->at_req_context == NULL)
+ FAIL("Failed to allocate AT Req context");
ohci->at_resp_context =
alloc_dma_trm_ctx(ohci, 1, AT_RESP_NUM_DESC,
OHCI1394_AsRspTrContextControlClear,
OHCI1394_AsRspTrCommandPtr);
- if (ohci->at_resp_context == NULL) {
- FAIL("failed to allocate AT Resp context");
- }
-
+ if (ohci->at_resp_context == NULL)
+ FAIL("Failed to allocate AT Resp context");
+
ohci->ir_context =
alloc_dma_rcv_ctx(ohci, 2, IR_NUM_DESC,
IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
OHCI1394_IsoRcvContextControlClear,
OHCI1394_IsoRcvCommandPtr);
- if (ohci->ir_context == NULL) {
- FAIL("failed to allocate IR context");
- }
+ if (ohci->ir_context == NULL)
+ FAIL("Failed to allocate IR context");
ohci->ISO_channel_usage= 0;
spin_lock_init(&ohci->IR_channel_lock);
if (!request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
- OHCI1394_DRIVER_NAME, ohci)) {
- PRINT(KERN_INFO, ohci->id, "allocated interrupt %d", dev->irq);
- } else {
- FAIL("failed to allocate shared interrupt %d", dev->irq);
- }
+ OHCI1394_DRIVER_NAME, ohci))
+ PRINT(KERN_DEBUG, ohci->id, "Allocated interrupt %d", dev->irq);
+ else
+ FAIL("Failed to allocate shared interrupt %d", dev->irq);
ohci_init_config_rom(ohci);
DBGMSG(ohci->id, "The 1st byte at offset 0x404 is: 0x%02x",
*((char *)ohci->csr_config_rom_cpu+4));
- return 0;
-#undef FAIL
-}
-
-#ifdef CONFIG_PROC_FS
-
-#define SR(fmt, reg0, reg1, reg2)\
-p += sprintf(p,fmt,reg_read(ohci, reg0),\
- reg_read(ohci, reg1),reg_read(ohci, reg2));
-
-static int ohci_get_status(char *buf)
-{
- struct ti_ohci *ohci=&cards[0];
- struct hpsb_host *host=ohci->host;
- char *p=buf;
- //unsigned char phyreg;
- //int i, nports;
- int i;
-
- struct dma_rcv_ctx *d=NULL;
- struct dma_trm_ctx *dt=NULL;
-
- p += sprintf(p,"IEEE-1394 OHCI Driver status report:\n");
- p += sprintf(p," bus number: 0x%x Node ID: 0x%x\n",
- (reg_read(ohci, OHCI1394_NodeID) & 0xFFC0) >> 6,
- reg_read(ohci, OHCI1394_NodeID)&0x3f);
-#if 0
- p += sprintf(p," hardware version %d.%d GUID_ROM is %s\n\n",
- (reg_read(ohci, OHCI1394_Version) & 0xFF0000) >>16,
- reg_read(ohci, OHCI1394_Version) & 0xFF,
- (reg_read(ohci, OHCI1394_Version) & 0x01000000)
- ? "set" : "clear");
-#endif
- p += sprintf(p,"\n### Host data ###\n");
- p += sprintf(p,"node_count: %8d ",host->node_count);
- p += sprintf(p,"node_id : %08X\n",host->node_id);
- p += sprintf(p,"irm_id : %08X ",host->irm_id);
- p += sprintf(p,"busmgr_id : %08X\n",host->busmgr_id);
- p += sprintf(p,"%s %s %s\n",
- host->initialized ? "initialized" : "",
- host->in_bus_reset ? "in_bus_reset" : "",
- host->attempt_root ? "attempt_root" : "");
- p += sprintf(p,"%s %s %s %s\n",
- host->is_root ? "root" : "",
- host->is_cycmst ? "cycle_master" : "",
- host->is_irm ? "iso_res_mgr" : "",
- host->is_busmgr ? "bus_mgr" : "");
-
- p += sprintf(p,"\n---Iso Receive DMA---\n");
- d = ohci->ir_context;
-#if 0
- for (i=0; i<d->num_desc; i++) {
- p += sprintf(p, "IR buf[%d] : %p prg[%d]: %p\n",
- i, d->buf[i], i, d->prg[i]);
- }
-#endif
- p += sprintf(p, "Current buf: %d offset: %d\n",
- d->buf_ind,d->buf_offset);
+ /* Tell the highlevel this host is ready */
+ highlevel_add_one_host (host);
- p += sprintf(p,"\n---Async Receive DMA---\n");
- d = ohci->ar_req_context;
-#if 0
- for (i=0; i<d->num_desc; i++) {
- p += sprintf(p, "AR req buf[%d] : %p prg[%d]: %p\n",
- i, d->buf[i], i, d->prg[i]);
- }
-#endif
- p += sprintf(p, "Ar req current buf: %d offset: %d\n",
- d->buf_ind,d->buf_offset);
-
- d = ohci->ar_resp_context;
-#if 0
- for (i=0; i<d->num_desc; i++) {
- p += sprintf(p, "AR resp buf[%d] : %p prg[%d]: %p\n",
- i, d->buf[i], i, d->prg[i]);
- }
-#endif
- p += sprintf(p, "AR resp current buf: %d offset: %d\n",
- d->buf_ind,d->buf_offset);
-
- p += sprintf(p,"\n---Async Transmit DMA---\n");
- dt = ohci->at_req_context;
- p += sprintf(p, "AT req prg: %d sent: %d free: %d branchAddrPtr: %p\n",
- dt->prg_ind, dt->sent_ind, dt->free_prgs,
- dt->branchAddrPtr);
- p += sprintf(p, "AT req queue: first: %p last: %p\n",
- dt->fifo_first, dt->fifo_last);
- dt = ohci->at_resp_context;
-#if 0
- for (i=0; i<dt->num_desc; i++) {
- p += sprintf(p, "------- AT resp prg[%02d] ------\n",i);
- p += sprintf(p, "%p: control : %08x\n",
- &(dt->prg[i].begin.control),
- dt->prg[i].begin.control);
- p += sprintf(p, "%p: address : %08x\n",
- &(dt->prg[i].begin.address),
- dt->prg[i].begin.address);
- p += sprintf(p, "%p: brancAddr: %08x\n",
- &(dt->prg[i].begin.branchAddress),
- dt->prg[i].begin.branchAddress);
- p += sprintf(p, "%p: status : %08x\n",
- &(dt->prg[i].begin.status),
- dt->prg[i].begin.status);
- p += sprintf(p, "%p: header[0]: %08x\n",
- &(dt->prg[i].data[0]),
- dt->prg[i].data[0]);
- p += sprintf(p, "%p: header[1]: %08x\n",
- &(dt->prg[i].data[1]),
- dt->prg[i].data[1]);
- p += sprintf(p, "%p: header[2]: %08x\n",
- &(dt->prg[i].data[2]),
- dt->prg[i].data[2]);
- p += sprintf(p, "%p: header[3]: %08x\n",
- &(dt->prg[i].data[3]),
- dt->prg[i].data[3]);
- p += sprintf(p, "%p: control : %08x\n",
- &(dt->prg[i].end.control),
- dt->prg[i].end.control);
- p += sprintf(p, "%p: address : %08x\n",
- &(dt->prg[i].end.address),
- dt->prg[i].end.address);
- p += sprintf(p, "%p: brancAddr: %08x\n",
- &(dt->prg[i].end.branchAddress),
- dt->prg[i].end.branchAddress);
- p += sprintf(p, "%p: status : %08x\n",
- &(dt->prg[i].end.status),
- dt->prg[i].end.status);
- }
-#endif
- p += sprintf(p, "AR resp prg: %d sent: %d free: %d"
- " branchAddrPtr: %p\n",
- dt->prg_ind, dt->sent_ind, dt->free_prgs,
- dt->branchAddrPtr);
- p += sprintf(p, "AT resp queue: first: %p last: %p\n",
- dt->fifo_first, dt->fifo_last);
-
- /* ----- Register Dump ----- */
- p += sprintf(p,"\n### HC Register dump ###\n");
- SR("Version : %08x GUID_ROM : %08x ATRetries : %08x\n",
- OHCI1394_Version, OHCI1394_GUID_ROM, OHCI1394_ATRetries);
- SR("CSRData : %08x CSRCompData : %08x CSRControl : %08x\n",
- OHCI1394_CSRData, OHCI1394_CSRCompareData, OHCI1394_CSRControl);
- SR("ConfigROMhdr: %08x BusID : %08x BusOptions : %08x\n",
- OHCI1394_ConfigROMhdr, OHCI1394_BusID, OHCI1394_BusOptions);
- SR("GUIDHi : %08x GUIDLo : %08x ConfigROMmap: %08x\n",
- OHCI1394_GUIDHi, OHCI1394_GUIDLo, OHCI1394_ConfigROMmap);
- SR("PtdWrAddrLo : %08x PtdWrAddrHi : %08x VendorID : %08x\n",
- OHCI1394_PostedWriteAddressLo, OHCI1394_PostedWriteAddressHi,
- OHCI1394_VendorID);
- SR("HCControl : %08x SelfIDBuffer: %08x SelfIDCount : %08x\n",
- OHCI1394_HCControlSet, OHCI1394_SelfIDBuffer, OHCI1394_SelfIDCount);
- SR("IRMuChMaskHi: %08x IRMuChMaskLo: %08x IntEvent : %08x\n",
- OHCI1394_IRMultiChanMaskHiSet, OHCI1394_IRMultiChanMaskLoSet,
- OHCI1394_IntEventSet);
- SR("IntMask : %08x IsoXmIntEvnt: %08x IsoXmIntMask: %08x\n",
- OHCI1394_IntMaskSet, OHCI1394_IsoXmitIntEventSet,
- OHCI1394_IsoXmitIntMaskSet);
- SR("IsoRcvIntEvt: %08x IsoRcvIntMsk: %08x FairnessCtrl: %08x\n",
- OHCI1394_IsoRecvIntEventSet, OHCI1394_IsoRecvIntMaskSet,
- OHCI1394_FairnessControl);
- SR("LinkControl : %08x NodeID : %08x PhyControl : %08x\n",
- OHCI1394_LinkControlSet, OHCI1394_NodeID, OHCI1394_PhyControl);
- SR("IsoCyclTimer: %08x AsRqFilterHi: %08x AsRqFilterLo: %08x\n",
- OHCI1394_IsochronousCycleTimer,
- OHCI1394_AsReqFilterHiSet, OHCI1394_AsReqFilterLoSet);
- SR("PhyReqFiltHi: %08x PhyReqFiltLo: %08x PhyUpperBnd : %08x\n",
- OHCI1394_PhyReqFilterHiSet, OHCI1394_PhyReqFilterLoSet,
- OHCI1394_PhyUpperBound);
- SR("AsRqTrCxtCtl: %08x AsRqTrCmdPtr: %08x AsRsTrCtxCtl: %08x\n",
- OHCI1394_AsReqTrContextControlSet, OHCI1394_AsReqTrCommandPtr,
- OHCI1394_AsRspTrContextControlSet);
- SR("AsRsTrCmdPtr: %08x AsRqRvCtxCtl: %08x AsRqRvCmdPtr: %08x\n",
- OHCI1394_AsRspTrCommandPtr, OHCI1394_AsReqRcvContextControlSet,
- OHCI1394_AsReqRcvCommandPtr);
- SR("AsRsRvCtxCtl: %08x AsRsRvCmdPtr: %08x IntEvent : %08x\n",
- OHCI1394_AsRspRcvContextControlSet, OHCI1394_AsRspRcvCommandPtr,
- OHCI1394_IntEventSet);
- for (i=0;i<ohci->nb_iso_rcv_ctx;i++) {
- p += sprintf(p,"IsoRCtxCtl%02d: %08x IsoRCmdPtr%02d: %08x"
- " IsoRCxtMch%02d: %08x\n", i,
- reg_read(ohci,
- OHCI1394_IsoRcvContextControlSet+32*i),
- i,reg_read(ohci, OHCI1394_IsoRcvCommandPtr+32*i),
- i,reg_read(ohci,
- OHCI1394_IsoRcvContextMatch+32*i));
- }
- for (i=0;i<ohci->nb_iso_xmit_ctx;i++) {
- p += sprintf(p,"IsoTCtxCtl%02d: %08x IsoTCmdPtr%02d: %08x\n",
- i,
- reg_read(ohci,
- OHCI1394_IsoXmitContextControlSet+32*i),
- i,reg_read(ohci,OHCI1394_IsoXmitCommandPtr+32*i));
- }
-
-#if 0
- p += sprintf(p,"\n### Phy Register dump ###\n");
- phyreg=get_phy_reg(ohci,1);
- p += sprintf(p,"offset: %d val: 0x%02x -> RHB: %d"
- "IBR: %d Gap_count: %d\n",
- 1,phyreg,(phyreg&0x80) != 0,
- (phyreg&0x40) !=0, phyreg&0x3f);
- phyreg=get_phy_reg(ohci,2);
- nports=phyreg&0x1f;
- p += sprintf(p,"offset: %d val: 0x%02x -> SPD: %d"
- " E : %d Ports : %2d\n",
- 2,phyreg, (phyreg&0xC0)>>6, (phyreg&0x20) !=0, nports);
- for (i=0;i<nports;i++) {
- phyreg=get_phy_reg(ohci,3+i);
- p += sprintf(p,"offset: %d val: 0x%02x -> [port %d]"
- " TPA: %d TPB: %d | %s %s\n",
- 3+i,phyreg,
- i, (phyreg&0xC0)>>6, (phyreg&0x30)>>4,
- (phyreg&0x08) ? "child" : "parent",
- (phyreg&0x04) ? "connected" : "disconnected");
- }
- phyreg=get_phy_reg(ohci,3+i);
- p += sprintf(p,"offset: %d val: 0x%02x -> ENV: %s Reg_count: %d\n",
- 3+i,phyreg,
- (((phyreg&0xC0)>>6)==0) ? "backplane" :
- (((phyreg&0xC0)>>6)==1) ? "cable" : "reserved",
- phyreg&0x3f);
-#endif
-
- return p - buf;
-}
-
-static int ohci1394_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len = ohci_get_status(page);
- if (len <= off+count) *eof = 1;
- *start = page + off;
- len -= off;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
+ return 0;
+#undef FAIL
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
-struct proc_dir_entry *ohci_proc_entry;
-#endif /* LINUX_VERSION_CODE */
-#endif /* CONFIG_PROC_FS */
-
static void remove_card(struct ti_ohci *ohci)
{
- /*
- * Reset the board properly before leaving
- * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
- */
+ /* Reset the board properly before leaving */
ohci_soft_reset(ohci);
/* Free AR dma */
/* Free IR dma */
free_dma_rcv_ctx(&ohci->ir_context);
+ /* Free IT dma */
+ free_dma_trm_ctx(&ohci->it_context);
+
/* Free self-id buffer */
- if (ohci->selfid_buf_cpu)
- pci_free_consistent(ohci->dev, 2048,
+ if (ohci->selfid_buf_cpu) {
+ pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
ohci->selfid_buf_cpu,
ohci->selfid_buf_bus);
+ OHCI_DMA_FREE("consistent selfid_buf");
+ }
/* Free config rom */
- if (ohci->csr_config_rom_cpu)
- pci_free_consistent(ohci->dev, sizeof(ohci_csr_rom),
+ if (ohci->csr_config_rom_cpu) {
+ pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
ohci->csr_config_rom_cpu,
ohci->csr_config_rom_bus);
+ OHCI_DMA_FREE("consistent csr_config_rom");
+ }
/* Free the IRQ */
free_irq(ohci->dev->irq, ohci);
- if (ohci->registers)
+ if (ohci->registers)
iounmap(ohci->registers);
- ohci->state = 0;
-}
-
-static int init_driver()
-{
- struct pci_dev *dev = NULL;
- int success = 0;
-#if USE_DEVICE
- int i;
-#endif
- if (num_of_cards) {
- PRINT_G(KERN_DEBUG, __PRETTY_FUNCTION__ " called again");
- return 0;
- }
-
- PRINT_G(KERN_INFO, "looking for Ohci1394 cards");
-
-#if USE_DEVICE
- for (i = 0; supported_chips[i][0] != -1; i++) {
- while ((dev = pci_find_device(supported_chips[i][0],
- supported_chips[i][1], dev))
- != NULL) {
- if (add_card(dev) == 0) {
- success = 1;
- }
- }
- }
-#else
- while ((dev = pci_find_class(PCI_CLASS_FIREWIRE_OHCI, dev)) != NULL ) {
- if (add_card(dev) == 0) success = 1;
- }
-#endif /* USE_DEVICE */
- if (success == 0) {
- PRINT_G(KERN_WARNING, "no operable Ohci1394 cards found");
- return -ENXIO;
- }
-
-#ifdef CONFIG_PROC_FS
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
- create_proc_read_entry ("ohci1394", 0, NULL, ohci1394_read_proc, NULL);
-#else
- if ((ohci_proc_entry = create_proc_entry("ohci1394", 0, NULL)))
- ohci_proc_entry->read_proc = ohci1394_read_proc;
-#endif
-#endif
- return 0;
-}
-
-static size_t get_ohci_rom(struct hpsb_host *host, const quadlet_t **ptr)
-{
- struct ti_ohci *ohci=host->hostdata;
-
- DBGMSG(ohci->id, "request csr_rom address: %08X",
- (u32)ohci->csr_config_rom_cpu);
-
- *ptr = ohci->csr_config_rom_cpu;
- return sizeof(ohci_csr_rom);
-}
-
-static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
- quadlet_t data, quadlet_t compare)
-{
- struct ti_ohci *ohci=host->hostdata;
- int timeout = 255;
-
- reg_write(ohci, OHCI1394_CSRData, data);
- reg_write(ohci, OHCI1394_CSRCompareData, compare);
- reg_write(ohci, OHCI1394_CSRControl, reg&0x3);
+ release_mem_region (pci_resource_start(ohci->dev, 0),
+ pci_resource_len(ohci->dev, 0));
- while (timeout-- && !(reg_read(ohci, OHCI1394_CSRControl)&0x80000000));
-
- if (!timeout)
- PRINT(KERN_ERR, ohci->id, __FUNCTION__ "timeout!");
-
- return reg_read(ohci, OHCI1394_CSRData);
-}
-
-struct hpsb_host_template *get_ohci_template(void)
-{
- static struct hpsb_host_template tmpl;
- static int initialized = 0;
-
- if (!initialized) {
- /* Initialize by field names so that a template structure
- * reorganization does not influence this code. */
- tmpl.name = "ohci1394";
-
- tmpl.detect_hosts = ohci_detect;
- tmpl.initialize_host = ohci_initialize;
- tmpl.release_host = ohci_remove;
- tmpl.get_rom = get_ohci_rom;
- tmpl.transmit_packet = ohci_transmit;
- tmpl.devctl = ohci_devctl;
- tmpl.hw_csr_reg = ohci_hw_csr_reg;
- initialized = 1;
- }
-
- return &tmpl;
+ pci_set_drvdata(ohci->dev, NULL);
}
void ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
i++;
if (i>5000) {
PRINT(KERN_ERR, ohci->id,
- "runaway loop while stopping context...");
+ "Runaway loop while stopping context...");
break;
}
}
- if (msg) PRINT(KERN_ERR, ohci->id, "%s\n dma prg stopped\n", msg);
-}
-
-struct ti_ohci *ohci1394_get_struct(int card_num)
-{
- if (card_num>=0 && card_num<num_of_cards)
- return &cards[card_num];
- return NULL;
+ if (msg) PRINT(KERN_ERR, ohci->id, "%s: dma prg stopped", msg);
}
int ohci1394_register_video(struct ti_ohci *ohci,
if (ohci->video_tmpl != tmpl) {
PRINT(KERN_ERR, ohci->id,
"Trying to unregister wrong video device");
- }
- else {
+ } else {
ohci->video_tmpl = NULL;
MOD_DEC_USE_COUNT;
}
}
-#if 0
-int ohci_compare_swap(struct ti_ohci *ohci, quadlet_t *data,
- quadlet_t compare, int sel)
+#ifndef __LITTLE_ENDIAN
+
+/* Swap a series of quads in place. */
+static __inline__ void block_swab32(quadlet_t *data, size_t size) {
+ while (size--)
+ data[size] = swab32(data[size]);
+}
+
+/* Swap headers and sometimes data too */
+static void packet_swab(quadlet_t *data, char tcode, int len, int payload_swap)
{
- int timeout = 255;
- reg_write(ohci, OHCI1394_CSRData, *data);
- reg_write(ohci, OHCI1394_CSRCompareData, compare);
- reg_write(ohci, OHCI1394_CSRControl, sel);
- while(!(reg_read(ohci, OHCI1394_CSRControl)&0x80000000)) {
- if (timeout--) {
- PRINT(KERN_INFO, ohci->id, "request_channel timeout");
- return -1;
- }
+ if (payload_swap) {
+ block_swab32(data, len);
+ return;
}
- *data = reg_read(ohci, OHCI1394_CSRData);
- return 0;
+
+ switch(tcode)
+ {
+ /* 4 quad header */
+ case TCODE_READB_RESPONSE:
+ case TCODE_LOCK_RESPONSE:
+ case TCODE_LOCK_REQUEST:
+ case TCODE_WRITEB:
+ case TCODE_READB:
+ block_swab32(data, 4);
+ break;
+
+ /* 3 quad header, 1 quad payload */
+ case TCODE_WRITEQ:
+ case TCODE_READQ_RESPONSE:
+ block_swab32(data, 3);
+ break;
+
+ /* 3 quad header */
+ case TCODE_WRITE_RESPONSE:
+ case TCODE_READQ:
+ block_swab32(data, 3);
+ break;
+
+ /* 2 quad header */
+ case TCODE_ISO_DATA:
+ block_swab32(data, 2);
+ break;
+
+ case OHCI1394_TCODE_PHY:
+ break; /* should never happen anyway */
+
+ case TCODE_CYCLE_START:
+ PRINT_G(KERN_ERR, "Unhandled tcode in packet_swab (0x%x)", tcode);
+		/* At least swap one quad */
+ block_swab32(data, 1);
+ break;
+ default:
+ PRINT_G(KERN_ERR, "Invalid tcode in packet_swab (0x%x)\n", tcode);
+ break;
+ }
+ return;
}
+#endif /* !LITTLE_ENDIAN */
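/*
 * Illustrative sketch of how the helpers above would be used on a
 * big-endian host; the hpsb_packet fields (header, tcode) are assumed
 * from the ieee1394 core and shown for illustration only, not taken
 * verbatim from the driver.
 *
 *	// A quadlet write request is three header quadlets plus one data
 *	// quadlet, laid out contiguously.  With payload_swap clear only
 *	// the three header quadlets are swapped; with it set (as on some
 *	// Apple controllers) all four quadlets go through block_swab32().
 *	packet_swab(packet->header, TCODE_WRITEQ, 4, ohci->payload_swap);
 */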
+
+
+#if 0
int ohci1394_request_channel(struct ti_ohci *ohci, int channel)
{
int csrSel;
#endif
EXPORT_SYMBOL(ohci1394_stop_context);
-EXPORT_SYMBOL(ohci1394_get_struct);
EXPORT_SYMBOL(ohci1394_register_video);
EXPORT_SYMBOL(ohci1394_unregister_video);
-#ifdef MODULE
+MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
+MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
-/* EXPORT_NO_SYMBOLS; */
+static void __devexit ohci1394_remove_one(struct pci_dev *pdev)
+{
+ struct ti_ohci *ohci = pci_get_drvdata(pdev);
-MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
-MODULE_DESCRIPTION("driver for PCI Ohci IEEE-1394 controller");
-MODULE_SUPPORTED_DEVICE("ohci1394");
+ if (ohci) {
+ remove_card (ohci);
+ pci_set_drvdata(pdev, NULL);
+ }
+}
+
+static struct pci_driver ohci1394_driver = {
+ name: "ohci1394",
+ id_table: ohci1394_pci_tbl,
+ probe: ohci1394_add_one,
+ remove: ohci1394_remove_one,
+};
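/*
 * Illustrative sketch only: the ohci1394_pci_tbl referenced above is not
 * part of this hunk.  A class-based table matching any OHCI-1394
 * controller (taking over from the pci_find_class() loop removed from
 * init_driver()) would look roughly like this; the table actually used
 * elsewhere in the patch may differ.
 *
 *	static struct pci_device_id ohci1394_pci_tbl[] __devinitdata = {
 *		{
 *			class:		PCI_CLASS_FIREWIRE_OHCI,
 *			class_mask:	0x00ffffff,
 *			vendor:		PCI_ANY_ID,
 *			device:		PCI_ANY_ID,
 *			subvendor:	PCI_ANY_ID,
 *			subdevice:	PCI_ANY_ID,
 *		},
 *		{ 0, },
 *	};
 *	MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
 */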
-void cleanup_module(void)
+static void __exit ohci1394_cleanup (void)
{
hpsb_unregister_lowlevel(get_ohci_template());
-#ifdef CONFIG_PROC_FS
- remove_proc_entry ("ohci1394", NULL);
-#endif
-
- PRINT_G(KERN_INFO, "removed " OHCI1394_DRIVER_NAME " module");
+ pci_unregister_driver(&ohci1394_driver);
}
-int init_module(void)
+static int __init ohci1394_init(void)
{
- memset(cards, 0, MAX_OHCI1394_CARDS * sizeof (struct ti_ohci));
-
+ int ret;
if (hpsb_register_lowlevel(get_ohci_template())) {
- PRINT_G(KERN_ERR, "registering failed");
+ PRINT_G(KERN_ERR, "Registering failed");
return -ENXIO;
- }
- return 0;
+ }
+ if ((ret = pci_module_init(&ohci1394_driver))) {
+ PRINT_G(KERN_ERR, "PCI module init failed\n");
+ hpsb_unregister_lowlevel(get_ohci_template());
+ return ret;
+ }
+ return ret;
}
-#endif /* MODULE */
+module_init(ohci1394_init);
+module_exit(ohci1394_cleanup);
#include "ieee1394_types.h"
-#define IEEE1394_USE_BOTTOM_HALVES 1
-
#define OHCI1394_DRIVER_NAME "ohci1394"
-#define USE_DEVICE 0
-
-#if USE_DEVICE
-
-#ifndef PCI_DEVICE_ID_TI_OHCI1394_LV22
-#define PCI_DEVICE_ID_TI_OHCI1394_LV22 0x8009
-#endif
-
-#ifndef PCI_DEVICE_ID_TI_OHCI1394_LV23
-#define PCI_DEVICE_ID_TI_OHCI1394_LV23 0x8019
-#endif
-
-#ifndef PCI_DEVICE_ID_TI_OHCI1394_LV26
-#define PCI_DEVICE_ID_TI_OHCI1394_LV26 0x8020
-#endif
-
-#ifndef PCI_DEVICE_ID_TI_OHCI1394_PCI4450
-#define PCI_DEVICE_ID_TI_OHCI1394_PCI4450 0x8011
-#endif
-
-#ifndef PCI_DEVICE_ID_VIA_OHCI1394
-#define PCI_DEVICE_ID_VIA_OHCI1394 0x3044
-#endif
-
-#ifndef PCI_VENDOR_ID_SONY
-#define PCI_VENDOR_ID_SONY 0x104d
-#endif
-
-#ifndef PCI_DEVICE_ID_SONY_CXD3222
-#define PCI_DEVICE_ID_SONY_CXD3222 0x8039
-#endif
-
-#ifndef PCI_DEVICE_ID_NEC_1394
-#define PCI_DEVICE_ID_NEC_1394 0x00cd
-#endif
-
-#ifndef PCI_DEVICE_ID_NEC_UPD72862
-#define PCI_DEVICE_ID_NEC_UPD72862 0x0063
-#endif
-
-#ifndef PCI_DEVICE_ID_NEC_UPD72870
-#define PCI_DEVICE_ID_NEC_UPD72870 0x00cd
-#endif
-
-#ifndef PCI_DEVICE_ID_NEC_UPD72871
-#define PCI_DEVICE_ID_NEC_UPD72871 0x00ce
-#endif
-
-#ifndef PCI_DEVICE_ID_ALI_OHCI1394_M5251
-#define PCI_DEVICE_ID_ALI_OHCI1394_M5251 0x5251
-#endif
-
-#ifndef PCI_VENDOR_ID_LUCENT
-#define PCI_VENDOR_ID_LUCENT 0x11c1
-#endif
+#define OHCI1394_MAX_AT_REQ_RETRIES 0x2
+#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
+#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
+#define OHCI1394_MAX_SELF_ID_ERRORS 16
-#ifndef PCI_DEVICE_ID_LUCENT_FW323
-#define PCI_DEVICE_ID_LUCENT_FW323 0x5811
-#endif
+#define AR_REQ_NUM_DESC 4 /* number of AR req descriptors */
+#define AR_REQ_BUF_SIZE PAGE_SIZE /* size of AR req buffers */
+#define AR_REQ_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
-#endif /* USE_DEVICE */
+#define AR_RESP_NUM_DESC 4 /* number of AR resp descriptors */
+#define AR_RESP_BUF_SIZE PAGE_SIZE /* size of AR resp buffers */
+#define AR_RESP_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
+#define IR_NUM_DESC 16 /* number of IR descriptors */
+#define IR_BUF_SIZE PAGE_SIZE /* 4096 bytes/buffer */
+#define IR_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
-#define MAX_OHCI1394_CARDS 4
+#define IT_NUM_DESC 16 /* number of IT descriptors */
-#define OHCI1394_MAX_AT_REQ_RETRIES 0x2
-#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
-#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
-#define OHCI1394_MAX_SELF_ID_ERRORS 16
+#define AT_REQ_NUM_DESC 32 /* number of AT req descriptors */
+#define AT_RESP_NUM_DESC 32 /* number of AT resp descriptors */
-#define AR_REQ_NUM_DESC 4 /* number of AR req descriptors */
-#define AR_REQ_BUF_SIZE PAGE_SIZE /* size of AR req buffers */
-#define AR_REQ_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
+#define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */
-#define AR_RESP_NUM_DESC 4 /* number of AR resp descriptors */
-#define AR_RESP_BUF_SIZE PAGE_SIZE /* size of AR resp buffers */
-#define AR_RESP_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
+#define OHCI_CONFIG_ROM_LEN 1024 /* Length of the mapped configrom space */
-#define IR_NUM_DESC 16 /* number of IR descriptors */
-#define IR_BUF_SIZE PAGE_SIZE /* 4096 bytes/buffer */
-#define IR_SPLIT_BUF_SIZE PAGE_SIZE /* split packet buffer */
+#define OHCI1394_SI_DMA_BUF_SIZE 8192 /* length of the selfid buffer */
-#define AT_REQ_NUM_DESC 32 /* number of AT req descriptors */
-#define AT_RESP_NUM_DESC 32 /* number of AT resp descriptors */
+/* PCI configuration space addresses */
+#define OHCI1394_PCI_HCI_Control 0x40
struct dma_cmd {
u32 control;
void *ohci;
int ctx;
unsigned int num_desc;
+
unsigned int buf_size;
unsigned int split_buf_size;
unsigned int buf_offset;
quadlet_t *spb;
spinlock_t lock;
- struct tq_struct task;
+ struct tasklet_struct task;
int ctrlClear;
int ctrlSet;
int cmdPtr;
struct hpsb_packet *pending_last;
spinlock_t lock;
- struct tq_struct task;
+ struct tasklet_struct task;
int ctrlClear;
int ctrlSet;
int cmdPtr;
struct ti_ohci {
int id; /* sequential card number */
+ struct list_head list;
+
struct pci_dev *dev;
u32 state;
int nb_iso_rcv_ctx;
/* iso transmit */
+ struct dma_trm_ctx *it_context;
int nb_iso_xmit_ctx;
u64 ISO_channel_usage;
int phyid, isroot;
spinlock_t phy_reg_lock;
+ spinlock_t event_lock;
int self_id_errors;
int NumBusResets;
/* video device */
struct video_template *video_tmpl;
+
+ /* Swap the selfid buffer? */
+ unsigned int selfid_swap:1;
+ /* Swap the payload? */
+ unsigned int payload_swap:1;
};
static inline int cross_bound(unsigned long addr, unsigned int size)
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
}
if (q[0] == ~q[1]) {
- PRINT(KERN_DEBUG, lynx->id, "selfid packet 0x%x rcvd",
+ PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
q[0]);
hpsb_selfid_received(host, q[0]);
} else {
unsigned long flags;
if (packet->data_size >= 4096) {
- PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%d)",
+ PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
packet->data_size);
return 0;
}
static struct file_operations aux_ops = {
- OWNER_THIS_MODULE
+ owner: THIS_MODULE,
read: mem_read,
write: mem_write,
poll: aux_poll,
enum { t_rom, t_aux, t_ram } type;
struct memdata *md;
- V22_COMPAT_MOD_INC_USE_COUNT;
-
if (cid < PCILYNX_MINOR_AUX_START) {
/* just for completeness */
- V22_COMPAT_MOD_DEC_USE_COUNT;
return -ENXIO;
} else if (cid < PCILYNX_MINOR_ROM_START) {
cid -= PCILYNX_MINOR_AUX_START;
if (cid >= num_of_cards || !cards[cid].aux_port) {
- V22_COMPAT_MOD_DEC_USE_COUNT;
return -ENXIO;
}
type = t_aux;
} else if (cid < PCILYNX_MINOR_RAM_START) {
cid -= PCILYNX_MINOR_ROM_START;
if (cid >= num_of_cards || !cards[cid].local_rom) {
- V22_COMPAT_MOD_DEC_USE_COUNT;
return -ENXIO;
}
type = t_rom;
* It is currently used inside the driver! */
cid -= PCILYNX_MINOR_RAM_START;
if (cid >= num_of_cards || !cards[cid].local_ram) {
- V22_COMPAT_MOD_DEC_USE_COUNT;
return -ENXIO;
}
type = t_ram;
}
md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
- if (md == NULL) {
- V22_COMPAT_MOD_DEC_USE_COUNT;
+ if (md == NULL)
return -ENOMEM;
- }
+
+ MOD_INC_USE_COUNT;
md->lynx = &cards[cid];
md->cid = cid;
kfree(md);
- V22_COMPAT_MOD_DEC_USE_COUNT;
+ MOD_DEC_USE_COUNT;
return 0;
}
PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
linkint);
+ if (!(intmask & PCI_INT_INT_PEND)) return;
+
reg_write(lynx, LINK_INT_STATUS, linkint);
reg_write(lynx, PCI_INT_STATUS, intmask);
init_waitqueue_head(&lynx->aux_intr_wait);
#endif
- INIT_TQ_LINK(lynx->iso_rcv.tq);
- lynx->iso_rcv.tq.routine = (void (*)(void*))iso_rcv_bh;
- lynx->iso_rcv.tq.data = lynx;
+ INIT_TQUEUE(&lynx->iso_rcv.tq, (void (*)(void*))iso_rcv_bh, lynx);
+
lynx->iso_rcv.lock = SPIN_LOCK_UNLOCKED;
lynx->async.queue_lock = SPIN_LOCK_UNLOCKED;
}
-#ifdef MODULE
-
-/* EXPORT_NO_SYMBOLS; */
-
MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
MODULE_SUPPORTED_DEVICE("pcilynx");
-void cleanup_module(void)
+static void __exit pcilynx_cleanup(void)
{
hpsb_unregister_lowlevel(get_lynx_template());
PRINT_G(KERN_INFO, "removed " PCILYNX_DRIVER_NAME " module");
}
-int init_module(void)
+static int __init pcilynx_init(void)
{
if (hpsb_register_lowlevel(get_lynx_template())) {
PRINT_G(KERN_ERR, "registering failed");
}
}
-#endif /* MODULE */
+module_init(pcilynx_init);
+module_exit(pcilynx_cleanup);
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/module.h>
+#include <linux/init.h>
#include <linux/version.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
#include <linux/devfs_fs_kernel.h>
-#endif
#include "ieee1394.h"
#include "ieee1394_types.h"
if (req != NULL) {
memset(req, 0, sizeof(struct pending_request));
INIT_LIST_HEAD(&req->list);
- INIT_TQ_LINK(req->tq);
- req->tq.routine = (void(*)(void*))queue_complete_cb;
+ INIT_TQUEUE(&req->tq, (void(*)(void*))queue_complete_cb, NULL);
}
return req;
break;
}
- req->req.error = highlevel_write(fi->host, node, req->data,
+ req->req.error = highlevel_write(fi->host, node, node, req->data,
addr, req->req.length);
req->req.length = 0;
break;
req->req.error = RAW1394_ERROR_STATE_ORDER;
}
- if (req->req.error) req->req.length = 0;
- req->req.error |= 0x00100000;
+ if (req->req.error)
+ req->req.length = 0;
+ if (req->req.error >= 0)
+ req->req.error |= ACK_PENDING << 16;
+
queue_complete_req(req);
return sizeof(struct raw1394_request);
}
return sizeof(struct raw1394_request);
case RAW1394_REQ_RESET_BUS:
- hpsb_reset_bus(fi->host);
+ hpsb_reset_bus(fi->host, LONG_RESET);
return sizeof(struct raw1394_request);
}
return -ENXIO;
}
- V22_COMPAT_MOD_INC_USE_COUNT;
+ MOD_INC_USE_COUNT;
fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
if (fi == NULL) {
- V22_COMPAT_MOD_DEC_USE_COUNT;
+ MOD_DEC_USE_COUNT;
return -ENOMEM;
}
-
+
memset(fi, 0, sizeof(struct file_info));
INIT_LIST_HEAD(&fi->list);
kfree(fi);
- V22_COMPAT_MOD_DEC_USE_COUNT;
+ MOD_DEC_USE_COUNT;
unlock_kernel();
return 0;
}
};
static struct file_operations file_ops = {
- OWNER_THIS_MODULE
+ owner: THIS_MODULE,
read: dev_read,
write: dev_write,
poll: dev_poll,
release: dev_release,
};
-int init_raw1394(void)
+static int __init init_raw1394(void)
{
hl_handle = hpsb_register_highlevel(RAW1394_DEVICE_NAME, &hl_ops);
if (hl_handle == NULL) {
return 0;
}
-void cleanup_raw1394(void)
+static void __exit cleanup_raw1394(void)
{
devfs_unregister_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME);
devfs_unregister(devfs_handle);
hpsb_unregister_highlevel(hl_handle);
}
-#ifdef MODULE
-
-int init_module(void)
-{
- return init_raw1394();
-}
-
-void cleanup_module(void)
-{
- return cleanup_raw1394();
-}
-
-#endif
+module_init(init_raw1394);
+module_exit(cleanup_raw1394);
--- /dev/null
+/*
+ * sbp2.c - SBP-2 protocol driver for IEEE-1394
+ *
+ * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
+ * jamesg@filanet.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Brief Description:
+ *
+ * This driver implements the Serial Bus Protocol 2 (SBP-2) over IEEE-1394
+ * under Linux. The SBP-2 driver is implemented as an IEEE-1394 high-level
+ * driver. It also registers as a SCSI lower-level driver in order to accept
+ * SCSI commands for transport using SBP-2.
+ *
+ * Driver Loading:
+ *
+ * Currently, the SBP-2 driver is supported only as a module. Because the
+ * Linux SCSI stack is not Plug-N-Play aware, module load order is
+ * important. Assuming the SCSI core drivers are either built into the
+ * kernel or already loaded as modules, you should load the IEEE-1394 modules
+ * in the following order:
+ *
+ * ieee1394 (e.g. insmod ieee1394)
+ * ohci1394 (e.g. insmod ohci1394)
+ * sbp2 (e.g. insmod sbp2)
+ *
+ * The SBP-2 driver will attempt to discover any attached SBP-2 devices when first
+ * loaded, or after any IEEE-1394 bus reset (e.g. a hot-plug). It will then print
+ * out a debug message indicating whether it was able to discover an SBP-2 device.
+ *
+ * Currently, the SBP-2 driver will catch any attached SBP-2 devices during the
+ * initial scsi bus scan (when the driver is first loaded). To add or remove
+ * SBP-2 devices after this initial scan (i.e. if you plug in or unplug a
+ * device after the SBP-2 driver is loaded), you must either use the scsi procfs
+ * add-single-device, remove-single-device, or a shell script such as
+ * rescan-scsi-bus.sh.
+ *
+ * The easiest way to add/detect new SBP-2 devices is to run the shell script
+ * rescan-scsi-bus.sh (or re-load the SBP-2 driver). This script may be
+ * found at:
+ * http://www.garloff.de/kurt/linux/rescan-scsi-bus.sh
+ *
+ * As an alternative, you may manually add/remove SBP-2 devices via the procfs with
+ * add-single-device <h> <b> <t> <l> or remove-single-device <h> <b> <t> <l>, where:
+ * <h> = host (starting at zero for first SCSI adapter)
+ * <b> = bus (normally zero)
+ * <t> = target (starting at zero for first SBP-2 device)
+ * <l> = lun (normally zero)
+ *
+ * e.g. To manually add/detect a new SBP-2 device
+ * echo "scsi add-single-device 0 0 0 0" > /proc/scsi/scsi
+ *
+ * e.g. To manually remove an SBP-2 device after it's been unplugged
+ * echo "scsi remove-single-device 0 0 0 0" > /proc/scsi/scsi
+ *
+ * e.g. To check to see which SBP-2/SCSI devices are currently registered
+ * cat /proc/scsi/scsi
+ *
+ * After scanning for new SCSI devices (above), you may access any attached
+ * SBP-2 storage devices as if they were SCSI devices (e.g. mount /dev/sda1,
+ * fdisk, mkfs, etc.).
+ *
+ *
+ * Module Load Options:
+ *
+ * The SBP-2 driver now has a number of module load parameters available for use
+ * in debugging/testing. Following are the valid parameters
+ *
+ * no_bus_scan - Skip the initial scsi bus scan during module load
+ * (1 = skip bus scan, 0 = perform bus scan, default = 0)
+ *
+ * mode_sense_hack - Emulate mode sense for devices like 1394 memory stick readers
+ * (1 = emulate/fake mode sense, 0 = do not emulate/fake mode sense, default = 0)
+ *
+ * max_speed - Force max speed allowed
+ * (0 = 100mb, 1 = 200mb, 2 = 400mb, default = auto configure)
+ *
+ * serialize_io - Force scsi stack to send down one command at a time, for debugging
+ * (1 = serialize all I/O, 0 = do not serialize I/O, default = 1)
+ *
+ * no_large_packets - Force scsi stack to limit max packet size sent down, for debugging
+ * (1 = limit max transfer size, 0 = do not limit max packet size, default = 0)
+ *
+ * (e.g. insmod sbp2 no_bus_scan=1)
+ *
+ *
+ * Current Support:
+ *
+ * The SBP-2 driver is still in an early state, but supports a variety of devices.
+ * I have read/written many gigabytes of data from/to SBP-2 drives, and have seen
+ * performance of more than 16 MBytes/s on individual drives (limit of the media
+ * transfer rate).
+ *
+ * Following are the devices that have been tested successfully:
+ *
+ * - Western Digital IEEE-1394 hard drives
+ * - Maxtor IEEE-1394 hard drives
+ * - VST (SmartDisk) IEEE-1394 hard drives and Zip drives (several flavors)
+ * - LaCie IEEE-1394 hard drives (several flavors)
+ * - QPS IEEE-1394 CD-RW/DVD drives and hard drives
+ * - BusLink IEEE-1394 hard drives
+ * - Iomega IEEE-1394 Zip/Jazz drives
+ * - ClubMac IEEE-1394 hard drives
+ * - FirePower IEEE-1394 hard drives
+ * - EzQuest IEEE-1394 hard drives and CD-RW drives
+ * - Castlewood/ADS IEEE-1394 ORB drives
+ * - Evergreen IEEE-1394 hard drives and CD-RW drives
+ * - Addonics IEEE-1394 CD-RW drives
+ * - Bellstor IEEE-1394 hard drives and CD-RW drives
+ * - APDrives IEEE-1394 hard drives
+ * - Fujitsu IEEE-1394 MO drives
+ * - Sony IEEE-1394 CD-RW drives
+ * - Epson IEEE-1394 scanner
+ * - ADS IEEE-1394 memory stick and compact flash readers
+ *      (e.g. "insmod sbp2 mode_sense_hack=1" for mem stick and flash readers)
+ * - SBP-2 bridge-based devices (LSI, Oxford Semiconductor, Indigita bridges)
+ * - Various other standard IEEE-1394 hard drives and enclosures
+ *
+ *
+ * Performance Issues:
+ *
+ * - Make sure you are "not" running fat/fat32 on your attached SBP-2 drives. You'll
+ * get much better performance formatting the drive ext2 (but you will lose the
+ * ability to easily move the drive between Windows/Linux).
+ *
+ *
+ * Current Issues:
+ *
+ * - Currently, all I/O from the scsi stack is serialized by default, as there
+ * are some stress issues under investigation with deserialized I/O. To enable
+ * deserialized I/O for testing, do "insmod sbp2 serialize_io=0"
+ *
+ * - Hot-Plugging: Need to add procfs support and integration with linux
+ * hot-plug support (http://linux-hotplug.sourceforge.net) for auto-mounting
+ * of drives.
+ *
+ * - Error Handling: SCSI aborts and bus reset requests are handled somewhat
+ * but the code needs additional debugging.
+ *
+ * - IEEE-1394 Bus Management: There is currently little bus management
+ * in the core IEEE-1394 stack. Because of this, the SBP-2 driver handles
+ * detection of SBP-2 devices itself. This should be moved to the core
+ * stack.
+ *
+ * - The SBP-2 driver is currently only supported as a module. It would not take
+ * much work to allow it to be compiled into the kernel, but you'd have to
+ * add some init code to the kernel to support this... and modules are much
+ * more flexible anyway. ;-)
+ *
+ * - Workaround for PPC pismo firewire chipset (enable SBP2_PPC_PISMO_WORKAROUND
+ * define below).
+ *
+ *
+ * Core IEEE-1394 Stack Changes:
+ *
+ * - The IEEE-1394 core stack guid code attempts to read the node unique id from
+ * each attached device after a bus reset. It currently uses a block read
+ * request to do this, which "upsets" certain not-well-behaved devices, such as
+ * some drives from QPS. If you have trouble with your IEEE-1394 storage
+ * device being detected after loading sbp2, try commenting out the
+ * init_ieee1394_guid() and cleanup_ieee1394_guid() lines at the bottom of
+ * ieee1394_core.c (and rebuild ieee1394.o).
+ *
+ * - In ohci1394.h, remove the IEEE1394_USE_BOTTOM_HALVES #define, and rebuild.
+ * This will give you around 30% to 40% performance increase.
+ *
+ *
+ * History:
+ *
+ * 07/25/00 - Initial revision (JSG)
+ * 08/11/00 - Following changes/bug fixes were made (JSG):
+ * * Bug fix to SCSI procfs code (still needs to be synched with 2.4 kernel).
+ * * Bug fix where request sense commands were actually sent on the bus.
+ * * Changed bus reset/abort code to deal with devices that spin up quite
+ * slowly (which result in SCSI time-outs).
+ * * "More" properly pull information from device's config rom, for enumeration
+ * of SBP-2 devices, and determining SBP-2 register offsets.
+ * * Change Simplified Direct Access Device type to Direct Access Device type in
+ * returned inquiry data, in order to make the SCSI stack happy.
+ * * Modified driver to register with the SCSI stack "before" enumerating any attached
+ * SBP-2 devices. This means that you'll have to use procfs scsi-add-device or
+ * some sort of script to discover new SBP-2 devices.
+ * * Minor re-write of some code and other minor changes.
+ * 08/28/00 - Following changes/bug fixes were made (JSG):
+ * * Bug fixes to scatter/gather support (case of one s/g element)
+ * * Updated direction table for scsi commands (mostly DVD commands)
+ * * Retries when trying to detect SBP-2 devices (for slow devices)
+ * * Slightly better error handling (previously none) when commands time-out.
+ * * Misc. other bug fixes and code reorganization.
+ * 09/13/00 - Following changes/bug fixes were made (JSG)
+ * * Moved detection/enumeration code to a kernel thread which is woken up when IEEE-1394
+ * bus resets occur.
+ * * Added code to handle bus resets and hot-plugging while devices are mounted, but full
+ * hot-plug support is not quite there yet.
+ * * Now use speed map to determine speed and max payload sizes for ORBs
+ * * Clean-up of code and reorganization
+ * 09/19/00 - Added better hot-plug support and other minor changes (JSG)
+ * 10/15/00 - Fixes for latest 2.4.0 test kernel, minor fix for hot-plug race. (JSG)
+ * 12/03/00 - Created pool of request packet structures for use in sending out sbp2 command
+ * and agent reset requests. This removes the kmallocs/kfrees in the critical I/O paths,
+ * and also deals with some subtle race conditions related to allocating and freeing
+ * packets. (JSG)
+ * 12/09/00 - Improved the sbp2 device detection by actually reading the root and unit
+ * directory (khk@khk.net)
+ * 12/23/00 - Following changes/enhancements were made (JSG)
+ * * Only do SCSI to RBC command conversion for Direct Access and Simplified
+ * Direct Access Devices (this is pulled from the config rom root directory).
+ * This is needed because doing the conversion for all device types broke the
+ * Epson scanner. Still looking for a better way of determining when to convert
+ * commands (for RBC devices). Thanks to khk for helping on this!
+ * * Added ability to "emulate" physical dma support, for host adapters such as TILynx.
+ * * Determine max payload and speed by also looking at the host adapter's max_rec field.
+ * 01/19/01 - Added checks to sbp2 login and made the login time-out longer. Also fixed a compile
+ * problem for 2.4.0. (JSG)
+ * 01/24/01 - Fixed problem when individual s/g elements are 64KB or larger. Needed to break
+ * up these larger elements, since the sbp2 page table element size is only 16 bits. (JSG)
+ * 01/29/01 - Minor byteswap fix for login response (used for reconnect and log out).
+ * 03/07/01 - Following changes/enhancements were made (JSG)
+ * * Changes to allow us to catch the initial scsi bus scan (for detecting sbp2
+ * devices when first loading sbp2.o). To disable this, un-define
+ * SBP2_SUPPORT_INITIAL_BUS_SCAN.
+ * * Temporary fix to deal with many sbp2 devices that do not support individual
+ * transfers of greater than 128KB in size.
+ * * Mode sense conversion from 6 byte to 10 byte versions for CDRW/DVD devices. (Mark Burton)
+ * * Define allowing support for goofy sbp2 devices that do not support mode
+ * sense command at all, allowing them to be mounted rw (such as 1394 memory
+ * stick and compact flash readers). Define SBP2_MODE_SENSE_WRITE_PROTECT_HACK
+ * if you need this fix.
+ * 03/29/01 - Major performance enhancements and misc. other changes. Thanks to Daniel Berlin for many of
+ *	      the changes and suggestions:
+ * * Now use sbp2 doorbell and link commands on the fly (instead of serializing requests)
+ * * Removed all bit fields in an attempt to run on PPC machines (still needs a little more work)
+ * * Added large request break-up/linking support for sbp2 chipsets that do not support transfers
+ * greater than 128KB in size.
+ * * Bumped up max commands per lun to two, and max total outstanding commands to eight.
+ * 04/03/01 - Minor clean-up. Write orb pointer directly if no outstanding commands (saves one 1394 bus
+ * transaction). Added module load options (bus scan, mode sense hack, max speed, serialize_io,
+ * no_large_transfers). Better bus reset handling while I/O pending. Set serialize_io to 1 by
+ * default (debugging of deserialized I/O in progress).
+ * 04/04/01 - Added workaround for PPC Pismo firewire chipset. See #define below. (Daniel Berlin)
+ * 04/20/01 - Minor clean-up. Allocate more orb structures when running with sbp2 target chipsets with
+ * 128KB max transfer limit.
+ * 06/16/01 - Converted DMA interfaces to pci_dma - Ben Collins
+ *		<bcollins@debian.org>
+ */
+
+/*
+ * Includes
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+#include <linux/blk.h>
+#include <linux/smp_lock.h>
+#include <asm/current.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
+#include "ieee1394.h"
+#include "ieee1394_types.h"
+#include "ieee1394_core.h"
+#include "hosts.h"
+#include "highlevel.h"
+#include "ieee1394_transactions.h"
+#include "../scsi/scsi.h"
+#include "../scsi/hosts.h"
+#include "../scsi/sd.h"
+#include "sbp2.h"
+
+/*
+ * PPC firewire Pismo chipset workaround!!!
+ *
+ * This is a workaround for a bug in the firewire pismo chipset. For some odd reason the status
+ * fifo address hi/lo must be byteswapped and the response address byteswapped, but no other
+ * parts of the structure. Apple's drivers seem to specifically check for the pismo and do
+ * the same workaround for sbp2. (Daniel Berlin)
+ *
+ * Please enable the following define if you're running on the PPC Pismo chipset.
+ */
+
+#ifdef CONFIG_IEEE1394_SBP2_PISMO
+#define SBP2_NEED_LOGIN_DESCRIPTOR_WORKAROUND
+#endif
+
+/*
+ * Module load parameter definitions
+ */
+
+/*
+ * Normally the sbp2 driver tries to catch the initial scsi bus scan to pick up any
+ * attached sbp2 devices. Setting no_bus_scan to 1 tells the sbp2 driver not to catch
+ * this initial scsi bus scan on module load. You can always either add or remove devices
+ * later through the rescan-scsi-bus.sh script or scsi procfs.
+ */
+MODULE_PARM(no_bus_scan,"i");
+MODULE_PARM_DESC(no_bus_scan, "Skip the initial scsi bus scan during module load");
+static int no_bus_scan = 0;
+
+/*
+ * Set mode_sense_hack to 1 if you have some sort of unusual sbp2 device, like a 1394 memory
+ * stick reader, compact flash reader, or MO drive that does not support mode sense. Allows
+ * you to mount the media rw instead of ro.
+ */
+MODULE_PARM(mode_sense_hack,"i");
+MODULE_PARM_DESC(mode_sense_hack, "Emulate mode sense for devices like 1394 memory stick readers");
+static int mode_sense_hack = 0;
+
+/*
+ * Change max_speed on module load if you have a bad IEEE-1394 controller that has trouble running
+ * 2KB packets at 400mb.
+ *
+ * NOTE: On certain OHCI parts I have seen short packets on async transmit (probably
+ * due to PCI latency/throughput issues with the part). You can bump down the speed if
+ * you are running into problems.
+ *
+ * Valid values:
+ * max_speed = 2 (default: max speed 400mb)
+ * max_speed = 1 (max speed 200mb)
+ * max_speed = 0 (max speed 100mb)
+ */
+MODULE_PARM(max_speed,"i");
+MODULE_PARM_DESC(max_speed, "Force down max speed (2 = 400mb default, 1 = 200mb, 0 = 100mb)");
+static int max_speed = SPEED_S400;
+
+/*
+ * Set serialize_io to 1 if you'd like only one scsi command sent down to us at a time (debugging).
+ */
+MODULE_PARM(serialize_io,"i");
+MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (debugging)");
+static int serialize_io = 1; /* serialize I/O until stress issues are resolved */
+
+/*
+ * Set no_large_packets to 1 if you'd like to limit the size of requests sent down to us (normally
+ * the sbp2 driver will break up any requests to any individual devices with 128KB transfer size limits).
+ * Sets max s/g list elements to 0x1f in size and disables s/g clustering.
+ */
+MODULE_PARM(no_large_packets,"i");
+MODULE_PARM_DESC(no_large_packets, "Do not allow large transfers from scsi drivers (debugging)");
+static int no_large_packets = 0;
+
+/*
+ * Debug levels, configured via kernel config.
+ */
+
+#ifdef CONFIG_IEEE1394_SBP2_DEBUG_ORBS
+#define SBP2_ORB_DEBUG(fmt, args...) HPSB_ERR("sbp2("__FUNCTION__"): "fmt, ## args)
+u32 global_outstanding_command_orbs = 0;
+#define outstanding_orb_incr global_outstanding_command_orbs++
+#define outstanding_orb_decr global_outstanding_command_orbs--
+#else
+#define SBP2_ORB_DEBUG(fmt, args...)
+#define outstanding_orb_incr
+#define outstanding_orb_decr
+#endif
+
+#ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA
+#define SBP2_DMA_ALLOC(fmt, args...) \
+ HPSB_ERR("sbp2("__FUNCTION__")alloc(%d): "fmt, \
+ ++global_outstanding_dmas, ## args)
+#define SBP2_DMA_FREE(fmt, args...) \
+ HPSB_ERR("sbp2("__FUNCTION__")free(%d): "fmt, \
+ --global_outstanding_dmas, ## args)
+u32 global_outstanding_dmas = 0;
+#else
+#define SBP2_DMA_ALLOC(fmt, args...)
+#define SBP2_DMA_FREE(fmt, args...)
+#endif
+
+#if CONFIG_IEEE1394_SBP2_DEBUG >= 2
+#define SBP2_DEBUG(fmt, args...) HPSB_ERR(fmt, ## args)
+#define SBP2_INFO(fmt, args...) HPSB_ERR(fmt, ## args)
+#define SBP2_NOTICE(fmt, args...) HPSB_ERR(fmt, ## args)
+#define SBP2_WARN(fmt, args...) HPSB_ERR(fmt, ## args)
+#elif CONFIG_IEEE1394_SBP2_DEBUG == 1
+#define SBP2_DEBUG(fmt, args...) HPSB_DEBUG(fmt, ## args)
+#define SBP2_INFO(fmt, args...) HPSB_INFO(fmt, ## args)
+#define SBP2_NOTICE(fmt, args...) HPSB_NOTICE(fmt, ## args)
+#define SBP2_WARN(fmt, args...) HPSB_WARN(fmt, ## args)
+#else
+#define SBP2_DEBUG(fmt, args...)
+#define SBP2_INFO(fmt, args...)
+#define SBP2_NOTICE(fmt, args...)
+#define SBP2_WARN(fmt, args...)
+#endif
+
+#define SBP2_ERR(fmt, args...) HPSB_ERR(fmt, ## args)
+
+/*
+ * Spinlock debugging stuff. I'm playing it safe until the driver has been debugged on SMP. (JSG)
+ */
+/* #define SBP2_USE_REAL_SPINLOCKS */
+#ifdef SBP2_USE_REAL_SPINLOCKS
+#define sbp2_spin_lock(lock, flags) spin_lock_irqsave(lock, flags)
+#define sbp2_spin_unlock(lock, flags) spin_unlock_irqrestore(lock, flags);
+#else
+#define sbp2_spin_lock(lock, flags) do {save_flags(flags); cli();} while (0)
+#define sbp2_spin_unlock(lock, flags) do {restore_flags(flags);} while (0)
+#endif
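/*
 * Illustrative sketch only: the shared lists used below (e.g.
 * hi->sbp2_req_free and hi->sbp2_req_inuse) are always manipulated under
 * these wrappers, along the lines of:
 *
 *	unsigned long flags;
 *
 *	sbp2_spin_lock(&hi->sbp2_request_packet_lock, flags);
 *	// ... add to or remove from the request packet lists ...
 *	sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
 *
 * With SBP2_USE_REAL_SPINLOCKS left undefined this reduces to
 * save_flags()/cli() and restore_flags(), i.e. blanket interrupt
 * disabling rather than a real SMP spinlock.
 */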
+
+/*
+ * Globals
+ */
+
+Scsi_Host_Template *global_scsi_tpnt = NULL;
+
+LIST_HEAD(sbp2_host_info_list);
+static int sbp2_host_count = 0;
+spinlock_t sbp2_host_info_lock = SPIN_LOCK_UNLOCKED;
+
+static struct hpsb_highlevel *sbp2_hl_handle = NULL;
+
+static struct hpsb_highlevel_ops sbp2_hl_ops = {
+ sbp2_add_host,
+ sbp2_remove_host,
+ sbp2_host_reset,
+ NULL,
+ NULL
+};
+
+static struct hpsb_address_ops sbp2_ops = {
+ write: sbp2_handle_status_write,
+};
+
+#if 0
+static struct hpsb_address_ops sbp2_physdma_ops = {
+ read: sbp2_handle_physdma_read,
+ write: sbp2_handle_physdma_write,
+};
+#endif
+
+/**************************************
+ * General utility functions
+ **************************************/
+
+
+#ifndef __BIG_ENDIAN
+/*
+ * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
+ */
+static __inline__ void sbp2util_be32_to_cpu_buffer(void *buffer, int length)
+{
+ u32 *temp = buffer;
+
+ for (length = (length >> 2); length--; )
+ temp[length] = be32_to_cpu(temp[length]);
+
+ return;
+}
+
+/*
+ * Converts a buffer from cpu to be32 byte ordering. Length is in bytes.
+ */
+static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
+{
+ u32 *temp = buffer;
+
+ for (length = (length >> 2); length--; )
+ temp[length] = cpu_to_be32(temp[length]);
+
+ return;
+}
+#else /* BIG_ENDIAN */
+/* Why waste the cpu cycles? */
+#define sbp2util_be32_to_cpu_buffer(x,y)
+#define sbp2util_cpu_to_be32_buffer(x,y)
+#endif
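/*
 * Illustrative sketch only: anything the target reads or writes over the
 * bus is kept in big-endian order, so buffers are converted around each
 * transfer roughly as follows (the "response" variable is a placeholder,
 * not a name from the driver):
 *
 *	// before handing a command ORB to the target
 *	sbp2util_cpu_to_be32_buffer(&command->command_orb,
 *				    sizeof(struct sbp2_command_orb));
 *
 *	// after status or a login response has been DMA'd back in
 *	sbp2util_be32_to_cpu_buffer(&response, sizeof(response));
 *
 * On big-endian machines both calls compile away to nothing.
 */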
+
+/*
+ * This function does quadlet sized reads (used by detection code)
+ */
+static int sbp2util_read_quadlet(struct sbp2scsi_host_info *hi, nodeid_t node, u64 addr,
+ quadlet_t *buffer)
+{
+ int retval = 0;
+ int retry_count = 3;
+
+ /*
+ * Retry a couple times if needed (for slow devices)
+ */
+ do {
+
+ retval = hpsb_read(hi->host, node, addr, buffer, 4);
+
+ if (retval) {
+ SBP2_DEBUG("sbp2: sbp2util_read_quadlet data packet error");
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ/50); /* 20ms delay */
+ }
+
+ retry_count--;
+
+ } while (retval && retry_count);
+
+ return(retval);
+}
+
+/*
+ * This function returns the address of the unit directory.
+ */
+static int sbp2util_unit_directory(struct sbp2scsi_host_info *hi, nodeid_t node_id, u64 *unit_directory_addr)
+{
+ quadlet_t root_directory_length, current_quadlet;
+ u64 current_addr;
+ int length, i;
+
+ /*
+ * First, read the first quadlet of the root directory to determine its size
+ */
+ if (sbp2util_read_quadlet(hi, LOCAL_BUS | node_id, CONFIG_ROM_ROOT_DIR_BASE,
+ &root_directory_length)) {
+ SBP2_DEBUG("sbp2: Error reading root directory length - bad status");
+ return(-EIO);
+ }
+
+ current_addr = CONFIG_ROM_ROOT_DIR_BASE;
+ length = be32_to_cpu(root_directory_length) >> 16;
+
+ /*
+ * Step through the root directory and look for the "Unit_Directory entry", which
+ * contains the offset to the unit directory.
+ */
+ for (i=0; i < length; i++) {
+
+ current_addr += 4;
+
+		if (sbp2util_read_quadlet(hi, LOCAL_BUS | node_id, current_addr, &current_quadlet)) {
+ SBP2_DEBUG("sbp2: Error reading at address 0x%08x%08x - bad status",
+ (unsigned int) ((current_addr) >> 32), (unsigned int) ((current_addr) & 0xffffffff));
+ return(-EIO);
+ }
+
+ /*
+ * Check for unit directory offset tag
+ */
+ if ((be32_to_cpu(current_quadlet) >> 24) == SBP2_UNIT_DIRECTORY_OFFSET_KEY) {
+ *unit_directory_addr = current_addr + 4 * ((be32_to_cpu(current_quadlet) & 0xffffff));
+ SBP2_DEBUG("sbp2: unit_directory_addr = %lu", *unit_directory_addr);
+ }
+ }
+
+ return(0);
+}
+
+/*
+ * This function is called to initially create a packet pool for use in sbp2 I/O requests.
+ * This packet pool is used when sending out sbp2 command and agent reset requests, and
+ * allows us to remove all kmallocs/kfrees from the critical I/O paths.
+ */
+static int sbp2util_create_request_packet_pool(struct sbp2scsi_host_info *hi)
+{
+ struct hpsb_packet *packet;
+ int i;
+ unsigned long flags;
+
+ /*
+ * Create SBP2_MAX_REQUEST_PACKETS number of request packets.
+ */
+ sbp2_spin_lock(&hi->sbp2_request_packet_lock, flags);
+ for (i=0; i<SBP2_MAX_REQUEST_PACKETS; i++) {
+
+ /*
+ * Max payload of 8 bytes since the sbp2 command request uses a payload of
+ * 8 bytes, and agent reset is a quadlet write request. Bump this up if we
+ * plan on using this pool for other stuff.
+ */
+ packet = alloc_hpsb_packet(8);
+
+ if (!packet) {
+ SBP2_ERR("sbp2: sbp2util_create_request_packet_pool - packet allocation failed!");
+ sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
+ return(-ENOMEM);
+ }
+
+ /*
+ * Put these request packets into a free list
+ */
+ INIT_LIST_HEAD(&hi->request_packet[i].list);
+ hi->request_packet[i].packet = packet;
+ list_add_tail(&hi->request_packet[i].list, &hi->sbp2_req_free);
+
+ }
+ sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
+
+ return(0);
+}
+
+/*
+ * This function is called to remove the packet pool. It is called when the sbp2 driver is unloaded.
+ */
+static void sbp2util_remove_request_packet_pool(struct sbp2scsi_host_info *hi)
+{
+ struct list_head *lh;
+ struct sbp2_request_packet *request_packet;
+ unsigned long flags;
+
+ /*
+ * Go through free list releasing packets
+ */
+ sbp2_spin_lock(&hi->sbp2_request_packet_lock, flags);
+ while (!list_empty(&hi->sbp2_req_free)) {
+
+ lh = hi->sbp2_req_free.next;
+ list_del(lh);
+
+ request_packet = list_entry(lh, struct sbp2_request_packet, list);
+
+ /*
+ * Free the hpsb packets that we allocated for the pool
+ */
+ if (request_packet) {
+ free_hpsb_packet(request_packet->packet);
+ }
+
+ }
+ sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
+
+ return;
+}
+
+/*
+ * This function is called to retrieve a block write packet from our packet pool. This function is
+ * used in place of calling alloc_hpsb_packet (which costs us three kmallocs). Instead we
+ * just pull out a free request packet and re-initialize values in it. I'm sure this can still
+ * stand some more optimization.
+ */
+static struct sbp2_request_packet *sbp2util_allocate_write_request_packet(struct sbp2scsi_host_info *hi,
+ nodeid_t node, u64 addr,
+ size_t data_size,
+ quadlet_t data) {
+ struct list_head *lh;
+ struct sbp2_request_packet *request_packet = NULL;
+ struct hpsb_packet *packet;
+ unsigned long flags;
+
+ sbp2_spin_lock(&hi->sbp2_request_packet_lock, flags);
+ if (!list_empty(&hi->sbp2_req_free)) {
+
+ /*
+ * Pull out a free request packet
+ */
+ lh = hi->sbp2_req_free.next;
+ list_del(lh);
+
+ request_packet = list_entry(lh, struct sbp2_request_packet, list);
+ packet = request_packet->packet;
+
+ /*
+ * Initialize the packet (this is really initialization the core 1394 stack should do,
+ * but I'm doing it myself to avoid the overhead).
+ */
+ packet->data_size = data_size;
+ INIT_LIST_HEAD(&packet->list);
+ sema_init(&packet->state_change, 0);
+ packet->state = unused;
+ packet->generation = get_hpsb_generation();
+ packet->data_be = 1;
+
+ packet->host = hi->host;
+ packet->tlabel = get_tlabel(hi->host, node, 1);
+ packet->node_id = node;
+
+ if (!data_size) {
+ fill_async_writequad(packet, addr, data);
+ } else {
+ fill_async_writeblock(packet, addr, data_size);
+ }
+
+ /*
+ * Set up a task queue completion routine, which returns the packet to the free list
+ * and releases the tlabel
+ */
+ request_packet->tq.routine = (void (*)(void*))sbp2util_free_request_packet;
+ request_packet->tq.data = request_packet;
+ request_packet->hi_context = hi;
+ queue_task(&request_packet->tq, &packet->complete_tq);
+
+ /*
+ * Now, put the packet on the in-use list
+ */
+ list_add_tail(&request_packet->list, &hi->sbp2_req_inuse);
+
+ } else {
+ SBP2_ERR("sbp2: sbp2util_allocate_request_packet - no packets available!");
+ }
+ sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
+
+ return(request_packet);
+}
+
+/*
+ * This function is called to return a packet to our packet pool. It is also called as a
+ * completion routine when a request packet is completed.
+ */
+static void sbp2util_free_request_packet(struct sbp2_request_packet *request_packet)
+{
+ unsigned long flags;
+ struct sbp2scsi_host_info *hi = request_packet->hi_context;
+
+ /*
+ * Free the tlabel, and return the packet to the free pool
+ */
+ sbp2_spin_lock(&hi->sbp2_request_packet_lock, flags);
+ free_tlabel(hi->host, LOCAL_BUS | request_packet->packet->node_id,
+ request_packet->packet->tlabel);
+ list_del(&request_packet->list);
+ list_add_tail(&request_packet->list, &hi->sbp2_req_free);
+ sbp2_spin_unlock(&hi->sbp2_request_packet_lock, flags);
+
+ return;
+}
+
+/*
+ * This function is called to create a pool of command orbs used for command processing. It is called
+ * when a new sbp2 device is detected.
+ */
+static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id,
+ struct sbp2scsi_host_info *hi)
+{
+ int i;
+ unsigned long flags;
+ struct sbp2_command_info *command;
+
+ sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
+ for (i = 0; i < scsi_id->sbp2_total_command_orbs; i++) {
+ command = (struct sbp2_command_info *)
+ kmalloc(sizeof(struct sbp2_command_info), GFP_KERNEL);
+ if (!command) {
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+ return(-ENOMEM);
+ }
+ memset(command, '\0', sizeof(struct sbp2_command_info));
+ command->command_orb_dma =
+ pci_map_single (hi->host->pdev, &command->command_orb,
+ sizeof(struct sbp2_command_orb),
+ PCI_DMA_BIDIRECTIONAL);
+ SBP2_DMA_ALLOC("single command orb DMA");
+ command->sge_dma =
+ pci_map_single (hi->host->pdev, &command->scatter_gather_element,
+ sizeof(command->scatter_gather_element),
+ PCI_DMA_BIDIRECTIONAL);
+ SBP2_DMA_ALLOC("scatter_gather_element");
+ INIT_LIST_HEAD(&command->list);
+ list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
+ }
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+ return 0;
+}
+
+/*
+ * This function is called to delete a pool of command orbs.
+ */
+static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id,
+ struct sbp2scsi_host_info *hi)
+{
+ struct list_head *lh;
+ struct sbp2_command_info *command;
+ unsigned long flags;
+
+ sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
+ if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
+ list_for_each(lh, &scsi_id->sbp2_command_orb_completed) {
+ command = list_entry(lh, struct sbp2_command_info, list);
+
+ /* Release our generic DMA's */
+ pci_unmap_single(hi->host->pdev, command->command_orb_dma,
+ sizeof(struct sbp2_command_orb),
+ PCI_DMA_BIDIRECTIONAL);
+ SBP2_DMA_FREE("single command orb DMA");
+ pci_unmap_single(hi->host->pdev, command->sge_dma,
+ sizeof(command->scatter_gather_element),
+ PCI_DMA_BIDIRECTIONAL);
+ SBP2_DMA_FREE("scatter_gather_element");
+
+ kfree(command);
+ }
+ }
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+ return;
+}
+
+/*
+ * This function finds the sbp2_command for a given outstanding
+ * command orb. Only looks at the inuse list.
+ */
+static struct sbp2_command_info *sbp2util_find_command_for_orb(struct scsi_id_instance_data *scsi_id, dma_addr_t orb)
+{
+ struct list_head *lh;
+ struct sbp2_command_info *command;
+ unsigned long flags;
+
+ sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
+ if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
+ list_for_each(lh, &scsi_id->sbp2_command_orb_inuse) {
+ command = list_entry(lh, struct sbp2_command_info, list);
+ if (command->command_orb_dma == orb) {
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+ return (command);
+ }
+ }
+ }
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+
+ SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
+
+ return(NULL);
+}
+
+/*
+ * This function finds the sbp2_command for a given outstanding SCpnt. Only looks at the inuse list.
+ */
+static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt)
+{
+ struct list_head *lh;
+ struct sbp2_command_info *command;
+ unsigned long flags;
+
+ sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
+ if (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
+ list_for_each(lh, &scsi_id->sbp2_command_orb_inuse) {
+ command = list_entry(lh, struct sbp2_command_info, list);
+ if (command->Current_SCpnt == SCpnt) {
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+ return (command);
+ }
+ }
+ }
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+ return(NULL);
+}
+
+/*
+ * This function allocates a command orb used to send a scsi command.
+ */
+static struct sbp2_command_info *sbp2util_allocate_command_orb(struct scsi_id_instance_data *scsi_id,
+ Scsi_Cmnd *Current_SCpnt,
+ void (*Current_done)(Scsi_Cmnd *),
+ struct sbp2scsi_host_info *hi)
+{
+ struct list_head *lh;
+ struct sbp2_command_info *command = NULL;
+ unsigned long flags;
+
+ sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
+ if (!list_empty(&scsi_id->sbp2_command_orb_completed)) {
+ lh = scsi_id->sbp2_command_orb_completed.next;
+ list_del(lh);
+ command = list_entry(lh, struct sbp2_command_info, list);
+ command->Current_done = Current_done;
+ command->Current_SCpnt = Current_SCpnt;
+ command->linked = 0;
+ list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse);
+ } else {
+ SBP2_ERR("sbp2: sbp2util_allocate_command_orb - No orbs available!");
+ }
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+ return (command);
+}
+
+/* Free our DMA's */
+static void sbp2util_free_command_dma(struct sbp2_command_info *command)
+{
+ struct sbp2scsi_host_info *hi;
+
+ hi = (struct sbp2scsi_host_info *) command->Current_SCpnt->host->hostdata[0];
+
+ if (hi == NULL) {
+ printk(KERN_ERR __FUNCTION__": hi == NULL\n");
+ return;
+ }
+
+ if (command->cmd_dma) {
+ pci_unmap_single(hi->host->pdev, command->cmd_dma,
+ command->dma_size, command->dma_dir);
+ SBP2_DMA_FREE("single bulk");
+ command->cmd_dma = 0;
+ }
+
+ if (command->sge_buffer) {
+ pci_unmap_sg(hi->host->pdev, command->sge_buffer,
+ command->dma_size, command->dma_dir);
+ SBP2_DMA_FREE("scatter list");
+ command->sge_buffer = NULL;
+ }
+}
+
+/*
+ * This function moves a command to the completed orb list.
+ */
+static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command)
+{
+ unsigned long flags;
+
+ sbp2_spin_lock(&scsi_id->sbp2_command_orb_lock, flags);
+ list_del(&command->list);
+ sbp2util_free_command_dma(command);
+ list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
+ sbp2_spin_unlock(&scsi_id->sbp2_command_orb_lock, flags);
+}
+
+/*********************************************
+ * IEEE-1394 core driver stack related section
+ *********************************************/
+
+/*
+ * This function is called at SCSI init in order to register our driver with the
+ * IEEE-1394 stack
+ */
+int sbp2_init(void)
+{
+ SBP2_DEBUG("sbp2: sbp2_init");
+
+ /*
+ * Register our high level driver with 1394 stack
+ */
+ sbp2_hl_handle = hpsb_register_highlevel(SBP2_DEVICE_NAME, &sbp2_hl_ops);
+
+ if (sbp2_hl_handle == NULL) {
+ SBP2_ERR("sbp2: sbp2 failed to register with ieee1394 highlevel");
+ return(-ENOMEM);
+ }
+
+ /*
+ * Register our sbp2 status address space...
+ */
+ hpsb_register_addrspace(sbp2_hl_handle, &sbp2_ops, SBP2_STATUS_FIFO_ADDRESS,
+ SBP2_STATUS_FIFO_ADDRESS + sizeof(struct sbp2_status_block));
+
+ /*
+ * Register physical dma address space... used for
+ * adapters not supporting hardware phys dma.
+ *
+ * XXX: Disabled for now.
+ */
+ /* hpsb_register_addrspace(sbp2_hl_handle, &sbp2_physdma_ops,
+ 0x0ULL, 0xfffffffcULL); */
+
+ return(0);
+}
+
+/*
+ * This function is called from cleanup module, or during shut-down, in order to
+ * unregister our driver
+ */
+void sbp2_cleanup(void)
+{
+ SBP2_DEBUG("sbp2: sbp2_cleanup");
+
+ if (sbp2_hl_handle) {
+ hpsb_unregister_highlevel(sbp2_hl_handle);
+ sbp2_hl_handle = NULL;
+ }
+ return;
+}
+
+/*
+ * This function is called after registering our operations in sbp2_init. We go ahead and
+ * allocate some memory for our host info structure, and init some structures.
+ */
+static void sbp2_add_host(struct hpsb_host *host)
+{
+ struct sbp2scsi_host_info *hi;
+	unsigned long flags;
+
+ SBP2_DEBUG("sbp2: sbp2_add_host");
+
+ /*
+ * Allocate some memory for our host info structure
+ */
+ hi = (struct sbp2scsi_host_info *)kmalloc(sizeof(struct sbp2scsi_host_info), GFP_KERNEL);
+
+ if (hi != NULL) {
+
+ /*
+ * Initialize some host stuff
+ */
+ memset(hi, 0, sizeof(struct sbp2scsi_host_info));
+ INIT_LIST_HEAD(&hi->list);
+ INIT_LIST_HEAD(&hi->sbp2_req_inuse);
+ INIT_LIST_HEAD(&hi->sbp2_req_free);
+ hi->host = host;
+ hi->sbp2_command_lock = SPIN_LOCK_UNLOCKED;
+ hi->sbp2_request_packet_lock = SPIN_LOCK_UNLOCKED;
+
+ /*
+ * Create our request packet pool (pool of packets for use in I/O)
+ */
+		if (sbp2util_create_request_packet_pool(hi)) {
+			SBP2_ERR("sbp2: sbp2util_create_request_packet_pool failed!");
+			kfree(hi);
+			return;
+		}
+
+ sbp2_spin_lock(&sbp2_host_info_lock, flags);
+ list_add_tail(&hi->list, &sbp2_host_info_list);
+ sbp2_host_count++;
+		sbp2_spin_unlock(&sbp2_host_info_lock, flags);
+
+ /*
+ * Initialize us to bus reset in progress
+ */
+ hi->bus_reset_in_progress = 1;
+
+ /*
+ * Register our host with the SCSI stack.
+ */
+ sbp2scsi_register_scsi_host(hi);
+
+ /*
+ * Start our kernel thread to deal with sbp2 device detection
+ */
+ init_waitqueue_head(&hi->sbp2_detection_wait);
+ hi->sbp2_detection_pid = 0;
+ hi->sbp2_detection_pid = kernel_thread(sbp2_detection_thread, hi, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+
+ }
+
+ return;
+}
+
+/*
+ * This function returns a host info structure from the host structure, in case we have multiple hosts
+ */
+static struct sbp2scsi_host_info *sbp2_find_host_info(struct hpsb_host *host) {
+ struct list_head *lh;
+ struct sbp2scsi_host_info *hi;
+
+ lh = sbp2_host_info_list.next;
+ while (lh != &sbp2_host_info_list) {
+ hi = list_entry(lh, struct sbp2scsi_host_info, list);
+ if (hi->host == host) {
+ return hi;
+ }
+ lh = lh->next;
+ }
+
+ return(NULL);
+}
+
+/*
+ * This function is called when the host is removed
+ */
+static void sbp2_remove_host(struct hpsb_host *host)
+{
+ struct sbp2scsi_host_info *hi;
+ int i;
+	unsigned long flags;
+
+ SBP2_DEBUG("sbp2: sbp2_remove_host");
+
+ sbp2_spin_lock(&sbp2_host_info_lock, flags);
+ hi = sbp2_find_host_info(host);
+
+ if (hi != NULL) {
+
+ /*
+ * Need to remove any attached SBP-2 devices. Also make sure to logout of all devices.
+ */
+ for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
+ if (hi->scsi_id[i]) {
+ sbp2_logout_device(hi, hi->scsi_id[i]);
+ hi->scsi_id[i]->validated = 0;
+ }
+ }
+
+ sbp2_remove_unvalidated_devices(hi);
+
+ list_del(&hi->list);
+ sbp2_host_count--;
+ }
+ sbp2_spin_unlock(&sbp2_host_info_lock, flags);
+
+ if (hi == NULL) {
+ SBP2_ERR("sbp2: attempt to remove unknown host %p", host);
+ return;
+ }
+
+ /*
+ * Remove the packet pool (release the packets)
+ */
+ sbp2util_remove_request_packet_pool(hi);
+
+ /*
+ * Kill our detection thread
+ */
+ if (hi->sbp2_detection_pid >= 0) {
+ kill_proc(hi->sbp2_detection_pid, SIGINT, 1);
+ }
+
+ /*
+ * Give the detection thread a little time to exit
+ */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ); /* 1 second delay */
+
+ kfree(hi);
+ hi = NULL;
+
+ return;
+}
+
+/*
+ * This is our sbp2 detection thread. It is signalled when bus resets occur
+ * so that we can find and initialize any sbp2 devices.
+ */
+static int sbp2_detection_thread(void *__hi)
+{
+ struct sbp2scsi_host_info *hi = (struct sbp2scsi_host_info *)__hi;
+
+ SBP2_DEBUG("sbp2: sbp2_detection_thread");
+
+ lock_kernel();
+
+ /*
+ * This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ daemonize();
+
+ /*
+ * Set-up a nice name
+ */
+ strcpy(current->comm, "sbp2");
+
+ unlock_kernel();
+
+ while ((!signal_pending(current)) && hi) {
+
+ /*
+ * Process our bus reset now
+ */
+ if (hi) {
+ MOD_INC_USE_COUNT;
+ sbp2_bus_reset_handler(hi);
+ MOD_DEC_USE_COUNT;
+ }
+
+ /*
+ * Sleep until next bus reset
+ */
+ if (hi) {
+ interruptible_sleep_on(&hi->sbp2_detection_wait);
+ }
+ }
+
+ return(0);
+}
+
+/*
+ * This function is where we first pull the node unique ids, and then allocate memory and register
+ * an SBP-2 device
+ */
+static int sbp2_start_device(struct sbp2scsi_host_info *hi, int node_id)
+{
+ quadlet_t node_unique_id_lo, node_unique_id_hi;
+ u64 node_unique_id;
+ struct scsi_id_instance_data *scsi_id = NULL;
+ int i;
+
+ SBP2_DEBUG("sbp2: sbp2_start_device");
+
+ /*
+ * Let's read the node unique id off of the device (using two quadlet reads for hi and lo)
+ */
+ if (sbp2util_read_quadlet(hi, LOCAL_BUS | node_id, CONFIG_ROM_NODE_UNIQUE_ID_HI_ADDRESS,
+ &node_unique_id_hi)) {
+ SBP2_DEBUG("sbp2: Error reading node unique id - bad status");
+ return(-EIO);
+ }
+
+ if (sbp2util_read_quadlet(hi, LOCAL_BUS | node_id, CONFIG_ROM_NODE_UNIQUE_ID_LO_ADDRESS,
+ &node_unique_id_lo)) {
+ SBP2_DEBUG("sbp2: Error reading node unique id - bad status");
+ return(-EIO);
+ }
+
+ /*
+ * Spit out the node unique ids we got
+ */
+ SBP2_DEBUG("sbp2: Node %x, node unique id hi = %x", (LOCAL_BUS | node_id), (unsigned int) node_unique_id_hi);
+ SBP2_DEBUG("sbp2: Node %x, node unique id lo = %x", (LOCAL_BUS | node_id), (unsigned int) node_unique_id_lo);
+
+ node_unique_id = (((u64)node_unique_id_hi) << 32) | ((u64)node_unique_id_lo);
+
+ /*
+ * First, we need to find out whether this is a "new" SBP-2 device plugged in, or one that already
+ * exists and is initialized. We do this by looping through our scsi id instance data structures
+ * looking for matching node unique ids.
+ */
+ for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
+
+ if (hi->scsi_id[i]) {
+
+ if (hi->scsi_id[i]->node_unique_id == node_unique_id) {
+
+ /*
+ * Update our node id
+ */
+ hi->scsi_id[i]->node_id = node_id;
+
+ /*
+ * Mark the device as validated, since it still exists on the bus
+ */
+ hi->scsi_id[i]->validated = 1;
+ SBP2_DEBUG("sbp2: SBP-2 device re-validated, SCSI ID = %x", (unsigned int) i);
+
+ /*
+ * Reconnect to the sbp-2 device
+ */
+ if (sbp2_reconnect_device(hi, hi->scsi_id[i])) {
+
+ /*
+ * Ok, reconnect has failed. Perhaps we didn't reconnect fast enough. Try
+ * doing a regular login.
+ */
+ if (sbp2_login_device(hi, hi->scsi_id[i])) {
+
+ /*
+ * Login failed too... so, just mark him as unvalidated, so that he gets cleaned up
+ * later
+ */
+					SBP2_ERR("sbp2: sbp2_reconnect_device failed - fallback login failed as well!");
+ hi->scsi_id[i]->validated = 0;
+ }
+ }
+
+ if (hi->scsi_id[i]->validated) {
+
+ /*
+ * Set max retries to something large on the device
+ */
+ sbp2_set_busy_timeout(hi, hi->scsi_id[i]);
+
+ /*
+ * Do a SBP-2 fetch agent reset
+ */
+ sbp2_agent_reset(hi, hi->scsi_id[i], 0);
+
+ /*
+ * Get the max speed and packet size that we can use
+ */
+ sbp2_max_speed_and_size(hi, hi->scsi_id[i]);
+
+ }
+
+ /*
+ * Nothing more to do, since we found the device
+ */
+ return(0);
+
+ }
+ }
+ }
+
+ /*
+ * This really is a "new" device plugged in. Let's allocate memory for our scsi id instance data
+ */
+ scsi_id = (struct scsi_id_instance_data *)kmalloc(sizeof(struct scsi_id_instance_data),
+ GFP_KERNEL);
+ if (!scsi_id)
+ goto alloc_fail_first;
+ memset(scsi_id, 0, sizeof(struct scsi_id_instance_data));
+
+ /* Login FIFO DMA */
+ scsi_id->login_response =
+ pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_response),
+ &scsi_id->login_response_dma);
+ if (!scsi_id->login_response)
+ goto alloc_fail;
+ SBP2_DMA_ALLOC("consistent DMA region for login FIFO");
+
+ /* Reconnect ORB DMA */
+ scsi_id->reconnect_orb =
+ pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_reconnect_orb),
+ &scsi_id->reconnect_orb_dma);
+ if (!scsi_id->reconnect_orb)
+ goto alloc_fail;
+ SBP2_DMA_ALLOC("consistent DMA region for reconnect ORB");
+
+ /* Logout ORB DMA */
+ scsi_id->logout_orb =
+ pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_logout_orb),
+ &scsi_id->logout_orb_dma);
+ if (!scsi_id->logout_orb)
+ goto alloc_fail;
+ SBP2_DMA_ALLOC("consistent DMA region for logout ORB");
+
+ /* Login ORB DMA */
+ scsi_id->login_orb =
+ pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb),
+ &scsi_id->login_orb_dma);
+ if (scsi_id->login_orb == NULL) {
+alloc_fail:
+ if (scsi_id->logout_orb) {
+ pci_free_consistent(hi->host->pdev,
+ sizeof(struct sbp2_logout_orb),
+ scsi_id->logout_orb,
+ scsi_id->logout_orb_dma);
+ SBP2_DMA_FREE("logout ORB DMA");
+ }
+
+ if (scsi_id->reconnect_orb) {
+ pci_free_consistent(hi->host->pdev,
+ sizeof(struct sbp2_reconnect_orb),
+ scsi_id->reconnect_orb,
+ scsi_id->reconnect_orb_dma);
+ SBP2_DMA_FREE("reconnect ORB DMA");
+ }
+
+ if (scsi_id->login_response) {
+ pci_free_consistent(hi->host->pdev,
+ sizeof(struct sbp2_login_response),
+ scsi_id->login_response,
+ scsi_id->login_response_dma);
+ SBP2_DMA_FREE("login FIFO DMA");
+ }
+
+ kfree(scsi_id);
+alloc_fail_first:
+ SBP2_ERR ("sbp2: Could not allocate memory for scsi_id");
+ return(-ENOMEM);
+ }
+ SBP2_DMA_ALLOC("consistent DMA region for login ORB");
+
+ /*
+ * Initialize some of the fields in this structure
+ */
+ scsi_id->node_id = node_id;
+ scsi_id->node_unique_id = node_unique_id;
+ scsi_id->validated = 1;
+ scsi_id->speed_code = SPEED_S100;
+ scsi_id->max_payload_size = MAX_PAYLOAD_S100;
+
+ init_waitqueue_head(&scsi_id->sbp2_login_wait);
+
+ /*
+ * Initialize structures needed for the command orb pool.
+ */
+ INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse);
+ INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
+ scsi_id->sbp2_command_orb_lock = SPIN_LOCK_UNLOCKED;
+ scsi_id->sbp2_total_command_orbs = 0;
+
+ /*
+ * Make sure that we've gotten ahold of the sbp2 management agent address. Also figure out the
+ * command set being used (SCSI or RBC).
+ */
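+	/*
+	 * (The command set matters later in sbp2_send_command(), where disk/RBC
+	 * style devices get their CDBs massaged by sbp2_check_sbp2_command() -
+	 * e.g. six byte READ/WRITE commands turned into their ten byte forms.)
+	 */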
+ if (sbp2_parse_unit_directory(hi, scsi_id)) {
+ SBP2_ERR("sbp2: Error while parsing sbp2 unit directory");
+		scsi_id->validated = 0;
+ return(-EIO);
+ }
+
+ scsi_id->sbp2_total_command_orbs = SBP2_MAX_COMMAND_ORBS;
+
+ /*
+ * Knock the total command orbs down if we are serializing I/O
+ */
+ if (serialize_io) {
+ scsi_id->sbp2_total_command_orbs = 2; /* one extra for good measure */
+ }
+
+ /*
+ * Allocate some extra command orb structures for devices with 128KB limit
+ */
+ if (scsi_id->sbp2_firmware_revision == SBP2_128KB_BROKEN_FIRMWARE) {
+ scsi_id->sbp2_total_command_orbs *= 4;
+ }
+
+ /*
+ * Create our command orb pool
+ */
+ if (sbp2util_create_command_orb_pool(scsi_id, hi)) {
+ SBP2_ERR("sbp2: sbp2util_create_command_orb_pool failed!");
+		scsi_id->validated = 0;
+ return (-ENOMEM);
+ }
+
+ /*
+ * Find an empty spot to stick our scsi id instance data.
+ */
+ for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
+ if (!hi->scsi_id[i]) {
+ hi->scsi_id[i] = scsi_id;
+ SBP2_DEBUG("sbp2: New SBP-2 device inserted, SCSI ID = %x", (unsigned int) i);
+ break;
+ }
+ }
+
+ /*
+ * Make sure we are not out of space
+ */
+ if (i >= SBP2SCSI_MAX_SCSI_IDS) {
+ SBP2_ERR("sbp2: No slots left for SBP-2 device");
+		scsi_id->validated = 0;
+ return(-EBUSY);
+ }
+
+ /*
+ * Login to the sbp-2 device
+ */
+ if (sbp2_login_device(hi, hi->scsi_id[i])) {
+
+ /*
+ * Login failed... so, just mark him as unvalidated, so that he gets cleaned up later
+ */
+ SBP2_ERR("sbp2: sbp2_login_device failed");
+ hi->scsi_id[i]->validated = 0;
+ }
+
+ if (hi->scsi_id[i]->validated) {
+
+ /*
+ * Set max retries to something large on the device
+ */
+ sbp2_set_busy_timeout(hi, hi->scsi_id[i]);
+
+ /*
+ * Do a SBP-2 fetch agent reset
+ */
+ sbp2_agent_reset(hi, hi->scsi_id[i], 0);
+
+ /*
+ * Get the max speed and packet size that we can use
+ */
+ sbp2_max_speed_and_size(hi, hi->scsi_id[i]);
+
+ }
+
+ return(0);
+}
+
+/*
+ * This function tries to determine if a device is a valid SBP-2 device
+ */
+static int sbp2_check_device(struct sbp2scsi_host_info *hi, int node_id)
+{
+ quadlet_t unit_spec_id_data = 0, unit_sw_ver_data = 0;
+ quadlet_t unit_directory_length, current_quadlet;
+ u64 unit_directory_addr, current_addr;
+ unsigned int i, length;
+
+ SBP2_DEBUG("sbp2: sbp2_check_device");
+
+ /*
+ * Let's try and read the unit spec id and unit sw ver to determine if this is an SBP2 device...
+ */
+
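+	/*
+	 * (For reference: per the SBP-2 / 1394 TA specifications, an SBP-2 unit
+	 * directory is expected to carry the 1394 TA unit_spec_ID of 0x00609e
+	 * and a unit_sw_version of 0x010483, which is what the
+	 * SBP2_UNIT_SPEC_ID_ENTRY and SBP2_SW_VERSION_ENTRY constants are
+	 * assumed to encode.)
+	 */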
+ if (sbp2util_unit_directory(hi, LOCAL_BUS | node_id, &unit_directory_addr)) {
+ SBP2_DEBUG("sbp2: Error reading unit directory address - bad status");
+ return(-EIO);
+ }
+
+ /*
+ * Read the size of the unit directory
+ */
+ if (sbp2util_read_quadlet(hi, LOCAL_BUS | node_id, unit_directory_addr,
+ &unit_directory_length)) {
+		SBP2_DEBUG("sbp2: Error reading unit directory length - bad status");
+ return(-EIO);
+ }
+
+ current_addr = unit_directory_addr;
+ length = be32_to_cpu(unit_directory_length) >> 16;
+
+ /*
+ * Now, step through the unit directory and look for the unit_spec_ID and the unit_sw_version
+ */
+ for (i=0; i < length; i++) {
+
+ current_addr += 4;
+
+		if (sbp2util_read_quadlet(hi, LOCAL_BUS | node_id, current_addr, &current_quadlet)) {
+ SBP2_DEBUG("sbp2: Error reading at address 0x%08x%08x - bad status",
+ (unsigned int) ((current_addr) >> 32), (unsigned int) ((current_addr) & 0xffffffff));
+ return(-EIO);
+ }
+
+ /*
+ * Check for unit_spec_ID tag
+ */
+ if ((be32_to_cpu(current_quadlet) >> 24) == SBP2_UNIT_SPEC_ID_KEY) {
+ unit_spec_id_data = current_quadlet;
+ SBP2_DEBUG("sbp2: Node %x, unit spec id = %x", (LOCAL_BUS | node_id),
+ (unsigned int) be32_to_cpu(unit_spec_id_data));
+ }
+
+ /*
+ * Check for unit_sw_version tag
+ */
+ if ((be32_to_cpu(current_quadlet) >> 24) == SBP2_UNIT_SW_VERSION_KEY) {
+ unit_sw_ver_data = current_quadlet;
+ SBP2_DEBUG("sbp2: Node %x, unit sw version = %x", (LOCAL_BUS | node_id),
+ (unsigned int) be32_to_cpu(unit_sw_ver_data));
+ }
+ }
+
+ /*
+ * Validate unit spec id and unit sw ver to see if this is an SBP-2 device
+ */
+ if ((be32_to_cpu(unit_spec_id_data) != SBP2_UNIT_SPEC_ID_ENTRY) ||
+ (be32_to_cpu(unit_sw_ver_data) != SBP2_SW_VERSION_ENTRY)) {
+
+ /*
+ * Not an sbp2 device
+ */
+ return(-ENXIO);
+ }
+
+ /*
+ * This device is a valid SBP-2 device
+ */
+ SBP2_INFO("sbp2: Node 0x%04x, Found SBP-2 device", (LOCAL_BUS | node_id));
+ return(0);
+}
+
+/*
+ * This function removes (cleans-up after) any unvalidated sbp2 devices
+ */
+static void sbp2_remove_unvalidated_devices(struct sbp2scsi_host_info *hi)
+{
+ int i;
+
+ /*
+ * Loop through and free any unvalidated scsi id instance data structures
+ */
+ for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
+ if (hi->scsi_id[i]) {
+ if (!hi->scsi_id[i]->validated) {
+
+ /*
+ * Complete any pending commands with selection timeout
+ */
+ sbp2scsi_complete_all_commands(hi, hi->scsi_id[i], DID_NO_CONNECT);
+
+ /*
+ * Clean up any other structures
+ */
+ if (hi->scsi_id[i]->sbp2_total_command_orbs) {
+ sbp2util_remove_command_orb_pool(hi->scsi_id[i], hi);
+ }
+ if (hi->scsi_id[i]->login_response) {
+ pci_free_consistent(hi->host->pdev,
+ sizeof(struct sbp2_login_response),
+ hi->scsi_id[i]->login_response,
+ hi->scsi_id[i]->login_response_dma);
+ SBP2_DMA_FREE("single login FIFO");
+ }
+
+ if (hi->scsi_id[i]->login_orb) {
+ pci_free_consistent(hi->host->pdev,
+ sizeof(struct sbp2_login_orb),
+ hi->scsi_id[i]->login_orb,
+ hi->scsi_id[i]->login_orb_dma);
+ SBP2_DMA_FREE("single login ORB");
+ }
+
+ if (hi->scsi_id[i]->reconnect_orb) {
+ pci_free_consistent(hi->host->pdev,
+ sizeof(struct sbp2_reconnect_orb),
+ hi->scsi_id[i]->reconnect_orb,
+ hi->scsi_id[i]->reconnect_orb_dma);
+ SBP2_DMA_FREE("single reconnect orb");
+ }
+
+ if (hi->scsi_id[i]->logout_orb) {
+ pci_free_consistent(hi->host->pdev,
+ sizeof(struct sbp2_logout_orb),
+ hi->scsi_id[i]->logout_orb,
+							    hi->scsi_id[i]->logout_orb_dma);
+ SBP2_DMA_FREE("single logout orb");
+ }
+
+ kfree(hi->scsi_id[i]);
+ hi->scsi_id[i] = NULL;
+ SBP2_DEBUG("sbp2: Unvalidated SBP-2 device removed, SCSI ID = %x", (unsigned int) i);
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+ * This function is our reset handler. It is run out of a thread, since we get
+ * notified of a bus reset from a bh (or interrupt).
+ */
+static void sbp2_bus_reset_handler(void *context)
+{
+ struct sbp2scsi_host_info *hi = context;
+ quadlet_t signature_data;
+ int i;
+ unsigned long flags;
+ struct scsi_id_instance_data *scsi_id;
+
+ SBP2_DEBUG("sbp2: sbp2_bus_reset_handler");
+
+ /*
+ * TODO. Check and keep track of generation number of all requests, in case a
+ * bus reset occurs while trying to find and login to SBP-2 devices.
+ */
+
+ /*
+ * First thing to do. Invalidate all SBP-2 devices. This is needed so that
+ * we stop sending down I/O requests to the device, and also so that we can
+ * figure out which devices have disappeared after a bus reset.
+ */
+ for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
+ if (hi->scsi_id[i]) {
+ hi->scsi_id[i]->validated = 0;
+ }
+ }
+
+ /*
+ * Give the sbp2 devices a little time to recover after the bus reset
+ */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(HZ/2); /* 1/2 second delay */
+
+ /*
+ * Spit out what we know from the host
+ */
+ SBP2_DEBUG("host: node_count = %x", (unsigned int) hi->host->node_count);
+ SBP2_DEBUG("host: selfid_count = %x", (unsigned int) hi->host->selfid_count);
+ SBP2_DEBUG("host: node_id = %x", (unsigned int) hi->host->node_id);
+ SBP2_DEBUG("host: irm_id = %x", (unsigned int) hi->host->irm_id);
+ SBP2_DEBUG("host: busmgr_id = %x", (unsigned int) hi->host->busmgr_id);
+ SBP2_DEBUG("host: is_root = %x", (unsigned int) hi->host->is_root);
+ SBP2_DEBUG("host: is_cycmst = %x", (unsigned int) hi->host->is_cycmst);
+ SBP2_DEBUG("host: is_irm = %x", (unsigned int) hi->host->is_irm);
+ SBP2_DEBUG("host: is_busmgr = %x", (unsigned int) hi->host->is_busmgr);
+
+ /*
+ * Let's try and figure out which devices out there are SBP-2 devices! Loop through all
+ * nodes out there.
+ */
+ for (i=0; i<hi->host->node_count; i++) {
+
+ /*
+ * Don't read from ourselves!
+ */
+ if (i != ((hi->host->node_id) & NODE_MASK)) {
+
+ /*
+ * Try and send a request for a config rom signature. This is expected to fail for
+ * some nodes, as they might be repeater phys or not be initialized.
+ */
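+			/*
+			 * (The signature checked below is the bus info block magic -
+			 * the ASCII string "1394", i.e. 0x31333934 - which is
+			 * presumably what IEEE1394_CONFIG_ROM_SIGNATURE expands to.)
+			 */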
+ if (!sbp2util_read_quadlet(hi, LOCAL_BUS | i, CONFIG_ROM_SIGNATURE_ADDRESS, &signature_data)) {
+
+ if (be32_to_cpu(signature_data) == IEEE1394_CONFIG_ROM_SIGNATURE) {
+
+ /*
+ * Hey, we've got a valid responding IEEE1394 node. Need to now see if it's an SBP-2 device
+ */
+ if (!sbp2_check_device(hi, i)) {
+
+ /*
+ * Found an SBP-2 device. Now, actually start the device.
+ */
+ sbp2_start_device(hi, i);
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ * This code needs protection
+ */
+ sbp2_spin_lock(&hi->sbp2_command_lock, flags);
+
+ /*
+ * Ok, we've discovered and re-validated all SBP-2 devices out there. Let's remove structures of all
+ * devices not re-validated (meaning they've been removed).
+ */
+ sbp2_remove_unvalidated_devices(hi);
+
+ /*
+ * Complete any pending commands with busy (so they get retried) and remove them from our queue
+ */
+ for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
+ if (hi->scsi_id[i]) {
+ sbp2scsi_complete_all_commands(hi, hi->scsi_id[i], DID_BUS_BUSY);
+ }
+ }
+
+ /*
+ * Now, note that the bus reset is complete (finally!)
+ */
+ hi->bus_reset_in_progress = 0;
+
+ /*
+ * Deal with the initial scsi bus scan if needed (since we only now know if there are
+ * any sbp2 devices attached)
+ */
+ if (!no_bus_scan && !hi->initial_scsi_bus_scan_complete && hi->bus_scan_SCpnt) {
+
+ hi->initial_scsi_bus_scan_complete = 1;
+ scsi_id = hi->scsi_id[hi->bus_scan_SCpnt->target];
+
+ /*
+ * If the sbp2 device exists, then let's now execute the command.
+ * If not, then just complete it as a selection time-out.
+ */
+ if (scsi_id) {
+ if (sbp2_send_command(hi, scsi_id, hi->bus_scan_SCpnt, hi->bus_scan_done)) {
+ SBP2_ERR("sbp2: Error sending SCSI command");
+ sbp2scsi_complete_command(hi, scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
+ hi->bus_scan_SCpnt, hi->bus_scan_done);
+ }
+ } else {
+ void (*done)(Scsi_Cmnd *) = hi->bus_scan_done;
+ hi->bus_scan_SCpnt->result = DID_NO_CONNECT << 16;
+ done (hi->bus_scan_SCpnt);
+ }
+ }
+
+ sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
+
+ return;
+}
+
+
+/*
+ * This is called from the host's bh when a bus reset is complete. We wake up our detection thread
+ * to deal with the reset.
+ */
+static void sbp2_host_reset(struct hpsb_host *host)
+{
+ unsigned long flags;
+ struct sbp2scsi_host_info *hi;
+ int i;
+
+ SBP2_INFO("sbp2: IEEE-1394 bus reset");
+ sbp2_spin_lock(&sbp2_host_info_lock, flags);
+ hi = sbp2_find_host_info(host);
+
+ if (hi != NULL) {
+
+ /*
+ * Wake up our detection thread, only if it's not already handling a reset
+ */
+ if (!hi->bus_reset_in_progress) {
+
+ /*
+ * First thing to do. Invalidate all SBP-2 devices. This is needed so that
+ * we stop sending down I/O requests to the device, and also so that we can
+ * figure out which devices have disappeared after a bus reset.
+ */
+ for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
+ if (hi->scsi_id[i]) {
+ hi->scsi_id[i]->validated = 0;
+ }
+ }
+
+ hi->bus_reset_in_progress = 1;
+
+ wake_up(&hi->sbp2_detection_wait);
+ }
+ }
+ sbp2_spin_unlock(&sbp2_host_info_lock, flags);
+ return;
+}
+
+/* XXX: How best to handle these with DMA interface? */
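+/*
+ * (bus_to_virt() as used below only works for directly mapped low memory,
+ * which is presumably part of why this fallback stays compiled out until a
+ * proper DMA-mapping based approach exists.)
+ */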
+
+#if 0
+/*
+ * This function deals with physical dma write requests (for adapters that do not support
+ * physical dma in hardware).
+ */
+static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, quadlet_t *data,
+ u64 addr, unsigned int length)
+{
+
+ /*
+ * Manually put the data in the right place.
+ */
+ memcpy(bus_to_virt((u32)addr), data, length);
+ return(RCODE_COMPLETE);
+}
+
+/*
+ * This function deals with physical dma read requests (for adapters that do not support
+ * physical dma in hardware).
+ */
+static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
+ u64 addr, unsigned int length)
+{
+
+ /*
+ * Grab data from memory and send a read response.
+ */
+ memcpy(data, bus_to_virt((u32)addr), length);
+ return(RCODE_COMPLETE);
+}
+#endif
+
+/**************************************
+ * SBP-2 protocol related section
+ **************************************/
+
+/*
+ * This function is called in order to login to a particular SBP-2 device, after a bus reset
+ */
+static int sbp2_login_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id)
+{
+ quadlet_t data[2];
+ unsigned long flags;
+
+ SBP2_DEBUG("sbp2: sbp2_login_device");
+
+ if (!scsi_id->login_orb) {
+ SBP2_DEBUG("sbp2: sbp2_login_device: login_orb not alloc'd!");
+ return(-EIO);
+ }
+
+ /*
+ * Set-up login ORB
+ */
+ scsi_id->login_orb->password_hi = 0; /* Assume no password */
+ scsi_id->login_orb->password_lo = 0;
+ SBP2_DEBUG("sbp2: sbp2_login_device: password_hi/lo initialized");
+#ifdef SBP2_NEED_LOGIN_DESCRIPTOR_WORKAROUND
+ scsi_id->login_orb->login_response_lo = cpu_to_le32(scsi_id->login_response_dma);
+ scsi_id->login_orb->login_response_hi = cpu_to_le32(ORB_SET_NODE_ID(hi->host->node_id));
+#else
+ scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma;
+ scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
+#endif
+ SBP2_DEBUG("sbp2: sbp2_login_device: login_response_hi/lo initialized");
+ scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(LOGIN_REQUEST);
+ scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
+ scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(1); /* Exclusive access to device */
+ scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
+ SBP2_DEBUG("sbp2: sbp2_login_device: lun_misc initialized");
+ scsi_id->login_orb->passwd_resp_lengths = ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
+ SBP2_DEBUG("sbp2: sbp2_login_device: passwd_resp_lengths initialized");
+#ifdef SBP2_NEED_LOGIN_DESCRIPTOR_WORKAROUND
+ scsi_id->login_orb->status_FIFO_lo = cpu_to_le32((u32)SBP2_STATUS_FIFO_ADDRESS_LO);
+ scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | cpu_to_le16(SBP2_STATUS_FIFO_ADDRESS_HI));
+#else
+ scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO;
+ scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
+#endif
+ SBP2_DEBUG("sbp2: sbp2_login_device: status FIFO initialized");
+
+ /*
+ * Byte swap ORB if necessary
+ */
+ sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb));
+
+ SBP2_DEBUG("sbp2: sbp2_login_device: orb byte-swapped");
+
+ /*
+ * Initialize login response and status fifo
+ */
+ memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
+ memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+
+ SBP2_DEBUG("sbp2: sbp2_login_device: login_response/status FIFO memset");
+
+ /*
+ * Ok, let's write to the target's management agent register
+ */
+ data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+ data[1] = scsi_id->login_orb_dma;
+ sbp2util_cpu_to_be32_buffer(data, 8);
+
+ SBP2_DEBUG("sbp2: sbp2_login_device: prepared to write");
+
+ hpsb_write(hi->host, LOCAL_BUS | scsi_id->node_id, scsi_id->sbp2_management_agent_addr, data, 8);
+
+ /*
+ * Wait for login status... but, only if the device has not already logged-in (some devices are fast)
+ */
+
+ SBP2_DEBUG("sbp2: sbp2_login_device: written");
+ save_flags(flags);
+ cli();
+ if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
+ interruptible_sleep_on_timeout(&scsi_id->sbp2_login_wait, 10*HZ); /* 10 second timeout */
+ }
+ restore_flags(flags);
+
+ SBP2_DEBUG("sbp2: sbp2_login_device: initial check");
+
+ /*
+ * Match status to the login orb. If they do not match, it's probably because the login timed-out
+ */
+ if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
+ SBP2_ERR("sbp2: Error logging into SBP-2 device - login timed-out");
+ return(-EIO);
+ }
+
+ SBP2_DEBUG("sbp2: sbp2_login_device: second check");
+
+ /*
+ * Check status
+ */
+ if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
+ STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
+ STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
+
+ SBP2_ERR("sbp2: Error logging into SBP-2 device - login failed");
+ return(-EIO);
+ }
+
+ /*
+ * Byte swap the login response, for use when reconnecting or logging out.
+ */
+ sbp2util_cpu_to_be32_buffer(scsi_id->login_response, sizeof(struct sbp2_login_response));
+
+ /*
+ * Grab our command block agent address from the login response
+ */
+ SBP2_DEBUG("sbp2: command_block_agent_hi = %x", (unsigned int)scsi_id->login_response->command_block_agent_hi);
+ SBP2_DEBUG("sbp2: command_block_agent_lo = %x", (unsigned int)scsi_id->login_response->command_block_agent_lo);
+
+ scsi_id->sbp2_command_block_agent_addr = ((u64)scsi_id->login_response->command_block_agent_hi) << 32;
+ scsi_id->sbp2_command_block_agent_addr |= ((u64)scsi_id->login_response->command_block_agent_lo);
+ scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL;
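+	/*
+	 * (The mask above keeps only the 48-bit CSR offset part of the address;
+	 * the 16 node ID bits carried in the upper half of the hi quadlet are
+	 * supplied separately whenever we address the target.)
+	 */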
+
+ SBP2_INFO("sbp2: Logged into SBP-2 device");
+
+ return(0);
+
+}
+
+/*
+ * This function is called in order to logout from a particular SBP-2 device, usually called during driver
+ * unload
+ */
+static int sbp2_logout_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id)
+{
+ quadlet_t data[2];
+
+ SBP2_DEBUG("sbp2: sbp2_logout_device");
+
+ /*
+ * Set-up logout ORB
+ */
+ scsi_id->logout_orb->reserved1 = 0x0;
+ scsi_id->logout_orb->reserved2 = 0x0;
+ scsi_id->logout_orb->reserved3 = 0x0;
+ scsi_id->logout_orb->reserved4 = 0x0;
+ scsi_id->logout_orb->login_ID_misc = ORB_SET_FUNCTION(LOGOUT_REQUEST);
+ scsi_id->logout_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
+ scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
+ scsi_id->logout_orb->reserved5 = 0x0;
+#ifdef SBP2_NEED_LOGIN_DESCRIPTOR_WORKAROUND
+ scsi_id->logout_orb->status_FIFO_lo = cpu_to_le32((u32)SBP2_STATUS_FIFO_ADDRESS_LO);
+ scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | cpu_to_le16(SBP2_STATUS_FIFO_ADDRESS_HI));
+#else
+ scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO;
+ scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
+#endif
+
+ /*
+ * Byte swap ORB if necessary
+ */
+ sbp2util_cpu_to_be32_buffer(scsi_id->logout_orb, sizeof(struct sbp2_logout_orb));
+
+ /*
+ * Ok, let's write to the target's management agent register
+ */
+ data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+ data[1] = scsi_id->logout_orb_dma;
+ sbp2util_cpu_to_be32_buffer(data, 8);
+
+ hpsb_write(hi->host, LOCAL_BUS | scsi_id->node_id, scsi_id->sbp2_management_agent_addr, data, 8);
+
+ /*
+ * Wait for device to logout...
+ */
+ interruptible_sleep_on_timeout(&scsi_id->sbp2_login_wait, HZ); /* 1 second timeout */
+
+ SBP2_INFO("sbp2: Logged out of SBP-2 device");
+
+ return(0);
+
+}
+
+/*
+ * This function is called in order to reconnect to a particular SBP-2 device, after a bus reset
+ */
+static int sbp2_reconnect_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id)
+{
+ quadlet_t data[2];
+ unsigned long flags;
+
+ SBP2_DEBUG("sbp2: sbp2_reconnect_device");
+
+ /*
+ * Set-up reconnect ORB
+ */
+ scsi_id->reconnect_orb->reserved1 = 0x0;
+ scsi_id->reconnect_orb->reserved2 = 0x0;
+ scsi_id->reconnect_orb->reserved3 = 0x0;
+ scsi_id->reconnect_orb->reserved4 = 0x0;
+ scsi_id->reconnect_orb->login_ID_misc = ORB_SET_FUNCTION(RECONNECT_REQUEST);
+ scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_LOGIN_ID(scsi_id->login_response->length_login_ID);
+ scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
+ scsi_id->reconnect_orb->reserved5 = 0x0;
+#ifdef SBP2_NEED_LOGIN_DESCRIPTOR_WORKAROUND
+ scsi_id->reconnect_orb->status_FIFO_lo = cpu_to_le32((u32)SBP2_STATUS_FIFO_ADDRESS_LO);
+ scsi_id->reconnect_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | cpu_to_le16(SBP2_STATUS_FIFO_ADDRESS_HI));
+#else
+ scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO;
+ scsi_id->reconnect_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
+#endif
+
+ /*
+ * Byte swap ORB if necessary
+ */
+ sbp2util_cpu_to_be32_buffer(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb));
+
+ /*
+ * Initialize status fifo
+ */
+ memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+
+ /*
+ * Ok, let's write to the target's management agent register
+ */
+ data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+ data[1] = scsi_id->reconnect_orb_dma;
+ sbp2util_cpu_to_be32_buffer(data, 8);
+
+ hpsb_write(hi->host, LOCAL_BUS | scsi_id->node_id, scsi_id->sbp2_management_agent_addr, data, 8);
+
+ /*
+ * Wait for reconnect status... but, only if the device has not already reconnected (some devices are fast)
+ */
+ save_flags(flags);
+ cli();
+ if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
+ interruptible_sleep_on_timeout(&scsi_id->sbp2_login_wait, HZ); /* one second timeout */
+ }
+ restore_flags(flags);
+
+ /*
+ * Match status to the reconnect orb. If they do not match, it's probably because the reconnect timed-out
+ */
+ if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
+ SBP2_ERR("sbp2: Error reconnecting to SBP-2 device - reconnect timed-out");
+ return(-EIO);
+ }
+
+ /*
+ * Check status
+ */
+ if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
+ STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) ||
+ STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
+
+ SBP2_ERR("sbp2: Error reconnecting to SBP-2 device - reconnect failed");
+ return(-EIO);
+ }
+
+ SBP2_INFO("sbp2: Reconnected to SBP-2 device");
+
+ return(0);
+
+}
+
+/*
+ * This function is called in order to set the busy timeout (number of retries to attempt) on the sbp2 device.
+ */
+static int sbp2_set_busy_timeout(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id)
+{
+ quadlet_t data;
+
+ SBP2_DEBUG("sbp2: sbp2_set_busy_timeout");
+
+ /*
+ * Ok, let's write to the target's busy timeout register
+ */
+ data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
+
+ if (hpsb_write(hi->host, LOCAL_BUS | scsi_id->node_id, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) {
+ SBP2_ERR("sbp2: sbp2_set_busy_timeout error");
+ }
+
+ return(0);
+}
+
+/*
+ * This function is called to parse an sbp2 device's config rom unit directory. Used to determine
+ * things like sbp2 management agent offset, and command set used (SCSI or RBC).
+ */
+static int sbp2_parse_unit_directory(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id)
+{
+ quadlet_t unit_directory_length, unit_directory_data;
+ u64 unit_directory_addr;
+ u32 i;
+
+ SBP2_DEBUG("sbp2: sbp2_parse_unit_directory");
+
+ if (sbp2util_unit_directory(hi, LOCAL_BUS | scsi_id->node_id, &unit_directory_addr)) {
+ SBP2_DEBUG("sbp2: Error reading unit directory address - bad status");
+ return(-EIO);
+ }
+
+ /*
+ * Read the size of the unit directory
+ */
+ if (sbp2util_read_quadlet(hi, LOCAL_BUS | scsi_id->node_id, unit_directory_addr,
+ &unit_directory_length)) {
+ SBP2_DEBUG("sbp2: Error reading unit directory length - bad status");
+ return(-EIO);
+ }
+
+ unit_directory_length = ((be32_to_cpu(unit_directory_length)) >> 16);
+
+ /*
+ * Now, sweep through the unit directory looking for the management agent offset
+ * Give up if we hit any error or somehow miss it...
+ */
+ for (i=0; i<unit_directory_length; i++) {
+
+ if (sbp2util_read_quadlet(hi, LOCAL_BUS | scsi_id->node_id, unit_directory_addr + (i<<2) + 4,
+ &unit_directory_data)) {
+ SBP2_DEBUG("sbp2: Error reading unit directory - bad status");
+ return(-EIO);
+ }
+
+ /*
+ * Handle different fields in the unit directory, based on keys
+ */
+ unit_directory_data = be32_to_cpu(unit_directory_data);
+ switch (unit_directory_data >> 24) {
+
+ case SBP2_CSR_OFFSET_KEY:
+
+ /*
+ * Save off the management agent address
+ */
+ scsi_id->sbp2_management_agent_addr = CONFIG_ROM_INITIAL_MEMORY_SPACE +
+ ((unit_directory_data & 0x00ffffff) << 2);
+
+ SBP2_DEBUG("sbp2: sbp2_management_agent_addr = %x", (unsigned int) scsi_id->sbp2_management_agent_addr);
+ break;
+
+ case SBP2_COMMAND_SET_SPEC_ID_KEY:
+
+ /*
+ * Command spec organization
+ */
+ scsi_id->sbp2_command_set_spec_id = unit_directory_data & 0xffffff;
+ SBP2_DEBUG("sbp2: sbp2_command_set_spec_id = %x", (unsigned int) scsi_id->sbp2_command_set_spec_id);
+ break;
+
+ case SBP2_COMMAND_SET_KEY:
+
+ /*
+ * Command set used by sbp2 device
+ */
+ scsi_id->sbp2_command_set = unit_directory_data & 0xffffff;
+ SBP2_DEBUG("sbp2: sbp2_command_set = %x", (unsigned int) scsi_id->sbp2_command_set);
+ break;
+
+ case SBP2_UNIT_CHARACTERISTICS_KEY:
+
+ /*
+			 * Unit characteristics (orb related stuff that I'm not yet paying attention to)
+ */
+ scsi_id->sbp2_unit_characteristics = unit_directory_data & 0xffffff;
+ SBP2_DEBUG("sbp2: sbp2_unit_characteristics = %x", (unsigned int) scsi_id->sbp2_unit_characteristics);
+ break;
+
+ case SBP2_DEVICE_TYPE_AND_LUN_KEY:
+
+ /*
+			 * Device type and lun (used for determining type of sbp2 device)
+ */
+ scsi_id->sbp2_device_type_and_lun = unit_directory_data & 0xffffff;
+ SBP2_DEBUG("sbp2: sbp2_device_type_and_lun = %x", (unsigned int) scsi_id->sbp2_device_type_and_lun);
+ break;
+
+ case SBP2_UNIT_SPEC_ID_KEY:
+
+ /*
+ * Unit spec id (used for protocol detection)
+ */
+ scsi_id->sbp2_unit_spec_id = unit_directory_data & 0xffffff;
+ SBP2_DEBUG("sbp2: sbp2_unit_spec_id = %x", (unsigned int) scsi_id->sbp2_unit_spec_id);
+ break;
+
+ case SBP2_UNIT_SW_VERSION_KEY:
+
+ /*
+ * Unit sw version (used for protocol detection)
+ */
+ scsi_id->sbp2_unit_sw_version = unit_directory_data & 0xffffff;
+ SBP2_DEBUG("sbp2: sbp2_unit_sw_version = %x", (unsigned int) scsi_id->sbp2_unit_sw_version);
+ break;
+
+ case SBP2_FIRMWARE_REVISION_KEY:
+
+ /*
+ * Firmware revision (used to find broken devices). If the vendor id is 0xa0b8
+ * (Symbios vendor id), then we have a bridge with 128KB max transfer size limitation.
+ */
+ scsi_id->sbp2_firmware_revision = unit_directory_data & 0xffff00;
+ if (scsi_id->sbp2_firmware_revision == SBP2_128KB_BROKEN_FIRMWARE) {
+ SBP2_WARN("sbp2: warning: Bridge chipset supports 128KB max transfer size");
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ return(0);
+}
+
+/*
+ * This function is called in order to determine the max speed and packet size we can use in our ORBs.
+ */
+static int sbp2_max_speed_and_size(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id)
+{
+ quadlet_t node_options, max_rec;
+ u8 speed_code;
+
+ SBP2_DEBUG("sbp2: sbp2_max_speed_and_size");
+
+ /*
+ * Get speed code from internal host structure. There should be a better way to obtain this.
+ */
+ speed_code = hi->host->speed_map[(hi->host->node_id & NODE_MASK) * 64 + (scsi_id->node_id & NODE_MASK)];
+
+ /*
+ * Bump down our speed if there is a module parameter forcing us slower
+ */
+ if (speed_code > max_speed) {
+ speed_code = max_speed;
+ SBP2_ERR("sbp2: Reducing SBP-2 max speed allowed (%x)", max_speed);
+ }
+
+ switch (speed_code) {
+ case SPEED_S100:
+ scsi_id->speed_code = SPEED_S100;
+ scsi_id->max_payload_size = MAX_PAYLOAD_S100;
+ SBP2_INFO("sbp2: SBP-2 device max speed S100 and payload 512 bytes");
+ break;
+ case SPEED_S200:
+ scsi_id->speed_code = SPEED_S200;
+ scsi_id->max_payload_size = MAX_PAYLOAD_S200;
+ SBP2_INFO("sbp2: SBP-2 device max speed S200 and payload 1KB");
+ break;
+ case SPEED_S400:
+ scsi_id->speed_code = SPEED_S400;
+ scsi_id->max_payload_size = MAX_PAYLOAD_S400;
+ SBP2_INFO("sbp2: SBP-2 device max speed S400 and payload 2KB");
+ break;
+ default:
+ scsi_id->speed_code = SPEED_S100;
+ scsi_id->max_payload_size = MAX_PAYLOAD_S100;
+ SBP2_ERR("sbp2: Undefined speed: Using SBP-2 device max speed S100 and payload 512 bytes");
+ break;
+ }
+
+ /*
+ * Finally, check the adapter's capabilities to further bump down our max payload size
+ * if necessary. For instance, TILynx may not support the default max payload at a
+ * particular speed.
+ */
+ if (!hpsb_read(hi->host, hi->host->node_id | LOCAL_BUS, CONFIG_ROM_NODE_OPTIONS, &node_options, 4)) {
+
+ /*
+ * Grab max_rec (max payload = 2 ^ (max_rec+1)) from node options. Sbp2 max payload is
+ * defined as 2 ^ (max_pay+2)... so, have to subtract one from max rec for comparison...
+ * confusing, eh? ;-)
+ */
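+		/*
+		 * Worked example (illustrative numbers only): a node advertising
+		 * max_rec = 8 accepts 2^(8+1) = 512 byte payloads, which matches
+		 * an SBP-2 max_payload code of 7, since 2^(7+2) = 512 as well -
+		 * hence the "max_rec - 1" comparison below.
+		 */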
+ max_rec = (be32_to_cpu(node_options) & 0x0000f000) >> 12;
+ if (scsi_id->max_payload_size > (max_rec - 1)) {
+ scsi_id->max_payload_size = (max_rec - 1);
+ SBP2_ERR("sbp2: Reducing SBP-2 max payload allowed (%x)", (max_rec - 1));
+ }
+
+ }
+
+ return(0);
+}
+
+/*
+ * This function is called in order to perform a SBP-2 agent reset.
+ */
+static int sbp2_agent_reset(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id, u32 flags)
+{
+ struct sbp2_request_packet *agent_reset_request_packet;
+
+ SBP2_DEBUG("sbp2: sbp2_agent_reset");
+
+ /*
+ * Ok, let's write to the target's management agent register
+ */
+ agent_reset_request_packet = sbp2util_allocate_write_request_packet(hi, LOCAL_BUS | scsi_id->node_id,
+ scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET,
+ 0, ntohl(SBP2_AGENT_RESET_DATA));
+
+ if (!agent_reset_request_packet) {
+ SBP2_ERR("sbp2: sbp2util_allocate_write_request_packet failed");
+ return(-EIO);
+ }
+
+ if (!hpsb_send_packet(agent_reset_request_packet->packet)) {
+ SBP2_ERR("sbp2: hpsb_send_packet failed");
+ sbp2util_free_request_packet(agent_reset_request_packet);
+ return(-EIO);
+ }
+
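+	/*
+	 * Unless the caller asked for a no-wait reset, wait for the write to
+	 * finish. The two down() calls below assume the 1394 core ups the
+	 * packet's state_change semaphore once per state transition (first
+	 * when the packet is sent, then again when it completes).
+	 */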
+ if (!(flags & SBP2_SEND_NO_WAIT)) {
+ down(&agent_reset_request_packet->packet->state_change);
+ down(&agent_reset_request_packet->packet->state_change);
+ }
+
+ /*
+ * Need to make sure orb pointer is written on next command
+ */
+ scsi_id->last_orb = NULL;
+
+ return(0);
+
+}
+
+/*
+ * This function is called to create the actual command orb and s/g list out of the
+ * scsi command itself.
+ */
+static int sbp2_create_command_orb(struct sbp2scsi_host_info *hi,
+ struct scsi_id_instance_data *scsi_id,
+ struct sbp2_command_info *command,
+ unchar *scsi_cmd,
+ unsigned int scsi_use_sg,
+ unsigned int scsi_request_bufflen,
+ void *scsi_request_buffer, int dma_dir)
+{
+ struct scatterlist *sgpnt = (struct scatterlist *) scsi_request_buffer;
+ struct sbp2_command_orb *command_orb = &command->command_orb;
+ struct sbp2_unrestricted_page_table *scatter_gather_element =
+ &command->scatter_gather_element[0];
+ u32 sg_count, sg_len;
+ dma_addr_t sg_addr;
+ int i;
+
+ /*
+ * Set-up our command ORB..
+ *
+ * NOTE: We're doing unrestricted page tables (s/g), as this is best performance
+ * (at least with the devices I have). This means that data_size becomes the number
+ * of s/g elements, and page_size should be zero (for unrestricted).
+ */
+ command_orb->next_ORB_hi = 0xffffffff;
+ command_orb->next_ORB_lo = 0xffffffff;
+ command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
+ command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
+ command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
+
+ /*
+ * Set-up our pagetable stuff... unfortunately, this has become messier than I'd like. Need to
+ * clean this up a bit. ;-)
+ */
+ if (sbp2scsi_direction_table[*scsi_cmd] == ORB_DIRECTION_NO_DATA_TRANSFER) {
+
+ SBP2_DEBUG("sbp2: No data transfer");
+
+ /*
+ * Handle no data transfer
+ */
+ command_orb->data_descriptor_hi = 0xffffffff;
+ command_orb->data_descriptor_lo = 0xffffffff;
+ command_orb->misc |= ORB_SET_DIRECTION(1);
+
+ } else if (scsi_use_sg) {
+
+ SBP2_DEBUG("sbp2: Use scatter/gather");
+
+ /*
+ * Special case if only one element (and less than 64KB in size)
+ */
+ if ((scsi_use_sg == 1) && (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
+
+ SBP2_DEBUG("sbp2: Only one s/g element");
+ command->dma_dir = dma_dir;
+ command->dma_size = sgpnt[0].length;
+ command->cmd_dma = pci_map_single (hi->host->pdev, sgpnt[0].address,
+ command->dma_size,
+ command->dma_dir);
+ SBP2_DMA_ALLOC("single scatter element");
+
+ command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+ command_orb->data_descriptor_lo = command->cmd_dma;
+ command_orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
+ command_orb->misc |= ORB_SET_DIRECTION(sbp2scsi_direction_table[*scsi_cmd]);
+
+ } else {
+ int count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, dma_dir);
+ SBP2_DMA_ALLOC("scatter list");
+
+ command->dma_size = scsi_use_sg;
+ command->dma_dir = dma_dir;
+ command->sge_buffer = sgpnt;
+
+ /* use page tables (s/g) */
+ command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
+ command_orb->misc |= ORB_SET_DIRECTION(sbp2scsi_direction_table[*scsi_cmd]);
+ command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+ command_orb->data_descriptor_lo = command->sge_dma;
+
+ /*
+ * Loop through and fill out our sbp-2 page tables
+ * (and split up anything too large)
+ */
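+			/*
+			 * (For instance, a single 150KB s/g segment would be split
+			 * into three page table elements here, assuming
+			 * SBP2_MAX_SG_ELEMENT_LENGTH is in the 64KB range implied by
+			 * the comment further up.)
+			 */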
+ for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
+ sg_len = sg_dma_len(sgpnt);
+ sg_addr = sg_dma_address(sgpnt);
+ while (sg_len) {
+ scatter_gather_element[sg_count].segment_base_lo = sg_addr;
+ if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
+ scatter_gather_element[sg_count].length_segment_base_hi =
+ PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
+ sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
+ sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
+ } else {
+ scatter_gather_element[sg_count].length_segment_base_hi =
+ PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
+ sg_len = 0;
+ }
+ sg_count++;
+ }
+ }
+
+ command_orb->misc |= ORB_SET_DATA_SIZE(sg_count); /* number of page table (s/g) elements */
+
+ /*
+ * Byte swap page tables if necessary
+ */
+ sbp2util_cpu_to_be32_buffer(scatter_gather_element,
+ (sizeof(struct sbp2_unrestricted_page_table)) * sg_count);
+
+ }
+
+ } else {
+
+ SBP2_DEBUG("sbp2: No scatter/gather");
+
+ command->dma_dir = dma_dir;
+ command->dma_size = scsi_request_bufflen;
+ command->cmd_dma = pci_map_single (hi->host->pdev, scsi_request_buffer,
+ command->dma_size,
+ command->dma_dir);
+ SBP2_DMA_ALLOC("single bulk");
+
+ /*
+ * Handle case where we get a command w/o s/g enabled (but check
+ * for transfers larger than 64K)
+ */
+ if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
+
+ command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+ command_orb->data_descriptor_lo = command->cmd_dma;
+ command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
+ command_orb->misc |= ORB_SET_DIRECTION(sbp2scsi_direction_table[*scsi_cmd]);
+
+ /*
+ * Sanity, in case our direction table is not up-to-date
+ */
+ if (!scsi_request_bufflen) {
+ command_orb->data_descriptor_hi = 0xffffffff;
+ command_orb->data_descriptor_lo = 0xffffffff;
+ command_orb->misc |= ORB_SET_DIRECTION(1);
+ }
+
+ } else {
+ /*
+ * Need to turn this into page tables, since the buffer is too large.
+ */
+ command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+ command_orb->data_descriptor_lo = command->sge_dma;
+ command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1); /* use page tables (s/g) */
+ command_orb->misc |= ORB_SET_DIRECTION(sbp2scsi_direction_table[*scsi_cmd]);
+
+ /*
+ * fill out our sbp-2 page tables (and split up the large buffer)
+ */
+ sg_count = 0;
+ sg_len = scsi_request_bufflen;
+ sg_addr = command->cmd_dma;
+ while (sg_len) {
+ scatter_gather_element[sg_count].segment_base_lo = sg_addr;
+ if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
+ scatter_gather_element[sg_count].length_segment_base_hi =
+ PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
+ sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
+ sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
+ } else {
+ scatter_gather_element[sg_count].length_segment_base_hi =
+ PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
+ sg_len = 0;
+ }
+ sg_count++;
+ }
+
+ command_orb->misc |= ORB_SET_DATA_SIZE(sg_count); /* number of page table (s/g) elements */
+
+ /*
+ * Byte swap page tables if necessary
+ */
+ sbp2util_cpu_to_be32_buffer(scatter_gather_element,
+ (sizeof(struct sbp2_unrestricted_page_table)) *
+ sg_count);
+
+ }
+
+ }
+
+ /*
+ * Byte swap command ORB if necessary
+ */
+ sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
+
+ /*
+ * Put our scsi command in the command ORB
+ */
+ memset(command_orb->cdb, 0, 12);
+ memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
+
+ return(0);
+}
+
+/*
+ * This function is called in order to hand a built command ORB off to the target's fetch agent:
+ * either by writing the ORB pointer register (SBP2_ORB_POINTER_OFFSET) for a fresh request, or by
+ * linking it onto the previously submitted ORB and ringing the doorbell (SBP2_DOORBELL_OFFSET).
+ */
+static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ struct sbp2_command_info *command)
+{
+ struct sbp2_request_packet *command_request_packet;
+ struct sbp2_command_orb *command_orb = &command->command_orb;
+
+ outstanding_orb_incr;
+ SBP2_ORB_DEBUG("sending command orb %p, linked = %x, total orbs = %x",
+ command_orb, command->linked, global_outstanding_command_orbs);
+
+ /*
+ * Check to see if there are any previous orbs to use
+ */
+ if (scsi_id->last_orb == NULL) {
+
+ /*
+ * Ok, let's write to the target's management agent register
+ */
+ if (!hi->bus_reset_in_progress) {
+
+ command_request_packet = sbp2util_allocate_write_request_packet(hi, LOCAL_BUS | scsi_id->node_id,
+ scsi_id->sbp2_command_block_agent_addr + SBP2_ORB_POINTER_OFFSET,
+ 8, 0);
+
+ if (!command_request_packet) {
+ SBP2_ERR("sbp2: sbp2util_allocate_write_request_packet failed");
+ return(-EIO);
+ }
+
+ command_request_packet->packet->data[0] = ORB_SET_NODE_ID(hi->host->node_id);
+ command_request_packet->packet->data[1] = command->command_orb_dma;
+ sbp2util_cpu_to_be32_buffer(command_request_packet->packet->data, 8);
+
+ SBP2_ORB_DEBUG("write command agent, command orb %p", command_orb);
+
+ if (!hpsb_send_packet(command_request_packet->packet)) {
+ SBP2_ERR("sbp2: hpsb_send_packet failed");
+ sbp2util_free_request_packet(command_request_packet);
+ return(-EIO);
+ }
+
+ SBP2_ORB_DEBUG("write command agent complete");
+ }
+
+ scsi_id->last_orb = command_orb;
+
+ } else {
+
+ /*
+ * We have an orb already sent (maybe or maybe not
+ * processed) that we can append this orb to. So do so,
+ * and ring the doorbell. Have to be very careful
+ * modifying these next orb pointers, as they are accessed
+ * both by the sbp2 device and us.
+ */
+ scsi_id->last_orb->next_ORB_lo = cpu_to_be32(command->command_orb_dma);
+ scsi_id->last_orb->next_ORB_hi = 0x0; /* Tells hardware that this pointer is valid */
+
+ /*
+ * Only ring the doorbell if we need to (first parts of linked orbs don't need this)
+ */
+ if (!command->linked && !hi->bus_reset_in_progress) {
+
+ command_request_packet = sbp2util_allocate_write_request_packet(hi,
+ LOCAL_BUS | scsi_id->node_id,
+ scsi_id->sbp2_command_block_agent_addr + SBP2_DOORBELL_OFFSET,
+ 0, cpu_to_be32(command->command_orb_dma));
+
+ if (!command_request_packet) {
+ SBP2_ERR("sbp2: sbp2util_allocate_write_request_packet failed");
+ return(-EIO);
+ }
+
+ SBP2_ORB_DEBUG("ring doorbell, command orb %p", command_orb);
+
+ if (!hpsb_send_packet(command_request_packet->packet)) {
+ SBP2_ERR("sbp2: hpsb_send_packet failed");
+ sbp2util_free_request_packet(command_request_packet);
+ return(-EIO);
+ }
+ }
+
+ scsi_id->last_orb = command_orb;
+
+ }
+ return(0);
+}
+
+/*
+ * This function is called in order to begin a regular SBP-2 command.
+ */
+static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ u32 device_type = (scsi_id->sbp2_device_type_and_lun & 0x00ff0000) >> 16;
+ struct sbp2_command_info *command;
+
+ SBP2_DEBUG("sbp2: sbp2_send_command");
+ SBP2_DEBUG("sbp2: SCSI command = %02x", *cmd);
+ SBP2_DEBUG("sbp2: SCSI transfer size = %x", SCpnt->request_bufflen);
+ SBP2_DEBUG("sbp2: SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
+
+ /*
+ * Check for broken devices that can't handle transfers larger than 128KB,
+ * and deal with them in a hacked, ugly way.
+ */
+ if ((scsi_id->sbp2_firmware_revision == SBP2_128KB_BROKEN_FIRMWARE) &&
+ (SCpnt->request_bufflen > SBP2_BROKEN_FIRMWARE_MAX_TRANSFER) &&
+ (device_type == TYPE_DISK) &&
+ (SCpnt->use_sg) &&
+ (*cmd == 0x28 || *cmd == 0x2a || *cmd == 0x0a || *cmd == 0x08)) {
+
+ /*
+ * Darn, a broken device. We'll need to split up the transfer ourselves
+ */
+ sbp2_send_split_command(hi, scsi_id, SCpnt, done);
+ return(0);
+ }
+
+ /*
+ * Allocate a command orb and s/g structure
+ */
+ command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done, hi);
+ if (!command) {
+ return(-EIO);
+ }
+
+ /*
+ * Now actually fill in the command orb and sbp2 s/g list
+ */
+ sbp2_create_command_orb(hi, scsi_id, command, cmd, SCpnt->use_sg,
+ SCpnt->request_bufflen, SCpnt->request_buffer,
+ scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
+
+ /*
+ * Update our cdb if necessary (to handle sbp2 RBC command set differences).
+ * This is where the command set hacks go! =)
+ */
+ if ((device_type == TYPE_DISK) ||
+ (device_type == TYPE_SDAD) ||
+ (device_type == TYPE_ROM)) {
+ sbp2_check_sbp2_command(command->command_orb.cdb);
+ }
+
+ /*
+ * Initialize status fifo
+ */
+ memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+
+ /*
+ * Link up the orb, and ring the doorbell if needed
+ */
+ sbp2_link_orb_command(hi, scsi_id, command);
+
+ return(0);
+}
+
+/*
+ * This function is called for broken sbp2 devices, where we have to break up large transfers.
+ */
+static int sbp2_send_split_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ unchar *cmd = (unchar *) SCpnt->cmnd;
+ struct scatterlist *sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+ struct sbp2_command_info *command;
+ unsigned int i, block_count, block_address, block_size;
+ unsigned int current_sg = 0;
+ unsigned int total_transfer = 0;
+ unsigned int total_sg = 0;
+ unchar new_cmd[12];
+
+ memset(new_cmd, 0, 12);
+ memcpy(new_cmd, cmd, COMMAND_SIZE(*cmd));
+
+ /*
+ * Turn the command into its 10-byte version
+ */
+ sbp2_check_sbp2_command(new_cmd);
+
+ /*
+ * Pull block size, block address, block count from command sent down
+ */
+ block_count = (cmd[7] << 8) | cmd[8];
+ block_address = (cmd[2] << 24) | (cmd[3] << 16) | (cmd[4] << 8) | cmd[5];
+ block_size = SCpnt->request_bufflen/block_count;
+
+ /*
+ * Walk the scsi s/g list to determine how much we can transfer in one pop
+ */
+ for (i=0; i<SCpnt->use_sg; i++) {
+
+ total_transfer+=sgpnt[i].length;
+ total_sg++;
+
+ if (total_transfer > SBP2_BROKEN_FIRMWARE_MAX_TRANSFER) {
+
+ /*
+ * Back everything up one, so that we're less than 128KB
+ */
+ total_transfer-=sgpnt[i].length;
+ total_sg--;
+ i--;
+
+ command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done, hi);
+ if (!command) {
+ return(-EIO);
+ }
+
+ /*
+ * This is not the final piece, so mark it as linked
+ */
+ command->linked = 1;
+
+ block_count = total_transfer/block_size;
+ new_cmd[2] = (unchar) (block_address >> 24) & 0xff;
+ new_cmd[3] = (unchar) (block_address >> 16) & 0xff;
+ new_cmd[4] = (unchar) (block_address >> 8) & 0xff;
+ new_cmd[5] = (unchar) block_address & 0xff;
+ new_cmd[7] = (unchar) (block_count >> 8) & 0xff;
+ new_cmd[8] = (unchar) block_count & 0xff;
+ block_address+=block_count;
+
+ sbp2_create_command_orb(hi, scsi_id, command, new_cmd, total_sg,
+ total_transfer, &sgpnt[current_sg],
+ scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
+
+ /*
+ * Link up the orb, and ring the doorbell if needed
+ */
+ memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+ sbp2_link_orb_command(hi, scsi_id, command);
+
+ current_sg += total_sg;
+ total_sg = 0;
+ total_transfer = 0;
+
+ }
+
+ }
+
+ /*
+ * Get the last piece...
+ */
+ command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done, hi);
+ if (!command) {
+ return(-EIO);
+ }
+
+ block_count = total_transfer/block_size;
+ new_cmd[2] = (unchar) (block_address >> 24) & 0xff;
+ new_cmd[3] = (unchar) (block_address >> 16) & 0xff;
+ new_cmd[4] = (unchar) (block_address >> 8) & 0xff;
+ new_cmd[5] = (unchar) block_address & 0xff;
+ new_cmd[7] = (unchar) (block_count >> 8) & 0xff;
+ new_cmd[8] = (unchar) block_count & 0xff;
+
+ sbp2_create_command_orb(hi, scsi_id, command, new_cmd, total_sg,
+ total_transfer, &sgpnt[current_sg],
+ scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
+
+ /*
+ * Link up the orb, and ring the doorbell if needed
+ */
+ memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
+ sbp2_link_orb_command(hi, scsi_id, command);
+
+
+ return(0);
+}
+
+/*
+ * This function deals with differences between the Linux scsi command set and
+ * the sbp2 RBC command set.
+ */
+static void sbp2_check_sbp2_command(unchar *cmd)
+{
+ unchar new_cmd[16];
+
+ SBP2_DEBUG("sbp2: sbp2_check_sbp2_command");
+
+ switch (*cmd) {
+
+ case READ_6:
+
+ SBP2_DEBUG("sbp2: Convert READ_6 to READ_10");
+
+ /*
+ * Need to turn read_6 into read_10
+ */
+ new_cmd[0] = 0x28;
+ new_cmd[1] = (cmd[1] & 0xe0);
+ new_cmd[2] = 0x0;
+ new_cmd[3] = (cmd[1] & 0x1f);
+ new_cmd[4] = cmd[2];
+ new_cmd[5] = cmd[3];
+ new_cmd[6] = 0x0;
+ new_cmd[7] = 0x0;
+ new_cmd[8] = cmd[4];
+ new_cmd[9] = cmd[5];
+
+ memcpy(cmd, new_cmd, 10);
+
+ break;
+
+ case WRITE_6:
+
+ SBP2_DEBUG("sbp2: Convert WRITE_6 to WRITE_10");
+
+ /*
+ * Need to turn write_6 into write_10
+ */
+ new_cmd[0] = 0x2a;
+ new_cmd[1] = (cmd[1] & 0xe0);
+ new_cmd[2] = 0x0;
+ new_cmd[3] = (cmd[1] & 0x1f);
+ new_cmd[4] = cmd[2];
+ new_cmd[5] = cmd[3];
+ new_cmd[6] = 0x0;
+ new_cmd[7] = 0x0;
+ new_cmd[8] = cmd[4];
+ new_cmd[9] = cmd[5];
+
+ memcpy(cmd, new_cmd, 10);
+
+ break;
+
+ case MODE_SENSE:
+
+ SBP2_DEBUG("sbp2: Convert MODE_SENSE_6 to MOSE_SENSE_10");
+
+ /*
+ * Need to turn mode_sense_6 into mode_sense_10
+ */
+ new_cmd[0] = 0x5a;
+ new_cmd[1] = cmd[1];
+ new_cmd[2] = cmd[2];
+ new_cmd[3] = 0x0;
+ new_cmd[4] = 0x0;
+ new_cmd[5] = 0x0;
+ new_cmd[6] = 0x0;
+ new_cmd[7] = 0x0;
+ new_cmd[8] = cmd[4];
+ new_cmd[9] = cmd[5];
+
+ memcpy(cmd, new_cmd, 10);
+
+ break;
+
+ case MODE_SELECT:
+
+ /*
+ * TODO. Probably need to change mode select to 10 byte version
+ */
+
+ default:
+ break;
+ }
+
+ return;
+}
+
+/*
+ * Translates SBP-2 status into SCSI sense data for check conditions
+ */
+static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data)
+{
+ SBP2_DEBUG("sbp2: sbp2_status_to_sense_data");
+
+ /*
+ * Ok, it's pretty ugly... ;-)
+ */
+ sense_data[0] = 0x70;
+ sense_data[1] = 0x0;
+ sense_data[2] = sbp2_status[9];
+ sense_data[3] = sbp2_status[12];
+ sense_data[4] = sbp2_status[13];
+ sense_data[5] = sbp2_status[14];
+ sense_data[6] = sbp2_status[15];
+ sense_data[7] = 10;
+ sense_data[8] = sbp2_status[16];
+ sense_data[9] = sbp2_status[17];
+ sense_data[10] = sbp2_status[18];
+ sense_data[11] = sbp2_status[19];
+ sense_data[12] = sbp2_status[10];
+ sense_data[13] = sbp2_status[11];
+ sense_data[14] = sbp2_status[20];
+ sense_data[15] = sbp2_status[21];
+
+ return(sbp2_status[8] & 0x3f); /* return scsi status */
+}
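+
+/*
+ * For reference, the layout built above follows standard fixed-format SCSI
+ * sense data: byte 0 is the response code (0x70 = current error), byte 2 the
+ * sense key, bytes 3-6 the information field, byte 7 the additional sense
+ * length (10 here), and bytes 12/13 the additional sense code / qualifier
+ * that the SCSI midlayer and the sd/sr drivers report.
+ */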
+
+/*
+ * This function is called after a command is completed, in order to do any necessary SBP-2
+ * response data translations for the SCSI stack
+ */
+static void sbp2_check_sbp2_response(struct sbp2scsi_host_info *hi,
+ struct scsi_id_instance_data *scsi_id,
+ Scsi_Cmnd *SCpnt)
+{
+ u8 *scsi_buf = SCpnt->request_buffer;
+ u32 device_type = (scsi_id->sbp2_device_type_and_lun & 0x00ff0000) >> 16;
+
+ SBP2_DEBUG("sbp2: sbp2_check_sbp2_response");
+
+ switch (SCpnt->cmnd[0]) {
+
+ case INQUIRY:
+
+ SBP2_DEBUG("sbp2: Check Inquiry data");
+
+ /*
+ * Check for Simple Direct Access Device and change it to TYPE_DISK
+ */
+ if ((scsi_buf[0] & 0x1f) == TYPE_SDAD) {
+ SBP2_DEBUG("sbp2: Changing TYPE_SDAD to TYPE_DISK");
+ scsi_buf[0] &= 0xe0;
+ }
+
+ /*
+ * Fix ANSI revision and response data format
+ */
+ scsi_buf[2] |= 2;
+ scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
+
+ break;
+
+ case MODE_SENSE:
+
+ if ((device_type == TYPE_DISK) ||
+ (device_type == TYPE_SDAD) ||
+ (device_type == TYPE_ROM)) {
+
+ SBP2_DEBUG("sbp2: Modify mode sense response (10 byte version)");
+
+ scsi_buf[0] = scsi_buf[1]; /* Mode data length */
+ scsi_buf[1] = scsi_buf[2]; /* Medium type */
+ scsi_buf[2] = scsi_buf[3]; /* Device specific parameter */
+ scsi_buf[3] = scsi_buf[7]; /* Block descriptor length */
+ memcpy(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]);
+
+ }
+
+ break;
+
+ case MODE_SELECT:
+
+ /*
+ * TODO. Probably need to change mode select to 10 byte version
+ */
+
+ default:
+ break;
+ }
+ return;
+}
+
+/*
+ * This function deals with status writes from the SBP-2 device
+ */
+static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
+ quadlet_t *data, u64 addr, unsigned int length)
+{
+ struct sbp2scsi_host_info *hi = NULL;
+ struct scsi_id_instance_data *scsi_id = NULL;
+ int i;
+ unsigned long flags;
+ Scsi_Cmnd *SCpnt = NULL;
+ u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
+ struct sbp2_command_info *command;
+
+ SBP2_DEBUG("sbp2: sbp2_handle_status_write");
+
+ if (!host) {
+ SBP2_ERR("sbp2: host is NULL - this is bad!");
+ return(RCODE_ADDRESS_ERROR);
+ }
+
+ sbp2_spin_lock(&sbp2_host_info_lock, flags);
+ hi = sbp2_find_host_info(host);
+ sbp2_spin_unlock(&sbp2_host_info_lock, flags);
+
+ if (!hi) {
+ SBP2_ERR("sbp2: host info is NULL - this is bad!");
+ return(RCODE_ADDRESS_ERROR);
+ }
+
+ sbp2_spin_lock(&hi->sbp2_command_lock, flags);
+
+ /*
+ * Find our scsi_id structure
+ */
+ for (i=0; i<SBP2SCSI_MAX_SCSI_IDS; i++) {
+ if (hi->scsi_id[i]) {
+ if (hi->scsi_id[i]->node_id == (nodeid & NODE_MASK)) {
+ scsi_id = hi->scsi_id[i];
+ SBP2_DEBUG("sbp2: SBP-2 status write from node %x", scsi_id->node_id);
+ break;
+ }
+ }
+ }
+
+ if (!scsi_id) {
+ SBP2_ERR("sbp2: scsi_id is NULL - device is gone?");
+ sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
+ return(RCODE_ADDRESS_ERROR);
+ }
+
+ /*
+ * Put response into scsi_id status fifo...
+ */
+ memcpy(&scsi_id->status_block, data, length);
+
+ /*
+ * Byte swap first two quadlets (8 bytes) of status for processing
+ */
+ sbp2util_be32_to_cpu_buffer(&scsi_id->status_block, 8);
+
+ /*
+ * Handle command ORB status here if necessary. First, need to match status with command.
+ */
+ command = sbp2util_find_command_for_orb(scsi_id, scsi_id->status_block.ORB_offset_lo);
+ if (command) {
+
+ SBP2_DEBUG("sbp2: Found status for command ORB");
+
+ SBP2_ORB_DEBUG("matched command orb %p", &command->command_orb);
+ outstanding_orb_decr;
+
+ /*
+ * Matched status with command, now grab scsi command pointers and check status
+ */
+ SCpnt = command->Current_SCpnt;
+ sbp2util_mark_command_completed(scsi_id, command);
+
+ if (SCpnt && !command->linked) {
+
+ /*
+ * Handle check conditions
+ */
+ if (STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
+
+ SBP2_DEBUG("sbp2: CHECK CONDITION");
+
+ /*
+ * Translate SBP-2 status to SCSI sense data
+ */
+ scsi_status = sbp2_status_to_sense_data((unchar *)&scsi_id->status_block, SCpnt->sense_buffer);
+
+ /*
+ * Initiate a fetch agent reset.
+ */
+ sbp2_agent_reset(hi, scsi_id, SBP2_SEND_NO_WAIT);
+
+ }
+
+ SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb);
+
+ /*
+ * Complete the SCSI command
+ */
+ SBP2_DEBUG("sbp2: Completing SCSI command");
+ sbp2scsi_complete_command(hi, scsi_id, scsi_status, SCpnt, command->Current_done);
+ SBP2_ORB_DEBUG("command orb completed");
+ }
+
+ /*
+ * Check to see if there are no commands in use. If there are none, we can
+ * null out the last orb so that next time around we write directly to the
+ * orb pointer... this quick start saves one 1394 bus transaction.
+ */
+ if (list_empty(&scsi_id->sbp2_command_orb_inuse)) {
+ scsi_id->last_orb = NULL;
+ }
+
+ }
+
+ sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
+ wake_up(&scsi_id->sbp2_login_wait);
+ return(RCODE_COMPLETE);
+}
+
+
+/**************************************
+ * SCSI interface related section
+ **************************************/
+
+/*
+ * This routine is the main request entry routine for doing I/O. It is
+ * called from the scsi stack directly.
+ */
+static int sbp2scsi_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ struct sbp2scsi_host_info *hi = NULL;
+ struct scsi_id_instance_data *scsi_id = NULL;
+ unsigned long flags;
+
+ SBP2_DEBUG("sbp2: sbp2scsi_queuecommand");
+
+ /*
+ * Pull our host info and scsi id instance data from the scsi command
+ */
+ hi = (struct sbp2scsi_host_info *) SCpnt->host->hostdata[0];
+
+ if (!hi) {
+ SBP2_ERR("sbp2: sbp2scsi_host_info is NULL - this is bad!");
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done (SCpnt);
+ return(0);
+ }
+
+ scsi_id = hi->scsi_id[SCpnt->target];
+
+ /*
+ * Save off the command if this is the initial bus scan... so that we can
+ * complete it after we find all our sbp2 devices on the 1394 bus
+ */
+ if (!no_bus_scan && !hi->initial_scsi_bus_scan_complete) {
+ hi->bus_scan_SCpnt = SCpnt;
+ hi->bus_scan_done = done;
+ return(0);
+ }
+
+ /*
+ * If scsi_id is null, it means there is no device in this slot, so we should return
+ * selection timeout.
+ */
+ if (!scsi_id) {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done (SCpnt);
+ return(0);
+ }
+
+ /*
+ * Until we handle multiple luns, just return selection time-out to any IO directed at non-zero LUNs
+ */
+ if (SCpnt->lun) {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done (SCpnt);
+ return(0);
+ }
+
+ /*
+ * Check for request sense command, and handle it here (autorequest sense)
+ */
+ if (SCpnt->cmnd[0] == REQUEST_SENSE) {
+ SBP2_DEBUG("sbp2: REQUEST_SENSE");
+ memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen);
+ memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+ sbp2scsi_complete_command(hi, scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
+ return(0);
+ }
+
+ /*
+ * If a bus reset is in progress, just return busy so that the command gets queued again later
+ */
+ if (hi->bus_reset_in_progress) {
+ SBP2_ERR("sbp2: Bus reset in progress - rejecting command");
+ SCpnt->result = DID_BUS_BUSY << 16;
+ done (SCpnt);
+ return(0);
+ }
+
+ /*
+ * Try and send our SCSI command
+ */
+ sbp2_spin_lock(&hi->sbp2_command_lock, flags);
+ if (sbp2_send_command(hi, scsi_id, SCpnt, done)) {
+ SBP2_ERR("sbp2: Error sending SCSI command");
+ sbp2scsi_complete_command(hi, scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT, SCpnt, done);
+ }
+ sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
+
+ return(0);
+}
+
+/*
+ * This function is called in order to complete all outstanding SBP-2 commands (in case of resets, etc.).
+ */
+static void sbp2scsi_complete_all_commands(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ u32 status)
+{
+ struct list_head *lh;
+ struct sbp2_command_info *command;
+
+ SBP2_DEBUG("sbp2: sbp2_complete_all_commands");
+
+ while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
+ SBP2_DEBUG("sbp2: Found pending command to complete");
+ lh = scsi_id->sbp2_command_orb_inuse.next;
+ command = list_entry(lh, struct sbp2_command_info, list);
+ sbp2util_mark_command_completed(scsi_id, command);
+ if (command->Current_SCpnt && !command->linked) {
+ void (*done)(Scsi_Cmnd *) = command->Current_done;
+ command->Current_SCpnt->result = status << 16;
+ done (command->Current_SCpnt);
+ }
+ }
+
+ return;
+}
+
+/*
+ * This function is called in order to complete a regular SBP-2 command.
+ */
+static void sbp2scsi_complete_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id, u32 scsi_status,
+ Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
+{
+ SBP2_DEBUG("sbp2: sbp2scsi_complete_command");
+
+ /*
+ * Sanity
+ */
+ if (!SCpnt) {
+ SBP2_ERR("sbp2: SCpnt is NULL");
+ return;
+ }
+
+ /*
+ * If a bus reset is in progress and there was an error, don't complete the command,
+ * just let it get retried at the end of the bus reset.
+ */
+ if ((hi->bus_reset_in_progress) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
+ SBP2_ERR("sbp2: Bus reset in progress - retry command later");
+ return;
+ }
+
+ /*
+ * Switch on scsi status
+ */
+ switch (scsi_status) {
+ case SBP2_SCSI_STATUS_GOOD:
+ SCpnt->result = DID_OK;
+ break;
+
+ case SBP2_SCSI_STATUS_BUSY:
+ SBP2_ERR("sbp2: SBP2_SCSI_STATUS_BUSY");
+ SCpnt->result = DID_BUS_BUSY << 16;
+ break;
+
+ case SBP2_SCSI_STATUS_CHECK_CONDITION:
+ SBP2_DEBUG("sbp2: SBP2_SCSI_STATUS_CHECK_CONDITION");
+ SCpnt->result = CHECK_CONDITION << 1;
+
+ /*
+ * Debug stuff
+ */
+ print_sense("bh", SCpnt);
+
+ break;
+
+ case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
+ SBP2_ERR("sbp2: SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
+ SCpnt->result = DID_NO_CONNECT << 16;
+ break;
+
+ case SBP2_SCSI_STATUS_CONDITION_MET:
+ case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
+ case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
+ SBP2_ERR("sbp2: Bad SCSI status = %x", scsi_status);
+ SCpnt->result = DID_ERROR << 16;
+ break;
+
+ default:
+ SBP2_ERR("sbp2: Unsupported SCSI status = %x", scsi_status);
+ SCpnt->result = DID_ERROR << 16;
+ }
+
+ /*
+ * Take care of any sbp2 response data mucking here (RBC stuff, etc.)
+ */
+ if (SCpnt->result == DID_OK) {
+ sbp2_check_sbp2_response(hi, scsi_id, SCpnt);
+ }
+
+ /*
+ * One more quick hack (not enabled by default). Some sbp2 devices do not support
+ * mode sense. Turn on this hack to allow the device to pass the sd driver's
+ * write-protect test (so that you can mount the device rw).
+ */
+ if (mode_sense_hack && SCpnt->result != DID_OK && SCpnt->cmnd[0] == MODE_SENSE) {
+ SBP2_INFO("sbp2: Returning success to mode sense command");
+ SCpnt->result = DID_OK;
+ SCpnt->sense_buffer[0] = 0;
+ memset (SCpnt->request_buffer, 0, 8);
+ }
+
+ /*
+ * If a bus reset is in progress and there was an error, complete the command
+ * as busy so that it will get retried.
+ */
+ if ((hi->bus_reset_in_progress) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
+ SBP2_ERR("sbp2: Completing command with busy (bus reset)");
+ SCpnt->result = DID_BUS_BUSY << 16;
+ }
+
+ /*
+ * If a unit attention occurs, return busy status so it gets retried... it could have happened because
+ * of a 1394 bus reset or hot-plug...
+ */
+ if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) && (SCpnt->sense_buffer[2] == UNIT_ATTENTION)) {
+ SBP2_INFO("sbp2: UNIT ATTENTION - return busy");
+ SCpnt->result = DID_BUS_BUSY << 16;
+ }
+
+ /*
+ * Tell scsi stack that we're done with this command
+ */
+ done (SCpnt);
+
+ return;
+}
+
+/*
+ * Called by scsi stack when something has really gone wrong.
+ * Usually called when a command has timed out for some reason.
+ */
+static int sbp2scsi_abort (Scsi_Cmnd *SCpnt)
+{
+ struct sbp2scsi_host_info *hi = (struct sbp2scsi_host_info *) SCpnt->host->hostdata[0];
+ struct scsi_id_instance_data *scsi_id = hi->scsi_id[SCpnt->target];
+ struct sbp2_command_info *command;
+ unsigned long flags;
+
+ SBP2_ERR("sbp2: aborting sbp2 command");
+
+ if (scsi_id) {
+
+ /*
+ * Right now, just return any matching command structures to the free pool (there may
+ * be more than one because of broken up/linked commands).
+ */
+ sbp2_spin_lock(&hi->sbp2_command_lock, flags);
+ do {
+ command = sbp2util_find_command_for_SCpnt(scsi_id, SCpnt);
+ if (command) {
+ SBP2_DEBUG("sbp2: Found command to abort");
+ sbp2util_mark_command_completed(scsi_id, command);
+ if (command->Current_SCpnt && !command->linked) {
+ void (*done)(Scsi_Cmnd *) = command->Current_done;
+ command->Current_SCpnt->result = DID_ABORT << 16;
+ done (command->Current_SCpnt);
+ }
+ }
+ } while (command);
+
+ /*
+ * Initiate a fetch agent reset.
+ */
+ sbp2_agent_reset(hi, scsi_id, SBP2_SEND_NO_WAIT);
+ sbp2scsi_complete_all_commands(hi, scsi_id, DID_BUS_BUSY);
+ sbp2_spin_unlock(&hi->sbp2_command_lock, flags);
+ }
+
+ return(SCSI_ABORT_SUCCESS);
+}
+
+/*
+ * Called by scsi stack when something has really gone wrong.
+ */
+static int sbp2scsi_reset (Scsi_Cmnd *SCpnt, unsigned int reset_flags)
+{
+ struct sbp2scsi_host_info *hi = (struct sbp2scsi_host_info *) SCpnt->host->hostdata[0];
+
+ SBP2_ERR("sbp2: reset requested");
+
+ if (hi) {
+ SBP2_ERR("sbp2: generating IEEE-1394 bus reset");
+ hpsb_reset_bus(hi->host, LONG_RESET);
+ }
+
+ return(SCSI_RESET_SUCCESS);
+}
+
+/*
+ * Called by scsi stack to get bios parameters (used by fdisk, and at boot).
+ */
+static int sbp2scsi_biosparam (Scsi_Disk *disk, kdev_t dev, int geom[])
+{
+ int heads, sectors, cylinders;
+
+ SBP2_DEBUG("sbp2: request for bios parameters");
+
+ heads = 64;
+ sectors = 32;
+ cylinders = disk->capacity / (heads * sectors);
+
+ if (cylinders > 1024) {
+ heads = 255;
+ sectors = 63;
+ cylinders = disk->capacity / (heads * sectors);
+ }
+
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return(0);
+}
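+
+/*
+ * A worked example of the mapping above: a disk reporting 2097152 512-byte
+ * sectors (1GB) gets 2097152 / (64 * 32) = 1024 cylinders, which still fits
+ * the 64 head / 32 sector layout; anything larger falls back to 255 heads
+ * and 63 sectors per track.
+ */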
+
+/*
+ * This routine is called at setup (init) and does nothing. Not used here. =)
+ */
+void sbp2scsi_setup( char *str, int *ints)
+{
+ SBP2_DEBUG("sbp2: sbp2scsi_setup");
+ return;
+}
+
+/*
+ * This is our detection routine, and is where we init everything.
+ */
+static int sbp2scsi_detect (Scsi_Host_Template *tpnt)
+{
+ SBP2_DEBUG("sbp2: sbp2scsi_detect");
+
+ global_scsi_tpnt = tpnt;
+
+ global_scsi_tpnt->proc_name = "sbp2";
+
+ /*
+ * Module load option to force one command at a time
+ */
+ if (serialize_io) {
+ SBP2_ERR("sbp2: Driver forced to serialize I/O (serialize_io = 1)");
+ global_scsi_tpnt->can_queue = 1;
+ global_scsi_tpnt->cmd_per_lun = 1;
+ }
+
+ /*
+ * Module load option to limit max size of requests from the scsi drivers
+ */
+ if (no_large_packets) {
+ SBP2_ERR("sbp2: Driver forced to limit max transfer size (no_large_packets = 1)");
+ global_scsi_tpnt->sg_tablesize = 0x1f;
+ global_scsi_tpnt->use_clustering = DISABLE_CLUSTERING;
+ }
+
+ if (no_bus_scan) {
+ SBP2_ERR("sbp2: Initial scsi bus scan deferred (no_bus_scan = 1)");
+ }
+
+ if (mode_sense_hack) {
+ SBP2_ERR("sbp2: Mode sense emulation enabled (mode_sense_hack = 1)");
+ }
+
+ sbp2_init();
+
+ if (!sbp2_host_count) {
+ SBP2_ERR("sbp2: Please load the lower level IEEE-1394 driver (e.g. ohci1394) before sbp2...");
+ if (sbp2_hl_handle) {
+ hpsb_unregister_highlevel(sbp2_hl_handle);
+ sbp2_hl_handle = NULL;
+ }
+ }
+
+ /*
+ * Since we are returning this count, it means that sbp2 must be loaded "after" the
+ * host adapter module...
+ */
+ return(sbp2_host_count);
+}
+
+/*
+ * This function is called from sbp2_add_host, and is where we register our scsi host
+ */
+static void sbp2scsi_register_scsi_host(struct sbp2scsi_host_info *hi)
+{
+ struct Scsi_Host *shpnt = NULL;
+
+ SBP2_DEBUG("sbp2: sbp2scsi_register_scsi_host");
+ SBP2_DEBUG("sbp2: sbp2scsi_host_info = %p", hi);
+
+ /*
+ * Let's register with the scsi stack
+ */
+ if (global_scsi_tpnt) {
+
+ shpnt = scsi_register (global_scsi_tpnt, sizeof(void *));
+
+ /*
+ * If successful, save off a context (to be used when SCSI commands are received)
+ */
+ if (shpnt) {
+ shpnt->hostdata[0] = (unsigned long)hi;
+ }
+ }
+
+ return;
+}
+
+/*
+ * Called when our module is released
+ */
+static int sbp2scsi_release(struct Scsi_Host *host)
+{
+ SBP2_DEBUG("sbp2: sbp2scsi_release");
+ sbp2_cleanup();
+ return(0);
+}
+
+/*
+ * Called for contents of procfs
+ */
+static const char *sbp2scsi_info (struct Scsi_Host *host)
+{
+ return "IEEE-1394 SBP-2 protocol driver";
+}
+
+/*
+ * Module related section
+ */
+
+MODULE_AUTHOR("James Goodwin <jamesg@filanet.com>");
+MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
+MODULE_SUPPORTED_DEVICE("sbp2");
+
+/*
+ * SCSI host template
+ */
+static Scsi_Host_Template driver_template = SBP2SCSI;
+
+#include "../scsi/scsi_module.c"
--- /dev/null
+/*
+ * sbp2.h - Defines and prototypes for sbp2.c
+ *
+ * Copyright (C) 2000 James Goodwin, Filanet Corporation (www.filanet.com)
+ * jamesg@filanet.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef SBP2_H
+#define SBP2_H
+
+#define SBP2_DEVICE_NAME "sbp2"
+#define SBP2_DEVICE_NAME_SIZE 4
+
+/*
+ * SBP2 specific structures and defines
+ */
+
+#define ORB_FMT_CMD 0x0
+#define ORB_FMT_DUMMY 0x3
+
+#define ORB_DIRECTION_WRITE_TO_MEDIA 0x0
+#define ORB_DIRECTION_READ_FROM_MEDIA 0x1
+#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
+
+#define SPEED_S100 0x0
+#define SPEED_S200 0x1
+#define SPEED_S400 0x2
+#define SPEED_S800 0x3
+#define SPEED_S1600 0x4
+#define SPEED_S3200 0x5
+
+/* 2^(MAX_PAYLOAD+2) = Maximum data transfer length */
+#define MAX_PAYLOAD_S100 0x7
+#define MAX_PAYLOAD_S200 0x8
+#define MAX_PAYLOAD_S400 0x9
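+/* e.g. MAX_PAYLOAD_S400 (0x9) allows up to 2^(9+2) = 2048 bytes of data per packet */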
+
+#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
+#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29)
+#define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16)
+
+struct sbp2_dummy_orb {
+ volatile u32 next_ORB_hi;
+ volatile u32 next_ORB_lo;
+ u32 reserved1;
+ u32 reserved2;
+ u32 notify_rq_fmt;
+ u8 command_block[12];
+};
+
+#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
+#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
+#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
+#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
+#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
+#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)
+
+struct sbp2_command_orb {
+ volatile u32 next_ORB_hi;
+ volatile u32 next_ORB_lo;
+ u32 data_descriptor_hi;
+ u32 data_descriptor_lo;
+ u32 misc;
+ u8 cdb[12];
+};
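+
+/*
+ * For illustration only: the misc quadlet of a command ORB for a read at
+ * S400 speed could be built up from the macros above, since none of the
+ * bit fields overlap, e.g.:
+ *
+ *	misc = ORB_SET_NOTIFY(1) |
+ *	       ORB_SET_RQ_FMT(ORB_FMT_CMD) |
+ *	       ORB_SET_DIRECTION(ORB_DIRECTION_READ_FROM_MEDIA) |
+ *	       ORB_SET_SPEED(SPEED_S400) |
+ *	       ORB_SET_MAX_PAYLOAD(MAX_PAYLOAD_S400) |
+ *	       ORB_SET_DATA_SIZE(transfer_length);
+ *
+ * where transfer_length stands for the byte count of the request.
+ */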
+
+#define LOGIN_REQUEST 0x0
+#define QUERY_LOGINS_REQUEST 0x1
+#define RECONNECT_REQUEST 0x3
+#define SET_PASSWORD_REQUEST 0x4
+#define LOGOUT_REQUEST 0x7
+#define ABORT_TASK_REQUEST 0xb
+#define ABORT_TASK_SET 0xc
+#define LOGICAL_UNIT_RESET 0xe
+#define TARGET_RESET_REQUEST 0xf
+
+#define ORB_SET_LUN(value) (value & 0xffff)
+#define ORB_SET_FUNCTION(value) ((value & 0xf) << 16)
+#define ORB_SET_RECONNECT(value) ((value & 0xf) << 20)
+#define ORB_SET_EXCLUSIVE(value) ((value & 0x1) << 28)
+#define ORB_SET_LOGIN_RESP_LENGTH(value) (value & 0xffff)
+#define ORB_SET_PASSWD_LENGTH(value) ((value & 0xffff) << 16)
+
+struct sbp2_login_orb {
+ u32 password_hi;
+ u32 password_lo;
+ u32 login_response_hi;
+ u32 login_response_lo;
+ u32 lun_misc;
+ u32 passwd_resp_lengths;
+ u32 status_FIFO_hi;
+ u32 status_FIFO_lo;
+};
+
+#define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff)
+#define RESPONSE_GET_LENGTH(value) ((value >> 16) & 0xffff)
+#define RESPONSE_GET_RECONNECT_HOLD(value) (value & 0xffff)
+
+struct sbp2_login_response {
+ u32 length_login_ID;
+ u32 command_block_agent_hi;
+ u32 command_block_agent_lo;
+ u32 reconnect_hold;
+};
+
+#define ORB_SET_LOGIN_ID(value) (value & 0xffff)
+
+struct sbp2_reconnect_orb {
+ u32 reserved1;
+ u32 reserved2;
+ u32 reserved3;
+ u32 reserved4;
+ u32 login_ID_misc;
+ u32 reserved5;
+ u32 status_FIFO_hi;
+ u32 status_FIFO_lo;
+};
+
+struct sbp2_logout_orb {
+ u32 reserved1;
+ u32 reserved2;
+ u32 reserved3;
+ u32 reserved4;
+ u32 login_ID_misc;
+ u32 reserved5;
+ u32 status_FIFO_hi;
+ u32 status_FIFO_lo;
+};
+
+#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff)
+#define PAGE_TABLE_SET_SEGMENT_LENGTH(value) ((value & 0xffff) << 16)
+
+struct sbp2_unrestricted_page_table {
+ u32 length_segment_base_hi;
+ u32 segment_base_lo;
+};
+
+#define RESP_STATUS_REQUEST_COMPLETE 0x0
+#define RESP_STATUS_TRANSPORT_FAILURE 0x1
+#define RESP_STATUS_ILLEGAL_REQUEST 0x2
+#define RESP_STATUS_VENDOR_DEPENDENT 0x3
+
+#define SBP2_STATUS_NO_ADDITIONAL_INFO 0x0
+#define SBP2_STATUS_REQ_TYPE_NOT_SUPPORTED 0x1
+#define SBP2_STATUS_SPEED_NOT_SUPPORTED 0x2
+#define SBP2_STATUS_PAGE_SIZE_NOT_SUPPORTED 0x3
+#define SBP2_STATUS_ACCESS_DENIED 0x4
+#define SBP2_STATUS_LU_NOT_SUPPORTED 0x5
+#define SBP2_STATUS_MAX_PAYLOAD_TOO_SMALL 0x6
+#define SBP2_STATUS_RESOURCES_UNAVAILABLE 0x8
+#define SBP2_STATUS_FUNCTION_REJECTED 0x9
+#define SBP2_STATUS_LOGIN_ID_NOT_RECOGNIZED 0xa
+#define SBP2_STATUS_DUMMY_ORB_COMPLETED 0xb
+#define SBP2_STATUS_REQUEST_ABORTED 0xc
+#define SBP2_STATUS_UNSPECIFIED_ERROR 0xff
+
+#define SFMT_CURRENT_ERROR 0x0
+#define SFMT_DEFERRED_ERROR 0x1
+#define SFMT_VENDOR_DEPENDENT_STATUS 0x3
+
+#define SBP2_SCSI_STATUS_GOOD 0x0
+#define SBP2_SCSI_STATUS_CHECK_CONDITION 0x2
+#define SBP2_SCSI_STATUS_CONDITION_MET 0x4
+#define SBP2_SCSI_STATUS_BUSY 0x8
+#define SBP2_SCSI_STATUS_RESERVATION_CONFLICT 0x18
+#define SBP2_SCSI_STATUS_COMMAND_TERMINATED 0x22
+
+#define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff
+
+#define STATUS_GET_ORB_OFFSET_HI(value) (value & 0xffff)
+#define STATUS_GET_SBP_STATUS(value) ((value >> 16) & 0xff)
+#define STATUS_GET_LENGTH(value) ((value >> 24) & 0x7)
+#define STATUS_GET_DEAD_BIT(value) ((value >> 27) & 0x1)
+#define STATUS_GET_RESP(value) ((value >> 28) & 0x3)
+#define STATUS_GET_SRC(value) ((value >> 30) & 0x3)
+
+struct sbp2_status_block {
+ u32 ORB_offset_hi_misc;
+ u32 ORB_offset_lo;
+ u8 command_set_dependent[24];
+};
+
+/*
+ * Miscellaneous SBP2 related config rom defines
+ */
+
+#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL /* for write posting! */
+#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
+#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
+
+#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
+#define SBP2_CSR_OFFSET_KEY 0x54
+#define SBP2_UNIT_SPEC_ID_KEY 0x12
+#define SBP2_UNIT_SW_VERSION_KEY 0x13
+#define SBP2_COMMAND_SET_SPEC_ID_KEY 0x38
+#define SBP2_COMMAND_SET_KEY 0x39
+#define SBP2_UNIT_CHARACTERISTICS_KEY 0x3a
+#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
+#define SBP2_FIRMWARE_REVISION_KEY 0x3c
+
+#define SBP2_AGENT_STATE_OFFSET 0x00ULL
+#define SBP2_AGENT_RESET_OFFSET 0x04ULL
+#define SBP2_ORB_POINTER_OFFSET 0x08ULL
+#define SBP2_DOORBELL_OFFSET 0x10ULL
+#define SBP2_UNSOLICITED_STATUS_ENABLE_OFFSET 0x14ULL
+#define SBP2_UNSOLICITED_STATUS_VALUE 0xf
+
+#define SBP2_BUSY_TIMEOUT_ADDRESS 0xfffff0000210ULL
+#define SBP2_BUSY_TIMEOUT_VALUE 0xf
+
+#define SBP2_AGENT_RESET_DATA 0xf
+
+/*
+ * Unit spec id and sw version entry for SBP-2 devices
+ */
+
+#define SBP2_UNIT_SPEC_ID_ENTRY 0x1200609e
+#define SBP2_SW_VERSION_ENTRY 0x13010483
+
+/*
+ * Miscellaneous general config rom related defines
+ */
+
+#define CONFIG_ROM_INITIAL_MEMORY_SPACE 0xfffff0000000ULL
+
+#define CONFIG_ROM_BASE_ADDRESS 0xfffff0000400ULL
+#define CONFIG_ROM_ROOT_DIR_BASE 0xfffff0000414ULL
+#define CONFIG_ROM_NODE_UNIQUE_ID_HI_ADDRESS 0xfffff000040cULL
+#define CONFIG_ROM_NODE_UNIQUE_ID_LO_ADDRESS 0xfffff0000410ULL
+#define CONFIG_ROM_SIGNATURE_ADDRESS 0xfffff0000404ULL
+#define CONFIG_ROM_NODE_OPTIONS 0xfffff0000408ULL
+#define CONFIG_ROM_UNIT_DIRECTORY_OFFSET 0xfffff0000424ULL
+
+#define IEEE1394_CONFIG_ROM_SIGNATURE 0x31333934
+
+#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
+#define SBP2_BROKEN_FIRMWARE_MAX_TRANSFER 0x20000
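+/* 0x20000 = 128KB; larger requests to devices with this firmware are split up by sbp2_send_split_command() */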
+
+/*
+ * Flags for SBP-2 functions
+ */
+#define SBP2_SEND_NO_WAIT 0x00000001
+
+/*
+ * SCSI specific stuff
+ */
+
+#define SBP2_MAX_SG_ELEMENTS SG_ALL
+#define SBP2_CLUSTERING ENABLE_CLUSTERING
+#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
+#define SBP2SCSI_MAX_SCSI_IDS 8
+#define SBP2SCSI_MAX_OUTSTANDING_CMDS 8 /* Max total outstanding sbp2 commands allowed at a time! */
+#define SBP2SCSI_MAX_CMDS_PER_LUN 4 /* Max outstanding sbp2 commands per device - tune as needed */
+
+#ifndef TYPE_SDAD
+#define TYPE_SDAD 0x0e /* simplified direct access device */
+#endif
+
+/*
+ * SCSI direction table... since the scsi stack doesn't specify direction... =(
+ *
+ * DIN = IN data direction
+ * DOU = OUT data direction
+ * DNO = No data transfer
+ * DUN = Unknown data direction
+ */
+#define DIN ORB_DIRECTION_READ_FROM_MEDIA
+#define DOU ORB_DIRECTION_WRITE_TO_MEDIA
+#define DNO ORB_DIRECTION_NO_DATA_TRANSFER
+#define DUN DIN
+
+static unchar sbp2scsi_direction_table[0x100] = {
+ DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
+ DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
+ DIN,DUN,DIN,DIN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
+ DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
+ DOU,DOU,DIN,DIN,DIN,DNO,DIN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DNO,DUN,
+ DUN,DIN,DIN,DNO,DOU,DOU,DUN,DUN,DNO,DIN,DIN,DNO,DIN,DOU,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DNO,DOU,DOU,DIN,DNO,DNO,DNO,DIN,DNO,DOU,DUN,DNO,DIN,DOU,DOU,
+ DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DIN,DNO,DNO,DNO,DIN,DIN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
+ DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
+};
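+
+/*
+ * The table is indexed by SCSI opcode, e.g. READ_10 (0x28) maps to DIN and
+ * WRITE_10 (0x2a) to DOU, so that the ORB data direction can be chosen
+ * without help from the scsi stack.
+ */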
+
+
+/*
+ * Scsi_Host structure
+ */
+#define SBP2SCSI { \
+ name: "IEEE1394 SBP-2", \
+ detect: sbp2scsi_detect, \
+ release: sbp2scsi_release, \
+ info: sbp2scsi_info, \
+ queuecommand: sbp2scsi_queuecommand, \
+ abort: sbp2scsi_abort, \
+ reset: sbp2scsi_reset, \
+ bios_param: sbp2scsi_biosparam, \
+ can_queue: SBP2SCSI_MAX_OUTSTANDING_CMDS, \
+ this_id: -1, \
+ sg_tablesize: SBP2_MAX_SG_ELEMENTS, \
+ cmd_per_lun: SBP2SCSI_MAX_CMDS_PER_LUN, \
+ use_clustering: SBP2_CLUSTERING, \
+ emulated: 1 \
+}
+
+/*
+ * Number of request packets available for actual sbp2 I/O requests (these are used
+ * for sending command and agent reset packets).
+ */
+#define SBP2_MAX_REQUEST_PACKETS SBP2SCSI_MAX_OUTSTANDING_CMDS /* Per host adapter instance */
+#define SBP2_MAX_COMMAND_ORBS (SBP2SCSI_MAX_CMDS_PER_LUN * 2) /* Per sbp2 device instance */
+
+/*
+ * Request packets structure (used for sending command and agent reset packets)
+ */
+struct sbp2_request_packet {
+
+ struct list_head list;
+ struct hpsb_packet *packet;
+ struct tq_struct tq;
+ void *hi_context;
+
+};
+
+/*
+ * Encapsulates all the info necessary for an outstanding command.
+ */
+struct sbp2_command_info {
+
+ struct list_head list;
+ struct sbp2_command_orb command_orb;
+ dma_addr_t command_orb_dma;
+ Scsi_Cmnd *Current_SCpnt;
+ void (*Current_done)(Scsi_Cmnd *);
+ unsigned int linked;
+
+ /* Also need s/g structure for each sbp2 command */
+ struct sbp2_unrestricted_page_table scatter_gather_element[SBP2_MAX_SG_ELEMENTS];
+ dma_addr_t sge_dma;
+ void *sge_buffer;
+ dma_addr_t cmd_dma;
+ int dma_type;
+ unsigned long dma_size;
+ int dma_dir;
+
+};
+
+/*
+ * Information needed on a per scsi id basis (one for each sbp2 device)
+ */
+struct scsi_id_instance_data {
+
+ /*
+ * Various sbp2 specific structures
+ */
+ struct sbp2_command_orb *last_orb;
+ struct sbp2_login_orb *login_orb;
+ dma_addr_t login_orb_dma;
+ struct sbp2_login_response *login_response;
+ dma_addr_t login_response_dma;
+ struct sbp2_reconnect_orb *reconnect_orb;
+ dma_addr_t reconnect_orb_dma;
+ struct sbp2_logout_orb *logout_orb;
+ dma_addr_t logout_orb_dma;
+ struct sbp2_status_block status_block;
+
+ /*
+ * Stuff we need to know about the sbp2 device itself
+ */
+ u64 node_unique_id;
+ u64 sbp2_management_agent_addr;
+ u64 sbp2_command_block_agent_addr;
+ u32 node_id;
+ u32 speed_code;
+ u32 max_payload_size;
+
+ /*
+ * Values pulled from the device's unit directory
+ */
+ u32 sbp2_unit_spec_id;
+ u32 sbp2_unit_sw_version;
+ u32 sbp2_command_set_spec_id;
+ u32 sbp2_command_set;
+ u32 sbp2_unit_characteristics;
+ u32 sbp2_device_type_and_lun;
+ u32 sbp2_firmware_revision;
+
+ /*
+ * Wait queue used for logins, reconnects, logouts
+ */
+ wait_queue_head_t sbp2_login_wait;
+
+ /*
+ * Flag noting whether the sbp2 device is currently validated (for use during
+ * bus resets).
+ */
+ u32 validated;
+
+ /*
+ * Pool of command orbs, so we can have more than one outstanding command per id
+ */
+ spinlock_t sbp2_command_orb_lock;
+ struct list_head sbp2_command_orb_inuse;
+ struct list_head sbp2_command_orb_completed;
+ u32 sbp2_total_command_orbs;
+
+};
+
+/*
+ * Sbp2 host data structure (one per sbp2 host)
+ */
+struct sbp2scsi_host_info {
+
+ /*
+ * For use in keeping track of hosts
+ */
+ struct list_head list;
+ struct hpsb_host *host;
+
+ /*
+ * Spin locks for command processing and packet pool management
+ */
+ spinlock_t sbp2_command_lock;
+ spinlock_t sbp2_request_packet_lock;
+
+ /*
+ * Flag indicating if a bus reset (or device detection) is in progress
+ */
+ u32 bus_reset_in_progress;
+
+ /*
+ * We currently use a kernel thread for dealing with bus resets and sbp2
+ * device detection. We use this to wake up the thread when needed.
+ */
+ wait_queue_head_t sbp2_detection_wait;
+
+ /*
+ * PID of sbp2 detection kernel thread
+ */
+ int sbp2_detection_pid;
+
+ /*
+ * Lists keeping track of inuse/free sbp2_request_packets. These structures are
+ * used for sending out sbp2 command and agent reset packets. We initially create
+ * a pool of request packets so that we don't have to do any kmallocs while in critical
+ * I/O paths.
+ */
+ struct list_head sbp2_req_inuse;
+ struct list_head sbp2_req_free;
+
+ /*
+ * Stuff to keep track of the initial scsi bus scan (so that we don't miss it)
+ */
+ u32 initial_scsi_bus_scan_complete;
+ Scsi_Cmnd *bus_scan_SCpnt;
+ void (*bus_scan_done)(Scsi_Cmnd *);
+
+ /*
+ * Here is the pool of request packets. All the hpsb packets (for 1394 bus transactions)
+ * are allocated at init and simply re-initialized when needed.
+ */
+ struct sbp2_request_packet request_packet[SBP2_MAX_REQUEST_PACKETS];
+
+ /*
+ * SCSI ID instance data (one for each sbp2 device instance possible)
+ */
+ struct scsi_id_instance_data *scsi_id[SBP2SCSI_MAX_SCSI_IDS];
+
+};
+
+/*
+ * Function prototypes
+ */
+
+/*
+ * Various utility prototypes
+ */
+static int sbp2util_read_quadlet(struct sbp2scsi_host_info *hi, nodeid_t node, u64 addr,
+ quadlet_t *buffer);
+static int sbp2util_unit_directory(struct sbp2scsi_host_info *hi, nodeid_t node, u64 *addr);
+static int sbp2util_create_request_packet_pool(struct sbp2scsi_host_info *hi);
+static void sbp2util_remove_request_packet_pool(struct sbp2scsi_host_info *hi);
+static struct sbp2_request_packet *sbp2util_allocate_write_request_packet(struct sbp2scsi_host_info *hi,
+ nodeid_t node, u64 addr,
+ size_t data_size,
+ quadlet_t data);
+static void sbp2util_free_request_packet(struct sbp2_request_packet *request_packet);
+static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_id, struct sbp2scsi_host_info *hi);
+static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_id, struct sbp2scsi_host_info *hi);
+static struct sbp2_command_info *sbp2util_find_command_for_orb(struct scsi_id_instance_data *scsi_id, dma_addr_t orb);
+static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_instance_data *scsi_id, void *SCpnt);
+static struct sbp2_command_info *sbp2util_allocate_command_orb(struct scsi_id_instance_data *scsi_id,
+ Scsi_Cmnd *Current_SCpnt,
+ void (*Current_done)(Scsi_Cmnd *),
+ struct sbp2scsi_host_info *hi);
+static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
+ struct sbp2_command_info *command);
+
+/*
+ * IEEE-1394 core driver related prototypes
+ */
+static void sbp2_remove_unvalidated_devices(struct sbp2scsi_host_info *hi);
+static int sbp2_start_device(struct sbp2scsi_host_info *hi, int node_id);
+static int sbp2_check_device(struct sbp2scsi_host_info *hi, int node_id);
+static void sbp2_bus_reset_handler(void *context);
+static void sbp2_add_host(struct hpsb_host *host);
+static struct sbp2scsi_host_info *sbp2_find_host_info(struct hpsb_host *host);
+static void sbp2_remove_host(struct hpsb_host *host);
+static void sbp2_host_reset(struct hpsb_host *host);
+static int sbp2_detection_thread(void *__sbp2);
+int sbp2_init(void);
+void sbp2_cleanup(void);
+#if 0
+static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, quadlet_t *data,
+ u64 addr, unsigned int length);
+static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
+ u64 addr, unsigned int length);
+#endif
+/*
+ * SBP-2 protocol related prototypes
+ */
+static int sbp2_login_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
+static int sbp2_reconnect_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
+static int sbp2_logout_device(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
+static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
+ quadlet_t *data, u64 addr, unsigned int length);
+static int sbp2_agent_reset(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id, u32 flags);
+static int sbp2_create_command_orb(struct sbp2scsi_host_info *hi,
+ struct scsi_id_instance_data *scsi_id,
+ struct sbp2_command_info *command,
+ unchar *scsi_cmd,
+ unsigned int scsi_use_sg,
+ unsigned int scsi_request_bufflen,
+ void *scsi_request_buffer, int dma_dir);
+static int sbp2_link_orb_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ struct sbp2_command_info *command);
+static int sbp2_send_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
+static int sbp2_send_split_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
+static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
+static void sbp2_check_sbp2_command(unchar *cmd);
+static void sbp2_check_sbp2_response(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ Scsi_Cmnd *SCpnt);
+static int sbp2_parse_unit_directory(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
+static int sbp2_set_busy_timeout(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
+static int sbp2_max_speed_and_size(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id);
+
+/*
+ * Scsi interface related prototypes
+ */
+static const char *sbp2scsi_info (struct Scsi_Host *host);
+static int sbp2scsi_detect (Scsi_Host_Template *tpnt);
+void sbp2scsi_setup(char *str, int *ints);
+static int sbp2scsi_biosparam (Scsi_Disk *disk, kdev_t dev, int geom[]);
+static int sbp2scsi_abort (Scsi_Cmnd *SCpnt);
+static int sbp2scsi_reset (Scsi_Cmnd *SCpnt, unsigned int reset_flags);
+static int sbp2scsi_queuecommand (Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
+static void sbp2scsi_complete_all_commands(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ u32 status);
+static void sbp2scsi_complete_command(struct sbp2scsi_host_info *hi, struct scsi_id_instance_data *scsi_id,
+ u32 scsi_status, Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *));
+static void sbp2scsi_register_scsi_host(struct sbp2scsi_host_info *hi);
+
+#endif /* SBP2_H */
/*
* video1394.c - video driver for OHCI 1394 boards
* Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/tqueue.h>
#include <linux/delay.h>
+#include <linux/devfs_fs_kernel.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include "ieee1394_types.h"
#include "hosts.h"
#include "ieee1394_core.h"
+#include "highlevel.h"
#include "video1394.h"
#include "ohci1394.h"
int ctx;
int channel;
int last_buffer;
+ int * next_buffer; /* For ISO Transmit of video packets
+ to write the correct SYT field
+ into the next block */
unsigned int num_desc;
unsigned int buf_size;
unsigned int frame_size;
struct dma_cmd **ir_prg;
struct it_dma_prg **it_prg;
unsigned int *buffer_status;
+ unsigned int *last_used_cmd; /* For ISO Transmit with
+ variable sized packets only ! */
int ctrlClear;
int ctrlSet;
int cmdPtr;
int ctxMatch;
wait_queue_head_t waitq;
spinlock_t lock;
+ int flags;
};
struct video_card {
struct ti_ohci *ohci;
+ struct list_head list;
+ int id;
+ devfs_handle_t devfs;
struct dma_iso_ctx **ir_context;
struct dma_iso_ctx **it_context;
void irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent);
-static struct video_card video_cards[MAX_OHCI1394_CARDS];
-static int num_of_video_cards = 0;
+LIST_HEAD(video1394_cards);
+static spinlock_t video1394_cards_lock = SPIN_LOCK_UNLOCKED;
+
+static devfs_handle_t devfs_handle;
+static struct hpsb_highlevel *hl_handle = NULL;
+
static struct video_template video_tmpl = { irq_handler };
-/* Taken from bttv.c */
+/* Code taken from bttv.c */
+
/*******************************/
/* Memory management functions */
/*******************************/
*
* The code used to assume that the kernel vmalloc mappings
* existed in the page tables of every process, this is simply
- * not guarenteed. We now use pgd_offset_k which is the
+ * not guaranteed. We now use pgd_offset_k which is the
* defined way to get at the kernel page tables.
*/
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
-#define page_address(x) (x)
-#endif
-
/* Given PGD from the address space's page table, return the kernel
* virtual mapping of the physical memory mapped at ADR.
*/
ptep = pte_offset(pmd, adr);
pte = *ptep;
if(pte_present(pte)) {
- ret = (unsigned long) page_address(pte_page(pte));
+ ret = (unsigned long)
+ page_address(pte_page(pte));
ret |= (adr & (PAGE_SIZE - 1));
}
}
vfree(mem);
}
}
+/* End of code taken from bttv.c */
static int free_dma_iso_ctx(struct dma_iso_ctx **d)
{
if ((*d)->buffer_status)
kfree((*d)->buffer_status);
-
+ if ((*d)->last_used_cmd)
+ kfree((*d)->last_used_cmd);
+ if ((*d)->next_buffer)
+ kfree((*d)->next_buffer);
+
kfree(*d);
*d = NULL;
d = (struct dma_iso_ctx *)kmalloc(sizeof(struct dma_iso_ctx),
GFP_KERNEL);
if (d==NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate dma_iso_ctx");
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate dma_iso_ctx");
return NULL;
}
d->buf = rvmalloc(d->num_desc * d->buf_size);
if (d->buf == NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate dma buffer");
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate dma buffer");
free_dma_iso_ctx(&d);
return NULL;
}
if (d->ir_prg == NULL) {
PRINT(KERN_ERR, ohci->id,
- "failed to allocate dma ir prg");
+ "Failed to allocate dma ir prg");
free_dma_iso_ctx(&d);
return NULL;
}
GFP_KERNEL);
if (d->ir_prg[i] == NULL) {
PRINT(KERN_ERR, ohci->id,
- "failed to allocate dma ir prg");
+ "Failed to allocate dma ir prg");
free_dma_iso_ctx(&d);
return NULL;
}
if (d->it_prg == NULL) {
PRINT(KERN_ERR, ohci->id,
- "failed to allocate dma it prg");
+ "Failed to allocate dma it prg");
free_dma_iso_ctx(&d);
return NULL;
}
if (PAGE_SIZE % packet_size || packet_size>4096) {
PRINT(KERN_ERR, ohci->id,
- "Packet size %d not yet supported\n",
- packet_size);
+ "Packet size %d (page_size: %ld) "
+ "not yet supported\n",
+ packet_size, PAGE_SIZE);
free_dma_iso_ctx(&d);
return NULL;
}
GFP_KERNEL);
if (d->it_prg[i] == NULL) {
PRINT(KERN_ERR, ohci->id,
- "failed to allocate dma it prg");
+ "Failed to allocate dma it prg");
free_dma_iso_ctx(&d);
return NULL;
}
d->buffer_status = kmalloc(d->num_desc * sizeof(unsigned int),
GFP_KERNEL);
+ d->last_used_cmd = kmalloc(d->num_desc * sizeof(unsigned int),
+ GFP_KERNEL);
+ d->next_buffer = kmalloc(d->num_desc * sizeof(int),
+ GFP_KERNEL);
if (d->buffer_status == NULL) {
- PRINT(KERN_ERR, ohci->id, "failed to allocate dma ir prg");
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate buffer_status");
+ free_dma_iso_ctx(&d);
+ return NULL;
+ }
+ if (d->last_used_cmd == NULL) {
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate last_used_cmd");
+ free_dma_iso_ctx(&d);
+ return NULL;
+ }
+ if (d->next_buffer == NULL) {
+ PRINT(KERN_ERR, ohci->id, "Failed to allocate next_buffer");
free_dma_iso_ctx(&d);
return NULL;
}
memset(d->buffer_status, 0, d->num_desc * sizeof(unsigned int));
+ memset(d->last_used_cmd, 0, d->num_desc * sizeof(unsigned int));
+ memset(d->next_buffer, -1, d->num_desc * sizeof(int));
spin_lock_init(&d->lock);
struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
int i;
+ d->flags = flags;
+
ohci1394_stop_context(ohci, d->ctrlClear, NULL);
for (i=0;i<d->num_desc;i++) {
return i;
}
- PRINT(KERN_ERR, ohci->id,
- "no iso context is listening to channel %d",
+ PRINT(KERN_ERR, ohci->id, "No iso context is listening to channel %d",
channel);
+
return -1;
}
return i;
}
- PRINT(KERN_ERR, ohci->id,
- "no iso context is talking to channel %d",
+ PRINT(KERN_ERR, ohci->id, "No iso context is talking to channel %d",
channel);
+
return -1;
}
return 0;
}
+static inline void put_timestamp(struct ti_ohci *ohci, struct dma_iso_ctx * d,
+ int curr, int n)
+{
+ unsigned char* buf = d->buf + n * d->buf_size;
+ u32 cycleTimer;
+ u32 timeStamp;
+
+ if (n == -1) {
+ return;
+ }
+
+ cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+ timeStamp = ((cycleTimer & 0x0fff) + 11059);
+ timeStamp = (timeStamp % 3072 + ((timeStamp / 3072) << 12)
+ + (cycleTimer & 0xf000)) & 0xffff;
+
+ buf[6] = timeStamp >> 8;
+ buf[7] = timeStamp & 0xff;
+
+#if 0
+ printk("curr: %d, next: %d, cycleTimer: %08x timeStamp: %08x\n",
+ curr, n, cycleTimer, timeStamp);
+#endif
+}
+
int wakeup_dma_it_ctx(struct ti_ohci *ohci, struct dma_iso_ctx *d)
{
int i;
spin_lock(&d->lock);
for (i=0;i<d->num_desc;i++) {
- if (d->it_prg[i][d->nb_cmd-1].end.status & 0xFFFF0000) {
- d->it_prg[i][d->nb_cmd-1].end.status = 0;
+ if (d->it_prg[i][d->last_used_cmd[i]].end.status & 0xFFFF0000) {
+ int next = d->next_buffer[i];
+ put_timestamp(ohci, d, i, next);
+ d->it_prg[i][d->last_used_cmd[i]].end.status = 0;
d->buffer_status[i] = VIDEO1394_BUFFER_READY;
}
}
struct it_dma_prg *it_prg = d->it_prg[n];
unsigned long buf = (unsigned long)d->buf+n*d->buf_size;
int i;
-
+ d->last_used_cmd[n] = d->nb_cmd - 1;
for (i=0;i<d->nb_cmd;i++) {
it_prg[i].begin.control = OUTPUT_MORE_IMMEDIATE | 8 ;
it_prg[i].begin.status = 0;
- /* FIXME: what is the tag value + speed selection */
it_prg[i].data[0] =
- (DMA_SPEED_400<<16) | (d->channel<<8) | 0xa0;
+ (DMA_SPEED_100 << 16)
+ | (/* tag */ 1 << 14)
+ | (d->channel << 8)
+ | (TCODE_ISO_DATA << 4);
if (i==0) it_prg[i].data[0] |= sync_tag;
it_prg[i].data[1] = d->packet_size << 16;
it_prg[i].data[2] = 0;
}
}
-static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag)
+static void initialize_dma_it_prg_var_packet_queue(
+ struct dma_iso_ctx *d, int n, unsigned int * packet_sizes,
+ struct ti_ohci *ohci)
+{
+ struct it_dma_prg *it_prg = d->it_prg[n];
+ int i;
+
+#if 0
+ if (n != -1) {
+ put_timestamp(ohci, d, n);
+ }
+#endif
+ d->last_used_cmd[n] = d->nb_cmd - 1;
+
+ for (i = 0; i < d->nb_cmd; i++) {
+ unsigned int size;
+ if (packet_sizes[i] > d->packet_size) {
+ size = d->packet_size;
+ } else {
+ size = packet_sizes[i];
+ }
+ it_prg[i].data[1] = size << 16;
+ it_prg[i].end.control = 0x100c0000;
+
+ if (i < d->nb_cmd-1 && packet_sizes[i+1] != 0) {
+ it_prg[i].end.control |= size;
+ it_prg[i].begin.branchAddress =
+ (virt_to_bus(&(it_prg[i+1].begin.control))
+ & 0xfffffff0) | 0x3;
+ it_prg[i].end.branchAddress =
+ (virt_to_bus(&(it_prg[i+1].begin.control))
+ & 0xfffffff0) | 0x3;
+ } else {
+ /* the last prg generates an interrupt */
+ it_prg[i].end.control |= 0x08300000 | size;
+ /* the last prg doesn't branch */
+ it_prg[i].begin.branchAddress = 0;
+ it_prg[i].end.branchAddress = 0;
+ d->last_used_cmd[n] = i;
+ break;
+ }
+ }
+}
+
+static void initialize_dma_it_ctx(struct dma_iso_ctx *d, int sync_tag,
+ int flags)
{
struct ti_ohci *ohci = (struct ti_ohci *)d->ohci;
int i;
+ d->flags = flags;
+
ohci1394_stop_context(ohci, d->ctrlClear, NULL);
for (i=0;i<d->num_desc;i++)
static int video1394_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct video_card *video = &video_cards[MINOR(inode->i_rdev)];
+ struct video_card *video = NULL;
struct ti_ohci *ohci= video->ohci;
unsigned long flags;
+ struct list_head *lh;
+
+ spin_lock_irqsave(&video1394_cards_lock, flags);
+ if (!list_empty(&video1394_cards)) {
+ struct video_card *p;
+ list_for_each(lh, &video1394_cards) {
+ p = list_entry(lh, struct video_card, list);
+ if (p->id == MINOR(inode->i_rdev)) {
+ video = p;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
+
+ if (video == NULL) {
+ PRINT_G(KERN_ERR, __FUNCTION__": Unknown video card for minor %d", MINOR(inode->i_rdev));
+ return -EFAULT;
+ }
switch(cmd)
{
return -EFAULT;
if (v.channel<0 || v.channel>(ISO_CHANNELS-1)) {
PRINT(KERN_ERR, ohci->id,
- "iso channel %d out of bound", v.channel);
+ "Iso channel %d out of bound", v.channel);
return -EFAULT;
}
mask = (u64)0x1<<v.channel;
(u32)(ohci->ISO_channel_usage&0xffffffff));
if (ohci->ISO_channel_usage & mask) {
PRINT(KERN_ERR, ohci->id,
- "channel %d is already taken", v.channel);
+ "Channel %d is already taken", v.channel);
return -EFAULT;
}
ohci->ISO_channel_usage |= mask;
if (i==(ohci->nb_iso_rcv_ctx-1)) {
PRINT(KERN_ERR, ohci->id,
- "no iso context available");
+ "No iso context available");
return -EFAULT;
}
if (i==ohci->nb_iso_xmit_ctx) {
PRINT(KERN_ERR, ohci->id,
- "no iso context available");
+ "No iso context available");
return -EFAULT;
}
return -EFAULT;
}
initialize_dma_it_ctx(video->it_context[i],
- v.sync_tag);
+ v.sync_tag, v.flags);
video->current_ctx = video->it_context[i];
v.buf_size = video->it_context[i]->buf_size;
PRINT(KERN_INFO, ohci->id,
- "iso context %d talk on channel %d", i,
+ "Iso context %d talk on channel %d", i,
v.channel);
}
if (channel<0 || channel>(ISO_CHANNELS-1)) {
PRINT(KERN_ERR, ohci->id,
- "iso channel %d out of bound", channel);
+ "Iso channel %d out of bound", channel);
return -EFAULT;
}
mask = (u64)0x1<<channel;
if (!(ohci->ISO_channel_usage & mask)) {
PRINT(KERN_ERR, ohci->id,
- "channel %d is not being used", channel);
+ "Channel %d is not being used", channel);
return -EFAULT;
}
ohci->ISO_channel_usage &= ~mask;
free_dma_iso_ctx(&video->ir_context[i]);
PRINT(KERN_INFO, ohci->id,
- "iso context %d stop listening on channel %d",
+ "Iso context %d stop listening on channel %d",
i+1, channel);
}
else {
free_dma_iso_ctx(&video->it_context[i]);
PRINT(KERN_INFO, ohci->id,
- "iso context %d stop talking on channel %d",
+ "Iso context %d stop talking on channel %d",
i, channel);
}
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
- "buffer %d out of range",v.buffer);
+ "Buffer %d out of range",v.buffer);
return -EFAULT;
}
if (d->buffer_status[v.buffer]==VIDEO1394_BUFFER_QUEUED) {
PRINT(KERN_ERR, ohci->id,
- "buffer %d is already used",v.buffer);
+ "Buffer %d is already used",v.buffer);
spin_unlock_irqrestore(&d->lock,flags);
return -EFAULT;
}
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
- "buffer %d out of range",v.buffer);
+ "Buffer %d out of range",v.buffer);
return -EFAULT;
}
break;
default:
PRINT(KERN_ERR, ohci->id,
- "buffer %d is not queued",v.buffer);
+ "Buffer %d is not queued",v.buffer);
spin_unlock_irqrestore(&d->lock, flags);
return -EFAULT;
}
case VIDEO1394_TALK_QUEUE_BUFFER:
{
struct video1394_wait v;
+ struct video1394_queue_variable qv;
struct dma_iso_ctx *d;
int i;
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
- "buffer %d out of range",v.buffer);
+ "Buffer %d out of range",v.buffer);
return -EFAULT;
}
+ if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
+ if (copy_from_user(&qv, (void *)arg, sizeof(qv)))
+ return -EFAULT;
+ if (!access_ok(VERIFY_READ, qv.packet_sizes,
+ d->nb_cmd * sizeof(unsigned int))) {
+ return -EFAULT;
+ }
+ }
+
spin_lock_irqsave(&d->lock,flags);
if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
PRINT(KERN_ERR, ohci->id,
- "buffer %d is already used",v.buffer);
+ "Buffer %d is already used",v.buffer);
spin_unlock_irqrestore(&d->lock,flags);
return -EFAULT;
}
+ if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
+ initialize_dma_it_prg_var_packet_queue(
+ d, v.buffer, qv.packet_sizes,
+ ohci);
+ }
+
d->buffer_status[v.buffer]=VIDEO1394_BUFFER_QUEUED;
if (d->last_buffer>=0) {
d->it_prg[d->last_buffer]
- [d->nb_cmd-1].end.branchAddress =
+ [ d->last_used_cmd[d->last_buffer]
+ ].end.branchAddress =
(virt_to_bus(&(d->it_prg[v.buffer][0].begin.control))
& 0xfffffff0) | 0x3;
d->it_prg[d->last_buffer]
- [d->nb_cmd-1].begin.branchAddress =
+ [d->last_used_cmd[d->last_buffer]
+ ].begin.branchAddress =
(virt_to_bus(&(d->it_prg[v.buffer][0].begin.control))
& 0xfffffff0) | 0x3;
+ d->next_buffer[d->last_buffer] = v.buffer;
}
d->last_buffer = v.buffer;
+ d->next_buffer[d->last_buffer] = -1;
- d->it_prg[d->last_buffer][d->nb_cmd-1].end.branchAddress = 0;
+ d->it_prg[d->last_buffer][d->last_used_cmd[d->last_buffer]].end.branchAddress = 0;
spin_unlock_irqrestore(&d->lock,flags);
if ((v.buffer<0) || (v.buffer>d->num_desc)) {
PRINT(KERN_ERR, ohci->id,
- "buffer %d out of range",v.buffer);
+ "Buffer %d out of range",v.buffer);
return -EFAULT;
}
return 0;
default:
PRINT(KERN_ERR, ohci->id,
- "buffer %d is not queued",v.buffer);
+ "Buffer %d is not queued",v.buffer);
return -EFAULT;
}
}
int video1394_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct video_card *video =
- &video_cards[MINOR(file->f_dentry->d_inode->i_rdev)];
+ struct video_card *video = NULL;
struct ti_ohci *ohci;
- int res = -EINVAL;
+ int res = -EINVAL, flags;
+ struct list_head *lh;
+
+ spin_lock_irqsave(&video1394_cards_lock, flags);
+ if (!list_empty(&video1394_cards)) {
+ struct video_card *p;
+ list_for_each(lh, &video1394_cards) {
+ p = list_entry(lh, struct video_card, list);
+ if (p->id == MINOR(file->f_dentry->d_inode->i_rdev)) {
+ video = p;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
+
+ if (video == NULL) {
+ PRINT_G(KERN_ERR, __FUNCTION__": Unknown video card for minor %d",
+ MINOR(file->f_dentry->d_inode->i_rdev));
+ return -EFAULT;
+ }
lock_kernel();
ohci = video->ohci;
- PRINT(KERN_INFO, ohci->id, "mmap");
+
if (video->current_ctx == NULL) {
- PRINT(KERN_ERR, ohci->id, "current iso context not set");
+ PRINT(KERN_ERR, ohci->id, "Current iso context not set");
} else
res = do_iso_mmap(ohci, video->current_ctx,
(char *)vma->vm_start,
static int video1394_open(struct inode *inode, struct file *file)
{
- int i = MINOR(inode->i_rdev);
-
- if (i<0 || i>=num_of_video_cards) {
- PRINT(KERN_ERR, i, "ohci card %d not found", i);
- return -EIO;
+ int i = MINOR(inode->i_rdev), flags;
+ struct video_card *video = NULL;
+ struct list_head *lh;
+
+ spin_lock_irqsave(&video1394_cards_lock, flags);
+ if (!list_empty(&video1394_cards)) {
+ struct video_card *p;
+ list_for_each(lh, &video1394_cards) {
+ p = list_entry(lh, struct video_card, list);
+ if (p->id == i) {
+ video = p;
+ break;
+ }
+ }
}
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
- V22_COMPAT_MOD_INC_USE_COUNT;
+ if (video == NULL)
+ return -EIO;
- PRINT(KERN_INFO, i, "open");
+ MOD_INC_USE_COUNT;
return 0;
}
static int video1394_release(struct inode *inode, struct file *file)
{
- struct video_card *video = &video_cards[MINOR(inode->i_rdev)];
- struct ti_ohci *ohci= video->ohci;
+ struct video_card *video = NULL;
+ struct ti_ohci *ohci;
u64 mask;
- int i;
+ int i, flags;
+ struct list_head *lh;
+
+ spin_lock_irqsave(&video1394_cards_lock, flags);
+ if (!list_empty(&video1394_cards)) {
+ struct video_card *p;
+ list_for_each(lh, &video1394_cards) {
+ p = list_entry(lh, struct video_card, list);
+ if (p->id == MINOR(inode->i_rdev)) {
+ video = p;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
+
+ if (video == NULL) {
+ PRINT_G(KERN_ERR, __FUNCTION__": Unknown device for minor %d",
+ MINOR(inode->i_rdev));
+ return 1;
+ }
+
+ ohci = video->ohci;
lock_kernel();
for (i=0;i<ohci->nb_iso_rcv_ctx-1;i++)
mask = (u64)0x1<<video->ir_context[i]->channel;
if (!(ohci->ISO_channel_usage & mask))
PRINT(KERN_ERR, ohci->id,
- "channel %d is not being used",
+ "Channel %d is not being used",
video->ir_context[i]->channel);
else
ohci->ISO_channel_usage &= ~mask;
PRINT(KERN_INFO, ohci->id,
- "iso receive context %d stop listening "
+ "Iso receive context %d stop listening "
"on channel %d", i+1,
video->ir_context[i]->channel);
free_dma_iso_ctx(&video->ir_context[i]);
mask = (u64)0x1<<video->it_context[i]->channel;
if (!(ohci->ISO_channel_usage & mask))
PRINT(KERN_ERR, ohci->id,
- "channel %d is not being used",
+ "Channel %d is not being used",
video->it_context[i]->channel);
else
ohci->ISO_channel_usage &= ~mask;
PRINT(KERN_INFO, ohci->id,
- "iso transmit context %d stop talking "
+ "Iso transmit context %d stop talking "
"on channel %d", i+1,
video->it_context[i]->channel);
free_dma_iso_ctx(&video->it_context[i]);
}
+ MOD_DEC_USE_COUNT;
- V22_COMPAT_MOD_DEC_USE_COUNT;
-
- PRINT(KERN_INFO, ohci->id, "release");
unlock_kernel();
return 0;
}
void irq_handler(int card, quadlet_t isoRecvIntEvent,
quadlet_t isoXmitIntEvent)
{
- int i;
- struct video_card *video = &video_cards[card];
+ int i, flags;
+ struct video_card *video = NULL;
+ struct list_head *lh;
+
+ spin_lock_irqsave(&video1394_cards_lock, flags);
+ if (!list_empty(&video1394_cards)) {
+ struct video_card *p;
+ list_for_each(lh, &video1394_cards) {
+ p = list_entry(lh, struct video_card, list);
+ if (p->id == card) {
+ video = p;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
+
+ if (video == NULL) {
+ PRINT_G(KERN_ERR, __FUNCTION__": Unknown card number %d!!",
+ card);
+ return;
+ }
DBGMSG(card, "Iso event Recv: %08x Xmit: %08x",
isoRecvIntEvent, isoXmitIntEvent);
static struct file_operations video1394_fops=
{
- OWNER_THIS_MODULE
+ owner: THIS_MODULE,
ioctl: video1394_ioctl,
mmap: video1394_mmap,
open: video1394_open,
release: video1394_release
};
-static int video1394_init(int i, struct ti_ohci *ohci)
+static int video1394_init(struct ti_ohci *ohci)
{
- struct video_card *video = &video_cards[i];
+ struct video_card *video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
+ int flags;
+ char name[16];
+
+ if (video == NULL) {
+ PRINT(KERN_ERR, ohci->id, "Cannot allocate video_card");
+ return -1;
+ }
+
+ memset(video, 0, sizeof(struct video_card));
+
+ spin_lock_irqsave(&video1394_cards_lock, flags);
+ INIT_LIST_HEAD(&video->list);
+ list_add_tail(&video->list, &video1394_cards);
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
if (ohci1394_register_video(ohci, &video_tmpl)<0) {
- PRINT(KERN_ERR, i, "register_video failed");
+ PRINT(KERN_ERR, ohci->id, "Register_video failed");
return -1;
}
+ video->id = ohci->id;
video->ohci = ohci;
/* Iso receive dma contexts */
return -1;
}
+ sprintf(name, "%d", video->id);
+ video->devfs = devfs_register(devfs_handle, name,
+ DEVFS_FL_AUTO_OWNER,
+ VIDEO1394_MAJOR, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR,
+ &video1394_fops, NULL);
+
return 0;
}
+/* Takes video1394_cards_lock itself; do not call with the lock held */
static void remove_card(struct video_card *video)
{
- int i;
+ int i, flags;
ohci1394_unregister_video(video->ohci, &video_tmpl);
+ devfs_unregister(video->devfs);
+
/* Free the iso receive contexts */
if (video->ir_context) {
for (i=0;i<video->ohci->nb_iso_rcv_ctx-1;i++) {
}
kfree(video->it_context);
}
+ spin_lock_irqsave(&video1394_cards_lock, flags);
+ list_del(&video->list);
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
+
+ kfree(video);
}
-#ifdef MODULE
+static void video1394_remove_host (struct hpsb_host *host)
+{
+ struct ti_ohci *ohci;
+ int flags;
+ struct list_head *lh;
+
+ /* We only work with the OHCI-1394 driver */
+ if (strcmp(host->template->name, OHCI1394_DRIVER_NAME))
+ return;
+
+ ohci = (struct ti_ohci *)host->hostdata;
+
+ spin_lock_irqsave(&video1394_cards_lock, flags);
+ if (!list_empty(&video1394_cards)) {
+ struct video_card *p;
+ list_for_each(lh, &video1394_cards) {
+ p = list_entry(lh, struct video_card, list);
+ if (p->ohci == ohci) {
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
+ remove_card(p);
+ return;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&video1394_cards_lock, flags);
+
+ return;
+}
+
+static void video1394_add_host (struct hpsb_host *host)
+{
+ struct ti_ohci *ohci;
-/* EXPORT_NO_SYMBOLS; */
+ /* We only work with the OHCI-1394 driver */
+ if (strcmp(host->template->name, OHCI1394_DRIVER_NAME))
+ return;
+
+ ohci = (struct ti_ohci *)host->hostdata;
+
+ video1394_init(ohci);
+
+ return;
+}
+
+static struct hpsb_highlevel_ops hl_ops = {
+ add_host: video1394_add_host,
+ remove_host: video1394_remove_host,
+};
MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("driver for digital video on OHCI board");
MODULE_SUPPORTED_DEVICE("video1394");
-void cleanup_module(void)
+static void __exit video1394_exit_module (void)
{
- int i;
- unregister_chrdev(VIDEO1394_MAJOR, VIDEO1394_DRIVER_NAME);
+ hpsb_unregister_highlevel (hl_handle);
- for (i=0; i<num_of_video_cards; i++)
- remove_card(&video_cards[i]);
+ devfs_unregister(devfs_handle);
+ devfs_unregister_chrdev(VIDEO1394_MAJOR, VIDEO1394_DRIVER_NAME);
- printk(KERN_INFO "removed " VIDEO1394_DRIVER_NAME " module\n");
+ PRINT_G(KERN_INFO, "Removed " VIDEO1394_DRIVER_NAME " module\n");
}
-int init_module(void)
+static int __init video1394_init_module (void)
{
- struct ti_ohci *ohci;
- int i;
-
- memset(video_cards, 0, MAX_OHCI1394_CARDS * sizeof(struct video_card));
- num_of_video_cards = 0;
-
- for (i=0; i<MAX_OHCI1394_CARDS; i++) {
- ohci=ohci1394_get_struct(i);
- if (ohci) {
- num_of_video_cards++;
- video1394_init(i, ohci);
- }
- }
-
- if (!num_of_video_cards) {
- PRINT_G(KERN_INFO, "no ohci card found... init failed");
+ if (devfs_register_chrdev(VIDEO1394_MAJOR, VIDEO1394_DRIVER_NAME,
+ &video1394_fops)) {
+ PRINT_G(KERN_ERR, "video1394: unable to get major %d\n",
+ VIDEO1394_MAJOR);
return -EIO;
}
-
- if (register_chrdev(VIDEO1394_MAJOR, VIDEO1394_DRIVER_NAME,
- &video1394_fops)) {
- printk("video1394: unable to get major %d\n",
- VIDEO1394_MAJOR);
- return -EIO;
+ devfs_handle = devfs_mk_dir(NULL, VIDEO1394_DRIVER_NAME, NULL);
+
+ hl_handle = hpsb_register_highlevel (VIDEO1394_DRIVER_NAME, &hl_ops);
+ if (hl_handle == NULL) {
+ PRINT_G(KERN_ERR, "No more memory for driver\n");
+ devfs_unregister(devfs_handle);
+ devfs_unregister_chrdev(VIDEO1394_MAJOR, VIDEO1394_DRIVER_NAME);
+ return -ENOMEM;
}
- PRINT_G(KERN_INFO, "initialized with %d ohci cards",
- num_of_video_cards);
-
return 0;
}
-#endif /* MODULE */
-
-
+module_init(video1394_init_module);
+module_exit(video1394_exit_module);
/*
* video1394.h - driver for OHCI 1394 boards
* Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
VIDEO1394_TALK_WAIT_BUFFER
};
-#define VIDEO1394_SYNC_FRAMES 0x00000001
-#define VIDEO1394_INCLUDE_ISO_HEADERS 0x00000002
+#define VIDEO1394_SYNC_FRAMES 0x00000001
+#define VIDEO1394_INCLUDE_ISO_HEADERS 0x00000002
+#define VIDEO1394_VARIABLE_PACKET_SIZE 0x00000004
struct video1394_mmap {
- int channel;
- int sync_tag;
- int nb_buffers;
- int buf_size;
- int packet_size;
- int fps;
- int flags;
+ unsigned int channel;
+ unsigned int sync_tag;
+ unsigned int nb_buffers;
+ unsigned int buf_size;
+ unsigned int packet_size; /* For VARIABLE_PACKET_SIZE:
+ Maximum packet size */
+ unsigned int fps;
+ unsigned int flags;
+};
+
+/* For use with TALK_QUEUE_BUFFER when VIDEO1394_VARIABLE_PACKET_SIZE is set */
+struct video1394_queue_variable {
+ unsigned int channel;
+ unsigned int buffer;
+ unsigned int* packet_sizes; /* Buffer of size:
+ buf_size / packet_size */
};
struct video1394_wait {
- int channel;
- int buffer;
+ unsigned int channel;
+ unsigned int buffer;
};
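
As a rough illustration of how the new variable-packet-size path is meant to be driven from user space: for TALK_QUEUE_BUFFER the driver first reads a struct video1394_wait and, when VIDEO1394_VARIABLE_PACKET_SIZE was requested at setup time, re-reads the same argument as a struct video1394_queue_variable, so the two structures deliberately share their leading channel/buffer fields. The following is only a hypothetical sketch, not code from this patch: the function name, the number of packets per buffer, and the fixed 480-byte packet size are assumptions for illustration.

#include <stdlib.h>
#include <sys/ioctl.h>
#include "video1394.h"

/* Hypothetical example: queue one buffer on an iso talk context that was
 * set up with VIDEO1394_VARIABLE_PACKET_SIZE.  nb_packets would normally
 * be buf_size / packet_size as negotiated when the context was created. */
int queue_variable_buffer(int fd, unsigned int channel,
                          unsigned int buffer, unsigned int nb_packets)
{
        struct video1394_queue_variable qv;
        unsigned int i;

        qv.channel = channel;
        qv.buffer = buffer;
        qv.packet_sizes = malloc(nb_packets * sizeof(unsigned int));
        if (qv.packet_sizes == NULL)
                return -1;

        for (i = 0; i < nb_packets; i++)
                qv.packet_sizes[i] = 480;   /* e.g. DV-sized packets (assumed) */

        /* The same pointer also satisfies the initial video1394_wait read,
         * since channel/buffer come first in both structures. */
        if (ioctl(fd, VIDEO1394_TALK_QUEUE_BUFFER, &qv) < 0) {
                free(qv.packet_sizes);
                return -1;
        }

        free(qv.packet_sizes);
        return 0;
}
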
}
}
-static inline int jiftime(char *s, long mark)
+int jiftime(char *s, long mark)
{
s += 8;
* PPPoE --- PPP over Ethernet (RFC 2516)
*
*
- * Version: 0.6.6
+ * Version: 0.6.8
*
* 030700 : Fixed connect logic to allow for disconnect.
* 270700 : Fixed potential SMP problems; we must protect against
* 111100 : Fix recvmsg.
* 050101 : Fix PADT processing.
* 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey)
+ * 170701 : Do not lock_sock with rwlock held. (DaveM)
+ * Ignore discovery frames if user has socket
+ * locked. (DaveM)
+ * Ignore return value of dev_queue_xmit in __pppoe_xmit
+ * or else we may kfree an SKB twice. (DaveM)
+ * 190701 : When doing copies of skb's in __pppoe_xmit, always delete
+ * the original skb that was passed in on success, never on
+ * failure. Delete the copy of the skb on failure to avoid
+ * a memory leak.
*
- * Author: Michal Ostrowski <mostrows@styx.uwaterloo.ca>
+ * Author: Michal Ostrowski <mostrows@speakeasy.net>
* Contributors:
* Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
+ * David S. Miller (davem@redhat.com)
*
* License:
* This program is free software; you can redistribute it and/or
static int hash_item(unsigned long sid, unsigned char *addr)
{
- char hash=0;
- int i,j;
- for (i = 0; i < ETH_ALEN ; ++i){
- for (j = 0; j < 8/PPPOE_HASH_BITS ; ++j){
+ char hash = 0;
+ int i, j;
+
+ for (i = 0; i < ETH_ALEN ; ++i) {
+ for (j = 0; j < 8/PPPOE_HASH_BITS ; ++j) {
hash ^= addr[i] >> ( j * PPPOE_HASH_BITS );
}
}
read_lock_bh(&pppoe_hash_lock);
po = __get_item(sid, addr);
- if(po)
+ if (po)
sock_hold(po->sk);
read_unlock_bh(&pppoe_hash_lock);
* Certain device events require that sockets be unconnected.
*
**************************************************************************/
+
+static void pppoe_flush_dev(struct net_device *dev)
+{
+ int hash;
+
+ if (dev == NULL)
+ BUG();
+
+ read_lock_bh(&pppoe_hash_lock);
+ for (hash = 0; hash < PPPOE_HASH_SIZE; hash++) {
+ struct pppox_opt *po = item_hash_table[hash];
+
+ while (po != NULL) {
+ if (po->pppoe_dev == dev) {
+ struct sock *sk = po->sk;
+
+ sock_hold(sk);
+ po->pppoe_dev = NULL;
+
+ /* We hold a reference to SK, now drop the
+ * hash table lock so that we may attempt
+ * to lock the socket (which can sleep).
+ */
+ read_unlock_bh(&pppoe_hash_lock);
+
+ lock_sock(sk);
+
+ if (sk->state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ pppox_unbind_sock(sk);
+ dev_put(dev);
+ sk->state = PPPOX_DEAD;
+ sk->state_change(sk);
+ }
+
+ release_sock(sk);
+
+ sock_put(sk);
+
+ read_lock_bh(&pppoe_hash_lock);
+
+ /* Now restart from the beginning of this
+ * hash chain. We always NULL out pppoe_dev
+ * so we are guaranteed to make forward
+ * progress.
+ */
+ po = item_hash_table[hash];
+ continue;
+ }
+ po = po->next;
+ }
+ }
+ read_unlock_bh(&pppoe_hash_lock);
+}
+
static int pppoe_device_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
- int error = NOTIFY_DONE;
struct net_device *dev = (struct net_device *) ptr;
- struct pppox_opt *po = NULL;
- int hash = 0;
/* Only look at sockets that are using this specific device. */
switch (event) {
case NETDEV_CHANGEMTU:
- /* A change in mtu is a bad thing, requiring
- * LCP re-negotiation.
- */
+ /* A change in mtu is a bad thing, requiring
+ * LCP re-negotiation.
+ */
+
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
-
/* Find every socket on this device and kill it. */
- read_lock_bh(&pppoe_hash_lock);
-
- while (!po && hash < PPPOE_HASH_SIZE){
- po = item_hash_table[hash];
- ++hash;
- }
-
- while (po && hash < PPPOE_HASH_SIZE){
- if(po->pppoe_dev == dev){
- lock_sock(po->sk);
- if (po->sk->state & (PPPOX_CONNECTED|PPPOX_BOUND)){
- pppox_unbind_sock(po->sk);
-
- dev_put(po->pppoe_dev);
- po->pppoe_dev = NULL;
-
- po->sk->state = PPPOX_DEAD;
- po->sk->state_change(po->sk);
- }
- release_sock(po->sk);
- }
- if (po->next) {
- po = po->next;
- } else {
- po = NULL;
- while (!po && hash < PPPOE_HASH_SIZE){
- po = item_hash_table[hash];
- ++hash;
- }
- }
- }
- read_unlock_bh(&pppoe_hash_lock);
+ pppoe_flush_dev(dev);
break;
+
default:
break;
};
- return error;
+ return NOTIFY_DONE;
}
* Do the real work of receiving a PPPoE Session frame.
*
***********************************************************************/
-int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb){
- struct pppox_opt *po=sk->protinfo.pppox;
+int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
+{
+ struct pppox_opt *po = sk->protinfo.pppox;
struct pppox_opt *relay_po = NULL;
if (sk->state & PPPOX_BOUND) {
skb_pull(skb, sizeof(struct pppoe_hdr));
-
ppp_input(&po->chan, skb);
- } else if( sk->state & PPPOX_RELAY ){
+ } else if (sk->state & PPPOX_RELAY) {
+ relay_po = get_item_by_addr(&po->pppoe_relay);
- relay_po = get_item_by_addr( &po->pppoe_relay );
-
- if( relay_po == NULL ||
- !( relay_po->sk->state & PPPOX_CONNECTED ) ){
- goto abort;
- }
+ if (relay_po == NULL)
+ goto abort_kfree;
+
+ if ((relay_po->sk->state & PPPOX_CONNECTED) == 0)
+ goto abort_put;
skb_pull(skb, sizeof(struct pppoe_hdr));
- if( !__pppoe_xmit( relay_po->sk , skb) ){
- goto abort;
- }
+ if (!__pppoe_xmit( relay_po->sk , skb))
+ goto abort_put;
} else {
sock_queue_rcv_skb(sk, skb);
}
- return 1;
-abort:
- if(relay_po)
- sock_put(relay_po->sk);
- return 0;
-
-}
+ return NET_RX_SUCCESS;
+abort_put:
+ sock_put(relay_po->sk);
+abort_kfree:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
/************************************************************************
*
po = get_item((unsigned long) ph->sid, skb->mac.ethernet->h_source);
- if(!po){
+ if (!po) {
kfree_skb(skb);
- return 0;
+ return NET_RX_DROP;
}
sk = po->sk;
bh_lock_sock(sk);
/* Socket state is unknown, must put skb into backlog. */
- if( sk->lock.users != 0 ){
- sk_add_backlog( sk, skb);
- ret = 1;
- }else{
+ if (sk->lock.users != 0) {
+ sk_add_backlog(sk, skb);
+ ret = NET_RX_SUCCESS;
+ } else {
ret = pppoe_rcv_core(sk, skb);
}
bh_unlock_sock(sk);
sock_put(sk);
+
return ret;
}
{
struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw;
struct pppox_opt *po;
- struct sock *sk = NULL;
if (ph->code != PADT_CODE)
goto abort;
po = get_item((unsigned long) ph->sid, skb->mac.ethernet->h_source);
+ if (po) {
+ struct sock *sk = po->sk;
- if (!po)
- goto abort;
+ bh_lock_sock(sk);
- sk = po->sk;
+ /* If the user has locked the socket, just ignore
+ * the packet. With the way two rcv protocols hook into
+ * one socket family type, we cannot (easily) distinguish
+ * what kind of SKB it is during backlog rcv.
+ */
+ if (sk->lock.users == 0)
+ pppox_unbind_sock(sk);
- pppox_unbind_sock(sk);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ }
- sock_put(sk);
- abort:
+abort:
kfree_skb(skb);
- return 0;
+ return NET_RX_SUCCESS; /* Lies... :-) */
}
struct packet_type pppoes_ptype = {
struct sock *sk = sock->sk;
struct net_device *dev = NULL;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
- struct pppox_opt *po=sk->protinfo.pppox;
+ struct pppox_opt *po = sk->protinfo.pppox;
int error;
lock_sock(sk);
po->pppoe_dev = dev;
- if( ! (dev->flags & IFF_UP) )
+ if (!(dev->flags & IFF_UP))
goto err_put;
+
memcpy(&po->pppoe_pa,
&sp->sa_addr.pppoe,
sizeof(struct pppoe_addr));
/* The PPPoE address from the user specifies an outbound
   PPPoE address to which frames are forwarded */
err = -EFAULT;
- if( copy_from_user(&po->pppoe_relay,
+ if (copy_from_user(&po->pppoe_relay,
(void*)arg,
sizeof(struct sockaddr_pppox)))
break;
dev = sk->protinfo.pppox->pppoe_dev;
error = -EMSGSIZE;
- if(total_len > dev->mtu+dev->hard_header_len)
+ if (total_len > (dev->mtu + dev->hard_header_len))
goto end;
ph = (struct pppoe_hdr *) skb_put(skb, total_len + sizeof(struct pppoe_hdr));
start = (char *) &ph->tag[0];
- error = memcpy_fromiovec( start, m->msg_iov, total_len);
+ error = memcpy_fromiovec(start, m->msg_iov, total_len);
if (error < 0) {
kfree_skb(skb);
dev_queue_xmit(skb);
- end:
+end:
release_sock(sk);
return error;
}
+
/************************************************************************
*
* xmit function for internal use.
struct pppoe_hdr *ph;
int headroom = skb_headroom(skb);
int data_len = skb->len;
+ struct sk_buff *skb2;
- if (sk->dead || !(sk->state & PPPOX_CONNECTED)) {
+ if (sk->dead || !(sk->state & PPPOX_CONNECTED))
goto abort;
- }
hdr.ver = 1;
hdr.type = 1;
hdr.sid = sk->num;
hdr.length = htons(skb->len);
- if (!dev) {
+ if (!dev)
goto abort;
- }
/* Copy the skb if there is no space for the header. */
if (headroom < (sizeof(struct pppoe_hdr) + dev->hard_header_len)) {
- struct sk_buff *skb2;
-
skb2 = dev_alloc_skb(32+skb->len +
sizeof(struct pppoe_hdr) +
dev->hard_header_len);
+ if (skb2 == NULL)
+ goto abort;
+
skb_reserve(skb2, dev->hard_header_len + sizeof(struct pppoe_hdr));
memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
-
- skb_unlink(skb);
- kfree_skb(skb);
- skb = skb2;
+ } else {
+ /* Make a clone so as not to disturb the original skb,
+ * and give dev_queue_xmit something it can free.
+ */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2 == NULL)
+ goto abort;
 }
- ph = (struct pppoe_hdr *) skb_push(skb, sizeof(struct pppoe_hdr));
+ ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr));
memcpy(ph, &hdr, sizeof(struct pppoe_hdr));
- skb->protocol = __constant_htons(ETH_P_PPP_SES);
+ skb2->protocol = __constant_htons(ETH_P_PPP_SES);
- skb->nh.raw = skb->data;
+ skb2->nh.raw = skb2->data;
- skb->dev = dev;
+ skb2->dev = dev;
- dev->hard_header(skb, dev, ETH_P_PPP_SES,
+ dev->hard_header(skb2, dev, ETH_P_PPP_SES,
sk->protinfo.pppox->pppoe_pa.remote,
NULL, data_len);
- if (dev_queue_xmit(skb) < 0)
+ /* We're transmitting skb2, and assuming that dev_queue_xmit
+ * will free it. The generic ppp layer however, is expecting
+ * that we give back 'skb' (not 'skb2') in case of failure,
+ * but free it in case of success.
+ */
+
+ if (dev_queue_xmit(skb2) < 0)
goto abort;
+ kfree_skb(skb);
return 1;
- abort:
+
+abort:
return 0;
}
int err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto);
if (err == 0) {
- printk(KERN_INFO "Registered PPPoE v0.6.5\n");
-
dev_add_pack(&pppoes_ptype);
dev_add_pack(&pppoed_ptype);
register_netdevice_notifier(&pppoe_notifier);
err = sock_register(&pppox_proto_family);
- if (err == 0) {
- printk(KERN_INFO "Registered PPPoX v0.5\n");
- }
-
return err;
}
#include <linux/delay.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/uaccess.h>
+#include <asm/io.h>
#ifdef HAS_FIRMWARE
#include "starfire_firmware.h"
* return the value. I chose the former way.
*/
#ifndef CONFIG_PCI
-/*static*/ int shift_state;
+int shift_state;
+struct kbd_struct kbd_table[MAX_NR_CONSOLES];
#endif
static int npadch = -1; /* -1 or number assembled on pad */
static unsigned char diacr;
static char rep; /* flag telling character repeat */
-struct kbd_struct kbd_table[MAX_NR_CONSOLES];
static struct tty_struct **ttytab;
static struct kbd_struct * kbd = kbd_table;
static struct tty_struct * tty;
set_leds();
}
+#ifdef CONFIG_PCI
+extern int spawnpid, spawnsig;
+#else
int spawnpid, spawnsig;
+#endif
+
static void spawn_console(void)
{
#include <linux/timer.h>
#include <linux/ioport.h> // request_region() prototype
#include <linux/vmalloc.h> // ioremap()
+#include <linux/completion.h>
#ifdef __alpha__
#define __KERNEL_SYSCALLS__
#endif
req = &SCpnt->request;
req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
- if (req->sem != NULL) {
- up(req->sem);
+ if (req->waiting != NULL) {
+ complete(req->waiting);
}
}
spin_lock_irqsave(&io_request_lock, flags);
{
- DECLARE_MUTEX_LOCKED(sem);
- ScsiPassThruCmnd->request.sem = &sem;
+ DECLARE_COMPLETION(wait);
+ ScsiPassThruCmnd->request.waiting = &wait;
// eventually gets us to our own _quecommand routine
scsi_do_cmd( ScsiPassThruCmnd, &vendor_cmd->cdb[0],
buf,
spin_unlock_irqrestore(&io_request_lock, flags);
// Other I/Os can now resume; we wait for our ioctl
// command to complete
- down(&sem);
+ wait_for_completion(&wait);
spin_lock_irqsave(&io_request_lock, flags);
- ScsiPassThruCmnd->request.sem = NULL;
+ ScsiPassThruCmnd->request.waiting = NULL;
}
result = ScsiPassThruCmnd->result;
*/
#include "gdth_ioctl.h"
+#include <linux/completion.h>
int gdth_proc_info(char *buffer,char **start,off_t offset,int length,
int hostno,int inout)
char *cmnd, int timeout)
{
unsigned bufflen;
-#if LINUX_VERSION_CODE >= 0x020322
- DECLARE_MUTEX_LOCKED(sem);
-#else
- struct semaphore sem = MUTEX_LOCKED;
-#endif
+ DECLARE_COMPLETION(wait);
TRACE2(("gdth_do_cmd()\n"));
if (gdtcmd != NULL) {
bufflen = 0;
}
scp->request.rq_status = RQ_SCSI_BUSY;
- scp->request.sem = &sem;
+ scp->request.waiting = &wait;
#if LINUX_VERSION_CODE >= 0x020322
scsi_do_cmd(scp, cmnd, gdtcmd, bufflen, gdth_scsi_done, timeout*HZ, 1);
#else
scsi_do_cmd(scp, cmnd, gdtcmd, bufflen, gdth_scsi_done, timeout*HZ, 1);
GDTH_UNLOCK_SCSI_DOCMD();
#endif
- down(&sem);
+ wait_for_completion(&wait);
}
void gdth_scsi_done(Scsi_Cmnd *scp)
scp->request.rq_status = RQ_SCSI_DONE;
- if (scp->request.sem != NULL)
- up(scp->request.sem);
+ if (scp->request.waiting != NULL)
+ complete(scp->request.waiting);
}
static int gdth_ioctl_alloc(int hanum, ushort size)
#if DEBUG
STp->write_pending = 0;
#endif
- up(SCpnt->request.sem);
+ complete(SCpnt->request.waiting);
}
#if DEBUG
else if (debugging)
if (SRpnt->sr_device->scsi_level <= SCSI_2)
cmd[1] |= (SRpnt->sr_device->lun << 5) & 0xe0;
- init_MUTEX_LOCKED(&STp->sem);
+ init_completion(&STp->wait);
SRpnt->sr_use_sg = (bytes > (STp->buffer)->sg[0].length) ?
(STp->buffer)->use_sg : 0;
if (SRpnt->sr_use_sg) {
bp = (STp->buffer)->b_data;
SRpnt->sr_data_direction = direction;
SRpnt->sr_cmd_len = 0;
- SRpnt->sr_request.sem = &(STp->sem);
+ SRpnt->sr_request.waiting = &(STp->wait);
SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
SRpnt->sr_request.rq_dev = STp->devt;
scsi_do_req(SRpnt, (void *)cmd, bp, bytes, osst_sleep_done, timeout, retries);
if (do_wait) {
- down(SRpnt->sr_request.sem);
- SRpnt->sr_request.sem = NULL;
+ wait_for_completion(SRpnt->sr_request.waiting);
+ SRpnt->sr_request.waiting = NULL;
STp->buffer->syscall_result = osst_chk_result(STp, SRpnt);
#ifdef OSST_INJECT_ERRORS
if (STp->buffer->syscall_result == 0 &&
else
STp->nbr_finished++;
#endif
- down(&(STp->sem));
- (STp->buffer)->last_SRpnt->sr_request.sem = NULL;
+ wait_for_completion(&(STp->wait));
+ (STp->buffer)->last_SRpnt->sr_request.waiting = NULL;
STp->buffer->syscall_result = osst_chk_result(STp, STp->buffer->last_SRpnt);
#include <asm/byteorder.h>
#include <linux/config.h>
+#include <linux/completion.h>
#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>
#endif
unsigned capacity;
Scsi_Device* device;
struct semaphore lock; /* for serialization */
- struct semaphore sem; /* for SCSI commands */
+ struct completion wait; /* for SCSI commands */
OSST_buffer * buffer;
/* Drive characteristics */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
+#include <linux/completion.h>
#define __KERNEL_SYSCALLS__
req = &SCpnt->request;
req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
- if (req->sem != NULL) {
- up(req->sem);
+ if (req->waiting != NULL) {
+ complete(req->waiting);
}
}
}
SCpnt->request.rq_status = RQ_SCSI_BUSY;
- SCpnt->request.sem = NULL; /* And no one is waiting for this
+ SCpnt->request.waiting = NULL; /* And no one is waiting for this
* to complete */
atomic_inc(&SCpnt->host->host_active);
atomic_inc(&SCpnt->device->device_active);
void *buffer, unsigned bufflen,
int timeout, int retries)
{
- DECLARE_MUTEX_LOCKED(sem);
+ DECLARE_COMPLETION(wait);
- SRpnt->sr_request.sem = &sem;
+ SRpnt->sr_request.waiting = &wait;
SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
scsi_do_req (SRpnt, (void *) cmnd,
buffer, bufflen, scsi_wait_done, timeout, retries);
- down (&sem);
- SRpnt->sr_request.sem = NULL;
+ wait_for_completion(&wait);
+ SRpnt->sr_request.waiting = NULL;
if( SRpnt->sr_command != NULL )
{
scsi_release_command(SRpnt->sr_command);
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/smp_lock.h>
+#include <linux/completion.h>
#define __KERNEL_SYSCALLS__
* request, wake them up. Typically used to wake up processes trying
* to swap a page into memory.
*/
- if (req->sem != NULL) {
- up(req->sem);
+ if (req->waiting != NULL) {
+ complete(req->waiting);
}
add_blkdev_randomness(MAJOR(req->rq_dev));
(STp->buffer)->last_SRpnt = SCpnt->sc_request;
DEB( STp->write_pending = 0; )
- up(SCpnt->request.sem);
+ complete(SCpnt->request.waiting);
}
DEB(
else if (debugging)
if (SRpnt->sr_device->scsi_level <= SCSI_2)
cmd[1] |= (SRpnt->sr_device->lun << 5) & 0xe0;
- init_MUTEX_LOCKED(&STp->sem);
+ init_completion(&STp->wait);
SRpnt->sr_use_sg = (bytes > (STp->buffer)->sg[0].length) ?
(STp->buffer)->use_sg : 0;
if (SRpnt->sr_use_sg) {
bp = (STp->buffer)->b_data;
SRpnt->sr_data_direction = direction;
SRpnt->sr_cmd_len = 0;
- SRpnt->sr_request.sem = &(STp->sem);
+ SRpnt->sr_request.waiting = &(STp->wait);
SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
SRpnt->sr_request.rq_dev = STp->devt;
st_sleep_done, timeout, retries);
if (do_wait) {
- down(SRpnt->sr_request.sem);
- SRpnt->sr_request.sem = NULL;
+ wait_for_completion(SRpnt->sr_request.waiting);
+ SRpnt->sr_request.waiting = NULL;
(STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
}
return SRpnt;
STp->nbr_finished++;
) /* end DEB */
- down(&(STp->sem));
- (STp->buffer)->last_SRpnt->sr_request.sem = NULL;
+ wait_for_completion(&(STp->wait));
+ (STp->buffer)->last_SRpnt->sr_request.waiting = NULL;
(STp->buffer)->syscall_result = st_chk_result(STp, (STp->buffer)->last_SRpnt);
scsi_release_request((STp->buffer)->last_SRpnt);
#include "scsi.h"
#endif
#include <linux/devfs_fs_kernel.h>
+#include <linux/completion.h>
/* The tape buffer descriptor. */
typedef struct {
kdev_t devt;
Scsi_Device *device;
struct semaphore lock; /* For serialization */
- struct semaphore sem; /* For SCSI commands */
+ struct completion wait; /* For SCSI commands */
ST_buffer *buffer;
/* Drive characteristics */
pr_debug("AFFS: remove_header(key=%ld)\n", inode->i_ino);
retval = -EIO;
- bh = affs_bread(sb, (u32)dentry->d_fsdata);
+ bh = affs_bread(sb, (u32)(long)dentry->d_fsdata);
if (!bh)
goto done;
* Currently only a stub-function.
*
* Note that setuid/setgid files won't make a core-dump if the uid/gid
- * changed due to the set[u|g]id. It's enforced by the "current->dumpable"
+ * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
* field, which also makes sure the core-dumps won't be recursive if the
* dumping of the process results in another error..
*/
<directory_structure>
<data>
-<superblock>: struct cramfs_super (see cramfs.h).
+<superblock>: struct cramfs_super (see cramfs_fs.h).
<directory_structure>:
For each file:
- struct cramfs_inode (see cramfs.h).
+ struct cramfs_inode (see cramfs_fs.h).
Filename. Not generally null-terminated, but it is
null-padded to a multiple of 4 bytes.
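
Concretely, the namelen field in struct cramfs_inode (see cramfs_fs.h) stores that padded length divided by 4. A small illustrative helper, not part of the patch, shows the relationship between a file name and what ends up on disk:

#include <string.h>

/* Illustrative only: how a cramfs directory entry's name is sized.
 * namelen in struct cramfs_inode holds the padded length divided by 4. */
static unsigned int cramfs_namelen_field(const char *name)
{
        size_t len = strlen(name);

        return (len + 3) / 4;   /* round up to a multiple of 4, then divide by 4 */
}

/* Bytes actually occupied by the (null-padded) name on disk. */
static unsigned int cramfs_name_bytes(const char *name)
{
        return cramfs_namelen_field(name) * 4;
}
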
+++ /dev/null
-#ifndef __CRAMFS_H
-#define __CRAMFS_H
-
-#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
-#define CRAMFS_SIGNATURE "Compressed ROMFS"
-
-/*
- * Reasonably terse representation of the inode data.
- */
-struct cramfs_inode {
- u32 mode:16, uid:16;
- /* SIZE for device files is i_rdev */
- u32 size:24, gid:8;
- /* NAMELEN is the length of the file name, divided by 4 and
- rounded up. (cramfs doesn't support hard links.) */
- /* OFFSET: For symlinks and non-empty regular files, this
- contains the offset (divided by 4) of the file data in
- compressed form (starting with an array of block pointers;
- see README). For non-empty directories it is the offset
- (divided by 4) of the inode of the first file in that
- directory. For anything else, offset is zero. */
- u32 namelen:6, offset:26;
-};
-
-/*
- * Superblock information at the beginning of the FS.
- */
-struct cramfs_super {
- u32 magic; /* 0x28cd3d45 - random number */
- u32 size; /* Not used. mkcramfs currently
- writes a constant 1<<16 here. */
- u32 flags; /* 0 */
- u32 future; /* 0 */
- u8 signature[16]; /* "Compressed ROMFS" */
- u8 fsid[16]; /* random number */
- u8 name[16]; /* user-defined name */
- struct cramfs_inode root; /* Root inode data */
-};
-
-/*
- * Valid values in super.flags. Currently we refuse to mount
- * if (flags & ~CRAMFS_SUPPORTED_FLAGS). Maybe that should be
- * changed to test super.future instead.
- */
-#define CRAMFS_SUPPORTED_FLAGS (0xff)
-
-/* Uncompression interfaces to the underlying zlib */
-int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen);
-int cramfs_uncompress_init(void);
-int cramfs_uncompress_exit(void);
-
-#endif
#include <linux/init.h>
#include <linux/string.h>
#include <linux/locks.h>
+#include <linux/blkdev.h>
+#include <linux/cramfs_fs.h>
#include <asm/uaccess.h>
-#include "cramfs.h"
+#define CRAMFS_SB_MAGIC u.cramfs_sb.magic
+#define CRAMFS_SB_SIZE u.cramfs_sb.size
+#define CRAMFS_SB_BLOCKS u.cramfs_sb.blocks
+#define CRAMFS_SB_FILES u.cramfs_sb.files
+#define CRAMFS_SB_FLAGS u.cramfs_sb.flags
static struct super_operations cramfs_ops;
static struct inode_operations cramfs_dir_inode_operations;
inode->i_mode = cramfs_inode->mode;
inode->i_uid = cramfs_inode->uid;
inode->i_size = cramfs_inode->size;
+ inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
+ inode->i_blksize = PAGE_CACHE_SIZE;
inode->i_gid = cramfs_inode->gid;
inode->i_ino = CRAMINO(cramfs_inode);
/* inode->i_nlink is left 1 - arguably wrong for directories,
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
struct buffer_head * bh_array[BLKS_PER_BUF];
- unsigned i, blocknr, buffer;
+ struct buffer_head * read_array[BLKS_PER_BUF];
+ unsigned i, blocknr, buffer, unread;
+ unsigned long devsize;
+ int major, minor;
+
char *data;
if (!len)
return read_buffers[i] + blk_offset;
}
+ devsize = ~0UL;
+ major = MAJOR(sb->s_dev);
+ minor = MINOR(sb->s_dev);
+
+ if (blk_size[major])
+ devsize = blk_size[major][minor] >> 2;
+
/* Ok, read in BLKS_PER_BUF pages completely first. */
- for (i = 0; i < BLKS_PER_BUF; i++)
- bh_array[i] = bread(sb->s_dev, blocknr + i, PAGE_CACHE_SIZE);
+ unread = 0;
+ for (i = 0; i < BLKS_PER_BUF; i++) {
+ struct buffer_head *bh;
+
+ bh = NULL;
+ if (blocknr + i < devsize) {
+ bh = getblk(sb->s_dev, blocknr + i, PAGE_CACHE_SIZE);
+ if (!buffer_uptodate(bh))
+ read_array[unread++] = bh;
+ }
+ bh_array[i] = bh;
+ }
+
+ if (unread) {
+ ll_rw_block(READ, unread, read_array);
+ do {
+ unread--;
+ wait_on_buffer(read_array[unread]);
+ } while (unread);
+ }
/* Ok, copy them to the staging area without sleeping. */
buffer = next_buffer;
/* Do sanity checks on the superblock */
if (super.magic != CRAMFS_MAGIC) {
- printk("wrong magic\n");
- goto out;
- }
- if (memcmp(super.signature, CRAMFS_SIGNATURE, sizeof(super.signature))) {
- printk("wrong signature\n");
- goto out;
+ /* check at 512 byte offset */
+ memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
+ if (super.magic != CRAMFS_MAGIC) {
+ printk(KERN_ERR "cramfs: wrong magic\n");
+ goto out;
+ }
}
+
+ /* get feature flags first */
if (super.flags & ~CRAMFS_SUPPORTED_FLAGS) {
- printk("unsupported filesystem features\n");
+ printk(KERN_ERR "cramfs: unsupported filesystem features\n");
goto out;
}
/* Check that the root inode is in a sane state */
if (!S_ISDIR(super.root.mode)) {
- printk("root is not a directory\n");
+ printk(KERN_ERR "cramfs: root is not a directory\n");
goto out;
}
root_offset = super.root.offset << 2;
+ if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
+ sb->CRAMFS_SB_SIZE = super.size;
+ sb->CRAMFS_SB_BLOCKS = super.fsid.blocks;
+ sb->CRAMFS_SB_FILES = super.fsid.files;
+ } else {
+ sb->CRAMFS_SB_SIZE = 1 << 28;
+ sb->CRAMFS_SB_BLOCKS = 0;
+ sb->CRAMFS_SB_FILES = 0;
+ }
+ sb->CRAMFS_SB_MAGIC = super.magic;
+ sb->CRAMFS_SB_FLAGS = super.flags;
if (root_offset == 0)
- printk(KERN_INFO "cramfs: note: empty filesystem");
- else if (root_offset != sizeof(struct cramfs_super)) {
- printk("bad root offset %lu\n", root_offset);
+ printk(KERN_INFO "cramfs: empty filesystem");
+ else if (!(super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
+ ((root_offset != sizeof(struct cramfs_super)) &&
+ (root_offset != 512 + sizeof(struct cramfs_super))))
+ {
+ printk(KERN_ERR "cramfs: bad root offset %lu\n", root_offset);
goto out;
}
/* Set it all up.. */
- sb->s_op = &cramfs_ops;
- sb->s_root = d_alloc_root(get_cramfs_inode(sb, &super.root));
+ sb->s_op = &cramfs_ops;
+ sb->s_root = d_alloc_root(get_cramfs_inode(sb, &super.root));
retval = sb;
out:
return retval;
{
buf->f_type = CRAMFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
+ buf->f_blocks = sb->CRAMFS_SB_BLOCKS;
buf->f_bfree = 0;
buf->f_bavail = 0;
+ buf->f_files = sb->CRAMFS_SB_FILES;
buf->f_ffree = 0;
buf->f_namelen = 255;
return 0;
static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry)
{
unsigned int offset = 0;
+ int sorted = dir->i_sb->CRAMFS_SB_FLAGS & CRAMFS_FLAG_SORTED_DIRS;
while (offset < dir->i_size) {
struct cramfs_inode *de;
char *name;
- int namelen;
+ int namelen, retval;
de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+256);
name = (char *)(de+1);
+
+ /* Try to take advantage of sorted directories */
+ if (sorted && (dentry->d_name.name[0] < name[0]))
+ break;
+
namelen = de->namelen << 2;
offset += sizeof(*de) + namelen;
}
if (namelen != dentry->d_name.len)
continue;
- if (memcmp(dentry->d_name.name, name, namelen))
+ retval = memcmp(dentry->d_name.name, name, namelen);
+ if (retval > 0)
continue;
- d_add(dentry, get_cramfs_inode(dir->i_sb, de));
- return NULL;
+ if (!retval) {
+ d_add(dentry, get_cramfs_inode(dir->i_sb, de));
+ return NULL;
+ }
+ /* else (retval < 0) */
+ if (sorted)
+ break;
}
d_add(dentry, NULL);
return NULL;
current->sas_ss_sp = current->sas_ss_size = 0;
if (current->euid == current->uid && current->egid == current->gid)
- current->dumpable = 1;
+ current->mm->dumpable = 1;
name = bprm->filename;
for (i=0; (ch = *(name++)) != '\0';) {
if (ch == '/')
if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
permission(bprm->file->f_dentry->d_inode,MAY_READ))
- current->dumpable = 0;
+ current->mm->dumpable = 0;
/* An exec changes our domain. We are no longer part of the thread
group */
if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
!cap_issubset(new_permitted, current->cap_permitted)) {
- current->dumpable = 0;
+ current->mm->dumpable = 0;
lock_kernel();
if (must_not_trace_exec(current)
binfmt = current->binfmt;
if (!binfmt || !binfmt->core_dump)
goto fail;
- if (!current->dumpable)
+ if (!current->mm->dumpable)
goto fail;
- current->dumpable = 0;
+ current->mm->dumpable = 0;
if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
goto fail;
"as nfsd port\n", port);
}
- if ((port = root_nfs_getport(NFS_MNT_PROGRAM, nfsd_ver, proto)) < 0) {
+ if ((port = root_nfs_getport(NFS_MNT_PROGRAM, mountd_ver, proto)) < 0) {
printk(KERN_ERR "Root-NFS: Unable to get mountd port "
"number from server, using default\n");
port = mountd_port;
inode->u.proc_i.task = task;
inode->i_uid = 0;
inode->i_gid = 0;
- if (ino == PROC_PID_INO || task->dumpable) {
+ if (ino == PROC_PID_INO || task->mm->dumpable) {
inode->i_uid = task->euid;
inode->i_gid = task->egid;
}
regs->ar_bspstore = IA64_RBS_BOT; \
regs->ar_fpsr = FPSR_DEFAULT; \
regs->loadrs = 0; \
- regs->r8 = current->dumpable; /* set "don't zap registers" flag */ \
+ regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \
regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
- if (!__builtin_expect (current->dumpable, 1)) { \
+ if (!__builtin_expect (current->mm->dumpable, 1)) { \
/* \
* Zap scratch regs to avoid leaking bits between processes with different \
* uid/privileges. \
-/* $Id: bitops.h,v 1.62 2001/07/07 10:58:22 davem Exp $
+/* $Id: bitops.h,v 1.63 2001/07/17 16:17:33 anton Exp $
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright 2001 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC_BITOPS_H
#include <linux/kernel.h>
#include <asm/byteorder.h>
-
-#ifndef __KERNEL__
-
-/* User mode bitops, defined here for convenience. Note: these are not
- * atomic, so packages like nthreads should do some locking around these
- * themself.
- */
-
-extern __inline__ unsigned long set_bit(unsigned long nr, void *addr)
-{
- int mask;
- unsigned long *ADDR = (unsigned long *) addr;
-
- ADDR += nr >> 5;
- mask = 1 << (nr & 31);
- __asm__ __volatile__("
- ld [%0], %%g3
- or %%g3, %2, %%g2
- st %%g2, [%0]
- and %%g3, %2, %0
- "
- : "=&r" (ADDR)
- : "0" (ADDR), "r" (mask)
- : "g2", "g3");
-
- return (unsigned long) ADDR;
-}
-
-extern __inline__ unsigned long clear_bit(unsigned long nr, void *addr)
-{
- int mask;
- unsigned long *ADDR = (unsigned long *) addr;
-
- ADDR += nr >> 5;
- mask = 1 << (nr & 31);
- __asm__ __volatile__("
- ld [%0], %%g3
- andn %%g3, %2, %%g2
- st %%g2, [%0]
- and %%g3, %2, %0
- "
- : "=&r" (ADDR)
- : "0" (ADDR), "r" (mask)
- : "g2", "g3");
-
- return (unsigned long) ADDR;
-}
-
-extern __inline__ void change_bit(unsigned long nr, void *addr)
-{
- int mask;
- unsigned long *ADDR = (unsigned long *) addr;
-
- ADDR += nr >> 5;
- mask = 1 << (nr & 31);
- __asm__ __volatile__("
- ld [%0], %%g3
- xor %%g3, %2, %%g2
- st %%g2, [%0]
- and %%g3, %2, %0
- "
- : "=&r" (ADDR)
- : "0" (ADDR), "r" (mask)
- : "g2", "g3");
-}
-
-#else /* __KERNEL__ */
-
#include <asm/system.h>
-/* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
+#ifdef __KERNEL__
+
+/*
+ * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
-
-extern __inline__ int test_and_set_bit(unsigned long nr, volatile void *addr)
+static __inline__ int test_and_set_bit(unsigned long nr, volatile void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
return mask != 0;
}
-extern __inline__ void set_bit(unsigned long nr, volatile void *addr)
+static __inline__ void set_bit(unsigned long nr, volatile void *addr)
{
(void) test_and_set_bit(nr, addr);
}
-extern __inline__ int test_and_clear_bit(unsigned long nr, volatile void *addr)
+static __inline__ int test_and_clear_bit(unsigned long nr, volatile void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
return mask != 0;
}
-extern __inline__ void clear_bit(unsigned long nr, volatile void *addr)
+static __inline__ void clear_bit(unsigned long nr, volatile void *addr)
{
(void) test_and_clear_bit(nr, addr);
}
-#define test_and_change_bit(n, a) __test_and_change_bit(n, a)
-
-extern __inline__ int __test_and_change_bit(unsigned long nr, volatile void *addr)
+static __inline__ int test_and_change_bit(unsigned long nr, volatile void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
return mask != 0;
}
-#define change_bit(n, a) __change_bit(n, a)
-
-extern __inline__ void __change_bit(unsigned long nr, volatile void *addr)
+static __inline__ void change_bit(unsigned long nr, volatile void *addr)
{
(void) test_and_change_bit(nr, addr);
}
-#endif /* __KERNEL__ */
+/*
+ * non-atomic versions
+ */
+static __inline__ void __set_bit(int nr, volatile void *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
+
+ *p |= mask;
+}
+
+static __inline__ void __clear_bit(int nr, volatile void *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
+
+ *p &= ~mask;
+}
+
+static __inline__ void __change_bit(int nr, volatile void *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
+
+ *p ^= mask;
+}
+
+static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
+{
+ unsigned long mask = 1UL << (nr & 0x1f);
+ unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
/* The following routine need not be atomic. */
-extern __inline__ int test_bit(int nr, __const__ void *addr)
+static __inline__ int test_bit(int nr, __const__ void *addr)
{
return (1 & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31))) != 0;
}
/* The easy/cheese version for now. */
-extern __inline__ unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
{
unsigned long result = 0;
return result;
}
-#ifdef __KERNEL__
-
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-
#define ffs(x) generic_ffs(x)
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
-
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
-#endif /* __KERNEL__ */
-
-/* find_next_zero_bit() finds the first zero bit in a bit string of length
+/*
+ * find_next_zero_bit() finds the first zero bit in a bit string of length
* 'size' bits, starting the search at bit 'offset'. This is largely based
* on Linus's ALPHA routines, which are pretty portable BTW.
*/
-
-extern __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
return result + ffz(tmp);
}
-/* Linus sez that gcc can optimize the following correctly, we'll see if this
+/*
+ * Linus sez that gcc can optimize the following correctly, we'll see if this
* holds on the Sparc as it does for the ALPHA.
*/
-
#define find_first_zero_bit(addr, size) \
find_next_zero_bit((addr), (size), 0)
-#ifndef __KERNEL__
-
-extern __inline__ int set_le_bit(int nr, void *addr)
+static __inline__ int test_le_bit(int nr, __const__ void * addr)
{
- int mask;
- unsigned char *ADDR = (unsigned char *) addr;
-
- ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- __asm__ __volatile__("
- ldub [%0], %%g3
- or %%g3, %2, %%g2
- stb %%g2, [%0]
- and %%g3, %2, %0
- "
- : "=&r" (ADDR)
- : "0" (ADDR), "r" (mask)
- : "g2", "g3");
-
- return (int) ADDR;
+ __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
+ return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}
-extern __inline__ int clear_le_bit(int nr, void *addr)
+/*
+ * non-atomic versions
+ */
+static __inline__ void __set_le_bit(int nr, void *addr)
{
- int mask;
- unsigned char *ADDR = (unsigned char *) addr;
+ unsigned char *ADDR = (unsigned char *)addr;
ADDR += nr >> 3;
- mask = 1 << (nr & 0x07);
- __asm__ __volatile__("
- ldub [%0], %%g3
- andn %%g3, %2, %%g2
- stb %%g2, [%0]
- and %%g3, %2, %0
- "
- : "=&r" (ADDR)
- : "0" (ADDR), "r" (mask)
- : "g2", "g3");
-
- return (int) ADDR;
+ *ADDR |= 1 << (nr & 0x07);
}
-#else /* __KERNEL__ */
-
-/* Now for the ext2 filesystem bit operations and helper routines. */
-
-extern __inline__ int set_le_bit(int nr, volatile void * addr)
+static __inline__ void __clear_le_bit(int nr, void *addr)
{
- register int mask asm("g2");
- register unsigned char *ADDR asm("g1");
+ unsigned char *ADDR = (unsigned char *)addr;
- ADDR = ((unsigned char *) addr) + (nr >> 3);
- mask = 1 << (nr & 0x07);
- __asm__ __volatile__("
- mov %%o7, %%g4
- call ___set_le_bit
- add %%o7, 8, %%o7
-" : "=&r" (mask)
- : "0" (mask), "r" (ADDR)
- : "g3", "g4", "g5", "g7", "cc");
-
- return mask;
+ ADDR += nr >> 3;
+ *ADDR &= ~(1 << (nr & 0x07));
}
-extern __inline__ int clear_le_bit(int nr, volatile void * addr)
+static __inline__ int __test_and_set_le_bit(int nr, void *addr)
{
- register int mask asm("g2");
- register unsigned char *ADDR asm("g1");
+ int mask, retval;
+ unsigned char *ADDR = (unsigned char *)addr;
- ADDR = ((unsigned char *) addr) + (nr >> 3);
+ ADDR += nr >> 3;
mask = 1 << (nr & 0x07);
- __asm__ __volatile__("
- mov %%o7, %%g4
- call ___clear_le_bit
- add %%o7, 8, %%o7
-" : "=&r" (mask)
- : "0" (mask), "r" (ADDR)
- : "g3", "g4", "g5", "g7", "cc");
-
- return mask;
+ retval = (mask & *ADDR) != 0;
+ *ADDR |= mask;
+ return retval;
}
-#endif /* __KERNEL__ */
-
-extern __inline__ int test_le_bit(int nr, __const__ void * addr)
+static __inline__ int __test_and_clear_le_bit(int nr, void *addr)
{
- int mask;
- __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
+ int mask, retval;
+ unsigned char *ADDR = (unsigned char *)addr;
ADDR += nr >> 3;
mask = 1 << (nr & 0x07);
- return ((mask & *ADDR) != 0);
+ retval = (mask & *ADDR) != 0;
+ *ADDR &= ~mask;
+ return retval;
}
-#ifdef __KERNEL__
-
-#define ext2_set_bit set_le_bit
-#define ext2_clear_bit clear_le_bit
-#define ext2_test_bit test_le_bit
-
-#endif /* __KERNEL__ */
-
-#define find_first_zero_le_bit(addr, size) \
- find_next_zero_le_bit((addr), (size), 0)
-
-extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
+static __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
return result + ffz(__swab32(tmp));
}
-#ifdef __KERNEL__
+#define find_first_zero_le_bit(addr, size) \
+ find_next_zero_le_bit((addr), (size), 0)
-#define ext2_find_first_zero_bit find_first_zero_le_bit
-#define ext2_find_next_zero_bit find_next_zero_le_bit
+#define ext2_set_bit __test_and_set_le_bit
+#define ext2_clear_bit __test_and_clear_le_bit
+#define ext2_test_bit test_le_bit
+#define ext2_find_first_zero_bit find_first_zero_le_bit
+#define ext2_find_next_zero_bit find_next_zero_le_bit
/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */
-/* $Id: pgalloc.h,v 1.12 2001/04/26 02:36:35 davem Exp $ */
+/* $Id: pgalloc.h,v 1.13 2001/07/17 16:17:33 anton Exp $ */
#ifndef _SPARC_PGALLOC_H
#define _SPARC_PGALLOC_H
BTFIXUPDEF_CALL(int, do_check_pgt_cache, int, int)
#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any, and marks the page tables reserved.
- */
-BTFIXUPDEF_CALL(void, pte_free_kernel, pte_t *)
-BTFIXUPDEF_CALL(pte_t *, pte_alloc_kernel, pmd_t *, unsigned long)
+BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
+#define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)()
-#define pte_free_kernel(pte) BTFIXUP_CALL(pte_free_kernel)(pte)
-#define pte_alloc_kernel(pmd,addr) BTFIXUP_CALL(pte_alloc_kernel)(pmd,addr)
+BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
+#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
-BTFIXUPDEF_CALL(void, pmd_free_kernel, pmd_t *)
-BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_kernel, pgd_t *, unsigned long)
+#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_alloc(mm) get_pgd_fast()
-#define pmd_free_kernel(pmd) BTFIXUP_CALL(pmd_free_kernel)(pmd)
-#define pmd_alloc_kernel(pgd,addr) BTFIXUP_CALL(pmd_alloc_kernel)(pgd,addr)
+#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
-BTFIXUPDEF_CALL(void, pte_free, pte_t *)
-BTFIXUPDEF_CALL(pte_t *, pte_alloc, pmd_t *, unsigned long)
+static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ BUG();
+ return 0;
+}
+
+BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one_fast, struct mm_struct *, unsigned long)
+#define pmd_alloc_one_fast(mm, address) BTFIXUP_CALL(pmd_alloc_one_fast)(mm, address)
+
+BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
+#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
+
+#define pmd_free(pmd) free_pmd_fast(pmd)
-#define pte_free(pte) BTFIXUP_CALL(pte_free)(pte)
-#define pte_alloc(pmd,addr) BTFIXUP_CALL(pte_alloc)(pmd,addr)
+#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE)
-BTFIXUPDEF_CALL(void, pmd_free, pmd_t *)
-BTFIXUPDEF_CALL(pmd_t *, pmd_alloc, pgd_t *, unsigned long)
+BTFIXUPDEF_CALL(pte_t *, pte_alloc_one, struct mm_struct *, unsigned long)
+#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
-#define pmd_free(pmd) BTFIXUP_CALL(pmd_free)(pmd)
-#define pmd_alloc(pgd,addr) BTFIXUP_CALL(pmd_alloc)(pgd,addr)
+BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_fast, struct mm_struct *, unsigned long)
+#define pte_alloc_one_fast(mm, address) BTFIXUP_CALL(pte_alloc_one_fast)(mm, address)
-BTFIXUPDEF_CALL(void, pgd_free, pgd_t *)
-BTFIXUPDEF_CALL(pgd_t *, pgd_alloc, void)
+BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
+#define free_pte_fast(pte) BTFIXUP_CALL(free_pte_fast)(pte)
-#define pgd_free(pgd) BTFIXUP_CALL(pgd_free)(pgd)
-#define pgd_alloc(mm) BTFIXUP_CALL(pgd_alloc)()
+#define pte_free(pte) free_pte_fast(pte)
-#endif /* _SPARC64_PGALLOC_H */
+#endif /* _SPARC_PGALLOC_H */
-/* $Id: atomic.h,v 1.21 2000/10/03 07:28:56 anton Exp $
+/* $Id: atomic.h,v 1.22 2001/07/11 23:56:07 davem Exp $
* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
/*
* Ok, this is an expanded form so that we can use the same
* request for paging requests when that is implemented. In
- * paging, 'bh' is NULL, and the semaphore is used to wait
- * for read/write completion.
+ * paging, 'bh' is NULL, and the completion is used to wait
+ * for the IO to be ready.
*/
struct request {
struct list_head queue;
unsigned long current_nr_sectors;
void * special;
char * buffer;
- struct semaphore * sem;
+ struct completion * waiting;
struct buffer_head * bh;
struct buffer_head * bhtail;
request_queue_t *q;
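
The same semaphore-to-completion conversion recurs throughout the SCSI changes above (scsi.c, st.c, osst.c, gdth, ips): a locked semaphore plus up()/down() becomes a completion plus complete()/wait_for_completion(). The following is a generic, schematic sketch of that pattern, not code from this patch; the pending_done pointer and function names are placeholders standing in for request.waiting and the drivers' done/issue routines.

#include <linux/completion.h>

/* Hypothetical "who to wake" pointer; the real code stores this in
 * the request structure (request.waiting). */
static struct completion *pending_done;

static void work_finished(void)
{
        if (pending_done != NULL)
                complete(pending_done);         /* was: up(req->sem) */
}

static void submit_and_wait(void)
{
        DECLARE_COMPLETION(wait);               /* was: DECLARE_MUTEX_LOCKED(sem) */

        pending_done = &wait;
        /* ... start the request here ... */
        wait_for_completion(&wait);             /* was: down(&sem) */
        pending_done = NULL;
}
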
--- /dev/null
+#ifndef __CRAMFS_H
+#define __CRAMFS_H
+
+#ifndef __KERNEL__
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+
+#endif
+
+#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
+#define CRAMFS_SIGNATURE "Compressed ROMFS"
+
+/*
+ * Width of various bitfields in struct cramfs_inode.
+ * Primarily used to generate warnings in mkcramfs.
+ */
+#define CRAMFS_MODE_WIDTH 16
+#define CRAMFS_UID_WIDTH 16
+#define CRAMFS_SIZE_WIDTH 24
+#define CRAMFS_GID_WIDTH 8
+#define CRAMFS_NAMELEN_WIDTH 6
+#define CRAMFS_OFFSET_WIDTH 26
+
+/*
+ * Reasonably terse representation of the inode data.
+ */
+struct cramfs_inode {
+ u32 mode:CRAMFS_MODE_WIDTH, uid:CRAMFS_UID_WIDTH;
+ /* SIZE for device files is i_rdev */
+ u32 size:CRAMFS_SIZE_WIDTH, gid:CRAMFS_GID_WIDTH;
+ /* NAMELEN is the length of the file name, divided by 4 and
+ rounded up. (cramfs doesn't support hard links.) */
+ /* OFFSET: For symlinks and non-empty regular files, this
+ contains the offset (divided by 4) of the file data in
+ compressed form (starting with an array of block pointers;
+ see README). For non-empty directories it is the offset
+ (divided by 4) of the inode of the first file in that
+ directory. For anything else, offset is zero. */
+ u32 namelen:CRAMFS_NAMELEN_WIDTH, offset:CRAMFS_OFFSET_WIDTH;
+};
+
+struct cramfs_info {
+ u32 crc;
+ u32 edition;
+ u32 blocks;
+ u32 files;
+};
+
+/*
+ * Superblock information at the beginning of the FS.
+ */
+struct cramfs_super {
+ u32 magic; /* 0x28cd3d45 - random number */
+ u32 size; /* length in bytes */
+ u32 flags; /* 0 */
+ u32 future; /* 0 */
+ u8 signature[16]; /* "Compressed ROMFS" */
+ struct cramfs_info fsid; /* unique filesystem info */
+ u8 name[16]; /* user-defined name */
+ struct cramfs_inode root; /* Root inode data */
+};
+
+/*
+ * Feature flags
+ *
+ * 0x00000000 - 0x000000ff: features that work for all past kernels
+ * 0x00000100 - 0xffffffff: features that don't work for past kernels
+ */
+#define CRAMFS_FLAG_FSID_VERSION_2 0x00000001 /* fsid version #2 */
+#define CRAMFS_FLAG_SORTED_DIRS 0x00000002 /* sorted dirs */
+#define CRAMFS_FLAG_HOLES 0x00000100 /* support for holes */
+#define CRAMFS_FLAG_WRONG_SIGNATURE 0x00000200 /* reserved */
+#define CRAMFS_FLAG_SHIFTED_ROOT_OFFSET 0x00000400 /* shifted root fs */
+
+/*
+ * Valid values in super.flags. Currently we refuse to mount
+ * if (flags & ~CRAMFS_SUPPORTED_FLAGS). Maybe that should be
+ * changed to test super.future instead.
+ */
+#define CRAMFS_SUPPORTED_FLAGS (0x7ff)
+
+/* Uncompression interfaces to the underlying zlib */
+int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen);
+int cramfs_uncompress_init(void);
+int cramfs_uncompress_exit(void);
+
+#endif
--- /dev/null
+#ifndef _CRAMFS_FS_SB
+#define _CRAMFS_FS_SB
+
+/*
+ * cramfs super-block data in memory
+ */
+struct cramfs_sb_info {
+ unsigned long magic;
+ unsigned long size;
+ unsigned long blocks;
+ unsigned long files;
+ unsigned long flags;
+};
+
+#endif
-/* $Id: ethtool.h,v 1.2 2000/11/12 10:05:57 davem Exp $
+/*
* ethtool.h: Defines for Linux ethtool.
*
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
char bus_info[32]; /* Bus info for this interface. For PCI
* devices, use pci_dev->slot_name. */
char reserved1[32];
- char reserved2[32];
+ char reserved2[28];
+ u32 regdump_len; /* Amount of data from ETHTOOL_GREGS */
};
/* CMDs currently supported */
#define ETHTOOL_GSET 0x00000001 /* Get settings. */
#define ETHTOOL_SSET 0x00000002 /* Set settings, privileged. */
#define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */
+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers, privileged. */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
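For illustration, user space can read the new regdump_len field through the existing SIOCETHTOOL ioctl (hypothetical example; the kernel-internal u8/u16/u32 types are supplied locally because this header uses them):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	typedef unsigned char u8;
	typedef unsigned short u16;
	typedef unsigned int u32;
	#include <linux/ethtool.h>

	int print_regdump_len(const char *ifname)
	{
		struct ethtool_drvinfo info;
		struct ifreq ifr;
		int ret = -1;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		memset(&info, 0, sizeof(info));
		info.cmd = ETHTOOL_GDRVINFO;
		ifr.ifr_data = (char *) &info;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
			printf("%s: ETHTOOL_GREGS would return %u bytes\n",
			       ifname, info.regdump_len);
			ret = 0;
		}
		close(fd);
		return ret;
	}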
#include <linux/udf_fs_sb.h>
#include <linux/ncp_fs_sb.h>
#include <linux/usbdev_fs_sb.h>
+#include <linux/cramfs_fs_sb.h>
extern struct list_head super_blocks;
struct udf_sb_info udf_sb;
struct ncp_sb_info ncpfs_sb;
struct usbdev_sb_info usbdevfs_sb;
+ struct cramfs_sb_info cramfs_sb;
void *generic_sbp;
} u;
/*
#define PTT_GEN_ERR __constant_htons(0x0203)
struct pppoe_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 ver : 4;
__u8 type : 4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 type : 4;
+ __u8 ver : 4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
__u8 code;
__u16 sid;
__u16 length;
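Illustrative only: with the bitfields laid out according to byte order, a discovery header can be initialised identically on either endianness (0x09 is the PADI code from RFC 2516):

	#include <linux/if_pppox.h>

	static void fill_padi_header_example(struct pppoe_hdr *ph)
	{
		ph->ver    = 1;		/* PPPoE version, always 1 */
		ph->type   = 1;		/* PPPoE type, always 1 */
		ph->code   = 0x09;	/* PADI */
		ph->sid    = 0;
		ph->length = 0;
	}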
#endif
-#define MAX_ADDR_LEN 7 /* Largest hardware address length */
+#define MAX_ADDR_LEN 8 /* Largest hardware address length */
/*
* Compute the worst case header length according to the protocols
#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */
#define PR_GET_PDEATHSIG 2 /* Second arg is a ptr to return the signal */
-/* Get/set current->dumpable */
+/* Get/set current->mm->dumpable */
#define PR_GET_DUMPABLE 3
#define PR_SET_DUMPABLE 4
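The user-space interface is unchanged by the move into mm_struct; for example, a process can still opt out of core dumps with prctl() (illustrative snippet):

	#include <sys/prctl.h>

	static void disable_core_dumps(void)
	{
		prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);	/* now clears current->mm->dumpable */
	}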
unsigned long cpu_vm_mask;
unsigned long swap_address;
+ unsigned dumpable:1;
+
/* Architecture-specific MM context */
mm_context_t context;
};
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
unsigned long personality;
- int dumpable:1;
int did_exec:1;
pid_t pid;
pid_t pgrp;
* Alan Cox : Support for TCP parameters.
* Alexey Kuznetsov: Major changes for new routing code.
* Mike McLagan : Routing by source
+ * Robert Olsson : Added rt_cache statistics
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
__u32 i_packets;
};
+struct rt_cache_stat
+{
+ unsigned int in_hit;
+ unsigned int in_slow_tot;
+ unsigned int in_slow_mc;
+ unsigned int in_no_route;
+ unsigned int in_brd;
+ unsigned int in_martian_dst;
+ unsigned int in_martian_src;
+ unsigned int out_hit;
+ unsigned int out_slow_tot;
+ unsigned int out_slow_mc;
+};
+
extern struct ip_rt_acct *ip_rt_acct;
struct in_device;
}
if (new_egid != old_egid)
{
- current->dumpable = 0;
+ current->mm->dumpable = 0;
wmb();
}
if (rgid != (gid_t) -1 ||
{
if(old_egid != gid)
{
- current->dumpable=0;
+ current->mm->dumpable=0;
wmb();
}
current->gid = current->egid = current->sgid = current->fsgid = gid;
{
if(old_egid != gid)
{
- current->dumpable=0;
+ current->mm->dumpable=0;
wmb();
}
current->egid = current->fsgid = gid;
if(dumpclear)
{
- current->dumpable = 0;
+ current->mm->dumpable = 0;
wmb();
}
current->uid = new_ruid;
if (new_euid != old_euid)
{
- current->dumpable=0;
+ current->mm->dumpable=0;
wmb();
}
current->fsuid = current->euid = new_euid;
if (old_euid != uid)
{
- current->dumpable = 0;
+ current->mm->dumpable = 0;
wmb();
}
current->fsuid = current->euid = uid;
if (euid != (uid_t) -1) {
if (euid != current->euid)
{
- current->dumpable = 0;
+ current->mm->dumpable = 0;
wmb();
}
current->euid = euid;
if (egid != (gid_t) -1) {
if (egid != current->egid)
{
- current->dumpable = 0;
+ current->mm->dumpable = 0;
wmb();
}
current->egid = egid;
{
if (uid != old_fsuid)
{
- current->dumpable = 0;
+ current->mm->dumpable = 0;
wmb();
}
current->fsuid = uid;
{
if (gid != old_fsgid)
{
- current->dumpable = 0;
+ current->mm->dumpable = 0;
wmb();
}
current->fsgid = gid;
error = put_user(current->pdeath_signal, (int *)arg2);
break;
case PR_GET_DUMPABLE:
- if (current->dumpable)
+ if (current->mm->dumpable)
error = 1;
break;
case PR_SET_DUMPABLE:
error = -EINVAL;
break;
}
- current->dumpable = arg2;
+ current->mm->dumpable = arg2;
break;
case PR_SET_UNALIGN:
#ifdef SET_UNALIGN_CTL
*
* ROUTE - implementation of the IP router.
*
- * Version: $Id: route.c,v 1.94 2001/05/05 01:01:02 davem Exp $
+ * Version: $Id: route.c,v 1.95 2001/07/10 22:32:51 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
* Vladimir V. Ivanov : IP rule info (flowid) is really useful.
* Marc Boucher : routing by fwmark
+ * Robert Olsson : Added rt_cache statistics
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
static unsigned rt_hash_mask;
static int rt_hash_log;
+struct rt_cache_stat rt_cache_stat[NR_CPUS];
+
static int rt_intern_hash(unsigned hash, struct rtable *rth,
struct rtable **res);
len = length;
return len;
}
+
+static int rt_cache_stat_get_info(char *buffer, char **start, off_t offset, int length)
+{
+ unsigned int dst_entries = atomic_read(&ipv4_dst_ops.entries);
+ int i, lcpu;
+ int len = 0;
+
+ for (lcpu = 0; lcpu < smp_num_cpus; lcpu++) {
+ i = cpu_logical_map(lcpu);
+
+ len += sprintf(buffer+len, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ dst_entries,
+ rt_cache_stat[i].in_hit,
+ rt_cache_stat[i].in_slow_tot,
+ rt_cache_stat[i].in_slow_mc,
+ rt_cache_stat[i].in_no_route,
+ rt_cache_stat[i].in_brd,
+ rt_cache_stat[i].in_martian_dst,
+ rt_cache_stat[i].in_martian_src,
+
+ rt_cache_stat[i].out_hit,
+ rt_cache_stat[i].out_slow_tot,
+ rt_cache_stat[i].out_slow_mc
+ );
+ }
+ len -= offset;
+
+ if (len > length)
+ len = length;
+ if (len < 0)
+ len = 0;
+
+ *start = buffer + offset;
+ return len;
+}
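For illustration, the resulting /proc/net/rt_cache_stat file (one line per CPU, eleven hexadecimal fields in the order printed above) could be parsed from user space like this (hypothetical reader):

	#include <stdio.h>

	int main(void)
	{
		unsigned int entries, in_hit, in_slow_tot, in_slow_mc, in_no_route,
			     in_brd, in_martian_dst, in_martian_src,
			     out_hit, out_slow_tot, out_slow_mc;
		FILE *f = fopen("/proc/net/rt_cache_stat", "r");
		int cpu = 0;

		if (!f)
			return 1;
		while (fscanf(f, "%x %x %x %x %x %x %x %x %x %x %x",
			      &entries, &in_hit, &in_slow_tot, &in_slow_mc,
			      &in_no_route, &in_brd, &in_martian_dst, &in_martian_src,
			      &out_hit, &out_slow_tot, &out_slow_mc) == 11)
			printf("cpu%d: in_hit=%u out_hit=%u entries=%u\n",
			       cpu++, in_hit, out_hit, entries);
		fclose(f);
		return 0;
	}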
static __inline__ void rt_free(struct rtable *rt)
{
if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
rth->u.dst.input = ip_mr_input;
#endif
+ rt_cache_stat[smp_processor_id()].in_slow_mc++;
in_dev_put(in_dev);
hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5), tos);
}
free_res = 1;
+ rt_cache_stat[smp_processor_id()].in_slow_tot++;
+
#ifdef CONFIG_IP_ROUTE_NAT
/* Policy is applied before mapping destination,
but rerouting after map should be made with old source.
}
flags |= RTCF_BROADCAST;
res.type = RTN_BROADCAST;
+ rt_cache_stat[smp_processor_id()].in_brd++;
local_input:
rth = dst_alloc(&ipv4_dst_ops);
goto intern;
no_route:
+ rt_cache_stat[smp_processor_id()].in_no_route++;
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
res.type = RTN_UNREACHABLE;
goto local_input;
* Do not cache martian addresses: they should be logged (RFC1812)
*/
martian_destination:
+ rt_cache_stat[smp_processor_id()].in_martian_dst++;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
goto done;
martian_source:
+
+ rt_cache_stat[smp_processor_id()].in_martian_src++;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
/*
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
rth->u.dst.__use++;
+ rt_cache_stat[smp_processor_id()].in_hit++;
read_unlock(&rt_hash_table[hash].lock);
skb->dst = (struct dst_entry*)rth;
return 0;
rth->u.dst.output=ip_output;
+ rt_cache_stat[smp_processor_id()].out_slow_tot++;
+
if (flags & RTCF_LOCAL) {
rth->u.dst.input = ip_local_deliver;
rth->rt_spec_dst = key.dst;
}
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
rth->rt_spec_dst = key.src;
- if (flags & RTCF_LOCAL && !(dev_out->flags & IFF_LOOPBACK))
+ if (flags & RTCF_LOCAL && !(dev_out->flags & IFF_LOOPBACK)) {
rth->u.dst.output = ip_mc_output;
+ rt_cache_stat[smp_processor_id()].out_slow_mc++;
+ }
#ifdef CONFIG_IP_MROUTE
if (res.type == RTN_MULTICAST) {
struct in_device *in_dev = in_dev_get(dev_out);
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
rth->u.dst.__use++;
+ rt_cache_stat[smp_processor_id()].out_hit++;
read_unlock_bh(&rt_hash_table[hash].lock);
*rp = rth;
return 0;
add_timer(&rt_periodic_timer);
proc_net_create ("rt_cache", 0, rt_cache_get_info);
+ proc_net_create ("rt_cache_stat", 0, rt_cache_stat_get_info);
#ifdef CONFIG_NET_CLS_ROUTE
create_proc_read_entry("net/rt_acct", 0, 0, ip_rt_acct_read, NULL);
#endif
/*
* Check protocol is in range
*/
- if(family<0 || family>=NPROTO)
+ if (family < 0 || family >= NPROTO)
return -EAFNOSUPPORT;
+ if (type < 0 || type >= SOCK_MAX)
+ return -EINVAL;
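Illustrative effect of the added range check: an out-of-range socket type now fails cleanly with EINVAL instead of indexing past the protocol table (hypothetical user-space check):

	#include <sys/socket.h>
	#include <errno.h>
	#include <assert.h>

	static void check_bad_socket_type(void)
	{
		int fd = socket(AF_INET, 999, 0);	/* type out of range */

		assert(fd < 0 && errno == EINVAL);
	}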
/* Compatibility.
-CFLAGS = -Wall -O2
-CPPFLAGS = -I../../fs/cramfs
+CC = gcc
+CFLAGS = -W -Wall -O2 -g
+CPPFLAGS = -I../../include
LDLIBS = -lz
-PROGS = mkcramfs
+PROGS = mkcramfs cramfsck
all: $(PROGS)
--- /dev/null
+/*
+ * cramfsck - check a cramfs file system
+ *
+ * Copyright (C) 2000-2001 Transmeta Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * 1999/12/03: Linus Torvalds (cramfs tester and unarchive program)
+ * 2000/06/03: Daniel Quinlan (CRC and length checking program)
+ * 2000/06/04: Daniel Quinlan (merged programs, added options, support
+ * for special files, preserve permissions and
+ * ownership, cramfs superblock v2, bogus mode
+ * test, pathname length test, etc.)
+ * 2000/06/06: Daniel Quinlan (support for holes, pretty-printing,
+ * symlink size test)
+ * 2000/07/11: Daniel Quinlan (file length tests, start at offset 0 or 512,
+ * fsck-compatible exit codes)
+ * 2000/07/15: Daniel Quinlan (initial support for block devices)
+ */
+
+/* compile-time options */
+#define INCLUDE_FS_TESTS /* include cramfs checking and extraction */
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/fcntl.h>
+#include <dirent.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <assert.h>
+#include <getopt.h>
+#include <sys/sysmacros.h>
+#include <utime.h>
+#include <sys/ioctl.h>
+#define _LINUX_STRING_H_
+#include <linux/fs.h>
+#include <linux/cramfs_fs.h>
+#include <zlib.h>
+
+static const char *progname = "cramfsck";
+
+static int fd; /* ROM image file descriptor */
+static char *filename; /* ROM image filename */
+struct cramfs_super *super; /* just find the cramfs superblock once */
+static int opt_verbose = 0; /* 1 = verbose (-v), 2+ = very verbose (-vv) */
+#ifdef INCLUDE_FS_TESTS
+static int opt_extract = 0; /* extract cramfs (-x) */
+char *extract_dir = NULL; /* extraction directory (-x) */
+
+unsigned long start_inode = 1 << 28; /* start of first non-root inode */
+unsigned long end_inode = 0; /* end of the directory structure */
+unsigned long start_data = 1 << 28; /* start of the data (256 MB = max) */
+unsigned long end_data = 0; /* end of the data */
+/* true? cramfs_super < start_inode < end_inode <= start_data <= end_data */
+static uid_t euid; /* effective UID */
+
+#define PAD_SIZE 512
+#define PAGE_CACHE_SIZE (4096)
+
+/* Guarantee access to at least 8kB at a time */
+#define ROMBUFFER_BITS 13
+#define ROMBUFFERSIZE (1 << ROMBUFFER_BITS)
+#define ROMBUFFERMASK (ROMBUFFERSIZE-1)
+static char read_buffer[ROMBUFFERSIZE * 2];
+static unsigned long read_buffer_block = ~0UL;
+
+/* Uncompressing data structures... */
+static char outbuffer[PAGE_CACHE_SIZE*2];
+z_stream stream;
+
+#endif /* INCLUDE_FS_TESTS */
+
+/* Pass a status of 0 to print help and exit without an error. */
+static void usage(int status)
+{
+ FILE *stream = status ? stderr : stdout;
+
+ fprintf(stream, "usage: %s [-hv] [-x dir] file\n"
+ " -h print this help\n"
+ " -x dir extract into dir\n"
+ " -v be more verbose\n"
+ " file file to test\n", progname);
+
+ exit(status);
+}
+
+#ifdef INCLUDE_FS_TESTS
+void print_node(char type, struct cramfs_inode *i, char *name)
+{
+ char info[10];
+
+ if (S_ISCHR(i->mode) || (S_ISBLK(i->mode))) {
+ /* major/minor numbers can be as high as 2^12 or 4096 */
+ snprintf(info, 10, "%4d,%4d", major(i->size), minor(i->size));
+ }
+ else {
+ /* size can be as high as 2^24 or 16777216 */
+ snprintf(info, 10, "%9d", i->size);
+ }
+
+ printf("%c %04o %s %5d:%-3d %s\n",
+ type, i->mode & ~S_IFMT, info, i->uid, i->gid, name);
+}
+
+/*
+ * Create a fake "blocked" access
+ */
+static void *romfs_read(unsigned long offset)
+{
+ unsigned int block = offset >> ROMBUFFER_BITS;
+ if (block != read_buffer_block) {
+ read_buffer_block = block;
+ lseek(fd, block << ROMBUFFER_BITS, SEEK_SET);
+ read(fd, read_buffer, ROMBUFFERSIZE * 2);
+ }
+ return read_buffer + (offset & ROMBUFFERMASK);
+}
+
+static struct cramfs_inode *cramfs_iget(struct cramfs_inode * i)
+{
+ struct cramfs_inode *inode = malloc(sizeof(struct cramfs_inode));
+ *inode = *i;
+ return inode;
+}
+
+static struct cramfs_inode *iget(unsigned int ino)
+{
+ return cramfs_iget(romfs_read(ino));
+}
+
+void iput(struct cramfs_inode *inode)
+{
+ free(inode);
+}
+
+/*
+ * Return the offset of the root directory,
+ * or 0 if none.
+ */
+static struct cramfs_inode *read_super(void)
+{
+ unsigned long offset;
+
+ offset = super->root.offset << 2;
+ if (super->magic != CRAMFS_MAGIC)
+ return NULL;
+ if (memcmp(super->signature, CRAMFS_SIGNATURE, sizeof(super->signature)) != 0)
+ return NULL;
+ if (offset < sizeof(struct cramfs_super))
+ return NULL;
+ return cramfs_iget(&super->root);
+}
+
+static int uncompress_block(void *src, int len)
+{
+ int err;
+
+ stream.next_in = src;
+ stream.avail_in = len;
+
+ stream.next_out = (unsigned char *) outbuffer;
+ stream.avail_out = PAGE_CACHE_SIZE*2;
+
+ inflateReset(&stream);
+
+ err = inflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END) {
+ fprintf(stderr, "%s: error %d while decompressing! %p(%d)\n",
+ filename, err, src, len);
+ exit(4);
+ }
+ return stream.total_out;
+}
+
+static void change_file_status(char *path, struct cramfs_inode *i)
+{
+ struct utimbuf epoch = { 0, 0 };
+
+ if (euid == 0) {
+ if (lchown(path, i->uid, i->gid) < 0) {
+ perror(path);
+ exit(8);
+ }
+ if (S_ISLNK(i->mode))
+ return;
+ if ((S_ISUID | S_ISGID) & i->mode) {
+ if (chmod(path, i->mode) < 0) {
+ perror(path);
+ exit(8);
+ }
+ }
+ }
+ if (S_ISLNK(i->mode))
+ return;
+ if (utime(path, &epoch) < 0) {
+ perror(path);
+ exit(8);
+ }
+}
+
+static void do_symlink(char *path, struct cramfs_inode *i)
+{
+ unsigned long offset = i->offset << 2;
+ unsigned long curr = offset + 4;
+ unsigned long next = *(u32 *) romfs_read(offset);
+ unsigned long size;
+
+ if (next > end_data) {
+ end_data = next;
+ }
+
+ size = uncompress_block(romfs_read(curr), next - curr);
+ if (size != i->size) {
+ fprintf(stderr, "%s: size error in symlink `%s'\n",
+ filename, path);
+ exit(4);
+ }
+ outbuffer[size] = 0;
+ if (opt_verbose) {
+ char *str;
+
+ str = malloc(strlen(outbuffer) + strlen(path) + 5);
+ strcpy(str, path);
+ strncat(str, " -> ", 4);
+ strncat(str, outbuffer, size);
+
+ print_node('l', i, str);
+ if (opt_verbose > 1) {
+ printf(" uncompressing block at %ld to %ld (%ld)\n", curr, next, next - curr);
+ }
+ }
+ if (opt_extract) {
+ symlink(outbuffer, path);
+ change_file_status(path, i);
+ }
+}
+
+static void do_special_inode(char *path, struct cramfs_inode *i)
+{
+ dev_t devtype = 0;
+ char type;
+
+ if (S_ISCHR(i->mode)) {
+ devtype = i->size;
+ type = 'c';
+ }
+ else if (S_ISBLK(i->mode)) {
+ devtype = i->size;
+ type = 'b';
+ }
+ else if (S_ISFIFO(i->mode))
+ type = 'p';
+ else if (S_ISSOCK(i->mode))
+ type = 's';
+ else {
+ fprintf(stderr, "%s: bogus mode on `%s' (%o)\n", filename, path, i->mode);
+ exit(4);
+ }
+
+ if (opt_verbose) {
+ print_node(type, i, path);
+ }
+
+ if (opt_extract) {
+ if (mknod(path, i->mode, devtype) < 0) {
+ perror(path);
+ exit(8);
+ }
+ change_file_status(path, i);
+ }
+}
+
+static void do_uncompress(int fd, unsigned long offset, unsigned long size)
+{
+ unsigned long curr = offset + 4 * ((size + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE);
+
+ do {
+ unsigned long out = PAGE_CACHE_SIZE;
+ unsigned long next = *(u32 *) romfs_read(offset);
+
+ if (next > end_data) {
+ end_data = next;
+ }
+
+ offset += 4;
+ if (curr == next) {
+ if (opt_verbose > 1) {
+ printf(" hole at %ld (%d)\n", curr, PAGE_CACHE_SIZE);
+ }
+ if (size < PAGE_CACHE_SIZE)
+ out = size;
+ memset(outbuffer, 0x00, out);
+ }
+ else {
+ if (opt_verbose > 1) {
+ printf(" uncompressing block at %ld to %ld (%ld)\n", curr, next, next - curr);
+ }
+ out = uncompress_block(romfs_read(curr), next - curr);
+ }
+ if (size >= PAGE_CACHE_SIZE) {
+ if (out != PAGE_CACHE_SIZE) {
+ fprintf(stderr, "%s: Non-block (%ld) bytes\n", filename, out);
+ exit(4);
+ }
+ } else {
+ if (out != size) {
+ fprintf(stderr, "%s: Non-size (%ld vs %ld) bytes\n", filename, out, size);
+ exit(4);
+ }
+ }
+ size -= out;
+ if (opt_extract) {
+ write(fd, outbuffer, out);
+ }
+ curr = next;
+ } while (size);
+}
+
+static void expand_fs(int pathlen, char *path, struct cramfs_inode *inode)
+{
+ if (S_ISDIR(inode->mode)) {
+ int count = inode->size;
+ unsigned long offset = inode->offset << 2;
+ char *newpath = malloc(pathlen + 256);
+
+ if (count > 0 && offset < start_inode) {
+ start_inode = offset;
+ }
+ /* XXX - need to check end_inode for empty case? */
+ memcpy(newpath, path, pathlen);
+ newpath[pathlen] = '/';
+ pathlen++;
+ if (opt_verbose) {
+ print_node('d', inode, path);
+ }
+ if (opt_extract) {
+ mkdir(path, inode->mode);
+ change_file_status(path, inode);
+ }
+ while (count > 0) {
+ struct cramfs_inode *child = iget(offset);
+ int size;
+ int newlen = child->namelen << 2;
+
+ size = sizeof(struct cramfs_inode) + newlen;
+ count -= size;
+
+ offset += sizeof(struct cramfs_inode);
+
+ memcpy(newpath + pathlen, romfs_read(offset), newlen);
+ newpath[pathlen + newlen] = 0;
+ if ((pathlen + newlen) - strlen(newpath) > 3) {
+ fprintf(stderr, "%s: invalid cramfs--bad path length\n", filename);
+ exit(4);
+ }
+ expand_fs(strlen(newpath), newpath, child);
+
+ offset += newlen;
+
+ if (offset > end_inode) {
+ end_inode = offset;
+ }
+ }
+ return;
+ }
+ if (S_ISREG(inode->mode)) {
+ int fd = 0;
+ unsigned long offset = inode->offset << 2;
+
+ if (offset > 0 && offset < start_data) {
+ start_data = offset;
+ }
+ if (opt_verbose) {
+ print_node('f', inode, path);
+ }
+ if (opt_extract) {
+ fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, inode->mode);
+ }
+ if (inode->size) {
+ do_uncompress(fd, offset, inode->size);
+ }
+ if (opt_extract) {
+ close(fd);
+ change_file_status(path, inode);
+ }
+ return;
+ }
+ if (S_ISLNK(inode->mode)) {
+ unsigned long offset = inode->offset << 2;
+
+ if (offset < start_data) {
+ start_data = offset;
+ }
+ do_symlink(path, inode);
+ return;
+ }
+ else {
+ do_special_inode(path, inode);
+ return;
+ }
+}
+#endif /* INCLUDE_FS_TESTS */
+
+int main(int argc, char **argv)
+{
+ void *buf;
+ size_t length;
+ struct stat st;
+ u32 crc_old, crc_new;
+#ifdef INCLUDE_FS_TESTS
+ struct cramfs_inode *root;
+#endif /* INCLUDE_FS_TESTS */
+ int c; /* for getopt */
+ int start = 0;
+
+ if (argc)
+ progname = argv[0];
+
+ /* command line options */
+ while ((c = getopt(argc, argv, "hx:v")) != EOF) {
+ switch (c) {
+ case 'h':
+ usage(0);
+ case 'x':
+#ifdef INCLUDE_FS_TESTS
+ opt_extract = 1;
+ extract_dir = malloc(strlen(optarg) + 1);
+ strcpy(extract_dir, optarg);
+ break;
+#else /* not INCLUDE_FS_TESTS */
+ fprintf(stderr, "%s: compiled without -x support\n",
+ progname);
+ exit(16);
+#endif /* not INCLUDE_FS_TESTS */
+ case 'v':
+ opt_verbose++;
+ break;
+ }
+ }
+
+ if ((argc - optind) != 1)
+ usage(16);
+ filename = argv[optind];
+
+ /* find the physical size of the file or block device */
+ if (lstat(filename, &st) < 0) {
+ perror(filename);
+ exit(8);
+ }
+ fd = open(filename, O_RDONLY);
+ if (fd < 0) {
+ perror(filename);
+ exit(8);
+ }
+ if (S_ISBLK(st.st_mode)) {
+ if (ioctl(fd, BLKGETSIZE, &length) < 0) {
+ fprintf(stderr, "%s: warning--unable to determine filesystem size \n", filename);
+ exit(4);
+ }
+ length = length * 512;
+ }
+ else if (S_ISREG(st.st_mode)) {
+ length = st.st_size;
+ }
+ else {
+ fprintf(stderr, "%s is not a block device or file\n", filename);
+ exit(8);
+ }
+
+ if (length < sizeof(struct cramfs_super)) {
+ fprintf(stderr, "%s: invalid cramfs--file length too short\n", filename);
+ exit(4);
+ }
+
+ if (S_ISBLK(st.st_mode)) {
+ /* nasty because mmap of block devices fails */
+ buf = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ read(fd, buf, length);
+ }
+ else {
+ /* nice and easy */
+ buf = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+ }
+
+ /* XXX - this could be cleaner... */
+ if (((struct cramfs_super *) buf)->magic == CRAMFS_MAGIC) {
+ start = 0;
+ super = (struct cramfs_super *) buf;
+ }
+ else if (length >= (PAD_SIZE + sizeof(struct cramfs_super)) &&
+ ((((struct cramfs_super *) (buf + PAD_SIZE))->magic == CRAMFS_MAGIC)))
+ {
+ start = PAD_SIZE;
+ super = (struct cramfs_super *) (buf + PAD_SIZE);
+ }
+ else {
+ fprintf(stderr, "%s: invalid cramfs--wrong magic\n", filename);
+ exit(4);
+ }
+
+ if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
+ /* length test */
+ if (length < super->size) {
+ fprintf(stderr, "%s: invalid cramfs--file length too short\n", filename);
+ exit(4);
+ }
+ else if (length > super->size) {
+ fprintf(stderr, "%s: warning--file length too long, padded image?\n", filename);
+ }
+
+ /* CRC test */
+ crc_old = super->fsid.crc;
+ super->fsid.crc = crc32(0L, Z_NULL, 0);
+ crc_new = crc32(0L, Z_NULL, 0);
+ crc_new = crc32(crc_new, (unsigned char *) buf+start, super->size - start);
+ if (crc_new != crc_old) {
+ fprintf(stderr, "%s: invalid cramfs--crc error\n", filename);
+ exit(4);
+ }
+ }
+ else {
+ fprintf(stderr, "%s: warning--old cramfs image, no CRC\n",
+ filename);
+ }
+
+#ifdef INCLUDE_FS_TESTS
+ super = (struct cramfs_super *) malloc(sizeof(struct cramfs_super));
+ if (((struct cramfs_super *) buf)->magic == CRAMFS_MAGIC) {
+ memcpy(super, buf, sizeof(struct cramfs_super));
+ }
+ else if (length >= (PAD_SIZE + sizeof(struct cramfs_super)) &&
+ ((((struct cramfs_super *) (buf + PAD_SIZE))->magic == CRAMFS_MAGIC)))
+ {
+ memcpy(super, (buf + PAD_SIZE), sizeof(struct cramfs_super));
+ }
+
+ munmap(buf, length);
+
+ /* file format test, uses fake "blocked" accesses */
+ root = read_super();
+ umask(0);
+ euid = geteuid();
+ if (!root) {
+ fprintf(stderr, "%s: invalid cramfs--bad superblock\n",
+ filename);
+ exit(4);
+ }
+ stream.next_in = NULL;
+ stream.avail_in = 0;
+ inflateInit(&stream);
+
+ if (!extract_dir) {
+ extract_dir = "root";
+ }
+
+ expand_fs(strlen(extract_dir), extract_dir, root);
+ inflateEnd(&stream);
+
+ if (start_data != 1 << 28 && end_inode != start_data) {
+ fprintf(stderr, "%s: invalid cramfs--directory data end (%ld) != file data start (%ld)\n", filename, end_inode, start_data);
+ exit(4);
+ }
+ if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
+ if (end_data > super->size) {
+ fprintf(stderr, "%s: invalid cramfs--invalid file data offset\n", filename);
+ exit(4);
+ }
+ }
+#endif /* INCLUDE_FS_TESTS */
+
+ exit(0);
+}
+/*
+ * mkcramfs - make a cramfs file system
+ *
+ * Copyright (C) 1999-2001 Transmeta Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
#include <sys/types.h>
#include <stdio.h>
#include <sys/stat.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
-
-/* zlib required.. */
+#include <getopt.h>
+#include <linux/cramfs_fs.h>
#include <zlib.h>
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned int u32;
-
-#include "cramfs.h"
+#define PAD_SIZE 512 /* only 0 and 512 supported by kernel */
static const char *progname = "mkcramfs";
/* N.B. If you change the disk format of cramfs, please update fs/cramfs/README. */
-static void usage(void)
+/* Pass a status of 0 to print help and exit without an error. */
+static void usage(int status)
{
- fprintf(stderr, "Usage: '%s dirname outfile'\n"
- " where <dirname> is the root of the\n"
- " filesystem to be compressed.\n", progname);
- exit(1);
+ FILE *stream = status ? stderr : stdout;
+
+ fprintf(stream, "usage: %s [-h] [-e edition] [-i file] [-n name] dirname outfile\n"
+ " -h print this help\n"
+ " -E make all warnings errors (non-zero exit status)\n"
+ " -e edition set edition number (part of fsid)\n"
+ " -i file insert a file image into the filesystem (requires >= 2.4.0)\n"
+ " -n name set name of cramfs filesystem\n"
+ " -p pad by %d bytes for boot code\n"
+ " -s sort directory entries (old option, ignored)\n"
+ " -z make explicit holes (requires >= 2.3.39)\n"
+ " dirname root of the filesystem to be compressed\n"
+ " outfile output file\n", progname, PAD_SIZE);
+
+ exit(status);
}
-/*
- * If DO_HOLES is defined, then mkcramfs can create explicit holes in the
- * data, which saves 26 bytes per hole (which is a lot smaller a saving than
- * most filesystems).
- *
- * Note that kernels up to at least 2.3.39 don't support cramfs holes, which
- * is why this defaults to undefined at the moment.
- */
-/* #define DO_HOLES 1 */
-
#define PAGE_CACHE_SIZE (4096)
/* The kernel assumes PAGE_CACHE_SIZE as block size. */
static unsigned int blksize = PAGE_CACHE_SIZE;
+static long total_blocks = 0, total_nodes = 1; /* pre-count the root node */
+static int image_length = 0;
+
+/*
+ * If opt_holes is set, then mkcramfs can create explicit holes in the
+ * data, which saves 26 bytes per hole (a much smaller saving than
+ * on most filesystems).
+ *
+ * Note that kernels up to at least 2.3.39 don't support cramfs holes,
+ * which is why this is turned off by default.
+ */
+static int opt_edition = 0;
+static int opt_errors = 0;
+static int opt_holes = 0;
+static int opt_pad = 0;
+static char *opt_image = NULL;
+static char *opt_name = NULL;
-static int warn_dev, warn_gid, warn_namelen, warn_size, warn_uid;
+static int warn_dev, warn_gid, warn_namelen, warn_skip, warn_size, warn_uid;
#ifndef MIN
# define MIN(_a,_b) ((_a) < (_b) ? (_a) : (_b))
struct entry *next;
};
-/*
- * Width of various bitfields in struct cramfs_inode.
- * Used only to generate warnings.
- */
-#define SIZE_WIDTH 24
-#define UID_WIDTH 16
-#define GID_WIDTH 8
-#define OFFSET_WIDTH 26
-
/*
* The longest file name component to allow for in the input directory tree.
* Ext2fs (and many others) allow up to 255 bytes. A couple of filesystems
static void eliminate_doubles(struct entry *root,struct entry *orig) {
if(orig) {
- if(orig->size && orig->uncompressed)
+ if(orig->size && orig->uncompressed)
find_identical_file(root,orig);
eliminate_doubles(root,orig->child);
eliminate_doubles(root,orig->next);
}
}
+/*
+ * We define our own sorting function instead of using alphasort which
+ * uses strcoll and changes ordering based on locale information.
+ */
+static int cramsort (const void *a, const void *b)
+{
+ return strcmp ((*(const struct dirent **) a)->d_name,
+ (*(const struct dirent **) b)->d_name);
+}
+
static unsigned int parse_directory(struct entry *root_entry, const char *name, struct entry **prev, loff_t *fslen_ub)
{
- DIR *dir;
- int count = 0, totalsize = 0;
- struct dirent *dirent;
+ struct dirent **dirlist;
+ int totalsize = 0, dircount, dirindex;
char *path, *endpath;
size_t len = strlen(name);
- dir = opendir(name);
- if (!dir) {
- perror(name);
- exit(2);
- }
-
/* Set up the path. */
/* TODO: Reuse the parent's buffer to save memcpy'ing and duplication. */
path = malloc(len + 1 + MAX_INPUT_NAMELEN + 1);
if (!path) {
perror(NULL);
- exit(1);
+ exit(8);
}
memcpy(path, name, len);
endpath = path + len;
*endpath = '/';
endpath++;
- while ((dirent = readdir(dir)) != NULL) {
+ /* read in the directory and sort */
+ dircount = scandir(name, &dirlist, 0, cramsort);
+
+ if (dircount < 0) {
+ perror(name);
+ exit(8);
+ }
+
+ /* process directory */
+ for (dirindex = 0; dirindex < dircount; dirindex++) {
+ struct dirent *dirent;
struct entry *entry;
struct stat st;
int size;
size_t namelen;
+ dirent = dirlist[dirindex];
+
/* Ignore "." and ".." - we won't be adding them to the archive */
if (dirent->d_name[0] == '.') {
if (dirent->d_name[1] == '\0')
"Very long (%u bytes) filename `%s' found.\n"
" Please increase MAX_INPUT_NAMELEN in mkcramfs.c and recompile. Exiting.\n",
namelen, dirent->d_name);
- exit(1);
+ exit(8);
}
memcpy(endpath, dirent->d_name, namelen + 1);
if (lstat(path, &st) < 0) {
perror(endpath);
+ warn_skip = 1;
continue;
}
entry = calloc(1, sizeof(struct entry));
if (!entry) {
perror(NULL);
- exit(5);
+ exit(8);
}
entry->name = strdup(dirent->d_name);
if (!entry->name) {
perror(NULL);
- exit(1);
+ exit(8);
}
if (namelen > 255) {
/* Can't happen when reading from ext2fs. */
entry->mode = st.st_mode;
entry->size = st.st_size;
entry->uid = st.st_uid;
- if (entry->uid >= 1 << UID_WIDTH)
+ if (entry->uid >= 1 << CRAMFS_UID_WIDTH)
warn_uid = 1;
entry->gid = st.st_gid;
- if (entry->gid >= 1 << GID_WIDTH)
+ if (entry->gid >= 1 << CRAMFS_GID_WIDTH)
/* TODO: We ought to replace with a default
gid instead of truncating; otherwise there
are security problems. Maybe mode should
int fd = open(path, O_RDONLY);
if (fd < 0) {
perror(path);
+ warn_skip = 1;
continue;
}
if (entry->size) {
- if ((entry->size >= 1 << SIZE_WIDTH)) {
+ if ((entry->size >= 1 << CRAMFS_SIZE_WIDTH)) {
warn_size = 1;
- entry->size = (1 << SIZE_WIDTH) - 1;
+ entry->size = (1 << CRAMFS_SIZE_WIDTH) - 1;
}
entry->uncompressed = mmap(NULL, entry->size, PROT_READ, MAP_PRIVATE, fd, 0);
if (-1 == (int) (long) entry->uncompressed) {
perror("mmap");
- exit(5);
+ exit(8);
}
}
close(fd);
entry->uncompressed = malloc(entry->size);
if (!entry->uncompressed) {
perror(NULL);
- exit(5);
+ exit(8);
}
if (readlink(path, entry->uncompressed, entry->size) < 0) {
perror(path);
+ warn_skip = 1;
continue;
}
+ } else if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode)) {
+ /* maybe we should skip sockets */
+ entry->size = 0;
} else {
entry->size = st.st_rdev;
- if (entry->size & -(1<<SIZE_WIDTH))
+ if (entry->size & -(1<<CRAMFS_SIZE_WIDTH))
warn_dev = 1;
}
if (S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)) {
+ int blocks = ((entry->size - 1) / blksize + 1);
+
/* block pointers & data expansion allowance + data */
- if(entry->size)
- *fslen_ub += ((4+26)*((entry->size - 1) / blksize + 1)
- + MIN(entry->size + 3, st.st_blocks << 9));
- else
- *fslen_ub += MIN(entry->size + 3, st.st_blocks << 9);
+ if(entry->size)
+ *fslen_ub += (4+26)*blocks + entry->size + 3;
}
/* Link it into the list */
*prev = entry;
prev = &entry->next;
- count++;
totalsize += size;
}
- closedir(dir);
free(path);
+ free(dirlist); /* allocated by scandir() with malloc() */
return totalsize;
}
-static void set_random(void *area, size_t size)
-{
- int fd = open("/dev/random", O_RDONLY);
-
- if (fd >= 0) {
- if (read(fd, area, size) == size)
- return;
- }
- memset(area, 0x00, size);
-}
-
/* Returns sizeof(struct cramfs_super), which includes the root inode. */
-static unsigned int write_superblock(struct entry *root, char *base)
+static unsigned int write_superblock(struct entry *root, char *base, int size)
{
struct cramfs_super *super = (struct cramfs_super *) base;
- unsigned int offset = sizeof(struct cramfs_super);
+ unsigned int offset = sizeof(struct cramfs_super) + image_length;
+
+ if (opt_pad) {
+ offset += opt_pad;
+ }
super->magic = CRAMFS_MAGIC;
- super->flags = 0;
- /* Note: 0x10000 is meaningless, which is a bug; but
- super->size is never used anyway. */
- super->size = 0x10000;
+ super->flags = CRAMFS_FLAG_FSID_VERSION_2 | CRAMFS_FLAG_SORTED_DIRS;
+ if (opt_holes)
+ super->flags |= CRAMFS_FLAG_HOLES;
+ if (image_length > 0)
+ super->flags |= CRAMFS_FLAG_SHIFTED_ROOT_OFFSET;
+ super->size = size;
memcpy(super->signature, CRAMFS_SIGNATURE, sizeof(super->signature));
- set_random(super->fsid, sizeof(super->fsid));
- strncpy(super->name, "Compressed", sizeof(super->name));
+
+ super->fsid.crc = crc32(0L, Z_NULL, 0);
+ super->fsid.edition = opt_edition;
+ super->fsid.blocks = total_blocks;
+ super->fsid.files = total_nodes;
+
+ memset(super->name, 0x00, sizeof(super->name));
+ if (opt_name)
+ strncpy(super->name, opt_name, sizeof(super->name));
+ else
+ strncpy(super->name, "Compressed", sizeof(super->name));
super->root.mode = root->mode;
super->root.uid = root->uid;
static void set_data_offset(struct entry *entry, char *base, unsigned long offset)
{
struct cramfs_inode *inode = (struct cramfs_inode *) (base + entry->dir_offset);
+#ifdef DEBUG
assert ((offset & 3) == 0);
- if (offset >= (1 << (2 + OFFSET_WIDTH))) {
+#endif /* DEBUG */
+ if (offset >= (1 << (2 + CRAMFS_OFFSET_WIDTH))) {
fprintf(stderr, "filesystem too big. Exiting.\n");
- exit(1);
+ exit(8);
}
inode->offset = (offset >> 2);
}
write over inode->offset later. */
offset += sizeof(struct cramfs_inode);
+ total_nodes++; /* another node */
memcpy(base + offset, entry->name, len);
/* Pad up the name to a 4-byte boundary */
while (len & 3) {
if (entry->child) {
if (stack_entries >= MAXENTRIES) {
fprintf(stderr, "Exceeded MAXENTRIES. Raise this value in mkcramfs.c and recompile. Exiting.\n");
- exit(1);
+ exit(8);
}
entry_stack[stack_entries] = entry;
stack_entries++;
return offset;
}
-#ifdef DO_HOLES
-/*
- * Returns non-zero iff the first LEN bytes from BEGIN are all NULs.
- */
-static int
-is_zero(char const *begin, unsigned len)
+static int is_zero(char const *begin, unsigned len)
{
- return (len-- == 0 ||
- (begin[0] == '\0' &&
- (len-- == 0 ||
- (begin[1] == '\0' &&
- (len-- == 0 ||
- (begin[2] == '\0' &&
- (len-- == 0 ||
- (begin[3] == '\0' &&
- memcmp(begin, begin + 4, len) == 0))))))));
+ if (opt_holes)
+ /* Returns non-zero iff the first LEN bytes from BEGIN are
+ all NULs. */
+ return (len-- == 0 ||
+ (begin[0] == '\0' &&
+ (len-- == 0 ||
+ (begin[1] == '\0' &&
+ (len-- == 0 ||
+ (begin[2] == '\0' &&
+ (len-- == 0 ||
+ (begin[3] == '\0' &&
+ memcmp(begin, begin + 4, len) == 0))))))));
+ else
+ /* Never create holes. */
+ return 0;
}
-#else /* !DO_HOLES */
-# define is_zero(_begin,_len) (0) /* Never create holes. */
-#endif /* !DO_HOLES */
/*
* One 4-byte pointer per block and then the actual blocked
unsigned long curr = offset + 4 * blocks;
int change;
+ total_blocks += blocks;
+
do {
unsigned long len = 2 * blksize;
unsigned int input = size;
if (len > blksize*2) {
/* (I don't think this can happen with zlib.) */
printf("AIEEE: block \"compressed\" to > 2*blocklength (%ld)\n", len);
- exit(1);
+ exit(8);
}
*(u32 *) (base + offset) = curr;
return offset;
}
+static unsigned int write_file(char *file, char *base, unsigned int offset)
+{
+ int fd;
+ char *buf;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0) {
+ perror(file);
+ exit(8);
+ }
+ buf = mmap(NULL, image_length, PROT_READ, MAP_PRIVATE, fd, 0);
+ memcpy(base + offset, buf, image_length);
+ munmap(buf, image_length);
+ close (fd);
+ /* Pad up the image_length to a 4-byte boundary */
+ while (image_length & 3) {
+ *(base + offset + image_length) = '\0';
+ image_length++;
+ }
+ return (offset + image_length);
+}
/*
* Maximum size fs you can create is roughly 256MB. (The last file's
* Note that if you want it to fit in a ROM then you're limited to what the
* hardware and kernel can support (64MB?).
*/
-#define MAXFSLEN ((((1 << OFFSET_WIDTH) - 1) << 2) /* offset */ \
- + (1 << SIZE_WIDTH) - 1 /* filesize */ \
- + (1 << SIZE_WIDTH) * 4 / PAGE_CACHE_SIZE /* block pointers */ )
+#define MAXFSLEN ((((1 << CRAMFS_OFFSET_WIDTH) - 1) << 2) /* offset */ \
+ + (1 << CRAMFS_SIZE_WIDTH) - 1 /* filesize */ \
+ + (1 << CRAMFS_SIZE_WIDTH) * 4 / PAGE_CACHE_SIZE /* block pointers */ )
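For reference (worked out from the widths above, not stated in the patch): with CRAMFS_OFFSET_WIDTH = 26 and CRAMFS_SIZE_WIDTH = 24 this is about (2^26 - 1) * 4 bytes of offset (roughly 256 MB) plus (2^24 - 1) bytes for the last file (roughly 16 MB) plus 2^24 * 4 / 4096 = 16 KB of block pointers, hence the "roughly 256MB" in the comment.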
/*
*/
int main(int argc, char **argv)
{
- struct stat st;
+ struct stat st; /* used twice... */
struct entry *root_entry;
char *rom_image;
- unsigned int offset;
- ssize_t written;
+ ssize_t offset, written;
int fd;
- loff_t fslen_ub = 0; /* initial guess (upper-bound) of
- required filesystem size */
- char const *dirname;
+ /* initial guess (upper-bound) of required filesystem size */
+ loff_t fslen_ub = sizeof(struct cramfs_super);
+ char const *dirname, *outfile;
+ u32 crc = crc32(0L, Z_NULL, 0);
+ int c; /* for getopt */
+
+ total_blocks = 0;
if (argc)
progname = argv[0];
- if (argc != 3)
- usage();
- if (stat(dirname = argv[1], &st) < 0) {
- perror(argv[1]);
- exit(1);
+ /* command line options */
+ while ((c = getopt(argc, argv, "hEe:i:n:psz")) != EOF) {
+ switch (c) {
+ case 'h':
+ usage(0);
+ case 'E':
+ opt_errors = 1;
+ break;
+ case 'e':
+ opt_edition = atoi(optarg);
+ break;
+ case 'i':
+ opt_image = optarg;
+ if (lstat(opt_image, &st) < 0) {
+ perror(opt_image);
+ exit(16);
+ }
+ image_length = st.st_size; /* may be padded later */
+ fslen_ub += (image_length + 3); /* 3 is for padding */
+ break;
+ case 'n':
+ opt_name = optarg;
+ break;
+ case 'p':
+ opt_pad = PAD_SIZE;
+ fslen_ub += PAD_SIZE;
+ break;
+ case 's':
+ /* old option, ignored */
+ break;
+ case 'z':
+ opt_holes = 1;
+ break;
+ }
+ }
+
+ if ((argc - optind) != 2)
+ usage(16);
+ dirname = argv[optind];
+ outfile = argv[optind + 1];
+
+ if (stat(dirname, &st) < 0) {
+ perror(dirname);
+ exit(16);
}
- fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0666);
+ fd = open(outfile, O_WRONLY | O_CREAT | O_TRUNC, 0666);
root_entry = calloc(1, sizeof(struct entry));
if (!root_entry) {
perror(NULL);
- exit(5);
+ exit(8);
}
root_entry->mode = st.st_mode;
root_entry->uid = st.st_uid;
root_entry->gid = st.st_gid;
- root_entry->size = parse_directory(root_entry, argv[1], &root_entry->child, &fslen_ub);
+ root_entry->size = parse_directory(root_entry, dirname, &root_entry->child, &fslen_ub);
+
+ /* always allocate a multiple of blksize bytes because that's
+ what we're going to write later on */
+ fslen_ub = ((fslen_ub - 1) | (blksize - 1)) + 1;
+
if (fslen_ub > MAXFSLEN) {
fprintf(stderr,
- "warning: guestimate of required size (upper bound) is %luMB, but maximum image size is %uMB. We might die prematurely.\n",
- (unsigned long) (fslen_ub >> 20),
+ "warning: guestimate of required size (upper bound) is %LdMB, but maximum image size is %uMB. We might die prematurely.\n",
+ fslen_ub >> 20,
MAXFSLEN >> 20);
fslen_ub = MAXFSLEN;
}
possible. */
eliminate_doubles(root_entry,root_entry);
-
/* TODO: Why do we use a private/anonymous mapping here
followed by a write below, instead of just a shared mapping
and a couple of ftruncate calls? Is it just to save us
RAM free. If the reason is to be able to write to
un-mmappable block devices, then we could try shared mmap
and revert to anonymous mmap if the shared mmap fails. */
- rom_image = mmap(NULL, fslen_ub, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ rom_image = mmap(NULL, fslen_ub?fslen_ub:1, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
if (-1 == (int) (long) rom_image) {
perror("ROM image map");
- exit(1);
+ exit(8);
+ }
+
+ /* Skip the first opt_pad bytes for boot loader code */
+ offset = opt_pad;
+ memset(rom_image, 0x00, opt_pad);
+
+ /* Skip the superblock and come back to write it later. */
+ offset += sizeof(struct cramfs_super);
+
+ /* Insert a file image. */
+ if (opt_image) {
+ printf("Including: %s\n", opt_image);
+ offset = write_file(opt_image, rom_image, offset);
}
- offset = write_superblock(root_entry, rom_image);
- printf("Super block: %d bytes\n", offset);
offset = write_directory_structure(root_entry->child, rom_image, offset);
printf("Directory data: %d bytes\n", offset);
offset = ((offset - 1) | (blksize - 1)) + 1;
printf("Everything: %d kilobytes\n", offset >> 10);
+ /* Write the superblock now that we can fill in all of the fields. */
+ write_superblock(root_entry, rom_image+opt_pad, offset);
+ printf("Super block: %d bytes\n", sizeof(struct cramfs_super));
+
+ /* Put the checksum in. */
+ crc = crc32(crc, (rom_image+opt_pad), (offset-opt_pad));
+ ((struct cramfs_super *) (rom_image+opt_pad))->fsid.crc = crc;
+ printf("CRC: %x\n", crc);
+
+ /* Check to make sure we allocated enough space. */
+ if (fslen_ub < offset) {
+ fprintf(stderr, "not enough space allocated for ROM image (%Ld allocated, %d used)\n",
+ fslen_ub, offset);
+ exit(8);
+ }
+
written = write(fd, rom_image, offset);
if (written < 0) {
- perror("rom image");
- exit(1);
+ perror("ROM image");
+ exit(8);
}
if (offset != written) {
fprintf(stderr, "ROM image write failed (%d %d)\n", written, offset);
- exit(1);
+ exit(8);
}
/* (These warnings used to come at the start, but they scroll off the
if (warn_namelen) /* (can't happen when reading from ext2fs) */
fprintf(stderr, /* bytes, not chars: think UTF8. */
"warning: filenames truncated to 255 bytes.\n");
+ if (warn_skip)
+ fprintf(stderr, "warning: files were skipped due to errors.\n");
if (warn_size)
fprintf(stderr,
"warning: file sizes truncated to %luMB (minus 1 byte).\n",
- 1L << (SIZE_WIDTH - 20));
+ 1L << (CRAMFS_SIZE_WIDTH - 20));
if (warn_uid) /* (not possible with current Linux versions) */
fprintf(stderr,
"warning: uids truncated to %u bits. (This may be a security concern.)\n",
- UID_WIDTH);
+ CRAMFS_UID_WIDTH);
if (warn_gid)
fprintf(stderr,
"warning: gids truncated to %u bits. (This may be a security concern.)\n",
- GID_WIDTH);
+ CRAMFS_GID_WIDTH);
if (warn_dev)
fprintf(stderr,
"WARNING: device numbers truncated to %u bits. This almost certainly means\n"
"that some device files will be wrong.\n",
- OFFSET_WIDTH);
+ CRAMFS_OFFSET_WIDTH);
+ if (opt_errors &&
+ (warn_namelen||warn_skip||warn_size||warn_uid||warn_gid||warn_dev))
+ exit(8);
return 0;
}