locking rules:
All except ->poll() may block.
		BKL
-llseek:	yes
+llseek:	yes	(see below)
read:		no
write:		no
readdir:	yes	(see below)
readv:		no
writev:		no
+->llseek() locking has moved from llseek to the individual llseek
+implementations. If your fs is not using generic_file_llseek, you
+need to acquire and release the BKL in your ->llseek().
+
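+A hedged sketch of the new rule (all names below are invented for
+illustration, not taken from any filesystem): a private ->llseek() that
+cannot use generic_file_llseek() now takes the BKL itself:
+
+	static loff_t example_llseek(struct file *file, loff_t offset, int origin)
+	{
+		loff_t ret = -EINVAL;
+
+		lock_kernel();		/* acquiring the BKL is now the method's job */
+		switch (origin) {
+			case 2:		/* SEEK_END */
+				offset += file->f_dentry->d_inode->i_size;
+				break;
+			case 1:		/* SEEK_CUR */
+				offset += file->f_pos;
+		}
+		if (offset >= 0) {
+			file->f_pos = offset;
+			ret = offset;
+		}
+		unlock_kernel();	/* ... and so is releasing it */
+		return ret;
+	}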
->open() locking is in-transit: big lock partially moved into the methods.
The only exception is ->open() in the instances of file_operations that never
end up in ->i_fop/->proc_fops, i.e. ones that belong to character devices
- Tag VFS deletable in <devfs_mk_symlink> if handle ignored
- Updated README from master HTML file
+===============================================================================
+Changes for patch v208
+
+- Added KERN_* to remaining messages
+
+- Cleaned up declaration of <stat_read>
+
+- Updated README from master HTML file
Linux Devfs (Device File System) FAQ
Richard Gooch
-20-JAN-2002
+24-JAN-2002
Document languages:
A Korean translation by viatoris@nownuri.net is available at
-http://home.nownuri.net/~viatoris/devfs/devfs.html
-
-A newer version is under construcation at
-
http://viatoris.new21.org/devfs/devfs.html
AD1816 SOUND DRIVER
P: Thorsten Knabe
-M: Thorsten Knabe <tek@rbg.informatik.tu-darmstadt.de>
-M: Thorsten Knabe <tek01@hrzpub.tu-darmstadt.de>
W: http://www.student.informatik.tu-darmstadt.de/~tek/projects/linux.html
W: http://www.tu-darmstadt.de/~tek01/projects/linux.html
S: Maintained
ARPD SUPPORT
P: Jonathan Layes
-M: layes@loran.com
L: linux-net@vger.kernel.org
S: Maintained
BERKSHIRE PRODUCTS PC WATCHDOG DRIVER
P: Kenji Hollis
-M: kenji@bitgate.com
W: http://ftp.bitgate.com/pcwd/
S: Maintained
DIGI INTL. EPCA DRIVER
P: Chad Schwartz
M: support@dgii.com
-M: chads@dgii.com
L: digilnux@dgii.com
S: Maintained
DIGI RIGHTSWITCH NETWORK DRIVER
P: Rick Richardson
-M: rick@remotepoint.com
L: linux-net@vger.kernel.org
W: http://www.dgii.com/linux/
S: Maintained
DRM DRIVERS
P: Rik Faith
-M: faith@valinux.com
+M: faith@redhat.com
L: dri-devel@lists.sourceforge.net
S: Supported
EATA-DMA SCSI DRIVER
P: Michael Neuffer
-M: mike@i-Connect.Net
L: linux-eata@i-connect.net, linux-scsi@vger.kernel.org
S: Maintained
LOGICAL VOLUME MANAGER
P: Heinz Mauelshagen
-M: mge@sistina.de
L: linux-LVM@sistina.com
W: http://www.sistina.com/lvm
S: Maintained
OLYMPIC NETWORK DRIVER
P: Peter De Schrijver
-M: p2@ace.ulyssis.sutdent.kuleuven.ac.be
+M: p2@ace.ulyssis.student.kuleuven.ac.be
P: Mike Phillips
M: mikep@linuxtr.net
L: linux-net@vger.kernel.org
RISCOM8 DRIVER
P: Dmitry Gorodchanin
-M: pgmdsg@ibi.com
L: linux-kernel@vger.kernel.org
S: Maintained
USB SERIAL BELKIN F5U103 DRIVER
P: William Greathouse
M: wgreathouse@smva.com
-M: wgreathouse@myfavoritei.com
L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
S: Maintained
USB SERIAL CYBERJACK PINPAD/E-COM DRIVER
-M: linux-usb@sii.li
L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
S: Supported
ZF MACHZ WATCHDOG
P: Fernando Fuganti
-M: fuganti@conectiva.com.br
M: fuganti@netbank.com.br
W: http://cvs.conectiva.com.br/drivers/ZFL-watchdog/
S: Maintained
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 3
-EXTRAVERSION =-pre6
+EXTRAVERSION =
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a
DRIVERS-$(CONFIG_ALL_PPC) += drivers/macintosh/macintosh.o
DRIVERS-$(CONFIG_MAC) += drivers/macintosh/macintosh.o
-DRIVERS-$(CONFIG_ISAPNP) += drivers/pnp/pnp.o
+DRIVERS-$(CONFIG_PNP) += drivers/pnp/pnp.o
DRIVERS-$(CONFIG_SGI_IP22) += drivers/sgi/sgi.a
DRIVERS-$(CONFIG_VT) += drivers/video/video.o
DRIVERS-$(CONFIG_PARIDE) += drivers/block/paride/paride.a
printf("#define TASK_FLAGS %ld\n",
(long)offsetof(struct task_struct, flags));
printf("#define TASK_SIGPENDING %ld\n",
- (long)offsetof(struct task_struct, sigpending));
+#error (long)offsetof(struct task_struct, sigpending));
printf("#define TASK_ADDR_LIMIT %ld\n",
(long)offsetof(struct task_struct, addr_limit));
printf("#define TASK_EXEC_DOMAIN %ld\n",
(long)offsetof(struct task_struct, exec_domain));
printf("#define TASK_NEED_RESCHED %ld\n",
- (long)offsetof(struct task_struct, need_resched));
+#error (long)offsetof(struct task_struct, work.need_resched));
printf("#define TASK_SIZE %ld\n", sizeof(struct task_struct));
printf("#define STACK_SIZE %ld\n", sizeof(union task_union));
*/
#define TASK_STATE 0
#define TASK_FLAGS 8
-#define TASK_SIGPENDING 16
+#error #define TASK_SIGPENDING 16
#define TASK_ADDR_LIMIT 24
#define TASK_EXEC_DOMAIN 32
-#define TASK_NEED_RESCHED 40
-#define TASK_PTRACE 48
+#error #define TASK_NEED_RESCHED 40
+#error #define TASK_PTRACE 48
#define TASK_PROCESSOR 100
/*
and $0,8,$0
beq $0,restore_all
ret_from_reschedule:
- ldq $2,TASK_NEED_RESCHED($8)
+#error ldq $2,TASK_NEED_RESCHED($8)
lda $4,init_task_union
bne $2,reschedule
xor $4,$8,$4
- ldl $5,TASK_SIGPENDING($8)
+#error ldl $5,TASK_SIGPENDING($8)
beq $4,restore_all
bne $5,signal_return
restore_all:
/* Although we are an idle CPU, we do not want to
get into the scheduler unnecessarily. */
- long oldval = xchg(&current->need_resched, -1UL);
+ long oldval = xchg(&current->work.need_resched, -1UL);
if (!oldval)
- while (current->need_resched < 0);
+ while (current->work.need_resched < 0);
schedule();
check_pgt_cache();
}
* stack.
*/
ret_fast_syscall:
- ldr r1, [tsk, #TSK_NEED_RESCHED]
- ldr r2, [tsk, #TSK_SIGPENDING]
+#error ldr r1, [tsk, #TSK_NEED_RESCHED]
+#error ldr r2, [tsk, #TSK_SIGPENDING]
teq r1, #0 @ need_resched || sigpending
teqeq r2, #0
bne slow
bl SYMBOL_NAME(schedule)
ENTRY(ret_to_user)
ret_slow_syscall:
- ldr r1, [tsk, #TSK_NEED_RESCHED]
- ldr r2, [tsk, #TSK_SIGPENDING]
+#error ldr r1, [tsk, #TSK_NEED_RESCHED]
+#error ldr r2, [tsk, #TSK_SIGPENDING]
1: teq r1, #0 @ need_resched => schedule()
bne reschedule
teq r2, #0 @ sigpending => do_signal()
mov r0, #0 @ NULL 'oldset'
mov r1, sp @ 'regs'
mov r2, why @ 'syscall'
- b SYMBOL_NAME(do_signal) @ note the bl above sets lr
+#error b SYMBOL_NAME(do_signal) @ note the bl above sets lr
/*
* This is how we return from a fork. __switch_to will be calling us
beq ret_slow_syscall
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
- bl SYMBOL_NAME(syscall_trace)
+#error bl SYMBOL_NAME(syscall_trace)
b ret_slow_syscall
__sys_trace:
add r1, sp, #S_OFF
mov r0, #0 @ trace entry [IP = 0]
- bl SYMBOL_NAME(syscall_trace)
+#error bl SYMBOL_NAME(syscall_trace)
adrsvc al, lr, __sys_trace_return @ return address
add r1, sp, #S_R0 + S_OFF @ pointer to regs
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
- bl SYMBOL_NAME(syscall_trace)
+#error bl SYMBOL_NAME(syscall_trace)
b ret_slow_syscall
.align 5
void func(void)
{
-DEFN("TSK_SIGPENDING", OFF_TSK(sigpending));
+#error DEFN("TSK_SIGPENDING", OFF_TSK(sigpending));
DEFN("TSK_ADDR_LIMIT", OFF_TSK(addr_limit));
-DEFN("TSK_NEED_RESCHED", OFF_TSK(need_resched));
-DEFN("TSK_PTRACE", OFF_TSK(ptrace));
+#error DEFN("TSK_NEED_RESCHED", OFF_TSK(need_resched));
+#error DEFN("TSK_PTRACE", OFF_TSK(ptrace));
DEFN("TSK_USED_MATH", OFF_TSK(used_math));
DEFN("TSS_SAVE", OFF_TSK(thread.save));
VAL (NAME, offsetof (TYPE, MEMBER))
/* task_struct offsets. */
-OF (LTASK_SIGPENDING, struct task_struct, sigpending)
-OF (LTASK_NEEDRESCHED, struct task_struct, need_resched)
-OF (LTASK_PTRACE, struct task_struct, ptrace)
+#error OF (LTASK_SIGPENDING, struct task_struct, sigpending)
+#error OF (LTASK_NEEDRESCHED, struct task_struct, need_resched)
+#error OF (LTASK_PTRACE, struct task_struct, ptrace)
OF (LTASK_PID, struct task_struct, pid)
/* pt_regs offsets. */
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
# CONFIG_ZISOFS_FS is not set
-# CONFIG_ZLIB_FS_INFLATE is not set
#
# Partition Types
# Library routines
#
# CONFIG_CRC32 is not set
+# CONFIG_ZLIB_INFLATE is not set
+# CONFIG_ZLIB_DEFLATE is not set
{
rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
printk(" at %08x%08x",
- high, low);
+ ahigh, alow);
}
printk("\n");
/* Clear it */
*/
state = 0
flags = 4
-sigpending = 8
+work = 8
+need_resched = work+0
+syscall_trace = work+1
+sigpending = work+2
+notify_resume = work+3
addr_limit = 12
exec_domain = 16
-need_resched = 20
tsk_ptrace = 24
cpu = 32
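These equates describe a byte-packed structure occupying one 32-bit word at
offset 8 of task_struct. A sketch of its likely C shape, inferred from the
constants above and the work.* accesses later in this patch (not copied
from a header):

	struct task_work {
		__s8	need_resched;	/* work+0 */
		__u8	syscall_trace;	/* work+1: count of syscall tracers */
		__u8	sigpending;	/* work+2 */
		__u8	notify_resume;	/* work+3: notify on return to userspace */
	} __attribute__((packed));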
call *%edx
addl $4, %esp
popl %eax
- jmp ret_from_sys_call
+ jmp resume_userspace
ENTRY(lcall27)
pushfl # We get a different stack layout with call gates,
call *%edx
addl $4, %esp
popl %eax
- jmp ret_from_sys_call
+ jmp resume_userspace
ENTRY(ret_from_fork)
call SYMBOL_NAME(schedule_tail)
addl $4, %esp
GET_CURRENT(%ebx)
- testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
- jne tracesys_exit
- jmp ret_from_sys_call
+ jmp syscall_exit
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
*/
+ # userspace resumption stub bypassing syscall exit tracing
+ ALIGN
+ENTRY(ret_from_intr)
+ GET_CURRENT(%ebx)
+ret_from_exception:
+ movl EFLAGS(%esp),%eax # mix EFLAGS and CS
+ movb CS(%esp),%al
+ testl $(VM_MASK | 3),%eax
+ jz restore_all # returning to kernel-space or vm86-space
+ENTRY(resume_userspace)
+ cli # make sure need_resched and sigpending don't change
+ # between sampling and the iret
+ movl work(%ebx),%ecx
+ andl $0xffff00ff,%ecx # current->work (ignoring syscall_trace)
+ jne work_pending
+ jmp restore_all
+
+ # system call handler stub
+ ALIGN
ENTRY(system_call)
pushl %eax # save orig_eax
SAVE_ALL
GET_CURRENT(%ebx)
- testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
- jne tracesys
cmpl $(NR_syscalls),%eax
- jae badsys
+ jae syscall_badsys
+ testb $0xff,syscall_trace(%ebx) # system call tracing in operation
+ jnz syscall_trace_entry
+syscall_traced:
call *SYMBOL_NAME(sys_call_table)(,%eax,4)
- movl %eax,EAX(%esp) # save the return value
-ENTRY(ret_from_sys_call)
- cli # need_resched and signals atomic test
- cmpl $0,need_resched(%ebx)
- jne reschedule
- cmpl $0,sigpending(%ebx)
- jne signal_return
+ movl %eax,EAX(%esp) # store the return value
+syscall_exit:
+ cli # make sure need_resched and sigpending don't change
+ # between sampling and the iret
+ movl work(%ebx),%ecx
+ testl %ecx,%ecx # current->work
+ jne syscall_exit_work
restore_all:
RESTORE_ALL
+ # perform work that needs to be done immediately before resumption
ALIGN
-signal_return:
- sti # we can get here from an interrupt handler
+work_pending:
+ testb %cl,%cl # current->work.need_resched
+ jz work_notifysig
+work_resched:
+ call SYMBOL_NAME(schedule)
+ cli # make sure need_resched and sigpending don't change
+ # between sampling and the iret
+ movl work(%ebx),%ecx
+ andl $0xffff00ff,%ecx # ignore the syscall trace counter
+ jz restore_all
+ testb %cl,%cl # current->work.need_resched
+ jnz work_resched
+
+work_notifysig: # deal with pending signals and notify-resume requests
testl $(VM_MASK),EFLAGS(%esp)
movl %esp,%eax
- jne v86_signal_return
+ jne work_notifysig_v86 # returning to kernel-space or vm86-space
xorl %edx,%edx
- call SYMBOL_NAME(do_signal)
+ call SYMBOL_NAME(do_notify_resume)
jmp restore_all
ALIGN
-v86_signal_return:
+work_notifysig_v86:
+ pushl %ecx
call SYMBOL_NAME(save_v86_state)
+ popl %ecx
movl %eax,%esp
xorl %edx,%edx
- call SYMBOL_NAME(do_signal)
+ call SYMBOL_NAME(do_notify_resume)
jmp restore_all
+ # perform syscall exit tracing
ALIGN
-tracesys:
+syscall_trace_entry:
movl $-ENOSYS,EAX(%esp)
- call SYMBOL_NAME(syscall_trace)
+ movl %esp,%eax
+ xorl %edx,%edx
+ call SYMBOL_NAME(do_syscall_trace)
movl ORIG_EAX(%esp),%eax
cmpl $(NR_syscalls),%eax
- jae tracesys_exit
- call *SYMBOL_NAME(sys_call_table)(,%eax,4)
- movl %eax,EAX(%esp) # save the return value
-tracesys_exit:
- call SYMBOL_NAME(syscall_trace)
- jmp ret_from_sys_call
-badsys:
- movl $-ENOSYS,EAX(%esp)
- jmp ret_from_sys_call
+ jnae syscall_traced
+ jmp syscall_exit
+ # perform syscall exit tracing
ALIGN
-ENTRY(ret_from_intr)
- GET_CURRENT(%ebx)
-ret_from_exception:
- movl EFLAGS(%esp),%eax # mix EFLAGS and CS
- movb CS(%esp),%al
- testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
- jne ret_from_sys_call
- jmp restore_all
+syscall_exit_work:
+ testb %ch,%ch # current->work.syscall_trace
+ jz work_pending
+ sti # could let do_syscall_trace() call schedule() instead
+ movl %esp,%eax
+ movl $1,%edx
+ call SYMBOL_NAME(do_syscall_trace)
+ jmp resume_userspace
ALIGN
-reschedule:
- call SYMBOL_NAME(schedule) # test
- jmp ret_from_sys_call
+syscall_badsys:
+ movl $-ENOSYS,EAX(%esp)
+ jmp resume_userspace
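Read as C, the new exit path amounts to the loop sketched below. This is
illustration only, not kernel source; work_word and NEED_RESCHED_BYTE are
invented stand-ins for the movl/andl and testb %cl,%cl operations above:

	for (;;) {
		__cli();				/* sample work atomically vs. the iret */
		work = current->work_word & 0xffff00ff;	/* ignore syscall_trace count */
		if (!work)
			break;				/* restore_all */
		if (work & NEED_RESCHED_BYTE) {
			schedule();			/* work_resched */
			continue;
		}
		do_notify_resume(regs, NULL, current->work);	/* work_notifysig */
		break;
	}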
ENTRY(divide_error)
pushl $0 # no error code
.long SYMBOL_NAME(sys_ni_syscall) /* Reserved for Security */
.long SYMBOL_NAME(sys_gettid)
.long SYMBOL_NAME(sys_readahead) /* 225 */
+ .long SYMBOL_NAME(sys_setxattr)
+ .long SYMBOL_NAME(sys_lsetxattr)
+ .long SYMBOL_NAME(sys_fsetxattr)
+ .long SYMBOL_NAME(sys_getxattr)
+ .long SYMBOL_NAME(sys_lgetxattr) /* 230 */
+ .long SYMBOL_NAME(sys_fgetxattr)
+ .long SYMBOL_NAME(sys_listxattr)
+ .long SYMBOL_NAME(sys_llistxattr)
+ .long SYMBOL_NAME(sys_flistxattr)
+ .long SYMBOL_NAME(sys_removexattr) /* 235 */
+ .long SYMBOL_NAME(sys_lremovexattr)
+ .long SYMBOL_NAME(sys_fremovexattr)
.rept NR_syscalls-(.-sys_call_table)/4
.long SYMBOL_NAME(sys_ni_syscall)
.quad 0x00409a0000000000 /* 0x48 APM CS code */
.quad 0x00009a0000000000 /* 0x50 APM CS 16 code (16 bit) */
.quad 0x0040920000000000 /* 0x58 APM DS data */
+ /* Segments used for calling PnP BIOS */
+ .quad 0x00c09a0000000000 /* 0x60 32-bit code */
+ .quad 0x00809a0000000000 /* 0x68 16-bit code */
+ .quad 0x0080920000000000 /* 0x70 16-bit data */
+ .quad 0x0080920000000000 /* 0x78 16-bit data */
+ .quad 0x0080920000000000 /* 0x80 16-bit data */
+ .quad 0x0000000000000000 /* 0x88 not used */
+ .quad 0x0000000000000000 /* 0x90 not used */
+ .quad 0x0000000000000000 /* 0x98 not used */
+ /* Per CPU segments */
.fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */
/*
/*
* On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->need_resched flag instead of waiting for the
+ * to poll the ->work.need_resched flag instead of waiting for the
* cross-CPU IPI to arrive. Use this option with caution.
*/
static void poll_idle (void)
* Deal with another CPU just having chosen a thread to
* run here:
*/
- oldval = xchg(&current->need_resched, -1);
+ oldval = xchg(&current->work.need_resched, -1);
if (!oldval)
asm volatile(
"2:"
- "cmpl $-1, %0;"
+ "cmpb $-1, %0;"
"rep; nop;"
"je 2b;"
- : :"m" (current->need_resched));
+ : :"m" (current->work.need_resched));
}
/*
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
- if (request == PTRACE_SYSCALL)
- child->ptrace |= PT_TRACESYS;
- else
- child->ptrace &= ~PT_TRACESYS;
+ if (request == PTRACE_SYSCALL) {
+ if (!(child->ptrace & PT_SYSCALLTRACE)) {
+ child->ptrace |= PT_SYSCALLTRACE;
+ child->work.syscall_trace++;
+ }
+ }
+ else {
+ if (child->ptrace & PT_SYSCALLTRACE) {
+ child->ptrace &= ~PT_SYSCALLTRACE;
+ child->work.syscall_trace--;
+ }
+ }
child->exit_code = data;
/* make sure the single step bit is not set. */
tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
- child->ptrace &= ~PT_TRACESYS;
+ if (child->ptrace & PT_SYSCALLTRACE) {
+ child->ptrace &= ~PT_SYSCALLTRACE;
+ child->work.syscall_trace--;
+ }
if ((child->ptrace & PT_DTRACE) == 0) {
/* Spurious delayed TF traps may occur */
child->ptrace |= PT_DTRACE;
return ret;
}
-asmlinkage void syscall_trace(void)
+/* notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+__attribute__((regparm(3)))
+void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
- if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) !=
- (PT_PTRACED|PT_TRACESYS))
+ if ((current->ptrace & (PT_PTRACED|PT_SYSCALLTRACE)) !=
+ (PT_PTRACED|PT_SYSCALLTRACE))
return;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
current->exit_code = 0;
}
}
+
+/* notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+__attribute__((regparm(3)))
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+ struct task_work work_pending)
+{
+ /* deal with pending signal delivery */
+ if (work_pending.sigpending)
+ do_signal(regs,oldset);
+}
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
-
int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
{
if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
{
if (vm86 && regs->eflags & VM_MASK)
goto vm86_trap;
+
+#ifdef CONFIG_PNPBIOS
+ if (regs->xcs == 0x60 || regs->xcs == 0x68)
+ {
+ extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+ extern u32 pnp_bios_is_utter_crap;
+ pnp_bios_is_utter_crap = 1;
+ printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
+ __asm__ volatile(
+ "movl %0, %%esp\n\t"
+ "jmp %1\n\t"
+ : "=a" (pnp_bios_fault_esp), "=b" (pnp_bios_fault_eip));
+ panic("do_trap: can't hit this");
+ }
+#endif
+
if (!(regs->xcs & 3))
goto kernel_trap;
info->regs.__null_ds = 0;
info->regs.__null_es = 0;
-/* we are clearing fs,gs later just before "jmp ret_from_sys_call",
+/* we are clearing fs,gs later just before "jmp resume_userspace",
 * because starting with Linux 2.1.x they are no longer saved/restored
*/
__asm__ __volatile__(
"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
"movl %0,%%esp\n\t"
- "jmp ret_from_sys_call"
+ "jmp resume_userspace"
: /* no outputs */
:"r" (&info->regs), "b" (tsk) : "ax");
/* we never return here */
regs32 = save_v86_state(regs16);
regs32->eax = retval;
__asm__ __volatile__("movl %0,%%esp\n\t"
- "jmp ret_from_sys_call"
+ "jmp resume_userspace"
: : "r" (regs32), "b" (current));
}
.body
mov loc2=b6
;;
- br.call.sptk.many rp=syscall_trace
+#error br.call.sptk.many rp=syscall_trace
.ret3: mov rp=loc0
mov ar.pfs=loc1
mov b6=loc2
GLOBAL_ENTRY(ia64_trace_syscall)
PT_REGS_UNWIND_INFO(0)
- br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
+#error br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
.ret6: br.call.sptk.many rp=b6 // do the syscall
strace_check_retval:
cmp.lt p6,p0=r8,r0 // syscall failed?
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
ia64_strace_leave_kernel:
- br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
+#error br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
.rety: br.cond.sptk ia64_leave_kernel
strace_error:
#ifdef CONFIG_PERFMON
(pUser) ld8 r19=[r19] // load current->thread.pfm_must_block
#endif
-(pUser) ld8 r17=[r17] // load current->need_resched
-(pUser) ld4 r18=[r18] // load current->sigpending
+#error (pUser) ld8 r17=[r17] // load current->need_resched
+#error (pUser) ld4 r18=[r18] // load current->sigpending
;;
#ifdef CONFIG_PERFMON
(pUser) cmp.ne.unc p9,p0=r19,r0 // current->thread.pfm_must_block != 0?
#endif
-(pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0?
-(pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0?
+#error (pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0?
+#error (pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0?
;;
adds r2=PT(R8)+16,r12
adds r3=PT(R9)+16,r12
.spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
st8 [sp]=r9,-16 // allocate space for ar.unat and save it
.body
- br.call.sptk.many rp=ia64_do_signal
+#error br.call.sptk.many rp=ia64_do_signal
.ret15: .restore sp
adds sp=16,sp // pop scratch stack space
;;
{ "SIGFRAME_SIZE", sizeof (struct sigframe) },
{ "UNW_FRAME_INFO_SIZE", sizeof (struct unw_frame_info) },
{ "", 0 }, /* spacer */
- { "IA64_TASK_PTRACE_OFFSET", offsetof (struct task_struct, ptrace) },
- { "IA64_TASK_SIGPENDING_OFFSET", offsetof (struct task_struct, sigpending) },
- { "IA64_TASK_NEED_RESCHED_OFFSET", offsetof (struct task_struct, need_resched) },
+#error { "IA64_TASK_PTRACE_OFFSET", offsetof (struct task_struct, ptrace) },
+#error { "IA64_TASK_SIGPENDING_OFFSET", offsetof (struct task_struct, sigpending) },
+#error { "IA64_TASK_NEED_RESCHED_OFFSET", offsetof (struct task_struct, need_resched) },
{ "IA64_TASK_PROCESSOR_OFFSET", offsetof (struct task_struct, processor) },
{ "IA64_TASK_THREAD_OFFSET", offsetof (struct task_struct, thread) },
{ "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
- DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, sigpending));
- DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, need_resched));
+#error DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, work.sigpending));
+#error DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, work.need_resched));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
EXPORT(ret_from_fork)
move a0, v0 # prev
jal schedule_tail
- lw t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error lw t0, TASK_PTRACE($28) # syscall tracing enabled?
andi t0, PT_TRACESYS
bnez t0, tracesys_exit
j ret_from_sys_call
mtc0 t0, CP0_STATUS
nop; nop; nop
- lw v0, TASK_NEED_RESCHED($28)
- lw v1, TASK_SIGPENDING($28)
+#error lw v0, TASK_NEED_RESCHED($28)
+#error lw v1, TASK_SIGPENDING($28)
bnez v0, reschedule
bnez v1, signal_return
restore_all: .set noat
move a0, zero
move a1, sp
- jal do_signal
+#error jal do_signal
b restore_all
/*
stack_done:
sw a3, PT_R26(sp) # save for syscall restart
- lw t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error lw t0, TASK_PTRACE($28) # syscall tracing enabled?
andi t0, PT_TRACESYS
bnez t0, trace_a_syscall
xori t0, t0, 1
mtc0 t0, CP0_STATUS
- lw t2, TASK_NEED_RESCHED($28)
+#error lw t2, TASK_NEED_RESCHED($28)
bnez t2, o32_reschedule
- lw v0, TASK_SIGPENDING($28)
+#error lw v0, TASK_SIGPENDING($28)
bnez v0, signal_return
restore_all:
RESTORE_SOME
move a0, zero
move a1, sp
- jal do_signal
+#error jal do_signal
b restore_all
o32_reschedule:
trace_a_syscall:
SAVE_STATIC
sw t2, PT_R1(sp)
- jal syscall_trace
+#error jal syscall_trace
lw t2, PT_R1(sp)
lw a0, PT_R4(sp) # Restore argument registers
sw v0, PT_R0(sp) # set flag for syscall restarting
1: sw v0, PT_R2(sp) # result
- jal syscall_trace
+#error jal syscall_trace
j ret_from_sys_call
/* ------------------------------------------------------------------------ */
static void reschedule_this_cpu(void *dummy)
{
- current->need_resched = 1;
+ current->work.need_resched = 1;
}
void FASTCALL(smp_send_reschedule(int cpu))
text("/* MIPS task_struct offsets. */");
offset("#define TASK_STATE ", struct task_struct, state);
offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
- offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
- offset("#define TASK_PTRACE ", struct task_struct, ptrace);
+#error offset("#define TASK_SIGPENDING ", struct task_struct, work.sigpending);
+#error offset("#define TASK_NEED_RESCHED ", struct task_struct, work.need_resched);
+#error offset("#define TASK_PTRACE ", struct task_struct, ptrace);
offset("#define TASK_COUNTER ", struct task_struct, counter);
offset("#define TASK_NICE ", struct task_struct, nice);
offset("#define TASK_MM ", struct task_struct, mm);
#include <asm/stackframe.h>
/* This duplicates the definition from <linux/sched.h> */
-#define PT_TRACESYS 0x00000002 /* tracing system calls */
+#error #define PT_TRACESYS 0x00000002 /* tracing system calls */
#define KU_USER 0x10
FEXPORT(ret_from_fork)
move a0, v0 # prev
jal schedule_tail
- lw t0, TASK_PTRACE($28) # syscall tracing enabled?
- andi t0, PT_TRACESYS
+#error lw t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error andi t0, PT_TRACESYS
bnez t0, tracesys_exit
j ret_from_sys_call
xori t0, t0, 1
mtc0 t0, CP0_STATUS
- ld v0, TASK_NEED_RESCHED($28)
- lw v1, TASK_SIGPENDING($28)
+#error ld v0, TASK_NEED_RESCHED($28)
+#error lw v1, TASK_SIGPENDING($28)
bnez v0, reschedule
bnez v1, signal_return
ori t0, t0, 1
mtc0 t0, CP0_STATUS
- ld t2, TASK_NEED_RESCHED($28)
+#error ld t2, TASK_NEED_RESCHED($28)
bnez t2, reschedule
- lw v0, TASK_SIGPENDING($28)
+#error lw v0, TASK_SIGPENDING($28)
bnez v0, signal_return
restore_all:
#include <asm/sysmips.h>
/* This duplicates the definition from <linux/sched.h> */
-#define PT_TRACESYS 0x00000002 /* tracing system calls */
+#error #define PT_TRACESYS 0x00000002 /* tracing system calls */
/* This duplicates the definition from <asm/signal.h> */
#define SIGILL 4 /* Illegal instruction (ANSI). */
bgez t0, stackargs
stack_done:
- ld t0, TASK_PTRACE($28) # syscall tracing enabled?
- andi t0, PT_TRACESYS
+#error ld t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error andi t0, PT_TRACESYS
bnez t0, trace_a_syscall
jalr t2 # Do The Real Thing (TM)
xori t0, t0, 1
mtc0 t0, CP0_STATUS
- ld t2, TASK_NEED_RESCHED($28)
+#error ld t2, TASK_NEED_RESCHED($28)
bnez t2, o32_reschedule
- lw v0, TASK_SIGPENDING($28)
+#error lw v0, TASK_SIGPENDING($28)
bnez v0, signal_return
restore_all: RESTORE_SOME
move a0, zero
move a1, sp
SAVE_STATIC
- jal do_signal
+#error jal do_signal
o32_reschedule:
SAVE_STATIC
sd v0, PT_R0(sp) # set flag for syscall restarting
1: sd v0, PT_R2(sp) # result
- jal syscall_trace
+#error jal syscall_trace
j o32_ret_from_sys_call
/* ------------------------------------------------------------------------ */
1: sd v0, PT_R2(sp) # result
/* Success, so skip usual error handling garbage. */
- ld t0, TASK_PTRACE($28) # syscall tracing enabled?
- andi t0, PT_TRACESYS
+#error ld t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error andi t0, PT_TRACESYS
bnez t0, 1f
b o32_ret_from_sys_call
1: SAVE_STATIC
- jal syscall_trace
+#error jal syscall_trace
li a3, 0 # success
j ret_from_sys_call
text("/* MIPS task_struct offsets. */");
offset("#define TASK_STATE ", struct task_struct, state);
offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
- offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
- offset("#define TASK_PTRACE ", struct task_struct, ptrace);
+#error offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
+#error offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
+#error offset("#define TASK_PTRACE ", struct task_struct, ptrace);
offset("#define TASK_COUNTER ", struct task_struct, counter);
offset("#define TASK_NICE ", struct task_struct, nice);
offset("#define TASK_MM ", struct task_struct, mm);
copy %r30,%r1
/* FIXME! depi below has hardcoded dependency on kernel stack size */
depi 0,31,14,%r1 /* get task pointer */
- LDREG TASK_NEED_RESCHED(%r1),%r19 /* sched.h: long need_resched */
+#error LDREG TASK_NEED_RESCHED(%r1),%r19 /* sched.h: long need_resched */
comib,<>,n 0,%r19,intr_do_resched /* forward */
intr_check_sig:
/* As above */
copy %r30,%r1
depi 0,31,14,%r1 /* get task pointer */
- ldw TASK_SIGPENDING(%r1),%r19 /* sched.h: int sigpending */
+#error ldw TASK_SIGPENDING(%r1),%r19 /* sched.h: int sigpending */
comib,<>,n 0,%r19,intr_do_signal /* forward */
intr_restore:
copy %r0, %r24 /* unsigned long in_syscall */
copy %r16, %r25 /* struct pt_regs *regs */
ssm PSW_SM_I, %r0
- bl do_signal,%r2
+#error bl do_signal,%r2
copy %r0, %r26 /* sigset_t *oldset = NULL */
b intr_restore
/* check for reschedule */
- LDREG TASK_NEED_RESCHED-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
+#error LDREG TASK_NEED_RESCHED-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
comib,<>,n 0,%r19,syscall_do_resched /* forward */
syscall_check_sig:
ldo -TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
/* check for pending signals */
- ldw TASK_SIGPENDING(%r1),%r19
+#error ldw TASK_SIGPENDING(%r1),%r19
comib,<>,n 0,%r19,syscall_do_signal /* forward */
syscall_restore:
/* disable interrupts while dicking with the kernel stack, */
/* or life can become unpleasant */
rsm PSW_SM_I, %r20
- LDREG TASK_PTRACE(%r1), %r19 /* Are we being ptraced? */
+#error LDREG TASK_PTRACE(%r1), %r19 /* Are we being ptraced? */
bb,<,n %r19,31,syscall_restore_rfi
LDREG TASK_PT_GR20(%r1),%r19
mtctl %r19, %cr27
ldi 1, %r24 /* unsigned long in_syscall */
- bl do_signal,%r2
+#error bl do_signal,%r2
copy %r0, %r26 /* sigset_t *oldset = NULL */
ldo -TASK_SZ_ALGN-FRAME_SIZE(%r30), %r1 /* reload task ptr */
text("/* PARISC task_struct offsets. */");
offset("#define TASK_STATE ", struct task_struct, state);
offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
+#error offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
offset("#define TASK_SEGMENT ", struct task_struct, addr_limit);
- offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
+#error offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
offset("#define TASK_COUNTER ", struct task_struct, counter);
- offset("#define TASK_PTRACE ", struct task_struct, ptrace);
+#error offset("#define TASK_PTRACE ", struct task_struct, ptrace);
offset("#define TASK_NICE ", struct task_struct, nice);
offset("#define TASK_MM ", struct task_struct, mm);
offset("#define TASK_PROCESSOR ", struct task_struct, processor);
.globl ret_from_fork
ret_from_fork:
bl schedule_tail
- lwz r0,TASK_PTRACE(r2)
+#error lwz r0,TASK_PTRACE(r2)
andi. r0,r0,PT_TRACESYS
- bnel- syscall_trace
+#error bnel- syscall_trace
b ret_from_except
.globl ret_from_intercept
lwz r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
beq+ do_signal_ret /* if so, check need_resched and signals */
- lwz r3,NEED_RESCHED(r2)
+#error lwz r3,NEED_RESCHED(r2)
cmpi 0,r3,0 /* check need_resched flag */
beq+ 7f
bl schedule
-7: lwz r5,SIGPENDING(r2) /* Check for pending unblocked signals */
+#error 7: lwz r5,SIGPENDING(r2) /* Check for pending unblocked signals */
cmpwi 0,r5,0
beq+ do_signal_ret
li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD
- bl do_signal
+#error bl do_signal
.globl do_signal_ret
do_signal_ret:
.globl ret_to_user_hook
DEFINE(NEXT_TASK, offsetof(struct task_struct, next_task));
DEFINE(COUNTER, offsetof(struct task_struct, counter));
DEFINE(PROCESSOR, offsetof(struct task_struct, processor));
- DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
+#error DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(PT_TRACESYS, PT_TRACESYS);
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
- DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
- DEFINE(NEED_RESCHED, offsetof(struct task_struct, need_resched));
+#error DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+#error DEFINE(NEED_RESCHED, offsetof(struct task_struct, need_resched));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#ifdef CONFIG_ALTIVEC
smp_call_function_interrupt();
break;
case PPC_MSG_RESCHEDULE:
- current->need_resched = 1;
+ current->work.need_resched = 1;
break;
case PPC_MSG_INVALIDATE_TLB:
_tlbia();
*/
state = 0
flags = 4
-sigpending = 8
-need_resched = 24
-tsk_ptrace = 28
+#error sigpending = 8
+#error need_resched = 24
+#error tsk_ptrace = 28
processor = 56
/*
stosm 24(%r15),0x03 # reenable interrupts
sll %r8,2
l %r8,sys_call_table-entry_base(8,%r13) # get address of system call
- tm tsk_ptrace+3(%r9),0x02 # PT_TRACESYS
+#error tm tsk_ptrace+3(%r9),0x02 # PT_TRACESYS
bnz BASED(sysc_tracesys)
basr %r14,%r8 # call sys_xxxx
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
#
# check, if reschedule is needed
#
- icm %r0,15,need_resched(%r9) # get need_resched from task_struct
+#error icm %r0,15,need_resched(%r9) # get need_resched from task_struct
bnz BASED(sysc_reschedule)
- icm %r0,15,sigpending(%r9) # get sigpending from task_struct
+#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
bnz BASED(sysc_signal_return)
sysc_leave:
tm SP_PGM_OLD_ILC(%r15),0xff
#
# check, if reschedule is needed
#
- icm %r0,15,need_resched(%r9) # get need_resched from task_struct
+#error icm %r0,15,need_resched(%r9) # get need_resched from task_struct
bnz BASED(io_reschedule)
- icm %r0,15,sigpending(%r9) # get sigpending from task_struct
+#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
bnz BASED(io_signal_return)
io_leave:
stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
*/
.Ls390_mcck: .long s390_do_machine_check
.Ldo_IRQ: .long do_IRQ
-.Ldo_signal: .long do_signal
+#error .Ldo_signal: .long do_signal
.Ldo_softirq: .long do_softirq
.Lentry_base: .long entry_base
.Lext_hash: .long ext_int_hash
.Lsigreturn: .long sys_sigreturn
.Lsigsuspend: .long sys_sigsuspend
.Lsigaltstack: .long sys_sigaltstack
-.Ltrace: .long syscall_trace
+#error .Ltrace: .long syscall_trace
.Lvfork: .long sys_vfork
#ifdef CONFIG_SMP
*/
state = 0
flags = 8
-sigpending = 16
-need_resched = 32
-tsk_ptrace = 40
+#error sigpending = 16
+#error need_resched = 32
+#error tsk_ptrace = 40
processor = 92
/*
#
# check, if reschedule is needed
#
- lg %r0,need_resched(%r9) # get need_resched from task_struct
+#error lg %r0,need_resched(%r9) # get need_resched from task_struct
ltgr %r0,%r0
jnz sysc_reschedule
- icm %r0,15,sigpending(%r9) # get sigpending from task_struct
+#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
jnz sysc_signal_return
sysc_leave:
tm SP_PGM_OLD_ILC(%r15),0xff
sysc_tracesys:
lghi %r2,-ENOSYS
stg %r2,SP_R2(%r15) # give sysc_trace an -ENOSYS retval
- brasl %r14,syscall_trace
+#error brasl %r14,syscall_trace
lg %r2,SP_R2(%r15)
cghi %r2,-ENOSYS
je sysc_tracesys_dn1
basr %r14,%r8 # call sys_xxx
stg %r2,SP_R2(%r15) # store return value
larl %r14,sysc_return
- jg syscall_trace # return point is sysc_return
+#error jg syscall_trace # return point is sysc_return
#
# call schedule with sysc_return as return-address
#
# check, if reschedule is needed
#
- lg %r0,need_resched(%r9) # get need_resched from task_struct
+#error lg %r0,need_resched(%r9) # get need_resched from task_struct
ltgr %r0,%r0
jnz io_reschedule
- icm %r0,15,sigpending(%r9) # get sigpending from task_struct
+#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
jnz io_signal_return
io_leave:
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
* These are offsets into the task-struct.
*/
flags = 4
-sigpending = 8
-need_resched = 20
-tsk_ptrace = 24
+#error sigpending = 8
+#error need_resched = 20
+#error tsk_ptrace = 24
-PT_TRACESYS = 0x00000002
+#error PT_TRACESYS = 0x00000002
ENOSYS = 38
EINVAL = 22
! If we're being traced, return via syscall_ret_trace, otherwise
! return directly to ret_from_syscall
stc k_current, r0
- mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
- mov #PT_TRACESYS, r1
+#error mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
+#error mov #PT_TRACESYS, r1
tst r1, r0
bt ret_from_syscall
bra syscall_ret_trace
STI()
!
stc k_current, r11
- mov.l @(tsk_ptrace,r11), r10 ! Is current PTRACE_SYSCALL'd?
- mov #PT_TRACESYS, r11
+#error mov.l @(tsk_ptrace,r11), r10 ! Is current PTRACE_SYSCALL'd?
+#error mov #PT_TRACESYS, r11
tst r11, r10
bt 5f
! Yes it is traced.
.align 2
__TRA: .long TRA
__syscall_trace:
- .long SYMBOL_NAME(syscall_trace)
+#error .long SYMBOL_NAME(syscall_trace)
__n_sys:.long NR_syscalls
__sct: .long SYMBOL_NAME(sys_call_table)
__syscall_ret_trace:
ldc r0, sr
!
stc k_current, r1
- mov.l @(need_resched,r1), r0
+#error mov.l @(need_resched,r1), r0
tst r0, r0
bf reschedule
- mov.l @(sigpending,r1), r0
+#error mov.l @(sigpending,r1), r0
tst r0, r0
bt restore_all
signal_return:
lds r0, pr
.align 2
__do_signal:
- .long SYMBOL_NAME(do_signal)
+#error .long SYMBOL_NAME(do_signal)
__irq_stat:
.long SYMBOL_NAME(irq_stat)
nop
1:
- ld [%curptr + AOFF_task_need_resched], %g2
+#error ld [%curptr + AOFF_task_need_resched], %g2
orcc %g2, %g0, %g0
be signal_p
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
call C_LABEL(schedule)
nop
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
signal_p:
cmp %g2, 0
bz,a ret_trap_continue
add %sp, REGWIN_SZ, %o0
b signal_p
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
ret_trap_nobufwins:
/* Load up the user's out registers so we can pull
nop
b signal_p
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
ret_trap_kernel:
/* Will the rett land us in the invalid window? */
add %sp, REGWIN_SZ, %o0
b signal_p
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
.globl C_LABEL(sun4c_rett_stackchk)
C_LABEL(sun4c_rett_stackchk):
/* Reschedule call back. */
void smp_reschedule_irq(void)
{
- current->need_resched = 1;
+ current->work.need_resched = 1;
}
/* Stopping processors. */
/* endless idle loop with no priority at all */
for (;;) {
- /* If current->need_resched is zero we should really
+ /* If current->work.need_resched is zero we should really
* setup for a system wakup event and execute a shutdown
* instruction.
*
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Redo sched+sig checks */
- ldx [%g6 + AOFF_task_need_resched], %l0
+#error ldx [%g6 + AOFF_task_need_resched], %l0
brz,pt %l0, 1f
nop
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
-1: lduw [%g6 + AOFF_task_sigpending], %l0
+#error 1: lduw [%g6 + AOFF_task_sigpending], %l0
brz,pt %l0, __handle_user_windows_continue
nop
clr %o0
call fault_in_user_windows
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
-1: ldx [%g6 + AOFF_task_need_resched], %l0
+#error 1: ldx [%g6 + AOFF_task_need_resched], %l0
brz,pt %l0, 1f
nop
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
-1: lduw [%g6 + AOFF_task_sigpending], %l0
+#error 1: lduw [%g6 + AOFF_task_sigpending], %l0
brz,pt %l0, __handle_perfctrs_continue
sethi %hi(TSTATE_PEF), %o0
clr %o0
*/
to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_preemption_continue:
- ldx [%g6 + AOFF_task_need_resched], %l0
+#error ldx [%g6 + AOFF_task_need_resched], %l0
brnz,pn %l0, __handle_preemption
- lduw [%g6 + AOFF_task_sigpending], %l0
+#error lduw [%g6 + AOFF_task_sigpending], %l0
brnz,pn %l0, __handle_signal
nop
__handle_signal_continue:
}
EXPORT_SYMBOL(device_register);
+EXPORT_SYMBOL(put_device);
EXPORT_SYMBOL(iobus_register);
+EXPORT_SYMBOL(put_iobus);
EXPORT_SYMBOL(device_driver_init);
if (!dev->driver)
goto done;
- num_args = sscanf(buf,"%s %s %u",str_command,str_stage,&state);
+ num_args = sscanf(buf,"%10s %10s %u",str_command,str_stage,&state);
error = -EINVAL;
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
-#include "zlib.c"
+#include <linux/zlib.h>
/*
* State for a Deflate (de)compressor.
#define DEFLATE_OVHD 2 /* Deflate overhead/packet */
-static void *zalloc __P((void *, unsigned int items, unsigned int size));
-static void *zalloc_init __P((void *, unsigned int items,
- unsigned int size));
-static void zfree __P((void *, void *ptr));
static void *z_comp_alloc __P((unsigned char *options, int opt_len));
static void *z_decomp_alloc __P((unsigned char *options, int opt_len));
static void z_comp_free __P((void *state));
static void z_decomp_reset __P((void *state));
static void z_comp_stats __P((void *state, struct compstat *stats));
-struct chunk_header {
- int valloced; /* allocated with valloc, not kmalloc */
- int guard; /* check for overwritten header */
-};
-
-#define GUARD_MAGIC 0x77a8011a
-#define MIN_VMALLOC 2048 /* use kmalloc for blocks < this */
-
-/*
- * Space allocation and freeing routines for use by zlib routines.
- */
-void
-zfree(arg, ptr)
- void *arg;
- void *ptr;
-{
- struct chunk_header *hdr = ((struct chunk_header *)ptr) - 1;
-
- if (hdr->guard != GUARD_MAGIC) {
- printk(KERN_WARNING "zfree: header corrupted (%x %x) at %p\n",
- hdr->valloced, hdr->guard, hdr);
- return;
- }
- if (hdr->valloced)
- vfree(hdr);
- else
- kfree(hdr);
-}
-
-void *
-zalloc(arg, items, size)
- void *arg;
- unsigned int items, size;
-{
- struct chunk_header *hdr;
- unsigned nbytes;
-
- nbytes = items * size + sizeof(*hdr);
- hdr = kmalloc(nbytes, GFP_ATOMIC);
- if (hdr == 0)
- return 0;
- hdr->valloced = 0;
- hdr->guard = GUARD_MAGIC;
- return (void *) (hdr + 1);
-}
-
-void *
-zalloc_init(arg, items, size)
- void *arg;
- unsigned int items, size;
-{
- struct chunk_header *hdr;
- unsigned nbytes;
-
- nbytes = items * size + sizeof(*hdr);
- if (nbytes >= MIN_VMALLOC)
- hdr = vmalloc(nbytes);
- else
- hdr = kmalloc(nbytes, GFP_KERNEL);
- if (hdr == 0)
- return 0;
- hdr->valloced = nbytes >= MIN_VMALLOC;
- hdr->guard = GUARD_MAGIC;
- return (void *) (hdr + 1);
-}
-
static void
z_comp_free(arg)
void *arg;
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (state) {
- deflateEnd(&state->strm);
+ zlib_deflateEnd(&state->strm);
+ if (state->strm.workspace)
+ kfree(state->strm.workspace);
kfree(state);
MOD_DEC_USE_COUNT;
}
if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
return NULL;
- state = (struct ppp_deflate_state *) kmalloc(sizeof(*state), GFP_KERNEL);
+ state = (struct ppp_deflate_state *) kmalloc(sizeof(*state),
+ GFP_KERNEL);
if (state == NULL)
return NULL;
MOD_INC_USE_COUNT;
memset (state, 0, sizeof (struct ppp_deflate_state));
- state->strm.next_in = NULL;
- state->strm.zalloc = zalloc_init;
- state->strm.zfree = zfree;
- state->w_size = w_size;
+ state->strm.next_in = NULL;
+ state->w_size = w_size;
+ state->strm.workspace = kmalloc(zlib_deflate_workspacesize(),
+ GFP_KERNEL);
+ if (state->strm.workspace == NULL)
+ goto out_free;
- if (deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
+ if (zlib_deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
DEFLATE_METHOD_VAL, -w_size, 8, Z_DEFAULT_STRATEGY)
!= Z_OK)
goto out_free;
- state->strm.zalloc = zalloc;
return (void *) state;
out_free:
state->unit = unit;
state->debug = debug;
- deflateReset(&state->strm);
+ zlib_deflateReset(&state->strm);
return 1;
}
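The conversion pattern, as a standalone hedged sketch (the function name is
made up; zlib_deflate_workspacesize() and zlib_deflateInit() are the
in-kernel zlib entry points this patch switches to):

	#include <linux/zlib.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* A caller-allocated workspace replaces the zalloc/zfree callbacks. */
	static z_stream *example_deflate_stream(void)
	{
		z_stream *strm = kmalloc(sizeof(*strm), GFP_KERNEL);

		if (strm == NULL)
			return NULL;
		memset(strm, 0, sizeof(*strm));
		strm->workspace = kmalloc(zlib_deflate_workspacesize(), GFP_KERNEL);
		if (strm->workspace == NULL)
			goto err_free_strm;
		if (zlib_deflateInit(strm, Z_DEFAULT_COMPRESSION) != Z_OK)
			goto err_free_workspace;
		return strm;

	err_free_workspace:
		kfree(strm->workspace);
	err_free_strm:
		kfree(strm);
		return NULL;
	}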
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
state->seqno = 0;
- deflateReset(&state->strm);
+ zlib_deflateReset(&state->strm);
}
int
state->strm.avail_in = (isize - off);
for (;;) {
- r = deflate(&state->strm, Z_PACKET_FLUSH);
+ r = zlib_deflate(&state->strm, Z_PACKET_FLUSH);
if (r != Z_OK) {
if (state->debug)
printk(KERN_ERR
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (state) {
- inflateEnd(&state->strm);
+ zlib_inflateEnd(&state->strm);
+ if (state->strm.workspace)
+ kfree(state->strm.workspace);
kfree(state);
MOD_DEC_USE_COUNT;
}
MOD_INC_USE_COUNT;
memset (state, 0, sizeof (struct ppp_deflate_state));
- state->w_size = w_size;
- state->strm.next_out = NULL;
- state->strm.zalloc = zalloc_init;
- state->strm.zfree = zfree;
+ state->w_size = w_size;
+ state->strm.next_out = NULL;
+ state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
+ GFP_KERNEL);
+ if (state->strm.workspace == NULL)
+ goto out_free;
- if (inflateInit2(&state->strm, -w_size) != Z_OK)
+ if (zlib_inflateInit2(&state->strm, -w_size) != Z_OK)
goto out_free;
- state->strm.zalloc = zalloc;
return (void *) state;
out_free:
state->debug = debug;
state->mru = mru;
- inflateReset(&state->strm);
+ zlib_inflateReset(&state->strm);
return 1;
}
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
state->seqno = 0;
- inflateReset(&state->strm);
+ zlib_inflateReset(&state->strm);
}
/*
* Call inflate, supplying more input or output as needed.
*/
for (;;) {
- r = inflate(&state->strm, Z_PACKET_FLUSH);
+ r = zlib_inflate(&state->strm, Z_PACKET_FLUSH);
if (r != Z_OK) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: inflate returned %d (%s)\n",
++state->strm.avail_in;
}
- r = inflateIncomp(&state->strm);
+ r = zlib_inflateIncomp(&state->strm);
if (r != Z_OK) {
/* gak! */
if (state->debug) {
* the documentation/chipset releases. An on-line errata would be welcome.
*
* TODO:
- * - some trivial error lurk,
- * - the stats are fscked,
+ * - syncppp oopses. X25 untested.
* - use polling at high irq/s,
* - performance analysis,
* - endianness.
*
+ * 2001/12/10 Daniela Squassoni <daniela@cyclades.com>
+ * - Contribution to support the new generic HDLC layer.
+ *
+ * 2002/01 Ueimor
+ * - old style interface removal
+ * - dscc4_release_ring fix (related to DMA mapping)
+ * - hard_start_xmit fix (hint: TxSizeMax)
+ * - misc crapectomy.
*/
#include <linux/version.h>
#include <linux/hdlc.h>
/* Version */
-static const char version[] = "$Id: dscc4.c,v 1.130 2001/02/25 15:27:34 romieu Exp $\n";
+static const char version[] = "$Id: dscc4.c,v 1.157 2002/01/28 01:54:19 romieu Exp $\n";
static int debug;
+static int quartz;
+
+#define DRV_NAME "dscc4"
+#undef DSCC4_POLLING
+#define DEBUG
/* Module parameters */
+
MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
-MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
MODULE_LICENSE("GPL");
MODULE_PARM(debug,"i");
+MODULE_PARM_DESC(debug,"Enable/disable extra messages");
+MODULE_PARM(quartz,"i");
+MODULE_PARM_DESC(quartz,"If present, on-board quartz frequency (Hz)");
+
+EXPORT_NO_SYMBOLS;
/* Structures */
+
+struct thingie {
+ int define;
+ u32 bits;
+};
+
struct TxFD {
u32 state;
u32 next;
};
#define DEBUG
-#define DEBUG_PARANOID
+#define DEBUG_PARANOIA
#define TX_RING_SIZE 32
#define RX_RING_SIZE 32
#define IRQ_RING_SIZE 64 /* Keep it a multiple of 32 */
#define TX_TIMEOUT (HZ/10)
+#define DSCC4_HZ_MAX 33000000
#define BRR_DIVIDER_MAX 64*0x00008000
#define dev_per_card 4
#define SOURCE_ID(flags) ((flags >> 28 ) & 0x03)
#define TO_SIZE(state) ((state >> 16) & 0x1fff)
#define TO_STATE(len) cpu_to_le32((len & TxSizeMax) << 16)
#define RX_MAX(len) ((((len) >> 5) + 1) << 5)
#define SCC_REG_START(id) SCC_START+(id)*SCC_OFFSET
#undef DEBUG
spinlock_t lock;
struct pci_dev *pdev;
- struct net_device *root;
+ struct dscc4_dev_priv *root;
dma_addr_t iqcfg_dma;
u32 xtal_hz;
};
dma_addr_t iqtx_dma;
dma_addr_t iqrx_dma;
- struct net_device_stats stats;
struct timer_list timer;
struct dscc4_pci_priv *pci_priv;
spinlock_t lock;
int dev_id;
- u32 flags;
+ volatile u32 flags;
u32 timer_help;
u32 hi_expected;
- struct hdlc_device_struct hdlc;
- int usecount;
+ hdlc_device hdlc;
+ sync_serial_settings settings;
+ unsigned short encoding;
+ unsigned short parity;
+ u32 pad __attribute__ ((aligned (4)));
};
/* GLOBAL registers definitions */
#define ISR 0x58
/* Bit masks */
+#define EncodingMask 0x00700000
+#define CrcMask 0x00000003
+
#define IntRxScc0 0x10000000
#define IntTxScc0 0x01000000
#define Rdt 0x00200000
#define Idr 0x00100000
#define Idt 0x00080000
-#define TxSccRes 0x01000000
-#define RxSccRes 0x00010000
-#define TxSizeMax 0x1ffc
-#define RxSizeMax 0x1ffc
+#define TxSccRes 0x01000000
+#define RxSccRes 0x00010000
+#define TxSizeMax 0x1fff
#define Ccr0ClockMask 0x0000003f
#define Ccr1LoopMask 0x00000200
#define FrameVfr 0x80
#define FrameRdo 0x40
#define FrameCrc 0x20
+#define FrameRab 0x10
#define FrameAborted 0x00000200
#define FrameEnd 0x80000000
#define DataComplete 0x40000000
#define RdoSet 0x00000004
/* Functions prototypes */
-static __inline__ void dscc4_rx_irq(struct dscc4_pci_priv *, struct net_device *);
-static __inline__ void dscc4_tx_irq(struct dscc4_pci_priv *, struct net_device *);
+static inline void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
+static inline void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static int dscc4_found1(struct pci_dev *, unsigned long ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static int dscc4_start_xmit(struct sk_buff *, struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int dscc4_change_mtu(struct net_device *dev, int mtu);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static void dscc4_irq(int irq, void *dev_id, struct pt_regs *ptregs);
-static struct net_device_stats *dscc4_get_stats(struct net_device *);
-static int dscc4_attach_hdlc_device(struct net_device *);
-static void dscc4_unattach_hdlc_device(struct net_device *);
-static int dscc4_hdlc_open(struct hdlc_device_struct *);
-static void dscc4_hdlc_close(struct hdlc_device_struct *);
-static int dscc4_hdlc_ioctl(struct hdlc_device_struct *, struct ifreq *, int);
-static int dscc4_hdlc_xmit(hdlc_device *, struct sk_buff *);
-#ifdef EXPERIMENTAL_POLLING
+static int dscc4_hdlc_attach(hdlc_device *, unsigned short, unsigned short);
+static int dscc4_set_iface(struct net_device *);
+static inline int dscc4_set_quartz(struct dscc4_dev_priv *, int);
+#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
#endif
-void inline reset_TxFD(struct TxFD *tx_fd) {
- /* FIXME: test with the last arg (size specification) = 0 */
- tx_fd->state = FrameEnd | Hold | 0x00100000;
- tx_fd->complete = 0x00000000;
+static inline void dscc4_patch_register(u32 ioaddr, u32 mask, u32 value)
+{
+ u32 state;
+
+ state = readl(ioaddr);
+ state &= ~mask;
+ state |= value;
+ writel(state, ioaddr);
}
-void inline dscc4_release_ring_skbuff(struct sk_buff **p, int n)
+int state_check(u32 state, struct dscc4_dev_priv *dpriv,
+ struct net_device *dev, const char *msg)
{
- for(; n > 0; n--) {
- if (*p)
- dev_kfree_skb(*p);
- p++;
+#ifdef DEBUG_PARANOIA
+ if (SOURCE_ID(state) != dpriv->dev_id) {
+ printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
+ dev->name, msg, SOURCE_ID(state), state );
+ return -1;
}
+ if (state & 0x0df80c00) {
+ printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
+ dev->name, msg, state);
+ return -1;
+ }
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+void inline reset_TxFD(struct TxFD *tx_fd) {
+ /* FIXME: test with the last arg (size specification) = 0 */
+ tx_fd->state = FrameEnd | Hold | 0x00100000;
+ tx_fd->complete = 0x00000000;
}
static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
struct pci_dev *pdev = dpriv->pci_priv->pdev;
+ struct TxFD *tx_fd = dpriv->tx_fd;
+ struct RxFD *rx_fd = dpriv->rx_fd;
+ struct sk_buff **skbuff;
+ int i;
+
+ pci_free_consistent(pdev, TX_RING_SIZE*sizeof(struct TxFD), tx_fd,
+ dpriv->tx_fd_dma);
+ pci_free_consistent(pdev, RX_RING_SIZE*sizeof(struct RxFD), rx_fd,
+ dpriv->rx_fd_dma);
+
+ skbuff = dpriv->tx_skbuff;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (*skbuff) {
+ pci_unmap_single(pdev, tx_fd->data, (*skbuff)->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(*skbuff);
+ }
+ skbuff++;
+ tx_fd++;
+ }
- pci_free_consistent(pdev, TX_RING_SIZE*sizeof(struct TxFD),
- dpriv->tx_fd, dpriv->tx_fd_dma);
- pci_free_consistent(pdev, RX_RING_SIZE*sizeof(struct RxFD),
- dpriv->rx_fd, dpriv->rx_fd_dma);
- dscc4_release_ring_skbuff(dpriv->tx_skbuff, TX_RING_SIZE);
- dscc4_release_ring_skbuff(dpriv->rx_skbuff, RX_RING_SIZE);
+ skbuff = dpriv->rx_skbuff;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (*skbuff) {
+ pci_unmap_single(pdev, rx_fd->data, (*skbuff)->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(*skbuff);
+ }
+ skbuff++;
+ rx_fd++;
+ }
}
void inline try_get_rx_skb(struct dscc4_dev_priv *priv, int cur, struct net_device *dev)
{
struct sk_buff *skb;
- skb = dev_alloc_skb(RX_MAX(HDLC_MAX_MRU+2));
+ skb = dev_alloc_skb(RX_MAX(HDLC_MAX_MRU));
priv->rx_skbuff[cur] = skb;
if (!skb) {
priv->rx_fd[cur--].data = (u32) NULL;
printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
return -1;
}
+ rmb();
}
printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name, msg, i);
return 0;
return -1;
}
-static __inline__ int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
+static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
- int cur;
+ int cur, ret = 0;
s16 i;
cur = dpriv->iqtx_current%IRQ_RING_SIZE;
for (i = 0; i >= 0; i++) {
if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
(dpriv->iqtx[cur] & Xpr))
- return 0;
+ break;
+ smp_rmb();
}
- printk(KERN_ERR "%s: %s timeout\n", "dscc4", "XPR");
- return -1;
+ if (i < 0) {
+ printk(KERN_ERR "%s: %s timeout\n", "dscc4", "XPR");
+ ret = -1;
+ }
+ return ret;
}
-static __inline__ void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, int cur,
+static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, int cur,
struct RxFD *rx_fd, struct net_device *dev)
{
struct pci_dev *pdev = dpriv->pci_priv->pdev;
pci_dma_sync_single(pdev, rx_fd->data, pkt_len + 1, PCI_DMA_FROMDEVICE);
if((skb->data[pkt_len] & FrameOk) == FrameOk) {
pci_unmap_single(pdev, rx_fd->data, skb->len, PCI_DMA_FROMDEVICE);
- dpriv->stats.rx_packets++;
- dpriv->stats.rx_bytes += pkt_len;
+ dev_to_hdlc(dev)->stats.rx_packets++;
+ dev_to_hdlc(dev)->stats.rx_bytes += pkt_len;
skb->tail += pkt_len;
skb->len = pkt_len;
if (netif_running(hdlc_to_dev(&dpriv->hdlc)))
- hdlc_netif_rx(&dpriv->hdlc, skb);
- else
- netif_rx(skb);
+ skb->protocol = htons(ETH_P_HDLC);
+ netif_rx(skb);
try_get_rx_skb(dpriv, cur, dev);
} else {
if(skb->data[pkt_len] & FrameRdo)
- dpriv->stats.rx_fifo_errors++;
+ dev_to_hdlc(dev)->stats.rx_fifo_errors++;
else if(!(skb->data[pkt_len] | ~FrameCrc))
- dpriv->stats.rx_crc_errors++;
- else if(!(skb->data[pkt_len] | ~FrameVfr))
- dpriv->stats.rx_length_errors++;
+ dev_to_hdlc(dev)->stats.rx_crc_errors++;
+ else if(!(skb->data[pkt_len] | ~(FrameVfr | FrameRab)))
+ dev_to_hdlc(dev)->stats.rx_length_errors++;
else
- dpriv->stats.rx_errors++;
+ dev_to_hdlc(dev)->stats.rx_errors++;
}
rx_fd->state1 |= Hold;
rx_fd->state2 = 0x00000000;
{
struct dscc4_pci_priv *priv;
struct dscc4_dev_priv *dpriv;
- int i;
static int cards_found = 0;
unsigned long ioaddr;
+ int i;
printk(KERN_DEBUG "%s", version);
goto err_out;
if (!request_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0), "registers")) {
- printk (KERN_ERR "dscc4: can't reserve MMIO region (regs)\n");
+ printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
+ DRV_NAME);
goto err_out;
}
if (!request_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1), "LBI interface")) {
- printk (KERN_ERR "dscc4: can't reserve MMIO region (lbi)\n");
+ printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
+ DRV_NAME);
goto err_out_free_mmio_region0;
}
ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!ioaddr) {
- printk(KERN_ERR "dscc4: cannot remap MMIO region %lx @ %lx\n",
- pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
+ printk(KERN_ERR "%s: cannot remap MMIO region %lx @ %lx\n",
+ DRV_NAME, pci_resource_len(pdev, 0),
+ pci_resource_start(pdev, 0));
goto err_out_free_mmio_region;
}
- printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d.\n",
+ printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d\n",
pci_resource_start(pdev, 0),
pci_resource_start(pdev, 1), pdev->irq);
- /* High PCI latency useless. Cf app. note. */
+ /* No need for High PCI latency. Cf app. note. */
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x10);
pci_set_master(pdev);
priv = (struct dscc4_pci_priv *)pci_get_drvdata(pdev);
- if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, "dscc4", priv->root)) {
- printk(KERN_WARNING "dscc4: IRQ %d is busy\n", pdev->irq);
+ if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, DRV_NAME, priv->root)){
+ printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
goto err_out_iounmap;
}
- priv->pdev = pdev;
/* power up/little endian/dma core controlled via hold bit */
writel(0x00000000, ioaddr + GMODE);
* IQRX/TXi needs to be set soon. Learned it the hard way...
*/
for(i = 0; i < dev_per_card; i++) {
- dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv = priv->root + i;
dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
if (!dpriv->iqtx)
writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
}
for(i = 0; i < dev_per_card; i++) {
- dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv = priv->root + i;
dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
if (!dpriv->iqrx)
writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
}
- /*
- * Cf application hint. Beware of hard-lock condition on
- * threshold .
- */
+ /* Cf application hint. Beware of hard-lock condition on threshold. */
writel(0x42104000, ioaddr + FIFOCR1);
//writel(0x9ce69800, ioaddr + FIFOCR2);
writel(0xdef6d800, ioaddr + FIFOCR2);
err_out_free_iqrx:
while (--i >= 0) {
- dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv = priv->root + i;
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
dpriv->iqrx, dpriv->iqrx_dma);
}
i = dev_per_card;
err_out_free_iqtx:
while (--i >= 0) {
- dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv = priv->root + i;
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
dpriv->iqtx, dpriv->iqtx_dma);
}
return -ENODEV;
};
+/*
+ * Let's hope the default values are decent enough to protect my
+ * feet from the user's gun - Ueimor
+ */
+static void dscc4_init_registers(u32 base_addr, int dev_id)
+{
+ u32 ioaddr = base_addr + SCC_REG_START(dev_id);
+
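+ /* Default CCR0: clock mode 0a (cf. dscc4_set_clock()). The 0x1000 bit
+  * was tagged "FIXME: VIS" in the open() code this replaces. */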
+ writel(0x80001000, ioaddr + CCR0);
+
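+ /* Rx length check: frames above HDLC_MAX_MRU are rejected. The >> 5
+  * suggests the limit is programmed in 32-byte units (assumption -
+  * check the RLCR layout in the datasheet). */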
+ writel(LengthCheck | (HDLC_MAX_MRU >> 5), ioaddr + RLCR);
+
+ /* no address recognition/crc-CCITT/cts enabled */
+ writel(0x021c8000, ioaddr + CCR1);
+
+ /* crc not forwarded */
+ writel(0x00050008 & ~RxActivate, ioaddr + CCR2);
+ // crc forwarded
+ //writel(0x00250008 & ~RxActivate, ioaddr + CCR2);
+
+ /* Don't mask RDO. Ever. */
+#ifdef DSCC4_POLLING
+ writel(0xfffeef7f, ioaddr + IMR); /* Interrupt mask */
+#else
+ //writel(0xfffaef7f, ioaddr + IMR); /* Interrupt mask */
+ writel(0xfffaef7e, ioaddr + IMR); /* Interrupt mask */
+#endif
+}
+
static int dscc4_found1(struct pci_dev *pdev, unsigned long ioaddr)
{
struct dscc4_pci_priv *ppriv;
- struct dscc4_dev_priv *dpriv;
- struct net_device *dev;
+ struct dscc4_dev_priv *root;
int i = 0;
- dpriv = (struct dscc4_dev_priv *)
- kmalloc(dev_per_card*sizeof(struct dscc4_dev_priv), GFP_KERNEL);
- if (!dpriv) {
- printk(KERN_ERR "dscc4: can't allocate data\n");
+ root = (struct dscc4_dev_priv *)
+ kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
+ if (!root) {
+ printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
goto err_out;
}
- memset(dpriv, 0, dev_per_card*sizeof(struct dscc4_dev_priv));
-
- dev = (struct net_device *)
- kmalloc(dev_per_card*sizeof(struct net_device), GFP_KERNEL);
- if (!dev) {
- printk(KERN_ERR "dscc4: can't allocate net_device\n");
- goto err_dealloc_priv;
- }
- memset(dev, 0, dev_per_card*sizeof(struct net_device));
+ memset(root, 0, dev_per_card*sizeof(*root));
- ppriv = (struct dscc4_pci_priv *)
- kmalloc(sizeof(struct dscc4_pci_priv), GFP_KERNEL);
+ ppriv = (struct dscc4_pci_priv *) kmalloc(sizeof(*ppriv), GFP_KERNEL);
if (!ppriv) {
- printk(KERN_ERR "dscc4: can't allocate pci private data.\n");
- goto err_dealloc_dev;
+ printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
+ goto err_free_dev;
}
memset(ppriv, 0, sizeof(struct dscc4_pci_priv));
for (i = 0; i < dev_per_card; i++) {
- struct dscc4_dev_priv *p;
- struct net_device *d;
+ struct dscc4_dev_priv *dpriv = root + i;
+ hdlc_device *hdlc = &dpriv->hdlc;
+ struct net_device *d = hdlc_to_dev(hdlc);
- d = dev + i;
d->base_addr = ioaddr;
d->init = NULL;
d->irq = pdev->irq;
- /* The card adds the crc */
- d->type = ARPHRD_RAWHDLC;
d->open = dscc4_open;
d->stop = dscc4_close;
- d->hard_start_xmit = dscc4_start_xmit;
d->set_multicast_list = NULL;
d->do_ioctl = dscc4_ioctl;
- d->get_stats = dscc4_get_stats;
- d->change_mtu = dscc4_change_mtu;
- d->mtu = HDLC_MAX_MTU;
- d->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
d->tx_timeout = dscc4_tx_timeout;
d->watchdog_timeo = TX_TIMEOUT;
- p = dpriv + i;
- p->dev_id = i;
- p->pci_priv = ppriv;
- spin_lock_init(&p->lock);
- d->priv = p;
+ dpriv->dev_id = i;
+ dpriv->pci_priv = ppriv;
+ spin_lock_init(&dpriv->lock);
+ d->priv = dpriv;
- if (dev_alloc_name(d, "scc%d")<0) {
- printk(KERN_ERR "dev_alloc_name failed for scc.\n");
- goto err_dealloc_dev;
- }
- if (register_netdev(d)) {
- printk(KERN_ERR "%s: register_netdev != 0.\n", d->name);
- goto err_dealloc_dev;
+ hdlc->xmit = dscc4_start_xmit;
+ hdlc->attach = dscc4_hdlc_attach;
+
+ if (register_hdlc_device(hdlc)) {
+ printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
+ goto err_unregister;
}
- dscc4_attach_hdlc_device(d);
+ hdlc->proto = IF_PROTO_HDLC;
SET_MODULE_OWNER(d);
+ dscc4_init_registers(ioaddr, i);
+ dpriv->parity = PARITY_CRC16_PR0_CCITT;
+ dpriv->encoding = ENCODING_NRZ;
}
- ppriv->root = dev;
- ppriv->pdev = pdev;
+ if (dscc4_set_quartz(root, quartz) < 0)
+ goto err_unregister;
+ ppriv->root = root;
+ /* Keep the pci_dev backpointer: dscc4_start_xmit() still maps skbs
+  * through ppriv->pdev and the old assignments are removed above. */
+ ppriv->pdev = pdev;
spin_lock_init(&ppriv->lock);
pci_set_drvdata(pdev, ppriv);
return 0;
-err_dealloc_dev:
+err_unregister:
while (--i >= 0)
- unregister_netdev(dev + i);
- kfree(dev);
-err_dealloc_priv:
- kfree(dpriv);
+ unregister_hdlc_device(&root[i].hdlc);
+ kfree(ppriv);
+err_free_dev:
+ kfree(root);
err_out:
return -1;
};
printk(KERN_DEBUG "%s: pending events\n", dev->name);
dev->trans_start = jiffies;
spin_lock_irqsave(&ppriv->lock, flags);
- dscc4_tx_irq(ppriv, dev);
+ dscc4_tx_irq(ppriv, dpriv);
spin_unlock_irqrestore(&ppriv->lock, flags);
} else {
struct TxFD *tx_fd;
printk(KERN_DEBUG "%s: missing events\n", dev->name);
i = dpriv->tx_dirty%TX_RING_SIZE;
j = dpriv->tx_current - dpriv->tx_dirty;
- dpriv->stats.tx_dropped += j;
+ dev_to_hdlc(dev)->stats.tx_dropped += j;
while(j--) {
skb = dpriv->tx_skbuff[i];
tx_fd = dpriv->tx_fd + i;
static void dscc4_tx_timeout(struct net_device *dev)
{
/* FIXME: something is missing there */
-};
+}
+
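+/*
+ * Loopback only makes sense when the SCC generates its own clock
+ * (CLOCK_INT): with an external clock there is no signal to loop back.
+ */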
+static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
+{
+ sync_serial_settings *settings = &dpriv->settings;
+
+ if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
+ struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
+
+ printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
static int dscc4_open(struct net_device *dev)
{
- struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ hdlc_device *hdlc = &dpriv->hdlc;
struct dscc4_pci_priv *ppriv;
- u32 ioaddr = 0;
+ u32 ioaddr;
+ int ret = -EAGAIN;
+
+ if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit)
+ goto err;
+
+ if ((ret = hdlc_open(hdlc)))
+ goto err;
MOD_INC_USE_COUNT;
ioaddr = dev->base_addr + SCC_REG_START(dpriv->dev_id);
- /* FIXME: VIS */
- writel(readl(ioaddr + CCR0) | 0x80001000, ioaddr + CCR0);
-
- writel(LengthCheck | (HDLC_MAX_MRU >> 5), ioaddr + RLCR);
-
- /* no address recognition/crc-CCITT/cts enabled */
- writel(readl(ioaddr + CCR1) | 0x021c8000, ioaddr + CCR1);
-
- /* Ccr2.Rac = 0 */
- writel(0x00050008 & ~RxActivate, ioaddr + CCR2);
-
-#ifdef EXPERIMENTAL_POLLING
- writel(0xfffeef7f, ioaddr + IMR); /* Interrupt mask */
-#else
- /* Don't mask RDO. Ever. */
- //writel(0xfffaef7f, ioaddr + IMR); /* Interrupt mask */
- writel(0xfffaef7e, ioaddr + IMR); /* Interrupt mask */
-#endif
/* IDT+IDR during XPR */
dpriv->flags = NeedIDR | NeedIDT;
printk(KERN_ERR "%s busy. Try later\n", dev->name);
goto err_free_ring;
}
+
+ /* Posted write is flushed in the busy-waiting loop */
writel(TxSccRes | RxSccRes, ioaddr + CMDR);
- /* ... the following isn't */
if (dscc4_wait_ack_cec(ioaddr, dev, "Cec"))
goto err_free_ring;
err_free_ring:
dscc4_release_ring(dpriv);
err_out:
+ hdlc_close(hdlc);
MOD_DEC_USE_COUNT;
- return -EAGAIN;
+err:
+ return ret;
}
-#ifdef EXPERIMENTAL_POLLING
+#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
/* FIXME: it's gonna be easy (TM), for sure */
}
-#endif /* EXPERIMENTAL_POLLING */
+#endif /* DSCC4_POLLING */
static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
next = dpriv->tx_current%TX_RING_SIZE;
dpriv->tx_skbuff[next] = skb;
tx_fd = dpriv->tx_fd + next;
- tx_fd->state = FrameEnd | Hold | TO_STATE(skb->len & TxSizeMax);
+ printk(KERN_DEBUG "%s: %d sent\n", dev->name, skb->len);
+ tx_fd->state = FrameEnd | Hold | TO_STATE(skb->len);
tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
tx_fd->complete = 0x00000000;
mb(); // FIXME: suppress ?
-#ifdef EXPERIMENTAL_POLLING
+#ifdef DSCC4_POLLING
spin_lock(&dpriv->lock);
while(dscc4_tx_poll(dpriv, dev));
spin_unlock(&dpriv->lock);
struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
u32 ioaddr = dev->base_addr;
int dev_id;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
del_timer_sync(&dpriv->timer);
netif_stop_queue(dev);
writel(0x00050000, ioaddr + SCC_REG_START(dev_id) + CCR2);
writel(MTFi|Rdr|Rdt, ioaddr + CH0CFG + dev_id*0x0c); /* Reset Rx/Tx */
writel(0x00000001, ioaddr + GCMDR);
+ readl(ioaddr + GCMDR);
+ /*
+ * FIXME: wait for the command ack before returning the memory
+ * structures to the kernel.
+ */
+ hdlc_close(hdlc);
dscc4_release_ring(dpriv);
MOD_DEC_USE_COUNT;
return 0;
}
+static inline int dscc4_check_clock_ability(int port)
+{
+ int ret = 0;
+
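+ /*
+  * Assumption behind the config option: some boards only route the
+  * clock generator to the first two ports.
+  */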
+#ifdef CONFIG_DSCC4_CLOCK_ON_TWO_PORTS_ONLY
+ if (port >= 2)
+ ret = -1;
+#endif
+ return ret;
+}
+
static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
u32 brr;
*state &= ~Ccr0ClockMask;
- if (*bps) { /* DCE */
+ if (*bps) { /* Clock generated - required for DCE */
u32 n = 0, m = 0, divider;
int xtal;
xtal = dpriv->pci_priv->xtal_hz;
if (!xtal)
return -1;
+ if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
+ return -1;
divider = xtal / *bps;
if (divider > BRR_DIVIDER_MAX) {
divider >>= 4;
if (!(*state & 0x00000001)) /* Clock mode 6b */
divider <<= 4;
*bps = xtal / divider;
- } else { /* DTE */
+ } else {
/*
+ * External clock - DTE
* "state" already reflects Clock mode 0a.
* Nothing more to be done
*/
return 0;
}
-#ifdef LATER_PLEASE
-/*
- * -*- [RFC] Configuring Synchronous Interfaces in Linux -*-
- */
-
-// FIXME: MEDIA already defined in linux/hdlc.h
-#define HDLC_MEDIA_V35 0
-#define HDLC_MEDIA_RS232 1
-#define HDLC_MEDIA_X21 2
-#define HDLC_MEDIA_E1 3
-#define HDLC_MEDIA_HSSI 4
-
-#define HDLC_CODING_NRZ 0
-#define HDLC_CODING_NRZI 1
-#define HDLC_CODING_FM0 2
-#define HDLC_CODING_FM1 3
-#define HDLC_CODING_MANCHESTER 4
-
-#define HDLC_CRC_NONE 0
-#define HDLC_CRC_16 1
-#define HDLC_CRC_32 2
-#define HDLC_CRC_CCITT 3
-
-/* RFC: add the crc reset value ? */
-struct hdlc_physical {
- u8 media;
- u8 coding;
- u32 rate;
- u8 crc;
- u8 crc_siz; /* 2 or 4 bytes */
- u8 shared_flags; /* Discouraged on the DSCC4 */
-};
-
-// FIXME: PROTO already defined in linux/hdlc.h
-#define HDLC_PROTO_RAW 0
-#define HDLC_PROTO_FR 1
-#define HDLC_PROTO_X25 2
-#define HDLC_PROTO_PPP 3
-#define HDLC_PROTO_CHDLC 4
-
-struct hdlc_protocol {
- u8 proto;
-
- union {
- } u;
-};
-
-struct screq {
- u16 media_group;
-
- union {
- struct hdlc_physical hdlc_phy;
- struct hdlc_protocol hdlc_proto;
- } u;
-};
-
-// FIXME: go sub-module
-static struct {
- u16 coding;
- u16 bits;
-} map[] = {
- {HDLC_CODING_NRZ, 0x00},
- {HDLC_CODING_NRZI, 0x20},
- {HDLC_CODING_FM0, 0x40},
- {HDLC_CODING_FM1, 0x50},
- {HDLC_CODING_MANCHESTER, 0x60},
- {65535, 0x00}
-};
-#endif /* LATER_PLEASE */
-
static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct dscc4_dev_priv *dpriv = dev->priv;
- u32 state, ioaddr;
+ struct if_settings *if_s = &ifr->ifr_settings;
+ const size_t size = sizeof(dpriv->settings);
+ int ret = 0;
if (dev->flags & IFF_UP)
return -EBUSY;
- switch (cmd) {
- /* Set built-in quartz frequency */
- case SIOCDEVPRIVATE: {
- u32 hz;
+ if (cmd != SIOCDEVICE)
+ return -EOPNOTSUPP;
- hz = ifr->ifr_ifru.ifru_ivalue;
- if (hz >= 33000000) /* 33 MHz */
- return -EOPNOTSUPP;
- dpriv->pci_priv->xtal_hz = hz;
- return 0;
- }
- /* Set/unset loopback */
- case SIOCDEVPRIVATE+1: {
- u32 flags;
-
- ioaddr = dev->base_addr + CCR1 +
- SCC_REG_START(dpriv->dev_id);
- state = readl(ioaddr);
- flags = ifr->ifr_ifru.ifru_ivalue;
- if (flags & 0x00000001) {
- printk(KERN_DEBUG "%s: loopback\n", dev->name);
- state |= 0x00000100;
- } else {
- printk(KERN_DEBUG "%s: normal\n", dev->name);
- state &= ~0x00000100;
- }
- writel(state, ioaddr);
+ switch(if_s->type) {
+ case IF_GET_IFACE:
+ if_s->type = IF_IFACE_SYNC_SERIAL;
+ if (if_s->data_length == 0)
return 0;
- }
+ if (if_s->data_length < size)
+ return -ENOMEM;
+ if (copy_to_user(if_s->data, &dpriv->settings, size))
+ return -EFAULT;
+ if_s->data_length = size;
+ break;
+
+ case IF_IFACE_SYNC_SERIAL:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (if_s->data_length != size)
+ return -ENOMEM;
+
+ if (copy_from_user(&dpriv->settings, if_s->data, size))
+ return -EFAULT;
+ ret = dscc4_set_iface(dev);
+ break;
+
+ default:
+ ret = hdlc_ioctl(dev, ifr, cmd);
+ break;
+ }
-#ifdef LATER_PLEASE
- case SIOCDEVPRIVATE+2: {
- {
- struct screq scr;
+ return ret;
+}
- err = copy_from_user(&scr, ifr->ifr_ifru.ifru_data, sizeof(struct screq));
- if (err)
- return err;
- do {
- if (scr.u.hdlc_phy.coding == map[i].coding)
- break;
- } while (map[++i].coding != 65535);
- if (!map[i].coding)
- return -EOPNOTSUPP;
-
- ioaddr = dev->base_addr + CCR0 +
- SCC_REG_START(dpriv->dev_id);
- state = readl(ioaddr) & ~EncodingMask;
- state |= (u32)map[i].bits << 16;
- writel(state, ioaddr);
- printk("state: %08x\n", state); /* DEBUG */
- return 0;
- }
- case SIOCDEVPRIVATE+3: {
- struct screq *scr = (struct screq *)ifr->ifr_ifru.ifru_data;
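+/*
+ * Sanity-check the quartz frequency (Hz, presumably a module parameter)
+ * used as the reference for the baud rate generator in dscc4_set_clock().
+ */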
+static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
+{
+ int ret = 0;
- ioaddr = dev->base_addr + CCR0 +
- SCC_REG_START(dpriv->dev_id);
- state = (readl(ioaddr) & EncodingMask) >> 16;
- do {
- if (state == map[i].bits)
- break;
- } while (map[++i].coding);
- return put_user(map[i].coding, (u16 *)scr->u.hdlc_phy.coding);
- }
-#endif /* LATER_PLEASE */
-
- case HDLCSCLOCKRATE:
- {
- u32 state, bps;
-
- bps = ifr->ifr_ifru.ifru_ivalue;
- ioaddr = dev->base_addr + CCR0 +
- SCC_REG_START(dpriv->dev_id);
- state = readl(ioaddr);
- if(dscc4_set_clock(dev, &bps, &state) < 0)
- return -EOPNOTSUPP;
- if (bps) { /* DCE */
- printk(KERN_DEBUG "%s: generated RxClk (DCE)\n",
- dev->name);
- ifr->ifr_ifru.ifru_ivalue = bps;
- } else { /* DTE */
- state = 0x80001000;
- printk(KERN_DEBUG "%s: external RxClk (DTE)\n",
- dev->name);
- }
- writel(state, ioaddr);
- return 0;
- }
- case HDLCGCLOCKRATE: {
- u32 brr;
- int bps;
-
- brr = readl(dev->base_addr + BRR +
- SCC_REG_START(dpriv->dev_id));
- bps = dpriv->pci_priv->xtal_hz >> (brr >> 8);
- bps /= (brr & 0x3f) + 1;
- ifr->ifr_ifru.ifru_ivalue = bps;
- return 0;
+ if ((hz < 0) || (hz > DSCC4_HZ_MAX))
+ ret = -EOPNOTSUPP;
+ else
+ dpriv->pci_priv->xtal_hz = hz;
+
+ return ret;
+}
+
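+/*
+ * Look "value" up in a { define, bits } table terminated by define == -1.
+ * Returns the matching index, or -1 if none is found.
+ */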
+static int dscc4_match(struct thingie *p, int value)
+{
+ int i;
+
+ for (i = 0; p[i].define != -1; i++) {
+ if (value == p[i].define)
+ break;
+ }
+ if (p[i].define == -1)
+ return -1;
+ else
+ return i;
+}
+
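+/*
+ * The divider can only approximate the requested rate, so the value
+ * actually achieved is written back for userspace to read.
+ */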
+static int dscc4_clock_setting(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ sync_serial_settings *settings = &dpriv->settings;
+ u32 bps, state;
+ u32 ioaddr;
+
+ bps = settings->clock_rate;
+ ioaddr = dev->base_addr + CCR0 + SCC_REG_START(dpriv->dev_id);
+ state = readl(ioaddr);
+ if(dscc4_set_clock(dev, &bps, &state) < 0)
+ return -EOPNOTSUPP;
+ if (bps) { /* DCE */
+ printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
+ if (settings->clock_rate != bps) {
+ printk(KERN_DEBUG "%s: clock adjusted from %08d to %08d\n",
+ dev->name, settings->clock_rate, bps);
+ settings->clock_rate = bps;
}
+ } else { /* DTE */
+ state = 0x80001000;
+ printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
+ }
+ writel(state, ioaddr);
+ return 0;
+}
+
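+/*
+ * The bits below select the line encoding in CCR0 (EncodingMask); they
+ * are the values of the removed LATER_PLEASE map, pre-shifted by 16.
+ */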
+static int dscc4_encoding_setting(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ struct thingie encoding[] = {
+ { ENCODING_NRZ, 0x00000000 },
+ { ENCODING_NRZI, 0x00200000 },
+ { ENCODING_FM_MARK, 0x00400000 },
+ { ENCODING_FM_SPACE, 0x00500000 },
+ { ENCODING_MANCHESTER, 0x00600000 },
+ { -1, 0}
+ };
+ int i, ret = 0;
+
+ i = dscc4_match(encoding, dpriv->encoding);
+ if (i >= 0) {
+ u32 ioaddr;
+
+ ioaddr = dev->base_addr + CCR0 + SCC_REG_START(dpriv->dev_id);
+ dscc4_patch_register(ioaddr, EncodingMask, encoding[i].bits);
+ } else
+ ret = -EOPNOTSUPP;
+ return ret;
+}
- default:
- return -EOPNOTSUPP;
+static int dscc4_loopback_setting(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ sync_serial_settings *settings = &dpriv->settings;
+ u32 ioaddr, state;
+
+ ioaddr = dev->base_addr + CCR1 + SCC_REG_START(dpriv->dev_id);
+ state = readl(ioaddr);
+ if (settings->loopback) {
+ printk(KERN_DEBUG "%s: loopback\n", dev->name);
+ state |= 0x00000100;
+ } else {
+ printk(KERN_DEBUG "%s: normal\n", dev->name);
+ state &= ~0x00000100;
}
+ writel(state, ioaddr);
+ return 0;
}
-static int dscc4_change_mtu(struct net_device *dev, int mtu)
+static int dscc4_crc_setting(struct net_device *dev)
{
- /* FIXME: chainsaw coded... */
- if ((mtu <= 3) || (mtu > 65531))
- return -EINVAL;
- if(dev->flags & IFF_UP)
- return -EBUSY;
- dev->mtu = mtu;
- return(0);
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ struct thingie crc[] = {
+ { PARITY_CRC16_PR0_CCITT, 0x00000010 },
+ { PARITY_CRC16_PR1_CCITT, 0x00000000 },
+ { PARITY_CRC32_PR0_CCITT, 0x00000011 },
+ { PARITY_CRC32_PR1_CCITT, 0x00000001 },
+ { -1, 0}
+ };
+ int i, ret = 0;
+
+ i = dscc4_match(crc, dpriv->parity);
+ if (i >= 0) {
+ u32 ioaddr;
+
+ ioaddr = dev->base_addr + CCR1 + SCC_REG_START(dpriv->dev_id);
+ dscc4_patch_register(ioaddr, CrcMask, crc[i].bits);
+ } else
+ ret = -EOPNOTSUPP;
+ return ret;
+}
+
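+/*
+ * Apply the current settings in one pass; stop at the first setter that
+ * fails and propagate its error code.
+ */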
+static int dscc4_set_iface(struct net_device *dev)
+{
+ struct {
+ int (*action)(struct net_device *);
+ } *p, do_setting[] = {
+ { dscc4_encoding_setting },
+ { dscc4_clock_setting },
+ { dscc4_loopback_setting },
+ { dscc4_crc_setting },
+ { NULL }
+ };
+ int ret = 0;
+
+ for (p = do_setting; p->action; p++) {
+ if ((ret = p->action(dev)) < 0)
+ break;
+ }
+ return ret;
}
-static void dscc4_irq(int irq, void *dev_instance, struct pt_regs *ptregs)
+static void dscc4_irq(int irq, void *token, struct pt_regs *ptregs)
{
- struct net_device *dev = dev_instance;
+ struct dscc4_dev_priv *root = token;
struct dscc4_pci_priv *priv;
+ struct net_device *dev;
u32 ioaddr, state;
unsigned long flags;
int i;
- priv = ((struct dscc4_dev_priv *)dev->priv)->pci_priv;
- /*
- * FIXME: shorten the protected area (set some bit telling we're
- * in an interrupt or increment some work-to-do counter etc...)
- */
+ priv = root->pci_priv;
+ dev = hdlc_to_dev(&root->hdlc);
+
spin_lock_irqsave(&priv->lock, flags);
ioaddr = dev->base_addr;
if (state & RxEvt) {
i = dev_per_card - 1;
do {
- dscc4_rx_irq(priv, dev + i);
+ dscc4_rx_irq(priv, root + i);
} while (--i >= 0);
state &= ~RxEvt;
}
if (state & TxEvt) {
i = dev_per_card - 1;
do {
- dscc4_tx_irq(priv, dev + i);
+ dscc4_tx_irq(priv, root + i);
} while (--i >= 0);
state &= ~TxEvt;
}
spin_unlock_irqrestore(&priv->lock, flags);
}
-static __inline__ void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
- struct net_device *dev)
+static inline void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
+ struct dscc4_dev_priv *dpriv)
{
- struct dscc4_dev_priv *dpriv = dev->priv;
+ struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
u32 state;
int cur, loop = 0;
dpriv->iqtx[cur] = 0;
dpriv->iqtx_current++;
-#ifdef DEBUG_PARANOID
- if (SOURCE_ID(state) != dpriv->dev_id) {
- printk(KERN_DEBUG "%s (Tx): Source Id=%d, state=%08x\n",
- dev->name, SOURCE_ID(state), state );
+ if (state_check(state, dpriv, dev, "Tx"))
return;
- }
- if (state & 0x0df80c00) {
- printk(KERN_DEBUG "%s (Tx): state=%08x (UFO alert)\n",
- dev->name, state);
- return;
- }
-#endif
+
// state &= 0x0fffffff; /* Tracking the analyzed bits */
if (state & SccEvt) {
if (state & Alls) {
"%s: DataComplete=0 cur=%d isr=%08x state=%08x\n",
dev->name, cur, isr, state);
writel(isr, ioaddr);
- dpriv->stats.tx_dropped++;
+ dev_to_hdlc(dev)->stats.tx_dropped++;
} else {
tx_fd->complete &= ~DataComplete;
if (tx_fd->state & FrameEnd) {
- dpriv->stats.tx_packets++;
- dpriv->stats.tx_bytes += skb->len;
+ dev_to_hdlc(dev)->stats.tx_packets++;
+ dev_to_hdlc(dev)->stats.tx_bytes += skb->len;
}
}
* Transmit Data Underrun
*/
if (state & Xdu) {
- printk(KERN_ERR "dscc4: XDU. Contact maintainer\n");
+ printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
dpriv->flags = NeedIDT;
/* Tx reset */
writel(MTFi | Rdt,
}
} else { /* ! SccEvt */
if (state & Hi) {
-#ifdef EXPERIMENTAL_POLLING
+#ifdef DSCC4_POLLING
while(!dscc4_tx_poll(dpriv, dev));
#endif
state &= ~Hi;
* FIXME: it may be avoided. Re-re-re-read the manual.
*/
if (state & Err) {
- printk(KERN_ERR "%s: Tx ERR\n", dev->name);
- dpriv->stats.tx_errors++;
+ printk(KERN_ERR "%s (Tx): ERR\n", dev->name);
+ dev_to_hdlc(dev)->stats.tx_errors++;
state &= ~Err;
}
}
goto try;
}
-static __inline__ void dscc4_rx_irq(struct dscc4_pci_priv *priv, struct net_device *dev)
+static inline void dscc4_rx_irq(struct dscc4_pci_priv *priv,
+ struct dscc4_dev_priv *dpriv)
{
- struct dscc4_dev_priv *dpriv = dev->priv;
+ struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
u32 state;
int cur;
dpriv->iqrx[cur] = 0;
dpriv->iqrx_current++;
-#ifdef DEBUG_PARANOID
- if (SOURCE_ID(state) != dpriv->dev_id) {
- printk(KERN_DEBUG "%s (Rx): Source Id=%d, state=%08x\n",
- dev->name, SOURCE_ID(state), state);
- goto try;
- }
- if (state & 0x0df80c00) {
- printk(KERN_DEBUG "%s (Rx): state=%08x (UFO alert)\n",
- dev->name, state);
- goto try;
- }
-#endif
+ if (state_check(state, dpriv, dev, "Rx"))
+ return;
+
if (!(state & SccEvt)){
struct RxFD *rx_fd;
#ifdef DEBUG_PARANOIA
for (i = 0; evts[i].irq_name; i++) {
if (state & evts[i].mask) {
- printk(KERN_DEBUG "dscc4(%s): %s\n",
- dev->name, evts[i].irq_name);
+ printk(KERN_DEBUG "%s: %s\n", dev->name,
+ evts[i].irq_name);
if (!(state &= ~evts[i].mask))
goto try;
}
if (!(rx_fd->state2 & DataComplete))
break;
if (rx_fd->state2 & FrameAborted) {
- dpriv->stats.rx_over_errors++;
+ dev_to_hdlc(dev)->stats.rx_over_errors++;
rx_fd->state1 |= Hold;
rx_fd->state2 = 0x00000000;
rx_fd->end = 0xbabeface;
if (debug) {
if (dpriv->flags & RdoSet)
printk(KERN_DEBUG
- "dscc4: no RDO in Rx data\n");
+ "%s: no RDO in Rx data\n", DRV_NAME);
}
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
/*
goto try;
}
if (state & Flex) {
+ printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
if (!(state &= ~Flex))
goto try;
}
* - Fe=1 (required by No=0 or we got an Err irq and must reset).
* Alas, it fails (and locks solid). Thus the introduction of a dummy
* skb to avoid No=0 (choose one: Ugly [ ] Tasteless [ ] VMS [ ]).
- * TODO: fiddle the tx threshold when time permits.
+ * 2002/01: errata sheet acknowledges the problem [X].
*/
struct sk_buff *skb;
rx_fd->state1 = HiDesc; /* Hi, no Hold */
rx_fd->state2 = 0x00000000;
rx_fd->end = 0xbabeface;
- rx_fd->state1 |= ((u32)(HDLC_MAX_MRU & RxSizeMax)) << 16;
+ rx_fd->state1 |= (RX_MAX(HDLC_MAX_MRU) << 16);
try_get_rx_skb(dpriv, i, dev);
i++;
rx_fd->next = (u32)(dpriv->rx_fd_dma + i*sizeof(struct RxFD));
return -1;
}
-static struct net_device_stats *dscc4_get_stats(struct net_device *dev)
-{
- struct dscc4_dev_priv *priv = (struct dscc4_dev_priv *)dev->priv;
-
- return &priv->stats;
-}
-
static void __exit dscc4_remove_one(struct pci_dev *pdev)
{
struct dscc4_pci_priv *ppriv;
- struct net_device *root;
+ struct dscc4_dev_priv *root;
+ u32 ioaddr;
int i;
ppriv = pci_get_drvdata(pdev);
root = ppriv->root;
+ ioaddr = hdlc_to_dev(&root->hdlc)->base_addr;
free_irq(pdev->irq, root);
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
ppriv->iqcfg_dma);
- for (i=0; i < dev_per_card; i++) {
- struct dscc4_dev_priv *dpriv;
- struct net_device *dev;
+ for (i = 0; i < dev_per_card; i++) {
+ struct dscc4_dev_priv *dpriv = root + i;
+ hdlc_device *hdlc = &dpriv->hdlc;
- dev = ppriv->root + i;
- dscc4_unattach_hdlc_device(dev);
+ unregister_hdlc_device(hdlc);
- dpriv = (struct dscc4_dev_priv *)dev->priv;
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
dpriv->iqrx, dpriv->iqrx_dma);
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
dpriv->iqtx, dpriv->iqtx_dma);
- unregister_netdev(dev);
}
- kfree(root->priv);
- iounmap((void *)root->base_addr);
+ iounmap((void *)ioaddr);
kfree(root);
+ pci_set_drvdata(pdev, NULL);
kfree(ppriv);
- release_mem_region(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 0));
+ release_mem_region(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
}
-static int dscc4_hdlc_ioctl(struct hdlc_device_struct *hdlc, struct ifreq *ifr, int cmd)
-{
- struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
- int result;
-
- /* FIXME: locking ? */
- result = dscc4_ioctl(dev, ifr, cmd);
- return result;
-}
-
-static int dscc4_hdlc_open(struct hdlc_device_struct *hdlc)
-{
- struct net_device *dev = (struct net_device *)(hdlc->netdev.base_addr);
-
- if (netif_running(dev)) {
- printk(KERN_DEBUG "%s: already running\n", dev->name); // DEBUG
- return 0;
- }
- return dscc4_open(dev);
-}
-
-static int dscc4_hdlc_xmit(hdlc_device *hdlc, struct sk_buff *skb)
-{
- struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
-
- return dscc4_start_xmit(skb, dev);
-}
-
-static void dscc4_hdlc_close(struct hdlc_device_struct *hdlc)
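+/*
+ * Invoked through hdlc->attach: validate the encoding/parity requested
+ * by the generic HDLC layer and record them. dscc4_set_iface() commits
+ * them to the hardware.
+ */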
+static int dscc4_hdlc_attach(hdlc_device *hdlc, unsigned short encoding,
+ unsigned short parity)
{
- struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
- struct dscc4_dev_priv *dpriv;
+ struct dscc4_dev_priv *dpriv = hdlc_to_dev(hdlc)->priv;
- dpriv = dev->priv;
- --dpriv->usecount;
-}
-
-/* Operated under dev lock */
-static int dscc4_attach_hdlc_device(struct net_device *dev)
-{
- struct dscc4_dev_priv *dpriv = dev->priv;
- struct hdlc_device_struct *hdlc;
- int result;
-
- hdlc = &dpriv->hdlc;
- /* XXX: Don't look at the next line */
- hdlc->netdev.base_addr = (unsigned long)dev;
- hdlc->set_mode = NULL;
- hdlc->open = dscc4_hdlc_open;
- hdlc->close = dscc4_hdlc_close;
- hdlc->ioctl = dscc4_hdlc_ioctl;
- hdlc->xmit = dscc4_hdlc_xmit;
-
- result = register_hdlc_device(hdlc);
- if (!result)
- dpriv->usecount++;
- return result;
-}
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI &&
+ encoding != ENCODING_FM_MARK &&
+ encoding != ENCODING_FM_SPACE &&
+ encoding != ENCODING_MANCHESTER)
+ return -EINVAL;
-/* Operated under dev lock */
-static void dscc4_unattach_hdlc_device(struct net_device *dev)
-{
- struct dscc4_dev_priv *dpriv = dev->priv;
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC16_PR0_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT &&
+ parity != PARITY_CRC32_PR0_CCITT &&
+ parity != PARITY_CRC32_PR1_CCITT)
+ return -EINVAL;
- unregister_hdlc_device(&dpriv->hdlc);
- dpriv->usecount--;
+ dpriv->encoding = encoding;
+ dpriv->parity = parity;
+ return 0;
}
static struct pci_device_id dscc4_pci_tbl[] __devinitdata = {
+++ /dev/null
-/*
- * This file is derived from various .h and .c files from the zlib-1.0.4
- * distribution by Jean-loup Gailly and Mark Adler, with some additions
- * by Paul Mackerras to aid in implementing Deflate compression and
- * decompression for PPP packets. See zlib.h for conditions of
- * distribution and use.
- *
- * Changes that have been made include:
- * - added Z_PACKET_FLUSH (see zlib.h for details)
- * - added inflateIncomp and deflateOutputPending
- * - allow strm->next_out to be NULL, meaning discard the output
- *
- * $Id: zlib.c,v 1.3 1997/12/23 10:47:42 paulus Exp $
- */
-
-/*
- * ==FILEVERSION 971210==
- *
- * This marker is used by the Linux installation script to determine
- * whether an up-to-date version of this file is already installed.
- */
-
-#define NO_DUMMY_DECL
-#define NO_ZCFUNCS
-#define MY_ZCALLOC
-
-#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL))
-#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
-#endif
-
-
-/* +++ zutil.h */
-/* zutil.h -- internal interface and configuration of the compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */
-
-#ifndef _Z_UTIL_H
-#define _Z_UTIL_H
-
-#include "zlib.h"
-
-#if defined(KERNEL) || defined(_KERNEL)
-/* Assume this is a *BSD or SVR4 kernel */
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/systm.h>
-# define HAVE_MEMCPY
-# define memcpy(d, s, n) bcopy((s), (d), (n))
-# define memset(d, v, n) bzero((d), (n))
-# define memcmp bcmp
-
-#else
-#if defined(__KERNEL__)
-/* Assume this is a Linux kernel */
-#include <linux/string.h>
-#define HAVE_MEMCPY
-
-#else /* not kernel */
-
-#if defined(MSDOS)||defined(VMS)||defined(CRAY)||defined(WIN32)||defined(RISCOS)
-# include <stddef.h>
-# include <errno.h>
-#else
- extern int errno;
-#endif
-#ifdef STDC
-# include <string.h>
-# include <stdlib.h>
-#endif
-#endif /* __KERNEL__ */
-#endif /* _KERNEL || KERNEL */
-
-#ifndef local
-# define local static
-#endif
-/* compile with -Dlocal if your debugger can't find static symbols */
-
-typedef unsigned char uch;
-typedef uch FAR uchf;
-typedef unsigned short ush;
-typedef ush FAR ushf;
-typedef unsigned long ulg;
-
-extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */
-/* (size given to avoid silly warnings with Visual C++) */
-
-#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
-
-#define ERR_RETURN(strm,err) \
- return (strm->msg = (char*)ERR_MSG(err), (err))
-/* To be used only when the state is known to be valid */
-
- /* common constants */
-
-#ifndef DEF_WBITS
-# define DEF_WBITS MAX_WBITS
-#endif
-/* default windowBits for decompression. MAX_WBITS is for compression only */
-
-#if MAX_MEM_LEVEL >= 8
-# define DEF_MEM_LEVEL 8
-#else
-# define DEF_MEM_LEVEL MAX_MEM_LEVEL
-#endif
-/* default memLevel */
-
-#define STORED_BLOCK 0
-#define STATIC_TREES 1
-#define DYN_TREES 2
-/* The three kinds of block type */
-
-#define MIN_MATCH 3
-#define MAX_MATCH 258
-/* The minimum and maximum match lengths */
-
-#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
-
- /* target dependencies */
-
-#ifdef MSDOS
-# define OS_CODE 0x00
-# ifdef __TURBOC__
-# include <alloc.h>
-# else /* MSC or DJGPP */
-# include <malloc.h>
-# endif
-#endif
-
-#ifdef OS2
-# define OS_CODE 0x06
-#endif
-
-#ifdef WIN32 /* Window 95 & Windows NT */
-# define OS_CODE 0x0b
-#endif
-
-#if defined(VAXC) || defined(VMS)
-# define OS_CODE 0x02
-# define FOPEN(name, mode) \
- fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
-#endif
-
-#ifdef AMIGA
-# define OS_CODE 0x01
-#endif
-
-#if defined(ATARI) || defined(atarist)
-# define OS_CODE 0x05
-#endif
-
-#ifdef MACOS
-# define OS_CODE 0x07
-#endif
-
-#ifdef __50SERIES /* Prime/PRIMOS */
-# define OS_CODE 0x0F
-#endif
-
-#ifdef TOPS20
-# define OS_CODE 0x0a
-#endif
-
-#if defined(_BEOS_) || defined(RISCOS)
-# define fdopen(fd,mode) NULL /* No fdopen() */
-#endif
-
- /* Common defaults */
-
-#ifndef OS_CODE
-# define OS_CODE 0x03 /* assume Unix */
-#endif
-
-#ifndef FOPEN
-# define FOPEN(name, mode) fopen((name), (mode))
-#endif
-
- /* functions */
-
-#ifdef HAVE_STRERROR
- extern char *strerror OF((int));
-# define zstrerror(errnum) strerror(errnum)
-#else
-# define zstrerror(errnum) ""
-#endif
-
-#if defined(pyr)
-# define NO_MEMCPY
-#endif
-#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER)
- /* Use our own functions for small and medium model with MSC <= 5.0.
- * You may have to use the same strategy for Borland C (untested).
- */
-# define NO_MEMCPY
-#endif
-#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
-# define HAVE_MEMCPY
-#endif
-#ifdef HAVE_MEMCPY
-# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
-# define zmemcpy _fmemcpy
-# define zmemcmp _fmemcmp
-# define zmemzero(dest, len) _fmemset(dest, 0, len)
-# else
-# define zmemcpy memcpy
-# define zmemcmp memcmp
-# define zmemzero(dest, len) memset(dest, 0, len)
-# endif
-#else
- extern void zmemcpy OF((Bytef* dest, Bytef* source, uInt len));
- extern int zmemcmp OF((Bytef* s1, Bytef* s2, uInt len));
- extern void zmemzero OF((Bytef* dest, uInt len));
-#endif
-
-/* Diagnostic functions */
-#ifdef DEBUG_ZLIB
-# include <stdio.h>
-# ifndef verbose
-# define verbose 0
-# endif
- extern void z_error OF((char *m));
-# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
-# define Trace(x) fprintf x
-# define Tracev(x) {if (verbose) fprintf x ;}
-# define Tracevv(x) {if (verbose>1) fprintf x ;}
-# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-# define Assert(cond,msg)
-# define Trace(x)
-# define Tracev(x)
-# define Tracevv(x)
-# define Tracec(c,x)
-# define Tracecv(c,x)
-#endif
-
-
-typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len));
-
-voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size));
-void zcfree OF((voidpf opaque, voidpf ptr));
-
-#define ZALLOC(strm, items, size) \
- (*((strm)->zalloc))((strm)->opaque, (items), (size))
-#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
-#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
-
-#endif /* _Z_UTIL_H */
-/* --- zutil.h */
-
-/* +++ deflate.h */
-/* deflate.h -- internal compression state
- * Copyright (C) 1995-1996 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */
-
-#ifndef _DEFLATE_H
-#define _DEFLATE_H
-
-/* #include "zutil.h" */
-
-/* ===========================================================================
- * Internal compression state.
- */
-
-#define LENGTH_CODES 29
-/* number of length codes, not counting the special END_BLOCK code */
-
-#define LITERALS 256
-/* number of literal bytes 0..255 */
-
-#define L_CODES (LITERALS+1+LENGTH_CODES)
-/* number of Literal or Length codes, including the END_BLOCK code */
-
-#define D_CODES 30
-/* number of distance codes */
-
-#define BL_CODES 19
-/* number of codes used to transfer the bit lengths */
-
-#define HEAP_SIZE (2*L_CODES+1)
-/* maximum heap size */
-
-#define MAX_BITS 15
-/* All codes must not exceed MAX_BITS bits */
-
-#define INIT_STATE 42
-#define BUSY_STATE 113
-#define FINISH_STATE 666
-/* Stream status */
-
-
-/* Data structure describing a single value and its code string. */
-typedef struct ct_data_s {
- union {
- ush freq; /* frequency count */
- ush code; /* bit string */
- } fc;
- union {
- ush dad; /* father node in Huffman tree */
- ush len; /* length of bit string */
- } dl;
-} FAR ct_data;
-
-#define Freq fc.freq
-#define Code fc.code
-#define Dad dl.dad
-#define Len dl.len
-
-typedef struct static_tree_desc_s static_tree_desc;
-
-typedef struct tree_desc_s {
- ct_data *dyn_tree; /* the dynamic tree */
- int max_code; /* largest code with non zero frequency */
- static_tree_desc *stat_desc; /* the corresponding static tree */
-} FAR tree_desc;
-
-typedef ush Pos;
-typedef Pos FAR Posf;
-typedef unsigned IPos;
-
-/* A Pos is an index in the character window. We use short instead of int to
- * save space in the various tables. IPos is used only for parameter passing.
- */
-
-typedef struct deflate_state {
- z_streamp strm; /* pointer back to this zlib stream */
- int status; /* as the name implies */
- Bytef *pending_buf; /* output still pending */
- ulg pending_buf_size; /* size of pending_buf */
- Bytef *pending_out; /* next pending byte to output to the stream */
- int pending; /* nb of bytes in the pending buffer */
- int noheader; /* suppress zlib header and adler32 */
- Byte data_type; /* UNKNOWN, BINARY or ASCII */
- Byte method; /* STORED (for zip only) or DEFLATED */
- int last_flush; /* value of flush param for previous deflate call */
-
- /* used by deflate.c: */
-
- uInt w_size; /* LZ77 window size (32K by default) */
- uInt w_bits; /* log2(w_size) (8..16) */
- uInt w_mask; /* w_size - 1 */
-
- Bytef *window;
- /* Sliding window. Input bytes are read into the second half of the window,
- * and move to the first half later to keep a dictionary of at least wSize
- * bytes. With this organization, matches are limited to a distance of
- * wSize-MAX_MATCH bytes, but this ensures that IO is always
- * performed with a length multiple of the block size. Also, it limits
- * the window size to 64K, which is quite useful on MSDOS.
- * To do: use the user input buffer as sliding window.
- */
-
- ulg window_size;
- /* Actual size of window: 2*wSize, except when the user input buffer
- * is directly used as sliding window.
- */
-
- Posf *prev;
- /* Link to older string with same hash index. To limit the size of this
- * array to 64K, this link is maintained only for the last 32K strings.
- * An index in this array is thus a window index modulo 32K.
- */
-
- Posf *head; /* Heads of the hash chains or NIL. */
-
- uInt ins_h; /* hash index of string to be inserted */
- uInt hash_size; /* number of elements in hash table */
- uInt hash_bits; /* log2(hash_size) */
- uInt hash_mask; /* hash_size-1 */
-
- uInt hash_shift;
- /* Number of bits by which ins_h must be shifted at each input
- * step. It must be such that after MIN_MATCH steps, the oldest
- * byte no longer takes part in the hash key, that is:
- * hash_shift * MIN_MATCH >= hash_bits
- */
-
- long block_start;
- /* Window position at the beginning of the current output block. Gets
- * negative when the window is moved backwards.
- */
-
- uInt match_length; /* length of best match */
- IPos prev_match; /* previous match */
- int match_available; /* set if previous match exists */
- uInt strstart; /* start of string to insert */
- uInt match_start; /* start of matching string */
- uInt lookahead; /* number of valid bytes ahead in window */
-
- uInt prev_length;
- /* Length of the best match at previous step. Matches not greater than this
- * are discarded. This is used in the lazy match evaluation.
- */
-
- uInt max_chain_length;
- /* To speed up deflation, hash chains are never searched beyond this
- * length. A higher limit improves compression ratio but degrades the
- * speed.
- */
-
- uInt max_lazy_match;
- /* Attempt to find a better match only when the current match is strictly
- * smaller than this value. This mechanism is used only for compression
- * levels >= 4.
- */
-# define max_insert_length max_lazy_match
- /* Insert new strings in the hash table only if the match length is not
- * greater than this length. This saves time but degrades compression.
- * max_insert_length is used only for compression levels <= 3.
- */
-
- int level; /* compression level (1..9) */
- int strategy; /* favor or force Huffman coding*/
-
- uInt good_match;
- /* Use a faster search when the previous match is longer than this */
-
- int nice_match; /* Stop searching when current match exceeds this */
-
- /* used by trees.c: */
- /* Didn't use ct_data typedef below to suppress compiler warning */
- struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
- struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
- struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
-
- struct tree_desc_s l_desc; /* desc. for literal tree */
- struct tree_desc_s d_desc; /* desc. for distance tree */
- struct tree_desc_s bl_desc; /* desc. for bit length tree */
-
- ush bl_count[MAX_BITS+1];
- /* number of codes at each bit length for an optimal tree */
-
- int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
- int heap_len; /* number of elements in the heap */
- int heap_max; /* element of largest frequency */
- /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
- * The same heap array is used to build all trees.
- */
-
- uch depth[2*L_CODES+1];
- /* Depth of each subtree used as tie breaker for trees of equal frequency
- */
-
- uchf *l_buf; /* buffer for literals or lengths */
-
- uInt lit_bufsize;
- /* Size of match buffer for literals/lengths. There are 4 reasons for
- * limiting lit_bufsize to 64K:
- * - frequencies can be kept in 16 bit counters
- * - if compression is not successful for the first block, all input
- * data is still in the window so we can still emit a stored block even
- * when input comes from standard input. (This can also be done for
- * all blocks if lit_bufsize is not greater than 32K.)
- * - if compression is not successful for a file smaller than 64K, we can
- * even emit a stored file instead of a stored block (saving 5 bytes).
- * This is applicable only for zip (not gzip or zlib).
- * - creating new Huffman trees less frequently may not provide fast
- * adaptation to changes in the input data statistics. (Take for
- * example a binary file with poorly compressible code followed by
- * a highly compressible string table.) Smaller buffer sizes give
- * fast adaptation but have of course the overhead of transmitting
- * trees more frequently.
- * - I can't count above 4
- */
-
- uInt last_lit; /* running index in l_buf */
-
- ushf *d_buf;
- /* Buffer for distances. To simplify the code, d_buf and l_buf have
- * the same number of elements. To use different lengths, an extra flag
- * array would be necessary.
- */
-
- ulg opt_len; /* bit length of current block with optimal trees */
- ulg static_len; /* bit length of current block with static trees */
- ulg compressed_len; /* total bit length of compressed file */
- uInt matches; /* number of string matches in current block */
- int last_eob_len; /* bit length of EOB code for last block */
-
-#ifdef DEBUG_ZLIB
- ulg bits_sent; /* bit length of the compressed data */
-#endif
-
- ush bi_buf;
- /* Output buffer. bits are inserted starting at the bottom (least
- * significant bits).
- */
- int bi_valid;
- /* Number of valid bits in bi_buf. All bits above the last valid bit
- * are always zero.
- */
-
-} FAR deflate_state;
-
-/* Output a byte on the stream.
- * IN assertion: there is enough room in pending_buf.
- */
-#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
-
-
-#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
-/* Minimum amount of lookahead, except at the end of the input file.
- * See deflate.c for comments about the MIN_MATCH+1.
- */
-
-#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
-/* In order to simplify the code, particularly on 16 bit machines, match
- * distances are limited to MAX_DIST instead of WSIZE.
- */
-
- /* in trees.c */
-void _tr_init OF((deflate_state *s));
-int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
-ulg _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len,
- int eof));
-void _tr_align OF((deflate_state *s));
-void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len,
- int eof));
-void _tr_stored_type_only OF((deflate_state *));
-
-#endif
-/* --- deflate.h */
-
-/* +++ deflate.c */
-/* deflate.c -- compress data using the deflation algorithm
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/*
- * ALGORITHM
- *
- * The "deflation" process depends on being able to identify portions
- * of the input text which are identical to earlier input (within a
- * sliding window trailing behind the input currently being processed).
- *
- * The most straightforward technique turns out to be the fastest for
- * most input files: try all possible matches and select the longest.
- * The key feature of this algorithm is that insertions into the string
- * dictionary are very simple and thus fast, and deletions are avoided
- * completely. Insertions are performed at each input character, whereas
- * string matches are performed only when the previous match ends. So it
- * is preferable to spend more time in matches to allow very fast string
- * insertions and avoid deletions. The matching algorithm for small
- * strings is inspired from that of Rabin & Karp. A brute force approach
- * is used to find longer strings when a small match has been found.
- * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
- * (by Leonid Broukhis).
- * A previous version of this file used a more sophisticated algorithm
- * (by Fiala and Greene) which is guaranteed to run in linear amortized
- * time, but has a larger average cost, uses more memory and is patented.
- * However the F&G algorithm may be faster for some highly redundant
- * files if the parameter max_chain_length (described below) is too large.
- *
- * ACKNOWLEDGEMENTS
- *
- * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
- * I found it in 'freeze' written by Leonid Broukhis.
- * Thanks to many people for bug reports and testing.
- *
- * REFERENCES
- *
- * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
- * Available in ftp://ds.internic.net/rfc/rfc1951.txt
- *
- * A description of the Rabin and Karp algorithm is given in the book
- * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
- *
- * Fiala,E.R., and Greene,D.H.
- * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595
- *
- */
-
-/* From: deflate.c,v 1.15 1996/07/24 13:40:58 me Exp $ */
-
-/* #include "deflate.h" */
-
-char deflate_copyright[] = " deflate 1.0.4 Copyright 1995-1996 Jean-loup Gailly ";
-/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
- */
-
-/* ===========================================================================
- * Function prototypes.
- */
-typedef enum {
- need_more, /* block not completed, need more input or more output */
- block_done, /* block flush performed */
- finish_started, /* finish started, need only more output at next deflate */
- finish_done /* finish done, accept no more input or output */
-} block_state;
-
-typedef block_state (*compress_func) OF((deflate_state *s, int flush));
-/* Compression function. Returns the block state after the call. */
-
-local void fill_window OF((deflate_state *s));
-local block_state deflate_stored OF((deflate_state *s, int flush));
-local block_state deflate_fast OF((deflate_state *s, int flush));
-local block_state deflate_slow OF((deflate_state *s, int flush));
-local void lm_init OF((deflate_state *s));
-local void putShortMSB OF((deflate_state *s, uInt b));
-local void flush_pending OF((z_streamp strm));
-local int read_buf OF((z_streamp strm, charf *buf, unsigned size));
-#ifdef ASMV
- void match_init OF((void)); /* asm code initialization */
- uInt longest_match OF((deflate_state *s, IPos cur_match));
-#else
-local uInt longest_match OF((deflate_state *s, IPos cur_match));
-#endif
-
-#ifdef DEBUG_ZLIB
-local void check_match OF((deflate_state *s, IPos start, IPos match,
- int length));
-#endif
-
-/* ===========================================================================
- * Local data
- */
-
-#define NIL 0
-/* Tail of hash chains */
-
-#ifndef TOO_FAR
-# define TOO_FAR 4096
-#endif
-/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
-
-#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
-/* Minimum amount of lookahead, except at the end of the input file.
- * See deflate.c for comments about the MIN_MATCH+1.
- */
-
-/* Values for max_lazy_match, good_match and max_chain_length, depending on
- * the desired pack level (0..9). The values given below have been tuned to
- * exclude worst case performance for pathological files. Better values may be
- * found for specific files.
- */
-typedef struct config_s {
- ush good_length; /* reduce lazy search above this match length */
- ush max_lazy; /* do not perform lazy search above this match length */
- ush nice_length; /* quit search above this match length */
- ush max_chain;
- compress_func func;
-} config;
-
-local config configuration_table[10] = {
-/* good lazy nice chain */
-/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
-/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
-/* 2 */ {4, 5, 16, 8, deflate_fast},
-/* 3 */ {4, 6, 32, 32, deflate_fast},
-
-/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
-/* 5 */ {8, 16, 32, 32, deflate_slow},
-/* 6 */ {8, 16, 128, 128, deflate_slow},
-/* 7 */ {8, 32, 128, 256, deflate_slow},
-/* 8 */ {32, 128, 258, 1024, deflate_slow},
-/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
-
-/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
- * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
- * meaning.
- */
-
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
-#ifndef NO_DUMMY_DECL
-struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
-#endif
-
-/* ===========================================================================
- * Update a hash value with the given input byte
- * IN assertion: all calls to UPDATE_HASH are made with consecutive
- * input characters, so that a running hash key can be computed from the
- * previous key instead of complete recalculation each time.
- */
-#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
-
-
-/* ===========================================================================
- * Insert string str in the dictionary and set match_head to the previous head
- * of the hash chain (the most recent string with same hash key). Return
- * the previous length of the hash chain.
- * IN assertion: all calls to INSERT_STRING are made with consecutive
- * input characters and the first MIN_MATCH bytes of str are valid
- * (except for the last MIN_MATCH-1 bytes of the input file).
- */
-#define INSERT_STRING(s, str, match_head) \
- (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
- s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
- s->head[s->ins_h] = (Pos)(str))
-
-/* ===========================================================================
- * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
- * prev[] will be initialized on the fly.
- */
-#define CLEAR_HASH(s) \
- s->head[s->hash_size-1] = NIL; \
- zmemzero((charf *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
-
-/* ========================================================================= */
-int deflateInit_(strm, level, version, stream_size)
- z_streamp strm;
- int level;
- const char *version;
- int stream_size;
-{
- return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY, version, stream_size);
- /* To do: ignore strm->next_in if we use it as window */
-}
-
-/* ========================================================================= */
-int deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
- version, stream_size)
- z_streamp strm;
- int level;
- int method;
- int windowBits;
- int memLevel;
- int strategy;
- const char *version;
- int stream_size;
-{
- deflate_state *s;
- int noheader = 0;
- static char* my_version = ZLIB_VERSION;
-
- ushf *overlay;
- /* We overlay pending_buf and d_buf+l_buf. This works since the average
- * output size for (length,distance) codes is <= 24 bits.
- */
-
- if (version == Z_NULL || version[0] != my_version[0] ||
- stream_size != sizeof(z_stream)) {
- return Z_VERSION_ERROR;
- }
- if (strm == Z_NULL) return Z_STREAM_ERROR;
-
- strm->msg = Z_NULL;
-#ifndef NO_ZCFUNCS
- if (strm->zalloc == Z_NULL) {
- strm->zalloc = zcalloc;
- strm->opaque = (voidpf)0;
- }
- if (strm->zfree == Z_NULL) strm->zfree = zcfree;
-#endif
-
- if (level == Z_DEFAULT_COMPRESSION) level = 6;
-
- if (windowBits < 0) { /* undocumented feature: suppress zlib header */
- noheader = 1;
- windowBits = -windowBits;
- }
- if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
- windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
- strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
- s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
- if (s == Z_NULL) return Z_MEM_ERROR;
- strm->state = (struct internal_state FAR *)s;
- s->strm = strm;
-
- s->noheader = noheader;
- s->w_bits = windowBits;
- s->w_size = 1 << s->w_bits;
- s->w_mask = s->w_size - 1;
-
- s->hash_bits = memLevel + 7;
- s->hash_size = 1 << s->hash_bits;
- s->hash_mask = s->hash_size - 1;
- s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
-
- s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
- s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
- s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
-
- s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
-
- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
- s->pending_buf = (uchf *) overlay;
- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
-
- if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
- s->pending_buf == Z_NULL) {
- strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
- deflateEnd (strm);
- return Z_MEM_ERROR;
- }
- s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
-
- s->level = level;
- s->strategy = strategy;
- s->method = (Byte)method;
-
- return deflateReset(strm);
-}
-
-/* ========================================================================= */
-int deflateSetDictionary (strm, dictionary, dictLength)
- z_streamp strm;
- const Bytef *dictionary;
- uInt dictLength;
-{
- deflate_state *s;
- uInt length = dictLength;
- uInt n;
- IPos hash_head = 0;
-
- if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
- return Z_STREAM_ERROR;
-
- s = (deflate_state *) strm->state;
- if (s->status != INIT_STATE) return Z_STREAM_ERROR;
-
- strm->adler = adler32(strm->adler, dictionary, dictLength);
-
- if (length < MIN_MATCH) return Z_OK;
- if (length > MAX_DIST(s)) {
- length = MAX_DIST(s);
-#ifndef USE_DICT_HEAD
- dictionary += dictLength - length; /* use the tail of the dictionary */
-#endif
- }
- zmemcpy((charf *)s->window, dictionary, length);
- s->strstart = length;
- s->block_start = (long)length;
-
- /* Insert all strings in the hash table (except for the last two bytes).
- * s->lookahead stays null, so s->ins_h will be recomputed at the next
- * call of fill_window.
- */
- s->ins_h = s->window[0];
- UPDATE_HASH(s, s->ins_h, s->window[1]);
- for (n = 0; n <= length - MIN_MATCH; n++) {
- INSERT_STRING(s, n, hash_head);
- }
- if (hash_head) hash_head = 0; /* to make compiler happy */
- return Z_OK;
-}
-
-/* ========================================================================= */
-int deflateReset (strm)
- z_streamp strm;
-{
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL ||
- strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;
-
- strm->total_in = strm->total_out = 0;
- strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
- strm->data_type = Z_UNKNOWN;
-
- s = (deflate_state *)strm->state;
- s->pending = 0;
- s->pending_out = s->pending_buf;
-
- if (s->noheader < 0) {
- s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
- }
- s->status = s->noheader ? BUSY_STATE : INIT_STATE;
- strm->adler = 1;
- s->last_flush = Z_NO_FLUSH;
-
- _tr_init(s);
- lm_init(s);
-
- return Z_OK;
-}
-
-/* ========================================================================= */
-int deflateParams(strm, level, strategy)
- z_streamp strm;
- int level;
- int strategy;
-{
- deflate_state *s;
- compress_func func;
- int err = Z_OK;
-
- if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- if (level == Z_DEFAULT_COMPRESSION) {
- level = 6;
- }
- if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
- func = configuration_table[s->level].func;
-
- if (func != configuration_table[level].func && strm->total_in != 0) {
- /* Flush the last buffer: */
- err = deflate(strm, Z_PARTIAL_FLUSH);
- }
- if (s->level != level) {
- s->level = level;
- s->max_lazy_match = configuration_table[level].max_lazy;
- s->good_match = configuration_table[level].good_length;
- s->nice_match = configuration_table[level].nice_length;
- s->max_chain_length = configuration_table[level].max_chain;
- }
- s->strategy = strategy;
- return err;
-}
-
-/* =========================================================================
- * Put a short in the pending buffer. The 16-bit value is put in MSB order.
- * IN assertion: the stream state is correct and there is enough room in
- * pending_buf.
- */
-local void putShortMSB (s, b)
- deflate_state *s;
- uInt b;
-{
- put_byte(s, (Byte)(b >> 8));
- put_byte(s, (Byte)(b & 0xff));
-}
-
-/* =========================================================================
- * Flush as much pending output as possible. All deflate() output goes
- * through this function so some applications may wish to modify it
- * to avoid allocating a large strm->next_out buffer and copying into it.
- * (See also read_buf()).
- */
-local void flush_pending(strm)
- z_streamp strm;
-{
- deflate_state *s = (deflate_state *) strm->state;
- unsigned len = s->pending;
-
- if (len > strm->avail_out) len = strm->avail_out;
- if (len == 0) return;
-
- if (strm->next_out != Z_NULL) {
- zmemcpy(strm->next_out, s->pending_out, len);
- strm->next_out += len;
- }
- s->pending_out += len;
- strm->total_out += len;
- strm->avail_out -= len;
- s->pending -= len;
- if (s->pending == 0) {
- s->pending_out = s->pending_buf;
- }
-}
-
-/* ========================================================================= */
-int deflate (strm, flush)
- z_streamp strm;
- int flush;
-{
- int old_flush; /* value of flush param for previous deflate call */
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL ||
- flush > Z_FINISH || flush < 0) {
- return Z_STREAM_ERROR;
- }
- s = (deflate_state *) strm->state;
-
- if ((strm->next_in == Z_NULL && strm->avail_in != 0) ||
- (s->status == FINISH_STATE && flush != Z_FINISH)) {
- ERR_RETURN(strm, Z_STREAM_ERROR);
- }
- if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
-
- s->strm = strm; /* just in case */
- old_flush = s->last_flush;
- s->last_flush = flush;
-
- /* Write the zlib header */
- if (s->status == INIT_STATE) {
-
- uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
- uInt level_flags = (s->level-1) >> 1;
-
- if (level_flags > 3) level_flags = 3;
- header |= (level_flags << 6);
- if (s->strstart != 0) header |= PRESET_DICT;
- header += 31 - (header % 31);
-
- s->status = BUSY_STATE;
- putShortMSB(s, header);
-
- /* Save the adler32 of the preset dictionary: */
- if (s->strstart != 0) {
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
- }
- strm->adler = 1L;
- }
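
    /* Worked example (editorial, not in the original source) for the
     * defaults w_bits == 15, level == 6, no preset dictionary:
     *
     *   header      = (Z_DEFLATED + (7 << 4)) << 8   = 0x7800
     *   level_flags = (6 - 1) >> 1                   = 2
     *   header     |= level_flags << 6              -> 0x7880
     *   0x7880 % 31 == 3, so header += 31 - 3       -> 0x789C
     *
     * 0x789C is divisible by 31 as RFC 1950 requires; 0x78 0x9C is the
     * familiar two-byte prefix of a default zlib stream.
     */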
-
- /* Flush as much pending output as possible */
- if (s->pending != 0) {
- flush_pending(strm);
- if (strm->avail_out == 0) {
- /* Since avail_out is 0, deflate will be called again with
- * more output space, but possibly with both pending and
- * avail_in equal to zero. There won't be anything to do,
- * but this is not an error situation so make sure we
- * return OK instead of BUF_ERROR at next call of deflate:
- */
- s->last_flush = -1;
- return Z_OK;
- }
-
- /* Make sure there is something to do and avoid duplicate consecutive
- * flushes. For repeated and useless calls with Z_FINISH, we keep
- * returning Z_STREAM_END instead of Z_BUF_ERROR.
- */
- } else if (strm->avail_in == 0 && flush <= old_flush &&
- flush != Z_FINISH) {
- ERR_RETURN(strm, Z_BUF_ERROR);
- }
-
- /* User must not provide more input after the first FINISH: */
- if (s->status == FINISH_STATE && strm->avail_in != 0) {
- ERR_RETURN(strm, Z_BUF_ERROR);
- }
-
- /* Start a new block or continue the current one.
- */
- if (strm->avail_in != 0 || s->lookahead != 0 ||
- (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
- block_state bstate;
-
- bstate = (*(configuration_table[s->level].func))(s, flush);
-
- if (bstate == finish_started || bstate == finish_done) {
- s->status = FINISH_STATE;
- }
- if (bstate == need_more || bstate == finish_started) {
- if (strm->avail_out == 0) {
- s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
- }
- return Z_OK;
- /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
- * of deflate should use the same flush parameter to make sure
- * that the flush is complete. So we don't have to output an
- * empty block here; this will be done at the next call. This also
- * ensures that for a very small output buffer, we emit at most
- * one empty block.
- */
- }
- if (bstate == block_done) {
- if (flush == Z_PARTIAL_FLUSH) {
- _tr_align(s);
- } else if (flush == Z_PACKET_FLUSH) {
- /* Output just the 3-bit `stored' block type value,
- without the length bytes or any data. */
- _tr_stored_type_only(s);
- } else { /* FULL_FLUSH or SYNC_FLUSH */
- _tr_stored_block(s, (char*)0, 0L, 0);
- /* For a full flush, this empty block will be recognized
- * as a special marker by inflate_sync().
- */
- if (flush == Z_FULL_FLUSH) {
- CLEAR_HASH(s); /* forget history */
- }
- }
- flush_pending(strm);
- if (strm->avail_out == 0) {
- s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
- return Z_OK;
- }
- }
- }
- Assert(strm->avail_out > 0, "bug2");
-
- if (flush != Z_FINISH) return Z_OK;
- if (s->noheader) return Z_STREAM_END;
-
- /* Write the zlib trailer (adler32) */
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
- flush_pending(strm);
- /* If avail_out is zero, the application will call deflate again
- * to flush the rest.
- */
- s->noheader = -1; /* write the trailer only once! */
- return s->pending != 0 ? Z_OK : Z_STREAM_END;
-}
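
The contract above is easiest to see from the standard driving loop: keep
calling deflate() with Z_FINISH, giving it fresh output space each time,
until it returns Z_STREAM_END. A minimal sketch (strm is assumed already
set up by deflateInit(); write_out() is a hypothetical output sink):

    Byte out[16384];
    int ret;

    strm.next_in  = in;                      /* whole input at once */
    strm.avail_in = in_len;
    do {
        strm.next_out  = out;
        strm.avail_out = sizeof(out);
        ret = deflate(&strm, Z_FINISH);      /* Z_OK or Z_STREAM_END */
        write_out(out, sizeof(out) - strm.avail_out);
    } while (ret == Z_OK);
    /* ret must now be Z_STREAM_END; anything else is a real error. */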
-
-/* ========================================================================= */
-int deflateEnd (strm)
- z_streamp strm;
-{
- int status;
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- status = s->status;
- if (status != INIT_STATE && status != BUSY_STATE &&
- status != FINISH_STATE) {
- return Z_STREAM_ERROR;
- }
-
- /* Deallocate in reverse order of allocations: */
- TRY_FREE(strm, s->pending_buf);
- TRY_FREE(strm, s->head);
- TRY_FREE(strm, s->prev);
- TRY_FREE(strm, s->window);
-
- ZFREE(strm, s);
- strm->state = Z_NULL;
-
- return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
-}
-
-/* =========================================================================
- * Copy the source state to the destination state.
- */
-int deflateCopy (dest, source)
- z_streamp dest;
- z_streamp source;
-{
- deflate_state *ds;
- deflate_state *ss;
- ushf *overlay;
-
- if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL)
- return Z_STREAM_ERROR;
- ss = (deflate_state *) source->state;
-
- *dest = *source;
-
- ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
- if (ds == Z_NULL) return Z_MEM_ERROR;
- dest->state = (struct internal_state FAR *) ds;
- *ds = *ss;
- ds->strm = dest;
-
- ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
- ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
- ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
- ds->pending_buf = (uchf *) overlay;
-
- if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
- ds->pending_buf == Z_NULL) {
- deflateEnd (dest);
- return Z_MEM_ERROR;
- }
- /* ??? following zmemcpy doesn't work for 16-bit MSDOS */
- zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
- zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
- zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
- zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
-
- ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
-
- ds->l_desc.dyn_tree = ds->dyn_ltree;
- ds->d_desc.dyn_tree = ds->dyn_dtree;
- ds->bl_desc.dyn_tree = ds->bl_tree;
-
- return Z_OK;
-}
-
-/* ===========================================================================
- * Return the number of bytes of output which are immediately available
- * from the compressor.
- */
-int deflateOutputPending (strm)
- z_streamp strm;
-{
- if (strm == Z_NULL || strm->state == Z_NULL) return 0;
-
- return ((deflate_state *)(strm->state))->pending;
-}
-
-/* ===========================================================================
- * Read a new buffer from the current input stream, update the adler32
- * and total number of bytes read. All deflate() input goes through
- * this function so some applications may wish to modify it to avoid
- * allocating a large strm->next_in buffer and copying from it.
- * (See also flush_pending()).
- */
-local int read_buf(strm, buf, size)
- z_streamp strm;
- charf *buf;
- unsigned size;
-{
- unsigned len = strm->avail_in;
-
- if (len > size) len = size;
- if (len == 0) return 0;
-
- strm->avail_in -= len;
-
- if (!((deflate_state *)(strm->state))->noheader) {
- strm->adler = adler32(strm->adler, strm->next_in, len);
- }
- zmemcpy(buf, strm->next_in, len);
- strm->next_in += len;
- strm->total_in += len;
-
- return (int)len;
-}
-
-/* ===========================================================================
- * Initialize the "longest match" routines for a new zlib stream
- */
-local void lm_init (s)
- deflate_state *s;
-{
- s->window_size = (ulg)2L*s->w_size;
-
- CLEAR_HASH(s);
-
- /* Set the default configuration parameters:
- */
- s->max_lazy_match = configuration_table[s->level].max_lazy;
- s->good_match = configuration_table[s->level].good_length;
- s->nice_match = configuration_table[s->level].nice_length;
- s->max_chain_length = configuration_table[s->level].max_chain;
-
- s->strstart = 0;
- s->block_start = 0L;
- s->lookahead = 0;
- s->match_length = s->prev_length = MIN_MATCH-1;
- s->match_available = 0;
- s->ins_h = 0;
-#ifdef ASMV
- match_init(); /* initialize the asm code */
-#endif
-}
-
-/* ===========================================================================
- * Set match_start to the longest match starting at the given string and
- * return its length. Matches shorter than or equal to prev_length are discarded,
- * in which case the result is equal to prev_length and match_start is
- * garbage.
- * IN assertions: cur_match is the head of the hash chain for the current
- * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
- * OUT assertion: the match length is not greater than s->lookahead.
- */
-#ifndef ASMV
-/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
- * match.S. The code will be functionally equivalent.
- */
-local uInt longest_match(s, cur_match)
- deflate_state *s;
- IPos cur_match; /* current match */
-{
- unsigned chain_length = s->max_chain_length;/* max hash chain length */
- register Bytef *scan = s->window + s->strstart; /* current string */
- register Bytef *match; /* matched string */
- register int len; /* length of current match */
- int best_len = s->prev_length; /* best match length so far */
- int nice_match = s->nice_match; /* stop if match long enough */
- IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
- s->strstart - (IPos)MAX_DIST(s) : NIL;
- /* Stop when cur_match becomes <= limit. To simplify the code,
- * we prevent matches with the string of window index 0.
- */
- Posf *prev = s->prev;
- uInt wmask = s->w_mask;
-
-#ifdef UNALIGNED_OK
- /* Compare two bytes at a time. Note: this is not always beneficial.
- * Try with and without -DUNALIGNED_OK to check.
- */
- register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
- register ush scan_start = *(ushf*)scan;
- register ush scan_end = *(ushf*)(scan+best_len-1);
-#else
- register Bytef *strend = s->window + s->strstart + MAX_MATCH;
- register Byte scan_end1 = scan[best_len-1];
- register Byte scan_end = scan[best_len];
-#endif
-
- /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
- * It is easy to get rid of this optimization if necessary.
- */
- Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
-
- /* Do not waste too much time if we already have a good match: */
- if (s->prev_length >= s->good_match) {
- chain_length >>= 2;
- }
- /* Do not look for matches beyond the end of the input. This is necessary
- * to make deflate deterministic.
- */
- if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
-
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
-
- do {
- Assert(cur_match < s->strstart, "no future");
- match = s->window + cur_match;
-
- /* Skip to next match if the match length cannot increase
- * or if the match length is less than 2:
- */
-#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
- /* This code assumes sizeof(unsigned short) == 2. Do not use
- * UNALIGNED_OK if your compiler uses a different size.
- */
- if (*(ushf*)(match+best_len-1) != scan_end ||
- *(ushf*)match != scan_start) continue;
-
- /* It is not necessary to compare scan[2] and match[2] since they are
- * always equal when the other bytes match, given that the hash keys
- * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
- * strstart+3, +5, ... up to strstart+257. We check for insufficient
- * lookahead only every 4th comparison; the 128th check will be made
- * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
- * necessary to put more guard bytes at the end of the window, or
- * to check more often for insufficient lookahead.
- */
- Assert(scan[2] == match[2], "scan[2]?");
- scan++, match++;
- do {
- } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- scan < strend);
- /* The funny "do {}" generates better code on most compilers */
-
- /* Here, scan <= window+strstart+257 */
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
- if (*scan == *match) scan++;
-
- len = (MAX_MATCH - 1) - (int)(strend-scan);
- scan = strend - (MAX_MATCH-1);
-
-#else /* UNALIGNED_OK */
-
- if (match[best_len] != scan_end ||
- match[best_len-1] != scan_end1 ||
- *match != *scan ||
- *++match != scan[1]) continue;
-
- /* The check at best_len-1 can be removed because it will be made
- * again later. (This heuristic is not always a win.)
- * It is not necessary to compare scan[2] and match[2] since they
- * are always equal when the other bytes match, given that
- * the hash keys are equal and that HASH_BITS >= 8.
- */
- scan += 2, match++;
- Assert(*scan == *match, "match[2]?");
-
- /* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
- */
- do {
- } while (*++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- scan < strend);
-
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
-
- len = MAX_MATCH - (int)(strend - scan);
- scan = strend - MAX_MATCH;
-
-#endif /* UNALIGNED_OK */
-
- if (len > best_len) {
- s->match_start = cur_match;
- best_len = len;
- if (len >= nice_match) break;
-#ifdef UNALIGNED_OK
- scan_end = *(ushf*)(scan+best_len-1);
-#else
- scan_end1 = scan[best_len-1];
- scan_end = scan[best_len];
-#endif
- }
- } while ((cur_match = prev[cur_match & wmask]) > limit
- && --chain_length != 0);
-
- if ((uInt)best_len <= s->lookahead) return best_len;
- return s->lookahead;
-}
-#endif /* ASMV */
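
Stripped of the UNALIGNED_OK tricks, the good_match/nice_match heuristics
and the chain_length/limit cut-offs, the loop above reduces to the
following reference sketch (editorial, for reading only; it computes the
same result as the optimized code but is not a drop-in replacement):

    local uInt plain_longest_match(deflate_state *s, IPos cur_match)
    {
        uInt best = s->prev_length;            /* must beat this */
        Bytef *scan = s->window + s->strstart;

        do {
            Bytef *match = s->window + cur_match;
            uInt len = 0;
            while (len < MAX_MATCH && len < s->lookahead &&
                   scan[len] == match[len])
                len++;
            if (len > best) {
                best = len;
                s->match_start = cur_match;
            }
        } while ((cur_match = s->prev[cur_match & s->w_mask]) != NIL);

        return best <= s->lookahead ? best : s->lookahead;
    }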
-
-#ifdef DEBUG_ZLIB
-/* ===========================================================================
- * Check that the match at match_start is indeed a match.
- */
-local void check_match(s, start, match, length)
- deflate_state *s;
- IPos start, match;
- int length;
-{
- /* check that the match is indeed a match */
- if (zmemcmp((charf *)s->window + match,
- (charf *)s->window + start, length) != EQUAL) {
- fprintf(stderr, " start %u, match %u, length %d\n",
- start, match, length);
- do {
- fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
- } while (--length != 0);
- z_error("invalid match");
- }
- if (z_verbose > 1) {
- fprintf(stderr,"\\[%d,%d]", start-match, length);
- do { putc(s->window[start++], stderr); } while (--length != 0);
- }
-}
-#else
-# define check_match(s, start, match, length)
-#endif
-
-/* ===========================================================================
- * Fill the window when the lookahead becomes insufficient.
- * Updates strstart and lookahead.
- *
- * IN assertion: lookahead < MIN_LOOKAHEAD
- * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
- * At least one byte has been read, or avail_in == 0; reads are
- * performed for at least two bytes (required for the zip translate_eol
- * option -- not supported here).
- */
-local void fill_window(s)
- deflate_state *s;
-{
- register unsigned n, m;
- register Posf *p;
- unsigned more; /* Amount of free space at the end of the window. */
- uInt wsize = s->w_size;
-
- do {
- more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
-
- /* Deal with !@#$% 64K limit: */
- if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
- more = wsize;
-
- } else if (more == (unsigned)(-1)) {
- /* Very unlikely, but possible on a 16-bit machine if strstart == 0
- * and lookahead == 1 (input done one byte at a time)
- */
- more--;
-
- /* If the window is almost full and there is insufficient lookahead,
- * move the upper half to the lower one to make room in the upper half.
- */
- } else if (s->strstart >= wsize+MAX_DIST(s)) {
-
- zmemcpy((charf *)s->window, (charf *)s->window+wsize,
- (unsigned)wsize);
- s->match_start -= wsize;
- s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
- s->block_start -= (long) wsize;
-
- /* Slide the hash table (could be avoided with 32 bit values
- at the expense of memory usage). We slide even when level == 0
- to keep the hash table consistent if we switch back to level > 0
- later. (Using level 0 permanently is not an optimal usage of
- zlib, so we don't care about this pathological case.)
- */
- n = s->hash_size;
- p = &s->head[n];
- do {
- m = *--p;
- *p = (Pos)(m >= wsize ? m-wsize : NIL);
- } while (--n);
-
- n = wsize;
- p = &s->prev[n];
- do {
- m = *--p;
- *p = (Pos)(m >= wsize ? m-wsize : NIL);
- /* If n is not on any hash chain, prev[n] is garbage but
- * its value will never be used.
- */
- } while (--n);
- more += wsize;
- }
- if (s->strm->avail_in == 0) return;
-
- /* If there was no sliding:
- * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
- * more == window_size - lookahead - strstart
- * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
- * => more >= window_size - 2*WSIZE + 2
- * In the BIG_MEM or MMAP case (not yet supported),
- * window_size == input_size + MIN_LOOKAHEAD &&
- * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
- * Otherwise, window_size == 2*WSIZE so more >= 2.
- * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
- */
- Assert(more >= 2, "more < 2");
-
- n = read_buf(s->strm, (charf *)s->window + s->strstart + s->lookahead,
- more);
- s->lookahead += n;
-
- /* Initialize the hash value now that we have some input: */
- if (s->lookahead >= MIN_MATCH) {
- s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- }
- /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
- * but this is not important since only literal bytes will be emitted.
- */
-
- } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
-}
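
The hash rebasing in the slide above is the subtle part: every stored
position is either shifted down by wsize or, if it now points below the
start of the window, reset to NIL. For example, with wsize == 32768:

    m = 40000;  m = (Pos)(m >= wsize ? m - wsize : NIL);   /* ->  7232 */
    m =  1000;  m = (Pos)(m >= wsize ? m - wsize : NIL);   /* ->  NIL  */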
-
-/* ===========================================================================
- * Flush the current block, with given end-of-file flag.
- * IN assertion: strstart is set to the end of the current match.
- */
-#define FLUSH_BLOCK_ONLY(s, eof) { \
- _tr_flush_block(s, (s->block_start >= 0L ? \
- (charf *)&s->window[(unsigned)s->block_start] : \
- (charf *)Z_NULL), \
- (ulg)((long)s->strstart - s->block_start), \
- (eof)); \
- s->block_start = s->strstart; \
- flush_pending(s->strm); \
- Tracev((stderr,"[FLUSH]")); \
-}
-
-/* Same but force premature exit if necessary. */
-#define FLUSH_BLOCK(s, eof) { \
- FLUSH_BLOCK_ONLY(s, eof); \
- if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
-}
-
-/* ===========================================================================
- * Copy without compression as much as possible from the input stream, return
- * the current block state.
- * This function does not insert new strings in the dictionary since
- * uncompressible data is probably not useful. This function is used
- * only for the level=0 compression option.
- * NOTE: this function should be optimized to avoid extra copying from
- * window to pending_buf.
- */
-local block_state deflate_stored(s, flush)
- deflate_state *s;
- int flush;
-{
- /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
- * to pending_buf_size, and each stored block has a 5 byte header:
- */
- ulg max_block_size = 0xffff;
- ulg max_start;
-
- if (max_block_size > s->pending_buf_size - 5) {
- max_block_size = s->pending_buf_size - 5;
- }
-
- /* Copy as much as possible from input to output: */
- for (;;) {
- /* Fill the window as much as possible: */
- if (s->lookahead <= 1) {
-
- Assert(s->strstart < s->w_size+MAX_DIST(s) ||
- s->block_start >= (long)s->w_size, "slide too late");
-
- fill_window(s);
- if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
-
- if (s->lookahead == 0) break; /* flush the current block */
- }
- Assert(s->block_start >= 0L, "block gone");
-
- s->strstart += s->lookahead;
- s->lookahead = 0;
-
- /* Emit a stored block if pending_buf will be full: */
- max_start = s->block_start + max_block_size;
- if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
- /* strstart == 0 is possible after wraparound on a 16-bit machine */
- s->lookahead = (uInt)(s->strstart - max_start);
- s->strstart = (uInt)max_start;
- FLUSH_BLOCK(s, 0);
- }
- /* Flush if we may have to slide, otherwise block_start may become
- * negative and the data will be gone:
- */
- if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
- FLUSH_BLOCK(s, 0);
- }
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
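
The "5 byte header" bound used above comes from the stored-block layout
of RFC 1951, shown here for one block with LEN == 0x1234:

    /*   3 bits : BFINAL + BTYPE 00, then zero padding to a byte boundary
     *   2 bytes: LEN  = 34 12    (least significant byte first)
     *   2 bytes: NLEN = cb ed    (one's complement of LEN)
     *   LEN bytes of raw input, copied verbatim
     */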
-
-/* ===========================================================================
- * Compress as much as possible from the input stream, return the current
- * block state.
- * This function does not perform lazy evaluation of matches and inserts
- * new strings in the dictionary only for unmatched strings or for short
- * matches. It is used only for the fast compression options.
- */
-local block_state deflate_fast(s, flush)
- deflate_state *s;
- int flush;
-{
- IPos hash_head = NIL; /* head of the hash chain */
- int bflush; /* set if current block must be flushed */
-
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- if (s->lookahead < MIN_LOOKAHEAD) {
- fill_window(s);
- if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
- return need_more;
- }
- if (s->lookahead == 0) break; /* flush the current block */
- }
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- if (s->lookahead >= MIN_MATCH) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
-
- /* Find the longest match, discarding those <= prev_length.
- * At this point we have always match_length < MIN_MATCH
- */
- if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- if (s->strategy != Z_HUFFMAN_ONLY) {
- s->match_length = longest_match (s, hash_head);
- }
- /* longest_match() sets match_start */
- }
- if (s->match_length >= MIN_MATCH) {
- check_match(s, s->strstart, s->match_start, s->match_length);
-
- bflush = _tr_tally(s, s->strstart - s->match_start,
- s->match_length - MIN_MATCH);
-
- s->lookahead -= s->match_length;
-
- /* Insert new strings in the hash table only if the match length
- * is not too large. This saves time but degrades compression.
- */
- if (s->match_length <= s->max_insert_length &&
- s->lookahead >= MIN_MATCH) {
- s->match_length--; /* string at strstart already in hash table */
- do {
- s->strstart++;
- INSERT_STRING(s, s->strstart, hash_head);
- /* strstart never exceeds WSIZE-MAX_MATCH, so there are
- * always MIN_MATCH bytes ahead.
- */
- } while (--s->match_length != 0);
- s->strstart++;
- } else {
- s->strstart += s->match_length;
- s->match_length = 0;
- s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
- * matter since it will be recomputed at next deflate call.
- */
- }
- } else {
- /* No match, output a literal byte */
- Tracevv((stderr,"%c", s->window[s->strstart]));
- bflush = _tr_tally (s, 0, s->window[s->strstart]);
- s->lookahead--;
- s->strstart++;
- }
- if (bflush) FLUSH_BLOCK(s, 0);
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
-
-/* ===========================================================================
- * Same as above, but achieves better compression. We use a lazy
- * evaluation for matches: a match is finally adopted only if there is
- * no better match at the next window position.
- */
-local block_state deflate_slow(s, flush)
- deflate_state *s;
- int flush;
-{
- IPos hash_head = NIL; /* head of hash chain */
- int bflush; /* set if current block must be flushed */
-
- /* Process the input block. */
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- if (s->lookahead < MIN_LOOKAHEAD) {
- fill_window(s);
- if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
- return need_more;
- }
- if (s->lookahead == 0) break; /* flush the current block */
- }
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- if (s->lookahead >= MIN_MATCH) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
-
- /* Find the longest match, discarding those <= prev_length.
- */
- s->prev_length = s->match_length, s->prev_match = s->match_start;
- s->match_length = MIN_MATCH-1;
-
- if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
- s->strstart - hash_head <= MAX_DIST(s)) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- if (s->strategy != Z_HUFFMAN_ONLY) {
- s->match_length = longest_match (s, hash_head);
- }
- /* longest_match() sets match_start */
-
- if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
- (s->match_length == MIN_MATCH &&
- s->strstart - s->match_start > TOO_FAR))) {
-
- /* If prev_match is also MIN_MATCH, match_start is garbage
- * but we will ignore the current match anyway.
- */
- s->match_length = MIN_MATCH-1;
- }
- }
- /* If there was a match at the previous step and the current
- * match is not better, output the previous match:
- */
- if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
- uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
- /* Do not insert strings in hash table beyond this. */
-
- check_match(s, s->strstart-1, s->prev_match, s->prev_length);
-
- bflush = _tr_tally(s, s->strstart -1 - s->prev_match,
- s->prev_length - MIN_MATCH);
-
- /* Insert in hash table all strings up to the end of the match.
- * strstart-1 and strstart are already inserted. If there is not
- * enough lookahead, the last two strings are not inserted in
- * the hash table.
- */
- s->lookahead -= s->prev_length-1;
- s->prev_length -= 2;
- do {
- if (++s->strstart <= max_insert) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
- } while (--s->prev_length != 0);
- s->match_available = 0;
- s->match_length = MIN_MATCH-1;
- s->strstart++;
-
- if (bflush) FLUSH_BLOCK(s, 0);
-
- } else if (s->match_available) {
- /* If there was no match at the previous position, output a
- * single literal. If there was a match but the current match
- * is longer, truncate the previous match to a single literal.
- */
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- if (_tr_tally (s, 0, s->window[s->strstart-1])) {
- FLUSH_BLOCK_ONLY(s, 0);
- }
- s->strstart++;
- s->lookahead--;
- if (s->strm->avail_out == 0) return need_more;
- } else {
- /* There is no previous match to compare with, wait for
- * the next step to decide.
- */
- s->match_available = 1;
- s->strstart++;
- s->lookahead--;
- }
- }
- Assert (flush != Z_NO_FLUSH, "no flush?");
- if (s->match_available) {
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- _tr_tally (s, 0, s->window[s->strstart-1]);
- s->match_available = 0;
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
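
A short trace shows what the lazy evaluation buys (editorial example):

    /* input:  a b c b c d e a b c d e      (offsets 0..11)
     *
     * At offset 7 the best match is "abc" (length 3, against offset 0).
     * One byte later, at offset 8, "bcde" (length 4, against offset 3)
     * is available. deflate_slow() emits the 'a' as a literal and takes
     * the longer match; deflate_fast() would commit to "abc" and then
     * emit 'd' and 'e' as literals, "de" alone being shorter than
     * MIN_MATCH.
     */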
-/* --- deflate.c */
-
-/* +++ trees.c */
-/* trees.c -- output deflated data using Huffman coding
- * Copyright (C) 1995-1996 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/*
- * ALGORITHM
- *
- * The "deflation" process uses several Huffman trees. The more
- * common source values are represented by shorter bit sequences.
- *
- * Each code tree is stored in a compressed form which is itself
- * a Huffman encoding of the lengths of all the code strings (in
- * ascending order by source values). The actual code strings are
- * reconstructed from the lengths in the inflate process, as described
- * in the deflate specification.
- *
- * REFERENCES
- *
- * Deutsch, L.P., "'Deflate' Compressed Data Format Specification".
- * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
- *
- * Storer, James A.
- * Data Compression: Methods and Theory, pp. 49-50.
- * Computer Science Press, 1988. ISBN 0-7167-8156-5.
- *
- * Sedgewick, R.
- * Algorithms, p290.
- * Addison-Wesley, 1983. ISBN 0-201-06672-6.
- */
-
-/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
-
-/* #include "deflate.h" */
-
-#ifdef DEBUG_ZLIB
-# include <ctype.h>
-#endif
-
-/* ===========================================================================
- * Constants
- */
-
-#define MAX_BL_BITS 7
-/* Bit length codes must not exceed MAX_BL_BITS bits */
-
-#define END_BLOCK 256
-/* end of block literal code */
-
-#define REP_3_6 16
-/* repeat previous bit length 3-6 times (2 bits of repeat count) */
-
-#define REPZ_3_10 17
-/* repeat a zero length 3-10 times (3 bits of repeat count) */
-
-#define REPZ_11_138 18
-/* repeat a zero length 11-138 times (7 bits of repeat count) */
-
-local int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
- = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
-
-local int extra_dbits[D_CODES] /* extra bits for each distance code */
- = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
-
-local int extra_blbits[BL_CODES]/* extra bits for each bit length code */
- = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
-
-local uch bl_order[BL_CODES]
- = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
-/* The lengths of the bit length codes are sent in order of decreasing
- * probability, to avoid transmitting the lengths for unused bit length codes.
- */
-
-#define Buf_size (8 * 2*sizeof(char))
-/* Number of bits used within bi_buf. (bi_buf might be implemented on
- * more than 16 bits on some systems.)
- */
-
-/* ===========================================================================
- * Local data. These are initialized only once.
- */
-
-local ct_data static_ltree[L_CODES+2];
-/* The static literal tree. Since the bit lengths are imposed, there is no
- * need for the L_CODES extra codes used during heap construction. However,
- * the codes 286 and 287 are needed to build a canonical tree (see _tr_init
- * below).
- */
-
-local ct_data static_dtree[D_CODES];
-/* The static distance tree. (Actually a trivial tree since all codes use
- * 5 bits.)
- */
-
-local uch dist_code[512];
-/* distance codes. The first 256 values correspond to the distances
- * 3 .. 258, the last 256 values correspond to the top 8 bits of
- * the 15 bit distances.
- */
-
-local uch length_code[MAX_MATCH-MIN_MATCH+1];
-/* length code for each normalized match length (0 == MIN_MATCH) */
-
-local int base_length[LENGTH_CODES];
-/* First normalized length for each code (0 = MIN_MATCH) */
-
-local int base_dist[D_CODES];
-/* First normalized distance for each code (0 = distance of 1) */
-
-struct static_tree_desc_s {
- ct_data *static_tree; /* static tree or NULL */
- intf *extra_bits; /* extra bits for each code or NULL */
- int extra_base; /* base index for extra_bits */
- int elems; /* max number of elements in the tree */
- int max_length; /* max bit length for the codes */
-};
-
-local static_tree_desc static_l_desc =
-{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
-
-local static_tree_desc static_d_desc =
-{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
-
-local static_tree_desc static_bl_desc =
-{(ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
-
-/* ===========================================================================
- * Local (static) routines in this file.
- */
-
-local void tr_static_init OF((void));
-local void init_block OF((deflate_state *s));
-local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
-local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
-local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
-local void build_tree OF((deflate_state *s, tree_desc *desc));
-local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
-local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
-local int build_bl_tree OF((deflate_state *s));
-local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
- int blcodes));
-local void compress_block OF((deflate_state *s, ct_data *ltree,
- ct_data *dtree));
-local void set_data_type OF((deflate_state *s));
-local unsigned bi_reverse OF((unsigned value, int length));
-local void bi_windup OF((deflate_state *s));
-local void bi_flush OF((deflate_state *s));
-local void copy_block OF((deflate_state *s, charf *buf, unsigned len,
- int header));
-
-#ifndef DEBUG_ZLIB
-# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
- /* Send a code of the given tree. c and tree must not have side effects */
-
-#else /* DEBUG_ZLIB */
-# define send_code(s, c, tree) \
- { if (verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
- send_bits(s, tree[c].Code, tree[c].Len); }
-#endif
-
-#define d_code(dist) \
- ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
-/* Mapping from a distance to a distance code. dist is the distance - 1 and
- * must not have side effects. dist_code[256] and dist_code[257] are never
- * used.
- */
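
For instance, a match at distance 300 enters the macro as dist == 299;
since 299 >= 256 the coarse half of the table is used:

    /* d_code(299) == dist_code[256 + (299 >> 7)] == dist_code[258] */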
-
-/* ===========================================================================
- * Output a short LSB first on the stream.
- * IN assertion: there is enough room in pendingBuf.
- */
-#define put_short(s, w) { \
- put_byte(s, (uch)((w) & 0xff)); \
- put_byte(s, (uch)((ush)(w) >> 8)); \
-}
-
-/* ===========================================================================
- * Send a value on a given number of bits.
- * IN assertion: length <= 16 and value fits in length bits.
- */
-#ifdef DEBUG_ZLIB
-local void send_bits OF((deflate_state *s, int value, int length));
-
-local void send_bits(s, value, length)
- deflate_state *s;
- int value; /* value to send */
- int length; /* number of bits */
-{
- Tracevv((stderr," l %2d v %4x ", length, value));
- Assert(length > 0 && length <= 15, "invalid length");
- s->bits_sent += (ulg)length;
-
- /* If not enough room in bi_buf, use (valid) bits from bi_buf and
- * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
- * unused bits in value.
- */
- if (s->bi_valid > (int)Buf_size - length) {
- s->bi_buf |= (value << s->bi_valid);
- put_short(s, s->bi_buf);
- s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
- s->bi_valid += length - Buf_size;
- } else {
- s->bi_buf |= value << s->bi_valid;
- s->bi_valid += length;
- }
-}
-#else /* !DEBUG_ZLIB */
-
-#define send_bits(s, value, length) \
-{ int len = length;\
- if (s->bi_valid > (int)Buf_size - len) {\
- int val = value;\
- s->bi_buf |= (val << s->bi_valid);\
- put_short(s, s->bi_buf);\
- s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
- s->bi_valid += len - Buf_size;\
- } else {\
- s->bi_buf |= (value) << s->bi_valid;\
- s->bi_valid += len;\
- }\
-}
-#endif /* DEBUG_ZLIB */
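
Bits accumulate in bi_buf least-significant first. A worked example of
two consecutive calls on an empty buffer (bi_valid == 0):

    /* send_bits(s, 0x5, 3):  bi_buf = 00000101, bi_valid = 3
     * send_bits(s, 0x3, 2):  0x3 << 3 is ORed in,
     *                        bi_buf = 00011101, bi_valid = 5
     * put_short() flushes two bytes, low byte first, as soon as an
     * incoming value would no longer fit in the Buf_size-bit buffer.
     */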
-
-
-#define MAX(a,b) (a >= b ? a : b)
-/* the arguments must not have side effects */
-
-/* ===========================================================================
- * Initialize the various 'constant' tables. In a multi-threaded environment,
- * this function may be called by two threads concurrently, but this is
- * harmless since both invocations do exactly the same thing.
- */
-local void tr_static_init()
-{
- static int static_init_done;
- int n; /* iterates over tree elements */
- int bits; /* bit counter */
- int length; /* length value */
- int code; /* code value */
- int dist; /* distance index */
- ush bl_count[MAX_BITS+1];
- /* number of codes at each bit length for an optimal tree */
-
- if (static_init_done) return;
-
- /* Initialize the mapping length (0..255) -> length code (0..28) */
- length = 0;
- for (code = 0; code < LENGTH_CODES-1; code++) {
- base_length[code] = length;
- for (n = 0; n < (1<<extra_lbits[code]); n++) {
- length_code[length++] = (uch)code;
- }
- }
- Assert (length == 256, "tr_static_init: length != 256");
- /* Note that the length 255 (match length 258) can be represented
- * in two different ways: code 284 + 5 bits or code 285, so we
- * overwrite length_code[255] to use the best encoding:
- */
- length_code[length-1] = (uch)code;
-
- /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
- dist = 0;
- for (code = 0 ; code < 16; code++) {
- base_dist[code] = dist;
- for (n = 0; n < (1<<extra_dbits[code]); n++) {
- dist_code[dist++] = (uch)code;
- }
- }
- Assert (dist == 256, "tr_static_init: dist != 256");
- dist >>= 7; /* from now on, all distances are divided by 128 */
- for ( ; code < D_CODES; code++) {
- base_dist[code] = dist << 7;
- for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
- dist_code[256 + dist++] = (uch)code;
- }
- }
- Assert (dist == 256, "tr_static_init: 256+dist != 512");
-
- /* Construct the codes of the static literal tree */
- for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
- n = 0;
- while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
- while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
- while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
- while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
- /* Codes 286 and 287 do not exist, but we must include them in the
- * tree construction to get a canonical Huffman tree (longest code
- * all ones)
- */
- gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
-
- /* The static distance tree is trivial: */
- for (n = 0; n < D_CODES; n++) {
- static_dtree[n].Len = 5;
- static_dtree[n].Code = bi_reverse((unsigned)n, 5);
- }
- static_init_done = 1;
-}
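
The lengths assigned above are exactly the fixed Huffman code of
RFC 1951, section 3.2.6:

    /* literals/lengths   0..143 : 8 bits   (codes 00110000..10111111)
     *                  144..255 : 9 bits
     *                  256..279 : 7 bits
     *                  280..287 : 8 bits
     * distances           0..29 : 5 bits   (codes bit-reversed above)
     */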
-
-/* ===========================================================================
- * Initialize the tree data structures for a new zlib stream.
- */
-void _tr_init(s)
- deflate_state *s;
-{
- tr_static_init();
-
- s->compressed_len = 0L;
-
- s->l_desc.dyn_tree = s->dyn_ltree;
- s->l_desc.stat_desc = &static_l_desc;
-
- s->d_desc.dyn_tree = s->dyn_dtree;
- s->d_desc.stat_desc = &static_d_desc;
-
- s->bl_desc.dyn_tree = s->bl_tree;
- s->bl_desc.stat_desc = &static_bl_desc;
-
- s->bi_buf = 0;
- s->bi_valid = 0;
- s->last_eob_len = 8; /* enough lookahead for inflate */
-#ifdef DEBUG_ZLIB
- s->bits_sent = 0L;
-#endif
-
- /* Initialize the first block of the first file: */
- init_block(s);
-}
-
-/* ===========================================================================
- * Initialize a new block.
- */
-local void init_block(s)
- deflate_state *s;
-{
- int n; /* iterates over tree elements */
-
- /* Initialize the trees. */
- for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
- for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
- for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
-
- s->dyn_ltree[END_BLOCK].Freq = 1;
- s->opt_len = s->static_len = 0L;
- s->last_lit = s->matches = 0;
-}
-
-#define SMALLEST 1
-/* Index within the heap array of least frequent node in the Huffman tree */
-
-
-/* ===========================================================================
- * Remove the smallest element from the heap and recreate the heap with
- * one less element. Updates heap and heap_len.
- */
-#define pqremove(s, tree, top) \
-{\
- top = s->heap[SMALLEST]; \
- s->heap[SMALLEST] = s->heap[s->heap_len--]; \
- pqdownheap(s, tree, SMALLEST); \
-}
-
-/* ===========================================================================
- * Compares two subtrees, using the tree depth as tie breaker when
- * the subtrees have equal frequency. This minimizes the worst case length.
- */
-#define smaller(tree, n, m, depth) \
- (tree[n].Freq < tree[m].Freq || \
- (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
-
-/* ===========================================================================
- * Restore the heap property by moving down the tree starting at node k,
- * exchanging a node with the smallest of its two sons if necessary, stopping
- * when the heap property is re-established (each father smaller than its
- * two sons).
- */
-local void pqdownheap(s, tree, k)
- deflate_state *s;
- ct_data *tree; /* the tree to restore */
- int k; /* node to move down */
-{
- int v = s->heap[k];
- int j = k << 1; /* left son of k */
- while (j <= s->heap_len) {
- /* Set j to the smallest of the two sons: */
- if (j < s->heap_len &&
- smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
- j++;
- }
- /* Exit if v is smaller than both sons */
- if (smaller(tree, v, s->heap[j], s->depth)) break;
-
- /* Exchange v with the smallest son */
- s->heap[k] = s->heap[j]; k = j;
-
- /* And continue down the tree, setting j to the left son of k */
- j <<= 1;
- }
- s->heap[k] = v;
-}
-
-/* ===========================================================================
- * Compute the optimal bit lengths for a tree and update the total bit length
- * for the current block.
- * IN assertion: the fields freq and dad are set, heap[heap_max] and
- * above are the tree nodes sorted by increasing frequency.
- * OUT assertions: the field len is set to the optimal bit length, the
- * array bl_count contains the frequencies for each bit length.
- * The length opt_len is updated; static_len is also updated if stree is
- * not null.
- */
-local void gen_bitlen(s, desc)
- deflate_state *s;
- tree_desc *desc; /* the tree descriptor */
-{
- ct_data *tree = desc->dyn_tree;
- int max_code = desc->max_code;
- ct_data *stree = desc->stat_desc->static_tree;
- intf *extra = desc->stat_desc->extra_bits;
- int base = desc->stat_desc->extra_base;
- int max_length = desc->stat_desc->max_length;
- int h; /* heap index */
- int n, m; /* iterate over the tree elements */
- int bits; /* bit length */
- int xbits; /* extra bits */
- ush f; /* frequency */
- int overflow = 0; /* number of elements with bit length too large */
-
- for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
-
- /* In a first pass, compute the optimal bit lengths (which may
- * overflow in the case of the bit length tree).
- */
- tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
-
- for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
- n = s->heap[h];
- bits = tree[tree[n].Dad].Len + 1;
- if (bits > max_length) bits = max_length, overflow++;
- tree[n].Len = (ush)bits;
- /* We overwrite tree[n].Dad which is no longer needed */
-
- if (n > max_code) continue; /* not a leaf node */
-
- s->bl_count[bits]++;
- xbits = 0;
- if (n >= base) xbits = extra[n-base];
- f = tree[n].Freq;
- s->opt_len += (ulg)f * (bits + xbits);
- if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
- }
- if (overflow == 0) return;
-
- Trace((stderr,"\nbit length overflow\n"));
- /* This happens for example on obj2 and pic of the Calgary corpus */
-
- /* Find the first bit length which could increase: */
- do {
- bits = max_length-1;
- while (s->bl_count[bits] == 0) bits--;
- s->bl_count[bits]--; /* move one leaf down the tree */
- s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
- s->bl_count[max_length]--;
- /* The brother of the overflow item also moves one step up,
- * but this does not affect bl_count[max_length]
- */
- overflow -= 2;
- } while (overflow > 0);
-
- /* Now recompute all bit lengths, scanning in increasing frequency.
- * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
- * lengths instead of fixing only the wrong ones. This idea is taken
- * from 'ar' written by Haruhiko Okumura.)
- */
- for (bits = max_length; bits != 0; bits--) {
- n = s->bl_count[bits];
- while (n != 0) {
- m = s->heap[--h];
- if (m > max_code) continue;
- if (tree[m].Len != (unsigned) bits) {
- Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
- s->opt_len += ((long)bits - (long)tree[m].Len)
- *(long)tree[m].Freq;
- tree[m].Len = (ush)bits;
- }
- n--;
- }
- }
-}
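
Concretely (editorial example): suppose max_length == 7, two codes came
out 8 bits long (overflow == 2), and the first shorter non-empty length
is 5. One pass of the repair loop does

    /*   bl_count[5]--;     one leaf moves down from depth 5,
     *   bl_count[6] += 2;  making room for two nodes at depth 6,
     *   bl_count[7]--;     while the two over-long codes settle at 7
     */

and overflow drops to 0; the scan that follows rewrites every Len field
to match the adjusted bl_count[].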
-
-/* ===========================================================================
- * Generate the codes for a given tree and bit counts (which need not be
- * optimal).
- * IN assertion: the array bl_count contains the bit length statistics for
- * the given tree and the field len is set for all tree elements.
- * OUT assertion: the field code is set for all tree elements of non
- * zero code length.
- */
-local void gen_codes (tree, max_code, bl_count)
- ct_data *tree; /* the tree to decorate */
- int max_code; /* largest code with non zero frequency */
- ushf *bl_count; /* number of codes at each bit length */
-{
- ush next_code[MAX_BITS+1]; /* next code value for each bit length */
- ush code = 0; /* running code value */
- int bits; /* bit index */
- int n; /* code index */
-
- /* The distribution counts are first used to generate the code values
- * without bit reversal.
- */
- for (bits = 1; bits <= MAX_BITS; bits++) {
- next_code[bits] = code = (code + bl_count[bits-1]) << 1;
- }
- /* Check that the bit counts in bl_count are consistent. The last code
- * must be all ones.
- */
- Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
- "inconsistent bit counts");
- Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
-
- for (n = 0; n <= max_code; n++) {
- int len = tree[n].Len;
- if (len == 0) continue;
- /* Now reverse the bits */
- tree[n].Code = bi_reverse(next_code[len]++, len);
-
- Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
- n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
- }
-}
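
A worked example of the canonical numbering: four symbols with lengths
A:2, B:1, C:3, D:3 give bl_count[] = {0, 1, 1, 2}, so

    /* next_code[1] = (0 + bl_count[0]) << 1 = 0
     * next_code[2] = (0 + bl_count[1]) << 1 = 2
     * next_code[3] = (2 + bl_count[2]) << 1 = 6
     *
     * B = 0, A = 10, C = 110, D = 111   (before bi_reverse)
     */

and the last code is all ones, as the Assert above demands.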
-
-/* ===========================================================================
- * Construct one Huffman tree and assign the code bit strings and lengths.
- * Update the total bit length for the current block.
- * IN assertion: the field freq is set for all tree elements.
- * OUT assertions: the fields len and code are set to the optimal bit length
- * and corresponding code. The length opt_len is updated; static_len is
- * also updated if stree is not null. The field max_code is set.
- */
-local void build_tree(s, desc)
- deflate_state *s;
- tree_desc *desc; /* the tree descriptor */
-{
- ct_data *tree = desc->dyn_tree;
- ct_data *stree = desc->stat_desc->static_tree;
- int elems = desc->stat_desc->elems;
- int n, m; /* iterate over heap elements */
- int max_code = -1; /* largest code with non zero frequency */
- int node; /* new node being created */
-
- /* Construct the initial heap, with least frequent element in
- * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
- * heap[0] is not used.
- */
- s->heap_len = 0, s->heap_max = HEAP_SIZE;
-
- for (n = 0; n < elems; n++) {
- if (tree[n].Freq != 0) {
- s->heap[++(s->heap_len)] = max_code = n;
- s->depth[n] = 0;
- } else {
- tree[n].Len = 0;
- }
- }
-
- /* The pkzip format requires that at least one distance code exists,
- * and that at least one bit should be sent even if there is only one
- * possible code. So to avoid special checks later on we force at least
- * two codes of non zero frequency.
- */
- while (s->heap_len < 2) {
- node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
- tree[node].Freq = 1;
- s->depth[node] = 0;
- s->opt_len--; if (stree) s->static_len -= stree[node].Len;
- /* node is 0 or 1 so it does not have extra bits */
- }
- desc->max_code = max_code;
-
- /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
- * establish sub-heaps of increasing lengths:
- */
- for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
-
- /* Construct the Huffman tree by repeatedly combining the least two
- * frequent nodes.
- */
- node = elems; /* next internal node of the tree */
- do {
- pqremove(s, tree, n); /* n = node of least frequency */
- m = s->heap[SMALLEST]; /* m = node of next least frequency */
-
- s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
- s->heap[--(s->heap_max)] = m;
-
- /* Create a new node father of n and m */
- tree[node].Freq = tree[n].Freq + tree[m].Freq;
- s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1);
- tree[n].Dad = tree[m].Dad = (ush)node;
-#ifdef DUMP_BL_TREE
- if (tree == s->bl_tree) {
- fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
- node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
- }
-#endif
- /* and insert the new node in the heap */
- s->heap[SMALLEST] = node++;
- pqdownheap(s, tree, SMALLEST);
-
- } while (s->heap_len >= 2);
-
- s->heap[--(s->heap_max)] = s->heap[SMALLEST];
-
- /* At this point, the fields freq and dad are set. We can now
- * generate the bit lengths.
- */
- gen_bitlen(s, (tree_desc *)desc);
-
- /* The field len is now set, we can generate the bit codes */
- gen_codes ((ct_data *)tree, max_code, s->bl_count);
-}
-
-/* ===========================================================================
- * Scan a literal or distance tree to determine the frequencies of the codes
- * in the bit length tree.
- */
-local void scan_tree (s, tree, max_code)
- deflate_state *s;
- ct_data *tree; /* the tree to be scanned */
- int max_code; /* and its largest code of non zero frequency */
-{
- int n; /* iterates over all tree elements */
- int prevlen = -1; /* last emitted length */
- int curlen; /* length of current code */
- int nextlen = tree[0].Len; /* length of next code */
- int count = 0; /* repeat count of the current code */
- int max_count = 7; /* max repeat count */
- int min_count = 4; /* min repeat count */
-
- if (nextlen == 0) max_count = 138, min_count = 3;
- tree[max_code+1].Len = (ush)0xffff; /* guard */
-
- for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
- if (++count < max_count && curlen == nextlen) {
- continue;
- } else if (count < min_count) {
- s->bl_tree[curlen].Freq += count;
- } else if (curlen != 0) {
- if (curlen != prevlen) s->bl_tree[curlen].Freq++;
- s->bl_tree[REP_3_6].Freq++;
- } else if (count <= 10) {
- s->bl_tree[REPZ_3_10].Freq++;
- } else {
- s->bl_tree[REPZ_11_138].Freq++;
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0) {
- max_count = 138, min_count = 3;
- } else if (curlen == nextlen) {
- max_count = 6, min_count = 3;
- } else {
- max_count = 7, min_count = 4;
- }
- }
-}
-
-/* ===========================================================================
- * Send a literal or distance tree in compressed form, using the codes in
- * bl_tree.
- */
-local void send_tree (s, tree, max_code)
- deflate_state *s;
- ct_data *tree; /* the tree to be scanned */
- int max_code; /* and its largest code of non zero frequency */
-{
- int n; /* iterates over all tree elements */
- int prevlen = -1; /* last emitted length */
- int curlen; /* length of current code */
- int nextlen = tree[0].Len; /* length of next code */
- int count = 0; /* repeat count of the current code */
- int max_count = 7; /* max repeat count */
- int min_count = 4; /* min repeat count */
-
- /* tree[max_code+1].Len = -1; */ /* guard already set */
- if (nextlen == 0) max_count = 138, min_count = 3;
-
- for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
- if (++count < max_count && curlen == nextlen) {
- continue;
- } else if (count < min_count) {
- do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
-
- } else if (curlen != 0) {
- if (curlen != prevlen) {
- send_code(s, curlen, s->bl_tree); count--;
- }
- Assert(count >= 3 && count <= 6, " 3_6?");
- send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
-
- } else if (count <= 10) {
- send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
-
- } else {
- send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0) {
- max_count = 138, min_count = 3;
- } else if (curlen == nextlen) {
- max_count = 6, min_count = 3;
- } else {
- max_count = 7, min_count = 4;
- }
- }
-}
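
Worked example of the run-length scheme shared by scan_tree() and
send_tree(): the code-length sequence 4 4 4 4 4 0 0 0 0 0 0 0 goes out
as only three bl_tree symbols:

    /*   '4'              the length itself, sent once
     *   REP_3_6,   1     2 extra bits: repeat '4' four more times
     *   REPZ_3_10, 4     3 extra bits: a run of seven zero lengths
     */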
-
-/* ===========================================================================
- * Construct the Huffman tree for the bit lengths and return the index in
- * bl_order of the last bit length code to send.
- */
-local int build_bl_tree(s)
- deflate_state *s;
-{
- int max_blindex; /* index of last bit length code of non zero freq */
-
- /* Determine the bit length frequencies for literal and distance trees */
- scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
- scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
-
- /* Build the bit length tree: */
- build_tree(s, (tree_desc *)(&(s->bl_desc)));
- /* opt_len now includes the length of the tree representations, except
- * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
- */
-
- /* Determine the number of bit length codes to send. The pkzip format
- * requires that at least 4 bit length codes be sent. (appnote.txt says
- * 3 but the actual value used is 4.)
- */
- for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
- if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
- }
- /* Update opt_len to include the bit length tree and counts */
- s->opt_len += 3*(max_blindex+1) + 5+5+4;
- Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
- s->opt_len, s->static_len));
-
- return max_blindex;
-}
-
-/* ===========================================================================
- * Send the header for a block using dynamic Huffman trees: the counts, the
- * lengths of the bit length codes, the literal tree and the distance tree.
- * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
- */
-local void send_all_trees(s, lcodes, dcodes, blcodes)
- deflate_state *s;
- int lcodes, dcodes, blcodes; /* number of codes for each tree */
-{
- int rank; /* index in bl_order */
-
- Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
- Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
- "too many codes");
- Tracev((stderr, "\nbl counts: "));
- send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
- send_bits(s, dcodes-1, 5);
- send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
- for (rank = 0; rank < blcodes; rank++) {
- Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
- send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
- }
- Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
-
- send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
- Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
-
- send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
- Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
-}
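
The three counts sent above form the dynamic-block tree header of
RFC 1951:

    /*   5 bits  HLIT  = lcodes - 257
     *   5 bits  HDIST = dcodes - 1
     *   4 bits  HCLEN = blcodes - 4
     *   then 3 bits per bit-length-code length, in bl_order[]
     *
     * e.g. lcodes == 280, dcodes == 20, blcodes == 15 is sent as
     * HLIT == 23, HDIST == 19, HCLEN == 11.
     */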
-
-/* ===========================================================================
- * Send a stored block
- */
-void _tr_stored_block(s, buf, stored_len, eof)
- deflate_state *s;
- charf *buf; /* input block */
- ulg stored_len; /* length of input block */
- int eof; /* true if this is the last block for a file */
-{
- send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
- s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
- s->compressed_len += (stored_len + 4) << 3;
-
- copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
-}
-
-/* Send just the `stored block' type code without any length bytes or data.
- */
-void _tr_stored_type_only(s)
- deflate_state *s;
-{
- send_bits(s, (STORED_BLOCK << 1), 3);
- bi_windup(s);
- s->compressed_len = (s->compressed_len + 3) & ~7L;
-}
-
-
-/* ===========================================================================
- * Send one empty static block to give enough lookahead for inflate.
- * This takes 10 bits, of which 7 may remain in the bit buffer.
- * The current inflate code requires 9 bits of lookahead. If the
- * last two codes for the previous block (real code plus EOB) were coded
- * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
- * the last real code. In this case we send two empty static blocks instead
- * of one. (There are no problems if the previous block is stored or fixed.)
- * To simplify the code, we assume the worst case of last real code encoded
- * on one bit only.
- */
-void _tr_align(s)
- deflate_state *s;
-{
- send_bits(s, STATIC_TREES<<1, 3);
- send_code(s, END_BLOCK, static_ltree);
- s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
- bi_flush(s);
- /* Of the 10 bits for the empty block, we have already sent
- * (10 - bi_valid) bits. The lookahead for the last real code (before
- * the EOB of the previous block) was thus at least one plus the length
- * of the EOB plus what we have just sent of the empty static block.
- */
- if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
- send_bits(s, STATIC_TREES<<1, 3);
- send_code(s, END_BLOCK, static_ltree);
- s->compressed_len += 10L;
- bi_flush(s);
- }
- s->last_eob_len = 7;
-}
-
-/* ===========================================================================
- * Determine the best encoding for the current block: dynamic trees, static
- * trees or store, and output the encoded block to the zip file. This function
- * returns the total compressed length for the file so far.
- */
-ulg _tr_flush_block(s, buf, stored_len, eof)
- deflate_state *s;
- charf *buf; /* input block, or NULL if too old */
- ulg stored_len; /* length of input block */
- int eof; /* true if this is the last block for a file */
-{
- ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
- int max_blindex = 0; /* index of last bit length code of non zero freq */
-
- /* Build the Huffman trees unless a stored block is forced */
- if (s->level > 0) {
-
- /* Check if the file is ascii or binary */
- if (s->data_type == Z_UNKNOWN) set_data_type(s);
-
- /* Construct the literal and distance trees */
- build_tree(s, (tree_desc *)(&(s->l_desc)));
- Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
- s->static_len));
-
- build_tree(s, (tree_desc *)(&(s->d_desc)));
- Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
- s->static_len));
- /* At this point, opt_len and static_len are the total bit lengths of
- * the compressed block data, excluding the tree representations.
- */
-
- /* Build the bit length tree for the above two trees, and get the index
- * in bl_order of the last bit length code to send.
- */
- max_blindex = build_bl_tree(s);
-
- /* Determine the best encoding. Compute first the block length in bytes*/
- opt_lenb = (s->opt_len+3+7)>>3;
- static_lenb = (s->static_len+3+7)>>3;
-
- Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
- opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
- s->last_lit));
-
- if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
-
- } else {
- Assert(buf != (char*)0, "lost buf");
- opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
- }
-
- /* If compression failed and this is the first and last block,
-     * and if the .zip file is seekable (so the local header can be rewritten),
- * the whole file is transformed into a stored file:
- */
-#ifdef STORED_FILE_OK
-# ifdef FORCE_STORED_FILE
- if (eof && s->compressed_len == 0L) { /* force stored file */
-# else
- if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
-# endif
- /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
- if (buf == (charf*)0) error ("block vanished");
-
- copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
- s->compressed_len = stored_len << 3;
- s->method = STORED;
- } else
-#endif /* STORED_FILE_OK */
-
-#ifdef FORCE_STORED
- if (buf != (char*)0) { /* force stored block */
-#else
- if (stored_len+4 <= opt_lenb && buf != (char*)0) {
- /* 4: two words for the lengths */
-#endif
- /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
- * Otherwise we can't have processed more than WSIZE input bytes since
- * the last block flush, because compression would have been
- * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
- * transform a block into a stored block.
- */
- _tr_stored_block(s, buf, stored_len, eof);
-
-#ifdef FORCE_STATIC
- } else if (static_lenb >= 0) { /* force static trees */
-#else
- } else if (static_lenb == opt_lenb) {
-#endif
- send_bits(s, (STATIC_TREES<<1)+eof, 3);
- compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
- s->compressed_len += 3 + s->static_len;
- } else {
- send_bits(s, (DYN_TREES<<1)+eof, 3);
- send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
- max_blindex+1);
- compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
- s->compressed_len += 3 + s->opt_len;
- }
- Assert (s->compressed_len == s->bits_sent, "bad compressed size");
- init_block(s);
-
- if (eof) {
- bi_windup(s);
- s->compressed_len += 7; /* align on byte boundary */
- }
- Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
- s->compressed_len-7*eof));
-
- return s->compressed_len >> 3;
-}
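-
-/* Illustration (not from zlib): the stored/static/dynamic decision above
- * reduces to a three-way comparison of byte counts.  A minimal sketch of
- * that logic; the function name and test values are invented, and the
- * real code additionally requires buf != NULL for a stored block.
- */
-#if 0 /* illustrative only */
-#include <stdio.h>
-
-static const char *pick_block_type(unsigned long stored_len,
-                                   unsigned long opt_lenb,
-                                   unsigned long static_lenb)
-{
-    if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
-    if (stored_len + 4 <= opt_lenb)     /* 4: two words for the lengths */
-        return "stored";
-    if (static_lenb == opt_lenb)
-        return "static";
-    return "dynamic";
-}
-
-int main(void)
-{
-    printf("%s\n", pick_block_type(100UL, 90UL, 95UL)); /* prints dynamic */
-    return 0;
-}
-#endif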
-
-/* ===========================================================================
- * Save the match info and tally the frequency counts. Return true if
- * the current block must be flushed.
- */
-int _tr_tally (s, dist, lc)
- deflate_state *s;
- unsigned dist; /* distance of matched string */
- unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
-{
- s->d_buf[s->last_lit] = (ush)dist;
- s->l_buf[s->last_lit++] = (uch)lc;
- if (dist == 0) {
- /* lc is the unmatched char */
- s->dyn_ltree[lc].Freq++;
- } else {
- s->matches++;
- /* Here, lc is the match length - MIN_MATCH */
- dist--; /* dist = match distance - 1 */
- Assert((ush)dist < (ush)MAX_DIST(s) &&
- (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
- (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
-
- s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
- s->dyn_dtree[d_code(dist)].Freq++;
- }
-
- /* Try to guess if it is profitable to stop the current block here */
- if (s->level > 2 && (s->last_lit & 0xfff) == 0) {
- /* Compute an upper bound for the compressed length */
- ulg out_length = (ulg)s->last_lit*8L;
- ulg in_length = (ulg)((long)s->strstart - s->block_start);
- int dcode;
- for (dcode = 0; dcode < D_CODES; dcode++) {
- out_length += (ulg)s->dyn_dtree[dcode].Freq *
- (5L+extra_dbits[dcode]);
- }
- out_length >>= 3;
- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
- s->last_lit, in_length, out_length,
- 100L - out_length*100L/in_length));
- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
- }
- return (s->last_lit == s->lit_bufsize-1);
- /* We avoid equality with lit_bufsize because of wraparound at 64K
- * on 16 bit machines and because stored blocks are restricted to
- * 64K-1 bytes.
- */
-}
-
-/* ===========================================================================
- * Send the block data compressed using the given Huffman trees
- */
-local void compress_block(s, ltree, dtree)
- deflate_state *s;
- ct_data *ltree; /* literal tree */
- ct_data *dtree; /* distance tree */
-{
- unsigned dist; /* distance of matched string */
- int lc; /* match length or unmatched char (if dist == 0) */
- unsigned lx = 0; /* running index in l_buf */
- unsigned code; /* the code to send */
- int extra; /* number of extra bits to send */
-
- if (s->last_lit != 0) do {
- dist = s->d_buf[lx];
- lc = s->l_buf[lx++];
- if (dist == 0) {
- send_code(s, lc, ltree); /* send a literal byte */
- Tracecv(isgraph(lc), (stderr," '%c' ", lc));
- } else {
- /* Here, lc is the match length - MIN_MATCH */
- code = length_code[lc];
- send_code(s, code+LITERALS+1, ltree); /* send the length code */
- extra = extra_lbits[code];
- if (extra != 0) {
- lc -= base_length[code];
- send_bits(s, lc, extra); /* send the extra length bits */
- }
- dist--; /* dist is now the match distance - 1 */
- code = d_code(dist);
- Assert (code < D_CODES, "bad d_code");
-
- send_code(s, code, dtree); /* send the distance code */
- extra = extra_dbits[code];
- if (extra != 0) {
- dist -= base_dist[code];
- send_bits(s, dist, extra); /* send the extra distance bits */
- }
- } /* literal or match pair ? */
-
- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
- Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
-
- } while (lx < s->last_lit);
-
- send_code(s, END_BLOCK, ltree);
- s->last_eob_len = ltree[END_BLOCK].Len;
-}
-
-/* ===========================================================================
- * Set the data type to ASCII or BINARY, using a crude approximation:
- * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
- * IN assertion: the fields freq of dyn_ltree are set and the total of all
- * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
- */
-local void set_data_type(s)
- deflate_state *s;
-{
- int n = 0;
- unsigned ascii_freq = 0;
- unsigned bin_freq = 0;
- while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
- while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
- while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
- s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
-}
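-
-/* Note (not from zlib): the "20%" in the comment above and the
- * bin_freq > (ascii_freq >> 2) test agree, up to the truncation of the
- * shift: with only two buckets, bin > ascii/4 is the same as
- * bin > (bin + ascii)/5, i.e. more than 20% of all bytes.  Standalone
- * check with invented counts:
- */
-#if 0 /* illustrative only */
-#include <stdio.h>
-
-int main(void)
-{
-    unsigned bin_freq = 21, ascii_freq = 79;   /* 21 of 100 bytes "binary" */
-    printf("%s\n", bin_freq > (ascii_freq >> 2) ? "Z_BINARY" : "Z_ASCII");
-    return 0;                                  /* prints Z_BINARY */
-}
-#endif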
-
-/* ===========================================================================
- * Reverse the first len bits of a code, using straightforward code (a faster
- * method would use a table)
- * IN assertion: 1 <= len <= 15
- */
-local unsigned bi_reverse(code, len)
- unsigned code; /* the value to invert */
- int len; /* its bit length */
-{
- register unsigned res = 0;
- do {
- res |= code & 1;
- code >>= 1, res <<= 1;
- } while (--len > 0);
- return res >> 1;
-}
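-
-/* Illustration (not from zlib): the "faster method" using a table, as the
- * comment above suggests.  One 256-entry byte-reversal table handles any
- * 1 <= len <= 15 with two lookups and a shift; all names here are
- * invented for the sketch.
- */
-#if 0 /* illustrative only */
-static unsigned char rev8[256];
-
-static void rev8_init(void)
-{
-    int i, j;
-    for (i = 0; i < 256; i++) {
-        unsigned r = 0;
-        for (j = 0; j < 8; j++)
-            if (i & (1 << j)) r |= 1 << (7 - j);
-        rev8[i] = (unsigned char)r;
-    }
-}
-
-static unsigned bi_reverse_fast(unsigned code, int len)
-{
-    unsigned r;
-    code &= (1U << len) - 1;           /* keep only the low len bits */
-    r = ((unsigned)rev8[code & 0xff] << 8) | rev8[(code >> 8) & 0xff];
-    return r >> (16 - len);            /* reversed 16-bit value, trimmed */
-}
-#endif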
-
-/* ===========================================================================
- * Flush the bit buffer, keeping at most 7 bits in it.
- */
-local void bi_flush(s)
- deflate_state *s;
-{
- if (s->bi_valid == 16) {
- put_short(s, s->bi_buf);
- s->bi_buf = 0;
- s->bi_valid = 0;
- } else if (s->bi_valid >= 8) {
- put_byte(s, (Byte)s->bi_buf);
- s->bi_buf >>= 8;
- s->bi_valid -= 8;
- }
-}
-
-/* ===========================================================================
- * Flush the bit buffer and align the output on a byte boundary
- */
-local void bi_windup(s)
- deflate_state *s;
-{
- if (s->bi_valid > 8) {
- put_short(s, s->bi_buf);
- } else if (s->bi_valid > 0) {
- put_byte(s, (Byte)s->bi_buf);
- }
- s->bi_buf = 0;
- s->bi_valid = 0;
-#ifdef DEBUG_ZLIB
- s->bits_sent = (s->bits_sent+7) & ~7;
-#endif
-}
-
-/* ===========================================================================
- * Copy a stored block, storing first the length and its
- * one's complement if requested.
- */
-local void copy_block(s, buf, len, header)
- deflate_state *s;
- charf *buf; /* the input data */
- unsigned len; /* its length */
- int header; /* true if block header must be written */
-{
- bi_windup(s); /* align on byte boundary */
- s->last_eob_len = 8; /* enough lookahead for inflate */
-
- if (header) {
- put_short(s, (ush)len);
- put_short(s, (ush)~len);
-#ifdef DEBUG_ZLIB
- s->bits_sent += 2*16;
-#endif
- }
-#ifdef DEBUG_ZLIB
- s->bits_sent += (ulg)len<<3;
-#endif
- /* bundle up the put_byte(s, *buf++) calls */
- zmemcpy(&s->pending_buf[s->pending], buf, len);
- s->pending += len;
-}
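-
-/* Illustration (not from zlib): the stored-block header written above is
- * just LEN and its one's complement NLEN, both little-endian -- exactly
- * what the LENS check in infblock.c later verifies.  Standalone demo with
- * an invented helper name:
- */
-#if 0 /* illustrative only */
-#include <stdio.h>
-
-static void put_stored_header(unsigned char *out, unsigned len)
-{
-    out[0] = (unsigned char)(len & 0xff);          /* LEN, low byte first */
-    out[1] = (unsigned char)((len >> 8) & 0xff);
-    out[2] = (unsigned char)(~len & 0xff);         /* NLEN = one's complement */
-    out[3] = (unsigned char)((~len >> 8) & 0xff);
-}
-
-int main(void)
-{
-    unsigned char h[4];
-    put_stored_header(h, 1000);
-    printf("%02x %02x %02x %02x\n", h[0], h[1], h[2], h[3]);
-    return 0;                                      /* prints e8 03 17 fc */
-}
-#endif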
-/* --- trees.c */
-
-/* +++ inflate.c */
-/* inflate.c -- zlib interface to inflate modules
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-
-/* +++ infblock.h */
-/* infblock.h -- header to use infblock.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-struct inflate_blocks_state;
-typedef struct inflate_blocks_state FAR inflate_blocks_statef;
-
-extern inflate_blocks_statef * inflate_blocks_new OF((
- z_streamp z,
- check_func c, /* check function */
- uInt w)); /* window size */
-
-extern int inflate_blocks OF((
- inflate_blocks_statef *,
- z_streamp ,
- int)); /* initial return code */
-
-extern void inflate_blocks_reset OF((
- inflate_blocks_statef *,
- z_streamp ,
- uLongf *)); /* check value on output */
-
-extern int inflate_blocks_free OF((
- inflate_blocks_statef *,
- z_streamp ,
- uLongf *)); /* check value on output */
-
-extern void inflate_set_dictionary OF((
- inflate_blocks_statef *s,
- const Bytef *d, /* dictionary */
- uInt n)); /* dictionary length */
-
-extern int inflate_addhistory OF((
- inflate_blocks_statef *,
- z_streamp));
-
-extern int inflate_packet_flush OF((
- inflate_blocks_statef *));
-/* --- infblock.h */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_blocks_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* inflate private state */
-struct internal_state {
-
- /* mode */
- enum {
- METHOD, /* waiting for method byte */
- FLAG, /* waiting for flag byte */
- DICT4, /* four dictionary check bytes to go */
- DICT3, /* three dictionary check bytes to go */
- DICT2, /* two dictionary check bytes to go */
- DICT1, /* one dictionary check byte to go */
- DICT0, /* waiting for inflateSetDictionary */
- BLOCKS, /* decompressing blocks */
- CHECK4, /* four check bytes to go */
- CHECK3, /* three check bytes to go */
- CHECK2, /* two check bytes to go */
- CHECK1, /* one check byte to go */
- DONE, /* finished check, done */
- BAD} /* got an error--stay here */
- mode; /* current inflate mode */
-
- /* mode dependent information */
- union {
- uInt method; /* if FLAGS, method byte */
- struct {
- uLong was; /* computed check value */
- uLong need; /* stream check value */
- } check; /* if CHECK, check values to compare */
- uInt marker; /* if BAD, inflateSync's marker bytes count */
- } sub; /* submode */
-
- /* mode independent information */
- int nowrap; /* flag for no wrapper */
- uInt wbits; /* log2(window size) (8..15, defaults to 15) */
- inflate_blocks_statef
- *blocks; /* current inflate_blocks state */
-
-};
-
-
-int inflateReset(z)
-z_streamp z;
-{
- uLong c;
-
- if (z == Z_NULL || z->state == Z_NULL)
- return Z_STREAM_ERROR;
- z->total_in = z->total_out = 0;
- z->msg = Z_NULL;
- z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
- inflate_blocks_reset(z->state->blocks, z, &c);
- Trace((stderr, "inflate: reset\n"));
- return Z_OK;
-}
-
-
-int inflateEnd(z)
-z_streamp z;
-{
- uLong c;
-
- if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
- return Z_STREAM_ERROR;
- if (z->state->blocks != Z_NULL)
- inflate_blocks_free(z->state->blocks, z, &c);
- ZFREE(z, z->state);
- z->state = Z_NULL;
- Trace((stderr, "inflate: end\n"));
- return Z_OK;
-}
-
-
-int inflateInit2_(z, w, version, stream_size)
-z_streamp z;
-int w;
-const char *version;
-int stream_size;
-{
- if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
- stream_size != sizeof(z_stream))
- return Z_VERSION_ERROR;
-
- /* initialize state */
- if (z == Z_NULL)
- return Z_STREAM_ERROR;
- z->msg = Z_NULL;
-#ifndef NO_ZCFUNCS
- if (z->zalloc == Z_NULL)
- {
- z->zalloc = zcalloc;
- z->opaque = (voidpf)0;
- }
- if (z->zfree == Z_NULL) z->zfree = zcfree;
-#endif
- if ((z->state = (struct internal_state FAR *)
- ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
- return Z_MEM_ERROR;
- z->state->blocks = Z_NULL;
-
- /* handle undocumented nowrap option (no zlib header or check) */
- z->state->nowrap = 0;
- if (w < 0)
- {
- w = - w;
- z->state->nowrap = 1;
- }
-
- /* set window size */
- if (w < 8 || w > 15)
- {
- inflateEnd(z);
- return Z_STREAM_ERROR;
- }
- z->state->wbits = (uInt)w;
-
- /* create inflate_blocks state */
- if ((z->state->blocks =
- inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, (uInt)1 << w))
- == Z_NULL)
- {
- inflateEnd(z);
- return Z_MEM_ERROR;
- }
- Trace((stderr, "inflate: allocated\n"));
-
- /* reset state */
- inflateReset(z);
- return Z_OK;
-}
-
-
-int inflateInit_(z, version, stream_size)
-z_streamp z;
-const char *version;
-int stream_size;
-{
- return inflateInit2_(z, DEF_WBITS, version, stream_size);
-}
-
-
-#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
-#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
-
-int inflate(z, f)
-z_streamp z;
-int f;
-{
- int r;
- uInt b;
-
- if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL || f < 0)
- return Z_STREAM_ERROR;
- r = Z_BUF_ERROR;
- while (1) switch (z->state->mode)
- {
- case METHOD:
- NEEDBYTE
- if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED)
- {
- z->state->mode = BAD;
- z->msg = (char*)"unknown compression method";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
- {
- z->state->mode = BAD;
- z->msg = (char*)"invalid window size";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- z->state->mode = FLAG;
- case FLAG:
- NEEDBYTE
- b = NEXTBYTE;
- if (((z->state->sub.method << 8) + b) % 31)
- {
- z->state->mode = BAD;
- z->msg = (char*)"incorrect header check";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- Trace((stderr, "inflate: zlib header ok\n"));
- if (!(b & PRESET_DICT))
- {
- z->state->mode = BLOCKS;
- break;
- }
- z->state->mode = DICT4;
- case DICT4:
- NEEDBYTE
- z->state->sub.check.need = (uLong)NEXTBYTE << 24;
- z->state->mode = DICT3;
- case DICT3:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 16;
- z->state->mode = DICT2;
- case DICT2:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 8;
- z->state->mode = DICT1;
- case DICT1:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE;
- z->adler = z->state->sub.check.need;
- z->state->mode = DICT0;
- return Z_NEED_DICT;
- case DICT0:
- z->state->mode = BAD;
- z->msg = (char*)"need dictionary";
- z->state->sub.marker = 0; /* can try inflateSync */
- return Z_STREAM_ERROR;
- case BLOCKS:
- r = inflate_blocks(z->state->blocks, z, r);
- if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
- r = inflate_packet_flush(z->state->blocks);
- if (r == Z_DATA_ERROR)
- {
- z->state->mode = BAD;
- z->state->sub.marker = 0; /* can try inflateSync */
- break;
- }
- if (r != Z_STREAM_END)
- return r;
- r = Z_OK;
- inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
- if (z->state->nowrap)
- {
- z->state->mode = DONE;
- break;
- }
- z->state->mode = CHECK4;
- case CHECK4:
- NEEDBYTE
- z->state->sub.check.need = (uLong)NEXTBYTE << 24;
- z->state->mode = CHECK3;
- case CHECK3:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 16;
- z->state->mode = CHECK2;
- case CHECK2:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 8;
- z->state->mode = CHECK1;
- case CHECK1:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE;
-
- if (z->state->sub.check.was != z->state->sub.check.need)
- {
- z->state->mode = BAD;
- z->msg = (char*)"incorrect data check";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- Trace((stderr, "inflate: zlib check ok\n"));
- z->state->mode = DONE;
- case DONE:
- return Z_STREAM_END;
- case BAD:
- return Z_DATA_ERROR;
- default:
- return Z_STREAM_ERROR;
- }
-
- empty:
- if (f != Z_PACKET_FLUSH)
- return r;
- z->state->mode = BAD;
- z->msg = (char *)"need more for packet flush";
- z->state->sub.marker = 0; /* can try inflateSync */
- return Z_DATA_ERROR;
-}
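-
-/* Illustration (not from zlib): what the METHOD/FLAG states above accept.
- * CMF*256+FLG must be a multiple of 31, the low CMF nibble must be 8
- * (deflate), and the high nibble plus 8 gives log2 of the window size.
- * Standalone sketch; the helper name is invented, and the fixed limit of
- * 15 stands in for the stream's configured wbits.
- */
-#if 0 /* illustrative only */
-#include <stdio.h>
-
-static int zlib_header_ok(unsigned char cmf, unsigned char flg)
-{
-    if ((cmf & 0x0f) != 8) return 0;              /* Z_DEFLATED */
-    if ((cmf >> 4) + 8 > 15) return 0;            /* window size <= 32K */
-    return ((cmf << 8) + flg) % 31 == 0;          /* header check */
-}
-
-int main(void)
-{
-    /* 0x78 0x9c is the common zlib header: 32K window, default level */
-    printf("%d\n", zlib_header_ok(0x78, 0x9c));   /* prints 1 */
-    return 0;
-}
-#endif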
-
-
-int inflateSetDictionary(z, dictionary, dictLength)
-z_streamp z;
-const Bytef *dictionary;
-uInt dictLength;
-{
- uInt length = dictLength;
-
- if (z == Z_NULL || z->state == Z_NULL || z->state->mode != DICT0)
- return Z_STREAM_ERROR;
-
- if (adler32(1L, dictionary, dictLength) != z->adler) return Z_DATA_ERROR;
- z->adler = 1L;
-
- if (length >= ((uInt)1<<z->state->wbits))
- {
- length = (1<<z->state->wbits)-1;
- dictionary += dictLength - length;
- }
- inflate_set_dictionary(z->state->blocks, dictionary, length);
- z->state->mode = BLOCKS;
- return Z_OK;
-}
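-
-/* Note (not from this file): the DICT states in inflate() surface to
- * callers as Z_NEED_DICT, at which point z->adler holds the Adler-32 of
- * the dictionary the stream expects.  A sketch of the usual calling
- * pattern; the wrapper name is invented, and dict/dictlen are assumed to
- * match what the compressor used.
- */
-#if 0 /* illustrative only */
-static int inflate_with_dict(z_streamp strm, const Bytef *dict, uInt dictlen)
-{
-    int ret = inflate(strm, Z_NO_FLUSH);
-    if (ret == Z_NEED_DICT) {
-        ret = inflateSetDictionary(strm, dict, dictlen);
-        if (ret == Z_OK)                /* checksum matched; resume */
-            ret = inflate(strm, Z_NO_FLUSH);
-    }
-    return ret;
-}
-#endif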
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (hence s->read equals s->write), and the state must
- * be BLOCKS (i.e. we should be willing to see the start of a series of
- * BLOCKS). On exit, the output will also be caught up, and the checksum
- * will have been updated if need be.
- */
-
-int inflateIncomp(z)
-z_stream *z;
-{
- if (z->state->mode != BLOCKS)
- return Z_DATA_ERROR;
- return inflate_addhistory(z->state->blocks, z);
-}
-
-
-int inflateSync(z)
-z_streamp z;
-{
- uInt n; /* number of bytes to look at */
- Bytef *p; /* pointer to bytes */
- uInt m; /* number of marker bytes found in a row */
- uLong r, w; /* temporaries to save total_in and total_out */
-
- /* set up */
- if (z == Z_NULL || z->state == Z_NULL)
- return Z_STREAM_ERROR;
- if (z->state->mode != BAD)
- {
- z->state->mode = BAD;
- z->state->sub.marker = 0;
- }
- if ((n = z->avail_in) == 0)
- return Z_BUF_ERROR;
- p = z->next_in;
- m = z->state->sub.marker;
-
- /* search */
- while (n && m < 4)
- {
- if (*p == (Byte)(m < 2 ? 0 : 0xff))
- m++;
- else if (*p)
- m = 0;
- else
- m = 4 - m;
- p++, n--;
- }
-
- /* restore */
- z->total_in += p - z->next_in;
- z->next_in = p;
- z->avail_in = n;
- z->state->sub.marker = m;
-
- /* return no joy or set up to restart on a new block */
- if (m != 4)
- return Z_DATA_ERROR;
- r = z->total_in; w = z->total_out;
- inflateReset(z);
- z->total_in = r; z->total_out = w;
- z->state->mode = BLOCKS;
- return Z_OK;
-}
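-
-/* Illustration (not from zlib): the pattern inflateSync hunts for is
- * 00 00 FF FF -- the LEN/NLEN bytes of an empty stored block.  A
- * stripped-down scanner with the same state rules as the loop above
- * (invented name, illustrative only):
- */
-#if 0
-#include <stddef.h>
-
-/* returns offset just past the marker, or -1 if not found */
-static long find_sync_marker(const unsigned char *p, size_t n)
-{
-    size_t i;
-    unsigned m = 0;                   /* marker bytes matched so far */
-    for (i = 0; i < n; i++) {
-        if (p[i] == (m < 2 ? 0x00 : 0xff))
-            m++;
-        else if (p[i])
-            m = 0;                    /* non-zero byte: restart */
-        else
-            m = 4 - m;                /* zero byte keeps credit for a 00 00 run */
-        if (m == 4)
-            return (long)(i + 1);
-    }
-    return -1;
-}
-#endif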
-
-#undef NEEDBYTE
-#undef NEXTBYTE
-/* --- inflate.c */
-
-/* +++ infblock.c */
-/* infblock.c -- interpret and process block types to last block
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "infblock.h" */
-
-/* +++ inftrees.h */
-/* inftrees.h -- header to use inftrees.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* Huffman code lookup table entry--this entry is four bytes for machines
-   that have 16-bit pointers (e.g. PCs in the small or medium model). */
-
-typedef struct inflate_huft_s FAR inflate_huft;
-
-struct inflate_huft_s {
- union {
- struct {
- Byte Exop; /* number of extra bits or operation */
- Byte Bits; /* number of bits in this code or subcode */
- } what;
- Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
- } word; /* 16-bit, 8 bytes for 32-bit machines) */
- union {
- uInt Base; /* literal, length base, or distance base */
- inflate_huft *Next; /* pointer to next level of table */
- } more;
-};
-
-#ifdef DEBUG_ZLIB
- extern uInt inflate_hufts;
-#endif
-
-extern int inflate_trees_bits OF((
- uIntf *, /* 19 code lengths */
- uIntf *, /* bits tree desired/actual depth */
- inflate_huft * FAR *, /* bits tree result */
- z_streamp )); /* for zalloc, zfree functions */
-
-extern int inflate_trees_dynamic OF((
- uInt, /* number of literal/length codes */
- uInt, /* number of distance codes */
- uIntf *, /* that many (total) code lengths */
- uIntf *, /* literal desired/actual bit depth */
- uIntf *, /* distance desired/actual bit depth */
- inflate_huft * FAR *, /* literal/length tree result */
- inflate_huft * FAR *, /* distance tree result */
- z_streamp )); /* for zalloc, zfree functions */
-
-extern int inflate_trees_fixed OF((
- uIntf *, /* literal desired/actual bit depth */
- uIntf *, /* distance desired/actual bit depth */
- inflate_huft * FAR *, /* literal/length tree result */
- inflate_huft * FAR *)); /* distance tree result */
-
-extern int inflate_trees_free OF((
- inflate_huft *, /* tables to free */
- z_streamp )); /* for zfree function */
-
-/* --- inftrees.h */
-
-/* +++ infcodes.h */
-/* infcodes.h -- header to use infcodes.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-struct inflate_codes_state;
-typedef struct inflate_codes_state FAR inflate_codes_statef;
-
-extern inflate_codes_statef *inflate_codes_new OF((
- uInt, uInt,
- inflate_huft *, inflate_huft *,
- z_streamp ));
-
-extern int inflate_codes OF((
- inflate_blocks_statef *,
- z_streamp ,
- int));
-
-extern void inflate_codes_free OF((
- inflate_codes_statef *,
- z_streamp ));
-
-/* --- infcodes.h */
-
-/* +++ infutil.h */
-/* infutil.h -- types and macros common to blocks and codes
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-#ifndef _INFUTIL_H
-#define _INFUTIL_H
-
-typedef enum {
- TYPE, /* get type bits (3, including end bit) */
- LENS, /* get lengths for stored */
- STORED, /* processing stored block */
- TABLE, /* get table lengths */
- BTREE, /* get bit lengths tree for a dynamic block */
- DTREE, /* get length, distance trees for a dynamic block */
- CODES, /* processing fixed or dynamic block */
- DRY, /* output remaining window bytes */
- DONEB, /* finished last block, done */
- BADB} /* got a data error--stuck here */
-inflate_block_mode;
-
-/* inflate blocks semi-private state */
-struct inflate_blocks_state {
-
- /* mode */
- inflate_block_mode mode; /* current inflate_block mode */
-
- /* mode dependent information */
- union {
- uInt left; /* if STORED, bytes left to copy */
- struct {
- uInt table; /* table lengths (14 bits) */
- uInt index; /* index into blens (or border) */
- uIntf *blens; /* bit lengths of codes */
- uInt bb; /* bit length tree depth */
- inflate_huft *tb; /* bit length decoding tree */
- } trees; /* if DTREE, decoding info for trees */
- struct {
- inflate_huft *tl;
- inflate_huft *td; /* trees to free */
- inflate_codes_statef
- *codes;
- } decode; /* if CODES, current state */
- } sub; /* submode */
- uInt last; /* true if this block is the last block */
-
- /* mode independent information */
- uInt bitk; /* bits in bit buffer */
- uLong bitb; /* bit buffer */
- Bytef *window; /* sliding window */
- Bytef *end; /* one byte after sliding window */
- Bytef *read; /* window read pointer */
- Bytef *write; /* window write pointer */
- check_func checkfn; /* check function */
- uLong check; /* check on output */
-
-};
-
-
-/* defines for inflate input/output */
-/* update pointers and return */
-#define UPDBITS {s->bitb=b;s->bitk=k;}
-#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
-#define UPDOUT {s->write=q;}
-#define UPDATE {UPDBITS UPDIN UPDOUT}
-#define LEAVE {UPDATE return inflate_flush(s,z,r);}
-/* get bytes and bits */
-#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
-#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
-#define NEXTBYTE (n--,*p++)
-#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
-#define DUMPBITS(j) {b>>=(j);k-=(j);}
-/* output bytes */
-#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
-#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
-#define WWRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
-#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
-#define NEEDOUT {if(m==0){WWRAP if(m==0){FLUSH WWRAP if(m==0) LEAVE}}r=Z_OK;}
-#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
-/* load local pointers */
-#define LOAD {LOADIN LOADOUT}
-
-/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
-extern uInt inflate_mask[17];
-
-/* copy as much as possible from the sliding window to the output area */
-extern int inflate_flush OF((
- inflate_blocks_statef *,
- z_streamp ,
- int));
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-#endif
-/* --- infutil.h */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* Table for deflate from PKZIP's appnote.txt. */
-local const uInt border[] = { /* Order of the bit length code lengths */
- 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
-
-/*
- Notes beyond the 1.93a appnote.txt:
-
- 1. Distance pointers never point before the beginning of the output
- stream.
- 2. Distance pointers can point back across blocks, up to 32k away.
- 3. There is an implied maximum of 7 bits for the bit length table and
- 15 bits for the actual data.
- 4. If only one code exists, then it is encoded using one bit. (Zero
- would be more efficient, but perhaps a little confusing.) If two
- codes exist, they are coded using one bit each (0 and 1).
- 5. There is no way of sending zero distance codes--a dummy must be
- sent if there are none. (History: a pre 2.0 version of PKZIP would
- store blocks with no distance codes, but this was discovered to be
- too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
- zero distance codes, which is sent as one code of zero bits in
- length.
- 6. There are up to 286 literal/length codes. Code 256 represents the
- end-of-block. Note however that the static length tree defines
- 288 codes just to fill out the Huffman codes. Codes 286 and 287
- cannot be used though, since there is no length base or extra bits
-     defined for them.  Similarly, there are up to 30 distance codes.
- However, static trees define 32 codes (all 5 bits) to fill out the
- Huffman codes, but the last two had better not show up in the data.
- 7. Unzip can check dynamic Huffman blocks for complete code sets.
- The exception is that a single code would not be complete (see #4).
-  8. The five bits following the block type are really the number of
- literal codes sent minus 257.
- 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
- (1+6+6). Therefore, to output three times the length, you output
- three codes (1+1+1), whereas to output four times the same length,
- you only need two codes (1+3). Hmm.
- 10. In the tree reconstruction algorithm, Code = Code + Increment
- only if BitLength(i) is not zero. (Pretty obvious.)
- 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
- 12. Note: length code 284 can represent 227-258, but length code 285
- really is 258. The last length deserves its own, short code
- since it gets used a lot in very redundant files. The length
- 258 is special since 258 - 3 (the min match length) is 255.
- 13. The literal/length and distance code bit lengths are read as a
- single stream of lengths. It is possible (and advantageous) for
- a repeat code (16, 17, or 18) to go across the boundary between
- the two sets of lengths.
- */
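-
-/* Illustration (not from zlib): notes #8 and #11 above, in code.  The
- * 14-bit table field read in the TABLE state below unpacks into the three
- * code counts like this (standalone; the field value is invented):
- */
-#if 0 /* illustrative only */
-#include <stdio.h>
-
-int main(void)
-{
-    unsigned t = 29 | (29 << 5) | (15 << 10);  /* maximal 14-bit field */
-    unsigned hlit  = 257 + (t & 0x1f);         /* # literal/length codes */
-    unsigned hdist = 1 + ((t >> 5) & 0x1f);    /* # distance codes */
-    unsigned hclen = 4 + ((t >> 10) & 0x0f);   /* # bit length codes */
-    printf("%u %u %u\n", hlit, hdist, hclen);  /* prints 286 30 19 */
-    return 0;
-}
-#endif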
-
-
-void inflate_blocks_reset(s, z, c)
-inflate_blocks_statef *s;
-z_streamp z;
-uLongf *c;
-{
- if (s->checkfn != Z_NULL)
- *c = s->check;
- if (s->mode == BTREE || s->mode == DTREE)
- ZFREE(z, s->sub.trees.blens);
- if (s->mode == CODES)
- {
- inflate_codes_free(s->sub.decode.codes, z);
- inflate_trees_free(s->sub.decode.td, z);
- inflate_trees_free(s->sub.decode.tl, z);
- }
- s->mode = TYPE;
- s->bitk = 0;
- s->bitb = 0;
- s->read = s->write = s->window;
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(0L, Z_NULL, 0);
- Trace((stderr, "inflate: blocks reset\n"));
-}
-
-
-inflate_blocks_statef *inflate_blocks_new(z, c, w)
-z_streamp z;
-check_func c;
-uInt w;
-{
- inflate_blocks_statef *s;
-
- if ((s = (inflate_blocks_statef *)ZALLOC
- (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
- return s;
- if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
- {
- ZFREE(z, s);
- return Z_NULL;
- }
- s->end = s->window + w;
- s->checkfn = c;
- s->mode = TYPE;
- Trace((stderr, "inflate: blocks allocated\n"));
- inflate_blocks_reset(s, z, &s->check);
- return s;
-}
-
-
-#ifdef DEBUG_ZLIB
- extern uInt inflate_hufts;
-#endif
-int inflate_blocks(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt t; /* temporary storage */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
-
- /* copy input/output information to locals (UPDATE macro restores) */
- LOAD
-
- /* process input based on current state */
- while (1) switch (s->mode)
- {
- case TYPE:
- NEEDBITS(3)
- t = (uInt)b & 7;
- s->last = t & 1;
- switch (t >> 1)
- {
- case 0: /* stored */
- Trace((stderr, "inflate: stored block%s\n",
- s->last ? " (last)" : ""));
- DUMPBITS(3)
- t = k & 7; /* go to byte boundary */
- DUMPBITS(t)
- s->mode = LENS; /* get length of stored block */
- break;
- case 1: /* fixed */
- Trace((stderr, "inflate: fixed codes block%s\n",
- s->last ? " (last)" : ""));
- {
- uInt bl, bd;
- inflate_huft *tl, *td;
-
- inflate_trees_fixed(&bl, &bd, &tl, &td);
- s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
- if (s->sub.decode.codes == Z_NULL)
- {
- r = Z_MEM_ERROR;
- LEAVE
- }
- s->sub.decode.tl = Z_NULL; /* don't try to free these */
- s->sub.decode.td = Z_NULL;
- }
- DUMPBITS(3)
- s->mode = CODES;
- break;
- case 2: /* dynamic */
- Trace((stderr, "inflate: dynamic codes block%s\n",
- s->last ? " (last)" : ""));
- DUMPBITS(3)
- s->mode = TABLE;
- break;
- case 3: /* illegal */
- DUMPBITS(3)
- s->mode = BADB;
- z->msg = (char*)"invalid block type";
- r = Z_DATA_ERROR;
- LEAVE
- }
- break;
- case LENS:
- NEEDBITS(32)
- if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
- {
- s->mode = BADB;
- z->msg = (char*)"invalid stored block lengths";
- r = Z_DATA_ERROR;
- LEAVE
- }
- s->sub.left = (uInt)b & 0xffff;
- b = k = 0; /* dump bits */
- Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
- s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE);
- break;
- case STORED:
- if (n == 0)
- LEAVE
- NEEDOUT
- t = s->sub.left;
- if (t > n) t = n;
- if (t > m) t = m;
- zmemcpy(q, p, t);
- p += t; n -= t;
- q += t; m -= t;
- if ((s->sub.left -= t) != 0)
- break;
- Tracev((stderr, "inflate: stored end, %lu total out\n",
- z->total_out + (q >= s->read ? q - s->read :
- (s->end - s->read) + (q - s->window))));
- s->mode = s->last ? DRY : TYPE;
- break;
- case TABLE:
- NEEDBITS(14)
- s->sub.trees.table = t = (uInt)b & 0x3fff;
-#ifndef PKZIP_BUG_WORKAROUND
- if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
- {
- s->mode = BADB;
- z->msg = (char*)"too many length or distance symbols";
- r = Z_DATA_ERROR;
- LEAVE
- }
-#endif
- t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
- if (t < 19)
- t = 19;
- if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
- {
- r = Z_MEM_ERROR;
- LEAVE
- }
- DUMPBITS(14)
- s->sub.trees.index = 0;
- Tracev((stderr, "inflate: table sizes ok\n"));
- s->mode = BTREE;
- case BTREE:
- while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
- {
- NEEDBITS(3)
- s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
- DUMPBITS(3)
- }
- while (s->sub.trees.index < 19)
- s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
- s->sub.trees.bb = 7;
- t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
- &s->sub.trees.tb, z);
- if (t != Z_OK)
- {
- ZFREE(z, s->sub.trees.blens);
- r = t;
- if (r == Z_DATA_ERROR)
- s->mode = BADB;
- LEAVE
- }
- s->sub.trees.index = 0;
- Tracev((stderr, "inflate: bits tree ok\n"));
- s->mode = DTREE;
- case DTREE:
- while (t = s->sub.trees.table,
- s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
- {
- inflate_huft *h;
- uInt i, j, c;
-
- t = s->sub.trees.bb;
- NEEDBITS(t)
- h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
- t = h->word.what.Bits;
- c = h->more.Base;
- if (c < 16)
- {
- DUMPBITS(t)
- s->sub.trees.blens[s->sub.trees.index++] = c;
- }
- else /* c == 16..18 */
- {
- i = c == 18 ? 7 : c - 14;
- j = c == 18 ? 11 : 3;
- NEEDBITS(t + i)
- DUMPBITS(t)
- j += (uInt)b & inflate_mask[i];
- DUMPBITS(i)
- i = s->sub.trees.index;
- t = s->sub.trees.table;
- if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
- (c == 16 && i < 1))
- {
- inflate_trees_free(s->sub.trees.tb, z);
- ZFREE(z, s->sub.trees.blens);
- s->mode = BADB;
- z->msg = (char*)"invalid bit length repeat";
- r = Z_DATA_ERROR;
- LEAVE
- }
- c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
- do {
- s->sub.trees.blens[i++] = c;
- } while (--j);
- s->sub.trees.index = i;
- }
- }
- inflate_trees_free(s->sub.trees.tb, z);
- s->sub.trees.tb = Z_NULL;
- {
- uInt bl, bd;
- inflate_huft *tl, *td;
- inflate_codes_statef *c;
-
- bl = 9; /* must be <= 9 for lookahead assumptions */
- bd = 6; /* must be <= 9 for lookahead assumptions */
- t = s->sub.trees.table;
-#ifdef DEBUG_ZLIB
- inflate_hufts = 0;
-#endif
- t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
- s->sub.trees.blens, &bl, &bd, &tl, &td, z);
- ZFREE(z, s->sub.trees.blens);
- if (t != Z_OK)
- {
- if (t == (uInt)Z_DATA_ERROR)
- s->mode = BADB;
- r = t;
- LEAVE
- }
- Tracev((stderr, "inflate: trees ok, %d * %d bytes used\n",
- inflate_hufts, sizeof(inflate_huft)));
- if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
- {
- inflate_trees_free(td, z);
- inflate_trees_free(tl, z);
- r = Z_MEM_ERROR;
- LEAVE
- }
- s->sub.decode.codes = c;
- s->sub.decode.tl = tl;
- s->sub.decode.td = td;
- }
- s->mode = CODES;
- case CODES:
- UPDATE
- if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
- return inflate_flush(s, z, r);
- r = Z_OK;
- inflate_codes_free(s->sub.decode.codes, z);
- inflate_trees_free(s->sub.decode.td, z);
- inflate_trees_free(s->sub.decode.tl, z);
- LOAD
- Tracev((stderr, "inflate: codes end, %lu total out\n",
- z->total_out + (q >= s->read ? q - s->read :
- (s->end - s->read) + (q - s->window))));
- if (!s->last)
- {
- s->mode = TYPE;
- break;
- }
- if (k > 7) /* return unused byte, if any */
- {
- Assert(k < 16, "inflate_codes grabbed too many bytes")
- k -= 8;
- n++;
- p--; /* can always return one */
- }
- s->mode = DRY;
- case DRY:
- FLUSH
- if (s->read != s->write)
- LEAVE
- s->mode = DONEB;
- case DONEB:
- r = Z_STREAM_END;
- LEAVE
- case BADB:
- r = Z_DATA_ERROR;
- LEAVE
- default:
- r = Z_STREAM_ERROR;
- LEAVE
- }
-}
-
-
-int inflate_blocks_free(s, z, c)
-inflate_blocks_statef *s;
-z_streamp z;
-uLongf *c;
-{
- inflate_blocks_reset(s, z, c);
- ZFREE(z, s->window);
- ZFREE(z, s);
- Trace((stderr, "inflate: blocks freed\n"));
- return Z_OK;
-}
-
-
-void inflate_set_dictionary(s, d, n)
-inflate_blocks_statef *s;
-const Bytef *d;
-uInt n;
-{
- zmemcpy((charf *)s->window, d, n);
- s->read = s->write = s->window + n;
-}
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (hence s->read equals s->write), and the state must
- * be BLOCKS (i.e. we should be willing to see the start of a series of
- * BLOCKS). On exit, the output will also be caught up, and the checksum
- * will have been updated if need be.
- */
-int inflate_addhistory(s, z)
-inflate_blocks_statef *s;
-z_stream *z;
-{
- uLong b; /* bit buffer */ /* NOT USED HERE */
- uInt k; /* bits in bit buffer */ /* NOT USED HERE */
- uInt t; /* temporary storage */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
-
- if (s->read != s->write)
- return Z_STREAM_ERROR;
- if (s->mode != TYPE)
- return Z_DATA_ERROR;
-
- /* we're ready to rock */
- LOAD
- /* while there is input ready, copy to output buffer, moving
- * pointers as needed.
- */
- while (n) {
- t = n; /* how many to do */
- /* is there room until end of buffer? */
- if (t > m) t = m;
- /* update check information */
- if (s->checkfn != Z_NULL)
- s->check = (*s->checkfn)(s->check, q, t);
- zmemcpy(q, p, t);
- q += t;
- p += t;
- n -= t;
- z->total_out += t;
- s->read = q; /* drag read pointer forward */
-/* WWRAP */ /* expand WWRAP macro by hand to handle s->read */
- if (q == s->end) {
- s->read = q = s->window;
- m = WAVAIL;
- }
- }
- UPDATE
- return Z_OK;
-}
-
-
-/*
- * At the end of a Deflate-compressed PPP packet, we expect to have seen
- * a `stored' block type value but not the (zero) length bytes.
- */
-int inflate_packet_flush(s)
- inflate_blocks_statef *s;
-{
- if (s->mode != LENS)
- return Z_DATA_ERROR;
- s->mode = TYPE;
- return Z_OK;
-}
-/* --- infblock.c */
-
-/* +++ inftrees.c */
-/* inftrees.c -- generate Huffman trees for efficient decoding
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-
-char inflate_copyright[] = " inflate 1.0.4 Copyright 1995-1996 Mark Adler ";
-/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
- */
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-
-local int huft_build OF((
- uIntf *, /* code lengths in bits */
- uInt, /* number of codes */
- uInt, /* number of "simple" codes */
- const uIntf *, /* list of base values for non-simple codes */
- const uIntf *, /* list of extra bits for non-simple codes */
- inflate_huft * FAR*,/* result: starting table */
- uIntf *, /* maximum lookup bits (returns actual) */
- z_streamp )); /* for zalloc function */
-
-local voidpf falloc OF((
- voidpf, /* opaque pointer (not used) */
- uInt, /* number of items */
- uInt)); /* size of item */
-
-/* Tables for deflate from PKZIP's appnote.txt. */
-local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
- 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
- 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
- /* see note #13 above about 258 */
-local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
-local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
- 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
- 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
- 8193, 12289, 16385, 24577};
-local const uInt cpdext[30] = { /* Extra bits for distance codes */
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
- 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
- 12, 12, 13, 13};
-
-/*
- Huffman code decoding is performed using a multi-level table lookup.
- The fastest way to decode is to simply build a lookup table whose
- size is determined by the longest code. However, the time it takes
- to build this table can also be a factor if the data being decoded
- is not very long. The most common codes are necessarily the
- shortest codes, so those codes dominate the decoding time, and hence
- the speed. The idea is you can have a shorter table that decodes the
- shorter, more probable codes, and then point to subsidiary tables for
- the longer codes. The time it costs to decode the longer codes is
- then traded against the time it takes to make longer tables.
-
-   The results of this trade are in the variables lbits and dbits
- below. lbits is the number of bits the first level table for literal/
- length codes can decode in one step, and dbits is the same thing for
- the distance codes. Subsequent tables are also less than or equal to
- those sizes. These values may be adjusted either when all of the
- codes are shorter than that, in which case the longest code length in
- bits is used, or when the shortest code is *longer* than the requested
- table size, in which case the length of the shortest code in bits is
- used.
-
- There are two different values for the two tables, since they code a
- different number of possibilities each. The literal/length table
- codes 286 possible values, or in a flat code, a little over eight
- bits. The distance table codes 30 possible values, or a little less
- than five bits, flat. The optimum values for speed end up being
- about one bit more than those, so lbits is 8+1 and dbits is 5+1.
- The optimum values may differ though from machine to machine, and
- possibly even between compilers. Your mileage may vary.
- */
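-
-/* Note (not from zlib): the arithmetic behind the paragraph above.  A
- * flat code over 286 literal/length symbols averages log2(286) ~= 8.16
- * bits, and one over 30 distance symbols log2(30) ~= 4.91 bits; adding
- * "about one bit" gives the first-level table sizes lbits = 9 and
- * dbits = 6.  Standalone check:
- */
-#if 0 /* illustrative only */
-#include <stdio.h>
-#include <math.h>
-
-int main(void)
-{
-    printf("literal/length flat: %.2f bits\n", log(286.0) / log(2.0));
-    printf("distance flat:       %.2f bits\n", log(30.0) / log(2.0));
-    return 0;   /* prints 8.16 and 4.91 */
-}
-#endif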
-
-
-/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
-#define BMAX 15 /* maximum bit length of any code */
-#define N_MAX 288 /* maximum number of codes in any set */
-
-#ifdef DEBUG_ZLIB
- uInt inflate_hufts;
-#endif
-
-local int huft_build(b, n, s, d, e, t, m, zs)
-uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
-uInt n; /* number of codes (assumed <= N_MAX) */
-uInt s; /* number of simple-valued codes (0..s-1) */
-const uIntf *d; /* list of base values for non-simple codes */
-const uIntf *e; /* list of extra bits for non-simple codes */
-inflate_huft * FAR *t; /* result: starting table */
-uIntf *m; /* maximum lookup bits, returns actual */
-z_streamp zs; /* for zalloc function */
-/* Given a list of code lengths and a maximum table size, make a set of
- tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
- if the given code set is incomplete (the tables are still built in this
- case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
- lengths), or Z_MEM_ERROR if not enough memory. */
-{
-
- uInt a; /* counter for codes of length k */
- uInt c[BMAX+1]; /* bit length count table */
- uInt f; /* i repeats in table every f entries */
- int g; /* maximum code length */
- int h; /* table level */
- register uInt i; /* counter, current code */
- register uInt j; /* counter */
- register int k; /* number of bits in current code */
- int l; /* bits per table (returned in m) */
- register uIntf *p; /* pointer into c[], b[], or v[] */
- inflate_huft *q; /* points to current table */
- struct inflate_huft_s r; /* table entry for structure assignment */
- inflate_huft *u[BMAX]; /* table stack */
- uInt v[N_MAX]; /* values in order of bit length */
- register int w; /* bits before this table == (l * h) */
- uInt x[BMAX+1]; /* bit offsets, then code stack */
- uIntf *xp; /* pointer into x */
- int y; /* number of dummy codes added */
- uInt z; /* number of entries in current table */
-
-
- /* Generate counts for each bit length */
- p = c;
-#define C0 *p++ = 0;
-#define C2 C0 C0 C0 C0
-#define C4 C2 C2 C2 C2
- C4 /* clear c[]--assume BMAX+1 is 16 */
- p = b; i = n;
- do {
- c[*p++]++; /* assume all entries <= BMAX */
- } while (--i);
- if (c[0] == n) /* null input--all zero length codes */
- {
- *t = (inflate_huft *)Z_NULL;
- *m = 0;
- return Z_OK;
- }
-
-
- /* Find minimum and maximum length, bound *m by those */
- l = *m;
- for (j = 1; j <= BMAX; j++)
- if (c[j])
- break;
- k = j; /* minimum code length */
- if ((uInt)l < j)
- l = j;
- for (i = BMAX; i; i--)
- if (c[i])
- break;
- g = i; /* maximum code length */
- if ((uInt)l > i)
- l = i;
- *m = l;
-
-
- /* Adjust last length count to fill out codes, if needed */
- for (y = 1 << j; j < i; j++, y <<= 1)
- if ((y -= c[j]) < 0)
- return Z_DATA_ERROR;
- if ((y -= c[i]) < 0)
- return Z_DATA_ERROR;
- c[i] += y;
-
-
- /* Generate starting offsets into the value table for each length */
- x[1] = j = 0;
- p = c + 1; xp = x + 2;
- while (--i) { /* note that i == g from above */
- *xp++ = (j += *p++);
- }
-
-
- /* Make a table of values in order of bit lengths */
- p = b; i = 0;
- do {
- if ((j = *p++) != 0)
- v[x[j]++] = i;
- } while (++i < n);
- n = x[g]; /* set n to length of v */
-
-
- /* Generate the Huffman codes and for each, make the table entries */
- x[0] = i = 0; /* first Huffman code is zero */
- p = v; /* grab values in bit order */
- h = -1; /* no tables yet--level -1 */
- w = -l; /* bits decoded == (l * h) */
- u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
- q = (inflate_huft *)Z_NULL; /* ditto */
- z = 0; /* ditto */
-
- /* go through the bit lengths (k already is bits in shortest code) */
- for (; k <= g; k++)
- {
- a = c[k];
- while (a--)
- {
- /* here i is the Huffman code of length k bits for value *p */
- /* make tables up to required level */
- while (k > w + l)
- {
- h++;
- w += l; /* previous table always l bits */
-
- /* compute minimum size table less than or equal to l bits */
- z = g - w;
- z = z > (uInt)l ? l : z; /* table size upper limit */
- if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
- { /* too few codes for k-w bit table */
- f -= a + 1; /* deduct codes from patterns left */
- xp = c + k;
- if (j < z)
- while (++j < z) /* try smaller tables up to z bits */
- {
- if ((f <<= 1) <= *++xp)
- break; /* enough codes to use up j bits */
- f -= *xp; /* else deduct codes from patterns */
- }
- }
- z = 1 << j; /* table entries for j-bit table */
-
- /* allocate and link in new table */
- if ((q = (inflate_huft *)ZALLOC
- (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
- {
- if (h)
- inflate_trees_free(u[0], zs);
- return Z_MEM_ERROR; /* not enough memory */
- }
-#ifdef DEBUG_ZLIB
- inflate_hufts += z + 1;
-#endif
- *t = q + 1; /* link to list for huft_free() */
- *(t = &(q->next)) = Z_NULL;
- u[h] = ++q; /* table starts after link */
-
- /* connect to last table, if there is one */
- if (h)
- {
- x[h] = i; /* save pattern for backing up */
- r.bits = (Byte)l; /* bits to dump before this table */
- r.exop = (Byte)j; /* bits in this table */
- r.next = q; /* pointer to this table */
- j = i >> (w - l); /* (get around Turbo C bug) */
- u[h-1][j] = r; /* connect to last table */
- }
- }
-
- /* set up table entry in r */
- r.bits = (Byte)(k - w);
- if (p >= v + n)
- r.exop = 128 + 64; /* out of values--invalid code */
- else if (*p < s)
- {
- r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
- r.base = *p++; /* simple code is just the value */
- }
- else
- {
- r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */
- r.base = d[*p++ - s];
- }
-
- /* fill code-like entries with r */
- f = 1 << (k - w);
- for (j = i >> w; j < z; j += f)
- q[j] = r;
-
- /* backwards increment the k-bit code i */
- for (j = 1 << (k - 1); i & j; j >>= 1)
- i ^= j;
- i ^= j;
-
- /* backup over finished tables */
- while ((i & ((1 << w) - 1)) != x[h])
- {
- h--; /* don't need to update q */
- w -= l;
- }
- }
- }
-
-
- /* Return Z_BUF_ERROR if we were given an incomplete table */
- return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
-}
-
-
-int inflate_trees_bits(c, bb, tb, z)
-uIntf *c; /* 19 code lengths */
-uIntf *bb; /* bits tree desired/actual depth */
-inflate_huft * FAR *tb; /* bits tree result */
-z_streamp z; /* for zfree function */
-{
- int r;
-
- r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed dynamic bit lengths tree";
- else if (r == Z_BUF_ERROR || *bb == 0)
- {
- inflate_trees_free(*tb, z);
- z->msg = (char*)"incomplete dynamic bit lengths tree";
- r = Z_DATA_ERROR;
- }
- return r;
-}
-
-
-int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
-uInt nl; /* number of literal/length codes */
-uInt nd; /* number of distance codes */
-uIntf *c; /* that many (total) code lengths */
-uIntf *bl; /* literal desired/actual bit depth */
-uIntf *bd; /* distance desired/actual bit depth */
-inflate_huft * FAR *tl; /* literal/length tree result */
-inflate_huft * FAR *td; /* distance tree result */
-z_streamp z; /* for zfree function */
-{
- int r;
-
- /* build literal/length tree */
- r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z);
- if (r != Z_OK || *bl == 0)
- {
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed literal/length tree";
- else if (r != Z_MEM_ERROR)
- {
- inflate_trees_free(*tl, z);
- z->msg = (char*)"incomplete literal/length tree";
- r = Z_DATA_ERROR;
- }
- return r;
- }
-
- /* build distance tree */
- r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z);
- if (r != Z_OK || (*bd == 0 && nl > 257))
- {
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed distance tree";
- else if (r == Z_BUF_ERROR) {
-#ifdef PKZIP_BUG_WORKAROUND
- r = Z_OK;
- }
-#else
- inflate_trees_free(*td, z);
- z->msg = (char*)"incomplete distance tree";
- r = Z_DATA_ERROR;
- }
- else if (r != Z_MEM_ERROR)
- {
- z->msg = (char*)"empty distance tree with lengths";
- r = Z_DATA_ERROR;
- }
- inflate_trees_free(*tl, z);
- return r;
-#endif
- }
-
- /* done */
- return Z_OK;
-}
-
-
-/* build fixed tables only once--keep them here */
-local int fixed_built = 0;
-#define FIXEDH 530 /* number of hufts used by fixed tables */
-local inflate_huft fixed_mem[FIXEDH];
-local uInt fixed_bl;
-local uInt fixed_bd;
-local inflate_huft *fixed_tl;
-local inflate_huft *fixed_td;
-
-
-local voidpf falloc(q, n, s)
-voidpf q; /* opaque pointer */
-uInt n; /* number of items */
-uInt s; /* size of item */
-{
- Assert(s == sizeof(inflate_huft) && n <= *(intf *)q,
- "inflate_trees falloc overflow");
- *(intf *)q -= n+s-s; /* s-s to avoid warning */
- return (voidpf)(fixed_mem + *(intf *)q);
-}
-
-
-int inflate_trees_fixed(bl, bd, tl, td)
-uIntf *bl; /* literal desired/actual bit depth */
-uIntf *bd; /* distance desired/actual bit depth */
-inflate_huft * FAR *tl; /* literal/length tree result */
-inflate_huft * FAR *td; /* distance tree result */
-{
- /* build fixed tables if not already (multiple overlapped executions ok) */
- if (!fixed_built)
- {
- int k; /* temporary variable */
- unsigned c[288]; /* length list for huft_build */
- z_stream z; /* for falloc function */
- int f = FIXEDH; /* number of hufts left in fixed_mem */
-
- /* set up fake z_stream for memory routines */
- z.zalloc = falloc;
- z.zfree = Z_NULL;
- z.opaque = (voidpf)&f;
-
- /* literal table */
- for (k = 0; k < 144; k++)
- c[k] = 8;
- for (; k < 256; k++)
- c[k] = 9;
- for (; k < 280; k++)
- c[k] = 7;
- for (; k < 288; k++)
- c[k] = 8;
- fixed_bl = 7;
- huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
-
- /* distance table */
- for (k = 0; k < 30; k++)
- c[k] = 5;
- fixed_bd = 5;
- huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
-
- /* done */
- Assert(f == 0, "invalid build of fixed tables");
- fixed_built = 1;
- }
- *bl = fixed_bl;
- *bd = fixed_bd;
- *tl = fixed_tl;
- *td = fixed_td;
- return Z_OK;
-}
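-
-/* Illustration (not from zlib): the fixed literal lengths built above
- * (144 codes of 8 bits, 112 of 9, 24 of 7, 8 of 8) form a complete
- * prefix code -- the Kraft sum of 2^-len over all 288 codes is exactly 1.
- * Standalone verification:
- */
-#if 0 /* illustrative only */
-#include <stdio.h>
-
-int main(void)
-{
-    double sum = 0.0;
-    int k;
-    for (k = 0; k < 144; k++) sum += 1.0 / 256.0;  /* codes 0..143:   8 bits */
-    for (; k < 256; k++)      sum += 1.0 / 512.0;  /* codes 144..255: 9 bits */
-    for (; k < 280; k++)      sum += 1.0 / 128.0;  /* codes 256..279: 7 bits */
-    for (; k < 288; k++)      sum += 1.0 / 256.0;  /* codes 280..287: 8 bits */
-    printf("Kraft sum = %g\n", sum);               /* prints 1 */
-    return 0;
-}
-#endif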
-
-
-int inflate_trees_free(t, z)
-inflate_huft *t; /* table to free */
-z_streamp z; /* for zfree function */
-/* Free the malloc'ed tables built by huft_build(), which makes a linked
- list of the tables it made, with the links in a dummy first entry of
- each table. */
-{
- register inflate_huft *p, *q, *r;
-
- /* Reverse linked list */
- p = Z_NULL;
- q = t;
- while (q != Z_NULL)
- {
- r = (q - 1)->next;
- (q - 1)->next = p;
- p = q;
- q = r;
- }
- /* Go through linked list, freeing from the malloced (t[-1]) address. */
- while (p != Z_NULL)
- {
- q = (--p)->next;
- ZFREE(z,p);
- p = q;
- }
- return Z_OK;
-}
-/* --- inftrees.c */
-
-/* +++ infcodes.c */
-/* infcodes.c -- process literals and length/distance pairs
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-/* #include "infblock.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-
-/* +++ inffast.h */
-/* inffast.h -- header to use inffast.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-extern int inflate_fast OF((
- uInt,
- uInt,
- inflate_huft *,
- inflate_huft *,
- inflate_blocks_statef *,
- z_streamp ));
-/* --- inffast.h */
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-/* inflate codes private state */
-struct inflate_codes_state {
-
- /* mode */
- enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
- START, /* x: set up for LEN */
- LEN, /* i: get length/literal/eob next */
- LENEXT, /* i: getting length extra (have base) */
- DIST, /* i: get distance next */
- DISTEXT, /* i: getting distance extra */
- COPY, /* o: copying bytes in window, waiting for space */
- LIT, /* o: got literal, waiting for output space */
- WASH, /* o: got eob, possibly still output waiting */
- END, /* x: got eob and all data flushed */
- BADCODE} /* x: got error */
- mode; /* current inflate_codes mode */
-
- /* mode dependent information */
- uInt len;
- union {
- struct {
- inflate_huft *tree; /* pointer into tree */
- uInt need; /* bits needed */
- } code; /* if LEN or DIST, where in tree */
- uInt lit; /* if LIT, literal */
- struct {
- uInt get; /* bits to get for extra */
- uInt dist; /* distance back to copy from */
- } copy; /* if EXT or COPY, where and how much */
- } sub; /* submode */
-
- /* mode independent information */
- Byte lbits; /* ltree bits decoded per branch */
- Byte dbits; /* dtree bits decoded per branch */
- inflate_huft *ltree; /* literal/length/eob tree */
- inflate_huft *dtree; /* distance tree */
-
-};
-
-
-inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
-uInt bl, bd;
-inflate_huft *tl;
-inflate_huft *td; /* need separate declaration for Borland C++ */
-z_streamp z;
-{
- inflate_codes_statef *c;
-
- if ((c = (inflate_codes_statef *)
- ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
- {
- c->mode = START;
- c->lbits = (Byte)bl;
- c->dbits = (Byte)bd;
- c->ltree = tl;
- c->dtree = td;
- Tracev((stderr, "inflate: codes new\n"));
- }
- return c;
-}
-
-
-int inflate_codes(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt j; /* temporary storage */
- inflate_huft *t; /* temporary pointer */
- uInt e; /* extra bits or operation */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
- Bytef *f; /* pointer to copy strings from */
- inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
-
- /* copy input/output information to locals (UPDATE macro restores) */
- LOAD
-
- /* process input and output based on current state */
- while (1) switch (c->mode)
- { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
- case START: /* x: set up for LEN */
-#ifndef SLOW
- if (m >= 258 && n >= 10)
- {
- UPDATE
- r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
- LOAD
- if (r != Z_OK)
- {
- c->mode = r == Z_STREAM_END ? WASH : BADCODE;
- break;
- }
- }
-#endif /* !SLOW */
- c->sub.code.need = c->lbits;
- c->sub.code.tree = c->ltree;
- c->mode = LEN;
- case LEN: /* i: get length/literal/eob next */
- j = c->sub.code.need;
- NEEDBITS(j)
- t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
- DUMPBITS(t->bits)
- e = (uInt)(t->exop);
- if (e == 0) /* literal */
- {
- c->sub.lit = t->base;
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: literal '%c'\n" :
- "inflate: literal 0x%02x\n", t->base));
- c->mode = LIT;
- break;
- }
- if (e & 16) /* length */
- {
- c->sub.copy.get = e & 15;
- c->len = t->base;
- c->mode = LENEXT;
- break;
- }
- if ((e & 64) == 0) /* next table */
- {
- c->sub.code.need = e;
- c->sub.code.tree = t->next;
- break;
- }
- if (e & 32) /* end of block */
- {
- Tracevv((stderr, "inflate: end of block\n"));
- c->mode = WASH;
- break;
- }
- c->mode = BADCODE; /* invalid code */
- z->msg = (char*)"invalid literal/length code";
- r = Z_DATA_ERROR;
- LEAVE
- case LENEXT: /* i: getting length extra (have base) */
- j = c->sub.copy.get;
- NEEDBITS(j)
- c->len += (uInt)b & inflate_mask[j];
- DUMPBITS(j)
- c->sub.code.need = c->dbits;
- c->sub.code.tree = c->dtree;
- Tracevv((stderr, "inflate: length %u\n", c->len));
- c->mode = DIST;
- case DIST: /* i: get distance next */
- j = c->sub.code.need;
- NEEDBITS(j)
- t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
- DUMPBITS(t->bits)
- e = (uInt)(t->exop);
- if (e & 16) /* distance */
- {
- c->sub.copy.get = e & 15;
- c->sub.copy.dist = t->base;
- c->mode = DISTEXT;
- break;
- }
- if ((e & 64) == 0) /* next table */
- {
- c->sub.code.need = e;
- c->sub.code.tree = t->next;
- break;
- }
- c->mode = BADCODE; /* invalid code */
- z->msg = (char*)"invalid distance code";
- r = Z_DATA_ERROR;
- LEAVE
- case DISTEXT: /* i: getting distance extra */
- j = c->sub.copy.get;
- NEEDBITS(j)
- c->sub.copy.dist += (uInt)b & inflate_mask[j];
- DUMPBITS(j)
- Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
- c->mode = COPY;
- case COPY: /* o: copying bytes in window, waiting for space */
-#ifndef __TURBOC__ /* Turbo C bug for following expression */
- f = (uInt)(q - s->window) < c->sub.copy.dist ?
- s->end - (c->sub.copy.dist - (q - s->window)) :
- q - c->sub.copy.dist;
-#else
- f = q - c->sub.copy.dist;
- if ((uInt)(q - s->window) < c->sub.copy.dist)
- f = s->end - (c->sub.copy.dist - (uInt)(q - s->window));
-#endif
- while (c->len)
- {
- NEEDOUT
- OUTBYTE(*f++)
- if (f == s->end)
- f = s->window;
- c->len--;
- }
- c->mode = START;
- break;
- case LIT: /* o: got literal, waiting for output space */
- NEEDOUT
- OUTBYTE(c->sub.lit)
- c->mode = START;
- break;
- case WASH: /* o: got eob, possibly more output */
- FLUSH
- if (s->read != s->write)
- LEAVE
- c->mode = END;
- case END:
- r = Z_STREAM_END;
- LEAVE
- case BADCODE: /* x: got error */
- r = Z_DATA_ERROR;
- LEAVE
- default:
- r = Z_STREAM_ERROR;
- LEAVE
- }
-}
-
-
-void inflate_codes_free(c, z)
-inflate_codes_statef *c;
-z_streamp z;
-{
- ZFREE(z, c);
- Tracev((stderr, "inflate: codes free\n"));
-}
-/* --- infcodes.c */
-
-/* +++ infutil.c */
-/* inflate_util.c -- data and routines common to blocks and codes
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "infblock.h" */
-/* #include "inftrees.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* And'ing with mask[n] masks the lower n bits */
-uInt inflate_mask[17] = {
- 0x0000,
- 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
- 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
-};
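As a quick editorial illustration of that table (not part of the original
source): extracting a j-bit code from the bit buffer is a single AND, which
is exactly how the LEN and DIST states above use it.

    /* e.g. b = 0x1ed (binary 1 1110 1101): b & inflate_mask[4] == 0x0d */
    uInt code = (uInt)b & inflate_mask[4];   /* keep the low 4 bits */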
-
-
-/* copy as much as possible from the sliding window to the output area */
-int inflate_flush(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt n;
- Bytef *p;
- Bytef *q;
-
- /* local copies of source and destination pointers */
- p = z->next_out;
- q = s->read;
-
- /* compute number of bytes to copy as far as end of window */
- n = (uInt)((q <= s->write ? s->write : s->end) - q);
- if (n > z->avail_out) n = z->avail_out;
- if (n && r == Z_BUF_ERROR) r = Z_OK;
-
- /* update counters */
- z->avail_out -= n;
- z->total_out += n;
-
- /* update check information */
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(s->check, q, n);
-
- /* copy as far as end of window */
- if (p != Z_NULL) {
- zmemcpy(p, q, n);
- p += n;
- }
- q += n;
-
- /* see if more to copy at beginning of window */
- if (q == s->end)
- {
- /* wrap pointers */
- q = s->window;
- if (s->write == s->end)
- s->write = s->window;
-
- /* compute bytes to copy */
- n = (uInt)(s->write - q);
- if (n > z->avail_out) n = z->avail_out;
- if (n && r == Z_BUF_ERROR) r = Z_OK;
-
- /* update counters */
- z->avail_out -= n;
- z->total_out += n;
-
- /* update check information */
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(s->check, q, n);
-
- /* copy */
- if (p != Z_NULL) {
- zmemcpy(p, q, n);
- p += n;
- }
- q += n;
- }
-
- /* update pointers */
- z->next_out = p;
- s->read = q;
-
- /* done */
- return r;
-}
-/* --- infutil.c */
-
-/* +++ inffast.c */
-/* inffast.c -- process literals and length/distance pairs fast
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-/* #include "infblock.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-/* #include "inffast.h" */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-/* macros for bit input with no checking and for returning unused bytes */
-#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
-#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
-
-/* Called with number of bytes left to write in window at least 258
- (the maximum string length) and number of input bytes available
- at least ten. The ten bytes are six bytes for the longest length/
- distance pair plus four bytes for overloading the bit buffer. */
-
-int inflate_fast(bl, bd, tl, td, s, z)
-uInt bl, bd;
-inflate_huft *tl;
-inflate_huft *td; /* need separate declaration for Borland C++ */
-inflate_blocks_statef *s;
-z_streamp z;
-{
- inflate_huft *t; /* temporary pointer */
- uInt e; /* extra bits or operation */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
- uInt ml; /* mask for literal/length tree */
- uInt md; /* mask for distance tree */
- uInt c; /* bytes to copy */
- uInt d; /* distance back to copy from */
- Bytef *r; /* copy source pointer */
-
- /* load input, output, bit values */
- LOAD
-
- /* initialize masks */
- ml = inflate_mask[bl];
- md = inflate_mask[bd];
-
- /* do until not enough input or output space for fast loop */
- do { /* assume called with m >= 258 && n >= 10 */
- /* get literal/length code */
- GRABBITS(20) /* max bits for literal/length code */
- if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
- {
- DUMPBITS(t->bits)
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: * literal '%c'\n" :
- "inflate: * literal 0x%02x\n", t->base));
- *q++ = (Byte)t->base;
- m--;
- continue;
- }
- do {
- DUMPBITS(t->bits)
- if (e & 16)
- {
- /* get extra bits for length */
- e &= 15;
- c = t->base + ((uInt)b & inflate_mask[e]);
- DUMPBITS(e)
- Tracevv((stderr, "inflate: * length %u\n", c));
-
- /* decode distance base of block to copy */
- GRABBITS(15); /* max bits for distance code */
- e = (t = td + ((uInt)b & md))->exop;
- do {
- DUMPBITS(t->bits)
- if (e & 16)
- {
- /* get extra bits to add to distance base */
- e &= 15;
- GRABBITS(e) /* get extra bits (up to 13) */
- d = t->base + ((uInt)b & inflate_mask[e]);
- DUMPBITS(e)
- Tracevv((stderr, "inflate: * distance %u\n", d));
-
- /* do the copy */
- m -= c;
- if ((uInt)(q - s->window) >= d) /* offset before dest */
- { /* just copy */
- r = q - d;
- *q++ = *r++; c--; /* minimum count is three, */
- *q++ = *r++; c--; /* so unroll loop a little */
- }
- else /* else offset after destination */
- {
- e = d - (uInt)(q - s->window); /* bytes from offset to end */
- r = s->end - e; /* pointer to offset */
- if (c > e) /* if source crosses, */
- {
- c -= e; /* copy to end of window */
- do {
- *q++ = *r++;
- } while (--e);
- r = s->window; /* copy rest from start of window */
- }
- }
- do { /* copy all or what's left */
- *q++ = *r++;
- } while (--c);
- break;
- }
- else if ((e & 64) == 0)
- e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
- else
- {
- z->msg = (char*)"invalid distance code";
- UNGRAB
- UPDATE
- return Z_DATA_ERROR;
- }
- } while (1);
- break;
- }
- if ((e & 64) == 0)
- {
- if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
- {
- DUMPBITS(t->bits)
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: * literal '%c'\n" :
- "inflate: * literal 0x%02x\n", t->base));
- *q++ = (Byte)t->base;
- m--;
- break;
- }
- }
- else if (e & 32)
- {
- Tracevv((stderr, "inflate: * end of block\n"));
- UNGRAB
- UPDATE
- return Z_STREAM_END;
- }
- else
- {
- z->msg = (char*)"invalid literal/length code";
- UNGRAB
- UPDATE
- return Z_DATA_ERROR;
- }
- } while (1);
- } while (m >= 258 && n >= 10);
-
- /* not enough input or output--restore pointers and return */
- UNGRAB
- UPDATE
- return Z_OK;
-}
-/* --- inffast.c */
-
-/* +++ zutil.c */
-/* zutil.c -- target dependent utility functions for the compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: zutil.c,v 1.17 1996/07/24 13:41:12 me Exp $ */
-
-/* #include "zutil.h" */
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-#ifndef STDC
-extern void exit OF((int));
-#endif
-
-const char *z_errmsg[10] = {
-"need dictionary", /* Z_NEED_DICT 2 */
-"stream end", /* Z_STREAM_END 1 */
-"", /* Z_OK 0 */
-"file error", /* Z_ERRNO (-1) */
-"stream error", /* Z_STREAM_ERROR (-2) */
-"data error", /* Z_DATA_ERROR (-3) */
-"insufficient memory", /* Z_MEM_ERROR (-4) */
-"buffer error", /* Z_BUF_ERROR (-5) */
-"incompatible version",/* Z_VERSION_ERROR (-6) */
-""};
-
-
-const char *zlibVersion()
-{
- return ZLIB_VERSION;
-}
-
-#ifdef DEBUG_ZLIB
-void z_error (m)
- char *m;
-{
- fprintf(stderr, "%s\n", m);
- exit(1);
-}
-#endif
-
-#ifndef HAVE_MEMCPY
-
-void zmemcpy(dest, source, len)
- Bytef* dest;
- Bytef* source;
- uInt len;
-{
- if (len == 0) return;
- do {
- *dest++ = *source++; /* ??? to be unrolled */
- } while (--len != 0);
-}
-
-int zmemcmp(s1, s2, len)
- Bytef* s1;
- Bytef* s2;
- uInt len;
-{
- uInt j;
-
- for (j = 0; j < len; j++) {
- if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1;
- }
- return 0;
-}
-
-void zmemzero(dest, len)
- Bytef* dest;
- uInt len;
-{
- if (len == 0) return;
- do {
- *dest++ = 0; /* ??? to be unrolled */
- } while (--len != 0);
-}
-#endif
-
-#ifdef __TURBOC__
-#if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__)
-/* Small and medium model in Turbo C are for now limited to near allocation
- * with reduced MAX_WBITS and MAX_MEM_LEVEL
- */
-# define MY_ZCALLOC
-
-/* Turbo C malloc() does not allow dynamic allocation of 64K bytes
- * and farmalloc(64K) returns a pointer with an offset of 8, so we
- * must fix the pointer. Warning: the pointer must be put back to its
- * original form in order to free it, use zcfree().
- */
-
-#define MAX_PTR 10
-/* 10*64K = 640K */
-
-local int next_ptr = 0;
-
-typedef struct ptr_table_s {
- voidpf org_ptr;
- voidpf new_ptr;
-} ptr_table;
-
-local ptr_table table[MAX_PTR];
-/* This table is used to remember the original form of pointers
- * to large buffers (64K). Such pointers are normalized with a zero offset.
- * Since MSDOS is not a preemptive multitasking OS, this table is not
- * protected from concurrent access. This hack doesn't work anyway on
- * a protected system like OS/2. Use Microsoft C instead.
- */
-
-voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
-{
- voidpf buf = opaque; /* just to make some compilers happy */
- ulg bsize = (ulg)items*size;
-
- /* If we allocate less than 65520 bytes, we assume that farmalloc
- * will return a usable pointer which doesn't have to be normalized.
- */
- if (bsize < 65520L) {
- buf = farmalloc(bsize);
- if (*(ush*)&buf != 0) return buf;
- } else {
- buf = farmalloc(bsize + 16L);
- }
- if (buf == NULL || next_ptr >= MAX_PTR) return NULL;
- table[next_ptr].org_ptr = buf;
-
- /* Normalize the pointer to seg:0 */
- *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4;
- *(ush*)&buf = 0;
- table[next_ptr++].new_ptr = buf;
- return buf;
-}
-
-void zcfree (voidpf opaque, voidpf ptr)
-{
- int n;
- if (*(ush*)&ptr != 0) { /* object < 64K */
- farfree(ptr);
- return;
- }
- /* Find the original pointer */
- for (n = 0; n < next_ptr; n++) {
- if (ptr != table[n].new_ptr) continue;
-
- farfree(table[n].org_ptr);
- while (++n < next_ptr) {
- table[n-1] = table[n];
- }
- next_ptr--;
- return;
- }
- ptr = opaque; /* just to make some compilers happy */
- Assert(0, "zcfree: ptr not found");
-}
-#endif
-#endif /* __TURBOC__ */
-
-
-#if defined(M_I86) && !defined(__32BIT__)
-/* Microsoft C in 16-bit mode */
-
-# define MY_ZCALLOC
-
-#if (!defined(_MSC_VER) || (_MSC_VER < 600))
-# define _halloc halloc
-# define _hfree hfree
-#endif
-
-voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
-{
- if (opaque) opaque = 0; /* to make compiler happy */
- return _halloc((long)items, size);
-}
-
-void zcfree (voidpf opaque, voidpf ptr)
-{
- if (opaque) opaque = 0; /* to make compiler happy */
- _hfree(ptr);
-}
-
-#endif /* MSC */
-
-
-#ifndef MY_ZCALLOC /* Any system without a special alloc function */
-
-#ifndef STDC
-extern voidp calloc OF((uInt items, uInt size));
-extern void free OF((voidpf ptr));
-#endif
-
-voidpf zcalloc (opaque, items, size)
- voidpf opaque;
- unsigned items;
- unsigned size;
-{
- if (opaque) items += size - size; /* make compiler happy */
- return (voidpf)calloc(items, size);
-}
-
-void zcfree (opaque, ptr)
- voidpf opaque;
- voidpf ptr;
-{
- free(ptr);
- if (opaque) return; /* make compiler happy */
-}
-
-#endif /* MY_ZCALLOC */
-/* --- zutil.c */
-
-/* +++ adler32.c */
-/* adler32.c -- compute the Adler-32 checksum of a data stream
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: adler32.c,v 1.10 1996/05/22 11:52:18 me Exp $ */
-
-/* #include "zlib.h" */
-
-#define BASE 65521L /* largest prime smaller than 65536 */
-#define NMAX 5552
-/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
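Checking that bound for NMAX = 5552 (editorial arithmetic, not from the
source): 255*5552*5553/2 = 3,930,857,640 and 5553*(BASE-1) = 5553*65520 =
363,832,560, which sum to 4,294,690,200 <= 2^32-1 = 4,294,967,295, while
n = 5553 would give 4,296,171,735 and overflow; so s1 and s2 can safely
accumulate NMAX bytes between the two modulo reductions below.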
-
-#define DO1(buf,i) {s1 += buf[i]; s2 += s1;}
-#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
-#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
-#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
-#define DO16(buf) DO8(buf,0); DO8(buf,8);
-
-/* ========================================================================= */
-uLong adler32(adler, buf, len)
- uLong adler;
- const Bytef *buf;
- uInt len;
-{
- unsigned long s1 = adler & 0xffff;
- unsigned long s2 = (adler >> 16) & 0xffff;
- int k;
-
- if (buf == Z_NULL) return 1L;
-
- while (len > 0) {
- k = len < NMAX ? len : NMAX;
- len -= k;
- while (k >= 16) {
- DO16(buf);
- buf += 16;
- k -= 16;
- }
- if (k != 0) do {
- s1 += *buf++;
- s2 += s1;
- } while (--k);
- s1 %= BASE;
- s2 %= BASE;
- }
- return (s2 << 16) | s1;
-}
-/* --- adler32.c */
+++ /dev/null
-/* $Id: zlib.h,v 1.2 1997/12/23 10:47:44 paulus Exp $ */
-
-/*
- * This file is derived from zlib.h and zconf.h from the zlib-1.0.4
- * distribution by Jean-loup Gailly and Mark Adler, with some additions
- * by Paul Mackerras to aid in implementing Deflate compression and
- * decompression for PPP packets.
- */
-
-/*
- * ==FILEVERSION 971127==
- *
- * This marker is used by the Linux installation script to determine
- * whether an up-to-date version of this file is already installed.
- */
-
-
-/* +++ zlib.h */
-/* zlib.h -- interface of the 'zlib' general purpose compression library
- version 1.0.4, Jul 24th, 1996.
-
- Copyright (C) 1995-1996 Jean-loup Gailly and Mark Adler
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Jean-loup Gailly Mark Adler
- gzip@prep.ai.mit.edu madler@alumni.caltech.edu
-
-
- The data format used by the zlib library is described by RFCs (Request for
- Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
- (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
-*/
-
-#ifndef _ZLIB_H
-#define _ZLIB_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/* +++ zconf.h */
-/* zconf.h -- configuration of the zlib compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: zconf.h,v 1.20 1996/07/02 15:09:28 me Exp $ */
-
-#ifndef _ZCONF_H
-#define _ZCONF_H
-
-/*
- * If you *really* need a unique prefix for all types and library functions,
- * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
- */
-#ifdef Z_PREFIX
-# define deflateInit_ z_deflateInit_
-# define deflate z_deflate
-# define deflateEnd z_deflateEnd
-# define inflateInit_ z_inflateInit_
-# define inflate z_inflate
-# define inflateEnd z_inflateEnd
-# define deflateInit2_ z_deflateInit2_
-# define deflateSetDictionary z_deflateSetDictionary
-# define deflateCopy z_deflateCopy
-# define deflateReset z_deflateReset
-# define deflateParams z_deflateParams
-# define inflateInit2_ z_inflateInit2_
-# define inflateSetDictionary z_inflateSetDictionary
-# define inflateSync z_inflateSync
-# define inflateReset z_inflateReset
-# define compress z_compress
-# define uncompress z_uncompress
-# define adler32 z_adler32
-# define crc32 z_crc32
-# define get_crc_table z_get_crc_table
-
-# define Byte z_Byte
-# define uInt z_uInt
-# define uLong z_uLong
-# define Bytef z_Bytef
-# define charf z_charf
-# define intf z_intf
-# define uIntf z_uIntf
-# define uLongf z_uLongf
-# define voidpf z_voidpf
-# define voidp z_voidp
-#endif
-
-#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32)
-# define WIN32
-#endif
-#if defined(__GNUC__) || defined(WIN32) || defined(__386__) || defined(i386)
-# ifndef __32BIT__
-# define __32BIT__
-# endif
-#endif
-#if defined(__MSDOS__) && !defined(MSDOS)
-# define MSDOS
-#endif
-
-/*
- * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
- * than 64k bytes at a time (needed on systems with 16-bit int).
- */
-#if defined(MSDOS) && !defined(__32BIT__)
-# define MAXSEG_64K
-#endif
-#ifdef MSDOS
-# define UNALIGNED_OK
-#endif
-
-#if (defined(MSDOS) || defined(_WINDOWS) || defined(WIN32)) && !defined(STDC)
-# define STDC
-#endif
-#if (defined(__STDC__) || defined(__cplusplus)) && !defined(STDC)
-# define STDC
-#endif
-
-#ifndef STDC
-# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
-# define const
-# endif
-#endif
-
-/* Some Mac compilers merge all .h files incorrectly: */
-#if defined(__MWERKS__) || defined(applec) ||defined(THINK_C) ||defined(__SC__)
-# define NO_DUMMY_DECL
-#endif
-
-/* Maximum value for memLevel in deflateInit2 */
-#ifndef MAX_MEM_LEVEL
-# ifdef MAXSEG_64K
-# define MAX_MEM_LEVEL 8
-# else
-# define MAX_MEM_LEVEL 9
-# endif
-#endif
-
-/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
-#ifndef MAX_WBITS
-# define MAX_WBITS 15 /* 32K LZ77 window */
-#endif
-
-/* The memory requirements for deflate are (in bytes):
- 1 << (windowBits+2) + 1 << (memLevel+9)
- that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
- plus a few kilobytes for small objects. For example, if you want to reduce
- the default memory requirements from 256K to 128K, compile with
- make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
- Of course this will generally degrade compression (there's no free lunch).
-
- The memory requirements for inflate are (in bytes) 1 << windowBits
- that is, 32K for windowBits=15 (default value) plus a few kilobytes
- for small objects.
-*/
-
- /* Type declarations */
-
-#ifndef OF /* function prototypes */
-# ifdef STDC
-# define OF(args) args
-# else
-# define OF(args) ()
-# endif
-#endif
-
-/* The following definitions for FAR are needed only for MSDOS mixed
- * model programming (small or medium model with some far allocations).
- * This was tested only with MSC; for other MSDOS compilers you may have
- * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
- * just define FAR to be empty.
- */
-#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(__32BIT__)
- /* MSC small or medium model */
-# define SMALL_MEDIUM
-# ifdef _MSC_VER
-# define FAR __far
-# else
-# define FAR far
-# endif
-#endif
-#if defined(__BORLANDC__) && (defined(__SMALL__) || defined(__MEDIUM__))
-# ifndef __32BIT__
-# define SMALL_MEDIUM
-# define FAR __far
-# endif
-#endif
-#ifndef FAR
-# define FAR
-#endif
-
-typedef unsigned char Byte; /* 8 bits */
-typedef unsigned int uInt; /* 16 bits or more */
-typedef unsigned long uLong; /* 32 bits or more */
-
-#if defined(__BORLANDC__) && defined(SMALL_MEDIUM)
- /* Borland C/C++ ignores FAR inside typedef */
-# define Bytef Byte FAR
-#else
- typedef Byte FAR Bytef;
-#endif
-typedef char FAR charf;
-typedef int FAR intf;
-typedef uInt FAR uIntf;
-typedef uLong FAR uLongf;
-
-#ifdef STDC
- typedef void FAR *voidpf;
- typedef void *voidp;
-#else
- typedef Byte FAR *voidpf;
- typedef Byte *voidp;
-#endif
-
-
-/* Compile with -DZLIB_DLL for Windows DLL support */
-#if (defined(_WINDOWS) || defined(WINDOWS)) && defined(ZLIB_DLL)
-# include <windows.h>
-# define EXPORT WINAPI
-#else
-# define EXPORT
-#endif
-
-#endif /* _ZCONF_H */
-/* --- zconf.h */
-
-#define ZLIB_VERSION "1.0.4P"
-
-/*
- The 'zlib' compression library provides in-memory compression and
- decompression functions, including integrity checks of the uncompressed
- data. This version of the library supports only one compression method
- (deflation) but other algorithms may be added later and will have the same
- stream interface.
-
- For compression the application must provide the output buffer and
- may optionally provide the input buffer for optimization. For decompression,
- the application must provide the input buffer and may optionally provide
- the output buffer for optimization.
-
- Compression can be done in a single step if the buffers are large
- enough (for example if an input file is mmap'ed), or can be done by
- repeated calls of the compression function. In the latter case, the
- application must provide more input and/or consume the output
- (providing more output space) before each call.
-
- The library does not install any signal handler. It is recommended to
- add at least a handler for SIGSEGV when decompressing; the library checks
- the consistency of the input data whenever possible but may go nuts
- for some forms of corrupted input.
-*/
-
-typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
-typedef void (*free_func) OF((voidpf opaque, voidpf address));
-
-struct internal_state;
-
-typedef struct z_stream_s {
- Bytef *next_in; /* next input byte */
- uInt avail_in; /* number of bytes available at next_in */
- uLong total_in; /* total nb of input bytes read so far */
-
- Bytef *next_out; /* next output byte should be put there */
- uInt avail_out; /* remaining free space at next_out */
- uLong total_out; /* total nb of bytes output so far */
-
- char *msg; /* last error message, NULL if no error */
- struct internal_state FAR *state; /* not visible by applications */
-
- alloc_func zalloc; /* used to allocate the internal state */
- free_func zfree; /* used to free the internal state */
- voidpf opaque; /* private data object passed to zalloc and zfree */
-
- int data_type; /* best guess about the data type: ascii or binary */
- uLong adler; /* adler32 value of the uncompressed data */
- uLong reserved; /* reserved for future use */
-} z_stream;
-
-typedef z_stream FAR *z_streamp;
-
-/*
- The application must update next_in and avail_in when avail_in has
- dropped to zero. It must update next_out and avail_out when avail_out
- has dropped to zero. The application must initialize zalloc, zfree and
- opaque before calling the init function. All other fields are set by the
- compression library and must not be updated by the application.
-
- The opaque value provided by the application will be passed as the first
- parameter for calls of zalloc and zfree. This can be useful for custom
- memory management. The compression library attaches no meaning to the
- opaque value.
-
- zalloc must return Z_NULL if there is not enough memory for the object.
- On 16-bit systems, the functions zalloc and zfree must be able to allocate
- exactly 65536 bytes, but will not be required to allocate more than this
- if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
- pointers returned by zalloc for objects of exactly 65536 bytes *must*
- have their offset normalized to zero. The default allocation function
- provided by this library ensures this (see zutil.c). To reduce memory
- requirements and avoid any allocation of 64K objects, at the expense of
- compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
-
- The fields total_in and total_out can be used for statistics or
- progress reports. After compression, total_in holds the total size of
- the uncompressed data and may be saved for use in the decompressor
- (particularly if the decompressor wants to decompress everything in
- a single step).
-*/
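As a concrete illustration of the initialization rule above (an editorial
sketch, not part of the original header; handle_error is a hypothetical
placeholder):

    z_stream strm;

    strm.zalloc = Z_NULL;   /* Z_NULL: the init function installs defaults */
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;   /* passed back to zalloc/zfree; unused here */
    if (inflateInit(&strm) != Z_OK)
        handle_error(strm.msg);   /* msg may be NULL if no error message */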
-
- /* constants */
-
-#define Z_NO_FLUSH 0
-#define Z_PARTIAL_FLUSH 1
-#define Z_PACKET_FLUSH 2
-#define Z_SYNC_FLUSH 3
-#define Z_FULL_FLUSH 4
-#define Z_FINISH 5
-/* Allowed flush values; see deflate() below for details */
-
-#define Z_OK 0
-#define Z_STREAM_END 1
-#define Z_NEED_DICT 2
-#define Z_ERRNO (-1)
-#define Z_STREAM_ERROR (-2)
-#define Z_DATA_ERROR (-3)
-#define Z_MEM_ERROR (-4)
-#define Z_BUF_ERROR (-5)
-#define Z_VERSION_ERROR (-6)
-/* Return codes for the compression/decompression functions. Negative
- * values are errors, positive values are used for special but normal events.
- */
-
-#define Z_NO_COMPRESSION 0
-#define Z_BEST_SPEED 1
-#define Z_BEST_COMPRESSION 9
-#define Z_DEFAULT_COMPRESSION (-1)
-/* compression levels */
-
-#define Z_FILTERED 1
-#define Z_HUFFMAN_ONLY 2
-#define Z_DEFAULT_STRATEGY 0
-/* compression strategy; see deflateInit2() below for details */
-
-#define Z_BINARY 0
-#define Z_ASCII 1
-#define Z_UNKNOWN 2
-/* Possible values of the data_type field */
-
-#define Z_DEFLATED 8
-/* The deflate compression method (the only one supported in this version) */
-
-#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
-
-#define zlib_version zlibVersion()
-/* for compatibility with versions < 1.0.2 */
-
- /* basic functions */
-
-extern const char * EXPORT zlibVersion OF((void));
-/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
- If the first character differs, the library code actually used is
- not compatible with the zlib.h header file used by the application.
- This check is automatically made by deflateInit and inflateInit.
- */
-
-/*
-extern int EXPORT deflateInit OF((z_streamp strm, int level));
-
- Initializes the internal stream state for compression. The fields
- zalloc, zfree and opaque must be initialized before by the caller.
- If zalloc and zfree are set to Z_NULL, deflateInit updates them to
- use default allocation functions.
-
- The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
- 1 gives best speed, 9 gives best compression, 0 gives no compression at
- all (the input data is simply copied a block at a time).
- Z_DEFAULT_COMPRESSION requests a default compromise between speed and
- compression (currently equivalent to level 6).
-
- deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if level is not a valid compression level,
- Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
- with the version assumed by the caller (ZLIB_VERSION).
- msg is set to null if there is no error message. deflateInit does not
- perform any compression: this will be done by deflate().
-*/
-
-
-extern int EXPORT deflate OF((z_streamp strm, int flush));
-/*
- Performs one or both of the following actions:
-
- - Compress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in and avail_in are updated and
- processing will resume at this point for the next call of deflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. This action is forced if the parameter flush is non zero.
- Forcing flush frequently degrades the compression ratio, so this parameter
- should be set only when necessary (in interactive applications).
- Some output may be provided even if flush is not set.
-
- Before the call of deflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating avail_in or avail_out accordingly; avail_out
- should never be zero before the call. The application can consume the
- compressed output when it wants, for example when the output buffer is full
- (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
- and with zero avail_out, it must be called again after making room in the
- output buffer because there might be more output pending.
-
- If the parameter flush is set to Z_PARTIAL_FLUSH, the current compression
- block is terminated and flushed to the output buffer so that the
- decompressor can get all input data available so far. For method 9, a future
- variant on method 8, the current block will be flushed but not terminated.
- Z_SYNC_FLUSH has the same effect as partial flush except that the compressed
- output is byte aligned (the compressor can clear its internal bit buffer)
- and the current block is always terminated; this can be useful if the
- compressor has to be restarted from scratch after an interruption (in which
- case the internal state of the compressor may be lost).
- If flush is set to Z_FULL_FLUSH, the compression block is terminated, a
- special marker is output and the compression dictionary is discarded; this
- is useful to allow the decompressor to synchronize if one compressed block
- has been damaged (see inflateSync below). Flushing degrades compression and
- so should be used only when necessary. Using Z_FULL_FLUSH too often can
- seriously degrade the compression. If deflate returns with avail_out == 0,
- this function must be called again with the same value of the flush
- parameter and more output space (updated avail_out), until the flush is
- complete (deflate returns with non-zero avail_out).
-
- If the parameter flush is set to Z_PACKET_FLUSH, the compression
- block is terminated, and a zero-length stored block is output,
- omitting the length bytes (the effect of this is that the 3-bit type
- code 000 for a stored block is output, and the output is then
- byte-aligned). This is designed for use at the end of a PPP packet.
-
- If the parameter flush is set to Z_FINISH, pending input is processed,
- pending output is flushed and deflate returns with Z_STREAM_END if there
- was enough output space; if deflate returns with Z_OK, this function must be
- called again with Z_FINISH and more output space (updated avail_out) but no
- more input data, until it returns with Z_STREAM_END or an error. After
- deflate has returned Z_STREAM_END, the only possible operations on the
- stream are deflateReset or deflateEnd.
-
- Z_FINISH can be used immediately after deflateInit if all the compression
- is to be done in a single step. In this case, avail_out must be at least
- 0.1% larger than avail_in plus 12 bytes. If deflate does not return
- Z_STREAM_END, then it must be called again as described above.
-
- deflate() may update data_type if it can make a good guess about
- the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
- binary. This field is only for information purposes and does not affect
- the compression algorithm in any manner.
-
- deflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if all input has been
- consumed and all output has been produced (only when flush is set to
- Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
- if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible.
-*/
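The calling convention above can be condensed into a single-shot loop. A
sketch, assuming the stream was set up with deflateInit and the whole input
is already in in_buf (write_output is a hypothetical sink; error paths
abbreviated):

    int ret;

    strm.next_in  = in_buf;
    strm.avail_in = in_len;
    do {
        strm.next_out  = out_buf;
        strm.avail_out = sizeof(out_buf);
        ret = deflate(&strm, Z_FINISH);    /* no further input will come */
        write_output(out_buf, sizeof(out_buf) - strm.avail_out);
    } while (ret == Z_OK);                 /* Z_OK: more output pending */
    /* ret is Z_STREAM_END on success; anything else is an error */
    deflateEnd(&strm);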
-
-
-extern int EXPORT deflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
- stream state was inconsistent, Z_DATA_ERROR if the stream was freed
- prematurely (some input or output was discarded). In the error case,
- msg may be set but then points to a static string (which must not be
- deallocated).
-*/
-
-
-/*
-extern int EXPORT inflateInit OF((z_streamp strm));
-
- Initializes the internal stream state for decompression. The fields
- zalloc, zfree and opaque must be initialized before by the caller. If
- zalloc and zfree are set to Z_NULL, inflateInit updates them to use default
- allocation functions.
-
- inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_VERSION_ERROR if the zlib library version is incompatible
- with the version assumed by the caller. msg is set to null if there is no
- error message. inflateInit does not perform any decompression: this will be
- done by inflate().
-*/
-
-
-extern int EXPORT inflate OF((z_streamp strm, int flush));
-/*
- Performs one or both of the following actions:
-
- - Decompress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in is updated and processing
- will resume at this point for the next call of inflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. inflate() provides as much output as possible, until there
- is no more input data or no more space in the output buffer (see below
- about the flush parameter).
-
- Before the call of inflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating the next_* and avail_* values accordingly.
- The application can consume the uncompressed output when it wants, for
- example when the output buffer is full (avail_out == 0), or after each
- call of inflate(). If inflate returns Z_OK and with zero avail_out, it
- must be called again after making room in the output buffer because there
- might be more output pending.
-
- If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
- inflate flushes as much output as possible to the output buffer. The
- flushing behavior of inflate is not specified for values of the flush
- parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
- current implementation actually flushes as much output as possible
- anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
- has been consumed, it is expecting to see the length field of a stored
- block; if not, it returns Z_DATA_ERROR.
-
- inflate() should normally be called until it returns Z_STREAM_END or an
- error. However if all decompression is to be performed in a single step
- (a single call of inflate), the parameter flush should be set to
- Z_FINISH. In this case all pending input is processed and all pending
- output is flushed; avail_out must be large enough to hold all the
- uncompressed data. (The size of the uncompressed data may have been saved
- by the compressor for this purpose.) The next operation on this stream must
- be inflateEnd to deallocate the decompression state. The use of Z_FINISH
- is never required, but can be used to inform inflate that a faster routine
- may be used for the single inflate() call.
-
- inflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if the end of the
- compressed data has been reached and all uncompressed output has been
- produced, Z_NEED_DICT if a preset dictionary is needed at this point (see
- inflateSetDictionary below), Z_DATA_ERROR if the input data was corrupted,
- Z_STREAM_ERROR if the stream structure was inconsistent (for example if
- next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory,
- Z_BUF_ERROR if no progress is possible or if there was not enough room in
- the output buffer when Z_FINISH is used. In the Z_DATA_ERROR case, the
- application may then call inflateSync to look for a good compression block.
- In the Z_NEED_DICT case, strm->adler is set to the Adler32 value of the
- dictionary chosen by the compressor.
-*/
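The decompression side mirrors the deflate loop above. A sketch, assuming
the stream was set up with inflateInit and the whole compressed stream is
in comp_buf (write_output is again a hypothetical sink; Z_PARTIAL_FLUSH is
used since it is one of the flush values this variant documents):

    int ret;

    strm.next_in  = comp_buf;
    strm.avail_in = comp_len;
    do {
        strm.next_out  = out_buf;
        strm.avail_out = sizeof(out_buf);
        ret = inflate(&strm, Z_PARTIAL_FLUSH);
        write_output(out_buf, sizeof(out_buf) - strm.avail_out);
    } while (ret == Z_OK);                 /* Z_OK: call again for more */
    /* Z_STREAM_END: done; negative return values are errors (see above) */
    inflateEnd(&strm);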
-
-
-extern int EXPORT inflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
- was inconsistent. In the error case, msg may be set but then points to a
- static string (which must not be deallocated).
-*/
-
- /* Advanced functions */
-
-/*
- The following functions are needed only in some special applications.
-*/
-
-/*
-extern int EXPORT deflateInit2 OF((z_streamp strm,
- int level,
- int method,
- int windowBits,
- int memLevel,
- int strategy));
-
- This is another version of deflateInit with more compression options. The
- fields next_in, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The method parameter is the compression method. It must be Z_DEFLATED in
- this version of the library. (Method 9 will allow a 64K history buffer and
- partial block flushes.)
-
- The windowBits parameter is the base two logarithm of the window size
- (the size of the history buffer). It should be in the range 8..15 for this
- version of the library (the value 16 will be allowed for method 9). Larger
- values of this parameter result in better compression at the expense of
- memory usage. The default value is 15 if deflateInit is used instead.
-
- The memLevel parameter specifies how much memory should be allocated
- for the internal compression state. memLevel=1 uses minimum memory but
- is slow and reduces compression ratio; memLevel=9 uses maximum memory
- for optimal speed. The default value is 8. See zconf.h for total memory
- usage as a function of windowBits and memLevel.
-
- The strategy parameter is used to tune the compression algorithm. Use the
- value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
- filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
- string match). Filtered data consists mostly of small values with a
- somewhat random distribution. In this case, the compression algorithm is
- tuned to compress them better. The effect of Z_FILTERED is to force more
- Huffman coding and less string matching; it is somewhat intermediate
- between Z_DEFAULT and Z_HUFFMAN_ONLY. The strategy parameter only affects
- the compression ratio but not the correctness of the compressed output even
- if it is not set appropriately.
-
- If next_in is not null, the library will also use this buffer to hold
- some history information; the buffer must either hold the entire input
- data, or have at least 1<<(windowBits+1) bytes and be writable. If next_in
- is null, the library will allocate its own history buffer (and leave next_in
- null). next_out need not be provided here but must be provided by the
- application for the next call of deflate().
-
- If the history buffer is provided by the application, next_in must
- never be changed by the application since the compressor maintains
- information inside this buffer from call to call; the application
- must provide more input only by increasing avail_in. next_in is always
- reset by the library in this case.
-
- deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
- not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
- an invalid method). msg is set to null if there is no error message.
- deflateInit2 does not perform any compression: this will be done by
- deflate().
-*/
-
-extern int EXPORT deflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the compression dictionary (history buffer) from the given
- byte sequence without producing any compressed output. This function must
- be called immediately after deflateInit or deflateInit2, before any call
- of deflate. The compressor and decompressor must use exactly the same
- dictionary (see inflateSetDictionary).
- The dictionary should consist of strings (byte sequences) that are likely
- to be encountered later in the data to be compressed, with the most commonly
- used strings preferably put towards the end of the dictionary. Using a
- dictionary is most useful when the data to be compressed is short and
- can be predicted with good accuracy; the data can then be compressed better
- than with the default empty dictionary. In this version of the library,
- only the last 32K bytes of the dictionary are used.
- Upon return of this function, strm->adler is set to the Adler32 value
- of the dictionary; the decompressor may later use this value to determine
- which dictionary has been used by the compressor. (The Adler32 value
- applies to the whole dictionary even if only a subset of the dictionary is
- actually used by the compressor.)
-
- deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state
- is inconsistent (for example if deflate has already been called for this
- stream). deflateSetDictionary does not perform any compression: this will
- be done by deflate().
-*/
-
-extern int EXPORT deflateCopy OF((z_streamp dest,
- z_streamp source));
-/*
- Sets the destination stream as a complete copy of the source stream. If
- the source stream is using an application-supplied history buffer, a new
- buffer is allocated for the destination stream. The compressed output
- buffer is always application-supplied. It's the responsibility of the
- application to provide the correct values of next_out and avail_out for the
- next call of deflate.
-
- This function can be useful when several compression strategies will be
- tried, for example when there are several ways of pre-processing the input
- data with a filter. The streams that will be discarded should then be freed
- by calling deflateEnd. Note that deflateCopy duplicates the internal
- compression state which can be quite large, so this strategy is slow and
- can consume lots of memory.
-
- deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
- (such as zalloc being NULL). msg is left unchanged in both source and
- destination.
-*/
-
-extern int EXPORT deflateReset OF((z_streamp strm));
-/*
- This function is equivalent to deflateEnd followed by deflateInit,
- but does not free and reallocate all the internal compression state.
- The stream will keep the same compression level and any other attributes
- that may have been set by deflateInit2.
-
- deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-extern int EXPORT deflateParams OF((z_streamp strm, int level, int strategy));
-/*
- Dynamically update the compression level and compression strategy.
- This can be used to switch between compression and straight copy of
- the input data, or to switch to a different kind of input data requiring
- a different strategy. If the compression level is changed, the input
- available so far is compressed with the old level (and may be flushed);
- the new level will take effect only at the next call of deflate().
-
- Before the call of deflateParams, the stream state must be set as for
- a call of deflate(), since the currently available input may have to
- be compressed and flushed. In particular, strm->avail_out must be non-zero.
-
- deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
- stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
- if strm->avail_out was zero.
-*/
-
-extern int EXPORT deflateOutputPending OF((z_streamp strm));
-/*
- Returns the number of bytes of output which are immediately
- available from the compressor (i.e. without any further input
- or flush).
-*/
-
-/*
-extern int EXPORT inflateInit2 OF((z_streamp strm,
- int windowBits));
-
- This is another version of inflateInit with more compression options. The
- fields next_out, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The windowBits parameter is the base two logarithm of the maximum window
- size (the size of the history buffer). It should be in the range 8..15 for
- this version of the library (the value 16 will be allowed soon). The
- default value is 15 if inflateInit is used instead. If a compressed stream
- with a larger window size is given as input, inflate() will return with
- the error code Z_DATA_ERROR instead of trying to allocate a larger window.
-
- If next_out is not null, the library will use this buffer for the history
- buffer; the buffer must either be large enough to hold the entire output
- data, or have at least 1<<windowBits bytes. If next_out is null, the
- library will allocate its own buffer (and leave next_out null). next_in
- need not be provided here but must be provided by the application for the
- next call of inflate().
-
- If the history buffer is provided by the application, next_out must
- never be changed by the application since the decompressor maintains
- history information inside this buffer from call to call; the application
- can only reset next_out to the beginning of the history buffer when
- avail_out is zero and all output has been consumed.
-
- inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
- not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
- windowBits < 8). msg is set to null if there is no error message.
- inflateInit2 does not perform any decompression: this will be done by
- inflate().
-*/
-
-extern int EXPORT inflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the decompression dictionary (history buffer) from the given
- uncompressed byte sequence. This function must be called immediately after
- a call of inflate if this call returned Z_NEED_DICT. The dictionary chosen
- by the compressor can be determined from the Adler32 value returned by this
- call of inflate. The compressor and decompressor must use exactly the same
- dictionary (see deflateSetDictionary).
-
- inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state is
- inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
- expected one (incorrect Adler32 value). inflateSetDictionary does not
- perform any decompression: this will be done by subsequent calls of
- inflate().
-*/
-
-extern int EXPORT inflateSync OF((z_streamp strm));
-/*
- Skips invalid compressed data until the special marker (see deflate()
- above) can be found, or until all available input is skipped. No output
- is provided.
-
- inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
- if no more input was provided, Z_DATA_ERROR if no marker has been found,
- or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
- case, the application may save the current value of total_in which
- indicates where valid compressed data was found. In the error case, the
- application may repeatedly call inflateSync, providing more input each time,
- until success or end of the input data.
-*/
-
-extern int EXPORT inflateReset OF((z_streamp strm));
-/*
- This function is equivalent to inflateEnd followed by inflateInit,
- but does not free and reallocate all the internal decompression state.
- The stream will keep attributes that may have been set by inflateInit2.
-
- inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-extern int inflateIncomp OF((z_stream *strm));
-/*
- This function adds the data at next_in (avail_in bytes) to the output
- history without performing any output. There must be no pending output,
- and the decompressor must be expecting to see the start of a block.
- Calling this function is equivalent to decompressing a stored block
- containing the data at next_in (except that the data is not output).
-*/
-
- /* utility functions */
-
-/*
- The following utility functions are implemented on top of the
- basic stream-oriented functions. To simplify the interface, some
- default options are assumed (compression level, window size,
- standard memory allocation functions). The source code of these
- utility functions can easily be modified if you need special options.
-*/
-
-extern int EXPORT compress OF((Bytef *dest, uLongf *destLen,
- const Bytef *source, uLong sourceLen));
-/*
- Compresses the source buffer into the destination buffer. sourceLen is
- the byte length of the source buffer. Upon entry, destLen is the total
- size of the destination buffer, which must be at least 0.1% larger than
- sourceLen plus 12 bytes. Upon exit, destLen is the actual size of the
- compressed buffer.
- This function can be used to compress a whole file at once if the
- input file is mmap'ed.
- compress returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if there was not enough room in the output
- buffer.
-*/
-
-extern int EXPORT uncompress OF((Bytef *dest, uLongf *destLen,
- const Bytef *source, uLong sourceLen));
-/*
- Decompresses the source buffer into the destination buffer. sourceLen is
- the byte length of the source buffer. Upon entry, destLen is the total
- size of the destination buffer, which must be large enough to hold the
- entire uncompressed data. (The size of the uncompressed data must have
- been saved previously by the compressor and transmitted to the decompressor
- by some mechanism outside the scope of this compression library.)
- Upon exit, destLen is the actual size of the uncompressed data.
- This function can be used to decompress a whole file at once if the
- input file is mmap'ed.
-
- uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if there was not enough room in the output
- buffer, or Z_DATA_ERROR if the input data was corrupted.
-*/
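A round-trip sketch with the two utility functions (buffer sizes here are
merely illustrative; real code must size the destination as described
above):

    Bytef  src[]  = "hello, hello, hello, hello";
    Bytef  comp[64], plain[64];
    uLongf clen = sizeof(comp);
    uLongf plen = sizeof(plain);

    if (compress(comp, &clen, src, sizeof(src)) == Z_OK &&
        uncompress(plain, &plen, comp, clen) == Z_OK) {
        /* here plen == sizeof(src) and plain holds a copy of src */
    }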
-
-
-typedef voidp gzFile;
-
-extern gzFile EXPORT gzopen OF((const char *path, const char *mode));
-/*
- Opens a gzip (.gz) file for reading or writing. The mode parameter
- is as in fopen ("rb" or "wb") but can also include a compression level
- ("wb9"). gzopen can be used to read a file which is not in gzip format;
- in this case gzread will directly read from the file without decompression.
- gzopen returns NULL if the file could not be opened or if there was
- insufficient memory to allocate the (de)compression state; errno
- can be checked to distinguish the two cases (if errno is zero, the
- zlib error is Z_MEM_ERROR).
-*/
-
-extern gzFile EXPORT gzdopen OF((int fd, const char *mode));
-/*
- gzdopen() associates a gzFile with the file descriptor fd. File
- descriptors are obtained from calls like open, dup, creat, pipe or
- fileno (if the file has been previously opened with fopen).
- The mode parameter is as in gzopen.
- The next call of gzclose on the returned gzFile will also close the
- file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file
- descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode).
- gzdopen returns NULL if there was insufficient memory to allocate
- the (de)compression state.
-*/
-
-extern int EXPORT gzread OF((gzFile file, voidp buf, unsigned len));
-/*
- Reads the given number of uncompressed bytes from the compressed file.
- If the input file was not in gzip format, gzread copies the given number
- of bytes into the buffer.
- gzread returns the number of uncompressed bytes actually read (0 for
- end of file, -1 for error). */
-
-extern int EXPORT gzwrite OF((gzFile file, const voidp buf, unsigned len));
-/*
- Writes the given number of uncompressed bytes into the compressed file.
- gzwrite returns the number of uncompressed bytes actually written
- (0 in case of error).
-*/
-
-extern int EXPORT gzflush OF((gzFile file, int flush));
-/*
- Flushes all pending output into the compressed file. The parameter
- flush is as in the deflate() function. The return value is the zlib
- error number (see function gzerror below). gzflush returns Z_OK if
- the flush parameter is Z_FINISH and all output could be flushed.
- gzflush should be called only when strictly necessary because it can
- degrade compression.
-*/
-
-extern int EXPORT gzclose OF((gzFile file));
-/*
- Flushes all pending output if necessary, closes the compressed file
- and deallocates all the (de)compression state. The return value is the zlib
- error number (see function gzerror below).
-*/
-
-extern const char * EXPORT gzerror OF((gzFile file, int *errnum));
-/*
- Returns the error message for the last error which occurred on the
- given compressed file. errnum is set to zlib error number. If an
- error occurred in the file system and not in the compression library,
- errnum is set to Z_ERRNO and the application may consult errno
- to get the exact error code.
-*/
-
- /* checksum functions */
-
-/*
- These functions are not related to compression but are exported
- anyway because they might be useful in applications using the
- compression library.
-*/
-
-extern uLong EXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
-
-/*
- Update a running Adler-32 checksum with the bytes buf[0..len-1] and
- return the updated checksum. If buf is NULL, this function returns
- the required initial value for the checksum.
- An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
- much faster. Usage example:
-
- uLong adler = adler32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- adler = adler32(adler, buffer, length);
- }
- if (adler != original_adler) error();
-*/
-
-extern uLong EXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len));
-/*
- Update a running crc with the bytes buf[0..len-1] and return the updated
- crc. If buf is NULL, this function returns the required initial value
- for the crc. Pre- and post-conditioning (one's complement) is performed
- within this function so it shouldn't be done by the application.
- Usage example:
-
- uLong crc = crc32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- crc = crc32(crc, buffer, length);
- }
- if (crc != original_crc) error();
-*/
-
-
- /* various hacks, don't look :) */
-
-/* deflateInit and inflateInit are macros to allow checking the zlib version
- * and the compiler's view of z_stream:
- */
-extern int EXPORT deflateInit_ OF((z_streamp strm, int level,
- const char *version, int stream_size));
-extern int EXPORT inflateInit_ OF((z_streamp strm,
- const char *version, int stream_size));
-extern int EXPORT deflateInit2_ OF((z_streamp strm, int level, int method,
- int windowBits, int memLevel, int strategy,
- const char *version, int stream_size));
-extern int EXPORT inflateInit2_ OF((z_streamp strm, int windowBits,
- const char *version, int stream_size));
-#define deflateInit(strm, level) \
- deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
-#define inflateInit(strm) \
- inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
-#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
- deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
- (strategy), ZLIB_VERSION, sizeof(z_stream))
-#define inflateInit2(strm, windowBits) \
- inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
-
-#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
- struct internal_state {int dummy;}; /* hack for buggy compilers */
-#endif
-
-uLongf *get_crc_table OF((void)); /* can be used by asm versions of crc32() */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _ZLIB_H */
-/* --- zlib.h */
If unsure, say Y.
+CONFIG_PNPBIOS
+ Linux uses the PNPBIOS as defined in "Plug and Play BIOS
+ Specification Version 1.0A May 5, 1994" to autodetect built-in
+ mainboard resources (e.g. parallel port resources).
+
+  Other features (e.g. changing resources, ESCD, event notification,
+  docking station information, ISAPNP services) are not used.
+
+  Note: ACPI is expected to supersede PNPBIOS some day; for now the
+  two co-exist nicely.
+
+  See the latest pcmcia-cs (stand-alone package) for a nice "lspnp"
+  tool, or have a look at /proc/bus/pnp.
+
+ If unsure, say Y.
+
dep_tristate ' ISA Plug and Play support' CONFIG_ISAPNP $CONFIG_PNP
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ dep_bool ' PNPBIOS support (EXPERIMENTAL)' CONFIG_PNPBIOS $CONFIG_PNP
+fi
+
endmenu
O_TARGET := pnp.o
-export-objs := isapnp.o
-list-multi := isa-pnp.o
+export-objs := isapnp.o pnpbios_core.o
+list-multi := isa-pnp.o pnpbios.o
-proc-$(CONFIG_PROC_FS) = isapnp_proc.o
-isa-pnp-objs := isapnp.o quirks.o $(proc-y)
+isa-pnp-proc-$(CONFIG_PROC_FS) = isapnp_proc.o
+pnpbios-proc-$(CONFIG_PROC_FS) = pnpbios_proc.o
+
+isa-pnp-objs := isapnp.o quirks.o $(isa-pnp-proc-y)
+pnpbios-objs := pnpbios_core.o $(pnpbios-proc-y)
obj-$(CONFIG_ISAPNP) += isa-pnp.o
+obj-$(CONFIG_PNPBIOS) += pnpbios.o
include $(TOPDIR)/Rules.make
isa-pnp.o: $(isa-pnp-objs)
$(LD) $(LD_RFLAG) -r -o $@ $(isa-pnp-objs)
+
+pnpbios.o: $(pnpbios-objs)
+ $(LD) $(LD_RFLAG) -r -o $@ $(pnpbios-objs)
case _STAG_END:
if (size > 0)
isapnp_skip_bytes(size);
+ isapnp_config_prepare(dev);
return 1;
default:
printk(KERN_ERR "isapnp: unexpected or unknown tag type 0x%x for logical device %i (device %i), ignored\n", type, dev->devfn, card->number);
--- /dev/null
+/*
+ * PnP BIOS services
+ *
+ * Originally (C) 1998 Christian Schmidt <schmidt@digadd.de>
+ * Modifications (c) 1998 Tom Lees <tom@lpsg.demon.co.uk>
+ * Minor reorganizations by David Hinds <dahinds@users.sourceforge.net>
+ * Modifications (c) 2001 by Thomas Hood <jdthood@mail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * References:
+ * Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corporation
+ * Plug and Play BIOS Specification, Version 1.0A, May 5, 1994
+ * Plug and Play BIOS Clarification Paper, October 6, 1994
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/pnpbios.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <asm/desc.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/kmod.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+
+
+/*
+ *
+ * PnP BIOS INTERFACE
+ *
+ */
+
+/* PnP BIOS signature: "$PnP" */
+#define PNP_SIGNATURE (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24))
+
+#pragma pack(1)
+union pnp_bios_expansion_header {
+ struct {
+ u32 signature; /* "$PnP" */
+ u8 version; /* in BCD */
+ u8 length; /* length in bytes, currently 21h */
+ u16 control; /* system capabilities */
+ u8 checksum; /* all bytes must add up to 0 */
+
+ u32 eventflag; /* phys. address of the event flag */
+ u16 rmoffset; /* real mode entry point */
+ u16 rmcseg;
+ u16 pm16offset; /* 16 bit protected mode entry */
+ u32 pm16cseg;
+ u32 deviceID; /* EISA encoded system ID or 0 */
+ u16 rmdseg; /* real mode data segment */
+ u32 pm16dseg; /* 16 bit pm data segment base */
+ } fields;
+ char chars[0x21]; /* To calculate the checksum */
+};
+#pragma pack()
+
+static struct {
+ u16 offset;
+ u16 segment;
+} pnp_bios_callpoint;
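+
+/*
+ * pnp_bios_callpoint is the memory operand of the "lcallw" in the
+ * trampoline below: its 16-bit offset and segment selector together
+ * form the far pointer through which the BIOS 16-bit entry point is
+ * reached.
+ */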
+
+static union pnp_bios_expansion_header * pnp_bios_hdr = NULL;
+
+/* The PnP BIOS entries in the GDT */
+#define PNP_GDT (0x0060)
+#define PNP_CS32 (PNP_GDT+0x00) /* segment for calling fn */
+#define PNP_CS16 (PNP_GDT+0x08) /* code segment for BIOS */
+#define PNP_DS (PNP_GDT+0x10) /* data segment for BIOS */
+#define PNP_TS1 (PNP_GDT+0x18) /* transfer data segment */
+#define PNP_TS2 (PNP_GDT+0x20) /* another data segment */
+
+/*
+ * These are some opcodes for a "static asmlinkage".
+ * As this code is *not* executed inside the linux kernel segment, but in an
+ * alias at offset 0, we need a far return that cannot be generated by
+ * default (please, prove me wrong! this is *really* ugly!)
+ * This is the only way to get the BIOS to return into the kernel code,
+ * because the BIOS code runs in 16 bit protected mode and therefore can only
+ * return to the caller if the call is within the first 64kB, and the linux
+ * kernel begins at offset 3GB...
+ */
+
+asmlinkage void pnp_bios_callfunc(void);
+
+__asm__(
+ ".text \n"
+ __ALIGN_STR "\n"
+ SYMBOL_NAME_STR(pnp_bios_callfunc) ":\n"
+ " pushl %edx \n"
+ " pushl %ecx \n"
+ " pushl %ebx \n"
+ " pushl %eax \n"
+ " lcallw " SYMBOL_NAME_STR(pnp_bios_callpoint) "\n"
+ " addl $16, %esp \n"
+ " lret \n"
+ ".previous \n"
+);
+
+#define Q_SET_SEL(selname, address, size) \
+set_base (gdt [(selname) >> 3], __va((u32)(address))); \
+set_limit (gdt [(selname) >> 3], size)
+
+#define Q2_SET_SEL(selname, address, size) \
+set_base (gdt [(selname) >> 3], (u32)(address)); \
+set_limit (gdt [(selname) >> 3], size)
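+
+/*
+ * Note: Q_SET_SEL maps a physical address through __va() before
+ * loading it into the descriptor, so it is used for addresses the
+ * BIOS installation structure hands us; Q2_SET_SEL loads an address
+ * that is already virtual, e.g. a kernel buffer used as a transfer
+ * segment.
+ */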
+
+/*
+ * At some point we want to use this stack frame pointer to unwind
+ * after PnP BIOS oopses.
+ */
+
+u32 pnp_bios_fault_esp;
+u32 pnp_bios_fault_eip;
+u32 pnp_bios_is_utter_crap = 0;
+
+static spinlock_t pnp_bios_lock;
+
+static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
+ u16 arg4, u16 arg5, u16 arg6, u16 arg7)
+{
+ unsigned long flags;
+ u16 status;
+
+ /*
+ * PnP BIOSes are generally not terribly re-entrant.
+ * Also, don't rely on them to save everything correctly.
+ */
+ if(pnp_bios_is_utter_crap)
+ return PNP_FUNCTION_NOT_SUPPORTED;
+
+	/* On some boxes IRQs during PnP BIOS calls are deadly. */
+ spin_lock_irqsave(&pnp_bios_lock, flags);
+ __cli();
+ __asm__ __volatile__(
+ "pushl %%ebp\n\t"
+ "pushl %%edi\n\t"
+ "pushl %%esi\n\t"
+ "pushl %%ds\n\t"
+ "pushl %%es\n\t"
+ "pushl %%fs\n\t"
+ "pushl %%gs\n\t"
+ "pushfl\n\t"
+ "movl %%esp, pnp_bios_fault_esp\n\t"
+ "movl $1f, pnp_bios_fault_eip\n\t"
+ "lcall %5,%6\n\t"
+ "1:popfl\n\t"
+ "popl %%gs\n\t"
+ "popl %%fs\n\t"
+ "popl %%es\n\t"
+ "popl %%ds\n\t"
+ "popl %%esi\n\t"
+ "popl %%edi\n\t"
+ "popl %%ebp\n\t"
+ : "=a" (status)
+ : "0" ((func) | (((u32)arg1) << 16)),
+ "b" ((arg2) | (((u32)arg3) << 16)),
+ "c" ((arg4) | (((u32)arg5) << 16)),
+ "d" ((arg6) | (((u32)arg7) << 16)),
+ "i" (PNP_CS32),
+ "i" (0)
+ : "memory"
+ );
+ spin_unlock_irqrestore(&pnp_bios_lock, flags);
+
+ /* If we get here and this is set then the PnP BIOS faulted on us. */
+ if(pnp_bios_is_utter_crap)
+ {
+ printk(KERN_ERR "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue.\n");
+ printk(KERN_ERR "PnPBIOS: You may need to reboot with the \"nobiospnp\" option to operate stably.\n");
+ printk(KERN_ERR "PnPBIOS: Check with your vendor for an updated BIOS\n");
+ }
+
+ return status;
+}
+
+
+/*
+ *
+ * UTILITY FUNCTIONS
+ *
+ */
+
+void *pnpbios_kmalloc(size_t size, int f)
+{
+ void *p = kmalloc( size, f );
+ if ( p == NULL )
+ printk(KERN_ERR "PnPBIOS: kmalloc() failed.\n");
+ return p;
+}
+
+/*
+ * Call this only after init time
+ */
+static int pnp_bios_present(void)
+{
+ return (pnp_bios_hdr != NULL);
+}
+
+/* Forward declaration */
+static void update_devlist( u8 nodenum, struct pnp_bios_node *data );
+
+
+/*
+ *
+ * PnP BIOS ACCESS FUNCTIONS
+ *
+ */
+
+#define PNP_GET_NUM_SYS_DEV_NODES 0x00
+#define PNP_GET_SYS_DEV_NODE 0x01
+#define PNP_SET_SYS_DEV_NODE 0x02
+#define PNP_GET_EVENT 0x03
+#define PNP_SEND_MESSAGE 0x04
+#define PNP_GET_DOCKING_STATION_INFORMATION 0x05
+#define PNP_SET_STATIC_ALLOCED_RES_INFO 0x09
+#define PNP_GET_STATIC_ALLOCED_RES_INFO 0x0a
+#define PNP_GET_APM_ID_TABLE 0x0b
+#define PNP_GET_PNP_ISA_CONFIG_STRUC 0x40
+#define PNP_GET_ESCD_INFO 0x41
+#define PNP_READ_ESCD 0x42
+#define PNP_WRITE_ESCD 0x43
+
+/*
+ * Call PnP BIOS with function 0x00, "get number of system device nodes"
+ */
+static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, sizeof(struct pnp_dev_node_info));
+ status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2, PNP_TS1, PNP_DS, 0, 0);
+ data->no_nodes &= 0xff;
+ return status;
+}
+
+int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
+{
+ int status = __pnp_bios_dev_node_info( data );
+ if ( status )
+ printk(KERN_WARNING "PnPBIOS: dev_node_info: Unexpected status 0x%x\n", status);
+ return status;
+}
+
+/*
+ * Note that some PnP BIOSes (e.g., on Sony Vaio laptops) die a horrible
+ * death if they are asked to access the "current" configuration.
+ * Therefore, if it's a matter of indifference, it's better to call
+ * get_dev_node() and set_dev_node() with boot=1 rather than with boot=0.
+ */
+
+/*
+ * Call PnP BIOS with function 0x01, "get system device node"
+ * Input: *nodenum = desired node,
+ * boot = whether to get nonvolatile boot (!=0)
+ * or volatile current (0) config
+ * Output: *nodenum=next node or 0xff if no more nodes
+ */
+static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ if ( !boot & pnpbios_dont_use_current_config )
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, nodenum, sizeof(char));
+ Q2_SET_SEL(PNP_TS2, data, 64 * 1024);
+ status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0);
+ return status;
+}
+
+int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
+{
+ int status;
+ status = __pnp_bios_get_dev_node( nodenum, boot, data );
+ if ( status )
+ printk(KERN_WARNING "PnPBIOS: get_dev_node: Unexpected 0x%x\n", status);
+ return status;
+}
+
+
+/*
+ * Call PnP BIOS with function 0x02, "set system device node"
+ * Input: *nodenum = desired node,
+ * boot = whether to set nonvolatile boot (!=0)
+ * or volatile current (0) config
+ */
+static int __pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ if ( !boot & pnpbios_dont_use_current_config )
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, /* *((u16 *) data)*/ 65536);
+ status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1, boot ? 2 : 1, PNP_DS, 0, 0);
+ return status;
+}
+
+int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
+{
+ int status;
+ status = __pnp_bios_set_dev_node( nodenum, boot, data );
+ if ( status ) {
+ printk(KERN_WARNING "PnPBIOS: set_dev_node: Unexpected set_dev_node status 0x%x\n", status);
+ return status;
+ }
+ if ( !boot ) {
+ /* Update devlist */
+ u8 thisnodenum = nodenum;
+ status = __pnp_bios_get_dev_node( &nodenum, boot, data );
+ if ( status ) {
+ printk(KERN_WARNING "PnPBIOS: set_dev_node: Unexpected get_dev_node status 0x%x\n", status);
+ return status;
+ }
+ update_devlist( thisnodenum, data );
+ }
+ return status;
+}
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x03, "get event"
+ */
+static int pnp_bios_get_event(u16 *event)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, event, sizeof(u16));
+ status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x04, "send message"
+ */
+static int pnp_bios_send_message(u16 message)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ status = call_pnp_bios(PNP_SEND_MESSAGE, message, PNP_DS, 0, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG
+/*
+ * Call PnP BIOS with function 0x05, "get docking station information"
+ */
+static int pnp_bios_dock_station_info(struct pnp_docking_station_info *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, sizeof(struct pnp_docking_station_info));
+ status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x09, "set statically allocated resource
+ * information"
+ */
+static int pnp_bios_set_stat_res(char *info)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, info, *((u16 *) info));
+ status = call_pnp_bios(PNP_SET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x0a, "get statically allocated resource
+ * information"
+ */
+static int pnp_bios_get_stat_res(char *info)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, info, 64 * 1024);
+ status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x0b, "get APM id table"
+ */
+static int pnp_bios_apm_id_table(char *table, u16 *size)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, table, *size);
+ Q2_SET_SEL(PNP_TS2, size, sizeof(u16));
+ status = call_pnp_bios(PNP_GET_APM_ID_TABLE, 0, PNP_TS2, 0, PNP_TS1, PNP_DS, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x40, "get isa pnp configuration structure"
+ */
+static int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, sizeof(struct pnp_isa_config_struc));
+ status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x41, "get ESCD info"
+ */
+static int pnp_bios_escd_info(struct escd_info_struc *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return ESCD_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, sizeof(struct escd_info_struc));
+ status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS function 0x42, "read ESCD"
+ * nvram_base is determined by calling escd_info
+ */
+static int pnp_bios_read_escd(char *data, u32 nvram_base)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return ESCD_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, 64 * 1024);
+ set_base(gdt[PNP_TS2 >> 3], nvram_base);
+ set_limit(gdt[PNP_TS2 >> 3], 64 * 1024);
+ status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS function 0x43, "write ESCD"
+ */
+static int pnp_bios_write_escd(char *data, u32 nvram_base)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return ESCD_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, 64 * 1024);
+ set_base(gdt[PNP_TS2 >> 3], nvram_base);
+ set_limit(gdt[PNP_TS2 >> 3], 64 * 1024);
+ status = call_pnp_bios(PNP_WRITE_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0);
+ return status;
+}
+#endif
+
+
+/*
+ *
+ * DOCKING FUNCTIONS
+ *
+ */
+
+#ifdef CONFIG_HOTPLUG
+
+static int unloading = 0;
+static struct completion unload_sem;
+
+/*
+ * (Much of this belongs in a shared routine somewhere)
+ */
+
+static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
+{
+ char *argv [3], **envp, *buf, *scratch;
+ int i = 0, value;
+
+ if (!hotplug_path [0])
+ return -ENOENT;
+ if (!current->fs->root) {
+ return -EAGAIN;
+ }
+ if (!(envp = (char **) pnpbios_kmalloc (20 * sizeof (char *), GFP_KERNEL))) {
+ return -ENOMEM;
+ }
+ if (!(buf = pnpbios_kmalloc (256, GFP_KERNEL))) {
+ kfree (envp);
+ return -ENOMEM;
+ }
+
+ /* only one standardized param to hotplug command: type */
+ argv [0] = hotplug_path;
+ argv [1] = "dock";
+ argv [2] = 0;
+
+ /* minimal command environment */
+ envp [i++] = "HOME=/";
+ envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+
+#ifdef DEBUG
+ /* hint that policy agent should enter no-stdout debug mode */
+ envp [i++] = "DEBUG=kernel";
+#endif
+ /* extensible set of named bus-specific parameters,
+ * supporting multiple driver selection algorithms.
+ */
+ scratch = buf;
+
+ /* action: add, remove */
+ envp [i++] = scratch;
+ scratch += sprintf (scratch, "ACTION=%s", dock?"add":"remove") + 1;
+
+ /* Report the ident for the dock */
+ envp [i++] = scratch;
+ scratch += sprintf (scratch, "DOCK=%x/%x/%x",
+ info->location_id, info->serial, info->capabilities);
+ envp[i] = 0;
+
+ value = call_usermodehelper (argv [0], argv, envp);
+ kfree (buf);
+ kfree (envp);
+	return value;
+}
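+
+/*
+ * In other words, a dock insertion results in roughly the following
+ * (hypothetical) agent invocation:
+ *
+ *	/sbin/hotplug dock
+ *
+ * with ACTION=add and DOCK=<location_id>/<serial>/<capabilities> in
+ * the environment; undocking runs the same agent with ACTION=remove.
+ */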
+
+/*
+ * Poll the PnP docking at regular intervals
+ */
+static int pnp_dock_thread(void * unused)
+{
+ static struct pnp_docking_station_info now;
+ int docked = -1, d;
+ daemonize();
+ reparent_to_init();
+ strcpy(current->comm, "kpnpbios");
+ while(!unloading && !signal_pending(current))
+ {
+ int err;
+
+ /*
+ * Poll every 2 seconds
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ*2);
+ if(signal_pending(current))
+ break;
+
+ err = pnp_bios_dock_station_info(&now);
+
+ switch(err)
+ {
+ /*
+ * No dock to manage
+ */
+ case PNP_FUNCTION_NOT_SUPPORTED:
+ complete_and_exit(&unload_sem, 0);
+ case PNP_SYSTEM_NOT_DOCKED:
+ d = 0;
+ break;
+ case PNP_SUCCESS:
+ d = 1;
+ break;
+ default:
+ printk(KERN_WARNING "PnPBIOS: pnp_dock_thread: Unexpected status 0x%x returned by BIOS.\n", err);
+ continue;
+ }
+ if(d != docked)
+ {
+ if(pnp_dock_event(d, &now)==0)
+ {
+ docked = d;
+#if 0
+ printk(KERN_INFO "PnPBIOS: Docking station %stached.\n", docked?"at":"de");
+#endif
+ }
+ }
+ }
+ complete_and_exit(&unload_sem, 0);
+}
+
+#endif /* CONFIG_HOTPLUG */
+
+
+/*
+ *
+ * NODE DATA PARSING FUNCTIONS
+ *
+ */
+
+static void add_irqresource(struct pci_dev *dev, int irq)
+{
+ int i = 0;
+	while (i < DEVICE_COUNT_IRQ && !(dev->irq_resource[i].flags & IORESOURCE_UNSET)) i++;
+ if (i < DEVICE_COUNT_IRQ) {
+ dev->irq_resource[i].start = (unsigned long) irq;
+ dev->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
+ }
+}
+
+static void add_dmaresource(struct pci_dev *dev, int dma)
+{
+ int i = 0;
+	while (i < DEVICE_COUNT_DMA && !(dev->dma_resource[i].flags & IORESOURCE_UNSET)) i++;
+ if (i < DEVICE_COUNT_DMA) {
+ dev->dma_resource[i].start = (unsigned long) dma;
+ dev->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag
+ }
+}
+
+static void add_ioresource(struct pci_dev *dev, int io, int len)
+{
+ int i = 0;
+	while (i < DEVICE_COUNT_RESOURCE && !(dev->resource[i].flags & IORESOURCE_UNSET)) i++;
+ if (i < DEVICE_COUNT_RESOURCE) {
+ dev->resource[i].start = (unsigned long) io;
+ dev->resource[i].end = (unsigned long)(io + len - 1);
+ dev->resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag
+ }
+}
+
+static void add_memresource(struct pci_dev *dev, int mem, int len)
+{
+ int i = 0;
+	while (i < DEVICE_COUNT_RESOURCE && !(dev->resource[i].flags & IORESOURCE_UNSET)) i++;
+ if (i < DEVICE_COUNT_RESOURCE) {
+ dev->resource[i].start = (unsigned long) mem;
+ dev->resource[i].end = (unsigned long)(mem + len - 1);
+ dev->resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag
+ }
+}
+
+static void node_resource_data_to_dev(struct pnp_bios_node *node, struct pci_dev *dev)
+{
+ unsigned char *p = node->data, *lastp=NULL;
+ int i;
+
+ /*
+ * First, set resource info to default values
+ */
+ for (i=0;i<DEVICE_COUNT_RESOURCE;i++) {
+ dev->resource[i].start = 0; // "disabled"
+ dev->resource[i].flags = IORESOURCE_UNSET;
+ }
+ for (i=0;i<DEVICE_COUNT_IRQ;i++) {
+ dev->irq_resource[i].start = (unsigned long)-1; // "disabled"
+ dev->irq_resource[i].flags = IORESOURCE_UNSET;
+ }
+ for (i=0;i<DEVICE_COUNT_DMA;i++) {
+ dev->dma_resource[i].start = (unsigned long)-1; // "disabled"
+ dev->dma_resource[i].flags = IORESOURCE_UNSET;
+ }
+
+ /*
+ * Fill in dev resource info
+ */
+ while ( (char *)p < ((char *)node->data + node->size )) {
+ if(p==lastp) break;
+
+ if( p[0] & 0x80 ) {// large item
+ switch (p[0] & 0x7f) {
+ case 0x01: // memory
+ {
+ int io = *(short *) &p[4];
+ int len = *(short *) &p[10];
+ add_memresource(dev, io, len);
+ break;
+ }
+ case 0x02: // device name
+ {
+ int len = *(short *) &p[1];
+ memcpy(dev->name, p + 3, len >= 80 ? 79 : len);
+ break;
+ }
+ case 0x05: // 32-bit memory
+ {
+ int io = *(int *) &p[4];
+ int len = *(int *) &p[16];
+ add_memresource(dev, io, len);
+ break;
+ }
+ case 0x06: // fixed location 32-bit memory
+ {
+ int io = *(int *) &p[4];
+ int len = *(int *) &p[8];
+ add_memresource(dev, io, len);
+ break;
+ }
+ } /* switch */
+ lastp = p+3;
+ p = p + p[1] + p[2]*256 + 3;
+ continue;
+ }
+ if ((p[0]>>3) == 0x0f) // end tag
+ break;
+ switch (p[0]>>3) {
+ case 0x04: // irq
+ {
+ int i, mask, irq = -1;
+ mask= p[1] + p[2]*256;
+ for (i=0;i<16;i++, mask=mask>>1)
+ if(mask & 0x01) irq=i;
+ add_irqresource(dev, irq);
+ break;
+ }
+ case 0x05: // dma
+ {
+ int i, mask, dma = -1;
+ mask = p[1];
+ for (i=0;i<8;i++, mask = mask>>1)
+ if(mask & 0x01) dma=i;
+ add_dmaresource(dev, dma);
+ break;
+ }
+ case 0x08: // io
+ {
+ int io= p[2] + p[3] *256;
+ int len = p[7];
+ add_ioresource(dev, io, len);
+ break;
+ }
+ case 0x09: // fixed location io
+ {
+ int io = p[1] + p[2] * 256;
+ int len = p[3];
+ add_ioresource(dev, io, len);
+ break;
+ }
+ } /* switch */
+ lastp=p+1;
+ p = p + (p[0] & 0x07) + 1;
+
+ } /* while */
+
+ return;
+}
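+
+/*
+ * Worked example (illustrative bytes, not from any real BIOS): a
+ * small-item IO port descriptor for a UART at 0x3f8 could arrive as
+ *
+ *	0x47 0x01 0xf8 0x03 0xf8 0x03 0x08 0x08
+ *
+ * The tag byte 0x47 is (0x08 << 3) | 7, i.e. small item 0x08 ("io")
+ * with 7 bytes of data; the loop above reads io = p[2] + p[3]*256 =
+ * 0x3f8 and len = p[7] = 8, then calls add_ioresource(dev, 0x3f8, 8).
+ */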
+
+
+/*
+ *
+ * DEVICE LIST MANAGEMENT FUNCTIONS
+ *
+ *
+ * Some of these are exported to give public access
+ *
+ * Question: Why maintain a device list when the PnP BIOS can
+ * list devices for us? Answer: Some PnP BIOSes can't report
+ * the current configuration, only the boot configuration.
+ * The boot configuration can be changed, so we need to keep
+ * a record of what the configuration was when we booted;
+ * presumably it continues to describe the current config.
+ * For those BIOSes that can change the current config, we
+ * keep the information in the devlist up to date.
+ *
+ * Note that it is currently assumed that the list does not
+ * grow or shrink in size after init time, and slot_name
+ * never changes. The list is protected by a spinlock.
+ */
+
+static LIST_HEAD(pnpbios_devices);
+
+static spinlock_t pnpbios_devices_lock;
+
+static inline int insert_device(struct pci_dev *dev)
+{
+
+ /*
+ * FIXME: Check for re-add of existing node;
+ * return -1 if node already present
+ */
+
+ /* We don't lock because we only do this at init time */
+ list_add_tail(&dev->global_list, &pnpbios_devices);
+
+ return 0;
+}
+
+#define HEX(id,a) hex[((id)>>a) & 15]
+#define CHAR(id,a) (0x40 + (((id)>>a) & 31))
+static inline void pnpid32_to_pnpid(u32 id, char *str)
+{
+ const char *hex = "0123456789abcdef";
+
+ id = be32_to_cpu(id);
+ str[0] = CHAR(id, 26);
+ str[1] = CHAR(id, 21);
+	str[2] = CHAR(id, 16);
+ str[3] = HEX(id, 12);
+ str[4] = HEX(id, 8);
+ str[5] = HEX(id, 4);
+ str[6] = HEX(id, 0);
+ str[7] = '\0';
+
+ return;
+}
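+
+/*
+ * Example (illustrative): on a little-endian box an eisa_id of
+ * 0x010cd041 gives be32_to_cpu(id) = 0x41d00c01; the three 5-bit
+ * fields decode to 'P', 'N', 'P' and the four nibbles to "0c01",
+ * producing the id string "PNP0c01".
+ */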
+#undef CHAR
+#undef HEX
+
+/*
+ * Build a linked list of pci_devs in order of ascending node number
+ * Called only at init time.
+ */
+static void __init build_devlist(void)
+{
+ int i;
+ int nodenum;
+ int nodes_got = 0;
+ int devs = 0;
+ struct pnp_bios_node *node;
+ struct pnp_dev_node_info node_info;
+ struct pci_dev *dev;
+
+ if (!pnp_bios_present ())
+ return;
+
+ if (pnp_bios_dev_node_info(&node_info) != 0)
+ return;
+
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node)
+ return;
+
+ for(i=0,nodenum=0; i<0xff && nodenum!=0xff; i++) {
+ int thisnodenum = nodenum;
+ /* For now we build the list from the "boot" config
+ * because asking for the "current" config causes
+ * some BIOSes to crash. */
+ if (pnp_bios_get_dev_node((u8 *)&nodenum, (char )1 , node)) {
+ printk(KERN_WARNING "PnPBIOS: PnP BIOS reported error on attempt to get dev node.\n");
+ break;
+ }
+ /* The BIOS returns with nodenum = the next node number */
+ if (nodenum < thisnodenum) {
+ printk(KERN_WARNING "PnPBIOS: Node number is out of sequence. Naughty BIOS!\n");
+ break;
+ }
+ nodes_got++;
+ dev = pnpbios_kmalloc(sizeof (struct pci_dev), GFP_KERNEL);
+ if (!dev)
+ break;
+ memset(dev,0,sizeof(struct pci_dev));
+ dev->devfn=thisnodenum;
+ memcpy(dev->name,"PNPBIOS",8);
+ pnpid32_to_pnpid(node->eisa_id,dev->slot_name);
+ node_resource_data_to_dev(node,dev);
+ if(insert_device(dev)<0)
+ kfree(dev);
+ else
+ devs++;
+ }
+ kfree(node);
+
+ printk(KERN_INFO "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver.\n",
+ nodes_got, nodes_got != 1 ? "s" : "", devs);
+}
+
+static struct pci_dev *find_device_by_nodenum( u8 nodenum )
+{
+ struct pci_dev *dev;
+
+ pnpbios_for_each_dev(dev) {
+ if(dev->devfn == nodenum)
+ return dev;
+ }
+
+ return NULL;
+}
+
+static void update_devlist( u8 nodenum, struct pnp_bios_node *data )
+{
+ unsigned long flags;
+ struct pci_dev *dev;
+
+ spin_lock_irqsave(&pnpbios_devices_lock, flags);
+ dev = find_device_by_nodenum( nodenum );
+ if ( dev ) {
+ node_resource_data_to_dev(data,dev);
+ }
+ spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+
+ return;
+}
+
+
+/*
+ *
+ * DRIVER REGISTRATION FUNCTIONS
+ *
+ *
+ * Exported to give public access
+ *
+ */
+
+static LIST_HEAD(pnpbios_drivers);
+
+static const struct pnpbios_device_id *
+match_device(const struct pnpbios_device_id *ids, const struct pci_dev *dev)
+{
+ while (*ids->id)
+ {
+ if(memcmp(ids->id, dev->slot_name, 7)==0)
+ return ids;
+ ids++;
+ }
+ return NULL;
+}
+
+static int announce_device(struct pnpbios_driver *drv, struct pci_dev *dev)
+{
+ const struct pnpbios_device_id *id;
+ struct pci_dev tmpdev;
+ int ret;
+
+ if (drv->id_table) {
+ id = match_device(drv->id_table, dev);
+ if (!id)
+ return 0;
+ } else
+ id = NULL;
+
+ memcpy( &tmpdev, dev, sizeof(struct pci_dev));
+ tmpdev.global_list.prev = NULL;
+ tmpdev.global_list.next = NULL;
+
+ dev_probe_lock();
+ /* Obviously, probe() should not call any pnpbios functions */
+ ret = drv->probe(&tmpdev, id);
+ dev_probe_unlock();
+ if (ret < 1)
+ return 0;
+
+ dev->driver = (void *)drv;
+
+ return 1;
+}
+
+/**
+ * pnpbios_register_driver - register a new PnPBIOS driver
+ * @drv: the driver structure to register
+ *
+ * Adds the driver structure to the list of registered drivers
+ *
+ * For each device in the pnpbios device list that matches one of
+ * the ids in drv->id_table, calls the driver's "probe" function with
+ * arguments (1) a pointer to a *temporary* struct pci_dev containing
+ * resource info for the device, and (2) a pointer to the id string
+ * of the device. Expects the probe function to return 1 if the
+ * driver claims the device (otherwise 0) in which case, marks the
+ * device as having this driver.
+ *
+ * Returns the number of PnP devices which were claimed by the driver
+ * during registration. The driver remains registered even if the
+ * return value is zero.
+ */
+int pnpbios_register_driver(struct pnpbios_driver *drv)
+{
+ struct pci_dev *dev;
+ unsigned long flags;
+ int count = 0;
+
+ list_add_tail(&drv->node, &pnpbios_drivers);
+ spin_lock_irqsave(&pnpbios_devices_lock, flags);
+ pnpbios_for_each_dev(dev) {
+ if (!pnpbios_dev_driver(dev))
+ count += announce_device(drv, dev);
+ }
+ spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+ return count;
+}
+
+EXPORT_SYMBOL(pnpbios_register_driver);
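+
+/*
+ * Minimal usage sketch (hypothetical driver; only fields exercised by
+ * the code above are shown). The id table is terminated by an empty
+ * id string, as match_device() expects:
+ *
+ *	static struct pnpbios_device_id mydrv_ids[] = {
+ *		{ "PNP0510" },
+ *		{ "" }
+ *	};
+ *	static struct pnpbios_driver mydrv = {
+ *		id_table:	mydrv_ids,
+ *		probe:		mydrv_probe,
+ *		remove:		mydrv_remove,
+ *	};
+ *
+ *	claimed = pnpbios_register_driver(&mydrv);
+ */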
+
+/**
+ * pnpbios_unregister_driver - unregister a PnPBIOS driver
+ * @drv: the driver structure to unregister
+ *
+ * Deletes the driver structure from the list of registered PnPBIOS
+ * drivers, gives it a chance to clean up by calling its "remove"
+ * function for each device it was responsible for, and marks those
+ * devices as driverless.
+ */
+void pnpbios_unregister_driver(struct pnpbios_driver *drv)
+{
+ unsigned long flags;
+ struct pci_dev *dev;
+
+ list_del(&drv->node);
+ spin_lock_irqsave(&pnpbios_devices_lock, flags);
+ pnpbios_for_each_dev(dev) {
+ if (dev->driver == (void *)drv) {
+ if (drv->remove)
+ drv->remove(dev);
+ dev->driver = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+}
+
+EXPORT_SYMBOL(pnpbios_unregister_driver);
+
+
+/*
+ *
+ * RESOURCE RESERVATION FUNCTIONS
+ *
+ *
+ * Used only at init time
+ *
+ */
+
+static void __init reserve_ioport_range(char *pnpid, int start, int end)
+{
+ struct resource *res;
+ char *regionid;
+
+ regionid = pnpbios_kmalloc(16, GFP_KERNEL);
+ if ( regionid == NULL )
+ return;
+ sprintf(regionid, "PnPBIOS %s", pnpid);
+ res = request_region(start,end-start+1,regionid);
+ if ( res == NULL )
+ kfree( regionid );
+ else
+ res->flags &= ~IORESOURCE_BUSY;
+ /*
+ * Failures at this point are usually harmless. pci quirks for
+ * example do reserve stuff they know about too, so we may well
+ * have double reservations.
+ */
+ printk(KERN_INFO
+ "PnPBIOS: %s: ioport range 0x%x-0x%x %s reserved.\n",
+ pnpid, start, end,
+ NULL != res ? "has been" : "could not be"
+ );
+
+ return;
+}
+
+static void __init reserve_resources_of_dev( struct pci_dev *dev )
+{
+ int i;
+
+ for (i=0;i<DEVICE_COUNT_RESOURCE;i++) {
+ if ( dev->resource[i].flags & IORESOURCE_UNSET )
+ /* end of resources */
+ break;
+ if (dev->resource[i].flags & IORESOURCE_IO) {
+ /* ioport */
+ if ( dev->resource[i].start == 0 )
+ /* disabled */
+ /* Do nothing */
+ continue;
+ if ( dev->resource[i].start < 0x100 )
+ /*
+ * Below 0x100 is only standard PC hardware
+ * (pics, kbd, timer, dma, ...)
+ * We should not get resource conflicts there,
+ * and the kernel reserves these anyway
+ * (see arch/i386/kernel/setup.c).
+ * So, do nothing
+ */
+ continue;
+ if ( dev->resource[i].end < dev->resource[i].start )
+ /* invalid endpoint */
+ /* Do nothing */
+ continue;
+ reserve_ioport_range(
+ dev->slot_name,
+ dev->resource[i].start,
+ dev->resource[i].end
+ );
+ } else if (dev->resource[i].flags & IORESOURCE_MEM) {
+ /* iomem */
+ /* For now do nothing */
+ continue;
+ } else {
+ /* Neither ioport nor iomem */
+ /* Do nothing */
+ continue;
+ }
+ }
+
+ return;
+}
+
+static void __init reserve_resources( void )
+{
+ struct pci_dev *dev;
+
+ pnpbios_for_each_dev(dev) {
+ if (
+ 0 != strcmp(dev->slot_name,"PNP0c01") && /* memory controller */
+ 0 != strcmp(dev->slot_name,"PNP0c02") /* system peripheral: other */
+ ) {
+ continue;
+ }
+ reserve_resources_of_dev(dev);
+ }
+
+ return;
+}
+
+
+/*
+ *
+ * INIT AND EXIT
+ *
+ */
+
+extern int is_sony_vaio_laptop;
+
+static int pnpbios_disabled; /* = 0 */
+static int dont_reserve_resources; /* = 0 */
+int pnpbios_dont_use_current_config; /* = 0 */
+
+#ifndef MODULE
+static int __init pnpbios_setup(char *str)
+{
+ int invert;
+
+ while ((str != NULL) && (*str != '\0')) {
+ if (strncmp(str, "off", 3) == 0)
+ pnpbios_disabled=1;
+ if (strncmp(str, "on", 2) == 0)
+ pnpbios_disabled=0;
+ invert = (strncmp(str, "no-", 3) == 0);
+ if (invert)
+ str += 3;
+ if (strncmp(str, "curr", 4) == 0)
+ pnpbios_dont_use_current_config = invert;
+ if (strncmp(str, "res", 3) == 0)
+ dont_reserve_resources = invert;
+ str = strchr(str, ',');
+ if (str != NULL)
+ str += strspn(str, ", \t");
+ }
+
+ return 1;
+}
+
+__setup("pnpbios=", pnpbios_setup);
+#endif
+
+subsys_initcall(pnpbios_init);
+
+void __init pnpbios_init(void)
+{
+ union pnp_bios_expansion_header *check;
+ u8 sum;
+ int i, length;
+
+ spin_lock_init(&pnp_bios_lock);
+ spin_lock_init(&pnpbios_devices_lock);
+
+ if(pnpbios_disabled) {
+ printk(KERN_INFO "PnPBIOS: Disabled.\n");
+ return;
+ }
+
+ if ( is_sony_vaio_laptop )
+ pnpbios_dont_use_current_config = 1;
+
+ /*
+ * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS
+	 * structure and, if one is found, set up the selectors and
+	 * entry points.
+ */
+ for (check = (union pnp_bios_expansion_header *) __va(0xf0000);
+ check < (union pnp_bios_expansion_header *) __va(0xffff0);
+ ((void *) (check)) += 16) {
+ if (check->fields.signature != PNP_SIGNATURE)
+ continue;
+ length = check->fields.length;
+ if (!length)
+ continue;
+ for (sum = 0, i = 0; i < length; i++)
+ sum += check->chars[i];
+ if (sum)
+ continue;
+ if (check->fields.version < 0x10) {
+ printk(KERN_WARNING "PnPBIOS: PnP BIOS version %d.%d is not supported.\n",
+ check->fields.version >> 4,
+ check->fields.version & 15);
+ continue;
+ }
+ printk(KERN_INFO "PnPBIOS: Found PnP BIOS installation structure at 0x%p.\n", check);
+ printk(KERN_INFO "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x.\n",
+ check->fields.version >> 4, check->fields.version & 15,
+ check->fields.pm16cseg, check->fields.pm16offset,
+ check->fields.pm16dseg);
+ Q2_SET_SEL(PNP_CS32, &pnp_bios_callfunc, 64 * 1024);
+ Q_SET_SEL(PNP_CS16, check->fields.pm16cseg, 64 * 1024);
+ Q_SET_SEL(PNP_DS, check->fields.pm16dseg, 64 * 1024);
+ pnp_bios_callpoint.offset = check->fields.pm16offset;
+ pnp_bios_callpoint.segment = PNP_CS16;
+ pnp_bios_hdr = check;
+ break;
+ }
+ build_devlist();
+ if ( ! dont_reserve_resources )
+ reserve_resources();
+#ifdef CONFIG_PROC_FS
+ pnpbios_proc_init();
+#endif
+#ifdef CONFIG_HOTPLUG
+ init_completion(&unload_sem);
+ if(kernel_thread(pnp_dock_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL)>0)
+ unloading = 0;
+#endif
+}
+
+#ifdef MODULE
+
+MODULE_LICENSE("GPL");
+
+/* We have to run it early and not as a module. */
+module_init(pnpbios_init);
+
+#ifdef CONFIG_HOTPLUG
+static void pnpbios_exit(void)
+{
+ /* free_resources() ought to go here */
+ /* pnpbios_proc_done() */
+ unloading = 1;
+ wait_for_completion(&unload_sem);
+}
+
+module_exit(pnpbios_exit);
+
+#endif
+#endif
--- /dev/null
+/*
+ * pnpbios_proc.c: /proc/bus/pnp interface for Plug and Play devices
+ *
+ * Written by David Hinds, dahinds@users.sourceforge.net
+ */
+
+#define __NO_VERSION__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/pnpbios.h>
+
+static struct proc_dir_entry *proc_pnp = NULL;
+static struct proc_dir_entry *proc_pnp_boot = NULL;
+static struct pnp_dev_node_info node_info;
+
+static int proc_read_devices(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ struct pnp_bios_node *node;
+ int i;
+ u8 nodenum;
+ char *p = buf;
+
+ if (pos != 0) {
+ *eof = 1;
+ return 0;
+ }
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node) return -ENOMEM;
+ for (i=0,nodenum=0;i<0xff && nodenum!=0xff; i++) {
+ if ( pnp_bios_get_dev_node(&nodenum, 1, node) )
+ break;
+ p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
+ node->handle, node->eisa_id,
+ node->type_code[0], node->type_code[1],
+ node->type_code[2], node->flags);
+ }
+ kfree(node);
+ return (p-buf);
+}
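+
+/*
+ * A line of /proc/bus/pnp/devices therefore looks something like this
+ * (hypothetical values):
+ *
+ *	02	010cd041	08:00:00	0080
+ *
+ * i.e. node handle, raw EISA id, the three type-code bytes, and the
+ * node flags.
+ */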
+
+static int proc_read_node(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ struct pnp_bios_node *node;
+ int boot = (long)data >> 8;
+ u8 nodenum = (long)data;
+ int len;
+
+ if (pos != 0) {
+ *eof = 1;
+ return 0;
+ }
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node) return -ENOMEM;
+ if ( pnp_bios_get_dev_node(&nodenum, boot, node) )
+ return -EIO;
+ len = node->size - sizeof(struct pnp_bios_node);
+ memcpy(buf, node->data, len);
+ kfree(node);
+ return len;
+}
+
+static int proc_write_node(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ struct pnp_bios_node *node;
+ int boot = (long)data >> 8;
+ u8 nodenum = (long)data;
+
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node) return -ENOMEM;
+ if ( pnp_bios_get_dev_node(&nodenum, boot, node) )
+ return -EIO;
+ if (count != node->size - sizeof(struct pnp_bios_node))
+ return -EINVAL;
+ memcpy(node->data, buf, count);
+ if (pnp_bios_set_dev_node(node->handle, boot, node) != 0)
+ return -EINVAL;
+ kfree(node);
+ return count;
+}
+
+/*
+ * When this is called, pnpbios functions are assumed to
+ * work and the pnpbios_dont_use_current_config flag
+ * should already have been set to the appropriate value
+ */
+void pnpbios_proc_init( void )
+{
+ struct pnp_bios_node *node;
+ struct proc_dir_entry *ent;
+ char name[3];
+ int i;
+ u8 nodenum;
+
+ if (pnp_bios_dev_node_info(&node_info) != 0) return;
+
+ proc_pnp = proc_mkdir("pnp", proc_bus);
+ if (!proc_pnp) return;
+ proc_pnp_boot = proc_mkdir("boot", proc_pnp);
+ if (!proc_pnp_boot) return;
+ create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL);
+
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node) return;
+ for (i=0,nodenum = 0; i<0xff && nodenum != 0xff; i++) {
+ if (pnp_bios_get_dev_node(&nodenum, 1, node) != 0)
+ break;
+ sprintf(name, "%02x", node->handle);
+ if ( !pnpbios_dont_use_current_config ) {
+ ent = create_proc_entry(name, 0, proc_pnp);
+ if (ent) {
+ ent->read_proc = proc_read_node;
+ ent->write_proc = proc_write_node;
+ ent->data = (void *)(long)(node->handle);
+ }
+ }
+ ent = create_proc_entry(name, 0, proc_pnp_boot);
+ if (ent) {
+ ent->read_proc = proc_read_node;
+ ent->write_proc = proc_write_node;
+ ent->data = (void *)(long)(node->handle+0x100);
+ }
+ }
+ kfree(node);
+}
+
+void pnpbios_proc_done(void)
+{
+ int i;
+ char name[3];
+
+ if (!proc_pnp) return;
+
+ for (i=0; i<0xff; i++) {
+ sprintf(name, "%02x", i);
+ if ( !pnpbios_dont_use_current_config )
+ remove_proc_entry(name, proc_pnp);
+ remove_proc_entry(name, proc_pnp_boot);
+ }
+ remove_proc_entry("boot", proc_pnp);
+ remove_proc_entry("devices", proc_pnp);
+ remove_proc_entry("pnp", proc_bus);
+}
TW_Command *command_packet;
if (test_and_set_bit(TW_IN_INTR, &tw_dev->flags))
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
if (tw_dev->tw_pci_dev->irq == irq) {
spin_lock(&tw_dev->tw_lock);
}
spin_unlock(&tw_dev->tw_lock);
}
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
clear_bit(TW_IN_INTR, &tw_dev->flags);
} /* End tw_interrupt() */
return 0;
}
- spin_unlock_irq(&io_request_lock);
ret = tw_findcards(tw_host);
- spin_lock_irq(&io_request_lock);
return ret;
} /* End tw_scsi_detect() */
}
/* We have to let AEN requests through before the reset */
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(tw_dev->host->host_lock);
mdelay(TW_AEN_WAIT_TIME);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(tw_dev->host->host_lock);
spin_lock(&tw_dev->tw_lock);
tw_dev->num_aborts++;
}
/* We have to let AEN requests through before the reset */
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(tw_dev->host->host_lock);
mdelay(TW_AEN_WAIT_TIME);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(tw_dev->host->host_lock);
spin_lock(&tw_dev->tw_lock);
tw_dev->num_resets++;
if (!search) {
#ifdef __powerpc__
- if (request_irq(host->irq, do_NCR53c7x0_intr, SA_SHIRQ, "53c7,8xx", NULL))
+ if (request_irq(host->irq, do_NCR53c7x0_intr, SA_SHIRQ, "53c7,8xx", host))
#else
- if (request_irq(host->irq, do_NCR53c7x0_intr, SA_INTERRUPT, "53c7,8xx", NULL))
+ if (request_irq(host->irq, do_NCR53c7x0_intr, SA_INTERRUPT, "53c7,8xx", host))
#endif
{
do_NCR53c7x0_intr(int irq, void *dev_id, struct pt_regs * regs) {
unsigned long flags;
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
NCR53c7x0_intr(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
/*
__u8 pun = 0xff, lun = 0xff;
unsigned long flags;
- /* Unfortunately, we have to take the io_request_lock here
- * rather than the host lock hostdata->lock because we're
- * looking to exclude queuecommand from messing with the
- * registers while we're processing the interrupt. Since
- * queuecommand is called holding io_request_lock, and we have
- * to take io_request_lock before we call the command
- * scsi_done, we would get a deadlock if we took
- * hostdata->lock here and in queuecommand (because the order
- * of locking in queuecommand: 1) io_request_lock then 2)
- * hostdata->lock would be the reverse of taking it in this
- * routine */
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
if((istat = NCR_700_readb(host, ISTAT_REG))
& (SCSI_INT_PENDING | DMA_INT_PENDING)) {
__u32 dsps;
}
}
out_unlock:
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
/* FIXME: Need to put some proc information in and plumb it
static void do_AM53C974_intr(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
AM53C974_intr(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
/************************************************************************
void BusLogic_AcquireHostAdapterLockIH(BusLogic_HostAdapter_T *HostAdapter,
ProcessorFlags_T *ProcessorFlags)
{
- spin_lock_irqsave(&HostAdapter->SCSI_Host->host_lock, *ProcessorFlags);
+ spin_lock_irqsave(HostAdapter->SCSI_Host->host_lock, *ProcessorFlags);
}
void BusLogic_ReleaseHostAdapterLockIH(BusLogic_HostAdapter_T *HostAdapter,
ProcessorFlags_T *ProcessorFlags)
{
- spin_unlock_irqrestore(&HostAdapter->SCSI_Host->host_lock, *ProcessorFlags);
+ spin_unlock_irqrestore(HostAdapter->SCSI_Host->host_lock, *ProcessorFlags);
}
}
restore_flags(flags);
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(instance->host_lock, flags);
run_main();
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(instance->host_lock, flags);
}
#endif /* def USLEEP */
NCR5380_setup(instance);
for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
- if ((mask & possible) && (request_irq(i, &probe_intr, SA_INTERRUPT, "NCR-probe", NULL)
+ if ((mask & possible) && (request_irq(i, &probe_intr, SA_INTERRUPT, "NCR-probe", instance)
== 0))
trying_irqs |= mask;
* this should prevent any race conditions.
*/
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
save_flags(flags);
break;
} /* for instance */
} while (!done);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
/* cli();*/
main_running = 0;
}
{
unsigned long timeout = jiffies + NCR_TIMEOUT;
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK
&& time_before(jiffies, timeout));
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
if (time_after_eq(jiffies, timeout) )
printk("scsi%d: timeout at NCR5380.c:%d\n",
static void do_NCR5380_intr(int irq, void *dev_id, struct pt_regs *regs) {
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
NCR5380_intr(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
#endif
{
unsigned long timeout = jiffies + 2 * NCR_TIMEOUT;
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
&& time_before(jiffies,timeout));
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
if (time_after_eq(jiffies,timeout)) {
printk("scsi: arbitration timeout at %d\n", __LINE__);
hostdata->selecting = 0; /* clear this pointer, because we passed the
waiting period */
#else
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) &
(SR_BSY | SR_IO)));
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
#endif
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
(SR_SEL | SR_IO)) {
{
unsigned long timeout = jiffies + NCR_TIMEOUT;
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
while (!(NCR5380_read(STATUS_REG) & SR_REQ) && time_before(jiffies, timeout));
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
if (time_after_eq(jiffies, timeout)) {
printk("scsi%d: timeout at NCR5380.c:%d\n", instance->host_no, __LINE__);
struct NCR_ESP *esp;
unsigned long flags;
int again;
+ struct Scsi_Host *dev = dev_id;
/* Handle all ESP interrupts showing at this IRQ level. */
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
repeat:
again = 0;
for_each_esp(esp) {
}
if(again)
goto repeat;
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
#else
/* For SMP we only service one ESP on the list at our IRQ level! */
{
struct NCR_ESP *esp;
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
/* Handle all ESP interrupts showing at this IRQ level. */
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
for_each_esp(esp) {
if(((esp)->irq & 0xf) == irq) {
if(esp->dma_irq_p(esp)) {
}
}
out:
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
#endif
DEB(printk("NCR53c406a: using port_base %x\n", port_base));
if(irq_level > 0) {
- if(request_irq(irq_level, do_NCR53c406a_intr, 0, "NCR53c406a", NULL)){
+ if(request_irq(irq_level, do_NCR53c406a_intr, 0, "NCR53c406a", shpnt)){
printk("NCR53c406a: unable to allocate IRQ %d\n", irq_level);
goto err_release;
}
static void
do_NCR53c406a_intr(int unused, void *dev_id, struct pt_regs *regs){
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host * dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
NCR53c406a_intr(0, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void
continue;
if (status & ISTR_INTS) {
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(instance->host_lock, flags);
wd33c93_intr (instance);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(instance->host_lock, flags);
}
}
}
{
unsigned long flags;
unsigned int status = DMA(a3000_host)->ISTR;
-
+ struct Scsi_Host *dev = dummy;
+
if (!(status & ISTR_INT_P))
return;
if (status & ISTR_INTS)
{
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
wd33c93_intr (a3000_host);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
} else
printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n",
status);
/* host_lock taken by mid-level prior to call but need to protect */
/* against own ISR */
- spin_lock_irqsave(&boardp->lock, flags);
+ spin_lock_irqsave(boardp->lock, flags);
/*
* Block new commands while handling a reset or abort request.
ASC_STATS(scp->host, done);
ASC_ASSERT(scp->scsi_done != NULL);
if (from_isr)
- spin_lock_irqsave(&scp->host->host_lock, flags);
+ spin_lock_irqsave(scp->host->host_lock, flags);
scp->scsi_done(scp);
if (from_isr)
- spin_unlock_irqrestore(&scp->host->host_lock, flags);
+ spin_unlock_irqrestore(scp->host->host_lock, flags);
scp = tscp;
}
ASC_DBG(2, "asc_scsi_done_list: done\n");
printk(KERN_INFO "aha152x%d: trying software interrupt, ", HOSTNO);
SETPORT(DMACNTRL0, SWINT|INTEN);
- spin_unlock_irq(&shpnt->host_lock);
+ spin_unlock_irq(shpnt->host_lock);
mdelay(1000);
- spin_lock_irq(&shpnt->host_lock);
+ spin_lock_irq(shpnt->host_lock);
free_irq(shpnt->irq, shpnt);
if (!HOSTDATA(shpnt)->swint) {
if (!shost)
panic("Splunge!");
- spin_lock_irqsave(&shost->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
aha1542_intr_handle(shost, dev_id, regs);
- spin_unlock_irqrestore(&shost->host_lock, flags);
+ spin_unlock_irqrestore(shost->host_lock, flags);
}
/* A "high" level interrupt handler */
* check for timeout, and if we are doing something like this
* we are pretty desperate anyways.
*/
- spin_unlock_irq(&SCpnt->host->host_lock);
+ spin_unlock_irq(SCpnt->host->host_lock);
scsi_sleep(4 * HZ);
- spin_lock_irq(&SCpnt->host->host_lock);
+ spin_lock_irq(SCpnt->host->host_lock);
WAIT(STATUS(SCpnt->host->io_port),
STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
* check for timeout, and if we are doing something like this
* we are pretty desperate anyways.
*/
- spin_unlock_irq(&SCpnt->host->host_lock);
+ spin_unlock_irq(SCpnt->host->host_lock);
scsi_sleep(4 * HZ);
- spin_lock_irq(&SCpnt->host->host_lock);
+ spin_lock_irq(SCpnt->host->host_lock);
WAIT(STATUS(SCpnt->host->io_port),
STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
if (!host)
panic("aha1740.c: Irq from unknown host!\n");
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
base = host->io_port;
number_serviced = 0;
number_serviced++;
}
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
/*
* Complete the command
*/
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(workrequ->host->host_lock, flags);
(*workrequ->scsi_done) (workrequ);
/*
*/
dev->id[target_id].curr_req = 0;
dev->working--;
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(workrequ->host->host_lock, flags);
/*
* Take it back wide
*/
esp->irq = IRQ_AMIGA_PORTS;
esp->slot = board+REAL_BLZ1230_ESP_ADDR;
if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "Blizzard 1230 SCSI IV", esp_intr))
+ "Blizzard 1230 SCSI IV", esp->ehost))
goto err_out;
/* Figure out our scsi ID on the bus */
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "Blizzard 2060 SCSI", esp_intr);
+ "Blizzard 2060 SCSI", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
cpqfcHBAdata->notify_wt = &sem;
/* must unlock before kernel_thread(), for it may cause a reschedule. */
- spin_unlock_irq(&HostAdapter->host_lock);
+ spin_unlock_irq(HostAdapter->host_lock);
kernel_thread((int (*)(void *))cpqfcTSWorkerThread,
(void *) HostAdapter, 0);
/*
*/
down (&sem);
- spin_lock_irq(&HostAdapter->host_lock);
+ spin_lock_irq(HostAdapter->host_lock);
cpqfcHBAdata->notify_wt = NULL;
LEAVE("launch_FC_worker_thread");
// start our kernel worker thread
- spin_lock_irq(&HostAdapter->host_lock);
+ spin_lock_irq(HostAdapter->host_lock);
launch_FCworker_thread(HostAdapter);
unsigned long stop_time;
- spin_unlock_irq(&HostAdapter->host_lock);
+ spin_unlock_irq(HostAdapter->host_lock);
stop_time = jiffies + 4*HZ;
while ( time_before(jiffies, stop_time) )
schedule(); // (our worker task needs to run)
}
- spin_lock_irq(&HostAdapter->host_lock);
+ spin_lock_irq(HostAdapter->host_lock);
NumberOfAdapters++;
- spin_unlock_irq(&HostAdapter->host_lock);
+ spin_unlock_irq(HostAdapter->host_lock);
} // end of while()
}
int retval;
Scsi_Device *SDpnt = Cmnd->device;
// printk(" ENTERING cpqfcTS_eh_device_reset() \n");
- spin_unlock_irq(&Cmnd->host->host_lock);
+ spin_unlock_irq(Cmnd->host->host_lock);
retval = cpqfcTS_TargetDeviceReset( SDpnt, 0);
- spin_lock_irq(&Cmnd->host->host_lock);
+ spin_lock_irq(Cmnd->host->host_lock);
return retval;
}
UCHAR IntPending;
ENTER("intr_handler");
- spin_lock_irqsave( &HostAdapter->host_lock, flags);
+ spin_lock_irqsave( HostAdapter->host_lock, flags);
// is this our INT?
IntPending = readb( cpqfcHBA->fcChip.Registers.INTPEND.address);
}
}
}
- spin_unlock_irqrestore( &HostAdapter->host_lock, flags);
+ spin_unlock_irqrestore( HostAdapter->host_lock, flags);
LEAVE("intr_handler");
}
PCI_TRACE( 0x90)
// first, take the IO lock so the SCSI upper layers can't call
// into our _quecommand function (this also disables INTs)
- spin_lock_irqsave( &HostAdapter->host_lock, flags); // STOP _que function
+ spin_lock_irqsave( HostAdapter->host_lock, flags); // STOP _que function
PCI_TRACE( 0x90)
CPQ_SPINLOCK_HBA( cpqfcHBAdata)
PCI_TRACE( 0x90)
// release the IO lock (and re-enable interrupts)
- spin_unlock_irqrestore( &HostAdapter->host_lock, flags);
+ spin_unlock_irqrestore( HostAdapter->host_lock, flags);
// disable OUR HBA interrupt (keep them off as much as possible
// during error recovery)
goto Skip;
// STOP _que function
- spin_lock_irqsave( &cpqfcHBAdata->HostAdapter->host_lock, flags);
+ spin_lock_irqsave( cpqfcHBAdata->HostAdapter->host_lock, flags);
PCI_TRACE( 0xA8)
cpqfcHBAdata->BoardLock = &BoardLock; // stop Linux SCSI command queuing
// release the IO lock (and re-enable interrupts)
- spin_unlock_irqrestore( &cpqfcHBAdata->HostAdapter->host_lock, flags);
+ spin_unlock_irqrestore( cpqfcHBAdata->HostAdapter->host_lock, flags);
// Ensure no contention from _quecommand or Worker process
CPQ_SPINLOCK_HBA( cpqfcHBAdata)
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "CyberStorm SCSI", esp_intr);
+ "CyberStorm SCSI", esp->ehost);
/* Figure out our scsi ID on the bus */
/* The DMA cond flag contains a hardcoded jumper bit
* which can be used to select host number 6 or 7.
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "CyberStorm SCSI Mk II", esp_intr);
+ "CyberStorm SCSI Mk II", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
esp_initialize(esp);
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
- "NCR 53C94 SCSI", NULL))
+ "NCR 53C94 SCSI", esp->ehost))
goto err_dealloc;
if (request_irq(SCSI_DMA_INT, scsi_dma_int, SA_INTERRUPT,
- "JUNKIO SCSI DMA", NULL))
+ "JUNKIO SCSI DMA", esp->ehost))
goto err_free_irq;
}
esp->dma_advance_sg = 0;
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
- "PMAZ_AA", NULL)) {
+ "PMAZ_AA", esp->ehost)) {
esp_deallocate(esp);
release_tc_card(slot);
continue;
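
The esp-family request_irq() fixes above all enforce one convention: dev_id must identify the host rather than being NULL or, worse, the handler function itself, so that a shared-IRQ handler can reach the per-host lock and a later free_irq() with the same cookie matches the registration. A generic sketch with illustrative names:

static void my_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct Scsi_Host *host = dev_id;	/* what we registered below */
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	/* ... service the controller ... */
	spin_unlock_irqrestore(host->host_lock, flags);
}

/* registration and teardown must use the same dev_id cookie */
if (request_irq(host->irq, my_intr, SA_SHIRQ, "my-scsi", host))
	return -EBUSY;
/* ... later ... */
free_irq(host->irq, host);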
timeout *= HZ;
if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&pHba->host->host_lock);
+ spin_unlock_irq(pHba->host->host_lock);
if (!timeout)
schedule();
else
schedule_timeout(timeout*HZ);
- spin_lock_irq(&pHba->host->host_lock);
+ spin_lock_irq(pHba->host->host_lock);
}
wq_write_lock_irq(&adpt_wq_i2o_post.lock);
__remove_wait_queue(&adpt_wq_i2o_post, &wait);
}
do {
- spin_lock_irqsave(&pHba->host->host_lock, flags);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
// This state stops any new commands from entering the
// controller while processing the ioctl
// pHba->state |= DPTI_STATE_IOCTL;
// the queue empties and stops. We need a way to restart the queue
rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
// pHba->state &= ~DPTI_STATE_IOCTL;
- spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
} while(rcode == -ETIMEDOUT);
if(rcode){
break;
}
case I2ORESETCMD:
- spin_lock_irqsave(&pHba->host->host_lock, flags);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
adpt_hba_reset(pHba);
- spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
break;
case I2ORESCANCMD:
adpt_rescan(pHba);
printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
return;
}
- spin_lock_irqsave(&pHba->host->host_lock, flags);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
m = readl(pHba->reply_port);
if(m == EMPTY_QUEUE){
wmb();
rmb();
}
-out: spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+out: spin_unlock_irqrestore(pHba->host->host_lock, flags);
}
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, Scsi_Cmnd* cmd, struct adpt_device* d)
s32 rcode;
ulong flags;
- spin_lock_irqsave(&pHba->host->host_lock, flags);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
goto out;
if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
goto out;
rcode = 0;
-out: spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+out: spin_unlock_irqrestore(pHba->host->host_lock, flags);
return rcode;
}
/* With interrupts enabled, it will sometimes hang when doing heavy
* reads. So better not enable them until I figure it out. */
if (instance->irq != IRQ_NONE)
- if (request_irq(instance->irq, do_dtc_intr, SA_INTERRUPT, "dtc")) {
+ if (request_irq(instance->irq, do_dtc_intr, SA_INTERRUPT, "dtc", instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = IRQ_NONE;
HD(j)->in_reset = TRUE;
- spin_unlock_irq(&sh[j]->host_lock);
+ spin_unlock_irq(sh[j]->host_lock);
time = jiffies;
while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
- spin_lock_irq(&sh[j]->host_lock);
+ spin_lock_irq(sh[j]->host_lock);
printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
/* Check if the interrupt must be processed by this handler */
if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return;
- spin_lock_irqsave(&sh[j]->host_lock, spin_flags);
+ spin_lock_irqsave(sh[j]->host_lock, spin_flags);
ihdlr(irq, j);
- spin_unlock_irqrestore(&sh[j]->host_lock, spin_flags);
+ spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
}
int eata2x_release(struct Scsi_Host *shpnt) {
void do_eata_int_handler(int irq, void *dev_id, struct pt_regs * regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
eata_int_handler(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
void eata_int_handler(int irq, void *dev_id, struct pt_regs * regs)
if (reg_IRQ[i] >= 1){ /* exchange the interrupt handler which */
free_irq(i, NULL); /* we used for probing with the real one */
request_irq(i, (void *)(do_eata_int_handler), SA_INTERRUPT|SA_SHIRQ,
- "eata_dma", NULL);
+ "eata_dma", first_HBA); /* Check it */
}
}
void do_eata_pio_int_handler(int irq, void *dev_id, struct pt_regs * regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
eata_pio_int_handler(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs * regs)
return (FALSE);
}
+ request_region(base, 8, "eata_pio");
+
+ size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz));
+
+ sh = scsi_register(tpnt, size);
+ if(sh == NULL)
+ {
+ release_region(base, 8);
+ return FALSE;
+ }
+
if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
if (!request_irq(gc->IRQ, do_eata_pio_int_handler, SA_INTERRUPT,
- "EATA-PIO", NULL)){
+ "EATA-PIO", sh)){
reg_IRQ[gc->IRQ]++;
if (!gc->IRQ_TR)
reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
} else {
printk("Couldn't allocate IRQ %d, Sorry.\n", gc->IRQ);
+ release_region(base, 8);
return (FALSE);
}
} else { /* More than one HBA on this IRQ */
if (reg_IRQL[gc->IRQ] == TRUE) {
printk("Can't support more than one HBA on this IRQ,\n"
" if the IRQ is edge triggered. Sorry.\n");
+ release_region(base, 8);
return (FALSE);
} else
reg_IRQ[gc->IRQ]++;
}
- request_region(base, 8, "eata_pio");
-
- size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz));
-
- sh = scsi_register(tpnt, size);
- if(sh == NULL)
- {
- release_region(base, 8);
- return FALSE;
- }
-
hd = SD(sh);
memset(hd->ccb, 0, (sizeof(struct eata_ccb) * ntohs(gc->queuesiz)));
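
The eata_pio reordering above exists because request_irq() now needs the registered Scsi_Host as its dev_id: the region grab and scsi_register() move in front of the IRQ setup, and each failure branch gains a release_region() to unwind what was already taken. Condensed (note the patch itself does not unregister sh on IRQ failure):

request_region(base, 8, "eata_pio");
sh = scsi_register(tpnt, size);
if (sh == NULL) {
	release_region(base, 8);
	return FALSE;
}
if (request_irq(gc->IRQ, do_eata_pio_int_handler, SA_INTERRUPT,
		"EATA-PIO", sh)) {	/* sh is a valid dev_id now */
	release_region(base, 8);
	return FALSE;
}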
* sanely maintain.
*/
if (request_irq(esp->ehost->irq, esp_intr,
- SA_SHIRQ, "ESP SCSI", esp)) {
+ SA_SHIRQ, "ESP SCSI", esp->ehost)) {
printk("esp%d: Cannot acquire irq line\n",
esp->esp_id);
return -1;
esp->irq = IRQ_AMIGA_PORTS;
esp->slot = board+FASTLANE_ESP_ADDR;
if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "Fastlane SCSI", esp_intr)) {
+ "Fastlane SCSI", esp->ehost)) {
printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
goto err_unmap;
}
#if EVERY_ACCESS
printk( " AFAIL " );
#endif
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(shpnt->host_lock, flags);
my_done( shpnt, DID_BUS_BUSY << 16 );
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return;
}
current_SC->SCp.phase = in_selection;
#if EVERY_ACCESS
printk( " SFAIL " );
#endif
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(shpnt->host_lock, flags);
my_done( shpnt, DID_NO_CONNECT << 16 );
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return;
} else {
#if EVERY_ACCESS
#if EVERY_ACCESS
printk( "BEFORE MY_DONE. . ." );
#endif
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(shpnt->host_lock, flags);
my_done( shpnt,
(current_SC->SCp.Status & 0xff)
| ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
#if EVERY_ACCESS
printk( "RETURNING.\n" );
#endif
restore_flags( flags );
/* Aborts are not done well. . . */
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(shpnt->host_lock, flags);
my_done( shpnt, DID_ABORT << 16 );
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return SCSI_ABORT_SUCCESS;
}
#if EVERY_ACCESS
printk( " AFAIL " );
#endif
- spin_lock_irqsave(&current_SC->host->host_lock, flags);
+ spin_lock_irqsave(current_SC->host->host_lock, flags);
my_done( DID_BUS_BUSY << 16 );
- spin_unlock_irqrestore(&current_SC->host->host_lock, flags);
+ spin_unlock_irqrestore(current_SC->host->host_lock, flags);
return;
}
current_SC->SCp.phase = in_selection;
#if EVERY_ACCESS
printk( " SFAIL " );
#endif
- spin_lock_irqsave(&current_SC->host->host_lock, flags);
+ spin_lock_irqsave(current_SC->host->host_lock, flags);
my_done( DID_NO_CONNECT << 16 );
- spin_unlock_irqrestore(&current_SC->host->host_lock, flags);
+ spin_unlock_irqrestore(current_SC->host->host_lock, flags);
return;
} else {
#if EVERY_ACCESS
#if EVERY_ACCESS
printk( "BEFORE MY_DONE. . ." );
#endif
- spin_lock_irqsave(&current_SC->host->host_lock, flags);
+ spin_lock_irqsave(current_SC->host->host_lock, flags);
my_done( (current_SC->SCp.Status & 0xff)
| ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
- spin_unlock_irqrestore(&current_SC->host->host_lock, flags);
+ spin_unlock_irqrestore(current_SC->host->host_lock, flags);
#if EVERY_ACCESS
printk( "RETURNING.\n" );
#endif
#define GDTH_LOCK_HA(ha,flags) spin_lock_irqsave(&(ha)->smp_lock,flags)
#define GDTH_UNLOCK_HA(ha,flags) spin_unlock_irqrestore(&(ha)->smp_lock,flags)
-#define GDTH_LOCK_SCSI_DONE(flags) spin_lock_irqsave(&io_request_lock,flags)
-#define GDTH_UNLOCK_SCSI_DONE(flags) spin_unlock_irqrestore(&io_request_lock,flags)
-#define GDTH_LOCK_SCSI_DOCMD() spin_lock_irq(&io_request_lock)
-#define GDTH_UNLOCK_SCSI_DOCMD() spin_unlock_irq(&io_request_lock)
+#define GDTH_LOCK_SCSI_DONE(dev, flags) spin_lock_irqsave(dev->host_lock,flags)
+#define GDTH_UNLOCK_SCSI_DONE(dev, flags) spin_unlock_irqrestore(dev->host_lock,flags)
+#define GDTH_LOCK_SCSI_DOCMD(dev) spin_lock_irq(dev->host_lock)
+#define GDTH_UNLOCK_SCSI_DOCMD(dev) spin_unlock_irq(dev->host_lock)
#else
#define GDTH_INIT_LOCK_HA(ha) do {} while (0)
#define GDTH_LOCK_HA(ha,flags) do {save_flags(flags); cli();} while (0)
#define GDTH_UNLOCK_HA(ha,flags) do {restore_flags(flags);} while (0)
-#define GDTH_LOCK_SCSI_DONE(flags) do {} while (0)
-#define GDTH_UNLOCK_SCSI_DONE(flags) do {} while (0)
-#define GDTH_LOCK_SCSI_DOCMD() do {} while (0)
-#define GDTH_UNLOCK_SCSI_DOCMD() do {} while (0)
+#define GDTH_LOCK_SCSI_DONE(dev, flags) do {} while (0)
+#define GDTH_UNLOCK_SCSI_DONE(dev, flags) do {} while (0)
+#define GDTH_LOCK_SCSI_DOCMD(dev) do {} while (0)
+#define GDTH_UNLOCK_SCSI_DOCMD(dev) do {} while (0)
#endif
/* LILO and modprobe/insmod parameters */
if (rval == 2) {
gdth_putq(hanum,scp,scp->SCp.this_residual);
} else if (rval == 1) {
- GDTH_LOCK_SCSI_DONE(flags);
+ GDTH_LOCK_SCSI_DONE(scp->host, flags);
scp->scsi_done(scp);
- GDTH_UNLOCK_SCSI_DONE(flags);
+ GDTH_UNLOCK_SCSI_DONE(scp->host, flags);
}
gdth_next(hanum);
}
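
The gdth macros only work once the host is an explicit argument; the old bodies named the io_request_lock global, which no longer exists, and a body that references dev without receiving it compiles only by accident of scope. A consistent parameterization matching the call sites above (the guard name is illustrative; gdth uses its own SMP conditionals):

#ifdef CONFIG_SMP
#define GDTH_LOCK_SCSI_DONE(dev, flags) \
	spin_lock_irqsave((dev)->host_lock, (flags))
#define GDTH_UNLOCK_SCSI_DONE(dev, flags) \
	spin_unlock_irqrestore((dev)->host_lock, (flags))
#else
#define GDTH_LOCK_SCSI_DONE(dev, flags)		do {} while (0)
#define GDTH_UNLOCK_SCSI_DONE(dev, flags)	do {} while (0)
#endif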
if (!(status & GVP11_DMAC_INT_PENDING))
continue;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(instance->host_lock, flags);
wd33c93_intr (instance);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(instance->host_lock, flags);
}
}
/* current version of this driver-source: */
#define IBMMCA_SCSI_DRIVER_VERSION "4.0b"
-#define IBMLOCK spin_lock_irqsave(&io_request_lock, flags);
-#define IBMUNLOCK spin_unlock_irqrestore(&io_request_lock, flags);
+#define IBMLOCK(dev) spin_lock_irqsave(((struct Scsi_Host *)(dev))->host_lock, flags);
+#define IBMUNLOCK(dev) spin_unlock_irqrestore(((struct Scsi_Host *)(dev))->host_lock, flags);
/* driver configuration */
#define IM_MAX_HOSTS 8 /* maximum number of host adapters */
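
Note that IBMLOCK/IBMUNLOCK still capture the local variable flags implicitly; only the host became a parameter. Since the interrupt handler passes its void *dev_id straight in, the macro body needs the cast (added above) for the expansion to compile. IBMLOCK(dev_id) then expands to roughly:

spin_lock_irqsave(((struct Scsi_Host *)(dev_id))->host_lock, flags);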
Scsi_Cmnd *cmd;
int lastSCSI;
- IBMLOCK
+ IBMLOCK(dev_id)
/* search for one adapter-response on shared interrupt */
for (host_index=0;
hosts[host_index] && !(inb(IM_STAT_REG(host_index)) & IM_INTR_REQUEST);
host_index++);
/* return if some other device on this IRQ caused the interrupt */
if (!hosts[host_index]) {
- IBMUNLOCK
+ IBMUNLOCK(dev_id)
return;
}
if ((reset_status(host_index) == IM_RESET_NOT_IN_PROGRESS_NO_INT)||
(reset_status(host_index) == IM_RESET_FINISHED_OK_NO_INT)) {
reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS;
- IBMUNLOCK
+ IBMUNLOCK(dev_id)
return;
}
/*must wait for attention reg not busy, then send EOI to subsystem */
while (1) {
if (!(inb (IM_STAT_REG(host_index)) & IM_BUSY)) break;
- IBMUNLOCK /* cycle interrupt */
- IBMLOCK
+ IBMUNLOCK(dev_id) /* cycle interrupt */
+ IBMLOCK(dev_id)
}
ihost_index=host_index;
/*get command result and logical device */
/* get the last_scsi_command here */
lastSCSI = last_scsi_command(ihost_index)[ldn];
outb (IM_EOI | ldn, IM_ATTN_REG(ihost_index));
- IBMUNLOCK
+ IBMUNLOCK(dev_id)
/*these should never happen (hw fails, or a local programming bug) */
if (!global_command_error_excuse) {
switch (cmd_result) {
unsigned long flags;
/* must wait for attention reg not busy */
while (1) {
- IBMLOCK
+ IBMLOCK(hosts[host_index])
if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY)) break;
- IBMUNLOCK
+ IBMUNLOCK(hosts[host_index])
}
/* write registers and enable system interrupts */
outl (cmd_reg, IM_CMD_REG(host_index));
outb (attn_reg, IM_ATTN_REG(host_index));
- IBMUNLOCK
+ IBMUNLOCK(hosts[host_index])
return;
}
unsigned int pos[8];
unsigned long flags;
- IBMLOCK
+ IBMLOCK(dev)
shpnt = dev; /* assign host-structure to local pointer */
len = 0; /* set filled text-buffer index to 0 */
/* get the _special contents of the hostdata structure */
while ( len % sizeof( int ) != ( sizeof ( int ) - 1 ) )
len += sprintf (buf+len, " ");
len += sprintf (buf+len, "\n");
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return len;
}
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort subroutine called...\n");
#endif
- IBMLOCK
+ IBMLOCK(cmd->host)
shpnt = cmd->host;
/* search for the right hostadapter */
for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
cmd->result = DID_NO_CONNECT << 16;
if (cmd->scsi_done) (cmd->scsi_done) (cmd);
shpnt = cmd->host;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort adapter selection failed!\n");
#endif
/*if cmd for this ldn has already finished, no need to abort */
if (!ld(host_index)[ldn].cmd) {
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return SCSI_ABORT_NOT_RUNNING;
}
while (1) {
if (!(inb (IM_STAT_REG(host_index)) & IM_BUSY))
break;
- IBMUNLOCK
- IBMLOCK
+ IBMUNLOCK(shpnt)
+ IBMLOCK(shpnt)
}
/* write registers and enable system interrupts */
outl (imm_command, IM_CMD_REG(host_index));
outb (IM_IMM_CMD | ldn, IM_ATTN_REG(host_index));
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort queued to adapter...\n");
#endif
/*if abort went well, call saved done, then return success or error */
if (cmd->result == (DID_ABORT << 16)) {
- IBMLOCK
+ IBMLOCK(shpnt)
cmd->result |= DID_ABORT << 16;
if (cmd->scsi_done) (cmd->scsi_done) (cmd);
ld(host_index)[ldn].cmd = NULL;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort finished with success.\n");
#endif
return SCSI_ABORT_SUCCESS;
} else {
- IBMLOCK
+ IBMLOCK(shpnt)
cmd->result |= DID_NO_CONNECT << 16;
if (cmd->scsi_done) (cmd->scsi_done) (cmd);
ld(host_index)[ldn].cmd = NULL;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort failed.\n");
#endif
printk("IBM MCA SCSI: Reset called with NULL-command!\n");
return(SCSI_RESET_SNOOZE);
}
- IBMLOCK
+ IBMLOCK(cmd->host)
ticks = IM_RESET_DELAY*HZ;
shpnt = cmd->host;
/* search for the right hostadapter */
if (local_checking_phase_flag(host_index)) {
printk("IBM MCA SCSI: unable to reset while checking devices.\n");
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return SCSI_RESET_SNOOZE;
}
while (1) {
if (!(inb (IM_STAT_REG(host_index)) & IM_BUSY))
break;
- IBMUNLOCK
- IBMLOCK
+ IBMUNLOCK(shpnt)
+ IBMLOCK(shpnt)
}
/*write registers and enable system interrupts */
outl (imm_command, IM_CMD_REG(host_index));
printk("IBM MCA SCSI: reset did not complete within %d seconds.\n",
IM_RESET_DELAY);
reset_status(host_index) = IM_RESET_FINISHED_FAIL;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return SCSI_RESET_ERROR;
}
/* if reset failed, just return an error */
if (reset_status(host_index) == IM_RESET_FINISHED_FAIL) {
printk("IBM MCA SCSI: reset failed.\n");
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return SCSI_RESET_ERROR;
}
/* so reset finished ok - call outstanding done's, and return success */
printk ("IBM MCA SCSI: Reset successfully completed.\n");
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
for (i = 0; i < MAX_LOG_DEV; i++) {
cmd_aid = ld(host_index)[i].cmd;
if (cmd_aid && cmd_aid->scsi_done) {
unsigned long flags;
int max_pun;
- IBMLOCK
for (i = 0; hosts[i] && hosts[i]->host_no != hostno; i++);
+ IBMLOCK(hosts[i]) /* Check it */
shpnt = hosts[i];
host_index = i;
if (!shpnt) {
*start = buffer + offset;
len -= offset;
if (len > length) len = length;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return len;
}
if (cmd->SCp.phase > 0)
imm_pb_release(cmd->host->unique_id);
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
tmp->cur_cmd = 0;
cmd->scsi_done(cmd);
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
return;
}
# define in2000__INITFUNC(function) __initfunc(function)
# define in2000__INIT __init
# define in2000__INITDATA __initdata
-# define CLISPIN_LOCK(host,flags) spin_lock_irqsave(&host->host_lock, flags)
-# define CLISPIN_UNLOCK(host,flags) spin_unlock_irqrestore(&host->host_lock, \
+# define CLISPIN_LOCK(host,flags) spin_lock_irqsave(host->host_lock, flags)
+# define CLISPIN_UNLOCK(host,flags) spin_unlock_irqrestore(host->host_lock, \
flags)
int in2000_detect(Scsi_Host_Template *) in2000__INIT;
static void i91u_intr0(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[0].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[0]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr1(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[1].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[1]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr2(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[2].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[2]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr3(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[3].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[3]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr4(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[4].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[4]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr5(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[5].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[5]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr6(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[6].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[6]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr7(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[7].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[7]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
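
All eight i91u handlers are clones differing only in the tul_hcs[] index. The patch keeps the clones; a hypothetical compaction (not what the driver does) could recover the index from the host it now receives, assuming unique_id were set to the HCS index at detect time:

static void i91u_intr(int irqno, void *dev_id, struct pt_regs *regs)
{
	struct Scsi_Host *dev = dev_id;
	int i = dev->unique_id;		/* assumption: filled in at detect */
	unsigned long flags;

	if (tul_hcs[i].HCS_Intr != irqno)
		return;
	spin_lock_irqsave(dev->host_lock, flags);
	tul_isr(&tul_hcs[i]);
	spin_unlock_irqrestore(dev->host_lock, flags);
}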
/*
}
-static void subIntr(ORC_HCS * pHCB, int irqno)
+static void subIntr(ORC_HCS * pHCB, int irqno, struct Scsi_Host *dev)
{
unsigned long flags;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
if (pHCB->HCS_Intr != irqno) {
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
orc_interrupt(pHCB);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
/*
*/
static void inia100_intr0(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[0], irqno);
+ subIntr(&orc_hcs[0], irqno, dev_id);
}
static void inia100_intr1(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[1], irqno);
+ subIntr(&orc_hcs[1], irqno, dev_id);
}
static void inia100_intr2(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[2], irqno);
+ subIntr(&orc_hcs[2], irqno, dev_id);
}
static void inia100_intr3(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[3], irqno);
+ subIntr(&orc_hcs[3], irqno, dev_id);
}
static void inia100_intr4(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[4], irqno);
+ subIntr(&orc_hcs[4], irqno, dev_id);
}
static void inia100_intr5(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[5], irqno);
+ subIntr(&orc_hcs[5], irqno, dev_id);
}
static void inia100_intr6(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[6], irqno);
+ subIntr(&orc_hcs[6], irqno, dev_id);
}
static void inia100_intr7(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[7], irqno);
+ subIntr(&orc_hcs[7], irqno, dev_id);
}
/*
char *kern_area;
u_int32_t datasize;
- spin_unlock_irq(&SC->host->host_lock);
+ spin_unlock_irq(SC->host->host_lock);
/* wait for the command to finish */
down(&ha->ioctl_sem);
/* reobtain the lock */
- spin_lock_irq(&SC->host->host_lock);
+ spin_lock_irq(SC->host->host_lock);
/* command finished -- copy back */
user_area = *((char **) &SC->cmnd[4]);
METHOD_TRACE("do_ipsintr", 2);
- spin_lock_irqsave(&host->host_lock, cpu_flags);
+ spin_lock_irqsave(host->host_lock, cpu_flags);
if (test_and_set_bit(IPS_IN_INTR, &ha->flags)) {
- spin_unlock_irqrestore(&host->host_lock, cpu_flags);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags);
return ;
}
if (!ha) {
clear_bit(IPS_IN_INTR, &ha->flags);
- spin_unlock_irqrestore(&host->host_lock, cpu_flags);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags);
return;
}
if (!ha->active) {
clear_bit(IPS_IN_INTR, &ha->flags);
- spin_unlock_irqrestore(&host->host_lock, cpu_flags);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags);
return;
}
clear_bit(IPS_IN_INTR, &ha->flags);
- spin_unlock_irqrestore(&host->host_lock, cpu_flags);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags);
/* start the next command */
ips_next(ha, IPS_INTR_ON);
task.data = (void *) &flash_data;
/* Unlock the per-board lock */
- spin_unlock_irq(&SC->host->host_lock);
+ spin_unlock_irq(SC->host->host_lock);
queue_task(&task, &tq_immediate);
mark_bh(IMMEDIATE_BH);
down(&ha->flash_ioctl_sem);
/* Obtain the per-board lock */
- spin_lock_irq(&SC->host->host_lock);
+ spin_lock_irq(SC->host->host_lock);
return (flash_data.retcode);
}
task.data = (void *) &flash_data;
/* Unlock the per-board lock */
- spin_unlock_irq(&SC->host->host_lock);
+ spin_unlock_irq(SC->host->host_lock);
queue_task(&task, &tq_immediate);
mark_bh(IMMEDIATE_BH);
down(&ha->flash_ioctl_sem);
/* Obtain the per-board lock */
- spin_lock_irq(&SC->host->host_lock);
+ spin_lock_irq(SC->host->host_lock);
return (flash_data.retcode);
}
* this command won't time out
*/
if (intr == IPS_INTR_ON) {
- spin_lock_irqsave(&host->host_lock, cpu_flags2);
+ spin_lock_irqsave(host->host_lock, cpu_flags2);
intr_status = IPS_INTR_IORL;
} else {
intr_status = intr;
}
if (intr == IPS_INTR_ON)
- spin_unlock_irqrestore(&host->host_lock, cpu_flags2);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags2);
#ifndef NO_IPS_CMDLINE
/*
* We were called under the HA lock so we can assume that interrupts
* are masked.
*/
- spin_lock(&host->host_lock);
+ spin_lock(host->host_lock);
while (test_and_set_bit(IPS_IN_INTR, &ha->flags))
udelay(1000);
clear_bit(IPS_IN_INTR, &ha->flags);
- spin_unlock(&host->host_lock);
+ spin_unlock(host->host_lock);
}
udelay(1000); /* 1 millisecond */
esp->irq = JAZZ_SCSI_IRQ;
request_irq(JAZZ_SCSI_IRQ, esp_intr, SA_INTERRUPT, "JAZZ SCSI",
- NULL);
+ esp->ehost);
/*
* FIXME, look if the scsi id is available from NVRAM
do_mac53c94_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->host;
+
+ spin_lock_irqsave(dev->host_lock, flags);
mac53c94_interrupt(irq, dev_id, ptregs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void
esp->irq = IRQ_MAC_SCSI;
- request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp);
+ request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp->ehost);
#if 0 /* conflicts with IOP ADB */
- request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp);
+ request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp->ehost);
#endif
if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
esp->irq = IRQ_MAC_SCSIDRQ;
#if 0 /* conflicts with IOP ADB */
- request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp);
+ request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp->ehost);
#endif
esp->cfreq = 25000000;
esp->slot = slot;
if (request_irq(esp->irq, esp_intr, 0,
- "NCR 53c9x SCSI", esp_intr))
+ "NCR 53c9x SCSI", esp->ehost))
{
printk("Unable to request IRQ %d.\n", esp->irq);
esp_deallocate(esp);
#define DRIVER_LOCK(p)
#define DRIVER_UNLOCK(p)
#define IO_LOCK_T unsigned long io_flags = 0
-#define IO_LOCK(host) spin_lock_irqsave(&(host)->host_lock,io_flags)
-#define IO_UNLOCK(host) spin_unlock_irqrestore(&(host)->host_lock,io_flags)
-#define IO_LOCK_IRQ(host) spin_lock_irq(&(host)->host_lock)
-#define IO_UNLOCK_IRQ(host) spin_unlock_irq(&(host)->host_lock)
+#define IO_LOCK(host) spin_lock_irqsave(host->host_lock,io_flags)
+#define IO_UNLOCK(host) spin_unlock_irqrestore(host->host_lock,io_flags)
+#define IO_LOCK_IRQ(host) spin_lock_irq(host->host_lock)
+#define IO_UNLOCK_IRQ(host) spin_unlock_irq(host->host_lock)
#define queue_task_irq(a,b) queue_task(a,b)
#define queue_task_irq_off(a,b) queue_task(a,b)
#define DRIVER_LOCK(p)
#define DRIVER_UNLOCK(p)
#define IO_LOCK_T unsigned long io_flags = 0
-#define IO_LOCK(host) spin_lock_irqsave(&io_request_lock,io_flags);
-#define IO_UNLOCK(host) spin_unlock_irqrestore(&io_request_lock,io_flags);
+#define IO_LOCK(host) spin_lock_irqsave(host->host_lock,io_flags);
+#define IO_UNLOCK(host) spin_unlock_irqrestore(host->host_lock,io_flags);
#define pci_free_consistent(a,b,c,d)
#define pci_unmap_single(a,b,c,d)
unsigned long flags;
for (;;) {
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(ms->host->host_lock, flags);
cmd = ms->completed_q;
if (cmd == NULL) {
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
break;
}
ms->completed_q = (Scsi_Cmnd *) cmd->host_scribble;
(*cmd->scsi_done)(cmd);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
}
}
#endif /* MESH_NEW_STYLE_EH */
do_mesh_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = ((struct mesh_state *)dev_id)->host;
+
+ spin_lock_irqsave(dev->host_lock, flags);
mesh_interrupt(irq, dev_id, ptregs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
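
mesh and mac53c94 show the second way of reaching the lock: their dev_id is a driver-private state structure, so the wrapper digs the Scsi_Host out of a back-pointer instead of casting dev_id itself. The shape, with placeholder names:

static void my_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct my_state *ms = dev_id;		/* driver-private cookie */
	struct Scsi_Host *dev = ms->host;	/* back-pointer to the host */
	unsigned long flags;

	spin_lock_irqsave(dev->host_lock, flags);
	/* ... real interrupt work ... */
	spin_unlock_irqrestore(dev->host_lock, flags);
}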
static void handle_error(struct mesh_state *ms)
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "BSC Oktagon SCSI", esp_intr);
+ "BSC Oktagon SCSI", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
if (instance->irq != IRQ_NONE)
- if (request_irq(instance->irq, do_pas16_intr, SA_INTERRUPT, "pas16", NULL)) {
+ if (request_irq(instance->irq, do_pas16_intr, SA_INTERRUPT, "pas16", instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = IRQ_NONE;
goto out;
}
- spin_lock_irqsave(&shost->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
padapter = HOSTDATA(shost);
tag0 = tag & 0x7F; // mask off the error bit
* Disable interrupts, if they aren't already disabled and acquire
* the I/O spinlock.
*/
- spin_lock_irqsave (&host->host_lock, flags);
+ spin_lock_irqsave (host->host_lock, flags);
DEB (printk ("\nPCI2220I: Timeout expired "));
if ( padapter->failinprog )
* which will enable interrupts if and only if they were
* enabled on entry.
*/
- spin_unlock_irqrestore (&host->host_lock, flags);
+ spin_unlock_irqrestore (host->host_lock, flags);
}
/****************************************************************
* Name: SetReconstruct :LOCAL
* Disable interrupts, if they aren't already disabled and acquire
* the I/O spinlock.
*/
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
if ( padapter->SCpnt )
goto reconTimerExpiry;
* which will enable interrupts if and only if they were
* enabled on entry.
*/
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
/****************************************************************
* Name: Irq_Handler :LOCAL
goto out;
}
- spin_lock_irqsave(&shost->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
padapter = HOSTDATA(shost);
pdev = padapter->pdev;
SCpnt = padapter->SCpnt;
OpDone (padapter, zl);
irq_return:
- spin_unlock_irqrestore(&shost->host_lock, flags);
+ spin_unlock_irqrestore(shost->host_lock, flags);
out:;
}
/****************************************************************
* unlock to allow the lowlevel parport driver to probe
* the irqs
*/
- spin_unlock_irq(&io_request_lock);
pb = parport_enumerate();
printk("ppa: Version %s\n", PPA_VERSION);
if (!pb) {
printk("ppa: parport reports no devices.\n");
- spin_lock_irq(&io_request_lock);
return 0;
}
retry_entry:
"pardevice is owning the port for too longtime!\n",
i);
parport_unregister_device(ppa_hosts[i].dev);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(ppa_hosts[i].cur_cmd->host->host_lock);
return 0;
}
}
printk(" supported by the imm (ZIP Plus) driver. If the\n");
printk(" cable is marked with \"AutoDetect\", this is what has\n");
printk(" happened.\n");
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(hreg->host_lock);
return 0;
}
try_again = 1;
goto retry_entry;
} else {
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(hreg->host_lock);
return 1; /* return number of hosts detected */
}
}
tmp->cur_cmd = 0;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(cmd->host->host_lock, flags);
cmd->scsi_done(cmd);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(cmd->host->host_lock, flags);
return;
}
static void do_Irq_Handler (int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
Irq_Handler(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
/****************************************************************
* Name: Psi240i_QueueCommand
save_flags (flags);
cli ();
- if ( request_irq (chipConfig.irq, do_Irq_Handler, 0, "psi240i", NULL) )
+ if ( request_irq (chipConfig.irq, do_Irq_Handler, 0, "psi240i", pshost) )
{
printk ("Unable to allocate IRQ for PSI-240I controller.\n");
restore_flags (flags);
return;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,95)
- spin_lock_irqsave(&io_request_lock, cpu_flags);
+ spin_lock_irqsave(ha->host->host_lock, cpu_flags);
if(test_and_set_bit(QLA1280_IN_ISR_BIT, &ha->flags))
{
COMTRACE('X')
- spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+ spin_unlock_irqrestore(ha->host->host_lock, cpu_flags);
return;
}
ha->isr_count++;
qla1280_done(ha, (srb_t **)&ha->done_q_first, (srb_t **)&ha->done_q_last);
clear_bit(QLA1280_IN_ISR_BIT, &ha->flags);
- spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+ spin_unlock_irqrestore(ha->host->host_lock, cpu_flags);
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95) */
if( test_bit(QLA1280_IN_ISR_BIT, (int *)&ha->flags) )
COMTRACE('p')
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,95)
- spin_lock_irqsave(&io_request_lock, cpu_flags);
+ spin_lock_irqsave(ha->host->host_lock, cpu_flags);
#endif
if (ha->flags.isp_abort_needed)
qla1280_abort_isp(ha);
qla1280_done(ha, (srb_t **)&ha->done_q_first, (srb_t **)&ha->done_q_last);
ha->flags.dpc_sched = FALSE;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,95)
- spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+ spin_unlock_irqrestore(ha->host->host_lock, cpu_flags);
#endif
}
unsigned long flags;
struct Scsi_Host *host = dev_id;
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
ql_ihandl(irq, dev_id, regs);
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
#endif
hostno = instance->host_no;
if (request_irq (irq, do_seagate_reconnect_intr, SA_INTERRUPT,
(controller_type == SEAGATE) ? "seagate" : "tmc-8xx",
- NULL)) {
+ instance)) {
printk ("scsi%d : unable to allocate IRQ%d\n", hostno, irq);
return 0;
}
static void do_seagate_reconnect_intr (int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave (&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave (dev->host_lock, flags);
seagate_reconnect_intr (irq, dev_id, regs);
- spin_unlock_irqrestore (&io_request_lock, flags);
+ spin_unlock_irqrestore (dev->host_lock, flags);
}
static void seagate_reconnect_intr (int irq, void *dev_id, struct pt_regs *regs)
static void sgiwd93_intr(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
wd33c93_intr((struct Scsi_Host *) dev_id);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
#undef DEBUG_DMA
struct Scsi_Host *host = dev_id;
unsigned long flags;
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
sim710_intr_handle(irq, host, regs);
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
esp->irq = 2;
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
- "SUN3X SCSI", NULL)) {
+ "SUN3X SCSI", esp->ehost)) {
esp_deallocate(esp);
return 0;
}
static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
{
+ struct Scsi_Host *dev = dev_id;
int base = 0;
int i;
unsigned long flags = 0;
}
/* Now we have the base address and we can start handling the interrupt */
- spin_lock_irqsave(&io_request_lock,flags);
+ spin_lock_irqsave(dev->host_lock,flags);
status_reg = inb(base + STATUS_REG);
pio_int_reg = inb(base + PIO_INT_REG);
int_reg = inb(base + INT_REG);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
/* First, we handle error conditions */
if(int_reg & SCI) /* SCSI Reset */
printk(KERN_DEBUG "sym53c416: Reset received\n");
current_command->SCp.phase = idle;
current_command->result = DID_RESET << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(int_reg & ILCMD) /* Illegal Command */
printk(KERN_WARNING "sym53c416: Illegal Command: 0x%02x.\n", inb(base + COMMAND_REG));
current_command->SCp.phase = idle;
current_command->result = DID_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(status_reg & GE) /* Gross Error */
printk(KERN_WARNING "sym53c416: Controller reports gross error.\n");
current_command->SCp.phase = idle;
current_command->result = DID_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(status_reg & PE) /* Parity Error */
printk(KERN_WARNING "sym53c416:SCSI parity error.\n");
current_command->SCp.phase = idle;
current_command->result = DID_PARITY << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(pio_int_reg & (CE | OUE))
printk(KERN_WARNING "sym53c416: PIO interrupt error.\n");
current_command->SCp.phase = idle;
current_command->result = DID_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(int_reg & DIS) /* Disconnect */
else
current_command->result = (current_command->SCp.Status & 0xFF) | ((current_command->SCp.Message & 0xFF) << 8) | (DID_OK << 16);
current_command->SCp.phase = idle;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
/* Now we handle SCSI phases */
cli();
/* FIXME: Request_irq with CLI is not safe */
/* Request for specified IRQ */
- if(request_irq(hosts[i].irq, sym53c416_intr_handle, 0, ID, NULL))
+ if(request_irq(hosts[i].irq, sym53c416_intr_handle, 0, ID, shpnt))
{
restore_flags(flags);
printk(KERN_ERR "sym53c416: Unable to assign IRQ %d\n", hosts[i].irq);
#define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
#define NCR_LOCK_SCSI_DONE(host, flags) \
- spin_lock_irqsave(&(host)->host_lock, flags)
+ spin_lock_irqsave((host)->host_lock, flags)
#define NCR_UNLOCK_SCSI_DONE(host, flags) \
- spin_unlock_irqrestore(&((host)->host_lock), flags)
+ spin_unlock_irqrestore(((host)->host_lock), flags)
#else
instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
if (instance->irq != IRQ_NONE)
- if (request_irq(instance->irq, do_t128_intr, SA_INTERRUPT, "t128", NULL)) {
+ if (request_irq(instance->irq, do_t128_intr, SA_INTERRUPT, "t128", instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = IRQ_NONE;
# define DC390_IFLAGS unsigned long iflags;
# define DC390_DFLAGS unsigned long dflags;
-# define DC390_LOCK_IO spin_lock_irqsave (&io_request_lock, iflags)
-# define DC390_UNLOCK_IO spin_unlock_irqrestore (&io_request_lock, iflags)
+# define DC390_LOCK_IO(dev) spin_lock_irqsave (((struct Scsi_Host *)(dev))->host_lock, iflags)
+# define DC390_UNLOCK_IO(dev) spin_unlock_irqrestore (((struct Scsi_Host *)(dev))->host_lock, iflags)
# define DC390_LOCK_DRV spin_lock_irqsave (&dc390_drvlock, dflags)
# define DC390_UNLOCK_DRV spin_unlock_irqrestore (&dc390_drvlock, dflags)
# define DC390_AFLAGS unsigned long aflags;
# define DC390_IFLAGS
# define DC390_DFLAGS unsigned long dflags;
-# define DC390_LOCK_IO /* spin_lock_irqsave (&io_request_lock, iflags) */
-# define DC390_UNLOCK_IO /* spin_unlock_irqrestore (&io_request_lock, iflags) */
+# define DC390_LOCK_IO(dev) /* spin_lock_irqsave (&io_request_lock, iflags) */
+# define DC390_UNLOCK_IO(dev) /* spin_unlock_irqrestore (&io_request_lock, iflags) */
# define DC390_LOCK_DRV spin_lock_irqsave (&dc390_drvlock, dflags)
# define DC390_UNLOCK_DRV spin_unlock_irqrestore (&dc390_drvlock, dflags)
# define DC390_LOCK_DRV_NI spin_lock (&dc390_drvlock)
# define DC390_IFLAGS unsigned long iflags;
# define DC390_DFLAGS unsigned long dflags;
spinlock_t dc390_drvlock = SPIN_LOCK_UNLOCKED;
-# define DC390_LOCK_IO spin_lock_irqsave (&io_request_lock, iflags)
-# define DC390_UNLOCK_IO spin_unlock_irqrestore (&io_request_lock, iflags)
+# define DC390_LOCK_IO(dev) spin_lock_irqsave (((struct Scsi_Host *)dev)->host_lock, iflags)
+# define DC390_UNLOCK_IO(dev) spin_unlock_irqrestore (((struct Scsi_Host *)dev)->host_lock, iflags)
# define DC390_LOCK_DRV spin_lock_irqsave (&dc390_drvlock, dflags)
# define DC390_UNLOCK_DRV spin_unlock_irqrestore (&dc390_drvlock, dflags)
# define DC390_LOCK_DRV_NI spin_lock (&dc390_drvlock)
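
With the call sites written as DC390_LOCK_IO(pACB->pScsiHost), all three variants of the macro must be function-like and accept the host, including the no-op stub; an object-like definition would leave a stray argument list behind after expansion (hence the (dev) added to the first variant above). On SMP the call expands to roughly:

spin_lock_irqsave(((struct Scsi_Host *)(pACB->pScsiHost))->host_lock, iflags);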
DC390_IFLAGS
DC390_AFLAGS
DEBUG0(printk ("DC390: Debug: Waiting queue woken up by timer!\n");)
- DC390_LOCK_IO;
+ DC390_LOCK_IO(pACB->pScsiHost);
DC390_LOCK_ACB;
dc390_Waiting_process (pACB);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
}
/***********************************************************************
DC390_AFLAGS
pos[length] = 0;
- DC390_LOCK_IO;
+ DC390_LOCK_IO(pACB->pScsiHost);
DC390_LOCK_ACB;
/* UPPERCASE */
/* Don't use kernel toupper, because of 2.0.x bug: ctmp unexported */
DC390_UNLOCK_ACB;
if (needs_inquiry)
{ dc390_updateDCB (pACB, pDCB); dc390_inquiry (pACB, pDCB); };
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
return (length);
einv2:
einv:
/* spin_unlock (strtok_lock); */
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
printk (KERN_WARNING "DC390: parse error near \"%s\"\n", (pos? pos: "NULL"));
return (-EINVAL);
printk (KERN_WARNING "DC390: Driver reset requested!\n");
DC390_UNLOCK_ACB;
DC390_reset (&cmd, 0);
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
{
dc390_dumpinfo (pACB, 0, 0);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
}
return (length);
dev, pDCB->TargetID, pDCB->TargetLUN);
DC390_UNLOCK_ACB;
dc390_inquiry (pACB, pDCB);
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
/* TO DO: We should make sure no pending commands are left */
dc390_remove_dev (pACB, pDCB);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
dc390_initDCB (pACB, &pDCB, id, lun);
DC390_UNLOCK_ACB;
dc390_inquiry (pACB, pDCB);
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
DC390_UNLOCK_ACB;
dc390_sendstart (pACB, pDCB);
dc390_inquiry (pACB, pDCB);
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
printk (KERN_WARNING "DC390: Ignore cmnd to illegal Dev(Idx) %i. Valid range: 0 - %i.\n",
dev, pACB->DCBCnt - 1);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
return (-EINVAL);
DC390_AFLAGS DC390_IFLAGS
PACB pACB = (PACB)(host->hostdata);
- DC390_LOCK_IO;
+ DC390_LOCK_IO(host);
DC390_LOCK_ACB;
/* TO DO: We should check for outstanding commands first. */
release_region(host->io_port,host->n_io_port);
dc390_freeDCBs (host);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(host);
return( 1 );
}
#endif /* def MODULE */
HD(j)->in_reset = TRUE;
- spin_unlock_irq(&sh[j]->host_lock);
+ spin_unlock_irq(sh[j]->host_lock);
time = jiffies;
while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
- spin_lock_irq(&sh[j]->host_lock);
+ spin_lock_irq(sh[j]->host_lock);
printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
/* Check if the interrupt must be processed by this handler */
if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return;
- spin_lock_irqsave(&sh[j]->host_lock, spin_flags);
+ spin_lock_irqsave(sh[j]->host_lock, spin_flags);
ihdlr(irq, j);
- spin_unlock_irqrestore(&sh[j]->host_lock, spin_flags);
+ spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
}
int u14_34f_release(struct Scsi_Host *shpnt) {
config.mscp_free = ~0;
#endif
- if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", NULL)) {
+ /*
+ * Brrr, &config.mscp[0].SCint->host is something magical...
+ * XXX and FIXME
+ */
+ if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", &config.mscp[0].SCint->host)) {
printk("Unable to allocate IRQ%u for UltraStor controller.\n",
config.interrupt);
return FALSE;
printk("U24F: invalid IRQ\n");
return FALSE;
}
- if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", NULL))
- {
- printk("Unable to allocate IRQ%u for UltraStor controller.\n",
- config.interrupt);
- return FALSE;
- }
+
/* BIOS addr set */
/* base port set */
config.port_address = addr;
free_irq(config.interrupt, do_ultrastor_interrupt);
return FALSE;
}
+
+ if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", shpnt))
+ {
+ printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+ config.interrupt);
+ return FALSE;
+ }
shpnt->irq = config.interrupt;
shpnt->dma_channel = config.dma_channel;
static void do_ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
ultrastor_interrupt(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
MODULE_LICENSE("GPL");
save_flags (flags);
cli ();
while (busy) { /* someone else is allocating */
- spin_unlock_irq(&host->host_lock);
+ spin_unlock_irq(host->host_lock);
for (now = jiffies; now == jiffies; ); /* wait a jiffy */
- spin_lock_irq(&host->host_lock);
+ spin_lock_irq(host->host_lock);
}
busy = 1; /* not busy now; it's our turn */
while (freescbs < needed) {
timeout = jiffies + WAITnexttimeout;
do {
- spin_unlock_irq(&host->host_lock);
+ spin_unlock_irq(host->host_lock);
for (now = jiffies; now == jiffies; ); /* wait a jiffy */
- spin_lock_irq(&host->host_lock);
+ spin_lock_irq(host->host_lock);
} while (freescbs < needed && time_before_eq(jiffies, timeout));
/*
* If we get here with enough free Scbs, we can take them.
unsigned long flags;
struct Scsi_Host *host = dev_id;
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
wd7000_intr_handle(irq, dev_id, regs);
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
* Analog Devices (A major AC97 codec maker)
* Intel Corp (you've probably heard of them already)
*
- * AC97 clues and assistance provided by
+ * AC97 clues and assistance provided by
* Analog Devices
* Zach 'Fufu' Brown
* Jeff Garzik
* This is available via the 'ftsodell=1' option.
*
* If you need to force a specific rate set the clocking= option
+ *
+ * This driver is cursed. (Ben LaHaise)
*/
#include <linux/module.h>
#ifndef PCI_DEVICE_ID_INTEL_440MX
#define PCI_DEVICE_ID_INTEL_440MX 0x7195
#endif
+#ifndef PCI_DEVICE_ID_SI_7012
+#define PCI_DEVICE_ID_SI_7012 0x7012
+#endif
+#ifndef PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO
+#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1
+#endif
static int ftsodell=0;
static int strict_clocking=0;
-static unsigned int clocking=48000;
+static unsigned int clocking=0;
static int spdif_locked=0;
//#define DEBUG
//#define DEBUG2
//#define DEBUG_INTERRUPTS
+//#define DEBUG_MMAP
#define ADC_RUNNING 1
#define DAC_RUNNING 2
#define INT_MASK (INT_SEC|INT_PRI|INT_MC|INT_PO|INT_PI|INT_MO|INT_NI|INT_GPI)
-#define DRIVER_VERSION "0.04"
+#define DRIVER_VERSION "0.21"
/* magic numbers to protect our data structures */
#define I810_CARD_MAGIC 0x5072696E /* "Prin" */
ICH82901AB,
INTEL440MX,
INTELICH2,
- INTELICH3
+ INTELICH3,
+ SI7012,
+ NVIDIA_NFORCE
};
static char * card_names[] = {
"Intel ICH 82901AB",
"Intel 440MX",
"Intel ICH2",
- "Intel ICH3"
+ "Intel ICH3",
+ "SiS 7012",
+ "NVIDIA nForce Audio"
};
static struct pci_device_id i810_pci_tbl [] __initdata = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH2},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH3},
+ {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7012,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, SI7012},
+ {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NVIDIA_NFORCE},
{0,}
};
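
The two new PCI IDs ride the existing probe table, and the SiS 7012 entry drags a pair of hardware quirks through the rest of the file: its status and position registers sit at swapped offsets (later hunks clear status at the PICB offset on the 7012), and the position count it returns is in bytes rather than 16-bit samples. Condensed from i810_get_dma_addr() below:

if (state->card->pci_id == PCI_DEVICE_ID_SI_7012) {
	port_picb = port + OFF_SR;	/* 7012: position register moved */
	bytes = 1;			/* and it counts bytes, not samples */
} else
	port_picb = port + OFF_PICB;	/* Intel parts: samples at OFF_PICB */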
struct i810_channel *(*alloc_rec_pcm_channel)(struct i810_card *);
struct i810_channel *(*alloc_rec_mic_channel)(struct i810_card *);
void (*free_pcm_channel)(struct i810_card *, int chan);
+
+ /* Initialization can take a *very* long time, so use this to block */
+ /* attempts to open our devices before we are ready (stops oops'es) */
+ int initializing;
};
static struct i810_card *devs = NULL;
static int i810_open_mixdev(struct inode *inode, struct file *file);
-static int i810_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg);
-
-static inline unsigned ld2(unsigned int x)
-{
- unsigned r = 0;
-
- if (x >= 0x10000) {
- x >>= 16;
- r += 16;
- }
- if (x >= 0x100) {
- x >>= 8;
- r += 8;
- }
- if (x >= 0x10) {
- x >>= 4;
- r += 4;
- }
- if (x >= 4) {
- x >>= 2;
- r += 2;
- }
- if (x >= 2)
- r++;
- return r;
-}
-
+static int i810_ioctl_mixdev(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
static u16 i810_ac97_get(struct ac97_codec *dev, u8 reg);
static void i810_ac97_set(struct ac97_codec *dev, u8 reg, u16 data);
rate = 8000;
dmabuf->rate = (rate * 48000)/clocking;
}
-
- new_rate = ac97_set_dac_rate(codec, rate);
+ new_rate=ac97_set_dac_rate(codec, rate);
if(new_rate != rate) {
dmabuf->rate = (new_rate * 48000)/clocking;
}
static inline unsigned i810_get_dma_addr(struct i810_state *state, int rec)
{
struct dmabuf *dmabuf = &state->dmabuf;
- unsigned int civ, offset;
- struct i810_channel *c;
+ unsigned int civ, offset, port, port_picb, bytes = 2;
if (!dmabuf->enable)
return 0;
+
if (rec)
- c = dmabuf->read_channel;
+ port = state->card->iobase + dmabuf->read_channel->port;
else
- c = dmabuf->write_channel;
+ port = state->card->iobase + dmabuf->write_channel->port;
+
+ if(state->card->pci_id == PCI_DEVICE_ID_SI_7012) {
+ port_picb = port + OFF_SR;
+ bytes = 1;
+ } else
+ port_picb = port + OFF_PICB;
+
do {
- civ = inb(state->card->iobase+c->port+OFF_CIV);
- offset = (civ + 1) * dmabuf->fragsize -
- 2 * inw(state->card->iobase+c->port+OFF_PICB);
- /* CIV changed before we read PICB (very seldom) ?
- * then PICB was rubbish, so try again */
- } while (civ != inb(state->card->iobase+c->port+OFF_CIV));
+ civ = inb(port+OFF_CIV) & 31;
+ offset = inw(port_picb);
+ /* Must have a delay here! */
+ if(offset == 0)
+ udelay(1);
+ * Reread both registers and make sure that the total
+ * offset from the first reading to the second is 0.
+ * There is an issue with SiS hardware where it will count
+ * picb down to 0, then update civ to the next value,
+ * then set the new picb to fragsize bytes. We can catch
+ * it between the civ update and the picb update, making
+ * it look as though we are 1 fragsize ahead of where we
+ * are. The next time we get the address, though, it will
+ * be back in the right place, and we will suddenly think
+ * we just went forward dmasize - fragsize bytes, causing
+ * totally stupid *huge* dma overrun messages. We are
+ * assuming that the 1us delay is more than long enough
+ * that we won't have to worry about the chip still being
+ * out of sync with reality ;-)
+ */
+ } while (civ != (inb(port+OFF_CIV) & 31) || offset != inw(port_picb));
- return offset;
+ return (((civ + 1) * dmabuf->fragsize - (bytes * offset))
+ % dmabuf->dmasize);
}
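
The rewritten i810_get_dma_addr() is a double-read loop: sample CIV and the position register, then retry until a second read of both returns the same values, so the CIV/PICB update race described in the comment can never be observed as a torn pair. Stripped to its skeleton, with read_civ()/read_picb() as hypothetical accessors standing in for the inb()/inw() reads:

do {
	civ    = read_civ(port);	/* current index value, 0..31 */
	offset = read_picb(port);	/* position in current buffer */
	if (offset == 0)
		udelay(1);		/* let an in-flight update land */
} while (civ != read_civ(port) || offset != read_picb(port));

return (((civ + 1) * dmabuf->fragsize - (bytes * offset))
	% dmabuf->dmasize);		/* ring offset in bytes */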
-//static void resync_dma_ptrs(struct i810_state *state, int rec)
-//{
-// struct dmabuf *dmabuf = &state->dmabuf;
-// struct i810_channel *c;
-// int offset;
-//
-// if(rec) {
-// c = dmabuf->read_channel;
-// } else {
-// c = dmabuf->write_channel;
-// }
-// if(c==NULL)
-// return;
-// offset = inb(state->card->iobase+c->port+OFF_CIV);
-// if(offset == inb(state->card->iobase+c->port+OFF_LVI))
-// offset++;
-// offset *= dmabuf->fragsize;
-//
-// dmabuf->hwptr=dmabuf->swptr = offset;
-//}
-
/* Stop recording (lock held) */
static inline void __stop_adc(struct i810_state *state)
{
// wait for the card to acknowledge shutdown
while( inb(card->iobase + PI_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
- outb( inb(card->iobase + PI_SR), card->iobase + PI_SR );
+ if(card->pci_id == PCI_DEVICE_ID_SI_7012)
+ outb( inb(card->iobase + PI_PICB), card->iobase + PI_PICB );
+ else
+ outb( inb(card->iobase + PI_SR), card->iobase + PI_SR );
outl( inl(card->iobase + GLOB_STA) & INT_PI, card->iobase + GLOB_STA);
}
spin_unlock_irqrestore(&card->lock, flags);
}
-static void start_adc(struct i810_state *state)
+static inline void __start_adc(struct i810_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- struct i810_card *card = state->card;
- unsigned long flags;
if (dmabuf->count < dmabuf->dmasize && dmabuf->ready && !dmabuf->enable &&
(dmabuf->trigger & PCM_ENABLE_INPUT)) {
- spin_lock_irqsave(&card->lock, flags);
dmabuf->enable |= ADC_RUNNING;
- outb((1<<4) | (1<<2) | 1, card->iobase + PI_CR);
- spin_unlock_irqrestore(&card->lock, flags);
+ outb((1<<4) | (1<<2) | 1, state->card->iobase + PI_CR);
}
}
+static void start_adc(struct i810_state *state)
+{
+ struct i810_card *card = state->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ __start_adc(state);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
/* stop playback (lock held) */
static inline void __stop_dac(struct i810_state *state)
{
// wait for the card to acknowledge shutdown
while( inb(card->iobase + PO_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
- outb( inb(card->iobase + PO_SR), card->iobase + PO_SR );
+ if(card->pci_id == PCI_DEVICE_ID_SI_7012)
+ outb( inb(card->iobase + PO_PICB), card->iobase + PO_PICB );
+ else
+ outb( inb(card->iobase + PO_SR), card->iobase + PO_SR );
outl( inl(card->iobase + GLOB_STA) & INT_PO, card->iobase + GLOB_STA);
}
spin_unlock_irqrestore(&card->lock, flags);
}
-static void start_dac(struct i810_state *state)
+static inline void __start_dac(struct i810_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- struct i810_card *card = state->card;
- unsigned long flags;
if (dmabuf->count > 0 && dmabuf->ready && !dmabuf->enable &&
(dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
- spin_lock_irqsave(&card->lock, flags);
dmabuf->enable |= DAC_RUNNING;
- outb((1<<4) | (1<<2) | 1, card->iobase + PO_CR);
- spin_unlock_irqrestore(&card->lock, flags);
+ outb((1<<4) | (1<<2) | 1, state->card->iobase + PO_CR);
}
}
+static void start_dac(struct i810_state *state)
+{
+ struct i810_card *card = state->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ __start_dac(state);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
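
start_adc() and start_dac() are each split into a lock-taking wrapper and a __-prefixed inner helper so that the LVI-update path, which already runs with card->lock held, can start the engines without self-deadlocking. The idiom in isolation, with placeholder names:

/* Caller must hold s->card->lock. */
static inline void __do_start(struct i810_state *s)
{
	/* ... set the RUNNING flag and poke the control register ... */
}

/* Unlocked entry point for everyone else. */
static void do_start(struct i810_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->card->lock, flags);
	__do_start(s);
	spin_unlock_irqrestore(&s->card->lock, flags);
}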
#define DMABUF_DEFAULTORDER (16-PAGE_SHIFT)
#define DMABUF_MINORDER 1
dmabuf->ossfragsize = (PAGE_SIZE<<DMABUF_DEFAULTORDER)/dmabuf->ossmaxfrags;
size = dmabuf->ossfragsize * dmabuf->ossmaxfrags;
+ if(dmabuf->rawbuf && (PAGE_SIZE << dmabuf->buforder) == size)
+ return 0;
/* alloc enough to satisfy the oss params */
for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) {
if ( (PAGE_SIZE<<order) > size )
dmabuf->swptr = dmabuf->hwptr = 0;
spin_unlock_irqrestore(&state->card->lock, flags);
- /* allocate DMA buffer if not allocated yet */
- if (dmabuf->rawbuf)
- dealloc_dmabuf(state);
+ /* allocate the DMA buffer; let alloc_dmabuf decide whether an
+ * already-allocated buffer is still adequate or needs replacing
+ * (and, if none is allocated yet, allocate one).
+ */
if ((ret = alloc_dmabuf(state)))
return ret;
/* FIXME: figure out all this OSS fragment stuff */
/* I did, it now does what it should according to the OSS API. DL */
+ /* We may not have realloced our dmabuf, but the fragment size to
+ * fragment number ratio may have changed, so go ahead and reprogram
+ * things
+ */
dmabuf->dmasize = PAGE_SIZE << dmabuf->buforder;
dmabuf->numfrag = SG_LEN;
dmabuf->fragsize = dmabuf->dmasize/dmabuf->numfrag;
sg->busaddr=virt_to_bus(dmabuf->rawbuf+dmabuf->fragsize*i);
// the card will always be doing 16bit stereo
sg->control=dmabuf->fragsamples;
+ if(state->card->pci_id == PCI_DEVICE_ID_SI_7012)
+ sg->control <<= 1;
sg->control|=CON_BUFPAD;
// set us up to get IOC interrupts as often as needed to
// satisfy numfrag requirements, no more
outl(virt_to_bus(&c->sg[0]), state->card->iobase+c->port+OFF_BDBAR);
outb(0, state->card->iobase+c->port+OFF_CIV);
outb(0, state->card->iobase+c->port+OFF_LVI);
- dmabuf->count = 0;
spin_unlock_irqrestore(&state->card->lock, flags);
else
port += dmabuf->write_channel->port;
- if(dmabuf->mapped) {
- if(rec)
- dmabuf->swptr = (dmabuf->hwptr + dmabuf->dmasize
- - dmabuf->count) % dmabuf->dmasize;
- else
- dmabuf->swptr = (dmabuf->hwptr + dmabuf->count)
- % dmabuf->dmasize;
- }
- /*
- * two special cases, count == 0 on write
- * means no data, and count == dmasize
- * means no data on read, handle appropriately
+ /* if we are currently stopped, then our CIV is actually set to our
+ * *last* sg segment and we are ready to wrap to the next. However,
+ * if we set our LVI to the last sg segment, then it won't wrap to
+ * the next sg segment, it won't even get a start. So, instead, when
+ * we are stopped, we set both the LVI value and also we increment
+ * the CIV value to the next sg segment to be played so that when
+ * we call start_{dac,adc}, things will operate properly
*/
- if(!rec && dmabuf->count == 0) {
- outb(inb(port+OFF_CIV),port+OFF_LVI);
- return;
- }
- if(rec && dmabuf->count == dmabuf->dmasize) {
- outb(inb(port+OFF_CIV),port+OFF_LVI);
- return;
+ if (!dmabuf->enable && dmabuf->ready) {
+ if(rec && dmabuf->count < dmabuf->dmasize &&
+ (dmabuf->trigger & PCM_ENABLE_INPUT))
+ {
+ outb((inb(port+OFF_CIV)+1)&31, port+OFF_LVI);
+ __start_adc(state);
+ while( !(inb(port + OFF_CR) & ((1<<4) | (1<<2))) ) ;
+ } else if (!rec && dmabuf->count &&
+ (dmabuf->trigger & PCM_ENABLE_OUTPUT))
+ {
+ outb((inb(port+OFF_CIV)+1)&31, port+OFF_LVI);
+ __start_dac(state);
+ while( !(inb(port + OFF_CR) & ((1<<4) | (1<<2))) ) ;
+ }
}
+
/* swptr - 1 is the tail of our transfer */
x = (dmabuf->dmasize + dmabuf->swptr - 1) % dmabuf->dmasize;
x /= dmabuf->fragsize;
- outb(x&31, port+OFF_LVI);
+ outb(x, port+OFF_LVI);
}
static void i810_update_lvi(struct i810_state *state, int rec)
/* update hardware pointer */
hwptr = i810_get_dma_addr(state, 1);
diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
-// printk("HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
+#if defined(DEBUG_INTERRUPTS) || defined(DEBUG_MMAP)
+ printk("ADC HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
+#endif
dmabuf->hwptr = hwptr;
dmabuf->total_bytes += diff;
dmabuf->count += diff;
/* this is normal for the end of a read */
/* only give an error if we went past the */
/* last valid sg entry */
- if(inb(state->card->iobase + PI_CIV) !=
- inb(state->card->iobase + PI_LVI)) {
+ if((inb(state->card->iobase + PI_CIV) & 31) !=
+ (inb(state->card->iobase + PI_LVI) & 31)) {
printk(KERN_WARNING "i810_audio: DMA overrun on read\n");
dmabuf->error++;
}
/* update hardware pointer */
hwptr = i810_get_dma_addr(state, 0);
diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
-// printk("HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
+#if defined(DEBUG_INTERRUPTS) || defined(DEBUG_MMAP)
+ printk("DAC HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
+#endif
dmabuf->hwptr = hwptr;
dmabuf->total_bytes += diff;
dmabuf->count -= diff;
/* this is normal for the end of a write */
/* only give an error if we went past the */
/* last valid sg entry */
- if(inb(state->card->iobase + PO_CIV) !=
- inb(state->card->iobase + PO_LVI)) {
+ if((inb(state->card->iobase + PO_CIV) & 31) !=
+ (inb(state->card->iobase + PO_LVI) & 31)) {
printk(KERN_WARNING "i810_audio: DMA overrun on write\n");
printk("i810_audio: CIV %d, LVI %d, hwptr %x, "
"count %d\n",
- inb(state->card->iobase + PO_CIV),
- inb(state->card->iobase + PO_LVI),
+ inb(state->card->iobase + PO_CIV) & 31,
+ inb(state->card->iobase + PO_LVI) & 31,
dmabuf->hwptr, dmabuf->count);
dmabuf->error++;
}
}
}
-static int drain_dac(struct i810_state *state, int nonblock)
+static inline int i810_get_free_write_space(struct i810_state *state)
+{
+ struct dmabuf *dmabuf = &state->dmabuf;
+ int free;
+
+ i810_update_ptr(state);
+ // catch underruns during playback
+ if (dmabuf->count < 0) {
+ dmabuf->count = 0;
+ dmabuf->swptr = dmabuf->hwptr;
+ }
+ free = dmabuf->dmasize - dmabuf->count;
+ free -= (dmabuf->hwptr % dmabuf->fragsize);
+ if(free < 0)
+ return(0);
+ return(free);
+}
+
+static inline int i810_get_available_read_data(struct i810_state *state)
+{
+ struct dmabuf *dmabuf = &state->dmabuf;
+ int avail;
+
+ i810_update_ptr(state);
+ // catch overruns during record
+ if (dmabuf->count > dmabuf->dmasize) {
+ dmabuf->count = dmabuf->dmasize;
+ dmabuf->swptr = dmabuf->hwptr;
+ }
+ avail = dmabuf->count;
+ avail -= (dmabuf->hwptr % dmabuf->fragsize);
+ if(avail < 0)
+ return(0);
+ return(avail);
+}
+
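
Both helpers above round the answer down past the fragment the hardware is currently in, so userspace is never told it may touch that fragment. With hypothetical numbers, dmasize = 65536, count = 24576, fragsize = 4096 and hwptr = 30000, the free write space is 65536 - 24576 = 40960, minus hwptr % fragsize = 1328, giving 39632 bytes. As a standalone check of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		int dmasize = 65536, count = 24576;
		int fragsize = 4096, hwptr = 30000;
		int space = dmasize - count;	/* 40960 bytes unqueued */

		space -= hwptr % fragsize;	/* 40960 - 1328 = 39632 */
		printf("free write space: %d\n", space > 0 ? space : 0);
		return 0;
	}
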
+static int drain_dac(struct i810_state *state, int signals_allowed)
{
DECLARE_WAITQUEUE(wait, current);
struct dmabuf *dmabuf = &state->dmabuf;
if (!dmabuf->ready)
return 0;
-
+ if(dmabuf->mapped) {
+ stop_dac(state);
+ return 0;
+ }
add_wait_queue(&dmabuf->wait, &wait);
for (;;) {
- /* It seems that we have to set the current state to TASK_INTERRUPTIBLE
- every time to make the process really go to sleep */
- set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&state->card->lock, flags);
i810_update_ptr(state);
if (count <= 0)
break;
- if (signal_pending(current))
- break;
-
- i810_update_lvi(state,0);
- if (dmabuf->enable != DAC_RUNNING)
- start_dac(state);
-
- if (nonblock) {
- remove_wait_queue(&dmabuf->wait, &wait);
- set_current_state(TASK_RUNNING);
- return -EBUSY;
+ /*
+ * This will make sure that our LVI is correct, that our
+ * pointer is updated, and that the DAC is running. We
+ * have to force the setting of dmabuf->trigger to avoid
+ * any possible deadlocks.
+ */
+ if(!dmabuf->enable) {
+ dmabuf->trigger = PCM_ENABLE_OUTPUT;
+ i810_update_lvi(state,0);
}
+ if (signal_pending(current) && signals_allowed) {
+ break;
+ }
- tmo = (dmabuf->dmasize * HZ) / dmabuf->rate;
- tmo >>= 1;
- if (!schedule_timeout(tmo ? tmo : 1) && tmo){
+ /* It seems that we have to set the current state to
+ * TASK_INTERRUPTIBLE every time to make the process
+ * really go to sleep. This also has to be *after* the
+ * update_ptr() call because update_ptr is likely to
+ * do a wake_up() which will unset this before we ever
+	 * try to sleep, resulting in a tight loop in this code
+ * instead of actually sleeping and waiting for an
+ * interrupt to wake us up!
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ /*
+ * set the timeout to significantly longer than it *should*
+ * take for the DAC to drain the DMA buffer
+ */
+ tmo = (count * HZ) / (dmabuf->rate);
+ if (!schedule_timeout(tmo >= 2 ? tmo : 2)){
printk(KERN_ERR "i810_audio: drain_dac, dma timeout?\n");
+ count = 0;
break;
}
}
- stop_dac(state);
- synchronize_irq();
- remove_wait_queue(&dmabuf->wait, &wait);
set_current_state(TASK_RUNNING);
- if (signal_pending(current))
+ remove_wait_queue(&dmabuf->wait, &wait);
+ if(count > 0 && signal_pending(current) && signals_allowed)
return -ERESTARTSYS;
-
+ stop_dac(state);
return 0;
}
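
The comment in the loop above describes the classic lost-wakeup race. Pulled out of the driver, the general shape is: mark the task sleeping before the final check of the condition, so that a wake_up() landing in between merely makes schedule_timeout() return immediately instead of being lost. A sketch with placeholder names (waitq, cond and tmo are not from this driver):

	#include <linux/sched.h>
	#include <linux/wait.h>

	static void wait_for_cond(wait_queue_head_t *waitq, volatile int *cond,
				  unsigned long tmo)
	{
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue(waitq, &wait);
		for (;;) {
			/* sleeping state set *before* the re-check */
			set_current_state(TASK_INTERRUPTIBLE);
			if (*cond)
				break;
			if (!schedule_timeout(tmo))
				break;		/* timed out */
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(waitq, &wait);
	}
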
if(!state->dmabuf.ready)
continue;
dmabuf = &state->dmabuf;
- if(dmabuf->enable & DAC_RUNNING)
+ if(dmabuf->enable & DAC_RUNNING) {
c=dmabuf->write_channel;
- else if(dmabuf->enable & ADC_RUNNING)
+ } else if(dmabuf->enable & ADC_RUNNING) {
c=dmabuf->read_channel;
- else /* This can occur going from R/W to close */
+ } else /* This can occur going from R/W to close */
continue;
port+=c->port;
-
- status = inw(port + OFF_SR);
+
+ if(card->pci_id == PCI_DEVICE_ID_SI_7012)
+ status = inw(port + OFF_PICB);
+ else
+ status = inw(port + OFF_SR);
+
#ifdef DEBUG_INTERRUPTS
printk("NUM %d PORT %X IRQ ( ST%d ", c->num, c->port, status);
#endif
if(status & DMA_INT_COMPLETE)
{
+			/* Only wake_up() waiters when this interrupt puts us
+			 * past a userfragsize of data free (playback) or
+			 * available (record); i810_update_ptr() does that
+			 * check for us.
+			 */
i810_update_ptr(state);
#ifdef DEBUG_INTERRUPTS
printk("COMP %d ", dmabuf->hwptr /
dmabuf->fragsize);
#endif
}
- if(status & DMA_INT_LVI)
+ if(status & (DMA_INT_LVI | DMA_INT_DCH))
{
+ /* wake_up() unconditionally on LVI and DCH */
i810_update_ptr(state);
wake_up(&dmabuf->wait);
#ifdef DEBUG_INTERRUPTS
- printk("LVI ");
+ if(status & DMA_INT_LVI)
+ printk("LVI ");
+ if(status & DMA_INT_DCH)
+ printk("DCH -");
#endif
- }
- if(status & DMA_INT_DCH)
- {
- i810_update_ptr(state);
if(dmabuf->enable & DAC_RUNNING)
count = dmabuf->count;
else
count = dmabuf->dmasize - dmabuf->count;
if(count > 0) {
outb(inb(port+OFF_CR) | 1, port+OFF_CR);
+#ifdef DEBUG_INTERRUPTS
+ printk(" CONTINUE ");
+#endif
} else {
+ if (dmabuf->enable & DAC_RUNNING)
+ __stop_dac(state);
+ if (dmabuf->enable & ADC_RUNNING)
+ __stop_adc(state);
+ dmabuf->enable = 0;
wake_up(&dmabuf->wait);
#ifdef DEBUG_INTERRUPTS
- printk("DCH - STOP ");
+ printk(" STOP ");
#endif
}
}
- outw(status & DMA_INT_MASK, port + OFF_SR);
+ if(card->pci_id == PCI_DEVICE_ID_SI_7012)
+ outw(status & DMA_INT_MASK, port + OFF_PICB);
+ else
+ outw(status & DMA_INT_MASK, port + OFF_SR);
}
#ifdef DEBUG_INTERRUPTS
printk(")\n");
return ret;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
- dmabuf->trigger &= ~PCM_ENABLE_OUTPUT;
ret = 0;
add_wait_queue(&dmabuf->wait, &waita);
while (count > 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&card->lock, flags);
if (PM_SUSPENDED(card)) {
spin_unlock_irqrestore(&card->lock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
schedule();
if (signal_pending(current)) {
if (!ret) ret = -EAGAIN;
continue;
}
swptr = dmabuf->swptr;
- if (dmabuf->count > dmabuf->dmasize) {
- dmabuf->count = dmabuf->dmasize;
- }
- cnt = dmabuf->count - dmabuf->fragsize;
+ cnt = i810_get_available_read_data(state);
// this is to make the copy_to_user simpler below
if(cnt > (dmabuf->dmasize - swptr))
cnt = dmabuf->dmasize - swptr;
if (cnt > count)
cnt = count;
+		/* Lop off the last two bits to force the code to always
+		 * read in full samples. This keeps software that sets
+		 * O_NONBLOCK but doesn't check the return value of the
+		 * read call from getting things out of state where it
+		 * thinks a full 4 byte sample was read when really only
+		 * a portion was, resulting in odd sound and stereo
+ * hysteresis.
+ */
+ cnt &= ~0x3;
if (cnt <= 0) {
unsigned long tmo;
- if(!dmabuf->enable) {
- dmabuf->trigger |= PCM_ENABLE_INPUT;
- start_adc(state);
- }
+ /*
+ * Don't let us deadlock. The ADC won't start if
+ * dmabuf->trigger isn't set. A call to SETTRIGGER
+ * could have turned it off after we set it to on
+ * previously.
+ */
+ dmabuf->trigger = PCM_ENABLE_INPUT;
+ /*
+ * This does three things. Updates LVI to be correct,
+ * makes sure the ADC is running, and updates the
+ * hwptr.
+ */
i810_update_lvi(state,1);
if (file->f_flags & O_NONBLOCK) {
if (!ret) ret = -EAGAIN;
- return ret;
+ goto done;
}
- /* This isnt strictly right for the 810 but it'll do */
- tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
- tmo >>= 1;
+ /* Set the timeout to how long it would take to fill
+			 * two of our buffers. If we haven't been woken up
+			 * by then, we know something is wrong.
+ */
+ tmo = (dmabuf->dmasize * HZ * 2) / (dmabuf->rate * 4);
/* There are two situations in which schedule_timeout returns: one is when
the interrupt is serviced correctly and the process is woken up by the
ISR ON TIME. The other is when the timeout expires, which means it is
TOO LATE for the process to be scheduled to run (scheduler latency),
which results in a (potential) buffer overrun. And worse, there is
NOTHING we can do to prevent it. */
- if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
+ if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
#ifdef DEBUG
printk(KERN_ERR "i810_audio: recording schedule timeout, "
"dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
}
if (signal_pending(current)) {
ret = ret ? ret : -ERESTARTSYS;
- return ret;
+ goto done;
}
continue;
}
buffer += cnt;
ret += cnt;
}
- i810_update_lvi(state,1);
- if(!(dmabuf->enable & ADC_RUNNING))
- start_adc(state);
done:
+ i810_update_lvi(state,1);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dmabuf->wait, &waita);
return ret;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
- dmabuf->trigger &= ~PCM_ENABLE_INPUT;
ret = 0;
add_wait_queue(&dmabuf->wait, &waita);
while (count > 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&state->card->lock, flags);
if (PM_SUSPENDED(card)) {
spin_unlock_irqrestore(&card->lock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
schedule();
if (signal_pending(current)) {
if (!ret) ret = -EAGAIN;
}
swptr = dmabuf->swptr;
- if (dmabuf->count < 0) {
- dmabuf->count = 0;
- }
- cnt = dmabuf->dmasize - dmabuf->fragsize - dmabuf->count;
- // this is to make the copy_from_user simpler below
+ cnt = i810_get_free_write_space(state);
+ /* Bound the maximum size to how much we can copy to the
+ * dma buffer before we hit the end. If we have more to
+		 * copy, then it will get done in a second pass of this
+ * loop starting from the beginning of the buffer.
+ */
if(cnt > (dmabuf->dmasize - swptr))
cnt = dmabuf->dmasize - swptr;
spin_unlock_irqrestore(&state->card->lock, flags);
#endif
if (cnt > count)
cnt = count;
+ /* Lop off the last two bits to force the code to always
+ * write in full samples. This keeps software that sets
+ * O_NONBLOCK but doesn't check the return value of the
+		 * write call from getting things out of state where it
+		 * thinks a full 4 byte sample was written when really only
+ * a portion was, resulting in odd sound and stereo
+ * hysteresis.
+ */
+ cnt &= ~0x3;
if (cnt <= 0) {
unsigned long tmo;
// There is data waiting to be played
- if(!dmabuf->enable && dmabuf->count) {
- /* force the starting incase SETTRIGGER has been used */
- /* to stop it, otherwise this is a deadlock situation */
- dmabuf->trigger |= PCM_ENABLE_OUTPUT;
- start_dac(state);
- }
- // Update the LVI pointer in case we have already
- // written data in this syscall and are just waiting
- // on the tail bit of data
+ /*
+ * Force the trigger setting since we would
+ * deadlock with it set any other way
+ */
+ dmabuf->trigger = PCM_ENABLE_OUTPUT;
i810_update_lvi(state,0);
if (file->f_flags & O_NONBLOCK) {
if (!ret) ret = -EAGAIN;
goto ret;
}
/* Not strictly correct but works */
- tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 4);
+ tmo = (dmabuf->dmasize * HZ * 2) / (dmabuf->rate * 4);
/* There are two situations in which schedule_timeout returns: one is when
the interrupt is serviced correctly and the process is woken up by the
ISR ON TIME. The other is when the timeout expires, which means it is
TOO LATE for the process to be scheduled to run (scheduler latency),
which results in a (potential) buffer underrun. And worse, there is
NOTHING we can do to prevent it. */
- if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
+ if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
#ifdef DEBUG
printk(KERN_ERR "i810_audio: playback schedule timeout, "
"dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
x = dmabuf->fragsize - (swptr % dmabuf->fragsize);
memset(dmabuf->rawbuf + swptr, '\0', x);
}
+ret:
i810_update_lvi(state,0);
- if (!dmabuf->enable && dmabuf->count >= dmabuf->userfragsize)
- start_dac(state);
- ret:
set_current_state(TASK_RUNNING);
remove_wait_queue(&dmabuf->wait, &waita);
return 0;
poll_wait(file, &dmabuf->wait, wait);
spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
- if (file->f_mode & FMODE_READ && dmabuf->enable & ADC_RUNNING) {
- if (dmabuf->count >= (signed)dmabuf->fragsize)
+ if (dmabuf->enable & ADC_RUNNING ||
+ dmabuf->trigger & PCM_ENABLE_INPUT) {
+ if (i810_get_available_read_data(state) >=
+ (signed)dmabuf->userfragsize)
mask |= POLLIN | POLLRDNORM;
}
- if (file->f_mode & FMODE_WRITE && dmabuf->enable & DAC_RUNNING) {
- if (dmabuf->mapped) {
- if (dmabuf->count >= (signed)dmabuf->fragsize)
- mask |= POLLOUT | POLLWRNORM;
- } else {
- if ((signed)dmabuf->dmasize >= dmabuf->count + (signed)dmabuf->fragsize)
- mask |= POLLOUT | POLLWRNORM;
- }
+ if (dmabuf->enable & DAC_RUNNING ||
+ dmabuf->trigger & PCM_ENABLE_OUTPUT) {
+ if (i810_get_free_write_space(state) >=
+ (signed)dmabuf->userfragsize)
+ mask |= POLLOUT | POLLWRNORM;
}
spin_unlock_irqrestore(&state->card->lock, flags);
-
return mask;
}
if (size > (PAGE_SIZE << dmabuf->buforder))
goto out;
ret = -EAGAIN;
- if (remap_page_range(vma, vma->vm_start, virt_to_phys(dmabuf->rawbuf),
+ if (remap_page_range(vma->vm_start, virt_to_phys(dmabuf->rawbuf),
size, vma->vm_page_prot))
goto out;
dmabuf->mapped = 1;
- if(vma->vm_flags & VM_WRITE)
- dmabuf->count = dmabuf->dmasize;
- else
- dmabuf->count = 0;
+ dmabuf->trigger = 0;
ret = 0;
-#ifdef DEBUG
+#ifdef DEBUG_MMAP
printk("i810_audio: mmap'ed %ld bytes of data space\n", size);
#endif
out:
static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
struct i810_state *state = (struct i810_state *)file->private_data;
+ struct i810_channel *c = NULL;
struct dmabuf *dmabuf = &state->dmabuf;
unsigned long flags;
audio_buf_info abinfo;
count_info cinfo;
unsigned int i_glob_cnt;
- int val = 0, mapped, ret;
+ int val = 0, ret;
struct ac97_codec *codec = state->card->ac97_codec[0];
- mapped = ((file->f_mode & FMODE_WRITE) && dmabuf->mapped) ||
- ((file->f_mode & FMODE_READ) && dmabuf->mapped);
#ifdef DEBUG
printk("i810_audio: i810_ioctl, arg=0x%x, cmd=", arg ? *(int *)arg : 0);
#endif
#ifdef DEBUG
printk("SNDCTL_DSP_RESET\n");
#endif
- /* FIXME: spin_lock ? */
+ spin_lock_irqsave(&state->card->lock, flags);
if (dmabuf->enable == DAC_RUNNING) {
- stop_dac(state);
+ c = dmabuf->write_channel;
+ __stop_dac(state);
}
if (dmabuf->enable == ADC_RUNNING) {
- stop_adc(state);
+ c = dmabuf->read_channel;
+ __stop_adc(state);
}
+ if (c != NULL) {
+ outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
+ outl(virt_to_bus(&c->sg[0]), state->card->iobase+c->port+OFF_BDBAR);
+ outb(0, state->card->iobase+c->port+OFF_CIV);
+ outb(0, state->card->iobase+c->port+OFF_LVI);
+ }
+
+ spin_unlock_irqrestore(&state->card->lock, flags);
synchronize_irq();
dmabuf->ready = 0;
dmabuf->swptr = dmabuf->hwptr = 0;
#endif
if (dmabuf->enable != DAC_RUNNING || file->f_flags & O_NONBLOCK)
return 0;
- drain_dac(state, 0);
- dmabuf->ready = 0;
- dmabuf->swptr = dmabuf->hwptr = 0;
- dmabuf->count = dmabuf->total_bytes = 0;
+ if((val = drain_dac(state, 1)))
+ return val;
+ dmabuf->total_bytes = 0;
return 0;
case SNDCTL_DSP_SPEED: /* set sample rate */
#ifdef DEBUG
printk("SNDCTL_DSP_STEREO\n");
#endif
- if (get_user(val, (int *)arg))
- return -EFAULT;
-
if (dmabuf->enable & DAC_RUNNING) {
stop_dac(state);
}
#ifdef DEBUG
printk("SNDCTL_DSP_SETFMT\n");
#endif
- if (get_user(val, (int *)arg))
- return -EFAULT;
-
- switch ( val ) {
- case AFMT_S16_LE:
- break;
- case AFMT_QUERY:
- default:
- val = AFMT_S16_LE;
- break;
- }
- return put_user(val, (int *)arg);
+ return put_user(AFMT_S16_LE, (int *)arg);
case SNDCTL_DSP_CHANNELS:
#ifdef DEBUG
dmabuf->ossfragsize = 1<<(val & 0xffff);
dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
- if (dmabuf->ossmaxfrags <= 4)
- dmabuf->ossmaxfrags = 4;
- else if (dmabuf->ossmaxfrags <= 8)
- dmabuf->ossmaxfrags = 8;
- else if (dmabuf->ossmaxfrags <= 16)
- dmabuf->ossmaxfrags = 16;
- else
- dmabuf->ossmaxfrags = 32;
+ if (!dmabuf->ossfragsize || !dmabuf->ossmaxfrags)
+ return -EINVAL;
+ /*
+ * Bound the frag size into our allowed range of 256 - 4096
+ */
+ if (dmabuf->ossfragsize < 256)
+ dmabuf->ossfragsize = 256;
+ else if (dmabuf->ossfragsize > 4096)
+ dmabuf->ossfragsize = 4096;
+ /*
+		 * The numfrags could be something reasonable, or it could
+		 * be 0xffff meaning "Give me as much as possible". So we
+		 * check that numfrags * fragsize neither exceeds our 64k
+		 * buffer limit nor falls below our 8k minimum. If either
+		 * check fails, we adjust the number of fragments, not the
+		 * size of them. It's OK if our number of fragments no
+		 * longer equals 32 or whatever the hardware-based number
+		 * was, since we now program the hardware with a separate
+		 * frag count. Before we get into this, though, bound
+		 * maxfrags to avoid overflow issues: the upper bound is
+		 * 64k / 256 (maximum buffer size over minimum frag size)
+		 * and the lower bound is 8k / 4k = 2 (minimum buffer size
+		 * over maximum frag size).
+ */
+
+ if(dmabuf->ossmaxfrags > 256)
+ dmabuf->ossmaxfrags = 256;
+ else if (dmabuf->ossmaxfrags < 2)
+ dmabuf->ossmaxfrags = 2;
+
val = dmabuf->ossfragsize * dmabuf->ossmaxfrags;
- if (val < 16384)
- val = 16384;
- if (val > 65536)
- val = 65536;
- dmabuf->ossmaxfrags = val/dmabuf->ossfragsize;
- if(dmabuf->ossmaxfrags<4)
- dmabuf->ossfragsize = val/4;
+ while (val < 8192) {
+ val <<= 1;
+ dmabuf->ossmaxfrags <<= 1;
+ }
+ while (val > 65536) {
+ val >>= 1;
+ dmabuf->ossmaxfrags >>= 1;
+ }
dmabuf->ready = 0;
#ifdef DEBUG
printk("SNDCTL_DSP_SETFRAGMENT 0x%x, %d, %d\n", val,
i810_update_ptr(state);
abinfo.fragsize = dmabuf->userfragsize;
abinfo.fragstotal = dmabuf->userfrags;
- if(dmabuf->mapped)
- abinfo.bytes = dmabuf->count;
- else
- abinfo.bytes = dmabuf->dmasize - dmabuf->count;
+ if (dmabuf->mapped)
+ abinfo.bytes = dmabuf->dmasize;
+ else
+ abinfo.bytes = i810_get_free_write_space(state);
abinfo.fragments = abinfo.bytes / dmabuf->userfragsize;
spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_GETOSPACE %d, %d, %d, %d\n", abinfo.bytes,
abinfo.fragsize, abinfo.fragments, abinfo.fragstotal);
#endif
if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
return val;
spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
+ val = i810_get_free_write_space(state);
cinfo.bytes = dmabuf->total_bytes;
cinfo.ptr = dmabuf->hwptr;
- cinfo.blocks = (dmabuf->dmasize - dmabuf->count)/dmabuf->userfragsize;
- if (dmabuf->mapped) {
- dmabuf->count = (dmabuf->dmasize -
- (dmabuf->count & (dmabuf->userfragsize-1)));
+ cinfo.blocks = val/dmabuf->userfragsize;
+ if (dmabuf->mapped && (dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
+ dmabuf->count += val;
+ dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
__i810_update_lvi(state, 0);
}
spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_GETOPTR %d, %d, %d, %d\n", cinfo.bytes,
cinfo.blocks, cinfo.ptr, dmabuf->count);
#endif
if (!dmabuf->ready && (val = prog_dmabuf(state, 1)) != 0)
return val;
spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
+ abinfo.bytes = i810_get_available_read_data(state);
abinfo.fragsize = dmabuf->userfragsize;
abinfo.fragstotal = dmabuf->userfrags;
- abinfo.bytes = dmabuf->dmasize - dmabuf->count;
abinfo.fragments = abinfo.bytes / dmabuf->userfragsize;
spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_GETISPACE %d, %d, %d, %d\n", abinfo.bytes,
abinfo.fragsize, abinfo.fragments, abinfo.fragstotal);
#endif
if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
return val;
spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
+ val = i810_get_available_read_data(state);
cinfo.bytes = dmabuf->total_bytes;
- cinfo.blocks = dmabuf->count/dmabuf->userfragsize;
+ cinfo.blocks = val/dmabuf->userfragsize;
cinfo.ptr = dmabuf->hwptr;
- if (dmabuf->mapped) {
- dmabuf->count &= (dmabuf->userfragsize-1);
+ if (dmabuf->mapped && (dmabuf->trigger & PCM_ENABLE_INPUT)) {
+ dmabuf->count -= val;
+ dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
__i810_update_lvi(state, 1);
}
spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_GETIPTR %d, %d, %d, %d\n", cinfo.bytes,
cinfo.blocks, cinfo.ptr, dmabuf->count);
#endif
case SNDCTL_DSP_SETTRIGGER:
if (get_user(val, (int *)arg))
return -EFAULT;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_SETTRIGGER 0x%x\n", val);
#endif
if( !(val & PCM_ENABLE_INPUT) && dmabuf->enable == ADC_RUNNING) {
stop_dac(state);
}
dmabuf->trigger = val;
- if(val & PCM_ENABLE_OUTPUT) {
+ if(val & PCM_ENABLE_OUTPUT && !(dmabuf->enable & DAC_RUNNING)) {
if (!dmabuf->write_channel) {
dmabuf->ready = 0;
dmabuf->write_channel = state->card->alloc_pcm_channel(state->card);
if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
return ret;
if (dmabuf->mapped) {
- dmabuf->count = dmabuf->dmasize;
- i810_update_lvi(state,0);
- }
- if (!dmabuf->enable && dmabuf->count > dmabuf->userfragsize)
+ spin_lock_irqsave(&state->card->lock, flags);
+ i810_update_ptr(state);
+ dmabuf->count = 0;
+ dmabuf->swptr = dmabuf->hwptr;
+ dmabuf->count = i810_get_free_write_space(state);
+ dmabuf->swptr = (dmabuf->swptr + dmabuf->count) % dmabuf->dmasize;
+ __i810_update_lvi(state, 0);
+ spin_unlock_irqrestore(&state->card->lock, flags);
+ } else
start_dac(state);
}
- if(val & PCM_ENABLE_INPUT) {
+ if(val & PCM_ENABLE_INPUT && !(dmabuf->enable & ADC_RUNNING)) {
if (!dmabuf->read_channel) {
dmabuf->ready = 0;
dmabuf->read_channel = state->card->alloc_rec_pcm_channel(state->card);
if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
return ret;
if (dmabuf->mapped) {
+ spin_lock_irqsave(&state->card->lock, flags);
+ i810_update_ptr(state);
+ dmabuf->swptr = dmabuf->hwptr;
dmabuf->count = 0;
- i810_update_lvi(state,1);
+ spin_unlock_irqrestore(&state->card->lock, flags);
}
- if (!dmabuf->enable && dmabuf->count <
- (dmabuf->dmasize - dmabuf->userfragsize))
- start_adc(state);
+ i810_update_lvi(state, 1);
+ start_adc(state);
}
return 0;
/* find an available virtual channel (instance of /dev/dsp) */
while (card != NULL) {
- for (i = 0; i < NR_HW_CH; i++) {
+ /*
+		 * If we are initializing and then fail, the card could go
+		 * away unexpectedly while we are in the for() loop.
+ * So, check for card on each iteration before we check
+ * for card->initializing to avoid a possible oops.
+ * This usually only matters for times when the driver is
+ * autoloaded by kmod.
+ */
+ for (i = 0; i < 50 && card && card->initializing; i++) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/20);
+ }
+ for (i = 0; i < NR_HW_CH && card && !card->initializing; i++) {
if (card->states[i] == NULL) {
state = card->states[i] = (struct i810_state *)
kmalloc(sizeof(struct i810_state), GFP_KERNEL);
card->states[i] = NULL;
return -EBUSY;
}
- i810_set_adc_rate(state, 8000);
dmabuf->trigger |= PCM_ENABLE_INPUT;
+ i810_set_adc_rate(state, 8000);
}
if(file->f_mode & FMODE_WRITE) {
if((dmabuf->write_channel = card->alloc_pcm_channel(card)) == NULL) {
/* Initialize to 8kHz? What if we don't support 8kHz? */
/* Let's change this to check for S/PDIF stuff */
+ dmabuf->trigger |= PCM_ENABLE_OUTPUT;
if ( spdif_locked ) {
i810_set_dac_rate(state, spdif_locked);
i810_set_spdif_output(state, AC97_EA_SPSA_3_4, spdif_locked);
} else {
i810_set_dac_rate(state, 8000);
}
- dmabuf->trigger |= PCM_ENABLE_OUTPUT;
}
/* set default sample format. According to OSS Programmer's Guide /dev/dsp
lock_kernel();
/* stop DMA state machine and free DMA buffers/channels */
- if(dmabuf->enable & DAC_RUNNING ||
- (dmabuf->count && (dmabuf->trigger & PCM_ENABLE_OUTPUT))) {
- drain_dac(state,0);
+ if(dmabuf->trigger & PCM_ENABLE_OUTPUT) {
+ drain_dac(state, 0);
}
- if(dmabuf->enable & ADC_RUNNING) {
+ if(dmabuf->trigger & PCM_ENABLE_INPUT) {
stop_adc(state);
}
spin_lock_irqsave(&card->lock, flags);
unsigned int minor = minor(inode->i_rdev);
struct i810_card *card = devs;
- for (card = devs; card != NULL; card = card->next)
- for (i = 0; i < NR_AC97; i++)
+ for (card = devs; card != NULL; card = card->next) {
+ /*
+		 * If we are initializing and then fail, the card could go
+		 * away unexpectedly while we are in the for() loop.
+ * So, check for card on each iteration before we check
+ * for card->initializing to avoid a possible oops.
+ * This usually only matters for times when the driver is
+ * autoloaded by kmod.
+ */
+ for (i = 0; i < 50 && card && card->initializing; i++) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/20);
+ }
+ for (i = 0; i < NR_AC97 && card && !card->initializing; i++)
if (card->ac97_codec[i] != NULL &&
card->ac97_codec[i]->dev_mixer == minor) {
file->private_data = card->ac97_codec[i];
return 0;
}
+ }
return -ENODEV;
}
}
memset(card, 0, sizeof(*card));
+ card->initializing = 1;
card->iobase = pci_resource_start (pci_dev, 1);
card->ac97base = pci_resource_start (pci_dev, 0);
card->pci_dev = pci_dev;
}
pci_set_drvdata(pci_dev, card);
- if(clocking == 48000) {
+ if(clocking == 0) {
+ clocking = 48000;
i810_configure_clocking();
}
kfree(card);
return -ENODEV;
}
-
+ card->initializing = 0;
return 0;
}
if (card->ac97_codec[i] != NULL) {
unregister_sound_mixer(card->ac97_codec[i]->dev_mixer);
kfree (card->ac97_codec[i]);
+ card->ac97_codec[i] = NULL;
}
unregister_sound_dsp(card->dev_audio);
kfree(card);
if(ftsodell != 0) {
printk("i810_audio: ftsodell is now a deprecated option.\n");
}
- if(clocking == 48000) {
- i810_configure_clocking();
- }
if(spdif_locked > 0 ) {
if(spdif_locked == 32000 || spdif_locked == 44100 || spdif_locked == 48000) {
printk("i810_audio: Enabling S/PDIF at sample rate %dHz.\n", spdif_locked);
} else {
- printk("i810_audio: S/PDIF can only be locked to 32000, 441000, or 48000Hz.\n");
+ printk("i810_audio: S/PDIF can only be locked to 32000, 44100, or 48000Hz.\n");
spdif_locked = 0;
}
}
// hcd_monitor_hook(MONITOR_URB_SUBMIT, urb)
// It would catch submission paths for all urbs.
+ /* increment the reference count of the urb, as we now also control it. */
+ urb = usb_get_urb(urb);
+
/*
* Atomically queue the urb, first to our records, then to the HCD.
* Access to urb->status is controlled by urb->lock ... changes on
/* pass ownership to the completion handler */
usb_dec_dev_use (dev);
urb->complete (urb);
+ usb_put_urb (urb);
}
EXPORT_SYMBOL (usb_hcd_giveback_urb);
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.48 for Linux 2.4"
+#define DRIVER_VERSION "v1.48a for Linux 2.4"
#define EMAIL "mmcclell@bigfoot.com"
#define DRIVER_AUTHOR "Mark McClelland <mmcclell@bigfoot.com> & Bret Wallach \
& Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
static struct proc_dir_entry *ov511_proc_entry = NULL;
extern struct proc_dir_entry *video_proc_entry;
+static struct file_operations ov511_control_fops = {
+ ioctl: ov511_control_ioctl,
+};
+
#define YES_NO(x) ((x) ? "yes" : "no")
/* /proc/video/ov511/<minor#>/info */
unlock_kernel();
return;
}
- ov511->proc_control->proc_fops->ioctl = ov511_control_ioctl;
ov511->proc_control->data = ov511;
+ ov511->proc_control->proc_fops = &ov511_control_fops;
unlock_kernel();
}
}
}
- usb_driver_release_interface(&ov511_driver,
- &ov511->dev->actconfig->interface[ov511->iface]);
- ov511->dev = NULL;
-
#if defined(CONFIG_PROC_FS) && defined(CONFIG_VIDEO_PROC_FS)
destroy_proc_ov511_cam(ov511);
#endif
+ usb_driver_release_interface(&ov511_driver,
+ &ov511->dev->actconfig->interface[ov511->iface]);
+ ov511->dev = NULL;
+
/* Free the memory */
if (ov511 && !ov511->user) {
ov511_dealloc(ov511, 1);
* ----------------------------------------------------------------------*/
/**
- * usb_alloc_urb - creates a new urb for a USB driver to use
- * @iso_packets: number of iso packets for this urb
+ * usb_alloc_urb - creates a new urb for a USB driver to use
+ * @iso_packets: number of iso packets for this urb
*
- * Creates an urb for the USB driver to use and returns a pointer to it.
- * If no memory is available, NULL is returned.
+ * Creates an urb for the USB driver to use, initializes a few internal
+ * structures, increments the usage counter, and returns a pointer to it.
+ *
+ * If no memory is available, NULL is returned.
*
- * If the driver want to use this urb for interrupt, control, or bulk
- * endpoints, pass '0' as the number of iso packets.
+ * If the driver wants to use this urb for interrupt, control, or bulk
+ * endpoints, pass '0' as the number of iso packets.
*
- * The driver should call usb_free_urb() when it is finished with the urb.
+ * The driver must call usb_free_urb() when it is finished with the urb.
*/
struct urb *usb_alloc_urb(int iso_packets)
{
}
memset(urb, 0, sizeof(*urb));
-
+ atomic_inc(&urb->count);
spin_lock_init(&urb->lock);
return urb;
}
/**
- * usb_free_urb - frees the memory used by a urb
- * @urb: pointer to the urb to free
+ * usb_free_urb - frees the memory used by a urb when all users of it are finished
+ * @urb: pointer to the urb to free
+ *
+ * Must be called when a user of a urb is finished with it. When the last user
+ * of the urb calls this function, the memory of the urb is freed.
*
- * If an urb is created with a call to usb_create_urb() it should be
- * cleaned up with a call to usb_free_urb() when the driver is finished
- * with it.
+ * Note: The transfer buffer associated with the urb is not freed; that must be
+ * done elsewhere.
*/
void usb_free_urb(struct urb *urb)
{
if (urb)
- kfree(urb);
+ if (atomic_dec_and_test(&urb->count))
+ kfree(urb);
}
+
+/**
+ * usb_get_urb - increments the reference count of the urb
+ * @urb: pointer to the urb to modify
+ *
+ * This must be called whenever a urb is transferred from a device driver to a
+ * host controller driver. This allows proper reference counting to happen
+ * for urbs.
+ *
+ * A pointer to the urb with the incremented reference counter is returned.
+ */
+struct urb * usb_get_urb(struct urb *urb)
+{
+ if (urb) {
+ atomic_inc(&urb->count);
+ return urb;
+ } else
+ return NULL;
+}
+
+
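
With these changes a urb is plainly reference counted: usb_alloc_urb() hands it back with a count of one, the submission path takes a second reference on behalf of the host controller driver via usb_get_urb(), and each usb_free_urb() drops one reference, freeing the urb only on the last put. A sketch of the driver-side lifetime this enables (send_bulk, dev, pipe, buf, len and complete_fn are placeholders, not names from this patch):

	#include <linux/usb.h>

	static int send_bulk(struct usb_device *dev, unsigned int pipe,
			     void *buf, int len,
			     void (*complete_fn)(struct urb *))
	{
		int ret;
		struct urb *urb = usb_alloc_urb(0);	/* count == 1 */

		if (!urb)
			return -ENOMEM;
		usb_fill_bulk_urb(urb, dev, pipe, buf, len, complete_fn, NULL);
		ret = usb_submit_urb(urb);	/* HCD takes its own ref */
		if (ret) {
			usb_free_urb(urb);
			return ret;
		}
		usb_free_urb(urb);	/* drop ours; the memory survives
					 * until the HCD's put at giveback */
		return 0;
	}
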
/*-------------------------------------------------------------------*/
/**
* This call may be issued in interrupt context.
*
* The caller must have correctly initialized the URB before submitting
- * it. Macros such as FILL_BULK_URB() and FILL_CONTROL_URB() are
+ * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
* available to ensure that most fields are correctly initialized, for
* the particular kind of transfer, although they will not initialize
* any transfer flags.
// asynchronous request completion model
EXPORT_SYMBOL(usb_alloc_urb);
EXPORT_SYMBOL(usb_free_urb);
+EXPORT_SYMBOL(usb_get_urb);
EXPORT_SYMBOL(usb_submit_urb);
EXPORT_SYMBOL(usb_unlink_urb);
define_bool CONFIG_SMB_FS n
fi
-#
-# Do we need the compression support?
-#
if [ "$CONFIG_ZISOFS" = "y" ]; then
define_tristate CONFIG_ZISOFS_FS $CONFIG_ISO9660_FS
else
define_tristate CONFIG_ZISOFS_FS n
fi
-if [ "$CONFIG_CRAMFS" = "y" -o "$CONFIG_ZISOFS_FS" = "y" ]; then
- define_tristate CONFIG_ZLIB_FS_INFLATE y
-else
- if [ "$CONFIG_CRAMFS" = "m" -o "$CONFIG_ZISOFS_FS" = "m" ]; then
- define_tristate CONFIG_ZLIB_FS_INFLATE m
- else
- define_tristate CONFIG_ZLIB_FS_INFLATE n
- fi
-fi
mainmenu_option next_comment
comment 'Partition Types'
bio.o super.o block_dev.o char_dev.o stat.o exec.o pipe.o \
namei.o fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \
dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \
- filesystems.o namespace.o seq_file.o
+ filesystems.o namespace.o seq_file.o xattr.o
ifeq ($(CONFIG_QUOTA),y)
obj-y += dquot.o
subdir-$(CONFIG_EXT3_FS) += ext3 # Before ext2 so root fs can be ext3
subdir-$(CONFIG_JBD) += jbd
subdir-$(CONFIG_EXT2_FS) += ext2
-subdir-$(CONFIG_ZLIB_FS_INFLATE) += inflate_fs
subdir-$(CONFIG_CRAMFS) += cramfs
subdir-$(CONFIG_RAMFS) += ramfs
subdir-$(CONFIG_CODA_FS) += coda
loff_t size = file->f_dentry->d_inode->i_bdev->bd_inode->i_size;
loff_t retval;
+ lock_kernel();
+
switch (origin) {
case 2:
offset += size;
}
retval = offset;
}
+ unlock_kernel();
return retval;
}
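
The lock_kernel()/unlock_kernel() pairs added around these lseek bodies all have the same shape: take the BKL on entry, compute and store the new offset, release the lock on every exit path. As a bare-bones ->llseek for a hypothetical filesystem following the same pattern:

	#include <linux/fs.h>
	#include <linux/smp_lock.h>

	static loff_t example_llseek(struct file *file, loff_t offset, int origin)
	{
		loff_t retval = -EINVAL;

		lock_kernel();
		switch (origin) {
		case 2:
			offset += file->f_dentry->d_inode->i_size;
			break;
		case 1:
			offset += file->f_pos;
			break;
		}
		if (offset >= 0)
			retval = file->f_pos = offset;
		unlock_kernel();
		return retval;
	}
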
return 0;
}
+/* utility function for filesystems that need to do work on expanding
+ * truncates. Uses prepare/commit_write to allow the filesystem to
+ * deal with the hole.
+ */
+int generic_cont_expand(struct inode *inode, loff_t size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ unsigned long index, offset, limit;
+ int err;
+
+ err = -EFBIG;
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && size > (loff_t)limit) {
+ send_sig(SIGXFSZ, current, 0);
+ goto out;
+ }
+ if (size > inode->i_sb->s_maxbytes)
+ goto out;
+
+ offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
+
+ /* ugh. in prepare/commit_write, if from==to==start of block, we
+ ** skip the prepare. make sure we never send an offset for the start
+ ** of a block
+ */
+ if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+ offset++;
+ }
+ index = size >> PAGE_CACHE_SHIFT;
+ err = -ENOMEM;
+ page = grab_cache_page(mapping, index);
+ if (!page)
+ goto out;
+ err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
+ if (!err) {
+ err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+ }
+ UnlockPage(page);
+ page_cache_release(page);
+ if (err > 0)
+ err = 0;
+out:
+ return err;
+}
+
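
A filesystem would typically reach for this from its expanding-truncate path, before adjusting its own metadata. A sketch of such a caller (example_setsize is hypothetical; the prepare/commit_write pair inside generic_cont_expand is what actually instantiates the hole and is expected to push i_size out):

	static int example_setsize(struct inode *inode, loff_t new_size)
	{
		if (new_size > inode->i_size)
			return generic_cont_expand(inode, new_size);
		return 0;
	}
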
/*
* For moronic filesystems that do not allow holes in file.
* We may have to extend the file.
obj-m := $(O_TARGET)
-CFLAGS_uncompress.o := -I $(TOPDIR)/fs/inflate_fs
-
include $(TOPDIR)/Rules.make
Fixed deadlock bug in <devfs_d_revalidate_wait>.
Tag VFS deletable in <devfs_mk_symlink> if handle ignored.
v1.10
+ 20020129 Richard Gooch <rgooch@atnf.csiro.au>
+ Added KERN_* to remaining messages.
+ Cleaned up declaration of <stat_read>.
+ v1.11
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
-#define DEVFS_VERSION "1.10 (20020120)"
+#define DEVFS_VERSION "1.11 (20020129)"
#define DEVFS_NAME "devfs"
unsigned int cmd, unsigned long arg);
static int devfsd_close (struct inode *inode, struct file *file);
#ifdef CONFIG_DEVFS_DEBUG
-static int stat_read (struct file *file, char *buf, size_t len,
- loff_t *ppos);
+static ssize_t stat_read (struct file *file, char *buf, size_t len,
+ loff_t *ppos);
static struct file_operations stat_fops =
{
read: stat_read,
if (bdops->check_media_change == NULL) goto out;
if ( !bdops->check_media_change (dev) ) goto out;
retval = 1;
- printk ( KERN_DEBUG "VFS: Disk change detected on device %s\n",
+ printk (KERN_DEBUG "VFS: Disk change detected on device %s\n",
kdevname (dev) );
- if (invalidate_device(dev, 0))
- printk("VFS: busy inodes on changed media..\n");
+ if ( invalidate_device (dev, 0) )
+ printk (KERN_WARNING "VFS: busy inodes on changed media..\n");
/* Ugly hack to disable messages about unable to read partition table */
tmp = warn_no_part;
warn_no_part = 0;
return sb;
out_no_root:
- printk ("devfs_read_super: get root inode failed\n");
+ PRINTK ("(): get root inode failed\n");
if (root_inode) iput (root_inode);
return NULL;
} /* End Function devfs_read_super */
{
int err;
- printk ("%s: v%s Richard Gooch (rgooch@atnf.csiro.au)\n",
+ printk (KERN_INFO "%s: v%s Richard Gooch (rgooch@atnf.csiro.au)\n",
DEVFS_NAME, DEVFS_VERSION);
devfsd_buf_cache = kmem_cache_create ("devfsd_event",
sizeof (struct devfsd_buf_entry),
if (!devfsd_buf_cache) OOPS ("(): unable to allocate event slab\n");
#ifdef CONFIG_DEVFS_DEBUG
devfs_debug = devfs_debug_init;
- printk ("%s: devfs_debug: 0x%0x\n", DEVFS_NAME, devfs_debug);
+ printk (KERN_INFO "%s: devfs_debug: 0x%0x\n", DEVFS_NAME, devfs_debug);
#endif
- printk ("%s: boot_options: 0x%0x\n", DEVFS_NAME, boot_options);
+ printk (KERN_INFO "%s: boot_options: 0x%0x\n", DEVFS_NAME, boot_options);
err = register_filesystem (&devfs_fs_type);
if (!err)
{
if ( !(boot_options & OPTION_MOUNT) ) return;
err = do_mount ("none", "/dev", "devfs", 0, "");
- if (err == 0) printk ("Mounted devfs on /dev\n");
- else printk ("Warning: unable to mount devfs, err: %d\n", err);
+ if (err == 0) printk (KERN_INFO "Mounted devfs on /dev\n");
+ else PRINTK ("(): unable to mount devfs, err: %d\n", err);
} /* End Function mount_devfs_fs */
module_init(init_devfs_fs)
struct driver_file_entry * entry;
struct device * dev;
ssize_t retval = 0;
+ char * page;
entry = (struct driver_file_entry *)file->private_data;
if (!entry) {
dev = list_entry(entry->parent,struct device, dir);
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ if (count >= PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+	if (copy_from_user(page,buf,count)) {
+		retval = -EFAULT;
+		goto done;
+	}
+ *(page + count) = '\0';
+
while (count > 0) {
ssize_t len;
- len = entry->store(dev,buf,count,*ppos);
+ len = entry->store(dev,page + retval,count,*ppos);
if (len <= 0) {
if (len < 0)
*ppos += len;
buf += len;
}
+ done:
+ free_page((unsigned long)page);
return retval;
}
return 0;
}
-static int driverfs_flush(struct file * filp)
+static int driverfs_release(struct inode * inode, struct file * filp)
{
struct driver_file_entry * entry;
struct device * dev;
llseek: driverfs_file_lseek,
mmap: generic_file_mmap,
open: driverfs_open_file,
- flush: driverfs_flush,
+ release: driverfs_release,
fsync: driverfs_sync_file,
};
ei->i_dir_acl = 0;
ei->i_dtime = 0;
#ifdef EXT3_PREALLOCATE
+ ei->i_prealloc_block = 0;
ei->i_prealloc_count = 0;
#endif
ei->i_block_group = i;
{
long long retval;
+ lock_kernel();
+
switch (origin) {
case 2:
offset += file->f_dentry->d_inode->i_size;
}
retval = offset;
}
+ unlock_kernel();
return retval;
}
{
long long retval;
+ lock_kernel();
+
switch (origin) {
case 2:
offset += file->f_dentry->d_inode->i_size;
}
retval = offset;
}
+ unlock_kernel();
return retval;
}
struct inode *i = filp->f_dentry->d_inode;
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct super_block *s = i->i_sb;
+
+ lock_kernel();
+
/*printk("dir lseek\n");*/
if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
hpfs_lock_inode(i);
}
hpfs_unlock_inode(i);
ok:
+ unlock_kernel();
return filp->f_pos = new_off;
fail:
hpfs_unlock_inode(i);
/*printk("illegal lseek: %016llx\n", new_off);*/
+ unlock_kernel();
return -ESPIPE;
}
obj-m := $(O_TARGET)
-CFLAGS_compress.o := -I $(TOPDIR)/fs/inflate_fs
-
include $(TOPDIR)/Rules.make
COMPR_OBJS := compr.o compr_rubin.o compr_rtime.o pushpull.o \
- compr_zlib.o zlib.o
+ compr_zlib.o
JFFS2_OBJS := dir.o file.o ioctl.o nodelist.o malloc.o \
read.o nodemgmt.o readinode.o super.o write.o scan.o gc.o \
symlink.o build.o erase.o background.o
*
*/
+#ifdef __KERNEL__
+#include <linux/zlib.h>
+#else
#include "zlib.h"
+#endif
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/jffs2.h>
#include "nodelist.h"
-static void *zalloc(void *opaque, unsigned nr, unsigned size)
-{
- /* How much does it request? Should we use vmalloc? Or be dynamic? */
- return kmalloc(nr * size, GFP_KERNEL);
-}
-
-static void zfree(void *opaque, void *addr)
-{
- kfree(addr);
-}
#else
#define min(x,y) ((x)<(y)?(x):(y))
#ifndef D1
return -1;
#ifdef __KERNEL__
- strm.zalloc = zalloc;
- strm.zfree = zfree;
+ strm.workspace = kmalloc(zlib_deflate_workspacesize(),
+ GFP_KERNEL);
+ if (strm.workspace == NULL) {
+ printk(KERN_WARNING "deflateInit alloc of workspace failed\n");
+ return -1;
+ }
#else
strm.zalloc = (void *)0;
strm.zfree = (void *)0;
#endif
- if (Z_OK != deflateInit(&strm, 3)) {
+ if (Z_OK != zlib_deflateInit(&strm, 3)) {
printk(KERN_WARNING "deflateInit failed\n");
return -1;
}
strm.avail_in = min((unsigned)(*sourcelen-strm.total_in), strm.avail_out);
D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
strm.avail_in, strm.avail_out));
- ret = deflate(&strm, Z_PARTIAL_FLUSH);
+ ret = zlib_deflate(&strm, Z_PARTIAL_FLUSH);
D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
strm.avail_in, strm.avail_out, strm.total_in, strm.total_out));
if (ret != Z_OK) {
D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
- deflateEnd(&strm);
- return -1;
+ goto out_err;
}
}
strm.avail_out += STREAM_END_SPACE;
strm.avail_in = 0;
- ret = deflate(&strm, Z_FINISH);
+ ret = zlib_deflate(&strm, Z_FINISH);
if (ret != Z_STREAM_END) {
D1(printk(KERN_DEBUG "final deflate returned %d\n", ret));
- deflateEnd(&strm);
- return -1;
+ goto out_err;
+
}
- deflateEnd(&strm);
+ zlib_deflateEnd(&strm);
+ kfree(strm.workspace);
D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n", strm.total_in, strm.total_out));
*dstlen = strm.total_out;
*sourcelen = strm.total_in;
return 0;
+
+ out_err:
+ zlib_deflateEnd(&strm);
+ kfree(strm.workspace);
+ return -1;
}
void zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
int ret;
#ifdef __KERNEL__
- strm.zalloc = zalloc;
- strm.zfree = zfree;
+ strm.workspace = kmalloc(zlib_inflate_workspacesize(),
+ GFP_KERNEL);
+ if (strm.workspace == NULL) {
+ printk(KERN_WARNING "inflateInit alloc of workspace failed\n");
+ return;
+ }
#else
strm.zalloc = (void *)0;
strm.zfree = (void *)0;
#endif
- if (Z_OK != inflateInit(&strm)) {
+ if (Z_OK != zlib_inflateInit(&strm)) {
printk(KERN_WARNING "inflateInit failed\n");
return;
}
strm.avail_out = destlen;
strm.total_out = 0;
- while((ret = inflate(&strm, Z_FINISH)) == Z_OK)
+ while((ret = zlib_inflate(&strm, Z_FINISH)) == Z_OK)
;
if (ret != Z_STREAM_END) {
printk(KERN_NOTICE "inflate returned %d\n", ret);
}
- inflateEnd(&strm);
+ zlib_inflateEnd(&strm);
+ kfree(strm.workspace);
}
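
The conversion above is the whole of the new contract for the kernel's shared zlib: instead of supplying zalloc/zfree callbacks, the caller preallocates strm.workspace, sized by zlib_deflate_workspacesize() or zlib_inflate_workspacesize(), before the init call. The inflate side in isolation, as a sketch (example_inflate is hypothetical):

	#include <linux/slab.h>
	#include <linux/zlib.h>

	static int example_inflate(void *dst, int dstlen, void *src, int srclen)
	{
		z_stream strm;
		int ret = -1;

		strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
		if (!strm.workspace)
			return -1;
		if (zlib_inflateInit(&strm) != Z_OK)
			goto out;
		strm.next_in = src;
		strm.avail_in = srclen;
		strm.next_out = dst;
		strm.avail_out = dstlen;
		while ((ret = zlib_inflate(&strm, Z_FINISH)) == Z_OK)
			;
		ret = (ret == Z_STREAM_END) ? 0 : -1;
		zlib_inflateEnd(&strm);
	out:
		kfree(strm.workspace);
		return ret;
	}
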
+++ /dev/null
-/*
- * This file is derived from various .h and .c files from the zlib-1.0.4
- * distribution by Jean-loup Gailly and Mark Adler, with some additions
- * by Paul Mackerras to aid in implementing Deflate compression and
- * decompression for PPP packets. See zlib.h for conditions of
- * distribution and use.
- *
- * Changes that have been made include:
- * - added Z_PACKET_FLUSH (see zlib.h for details)
- * - added inflateIncomp and deflateOutputPending
- * - allow strm->next_out to be NULL, meaning discard the output
- *
- * $Id: zlib.c,v 1.3 1997/12/23 10:47:42 paulus Exp $
- */
-
-/*
- * ==FILEVERSION 971210==
- *
- * This marker is used by the Linux installation script to determine
- * whether an up-to-date version of this file is already installed.
- */
-
-#define NO_DUMMY_DECL
-#define NO_ZCFUNCS
-#define MY_ZCALLOC
-
-#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL))
-#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
-#endif
-
-
-/* +++ zutil.h */
-/* zutil.h -- internal interface and configuration of the compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */
-
-#ifndef _Z_UTIL_H
-#define _Z_UTIL_H
-
-#include "zlib.h"
-
-#if defined(KERNEL) || defined(_KERNEL)
-/* Assume this is a *BSD or SVR4 kernel */
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/systm.h>
-# define HAVE_MEMCPY
-# define memcpy(d, s, n) bcopy((s), (d), (n))
-# define memset(d, v, n) bzero((d), (n))
-# define memcmp bcmp
-
-#else
-#if defined(__KERNEL__)
-/* Assume this is a Linux kernel */
-#include <linux/string.h>
-#define HAVE_MEMCPY
-
-#else /* not kernel */
-
-#if defined(MSDOS)||defined(VMS)||defined(CRAY)||defined(WIN32)||defined(RISCOS)
-# include <stddef.h>
-# include <errno.h>
-#else
- extern int errno;
-#endif
-#ifdef STDC
-# include <string.h>
-# include <stdlib.h>
-#endif
-#endif /* __KERNEL__ */
-#endif /* _KERNEL || KERNEL */
-
-#ifndef local
-# define local static
-#endif
-/* compile with -Dlocal if your debugger can't find static symbols */
-
-typedef unsigned char uch;
-typedef uch FAR uchf;
-typedef unsigned short ush;
-typedef ush FAR ushf;
-typedef unsigned long ulg;
-
-extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */
-/* (size given to avoid silly warnings with Visual C++) */
-
-#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
-
-#define ERR_RETURN(strm,err) \
- return (strm->msg = (char*)ERR_MSG(err), (err))
-/* To be used only when the state is known to be valid */
-
- /* common constants */
-
-#ifndef DEF_WBITS
-# define DEF_WBITS MAX_WBITS
-#endif
-/* default windowBits for decompression. MAX_WBITS is for compression only */
-
-#if MAX_MEM_LEVEL >= 8
-# define DEF_MEM_LEVEL 8
-#else
-# define DEF_MEM_LEVEL MAX_MEM_LEVEL
-#endif
-/* default memLevel */
-
-#define STORED_BLOCK 0
-#define STATIC_TREES 1
-#define DYN_TREES 2
-/* The three kinds of block type */
-
-#define MIN_MATCH 3
-#define MAX_MATCH 258
-/* The minimum and maximum match lengths */
-
-#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
-
- /* target dependencies */
-
-#ifdef MSDOS
-# define OS_CODE 0x00
-# ifdef __TURBOC__
-# include <alloc.h>
-# else /* MSC or DJGPP */
-# include <malloc.h>
-# endif
-#endif
-
-#ifdef OS2
-# define OS_CODE 0x06
-#endif
-
-#ifdef WIN32 /* Window 95 & Windows NT */
-# define OS_CODE 0x0b
-#endif
-
-#if defined(VAXC) || defined(VMS)
-# define OS_CODE 0x02
-# define FOPEN(name, mode) \
- fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
-#endif
-
-#ifdef AMIGA
-# define OS_CODE 0x01
-#endif
-
-#if defined(ATARI) || defined(atarist)
-# define OS_CODE 0x05
-#endif
-
-#ifdef MACOS
-# define OS_CODE 0x07
-#endif
-
-#ifdef __50SERIES /* Prime/PRIMOS */
-# define OS_CODE 0x0F
-#endif
-
-#ifdef TOPS20
-# define OS_CODE 0x0a
-#endif
-
-#if defined(_BEOS_) || defined(RISCOS)
-# define fdopen(fd,mode) NULL /* No fdopen() */
-#endif
-
- /* Common defaults */
-
-#ifndef OS_CODE
-# define OS_CODE 0x03 /* assume Unix */
-#endif
-
-#ifndef FOPEN
-# define FOPEN(name, mode) fopen((name), (mode))
-#endif
-
- /* functions */
-
-#ifdef HAVE_STRERROR
- extern char *strerror OF((int));
-# define zstrerror(errnum) strerror(errnum)
-#else
-# define zstrerror(errnum) ""
-#endif
-
-#if defined(pyr)
-# define NO_MEMCPY
-#endif
-#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER)
- /* Use our own functions for small and medium model with MSC <= 5.0.
- * You may have to use the same strategy for Borland C (untested).
- */
-# define NO_MEMCPY
-#endif
-#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
-# define HAVE_MEMCPY
-#endif
-#ifdef HAVE_MEMCPY
-# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
-# define zmemcpy _fmemcpy
-# define zmemcmp _fmemcmp
-# define zmemzero(dest, len) _fmemset(dest, 0, len)
-# else
-# define zmemcpy memcpy
-# define zmemcmp memcmp
-# define zmemzero(dest, len) memset(dest, 0, len)
-# endif
-#else
- extern void zmemcpy OF((Bytef* dest, Bytef* source, uInt len));
- extern int zmemcmp OF((Bytef* s1, Bytef* s2, uInt len));
- extern void zmemzero OF((Bytef* dest, uInt len));
-#endif
-
-/* Diagnostic functions */
-#ifdef DEBUG_ZLIB
-# include <stdio.h>
-# ifndef verbose
-# define verbose 0
-# endif
- extern void z_error OF((char *m));
-# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
-# define Trace(x) fprintf x
-# define Tracev(x) {if (verbose) fprintf x ;}
-# define Tracevv(x) {if (verbose>1) fprintf x ;}
-# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-# define Assert(cond,msg)
-# define Trace(x)
-# define Tracev(x)
-# define Tracevv(x)
-# define Tracec(c,x)
-# define Tracecv(c,x)
-#endif
-
-
-typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len));
-
-voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size));
-void zcfree OF((voidpf opaque, voidpf ptr));
-
-#define ZALLOC(strm, items, size) \
- (*((strm)->zalloc))((strm)->opaque, (items), (size))
-#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
-#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
-
-#endif /* _Z_UTIL_H */
-/* --- zutil.h */
-
-/* +++ deflate.h */
-/* deflate.h -- internal compression state
- * Copyright (C) 1995-1996 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */
-
-#ifndef _DEFLATE_H
-#define _DEFLATE_H
-
-/* #include "zutil.h" */
-
-/* ===========================================================================
- * Internal compression state.
- */
-
-#define LENGTH_CODES 29
-/* number of length codes, not counting the special END_BLOCK code */
-
-#define LITERALS 256
-/* number of literal bytes 0..255 */
-
-#define L_CODES (LITERALS+1+LENGTH_CODES)
-/* number of Literal or Length codes, including the END_BLOCK code */
-
-#define D_CODES 30
-/* number of distance codes */
-
-#define BL_CODES 19
-/* number of codes used to transfer the bit lengths */
-
-#define HEAP_SIZE (2*L_CODES+1)
-/* maximum heap size */
-
-#define MAX_BITS 15
-/* All codes must not exceed MAX_BITS bits */
-
-#define INIT_STATE 42
-#define BUSY_STATE 113
-#define FINISH_STATE 666
-/* Stream status */
-
-
-/* Data structure describing a single value and its code string. */
-typedef struct ct_data_s {
- union {
- ush freq; /* frequency count */
- ush code; /* bit string */
- } fc;
- union {
- ush dad; /* father node in Huffman tree */
- ush len; /* length of bit string */
- } dl;
-} FAR ct_data;
-
-#define Freq fc.freq
-#define Code fc.code
-#define Dad dl.dad
-#define Len dl.len
-
-typedef struct static_tree_desc_s static_tree_desc;
-
-typedef struct tree_desc_s {
- ct_data *dyn_tree; /* the dynamic tree */
- int max_code; /* largest code with non zero frequency */
- static_tree_desc *stat_desc; /* the corresponding static tree */
-} FAR tree_desc;
-
-typedef ush Pos;
-typedef Pos FAR Posf;
-typedef unsigned IPos;
-
-/* A Pos is an index in the character window. We use short instead of int to
- * save space in the various tables. IPos is used only for parameter passing.
- */
-
-typedef struct deflate_state {
- z_streamp strm; /* pointer back to this zlib stream */
- int status; /* as the name implies */
- Bytef *pending_buf; /* output still pending */
- ulg pending_buf_size; /* size of pending_buf */
- Bytef *pending_out; /* next pending byte to output to the stream */
- int pending; /* nb of bytes in the pending buffer */
- int noheader; /* suppress zlib header and adler32 */
- Byte data_type; /* UNKNOWN, BINARY or ASCII */
- Byte method; /* STORED (for zip only) or DEFLATED */
- int last_flush; /* value of flush param for previous deflate call */
-
- /* used by deflate.c: */
-
- uInt w_size; /* LZ77 window size (32K by default) */
- uInt w_bits; /* log2(w_size) (8..16) */
- uInt w_mask; /* w_size - 1 */
-
- Bytef *window;
- /* Sliding window. Input bytes are read into the second half of the window,
- * and move to the first half later to keep a dictionary of at least wSize
- * bytes. With this organization, matches are limited to a distance of
- * wSize-MAX_MATCH bytes, but this ensures that IO is always
- * performed with a length multiple of the block size. Also, it limits
- * the window size to 64K, which is quite useful on MSDOS.
- * To do: use the user input buffer as sliding window.
- */
-
- ulg window_size;
- /* Actual size of window: 2*wSize, except when the user input buffer
- * is directly used as sliding window.
- */
-
- Posf *prev;
- /* Link to older string with same hash index. To limit the size of this
- * array to 64K, this link is maintained only for the last 32K strings.
- * An index in this array is thus a window index modulo 32K.
- */
-
- Posf *head; /* Heads of the hash chains or NIL. */
-
- uInt ins_h; /* hash index of string to be inserted */
- uInt hash_size; /* number of elements in hash table */
- uInt hash_bits; /* log2(hash_size) */
- uInt hash_mask; /* hash_size-1 */
-
- uInt hash_shift;
- /* Number of bits by which ins_h must be shifted at each input
- * step. It must be such that after MIN_MATCH steps, the oldest
- * byte no longer takes part in the hash key, that is:
- * hash_shift * MIN_MATCH >= hash_bits
- */
-
- long block_start;
- /* Window position at the beginning of the current output block. Gets
- * negative when the window is moved backwards.
- */
-
- uInt match_length; /* length of best match */
- IPos prev_match; /* previous match */
- int match_available; /* set if previous match exists */
- uInt strstart; /* start of string to insert */
- uInt match_start; /* start of matching string */
- uInt lookahead; /* number of valid bytes ahead in window */
-
- uInt prev_length;
- /* Length of the best match at previous step. Matches not greater than this
- * are discarded. This is used in the lazy match evaluation.
- */
-
- uInt max_chain_length;
- /* To speed up deflation, hash chains are never searched beyond this
- * length. A higher limit improves compression ratio but degrades the
- * speed.
- */
-
- uInt max_lazy_match;
- /* Attempt to find a better match only when the current match is strictly
- * smaller than this value. This mechanism is used only for compression
- * levels >= 4.
- */
-# define max_insert_length max_lazy_match
- /* Insert new strings in the hash table only if the match length is not
- * greater than this length. This saves time but degrades compression.
- * max_insert_length is used only for compression levels <= 3.
- */
-
- int level; /* compression level (0..9) */
- int strategy; /* favor or force Huffman coding*/
-
- uInt good_match;
- /* Use a faster search when the previous match is longer than this */
-
- int nice_match; /* Stop searching when current match exceeds this */
-
- /* used by trees.c: */
- /* Didn't use ct_data typedef below to suppress compiler warning */
- struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
- struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
- struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
-
- struct tree_desc_s l_desc; /* desc. for literal tree */
- struct tree_desc_s d_desc; /* desc. for distance tree */
- struct tree_desc_s bl_desc; /* desc. for bit length tree */
-
- ush bl_count[MAX_BITS+1];
- /* number of codes at each bit length for an optimal tree */
-
- int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
- int heap_len; /* number of elements in the heap */
- int heap_max; /* element of largest frequency */
- /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
- * The same heap array is used to build all trees.
- */
-
- uch depth[2*L_CODES+1];
- /* Depth of each subtree used as tie breaker for trees of equal frequency
- */
-
- uchf *l_buf; /* buffer for literals or lengths */
-
- uInt lit_bufsize;
- /* Size of match buffer for literals/lengths. There are 4 reasons for
- * limiting lit_bufsize to 64K:
- * - frequencies can be kept in 16 bit counters
- * - if compression is not successful for the first block, all input
- * data is still in the window so we can still emit a stored block even
- * when input comes from standard input. (This can also be done for
- * all blocks if lit_bufsize is not greater than 32K.)
- * - if compression is not successful for a file smaller than 64K, we can
- * even emit a stored file instead of a stored block (saving 5 bytes).
- * This is applicable only for zip (not gzip or zlib).
- * - creating new Huffman trees less frequently may not provide fast
- * adaptation to changes in the input data statistics. (Take for
- * example a binary file with poorly compressible code followed by
- * a highly compressible string table.) Smaller buffer sizes give
- * fast adaptation but have of course the overhead of transmitting
- * trees more frequently.
- * - I can't count above 4
- */
-
- uInt last_lit; /* running index in l_buf */
-
- ushf *d_buf;
- /* Buffer for distances. To simplify the code, d_buf and l_buf have
- * the same number of elements. To use different lengths, an extra flag
- * array would be necessary.
- */
-
- ulg opt_len; /* bit length of current block with optimal trees */
- ulg static_len; /* bit length of current block with static trees */
- ulg compressed_len; /* total bit length of compressed file */
- uInt matches; /* number of string matches in current block */
- int last_eob_len; /* bit length of EOB code for last block */
-
-#ifdef DEBUG_ZLIB
- ulg bits_sent; /* bit length of the compressed data */
-#endif
-
- ush bi_buf;
- /* Output buffer. bits are inserted starting at the bottom (least
- * significant bits).
- */
- int bi_valid;
- /* Number of valid bits in bi_buf. All bits above the last valid bit
- * are always zero.
- */
-
-} FAR deflate_state;
-
-/* Output a byte on the stream.
- * IN assertion: there is enough room in pending_buf.
- */
-#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
-
-
-#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
-/* Minimum amount of lookahead, except at the end of the input file.
- * See deflate.c for comments about the MIN_MATCH+1.
- */
-
-#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
-/* In order to simplify the code, particularly on 16 bit machines, match
- * distances are limited to MAX_DIST instead of WSIZE.
- */
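
As a worked example of the two macros above, using the values the code
itself asserts (MAX_MATCH == 258, MIN_MATCH == 3): MIN_LOOKAHEAD is
258 + 3 + 1 = 262, so with the default 32K window (w_size == 32768)
MAX_DIST(s) is 32768 - 262 = 32506, i.e. no match may reach further back
than 32506 bytes even though the window itself holds 32768.
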
-
- /* in trees.c */
-void _tr_init OF((deflate_state *s));
-int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
-ulg _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len,
- int eof));
-void _tr_align OF((deflate_state *s));
-void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len,
- int eof));
-void _tr_stored_type_only OF((deflate_state *));
-
-#endif
-/* --- deflate.h */
-
-/* +++ deflate.c */
-/* deflate.c -- compress data using the deflation algorithm
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/*
- * ALGORITHM
- *
- * The "deflation" process depends on being able to identify portions
- * of the input text which are identical to earlier input (within a
- * sliding window trailing behind the input currently being processed).
- *
- * The most straightforward technique turns out to be the fastest for
- * most input files: try all possible matches and select the longest.
- * The key feature of this algorithm is that insertions into the string
- * dictionary are very simple and thus fast, and deletions are avoided
- * completely. Insertions are performed at each input character, whereas
- * string matches are performed only when the previous match ends. So it
- * is preferable to spend more time in matches to allow very fast string
- * insertions and avoid deletions. The matching algorithm for small
- * strings is inspired from that of Rabin & Karp. A brute force approach
- * is used to find longer strings when a small match has been found.
- * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
- * (by Leonid Broukhis).
- * A previous version of this file used a more sophisticated algorithm
- * (by Fiala and Greene) which is guaranteed to run in linear amortized
- * time, but has a larger average cost, uses more memory and is patented.
- * However the F&G algorithm may be faster for some highly redundant
- * files if the parameter max_chain_length (described below) is too large.
- *
- * ACKNOWLEDGEMENTS
- *
- * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
- * I found it in 'freeze' written by Leonid Broukhis.
- * Thanks to many people for bug reports and testing.
- *
- * REFERENCES
- *
- * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
- * Available in ftp://ds.internic.net/rfc/rfc1951.txt
- *
- * A description of the Rabin and Karp algorithm is given in the book
- * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
- *
- * Fiala,E.R., and Greene,D.H.
- * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
- *
- */
-
-/* From: deflate.c,v 1.15 1996/07/24 13:40:58 me Exp $ */
-
-/* #include "deflate.h" */
-
-char deflate_copyright[] = " deflate 1.0.4 Copyright 1995-1996 Jean-loup Gailly ";
-/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
- */
-
-/* ===========================================================================
- * Function prototypes.
- */
-typedef enum {
- need_more, /* block not completed, need more input or more output */
- block_done, /* block flush performed */
- finish_started, /* finish started, need only more output at next deflate */
- finish_done /* finish done, accept no more input or output */
-} block_state;
-
-typedef block_state (*compress_func) OF((deflate_state *s, int flush));
-/* Compression function. Returns the block state after the call. */
-
-local void fill_window OF((deflate_state *s));
-local block_state deflate_stored OF((deflate_state *s, int flush));
-local block_state deflate_fast OF((deflate_state *s, int flush));
-local block_state deflate_slow OF((deflate_state *s, int flush));
-local void lm_init OF((deflate_state *s));
-local void putShortMSB OF((deflate_state *s, uInt b));
-local void flush_pending OF((z_streamp strm));
-local int read_buf OF((z_streamp strm, charf *buf, unsigned size));
-#ifdef ASMV
- void match_init OF((void)); /* asm code initialization */
- uInt longest_match OF((deflate_state *s, IPos cur_match));
-#else
-local uInt longest_match OF((deflate_state *s, IPos cur_match));
-#endif
-
-#ifdef DEBUG_ZLIB
-local void check_match OF((deflate_state *s, IPos start, IPos match,
- int length));
-#endif
-
-/* ===========================================================================
- * Local data
- */
-
-#define NIL 0
-/* Tail of hash chains */
-
-#ifndef TOO_FAR
-# define TOO_FAR 4096
-#endif
-/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
-
-#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
-/* Minimum amount of lookahead, except at the end of the input file.
- * See deflate.c for comments about the MIN_MATCH+1.
- */
-
-/* Values for max_lazy_match, good_match and max_chain_length, depending on
- * the desired pack level (0..9). The values given below have been tuned to
- * exclude worst case performance for pathological files. Better values may be
- * found for specific files.
- */
-typedef struct config_s {
- ush good_length; /* reduce lazy search above this match length */
- ush max_lazy; /* do not perform lazy search above this match length */
- ush nice_length; /* quit search above this match length */
- ush max_chain;
- compress_func func;
-} config;
-
-local config configuration_table[10] = {
-/* good lazy nice chain */
-/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
-/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
-/* 2 */ {4, 5, 16, 8, deflate_fast},
-/* 3 */ {4, 6, 32, 32, deflate_fast},
-
-/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
-/* 5 */ {8, 16, 32, 32, deflate_slow},
-/* 6 */ {8, 16, 128, 128, deflate_slow},
-/* 7 */ {8, 32, 128, 256, deflate_slow},
-/* 8 */ {32, 128, 258, 1024, deflate_slow},
-/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
-
-/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
- * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
- * meaning.
- */
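
Reading the table at the default level 6, for example: lazy evaluation is
attempted only while the current match is shorter than 16 bytes (max_lazy),
the search stops as soon as a 128-byte match is found (nice_length), at
most 128 hash-chain entries are examined (max_chain), and that budget is
quartered once the previous match reaches good_length 8 (see
longest_match() below).
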
-
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
-#ifndef NO_DUMMY_DECL
-struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
-#endif
-
-/* ===========================================================================
- * Update a hash value with the given input byte
- * IN assertion: all calls to UPDATE_HASH are made with consecutive
- * input characters, so that a running hash key can be computed from the
- * previous key instead of complete recalculation each time.
- */
-#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
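
A minimal standalone sketch of this rolling hash, using the values
deflateInit2_() below derives for the default memLevel 8 (hash_bits == 15,
hence hash_shift == (15+3-1)/3 == 5): after MIN_MATCH == 3 updates the
oldest byte has been shifted past the 15-bit mask, so the key depends only
on the last three bytes.

#include <stdio.h>

int main(void)
{
    unsigned hash_shift = 5;                 /* (hash_bits+MIN_MATCH-1)/MIN_MATCH */
    unsigned hash_mask = (1u << 15) - 1;     /* hash_bits == 15 */
    const unsigned char buf[] = "abcd";
    unsigned h = 0;
    int i;

    /* 'a' influences h for exactly three steps, then is masked out:
     * the hash printed after 'd' equals the hash of "bcd" alone.
     */
    for (i = 0; i < 4; i++) {
        h = ((h << hash_shift) ^ buf[i]) & hash_mask;
        printf("after '%c': h = 0x%04x\n", buf[i], h);
    }
    return 0;
}
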
-
-
-/* ===========================================================================
- * Insert string str in the dictionary and set match_head to the previous head
- * of the hash chain (the most recent string with same hash key). Return
- * the previous length of the hash chain.
- * IN assertion: all calls to INSERT_STRING are made with consecutive
- * input characters and the first MIN_MATCH bytes of str are valid
- * (except for the last MIN_MATCH-1 bytes of the input file).
- */
-#define INSERT_STRING(s, str, match_head) \
- (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
- s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
- s->head[s->ins_h] = (Pos)(str))
-
-/* ===========================================================================
- * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
- * prev[] will be initialized on the fly.
- */
-#define CLEAR_HASH(s) \
- s->head[s->hash_size-1] = NIL; \
- zmemzero((charf *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
-
-/* ========================================================================= */
-int deflateInit_(strm, level, version, stream_size)
- z_streamp strm;
- int level;
- const char *version;
- int stream_size;
-{
- return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY, version, stream_size);
- /* To do: ignore strm->next_in if we use it as window */
-}
-
-/* ========================================================================= */
-int deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
- version, stream_size)
- z_streamp strm;
- int level;
- int method;
- int windowBits;
- int memLevel;
- int strategy;
- const char *version;
- int stream_size;
-{
- deflate_state *s;
- int noheader = 0;
- static char* my_version = ZLIB_VERSION;
-
- ushf *overlay;
- /* We overlay pending_buf and d_buf+l_buf. This works since the average
- * output size for (length,distance) codes is <= 24 bits.
- */
-
- if (version == Z_NULL || version[0] != my_version[0] ||
- stream_size != sizeof(z_stream)) {
- return Z_VERSION_ERROR;
- }
- if (strm == Z_NULL) return Z_STREAM_ERROR;
-
- strm->msg = Z_NULL;
-#ifndef NO_ZCFUNCS
- if (strm->zalloc == Z_NULL) {
- strm->zalloc = zcalloc;
- strm->opaque = (voidpf)0;
- }
- if (strm->zfree == Z_NULL) strm->zfree = zcfree;
-#endif
-
- if (level == Z_DEFAULT_COMPRESSION) level = 6;
-
- if (windowBits < 0) { /* undocumented feature: suppress zlib header */
- noheader = 1;
- windowBits = -windowBits;
- }
- if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
- windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
- strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
- s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
- if (s == Z_NULL) return Z_MEM_ERROR;
- strm->state = (struct internal_state FAR *)s;
- s->strm = strm;
-
- s->noheader = noheader;
- s->w_bits = windowBits;
- s->w_size = 1 << s->w_bits;
- s->w_mask = s->w_size - 1;
-
- s->hash_bits = memLevel + 7;
- s->hash_size = 1 << s->hash_bits;
- s->hash_mask = s->hash_size - 1;
- s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
-
- s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
- s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
- s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
-
- s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
-
- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
- s->pending_buf = (uchf *) overlay;
- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
-
- if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
- s->pending_buf == Z_NULL) {
- strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
- deflateEnd (strm);
- return Z_MEM_ERROR;
- }
- s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
-
- s->level = level;
- s->strategy = strategy;
- s->method = (Byte)method;
-
- return deflateReset(strm);
-}
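
To put numbers on the allocations above for the default parameters
(windowBits == 15 and memLevel == DEF_MEM_LEVEL, assumed here to be 8):
w_size = 32768, so window takes 64K bytes; prev holds 32768 2-byte Pos
entries (64K); hash_bits = 15, so head holds another 32768 entries (64K);
and lit_bufsize = 1 << 14 = 16384, so pending_buf takes
16384 * (sizeof(ush)+2) = 64K. That is roughly 256K of state per stream,
plus the deflate_state structure itself.
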
-
-/* ========================================================================= */
-int deflateSetDictionary (strm, dictionary, dictLength)
- z_streamp strm;
- const Bytef *dictionary;
- uInt dictLength;
-{
- deflate_state *s;
- uInt length = dictLength;
- uInt n;
- IPos hash_head = 0;
-
- if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
- return Z_STREAM_ERROR;
-
- s = (deflate_state *) strm->state;
- if (s->status != INIT_STATE) return Z_STREAM_ERROR;
-
- strm->adler = adler32(strm->adler, dictionary, dictLength);
-
- if (length < MIN_MATCH) return Z_OK;
- if (length > MAX_DIST(s)) {
- length = MAX_DIST(s);
-#ifndef USE_DICT_HEAD
- dictionary += dictLength - length; /* use the tail of the dictionary */
-#endif
- }
- zmemcpy((charf *)s->window, dictionary, length);
- s->strstart = length;
- s->block_start = (long)length;
-
- /* Insert all strings in the hash table (except for the last two bytes).
- * s->lookahead stays null, so s->ins_h will be recomputed at the next
- * call of fill_window.
- */
- s->ins_h = s->window[0];
- UPDATE_HASH(s, s->ins_h, s->window[1]);
- for (n = 0; n <= length - MIN_MATCH; n++) {
- INSERT_STRING(s, n, hash_head);
- }
- if (hash_head) hash_head = 0; /* to make compiler happy */
- return Z_OK;
-}
-
-/* ========================================================================= */
-int deflateReset (strm)
- z_streamp strm;
-{
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL ||
- strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;
-
- strm->total_in = strm->total_out = 0;
- strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
- strm->data_type = Z_UNKNOWN;
-
- s = (deflate_state *)strm->state;
- s->pending = 0;
- s->pending_out = s->pending_buf;
-
- if (s->noheader < 0) {
- s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
- }
- s->status = s->noheader ? BUSY_STATE : INIT_STATE;
- strm->adler = 1;
- s->last_flush = Z_NO_FLUSH;
-
- _tr_init(s);
- lm_init(s);
-
- return Z_OK;
-}
-
-/* ========================================================================= */
-int deflateParams(strm, level, strategy)
- z_streamp strm;
- int level;
- int strategy;
-{
- deflate_state *s;
- compress_func func;
- int err = Z_OK;
-
- if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- if (level == Z_DEFAULT_COMPRESSION) {
- level = 6;
- }
- if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
- func = configuration_table[s->level].func;
-
- if (func != configuration_table[level].func && strm->total_in != 0) {
- /* Flush the last buffer: */
- err = deflate(strm, Z_PARTIAL_FLUSH);
- }
- if (s->level != level) {
- s->level = level;
- s->max_lazy_match = configuration_table[level].max_lazy;
- s->good_match = configuration_table[level].good_length;
- s->nice_match = configuration_table[level].nice_length;
- s->max_chain_length = configuration_table[level].max_chain;
- }
- s->strategy = strategy;
- return err;
-}
-
-/* =========================================================================
- * Put a short in the pending buffer. The 16-bit value is put in MSB order.
- * IN assertion: the stream state is correct and there is enough room in
- * pending_buf.
- */
-local void putShortMSB (s, b)
- deflate_state *s;
- uInt b;
-{
- put_byte(s, (Byte)(b >> 8));
- put_byte(s, (Byte)(b & 0xff));
-}
-
-/* =========================================================================
- * Flush as much pending output as possible. All deflate() output goes
- * through this function so some applications may wish to modify it
- * to avoid allocating a large strm->next_out buffer and copying into it.
- * (See also read_buf()).
- */
-local void flush_pending(strm)
- z_streamp strm;
-{
- deflate_state *s = (deflate_state *) strm->state;
- unsigned len = s->pending;
-
- if (len > strm->avail_out) len = strm->avail_out;
- if (len == 0) return;
-
- if (strm->next_out != Z_NULL) {
- zmemcpy(strm->next_out, s->pending_out, len);
- strm->next_out += len;
- }
- s->pending_out += len;
- strm->total_out += len;
- strm->avail_out -= len;
- s->pending -= len;
- if (s->pending == 0) {
- s->pending_out = s->pending_buf;
- }
-}
-
-/* ========================================================================= */
-int deflate (strm, flush)
- z_streamp strm;
- int flush;
-{
- int old_flush; /* value of flush param for previous deflate call */
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL ||
- flush > Z_FINISH || flush < 0) {
- return Z_STREAM_ERROR;
- }
- s = (deflate_state *) strm->state;
-
- if ((strm->next_in == Z_NULL && strm->avail_in != 0) ||
- (s->status == FINISH_STATE && flush != Z_FINISH)) {
- ERR_RETURN(strm, Z_STREAM_ERROR);
- }
- if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
-
- s->strm = strm; /* just in case */
- old_flush = s->last_flush;
- s->last_flush = flush;
-
- /* Write the zlib header */
- if (s->status == INIT_STATE) {
-
- uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
- uInt level_flags = (s->level-1) >> 1;
-
- if (level_flags > 3) level_flags = 3;
- header |= (level_flags << 6);
- if (s->strstart != 0) header |= PRESET_DICT;
- header += 31 - (header % 31);
-
- s->status = BUSY_STATE;
- putShortMSB(s, header);
-
- /* Save the adler32 of the preset dictionary: */
- if (s->strstart != 0) {
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
- }
- strm->adler = 1L;
- }
-
- /* Flush as much pending output as possible */
- if (s->pending != 0) {
- flush_pending(strm);
- if (strm->avail_out == 0) {
- /* Since avail_out is 0, deflate will be called again with
- * more output space, but possibly with both pending and
- * avail_in equal to zero. There won't be anything to do,
- * but this is not an error situation so make sure we
- * return OK instead of BUF_ERROR at next call of deflate:
- */
- s->last_flush = -1;
- return Z_OK;
- }
-
- /* Make sure there is something to do and avoid duplicate consecutive
- * flushes. For repeated and useless calls with Z_FINISH, we keep
- * returning Z_STREAM_END instead of Z_BUF_ERROR.
- */
- } else if (strm->avail_in == 0 && flush <= old_flush &&
- flush != Z_FINISH) {
- ERR_RETURN(strm, Z_BUF_ERROR);
- }
-
- /* User must not provide more input after the first FINISH: */
- if (s->status == FINISH_STATE && strm->avail_in != 0) {
- ERR_RETURN(strm, Z_BUF_ERROR);
- }
-
- /* Start a new block or continue the current one.
- */
- if (strm->avail_in != 0 || s->lookahead != 0 ||
- (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
- block_state bstate;
-
- bstate = (*(configuration_table[s->level].func))(s, flush);
-
- if (bstate == finish_started || bstate == finish_done) {
- s->status = FINISH_STATE;
- }
- if (bstate == need_more || bstate == finish_started) {
- if (strm->avail_out == 0) {
- s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
- }
- return Z_OK;
- /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
- * of deflate should use the same flush parameter to make sure
- * that the flush is complete. So we don't have to output an
- * empty block here, this will be done at next call. This also
- * ensures that for a very small output buffer, we emit at most
- * one empty block.
- */
- }
- if (bstate == block_done) {
- if (flush == Z_PARTIAL_FLUSH) {
- _tr_align(s);
- } else if (flush == Z_PACKET_FLUSH) {
- /* Output just the 3-bit `stored' block type value,
- but not a zero length. */
- _tr_stored_type_only(s);
- } else { /* FULL_FLUSH or SYNC_FLUSH */
- _tr_stored_block(s, (char*)0, 0L, 0);
- /* For a full flush, this empty block will be recognized
- * as a special marker by inflate_sync().
- */
- if (flush == Z_FULL_FLUSH) {
- CLEAR_HASH(s); /* forget history */
- }
- }
- flush_pending(strm);
- if (strm->avail_out == 0) {
- s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
- return Z_OK;
- }
- }
- }
- Assert(strm->avail_out > 0, "bug2");
-
- if (flush != Z_FINISH) return Z_OK;
- if (s->noheader) return Z_STREAM_END;
-
- /* Write the zlib trailer (adler32) */
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
- flush_pending(strm);
- /* If avail_out is zero, the application will call deflate again
- * to flush the rest.
- */
- s->noheader = -1; /* write the trailer only once! */
- return s->pending != 0 ? Z_OK : Z_STREAM_END;
-}
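
A hedged usage sketch of driving deflate() above, assuming the usual
zlib.h entry points, the userland default allocators, and an output buffer
large enough for the whole result; compress_buffer() is an illustrative
helper, not part of this file.

#include <string.h>
#include "zlib.h"

/* Feed one complete input buffer through deflate() and collect the
 * output, relying on the Z_FINISH handling above.
 */
static int compress_buffer(const unsigned char *in, unsigned in_len,
                           unsigned char *out, unsigned out_size,
                           unsigned long *out_len)
{
    z_stream strm;
    int err;

    memset(&strm, 0, sizeof(strm)); /* default zalloc/zfree (userland) */
    err = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (err != Z_OK)
        return err;

    strm.next_in = (Bytef *)in;
    strm.avail_in = in_len;
    strm.next_out = out;
    strm.avail_out = out_size;

    /* All input is present, so one Z_FINISH loop suffices; it ends
     * with Z_STREAM_END, or Z_BUF_ERROR if `out` is too small.
     */
    do {
        err = deflate(&strm, Z_FINISH);
    } while (err == Z_OK);

    *out_len = strm.total_out;
    deflateEnd(&strm);
    return err == Z_STREAM_END ? Z_OK : err;
}
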
-
-/* ========================================================================= */
-int deflateEnd (strm)
- z_streamp strm;
-{
- int status;
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- status = s->status;
- if (status != INIT_STATE && status != BUSY_STATE &&
- status != FINISH_STATE) {
- return Z_STREAM_ERROR;
- }
-
- /* Deallocate in reverse order of allocations: */
- TRY_FREE(strm, s->pending_buf);
- TRY_FREE(strm, s->head);
- TRY_FREE(strm, s->prev);
- TRY_FREE(strm, s->window);
-
- ZFREE(strm, s);
- strm->state = Z_NULL;
-
- return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
-}
-
-/* =========================================================================
- * Copy the source state to the destination state.
- */
-int deflateCopy (dest, source)
- z_streamp dest;
- z_streamp source;
-{
- deflate_state *ds;
- deflate_state *ss;
- ushf *overlay;
-
- if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL)
- return Z_STREAM_ERROR;
- ss = (deflate_state *) source->state;
-
- *dest = *source;
-
- ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
- if (ds == Z_NULL) return Z_MEM_ERROR;
- dest->state = (struct internal_state FAR *) ds;
- *ds = *ss;
- ds->strm = dest;
-
- ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
- ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
- ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
- ds->pending_buf = (uchf *) overlay;
-
- if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
- ds->pending_buf == Z_NULL) {
- deflateEnd (dest);
- return Z_MEM_ERROR;
- }
- /* ??? following zmemcpy doesn't work for 16-bit MSDOS */
- zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
- zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
- zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
- zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
-
- ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
-
- ds->l_desc.dyn_tree = ds->dyn_ltree;
- ds->d_desc.dyn_tree = ds->dyn_dtree;
- ds->bl_desc.dyn_tree = ds->bl_tree;
-
- return Z_OK;
-}
-
-/* ===========================================================================
- * Return the number of bytes of output which are immediately available
- * from the compressor (i.e. still pending in pending_buf).
- */
-int deflateOutputPending (strm)
- z_streamp strm;
-{
- if (strm == Z_NULL || strm->state == Z_NULL) return 0;
-
- return ((deflate_state *)(strm->state))->pending;
-}
-
-/* ===========================================================================
- * Read a new buffer from the current input stream, update the adler32
- * and total number of bytes read. All deflate() input goes through
- * this function so some applications may wish to modify it to avoid
- * allocating a large strm->next_in buffer and copying from it.
- * (See also flush_pending()).
- */
-local int read_buf(strm, buf, size)
- z_streamp strm;
- charf *buf;
- unsigned size;
-{
- unsigned len = strm->avail_in;
-
- if (len > size) len = size;
- if (len == 0) return 0;
-
- strm->avail_in -= len;
-
- if (!((deflate_state *)(strm->state))->noheader) {
- strm->adler = adler32(strm->adler, strm->next_in, len);
- }
- zmemcpy(buf, strm->next_in, len);
- strm->next_in += len;
- strm->total_in += len;
-
- return (int)len;
-}
-
-/* ===========================================================================
- * Initialize the "longest match" routines for a new zlib stream
- */
-local void lm_init (s)
- deflate_state *s;
-{
- s->window_size = (ulg)2L*s->w_size;
-
- CLEAR_HASH(s);
-
- /* Set the default configuration parameters:
- */
- s->max_lazy_match = configuration_table[s->level].max_lazy;
- s->good_match = configuration_table[s->level].good_length;
- s->nice_match = configuration_table[s->level].nice_length;
- s->max_chain_length = configuration_table[s->level].max_chain;
-
- s->strstart = 0;
- s->block_start = 0L;
- s->lookahead = 0;
- s->match_length = s->prev_length = MIN_MATCH-1;
- s->match_available = 0;
- s->ins_h = 0;
-#ifdef ASMV
- match_init(); /* initialize the asm code */
-#endif
-}
-
-/* ===========================================================================
- * Set match_start to the longest match starting at the given string and
- * return its length. Matches shorter or equal to prev_length are discarded,
- * in which case the result is equal to prev_length and match_start is
- * garbage.
- * IN assertions: cur_match is the head of the hash chain for the current
- * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
- * OUT assertion: the match length is not greater than s->lookahead.
- */
-#ifndef ASMV
-/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
- * match.S. The code will be functionally equivalent.
- */
-local uInt longest_match(s, cur_match)
- deflate_state *s;
- IPos cur_match; /* current match */
-{
- unsigned chain_length = s->max_chain_length;/* max hash chain length */
- register Bytef *scan = s->window + s->strstart; /* current string */
- register Bytef *match; /* matched string */
- register int len; /* length of current match */
- int best_len = s->prev_length; /* best match length so far */
- int nice_match = s->nice_match; /* stop if match long enough */
- IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
- s->strstart - (IPos)MAX_DIST(s) : NIL;
- /* Stop when cur_match becomes <= limit. To simplify the code,
- * we prevent matches with the string of window index 0.
- */
- Posf *prev = s->prev;
- uInt wmask = s->w_mask;
-
-#ifdef UNALIGNED_OK
- /* Compare two bytes at a time. Note: this is not always beneficial.
- * Try with and without -DUNALIGNED_OK to check.
- */
- register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
- register ush scan_start = *(ushf*)scan;
- register ush scan_end = *(ushf*)(scan+best_len-1);
-#else
- register Bytef *strend = s->window + s->strstart + MAX_MATCH;
- register Byte scan_end1 = scan[best_len-1];
- register Byte scan_end = scan[best_len];
-#endif
-
- /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
- * It is easy to get rid of this optimization if necessary.
- */
- Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
-
- /* Do not waste too much time if we already have a good match: */
- if (s->prev_length >= s->good_match) {
- chain_length >>= 2;
- }
- /* Do not look for matches beyond the end of the input. This is necessary
- * to make deflate deterministic.
- */
- if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
-
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
-
- do {
- Assert(cur_match < s->strstart, "no future");
- match = s->window + cur_match;
-
- /* Skip to next match if the match length cannot increase
- * or if the match length is less than 2:
- */
-#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
- /* This code assumes sizeof(unsigned short) == 2. Do not use
- * UNALIGNED_OK if your compiler uses a different size.
- */
- if (*(ushf*)(match+best_len-1) != scan_end ||
- *(ushf*)match != scan_start) continue;
-
- /* It is not necessary to compare scan[2] and match[2] since they are
- * always equal when the other bytes match, given that the hash keys
- * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
- * strstart+3, +5, ... up to strstart+257. We check for insufficient
- * lookahead only every 4th comparison; the 128th check will be made
- * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
- * necessary to put more guard bytes at the end of the window, or
- * to check more often for insufficient lookahead.
- */
- Assert(scan[2] == match[2], "scan[2]?");
- scan++, match++;
- do {
- } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- scan < strend);
- /* The funny "do {}" generates better code on most compilers */
-
- /* Here, scan <= window+strstart+257 */
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
- if (*scan == *match) scan++;
-
- len = (MAX_MATCH - 1) - (int)(strend-scan);
- scan = strend - (MAX_MATCH-1);
-
-#else /* UNALIGNED_OK */
-
- if (match[best_len] != scan_end ||
- match[best_len-1] != scan_end1 ||
- *match != *scan ||
- *++match != scan[1]) continue;
-
- /* The check at best_len-1 can be removed because it will be made
- * again later. (This heuristic is not always a win.)
- * It is not necessary to compare scan[2] and match[2] since they
- * are always equal when the other bytes match, given that
- * the hash keys are equal and that HASH_BITS >= 8.
- */
- scan += 2, match++;
- Assert(*scan == *match, "match[2]?");
-
- /* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
- */
- do {
- } while (*++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- scan < strend);
-
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
-
- len = MAX_MATCH - (int)(strend - scan);
- scan = strend - MAX_MATCH;
-
-#endif /* UNALIGNED_OK */
-
- if (len > best_len) {
- s->match_start = cur_match;
- best_len = len;
- if (len >= nice_match) break;
-#ifdef UNALIGNED_OK
- scan_end = *(ushf*)(scan+best_len-1);
-#else
- scan_end1 = scan[best_len-1];
- scan_end = scan[best_len];
-#endif
- }
- } while ((cur_match = prev[cur_match & wmask]) > limit
- && --chain_length != 0);
-
- if ((uInt)best_len <= s->lookahead) return best_len;
- return s->lookahead;
-}
-#endif /* ASMV */
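
For illustration only, a self-contained toy version of the chain walk
above, with the scan_end short-cuts, the chain_length budget and window
sliding all omitted; the prev[] chain is hand-built for the example rather
than filled by INSERT_STRING.

#include <stdio.h>

int main(void)
{
    const unsigned char win[] = "abcxabcyabc";  /* toy window */
    unsigned short prev[16] = {0};  /* hand-built hash chain */
    unsigned pos = 8;               /* current string "abc" */
    unsigned cur = 4;               /* most recent earlier "abc" */
    unsigned best = 0, best_at = 0;

    prev[4] = 0;  /* position 4 chains back to position 0 */

    for (;;) {
        unsigned len = 0;
        while (win[pos + len] != '\0' &&
               win[cur + len] == win[pos + len])
            len++;
        if (len > best) { best = len; best_at = cur; }
        if (cur == 0)
            break;
        cur = prev[cur];            /* next older string, same hash */
    }
    printf("longest match for pos %u: %u bytes at %u\n",
           pos, best, best_at);     /* prints: 3 bytes at 4 */
    return 0;
}
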
-
-#ifdef DEBUG_ZLIB
-/* ===========================================================================
- * Check that the match at match_start is indeed a match.
- */
-local void check_match(s, start, match, length)
- deflate_state *s;
- IPos start, match;
- int length;
-{
- /* check that the match is indeed a match */
- if (zmemcmp((charf *)s->window + match,
- (charf *)s->window + start, length) != EQUAL) {
- fprintf(stderr, " start %u, match %u, length %d\n",
- start, match, length);
- do {
- fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
- } while (--length != 0);
- z_error("invalid match");
- }
- if (z_verbose > 1) {
- fprintf(stderr,"\\[%d,%d]", start-match, length);
- do { putc(s->window[start++], stderr); } while (--length != 0);
- }
-}
-#else
-# define check_match(s, start, match, length)
-#endif
-
-/* ===========================================================================
- * Fill the window when the lookahead becomes insufficient.
- * Updates strstart and lookahead.
- *
- * IN assertion: lookahead < MIN_LOOKAHEAD
- * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
- * At least one byte has been read, or avail_in == 0; reads are
- * performed for at least two bytes (required for the zip translate_eol
- * option -- not supported here).
- */
-local void fill_window(s)
- deflate_state *s;
-{
- register unsigned n, m;
- register Posf *p;
- unsigned more; /* Amount of free space at the end of the window. */
- uInt wsize = s->w_size;
-
- do {
- more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
-
- /* Deal with !@#$% 64K limit: */
- if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
- more = wsize;
-
- } else if (more == (unsigned)(-1)) {
- /* Very unlikely, but possible on 16 bit machine if strstart == 0
- * and lookahead == 1 (input done one byte at time)
- */
- more--;
-
- /* If the window is almost full and there is insufficient lookahead,
- * move the upper half to the lower one to make room in the upper half.
- */
- } else if (s->strstart >= wsize+MAX_DIST(s)) {
-
- zmemcpy((charf *)s->window, (charf *)s->window+wsize,
- (unsigned)wsize);
- s->match_start -= wsize;
- s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
- s->block_start -= (long) wsize;
-
- /* Slide the hash table (could be avoided with 32 bit values
- at the expense of memory usage). We slide even when level == 0
- to keep the hash table consistent if we switch back to level > 0
- later. (Using level 0 permanently is not an optimal usage of
- zlib, so we don't care about this pathological case.)
- */
- n = s->hash_size;
- p = &s->head[n];
- do {
- m = *--p;
- *p = (Pos)(m >= wsize ? m-wsize : NIL);
- } while (--n);
-
- n = wsize;
- p = &s->prev[n];
- do {
- m = *--p;
- *p = (Pos)(m >= wsize ? m-wsize : NIL);
- /* If n is not on any hash chain, prev[n] is garbage but
- * its value will never be used.
- */
- } while (--n);
- more += wsize;
- }
- if (s->strm->avail_in == 0) return;
-
- /* If there was no sliding:
- * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
- * more == window_size - lookahead - strstart
- * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
- * => more >= window_size - 2*WSIZE + 2
- * In the BIG_MEM or MMAP case (not yet supported),
- * window_size == input_size + MIN_LOOKAHEAD &&
- * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
- * Otherwise, window_size == 2*WSIZE so more >= 2.
- * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
- */
- Assert(more >= 2, "more < 2");
-
- n = read_buf(s->strm, (charf *)s->window + s->strstart + s->lookahead,
- more);
- s->lookahead += n;
-
- /* Initialize the hash value now that we have some input: */
- if (s->lookahead >= MIN_MATCH) {
- s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- }
- /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
- * but this is not important since only literal bytes will be emitted.
- */
-
- } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
-}
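
To put numbers on the sliding case above: with the default 32K window,
wsize == 32768 and MAX_DIST(s) == 32506, so the slide triggers once
strstart reaches 65274. The upper 32K of window[] is copied down, strstart
becomes 32506, match_start and block_start drop by 32768, and every
head[]/prev[] entry m is rewritten to m - 32768, or to NIL when the
position it named has slid out of the window.
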
-
-/* ===========================================================================
- * Flush the current block, with given end-of-file flag.
- * IN assertion: strstart is set to the end of the current match.
- */
-#define FLUSH_BLOCK_ONLY(s, eof) { \
- _tr_flush_block(s, (s->block_start >= 0L ? \
- (charf *)&s->window[(unsigned)s->block_start] : \
- (charf *)Z_NULL), \
- (ulg)((long)s->strstart - s->block_start), \
- (eof)); \
- s->block_start = s->strstart; \
- flush_pending(s->strm); \
- Tracev((stderr,"[FLUSH]")); \
-}
-
-/* Same but force premature exit if necessary. */
-#define FLUSH_BLOCK(s, eof) { \
- FLUSH_BLOCK_ONLY(s, eof); \
- if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
-}
-
-/* ===========================================================================
- * Copy without compression as much as possible from the input stream, return
- * the current block state.
- * This function does not insert new strings in the dictionary since
- * incompressible data is probably not useful. This function is used
- * only for the level=0 compression option.
- * NOTE: this function should be optimized to avoid extra copying from
- * window to pending_buf.
- */
-local block_state deflate_stored(s, flush)
- deflate_state *s;
- int flush;
-{
- /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
- * to pending_buf_size, and each stored block has a 5 byte header:
- */
- ulg max_block_size = 0xffff;
- ulg max_start;
-
- if (max_block_size > s->pending_buf_size - 5) {
- max_block_size = s->pending_buf_size - 5;
- }
-
- /* Copy as much as possible from input to output: */
- for (;;) {
- /* Fill the window as much as possible: */
- if (s->lookahead <= 1) {
-
- Assert(s->strstart < s->w_size+MAX_DIST(s) ||
- s->block_start >= (long)s->w_size, "slide too late");
-
- fill_window(s);
- if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
-
- if (s->lookahead == 0) break; /* flush the current block */
- }
- Assert(s->block_start >= 0L, "block gone");
-
- s->strstart += s->lookahead;
- s->lookahead = 0;
-
- /* Emit a stored block if pending_buf will be full: */
- max_start = s->block_start + max_block_size;
- if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
- /* strstart == 0 is possible when wraparound on 16-bit machine */
- s->lookahead = (uInt)(s->strstart - max_start);
- s->strstart = (uInt)max_start;
- FLUSH_BLOCK(s, 0);
- }
- /* Flush if we may have to slide, otherwise block_start may become
- * negative and the data will be gone:
- */
- if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
- FLUSH_BLOCK(s, 0);
- }
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
-
-/* ===========================================================================
- * Compress as much as possible from the input stream, return the current
- * block state.
- * This function does not perform lazy evaluation of matches and inserts
- * new strings in the dictionary only for unmatched strings or for short
- * matches. It is used only for the fast compression options.
- */
-local block_state deflate_fast(s, flush)
- deflate_state *s;
- int flush;
-{
- IPos hash_head = NIL; /* head of the hash chain */
- int bflush; /* set if current block must be flushed */
-
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- if (s->lookahead < MIN_LOOKAHEAD) {
- fill_window(s);
- if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
- return need_more;
- }
- if (s->lookahead == 0) break; /* flush the current block */
- }
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- if (s->lookahead >= MIN_MATCH) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
-
- /* Find the longest match, discarding those <= prev_length.
- * At this point we have always match_length < MIN_MATCH
- */
- if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- if (s->strategy != Z_HUFFMAN_ONLY) {
- s->match_length = longest_match (s, hash_head);
- }
- /* longest_match() sets match_start */
- }
- if (s->match_length >= MIN_MATCH) {
- check_match(s, s->strstart, s->match_start, s->match_length);
-
- bflush = _tr_tally(s, s->strstart - s->match_start,
- s->match_length - MIN_MATCH);
-
- s->lookahead -= s->match_length;
-
- /* Insert new strings in the hash table only if the match length
- * is not too large. This saves time but degrades compression.
- */
- if (s->match_length <= s->max_insert_length &&
- s->lookahead >= MIN_MATCH) {
- s->match_length--; /* string at strstart already in hash table */
- do {
- s->strstart++;
- INSERT_STRING(s, s->strstart, hash_head);
- /* strstart never exceeds WSIZE-MAX_MATCH, so there are
- * always MIN_MATCH bytes ahead.
- */
- } while (--s->match_length != 0);
- s->strstart++;
- } else {
- s->strstart += s->match_length;
- s->match_length = 0;
- s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
- * matter since it will be recomputed at next deflate call.
- */
- }
- } else {
- /* No match, output a literal byte */
- Tracevv((stderr,"%c", s->window[s->strstart]));
- bflush = _tr_tally (s, 0, s->window[s->strstart]);
- s->lookahead--;
- s->strstart++;
- }
- if (bflush) FLUSH_BLOCK(s, 0);
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
-
-/* ===========================================================================
- * Same as above, but achieves better compression. We use a lazy
- * evaluation for matches: a match is finally adopted only if there is
- * no better match at the next window position.
- */
-local block_state deflate_slow(s, flush)
- deflate_state *s;
- int flush;
-{
- IPos hash_head = NIL; /* head of hash chain */
- int bflush; /* set if current block must be flushed */
-
- /* Process the input block. */
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- if (s->lookahead < MIN_LOOKAHEAD) {
- fill_window(s);
- if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
- return need_more;
- }
- if (s->lookahead == 0) break; /* flush the current block */
- }
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- if (s->lookahead >= MIN_MATCH) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
-
- /* Find the longest match, discarding those <= prev_length.
- */
- s->prev_length = s->match_length, s->prev_match = s->match_start;
- s->match_length = MIN_MATCH-1;
-
- if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
- s->strstart - hash_head <= MAX_DIST(s)) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- if (s->strategy != Z_HUFFMAN_ONLY) {
- s->match_length = longest_match (s, hash_head);
- }
- /* longest_match() sets match_start */
-
- if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
- (s->match_length == MIN_MATCH &&
- s->strstart - s->match_start > TOO_FAR))) {
-
- /* If prev_match is also MIN_MATCH, match_start is garbage
- * but we will ignore the current match anyway.
- */
- s->match_length = MIN_MATCH-1;
- }
- }
- /* If there was a match at the previous step and the current
- * match is not better, output the previous match:
- */
- if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
- uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
- /* Do not insert strings in hash table beyond this. */
-
- check_match(s, s->strstart-1, s->prev_match, s->prev_length);
-
- bflush = _tr_tally(s, s->strstart -1 - s->prev_match,
- s->prev_length - MIN_MATCH);
-
- /* Insert in hash table all strings up to the end of the match.
- * strstart-1 and strstart are already inserted. If there is not
- * enough lookahead, the last two strings are not inserted in
- * the hash table.
- */
- s->lookahead -= s->prev_length-1;
- s->prev_length -= 2;
- do {
- if (++s->strstart <= max_insert) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
- } while (--s->prev_length != 0);
- s->match_available = 0;
- s->match_length = MIN_MATCH-1;
- s->strstart++;
-
- if (bflush) FLUSH_BLOCK(s, 0);
-
- } else if (s->match_available) {
- /* If there was no match at the previous position, output a
- * single literal. If there was a match but the current match
- * is longer, truncate the previous match to a single literal.
- */
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- if (_tr_tally (s, 0, s->window[s->strstart-1])) {
- FLUSH_BLOCK_ONLY(s, 0);
- }
- s->strstart++;
- s->lookahead--;
- if (s->strm->avail_out == 0) return need_more;
- } else {
- /* There is no previous match to compare with, wait for
- * the next step to decide.
- */
- s->match_available = 1;
- s->strstart++;
- s->lookahead--;
- }
- }
- Assert (flush != Z_NO_FLUSH, "no flush?");
- if (s->match_available) {
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- _tr_tally (s, 0, s->window[s->strstart-1]);
- s->match_available = 0;
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
-/* --- deflate.c */
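
A small example of what the lazy evaluation in deflate_slow() buys:
suppose the window already contains "abc" and "bcdef", and the next input
is "abcdef". deflate_fast() greedily takes the length-3 match "abc" and
must then code "def" separately; deflate_slow() finds the same length-3
match at 'a', defers it, discovers the length-5 match "bcdef" one position
later, and so emits the single literal 'a' followed by one length-5 match
instead.
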
-
-/* +++ trees.c */
-/* trees.c -- output deflated data using Huffman coding
- * Copyright (C) 1995-1996 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/*
- * ALGORITHM
- *
- * The "deflation" process uses several Huffman trees. The more
- * common source values are represented by shorter bit sequences.
- *
- * Each code tree is stored in a compressed form which is itself
- * a Huffman encoding of the lengths of all the code strings (in
- * ascending order by source values). The actual code strings are
- * reconstructed from the lengths in the inflate process, as described
- * in the deflate specification.
- *
- * REFERENCES
- *
- * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
- * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
- *
- * Storer, James A.
- * Data Compression: Methods and Theory, pp. 49-50.
- * Computer Science Press, 1988. ISBN 0-7167-8156-5.
- *
- * Sedgewick, R.
- * Algorithms, p290.
- * Addison-Wesley, 1983. ISBN 0-201-06672-6.
- */
-
-/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
-
-/* #include "deflate.h" */
-
-#ifdef DEBUG_ZLIB
-# include <ctype.h>
-#endif
-
-/* ===========================================================================
- * Constants
- */
-
-#define MAX_BL_BITS 7
-/* Bit length codes must not exceed MAX_BL_BITS bits */
-
-#define END_BLOCK 256
-/* end of block literal code */
-
-#define REP_3_6 16
-/* repeat previous bit length 3-6 times (2 bits of repeat count) */
-
-#define REPZ_3_10 17
-/* repeat a zero length 3-10 times (3 bits of repeat count) */
-
-#define REPZ_11_138 18
-/* repeat a zero length 11-138 times (7 bits of repeat count) */
-
-local int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
- = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
-
-local int extra_dbits[D_CODES] /* extra bits for each distance code */
- = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
-
-local int extra_blbits[BL_CODES]/* extra bits for each bit length code */
- = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
-
-local uch bl_order[BL_CODES]
- = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
-/* The lengths of the bit length codes are sent in order of decreasing
- * probability, to avoid transmitting the lengths for unused bit length codes.
- */
-
-#define Buf_size (8 * 2*sizeof(char))
-/* Number of bits used within bi_buf. (bi_buf might be implemented on
- * more than 16 bits on some systems.)
- */
-
-/* ===========================================================================
- * Local data. These are initialized only once.
- */
-
-local ct_data static_ltree[L_CODES+2];
-/* The static literal tree. Since the bit lengths are imposed, there is no
- * need for the L_CODES extra codes used during heap construction. However,
- * the codes 286 and 287 are needed to build a canonical tree (see _tr_init
- * below).
- */
-
-local ct_data static_dtree[D_CODES];
-/* The static distance tree. (Actually a trivial tree since all codes use
- * 5 bits.)
- */
-
-local uch dist_code[512];
-/* distance codes. The first 256 values correspond to the distances
- * 3 .. 258, the last 256 values correspond to the top 8 bits of
- * the 15 bit distances.
- */
-
-local uch length_code[MAX_MATCH-MIN_MATCH+1];
-/* length code for each normalized match length (0 == MIN_MATCH) */
-
-local int base_length[LENGTH_CODES];
-/* First normalized length for each code (0 = MIN_MATCH) */
-
-local int base_dist[D_CODES];
-/* First normalized distance for each code (0 = distance of 1) */
-
-struct static_tree_desc_s {
- ct_data *static_tree; /* static tree or NULL */
- intf *extra_bits; /* extra bits for each code or NULL */
- int extra_base; /* base index for extra_bits */
- int elems; /* max number of elements in the tree */
- int max_length; /* max bit length for the codes */
-};
-
-local static_tree_desc static_l_desc =
-{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
-
-local static_tree_desc static_d_desc =
-{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
-
-local static_tree_desc static_bl_desc =
-{(ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
-
-/* ===========================================================================
- * Local (static) routines in this file.
- */
-
-local void tr_static_init OF((void));
-local void init_block OF((deflate_state *s));
-local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
-local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
-local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
-local void build_tree OF((deflate_state *s, tree_desc *desc));
-local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
-local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
-local int build_bl_tree OF((deflate_state *s));
-local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
- int blcodes));
-local void compress_block OF((deflate_state *s, ct_data *ltree,
- ct_data *dtree));
-local void set_data_type OF((deflate_state *s));
-local unsigned bi_reverse OF((unsigned value, int length));
-local void bi_windup OF((deflate_state *s));
-local void bi_flush OF((deflate_state *s));
-local void copy_block OF((deflate_state *s, charf *buf, unsigned len,
- int header));
-
-#ifndef DEBUG_ZLIB
-# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
- /* Send a code of the given tree. c and tree must not have side effects */
-
-#else /* DEBUG_ZLIB */
-# define send_code(s, c, tree) \
- { if (verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
- send_bits(s, tree[c].Code, tree[c].Len); }
-#endif
-
-#define d_code(dist) \
- ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
-/* Mapping from a distance to a distance code. dist is the distance - 1 and
- * must not have side effects. dist_code[256] and dist_code[257] are never
- * used.
- */
-
-/* ===========================================================================
- * Output a short LSB first on the stream.
- * IN assertion: there is enough room in pendingBuf.
- */
-#define put_short(s, w) { \
- put_byte(s, (uch)((w) & 0xff)); \
- put_byte(s, (uch)((ush)(w) >> 8)); \
-}
-
-/* ===========================================================================
- * Send a value on a given number of bits.
- * IN assertion: length <= 16 and value fits in length bits.
- */
-#ifdef DEBUG_ZLIB
-local void send_bits OF((deflate_state *s, int value, int length));
-
-local void send_bits(s, value, length)
- deflate_state *s;
- int value; /* value to send */
- int length; /* number of bits */
-{
- Tracevv((stderr," l %2d v %4x ", length, value));
- Assert(length > 0 && length <= 15, "invalid length");
- s->bits_sent += (ulg)length;
-
- /* If not enough room in bi_buf, use (valid) bits from bi_buf and
- * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
- * unused bits in value.
- */
- if (s->bi_valid > (int)Buf_size - length) {
- s->bi_buf |= (value << s->bi_valid);
- put_short(s, s->bi_buf);
- s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
- s->bi_valid += length - Buf_size;
- } else {
- s->bi_buf |= value << s->bi_valid;
- s->bi_valid += length;
- }
-}
-#else /* !DEBUG_ZLIB */
-
-#define send_bits(s, value, length) \
-{ int len = length;\
- if (s->bi_valid > (int)Buf_size - len) {\
- int val = value;\
- s->bi_buf |= (val << s->bi_valid);\
- put_short(s, s->bi_buf);\
- s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
- s->bi_valid += len - Buf_size;\
- } else {\
- s->bi_buf |= (value) << s->bi_valid;\
- s->bi_valid += len;\
- }\
-}
-#endif /* DEBUG_ZLIB */
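
A standalone sketch of the bit buffering implemented by the macro above,
assuming the 16-bit ush case (Buf_size == 16): codes are ORed in at the
current bit position, a full 16-bit chunk is flushed LSB-first, and the
overflow bits are carried over. sim_send_bits() and emit_short() are
illustrative names, not part of this file.

#include <stdio.h>

#define BUF_BITS 16                 /* Buf_size for a 16-bit ush */

static unsigned short bi_buf;       /* bits not yet flushed */
static int bi_valid;                /* number of valid bits in bi_buf */

static void emit_short(unsigned short w)
{
    printf("flush: %02x %02x\n", w & 0xff, (w >> 8) & 0xff);
}

/* Mirrors the non-debug send_bits() macro above. */
static void sim_send_bits(int value, int length)
{
    if (bi_valid > BUF_BITS - length) {
        bi_buf |= (unsigned short)(value << bi_valid);
        emit_short(bi_buf);         /* 16 bits full: emit LSB-first */
        bi_buf = (unsigned short)((unsigned)value >> (BUF_BITS - bi_valid));
        bi_valid += length - BUF_BITS;
    } else {
        bi_buf |= (unsigned short)(value << bi_valid);
        bi_valid += length;
    }
}

int main(void)
{
    sim_send_bits(0x5, 3);          /* 3 bits buffered */
    sim_send_bits(0x3f, 7);         /* 10 bits buffered */
    sim_send_bits(0x1ff, 9);        /* 19 bits: prints "flush: fd fd" */
    printf("carried over: 0x%x (%d bits)\n", bi_buf, bi_valid);
    return 0;                       /* carried over: 0x7 (3 bits) */
}
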
-
-
-#define MAX(a,b) (a >= b ? a : b)
-/* the arguments must not have side effects */
-
-/* ===========================================================================
- * Initialize the various 'constant' tables. In a multi-threaded environment,
- * this function may be called by two threads concurrently, but this is
- * harmless since both invocations do exactly the same thing.
- */
-local void tr_static_init()
-{
- static int static_init_done = 0;
- int n; /* iterates over tree elements */
- int bits; /* bit counter */
- int length; /* length value */
- int code; /* code value */
- int dist; /* distance index */
- ush bl_count[MAX_BITS+1];
- /* number of codes at each bit length for an optimal tree */
-
- if (static_init_done) return;
-
- /* Initialize the mapping length (0..255) -> length code (0..28) */
- length = 0;
- for (code = 0; code < LENGTH_CODES-1; code++) {
- base_length[code] = length;
- for (n = 0; n < (1<<extra_lbits[code]); n++) {
- length_code[length++] = (uch)code;
- }
- }
- Assert (length == 256, "tr_static_init: length != 256");
- /* Note that the length 255 (match length 258) can be represented
- * in two different ways: code 284 + 5 bits or code 285, so we
- * overwrite length_code[255] to use the best encoding:
- */
- length_code[length-1] = (uch)code;
-
- /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
- dist = 0;
- for (code = 0 ; code < 16; code++) {
- base_dist[code] = dist;
- for (n = 0; n < (1<<extra_dbits[code]); n++) {
- dist_code[dist++] = (uch)code;
- }
- }
- Assert (dist == 256, "tr_static_init: dist != 256");
- dist >>= 7; /* from now on, all distances are divided by 128 */
- for ( ; code < D_CODES; code++) {
- base_dist[code] = dist << 7;
- for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
- dist_code[256 + dist++] = (uch)code;
- }
- }
- Assert (dist == 256, "tr_static_init: 256+dist != 512");
-
- /* Construct the codes of the static literal tree */
- for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
- n = 0;
- while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
- while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
- while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
- while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
- /* Codes 286 and 287 do not exist, but we must include them in the
- * tree construction to get a canonical Huffman tree (longest code
- * all ones)
- */
- gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
-
- /* The static distance tree is trivial: */
- for (n = 0; n < D_CODES; n++) {
- static_dtree[n].Len = 5;
- static_dtree[n].Code = bi_reverse((unsigned)n, 5);
- }
- static_init_done = 1;
-}
-
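-/* Illustrative sketch, not part of zlib: a reduced model of the
- * length -> code mapping loop above, using a hypothetical 4-code table. */
-static void demo_length_map(void)
-{
-    static const int extra[4] = {0, 0, 1, 2};  /* extra bits per code */
-    unsigned char map[8];                      /* 1+1+2+4 lengths total */
-    int base[4];
-    int code, n, length = 0;
-
-    for (code = 0; code < 4; code++) {
-        base[code] = length;
-        for (n = 0; n < (1 << extra[code]); n++)
-            map[length++] = (unsigned char)code;
-    }
-    /* map[] is now {0,1,2,2,3,3,3,3}: each code spans 2^extra lengths,
-     * which is exactly how length_code[] and base_length[] are filled. */
-    (void)base;
-    (void)map;
-}
-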
-/* ===========================================================================
- * Initialize the tree data structures for a new zlib stream.
- */
-void _tr_init(s)
- deflate_state *s;
-{
- tr_static_init();
-
- s->compressed_len = 0L;
-
- s->l_desc.dyn_tree = s->dyn_ltree;
- s->l_desc.stat_desc = &static_l_desc;
-
- s->d_desc.dyn_tree = s->dyn_dtree;
- s->d_desc.stat_desc = &static_d_desc;
-
- s->bl_desc.dyn_tree = s->bl_tree;
- s->bl_desc.stat_desc = &static_bl_desc;
-
- s->bi_buf = 0;
- s->bi_valid = 0;
- s->last_eob_len = 8; /* enough lookahead for inflate */
-#ifdef DEBUG_ZLIB
- s->bits_sent = 0L;
-#endif
-
- /* Initialize the first block of the first file: */
- init_block(s);
-}
-
-/* ===========================================================================
- * Initialize a new block.
- */
-local void init_block(s)
- deflate_state *s;
-{
- int n; /* iterates over tree elements */
-
- /* Initialize the trees. */
- for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
- for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
- for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
-
- s->dyn_ltree[END_BLOCK].Freq = 1;
- s->opt_len = s->static_len = 0L;
- s->last_lit = s->matches = 0;
-}
-
-#define SMALLEST 1
-/* Index within the heap array of least frequent node in the Huffman tree */
-
-
-/* ===========================================================================
- * Remove the smallest element from the heap and recreate the heap with
- * one less element. Updates heap and heap_len.
- */
-#define pqremove(s, tree, top) \
-{\
- top = s->heap[SMALLEST]; \
- s->heap[SMALLEST] = s->heap[s->heap_len--]; \
- pqdownheap(s, tree, SMALLEST); \
-}
-
-/* ===========================================================================
- * Compares two subtrees, using the tree depth as a tie breaker when
- * the subtrees have equal frequency. This minimizes the worst case length.
- */
-#define smaller(tree, n, m, depth) \
- (tree[n].Freq < tree[m].Freq || \
- (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
-
-/* ===========================================================================
- * Restore the heap property by moving down the tree starting at node k,
- * exchanging a node with the smallest of its two sons if necessary, stopping
- * when the heap property is re-established (each father smaller than its
- * two sons).
- */
-local void pqdownheap(s, tree, k)
- deflate_state *s;
- ct_data *tree; /* the tree to restore */
- int k; /* node to move down */
-{
- int v = s->heap[k];
- int j = k << 1; /* left son of k */
- while (j <= s->heap_len) {
- /* Set j to the smallest of the two sons: */
- if (j < s->heap_len &&
- smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
- j++;
- }
- /* Exit if v is smaller than both sons */
- if (smaller(tree, v, s->heap[j], s->depth)) break;
-
- /* Exchange v with the smallest son */
- s->heap[k] = s->heap[j]; k = j;
-
- /* And continue down the tree, setting j to the left son of k */
- j <<= 1;
- }
- s->heap[k] = v;
-}
-
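-/* Illustrative sketch, not part of zlib: the same sift-down over plain
- * ints; zlib's version compares Freq with the depth tie-break instead. */
-static void demo_siftdown(int *heap, int len, int k)
-{
-    int v = heap[k];
-    int j = k << 1;                        /* left son; heap is 1-based */
-    while (j <= len) {
-        if (j < len && heap[j + 1] < heap[j]) j++;  /* pick smaller son */
-        if (v <= heap[j]) break;           /* heap property restored */
-        heap[k] = heap[j]; k = j;          /* pull son up, descend */
-        j <<= 1;
-    }
-    heap[k] = v;
-}
-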
-/* ===========================================================================
- * Compute the optimal bit lengths for a tree and update the total bit length
- * for the current block.
- * IN assertion: the fields freq and dad are set, heap[heap_max] and
- * above are the tree nodes sorted by increasing frequency.
- * OUT assertions: the field len is set to the optimal bit length, the
- * array bl_count contains the frequencies for each bit length.
- * The length opt_len is updated; static_len is also updated if stree is
- * not null.
- */
-local void gen_bitlen(s, desc)
- deflate_state *s;
- tree_desc *desc; /* the tree descriptor */
-{
- ct_data *tree = desc->dyn_tree;
- int max_code = desc->max_code;
- ct_data *stree = desc->stat_desc->static_tree;
- intf *extra = desc->stat_desc->extra_bits;
- int base = desc->stat_desc->extra_base;
- int max_length = desc->stat_desc->max_length;
- int h; /* heap index */
- int n, m; /* iterate over the tree elements */
- int bits; /* bit length */
- int xbits; /* extra bits */
- ush f; /* frequency */
- int overflow = 0; /* number of elements with bit length too large */
-
- for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
-
- /* In a first pass, compute the optimal bit lengths (which may
- * overflow in the case of the bit length tree).
- */
- tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
-
- for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
- n = s->heap[h];
- bits = tree[tree[n].Dad].Len + 1;
- if (bits > max_length) bits = max_length, overflow++;
- tree[n].Len = (ush)bits;
- /* We overwrite tree[n].Dad which is no longer needed */
-
- if (n > max_code) continue; /* not a leaf node */
-
- s->bl_count[bits]++;
- xbits = 0;
- if (n >= base) xbits = extra[n-base];
- f = tree[n].Freq;
- s->opt_len += (ulg)f * (bits + xbits);
- if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
- }
- if (overflow == 0) return;
-
- Trace((stderr,"\nbit length overflow\n"));
- /* This happens for example on obj2 and pic of the Calgary corpus */
-
- /* Find the first bit length which could increase: */
- do {
- bits = max_length-1;
- while (s->bl_count[bits] == 0) bits--;
- s->bl_count[bits]--; /* move one leaf down the tree */
- s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
- s->bl_count[max_length]--;
- /* The brother of the overflow item also moves one step up,
- * but this does not affect bl_count[max_length]
- */
- overflow -= 2;
- } while (overflow > 0);
-
- /* Now recompute all bit lengths, scanning in increasing frequency.
- * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
- * lengths instead of fixing only the wrong ones. This idea is taken
- * from 'ar' written by Haruhiko Okumura.)
- */
- for (bits = max_length; bits != 0; bits--) {
- n = s->bl_count[bits];
- while (n != 0) {
- m = s->heap[--h];
- if (m > max_code) continue;
- if (tree[m].Len != (unsigned) bits) {
- Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
- s->opt_len += ((long)bits - (long)tree[m].Len)
- *(long)tree[m].Freq;
- tree[m].Len = (ush)bits;
- }
- n--;
- }
- }
-}
-
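-/* Illustrative sketch, not part of zlib: the invariant behind the
- * overflow repair above. After trading one leaf at max_length for a
- * deeper placement of another, the Kraft sum of 2^-len over all leaves
- * must stay <= 1 for a prefix code to exist. */
-static int demo_kraft_ok(const int *len, int n, int max_bits)
-{
-    long sum = 0;
-    long unit = 1L << max_bits;   /* count in units of 2^-max_bits */
-    int i;
-
-    for (i = 0; i < n; i++)
-        sum += unit >> len[i];
-    return sum <= unit;           /* nonzero means the lengths are valid */
-}
-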
-/* ===========================================================================
- * Generate the codes for a given tree and bit counts (which need not be
- * optimal).
- * IN assertion: the array bl_count contains the bit length statistics for
- * the given tree and the field len is set for all tree elements.
- * OUT assertion: the field code is set for all tree elements of non
- * zero code length.
- */
-local void gen_codes (tree, max_code, bl_count)
- ct_data *tree; /* the tree to decorate */
- int max_code; /* largest code with non zero frequency */
- ushf *bl_count; /* number of codes at each bit length */
-{
- ush next_code[MAX_BITS+1]; /* next code value for each bit length */
- ush code = 0; /* running code value */
- int bits; /* bit index */
- int n; /* code index */
-
- /* The distribution counts are first used to generate the code values
- * without bit reversal.
- */
- for (bits = 1; bits <= MAX_BITS; bits++) {
- next_code[bits] = code = (code + bl_count[bits-1]) << 1;
- }
- /* Check that the bit counts in bl_count are consistent. The last code
- * must be all ones.
- */
- Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
- "inconsistent bit counts");
- Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
-
- for (n = 0; n <= max_code; n++) {
- int len = tree[n].Len;
- if (len == 0) continue;
- /* Now reverse the bits */
- tree[n].Code = bi_reverse(next_code[len]++, len);
-
- Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
- n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
- }
-}
-
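-/* Illustrative sketch, not part of zlib: a worked example of the
- * canonical numbering performed by gen_codes(). */
-static void demo_canonical_codes(void)
-{
-    /* one 1-bit, one 2-bit and two 3-bit codes (a complete set) */
-    static const int bl_count[4] = {0, 1, 1, 2};
-    int next_code[4];
-    int code = 0, bits;
-
-    for (bits = 1; bits <= 3; bits++)
-        next_code[bits] = code = (code + bl_count[bits - 1]) << 1;
-    /* next_code = {-, 0, 2, 6}: the codes are 0, 10, 110 and 111; the
-     * last code is all ones, which is what the Assert above verifies. */
-    (void)next_code;
-}
-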
-/* ===========================================================================
- * Construct one Huffman tree and assign the code bit strings and lengths.
- * Update the total bit length for the current block.
- * IN assertion: the field freq is set for all tree elements.
- * OUT assertions: the fields len and code are set to the optimal bit length
- * and corresponding code. The length opt_len is updated; static_len is
- * also updated if stree is not null. The field max_code is set.
- */
-local void build_tree(s, desc)
- deflate_state *s;
- tree_desc *desc; /* the tree descriptor */
-{
- ct_data *tree = desc->dyn_tree;
- ct_data *stree = desc->stat_desc->static_tree;
- int elems = desc->stat_desc->elems;
- int n, m; /* iterate over heap elements */
- int max_code = -1; /* largest code with non zero frequency */
- int node; /* new node being created */
-
- /* Construct the initial heap, with least frequent element in
- * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
- * heap[0] is not used.
- */
- s->heap_len = 0, s->heap_max = HEAP_SIZE;
-
- for (n = 0; n < elems; n++) {
- if (tree[n].Freq != 0) {
- s->heap[++(s->heap_len)] = max_code = n;
- s->depth[n] = 0;
- } else {
- tree[n].Len = 0;
- }
- }
-
- /* The pkzip format requires that at least one distance code exists,
- * and that at least one bit should be sent even if there is only one
- * possible code. So to avoid special checks later on we force at least
- * two codes of non zero frequency.
- */
- while (s->heap_len < 2) {
- node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
- tree[node].Freq = 1;
- s->depth[node] = 0;
- s->opt_len--; if (stree) s->static_len -= stree[node].Len;
- /* node is 0 or 1 so it does not have extra bits */
- }
- desc->max_code = max_code;
-
- /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
- * establish sub-heaps of increasing lengths:
- */
- for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
-
- /* Construct the Huffman tree by repeatedly combining the least two
- * frequent nodes.
- */
- node = elems; /* next internal node of the tree */
- do {
- pqremove(s, tree, n); /* n = node of least frequency */
- m = s->heap[SMALLEST]; /* m = node of next least frequency */
-
- s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
- s->heap[--(s->heap_max)] = m;
-
- /* Create a new node father of n and m */
- tree[node].Freq = tree[n].Freq + tree[m].Freq;
- s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1);
- tree[n].Dad = tree[m].Dad = (ush)node;
-#ifdef DUMP_BL_TREE
- if (tree == s->bl_tree) {
- fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
- node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
- }
-#endif
- /* and insert the new node in the heap */
- s->heap[SMALLEST] = node++;
- pqdownheap(s, tree, SMALLEST);
-
- } while (s->heap_len >= 2);
-
- s->heap[--(s->heap_max)] = s->heap[SMALLEST];
-
- /* At this point, the fields freq and dad are set. We can now
- * generate the bit lengths.
- */
- gen_bitlen(s, (tree_desc *)desc);
-
- /* The field len is now set, we can generate the bit codes */
- gen_codes ((ct_data *)tree, max_code, s->bl_count);
-}
-
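-/* Illustrative trace, not part of zlib: build_tree() on four leaves with
- * frequencies {5, 2, 1, 1}. */
-static void demo_huffman_merge(void)
-{
-    int m1 = 1 + 1;      /* merge the two least frequent leaves */
-    int m2 = m1 + 2;     /* merge the result with the freq-2 leaf */
-    int root = m2 + 5;   /* 9: total frequency at the root */
-    /* gen_bitlen() then walks heap[heap_max..] and assigns depths
-     * {1, 2, 3, 3} -- the optimal prefix-code lengths for these counts. */
-    (void)root;
-}
-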
-/* ===========================================================================
- * Scan a literal or distance tree to determine the frequencies of the codes
- * in the bit length tree.
- */
-local void scan_tree (s, tree, max_code)
- deflate_state *s;
- ct_data *tree; /* the tree to be scanned */
- int max_code; /* and its largest code of non zero frequency */
-{
- int n; /* iterates over all tree elements */
- int prevlen = -1; /* last emitted length */
- int curlen; /* length of current code */
- int nextlen = tree[0].Len; /* length of next code */
- int count = 0; /* repeat count of the current code */
- int max_count = 7; /* max repeat count */
- int min_count = 4; /* min repeat count */
-
- if (nextlen == 0) max_count = 138, min_count = 3;
- tree[max_code+1].Len = (ush)0xffff; /* guard */
-
- for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
- if (++count < max_count && curlen == nextlen) {
- continue;
- } else if (count < min_count) {
- s->bl_tree[curlen].Freq += count;
- } else if (curlen != 0) {
- if (curlen != prevlen) s->bl_tree[curlen].Freq++;
- s->bl_tree[REP_3_6].Freq++;
- } else if (count <= 10) {
- s->bl_tree[REPZ_3_10].Freq++;
- } else {
- s->bl_tree[REPZ_11_138].Freq++;
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0) {
- max_count = 138, min_count = 3;
- } else if (curlen == nextlen) {
- max_count = 6, min_count = 3;
- } else {
- max_count = 7, min_count = 4;
- }
- }
-}
-
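-/* Illustrative data only, not part of zlib: the run-length scheme that
- * scan_tree() tallies and send_tree() emits. */
-static void demo_length_rle(void)
-{
-    /* code lengths for ten symbols */
-    static const int lens[10] = {8, 8, 8, 8, 8, 0, 0, 0, 0, 5};
-    /* encoded as: literal 8 then REP_3_6 repeating it 4 more times,
-     * REPZ_3_10 for the four zeros, and a lone literal 5 -- four
-     * bl_tree codes instead of ten. */
-    (void)lens;
-}
-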
-/* ===========================================================================
- * Send a literal or distance tree in compressed form, using the codes in
- * bl_tree.
- */
-local void send_tree (s, tree, max_code)
- deflate_state *s;
- ct_data *tree; /* the tree to be scanned */
- int max_code; /* and its largest code of non zero frequency */
-{
- int n; /* iterates over all tree elements */
- int prevlen = -1; /* last emitted length */
- int curlen; /* length of current code */
- int nextlen = tree[0].Len; /* length of next code */
- int count = 0; /* repeat count of the current code */
- int max_count = 7; /* max repeat count */
- int min_count = 4; /* min repeat count */
-
- /* tree[max_code+1].Len = -1; */ /* guard already set */
- if (nextlen == 0) max_count = 138, min_count = 3;
-
- for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
- if (++count < max_count && curlen == nextlen) {
- continue;
- } else if (count < min_count) {
- do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
-
- } else if (curlen != 0) {
- if (curlen != prevlen) {
- send_code(s, curlen, s->bl_tree); count--;
- }
- Assert(count >= 3 && count <= 6, " 3_6?");
- send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
-
- } else if (count <= 10) {
- send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
-
- } else {
- send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0) {
- max_count = 138, min_count = 3;
- } else if (curlen == nextlen) {
- max_count = 6, min_count = 3;
- } else {
- max_count = 7, min_count = 4;
- }
- }
-}
-
-/* ===========================================================================
- * Construct the Huffman tree for the bit lengths and return the index in
- * bl_order of the last bit length code to send.
- */
-local int build_bl_tree(s)
- deflate_state *s;
-{
- int max_blindex; /* index of last bit length code of non zero freq */
-
- /* Determine the bit length frequencies for literal and distance trees */
- scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
- scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
-
- /* Build the bit length tree: */
- build_tree(s, (tree_desc *)(&(s->bl_desc)));
- /* opt_len now includes the length of the tree representations, except
- * the lengths of the bit length codes and the 5+5+4 bits for the counts.
- */
-
- /* Determine the number of bit length codes to send. The pkzip format
- * requires that at least 4 bit length codes be sent. (appnote.txt says
- * 3 but the actual value used is 4.)
- */
- for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
- if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
- }
- /* Update opt_len to include the bit length tree and counts */
- s->opt_len += 3*(max_blindex+1) + 5+5+4;
- Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
- s->opt_len, s->static_len));
-
- return max_blindex;
-}
-
-/* ===========================================================================
- * Send the header for a block using dynamic Huffman trees: the counts, the
- * lengths of the bit length codes, the literal tree and the distance tree.
- * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
- */
-local void send_all_trees(s, lcodes, dcodes, blcodes)
- deflate_state *s;
- int lcodes, dcodes, blcodes; /* number of codes for each tree */
-{
- int rank; /* index in bl_order */
-
- Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
- Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
- "too many codes");
- Tracev((stderr, "\nbl counts: "));
- send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
- send_bits(s, dcodes-1, 5);
- send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
- for (rank = 0; rank < blcodes; rank++) {
- Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
- send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
- }
- Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
-
- send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
- Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
-
- send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
- Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
-}
-
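-/* Illustrative arithmetic only, not part of zlib: the header fields sent
- * above, with hypothetical counts. */
-static void demo_tree_header(void)
-{
-    int lcodes = 260, dcodes = 10, blcodes = 15;
-    int hlit  = lcodes - 257;   /* 3, sent in 5 bits */
-    int hdist = dcodes - 1;     /* 9, sent in 5 bits */
-    int hclen = blcodes - 4;    /* 11, sent in 4 bits */
-    (void)hlit; (void)hdist; (void)hclen;
-}
-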
-/* ===========================================================================
- * Send a stored block
- */
-void _tr_stored_block(s, buf, stored_len, eof)
- deflate_state *s;
- charf *buf; /* input block */
- ulg stored_len; /* length of input block */
- int eof; /* true if this is the last block for a file */
-{
- send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
- s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
- s->compressed_len += (stored_len + 4) << 3;
-
- copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
-}
-
-/* Send just the `stored block' type code without any length bytes or data.
- */
-void _tr_stored_type_only(s)
- deflate_state *s;
-{
- send_bits(s, (STORED_BLOCK << 1), 3);
- bi_windup(s);
- s->compressed_len = (s->compressed_len + 3) & ~7L;
-}
-
-
-/* ===========================================================================
- * Send one empty static block to give enough lookahead for inflate.
- * This takes 10 bits, of which 7 may remain in the bit buffer.
- * The current inflate code requires 9 bits of lookahead. If the
- * last two codes for the previous block (real code plus EOB) were coded
- * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
- * the last real code. In this case we send two empty static blocks instead
- * of one. (There are no problems if the previous block is stored or fixed.)
- * To simplify the code, we assume the worst case of last real code encoded
- * on one bit only.
- */
-void _tr_align(s)
- deflate_state *s;
-{
- send_bits(s, STATIC_TREES<<1, 3);
- send_code(s, END_BLOCK, static_ltree);
- s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
- bi_flush(s);
- /* Of the 10 bits for the empty block, we have already sent
- * (10 - bi_valid) bits. The lookahead for the last real code (before
- * the EOB of the previous block) was thus at least one plus the length
- * of the EOB plus what we have just sent of the empty static block.
- */
- if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
- send_bits(s, STATIC_TREES<<1, 3);
- send_code(s, END_BLOCK, static_ltree);
- s->compressed_len += 10L;
- bi_flush(s);
- }
- s->last_eob_len = 7;
-}
-
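-/* Illustrative sketch, not part of zlib: the lookahead test above. With
- * last_eob_len = 5 and bi_valid = 10, 1 + 5 + 10 - 10 = 6 < 9, so a
- * second empty static block is sent. */
-static int demo_align_needs_second_block(int last_eob_len, int bi_valid)
-{
-    return 1 + last_eob_len + 10 - bi_valid < 9;
-}
-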
-/* ===========================================================================
- * Determine the best encoding for the current block: dynamic trees, static
- * trees or store, and output the encoded block to the zip file. This function
- * returns the total compressed length for the file so far.
- */
-ulg _tr_flush_block(s, buf, stored_len, eof)
- deflate_state *s;
- charf *buf; /* input block, or NULL if too old */
- ulg stored_len; /* length of input block */
- int eof; /* true if this is the last block for a file */
-{
- ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
- int max_blindex = 0; /* index of last bit length code of non zero freq */
-
- /* Build the Huffman trees unless a stored block is forced */
- if (s->level > 0) {
-
- /* Check if the file is ascii or binary */
- if (s->data_type == Z_UNKNOWN) set_data_type(s);
-
- /* Construct the literal and distance trees */
- build_tree(s, (tree_desc *)(&(s->l_desc)));
- Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
- s->static_len));
-
- build_tree(s, (tree_desc *)(&(s->d_desc)));
- Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
- s->static_len));
- /* At this point, opt_len and static_len are the total bit lengths of
- * the compressed block data, excluding the tree representations.
- */
-
- /* Build the bit length tree for the above two trees, and get the index
- * in bl_order of the last bit length code to send.
- */
- max_blindex = build_bl_tree(s);
-
- /* Determine the best encoding. Compute first the block length in bytes*/
- opt_lenb = (s->opt_len+3+7)>>3;
- static_lenb = (s->static_len+3+7)>>3;
-
- Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
- opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
- s->last_lit));
-
- if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
-
- } else {
- Assert(buf != (char*)0, "lost buf");
- opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
- }
-
- /* If compression failed and this is the first and last block,
- * and if the .zip file can be seeked (to rewrite the local header),
- * the whole file is transformed into a stored file:
- */
-#ifdef STORED_FILE_OK
-# ifdef FORCE_STORED_FILE
- if (eof && s->compressed_len == 0L) { /* force stored file */
-# else
- if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
-# endif
- /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
- if (buf == (charf*)0) error ("block vanished");
-
- copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
- s->compressed_len = stored_len << 3;
- s->method = STORED;
- } else
-#endif /* STORED_FILE_OK */
-
-#ifdef FORCE_STORED
- if (buf != (char*)0) { /* force stored block */
-#else
- if (stored_len+4 <= opt_lenb && buf != (char*)0) {
- /* 4: two words for the lengths */
-#endif
- /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
- * Otherwise we can't have processed more than WSIZE input bytes since
- * the last block flush, because compression would have been
- * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
- * transform a block into a stored block.
- */
- _tr_stored_block(s, buf, stored_len, eof);
-
-#ifdef FORCE_STATIC
- } else if (static_lenb >= 0) { /* force static trees */
-#else
- } else if (static_lenb == opt_lenb) {
-#endif
- send_bits(s, (STATIC_TREES<<1)+eof, 3);
- compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
- s->compressed_len += 3 + s->static_len;
- } else {
- send_bits(s, (DYN_TREES<<1)+eof, 3);
- send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
- max_blindex+1);
- compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
- s->compressed_len += 3 + s->opt_len;
- }
- Assert (s->compressed_len == s->bits_sent, "bad compressed size");
- init_block(s);
-
- if (eof) {
- bi_windup(s);
- s->compressed_len += 7; /* align on byte boundary */
- }
- Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
- s->compressed_len-7*eof));
-
- return s->compressed_len >> 3;
-}
-
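-/* Illustrative sketch, not part of zlib: the cost comparison above. With
- * opt_len = 2000 and static_len = 2200 bits, dynamic trees cost
- * (2000+3+7)>>3 = 251 bytes versus 276 for static, so the dynamic trees
- * win unless a stored block is cheaper still. */
-static unsigned long demo_block_choice(unsigned long opt_len,
-                                       unsigned long static_len,
-                                       unsigned long stored_len)
-{
-    unsigned long opt_lenb = (opt_len + 3 + 7) >> 3;   /* +3-bit header */
-    unsigned long static_lenb = (static_len + 3 + 7) >> 3;
-
-    if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
-    /* stored needs 4 extra bytes for the two length words */
-    return stored_len + 4 <= opt_lenb ? stored_len + 4 : opt_lenb;
-}
-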
-/* ===========================================================================
- * Save the match info and tally the frequency counts. Return true if
- * the current block must be flushed.
- */
-int _tr_tally (s, dist, lc)
- deflate_state *s;
- unsigned dist; /* distance of matched string */
- unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
-{
- s->d_buf[s->last_lit] = (ush)dist;
- s->l_buf[s->last_lit++] = (uch)lc;
- if (dist == 0) {
- /* lc is the unmatched char */
- s->dyn_ltree[lc].Freq++;
- } else {
- s->matches++;
- /* Here, lc is the match length - MIN_MATCH */
- dist--; /* dist = match distance - 1 */
- Assert((ush)dist < (ush)MAX_DIST(s) &&
- (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
- (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
-
- s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
- s->dyn_dtree[d_code(dist)].Freq++;
- }
-
- /* Try to guess if it is profitable to stop the current block here */
- if (s->level > 2 && (s->last_lit & 0xfff) == 0) {
- /* Compute an upper bound for the compressed length */
- ulg out_length = (ulg)s->last_lit*8L;
- ulg in_length = (ulg)((long)s->strstart - s->block_start);
- int dcode;
- for (dcode = 0; dcode < D_CODES; dcode++) {
- out_length += (ulg)s->dyn_dtree[dcode].Freq *
- (5L+extra_dbits[dcode]);
- }
- out_length >>= 3;
- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
- s->last_lit, in_length, out_length,
- 100L - out_length*100L/in_length));
- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
- }
- return (s->last_lit == s->lit_bufsize-1);
- /* We avoid equality with lit_bufsize because of wraparound at 64K
- * on 16 bit machines and because stored blocks are restricted to
- * 64K-1 bytes.
- */
-}
-
-/* ===========================================================================
- * Send the block data compressed using the given Huffman trees
- */
-local void compress_block(s, ltree, dtree)
- deflate_state *s;
- ct_data *ltree; /* literal tree */
- ct_data *dtree; /* distance tree */
-{
- unsigned dist; /* distance of matched string */
- int lc; /* match length or unmatched char (if dist == 0) */
- unsigned lx = 0; /* running index in l_buf */
- unsigned code; /* the code to send */
- int extra; /* number of extra bits to send */
-
- if (s->last_lit != 0) do {
- dist = s->d_buf[lx];
- lc = s->l_buf[lx++];
- if (dist == 0) {
- send_code(s, lc, ltree); /* send a literal byte */
- Tracecv(isgraph(lc), (stderr," '%c' ", lc));
- } else {
- /* Here, lc is the match length - MIN_MATCH */
- code = length_code[lc];
- send_code(s, code+LITERALS+1, ltree); /* send the length code */
- extra = extra_lbits[code];
- if (extra != 0) {
- lc -= base_length[code];
- send_bits(s, lc, extra); /* send the extra length bits */
- }
- dist--; /* dist is now the match distance - 1 */
- code = d_code(dist);
- Assert (code < D_CODES, "bad d_code");
-
- send_code(s, code, dtree); /* send the distance code */
- extra = extra_dbits[code];
- if (extra != 0) {
- dist -= base_dist[code];
- send_bits(s, dist, extra); /* send the extra distance bits */
- }
- } /* literal or match pair ? */
-
- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
- Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
-
- } while (lx < s->last_lit);
-
- send_code(s, END_BLOCK, ltree);
- s->last_eob_len = ltree[END_BLOCK].Len;
-}
-
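-/* Illustrative only, not part of zlib: with the standard deflate tables,
- * a match of length 10 at distance 100 is emitted by the loop above as
- * length code 264 from ltree (no extra length bits), then distance code
- * 13 from dtree with 5 extra bits carrying the offset from its base. */
-static void demo_match_emit(void)
-{
-    int match_len = 10, match_dist = 100;
-    int dist_extra = match_dist - 97;   /* base of distance code 13 */
-    (void)match_len; (void)dist_extra;
-}
-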
-/* ===========================================================================
- * Set the data type to ASCII or BINARY, using a crude approximation:
- * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
- * IN assertion: the fields freq of dyn_ltree are set and the total of all
- * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
- */
-local void set_data_type(s)
- deflate_state *s;
-{
- int n = 0;
- unsigned ascii_freq = 0;
- unsigned bin_freq = 0;
- while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
- while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
- while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
- s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
-}
-
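-/* The shift test above matches the 20% rule in the comment:
- * bin > ascii/4 is equivalent to bin > (bin+ascii)/5. A self-contained
- * sketch, not part of zlib: */
-static int demo_is_binary(unsigned bin_freq, unsigned ascii_freq)
-{
-    return bin_freq > (ascii_freq >> 2);
-}
-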
-/* ===========================================================================
- * Reverse the first len bits of a code, using straightforward code (a faster
- * method would use a table)
- * IN assertion: 1 <= len <= 15
- */
-local unsigned bi_reverse(code, len)
- unsigned code; /* the value to invert */
- int len; /* its bit length */
-{
- register unsigned res = 0;
- do {
- res |= code & 1;
- code >>= 1, res <<= 1;
- } while (--len > 0);
- return res >> 1;
-}
-
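-/* Self-contained check, not part of zlib: reversing the low 4 bits of
- * 0x3 (0011) with the loop above yields 0xC (1100). */
-static unsigned demo_bit_reverse(void)
-{
-    unsigned code = 0x3, res = 0;
-    int len = 4;
-    do {
-        res |= code & 1;
-        code >>= 1, res <<= 1;
-    } while (--len > 0);
-    return res >> 1;   /* 0xC */
-}
-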
-/* ===========================================================================
- * Flush the bit buffer, keeping at most 7 bits in it.
- */
-local void bi_flush(s)
- deflate_state *s;
-{
- if (s->bi_valid == 16) {
- put_short(s, s->bi_buf);
- s->bi_buf = 0;
- s->bi_valid = 0;
- } else if (s->bi_valid >= 8) {
- put_byte(s, (Byte)s->bi_buf);
- s->bi_buf >>= 8;
- s->bi_valid -= 8;
- }
-}
-
-/* ===========================================================================
- * Flush the bit buffer and align the output on a byte boundary
- */
-local void bi_windup(s)
- deflate_state *s;
-{
- if (s->bi_valid > 8) {
- put_short(s, s->bi_buf);
- } else if (s->bi_valid > 0) {
- put_byte(s, (Byte)s->bi_buf);
- }
- s->bi_buf = 0;
- s->bi_valid = 0;
-#ifdef DEBUG_ZLIB
- s->bits_sent = (s->bits_sent+7) & ~7;
-#endif
-}
-
-/* ===========================================================================
- * Copy a stored block, storing first the length and its
- * one's complement if requested.
- */
-local void copy_block(s, buf, len, header)
- deflate_state *s;
- charf *buf; /* the input data */
- unsigned len; /* its length */
- int header; /* true if block header must be written */
-{
- bi_windup(s); /* align on byte boundary */
- s->last_eob_len = 8; /* enough lookahead for inflate */
-
- if (header) {
- put_short(s, (ush)len);
- put_short(s, (ush)~len);
-#ifdef DEBUG_ZLIB
- s->bits_sent += 2*16;
-#endif
- }
-#ifdef DEBUG_ZLIB
- s->bits_sent += (ulg)len<<3;
-#endif
- /* bundle up the put_byte(s, *buf++) calls */
- zmemcpy(&s->pending_buf[s->pending], buf, len);
- s->pending += len;
-}
-/* --- trees.c */
-
-/* +++ inflate.c */
-/* inflate.c -- zlib interface to inflate modules
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-
-/* +++ infblock.h */
-/* infblock.h -- header to use infblock.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-struct inflate_blocks_state;
-typedef struct inflate_blocks_state FAR inflate_blocks_statef;
-
-extern inflate_blocks_statef * inflate_blocks_new OF((
- z_streamp z,
- check_func c, /* check function */
- uInt w)); /* window size */
-
-extern int inflate_blocks OF((
- inflate_blocks_statef *,
- z_streamp ,
- int)); /* initial return code */
-
-extern void inflate_blocks_reset OF((
- inflate_blocks_statef *,
- z_streamp ,
- uLongf *)); /* check value on output */
-
-extern int inflate_blocks_free OF((
- inflate_blocks_statef *,
- z_streamp ,
- uLongf *)); /* check value on output */
-
-extern void inflate_set_dictionary OF((
- inflate_blocks_statef *s,
- const Bytef *d, /* dictionary */
- uInt n)); /* dictionary length */
-
-extern int inflate_addhistory OF((
- inflate_blocks_statef *,
- z_streamp));
-
-extern int inflate_packet_flush OF((
- inflate_blocks_statef *));
-/* --- infblock.h */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_blocks_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* inflate private state */
-struct internal_state {
-
- /* mode */
- enum {
- METHOD, /* waiting for method byte */
- FLAG, /* waiting for flag byte */
- DICT4, /* four dictionary check bytes to go */
- DICT3, /* three dictionary check bytes to go */
- DICT2, /* two dictionary check bytes to go */
- DICT1, /* one dictionary check byte to go */
- DICT0, /* waiting for inflateSetDictionary */
- BLOCKS, /* decompressing blocks */
- CHECK4, /* four check bytes to go */
- CHECK3, /* three check bytes to go */
- CHECK2, /* two check bytes to go */
- CHECK1, /* one check byte to go */
- DONE, /* finished check, done */
- BAD} /* got an error--stay here */
- mode; /* current inflate mode */
-
- /* mode dependent information */
- union {
- uInt method; /* if FLAGS, method byte */
- struct {
- uLong was; /* computed check value */
- uLong need; /* stream check value */
- } check; /* if CHECK, check values to compare */
- uInt marker; /* if BAD, inflateSync's marker bytes count */
- } sub; /* submode */
-
- /* mode independent information */
- int nowrap; /* flag for no wrapper */
- uInt wbits; /* log2(window size) (8..15, defaults to 15) */
- inflate_blocks_statef
- *blocks; /* current inflate_blocks state */
-
-};
-
-
-int inflateReset(z)
-z_streamp z;
-{
- uLong c;
-
- if (z == Z_NULL || z->state == Z_NULL)
- return Z_STREAM_ERROR;
- z->total_in = z->total_out = 0;
- z->msg = Z_NULL;
- z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
- inflate_blocks_reset(z->state->blocks, z, &c);
- Trace((stderr, "inflate: reset\n"));
- return Z_OK;
-}
-
-
-int inflateEnd(z)
-z_streamp z;
-{
- uLong c;
-
- if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
- return Z_STREAM_ERROR;
- if (z->state->blocks != Z_NULL)
- inflate_blocks_free(z->state->blocks, z, &c);
- ZFREE(z, z->state);
- z->state = Z_NULL;
- Trace((stderr, "inflate: end\n"));
- return Z_OK;
-}
-
-
-int inflateInit2_(z, w, version, stream_size)
-z_streamp z;
-int w;
-const char *version;
-int stream_size;
-{
- if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
- stream_size != sizeof(z_stream))
- return Z_VERSION_ERROR;
-
- /* initialize state */
- if (z == Z_NULL)
- return Z_STREAM_ERROR;
- z->msg = Z_NULL;
-#ifndef NO_ZCFUNCS
- if (z->zalloc == Z_NULL)
- {
- z->zalloc = zcalloc;
- z->opaque = (voidpf)0;
- }
- if (z->zfree == Z_NULL) z->zfree = zcfree;
-#endif
- if ((z->state = (struct internal_state FAR *)
- ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
- return Z_MEM_ERROR;
- z->state->blocks = Z_NULL;
-
- /* handle undocumented nowrap option (no zlib header or check) */
- z->state->nowrap = 0;
- if (w < 0)
- {
- w = - w;
- z->state->nowrap = 1;
- }
-
- /* set window size */
- if (w < 8 || w > 15)
- {
- inflateEnd(z);
- return Z_STREAM_ERROR;
- }
- z->state->wbits = (uInt)w;
-
- /* create inflate_blocks state */
- if ((z->state->blocks =
- inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, (uInt)1 << w))
- == Z_NULL)
- {
- inflateEnd(z);
- return Z_MEM_ERROR;
- }
- Trace((stderr, "inflate: allocated\n"));
-
- /* reset state */
- inflateReset(z);
- return Z_OK;
-}
-
-
-int inflateInit_(z, version, stream_size)
-z_streamp z;
-const char *version;
-int stream_size;
-{
- return inflateInit2_(z, DEF_WBITS, version, stream_size);
-}
-
-
-#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
-#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
-
-int inflate(z, f)
-z_streamp z;
-int f;
-{
- int r;
- uInt b;
-
- if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL || f < 0)
- return Z_STREAM_ERROR;
- r = Z_BUF_ERROR;
- while (1) switch (z->state->mode)
- {
- case METHOD:
- NEEDBYTE
- if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED)
- {
- z->state->mode = BAD;
- z->msg = (char*)"unknown compression method";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
- {
- z->state->mode = BAD;
- z->msg = (char*)"invalid window size";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- z->state->mode = FLAG;
- case FLAG:
- NEEDBYTE
- b = NEXTBYTE;
- if (((z->state->sub.method << 8) + b) % 31)
- {
- z->state->mode = BAD;
- z->msg = (char*)"incorrect header check";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- Trace((stderr, "inflate: zlib header ok\n"));
- if (!(b & PRESET_DICT))
- {
- z->state->mode = BLOCKS;
- break;
- }
- z->state->mode = DICT4;
- case DICT4:
- NEEDBYTE
- z->state->sub.check.need = (uLong)NEXTBYTE << 24;
- z->state->mode = DICT3;
- case DICT3:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 16;
- z->state->mode = DICT2;
- case DICT2:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 8;
- z->state->mode = DICT1;
- case DICT1:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE;
- z->adler = z->state->sub.check.need;
- z->state->mode = DICT0;
- return Z_NEED_DICT;
- case DICT0:
- z->state->mode = BAD;
- z->msg = (char*)"need dictionary";
- z->state->sub.marker = 0; /* can try inflateSync */
- return Z_STREAM_ERROR;
- case BLOCKS:
- r = inflate_blocks(z->state->blocks, z, r);
- if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
- r = inflate_packet_flush(z->state->blocks);
- if (r == Z_DATA_ERROR)
- {
- z->state->mode = BAD;
- z->state->sub.marker = 0; /* can try inflateSync */
- break;
- }
- if (r != Z_STREAM_END)
- return r;
- r = Z_OK;
- inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
- if (z->state->nowrap)
- {
- z->state->mode = DONE;
- break;
- }
- z->state->mode = CHECK4;
- case CHECK4:
- NEEDBYTE
- z->state->sub.check.need = (uLong)NEXTBYTE << 24;
- z->state->mode = CHECK3;
- case CHECK3:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 16;
- z->state->mode = CHECK2;
- case CHECK2:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 8;
- z->state->mode = CHECK1;
- case CHECK1:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE;
-
- if (z->state->sub.check.was != z->state->sub.check.need)
- {
- z->state->mode = BAD;
- z->msg = (char*)"incorrect data check";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- Trace((stderr, "inflate: zlib check ok\n"));
- z->state->mode = DONE;
- case DONE:
- return Z_STREAM_END;
- case BAD:
- return Z_DATA_ERROR;
- default:
- return Z_STREAM_ERROR;
- }
-
- empty:
- if (f != Z_PACKET_FLUSH)
- return r;
- z->state->mode = BAD;
- z->msg = (char *)"need more for packet flush";
- z->state->sub.marker = 0; /* can try inflateSync */
- return Z_DATA_ERROR;
-}
-
-
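-/* Self-contained check, not part of zlib: the METHOD/FLAG tests above on
- * the common zlib header 78 9C -- method 8 (deflate), window bits
- * 7+8 = 15, and the mod-31 header check all pass. */
-static int demo_zlib_header_ok(void)
-{
-    unsigned cmf = 0x78, flg = 0x9c;
-    return (cmf & 0xf) == 8 &&               /* Z_DEFLATED */
-           ((cmf >> 4) + 8) <= 15 &&         /* window size */
-           ((cmf << 8) + flg) % 31 == 0;     /* header check */
-}
-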
-int inflateSetDictionary(z, dictionary, dictLength)
-z_streamp z;
-const Bytef *dictionary;
-uInt dictLength;
-{
- uInt length = dictLength;
-
- if (z == Z_NULL || z->state == Z_NULL || z->state->mode != DICT0)
- return Z_STREAM_ERROR;
-
- if (adler32(1L, dictionary, dictLength) != z->adler) return Z_DATA_ERROR;
- z->adler = 1L;
-
- if (length >= ((uInt)1<<z->state->wbits))
- {
- length = (1<<z->state->wbits)-1;
- dictionary += dictLength - length;
- }
- inflate_set_dictionary(z->state->blocks, dictionary, length);
- z->state->mode = BLOCKS;
- return Z_OK;
-}
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (hence s->read equals s->write), and the state must
- * be BLOCKS (i.e. we should be willing to see the start of a series of
- * BLOCKS). On exit, the output will also be caught up, and the checksum
- * will have been updated if need be.
- */
-
-int inflateIncomp(z)
-z_stream *z;
-{
- if (z->state->mode != BLOCKS)
- return Z_DATA_ERROR;
- return inflate_addhistory(z->state->blocks, z);
-}
-
-
-int inflateSync(z)
-z_streamp z;
-{
- uInt n; /* number of bytes to look at */
- Bytef *p; /* pointer to bytes */
- uInt m; /* number of marker bytes found in a row */
- uLong r, w; /* temporaries to save total_in and total_out */
-
- /* set up */
- if (z == Z_NULL || z->state == Z_NULL)
- return Z_STREAM_ERROR;
- if (z->state->mode != BAD)
- {
- z->state->mode = BAD;
- z->state->sub.marker = 0;
- }
- if ((n = z->avail_in) == 0)
- return Z_BUF_ERROR;
- p = z->next_in;
- m = z->state->sub.marker;
-
- /* search */
- while (n && m < 4)
- {
- if (*p == (Byte)(m < 2 ? 0 : 0xff))
- m++;
- else if (*p)
- m = 0;
- else
- m = 4 - m;
- p++, n--;
- }
-
- /* restore */
- z->total_in += p - z->next_in;
- z->next_in = p;
- z->avail_in = n;
- z->state->sub.marker = m;
-
- /* return no joy or set up to restart on a new block */
- if (m != 4)
- return Z_DATA_ERROR;
- r = z->total_in; w = z->total_out;
- inflateReset(z);
- z->total_in = r; z->total_out = w;
- z->state->mode = BLOCKS;
- return Z_OK;
-}
-
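-/* Illustrative sketch, not part of zlib: the search loop above, which
- * hunts for 00 00 FF FF -- the length words of an empty stored block,
- * used as a resync marker. */
-static unsigned demo_sync_scan(const unsigned char *p, unsigned n)
-{
-    unsigned m = 0;    /* marker bytes matched so far */
-    while (n && m < 4) {
-        if (*p == (m < 2 ? 0x00 : 0xff))
-            m++;
-        else if (*p)
-            m = 0;
-        else
-            m = 4 - m;
-        p++, n--;
-    }
-    return m;          /* 4 means the marker was found */
-}
-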
-#undef NEEDBYTE
-#undef NEXTBYTE
-/* --- inflate.c */
-
-/* +++ infblock.c */
-/* infblock.c -- interpret and process block types to last block
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "infblock.h" */
-
-/* +++ inftrees.h */
-/* inftrees.h -- header to use inftrees.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* Huffman code lookup table entry--this entry is four bytes for machines
- that have 16-bit pointers (e.g. PC's in the small or medium model). */
-
-typedef struct inflate_huft_s FAR inflate_huft;
-
-struct inflate_huft_s {
- union {
- struct {
- Byte Exop; /* number of extra bits or operation */
- Byte Bits; /* number of bits in this code or subcode */
- } what;
- Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
- } word; /* 16-bit, 8 bytes for 32-bit machines) */
- union {
- uInt Base; /* literal, length base, or distance base */
- inflate_huft *Next; /* pointer to next level of table */
- } more;
-};
-
-#ifdef DEBUG_ZLIB
- extern uInt inflate_hufts;
-#endif
-
-extern int inflate_trees_bits OF((
- uIntf *, /* 19 code lengths */
- uIntf *, /* bits tree desired/actual depth */
- inflate_huft * FAR *, /* bits tree result */
- z_streamp )); /* for zalloc, zfree functions */
-
-extern int inflate_trees_dynamic OF((
- uInt, /* number of literal/length codes */
- uInt, /* number of distance codes */
- uIntf *, /* that many (total) code lengths */
- uIntf *, /* literal desired/actual bit depth */
- uIntf *, /* distance desired/actual bit depth */
- inflate_huft * FAR *, /* literal/length tree result */
- inflate_huft * FAR *, /* distance tree result */
- z_streamp )); /* for zalloc, zfree functions */
-
-extern int inflate_trees_fixed OF((
- uIntf *, /* literal desired/actual bit depth */
- uIntf *, /* distance desired/actual bit depth */
- inflate_huft * FAR *, /* literal/length tree result */
- inflate_huft * FAR *)); /* distance tree result */
-
-extern int inflate_trees_free OF((
- inflate_huft *, /* tables to free */
- z_streamp )); /* for zfree function */
-
-/* --- inftrees.h */
-
-/* +++ infcodes.h */
-/* infcodes.h -- header to use infcodes.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-struct inflate_codes_state;
-typedef struct inflate_codes_state FAR inflate_codes_statef;
-
-extern inflate_codes_statef *inflate_codes_new OF((
- uInt, uInt,
- inflate_huft *, inflate_huft *,
- z_streamp ));
-
-extern int inflate_codes OF((
- inflate_blocks_statef *,
- z_streamp ,
- int));
-
-extern void inflate_codes_free OF((
- inflate_codes_statef *,
- z_streamp ));
-
-/* --- infcodes.h */
-
-/* +++ infutil.h */
-/* infutil.h -- types and macros common to blocks and codes
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-#ifndef _INFUTIL_H
-#define _INFUTIL_H
-
-typedef enum {
- TYPE, /* get type bits (3, including end bit) */
- LENS, /* get lengths for stored */
- STORED, /* processing stored block */
- TABLE, /* get table lengths */
- BTREE, /* get bit lengths tree for a dynamic block */
- DTREE, /* get length, distance trees for a dynamic block */
- CODES, /* processing fixed or dynamic block */
- DRY, /* output remaining window bytes */
- DONEB, /* finished last block, done */
- BADB} /* got a data error--stuck here */
-inflate_block_mode;
-
-/* inflate blocks semi-private state */
-struct inflate_blocks_state {
-
- /* mode */
- inflate_block_mode mode; /* current inflate_block mode */
-
- /* mode dependent information */
- union {
- uInt left; /* if STORED, bytes left to copy */
- struct {
- uInt table; /* table lengths (14 bits) */
- uInt index; /* index into blens (or border) */
- uIntf *blens; /* bit lengths of codes */
- uInt bb; /* bit length tree depth */
- inflate_huft *tb; /* bit length decoding tree */
- } trees; /* if DTREE, decoding info for trees */
- struct {
- inflate_huft *tl;
- inflate_huft *td; /* trees to free */
- inflate_codes_statef
- *codes;
- } decode; /* if CODES, current state */
- } sub; /* submode */
- uInt last; /* true if this block is the last block */
-
- /* mode independent information */
- uInt bitk; /* bits in bit buffer */
- uLong bitb; /* bit buffer */
- Bytef *window; /* sliding window */
- Bytef *end; /* one byte after sliding window */
- Bytef *read; /* window read pointer */
- Bytef *write; /* window write pointer */
- check_func checkfn; /* check function */
- uLong check; /* check on output */
-
-};
-
-
-/* defines for inflate input/output */
-/* update pointers and return */
-#define UPDBITS {s->bitb=b;s->bitk=k;}
-#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
-#define UPDOUT {s->write=q;}
-#define UPDATE {UPDBITS UPDIN UPDOUT}
-#define LEAVE {UPDATE return inflate_flush(s,z,r);}
-/* get bytes and bits */
-#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
-#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
-#define NEXTBYTE (n--,*p++)
-#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
-#define DUMPBITS(j) {b>>=(j);k-=(j);}
-/* output bytes */
-#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
-#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
-#define WWRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
-#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
-#define NEEDOUT {if(m==0){WWRAP if(m==0){FLUSH WWRAP if(m==0) LEAVE}}r=Z_OK;}
-#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
-/* load local pointers */
-#define LOAD {LOADIN LOADOUT}
-
-/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
-extern uInt inflate_mask[17];
-
-/* copy as much as possible from the sliding window to the output area */
-extern int inflate_flush OF((
- inflate_blocks_statef *,
- z_streamp ,
- int));
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-#endif
-/* --- infutil.h */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* Table for deflate from PKZIP's appnote.txt. */
-local const uInt border[] = { /* Order of the bit length code lengths */
- 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
-
-/*
- Notes beyond the 1.93a appnote.txt:
-
- 1. Distance pointers never point before the beginning of the output
- stream.
- 2. Distance pointers can point back across blocks, up to 32k away.
- 3. There is an implied maximum of 7 bits for the bit length table and
- 15 bits for the actual data.
- 4. If only one code exists, then it is encoded using one bit. (Zero
- would be more efficient, but perhaps a little confusing.) If two
- codes exist, they are coded using one bit each (0 and 1).
- 5. There is no way of sending zero distance codes--a dummy must be
- sent if there are none. (History: a pre 2.0 version of PKZIP would
- store blocks with no distance codes, but this was discovered to be
- too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
-      zero distance codes, which are sent as one code of zero bits in
- length.
- 6. There are up to 286 literal/length codes. Code 256 represents the
- end-of-block. Note however that the static length tree defines
- 288 codes just to fill out the Huffman codes. Codes 286 and 287
- cannot be used though, since there is no length base or extra bits
-      defined for them. Similarly, there are up to 30 distance codes.
- However, static trees define 32 codes (all 5 bits) to fill out the
- Huffman codes, but the last two had better not show up in the data.
- 7. Unzip can check dynamic Huffman blocks for complete code sets.
- The exception is that a single code would not be complete (see #4).
-  8. The five bits following the block type are really the number of
- literal codes sent minus 257.
- 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
- (1+6+6). Therefore, to output three times the length, you output
- three codes (1+1+1), whereas to output four times the same length,
- you only need two codes (1+3). Hmm.
- 10. In the tree reconstruction algorithm, Code = Code + Increment
- only if BitLength(i) is not zero. (Pretty obvious.)
- 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
- 12. Note: length code 284 can represent 227-258, but length code 285
- really is 258. The last length deserves its own, short code
- since it gets used a lot in very redundant files. The length
- 258 is special since 258 - 3 (the min match length) is 255.
- 13. The literal/length and distance code bit lengths are read as a
- single stream of lengths. It is possible (and advantageous) for
- a repeat code (16, 17, or 18) to go across the boundary between
- the two sets of lengths.
- */
-
-
-void inflate_blocks_reset(s, z, c)
-inflate_blocks_statef *s;
-z_streamp z;
-uLongf *c;
-{
- if (s->checkfn != Z_NULL)
- *c = s->check;
- if (s->mode == BTREE || s->mode == DTREE)
- ZFREE(z, s->sub.trees.blens);
- if (s->mode == CODES)
- {
- inflate_codes_free(s->sub.decode.codes, z);
- inflate_trees_free(s->sub.decode.td, z);
- inflate_trees_free(s->sub.decode.tl, z);
- }
- s->mode = TYPE;
- s->bitk = 0;
- s->bitb = 0;
- s->read = s->write = s->window;
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(0L, Z_NULL, 0);
- Trace((stderr, "inflate: blocks reset\n"));
-}
-
-
-inflate_blocks_statef *inflate_blocks_new(z, c, w)
-z_streamp z;
-check_func c;
-uInt w;
-{
- inflate_blocks_statef *s;
-
- if ((s = (inflate_blocks_statef *)ZALLOC
- (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
- return s;
- if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
- {
- ZFREE(z, s);
- return Z_NULL;
- }
- s->end = s->window + w;
- s->checkfn = c;
- s->mode = TYPE;
- Trace((stderr, "inflate: blocks allocated\n"));
- inflate_blocks_reset(s, z, &s->check);
- return s;
-}
-
-
-#ifdef DEBUG_ZLIB
- extern uInt inflate_hufts;
-#endif
-int inflate_blocks(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt t; /* temporary storage */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
-
- /* copy input/output information to locals (UPDATE macro restores) */
- LOAD
-
- /* process input based on current state */
- while (1) switch (s->mode)
- {
- case TYPE:
- NEEDBITS(3)
- t = (uInt)b & 7;
- s->last = t & 1;
- switch (t >> 1)
- {
- case 0: /* stored */
- Trace((stderr, "inflate: stored block%s\n",
- s->last ? " (last)" : ""));
- DUMPBITS(3)
- t = k & 7; /* go to byte boundary */
- DUMPBITS(t)
- s->mode = LENS; /* get length of stored block */
- break;
- case 1: /* fixed */
- Trace((stderr, "inflate: fixed codes block%s\n",
- s->last ? " (last)" : ""));
- {
- uInt bl, bd;
- inflate_huft *tl, *td;
-
- inflate_trees_fixed(&bl, &bd, &tl, &td);
- s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
- if (s->sub.decode.codes == Z_NULL)
- {
- r = Z_MEM_ERROR;
- LEAVE
- }
- s->sub.decode.tl = Z_NULL; /* don't try to free these */
- s->sub.decode.td = Z_NULL;
- }
- DUMPBITS(3)
- s->mode = CODES;
- break;
- case 2: /* dynamic */
- Trace((stderr, "inflate: dynamic codes block%s\n",
- s->last ? " (last)" : ""));
- DUMPBITS(3)
- s->mode = TABLE;
- break;
- case 3: /* illegal */
- DUMPBITS(3)
- s->mode = BADB;
- z->msg = (char*)"invalid block type";
- r = Z_DATA_ERROR;
- LEAVE
- }
- break;
- case LENS:
- NEEDBITS(32)
- if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
- {
- s->mode = BADB;
- z->msg = (char*)"invalid stored block lengths";
- r = Z_DATA_ERROR;
- LEAVE
- }
- s->sub.left = (uInt)b & 0xffff;
- b = k = 0; /* dump bits */
- Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
- s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE);
- break;
- case STORED:
- if (n == 0)
- LEAVE
- NEEDOUT
- t = s->sub.left;
- if (t > n) t = n;
- if (t > m) t = m;
- zmemcpy(q, p, t);
- p += t; n -= t;
- q += t; m -= t;
- if ((s->sub.left -= t) != 0)
- break;
- Tracev((stderr, "inflate: stored end, %lu total out\n",
- z->total_out + (q >= s->read ? q - s->read :
- (s->end - s->read) + (q - s->window))));
- s->mode = s->last ? DRY : TYPE;
- break;
- case TABLE:
- NEEDBITS(14)
- s->sub.trees.table = t = (uInt)b & 0x3fff;
-#ifndef PKZIP_BUG_WORKAROUND
- if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
- {
- s->mode = BADB;
- z->msg = (char*)"too many length or distance symbols";
- r = Z_DATA_ERROR;
- LEAVE
- }
-#endif
- t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
- if (t < 19)
- t = 19;
- if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
- {
- r = Z_MEM_ERROR;
- LEAVE
- }
- DUMPBITS(14)
- s->sub.trees.index = 0;
- Tracev((stderr, "inflate: table sizes ok\n"));
- s->mode = BTREE;
- case BTREE:
- while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
- {
- NEEDBITS(3)
- s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
- DUMPBITS(3)
- }
- while (s->sub.trees.index < 19)
- s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
- s->sub.trees.bb = 7;
- t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
- &s->sub.trees.tb, z);
- if (t != Z_OK)
- {
- ZFREE(z, s->sub.trees.blens);
- r = t;
- if (r == Z_DATA_ERROR)
- s->mode = BADB;
- LEAVE
- }
- s->sub.trees.index = 0;
- Tracev((stderr, "inflate: bits tree ok\n"));
- s->mode = DTREE;
- case DTREE:
- while (t = s->sub.trees.table,
- s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
- {
- inflate_huft *h;
- uInt i, j, c;
-
- t = s->sub.trees.bb;
- NEEDBITS(t)
- h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
- t = h->word.what.Bits;
- c = h->more.Base;
- if (c < 16)
- {
- DUMPBITS(t)
- s->sub.trees.blens[s->sub.trees.index++] = c;
- }
- else /* c == 16..18 */
- {
- i = c == 18 ? 7 : c - 14;
- j = c == 18 ? 11 : 3;
- NEEDBITS(t + i)
- DUMPBITS(t)
- j += (uInt)b & inflate_mask[i];
- DUMPBITS(i)
- i = s->sub.trees.index;
- t = s->sub.trees.table;
- if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
- (c == 16 && i < 1))
- {
- inflate_trees_free(s->sub.trees.tb, z);
- ZFREE(z, s->sub.trees.blens);
- s->mode = BADB;
- z->msg = (char*)"invalid bit length repeat";
- r = Z_DATA_ERROR;
- LEAVE
- }
- c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
- do {
- s->sub.trees.blens[i++] = c;
- } while (--j);
- s->sub.trees.index = i;
- }
- }
- inflate_trees_free(s->sub.trees.tb, z);
- s->sub.trees.tb = Z_NULL;
- {
- uInt bl, bd;
- inflate_huft *tl, *td;
- inflate_codes_statef *c;
-
- bl = 9; /* must be <= 9 for lookahead assumptions */
- bd = 6; /* must be <= 9 for lookahead assumptions */
- t = s->sub.trees.table;
-#ifdef DEBUG_ZLIB
- inflate_hufts = 0;
-#endif
- t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
- s->sub.trees.blens, &bl, &bd, &tl, &td, z);
- ZFREE(z, s->sub.trees.blens);
- if (t != Z_OK)
- {
- if (t == (uInt)Z_DATA_ERROR)
- s->mode = BADB;
- r = t;
- LEAVE
- }
- Tracev((stderr, "inflate: trees ok, %d * %d bytes used\n",
- inflate_hufts, sizeof(inflate_huft)));
- if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
- {
- inflate_trees_free(td, z);
- inflate_trees_free(tl, z);
- r = Z_MEM_ERROR;
- LEAVE
- }
- s->sub.decode.codes = c;
- s->sub.decode.tl = tl;
- s->sub.decode.td = td;
- }
- s->mode = CODES;
- case CODES:
- UPDATE
- if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
- return inflate_flush(s, z, r);
- r = Z_OK;
- inflate_codes_free(s->sub.decode.codes, z);
- inflate_trees_free(s->sub.decode.td, z);
- inflate_trees_free(s->sub.decode.tl, z);
- LOAD
- Tracev((stderr, "inflate: codes end, %lu total out\n",
- z->total_out + (q >= s->read ? q - s->read :
- (s->end - s->read) + (q - s->window))));
- if (!s->last)
- {
- s->mode = TYPE;
- break;
- }
- if (k > 7) /* return unused byte, if any */
- {
- Assert(k < 16, "inflate_codes grabbed too many bytes")
- k -= 8;
- n++;
- p--; /* can always return one */
- }
- s->mode = DRY;
- case DRY:
- FLUSH
- if (s->read != s->write)
- LEAVE
- s->mode = DONEB;
- case DONEB:
- r = Z_STREAM_END;
- LEAVE
- case BADB:
- r = Z_DATA_ERROR;
- LEAVE
- default:
- r = Z_STREAM_ERROR;
- LEAVE
- }
-}
-
-
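-/* Self-contained check, not part of zlib: the LENS test above. The 32
- * bits gathered hold LEN in the low half and NLEN, its one's complement,
- * in the high half. */
-static int demo_stored_len_ok(unsigned long b)
-{
-    /* e.g. b = 0xfffe0001UL (LEN = 1, NLEN = 0xfffe) returns 1 */
-    return (((~b) >> 16) & 0xffff) == (b & 0xffff);
-}
-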
-int inflate_blocks_free(s, z, c)
-inflate_blocks_statef *s;
-z_streamp z;
-uLongf *c;
-{
- inflate_blocks_reset(s, z, c);
- ZFREE(z, s->window);
- ZFREE(z, s);
- Trace((stderr, "inflate: blocks freed\n"));
- return Z_OK;
-}
-
-
-void inflate_set_dictionary(s, d, n)
-inflate_blocks_statef *s;
-const Bytef *d;
-uInt n;
-{
- zmemcpy((charf *)s->window, d, n);
- s->read = s->write = s->window + n;
-}
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (hence s->read equals s->write), and the state must
- * be BLOCKS (i.e. we should be willing to see the start of a series of
- * BLOCKS). On exit, the output will also be caught up, and the checksum
- * will have been updated if need be.
- */
-int inflate_addhistory(s, z)
-inflate_blocks_statef *s;
-z_stream *z;
-{
- uLong b; /* bit buffer */ /* NOT USED HERE */
- uInt k; /* bits in bit buffer */ /* NOT USED HERE */
- uInt t; /* temporary storage */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
-
- if (s->read != s->write)
- return Z_STREAM_ERROR;
- if (s->mode != TYPE)
- return Z_DATA_ERROR;
-
- /* we're ready to rock */
- LOAD
- /* while there is input ready, copy to output buffer, moving
- * pointers as needed.
- */
- while (n) {
- t = n; /* how many to do */
- /* is there room until end of buffer? */
- if (t > m) t = m;
- /* update check information */
- if (s->checkfn != Z_NULL)
- s->check = (*s->checkfn)(s->check, q, t);
- zmemcpy(q, p, t);
- q += t;
- p += t;
- n -= t;
- z->total_out += t;
- s->read = q; /* drag read pointer forward */
-/* WWRAP */ /* expand WWRAP macro by hand to handle s->read */
- if (q == s->end) {
- s->read = q = s->window;
- m = WAVAIL;
- }
- }
- UPDATE
- return Z_OK;
-}
-
-
-/*
- * At the end of a Deflate-compressed PPP packet, we expect to have seen
- * a `stored' block type value but not the (zero) length bytes.
- */
-int inflate_packet_flush(s)
- inflate_blocks_statef *s;
-{
- if (s->mode != LENS)
- return Z_DATA_ERROR;
- s->mode = TYPE;
- return Z_OK;
-}
-/* --- infblock.c */
-
-/* +++ inftrees.c */
-/* inftrees.c -- generate Huffman trees for efficient decoding
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-
-char inflate_copyright[] = " inflate 1.0.4 Copyright 1995-1996 Mark Adler ";
-/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
- */
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-
-local int huft_build OF((
- uIntf *, /* code lengths in bits */
- uInt, /* number of codes */
- uInt, /* number of "simple" codes */
- const uIntf *, /* list of base values for non-simple codes */
- const uIntf *, /* list of extra bits for non-simple codes */
- inflate_huft * FAR*,/* result: starting table */
- uIntf *, /* maximum lookup bits (returns actual) */
- z_streamp )); /* for zalloc function */
-
-local voidpf falloc OF((
- voidpf, /* opaque pointer (not used) */
- uInt, /* number of items */
- uInt)); /* size of item */
-
-/* Tables for deflate from PKZIP's appnote.txt. */
-local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
- 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
- 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
- /* see note #13 above about 258 */
-local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
-local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
- 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
- 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
- 8193, 12289, 16385, 24577};
-local const uInt cpdext[30] = { /* Extra bits for distance codes */
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
- 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
- 12, 12, 13, 13};
-
-/*
- Huffman code decoding is performed using a multi-level table lookup.
- The fastest way to decode is to simply build a lookup table whose
- size is determined by the longest code. However, the time it takes
- to build this table can also be a factor if the data being decoded
- is not very long. The most common codes are necessarily the
- shortest codes, so those codes dominate the decoding time, and hence
- the speed. The idea is you can have a shorter table that decodes the
- shorter, more probable codes, and then point to subsidiary tables for
- the longer codes. The time it costs to decode the longer codes is
- then traded against the time it takes to make longer tables.
-
- The results of this trade are in the variables lbits and dbits
- below. lbits is the number of bits the first level table for literal/
- length codes can decode in one step, and dbits is the same thing for
- the distance codes. Subsequent tables are also less than or equal to
- those sizes. These values may be adjusted either when all of the
- codes are shorter than that, in which case the longest code length in
- bits is used, or when the shortest code is *longer* than the requested
- table size, in which case the length of the shortest code in bits is
- used.
-
- There are two different values for the two tables, since they code a
- different number of possibilities each. The literal/length table
- codes 286 possible values, or in a flat code, a little over eight
- bits. The distance table codes 30 possible values, or a little less
- than five bits, flat. The optimum values for speed end up being
- about one bit more than those, so lbits is 8+1 and dbits is 5+1.
- The optimum values may differ though from machine to machine, and
- possibly even between compilers. Your mileage may vary.
- */
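
The two-level scheme described in that comment is easier to see in a toy
setting. The following is an editorial illustration (not part of the removed
source; every name in it is invented): a four-symbol canonical Huffman code
decoded through a 2-bit root table whose long-code entry links to a one-bit
subsidiary table, the same root-plus-subtable shape that huft_build() below
constructs.

    #include <stdio.h>

    struct entry {
        int link;   /* 1: descend into a subtable, 0: emit value */
        int bits;   /* bits consumed by this entry               */
        int value;  /* decoded symbol, or index of the subtable  */
    };

    /* Codes, bits listed in the order they are read (LSB-first, as in
     * the inflate bit buffer): 'a' = 0; 'b' = 1,0; 'c' = 1,1,0;
     * 'd' = 1,1,1.  The root table is indexed by the next two bits; the
     * 1-bit code 'a' appears twice so that either value of the
     * not-yet-needed second bit finds it. */
    static const struct entry root[4] = {
        {0, 1, 'a'},    /* index 00: code 0 -> 'a' (one bit used)   */
        {0, 2, 'b'},    /* index 01: first bit 1, second 0 -> 'b'   */
        {0, 1, 'a'},    /* index 10: code 0 -> 'a' again            */
        {1, 2, 0},      /* index 11: 3-bit codes live in subtable 0 */
    };
    static const struct entry sub0[2] = {
        {0, 1, 'c'},    /* third bit 0 -> 'c' */
        {0, 1, 'd'},    /* third bit 1 -> 'd' */
    };
    static const struct entry *subs[1] = { sub0 };

    int main(void)
    {
        unsigned long b = 0x3A; /* bits 0,1,0,1,1,1,0 = 'a','b','d','a' */
        int k = 7;              /* number of valid bits in b            */

        while (k > 0) {
            const struct entry *e = &root[b & 3];   /* peek two bits  */
            if (e->link) {
                b >>= e->bits; k -= e->bits;        /* drop root bits */
                e = &subs[e->value][b & 1];         /* peek one more  */
            }
            putchar(e->value);
            b >>= e->bits; k -= e->bits;            /* drop code bits */
        }
        putchar('\n');                              /* prints "abda"  */
        return 0;
    }
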
-
-
-/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
-#define BMAX 15 /* maximum bit length of any code */
-#define N_MAX 288 /* maximum number of codes in any set */
-
-#ifdef DEBUG_ZLIB
- uInt inflate_hufts;
-#endif
-
-local int huft_build(b, n, s, d, e, t, m, zs)
-uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
-uInt n; /* number of codes (assumed <= N_MAX) */
-uInt s; /* number of simple-valued codes (0..s-1) */
-const uIntf *d; /* list of base values for non-simple codes */
-const uIntf *e; /* list of extra bits for non-simple codes */
-inflate_huft * FAR *t; /* result: starting table */
-uIntf *m; /* maximum lookup bits, returns actual */
-z_streamp zs; /* for zalloc function */
-/* Given a list of code lengths and a maximum table size, make a set of
- tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
- if the given code set is incomplete (the tables are still built in this
- case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
- lengths), or Z_MEM_ERROR if not enough memory. */
-{
-
- uInt a; /* counter for codes of length k */
- uInt c[BMAX+1]; /* bit length count table */
- uInt f; /* i repeats in table every f entries */
- int g; /* maximum code length */
- int h; /* table level */
- register uInt i; /* counter, current code */
- register uInt j; /* counter */
- register int k; /* number of bits in current code */
- int l; /* bits per table (returned in m) */
- register uIntf *p; /* pointer into c[], b[], or v[] */
- inflate_huft *q; /* points to current table */
- struct inflate_huft_s r; /* table entry for structure assignment */
- inflate_huft *u[BMAX]; /* table stack */
- uInt v[N_MAX]; /* values in order of bit length */
- register int w; /* bits before this table == (l * h) */
- uInt x[BMAX+1]; /* bit offsets, then code stack */
- uIntf *xp; /* pointer into x */
- int y; /* number of dummy codes added */
- uInt z; /* number of entries in current table */
-
-
- /* Generate counts for each bit length */
- p = c;
-#define C0 *p++ = 0;
-#define C2 C0 C0 C0 C0
-#define C4 C2 C2 C2 C2
- C4 /* clear c[]--assume BMAX+1 is 16 */
- p = b; i = n;
- do {
- c[*p++]++; /* assume all entries <= BMAX */
- } while (--i);
- if (c[0] == n) /* null input--all zero length codes */
- {
- *t = (inflate_huft *)Z_NULL;
- *m = 0;
- return Z_OK;
- }
-
-
- /* Find minimum and maximum length, bound *m by those */
- l = *m;
- for (j = 1; j <= BMAX; j++)
- if (c[j])
- break;
- k = j; /* minimum code length */
- if ((uInt)l < j)
- l = j;
- for (i = BMAX; i; i--)
- if (c[i])
- break;
- g = i; /* maximum code length */
- if ((uInt)l > i)
- l = i;
- *m = l;
-
-
- /* Adjust last length count to fill out codes, if needed */
- for (y = 1 << j; j < i; j++, y <<= 1)
- if ((y -= c[j]) < 0)
- return Z_DATA_ERROR;
- if ((y -= c[i]) < 0)
- return Z_DATA_ERROR;
- c[i] += y;
-
-
- /* Generate starting offsets into the value table for each length */
- x[1] = j = 0;
- p = c + 1; xp = x + 2;
- while (--i) { /* note that i == g from above */
- *xp++ = (j += *p++);
- }
-
-
- /* Make a table of values in order of bit lengths */
- p = b; i = 0;
- do {
- if ((j = *p++) != 0)
- v[x[j]++] = i;
- } while (++i < n);
- n = x[g]; /* set n to length of v */
-
-
- /* Generate the Huffman codes and for each, make the table entries */
- x[0] = i = 0; /* first Huffman code is zero */
- p = v; /* grab values in bit order */
- h = -1; /* no tables yet--level -1 */
- w = -l; /* bits decoded == (l * h) */
- u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
- q = (inflate_huft *)Z_NULL; /* ditto */
- z = 0; /* ditto */
-
- /* go through the bit lengths (k already is bits in shortest code) */
- for (; k <= g; k++)
- {
- a = c[k];
- while (a--)
- {
- /* here i is the Huffman code of length k bits for value *p */
- /* make tables up to required level */
- while (k > w + l)
- {
- h++;
- w += l; /* previous table always l bits */
-
- /* compute minimum size table less than or equal to l bits */
- z = g - w;
- z = z > (uInt)l ? l : z; /* table size upper limit */
- if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
- { /* too few codes for k-w bit table */
- f -= a + 1; /* deduct codes from patterns left */
- xp = c + k;
- if (j < z)
- while (++j < z) /* try smaller tables up to z bits */
- {
- if ((f <<= 1) <= *++xp)
- break; /* enough codes to use up j bits */
- f -= *xp; /* else deduct codes from patterns */
- }
- }
- z = 1 << j; /* table entries for j-bit table */
-
- /* allocate and link in new table */
- if ((q = (inflate_huft *)ZALLOC
- (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
- {
- if (h)
- inflate_trees_free(u[0], zs);
- return Z_MEM_ERROR; /* not enough memory */
- }
-#ifdef DEBUG_ZLIB
- inflate_hufts += z + 1;
-#endif
- *t = q + 1; /* link to list for huft_free() */
- *(t = &(q->next)) = Z_NULL;
- u[h] = ++q; /* table starts after link */
-
- /* connect to last table, if there is one */
- if (h)
- {
- x[h] = i; /* save pattern for backing up */
- r.bits = (Byte)l; /* bits to dump before this table */
- r.exop = (Byte)j; /* bits in this table */
- r.next = q; /* pointer to this table */
- j = i >> (w - l); /* (get around Turbo C bug) */
- u[h-1][j] = r; /* connect to last table */
- }
- }
-
- /* set up table entry in r */
- r.bits = (Byte)(k - w);
- if (p >= v + n)
- r.exop = 128 + 64; /* out of values--invalid code */
- else if (*p < s)
- {
- r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
- r.base = *p++; /* simple code is just the value */
- }
- else
- {
- r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */
- r.base = d[*p++ - s];
- }
-
- /* fill code-like entries with r */
- f = 1 << (k - w);
- for (j = i >> w; j < z; j += f)
- q[j] = r;
-
- /* backwards increment the k-bit code i */
- for (j = 1 << (k - 1); i & j; j >>= 1)
- i ^= j;
- i ^= j;
-
- /* backup over finished tables */
- while ((i & ((1 << w) - 1)) != x[h])
- {
- h--; /* don't need to update q */
- w -= l;
- }
- }
- }
-
-
- /* Return Z_BUF_ERROR if we were given an incomplete table */
- return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
-}
-
-
-int inflate_trees_bits(c, bb, tb, z)
-uIntf *c; /* 19 code lengths */
-uIntf *bb; /* bits tree desired/actual depth */
-inflate_huft * FAR *tb; /* bits tree result */
-z_streamp z; /* for zfree function */
-{
- int r;
-
- r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed dynamic bit lengths tree";
- else if (r == Z_BUF_ERROR || *bb == 0)
- {
- inflate_trees_free(*tb, z);
- z->msg = (char*)"incomplete dynamic bit lengths tree";
- r = Z_DATA_ERROR;
- }
- return r;
-}
-
-
-int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
-uInt nl; /* number of literal/length codes */
-uInt nd; /* number of distance codes */
-uIntf *c; /* that many (total) code lengths */
-uIntf *bl; /* literal desired/actual bit depth */
-uIntf *bd; /* distance desired/actual bit depth */
-inflate_huft * FAR *tl; /* literal/length tree result */
-inflate_huft * FAR *td; /* distance tree result */
-z_streamp z; /* for zfree function */
-{
- int r;
-
- /* build literal/length tree */
- r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z);
- if (r != Z_OK || *bl == 0)
- {
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed literal/length tree";
- else if (r != Z_MEM_ERROR)
- {
- inflate_trees_free(*tl, z);
- z->msg = (char*)"incomplete literal/length tree";
- r = Z_DATA_ERROR;
- }
- return r;
- }
-
- /* build distance tree */
- r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z);
- if (r != Z_OK || (*bd == 0 && nl > 257))
- {
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed distance tree";
- else if (r == Z_BUF_ERROR) {
-#ifdef PKZIP_BUG_WORKAROUND
- r = Z_OK;
- }
-#else
- inflate_trees_free(*td, z);
- z->msg = (char*)"incomplete distance tree";
- r = Z_DATA_ERROR;
- }
- else if (r != Z_MEM_ERROR)
- {
- z->msg = (char*)"empty distance tree with lengths";
- r = Z_DATA_ERROR;
- }
- inflate_trees_free(*tl, z);
- return r;
-#endif
- }
-
- /* done */
- return Z_OK;
-}
-
-
-/* build fixed tables only once--keep them here */
-local int fixed_built = 0;
-#define FIXEDH 530 /* number of hufts used by fixed tables */
-local inflate_huft fixed_mem[FIXEDH];
-local uInt fixed_bl;
-local uInt fixed_bd;
-local inflate_huft *fixed_tl;
-local inflate_huft *fixed_td;
-
-
-local voidpf falloc(q, n, s)
-voidpf q; /* opaque pointer */
-uInt n; /* number of items */
-uInt s; /* size of item */
-{
- Assert(s == sizeof(inflate_huft) && n <= *(intf *)q,
- "inflate_trees falloc overflow");
- *(intf *)q -= n+s-s; /* s-s to avoid warning */
- return (voidpf)(fixed_mem + *(intf *)q);
-}
-
-
-int inflate_trees_fixed(bl, bd, tl, td)
-uIntf *bl; /* literal desired/actual bit depth */
-uIntf *bd; /* distance desired/actual bit depth */
-inflate_huft * FAR *tl; /* literal/length tree result */
-inflate_huft * FAR *td; /* distance tree result */
-{
- /* build fixed tables if not already (multiple overlapped executions ok) */
- if (!fixed_built)
- {
- int k; /* temporary variable */
- unsigned c[288]; /* length list for huft_build */
- z_stream z; /* for falloc function */
- int f = FIXEDH; /* number of hufts left in fixed_mem */
-
- /* set up fake z_stream for memory routines */
- z.zalloc = falloc;
- z.zfree = Z_NULL;
- z.opaque = (voidpf)&f;
-
- /* literal table */
- for (k = 0; k < 144; k++)
- c[k] = 8;
- for (; k < 256; k++)
- c[k] = 9;
- for (; k < 280; k++)
- c[k] = 7;
- for (; k < 288; k++)
- c[k] = 8;
- fixed_bl = 7;
- huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
-
- /* distance table */
- for (k = 0; k < 30; k++)
- c[k] = 5;
- fixed_bd = 5;
- huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
-
- /* done */
- Assert(f == 0, "invalid build of fixed tables");
- fixed_built = 1;
- }
- *bl = fixed_bl;
- *bd = fixed_bd;
- *tl = fixed_tl;
- *td = fixed_td;
- return Z_OK;
-}
-
-
-int inflate_trees_free(t, z)
-inflate_huft *t; /* table to free */
-z_streamp z; /* for zfree function */
-/* Free the malloc'ed tables built by huft_build(), which makes a linked
- list of the tables it made, with the links in a dummy first entry of
- each table. */
-{
- register inflate_huft *p, *q, *r;
-
- /* Reverse linked list */
- p = Z_NULL;
- q = t;
- while (q != Z_NULL)
- {
- r = (q - 1)->next;
- (q - 1)->next = p;
- p = q;
- q = r;
- }
- /* Go through linked list, freeing from the malloced (t[-1]) address. */
- while (p != Z_NULL)
- {
- q = (--p)->next;
- ZFREE(z,p);
- p = q;
- }
- return Z_OK;
-}
-/* --- inftrees.c */
-
-/* +++ infcodes.c */
-/* infcodes.c -- process literals and length/distance pairs
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-/* #include "infblock.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-
-/* +++ inffast.h */
-/* inffast.h -- header to use inffast.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-extern int inflate_fast OF((
- uInt,
- uInt,
- inflate_huft *,
- inflate_huft *,
- inflate_blocks_statef *,
- z_streamp ));
-/* --- inffast.h */
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-/* inflate codes private state */
-struct inflate_codes_state {
-
- /* mode */
- enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
- START, /* x: set up for LEN */
- LEN, /* i: get length/literal/eob next */
- LENEXT, /* i: getting length extra (have base) */
- DIST, /* i: get distance next */
- DISTEXT, /* i: getting distance extra */
- COPY, /* o: copying bytes in window, waiting for space */
- LIT, /* o: got literal, waiting for output space */
- WASH, /* o: got eob, possibly still output waiting */
- END, /* x: got eob and all data flushed */
- BADCODE} /* x: got error */
- mode; /* current inflate_codes mode */
-
- /* mode dependent information */
- uInt len;
- union {
- struct {
- inflate_huft *tree; /* pointer into tree */
- uInt need; /* bits needed */
- } code; /* if LEN or DIST, where in tree */
- uInt lit; /* if LIT, literal */
- struct {
- uInt get; /* bits to get for extra */
- uInt dist; /* distance back to copy from */
- } copy; /* if EXT or COPY, where and how much */
- } sub; /* submode */
-
- /* mode independent information */
- Byte lbits; /* ltree bits decoded per branch */
- Byte dbits; /* dtree bits decoded per branch */
- inflate_huft *ltree; /* literal/length/eob tree */
- inflate_huft *dtree; /* distance tree */
-
-};
-
-
-inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
-uInt bl, bd;
-inflate_huft *tl;
-inflate_huft *td; /* need separate declaration for Borland C++ */
-z_streamp z;
-{
- inflate_codes_statef *c;
-
- if ((c = (inflate_codes_statef *)
- ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
- {
- c->mode = START;
- c->lbits = (Byte)bl;
- c->dbits = (Byte)bd;
- c->ltree = tl;
- c->dtree = td;
- Tracev((stderr, "inflate: codes new\n"));
- }
- return c;
-}
-
-
-int inflate_codes(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt j; /* temporary storage */
- inflate_huft *t; /* temporary pointer */
- uInt e; /* extra bits or operation */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
- Bytef *f; /* pointer to copy strings from */
- inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
-
- /* copy input/output information to locals (UPDATE macro restores) */
- LOAD
-
- /* process input and output based on current state */
- while (1) switch (c->mode)
- { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
- case START: /* x: set up for LEN */
-#ifndef SLOW
- if (m >= 258 && n >= 10)
- {
- UPDATE
- r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
- LOAD
- if (r != Z_OK)
- {
- c->mode = r == Z_STREAM_END ? WASH : BADCODE;
- break;
- }
- }
-#endif /* !SLOW */
- c->sub.code.need = c->lbits;
- c->sub.code.tree = c->ltree;
- c->mode = LEN;
- case LEN: /* i: get length/literal/eob next */
- j = c->sub.code.need;
- NEEDBITS(j)
- t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
- DUMPBITS(t->bits)
- e = (uInt)(t->exop);
- if (e == 0) /* literal */
- {
- c->sub.lit = t->base;
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: literal '%c'\n" :
- "inflate: literal 0x%02x\n", t->base));
- c->mode = LIT;
- break;
- }
- if (e & 16) /* length */
- {
- c->sub.copy.get = e & 15;
- c->len = t->base;
- c->mode = LENEXT;
- break;
- }
- if ((e & 64) == 0) /* next table */
- {
- c->sub.code.need = e;
- c->sub.code.tree = t->next;
- break;
- }
- if (e & 32) /* end of block */
- {
- Tracevv((stderr, "inflate: end of block\n"));
- c->mode = WASH;
- break;
- }
- c->mode = BADCODE; /* invalid code */
- z->msg = (char*)"invalid literal/length code";
- r = Z_DATA_ERROR;
- LEAVE
- case LENEXT: /* i: getting length extra (have base) */
- j = c->sub.copy.get;
- NEEDBITS(j)
- c->len += (uInt)b & inflate_mask[j];
- DUMPBITS(j)
- c->sub.code.need = c->dbits;
- c->sub.code.tree = c->dtree;
- Tracevv((stderr, "inflate: length %u\n", c->len));
- c->mode = DIST;
- case DIST: /* i: get distance next */
- j = c->sub.code.need;
- NEEDBITS(j)
- t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
- DUMPBITS(t->bits)
- e = (uInt)(t->exop);
- if (e & 16) /* distance */
- {
- c->sub.copy.get = e & 15;
- c->sub.copy.dist = t->base;
- c->mode = DISTEXT;
- break;
- }
- if ((e & 64) == 0) /* next table */
- {
- c->sub.code.need = e;
- c->sub.code.tree = t->next;
- break;
- }
- c->mode = BADCODE; /* invalid code */
- z->msg = (char*)"invalid distance code";
- r = Z_DATA_ERROR;
- LEAVE
- case DISTEXT: /* i: getting distance extra */
- j = c->sub.copy.get;
- NEEDBITS(j)
- c->sub.copy.dist += (uInt)b & inflate_mask[j];
- DUMPBITS(j)
- Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
- c->mode = COPY;
- case COPY: /* o: copying bytes in window, waiting for space */
-#ifndef __TURBOC__ /* Turbo C bug for following expression */
- f = (uInt)(q - s->window) < c->sub.copy.dist ?
- s->end - (c->sub.copy.dist - (q - s->window)) :
- q - c->sub.copy.dist;
-#else
- f = q - c->sub.copy.dist;
- if ((uInt)(q - s->window) < c->sub.copy.dist)
- f = s->end - (c->sub.copy.dist - (uInt)(q - s->window));
-#endif
- while (c->len)
- {
- NEEDOUT
- OUTBYTE(*f++)
- if (f == s->end)
- f = s->window;
- c->len--;
- }
- c->mode = START;
- break;
- case LIT: /* o: got literal, waiting for output space */
- NEEDOUT
- OUTBYTE(c->sub.lit)
- c->mode = START;
- break;
- case WASH: /* o: got eob, possibly more output */
- FLUSH
- if (s->read != s->write)
- LEAVE
- c->mode = END;
- case END:
- r = Z_STREAM_END;
- LEAVE
- case BADCODE: /* x: got error */
- r = Z_DATA_ERROR;
- LEAVE
- default:
- r = Z_STREAM_ERROR;
- LEAVE
- }
-}
-
-
-void inflate_codes_free(c, z)
-inflate_codes_statef *c;
-z_streamp z;
-{
- ZFREE(z, c);
- Tracev((stderr, "inflate: codes free\n"));
-}
-/* --- infcodes.c */
-
-/* +++ infutil.c */
-/* inflate_util.c -- data and routines common to blocks and codes
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "infblock.h" */
-/* #include "inftrees.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* And'ing with mask[n] masks the lower n bits */
-uInt inflate_mask[17] = {
- 0x0000,
- 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
- 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
-};
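
For example (an editorial gloss), inflate_mask[4] is 0x000f, so
((uInt)b & inflate_mask[4]) extracts the next four bits from the LSB-first
bit buffer; that is exactly the idiom applied after NEEDBITS throughout the
decoder above.
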
-
-
-/* copy as much as possible from the sliding window to the output area */
-int inflate_flush(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt n;
- Bytef *p;
- Bytef *q;
-
- /* local copies of source and destination pointers */
- p = z->next_out;
- q = s->read;
-
- /* compute number of bytes to copy as far as end of window */
- n = (uInt)((q <= s->write ? s->write : s->end) - q);
- if (n > z->avail_out) n = z->avail_out;
- if (n && r == Z_BUF_ERROR) r = Z_OK;
-
- /* update counters */
- z->avail_out -= n;
- z->total_out += n;
-
- /* update check information */
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(s->check, q, n);
-
- /* copy as far as end of window */
- if (p != Z_NULL) {
- zmemcpy(p, q, n);
- p += n;
- }
- q += n;
-
- /* see if more to copy at beginning of window */
- if (q == s->end)
- {
- /* wrap pointers */
- q = s->window;
- if (s->write == s->end)
- s->write = s->window;
-
- /* compute bytes to copy */
- n = (uInt)(s->write - q);
- if (n > z->avail_out) n = z->avail_out;
- if (n && r == Z_BUF_ERROR) r = Z_OK;
-
- /* update counters */
- z->avail_out -= n;
- z->total_out += n;
-
- /* update check information */
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(s->check, q, n);
-
- /* copy */
- if (p != Z_NULL) {
- zmemcpy(p, q, n);
- p += n;
- }
- q += n;
- }
-
- /* update pointers */
- z->next_out = p;
- s->read = q;
-
- /* done */
- return r;
-}
-/* --- infutil.c */
-
-/* +++ inffast.c */
-/* inffast.c -- process literals and length/distance pairs fast
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-/* #include "infblock.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-/* #include "inffast.h" */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-/* macros for bit input with no checking and for returning unused bytes */
-#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
-#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
-
-/* Called with number of bytes left to write in window at least 258
- (the maximum string length) and number of input bytes available
- at least ten. The ten bytes are six bytes for the longest length/
- distance pair plus four bytes for overloading the bit buffer. */
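
(An editorial gloss on the arithmetic above, not text from the original file:
the worst-case pair costs at most 15 bits of length code + 5 length extra
bits + 15 bits of distance code + 13 distance extra bits = 48 bits = 6 bytes,
and because GRABBITS loads whole bytes into a bit buffer holding up to
32 bits, as many as 4 unconsumed bytes may sit in the buffer when the fast
loop exits; UNGRAB hands those back to the input.)
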
-
-int inflate_fast(bl, bd, tl, td, s, z)
-uInt bl, bd;
-inflate_huft *tl;
-inflate_huft *td; /* need separate declaration for Borland C++ */
-inflate_blocks_statef *s;
-z_streamp z;
-{
- inflate_huft *t; /* temporary pointer */
- uInt e; /* extra bits or operation */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
- uInt ml; /* mask for literal/length tree */
- uInt md; /* mask for distance tree */
- uInt c; /* bytes to copy */
- uInt d; /* distance back to copy from */
- Bytef *r; /* copy source pointer */
-
- /* load input, output, bit values */
- LOAD
-
- /* initialize masks */
- ml = inflate_mask[bl];
- md = inflate_mask[bd];
-
- /* do until not enough input or output space for fast loop */
- do { /* assume called with m >= 258 && n >= 10 */
- /* get literal/length code */
- GRABBITS(20) /* max bits for literal/length code */
- if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
- {
- DUMPBITS(t->bits)
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: * literal '%c'\n" :
- "inflate: * literal 0x%02x\n", t->base));
- *q++ = (Byte)t->base;
- m--;
- continue;
- }
- do {
- DUMPBITS(t->bits)
- if (e & 16)
- {
- /* get extra bits for length */
- e &= 15;
- c = t->base + ((uInt)b & inflate_mask[e]);
- DUMPBITS(e)
- Tracevv((stderr, "inflate: * length %u\n", c));
-
- /* decode distance base of block to copy */
- GRABBITS(15); /* max bits for distance code */
- e = (t = td + ((uInt)b & md))->exop;
- do {
- DUMPBITS(t->bits)
- if (e & 16)
- {
- /* get extra bits to add to distance base */
- e &= 15;
- GRABBITS(e) /* get extra bits (up to 13) */
- d = t->base + ((uInt)b & inflate_mask[e]);
- DUMPBITS(e)
- Tracevv((stderr, "inflate: * distance %u\n", d));
-
- /* do the copy */
- m -= c;
- if ((uInt)(q - s->window) >= d) /* offset before dest */
- { /* just copy */
- r = q - d;
- *q++ = *r++; c--; /* minimum count is three, */
- *q++ = *r++; c--; /* so unroll loop a little */
- }
- else /* else offset after destination */
- {
- e = d - (uInt)(q - s->window); /* bytes from offset to end */
- r = s->end - e; /* pointer to offset */
- if (c > e) /* if source crosses, */
- {
- c -= e; /* copy to end of window */
- do {
- *q++ = *r++;
- } while (--e);
- r = s->window; /* copy rest from start of window */
- }
- }
- do { /* copy all or what's left */
- *q++ = *r++;
- } while (--c);
- break;
- }
- else if ((e & 64) == 0)
- e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
- else
- {
- z->msg = (char*)"invalid distance code";
- UNGRAB
- UPDATE
- return Z_DATA_ERROR;
- }
- } while (1);
- break;
- }
- if ((e & 64) == 0)
- {
- if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
- {
- DUMPBITS(t->bits)
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: * literal '%c'\n" :
- "inflate: * literal 0x%02x\n", t->base));
- *q++ = (Byte)t->base;
- m--;
- break;
- }
- }
- else if (e & 32)
- {
- Tracevv((stderr, "inflate: * end of block\n"));
- UNGRAB
- UPDATE
- return Z_STREAM_END;
- }
- else
- {
- z->msg = (char*)"invalid literal/length code";
- UNGRAB
- UPDATE
- return Z_DATA_ERROR;
- }
- } while (1);
- } while (m >= 258 && n >= 10);
-
- /* not enough input or output--restore pointers and return */
- UNGRAB
- UPDATE
- return Z_OK;
-}
-/* --- inffast.c */
-
-/* +++ zutil.c */
-/* zutil.c -- target dependent utility functions for the compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: zutil.c,v 1.17 1996/07/24 13:41:12 me Exp $ */
-
-/* #include "zutil.h" */
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-#ifndef STDC
-extern void exit OF((int));
-#endif
-
-const char *z_errmsg[10] = {
-"need dictionary", /* Z_NEED_DICT 2 */
-"stream end", /* Z_STREAM_END 1 */
-"", /* Z_OK 0 */
-"file error", /* Z_ERRNO (-1) */
-"stream error", /* Z_STREAM_ERROR (-2) */
-"data error", /* Z_DATA_ERROR (-3) */
-"insufficient memory", /* Z_MEM_ERROR (-4) */
-"buffer error", /* Z_BUF_ERROR (-5) */
-"incompatible version",/* Z_VERSION_ERROR (-6) */
-""};
-
-
-const char *zlibVersion()
-{
- return ZLIB_VERSION;
-}
-
-#ifdef DEBUG_ZLIB
-void z_error (m)
- char *m;
-{
- fprintf(stderr, "%s\n", m);
- exit(1);
-}
-#endif
-
-#ifndef HAVE_MEMCPY
-
-void zmemcpy(dest, source, len)
- Bytef* dest;
- Bytef* source;
- uInt len;
-{
- if (len == 0) return;
- do {
- *dest++ = *source++; /* ??? to be unrolled */
- } while (--len != 0);
-}
-
-int zmemcmp(s1, s2, len)
- Bytef* s1;
- Bytef* s2;
- uInt len;
-{
- uInt j;
-
- for (j = 0; j < len; j++) {
- if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1;
- }
- return 0;
-}
-
-void zmemzero(dest, len)
- Bytef* dest;
- uInt len;
-{
- if (len == 0) return;
- do {
- *dest++ = 0; /* ??? to be unrolled */
- } while (--len != 0);
-}
-#endif
-
-#ifdef __TURBOC__
-#if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__)
-/* Small and medium models in Turbo C are for now limited to near allocation
- * with reduced MAX_WBITS and MAX_MEM_LEVEL
- */
-# define MY_ZCALLOC
-
-/* Turbo C malloc() does not allow dynamic allocation of 64K bytes
- * and farmalloc(64K) returns a pointer with an offset of 8, so we
- * must fix the pointer. Warning: the pointer must be put back to its
- * original form in order to free it, use zcfree().
- */
-
-#define MAX_PTR 10
-/* 10*64K = 640K */
-
-local int next_ptr = 0;
-
-typedef struct ptr_table_s {
- voidpf org_ptr;
- voidpf new_ptr;
-} ptr_table;
-
-local ptr_table table[MAX_PTR];
-/* This table is used to remember the original form of pointers
- * to large buffers (64K). Such pointers are normalized with a zero offset.
- * Since MSDOS is not a preemptive multitasking OS, this table is not
- * protected from concurrent access. This hack doesn't work anyway on
- * a protected system like OS/2. Use Microsoft C instead.
- */
-
-voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
-{
- voidpf buf = opaque; /* just to make some compilers happy */
- ulg bsize = (ulg)items*size;
-
- /* If we allocate less than 65520 bytes, we assume that farmalloc
- * will return a usable pointer which doesn't have to be normalized.
- */
- if (bsize < 65520L) {
- buf = farmalloc(bsize);
- if (*(ush*)&buf != 0) return buf;
- } else {
- buf = farmalloc(bsize + 16L);
- }
- if (buf == NULL || next_ptr >= MAX_PTR) return NULL;
- table[next_ptr].org_ptr = buf;
-
- /* Normalize the pointer to seg:0 */
- *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4;
- *(ush*)&buf = 0;
- table[next_ptr++].new_ptr = buf;
- return buf;
-}
-
-void zcfree (voidpf opaque, voidpf ptr)
-{
- int n;
- if (*(ush*)&ptr != 0) { /* object < 64K */
- farfree(ptr);
- return;
- }
- /* Find the original pointer */
- for (n = 0; n < next_ptr; n++) {
- if (ptr != table[n].new_ptr) continue;
-
- farfree(table[n].org_ptr);
- while (++n < next_ptr) {
- table[n-1] = table[n];
- }
- next_ptr--;
- return;
- }
- ptr = opaque; /* just to make some compilers happy */
- Assert(0, "zcfree: ptr not found");
-}
-#endif
-#endif /* __TURBOC__ */
-
-
-#if defined(M_I86) && !defined(__32BIT__)
-/* Microsoft C in 16-bit mode */
-
-# define MY_ZCALLOC
-
-#if (!defined(_MSC_VER) || (_MSC_VER < 600))
-# define _halloc halloc
-# define _hfree hfree
-#endif
-
-voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
-{
- if (opaque) opaque = 0; /* to make compiler happy */
- return _halloc((long)items, size);
-}
-
-void zcfree (voidpf opaque, voidpf ptr)
-{
- if (opaque) opaque = 0; /* to make compiler happy */
- _hfree(ptr);
-}
-
-#endif /* MSC */
-
-
-#ifndef MY_ZCALLOC /* Any system without a special alloc function */
-
-#ifndef STDC
-extern voidp calloc OF((uInt items, uInt size));
-extern void free OF((voidpf ptr));
-#endif
-
-voidpf zcalloc (opaque, items, size)
- voidpf opaque;
- unsigned items;
- unsigned size;
-{
- if (opaque) items += size - size; /* make compiler happy */
- return (voidpf)calloc(items, size);
-}
-
-void zcfree (opaque, ptr)
- voidpf opaque;
- voidpf ptr;
-{
- free(ptr);
- if (opaque) return; /* make compiler happy */
-}
-
-#endif /* MY_ZCALLOC */
-/* --- zutil.c */
-
-/* +++ adler32.c */
-/* adler32.c -- compute the Adler-32 checksum of a data stream
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: adler32.c,v 1.10 1996/05/22 11:52:18 me Exp $ */
-
-/* #include "zlib.h" */
-
-#define BASE 65521L /* largest prime smaller than 65536 */
-#define NMAX 5552
-/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
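
(Editorial arithmetic checking that claim: for n = 5552,
255*n*(n+1)/2 + (n+1)*(BASE-1) = 3,930,857,640 + 363,832,560 = 4,294,690,200,
which is <= 2^32 - 1 = 4,294,967,295, while n = 5553 already yields
4,296,171,735 and overflows; this is why the loop below can defer the costly
%= BASE reductions until NMAX bytes have been accumulated.)
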
-
-#define DO1(buf,i) {s1 += buf[i]; s2 += s1;}
-#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
-#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
-#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
-#define DO16(buf) DO8(buf,0); DO8(buf,8);
-
-/* ========================================================================= */
-uLong adler32(adler, buf, len)
- uLong adler;
- const Bytef *buf;
- uInt len;
-{
- unsigned long s1 = adler & 0xffff;
- unsigned long s2 = (adler >> 16) & 0xffff;
- int k;
-
- if (buf == Z_NULL) return 1L;
-
- while (len > 0) {
- k = len < NMAX ? len : NMAX;
- len -= k;
- while (k >= 16) {
- DO16(buf);
- buf += 16;
- k -= 16;
- }
- if (k != 0) do {
- s1 += *buf++;
- s2 += s1;
- } while (--k);
- s1 %= BASE;
- s2 %= BASE;
- }
- return (s2 << 16) | s1;
-}
-/* --- adler32.c */
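
The checksum is designed to be built up piecewise; a minimal usage sketch
against the function above (editorial, with invented buffer names):

    uLong check = adler32(0L, Z_NULL, 0);  /* buf == Z_NULL: returns 1L,  */
                                           /* the required initial value  */
    check = adler32(check, part1, len1);   /* fold in the first chunk...  */
    check = adler32(check, part2, len2);   /* ...then the next, and so on */
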
+++ /dev/null
-/* $Id: zlib.h,v 1.2 1997/12/23 10:47:44 paulus Exp $ */
-
-/*
- * This file is derived from zlib.h and zconf.h from the zlib-1.0.4
- * distribution by Jean-loup Gailly and Mark Adler, with some additions
- * by Paul Mackerras to aid in implementing Deflate compression and
- * decompression for PPP packets.
- */
-
-/*
- * ==FILEVERSION 971127==
- *
- * This marker is used by the Linux installation script to determine
- * whether an up-to-date version of this file is already installed.
- */
-
-
-/* +++ zlib.h */
-/* zlib.h -- interface of the 'zlib' general purpose compression library
- version 1.0.4, Jul 24th, 1996.
-
- Copyright (C) 1995-1996 Jean-loup Gailly and Mark Adler
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Jean-loup Gailly Mark Adler
- gzip@prep.ai.mit.edu madler@alumni.caltech.edu
-
-
- The data format used by the zlib library is described by RFCs (Request for
- Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
- (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
-*/
-
-#ifndef _ZLIB_H
-#define _ZLIB_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/* +++ zconf.h */
-/* zconf.h -- configuration of the zlib compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: zconf.h,v 1.20 1996/07/02 15:09:28 me Exp $ */
-
-#ifndef _ZCONF_H
-#define _ZCONF_H
-
-/*
- * If you *really* need a unique prefix for all types and library functions,
- * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
- */
-#ifdef Z_PREFIX
-# define deflateInit_ z_deflateInit_
-# define deflate z_deflate
-# define deflateEnd z_deflateEnd
-# define inflateInit_ z_inflateInit_
-# define inflate z_inflate
-# define inflateEnd z_inflateEnd
-# define deflateInit2_ z_deflateInit2_
-# define deflateSetDictionary z_deflateSetDictionary
-# define deflateCopy z_deflateCopy
-# define deflateReset z_deflateReset
-# define deflateParams z_deflateParams
-# define inflateInit2_ z_inflateInit2_
-# define inflateSetDictionary z_inflateSetDictionary
-# define inflateSync z_inflateSync
-# define inflateReset z_inflateReset
-# define compress z_compress
-# define uncompress z_uncompress
-# define adler32 z_adler32
-# define crc32 z_crc32
-# define get_crc_table z_get_crc_table
-
-# define Byte z_Byte
-# define uInt z_uInt
-# define uLong z_uLong
-# define Bytef z_Bytef
-# define charf z_charf
-# define intf z_intf
-# define uIntf z_uIntf
-# define uLongf z_uLongf
-# define voidpf z_voidpf
-# define voidp z_voidp
-#endif
-
-#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32)
-# define WIN32
-#endif
-#if defined(__GNUC__) || defined(WIN32) || defined(__386__) || defined(i386)
-# ifndef __32BIT__
-# define __32BIT__
-# endif
-#endif
-#if defined(__MSDOS__) && !defined(MSDOS)
-# define MSDOS
-#endif
-
-/*
- * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
- * than 64k bytes at a time (needed on systems with 16-bit int).
- */
-#if defined(MSDOS) && !defined(__32BIT__)
-# define MAXSEG_64K
-#endif
-#ifdef MSDOS
-# define UNALIGNED_OK
-#endif
-
-#if (defined(MSDOS) || defined(_WINDOWS) || defined(WIN32)) && !defined(STDC)
-# define STDC
-#endif
-#if (defined(__STDC__) || defined(__cplusplus)) && !defined(STDC)
-# define STDC
-#endif
-
-#ifndef STDC
-# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
-# define const
-# endif
-#endif
-
-/* Some Mac compilers merge all .h files incorrectly: */
-#if defined(__MWERKS__) || defined(applec) ||defined(THINK_C) ||defined(__SC__)
-# define NO_DUMMY_DECL
-#endif
-
-/* Maximum value for memLevel in deflateInit2 */
-#ifndef MAX_MEM_LEVEL
-# ifdef MAXSEG_64K
-# define MAX_MEM_LEVEL 8
-# else
-# define MAX_MEM_LEVEL 9
-# endif
-#endif
-
-/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
-#ifndef MAX_WBITS
-# define MAX_WBITS 15 /* 32K LZ77 window */
-#endif
-
-/* The memory requirements for deflate are (in bytes):
- 1 << (windowBits+2) + 1 << (memLevel+9)
- that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
- plus a few kilobytes for small objects. For example, if you want to reduce
- the default memory requirements from 256K to 128K, compile with
- make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
- Of course this will generally degrade compression (there's no free lunch).
-
- The memory requirements for inflate are (in bytes) 1 << windowBits
- that is, 32K for windowBits=15 (default value) plus a few kilobytes
- for small objects.
-*/
-
- /* Type declarations */
-
-#ifndef OF /* function prototypes */
-# ifdef STDC
-# define OF(args) args
-# else
-# define OF(args) ()
-# endif
-#endif
-
-/* The following definitions for FAR are needed only for MSDOS mixed
- * model programming (small or medium model with some far allocations).
- * This was tested only with MSC; for other MSDOS compilers you may have
- * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
- * just define FAR to be empty.
- */
-#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(__32BIT__)
- /* MSC small or medium model */
-# define SMALL_MEDIUM
-# ifdef _MSC_VER
-# define FAR __far
-# else
-# define FAR far
-# endif
-#endif
-#if defined(__BORLANDC__) && (defined(__SMALL__) || defined(__MEDIUM__))
-# ifndef __32BIT__
-# define SMALL_MEDIUM
-# define FAR __far
-# endif
-#endif
-#ifndef FAR
-# define FAR
-#endif
-
-typedef unsigned char Byte; /* 8 bits */
-typedef unsigned int uInt; /* 16 bits or more */
-typedef unsigned long uLong; /* 32 bits or more */
-
-#if defined(__BORLANDC__) && defined(SMALL_MEDIUM)
- /* Borland C/C++ ignores FAR inside typedef */
-# define Bytef Byte FAR
-#else
- typedef Byte FAR Bytef;
-#endif
-typedef char FAR charf;
-typedef int FAR intf;
-typedef uInt FAR uIntf;
-typedef uLong FAR uLongf;
-
-#ifdef STDC
- typedef void FAR *voidpf;
- typedef void *voidp;
-#else
- typedef Byte FAR *voidpf;
- typedef Byte *voidp;
-#endif
-
-
-/* Compile with -DZLIB_DLL for Windows DLL support */
-#if (defined(_WINDOWS) || defined(WINDOWS)) && defined(ZLIB_DLL)
-# include <windows.h>
-# define EXPORT WINAPI
-#else
-# define EXPORT
-#endif
-
-#endif /* _ZCONF_H */
-/* --- zconf.h */
-
-#define ZLIB_VERSION "1.0.4P"
-
-/*
- The 'zlib' compression library provides in-memory compression and
- decompression functions, including integrity checks of the uncompressed
- data. This version of the library supports only one compression method
- (deflation) but other algorithms may be added later and will have the same
- stream interface.
-
- For compression the application must provide the output buffer and
- may optionally provide the input buffer for optimization. For decompression,
- the application must provide the input buffer and may optionally provide
- the output buffer for optimization.
-
- Compression can be done in a single step if the buffers are large
- enough (for example if an input file is mmap'ed), or can be done by
- repeated calls of the compression function. In the latter case, the
- application must provide more input and/or consume the output
- (providing more output space) before each call.
-
- The library does not install any signal handler. It is recommended to
- add at least a handler for SIGSEGV when decompressing; the library checks
- the consistency of the input data whenever possible but may go nuts
- for some forms of corrupted input.
-*/
-
-typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
-typedef void (*free_func) OF((voidpf opaque, voidpf address));
-
-struct internal_state;
-
-typedef struct z_stream_s {
- Bytef *next_in; /* next input byte */
- uInt avail_in; /* number of bytes available at next_in */
- uLong total_in; /* total nb of input bytes read so far */
-
- Bytef *next_out; /* next output byte should be put there */
- uInt avail_out; /* remaining free space at next_out */
- uLong total_out; /* total nb of bytes output so far */
-
- char *msg; /* last error message, NULL if no error */
- struct internal_state FAR *state; /* not visible by applications */
-
- alloc_func zalloc; /* used to allocate the internal state */
- free_func zfree; /* used to free the internal state */
- voidpf opaque; /* private data object passed to zalloc and zfree */
-
- int data_type; /* best guess about the data type: ascii or binary */
- uLong adler; /* adler32 value of the uncompressed data */
- uLong reserved; /* reserved for future use */
-} z_stream;
-
-typedef z_stream FAR *z_streamp;
-
-/*
- The application must update next_in and avail_in when avail_in has
- dropped to zero. It must update next_out and avail_out when avail_out
- has dropped to zero. The application must initialize zalloc, zfree and
- opaque before calling the init function. All other fields are set by the
- compression library and must not be updated by the application.
-
- The opaque value provided by the application will be passed as the first
- parameter for calls of zalloc and zfree. This can be useful for custom
- memory management. The compression library attaches no meaning to the
- opaque value.
-
- zalloc must return Z_NULL if there is not enough memory for the object.
- On 16-bit systems, the functions zalloc and zfree must be able to allocate
- exactly 65536 bytes, but will not be required to allocate more than this
- if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
- pointers returned by zalloc for objects of exactly 65536 bytes *must*
- have their offset normalized to zero. The default allocation function
- provided by this library ensures this (see zutil.c). To reduce memory
- requirements and avoid any allocation of 64K objects, at the expense of
- compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
-
- The fields total_in and total_out can be used for statistics or
- progress reports. After compression, total_in holds the total size of
- the uncompressed data and may be saved for use in the decompressor
- (particularly if the decompressor wants to decompress everything in
- a single step).
-*/
-
- /* constants */
-
-#define Z_NO_FLUSH 0
-#define Z_PARTIAL_FLUSH 1
-#define Z_PACKET_FLUSH 2
-#define Z_SYNC_FLUSH 3
-#define Z_FULL_FLUSH 4
-#define Z_FINISH 5
-/* Allowed flush values; see deflate() below for details */
-
-#define Z_OK 0
-#define Z_STREAM_END 1
-#define Z_NEED_DICT 2
-#define Z_ERRNO (-1)
-#define Z_STREAM_ERROR (-2)
-#define Z_DATA_ERROR (-3)
-#define Z_MEM_ERROR (-4)
-#define Z_BUF_ERROR (-5)
-#define Z_VERSION_ERROR (-6)
-/* Return codes for the compression/decompression functions. Negative
- * values are errors, positive values are used for special but normal events.
- */
-
-#define Z_NO_COMPRESSION 0
-#define Z_BEST_SPEED 1
-#define Z_BEST_COMPRESSION 9
-#define Z_DEFAULT_COMPRESSION (-1)
-/* compression levels */
-
-#define Z_FILTERED 1
-#define Z_HUFFMAN_ONLY 2
-#define Z_DEFAULT_STRATEGY 0
-/* compression strategy; see deflateInit2() below for details */
-
-#define Z_BINARY 0
-#define Z_ASCII 1
-#define Z_UNKNOWN 2
-/* Possible values of the data_type field */
-
-#define Z_DEFLATED 8
-/* The deflate compression method (the only one supported in this version) */
-
-#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
-
-#define zlib_version zlibVersion()
-/* for compatibility with versions < 1.0.2 */
-
- /* basic functions */
-
-extern const char * EXPORT zlibVersion OF((void));
-/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
- If the first character differs, the library code actually used is
- not compatible with the zlib.h header file used by the application.
- This check is automatically made by deflateInit and inflateInit.
- */
-
-/*
-extern int EXPORT deflateInit OF((z_streamp strm, int level));
-
- Initializes the internal stream state for compression. The fields
- zalloc, zfree and opaque must be initialized before by the caller.
- If zalloc and zfree are set to Z_NULL, deflateInit updates them to
- use default allocation functions.
-
- The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
- 1 gives best speed, 9 gives best compression, 0 gives no compression at
- all (the input data is simply copied a block at a time).
- Z_DEFAULT_COMPRESSION requests a default compromise between speed and
- compression (currently equivalent to level 6).
-
- deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if level is not a valid compression level,
- Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
- with the version assumed by the caller (ZLIB_VERSION).
- msg is set to null if there is no error message. deflateInit does not
- perform any compression: this will be done by deflate().
-*/
-
-
-extern int EXPORT deflate OF((z_streamp strm, int flush));
-/*
- Performs one or both of the following actions:
-
- - Compress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in and avail_in are updated and
- processing will resume at this point for the next call of deflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. This action is forced if the parameter flush is non zero.
- Forcing flush frequently degrades the compression ratio, so this parameter
- should be set only when necessary (in interactive applications).
- Some output may be provided even if flush is not set.
-
- Before the call of deflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating avail_in or avail_out accordingly; avail_out
- should never be zero before the call. The application can consume the
- compressed output when it wants, for example when the output buffer is full
- (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
- and with zero avail_out, it must be called again after making room in the
- output buffer because there might be more output pending.
-
- If the parameter flush is set to Z_PARTIAL_FLUSH, the current compression
- block is terminated and flushed to the output buffer so that the
- decompressor can get all input data available so far. For method 9, a future
- variant on method 8, the current block will be flushed but not terminated.
- Z_SYNC_FLUSH has the same effect as partial flush except that the compressed
- output is byte aligned (the compressor can clear its internal bit buffer)
- and the current block is always terminated; this can be useful if the
- compressor has to be restarted from scratch after an interruption (in which
- case the internal state of the compressor may be lost).
- If flush is set to Z_FULL_FLUSH, the compression block is terminated, a
- special marker is output and the compression dictionary is discarded; this
- is useful to allow the decompressor to synchronize if one compressed block
- has been damaged (see inflateSync below). Flushing degrades compression and
- so should be used only when necessary. Using Z_FULL_FLUSH too often can
- seriously degrade the compression. If deflate returns with avail_out == 0,
- this function must be called again with the same value of the flush
- parameter and more output space (updated avail_out), until the flush is
- complete (deflate returns with non-zero avail_out).
-
- If the parameter flush is set to Z_PACKET_FLUSH, the compression
- block is terminated, and a zero-length stored block is output,
- omitting the length bytes (the effect of this is that the 3-bit type
- code 000 for a stored block is output, and the output is then
- byte-aligned). This is designed for use at the end of a PPP packet.
-
- If the parameter flush is set to Z_FINISH, pending input is processed,
- pending output is flushed and deflate returns with Z_STREAM_END if there
- was enough output space; if deflate returns with Z_OK, this function must be
- called again with Z_FINISH and more output space (updated avail_out) but no
- more input data, until it returns with Z_STREAM_END or an error. After
- deflate has returned Z_STREAM_END, the only possible operations on the
- stream are deflateReset or deflateEnd.
-
- Z_FINISH can be used immediately after deflateInit if all the compression
- is to be done in a single step. In this case, avail_out must be at least
- 0.1% larger than avail_in plus 12 bytes. If deflate does not return
- Z_STREAM_END, then it must be called again as described above.
-
- deflate() may update data_type if it can make a good guess about
- the input data type (Z_ASCII or Z_BINARY). If in doubt, the data is considered
- binary. This field is only for information purposes and does not affect
- the compression algorithm in any manner.
-
- deflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if all input has been
- consumed and all output has been produced (only when flush is set to
- Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
- if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible.
-*/
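
Tying the above together, here is a minimal single-step compression helper
written against the interface documented in this header (an editorial sketch,
not part of the removed file: compress_once and its parameters are invented,
and it assumes deflateInit/deflate/deflateEnd behave exactly as described
here):

    static int compress_once(Bytef *dest, uLongf *destLen,
                             const Bytef *source, uLong sourceLen)
    {
        z_stream stream;
        int err;

        stream.zalloc = (alloc_func)Z_NULL; /* ask for default allocators */
        stream.zfree  = (free_func)Z_NULL;
        stream.opaque = (voidpf)Z_NULL;

        err = deflateInit(&stream, Z_DEFAULT_COMPRESSION);
        if (err != Z_OK) return err;

        stream.next_in   = (Bytef*)source;
        stream.avail_in  = (uInt)sourceLen;
        stream.next_out  = dest;
        stream.avail_out = (uInt)*destLen;  /* >= sourceLen*1.001 + 12,   */
                                            /* per the Z_FINISH rule      */

        err = deflate(&stream, Z_FINISH);   /* everything in one call     */
        if (err != Z_STREAM_END) {
            deflateEnd(&stream);            /* Z_OK here means the output */
            return err == Z_OK ? Z_BUF_ERROR : err; /* buffer was too small */
        }
        *destLen = stream.total_out;
        return deflateEnd(&stream);
    }
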
-
-
-extern int EXPORT deflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
- stream state was inconsistent, Z_DATA_ERROR if the stream was freed
- prematurely (some input or output was discarded). In the error case,
- msg may be set but then points to a static string (which must not be
- deallocated).
-*/
-
-
-/*
-extern int EXPORT inflateInit OF((z_streamp strm));
-
- Initializes the internal stream state for decompression. The fields
- zalloc, zfree and opaque must be initialized before by the caller. If
- zalloc and zfree are set to Z_NULL, inflateInit updates them to use default
- allocation functions.
-
- inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_VERSION_ERROR if the zlib library version is incompatible
- with the version assumed by the caller. msg is set to null if there is no
- error message. inflateInit does not perform any decompression: this will be
- done by inflate().
-*/
-
-
-extern int EXPORT inflate OF((z_streamp strm, int flush));
-/*
- Performs one or both of the following actions:
-
- - Decompress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in is updated and processing
- will resume at this point for the next call of inflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. inflate() provides as much output as possible, until there
- is no more input data or no more space in the output buffer (see below
- about the flush parameter).
-
- Before the call of inflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating the next_* and avail_* values accordingly.
- The application can consume the uncompressed output when it wants, for
- example when the output buffer is full (avail_out == 0), or after each
- call of inflate(). If inflate returns Z_OK and with zero avail_out, it
- must be called again after making room in the output buffer because there
- might be more output pending.
-
- If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
- inflate flushes as much output as possible to the output buffer. The
- flushing behavior of inflate is not specified for values of the flush
- parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
- current implementation actually flushes as much output as possible
- anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
- has been consumed, it is expecting to see the length field of a stored
- block; if not, it returns Z_DATA_ERROR.
-
- inflate() should normally be called until it returns Z_STREAM_END or an
- error. However if all decompression is to be performed in a single step
- (a single call of inflate), the parameter flush should be set to
- Z_FINISH. In this case all pending input is processed and all pending
- output is flushed; avail_out must be large enough to hold all the
- uncompressed data. (The size of the uncompressed data may have been saved
- by the compressor for this purpose.) The next operation on this stream must
- be inflateEnd to deallocate the decompression state. The use of Z_FINISH
- is never required, but can be used to inform inflate that a faster routine
- may be used for the single inflate() call.
-
- inflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if the end of the
- compressed data has been reached and all uncompressed output has been
- produced, Z_NEED_DICT if a preset dictionary is needed at this point (see
- inflateSetDictionary below), Z_DATA_ERROR if the input data was corrupted,
- Z_STREAM_ERROR if the stream structure was inconsistent (for example if
- next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory,
- Z_BUF_ERROR if no progress is possible or if there was not enough room in
- the output buffer when Z_FINISH is used. In the Z_DATA_ERROR case, the
- application may then call inflateSync to look for a good compression block.
- In the Z_NEED_DICT case, strm->adler is set to the Adler32 value of the
- dictionary chosen by the compressor.
-*/
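
  Correspondingly, a hedged sketch of the inflate() loop described above
  (again a standard user-space zlib; the names are illustrative). It keeps
  calling inflate() while the output buffer keeps filling up:

      #include <string.h>
      #include <zlib.h>

      long inflate_all(const unsigned char *in, unsigned long in_len,
                       unsigned char *out, unsigned long out_size)
      {
          z_stream strm;
          int ret;

          memset(&strm, 0, sizeof(strm));
          strm.next_in  = (Bytef *)in;
          strm.avail_in = in_len;
          if (inflateInit(&strm) != Z_OK)
              return -1;
          do {
              /* provide more output space and call again, per the text */
              strm.next_out  = out + strm.total_out;
              strm.avail_out = out_size - strm.total_out;
              ret = inflate(&strm, Z_NO_FLUSH);
          } while (ret == Z_OK && strm.avail_out == 0);
          inflateEnd(&strm);
          return (ret == Z_STREAM_END) ? (long)strm.total_out : -1;
      }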
-
-
-extern int EXPORT inflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
- was inconsistent. In the error case, msg may be set but then points to a
- static string (which must not be deallocated).
-*/
-
- /* Advanced functions */
-
-/*
- The following functions are needed only in some special applications.
-*/
-
-/*
-extern int EXPORT deflateInit2 OF((z_streamp strm,
- int level,
- int method,
- int windowBits,
- int memLevel,
- int strategy));
-
- This is another version of deflateInit with more compression options. The
- fields next_in, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The method parameter is the compression method. It must be Z_DEFLATED in
- this version of the library. (Method 9 will allow a 64K history buffer and
- partial block flushes.)
-
- The windowBits parameter is the base two logarithm of the window size
- (the size of the history buffer). It should be in the range 8..15 for this
- version of the library (the value 16 will be allowed for method 9). Larger
- values of this parameter result in better compression at the expense of
- memory usage. The default value is 15 if deflateInit is used instead.
-
- The memLevel parameter specifies how much memory should be allocated
- for the internal compression state. memLevel=1 uses minimum memory but
- is slow and reduces compression ratio; memLevel=9 uses maximum memory
- for optimal speed. The default value is 8. See zconf.h for total memory
- usage as a function of windowBits and memLevel.
-
- The strategy parameter is used to tune the compression algorithm. Use the
- value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
- filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
- string match). Filtered data consists mostly of small values with a
- somewhat random distribution. In this case, the compression algorithm is
- tuned to compress them better. The effect of Z_FILTERED is to force more
- Huffman coding and less string matching; it is somewhat intermediate
- between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. The strategy parameter only affects
- the compression ratio but not the correctness of the compressed output even
- if it is not set appropriately.
-
- If next_in is not null, the library will also use this buffer to hold
- some history information; the buffer must either hold the entire input
- data, or have at least 1<<(windowBits+1) bytes and be writable. If next_in
- is null, the library will allocate its own history buffer (and leave next_in
- null). next_out need not be provided here but must be provided by the
- application for the next call of deflate().
-
- If the history buffer is provided by the application, next_in must
- never be changed by the application since the compressor maintains
- information inside this buffer from call to call; the application
- must provide more input only by increasing avail_in. next_in is always
- reset by the library in this case.
-
- deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
- not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
- an invalid method). msg is set to null if there is no error message.
- deflateInit2 does not perform any compression: this will be done by
- deflate().
-*/
-
-extern int EXPORT deflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the compression dictionary (history buffer) from the given
- byte sequence without producing any compressed output. This function must
- be called immediately after deflateInit or deflateInit2, before any call
- of deflate. The compressor and decompressor must use exactly the same
- dictionary (see inflateSetDictionary).
- The dictionary should consist of strings (byte sequences) that are likely
- to be encountered later in the data to be compressed, with the most commonly
- used strings preferably put towards the end of the dictionary. Using a
- dictionary is most useful when the data to be compressed is short and
- can be predicted with good accuracy; the data can then be compressed better
- than with the default empty dictionary. In this version of the library,
- only the last 32K bytes of the dictionary are used.
- Upon return of this function, strm->adler is set to the Adler32 value
- of the dictionary; the decompressor may later use this value to determine
- which dictionary has been used by the compressor. (The Adler32 value
- applies to the whole dictionary even if only a subset of the dictionary is
- actually used by the compressor.)
-
- deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state
- is inconsistent (for example if deflate has already been called for this
- stream). deflateSetDictionary does not perform any compression: this will
- be done by deflate().
-*/
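
  A short sketch of the dictionary handshake this implies, using a
  hypothetical dictionary that both ends must agree on out of band:

      #include <zlib.h>

      /* Illustrative shared dictionary (not part of this header). */
      static const unsigned char dict[] = "field=value&field=value&";

      /* Decompressor side: when inflate() reports Z_NEED_DICT, strm->adler
       * identifies the dictionary; supply it and retry. */
      int inflate_with_dict(z_streamp strm)
      {
          int ret = inflate(strm, Z_NO_FLUSH);

          if (ret == Z_NEED_DICT &&
              inflateSetDictionary(strm, dict, sizeof(dict) - 1) == Z_OK)
              ret = inflate(strm, Z_NO_FLUSH);
          return ret;
      }

  The compressor side calls deflateSetDictionary(strm, dict, sizeof(dict) - 1)
  immediately after deflateInit, as required above.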
-
-extern int EXPORT deflateCopy OF((z_streamp dest,
- z_streamp source));
-/*
- Sets the destination stream as a complete copy of the source stream. If
- the source stream is using an application-supplied history buffer, a new
- buffer is allocated for the destination stream. The compressed output
- buffer is always application-supplied. It's the responsibility of the
- application to provide the correct values of next_out and avail_out for the
- next call of deflate.
-
- This function can be useful when several compression strategies will be
- tried, for example when there are several ways of pre-processing the input
- data with a filter. The streams that will be discarded should then be freed
- by calling deflateEnd. Note that deflateCopy duplicates the internal
- compression state which can be quite large, so this strategy is slow and
- can consume lots of memory.
-
- deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
- (such as zalloc being NULL). msg is left unchanged in both source and
- destination.
-*/
-
-extern int EXPORT deflateReset OF((z_streamp strm));
-/*
- This function is equivalent to deflateEnd followed by deflateInit,
- but does not free and reallocate all the internal compression state.
- The stream will keep the same compression level and any other attributes
- that may have been set by deflateInit2.
-
- deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-extern int EXPORT deflateParams OF((z_streamp strm, int level, int strategy));
-/*
- Dynamically update the compression level and compression strategy.
- This can be used to switch between compression and straight copy of
- the input data, or to switch to a different kind of input data requiring
- a different strategy. If the compression level is changed, the input
- available so far is compressed with the old level (and may be flushed);
- the new level will take effect only at the next call of deflate().
-
- Before the call of deflateParams, the stream state must be set as for
- a call of deflate(), since the currently available input may have to
- be compressed and flushed. In particular, strm->avail_out must be non-zero.
-
- deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
- stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
- if strm->avail_out was zero.
-*/
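
  For example, a hedged sketch of using deflateParams() to pass
  already-compressed data through as a straight copy and then restore the
  default level (it assumes strm is mid-stream with valid next_out/avail_out,
  as the text requires; the helper names are illustrative):

      #include <zlib.h>

      int begin_stored_chunk(z_streamp strm)
      {
          /* input buffered so far is compressed at the old level first */
          return deflateParams(strm, Z_NO_COMPRESSION, Z_DEFAULT_STRATEGY);
      }

      int end_stored_chunk(z_streamp strm)
      {
          return deflateParams(strm, Z_DEFAULT_COMPRESSION, Z_DEFAULT_STRATEGY);
      }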
-
-extern int EXPORT deflateOutputPending OF((z_streamp strm));
-/*
- Returns the number of bytes of output which are immediately
- available from the compressor (i.e. without any further input
- or flush).
-*/
-
-/*
-extern int EXPORT inflateInit2 OF((z_streamp strm,
- int windowBits));
-
- This is another version of inflateInit with more options. The
- fields next_out, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The windowBits parameter is the base two logarithm of the maximum window
- size (the size of the history buffer). It should be in the range 8..15 for
- this version of the library (the value 16 will be allowed soon). The
- default value is 15 if inflateInit is used instead. If a compressed stream
- with a larger window size is given as input, inflate() will return with
- the error code Z_DATA_ERROR instead of trying to allocate a larger window.
-
- If next_out is not null, the library will use this buffer for the history
- buffer; the buffer must either be large enough to hold the entire output
- data, or have at least 1<<windowBits bytes. If next_out is null, the
- library will allocate its own buffer (and leave next_out null). next_in
- need not be provided here but must be provided by the application for the
- next call of inflate().
-
- If the history buffer is provided by the application, next_out must
- never be changed by the application since the decompressor maintains
- history information inside this buffer from call to call; the application
- can only reset next_out to the beginning of the history buffer when
- avail_out is zero and all output has been consumed.
-
- inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
- not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
- windowBits < 8). msg is set to null if there is no error message.
- inflateInit2 does not perform any decompression: this will be done by
- inflate().
-*/
-
-extern int EXPORT inflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the decompression dictionary (history buffer) from the given
- uncompressed byte sequence. This function must be called immediately after
- a call of inflate if this call returned Z_NEED_DICT. The dictionary chosen
- by the compressor can be determined from the Adler32 value returned by this
- call of inflate. The compressor and decompressor must use exactly the same
- dictionary (see deflateSetDictionary).
-
- inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state is
- inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
- expected one (incorrect Adler32 value). inflateSetDictionary does not
- perform any decompression: this will be done by subsequent calls of
- inflate().
-*/
-
-extern int EXPORT inflateSync OF((z_streamp strm));
-/*
- Skips invalid compressed data until the special marker (see deflate()
- above) can be found, or until all available input is skipped. No output
- is provided.
-
- inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
- if no more input was provided, Z_DATA_ERROR if no marker has been found,
- or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
- case, the application may save the current value of total_in which
- indicates where valid compressed data was found. In the error case, the
- application may repeatedly call inflateSync, providing more input each time,
- until success or end of the input data.
-*/
-
-extern int EXPORT inflateReset OF((z_streamp strm));
-/*
- This function is equivalent to inflateEnd followed by inflateInit,
- but does not free and reallocate all the internal decompression state.
- The stream will keep attributes that may have been set by inflateInit2.
-
- inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-extern int inflateIncomp OF((z_stream *strm));
-/*
- This function adds the data at next_in (avail_in bytes) to the output
- history without performing any output. There must be no pending output,
- and the decompressor must be expecting to see the start of a block.
- Calling this function is equivalent to decompressing a stored block
- containing the data at next_in (except that the data is not output).
-*/
-
- /* utility functions */
-
-/*
- The following utility functions are implemented on top of the
- basic stream-oriented functions. To simplify the interface, some
- default options are assumed (compression level, window size,
- standard memory allocation functions). The source code of these
- utility functions can easily be modified if you need special options.
-*/
-
-extern int EXPORT compress OF((Bytef *dest, uLongf *destLen,
- const Bytef *source, uLong sourceLen));
-/*
- Compresses the source buffer into the destination buffer. sourceLen is
- the byte length of the source buffer. Upon entry, destLen is the total
- size of the destination buffer, which must be at least 0.1% larger than
- sourceLen plus 12 bytes. Upon exit, destLen is the actual size of the
- compressed buffer.
- This function can be used to compress a whole file at once if the
- input file is mmap'ed.
- compress returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if there was not enough room in the output
- buffer.
-*/
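
  A worked example of the sizing rule above, with illustrative names; the
  integer form sourceLen + sourceLen/1000 + 13 rounds the 0.1% up and so
  always satisfies "0.1% larger plus 12 bytes":

      #include <stdlib.h>
      #include <zlib.h>

      unsigned char *compress_copy(const unsigned char *src, uLong src_len,
                                   uLongf *dst_len)
      {
          uLong bound = src_len + src_len / 1000 + 13;
          unsigned char *dst = malloc(bound);

          if (dst == NULL)
              return NULL;
          *dst_len = bound;                 /* in: destination buffer size */
          if (compress(dst, dst_len, src, src_len) != Z_OK) {
              free(dst);
              return NULL;
          }
          return dst;                       /* *dst_len: compressed size */
      }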
-
-extern int EXPORT uncompress OF((Bytef *dest, uLongf *destLen,
- const Bytef *source, uLong sourceLen));
-/*
- Decompresses the source buffer into the destination buffer. sourceLen is
- the byte length of the source buffer. Upon entry, destLen is the total
- size of the destination buffer, which must be large enough to hold the
- entire uncompressed data. (The size of the uncompressed data must have
- been saved previously by the compressor and transmitted to the decompressor
- by some mechanism outside the scope of this compression library.)
- Upon exit, destLen is the actual size of the uncompressed data.
- This function can be used to decompress a whole file at once if the
- input file is mmap'ed.
-
- uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if there was not enough room in the output
- buffer, or Z_DATA_ERROR if the input data was corrupted.
-*/
-
-
-typedef voidp gzFile;
-
-extern gzFile EXPORT gzopen OF((const char *path, const char *mode));
-/*
- Opens a gzip (.gz) file for reading or writing. The mode parameter
- is as in fopen ("rb" or "wb") but can also include a compression level
- ("wb9"). gzopen can be used to read a file which is not in gzip format;
- in this case gzread will directly read from the file without decompression.
- gzopen returns NULL if the file could not be opened or if there was
- insufficient memory to allocate the (de)compression state; errno
- can be checked to distinguish the two cases (if errno is zero, the
- zlib error is Z_MEM_ERROR).
-*/
-
-extern gzFile EXPORT gzdopen OF((int fd, const char *mode));
-/*
- gzdopen() associates a gzFile with the file descriptor fd. File
- descriptors are obtained from calls like open, dup, creat, pipe or
- fileno (if the file has been previously opened with fopen).
- The mode parameter is as in gzopen.
- The next call of gzclose on the returned gzFile will also close the
- file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file
- descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode).
- gzdopen returns NULL if there was insufficient memory to allocate
- the (de)compression state.
-*/
-
-extern int EXPORT gzread OF((gzFile file, voidp buf, unsigned len));
-/*
- Reads the given number of uncompressed bytes from the compressed file.
- If the input file was not in gzip format, gzread copies the given number
- of bytes into the buffer.
- gzread returns the number of uncompressed bytes actually read (0 for
- end of file, -1 for error). */
-
-extern int EXPORT gzwrite OF((gzFile file, const voidp buf, unsigned len));
-/*
- Writes the given number of uncompressed bytes into the compressed file.
- gzwrite returns the number of uncompressed bytes actually written
- (0 in case of error).
-*/
-
-extern int EXPORT gzflush OF((gzFile file, int flush));
-/*
- Flushes all pending output into the compressed file. The parameter
- flush is as in the deflate() function. The return value is the zlib
- error number (see function gzerror below). gzflush returns Z_OK if
- the flush parameter is Z_FINISH and all output could be flushed.
- gzflush should be called only when strictly necessary because it can
- degrade compression.
-*/
-
-extern int EXPORT gzclose OF((gzFile file));
-/*
- Flushes all pending output if necessary, closes the compressed file
- and deallocates all the (de)compression state. The return value is the zlib
- error number (see function gzerror below).
-*/
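
  Putting the gz* layer together, a small hedged sketch (user-space zlib;
  "example.gz" is a stand-in path):

      #include <stdio.h>
      #include <zlib.h>

      int dump_gz_to_stdout(void)
      {
          char buf[4096];
          int n;
          gzFile f = gzopen("example.gz", "rb");

          if (f == NULL)
              return -1;
          /* gzread returns uncompressed bytes: 0 at EOF, -1 on error */
          while ((n = gzread(f, buf, sizeof(buf))) > 0)
              fwrite(buf, 1, n, stdout);
          gzclose(f);
          return (n < 0) ? -1 : 0;
      }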
-
-extern const char * EXPORT gzerror OF((gzFile file, int *errnum));
-/*
- Returns the error message for the last error which occurred on the
- given compressed file. errnum is set to the zlib error number. If an
- error occurred in the file system and not in the compression library,
- errnum is set to Z_ERRNO and the application may consult errno
- to get the exact error code.
-*/
-
- /* checksum functions */
-
-/*
- These functions are not related to compression but are exported
- anyway because they might be useful in applications using the
- compression library.
-*/
-
-extern uLong EXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
-
-/*
- Update a running Adler-32 checksum with the bytes buf[0..len-1] and
- return the updated checksum. If buf is NULL, this function returns
- the required initial value for the checksum.
- An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
- much faster. Usage example:
-
- uLong adler = adler32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- adler = adler32(adler, buffer, length);
- }
- if (adler != original_adler) error();
-*/
-
-extern uLong EXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len));
-/*
- Update a running crc with the bytes buf[0..len-1] and return the updated
- crc. If buf is NULL, this function returns the required initial value
- for the crc. Pre- and post-conditioning (one's complement) is performed
- within this function so it shouldn't be done by the application.
- Usage example:
-
- uLong crc = crc32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- crc = crc32(crc, buffer, length);
- }
- if (crc != original_crc) error();
-*/
-
-
- /* various hacks, don't look :) */
-
-/* deflateInit and inflateInit are macros to allow checking the zlib version
- * and the compiler's view of z_stream:
- */
-extern int EXPORT deflateInit_ OF((z_streamp strm, int level,
- const char *version, int stream_size));
-extern int EXPORT inflateInit_ OF((z_streamp strm,
- const char *version, int stream_size));
-extern int EXPORT deflateInit2_ OF((z_streamp strm, int level, int method,
- int windowBits, int memLevel, int strategy,
- const char *version, int stream_size));
-extern int EXPORT inflateInit2_ OF((z_streamp strm, int windowBits,
- const char *version, int stream_size));
-#define deflateInit(strm, level) \
- deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
-#define inflateInit(strm) \
- inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
-#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
- deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
- (strategy), ZLIB_VERSION, sizeof(z_stream))
-#define inflateInit2(strm, windowBits) \
- inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
-
-#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
- struct internal_state {int dummy;}; /* hack for buggy compilers */
-#endif
-
-uLongf *get_crc_table OF((void)); /* can be used by asm versions of crc32() */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _ZLIB_H */
-/* --- zlib.h */
* Wait for the lockd process to exit, but since we're holding
* the lockd semaphore, we can't wait around forever ...
*/
- current->sigpending = 0;
+ current->work.sigpending = 0;
interruptible_sleep_on_timeout(&lockd_exit, HZ);
if (nlmsvc_pid) {
printk(KERN_WARNING
return 0;
}
- current->sigpending = 0;
+ current->work.sigpending = 0;
want_lock++;
while (hash_count || hash_lock) {
interruptible_sleep_on(&hash_wait);
#include <linux/stat.h>
#define __NO_VERSION__
#include <linux/module.h>
+#include <linux/smp_lock.h>
#include <asm/bitops.h>
static ssize_t proc_file_read(struct file * file, char * buf,
static loff_t
proc_file_lseek(struct file * file, loff_t offset, int orig)
{
+ lock_kernel();
+
switch (orig) {
case 0:
if (offset < 0)
- return -EINVAL;
+ goto out;
file->f_pos = offset;
+ unlock_kernel();
return(file->f_pos);
case 1:
if (offset + file->f_pos < 0)
- return -EINVAL;
+ goto out;
file->f_pos += offset;
+ unlock_kernel();
return(file->f_pos);
case 2:
- return(-EINVAL);
+ goto out;
default:
- return(-EINVAL);
+ goto out;
}
+
+out:
+ unlock_kernel();
+ return -EINVAL;
}
/*
loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
{
long long retval;
+ struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
+ down(&inode->i_sem);
switch (origin) {
case 2:
- offset += file->f_dentry->d_inode->i_size;
+ offset += inode->i_size;
break;
case 1:
offset += file->f_pos;
}
retval = -EINVAL;
- if (offset>=0 && offset<=file->f_dentry->d_inode->i_sb->s_maxbytes) {
+ if (offset>=0 && offset<=inode->i_sb->s_maxbytes) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_reada = 0;
}
retval = offset;
}
+ up(&inode->i_sem);
return retval;
}
{
long long retval;
+ lock_kernel();
switch (origin) {
case 2:
offset += file->f_dentry->d_inode->i_size;
}
retval = offset;
}
+ unlock_kernel();
return retval;
}
static inline loff_t llseek(struct file *file, loff_t offset, int origin)
{
loff_t (*fn)(struct file *, loff_t, int);
- loff_t retval;
fn = default_llseek;
if (file->f_op && file->f_op->llseek)
fn = file->f_op->llseek;
- lock_kernel();
- retval = fn(file, offset, origin);
- unlock_kernel();
- return retval;
+ return fn(file, offset, origin);
}
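
  With the BKL acquisition dropped from this dispatcher, an ->llseek() that
  does not use generic_file_llseek has to lock for itself, as proc_file_lseek
  and default_llseek above now do. A minimal sketch of that pattern for a
  hypothetical driver (absolute seeks only, for brevity):

      static loff_t example_llseek(struct file *file, loff_t offset, int orig)
      {
          loff_t ret = -EINVAL;

          lock_kernel();
          if (orig == 0 && offset >= 0) {
              file->f_pos = offset;
              ret = offset;
          }
          unlock_kernel();
          return ret;
      }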
asmlinkage off_t sys_lseek(unsigned int fd, off_t offset, unsigned int origin)
O_TARGET := reiserfs.o
obj-y := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o super.o prints.o objectid.o \
-lbalance.o ibalance.o stree.o hashes.o buffer2.o tail_conversion.o journal.o resize.o tail_conversion.o version.o item_ops.o ioctl.o procfs.o
+lbalance.o ibalance.o stree.o hashes.o buffer2.o tail_conversion.o journal.o resize.o item_ops.o ioctl.o procfs.o
obj-m := $(O_TARGET)
to free a list of blocks at once. -Hans */
/* I wonder if it would be less modest
now that we use journaling. -Hans */
-void reiserfs_free_block (struct reiserfs_transaction_handle *th, unsigned long block)
+static void _reiserfs_free_block (struct reiserfs_transaction_handle *th, unsigned long block)
{
struct super_block * s = th->t_super;
struct reiserfs_super_block * rs;
struct buffer_head ** apbh;
int nr, offset;
- RFALSE(!s, "vs-4060: trying to free block on nonexistent device");
- RFALSE(is_reusable (s, block, 1) == 0, "vs-4070: can not free such block");
-
PROC_INFO_INC( s, free_block );
rs = SB_DISK_SUPER_BLOCK (s);
get_bit_address (s, block, &nr, &offset);
- /* mark it before we clear it, just in case */
- journal_mark_freed(th, s, block) ;
+ if (nr >= sb_bmap_nr (rs)) {
+ reiserfs_warning ("vs-4075: reiserfs_free_block: "
+ "block %lu is out of range on %s\n",
+ block, bdevname(s->s_dev));
+ return;
+ }
reiserfs_prepare_for_journal(s, apbh[nr], 1 ) ;
s->s_dirt = 1;
}
+void reiserfs_free_block (struct reiserfs_transaction_handle *th,
+ unsigned long block) {
+ struct super_block * s = th->t_super;
+ RFALSE(!s, "vs-4061: trying to free block on nonexistent device");
+ RFALSE(is_reusable (s, block, 1) == 0, "vs-4071: can not free such block");
+ /* mark it before we clear it, just in case */
+ journal_mark_freed(th, s, block) ;
+ _reiserfs_free_block(th, block) ;
+}
+
+/* preallocated blocks don't need to be run through journal_mark_freed */
+void reiserfs_free_prealloc_block (struct reiserfs_transaction_handle *th,
+ unsigned long block) {
+ struct super_block * s = th->t_super;
+
+ RFALSE(!s, "vs-4060: trying to free block on nonexistent device");
+ RFALSE(is_reusable (s, block, 1) == 0, "vs-4070: can not free such block");
+ _reiserfs_free_block(th, block) ;
+}
/* beginning from offset-th bit in bmap_nr-th bitmap block,
find_forward finds the closest zero bit. It returns 1 and zero
goto free_and_return ;
}
search_start = new_block ;
- if (search_start >= reiserfs_get_journal_block(s) &&
- search_start < (reiserfs_get_journal_block(s) + JOURNAL_BLOCK_COUNT)) {
- reiserfs_warning("vs-4130: reiserfs_new_blocknrs: trying to allocate log block %lu\n",
- search_start) ;
- search_start++ ;
- amount_needed++ ;
- continue ;
- }
-
+
+ /* make sure the block is not of journal or reserved area */
+ if (is_block_in_log_or_reserved_area(s, search_start)) {
+ reiserfs_warning("vs-4130: reiserfs_new_blocknrs: trying to allocate log block %lu\n",
+ search_start) ;
+ search_start++ ;
+ amount_needed++ ;
+ continue ;
+ }
+
+
reiserfs_prepare_for_journal(s, SB_AP_BITMAP(s)[i], 1) ;
RFALSE( buffer_locked (SB_AP_BITMAP (s)[i]) ||
** to be grouped towards the start of the border
*/
border = le32_to_cpu(INODE_PKEY(p_s_inode)->k_dir_id) % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
- } else {
- /* why would we want to delcare a local variable to this if statement
- ** name border????? -chris
- ** unsigned long border = 0;
- */
- if (!reiserfs_hashed_relocation(th->t_super)) {
+ } else if (!reiserfs_hashed_relocation(th->t_super)) {
hash_in = le32_to_cpu((INODE_PKEY(p_s_inode))->k_dir_id);
/* I wonder if the CPU cost of the
hash will obscure the layout
hash_out = keyed_hash(((char *) (&hash_in)), 4);
border = hash_out % (SB_BLOCK_COUNT(th->t_super) - bstart - 1) ;
- }
}
border += bstart ;
allocated[0] = 0 ; /* important. Allows a check later on to see if at
*free_blocknrs = 0;
blks = PREALLOCATION_SIZE-1;
for (blks_gotten=0; blks_gotten<PREALLOCATION_SIZE; blks_gotten++) {
+
ret = do_reiserfs_new_blocknrs(th, free_blocknrs, search_start,
1/*amount_needed*/,
0/*for root reserved*/,
static void __discard_prealloc (struct reiserfs_transaction_handle * th,
struct reiserfs_inode_info *ei)
{
+ unsigned long save = ei->i_prealloc_block ;
while (ei->i_prealloc_count > 0) {
- reiserfs_free_block(th,ei->i_prealloc_block);
+ reiserfs_free_prealloc_block(th,ei->i_prealloc_block);
ei->i_prealloc_block++;
ei->i_prealloc_count --;
}
+ ei->i_prealloc_block = save;
list_del_init(&(ei->i_prealloc_list));
}
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
-
-/*
- * Contains code from
- *
- * linux/include/linux/lock.h and linux/fs/buffer.c /linux/fs/minix/fsync.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/locks.h>
/*
* wait_buffer_until_released
* reiserfs_bread
- * reiserfs_getblk
- * get_new_buffer
*/
-
-
/* when we allocate a new block (get_new_buffer, get_empty_nodes) and
get buffer for it, it is possible that it is held by someone else
or even by this process. In this function we wait until all other
then it creates a new buffer and schedules I/O to read the
block. */
/* The function is NOT SCHEDULE-SAFE! */
-
struct buffer_head * reiserfs_bread (struct super_block *super, int n_block)
{
struct buffer_head *result;
return result;
}
-/* This function looks for a buffer which contains a given block. If
- the block is in cache it returns it, otherwise it returns a new
- buffer which is not uptodate. This is called by reiserfs_bread and
- other functions. Note that get_new_buffer ought to be called this
- and this ought to be called get_new_buffer, since this doesn't
- actually get the block off of the disk. */
-/* The function is NOT SCHEDULE-SAFE! */
-
-struct buffer_head * reiserfs_getblk(struct super_block *sb, int n_block)
-{
- return sb_getblk(sb, n_block);
-}
-
-#ifdef NEW_GET_NEW_BUFFER
-
-/* returns one buffer with a blocknr near blocknr. */
-static int get_new_buffer_near_blocknr(
- struct super_block * p_s_sb,
- int blocknr,
- struct buffer_head ** pp_s_new_bh,
- struct path * p_s_path
- ) {
- unsigned long n_new_blocknumber = 0;
- int n_ret_value,
- n_repeat = CARRY_ON;
-
-#ifdef CONFIG_REISERFS_CHECK
- int repeat_counter = 0;
-
- if (!blocknr)
- printk ("blocknr passed to get_new_buffer_near_blocknr was 0");
-#endif
-
-
- if ( (n_ret_value = reiserfs_new_blocknrs (p_s_sb, &n_new_blocknumber,
- blocknr, 1)) == NO_DISK_SPACE )
- return NO_DISK_SPACE;
-
- *pp_s_new_bh = reiserfs_getblk(p_s_sb, n_new_blocknumber);
- if ( buffer_uptodate(*pp_s_new_bh) ) {
-
- RFALSE( buffer_dirty(*pp_s_new_bh) || (*pp_s_new_bh)->b_dev == NODEV,
- "PAP-14080: invalid uptodate buffer %b for the new block",
- *pp_s_new_bh);
-
- /* Free path buffers to prevent deadlock. */
- /* It is possible that this process has the buffer, which this function is getting, already in
- its path, and is responsible for double incrementing the value of b_count. If we recalculate
- the path after schedule we can avoid risking an endless loop. This problematic situation is
- possible in a multiple processing environment. Suppose process 1 has acquired a path P; then
- process 2 balanced and remove block A from the tree. Process 1 continues and runs
- get_new_buffer, that returns buffer with block A. If node A was on the path P, then it will
- have b_count == 2. If we now will simply wait in while ( (*pp_s_new_bh)->b_count > 1 ) we get
- into an endless loop, as nobody will release this buffer and the current process holds buffer
- twice. That is why we do decrement_counters_in_path(p_s_path) before waiting until b_count
- becomes 1. (if there were other processes holding node A, then eventually we will get a
- moment, when all of them released a buffer). */
- if ( atomic_read (&((*pp_s_new_bh)->b_count)) > 1 ) {
- decrement_counters_in_path(p_s_path);
- n_ret_value |= SCHEDULE_OCCURRED;
- }
-
- while ( atomic_read (&((*pp_s_new_bh)->b_count)) > 1 ) {
-
-#ifdef REISERFS_INFO
- printk("get_new_buffer() calls schedule to decrement b_count\n");
-#endif
-
-#ifdef CONFIG_REISERFS_CHECK
- if ( ! (++repeat_counter % 10000) )
- printk("get_new_buffer(%u): counter(%d) too big", current->pid, repeat_counter);
-#endif
- yield();
- }
-
-#ifdef CONFIG_REISERFS_CHECK
- if ( buffer_dirty(*pp_s_new_bh) || (*pp_s_new_bh)->b_dev == NODEV ) {
- print_buffer_head(*pp_s_new_bh,"get_new_buffer");
- reiserfs_panic(p_s_sb, "PAP-14090: get_new_buffer: invalid uptodate buffer %b for the new block(case 2)", *pp_s_new_bh);
- }
-#endif
-
- }
- else {
- ;
-
- RFALSE( atomic_read (&((*pp_s_new_bh)->b_count)) != 1,
- "PAP-14100: not uptodate buffer %b for the new block has b_count more than one",
- *pp_s_new_bh);
-
- }
- return (n_ret_value | n_repeat);
-}
-
-
-/* returns the block number of the last unformatted node, assumes p_s_key_to_search.k_offset is a byte in the tail of
- the file, Useful for when you want to append to a file, and convert a direct item into an unformatted node near the
- last unformatted node of the file. Putting the unformatted node near the direct item is potentially very bad to do.
- If there is no unformatted node in the file, then we return the block number of the direct item. */
-/* The function is NOT SCHEDULE-SAFE! */
-inline int get_last_unformatted_node_blocknr_of_file( struct key * p_s_key_to_search, struct super_block * p_s_sb,
- struct buffer_head * p_s_bh,
- struct path * p_unf_search_path, struct inode * p_s_inode)
-
-{
- struct key unf_key_to_search;
- struct item_head * p_s_ih;
- int n_pos_in_item;
- struct buffer_head * p_indirect_item_bh;
-
- copy_key(&unf_key_to_search,p_s_key_to_search);
- unf_key_to_search.k_uniqueness = TYPE_INDIRECT;
- unf_key_to_search.k_offset = REISERFS_I(p_s_inode)->i_first_direct_byte - 1;
-
- /* p_s_key_to_search->k_offset - MAX_ITEM_LEN(p_s_sb->s_blocksize); */
- if (search_for_position_by_key (p_s_sb, &unf_key_to_search, p_unf_search_path, &n_pos_in_item) == POSITION_FOUND)
- {
- p_s_ih = B_N_PITEM_HEAD(p_indirect_item_bh = PATH_PLAST_BUFFER(p_unf_search_path), PATH_LAST_POSITION(p_unf_search_path));
- return (B_I_POS_UNFM_POINTER(p_indirect_item_bh, p_s_ih, n_pos_in_item));
- }
- /* else */
- printk("reiser-1800: search for unformatted node failed, p_s_key_to_search->k_offset = %u, unf_key_to_search.k_offset = %u, MAX_ITEM_LEN(p_s_sb->s_blocksize) = %ld, debug this\n", p_s_key_to_search->k_offset, unf_key_to_search.k_offset, MAX_ITEM_LEN(p_s_sb->s_blocksize) );
- print_buffer_head(PATH_PLAST_BUFFER(p_unf_search_path), "the buffer holding the item before the key we failed to find");
- print_block_head(PATH_PLAST_BUFFER(p_unf_search_path), "the block head");
- return 0; /* keeps the compiler quiet */
-}
-
-
- /* hasn't been out of disk space tested */
-/* The function is NOT SCHEDULE-SAFE! */
-static int get_buffer_near_last_unf ( struct super_block * p_s_sb, struct key * p_s_key_to_search,
- struct inode * p_s_inode, struct buffer_head * p_s_bh,
- struct buffer_head ** pp_s_un_bh, struct path * p_s_search_path)
-{
- int unf_blocknr = 0, /* blocknr from which we start search for a free block for an unformatted node, if 0
- then we didn't find an unformatted node though we might have found a file hole */
- n_repeat = CARRY_ON;
- struct key unf_key_to_search;
- struct path unf_search_path;
-
- copy_key(&unf_key_to_search,p_s_key_to_search);
- unf_key_to_search.k_uniqueness = TYPE_INDIRECT;
-
- if (
- (REISERFS_I(p_s_inode)->i_first_direct_byte > 4095) /* i_first_direct_byte gets used for all sorts of
- crap other than what the name indicates, thus
- testing to see if it is 0 is not enough */
- && (REISERFS_I(p_s_inode)->i_first_direct_byte < MAX_KEY_OFFSET) /* if there is no direct item then
- i_first_direct_byte = MAX_KEY_OFFSET */
- )
- {
- /* actually, we don't want the last unformatted node, we want the last unformatted node
- which is before the current file offset */
- unf_key_to_search.k_offset = ((REISERFS_I(p_s_inode)->i_first_direct_byte -1) < unf_key_to_search.k_offset) ? REISERFS_I(p_s_inode)->i_first_direct_byte -1 : unf_key_to_search.k_offset;
-
- while (unf_key_to_search.k_offset > -1)
- {
- /* This is our poorly documented way of initializing paths. -Hans */
- init_path (&unf_search_path);
- /* get the blocknr from which we start the search for a free block. */
- unf_blocknr = get_last_unformatted_node_blocknr_of_file( p_s_key_to_search, /* assumes this points to the file tail */
- p_s_sb, /* lets us figure out the block size */
- p_s_bh, /* if there is no unformatted node in the file,
- then it returns p_s_bh->b_blocknr */
- &unf_search_path,
- p_s_inode
- );
-/* printk("in while loop: unf_blocknr = %d, *pp_s_un_bh = %p\n", unf_blocknr, *pp_s_un_bh); */
- if (unf_blocknr)
- break;
- else /* release the path and search again, this could be really slow for huge
- holes.....better to spend the coding time adding compression though.... -Hans */
- {
- /* Vladimir, is it a problem that I don't brelse these buffers ?-Hans */
- decrement_counters_in_path(&unf_search_path);
- unf_key_to_search.k_offset -= 4096;
- }
- }
- if (unf_blocknr) {
- n_repeat |= get_new_buffer_near_blocknr(p_s_sb, unf_blocknr, pp_s_un_bh, p_s_search_path);
- }
- else { /* all unformatted nodes are holes */
- n_repeat |= get_new_buffer_near_blocknr(p_s_sb, p_s_bh->b_blocknr, pp_s_un_bh, p_s_search_path);
- }
- }
- else { /* file has no unformatted nodes */
- n_repeat |= get_new_buffer_near_blocknr(p_s_sb, p_s_bh->b_blocknr, pp_s_un_bh, p_s_search_path);
-/* printk("in else: unf_blocknr = %d, *pp_s_un_bh = %p\n", unf_blocknr, *pp_s_un_bh); */
-/* print_path (0, p_s_search_path); */
- }
-
- return n_repeat;
-}
-
-#endif /* NEW_GET_NEW_BUFFER */
-
-
-#ifdef OLD_GET_NEW_BUFFER
-
-/* The function is NOT SCHEDULE-SAFE! */
-int get_new_buffer(
- struct reiserfs_transaction_handle *th,
- struct buffer_head * p_s_bh,
- struct buffer_head ** pp_s_new_bh,
- struct path * p_s_path
- ) {
- unsigned long n_new_blocknumber = 0;
- int n_repeat;
- struct super_block * p_s_sb = th->t_super;
-
- if ( (n_repeat = reiserfs_new_unf_blocknrs (th, &n_new_blocknumber, p_s_bh->b_blocknr)) == NO_DISK_SPACE )
- return NO_DISK_SPACE;
-
- *pp_s_new_bh = reiserfs_getblk(p_s_sb, n_new_blocknumber);
- if (atomic_read (&(*pp_s_new_bh)->b_count) > 1) {
- /* Free path buffers to prevent deadlock which can occur in the
- situation like : this process holds p_s_path; Block
- (*pp_s_new_bh)->b_blocknr is on the path p_s_path, but it is
- not necessary, that *pp_s_new_bh is in the tree; process 2
- could remove it from the tree and freed block
- (*pp_s_new_bh)->b_blocknr. Reiserfs_new_blocknrs in above
- returns block (*pp_s_new_bh)->b_blocknr. Reiserfs_getblk gets
- buffer for it, and it has b_count > 1. If we now will simply
- wait in while ( (*pp_s_new_bh)->b_count > 1 ) we get into an
- endless loop, as nobody will release this buffer and the
- current process holds buffer twice. That is why we do
- decrement_counters_in_path(p_s_path) before waiting until
- b_count becomes 1. (if there were other processes holding node
- pp_s_new_bh, then eventually we will get a moment, when all of
- them released a buffer). */
- decrement_counters_in_path(p_s_path);
- wait_buffer_until_released (*pp_s_new_bh);
- n_repeat |= SCHEDULE_OCCURRED;
- }
-
- RFALSE( atomic_read (&((*pp_s_new_bh)->b_count)) != 1 ||
- buffer_dirty (*pp_s_new_bh),
- "PAP-14100: not free or dirty buffer %b for the new block",
- *pp_s_new_bh);
-
- return n_repeat;
-}
-
-#endif /* OLD_GET_NEW_BUFFER */
-
-
-#ifdef GET_MANY_BLOCKNRS
- /* code not yet functional */
-get_next_blocknr (
- unsigned long * p_blocknr_array, /* we get a whole bunch of blocknrs all at once for
- the write. This is better than getting them one at
- a time. */
- unsigned long ** p_blocknr_index, /* pointer to current offset into the array. */
- unsigned long blocknr_array_length
-)
-{
- unsigned long return_value;
-
- if (*p_blocknr_index < p_blocknr_array + blocknr_array_length) {
- return_value = **p_blocknr_index;
- **p_blocknr_index = 0;
- *p_blocknr_index++;
- return (return_value);
- }
- else
- {
- kfree (p_blocknr_array);
- }
-}
-#endif /* GET_MANY_BLOCKNRS */
-
fsync: reiserfs_dir_fsync,
};
-/*
- * directories can handle most operations...
- */
-struct inode_operations reiserfs_dir_inode_operations = {
- //&reiserfs_dir_operations, /* default_file_ops */
- create: reiserfs_create,
- lookup: reiserfs_lookup,
- link: reiserfs_link,
- unlink: reiserfs_unlink,
- symlink: reiserfs_symlink,
- mkdir: reiserfs_mkdir,
- rmdir: reiserfs_rmdir,
- mknod: reiserfs_mknod,
- rename: reiserfs_rename,
-};
-
int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, int datasync) {
lock_kernel();
reiserfs_commit_for_inode(dentry->d_inode) ;
}
/* directory continues in the right neighboring block */
- set_cpu_key_k_offset (&pos_key, le_key_k_offset (ITEM_VERSION_1, rkey));
+ set_cpu_key_k_offset (&pos_key, le_key_k_offset (KEY_FORMAT_3_5, rkey));
} /* while */
reiserfs_check_path(&path_to_entry) ;
return 0;
}
+
+/* compose directory item containing "." and ".." entries (entries are
+ not aligned to 4 byte boundary) */
+/* the last four params are LE */
+void make_empty_dir_item_v1 (char * body, __u32 dirid, __u32 objid,
+ __u32 par_dirid, __u32 par_objid)
+{
+ struct reiserfs_de_head * deh;
+
+ memset (body, 0, EMPTY_DIR_SIZE_V1);
+ deh = (struct reiserfs_de_head *)body;
+
+ /* direntry header of "." */
+ put_deh_offset( &(deh[0]), DOT_OFFSET );
+  /* these two are from make_le_item_head, and are LE */
+ deh[0].deh_dir_id = dirid;
+ deh[0].deh_objectid = objid;
+ deh[0].deh_state = 0; /* Endian safe if 0 */
+ put_deh_location( &(deh[0]), EMPTY_DIR_SIZE_V1 - strlen( "." ));
+ mark_de_visible(&(deh[0]));
+
+ /* direntry header of ".." */
+ put_deh_offset( &(deh[1]), DOT_DOT_OFFSET);
+ /* key of ".." for the root directory */
+  /* these two are from the inode, and are LE */
+ deh[1].deh_dir_id = par_dirid;
+ deh[1].deh_objectid = par_objid;
+ deh[1].deh_state = 0; /* Endian safe if 0 */
+ put_deh_location( &(deh[1]), deh_location( &(deh[0]) ) - strlen( ".." ) );
+ mark_de_visible(&(deh[1]));
+
+ /* copy ".." and "." */
+ memcpy (body + deh_location( &(deh[0]) ), ".", 1);
+ memcpy (body + deh_location( &(deh[1]) ), "..", 2);
+}
+
+/* compose directory item containing "." and ".." entries */
+void make_empty_dir_item (char * body, __u32 dirid, __u32 objid,
+ __u32 par_dirid, __u32 par_objid)
+{
+ struct reiserfs_de_head * deh;
+
+ memset (body, 0, EMPTY_DIR_SIZE);
+ deh = (struct reiserfs_de_head *)body;
+
+ /* direntry header of "." */
+ put_deh_offset( &(deh[0]), DOT_OFFSET );
+ /* these two are from make_le_item_head, and are are LE */
+  /* these two are from make_le_item_head, and are LE */
+ deh[0].deh_objectid = objid;
+ deh[0].deh_state = 0; /* Endian safe if 0 */
+ put_deh_location( &(deh[0]), EMPTY_DIR_SIZE - ROUND_UP( strlen( "." ) ) );
+ mark_de_visible(&(deh[0]));
+
+ /* direntry header of ".." */
+ put_deh_offset( &(deh[1]), DOT_DOT_OFFSET );
+ /* key of ".." for the root directory */
+ /* these two are from the inode, and are are LE */
+  /* these two are from the inode, and are LE */
+ deh[1].deh_objectid = par_objid;
+ deh[1].deh_state = 0; /* Endian safe if 0 */
+ put_deh_location( &(deh[1]), deh_location( &(deh[0])) - ROUND_UP( strlen( ".." ) ) );
+ mark_de_visible(&(deh[1]));
+
+ /* copy ".." and "." */
+ memcpy (body + deh_location( &(deh[0]) ), ".", 1);
+ memcpy (body + deh_location( &(deh[1]) ), "..", 2);
+}
int pos_in_item;
int zeros_num;
-#if 0
- if (tb->insert_size [0] % 4) {
- reiserfs_panic (tb->tb_sb, "balance_leaf: wrong insert_size %d",
- tb->insert_size [0]);
- }
-#endif
-
PROC_INFO_INC( tb -> tb_sb, balance_at[ 0 ] );
/* Make balance in case insert_size[0] < 0 */
/* fast out for when nothing needs to be done */
if ((atomic_read(&inode->i_count) > 1 ||
- REISERFS_I(inode)->i_pack_on_close ||
+ !(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
!tail_has_to_be_packed(inode)) &&
REISERFS_I(inode)->i_prealloc_count <= 0) {
return 0;
journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ;
if (atomic_read(&inode->i_count) <= 1 &&
- REISERFS_I(inode)->i_pack_on_close &&
+ (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
tail_has_to_be_packed (inode)) {
/* if regular file is released by last holder and it has been
appended (we append by unformatted node only) or its direct
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
*/
- if (inode_items_version(inode) == ITEM_VERSION_1 &&
+ if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
attr->ia_size > MAX_NON_LFS)
return -EFBIG ;
+
+ /* fill in hole pointers in the expanding truncate case. */
+ if (attr->ia_size > inode->i_size) {
+ error = generic_cont_expand(inode, attr->ia_size) ;
+ if (REISERFS_I(inode)->i_prealloc_count > 0) {
+ struct reiserfs_transaction_handle th ;
+ /* we're changing at most 2 bitmaps, inode + super */
+ journal_begin(&th, inode->i_sb, 4) ;
+ reiserfs_discard_prealloc (&th, inode);
+ journal_end(&th, inode->i_sb, 4) ;
+ }
+ if (error)
+ return error ;
+ }
}
+ if ((((attr->ia_valid & ATTR_UID) && (attr->ia_uid & ~0xffff)) ||
+ ((attr->ia_valid & ATTR_GID) && (attr->ia_gid & ~0xffff))) &&
+ (get_inode_sd_version (inode) == STAT_DATA_V1))
+ /* stat data of format v3.5 has 16 bit uid and gid */
+ return -EINVAL;
+
error = inode_change_ok(inode, attr) ;
if (!error)
inode_setattr(inode, attr) ;
RFALSE( ! *p_n_blocknr,
"PAP-8135: reiserfs_new_blocknrs failed when got new blocks");
- p_s_new_bh = reiserfs_getblk(p_s_sb, *p_n_blocknr);
+ p_s_new_bh = sb_getblk(p_s_sb, *p_n_blocknr);
if (atomic_read (&(p_s_new_bh->b_count)) > 1) {
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
// entry would eat 2 byte of virtual node space
return sb->s_blocksize;
-#if 0
- size = sizeof (struct virtual_node) + sizeof (struct virtual_item);
- ih = B_N_PITEM_HEAD (bh, 0);
- nr_items = B_NR_ITEMS (bh);
- for (i = 0; i < nr_items; i ++, ih ++) {
- /* each item occupies some space in virtual node */
- size += sizeof (struct virtual_item);
- if (is_direntry_le_ih (ih))
- /* each entry and new one occupeis 2 byte in the virtual node */
- size += (ih_entry_count(ih) + 1) * sizeof( __u16 );
- }
-
- /* 1 bit for each bitmap block to note whether bitmap block was
- dirtied in the operation */
- /* size += (SB_BMAP_NR (sb) * 2 / 8 + 4);*/
- return size;
-#endif
}
"at the beginning of fix_nodes or not in tree (mode %c)", p_s_tbS0, p_s_tbS0, n_op_mode);
}
- // FIXME: new items have to be of 8 byte multiples. Including new
- // directory items those look like old ones
- /*
- if (p_s_tb->insert_size[0] % 8)
- reiserfs_panic (p_s_tb->tb_sb, "vs-: fix_nodes: incorrect insert_size %d, "
- "mode %c",
- p_s_tb->insert_size[0], n_op_mode);
- */
-
/* Check parameters. */
switch (n_op_mode) {
case M_INSERT:
#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
#define GET_BLOCK_NO_ISEM 8 /* i_sem is not held, don't preallocate */
+static int reiserfs_get_block (struct inode * inode, sector_t block,
+ struct buffer_head * bh_result, int create);
//
// initially this function was derived from minix or ext2's analog and
// evolved as the prototype did
reiserfs_delete_object (&th, inode);
pop_journal_writer(windex) ;
- reiserfs_release_objectid (&th, inode->i_ino);
journal_end(&th, inode->i_sb, jbegin_count) ;
- up (&inode->i_sem);
+ up (&inode->i_sem);
+
+ /* all items of file are deleted, so we can remove "save" link */
+ remove_save_link (inode, 0/* not truncate */);
} else {
/* no object items are in the tree */
;
void make_cpu_key (struct cpu_key * key, struct inode * inode, loff_t offset,
int type, int length )
{
- _make_cpu_key (key, inode_items_version (inode), le32_to_cpu (INODE_PKEY (inode)->k_dir_id),
- le32_to_cpu (INODE_PKEY (inode)->k_objectid),
- offset, type, length);
+ _make_cpu_key (key, get_inode_item_key_version (inode), le32_to_cpu (INODE_PKEY (inode)->k_dir_id),
+ le32_to_cpu (INODE_PKEY (inode)->k_objectid),
+ offset, type, length);
}
// files which were created in the earlier version can not be longer,
// than 2 gb
//
-int file_capable (struct inode * inode, long block)
+static int file_capable (struct inode * inode, long block)
{
- if (inode_items_version (inode) != ITEM_VERSION_1 || // it is new file.
+ if (get_inode_item_key_version (inode) != KEY_FORMAT_3_5 || // it is new file.
block < (1 << (31 - inode->i_sb->s_blocksize_bits))) // old file, but 'block' is inside of 2gb
return 1;
pathrelse (&path);
if (p)
kunmap(bh_result->b_page) ;
- if ((args & GET_BLOCK_NO_HOLE)) {
+    // We do not return -ENOENT if there is a hole but the page is uptodate,
+    // because that means there is some mmap'ed data associated with it that
+    // has yet to be written to disk.
+ if ((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page) ) {
return -ENOENT ;
}
return 0 ;
ret = 0 ;
if (blocknr) {
map_bh(bh_result, inode->i_sb, blocknr);
- } else if ((args & GET_BLOCK_NO_HOLE)) {
+ } else
+    // We do not return -ENOENT if there is a hole but the page is uptodate,
+    // because that means there is some mmap'ed data associated with it that
+    // has yet to be written to disk.
+ if ((args & GET_BLOCK_NO_HOLE) && !Page_Uptodate(bh_result->b_page) ) {
ret = -ENOENT ;
- }
+ }
+
pathrelse (&path);
if (p)
kunmap(bh_result->b_page) ;
*/
if (buffer_uptodate(bh_result)) {
goto finished ;
+ } else
+ /*
+ ** grab_tail_page can trigger calls to reiserfs_get_block on up to date
+ ** pages without any buffers. If the page is up to date, we don't want
+ ** read old data off disk. Set the up to date bit on the buffer instead
+    ** to read old data off disk. Set the up to date bit on the buffer instead
+ */
+ if (Page_Uptodate(bh_result->b_page)) {
+ mark_buffer_uptodate(bh_result, 1);
+ goto finished ;
}
// read file tail into part of page
/* bad.... */
lock_kernel() ;
th.t_trans_id = 0 ;
- version = inode_items_version (inode);
+ version = get_inode_item_key_version (inode);
if (block < 0) {
unlock_kernel();
return ret;
}
- REISERFS_I(inode)->i_pack_on_close = 1 ;
+ REISERFS_I(inode)->i_flags |= i_pack_on_close_mask ;
windex = push_journal_writer("reiserfs_get_block") ;
}
if (retval == POSITION_FOUND) {
reiserfs_warning ("vs-825: reiserfs_get_block: "
- "%k should not be found\n", &key);
+ "%K should not be found\n", &key);
retval = -EEXIST;
if (allocated_block_nr)
reiserfs_free_block (&th, allocated_block_nr);
struct stat_data_v1 * sd = (struct stat_data_v1 *)B_I_PITEM (bh, ih);
unsigned long blocks;
- inode_items_version (inode) = ITEM_VERSION_1;
+ set_inode_item_key_version (inode, KEY_FORMAT_3_5);
+ set_inode_sd_version (inode, STAT_DATA_V1);
inode->i_mode = sd_v1_mode(sd);
inode->i_nlink = sd_v1_nlink(sd);
inode->i_uid = sd_v1_uid(sd);
inode->i_generation = sd_v2_generation(sd);
if (S_ISDIR (inode->i_mode) || S_ISLNK (inode->i_mode))
- inode_items_version (inode) = ITEM_VERSION_1;
+ set_inode_item_key_version (inode, KEY_FORMAT_3_5);
else
- inode_items_version (inode) = ITEM_VERSION_2;
+ set_inode_item_key_version (inode, KEY_FORMAT_3_6);
REISERFS_I(inode)->i_first_direct_byte = 0;
}
- REISERFS_I(inode)->i_pack_on_close = 0;
+ REISERFS_I(inode)->i_flags = 0;
REISERFS_I(inode)->i_prealloc_block = 0;
REISERFS_I(inode)->i_prealloc_count = 0;
REISERFS_I(inode)->i_trans_id = 0;
REISERFS_I(inode)->i_trans_index = 0;
/* nopack = 0, by default */
- REISERFS_I(inode)->nopack = 0;
+ REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
pathrelse (path);
if (S_ISREG (inode->i_mode)) {
/* set version 1, version 2 could be used too, because stat data
key is the same in both versions */
- key.version = ITEM_VERSION_1;
+ key.version = KEY_FORMAT_3_5;
key.on_disk_key.k_dir_id = dirino;
key.on_disk_key.k_objectid = inode->i_ino;
key.on_disk_key.u.k_offset_v1.k_offset = SD_OFFSET;
}
init_inode (inode, &path_to_sd);
+
+ /* It is possible that knfsd is trying to access inode of a file
+ that is being removed from the disk by some other thread. As we
+ update sd on unlink all that is required is to check for nlink
+ here. This bug was first found by Sizif when debugging
+ SquidNG/Butterfly, forgotten, and found again after Philippe
+ Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
+
+     A more logical fix would require changes in fs/inode.c:iput() to
+ remove inode from hash-table _after_ fs cleaned disk stuff up and
+ in iget() to return NULL if I_FREEING inode is found in
+ hash-table. */
+ /* Currently there is one place where it's ok to meet inode with
+ nlink==0: processing of open-unlinked and half-truncated files
+ during mount (fs/reiserfs/super.c:finish_unfinished()). */
+ if( ( inode -> i_nlink == 0 ) &&
+ ! inode -> i_sb -> u.reiserfs_sb.s_is_unlinked_ok ) {
+ reiserfs_warning( "vs-13075: reiserfs_read_inode2: "
+ "dead inode read from disk %K. "
+		"This is likely to be a race with knfsd. Ignore\n",
+ &key );
+ make_bad_inode( inode );
+ }
+
reiserfs_check_path(&path_to_sd) ; /* init inode should be relsing */
}
}
struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, __u32 *data,
- int len, int fhtype, int parent) {
+ int len, int fhtype, int parent) {
struct cpu_key key ;
struct inode *inode = NULL ;
struct list_head *lp;
}
}
-void reiserfs_dirty_inode (struct inode * inode) {
- struct reiserfs_transaction_handle th ;
-
- if (inode->i_sb->s_flags & MS_RDONLY) {
- reiserfs_warning("clm-6006: writing inode %lu on readonly FS\n",
- inode->i_ino) ;
- return ;
- }
- lock_kernel() ;
-
- /* this is really only used for atime updates, so they don't have
- ** to be included in O_SYNC or fsync
- */
- journal_begin(&th, inode->i_sb, 1) ;
- reiserfs_update_sd (&th, inode);
- journal_end(&th, inode->i_sb, 1) ;
- unlock_kernel() ;
-}
-
-
/* FIXME: no need any more. right? */
int reiserfs_sync_inode (struct reiserfs_transaction_handle *th, struct inode * inode)
{
struct cpu_key key;
int retval;
- _make_cpu_key (&key, ITEM_VERSION_1, le32_to_cpu (ih->ih_key.k_dir_id),
+ _make_cpu_key (&key, KEY_FORMAT_3_5, le32_to_cpu (ih->ih_key.k_dir_id),
le32_to_cpu (ih->ih_key.k_objectid), DOT_OFFSET, TYPE_DIRENTRY, 3/*key length*/);
/* compose item head for new item. Directories consist of items of
old type (ITEM_VERSION_1). Do not set key (second arg is 0), it
is done by reiserfs_new_inode */
if (old_format_only (sb)) {
- make_le_item_head (ih, 0, ITEM_VERSION_1, DOT_OFFSET, TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
+ make_le_item_head (ih, 0, KEY_FORMAT_3_5, DOT_OFFSET, TYPE_DIRENTRY, EMPTY_DIR_SIZE_V1, 2);
make_empty_dir_item_v1 (body, ih->ih_key.k_dir_id, ih->ih_key.k_objectid,
INODE_PKEY (dir)->k_dir_id,
INODE_PKEY (dir)->k_objectid );
} else {
- make_le_item_head (ih, 0, ITEM_VERSION_1, DOT_OFFSET, TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
+ make_le_item_head (ih, 0, KEY_FORMAT_3_5, DOT_OFFSET, TYPE_DIRENTRY, EMPTY_DIR_SIZE, 2);
make_empty_dir_item (body, ih->ih_key.k_dir_id, ih->ih_key.k_objectid,
INODE_PKEY (dir)->k_dir_id,
struct cpu_key key;
int retval;
- _make_cpu_key (&key, ITEM_VERSION_1,
+ _make_cpu_key (&key, KEY_FORMAT_3_5,
le32_to_cpu (ih->ih_key.k_dir_id),
le32_to_cpu (ih->ih_key.k_objectid),
1, TYPE_DIRECT, 3/*key length*/);
- make_le_item_head (ih, 0, ITEM_VERSION_1, 1, TYPE_DIRECT, item_len, 0/*free_space*/);
+ make_le_item_head (ih, 0, KEY_FORMAT_3_5, 1, TYPE_DIRECT, item_len, 0/*free_space*/);
/* look for place in the tree for new item */
retval = search_item (sb, &key, path);
inode->i_generation = ++event;
#endif
if (old_format_only (sb))
- make_le_item_head (&ih, 0, ITEM_VERSION_1, SD_OFFSET, TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
+ make_le_item_head (&ih, 0, KEY_FORMAT_3_5, SD_OFFSET, TYPE_STAT_DATA, SD_V1_SIZE, MAX_US_INT);
else
- make_le_item_head (&ih, 0, ITEM_VERSION_2, SD_OFFSET, TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
+ make_le_item_head (&ih, 0, KEY_FORMAT_3_6, SD_OFFSET, TYPE_STAT_DATA, SD_SIZE, MAX_US_INT);
/* key to search for correct place for new stat data */
- _make_cpu_key (&key, ITEM_VERSION_2, le32_to_cpu (ih.ih_key.k_dir_id),
+ _make_cpu_key (&key, KEY_FORMAT_3_6, le32_to_cpu (ih.ih_key.k_dir_id),
le32_to_cpu (ih.ih_key.k_objectid), SD_OFFSET, TYPE_STAT_DATA, 3/*key length*/);
/* find proper place for inserting of stat data */
REISERFS_I(inode)->i_first_direct_byte = S_ISLNK(mode) ? 1 :
U32_MAX/*NO_BYTES_IN_DIRECT_ITEM*/;
- REISERFS_I(inode)->i_pack_on_close = 0;
+ REISERFS_I(inode)->i_flags = 0;
REISERFS_I(inode)->i_prealloc_block = 0;
REISERFS_I(inode)->i_prealloc_count = 0;
- REISERFS_I(inode)->nopack = 0;
REISERFS_I(inode)->i_trans_id = 0;
REISERFS_I(inode)->i_trans_index = 0;
- if (old_format_only (sb))
+ if (old_format_only (sb)) {
+ if (inode->i_uid & ~0xffff || inode->i_gid & ~0xffff) {
+ pathrelse (&path_to_key);
+ /* i_uid or i_gid is too big to be stored in stat data v3.5 */
+ iput (inode);
+ *err = -EINVAL;
+ return NULL;
+ }
inode2sd_v1 (&sd, inode);
- else
+ } else
inode2sd (&sd, inode);
// these do not go to on-disk stat data
// format, other new objects will consist of new items)
memcpy (INODE_PKEY (inode), &(ih.ih_key), KEY_SIZE);
if (old_format_only (sb) || S_ISDIR(mode) || S_ISLNK(mode))
- inode_items_version (inode) = ITEM_VERSION_1;
+ set_inode_item_key_version (inode, KEY_FORMAT_3_5);
else
- inode_items_version (inode) = ITEM_VERSION_2;
-
+ set_inode_item_key_version (inode, KEY_FORMAT_3_6);
+ if (old_format_only (sb))
+ set_inode_sd_version (inode, STAT_DATA_V1);
+ else
+ set_inode_sd_version (inode, STAT_DATA_V2);
+
/* insert the stat data into the tree */
retval = reiserfs_insert_item (th, &path_to_key, &key, &ih, (char *)(&sd));
if (retval) {
** because the truncate might pack the item anyway
** (it will unmap bh if it packs).
*/
- journal_begin(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
+    /* it is enough to reserve space in the transaction for 2 balancings:
+       one for adding the "save" link and another for the first
+       cut_from_item. The extra 1 is for update_sd */
+ journal_begin(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1 ) ;
reiserfs_update_inode_transaction(p_s_inode) ;
windex = push_journal_writer("reiserfs_vfs_truncate_file") ;
+ if (update_timestamps)
+      /* we are doing a real truncate: if the system crashes before the last
+         transaction of the truncate gets committed - on reboot the file
+         appears either properly truncated or not truncated at all */
+ add_save_link (&th, p_s_inode, 1);
reiserfs_do_truncate (&th, p_s_inode, page, update_timestamps) ;
pop_journal_writer(windex) ;
- journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 ) ;
+ journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1 ) ;
+
+ if (update_timestamps)
+ remove_save_link (p_s_inode, 1/* truncate */);
if (page) {
length = offset & (blocksize - 1) ;
int bytes_copied = 0 ;
int copy_size ;
+ kmap(bh_result->b_page) ;
start_over:
lock_kernel() ;
journal_begin(&th, inode->i_sb, jbegin_count) ;
/* this is where we fill in holes in the file. */
if (use_get_block) {
- kmap(bh_result->b_page) ;
retval = reiserfs_get_block(inode, block, bh_result,
GET_BLOCK_CREATE | GET_BLOCK_NO_ISEM) ;
- kunmap(bh_result->b_page) ;
if (!retval) {
if (!buffer_mapped(bh_result) || bh_result->b_blocknr == 0) {
/* get_block failed to find a mapped unformatted node. */
}
}
}
+ kunmap(bh_result->b_page) ;
return retval ;
}
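
In the hunk above, the kmap()/kunmap() pair moves outside the lock_kernel()/journal_begin() window so the potentially blocking map is never taken while a transaction is open. A minimal user-space sketch of that ordering discipline (the mutex and the page stubs are illustrative stand-ins, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-ins for kmap()/kunmap(); the real calls may block */
static void map_page(void)   { puts("page mapped"); }
static void unmap_page(void) { puts("page unmapped"); }

int main(void)
{
    map_page();                         /* may block; no lock held yet */
    pthread_mutex_lock(&journal_lock);  /* transaction open */
    puts("fill hole via get_block");
    pthread_mutex_unlock(&journal_lock);
    unmap_page();                       /* only after the transaction ends */
    return 0;
}
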
/* we test for O_SYNC here so we can commit the transaction
** for any packed tails the file might have had
*/
- if (f->f_flags & O_SYNC) {
+ if (f && (f->f_flags & O_SYNC)) {
lock_kernel() ;
reiserfs_commit_for_inode(inode) ;
unlock_kernel();
return -EINVAL ;
}
/* ioctl already done */
- if (REISERFS_I(inode)->nopack) {
+ if (REISERFS_I(inode)->i_flags & i_nopack_mask) {
return 0 ;
}
lock_kernel();
write_from = inode->i_size & (blocksize - 1) ;
/* if we are on a block boundary, we are already unpacked. */
if ( write_from == 0) {
- REISERFS_I(inode)->nopack = 1;
+ REISERFS_I(inode)->i_flags |= i_nopack_mask;
goto out ;
}
/* conversion can change page contents, must flush */
flush_dcache_page(page) ;
- REISERFS_I(inode)->nopack = 1;
+ REISERFS_I(inode)->i_flags |= i_nopack_mask;
kunmap(page) ; /* mapped by prepare_write */
out_unlock:
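
The tail-conversion and ioctl hunks above replace the dedicated nopack and i_pack_on_close fields with bits in a single i_flags word tested through i_nopack_mask. A minimal sketch of that flag-bit pattern, with the bit position assumed rather than taken from the patch:

#include <stdio.h>

#define i_nopack_mask (1 << 0)   /* bit position assumed, not from the patch */

struct reiserfs_inode_info { unsigned long i_flags; };

int main(void)
{
    struct reiserfs_inode_info info = { 0 };

    info.i_flags |= i_nopack_mask;     /* set: tail packing disabled */
    printf("nopack: %d\n", !!(info.i_flags & i_nopack_mask));
    info.i_flags &= ~i_nopack_mask;    /* cleared again, e.g. on next open */
    printf("nopack: %d\n", !!(info.i_flags & i_nopack_mask));
    return 0;
}
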
static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int can_dirty(struct reiserfs_journal_cnode *cn) ;
+static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks);
+static int release_journal_dev( struct super_block *super,
+ struct reiserfs_journal *journal );
static void init_journal_hash(struct super_block *p_s_sb) {
memset(SB_JOURNAL(p_s_sb)->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
atomic_set(&(jl->j_commit_flushing), 1) ;
- if (jl->j_len > JOURNAL_TRANS_MAX) {
+ if (jl->j_len > SB_JOURNAL_TRANS_MAX(s)) {
reiserfs_panic(s, "journal-512: flush_commit_list: length is %lu, list number %d\n", jl->j_len, jl - SB_JOURNAL_LIST(s)) ;
return 0 ;
}
retry:
count = 0 ;
for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
- bn = reiserfs_get_journal_block(s) + (jl->j_start+i) % JOURNAL_BLOCK_COUNT;
- tbh = sb_get_hash_table(s, bn) ;
+ bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) % SB_ONDISK_JOURNAL_SIZE(s);
+ tbh = get_hash_table(SB_JOURNAL_DEV(s), bn, s->s_blocksize) ;
/* kill this sanity check */
if (count > (orig_commit_left + 2)) {
if (count > 0) {
for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 &&
i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
- bn = reiserfs_get_journal_block(s) + (jl->j_start + i) % JOURNAL_BLOCK_COUNT ;
- tbh = sb_get_hash_table(s, bn) ;
+ bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
+ tbh = get_hash_table(SB_JOURNAL_DEV(s), bn, s->s_blocksize) ;
wait_on_buffer(tbh) ;
if (!buffer_uptodate(tbh)) {
atomic_set(&(jl->j_flushing), 1) ;
count = 0 ;
- if (j_len_saved > JOURNAL_TRANS_MAX) {
+ if (j_len_saved > SB_JOURNAL_TRANS_MAX(s)) {
reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, list number %d\n", j_len_saved, jl - SB_JOURNAL_LIST(s)) ;
atomic_dec(&(jl->j_flushing)) ;
return 0 ;
** being flushed
*/
if (flushall) {
- update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % JOURNAL_BLOCK_COUNT, jl->j_trans_id) ;
+ update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
}
remove_all_from_journal_list(s, jl, 0) ;
jl->j_len = 0 ;
wake_up(&reiserfs_commit_thread_wait) ;
sleep_on(&reiserfs_commit_thread_done) ;
+ release_journal_dev( p_s_sb, SB_JOURNAL( p_s_sb ) );
free_journal_ram(p_s_sb) ;
return 0 ;
struct reiserfs_journal_commit *commit) {
if (le32_to_cpu(commit->j_trans_id) != le32_to_cpu(desc->j_trans_id) ||
le32_to_cpu(commit->j_len) != le32_to_cpu(desc->j_len) ||
- le32_to_cpu(commit->j_len) > JOURNAL_TRANS_MAX ||
+ le32_to_cpu(commit->j_len) > SB_JOURNAL_TRANS_MAX(p_s_sb) ||
le32_to_cpu(commit->j_len) <= 0
) {
return 1 ;
*newest_mount_id) ;
return -1 ;
}
- offset = d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb) ;
+ offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
/* ok, we have a journal description block, lets see if the transaction was valid */
- c_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + ((offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT)) ;
+ c_bh = bread(SB_JOURNAL_DEV(p_s_sb), SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+ ((offset + le32_to_cpu(desc->j_len) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)),
+ p_s_sb->s_blocksize) ;
if (!c_bh)
return 0 ;
commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
"journal_transaction_is_valid, commit offset %ld had bad "
"time %d or length %d\n",
- c_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb),
+ c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
le32_to_cpu(commit->j_trans_id),
le32_to_cpu(commit->j_len));
brelse(c_bh) ;
brelse(c_bh) ;
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
"transaction start offset %lu, len %d id %d\n",
- d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb),
+ d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
le32_to_cpu(desc->j_len), le32_to_cpu(desc->j_trans_id)) ;
return 1 ;
} else {
unsigned long trans_offset ;
int i;
- d_bh = sb_bread(p_s_sb, cur_dblock) ;
+ d_bh = bread(SB_JOURNAL_DEV(p_s_sb), cur_dblock, p_s_sb->s_blocksize) ;
if (!d_bh)
return 1 ;
desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
- trans_offset = d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb) ;
+ trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
"journal_read_transaction, offset %lu, len %d mount_id %d\n",
- d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb),
+ d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
le32_to_cpu(desc->j_len), le32_to_cpu(desc->j_mount_id)) ;
if (le32_to_cpu(desc->j_trans_id) < oldest_trans_id) {
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
"journal_read_trans skipping because %lu is too old\n",
- cur_dblock - reiserfs_get_journal_block(p_s_sb)) ;
+ cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
brelse(d_bh) ;
return 1 ;
}
brelse(d_bh) ;
return 1 ;
}
- c_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT)) ;
+ c_bh = bread(SB_JOURNAL_DEV(p_s_sb), SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+ ((trans_offset + le32_to_cpu(desc->j_len) + 1) %
+ SB_ONDISK_JOURNAL_SIZE(p_s_sb)), p_s_sb->s_blocksize) ;
if (!c_bh) {
brelse(d_bh) ;
return 1 ;
if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
"commit offset %ld had bad time %d or length %d\n",
- c_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb),
+ c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
le32_to_cpu(commit->j_trans_id), le32_to_cpu(commit->j_len));
brelse(c_bh) ;
brelse(d_bh) ;
}
/* get all the buffer heads */
for(i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
- log_blocks[i] = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + (trans_offset + 1 + i) % JOURNAL_BLOCK_COUNT);
+ log_blocks[i] = getblk(SB_JOURNAL_DEV(p_s_sb), SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb), p_s_sb->s_blocksize);
if (i < JOURNAL_TRANS_HALF) {
real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
} else {
real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF])) ;
}
- if (real_blocks[i]->b_blocknr >= reiserfs_get_journal_block(p_s_sb) &&
- real_blocks[i]->b_blocknr < (reiserfs_get_journal_block(p_s_sb)+JOURNAL_BLOCK_COUNT)) {
+      /* make sure we don't try to replay onto the log or reserved area */
+ if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
reiserfs_warning("journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block\n") ;
brelse_array(log_blocks, i) ;
brelse_array(real_blocks, i) ;
}
brelse(real_blocks[i]) ;
}
- cur_dblock = reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 2) % JOURNAL_BLOCK_COUNT) ;
+ cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
"start to offset %ld\n",
- cur_dblock - reiserfs_get_journal_block(p_s_sb)) ;
+ cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
- SB_JOURNAL(p_s_sb)->j_start = cur_dblock - reiserfs_get_journal_block(p_s_sb) ;
+ SB_JOURNAL(p_s_sb)->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
SB_JOURNAL(p_s_sb)->j_trans_id = trans_id + 1;
brelse(c_bh) ;
int continue_replay = 1 ;
int ret ;
- cur_dblock = reiserfs_get_journal_block(p_s_sb) ;
- printk("reiserfs: checking transaction log (device %s) ...\n", p_s_sb->s_id) ;
+ cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
+ printk("reiserfs: checking transaction log (%s) for (%s)\n",
+ bdevname(SB_JOURNAL_DEV(p_s_sb)), p_s_sb->s_id) ;
start = CURRENT_TIME ;
/* step 1, read in the journal header block. Check the transaction it says
** is the first unflushed, and if that transaction is not valid,
** replay is done
*/
- SB_JOURNAL(p_s_sb)->j_header_bh = sb_bread(p_s_sb,
- reiserfs_get_journal_block(p_s_sb) +
- JOURNAL_BLOCK_COUNT) ;
+ SB_JOURNAL(p_s_sb)->j_header_bh = bread (SB_JOURNAL_DEV(p_s_sb),
+ SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+ SB_ONDISK_JOURNAL_SIZE(p_s_sb),
+ p_s_sb->s_blocksize) ;
if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
return 1 ;
}
jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
- le32_to_cpu(jh->j_first_unflushed_offset) < JOURNAL_BLOCK_COUNT &&
+ le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
- last_flush_start = reiserfs_get_journal_block(p_s_sb) +
+ last_flush_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
le32_to_cpu(jh->j_first_unflushed_offset) ;
last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
** there is nothing more we can do, and it makes no sense to read
** through the whole log.
*/
- d_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
+ d_bh = bread(SB_JOURNAL_DEV(p_s_sb), SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset), p_s_sb->s_blocksize) ;
ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
if (!ret) {
continue_replay = 0 ;
/* ok, there are transactions that need to be replayed. start with the first log block, find
** all the valid transactions, and pick out the oldest.
*/
- while(continue_replay && cur_dblock < (reiserfs_get_journal_block(p_s_sb) + JOURNAL_BLOCK_COUNT)) {
- d_bh = sb_bread(p_s_sb, cur_dblock) ;
+ while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
+ d_bh = bread(SB_JOURNAL_DEV(p_s_sb), cur_dblock, p_s_sb->s_blocksize) ;
ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
if (ret == 1) {
desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
newest_mount_id = le32_to_cpu(desc->j_mount_id) ;
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
"oldest_start to offset %lu, trans_id %lu\n",
- oldest_start - reiserfs_get_journal_block(p_s_sb),
+ oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
oldest_trans_id) ;
} else if (oldest_trans_id > le32_to_cpu(desc->j_trans_id)) {
/* one we just read was older */
oldest_start = d_bh->b_blocknr ;
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
"oldest_start to offset %lu, trans_id %lu\n",
- oldest_start - reiserfs_get_journal_block(p_s_sb),
+ oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
oldest_trans_id) ;
}
if (newest_mount_id < le32_to_cpu(desc->j_mount_id)) {
/* step three, starting at the oldest transaction, replay */
if (last_flush_start > 0) {
oldest_start = last_flush_start ;
- oldest_trans_id = last_flush_trans_id ;
+ oldest_trans_id = last_flush_trans_id + 1 ;
}
cur_dblock = oldest_start ;
if (oldest_trans_id) {
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
"from offset %lu, trans_id %lu\n",
- cur_dblock - reiserfs_get_journal_block(p_s_sb),
+ cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
oldest_trans_id) ;
}
} else if (ret != 0) {
break ;
}
- cur_dblock = reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start ;
+ cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start ;
replay_count++ ;
+ if (cur_dblock == oldest_start)
+ break;
}
if (oldest_trans_id == 0) {
break ;
}
wake_up(&reiserfs_commit_thread_done) ;
- interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5) ;
+ interruptible_sleep_on_timeout(&reiserfs_commit_thread_wait, 5 * HZ) ;
}
unlock_kernel() ;
wake_up(&reiserfs_commit_thread_done) ;
}
}
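
The timeout change above is worth a note: interruptible_sleep_on_timeout() takes jiffies, so the old literal 5 slept for 5/HZ seconds rather than the intended 5 seconds. A tiny sketch of the arithmetic, assuming the common HZ=100 of that era:

#include <stdio.h>

#define HZ 100   /* assumed; the common i386 value in the 2.5.3 era */

int main(void)
{
    printf("5 jiffies      = %4d ms\n", 5 * 1000 / HZ);       /* 50 ms   */
    printf("5 * HZ jiffies = %4d ms\n", 5 * HZ * 1000 / HZ);  /* 5000 ms */
    return 0;
}
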
+static int release_journal_dev( struct super_block *super,
+ struct reiserfs_journal *journal )
+{
+ int result;
+
+ result = 0;
+
+ if( journal -> j_dev_bd != NULL ) {
+ result = blkdev_put( journal -> j_dev_bd, BDEV_FS );
+ journal -> j_dev_bd = NULL;
+ }
+ if( journal -> j_dev_file != NULL ) {
+ result = filp_close( journal -> j_dev_file, NULL );
+ journal -> j_dev_file = NULL;
+ }
+ if( result != 0 ) {
+		reiserfs_warning("sh-457: release_journal_dev: Cannot release journal device: %i\n", result );
+ }
+ return result;
+}
+
+static int journal_init_dev( struct super_block *super,
+ struct reiserfs_journal *journal,
+ const char *jdev_name )
+{
+ int result;
+ kdev_t jdev;
+
+ result = 0;
+
+ journal -> j_dev_bd = NULL;
+ journal -> j_dev_file = NULL;
+ jdev = SB_JOURNAL_DEV( super ) =
+ SB_ONDISK_JOURNAL_DEVICE( super ) ?
+ to_kdev_t(SB_ONDISK_JOURNAL_DEVICE( super )) : super -> s_dev;
+	/* there is no "jdev" option and the journal is on a separate device */
+ if( ( !jdev_name || !jdev_name[ 0 ] ) &&
+ SB_ONDISK_JOURNAL_DEVICE( super ) ) {
+ journal -> j_dev_bd = bdget( kdev_t_to_nr( jdev ) );
+ if( journal -> j_dev_bd )
+ result = blkdev_get( journal -> j_dev_bd,
+ FMODE_READ | FMODE_WRITE, 0,
+ BDEV_FS );
+ else
+ result = -ENOMEM;
+ if( result != 0 )
+		printk( "sh-458: journal_init_dev: cannot init journal device '%s': %i\n",
+			kdevname( jdev ), result );
+
+ return result;
+ }
+
+	/* no "jdev" option and the journal is on the host device */
+ if( !jdev_name || !jdev_name[ 0 ] )
+ return 0;
+ journal -> j_dev_file = filp_open( jdev_name, 0, 0 );
+ if( !IS_ERR( journal -> j_dev_file ) ) {
+ struct inode *jdev_inode;
+
+ jdev_inode = journal -> j_dev_file -> f_dentry -> d_inode;
+ journal -> j_dev_bd = jdev_inode -> i_bdev;
+ if( !S_ISBLK( jdev_inode -> i_mode ) ) {
+			printk( "journal_init_dev: '%s' is not a block device\n", jdev_name );
+ result = -ENOTBLK;
+ } else if( journal -> j_dev_file -> f_vfsmnt -> mnt_flags & MNT_NODEV) {
+			printk( "journal_init_dev: Cannot use devices on '%s'\n", jdev_name );
+ result = -EACCES;
+ } else if( jdev_inode -> i_bdev == NULL ) {
+			printk( "journal_init_dev: bdev uninitialized for '%s'\n", jdev_name );
+ result = -ENOMEM;
+ } else if( ( result = blkdev_get( jdev_inode -> i_bdev,
+ FMODE_READ | FMODE_WRITE,
+ 0, BDEV_FS ) ) != 0 ) {
+			printk( "journal_init_dev: Cannot load device '%s': %i\n", jdev_name,
+				result );
+ } else
+ /* ok */
+ SB_JOURNAL_DEV( super ) =
+ to_kdev_t( jdev_inode -> i_bdev -> bd_dev );
+ } else {
+ result = PTR_ERR( journal -> j_dev_file );
+ journal -> j_dev_file = NULL;
+		printk( "journal_init_dev: Cannot open '%s': %i\n", jdev_name, result );
+ }
+ if( result != 0 ) {
+ release_journal_dev( super, journal );
+ }
+	printk( "journal_init_dev: journal device: %s\n", kdevname( SB_JOURNAL_DEV( super ) ) );
+ return result;
+}
+
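
Throughout these journal.c hunks, reiserfs_get_journal_block(s) + x % JOURNAL_BLOCK_COUNT becomes SB_ONDISK_JOURNAL_1st_BLOCK(s) + x % SB_ONDISK_JOURNAL_SIZE(s): the journal geometry is now read per superblock instead of being a compile-time constant. A minimal sketch of the wrap-around block arithmetic (the geometry in main() is made up):

#include <stdio.h>

/* block number of log entry i of a transaction starting at 'start' */
static unsigned long journal_block(unsigned long first_block,
                                   unsigned long size,
                                   unsigned long start,
                                   unsigned long i)
{
    return first_block + (start + i) % size;  /* wraps inside the window */
}

int main(void)
{
    /* made-up geometry: an 8192-block journal starting at block 18 */
    printf("%lu\n", journal_block(18, 8192, 8190, 5));  /* wraps to 21 */
    return 0;
}
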
/*
** must be called once on fs mount. calls journal_read for you
*/
-int journal_init(struct super_block *p_s_sb) {
- int num_cnodes = JOURNAL_BLOCK_COUNT * 2 ;
+int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_format) {
+ int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
+ struct buffer_head *bhjh;
+ struct reiserfs_super_block * rs;
+ struct reiserfs_journal_header *jh;
+ struct reiserfs_journal *journal;
if (sizeof(struct reiserfs_journal_commit) != 4096 ||
sizeof(struct reiserfs_journal_desc) != 4096
sizeof(struct reiserfs_journal_desc)) ;
return 1 ;
}
- /* sanity check to make sure they don't overflow the journal */
- if (JOURNAL_BLOCK_COUNT > reiserfs_get_journal_orig_size(p_s_sb)) {
- printk("journal-1393: current JOURNAL_BLOCK_COUNT (%d) is too big. This FS was created with a journal size of %lu blocks\n",
- JOURNAL_BLOCK_COUNT, reiserfs_get_journal_orig_size(p_s_sb)) ;
+
+ journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
+ if (!journal) {
+ printk("journal-1256: unable to get memory for journal structure\n") ;
return 1 ;
}
- SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
-
- if (!SB_JOURNAL(p_s_sb)) {
- printk("journal-1256: unable to get memory for journal structure\n") ;
+ memset(journal, 0, sizeof(struct reiserfs_journal)) ;
+
+ /* reserved for journal area support */
+ SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
+ REISERFS_OLD_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize +
+ SB_BMAP_NR(p_s_sb) + 1 :
+ REISERFS_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize + 2);
+
+ if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
+      printk( "sh-462: unable to initialize journal device\n");
+ return 1;
+ }
+
+ rs = SB_DISK_SUPER_BLOCK(p_s_sb);
+
+ /* read journal header */
+ bhjh = bread (SB_JOURNAL_DEV(p_s_sb),
+ SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb),
+ SB_BLOCKSIZE(p_s_sb));
+ if (!bhjh) {
+ printk("sh-459: unable to read journal header\n") ;
+ return 1 ;
+ }
+ jh = (struct reiserfs_journal_header *)(bhjh->b_data);
+
+  /* make sure that the journal matches the super block */
+ if (is_reiserfs_jr(rs) && (jh->jh_journal.jp_journal_magic != sb_jp_journal_magic(rs))) {
+ char jname[ 32 ];
+ char fname[ 32 ];
+
+ strcpy( jname, kdevname( SB_JOURNAL_DEV(p_s_sb) ) );
+ strcpy( fname, kdevname( p_s_sb->s_dev ) );
+    printk("sh-460: journal header magic %x (device %s) does not match "
+           "the magic found in super block %x (device %s)\n",
+ jh->jh_journal.jp_journal_magic, jname,
+ sb_jp_journal_magic(rs), fname);
+ brelse (bhjh);
return 1 ;
}
- memset(SB_JOURNAL(p_s_sb), 0, sizeof(struct reiserfs_journal)) ;
+
+ SB_JOURNAL_TRANS_MAX(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
+ SB_JOURNAL_MAX_BATCH(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
+ SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
+ SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) = JOURNAL_MAX_TRANS_AGE;
+
+ if (SB_JOURNAL_TRANS_MAX(p_s_sb)) {
+    /* make sure these parameters are available, assign them if they are not */
+ __u32 initial = SB_JOURNAL_TRANS_MAX(p_s_sb);
+ __u32 ratio = 1;
+
+ if (p_s_sb->s_blocksize < 4096)
+ ratio = 4096 / p_s_sb->s_blocksize;
+
+ if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/SB_JOURNAL_TRANS_MAX(p_s_sb) < JOURNAL_MIN_RATIO)
+ SB_JOURNAL_TRANS_MAX(p_s_sb) = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
+ if (SB_JOURNAL_TRANS_MAX(p_s_sb) > JOURNAL_TRANS_MAX_DEFAULT / ratio)
+ SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT / ratio;
+ if (SB_JOURNAL_TRANS_MAX(p_s_sb) < JOURNAL_TRANS_MIN_DEFAULT / ratio)
+ SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MIN_DEFAULT / ratio;
+
+ if (SB_JOURNAL_TRANS_MAX(p_s_sb) != initial)
+ printk ("sh-461: journal_init: wrong transaction max size (%u). Changed to %u\n",
+ initial, SB_JOURNAL_TRANS_MAX(p_s_sb));
+
+ SB_JOURNAL_MAX_BATCH(p_s_sb) = SB_JOURNAL_TRANS_MAX(p_s_sb)*
+ JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT;
+ }
+
+ if (!SB_JOURNAL_TRANS_MAX(p_s_sb)) {
+    /* the filesystem was created by an old version of mkreiserfs,
+       so this field contains zero */
+ SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT ;
+ SB_JOURNAL_MAX_BATCH(p_s_sb) = JOURNAL_MAX_BATCH_DEFAULT ;
+ SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = JOURNAL_MAX_COMMIT_AGE ;
+
+    /* for blocksize >= 4096 the max transaction size is 1024. For
+       blocksize < 4096 the max transaction size is decreased proportionally */
+ if (p_s_sb->s_blocksize < 4096) {
+ SB_JOURNAL_TRANS_MAX(p_s_sb) /= (4096 / p_s_sb->s_blocksize) ;
+ SB_JOURNAL_MAX_BATCH(p_s_sb) = (SB_JOURNAL_TRANS_MAX(p_s_sb)) * 9 / 10 ;
+ }
+ }
+ printk ("Reiserfs journal params: device %s, size %u, "
+ "journal first block %u, max trans len %u, max batch %u, "
+ "max commit age %u, max trans age %u\n",
+ kdevname( SB_JOURNAL_DEV(p_s_sb) ),
+ SB_ONDISK_JOURNAL_SIZE(p_s_sb),
+ SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
+ SB_JOURNAL_TRANS_MAX(p_s_sb),
+ SB_JOURNAL_MAX_BATCH(p_s_sb),
+ SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb),
+ SB_JOURNAL_MAX_TRANS_AGE(p_s_sb));
+
+ brelse (bhjh);
+
SB_JOURNAL(p_s_sb)->j_list_bitmap_index = 0 ;
SB_JOURNAL_LIST_INDEX(p_s_sb) = -10000 ; /* make sure flush_old_commits does not try to flush a list while replay is on */
CLONE_FS | CLONE_FILES | CLONE_VM) ;
}
return 0 ;
+
}
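
journal_init() above sanity-clamps an on-disk jp_journal_trans_max against the journal size and the block size before trusting it. A standalone sketch of that clamping, assuming the JOURNAL_* defaults of the time (trans max 1024, trans min 256, min ratio 2):

#include <stdio.h>

#define JOURNAL_TRANS_MAX_DEFAULT 1024   /* values assumed */
#define JOURNAL_TRANS_MIN_DEFAULT 256
#define JOURNAL_MIN_RATIO 2

static unsigned clamp_trans_max(unsigned trans_max, unsigned journal_size,
                                unsigned blocksize)
{
    unsigned ratio = blocksize < 4096 ? 4096 / blocksize : 1;

    if (journal_size / trans_max < JOURNAL_MIN_RATIO)
        trans_max = journal_size / JOURNAL_MIN_RATIO;
    if (trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
        trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
    if (trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
        trans_max = JOURNAL_TRANS_MIN_DEFAULT / ratio;
    return trans_max;
}

int main(void)
{
    /* 1k blocks: ratio 4, so the ceiling drops to 1024/4 = 256 */
    printf("%u\n", clamp_trans_max(4096, 8192, 1024));
    return 0;
}
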
/*
if (reiserfs_dont_log(th->t_super))
return 0 ;
if ( SB_JOURNAL(th->t_super)->j_must_wait > 0 ||
- (SB_JOURNAL(th->t_super)->j_len_alloc + new_alloc) >= JOURNAL_MAX_BATCH ||
+ (SB_JOURNAL(th->t_super)->j_len_alloc + new_alloc) >= SB_JOURNAL_MAX_BATCH(th->t_super) ||
atomic_read(&(SB_JOURNAL(th->t_super)->j_jlock)) ||
- (now - SB_JOURNAL(th->t_super)->j_trans_start_time) > JOURNAL_MAX_TRANS_AGE ||
- SB_JOURNAL(th->t_super)->j_cnode_free < (JOURNAL_TRANS_MAX * 3)) {
+ (now - SB_JOURNAL(th->t_super)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(th->t_super) ||
+ SB_JOURNAL(th->t_super)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(th->t_super) * 3)) {
return 1 ;
}
return 0 ;
** we don't sleep if there aren't other writers
*/
-
if ( (!join && SB_JOURNAL(p_s_sb)->j_must_wait > 0) ||
- ( !join && (SB_JOURNAL(p_s_sb)->j_len_alloc + nblocks + 2) >= JOURNAL_MAX_BATCH) ||
+ ( !join && (SB_JOURNAL(p_s_sb)->j_len_alloc + nblocks + 2) >= SB_JOURNAL_MAX_BATCH(p_s_sb)) ||
(!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0 && SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
- (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > JOURNAL_MAX_TRANS_AGE) ||
+ (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) ||
(!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) ) ||
- (!join && SB_JOURNAL(p_s_sb)->j_cnode_free < (JOURNAL_TRANS_MAX * 3))) {
+ (!join && SB_JOURNAL(p_s_sb)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3))) {
unlock_journal(p_s_sb) ; /* allow others to finish this transaction */
}
-int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
return do_journal_begin_r(th, p_s_sb, nblocks, 1) ;
}
/* this error means I've screwed up, and we've overflowed the transaction.
** Nothing can be done here, except make the FS readonly or panic.
*/
- if (SB_JOURNAL(p_s_sb)->j_len >= JOURNAL_TRANS_MAX) {
+ if (SB_JOURNAL(p_s_sb)->j_len >= SB_JOURNAL_TRANS_MAX(p_s_sb)) {
reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", SB_JOURNAL(p_s_sb)->j_len) ;
}
**
** returns 1 if it cleaned and relsed the buffer. 0 otherwise
*/
-int remove_from_transaction(struct super_block *p_s_sb, unsigned long blocknr, int already_cleaned) {
+static int remove_from_transaction(struct super_block *p_s_sb, unsigned long blocknr, int already_cleaned) {
struct buffer_head *bh ;
struct reiserfs_journal_cnode *cn ;
int ret = 0;
int show_reiserfs_locks(void) {
dump_journal_writers() ;
-#if 0 /* debugging code for when we are compiled static don't delete */
- p_s_sb = sb_entry(super_blocks.next);
- while (p_s_sb != sb_entry(&super_blocks)) {
- if (reiserfs_is_super(p_s_sb)) {
-printk("journal lock is %d, join lock is %d, writers %d must wait is %d\n",
- atomic_read(&(SB_JOURNAL(p_s_sb)->j_wlock)),
- atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)),
- atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)),
- SB_JOURNAL(p_s_sb)->j_must_wait) ;
- printk("used cnodes %d, free cnodes %d\n", SB_JOURNAL(p_s_sb)->j_cnode_used, SB_JOURNAL(p_s_sb)->j_cnode_free) ;
- }
- p_s_sb = sb_entry(p_s_sb->s_list.next);
- }
-#endif
return 0 ;
}
/* starting with oldest, loop until we get to the start */
i = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ;
while(i != start) {
- if (SB_JOURNAL_LIST(p_s_sb)[i].j_len > 0 && ((now - SB_JOURNAL_LIST(p_s_sb)[i].j_timestamp) > JOURNAL_MAX_COMMIT_AGE ||
+ if (SB_JOURNAL_LIST(p_s_sb)[i].j_len > 0 && ((now - SB_JOURNAL_LIST(p_s_sb)[i].j_timestamp) > SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) ||
immediate)) {
/* we have to check again to be sure the current transaction did not change */
if (i != SB_JOURNAL_LIST_INDEX(p_s_sb)) {
if (!immediate && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0 &&
SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
SB_JOURNAL(p_s_sb)->j_len > 0 &&
- (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > JOURNAL_MAX_TRANS_AGE) {
+ (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
journal_join(&th, p_s_sb, 1) ;
reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
/* deal with old transactions where we are the last writers */
now = CURRENT_TIME ;
- if ((now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > JOURNAL_MAX_TRANS_AGE) {
+ if ((now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
commit_now = 1 ;
SB_JOURNAL(p_s_sb)->j_next_async_flush = 1 ;
}
/* don't batch when someone is waiting on j_join_wait */
/* don't batch when syncing the commit or flushing the whole trans */
if (!(SB_JOURNAL(p_s_sb)->j_must_wait > 0) && !(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock))) && !flush && !commit_now &&
- (SB_JOURNAL(p_s_sb)->j_len < JOURNAL_MAX_BATCH) &&
- SB_JOURNAL(p_s_sb)->j_len_alloc < JOURNAL_MAX_BATCH && SB_JOURNAL(p_s_sb)->j_cnode_free > (JOURNAL_TRANS_MAX * 3)) {
+ (SB_JOURNAL(p_s_sb)->j_len < SB_JOURNAL_MAX_BATCH(p_s_sb)) &&
+ SB_JOURNAL(p_s_sb)->j_len_alloc < SB_JOURNAL_MAX_BATCH(p_s_sb) && SB_JOURNAL(p_s_sb)->j_cnode_free > (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3)) {
SB_JOURNAL(p_s_sb)->j_bcount++ ;
unlock_journal(p_s_sb) ;
return 0 ;
}
- if (SB_JOURNAL(p_s_sb)->j_start > JOURNAL_BLOCK_COUNT) {
+ if (SB_JOURNAL(p_s_sb)->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", SB_JOURNAL(p_s_sb)->j_start) ;
}
return 1 ;
rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
/* setup description block */
- d_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ;
+ d_bh = getblk(SB_JOURNAL_DEV(p_s_sb), SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start, p_s_sb->s_blocksize) ;
mark_buffer_uptodate(d_bh, 1) ;
desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ;
desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
/* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
- c_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) +
- ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % JOURNAL_BLOCK_COUNT)) ;
+ c_bh = getblk(SB_JOURNAL_DEV(p_s_sb), SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+ ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)),
+ p_s_sb->s_blocksize) ;
commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ;
commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
last_cn->next = jl_cn ;
}
last_cn = jl_cn ;
- if (cn->bh->b_blocknr >= reiserfs_get_journal_block(p_s_sb) &&
- cn->bh->b_blocknr < (reiserfs_get_journal_block(p_s_sb) + JOURNAL_BLOCK_COUNT)) {
+    /* make sure the block we are trying to log is not a block
+       of the journal or the reserved area */
+
+ if (is_block_in_log_or_reserved_area(p_s_sb, cn->bh->b_blocknr)) {
reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
}
jl_cn->blocknr = cn->bh->b_blocknr ;
/* copy all the real blocks into log area. dirty log blocks */
if (test_bit(BH_JDirty, &cn->bh->b_state)) {
struct buffer_head *tmp_bh ;
- tmp_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) +
- ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT)) ;
+ tmp_bh = getblk(SB_JOURNAL_DEV(p_s_sb), SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+ ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)),
+ p_s_sb->s_blocksize) ;
mark_buffer_uptodate(tmp_bh, 1) ;
memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;
jindex++ ;
/* reset journal values for the next transaction */
old_start = SB_JOURNAL(p_s_sb)->j_start ;
- SB_JOURNAL(p_s_sb)->j_start = (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 2) % JOURNAL_BLOCK_COUNT;
+ SB_JOURNAL(p_s_sb)->j_start = (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
SB_JOURNAL(p_s_sb)->j_last = NULL ;
for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
jindex = i ;
if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 && SB_JOURNAL(p_s_sb)->j_start <= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
- if ((SB_JOURNAL(p_s_sb)->j_start + JOURNAL_TRANS_MAX + 1) >= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
+ if ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) >= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ;
}
} else if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 &&
- (SB_JOURNAL(p_s_sb)->j_start + JOURNAL_TRANS_MAX + 1) > JOURNAL_BLOCK_COUNT) {
- if (((SB_JOURNAL(p_s_sb)->j_start + JOURNAL_TRANS_MAX + 1) % JOURNAL_BLOCK_COUNT) >=
+ (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
+ if (((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ;
}
/* this check should always be run, to send old lists to disk */
if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 &&
SB_JOURNAL_LIST(p_s_sb)[jindex].j_timestamp <
- (CURRENT_TIME - (JOURNAL_MAX_TRANS_AGE * 4))) {
+ (CURRENT_TIME - (SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) * 4))) {
flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ;
}
}
/* form item header */
memcpy (&new_ih.ih_key, &ih->ih_key, KEY_SIZE);
- put_ih_version( &new_ih, ITEM_VERSION_1 );
+ put_ih_version( &new_ih, KEY_FORMAT_3_5 );
/* calculate item len */
put_ih_item_len( &new_ih, DEH_SIZE * copy_count + copy_records_len );
put_ih_entry_count( &new_ih, 0 );
set_le_ih_k_offset (&new_ih, U32_MAX);
/* this item is not yet valid, but we want I_IS_DIRECTORY_ITEM to return 1 for it, so we -1 */
}
- set_le_key_k_type (ITEM_VERSION_1, &(new_ih.ih_key), TYPE_DIRENTRY);
+ set_le_key_k_type (KEY_FORMAT_3_5, &(new_ih.ih_key), TYPE_DIRENTRY);
}
/* insert item into dest buffer */
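
This patch renames ITEM_VERSION_1/ITEM_VERSION_2 to KEY_FORMAT_3_5/KEY_FORMAT_3_6 throughout. For context, a toy model of the two key layouts those formats select; the field widths follow the reiserfs on-disk format, while the constant values and struct names here are assumptions:

#include <stdint.h>
#include <stdio.h>

#define KEY_FORMAT_3_5 0   /* old ITEM_VERSION_1; values assumed */
#define KEY_FORMAT_3_6 1   /* old ITEM_VERSION_2 */

struct offset_v1 { uint32_t k_offset; uint32_t k_uniqueness; };
struct offset_v2 { uint64_t k_offset : 60; uint64_t k_type : 4; };

int main(void)
{
    struct offset_v2 v2 = { 12345, 3 };

    printf("3.6 key: offset %llu, type %u\n",
           (unsigned long long)v2.k_offset, (unsigned)v2.k_type);
    return 0;
}
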
#include <linux/reiserfs_fs.h>
#include <linux/smp_lock.h>
- /* there should be an overview right
- here, as there should be in every
- conceptual grouping of code. This
- should be combined with dir.c and
- called dir.c (naming will become
- too large to be called one file in
- a few years), stop senselessly
- imitating the incoherent
- structuring of code used by other
- filesystems. */
-
#define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { i->i_nlink++; if (i->i_nlink >= REISERFS_LINK_MAX) i->i_nlink=1; }
#define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) i->i_nlink--;
BUG ();
/* store key of the found entry */
- de->de_entry_key.version = ITEM_VERSION_1;
+ de->de_entry_key.version = KEY_FORMAT_3_5;
de->de_entry_key.on_disk_key.k_dir_id = le32_to_cpu (de->de_ih->ih_key.k_dir_id);
de->de_entry_key.on_disk_key.k_objectid = le32_to_cpu (de->de_ih->ih_key.k_objectid);
set_cpu_key_k_offset (&(de->de_entry_key), deh_offset (deh));
while (1) {
retval = search_by_entry_key (dir->i_sb, &key_to_search, path_to_entry, de);
- if (retval == IO_ERROR)
- // FIXME: still has to be dealt with
- reiserfs_panic (dir->i_sb, "zam-7001: io error in " __FUNCTION__ "\n");
+ if (retval == IO_ERROR) {
+ reiserfs_warning ("zam-7001: io error in " __FUNCTION__ "\n");
+ return IO_ERROR;
+ }
/* compare names for all entries having given hash value */
retval = linear_search_in_dir_item (&key_to_search, de, name, namelen);
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
-struct dentry * reiserfs_lookup (struct inode * dir, struct dentry * dentry)
+static struct dentry * reiserfs_lookup (struct inode * dir, struct dentry * dentry)
{
int retval;
struct inode * inode = 0;
return NULL;
}
-
//
// a portion of this function, particularly the VFS interface portion,
// was derived from minix or ext2's analog and evolved as the
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode)
+static int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode)
{
int retval;
struct inode * inode;
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode, int rdev)
+static int reiserfs_mknod (struct inode * dir, struct dentry *dentry, int mode, int rdev)
{
int retval;
struct inode * inode;
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode)
+static int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode)
{
int retval;
struct inode * inode;
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_rmdir (struct inode * dir, struct dentry *dentry)
+static int reiserfs_rmdir (struct inode * dir, struct dentry *dentry)
{
int retval;
struct inode * inode;
int windex ;
struct reiserfs_transaction_handle th ;
- int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+ int jbegin_count;
INITIALIZE_PATH (path);
struct reiserfs_dir_entry de;
+  /* we will be doing 2 balancings and updating 2 stat data */
+ jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2;
+
journal_begin(&th, dir->i_sb, jbegin_count) ;
windex = push_journal_writer("reiserfs_rmdir") ;
dir->i_blocks = ((dir->i_size + 511) >> 9);
reiserfs_update_sd (&th, dir);
+ /* prevent empty directory from getting lost */
+ add_save_link (&th, inode, 0/* not truncate */);
+
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ;
reiserfs_check_path(&path) ;
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_unlink (struct inode * dir, struct dentry *dentry)
+static int reiserfs_unlink (struct inode * dir, struct dentry *dentry)
{
int retval;
struct inode * inode;
INITIALIZE_PATH (path);
int windex ;
struct reiserfs_transaction_handle th ;
- int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+ int jbegin_count;
+
+ inode = dentry->d_inode;
+
+  /* in this transaction we can be doing at most two balancings and
+     updating two stat data */
+ jbegin_count = JOURNAL_PER_BALANCE_CNT * 2 + 2;
journal_begin(&th, dir->i_sb, jbegin_count) ;
windex = push_journal_writer("reiserfs_unlink") ;
retval = -ENOENT;
goto end_unlink;
}
- inode = dentry->d_inode;
reiserfs_update_inode_transaction(inode) ;
reiserfs_update_inode_transaction(dir) ;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
reiserfs_update_sd (&th, dir);
+ if (!inode->i_nlink)
+ /* prevent file from getting lost */
+ add_save_link (&th, inode, 0/* not truncate */);
+
pop_journal_writer(windex) ;
journal_end(&th, dir->i_sb, jbegin_count) ;
reiserfs_check_path(&path) ;
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const char * symname)
+static int reiserfs_symlink (struct inode * dir, struct dentry * dentry, const char * symname)
{
int retval;
struct inode * inode;
}
item_len = ROUND_UP (strlen (symname));
- if (item_len > MAX_ITEM_LEN (dir->i_sb->s_blocksize)) {
+ if (item_len > MAX_DIRECT_ITEM_LEN (dir->i_sb->s_blocksize)) {
iput(inode) ;
return -ENAMETOOLONG;
}
// at the ext2 code and comparing. It's subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_link (struct dentry * old_dentry, struct inode * dir, struct dentry * dentry)
+static int reiserfs_link (struct dentry * old_dentry, struct inode * dir, struct dentry * dentry)
{
int retval;
struct inode *inode = old_dentry->d_inode;
* one path. If it holds 2 or more, it can get into endless waiting in
* get_empty_nodes or its clones
*/
-int reiserfs_rename (struct inode * old_dir, struct dentry *old_dentry,
- struct inode * new_dir, struct dentry *new_dentry)
+static int reiserfs_rename (struct inode * old_dir, struct dentry *old_dentry,
+ struct inode * new_dir, struct dentry *new_dentry)
{
int retval;
INITIALIZE_PATH (old_entry_path);
INITIALIZE_PATH (new_entry_path);
INITIALIZE_PATH (dot_dot_entry_path);
- struct item_head new_entry_ih, old_entry_ih ;
+ struct item_head new_entry_ih, old_entry_ih, dot_dot_ih ;
struct reiserfs_dir_entry old_de, new_de, dot_dot_de;
struct inode * old_inode, * new_inode;
int windex ;
struct reiserfs_transaction_handle th ;
- int jbegin_count = JOURNAL_PER_BALANCE_CNT * 3;
+ int jbegin_count ;
+  /* two balancings: old name removal, new name insertion or "save" link;
+     stat data updates: old directory, new directory and maybe the block
+     containing ".." of the renamed directory */
+ jbegin_count = JOURNAL_PER_BALANCE_CNT * 3 + 3;
+
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
// FIXME: is it possible, that new_inode == 0 here? If yes, it
// is not clear how does ext2 handle that
if (!new_inode) {
- printk ("reiserfs_rename: new entry is found, new inode == 0\n");
- BUG ();
+ reiserfs_panic (old_dir->i_sb,
+ "vs-7050: new entry is found, new inode == 0\n");
}
} else if (retval) {
pop_journal_writer(windex) ;
copy_item_head(&old_entry_ih, get_ih(&old_entry_path)) ;
+ reiserfs_prepare_for_journal(old_inode->i_sb, old_de.de_bh, 1) ;
+
// look for new name by reiserfs_find_entry
new_de.de_gen_number_bit_string = 0;
retval = reiserfs_find_entry (new_dir, new_dentry->d_name.name, new_dentry->d_name.len,
if (S_ISDIR(old_inode->i_mode)) {
if (search_by_entry_key (new_dir->i_sb, &dot_dot_de.de_entry_key, &dot_dot_entry_path, &dot_dot_de) != NAME_FOUND)
BUG ();
+ copy_item_head(&dot_dot_ih, get_ih(&dot_dot_entry_path)) ;
// node containing ".." gets into transaction
reiserfs_prepare_for_journal(old_inode->i_sb, dot_dot_de.de_bh, 1) ;
}
** of the above checks could have scheduled. We have to be
** sure our items haven't been shifted by another process.
*/
- if (!entry_points_to_object(new_dentry->d_name.name,
+ if (item_moved(&new_entry_ih, &new_entry_path) ||
+ !entry_points_to_object(new_dentry->d_name.name,
new_dentry->d_name.len,
&new_de, new_inode) ||
- item_moved(&new_entry_ih, &new_entry_path) ||
item_moved(&old_entry_ih, &old_entry_path) ||
!entry_points_to_object (old_dentry->d_name.name,
old_dentry->d_name.len,
&old_de, old_inode)) {
reiserfs_restore_prepared_buffer (old_inode->i_sb, new_de.de_bh);
+ reiserfs_restore_prepared_buffer (old_inode->i_sb, old_de.de_bh);
if (S_ISDIR(old_inode->i_mode))
reiserfs_restore_prepared_buffer (old_inode->i_sb, dot_dot_de.de_bh);
-#if 0
- // FIXME: do we need this? shouldn't we simply continue?
- run_task_queue(&tq_disk);
- yield();
-#endif
continue;
}
+ if (S_ISDIR(old_inode->i_mode)) {
+ if ( item_moved(&dot_dot_ih, &dot_dot_entry_path) ||
+ !entry_points_to_object ( "..", 2, &dot_dot_de, old_dir) ) {
+ reiserfs_restore_prepared_buffer (old_inode->i_sb, old_de.de_bh);
+ reiserfs_restore_prepared_buffer (old_inode->i_sb, new_de.de_bh);
+ reiserfs_restore_prepared_buffer (old_inode->i_sb, dot_dot_de.de_bh);
+ continue;
+ }
+ }
RFALSE( S_ISDIR(old_inode->i_mode) &&
- (!entry_points_to_object ("..", 2, &dot_dot_de, old_dir) ||
- !reiserfs_buffer_prepared(dot_dot_de.de_bh)), "" );
+ !reiserfs_buffer_prepared(dot_dot_de.de_bh), "" );
break;
}
journal_mark_dirty (&th, old_dir->i_sb, new_de.de_bh);
mark_de_hidden (old_de.de_deh + old_de.de_entry_num);
+ journal_mark_dirty (&th, old_dir->i_sb, old_de.de_bh);
old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME;
if (new_inode) {
// adjust link number of the victim
if (S_ISDIR(new_inode->i_mode)) {
- DEC_DIR_INODE_NLINK(new_inode)
+ new_inode->i_nlink = 0;
} else {
- new_inode->i_nlink--;
+ new_inode->i_nlink--;
}
new_inode->i_ctime = CURRENT_TIME;
}
if (S_ISDIR(old_inode->i_mode)) {
- //if (dot_dot_de.de_bh) {
- // adjust ".." of renamed directory
+ // adjust ".." of renamed directory
set_ino_in_dir_entry (&dot_dot_de, INODE_PKEY (new_dir));
journal_mark_dirty (&th, new_dir->i_sb, dot_dot_de.de_bh);
-
- DEC_DIR_INODE_NLINK(old_dir)
- if (new_inode) {
- if (S_ISDIR(new_inode->i_mode)) {
- DEC_DIR_INODE_NLINK(new_inode)
- } else {
- new_inode->i_nlink--;
- }
- } else {
- INC_DIR_INODE_NLINK(new_dir)
- }
+
+ if (!new_inode)
+	/* there was no directory in new_dir, so it got a new link
+	   (".." of the renamed directory) */
+ INC_DIR_INODE_NLINK(new_dir);
+
+    /* old directory lost one link - ".." of the renamed directory */
+ DEC_DIR_INODE_NLINK(old_dir);
}
// looks like in 2.3.99pre3 brelse is atomic. so we can use pathrelse
// anybody, but it will panic if will not be able to find the
// entry. This needs one more clean up
if (reiserfs_cut_from_item (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL, 0) < 0)
- reiserfs_warning ("vs-: reiserfs_rename: coudl not cut old name. Fsck later?\n");
+	reiserfs_warning ("vs-7060: reiserfs_rename: could not cut old name. Fsck later?\n");
old_dir->i_size -= DEH_SIZE + old_de.de_entrylen;
old_dir->i_blocks = ((old_dir->i_size + 511) >> 9);
reiserfs_update_sd (&th, old_dir);
reiserfs_update_sd (&th, new_dir);
- if (new_inode)
+
+ if (new_inode) {
+ if (new_inode->i_nlink == 0)
+ add_save_link (&th, new_inode, 0/* not truncate */);
reiserfs_update_sd (&th, new_inode);
+ }
pop_journal_writer(windex) ;
journal_end(&th, old_dir->i_sb, jbegin_count) ;
return 0;
}
+
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations reiserfs_dir_inode_operations = {
+ //&reiserfs_dir_operations, /* default_file_ops */
+ create: reiserfs_create,
+ lookup: reiserfs_lookup,
+ link: reiserfs_link,
+ unlink: reiserfs_unlink,
+ symlink: reiserfs_symlink,
+ mkdir: reiserfs_mkdir,
+ rmdir: reiserfs_rmdir,
+ mknod: reiserfs_mknod,
+ rename: reiserfs_rename,
+};
+
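
The rmdir/unlink/rename hunks above rely on the INC_DIR_INODE_NLINK/DEC_DIR_INODE_NLINK macros, which treat i_nlink == 1 as "link count not tracked" for directories. A user-space model of that convention (the REISERFS_LINK_MAX value is assumed):

#include <stdio.h>

#define REISERFS_LINK_MAX 64535   /* assumed value */

struct inode { int i_nlink; };

static void inc_dir_nlink(struct inode *i)
{
    if (i->i_nlink != 1) {
        i->i_nlink++;
        if (i->i_nlink >= REISERFS_LINK_MAX)
            i->i_nlink = 1;       /* overflow: fall back to "unknown" */
    }
}

static void dec_dir_nlink(struct inode *i)
{
    if (i->i_nlink != 1)          /* never decrement the sentinel */
        i->i_nlink--;
}

int main(void)
{
    struct inode d = { 2 };

    inc_dir_nlink(&d);
    dec_dir_nlink(&d);
    printf("%d\n", d.i_nlink);    /* back where it started: 2 */
    return 0;
}
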
#include <linux/config.h>
#include <linux/string.h>
#include <linux/locks.h>
+#include <linux/random.h>
#include <linux/sched.h>
#include <linux/reiserfs_fs.h>
-
+#include <linux/reiserfs_fs_sb.h>
// find where objectid map starts
#define objectid_map(s,rs) (old_format_only (s) ? \
}
/* JDM comparing two little-endian values for equality -- safe */
- if (rs->s_oid_cursize == rs->s_oid_maxsize) {
+ if (sb_oid_cursize(rs) == sb_oid_maxsize(rs)) {
/* objectid map must be expanded, but there is no space */
PROC_INFO_INC( s, leaked_oid );
return;
i += 2;
}
- reiserfs_warning ("vs-15010: reiserfs_release_objectid: tried to free free object id (%lu)",
+    reiserfs_warning ("vs-15011: reiserfs_release_objectid: tried to free an already free object id (%lu)\n",
( long unsigned ) objectid_to_release);
}
int reiserfs_convert_objectid_map_v1(struct super_block *s) {
struct reiserfs_super_block *disk_sb = SB_DISK_SUPER_BLOCK (s);
- int cur_size = le16_to_cpu(disk_sb->s_oid_cursize) ;
+ int cur_size = sb_oid_cursize(disk_sb);
int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2 ;
- int old_max = le16_to_cpu(disk_sb->s_oid_maxsize) ;
+ int old_max = sb_oid_maxsize(disk_sb);
struct reiserfs_super_block_v1 *disk_sb_v1 ;
__u32 *objectid_map, *new_objectid_map ;
int i ;
** map
*/
objectid_map[new_size - 1] = objectid_map[cur_size - 1] ;
- disk_sb->s_oid_cursize = cpu_to_le16(new_size) ;
+ set_sb_oid_cursize(disk_sb,new_size) ;
}
/* move the smaller objectid map past the end of the new super */
for (i = new_size - 1 ; i >= 0 ; i--) {
/* set the max size so we don't overflow later */
- disk_sb->s_oid_maxsize = cpu_to_le16(new_size) ;
+ set_sb_oid_maxsize(disk_sb,new_size) ;
+
+ /* Zero out label and generate random UUID */
+ memset(disk_sb->s_label, 0, sizeof(disk_sb->s_label)) ;
+ generate_random_uuid(disk_sb->s_uuid);
/* finally, zero out the unused chunk of the new super */
memset(disk_sb->s_unused, 0, sizeof(disk_sb->s_unused)) ;
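
As the objectid.c hunk above shows, a released objectid is simply leaked when the on-disk map is already full (cursize == maxsize), with a proc counter bumped instead. A toy model of that bookkeeping; all names and sizes here are illustrative:

#include <stdio.h>

struct oid_map { int cursize, maxsize, leaked; };

static void release_objectid(struct oid_map *m)
{
    if (m->cursize == m->maxsize) {
        m->leaked++;      /* no room to record the freed id */
        return;
    }
    m->cursize += 2;      /* a released id opens a new [start,end) pair */
}

int main(void)
{
    struct oid_map m = { 10, 10, 0 };

    release_objectid(&m);
    printf("leaked ids: %d\n", m.leaked);
    return 0;
}
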
static void sprintf_item_head (char * buf, struct item_head * ih)
{
if (ih) {
- sprintf (buf, "%s", (ih_version (ih) == ITEM_VERSION_2) ? "*NEW* " : "*OLD*");
+ sprintf (buf, "%s", (ih_version (ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
sprintf_le_key (buf + strlen (buf), &(ih->ih_key));
sprintf (buf + strlen (buf), ", item_len %d, item_location %d, "
"free_space(entry_count) %d",
char *version;
- if (strncmp (rs->s_magic, REISERFS_SUPER_MAGIC_STRING,
- strlen ( REISERFS_SUPER_MAGIC_STRING)) == 0) {
+ if (is_reiserfs_3_5(rs)) {
version = "3.5";
- } else if( strncmp (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING,
- strlen ( REISER2FS_SUPER_MAGIC_STRING)) == 0) {
+ } else if (is_reiserfs_3_6(rs)) {
version = "3.6";
+ } else if (is_reiserfs_jr(rs)) {
+ version = ((sb_version(rs) == REISERFS_VERSION_2) ?
+ "3.6" : "3.5");
} else {
return 1;
}
// someone stores reiserfs super block in some data block ;)
// skipped = (bh->b_blocknr * bh->b_size) / sb_blocksize(rs);
skipped = bh->b_blocknr;
- data_blocks = sb_block_count(rs) - skipped - 1 -
- sb_bmap_nr(rs) - (sb_orig_journal_size(rs) + 1) -
- sb_free_blocks(rs);
- printk ("Busy blocks (skipped %d, bitmaps - %d, journal blocks - %d\n"
- "1 super blocks, %d data blocks\n",
- skipped, sb_bmap_nr(rs),
- (sb_orig_journal_size(rs) + 1), data_blocks);
+ data_blocks = sb_block_count(rs) - skipped - 1 - sb_bmap_nr(rs) -
+ (!is_reiserfs_jr(rs) ? sb_jp_journal_size(rs) + 1 : sb_reserved_for_journal(rs)) -
+ sb_free_blocks(rs);
+ printk ("Busy blocks (skipped %d, bitmaps - %d, journal (or reserved) blocks - %d\n"
+ "1 super block, %d data blocks\n",
+ skipped, sb_bmap_nr(rs), (!is_reiserfs_jr(rs) ? (sb_jp_journal_size(rs) + 1) :
+ sb_reserved_for_journal(rs)) , data_blocks);
printk ("Root block %u\n", sb_root_block(rs));
- printk ("Journal block (first) %d\n", sb_journal_block(rs));
- printk ("Journal dev %d\n", sb_journal_dev(rs));
- printk ("Journal orig size %d\n", sb_orig_journal_size(rs));
- printk ("Filesystem state %s\n",
- (sb_state(rs) == REISERFS_VALID_FS) ? "VALID" : "ERROR");
+ printk ("Journal block (first) %d\n", sb_jp_journal_1st_block(rs));
+ printk ("Journal dev %d\n", sb_jp_journal_dev(rs));
+ printk ("Journal orig size %d\n", sb_jp_journal_size(rs));
+ printk ("FS state %d\n", sb_fs_state(rs));
printk ("Hash function \"%s\"\n",
sb_hash_function_code(rs) == TEA_HASH ? "tea" :
( sb_hash_function_code(rs) == YURA_HASH ? "rupasov" : (sb_hash_function_code(rs) == R5_HASH ? "r5" : "unknown")));
-
+
printk ("Tree height %d\n", sb_tree_height(rs));
return 0;
}
-
static int print_desc_block (struct buffer_head * bh)
{
struct reiserfs_journal_desc * desc;
#include <linux/sched.h>
#include <asm/uaccess.h>
#include <linux/reiserfs_fs.h>
+#include <linux/reiserfs_fs_sb.h>
#include <linux/smp_lock.h>
#include <linux/locks.h>
#include <linux/init.h>
{
int len = 0;
struct super_block *sb;
+ char *format;
- sb = procinfo_prologue( ( kdev_t ) ( int ) data );
+ sb = procinfo_prologue( to_kdev_t((int)data) );
if( sb == NULL )
return -ENOENT;
+ if ( sb->u.reiserfs_sb.s_properties & (1 << REISERFS_3_6) ) {
+ format = "3.6";
+ } else if ( sb->u.reiserfs_sb.s_properties & (1 << REISERFS_3_5) ) {
+ format = "3.5";
+ } else {
+ format = "unknown";
+ }
+
len += sprintf( &buffer[ len ], "%s format\twith checks %s\n",
- old_format_only( sb ) ? "old" : "new",
+ format,
#if defined( CONFIG_REISERFS_CHECK )
"on"
#else
int count, int *eof, void *data )
{
int len = 0;
-
- len += sprintf( &buffer[ len ], "%s [%s]\n",
- reiserfs_get_version_string(),
-#if defined( CONFIG_REISERFS_FS_MODULE )
- "as module"
-#else
- "built into kernel"
-#endif
- );
return reiserfs_proc_tail( len, buffer, start, offset, count, eof );
}
#define D2C( x ) le16_to_cpu( x )
#define D4C( x ) le32_to_cpu( x )
-#define DF( x ) D2C( rs -> x )
-#define DFL( x ) D4C( rs -> x )
+#define DF( x ) D2C( rs -> s_v1.x )
+#define DFL( x ) D4C( rs -> s_v1.x )
#define objectid_map( s, rs ) (old_format_only (s) ? \
(__u32 *)((struct reiserfs_super_block_v1 *)rs + 1) : \
#define MAP( i ) D4C( objectid_map( sb, rs )[ i ] )
#define DJF( x ) le32_to_cpu( rs -> x )
+#define DJV( x ) le32_to_cpu( s_v1 -> x )
+#define DJP( x ) le32_to_cpu( jp -> x )
#define JF( x ) ( r -> s_journal -> x )
int reiserfs_super_in_proc( char *buffer, char **start, off_t offset,
struct reiserfs_sb_info *r;
int len = 0;
- sb = procinfo_prologue( ( kdev_t ) ( int ) data );
+ sb = procinfo_prologue( to_kdev_t((int)data) );
if( sb == NULL )
return -ENOENT;
r = &sb->u.reiserfs_sb;
dont_have_tails( sb ) ? "NO_TAILS " : "TAILS ",
replay_only( sb ) ? "REPLAY_ONLY " : "",
reiserfs_dont_log( sb ) ? "DONT_LOG " : "LOG ",
- old_format_only( sb ) ? "CONV " : "",
+ convert_reiserfs( sb ) ? "CONV " : "",
atomic_read( &r -> s_generation_counter ),
SF( s_kmallocs ),
int len = 0;
int level;
- sb = procinfo_prologue( ( kdev_t ) ( int ) data );
+ sb = procinfo_prologue( to_kdev_t((int)data) );
if( sb == NULL )
return -ENOENT;
r = &sb->u.reiserfs_sb;
struct reiserfs_sb_info *r = &sb->u.reiserfs_sb;
int len = 0;
- sb = procinfo_prologue( ( kdev_t ) ( int ) data );
+ sb = procinfo_prologue( to_kdev_t((int)data) );
if( sb == NULL )
return -ENOENT;
r = &sb->u.reiserfs_sb;
int hash_code;
int len = 0;
- sb = procinfo_prologue( ( kdev_t ) ( int ) data );
+ sb = procinfo_prologue( to_kdev_t((int)data) );
if( sb == NULL )
return -ENOENT;
sb_info = &sb->u.reiserfs_sb;
"blocksize: \t%i\n"
"oid_maxsize: \t%i\n"
"oid_cursize: \t%i\n"
- "state: \t%i\n"
- "magic: \t%12.12s\n"
+ "umount_state: \t%i\n"
+ "magic: \t%10.10s\n"
+ "fs_state: \t%i\n"
"hash: \t%s\n"
"tree_height: \t%i\n"
"bmap_nr: \t%i\n"
- "version: \t%i\n",
+ "version: \t%i\n"
+ "reserved_for_journal: \t%i\n",
DFL( s_block_count ),
DFL( s_free_blocks ),
DF( s_blocksize ),
DF( s_oid_maxsize ),
DF( s_oid_cursize ),
- DF( s_state ),
- rs -> s_magic,
+ DF( s_umount_state ),
+ rs -> s_v1.s_magic,
+ DF( s_fs_state ),
hash_code == TEA_HASH ? "tea" :
( hash_code == YURA_HASH ) ? "rupasov" :
( hash_code == R5_HASH ) ? "r5" :
( hash_code == UNSET_HASH ) ? "unset" : "unknown",
DF( s_tree_height ),
DF( s_bmap_nr ),
- DF( s_version ) );
+ DF( s_version ),
+ DF (s_reserved_for_journal));
procinfo_epilogue( sb );
return reiserfs_proc_tail( len, buffer, start, offset, count, eof );
int len = 0;
int exact;
- sb = procinfo_prologue( ( kdev_t ) ( int ) data );
+ sb = procinfo_prologue( to_kdev_t((int)data) );
if( sb == NULL )
return -ENOENT;
sb_info = &sb->u.reiserfs_sb;
rs = sb_info -> s_rs;
- mapsize = le16_to_cpu( rs -> s_oid_cursize );
+ mapsize = le16_to_cpu( rs -> s_v1.s_oid_cursize );
total_used = 0;
for( i = 0 ; i < mapsize ; ++i ) {
}
len += sprintf( &buffer[ len ], "total: \t%i [%i/%i] used: %lu [%s]\n",
i,
- mapsize, le16_to_cpu( rs -> s_oid_maxsize ),
+ mapsize, le16_to_cpu( rs -> s_v1.s_oid_maxsize ),
total_used, exact ? "exact" : "estimation" );
procinfo_epilogue( sb );
struct super_block *sb;
struct reiserfs_sb_info *r;
struct reiserfs_super_block *rs;
+ struct journal_params *jp;
int len = 0;
- sb = procinfo_prologue( ( kdev_t ) ( int ) data );
+ sb = procinfo_prologue( to_kdev_t((int)data) );
if( sb == NULL )
return -ENOENT;
r = &sb->u.reiserfs_sb;
rs = r -> s_rs;
+ jp = &rs->s_v1.s_journal;
len += sprintf( &buffer[ len ],
/* on-disk fields */
- "s_journal_block: \t%i\n"
- "s_journal_dev: \t%s[%x]\n"
- "s_orig_journal_size: \t%i\n"
- "s_journal_trans_max: \t%i\n"
- "s_journal_block_count: \t%i\n"
- "s_journal_max_batch: \t%i\n"
- "s_journal_max_commit_age: \t%i\n"
- "s_journal_max_trans_age: \t%i\n"
+ "jp_journal_1st_block: \t%i\n"
+ "jp_journal_dev: \t%s[%x]\n"
+ "jp_journal_size: \t%i\n"
+ "jp_journal_trans_max: \t%i\n"
+ "jp_journal_magic: \t%i\n"
+ "jp_journal_max_batch: \t%i\n"
+ "jp_journal_max_commit_age: \t%i\n"
+ "jp_journal_max_trans_age: \t%i\n"
/* incore fields */
+ "j_1st_reserved_block: \t%i\n"
"j_state: \t%i\n"
"j_trans_id: \t%lu\n"
"j_mount_id: \t%lu\n"
"prepare: \t%12lu\n"
"prepare_retry: \t%12lu\n",
- DJF( s_journal_block ),
- DJF( s_journal_dev ) == 0 ? "none" : bdevname( DJF( s_journal_dev ) ),
- DJF( s_journal_dev ),
- DJF( s_orig_journal_size ),
- DJF( s_journal_trans_max ),
- DJF( s_journal_block_count ),
- DJF( s_journal_max_batch ),
- DJF( s_journal_max_commit_age ),
- DJF( s_journal_max_trans_age ),
-
+ DJP( jp_journal_1st_block ),
+ DJP( jp_journal_dev ) == 0 ? "none" : bdevname(to_kdev_t(DJP( jp_journal_dev ))),
+ DJP( jp_journal_dev ),
+ DJP( jp_journal_size ),
+ DJP( jp_journal_trans_max ),
+ DJP( jp_journal_magic ),
+ DJP( jp_journal_max_batch ),
+ DJP( jp_journal_max_commit_age ),
+ DJP( jp_journal_max_trans_age ),
+
+ JF( j_1st_reserved_block ),
JF( j_state ),
JF( j_trans_id ),
JF( j_mount_id ),
{
return ( sb->u.reiserfs_sb.procdir ) ? create_proc_read_entry
( name, 0, sb->u.reiserfs_sb.procdir, func,
- ( void * ) ( int ) sb -> s_dev ) : NULL;
+ ( void * ) kdev_t_to_nr( sb -> s_dev ) ) : NULL;
}
void reiserfs_proc_unregister( struct super_block *sb, const char *name )
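
The procfs.c hunks above replace bare (kdev_t)(int) casts with to_kdev_t()/kdev_t_to_nr() round-trips, so only the scalar device number is stashed in the proc entry's data pointer. A user-space sketch of that round-trip, assuming the classic 8:8 major/minor packing:

#include <stdio.h>

typedef struct { unsigned value; } kdev_t;

static unsigned kdev_t_to_nr(kdev_t d) { return d.value; }
static kdev_t to_kdev_t(unsigned nr)   { kdev_t d = { nr }; return d; }

int main(void)
{
    kdev_t dev = to_kdev_t((8 << 8) | 1);           /* e.g. sda1 */
    void *data = (void *)(long)kdev_t_to_nr(dev);   /* stored at register time */
    kdev_t back = to_kdev_t((unsigned)(long)data);  /* rebuilt in the handler */

    printf("%u -> %u\n", kdev_t_to_nr(dev), kdev_t_to_nr(back));
    return 0;
}
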
for (i = 0; i < bmap_nr; i++)
bitmap[i] = SB_AP_BITMAP(s)[i];
for (i = bmap_nr; i < bmap_nr_new; i++) {
- bitmap[i] = reiserfs_getblk(s, i * s->s_blocksize * 8);
- memset(bitmap[i]->b_data, 0, sb->s_blocksize);
+ bitmap[i] = sb_getblk(s, i * s->s_blocksize * 8);
+ memset(bitmap[i]->b_data, 0, sb_blocksize(sb));
reiserfs_test_and_set_le_bit(0, bitmap[i]->b_data);
mark_buffer_dirty(bitmap[i]) ;
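
The resize hunk above places each new bitmap with sb_getblk(s, i * s->s_blocksize * 8): bitmap i sits at the first block of the group it maps, which is why its own bit 0 is set immediately after the memset. A minimal sketch of the placement rule:

#include <stdio.h>

/* bitmap i guards blocksize*8 blocks and sits at its group's first block */
static unsigned long bitmap_block(unsigned long i, unsigned long blocksize)
{
    return i * blocksize * 8;
}

int main(void)
{
    printf("%lu\n", bitmap_block(2, 4096));   /* 65536 with 4k blocks */
    return 0;
}
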
// find out version of the key
to->version = le_key_version (from);
- if (to->version == ITEM_VERSION_1) {
+ if (to->version == KEY_FORMAT_3_5) {
to->on_disk_key.u.k_offset_v1.k_offset = le32_to_cpu (from->u.k_offset_v1.k_offset);
to->on_disk_key.u.k_offset_v1.k_uniqueness = le32_to_cpu (from->u.k_offset_v1.k_uniqueness);
} else {
if (blocknr == 0)
return;
- bh = reiserfs_getblk (s, blocknr);
+ bh = sb_getblk (s, blocknr);
if (!buffer_uptodate (bh)) {
ll_rw_block (READA, 1, &bh);
continue;
}
- RFALSE( ! key_in_buffer(p_s_search_path, p_s_key, p_s_sb),
+    /* only check that the key is in the buffer if p_s_key is not
+       equal to the MAX_KEY. The latter case is only possible in
+       "finish_unfinished()" processing during mount. */
+ RFALSE( COMP_KEYS( &MAX_KEY, p_s_key ) &&
+ ! key_in_buffer(p_s_search_path, p_s_key, p_s_sb),
"PAP-5130: key is not in the buffer");
#ifdef CONFIG_REISERFS_CHECK
if ( cur_tb ) {
}
// new file gets truncated
- if (inode_items_version (inode) == ITEM_VERSION_2) {
+ if (get_inode_item_key_version (inode) == KEY_FORMAT_3_6) {
//
round_len = ROUND_UP (new_file_length);
/* this was n_new_file_length < le_ih ... */
struct item_head * p_le_ih = PATH_PITEM_HEAD(p_s_path);
struct buffer_head * p_s_bh = PATH_PLAST_BUFFER(p_s_path);
-#ifdef CONFIG_REISERFS_CHECK
- int n_repeat_counter = 0;
-#endif
-
/* Stat_data item. */
if ( is_statdata_le_ih (p_le_ih) ) {
{
int n_unfm_number, /* Number of the item unformatted nodes. */
n_counter,
- n_retry, /* Set to one if there is unformatted node buffer in use. */
n_blk_size;
__u32 * p_n_unfm_pointer; /* Pointer to the unformatted node number. */
__u32 tmp;
struct item_head s_ih; /* Item header. */
char c_mode; /* Returned mode of the balance. */
- struct buffer_head * p_s_un_bh;
int need_research;
// note: path could be changed, first line in for loop takes care
// of it
- for ( n_retry = 0, n_counter = *p_n_removed;
- n_counter < n_unfm_number; n_counter++, p_n_unfm_pointer-- ) {
+ for (n_counter = *p_n_removed;
+ n_counter < n_unfm_number; n_counter++, p_n_unfm_pointer-- ) {
if (item_moved (&s_ih, p_s_path)) {
need_research = 1 ;
p_n_unfm_pointer > (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1,
"vs-5265: pointer out of range");
- if ( ! get_block_num(p_n_unfm_pointer,0) ) { /* Hole, nothing to remove. */
- if ( ! n_retry )
+ /* Hole, nothing to remove. */
+ if ( ! get_block_num(p_n_unfm_pointer,0) ) {
(*p_n_removed)++;
- continue;
- }
- /* Search for the buffer in cache. */
- p_s_un_bh = sb_get_hash_table(p_s_sb, get_block_num(p_n_unfm_pointer,0));
-
- if (p_s_un_bh) {
- mark_buffer_clean(p_s_un_bh) ;
- if (buffer_locked(p_s_un_bh)) {
- __wait_on_buffer(p_s_un_bh) ;
- }
- /* even if the item moves, the block number of the
- ** unformatted node we want to cut won't. So, it was
- ** safe to clean the buffer here, this block _will_
- ** get freed during this call to prepare_for_delete_or_cut
- */
- if ( item_moved (&s_ih, p_s_path) ) {
- need_research = 1;
- brelse(p_s_un_bh) ;
- break ;
- }
+ continue;
}
- if ( p_s_un_bh && block_in_use (p_s_un_bh)) {
- /* Block is locked or held more than by one holder and by
- journal. */
-#ifdef CONFIG_REISERFS_CHECK
- if (n_repeat_counter && (n_repeat_counter % 100000) == 0) {
- printk("prepare_for_delete, waiting on buffer %lu, b_count %d, %s%cJDIRTY %cJDIRTY_WAIT\n",
- p_s_un_bh->b_blocknr, atomic_read (&p_s_un_bh->b_count),
- buffer_locked (p_s_un_bh) ? "locked, " : "",
- buffer_journaled(p_s_un_bh) ? ' ' : '!',
- buffer_journal_dirty(p_s_un_bh) ? ' ' : '!') ;
-
- }
-#endif
- n_retry = 1;
- brelse (p_s_un_bh);
- continue;
- }
-
- if ( ! n_retry )
- (*p_n_removed)++;
-
- RFALSE( p_s_un_bh &&
- get_block_num(p_n_unfm_pointer, 0) != p_s_un_bh->b_blocknr,
- // note: minix_truncate allows that. As truncate is
- // protected by down (inode->i_sem), two truncates can not
- // co-exist
- "PAP-5280: blocks numbers are different");
+ (*p_n_removed)++;
tmp = get_block_num(p_n_unfm_pointer,0);
put_block_num(p_n_unfm_pointer, 0, 0);
journal_mark_dirty (th, p_s_sb, p_s_bh);
- bforget (p_s_un_bh);
inode->i_blocks -= p_s_sb->s_blocksize / 512;
reiserfs_free_block(th, tmp);
if ( item_moved (&s_ih, p_s_path) ) {
- need_research = 1;
- break ;
- }
+ need_research = 1;
+ break ;
+ }
}
/* a trick. If the buffer has been logged, this
*/
reiserfs_restore_prepared_buffer(p_s_sb, p_s_bh);
- if ( n_retry ) {
- /* There is block in use. Wait, they should release it soon */
-
- RFALSE( *p_n_removed >= n_unfm_number, "PAP-5290: illegal case");
-#ifdef CONFIG_REISERFS_CHECK
- if ( !(++n_repeat_counter % 500000) ) {
- reiserfs_warning("PAP-5300: prepare_for_delete_or_cut: (pid %u): "
- "could not delete item %k in (%d) iterations. New file length %Lu. (inode %Ld), Still trying\n",
- current->pid, p_s_item_key, n_repeat_counter, n_new_file_length, inode->i_size);
- if (n_repeat_counter == 5000000) {
- print_block (PATH_PLAST_BUFFER(p_s_path), 3,
- PATH_LAST_POSITION (p_s_path) - 2, PATH_LAST_POSITION (p_s_path) + 2);
- reiserfs_panic(p_s_sb, "PAP-5305: prepare_for_delete_or_cut: key %k, new_file_length %Ld",
- p_s_item_key, n_new_file_length);
- }
- }
-#endif
-
- run_task_queue(&tq_disk);
- yield();
- }
/* This loop can be optimized. */
} while ( (*p_n_removed < n_unfm_number || need_research) &&
search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_FOUND );
/* this deletes item which never gets split */
-static void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
- struct key * key)
+void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
+ struct key * key)
{
struct tree_balance tb;
INITIALIZE_PATH (path);
while (1) {
retval = search_item (th->t_super, &cpu_key, &path);
if (retval == IO_ERROR) {
- reiserfs_warning ("vs-: reiserfs_delete_solid_item: "
+ reiserfs_warning ("vs-5350: reiserfs_delete_solid_item: "
"i/o failure occurred trying to delete %K\n", &cpu_key);
break;
}
if (retval != ITEM_FOUND) {
pathrelse (&path);
- reiserfs_warning ("vs-: reiserfs_delete_solid_item: %k not found",
+ reiserfs_warning ("vs-5355: reiserfs_delete_solid_item: %k not found",
key);
break;
}
}
// IO_ERROR, NO_DISK_SPACE, etc
- reiserfs_warning ("vs-: reiserfs_delete_solid_item: "
+ reiserfs_warning ("vs-5360: reiserfs_delete_solid_item: "
"could not delete %K due to fix_nodes failure\n", &cpu_key);
unfix_nodes (&tb);
break;
/* for directory this deletes item containing "." and ".." */
reiserfs_do_truncate (th, inode, NULL, 0/*no timestamp updates*/);
- /* delete stat data */
- /* this debug code needs to go away. Trying to find a truncate race
- ** -- clm -- 4/1/2000
- */
-#if 0
- if (inode->i_nlink != 0) {
- reiserfs_warning("clm-4001: deleting inode with link count==%d\n", inode->i_nlink) ;
- }
-#endif
#if defined( USE_INODE_GENERATION_COUNTER )
if( !old_format_only ( th -> t_super ) )
{
*/
if (atomic_read(&p_s_inode->i_count) > 1 ||
!tail_has_to_be_packed (p_s_inode) ||
- !page || REISERFS_I(p_s_inode)->nopack) {
+ !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
// leave tail in an unformatted node
*p_c_mode = M_SKIP_BALANCING;
cut_bytes = n_block_size - (n_new_file_size & (n_block_size - 1));
** be flushed before the transaction commits, so we don't need to
** deal with it here.
*/
- REISERFS_I(p_s_inode)->i_pack_on_close = 0 ;
+ REISERFS_I(p_s_inode)->i_flags &= ~i_pack_on_close_mask ;
}
return n_ret_value;
}
static void truncate_directory (struct reiserfs_transaction_handle *th, struct inode * inode)
{
if (inode->i_nlink)
- reiserfs_warning ("vs-5655: truncate_directory: link count != 0");
+ reiserfs_warning ("vs-5655: truncate_directory: link count != 0\n");
- set_le_key_k_offset (ITEM_VERSION_1, INODE_PKEY (inode), DOT_OFFSET);
- set_le_key_k_type (ITEM_VERSION_1, INODE_PKEY (inode), TYPE_DIRENTRY);
+ set_le_key_k_offset (KEY_FORMAT_3_5, INODE_PKEY (inode), DOT_OFFSET);
+ set_le_key_k_type (KEY_FORMAT_3_5, INODE_PKEY (inode), TYPE_DIRENTRY);
reiserfs_delete_solid_item (th, INODE_PKEY (inode));
- set_le_key_k_offset (ITEM_VERSION_1, INODE_PKEY (inode), SD_OFFSET);
- set_le_key_k_type (ITEM_VERSION_1, INODE_PKEY (inode), TYPE_STAT_DATA);
+ set_le_key_k_offset (KEY_FORMAT_3_5, INODE_PKEY (inode), SD_OFFSET);
+ set_le_key_k_type (KEY_FORMAT_3_5, INODE_PKEY (inode), TYPE_STAT_DATA);
}
pathrelse(&s_search_path);
return;
}
+
/* Update key to search for the last file item. */
set_cpu_key_k_offset (&s_item_key, n_file_size);
if (update_timestamps) {
p_s_inode->i_mtime = p_s_inode->i_ctime = CURRENT_TIME;
- // FIXME: sd gets wrong size here
}
reiserfs_update_sd(th, p_s_inode) ;
#define REISERFS_OLD_BLOCKSIZE 4096
#define REISERFS_SUPER_MAGIC_STRING_OFFSET_NJ 20
-char reiserfs_super_magic_string[] = REISERFS_SUPER_MAGIC_STRING;
-char reiser2fs_super_magic_string[] = REISER2FS_SUPER_MAGIC_STRING;
+const char reiserfs_3_5_magic_string[] = REISERFS_SUPER_MAGIC_STRING;
+const char reiserfs_3_6_magic_string[] = REISER2FS_SUPER_MAGIC_STRING;
+const char reiserfs_jr_magic_string[] = REISER2FS_JR_SUPER_MAGIC_STRING;
+
+int is_reiserfs_3_5 (struct reiserfs_super_block * rs)
+{
+ return !strncmp (rs->s_v1.s_magic, reiserfs_3_5_magic_string,
+ strlen (reiserfs_3_5_magic_string));
+}
+
+
+int is_reiserfs_3_6 (struct reiserfs_super_block * rs)
+{
+ return !strncmp (rs->s_v1.s_magic, reiserfs_3_6_magic_string,
+ strlen (reiserfs_3_6_magic_string));
+}
+
+
+int is_reiserfs_jr (struct reiserfs_super_block * rs)
+{
+ return !strncmp (rs->s_v1.s_magic, reiserfs_jr_magic_string,
+ strlen (reiserfs_jr_magic_string));
+}
+
+
+static int is_any_reiserfs_magic_string (struct reiserfs_super_block * rs)
+{
+ return (is_reiserfs_3_5 (rs) || is_reiserfs_3_6 (rs) ||
+ is_reiserfs_jr (rs));
+}
+
+static int reiserfs_remount (struct super_block * s, int * flags, char * data);
+static int reiserfs_statfs (struct super_block * s, struct statfs * buf);
//
// a portion of this function, particularly the VFS interface portion,
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
-void reiserfs_write_super (struct super_block * s)
+static void reiserfs_write_super (struct super_block * s)
{
int dirty = 0 ;
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
-void reiserfs_write_super_lockfs (struct super_block * s)
+static void reiserfs_write_super_lockfs (struct super_block * s)
{
int dirty = 0 ;
reiserfs_allow_writes(s) ;
}
+extern const struct key MAX_KEY;
+
+
+/* this is used to delete a "save link" when there are no items of the
+   file it points to. That can happen either if the unlink completed
+   but removal of the "save link" did not, or if the file has both an
+   unlink and a truncate pending and the unlink completes first
+   (because the key of the "save link" protecting the unlink is bigger
+   than the key of the "save link" protecting the truncate), leaving
+   no items for the truncate completion to work on */
+static void remove_save_link_only (struct super_block * s, struct key * key)
+{
+ struct reiserfs_transaction_handle th;
+
+ /* we are going to do one balancing */
+ journal_begin (&th, s, JOURNAL_PER_BALANCE_CNT);
+
+ reiserfs_delete_solid_item (&th, key);
+ if (is_direct_le_key (KEY_FORMAT_3_5, key))
+ /* removals are protected by direct items */
+ reiserfs_release_objectid (&th, le32_to_cpu (key->k_objectid));
+
+ journal_end (&th, s, JOURNAL_PER_BALANCE_CNT);
+}
+
+
+/* look for uncompleted unlinks and truncates and complete them */
+static void finish_unfinished (struct super_block * s)
+{
+ INITIALIZE_PATH (path);
+ struct cpu_key max_cpu_key, obj_key;
+ struct key save_link_key;
+ int retval;
+ struct item_head * ih;
+ struct buffer_head * bh;
+ int item_pos;
+ char * item;
+ int done;
+ struct inode * inode;
+ int truncate;
+
+
+ /* compose key to look for "save" links */
+ max_cpu_key.version = KEY_FORMAT_3_5;
+ max_cpu_key.on_disk_key = MAX_KEY;
+ max_cpu_key.key_length = 3;
+
+ done = 0;
+ s -> u.reiserfs_sb.s_is_unlinked_ok = 1;
+ while (1) {
+ retval = search_item (s, &max_cpu_key, &path);
+ if (retval != ITEM_NOT_FOUND) {
+ reiserfs_warning ("vs-2140: finish_unfinished: search_by_key returned %d\n",
+ retval);
+ break;
+ }
+
+ bh = get_last_bh (&path);
+ item_pos = get_item_pos (&path);
+ if (item_pos != B_NR_ITEMS (bh)) {
+ reiserfs_warning ("vs-2060: finish_unfinished: wrong position found\n");
+ break;
+ }
+ item_pos --;
+ ih = B_N_PITEM_HEAD (bh, item_pos);
+
+ if (le32_to_cpu (ih->ih_key.k_dir_id) != MAX_KEY_OBJECTID)
+ /* there are no "save" links anymore */
+ break;
+
+ save_link_key = ih->ih_key;
+ if (is_indirect_le_ih (ih))
+ truncate = 1;
+ else
+ truncate = 0;
+
+ /* reiserfs_iget needs k_dirid and k_objectid only */
+ item = B_I_PITEM (bh, ih);
+ obj_key.on_disk_key.k_dir_id = le32_to_cpu (*(__u32 *)item);
+ obj_key.on_disk_key.k_objectid = le32_to_cpu (ih->ih_key.k_objectid);
+ obj_key.on_disk_key.u.k_offset_v1.k_offset = 0;
+ obj_key.on_disk_key.u.k_offset_v1.k_uniqueness = 0;
+
+ pathrelse (&path);
+
+ inode = reiserfs_iget (s, &obj_key);
+ if (!inode) {
+	    /* the unlink almost completed, it just did not manage to
+	       remove the "save" link and release the objectid */
+ reiserfs_warning ("vs-2180: finish_unfinished: iget failed for %K\n",
+ &obj_key);
+ remove_save_link_only (s, &save_link_key);
+ continue;
+ }
+
+ if (!truncate && inode->i_nlink) {
+ /* file is not unlinked */
+ reiserfs_warning ("vs-2185: finish_unfinished: file %K is not unlinked\n",
+ &obj_key);
+ remove_save_link_only (s, &save_link_key);
+ continue;
+ }
+
+ if (truncate) {
+ REISERFS_I(inode) -> i_flags |= i_link_saved_truncate_mask;
+ /* not completed truncate found. New size was committed together
+ with "save" link */
+ reiserfs_warning ("Truncating %k to %Ld ..",
+ INODE_PKEY (inode), inode->i_size);
+ reiserfs_truncate_file (inode, 0/*don't update modification time*/);
+ remove_save_link (inode, truncate);
+ } else {
+ REISERFS_I(inode) -> i_flags |= i_link_saved_unlink_mask;
+ /* not completed unlink (rmdir) found */
+ reiserfs_warning ("Removing %k..", INODE_PKEY (inode));
+ /* removal gets completed in iput */
+ }
+
+ iput (inode);
+ printk ("done\n");
+ done ++;
+ }
+ s -> u.reiserfs_sb.s_is_unlinked_ok = 0;
+
+ pathrelse (&path);
+ if (done)
+ reiserfs_warning ("There were %d uncompleted unlinks/truncates. "
+ "Completed\n", done);
+}
+
+/* to protect a file being unlinked from getting lost we "save" link
+   files being unlinked. This link is deleted in the same transaction
+   as the last item of the file. When mounting the filesystem we scan
+   all these links and remove files which almost got lost */
+void add_save_link (struct reiserfs_transaction_handle * th,
+ struct inode * inode, int truncate)
+{
+ INITIALIZE_PATH (path);
+ int retval;
+ struct cpu_key key;
+ struct item_head ih;
+ __u32 link;
+
+ /* file can only get one "save link" of each kind */
+ RFALSE( truncate &&
+ ( REISERFS_I(inode) -> i_flags & i_link_saved_truncate_mask ),
+ "saved link already exists for truncated inode %lx",
+ ( long ) inode -> i_ino );
+ RFALSE( !truncate &&
+ ( REISERFS_I(inode) -> i_flags & i_link_saved_unlink_mask ),
+ "saved link already exists for unlinked inode %lx",
+ ( long ) inode -> i_ino );
+
+ /* setup key of "save" link */
+ key.version = KEY_FORMAT_3_5;
+ key.on_disk_key.k_dir_id = MAX_KEY_OBJECTID;
+ key.on_disk_key.k_objectid = inode->i_ino;
+ if (!truncate) {
+ /* unlink, rmdir, rename */
+ set_cpu_key_k_offset (&key, 1 + inode->i_sb->s_blocksize);
+ set_cpu_key_k_type (&key, TYPE_DIRECT);
+
+ /* item head of "safe" link */
+ make_le_item_head (&ih, &key, key.version, 1 + inode->i_sb->s_blocksize, TYPE_DIRECT,
+ 4/*length*/, 0xffff/*free space*/);
+ } else {
+ /* truncate */
+ set_cpu_key_k_offset (&key, 1);
+ set_cpu_key_k_type (&key, TYPE_INDIRECT);
+
+ /* item head of "safe" link */
+ make_le_item_head (&ih, &key, key.version, 1, TYPE_INDIRECT,
+ 4/*length*/, 0/*free space*/);
+ }
+ key.key_length = 3;
+
+ /* look for its place in the tree */
+ retval = search_item (inode->i_sb, &key, &path);
+ if (retval != ITEM_NOT_FOUND) {
+ reiserfs_warning ("vs-2100: add_save_link:"
+ "search_by_key (%K) returned %d\n", &key, retval);
+ pathrelse (&path);
+ return;
+ }
+
+ /* body of "save" link */
+ link = cpu_to_le32 (INODE_PKEY (inode)->k_dir_id);
+
+ /* put "save" link inot tree */
+ retval = reiserfs_insert_item (th, &path, &key, &ih, (char *)&link);
+ if (retval)
+ reiserfs_warning ("vs-2120: add_save_link: insert_item returned %d\n",
+ retval);
+ else {
+ if( truncate )
+ REISERFS_I(inode) -> i_flags |= i_link_saved_truncate_mask;
+ else
+ REISERFS_I(inode) -> i_flags |= i_link_saved_unlink_mask;
+ }
+}
+
+
+/* this opens a transaction of its own, unlike add_save_link */
+void remove_save_link (struct inode * inode, int truncate)
+{
+ struct reiserfs_transaction_handle th;
+ struct key key;
+
+
+ /* we are going to do one balancing only */
+ journal_begin (&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT);
+
+ /* setup key of "save" link */
+ key.k_dir_id = cpu_to_le32 (MAX_KEY_OBJECTID);
+ key.k_objectid = INODE_PKEY (inode)->k_objectid;
+ if (!truncate) {
+ /* unlink, rmdir, rename */
+ set_le_key_k_offset (KEY_FORMAT_3_5, &key,
+ 1 + inode->i_sb->s_blocksize);
+ set_le_key_k_type (KEY_FORMAT_3_5, &key, TYPE_DIRECT);
+ } else {
+ /* truncate */
+ set_le_key_k_offset (KEY_FORMAT_3_5, &key, 1);
+ set_le_key_k_type (KEY_FORMAT_3_5, &key, TYPE_INDIRECT);
+ }
+
+ if( ( truncate &&
+ ( REISERFS_I(inode) -> i_flags & i_link_saved_truncate_mask ) ) ||
+ ( !truncate &&
+ ( REISERFS_I(inode) -> i_flags & i_link_saved_unlink_mask ) ) )
+ reiserfs_delete_solid_item (&th, &key);
+ if (!truncate) {
+ reiserfs_release_objectid (&th, inode->i_ino);
+ REISERFS_I(inode) -> i_flags &= ~i_link_saved_unlink_mask;
+ } else
+ REISERFS_I(inode) -> i_flags &= ~i_link_saved_truncate_mask;
+
+ journal_end (&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT);
+}
+
+
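/* A minimal usage sketch of the save-link API above, assuming an open
 * transaction handle and an inode whose last link was just removed;
 * the function name is invented for illustration. */
static void example_unlink_tail(struct reiserfs_transaction_handle *th,
				struct inode *inode)
{
	/* i_nlink just dropped to zero inside transaction *th: insert a
	   "save" link so finish_unfinished() can complete the unlink on
	   the next mount if we crash before the items are deleted */
	add_save_link(th, inode, 0 /* 0 = unlink, 1 = truncate */);

	/* ... the transaction commits, and the file's items get deleted
	   later (e.g. from the delete-inode path) ... */

	/* once the last item is gone the save link is dropped again;
	   remove_save_link() opens and closes its own transaction */
	remove_save_link(inode, 0);
}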
//
// a portion of this function, particularly the VFS interface portion,
// was derived from minix or ext2's analog and evolved as the
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
-void reiserfs_put_super (struct super_block * s)
+static void reiserfs_put_super (struct super_block * s)
{
int i;
struct reiserfs_transaction_handle th ;
if (!(s->s_flags & MS_RDONLY)) {
journal_begin(&th, s, 10) ;
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
- set_sb_state( SB_DISK_SUPER_BLOCK(s), s->u.reiserfs_sb.s_mount_state );
+ set_sb_umount_state( SB_DISK_SUPER_BLOCK(s), s->u.reiserfs_sb.s_mount_state );
journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
}
printk(KERN_INFO "reiserfs_inode_cache: not all structures were freed\n");
}
+/* we don't mark inodes dirty, we just log them */
+static void reiserfs_dirty_inode (struct inode * inode) {
+ struct reiserfs_transaction_handle th ;
+
+ if (inode->i_sb->s_flags & MS_RDONLY) {
+ reiserfs_warning("clm-6006: writing inode %lu on readonly FS\n",
+ inode->i_ino) ;
+ return ;
+ }
+ lock_kernel() ;
+
+ /* this is really only used for atime updates, so they don't have
+ ** to be included in O_SYNC or fsync
+ */
+ journal_begin(&th, inode->i_sb, 1) ;
+ reiserfs_update_sd (&th, inode);
+ journal_end(&th, inode->i_sb, 1) ;
+ unlock_kernel() ;
+}
+
struct super_operations reiserfs_sops =
{
alloc_inode: reiserfs_alloc_inode,
};
/* this was (ext2)parse_options */
-static int parse_options (char * options, unsigned long * mount_options, unsigned long * blocks)
+static int parse_options (char * options, unsigned long * mount_options, unsigned long * blocks, char **jdev_name)
{
char * this_char;
char * value;
printk("reiserfs: hash option requires a value\n");
return 0 ;
}
+ } else if (!strcmp (this_char, "jdev")) {
+ if (value && *value && jdev_name) {
+ *jdev_name = value;
+ } else {
+ printk("reiserfs: jdev option requires a value\n");
+ return 0 ;
+ }
} else {
printk ("reiserfs: Unrecognized mount option %s\n", this_char);
return 0;
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_remount (struct super_block * s, int * flags, char * data)
+static int reiserfs_remount (struct super_block * s, int * flags, char * data)
{
struct reiserfs_super_block * rs;
struct reiserfs_transaction_handle th ;
unsigned long mount_options;
rs = SB_DISK_SUPER_BLOCK (s);
-
- if (!parse_options(data, &mount_options, &blocks))
+ if (!parse_options(data, &mount_options, &blocks, NULL))
return 0;
if(blocks) {
if (*flags & MS_RDONLY) {
/* try to remount file system with read-only permissions */
- if (sb_state(rs) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
+ if (sb_umount_state(rs) == REISERFS_VALID_FS || s->u.reiserfs_sb.s_mount_state != REISERFS_VALID_FS) {
return 0;
}
journal_begin(&th, s, 10) ;
/* Mounting a rw partition read-only. */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
- set_sb_state( rs, s->u.reiserfs_sb.s_mount_state );
+ set_sb_umount_state( rs, s->u.reiserfs_sb.s_mount_state );
journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
s->s_dirt = 0;
} else {
- s->u.reiserfs_sb.s_mount_state = sb_state(rs) ;
+ s->u.reiserfs_sb.s_mount_state = sb_umount_state(rs) ;
s->s_flags &= ~MS_RDONLY ; /* now it is safe to call journal_begin */
journal_begin(&th, s, 10) ;
/* Mount a partition which is read-only, read-write */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
- s->u.reiserfs_sb.s_mount_state = sb_state(rs);
+ s->u.reiserfs_sb.s_mount_state = sb_umount_state(rs);
s->s_flags &= ~MS_RDONLY;
- set_sb_state( rs, REISERFS_ERROR_FS );
+ set_sb_umount_state( rs, REISERFS_ERROR_FS );
/* mark_buffer_dirty (SB_BUFFER_WITH_SB (s), 1); */
journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
s->s_dirt = 0;
/* this will force a full flush of all journal lists */
SB_JOURNAL(s)->j_must_wait = 1 ;
journal_end(&th, s, 10) ;
+
+ if (!( *flags & MS_RDONLY ) )
+ finish_unfinished( s );
+
return 0;
}
bh = sb_bread (s, offset / s->s_blocksize);
if (!bh) {
- printk ("read_super_block: "
- "bread failed (dev %s, block %ld, size %ld)\n",
+ printk ("sh-2006: read_super_block: "
+ "bread failed (dev %s, block %lu, size %lu)\n",
s->s_id, offset / s->s_blocksize, s->s_blocksize);
return 1;
}
rs = (struct reiserfs_super_block *)bh->b_data;
- if (!is_reiserfs_magic_string (rs)) {
- printk ("read_super_block: "
- "can't find a reiserfs filesystem on (dev %s, block %lu, size %ld)\n",
- s->s_id, bh->b_blocknr, s->s_blocksize);
+ if (!is_any_reiserfs_magic_string (rs)) {
brelse (bh);
return 1;
}
brelse (bh);
sb_set_blocksize (s, sb_blocksize(rs));
-
+
bh = reiserfs_bread (s, offset / s->s_blocksize);
if (!bh) {
- printk("read_super_block: "
- "bread failed (dev %s, block %ld, size %ld)\n",
+ printk("sh-2007: read_super_block: "
+ "bread failed (dev %s, block %lu, size %lu)\n",
s->s_id, offset / s->s_blocksize, s->s_blocksize);
return 1;
}
rs = (struct reiserfs_super_block *)bh->b_data;
- if (!is_reiserfs_magic_string (rs) || sb_blocksize(rs) != s->s_blocksize) {
- printk ("read_super_block: "
- "can't find a reiserfs filesystem on (dev %s, block %lu, size %ld)\n",
+ if (sb_blocksize(rs) != s->s_blocksize) {
+ printk ("sh-2011: read_super_block: "
+ "can't find a reiserfs filesystem on (dev %s, block %lu, size %lu)\n",
s->s_id, bh->b_blocknr, s->s_blocksize);
brelse (bh);
- printk ("read_super_block: can't find a reiserfs filesystem on dev %s.\n", s->s_id);
return 1;
}
- /* must check to be sure we haven't pulled an old format super out
- ** of the old format's log. This is a kludge of a check, but it
- ** will work. If block we've just read in is inside the
- ** journal for that super, it can't be valid.
- */
- if (bh->b_blocknr >= sb_journal_block(rs) &&
- bh->b_blocknr < (sb_journal_block(rs) + JOURNAL_BLOCK_COUNT)) {
- brelse(bh) ;
- printk("super-459: read_super_block: "
- "super found at block %lu is within its own log. "
- "It must not be of this format type.\n", bh->b_blocknr) ;
- return 1 ;
- }
+
SB_BUFFER_WITH_SB (s) = bh;
SB_DISK_SUPER_BLOCK (s) = rs;
+
+ if (is_reiserfs_jr (rs)) {
+	/* the magic string is that of a filesystem with a non-standard
+	   journal; look at s_version to find which format is in use */
+ if (sb_version(rs) == REISERFS_VERSION_2)
+ printk ("read_super_block: found reiserfs format \"3.6\" "
+ "with non-standard journal\n");
+ else if (sb_version(rs) == REISERFS_VERSION_1)
+ printk ("read_super_block: found reiserfs format \"3.5\" "
+ "with non-standard journal\n");
+ else {
+ printk ("sh-2012: read_super_block: found unknown format \"%u\" "
+ "of reiserfs with non-standard magic\n", sb_version(rs));
+ return 1;
+ }
+ }
+ else
+ /* s_version of standard format may contain incorrect information,
+ so we just look at the magic string */
+ printk ("found reiserfs format \"%s\" with standard journal\n",
+ is_reiserfs_3_5 (rs) ? "3.5" : "3.6");
+
s->s_op = &reiserfs_sops;
/* new format is limited by the 32 bit wide i_blocks field, want to
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
-struct super_block * reiserfs_read_super (struct super_block * s, void * data, int silent)
+static struct super_block * reiserfs_read_super (struct super_block * s, void * data, int silent)
{
int size;
struct inode *root_inode;
unsigned long blocks;
int jinit_done = 0 ;
struct reiserfs_iget4_args args ;
-
+ struct reiserfs_super_block * rs;
+ char *jdev_name;
memset (&s->u.reiserfs_sb, 0, sizeof (struct reiserfs_sb_info));
-
- if (parse_options ((char *) data, &(s->u.reiserfs_sb.s_mount_opt), &blocks) == 0) {
+ jdev_name = NULL;
+ if (parse_options ((char *) data, &(s->u.reiserfs_sb.s_mount_opt), &blocks, &jdev_name) == 0) {
return NULL;
}
size = block_size(s->s_dev);
sb_set_blocksize(s, size);
-
- /* read block (64-th 1k block), which can contain reiserfs super block */
- if (read_super_block (s, REISERFS_DISK_OFFSET_IN_BYTES)) {
- // try old format (undistributed bitmap, super block in 8-th 1k block of a device)
- sb_set_blocksize(s, size);
- if (read_super_block (s, REISERFS_OLD_DISK_OFFSET_IN_BYTES))
- goto error;
- else
- old_format = 1;
+
+ /* try old format (undistributed bitmap, super block in 8-th 1k block of a device) */
+ if (!read_super_block (s, REISERFS_OLD_DISK_OFFSET_IN_BYTES))
+ old_format = 1;
+ /* try new format (64-th 1k block), which can contain reiserfs super block */
+ else if (read_super_block (s, REISERFS_DISK_OFFSET_IN_BYTES)) {
+ printk("sh-2021: reiserfs_read_super: can not find reiserfs on %s\n", s->s_id);
+ goto error;
}
-
s->u.reiserfs_sb.s_mount_state = SB_REISERFS_STATE(s);
s->u.reiserfs_sb.s_mount_state = REISERFS_VALID_FS ;
#endif
// set_device_ro(s->s_dev, 1) ;
- if (journal_init(s)) {
- printk("reiserfs_read_super: unable to initialize journal space\n") ;
+ if( journal_init(s, jdev_name, old_format) ) {
+ printk("sh-2022: reiserfs_read_super: unable to initialize journal space\n") ;
goto error ;
} else {
jinit_done = 1 ; /* once this is set, journal_release must be called
goto error ;
}
+ rs = SB_DISK_SUPER_BLOCK (s);
+ if (is_reiserfs_3_5 (rs) || (is_reiserfs_jr (rs) && SB_VERSION (s) == REISERFS_VERSION_1))
+ set_bit(REISERFS_3_5, &(s->u.reiserfs_sb.s_properties));
+ else
+ set_bit(REISERFS_3_6, &(s->u.reiserfs_sb.s_properties));
+
if (!(s->s_flags & MS_RDONLY)) {
- struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
- int old_magic;
-
- old_magic = strncmp (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING,
- strlen ( REISER2FS_SUPER_MAGIC_STRING));
- if( old_magic && le16_to_cpu(rs->s_version) != 0 ) {
- dput(s->s_root) ;
- s->s_root = NULL ;
- reiserfs_warning("reiserfs: wrong version/magic combination in the super-block\n") ;
- goto error ;
- }
journal_begin(&th, s, 1) ;
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1) ;
- set_sb_state( rs, REISERFS_ERROR_FS );
-
- if ( old_magic ) {
- // filesystem created under 3.5.x found
- if (!old_format_only (s)) {
- reiserfs_warning("reiserfs: converting 3.5.x filesystem to the new format\n") ;
- // after this 3.5.x will not be able to mount this partition
- memcpy (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING,
- sizeof (REISER2FS_SUPER_MAGIC_STRING));
-
- reiserfs_convert_objectid_map_v1(s) ;
- } else {
- reiserfs_warning("reiserfs: using 3.5.x disk format\n") ;
- }
- } else {
- // new format found
- set_bit (REISERFS_CONVERT, &(s->u.reiserfs_sb.s_mount_opt));
+ set_sb_umount_state( rs, REISERFS_ERROR_FS );
+ set_sb_fs_state (rs, 0);
+
+ if (old_format_only(s)) {
+ /* filesystem of format 3.5 either with standard or non-standard
+ journal */
+ if (convert_reiserfs (s)) {
+ /* and -o conv is given */
+ reiserfs_warning ("reiserfs: converting 3.5 filesystem to the 3.6 format\n") ;
+
+ if (is_reiserfs_3_5 (rs))
+ /* put magic string of 3.6 format. 2.2 will not be able to
+ mount this filesystem anymore */
+ memcpy (rs->s_v1.s_magic, reiserfs_3_6_magic_string,
+ sizeof (reiserfs_3_6_magic_string));
+
+ set_sb_version(rs,REISERFS_VERSION_2);
+ reiserfs_convert_objectid_map_v1(s) ;
+ set_bit(REISERFS_3_6, &(s->u.reiserfs_sb.s_properties));
+ clear_bit(REISERFS_3_5, &(s->u.reiserfs_sb.s_properties));
+ } else {
+ reiserfs_warning("reiserfs: using 3.5.x disk format\n") ;
+ }
}
- // mark hash in super block: it could be unset. overwrite should be ok
- set_sb_hash_function_code( rs, function2code(s->u.reiserfs_sb.s_hash_function ) );
-
journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB (s));
journal_end(&th, s, 1) ;
+
+ /* look for files which were to be removed in previous session */
+ finish_unfinished (s);
+
s->s_dirt = 0;
} else {
- struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
- if (strncmp (rs->s_magic, REISER2FS_SUPER_MAGIC_STRING,
- strlen ( REISER2FS_SUPER_MAGIC_STRING))) {
+ if ( old_format_only(s) ) {
reiserfs_warning("reiserfs: using 3.5.x disk format\n") ;
}
}
+ // mark hash in super block: it could be unset. overwrite should be ok
+ set_sb_hash_function_code( rs, function2code(s->u.reiserfs_sb.s_hash_function ) );
reiserfs_proc_info_init( s );
reiserfs_proc_register( s, "version", reiserfs_version_in_proc );
reiserfs_proc_register( s, "journal", reiserfs_journal_in_proc );
init_waitqueue_head (&(s->u.reiserfs_sb.s_wait));
- printk("%s\n", reiserfs_get_version_string()) ;
return s;
error:
// at the ext2 code and comparing. Its subfunctions contain no code
// used as a template unless they are so labeled.
//
-int reiserfs_statfs (struct super_block * s, struct statfs * buf)
+static int reiserfs_statfs (struct super_block * s, struct statfs * buf)
{
struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s);
return err;
}
+
MODULE_DESCRIPTION("ReiserFS journaled filesystem");
MODULE_AUTHOR("Hans Reiser <reiser@namesys.com>");
MODULE_LICENSE("GPL");
/* Set direct item header to insert. */
- make_le_item_head (&s_ih, 0, inode_items_version (p_s_inode), pos1 + 1,
+ make_le_item_head (&s_ih, 0, get_inode_item_key_version (p_s_inode), pos1 + 1,
TYPE_DIRECT, round_tail_len, 0xffff/*ih_free_space*/);
/* we want a pointer to the first byte of the tail in the page.
+++ /dev/null
-/*
- * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
- */
-
-char *reiserfs_get_version_string(void) {
- return "ReiserFS version 3.6.25" ;
-}
long long retval;
struct inode *inode = file->f_dentry->d_inode;
+ lock_kernel();
+
switch (origin) {
case 2:
offset += inode->i_size;
}
retval = offset;
}
+ unlock_kernel();
return retval;
}
--- /dev/null
+/*
+ File: fs/xattr.c
+
+ Extended attribute handling.
+
+ Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
+ Copyright (C) 2001 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com>
+ */
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/smp_lock.h>
+#include <linux/file.h>
+#include <linux/xattr.h>
+#include <asm/uaccess.h>
+
+/*
+ * Extended attribute memory allocation wrappers, originally
+ * based on the Intermezzo PRESTO_ALLOC/PRESTO_FREE macros.
+ * The vmalloc use here is very uncommon - extended attributes
+ * are supposed to be small chunks of metadata, and it is quite
+ * unusual to have very many extended attributes, so lists tend
+ * to be quite short as well. The 64K upper limit is derived
+ * from the extended attribute size limit used by XFS.
+ * Intentionally allow zero @size for value/list size requests.
+ */
+static void *
+xattr_alloc(size_t size, size_t limit)
+{
+ void *ptr;
+
+ if (size > limit)
+ return ERR_PTR(-E2BIG);
+
+ if (!size) /* size request, no buffer is needed */
+ return NULL;
+ else if (size <= PAGE_SIZE)
+ ptr = kmalloc((unsigned long) size, GFP_KERNEL);
+ else
+ ptr = vmalloc((unsigned long) size);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ return ptr;
+}
+
+static void
+xattr_free(void *ptr, size_t size)
+{
+ if (!size) /* size request, no buffer was needed */
+ return;
+ else if (size <= PAGE_SIZE)
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+/*
+ * Extended attribute SET operations
+ */
+static long
+setxattr(struct dentry *d, char *name, void *value, size_t size, int flags)
+{
+ int error;
+ void *kvalue;
+ char kname[XATTR_NAME_MAX + 1];
+
+ if (flags & ~(XATTR_CREATE|XATTR_REPLACE))
+ return -EINVAL;
+
+ if (copy_from_user(kname, name, XATTR_NAME_MAX))
+ return -EFAULT;
+ kname[XATTR_NAME_MAX] = '\0';
+
+ kvalue = xattr_alloc(size, XATTR_SIZE_MAX);
+ if (IS_ERR(kvalue))
+ return PTR_ERR(kvalue);
+
+ if (size > 0 && copy_from_user(kvalue, value, size)) {
+ xattr_free(kvalue, size);
+ return -EFAULT;
+ }
+
+ error = -EOPNOTSUPP;
+ if (d->d_inode->i_op && d->d_inode->i_op->setxattr) {
+ lock_kernel();
+ error = d->d_inode->i_op->setxattr(d, kname, kvalue, size, flags);
+ unlock_kernel();
+ }
+
+ xattr_free(kvalue, size);
+ return error;
+}
+
+asmlinkage long
+sys_setxattr(char *path, char *name, void *value, size_t size, int flags)
+{
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk(path, &nd);
+ if (error)
+ return error;
+ error = setxattr(nd.dentry, name, value, size, flags);
+ path_release(&nd);
+ return error;
+}
+
+asmlinkage long
+sys_lsetxattr(char *path, char *name, void *value, size_t size, int flags)
+{
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk_link(path, &nd);
+ if (error)
+ return error;
+ error = setxattr(nd.dentry, name, value, size, flags);
+ path_release(&nd);
+ return error;
+}
+
+asmlinkage long
+sys_fsetxattr(int fd, char *name, void *value, size_t size, int flags)
+{
+ struct file *f;
+ int error = -EBADF;
+
+ f = fget(fd);
+ if (!f)
+ return error;
+ error = setxattr(f->f_dentry, name, value, size, flags);
+ fput(f);
+ return error;
+}
+
+/*
+ * Extended attribute GET operations
+ */
+static long
+getxattr(struct dentry *d, char *name, void *value, size_t size)
+{
+ int error;
+ void *kvalue;
+ char kname[XATTR_NAME_MAX + 1];
+
+ if (copy_from_user(kname, name, XATTR_NAME_MAX))
+ return -EFAULT;
+ kname[XATTR_NAME_MAX] = '\0';
+
+ kvalue = xattr_alloc(size, XATTR_SIZE_MAX);
+ if (IS_ERR(kvalue))
+ return PTR_ERR(kvalue);
+
+ error = -EOPNOTSUPP;
+ if (d->d_inode->i_op && d->d_inode->i_op->getxattr) {
+ lock_kernel();
+ error = d->d_inode->i_op->getxattr(d, kname, kvalue, size);
+ unlock_kernel();
+ }
+
+ if (kvalue && error > 0)
+ if (copy_to_user(value, kvalue, size))
+ error = -EFAULT;
+ xattr_free(kvalue, size);
+ return error;
+}
+
+asmlinkage long
+sys_getxattr(char *path, char *name, void *value, size_t size)
+{
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk(path, &nd);
+ if (error)
+ return error;
+ error = getxattr(nd.dentry, name, value, size);
+ path_release(&nd);
+ return error;
+}
+
+asmlinkage long
+sys_lgetxattr(char *path, char *name, void *value, size_t size)
+{
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk_link(path, &nd);
+ if (error)
+ return error;
+ error = getxattr(nd.dentry, name, value, size);
+ path_release(&nd);
+ return error;
+}
+
+asmlinkage long
+sys_fgetxattr(int fd, char *name, void *value, size_t size)
+{
+ struct file *f;
+ int error = -EBADF;
+
+ f = fget(fd);
+ if (!f)
+ return error;
+ error = getxattr(f->f_dentry, name, value, size);
+ fput(f);
+ return error;
+}
+
+/*
+ * Extended attribute LIST operations
+ */
+static long
+listxattr(struct dentry *d, char *list, size_t size)
+{
+ int error;
+ char *klist;
+
+ klist = (char *)xattr_alloc(size, XATTR_LIST_MAX);
+ if (IS_ERR(klist))
+ return PTR_ERR(klist);
+
+ error = -EOPNOTSUPP;
+ if (d->d_inode->i_op && d->d_inode->i_op->listxattr) {
+ lock_kernel();
+ error = d->d_inode->i_op->listxattr(d, klist, size);
+ unlock_kernel();
+ }
+
+ if (klist && error > 0)
+ if (copy_to_user(list, klist, size))
+ error = -EFAULT;
+ xattr_free(klist, size);
+ return error;
+}
+
+asmlinkage long
+sys_listxattr(char *path, char *list, size_t size)
+{
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk(path, &nd);
+ if (error)
+ return error;
+ error = listxattr(nd.dentry, list, size);
+ path_release(&nd);
+ return error;
+}
+
+asmlinkage long
+sys_llistxattr(char *path, char *list, size_t size)
+{
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk_link(path, &nd);
+ if (error)
+ return error;
+ error = listxattr(nd.dentry, list, size);
+ path_release(&nd);
+ return error;
+}
+
+asmlinkage long
+sys_flistxattr(int fd, char *list, size_t size)
+{
+ struct file *f;
+ int error = -EBADF;
+
+ f = fget(fd);
+ if (!f)
+ return error;
+ error = listxattr(f->f_dentry, list, size);
+ fput(f);
+ return error;
+}
+
+/*
+ * Extended attribute REMOVE operations
+ */
+static long
+removexattr(struct dentry *d, char *name)
+{
+ int error;
+ char kname[XATTR_NAME_MAX + 1];
+
+ if (copy_from_user(kname, name, XATTR_NAME_MAX))
+ return -EFAULT;
+ kname[XATTR_NAME_MAX] = '\0';
+
+ error = -EOPNOTSUPP;
+ if (d->d_inode->i_op && d->d_inode->i_op->removexattr) {
+ lock_kernel();
+ error = d->d_inode->i_op->removexattr(d, kname);
+ unlock_kernel();
+ }
+ return error;
+}
+
+asmlinkage long
+sys_removexattr(char *path, char *name)
+{
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk(path, &nd);
+ if (error)
+ return error;
+ error = removexattr(nd.dentry, name);
+ path_release(&nd);
+ return error;
+}
+
+asmlinkage long
+sys_lremovexattr(char *path, char *name)
+{
+ struct nameidata nd;
+ int error;
+
+ error = user_path_walk_link(path, &nd);
+ if (error)
+ return error;
+ error = removexattr(nd.dentry, name);
+ path_release(&nd);
+ return error;
+}
+
+asmlinkage long
+sys_fremovexattr(int fd, char *name)
+{
+ struct file *f;
+ int error = -EBADF;
+
+ f = fget(fd);
+ if (!f)
+ return error;
+ error = removexattr(f->f_dentry, name);
+ fput(f);
+ return error;
+}
* 9 - APM BIOS support
* 10 - APM BIOS support
* 11 - APM BIOS support
+ * 12 - PNPBIOS support
+ * 13 - PNPBIOS support
+ * 14 - PNPBIOS support
+ * 15 - PNPBIOS support
+ * 16 - PNPBIOS support
+ * 17 - not used
+ * 18 - not used
+ * 19 - not used
*
* The TSS+LDT descriptors are spread out a bit so that every CPU
* has an exclusive cacheline for the per-CPU TSS and LDT:
*
- * 12 - CPU#0 TSS <-- new cacheline
- * 13 - CPU#0 LDT
- * 14 - not used
- * 15 - not used
- * 16 - CPU#1 TSS <-- new cacheline
- * 17 - CPU#1 LDT
- * 18 - not used
- * 19 - not used
+ * 20 - CPU#0 TSS <-- new cacheline
+ * 21 - CPU#0 LDT
+ * 22 - not used
+ * 23 - not used
+ * 24 - CPU#1 TSS <-- new cacheline
+ * 25 - CPU#1 LDT
+ * 26 - not used
+ * 27 - not used
* ... NR_CPUS per-CPU TSS+LDT's if on SMP
*
* Entry into gdt where to find first TSS.
*/
-#define __FIRST_TSS_ENTRY 12
+#define __FIRST_TSS_ENTRY 20
#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY+1)
#define __TSS(n) (((n)<<2) + __FIRST_TSS_ENTRY)
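/* Worked example of the new layout: CPU#0's TSS sits at GDT entry
 * __TSS(0) = (0 << 2) + 20 = 20 and CPU#1's at __TSS(1) = (1 << 2) + 20
 * = 24, matching the cacheline picture above. The selector actually
 * loaded into the task register is the GDT index shifted left by 3, so
 * CPU#0's TSS selector becomes 20 << 3 = 160 (0xa0). */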
#define _ASMi386_SIGNAL_H
#include <linux/types.h>
+#include <linux/linkage.h>
/* Avoid too many header ordering problems. */
struct siginfo;
return word;
}
+struct pt_regs;
+extern int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
+
#endif /* __KERNEL__ */
#endif
#define __NR_security 223 /* syscall for security modules */
#define __NR_gettid 224
#define __NR_readahead 225
+#define __NR_setxattr 226
+#define __NR_lsetxattr 227
+#define __NR_fsetxattr 228
+#define __NR_getxattr 229
+#define __NR_lgetxattr 230
+#define __NR_fgetxattr 231
+#define __NR_listxattr 232
+#define __NR_llistxattr 233
+#define __NR_flistxattr 234
+#define __NR_removexattr 235
+#define __NR_lremovexattr 236
+#define __NR_fremovexattr 237
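/* A hedged userspace sketch exercising the new i386 syscall slots
 * before libc wrappers exist; the path and attribute name below are
 * invented, and a filesystem implementing ->setxattr is assumed. */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* __NR_setxattr is 226 per the table above; flags 0 means
	   "create or replace" */
	long rc = syscall(226, "/mnt/test/file", "user.comment", "hello", 5, 0);
	if (rc < 0)
		perror("setxattr");
	return rc < 0 ? 1 : 0;
}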
/* user-visible error numbers are in the range -1 - -124: see <asm-i386/errno.h> */
__u32 i_faddr;
__u8 i_frag_no;
__u8 i_frag_size;
- __u16 unused; /* formerly i_osync */
#endif
__u32 i_file_acl;
__u32 i_dir_acl;
int (*revalidate) (struct dentry *);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct dentry *, struct iattr *);
+ int (*setxattr) (struct dentry *, char *, void *, size_t, int);
+ int (*getxattr) (struct dentry *, char *, void *, size_t);
+ int (*listxattr) (struct dentry *, char *, size_t);
+ int (*removexattr) (struct dentry *, char *);
};
struct seq_file;
extern int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
extern int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
unsigned long *);
+extern int generic_cont_expand(struct inode *inode, loff_t size) ;
extern int block_commit_write(struct page *page, unsigned from, unsigned to);
extern int block_sync_page(struct page *);
siglock: SPIN_LOCK_UNLOCKED \
}
+#define INIT_TASK_WORK \
+{ \
+ need_resched: 0, \
+ syscall_trace: 0, \
+ sigpending: 0, \
+ notify_resume: 0, \
+}
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
{ \
state: 0, \
flags: 0, \
- sigpending: 0, \
+ work: INIT_TASK_WORK, \
addr_limit: KERNEL_DS, \
exec_domain: &default_exec_domain, \
lock_depth: -1, \
#define NAME_MAX 255 /* # chars in a file name */
#define PATH_MAX 4096 /* # chars in a path name including nul */
#define PIPE_BUF 4096 /* # bytes in atomic write to a pipe */
+#define XATTR_NAME_MAX 255 /* # chars in an extended attribute name */
+#define XATTR_SIZE_MAX 65536 /* size of an extended attribute value (64k) */
+#define XATTR_LIST_MAX 65536 /* size of extended attribute namelist (64k) */
#define RTSIG_MAX 32
--- /dev/null
+/*
+ * Include file for the interface to a PnP BIOS
+ *
+ * Original BIOS code (C) 1998 Christian Schmidt (chr.schmidt@tu-bs.de)
+ * PnP handler parts (c) 1998 Tom Lees <tom@lpsg.demon.co.uk>
+ * Minor reorganizations by David Hinds <dhinds@zen.stanford.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_PNPBIOS_H
+#define _LINUX_PNPBIOS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/*
+ * Status codes (warnings and errors)
+ */
+#define PNP_SUCCESS 0x00
+#define PNP_NOT_SET_STATICALLY 0x7f
+#define PNP_UNKNOWN_FUNCTION 0x81
+#define PNP_FUNCTION_NOT_SUPPORTED 0x82
+#define PNP_INVALID_HANDLE 0x83
+#define PNP_BAD_PARAMETER 0x84
+#define PNP_SET_FAILED 0x85
+#define PNP_EVENTS_NOT_PENDING 0x86
+#define PNP_SYSTEM_NOT_DOCKED 0x87
+#define PNP_NO_ISA_PNP_CARDS 0x88
+#define PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES 0x89
+#define PNP_CONFIG_CHANGE_FAILED_NO_BATTERY 0x8a
+#define PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT 0x8b
+#define PNP_BUFFER_TOO_SMALL 0x8c
+#define PNP_USE_ESCD_SUPPORT 0x8d
+#define PNP_MESSAGE_NOT_SUPPORTED 0x8e
+#define PNP_HARDWARE_ERROR 0x8f
+
+#define ESCD_SUCCESS 0x00
+#define ESCD_IO_ERROR_READING 0x55
+#define ESCD_INVALID 0x56
+#define ESCD_BUFFER_TOO_SMALL 0x59
+#define ESCD_NVRAM_TOO_SMALL 0x5a
+#define ESCD_FUNCTION_NOT_SUPPORTED 0x81
+
+/*
+ * Events that can be received by "get event"
+ */
+#define PNPEV_ABOUT_TO_CHANGE_CONFIG 0x0001
+#define PNPEV_DOCK_CHANGED 0x0002
+#define PNPEV_SYSTEM_DEVICE_CHANGED 0x0003
+#define PNPEV_CONFIG_CHANGED_FAILED 0x0004
+#define PNPEV_UNKNOWN_SYSTEM_EVENT 0xffff
+/* 0x8000 through 0xfffe are OEM defined */
+
+/*
+ * Messages that should be sent through "send message"
+ */
+#define PNPMSG_OK 0x00
+#define PNPMSG_ABORT 0x01
+#define PNPMSG_UNDOCK_DEFAULT_ACTION 0x40
+#define PNPMSG_POWER_OFF 0x41
+#define PNPMSG_PNP_OS_ACTIVE 0x42
+#define PNPMSG_PNP_OS_INACTIVE 0x43
+/* 0x8000 through 0xffff are OEM defined */
+
+#pragma pack(1)
+struct pnp_dev_node_info {
+ __u16 no_nodes;
+ __u16 max_node_size;
+};
+struct pnp_docking_station_info {
+ __u32 location_id;
+ __u32 serial;
+ __u16 capabilities;
+};
+struct pnp_isa_config_struc {
+ __u8 revision;
+ __u8 no_csns;
+ __u16 isa_rd_data_port;
+ __u16 reserved;
+};
+struct escd_info_struc {
+ __u16 min_escd_write_size;
+ __u16 escd_size;
+ __u32 nv_storage_base;
+};
+struct pnp_bios_node {
+ __u16 size;
+ __u8 handle;
+ __u32 eisa_id;
+ __u8 type_code[3];
+ __u16 flags;
+ __u8 data[0];
+};
+#pragma pack()
+
+struct pnpbios_device_id
+{
+ char id[8];
+ unsigned long driver_data;
+};
+
+struct pnpbios_driver {
+ struct list_head node;
+ char *name;
+ const struct pnpbios_device_id *id_table; /* NULL if wants all devices */
+ int (*probe) (struct pci_dev *dev, const struct pnpbios_device_id *id); /* New device inserted */
+ void (*remove) (struct pci_dev *dev); /* Device removed, either due to hotplug remove or module remove */
+};
+
+#ifdef CONFIG_PNPBIOS
+
+/* exported */
+extern int pnpbios_register_driver(struct pnpbios_driver *drv);
+extern void pnpbios_unregister_driver(struct pnpbios_driver *drv);
+
+/* non-exported */
+#define pnpbios_for_each_dev(dev) \
+ for(dev = pnpbios_dev_g(pnpbios_devices.next); dev != pnpbios_dev_g(&pnpbios_devices); dev = pnpbios_dev_g(dev->global_list.next))
+
+
+#define pnpbios_dev_g(n) list_entry(n, struct pci_dev, global_list)
+
+static __inline struct pnpbios_driver *pnpbios_dev_driver(const struct pci_dev *dev)
+{
+ return (struct pnpbios_driver *)dev->driver;
+}
+
+extern int pnpbios_dont_use_current_config;
+extern void *pnpbios_kmalloc(size_t size, int f);
+extern void pnpbios_init (void);
+extern void pnpbios_proc_init (void);
+
+extern int pnp_bios_dev_node_info (struct pnp_dev_node_info *data);
+extern int pnp_bios_get_dev_node (u8 *nodenum, char config, struct pnp_bios_node *data);
+extern int pnp_bios_set_dev_node (u8 nodenum, char config, struct pnp_bios_node *data);
+#if needed
+extern int pnp_bios_get_event (u16 *message);
+extern int pnp_bios_send_message (u16 message);
+extern int pnp_bios_set_stat_res (char *info);
+extern int pnp_bios_get_stat_res (char *info);
+extern int pnp_bios_apm_id_table (char *table, u16 *size);
+extern int pnp_bios_isapnp_config (struct pnp_isa_config_struc *data);
+extern int pnp_bios_escd_info (struct escd_info_struc *data);
+extern int pnp_bios_read_escd (char *data, u32 nvram_base);
+extern int pnp_bios_write_escd (char *data, u32 nvram_base);
+#endif
+
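/* A sketch of the enumeration protocol behind the calls above, as
 * described by the PnP BIOS specification: the BIOS advances the node
 * number on each call until it reaches 0xff. The function name is
 * invented; GFP_KERNEL comes from <linux/slab.h>. */
static void example_walk_pnpbios_nodes(void)
{
	struct pnp_dev_node_info info;
	struct pnp_bios_node *node;
	u8 nodenum = 0;

	if (pnp_bios_dev_node_info(&info) != PNP_SUCCESS)
		return;
	node = pnpbios_kmalloc(info.max_node_size, GFP_KERNEL);
	if (!node)
		return;
	while (nodenum != 0xff) {
		/* second argument: 0 requests the current configuration,
		   1 the boot configuration (per the spec) */
		if (pnp_bios_get_dev_node(&nodenum, 0, node) != PNP_SUCCESS)
			break;
		printk(KERN_DEBUG "pnpbios: node %02x, eisa id %08x\n",
		       node->handle, node->eisa_id);
	}
	kfree(node);
}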
+/*
+ * a helper function which helps ensure correct pnpbios_driver
+ * setup and cleanup for commonly-encountered hotplug/modular cases
+ *
+ * This MUST stay in a header, as it checks for -DMODULE
+ */
+
+static inline int pnpbios_module_init(struct pnpbios_driver *drv)
+{
+ int rc = pnpbios_register_driver (drv);
+
+ if (rc > 0)
+ return 0;
+
+ /* iff CONFIG_HOTPLUG and built into kernel, we should
+ * leave the driver around for future hotplug events.
+ * For the module case, a hotplug daemon of some sort
+ * should load a module in response to an insert event. */
+#if defined(CONFIG_HOTPLUG) && !defined(MODULE)
+ if (rc == 0)
+ return 0;
+#else
+ if (rc == 0)
+ rc = -ENODEV;
+#endif
+
+ /* if we get here, we need to clean up pci driver instance
+ * and return some sort of error */
+ pnpbios_unregister_driver (drv);
+
+ return rc;
+}
+
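/* A hypothetical driver skeleton showing the intended use of
 * pnpbios_module_init(); every name and the device id below are
 * invented for illustration. */
static const struct pnpbios_device_id example_ids[] = {
	{ "PNP0c01", 0 },	/* e.g. a system board device */
	{ "", 0 }		/* terminator */
};

static int example_probe(struct pci_dev *dev,
			 const struct pnpbios_device_id *id)
{
	return 0;		/* accept the device */
}

static void example_remove(struct pci_dev *dev)
{
}

static struct pnpbios_driver example_driver = {
	name:		"example",
	id_table:	example_ids,
	probe:		example_probe,
	remove:		example_remove,
};

static int __init example_init(void)
{
	/* covers both the built-in-with-hotplug and the modular case */
	return pnpbios_module_init(&example_driver);
}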
+#else /* CONFIG_PNPBIOS */
+
+static __inline__ int pnpbios_register_driver(struct pnpbios_driver *drv)
+{
+ return 0;
+}
+
+static __inline__ void pnpbios_unregister_driver(struct pnpbios_driver *drv)
+{
+ return;
+}
+
+#endif /* CONFIG_PNPBIOS */
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_PNPBIOS_H */
*/
- /* Vladimir, what is the story with
- new_get_new_buffer nowadays? I
- want a complete explanation written
- here. */
-
-/* NEW_GET_NEW_BUFFER will try to allocate new blocks better */
-/*#define NEW_GET_NEW_BUFFER*/
-#define OLD_GET_NEW_BUFFER
-
- /* Vladimir, what about this one too? */
-/* if this is undefined, all inode changes get into stat data immediately, if it can be found in RAM */
-#define DIRTY_LATER
-
-/* enable journalling */
-#define ENABLE_JOURNAL
-
#define USE_INODE_GENERATION_COUNTER
-
-#ifdef __KERNEL__
-
-/* #define REISERFS_CHECK */
-
#define REISERFS_PREALLOCATE
-#endif
#define PREALLOCATION_SIZE 8
-/* if this is undefined, all inode changes get into stat data
- immediately, if it can be found in RAM */
-#define DIRTY_LATER
-
-
-/*#define READ_LOCK_REISERFS*/
-
-
/* n must be power of 2 */
#define _ROUND_UP(x,n) (((x)+(n)-1u) & ~((n)-1u))
// to be ok for alpha and others we have to align structures to 8 byte
// boundary.
// FIXME: do not change 4 by anything else: there is code which relies on that
- /* what 4? -Hans */
#define ROUND_UP(x) _ROUND_UP(x,8LL)
/* debug levels. Right now, CONFIG_REISERFS_CHECK means print all debug
* Structure of super block on disk, a version of which in RAM is often accessed as s->u.reiserfs_sb.s_rs
* the version in RAM is part of a larger structure containing fields never written to disk.
*/
+#define UNSET_HASH 0 // read_super will guess which hash was used to
+                     // sort names in directories
+#define TEA_HASH 1
+#define YURA_HASH 2
+#define R5_HASH 3
+#define DEFAULT_HASH R5_HASH
+
+
+struct journal_params {
+ __u32 jp_journal_1st_block; /* where does journal start from on its
+ * device */
+ __u32 jp_journal_dev; /* journal device st_rdev */
+ __u32 jp_journal_size; /* size of the journal */
+ __u32 jp_journal_trans_max; /* max number of blocks in a transaction. */
+ __u32 jp_journal_magic; /* random value made on fs creation (this
+ * was sb_journal_block_count) */
+ __u32 jp_journal_max_batch; /* max number of blocks to batch into a
+ * trans */
+ __u32 jp_journal_max_commit_age; /* in seconds, how old can an async
+ * commit be */
+ __u32 jp_journal_max_trans_age; /* in seconds, how old can a transaction
+ * be */
+};
+
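/* All journal_params fields are stored little-endian on disk; a
 * minimal accessor sketch (function name invented): */
static inline __u32 example_jp_trans_max(const struct journal_params *jp)
{
	return le32_to_cpu(jp->jp_journal_trans_max);
}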
+/* this is the super from 3.5.X, where X >= 10 */
+struct reiserfs_super_block_v1
+{
+ __u32 s_block_count; /* blocks count */
+ __u32 s_free_blocks; /* free blocks count */
+ __u32 s_root_block; /* root block number */
+ struct journal_params s_journal;
+ __u16 s_blocksize; /* block size */
+ __u16 s_oid_maxsize; /* max size of object id array, see
+ * get_objectid() commentary */
+ __u16 s_oid_cursize; /* current size of object id array */
+    __u16 s_umount_state;          /* set to 1 when the filesystem is
+                                    * cleanly umounted, to 2 when not */
+ char s_magic[10]; /* reiserfs magic string indicates that
+ * file system is reiserfs:
+ * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs" */
+    __u16 s_fs_state;              /* used by fsck to mark which phase
+                                    * of rebuilding is done */
+    __u32 s_hash_function_code;    /* indicates which hash function is
+                                    * used to sort names in a directory */
+ __u16 s_tree_height; /* height of disk tree */
+ __u16 s_bmap_nr; /* amount of bitmap blocks needed to address
+ * each block of file system */
+ __u16 s_version; /* this field is only reliable on filesystem
+ * with non-standard journal */
+ __u16 s_reserved_for_journal; /* size in blocks of journal area on main
+ * device, we need to keep after
+ * making fs with non-standard journal */
+} __attribute__ ((__packed__));
+
+#define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
+
+/* this is the on disk super block */
+struct reiserfs_super_block
+{
+ struct reiserfs_super_block_v1 s_v1;
+ __u32 s_inode_generation;
+ __u32 s_flags; /* Right now used only by inode-attributes, if enabled */
+ unsigned char s_uuid[16]; /* filesystem unique identifier */
+ unsigned char s_label[16]; /* filesystem volume label */
+ char s_unused[88] ; /* zero filled by mkreiserfs and
+ * reiserfs_convert_objectid_map_v1()
+ * so any additions must be updated
+ * there as well. */
+} __attribute__ ((__packed__));
+
+#define SB_SIZE (sizeof(struct reiserfs_super_block))
+
+#define REISERFS_VERSION_1 0
+#define REISERFS_VERSION_2 2
+
+
+// on-disk super block fields converted to cpu form
+#define SB_DISK_SUPER_BLOCK(s) ((s)->u.reiserfs_sb.s_rs)
+#define SB_V1_DISK_SUPER_BLOCK(s) (&(SB_DISK_SUPER_BLOCK(s)->s_v1))
+#define SB_BLOCKSIZE(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_blocksize))
+#define SB_BLOCK_COUNT(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_block_count))
+#define SB_FREE_BLOCKS(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks))
+#define SB_REISERFS_MAGIC(s) \
+ (SB_V1_DISK_SUPER_BLOCK(s)->s_magic)
+#define SB_ROOT_BLOCK(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_root_block))
+#define SB_TREE_HEIGHT(s) \
+ le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height))
+#define SB_REISERFS_STATE(s) \
+ le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state))
+#define SB_VERSION(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_version))
+#define SB_BMAP_NR(s) le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr))
+
+#define PUT_SB_BLOCK_COUNT(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_block_count = cpu_to_le32(val); } while (0)
+#define PUT_SB_FREE_BLOCKS(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_free_blocks = cpu_to_le32(val); } while (0)
+#define PUT_SB_ROOT_BLOCK(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_root_block = cpu_to_le32(val); } while (0)
+#define PUT_SB_TREE_HEIGHT(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_tree_height = cpu_to_le16(val); } while (0)
+#define PUT_SB_REISERFS_STATE(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_umount_state = cpu_to_le16(val); } while (0)
+#define PUT_SB_VERSION(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_version = cpu_to_le16(val); } while (0)
+#define PUT_SB_BMAP_NR(s, val) \
+ do { SB_V1_DISK_SUPER_BLOCK(s)->s_bmap_nr = cpu_to_le16 (val); } while (0)
+
+
+#define SB_ONDISK_JP(s) (&SB_V1_DISK_SUPER_BLOCK(s)->s_journal)
+#define SB_ONDISK_JOURNAL_SIZE(s) \
+ le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_size))
+#define SB_ONDISK_JOURNAL_1st_BLOCK(s) \
+ le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_1st_block))
+#define SB_ONDISK_JOURNAL_DEVICE(s) \
+ le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_dev))
+#define SB_ONDISK_RESERVED_FOR_JOURNAL(s) \
+ le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_reserved_for_journal))
+
+#define is_block_in_log_or_reserved_area(s, block) \
+ block >= SB_JOURNAL_1st_RESERVED_BLOCK(s) \
+ && block < SB_JOURNAL_1st_RESERVED_BLOCK(s) + \
+ ((!is_reiserfs_jr(SB_DISK_SUPER_BLOCK(s)) ? \
+ SB_ONDISK_JOURNAL_SIZE(s) + 1 : SB_ONDISK_RESERVED_FOR_JOURNAL(s)))
+
+
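/* A minimal usage sketch of the accessors above (function name
 * invented; SB_JOURNAL_1st_RESERVED_BLOCK is assumed to be defined
 * elsewhere in this header): */
static inline void example_sb_report(struct super_block *s, unsigned long block)
{
	printk(KERN_DEBUG "reiserfs: %u of %u blocks free; block %lu is %s the log area\n",
	       SB_FREE_BLOCKS(s), SB_BLOCK_COUNT(s), block,
	       is_block_in_log_or_reserved_area(s, block) ? "inside" : "outside");
}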
/* used by gcc */
#define REISERFS_SUPER_MAGIC 0x52654973
look at the superblock, etc. */
#define REISERFS_SUPER_MAGIC_STRING "ReIsErFs"
#define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs"
+#define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
-extern char reiserfs_super_magic_string[];
-extern char reiser2fs_super_magic_string[];
+extern const char reiserfs_3_5_magic_string[];
+extern const char reiserfs_3_6_magic_string[];
+extern const char reiserfs_jr_magic_string[];
-static inline int is_reiserfs_magic_string (const struct reiserfs_super_block * rs)
-{
- return (!strncmp (rs->s_magic, reiserfs_super_magic_string,
- strlen ( reiserfs_super_magic_string)) ||
- !strncmp (rs->s_magic, reiser2fs_super_magic_string,
- strlen ( reiser2fs_super_magic_string)));
-}
+int is_reiserfs_3_5 (struct reiserfs_super_block * rs);
+int is_reiserfs_3_6 (struct reiserfs_super_block * rs);
+int is_reiserfs_jr (struct reiserfs_super_block * rs);
- /* ReiserFS leaves the first 64k unused,
- so that partition labels have enough
- space. If someone wants to write a
- fancy bootloader that needs more than
- 64k, let us know, and this will be
- increased in size. This number must
- be larger than than the largest block
- size on any platform, or code will
- break. -Hans */
+/* ReiserFS leaves the first 64k unused, so that partition labels have
+ enough space. If someone wants to write a fancy bootloader that
+ needs more than 64k, let us know, and this will be increased in size.
+   This number must be larger than the largest block size on any
+ platform, or code will break. -Hans */
#define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024)
#define REISERFS_FIRST_BLOCK unused_define
+#define REISERFS_JOURNAL_OFFSET_IN_BYTES REISERFS_DISK_OFFSET_IN_BYTES
/* the spot for the super in versions 3.5 - 3.5.10 (inclusive) */
#define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024)
-
// reiserfs internal error code (used by search_by_key and fix_nodes)
#define CARRY_ON 0
#define REPEAT_SEARCH -1
#define NO_BALANCING_NEEDED (-4)
#define NO_MORE_UNUSED_CONTIGUOUS_BLOCKS (-5)
-//#define SCHEDULE_OCCURRED 1
-//#define PATH_INCORRECT 2
-
-//#define NO_DISK_SPACE (-1)
-
-
-
typedef unsigned long b_blocknr_t;
typedef __u32 unp_t;
- /* who is responsible for this
- completely uncommented struct? */
struct unfm_nodeinfo {
- /* This is what? */
unp_t unfm_nodenum;
- /* now this I know what it is, and
- most of the people on our project
- know what it is, but I bet nobody
- new I hire will have a clue. */
unsigned short unfm_freespace;
};
+/* there are two formats of keys: 3.5 and 3.6 */
+#define KEY_FORMAT_3_5 0
+#define KEY_FORMAT_3_6 1
+
+/* there are two stat datas */
+#define STAT_DATA_V1 0
+#define STAT_DATA_V2 1
-/* when reiserfs_file_write is called with a byte count >= MIN_PACK_ON_CLOSE,
-** it sets the inode to pack on close, and when extending the file, will only
-** use unformatted nodes.
-**
-** This is a big speed up for the journal, which is badly hurt by direct->indirect
-** conversions (they must be logged).
-*/
-#define MIN_PACK_ON_CLOSE 512
static inline struct reiserfs_inode_info *REISERFS_I(struct inode *inode)
{
return list_entry(inode, struct reiserfs_inode_info, vfs_inode);
}
-// this says about version of all items (but stat data) the object
-// consists of
-#define inode_items_version(inode) (REISERFS_I(inode)->i_version)
-
-
- /* This is an aggressive tail suppression policy, I am hoping it
- improves our benchmarks. The principle behind it is that
- percentage space saving is what matters, not absolute space
- saving. This is non-intuitive, but it helps to understand it if
- you consider that the cost to access 4 blocks is not much more
- than the cost to access 1 block, if you have to do a seek and
- rotate. A tail risks a non-linear disk access that is
- significant as a percentage of total time cost for a 4 block file
- and saves an amount of space that is less significant as a
- percentage of space, or so goes the hypothesis. -Hans */
+/** this tells the key format version (3.5 or 3.6) of all items
+  (except the stat data) that the object consists of */
+#define get_inode_item_key_version( inode ) \
+ ((REISERFS_I(inode)->i_flags & i_item_key_version_mask) ? KEY_FORMAT_3_6 : KEY_FORMAT_3_5)
+
+#define set_inode_item_key_version( inode, version ) \
+ ({ if((version)==KEY_FORMAT_3_6) \
+ REISERFS_I(inode)->i_flags |= i_item_key_version_mask; \
+ else \
+ REISERFS_I(inode)->i_flags &= ~i_item_key_version_mask; })
+
+#define get_inode_sd_version(inode) \
+ ((REISERFS_I(inode)->i_flags & i_stat_data_version_mask) ? STAT_DATA_V2 : STAT_DATA_V1)
+
+#define set_inode_sd_version(inode, version) \
+ ({ if((version)==STAT_DATA_V2) \
+ REISERFS_I(inode)->i_flags |= i_stat_data_version_mask; \
+ else \
+ REISERFS_I(inode)->i_flags &= ~i_stat_data_version_mask; })
+
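For illustration, how a freshly created in-core inode might be stamped
with matching key and stat data formats (hypothetical helper; the real
logic lives in inode.c, and old_format_only() is defined elsewhere in
this patch):

static void init_inode_versions(struct inode *inode)
{
	if (old_format_only(inode->i_sb)) {
		/* 3.5 filesystems keep 32-bit offsets and v1 stat data */
		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
		set_inode_sd_version(inode, STAT_DATA_V1);
	} else {
		set_inode_item_key_version(inode, KEY_FORMAT_3_6);
		set_inode_sd_version(inode, STAT_DATA_V2);
	}
}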
+/* This is an aggressive tail suppression policy, I am hoping it
+ improves our benchmarks. The principle behind it is that percentage
+ space saving is what matters, not absolute space saving. This is
+ non-intuitive, but it helps to understand it if you consider that the
+ cost to access 4 blocks is not much more than the cost to access 1
+ block, if you have to do a seek and rotate. A tail risks a
+ non-linear disk access that is significant as a percentage of total
+ time cost for a 4 block file and saves an amount of space that is
+ less significant as a percentage of space, or so goes the hypothesis.
+ -Hans */
#define STORE_TAIL_IN_UNFM(n_file_size,n_tail_size,n_block_size) \
(\
(!(n_tail_size)) || \
/*
- * values for s_state field
+ * values for s_umount_state field
*/
#define REISERFS_VALID_FS 1
#define REISERFS_ERROR_FS 2
/* KEY & ITEM HEAD */
/***************************************************************************/
-//
-// we do support for old format of reiserfs: the problem is to
-// distinuquish keys with 32 bit offset and keys with 60 bit ones. On
-// leaf level we use ih_version of struct item_head (was
-// ih_reserved). For all old items it is set to 0
-// (ITEM_VERSION_1). For new items it is ITEM_VERSION_2. On internal
-// levels we have to know version of item key belongs to.
-//
-#define ITEM_VERSION_1 0
-#define ITEM_VERSION_2 1
-
-
-/* loff_t - long long */
-
-
//
// directories use this key as well as old files
//
__u64 linear;
} __attribute__ ((__packed__)) offset_v2_esafe_overlay;
-static inline __u16 offset_v2_k_type( struct offset_v2 *v2 )
+static inline __u16 offset_v2_k_type( const struct offset_v2 *v2 )
{
- offset_v2_esafe_overlay tmp = *(offset_v2_esafe_overlay *)v2;
+ offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2;
tmp.linear = le64_to_cpu( tmp.linear );
return tmp.offset_v2.k_type;
}
tmp->linear = le64_to_cpu(tmp->linear);
}
-static inline loff_t offset_v2_k_offset( struct offset_v2 *v2 )
+static inline loff_t offset_v2_k_offset( const struct offset_v2 *v2 )
{
- offset_v2_esafe_overlay tmp = *(offset_v2_esafe_overlay *)v2;
+ offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2;
tmp.linear = le64_to_cpu( tmp.linear );
return tmp.offset_v2.k_offset;
}
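What the overlay buys, in one hypothetical helper: the packed 64-bit word
is copied into a local, byte-swapped once, and only then are the
bitfields picked apart, so no per-field endianness conversion is needed.

static void dump_offset_v2(const struct offset_v2 *v2)
{
	loff_t offset = offset_v2_k_offset(v2);	/* 60-bit offset */
	__u16 type = offset_v2_k_type(v2);	/* 4-bit type */

	printk(KERN_DEBUG "v2 key: offset %Lu, type %u\n",
	       (unsigned long long)offset, type);
}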
indirect2direct conversion */
};
-
-
-
-
-
-
- /* Our function for comparing keys can compare keys of different
- lengths. It takes as a parameter the length of the keys it is to
- compare. These defines are used in determining what is to be
- passed to it as that parameter. */
+/* Our function for comparing keys can compare keys of different
+ lengths. It takes as a parameter the length of the keys it is to
+ compare. These defines are used in determining what is to be passed
+ to it as that parameter. */
#define REISERFS_FULL_KEY_LEN 4
-
#define REISERFS_SHORT_KEY_LEN 2
/* The result of the key compare */
#define KEY_FOUND 1
#define KEY_NOT_FOUND 0
-
#define KEY_SIZE (sizeof(struct key))
#define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
#define GOTO_PREVIOUS_ITEM 2
#define NAME_FOUND_INVISIBLE 3
-
-
/* Everything in the filesystem is stored as a set of items. The
item head contains the key of the item, its free space (for
indirect items) and specifies the location of the item itself
struct item_head
{
- struct key ih_key; /* Everything in the tree is found by searching for it based on its key.*/
-
- /* This is bloat, this should be part
- of the item not the item
- header. -Hans */
- union {
- __u16 ih_free_space_reserved; /* The free space in the last unformatted node of an indirect item if this
- is an indirect item. This equals 0xFFFF iff this is a direct item or
- stat data item. Note that the key, not this field, is used to determine
- the item type, and thus which field this union contains. */
- __u16 ih_entry_count; /* Iff this is a directory item, this field equals the number of directory
- entries in the directory item. */
- } __attribute__ ((__packed__)) u;
- __u16 ih_item_len; /* total size of the item body */
- __u16 ih_item_location; /* an offset to the item body within the block */
- /* I thought we were going to use this
- for having lots of item types? Why
- don't you use this for item type
- not item version. That is how you
- talked me into this field a year
- ago, remember? I am still not
- convinced it needs to be 16 bits
- (for at least many years), but at
- least I can sympathize with that
- hope. Change the name from version
- to type, and tell people not to use
- FFFF in case 16 bits is someday too
- small and needs to be extended:-). */
- __u16 ih_version; /* 0 for all old items, 2 for new
- ones. Highest bit is set by fsck
- temporary, cleaned after all done */
+ /* Everything in the tree is found by searching for it based on
+ * its key.*/
+ struct key ih_key;
+ union {
+ /* The free space in the last unformatted node of an
+ indirect item if this is an indirect item. This
+ equals 0xFFFF iff this is a direct item or stat data
+ item. Note that the key, not this field, is used to
+ determine the item type, and thus which field this
+ union contains. */
+ __u16 ih_free_space_reserved;
+ /* Iff this is a directory item, this field equals the
+ number of directory entries in the directory item. */
+ __u16 ih_entry_count;
+ } __attribute__ ((__packed__)) u;
+ __u16 ih_item_len; /* total size of the item body */
+ __u16 ih_item_location; /* an offset to the item body
+ * within the block */
+	__u16 ih_version;	     /* 0 (KEY_FORMAT_3_5) for all old
+					items, 1 (KEY_FORMAT_3_6) for new
+					ones. The highest bit is set
+					temporarily by fsck and cleared
+					when it is done */
} __attribute__ ((__packed__));
/* size of item header */
#define IH_SIZE (sizeof(struct item_head))
#define unreachable_item(ih) (ih_version(ih) & (1 << 15))
-#define get_ih_free_space(ih) (ih_version (ih) == ITEM_VERSION_2 ? 0 : ih_free_space (ih))
-#define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == ITEM_VERSION_2) ? 0 : (val)))
+#define get_ih_free_space(ih) (ih_version (ih) == KEY_FORMAT_3_6 ? 0 : ih_free_space (ih))
+#define set_ih_free_space(ih,val) put_ih_free_space((ih), ((ih_version(ih) == KEY_FORMAT_3_6) ? 0 : (val)))
/* these operate on indirect items, where you've got an array of ints
** at a possibly unaligned location. These are a noop on ia32
#define V1_DIRENTRY_UNIQUENESS 500
#define V1_ANY_UNIQUENESS 555 // FIXME: comment is required
+extern void reiserfs_warning (const char * fmt, ...);
+/* __attribute__( ( format ( printf, 1, 2 ) ) ); */
+
//
// here are conversion routines
//
case V1_INDIRECT_UNIQUENESS: return TYPE_INDIRECT;
case V1_DIRECT_UNIQUENESS: return TYPE_DIRECT;
case V1_DIRENTRY_UNIQUENESS: return TYPE_DIRENTRY;
+ default:
+ reiserfs_warning( "vs-500: unknown uniqueness %d\n", uniqueness);
+ case V1_ANY_UNIQUENESS:
+ return TYPE_ANY;
}
-/*
- if (uniqueness != V1_ANY_UNIQUENESS) {
- printk ("uniqueness %d\n", uniqueness);
- BUG ();
- }
-*/
- return TYPE_ANY;
}
static inline __u32 type2uniqueness (int type) CONSTF;
case TYPE_INDIRECT: return V1_INDIRECT_UNIQUENESS;
case TYPE_DIRECT: return V1_DIRECT_UNIQUENESS;
case TYPE_DIRENTRY: return V1_DIRENTRY_UNIQUENESS;
+ default:
+ reiserfs_warning( "vs-501: unknown type %d\n", type);
+ case TYPE_ANY:
+ return V1_ANY_UNIQUENESS;
}
- /*
- if (type != TYPE_ANY)
- BUG ();
- */
- return V1_ANY_UNIQUENESS;
}
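The two conversions are inverses for the four known values; everything
else warns and collapses to TYPE_ANY / V1_ANY_UNIQUENESS through the
fall-through above. A hypothetical sanity check:

static void check_uniqueness_roundtrip(void)
{
	__u32 u = V1_DIRENTRY_UNIQUENESS;

	if (type2uniqueness(uniqueness2type(u)) != u)
		reiserfs_warning("uniqueness roundtrip broken for %u\n", u);
}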
-
//
// key is pointer to on disk key which is stored in le, result is cpu,
// there is no way to get version of object from key, so, provide
//
static inline loff_t le_key_k_offset (int version, const struct key * key)
{
- return (version == ITEM_VERSION_1) ?
+ return (version == KEY_FORMAT_3_5) ?
le32_to_cpu( key->u.k_offset_v1.k_offset ) :
offset_v2_k_offset( &(key->u.k_offset_v2) );
}
static inline loff_t le_key_k_type (int version, const struct key * key)
{
- return (version == ITEM_VERSION_1) ?
+ return (version == KEY_FORMAT_3_5) ?
uniqueness2type( le32_to_cpu( key->u.k_offset_v1.k_uniqueness)) :
offset_v2_k_type( &(key->u.k_offset_v2) );
}
static inline void set_le_key_k_offset (int version, struct key * key, loff_t offset)
{
- (version == ITEM_VERSION_1) ?
+ (version == KEY_FORMAT_3_5) ?
(key->u.k_offset_v1.k_offset = cpu_to_le32 (offset)) : /* jdm check */
(set_offset_v2_k_offset( &(key->u.k_offset_v2), offset ));
}
+
+
static inline void set_le_ih_k_offset (struct item_head * ih, loff_t offset)
{
set_le_key_k_offset (ih_version (ih), &(ih->ih_key), offset);
}
-
static inline void set_le_key_k_type (int version, struct key * key, int type)
{
- (version == ITEM_VERSION_1) ?
+ (version == KEY_FORMAT_3_5) ?
(key->u.k_offset_v1.k_uniqueness = cpu_to_le32(type2uniqueness(type))):
(set_offset_v2_k_type( &(key->u.k_offset_v2), type ));
}
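A short usage sketch (hypothetical helper): the version argument selects
between the 32-bit v1 offset and the packed 60-bit v2 offset, so callers
never inspect the key union directly.

static int le_key_offsets_equal(int version, const struct key *k1,
				const struct key *k2)
{
	return le_key_k_offset(version, k1) == le_key_k_offset(version, k2)
	    && le_key_k_type(version, k1) == le_key_k_type(version, k2);
}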
//
static inline loff_t cpu_key_k_offset (const struct cpu_key * key)
{
- return (key->version == ITEM_VERSION_1) ?
+ return (key->version == KEY_FORMAT_3_5) ?
key->on_disk_key.u.k_offset_v1.k_offset :
key->on_disk_key.u.k_offset_v2.k_offset;
}
static inline loff_t cpu_key_k_type (const struct cpu_key * key)
{
- return (key->version == ITEM_VERSION_1) ?
+ return (key->version == KEY_FORMAT_3_5) ?
uniqueness2type (key->on_disk_key.u.k_offset_v1.k_uniqueness) :
key->on_disk_key.u.k_offset_v2.k_type;
}
static inline void set_cpu_key_k_offset (struct cpu_key * key, loff_t offset)
{
- (key->version == ITEM_VERSION_1) ?
+ (key->version == KEY_FORMAT_3_5) ?
(key->on_disk_key.u.k_offset_v1.k_offset = offset) :
(key->on_disk_key.u.k_offset_v2.k_offset = offset);
}
static inline void set_cpu_key_k_type (struct cpu_key * key, int type)
{
- (key->version == ITEM_VERSION_1) ?
+ (key->version == KEY_FORMAT_3_5) ?
(key->on_disk_key.u.k_offset_v1.k_uniqueness = type2uniqueness (type)):
(key->on_disk_key.u.k_offset_v2.k_type = type);
}
+
static inline void cpu_key_k_offset_dec (struct cpu_key * key)
{
- if (key->version == ITEM_VERSION_1)
+ if (key->version == KEY_FORMAT_3_5)
key->on_disk_key.u.k_offset_v1.k_offset --;
else
key->on_disk_key.u.k_offset_v2.k_offset --;
} __attribute__ ((__packed__));
#define SD_V1_SIZE (sizeof(struct stat_data_v1))
-#define stat_data_v1(ih) (ih_version (ih) == ITEM_VERSION_1)
+#define stat_data_v1(ih) (ih_version (ih) == KEY_FORMAT_3_5)
#define sd_v1_mode(sdp) (le16_to_cpu((sdp)->sd_mode))
#define set_sd_v1_mode(sdp,v) ((sdp)->sd_mode = cpu_to_le16(v))
#define sd_v1_nlink(sdp) (le16_to_cpu((sdp)->sd_nlink))
} __attribute__ ((__packed__)) u;
} __attribute__ ((__packed__));
//
-// this is 40 bytes long
+// this is 44 bytes long
//
#define SD_SIZE (sizeof(struct stat_data))
#define SD_V2_SIZE SD_SIZE
-#define stat_data_v2(ih) (ih_version (ih) == ITEM_VERSION_2)
+#define stat_data_v2(ih) (ih_version (ih) == KEY_FORMAT_3_6)
#define sd_v2_mode(sdp) (le16_to_cpu((sdp)->sd_mode))
#define set_sd_v2_mode(sdp,v) ((sdp)->sd_mode = cpu_to_le16(v))
/* sd_reserved */
#define de_visible(deh) test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
#define de_hidden(deh) !test_bit_unaligned (DEH_Visible, &((deh)->deh_state))
-/* compose directory item containing "." and ".." entries (entries are
- not aligned to 4 byte boundary) */
-/* the last four params are LE */
-static inline void make_empty_dir_item_v1 (char * body,
- __u32 dirid, __u32 objid,
- __u32 par_dirid, __u32 par_objid)
-{
- struct reiserfs_de_head * deh;
-
- memset (body, 0, EMPTY_DIR_SIZE_V1);
- deh = (struct reiserfs_de_head *)body;
-
- /* direntry header of "." */
- put_deh_offset( &(deh[0]), DOT_OFFSET );
- /* these two are from make_le_item_head, and are are LE */
- deh[0].deh_dir_id = dirid;
- deh[0].deh_objectid = objid;
- deh[0].deh_state = 0; /* Endian safe if 0 */
- put_deh_location( &(deh[0]), EMPTY_DIR_SIZE_V1 - strlen( "." ));
- mark_de_visible(&(deh[0]));
-
- /* direntry header of ".." */
- put_deh_offset( &(deh[1]), DOT_DOT_OFFSET);
- /* key of ".." for the root directory */
- /* these two are from the inode, and are are LE */
- deh[1].deh_dir_id = par_dirid;
- deh[1].deh_objectid = par_objid;
- deh[1].deh_state = 0; /* Endian safe if 0 */
- put_deh_location( &(deh[1]), deh_location( &(deh[0]) ) - strlen( ".." ) );
- mark_de_visible(&(deh[1]));
-
- /* copy ".." and "." */
- memcpy (body + deh_location( &(deh[0]) ), ".", 1);
- memcpy (body + deh_location( &(deh[1]) ), "..", 2);
-}
-
-/* compose directory item containing "." and ".." entries */
-static inline void make_empty_dir_item (char * body,
- __u32 dirid, __u32 objid,
- __u32 par_dirid, __u32 par_objid)
-{
- struct reiserfs_de_head * deh;
-
- memset (body, 0, EMPTY_DIR_SIZE);
- deh = (struct reiserfs_de_head *)body;
-
- /* direntry header of "." */
- put_deh_offset( &(deh[0]), DOT_OFFSET );
- /* these two are from make_le_item_head, and are are LE */
- deh[0].deh_dir_id = dirid;
- deh[0].deh_objectid = objid;
- deh[0].deh_state = 0; /* Endian safe if 0 */
- put_deh_location( &(deh[0]), EMPTY_DIR_SIZE - ROUND_UP( strlen( "." ) ) );
- mark_de_visible(&(deh[0]));
-
- /* direntry header of ".." */
- put_deh_offset( &(deh[1]), DOT_DOT_OFFSET );
- /* key of ".." for the root directory */
- /* these two are from the inode, and are are LE */
- deh[1].deh_dir_id = par_dirid;
- deh[1].deh_objectid = par_objid;
- deh[1].deh_state = 0; /* Endian safe if 0 */
- put_deh_location( &(deh[1]), deh_location( &(deh[0])) - ROUND_UP( strlen( ".." ) ) );
- mark_de_visible(&(deh[1]));
-
- /* copy ".." and "." */
- memcpy (body + deh_location( &(deh[0]) ), ".", 1);
- memcpy (body + deh_location( &(deh[1]) ), "..", 2);
-}
-
+extern void make_empty_dir_item_v1 (char * body, __u32 dirid, __u32 objid,
+ __u32 par_dirid, __u32 par_objid);
+extern void make_empty_dir_item (char * body, __u32 dirid, __u32 objid,
+ __u32 par_dirid, __u32 par_objid);
/* array of the entry headers */
/* get item body */
// two entries per block (at least)
//#define REISERFS_MAX_NAME_LEN(block_size)
//((block_size - BLKH_SIZE - IH_SIZE - DEH_SIZE * 2) / 2)
-
-// two entries per block (at least)
#define REISERFS_MAX_NAME_LEN(block_size) 255
-
-
/* this structure is used for operations on directory entries. It is
not a disk structure. */
/* When reiserfs_find_entry or search_by_entry_key find directory
// in in-core inode key is stored on le form
#define INODE_PKEY(inode) ((struct key *)(REISERFS_I(inode)->i_key))
-//#define mark_tail_converted(inode) (atomic_set(&(REISERFS_I(inode)->i_converted),1))
-//#define unmark_tail_converted(inode) (REISERFS_I(inode)->i_converted), 0))
-//#define is_tail_converted(inode) (REISERFS_I(inode)->i_converted)))
-
-
#define MAX_UL_INT 0xffffffff
#define MAX_INT   0x7fffffff
#define MAX_US_INT 0xffff
-///#define TOO_LONG_LENGTH (~0ULL)
-
// reiserfs version 2 has max offset of 60 bits. Version 1 has a 32 bit offset
#define U32_MAX (~(__u32)0)
+
static inline loff_t max_reiserfs_offset (struct inode * inode)
{
- if (inode_items_version (inode) == ITEM_VERSION_1)
+ if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5)
return (loff_t)U32_MAX;
return (loff_t)((~(__u64)0) >> 4);
/* FIXATE NODES */
/***************************************************************************/
-//#define VI_TYPE_STAT_DATA 1
-//#define VI_TYPE_DIRECT 2
-//#define VI_TYPE_INDIRECT 4
-//#define VI_TYPE_DIRECTORY 8
-//#define VI_TYPE_FIRST_DIRECTORY_ITEM 16
-//#define VI_TYPE_INSERTED_DIRECTORY_ITEM 32
-
#define VI_TYPE_LEFT_MERGEABLE 1
#define VI_TYPE_RIGHT_MERGEABLE 2
#define COMP_KEYS comp_keys
#define COMP_SHORT_KEYS comp_short_keys
-#define keys_of_same_object comp_short_keys
-
-/*#define COMP_KEYS(p_s_key1, p_s_key2) comp_keys((unsigned long *)(p_s_key1), (unsigned long *)(p_s_key2))
-#define COMP_SHORT_KEYS(p_s_key1, p_s_key2) comp_short_keys((unsigned long *)(p_s_key1), (unsigned long *)(p_s_key2))*/
-
+/*#define keys_of_same_object comp_short_keys*/
/* number of blocks pointed to by the indirect item */
#define I_UNFM_NUM(p_s_ih) ( ih_item_len(p_s_ih) / UNFM_P_SIZE )
__u32 j_last_flush_trans_id ; /* id of last fully flushed transaction */
__u32 j_first_unflushed_offset ; /* offset in the log of where to start replay after a crash */
__u32 j_mount_id ;
+ /* 12 */ struct journal_params jh_journal;
} ;
extern task_queue reiserfs_commit_thread_tq ;
/* biggest tunable defines are right here */
#define JOURNAL_BLOCK_COUNT 8192 /* number of blocks in the journal */
-#define JOURNAL_MAX_BATCH 900 /* max blocks to batch into one transaction, don't make this any bigger than 900 */
+#define JOURNAL_TRANS_MAX_DEFAULT 1024 /* biggest possible single transaction, don't change for now (8/3/99) */
+#define JOURNAL_TRANS_MIN_DEFAULT 256
+#define JOURNAL_MAX_BATCH_DEFAULT 900 /* max blocks to batch into one transaction, don't make this any bigger than 900 */
+#define JOURNAL_MIN_RATIO 2
#define JOURNAL_MAX_COMMIT_AGE 30
#define JOURNAL_MAX_TRANS_AGE 30
#define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9)
void reiserfs_check_lock_depth(char *caller) ;
void reiserfs_prepare_for_journal(struct super_block *, struct buffer_head *bh, int wait) ;
void reiserfs_restore_prepared_buffer(struct super_block *, struct buffer_head *bh) ;
-int journal_init(struct super_block *) ;
+int journal_init(struct super_block *, const char * j_dev_name, int old_format) ;
int journal_release(struct reiserfs_transaction_handle*, struct super_block *) ;
int journal_release_error(struct reiserfs_transaction_handle*, struct super_block *) ;
int journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long) ;
int journal_mark_freed(struct reiserfs_transaction_handle *, struct super_block *, unsigned long blocknr) ;
int push_journal_writer(char *w) ;
int pop_journal_writer(int windex) ;
-int journal_lock_dobalance(struct super_block *p_s_sb) ;
-int journal_unlock_dobalance(struct super_block *p_s_sb) ;
int journal_transaction_should_end(struct reiserfs_transaction_handle *, int) ;
int reiserfs_in_journal(struct super_block *p_s_sb, unsigned long bl, int searchall, unsigned long *next) ;
int journal_begin(struct reiserfs_transaction_handle *, struct super_block *p_s_sb, unsigned long) ;
-int journal_join(struct reiserfs_transaction_handle *, struct super_block *p_s_sb, unsigned long) ;
struct super_block *reiserfs_get_super(kdev_t dev) ;
void flush_async_commits(struct super_block *p_s_sb) ;
-int remove_from_transaction(struct super_block *p_s_sb, unsigned long blocknr, int already_cleaned) ;
-
int buffer_journaled(const struct buffer_head *bh) ;
int mark_buffer_journal_new(struct buffer_head *bh) ;
int reiserfs_sync_all_buffers(kdev_t dev, int wait) ;
return 0 ;
}
+void add_save_link (struct reiserfs_transaction_handle * th,
+ struct inode * inode, int truncate);
+void remove_save_link (struct inode * inode, int truncate);
+
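The intended pattern, sketched below under the assumption that the save
link brackets the risky operation (the real call sites are in the .c
files and their transaction ordering may differ):

static void truncate_with_save_link(struct reiserfs_transaction_handle *th,
				    struct inode *inode)
{
	add_save_link(th, inode, 1 /* truncate */);
	reiserfs_truncate_file(inode, 1 /* update timestamps */);
	remove_save_link(inode, 1 /* truncate */);
}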
/* objectid.c */
__u32 reiserfs_get_unused_objectid (struct reiserfs_transaction_handle *th);
void reiserfs_release_objectid (struct reiserfs_transaction_handle *th, __u32 objectid_to_release);
type = offset_v2_k_type( &(key->u.k_offset_v2));
if (type != TYPE_DIRECT && type != TYPE_INDIRECT && type != TYPE_DIRENTRY)
- return ITEM_VERSION_1;
+ return KEY_FORMAT_3_5;
- return ITEM_VERSION_2;
+ return KEY_FORMAT_3_6;
}
static inline void copy_key (struct key *to, const struct key *from)
{
- memcpy (to, from, KEY_SIZE);
+ memcpy (to, from, KEY_SIZE);
}
struct inode * inode,
struct buffer_head * p_s_un_bh);
-
+void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
+ struct key * key);
void reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode * p_s_inode);
void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
struct inode * p_s_inode, struct page *,
int update_timestamps);
-//
-//void lock_inode_to_convert (struct inode * p_s_inode);
-//void unlock_inode_after_convert (struct inode * p_s_inode);
-//void increment_i_read_sync_counter (struct inode * p_s_inode);
-//void decrement_i_read_sync_counter (struct inode * p_s_inode);
-
#define i_block_size(inode) ((inode)->i_sb->s_blocksize)
#define file_size(inode) ((inode)->i_size)
#define tail_has_to_be_packed(inode) (!dont_have_tails ((inode)->i_sb) &&\
!STORE_TAIL_IN_UNFM(file_size (inode), tail_size(inode), i_block_size (inode)))
-/*
-int get_buffer_by_range (struct super_block * p_s_sb, struct key * p_s_range_begin, struct key * p_s_range_end,
- struct buffer_head ** pp_s_buf, unsigned long * p_n_objectid);
-int get_buffers_from_range (struct super_block * p_s_sb, struct key * p_s_range_start, struct key * p_s_range_end,
- struct buffer_head ** p_s_range_buffers,
- int n_max_nr_buffers_to_return);
-*/
-
void padd_item (char * item, int total_length, int length);
-
/* inode.c */
+void reiserfs_read_inode (struct inode * inode) ;
+void reiserfs_read_inode2(struct inode * inode, void *p) ;
+void reiserfs_delete_inode (struct inode * inode);
+void reiserfs_write_inode (struct inode * inode, int) ;
+struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, __u32 *data,
+ int len, int fhtype, int parent);
+int reiserfs_dentry_to_fh(struct dentry *dentry, __u32 *data, int *lenp, int need_parent);
+
int reiserfs_prepare_write(struct file *, struct page *, unsigned, unsigned) ;
void reiserfs_truncate_file(struct inode *, int update_timestamps) ;
void make_cpu_key (struct cpu_key * cpu_key, struct inode * inode, loff_t offset,
void make_le_item_head (struct item_head * ih, const struct cpu_key * key,
int version,
loff_t offset, int type, int length, int entry_count);
-/*void store_key (struct key * key);
-void forget_key (struct key * key);*/
-int reiserfs_get_block (struct inode * inode, sector_t block,
- struct buffer_head * bh_result, int create);
struct inode * reiserfs_iget (struct super_block * s,
const struct cpu_key * key);
-void reiserfs_read_inode (struct inode * inode) ;
-void reiserfs_read_inode2(struct inode * inode, void *p) ;
-void reiserfs_delete_inode (struct inode * inode);
-extern int reiserfs_notify_change(struct dentry * dentry, struct iattr * attr);
-void reiserfs_write_inode (struct inode * inode, int) ;
-/* nfsd support functions */
-struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, __u32 *fh, int len, int fhtype, int parent);
-int reiserfs_dentry_to_fh(struct dentry *, __u32 *fh, int *lenp, int need_parent);
-
-/* we don't mark inodes dirty, we just log them */
-void reiserfs_dirty_inode (struct inode * inode) ;
struct inode * reiserfs_new_inode (struct reiserfs_transaction_handle *th,
struct inode * dir, int mode,
struct dentry *dentry, struct inode *inode, int * err);
int reiserfs_sync_inode (struct reiserfs_transaction_handle *th, struct inode * inode);
void reiserfs_update_sd (struct reiserfs_transaction_handle *th, struct inode * inode);
-int reiserfs_inode_setattr(struct dentry *, struct iattr * attr);
/* namei.c */
inline void set_de_name_and_namelen (struct reiserfs_dir_entry * de);
int search_by_entry_key (struct super_block * sb, const struct cpu_key * key,
struct path * path,
struct reiserfs_dir_entry * de);
-struct dentry * reiserfs_lookup (struct inode * dir, struct dentry *dentry);
-int reiserfs_create (struct inode * dir, struct dentry *dentry, int mode);
-int reiserfs_mknod (struct inode * dir_inode, struct dentry *dentry, int mode, int rdev);
-int reiserfs_mkdir (struct inode * dir, struct dentry *dentry, int mode);
-int reiserfs_rmdir (struct inode * dir, struct dentry *dentry);
-int reiserfs_unlink (struct inode * dir, struct dentry *dentry);
-int reiserfs_symlink (struct inode * dir, struct dentry *dentry, const char * symname);
-int reiserfs_link (struct dentry * old_dentry, struct inode * dir, struct dentry *dentry);
-int reiserfs_rename (struct inode * old_dir, struct dentry *old_dentry, struct inode * new_dir, struct dentry *new_dentry);
-
-/* super.c */
-inline void reiserfs_mark_buffer_dirty (struct buffer_head * bh, int flag);
-inline void reiserfs_mark_buffer_clean (struct buffer_head * bh);
-void reiserfs_write_super (struct super_block * s);
-void reiserfs_put_super (struct super_block * s);
-int reiserfs_remount (struct super_block * s, int * flags, char * data);
-/*int read_super_block (struct super_block * s, int size);
-int read_bitmaps (struct super_block * s);
-int read_old_bitmaps (struct super_block * s);
-int read_old_super_block (struct super_block * s, int size);*/
-struct super_block * reiserfs_read_super (struct super_block * s, void * data, int silent);
-int reiserfs_statfs (struct super_block * s, struct statfs * buf);
-
/* procfs.c */
#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
/* prints.c */
void reiserfs_panic (struct super_block * s, const char * fmt, ...)
__attribute__ ( ( noreturn ) );/* __attribute__( ( format ( printf, 2, 3 ) ) ) */
-void reiserfs_warning (const char * fmt, ...);
-/* __attribute__( ( format ( printf, 1, 2 ) ) ); */
void reiserfs_debug (struct super_block *s, int level, const char * fmt, ...);
/* __attribute__( ( format ( printf, 3, 4 ) ) ); */
void print_virtual_node (struct virtual_node * vn);
__u32 yura_hash (const signed char *msg, int len);
__u32 r5_hash (const signed char *msg, int len);
-/* version.c */
-const char *reiserfs_get_version_string(void) CONSTF;
-
/* the ext2 bit routines adjust for big or little endian as
** appropriate for the arch, so in our laziness we use them rather
** than using the bit routines they call more directly. These
#define reiserfs_test_le_bit ext2_test_bit
#define reiserfs_find_next_zero_le_bit ext2_find_next_zero_bit
-
-//
-// this was totally copied from from linux's
-// find_first_zero_bit and changed a bit
-//
-
-#ifdef __i386__
-
-static __inline__ int
-find_first_nonzero_bit(const void * addr, unsigned size) {
- int res;
- int __d0;
- void *__d1;
-
-
- if (!size) {
- return (0);
- }
- __asm__ __volatile__ (
- "cld\n\t"
- "xorl %%eax,%%eax\n\t"
- "repe; scasl\n\t"
- "je 1f\n\t"
- "movl -4(%%edi),%%eax\n\t"
- "subl $4, %%edi\n\t"
- "bsfl %%eax,%%eax\n\t"
- "1:\tsubl %%edx,%%edi\n\t"
- "shll $3,%%edi\n\t"
- "addl %%edi,%%eax"
- :"=a" (res),
- "=c"(__d0), "=D"(__d1)
- :"1" ((size + 31) >> 5), "d" (addr), "2" (addr));
- return (res);
-}
-
-#else /* __i386__ */
-
-static __inline__ int find_next_nonzero_bit(const void * addr, unsigned size,
- unsigned offset)
-{
- unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- /* set to zero first offset bits */
- tmp &= ~(~0UL >> (32-offset));
- if (size < 32)
- goto found_first;
- if (tmp != 0U)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0U)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
-found_middle:
- return result + ffs(tmp);
-}
-
-#define find_first_nonzero_bit(addr,size) find_next_nonzero_bit((addr), (size), 0)
-
-#endif /* 0 */
-
/* sometimes reiserfs_truncate may need to allocate a few new blocks
   to perform an indirect2direct conversion. People probably used to
think, that truncate should work without problems on a filesystem
absolutely safe */
#define SPARE_SPACE 500
-static inline unsigned long reiserfs_get_journal_block(const struct super_block *s) {
- return le32_to_cpu(SB_DISK_SUPER_BLOCK(s)->s_journal_block) ;
-}
-static inline unsigned long reiserfs_get_journal_orig_size(const struct super_block *s) {
- return le32_to_cpu(SB_DISK_SUPER_BLOCK(s)->s_orig_journal_size) ;
-}
/* prototypes from ioctl.c */
int reiserfs_ioctl (struct inode * inode, struct file * filp,
#include <linux/list.h>
+/** bitmasks for i_flags field in reiserfs-specific part of inode */
+typedef enum {
+    /** this tells what key format all items (except the stat data)
+	of an object have. If this is set the format is 3.6,
+	otherwise it is 3.5 */
+ i_item_key_version_mask = 0x0001,
+    /** If this is unset, the object has a 3.5 stat data item; otherwise
+	it has a 3.6 stat data item with 64bit size, 32bit nlink etc. */
+ i_stat_data_version_mask = 0x0002,
+ /** file might need tail packing on close */
+ i_pack_on_close_mask = 0x0004,
+ /** don't pack tail of file */
+ i_nopack_mask = 0x0008,
+    /** If this is set, a "safe link" was created for this file during
+	truncate or unlink. Safe links are used to avoid leaking disk
+	space when the system crashes while unlinked files are still open. */
+ i_link_saved_unlink_mask = 0x0010,
+ i_link_saved_truncate_mask = 0x0020
+} reiserfs_inode_flags;
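These masks are meant for plain bit operations on i_flags; a hypothetical
illustration (REISERFS_I() comes from reiserfs_fs.h):

static inline void mark_pack_on_close(struct inode *inode)
{
	REISERFS_I(inode)->i_flags |= i_pack_on_close_mask;
}

static inline int tail_pack_wanted(struct inode *inode)
{
	/* pack-on-close is honoured only while nopack is not set */
	return (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
	      !(REISERFS_I(inode)->i_flags & i_nopack_mask);
}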
+
+
struct reiserfs_inode_info {
- __u32 i_key [4];/* key is still 4 32 bit integers */
-
- /* this comment will be totally
- cryptic to readers not familiar
- with 3.5/3.6 format conversion, and
- it does not consider that that 3.6
- might not be the last version */
- int i_version; // this says whether file is old or new
-
- int i_pack_on_close ; // file might need tail packing on close
-
- __u32 i_first_direct_byte; // offset of first byte stored in direct item.
-
- /* My guess is this contains the first
- unused block of a sequence of
- blocks plus the length of the
- sequence, which I think is always
- at least two at the time of the
- preallocation. I really prefer
- allocate on flush conceptually.....
-
- You know, it really annoys me when
- code is this badly commented that I
- have to guess what it does.
- Neither I nor anyone else has time
- for guessing what your
- datastructures mean. -Hans */
- //For preallocation
- int i_prealloc_block;
- int i_prealloc_count;
- struct list_head i_prealloc_list; /* per-transaction list of inodes which
- * have preallocated blocks */
- /* I regret that you think the below
- is a comment you should make.... -Hans */
- //nopack-attribute
- int nopack;
-
- /* we use these for fsync or O_SYNC to decide which transaction needs
- ** to be committed in order for this inode to be properly flushed
- */
- unsigned long i_trans_id ;
- unsigned long i_trans_index ;
- struct inode vfs_inode;
+ __u32 i_key [4];/* key is still 4 32 bit integers */
+ /** transient inode flags that are never stored on disk. Bitmasks
+ for this field are defined above. */
+ __u32 i_flags;
+
+ __u32 i_first_direct_byte; // offset of first byte stored in direct item.
+
+ int i_prealloc_block; /* first unused block of a sequence of unused blocks */
+ int i_prealloc_count; /* length of that sequence */
+ struct list_head i_prealloc_list; /* per-transaction list of inodes which
+ * have preallocated blocks */
+
+ /* we use these for fsync or O_SYNC to decide which transaction
+ ** needs to be committed in order for this inode to be properly
+ ** flushed */
+ unsigned long i_trans_id ;
+ unsigned long i_trans_index ;
+ struct inode vfs_inode;
};
-
#endif
#include <linux/tqueue.h>
#endif
-//
-// super block's field values
-//
-/*#define REISERFS_VERSION 0 undistributed bitmap */
-/*#define REISERFS_VERSION 1 distributed bitmap and resizer*/
-#define REISERFS_VERSION_2 2 /* distributed bitmap, resizer, 64-bit, etc*/
-#define UNSET_HASH 0 // read_super will guess about, what hash names
- // in directories were sorted with
-#define TEA_HASH 1
-#define YURA_HASH 2
-#define R5_HASH 3
-#define DEFAULT_HASH R5_HASH
-
-/* this is the on disk super block */
-
-struct reiserfs_super_block
-{
- __u32 s_block_count;
- __u32 s_free_blocks; /* free blocks count */
- __u32 s_root_block; /* root block number */
- __u32 s_journal_block; /* journal block number */
- __u32 s_journal_dev; /* journal device number */
-
- /* Since journal size is currently a #define in a header file, if
- ** someone creates a disk with a 16MB journal and moves it to a
- ** system with 32MB journal default, they will overflow their journal
- ** when they mount the disk. s_orig_journal_size, plus some checks
- ** while mounting (inside journal_init) prevent that from happening
- */
- /* great comment Chris. Thanks. -Hans */
-
- __u32 s_orig_journal_size;
- __u32 s_journal_trans_max ; /* max number of blocks in a transaction. */
- __u32 s_journal_block_count ; /* total size of the journal. can change over time */
- __u32 s_journal_max_batch ; /* max number of blocks to batch into a trans */
- __u32 s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
- __u32 s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
- __u16 s_blocksize; /* block size */
- __u16 s_oid_maxsize; /* max size of object id array, see get_objectid() commentary */
- __u16 s_oid_cursize; /* current size of object id array */
- __u16 s_state; /* valid or error */
- char s_magic[12]; /* reiserfs magic string indicates that file system is reiserfs */
- __u32 s_hash_function_code; /* indicate, what hash function is being use to sort names in a directory*/
- __u16 s_tree_height; /* height of disk tree */
- __u16 s_bmap_nr; /* amount of bitmap blocks needed to address each block of file system */
- __u16 s_version; /* I'd prefer it if this was a string,
- something like "3.6.4", and maybe
- 16 bytes long mostly unused. We
- don't need to save bytes in the
- superblock. -Hans */
- __u16 s_reserved;
- __u32 s_inode_generation;
- char s_unused[124] ; /* zero filled by mkreiserfs */
-} __attribute__ ((__packed__));
-
-#define SB_SIZE (sizeof(struct reiserfs_super_block))
/* struct reiserfs_super_block accessors/mutators
* since this is a disk structure, it will always be in
* little endian format. */
-#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_block_count))
-#define set_sb_block_count(sbp,v) ((sbp)->s_block_count = cpu_to_le32(v))
-#define sb_free_blocks(sbp) (le32_to_cpu((sbp)->s_free_blocks))
-#define set_sb_free_blocks(sbp,v) ((sbp)->s_free_blocks = cpu_to_le32(v))
-#define sb_root_block(sbp) (le32_to_cpu((sbp)->s_root_block))
-#define set_sb_root_block(sbp,v) ((sbp)->s_root_block = cpu_to_le32(v))
-#define sb_journal_block(sbp) (le32_to_cpu((sbp)->s_journal_block))
-#define set_sb_journal_block(sbp,v) ((sbp)->s_journal_block = cpu_to_le32(v))
-#define sb_journal_dev(sbp) (le32_to_cpu((sbp)->s_journal_dev))
-#define set_sb_journal_dev(sbp,v) ((sbp)->s_journal_dev = cpu_to_le32(v))
-#define sb_orig_journal_size(sbp) (le32_to_cpu((sbp)->s_orig_journal_size))
-#define set_sb_orig_journal_size(sbp,v) \
- ((sbp)->s_orig_journal_size = cpu_to_le32(v))
-#define sb_journal_trans_max(sbp) (le32_to_cpu((sbp)->s_journal_trans_max))
-#define set_journal_trans_max(sbp,v) \
- ((sbp)->s_journal_trans_max = cpu_to_le32(v))
-#define sb_journal_block_count(sbp) (le32_to_cpu((sbp)->journal_block_count))
-#define sb_set_journal_block_count(sbp,v) \
- ((sbp)->s_journal_block_count = cpu_to_le32(v))
-#define sb_journal_max_batch(sbp) (le32_to_cpu((sbp)->s_journal_max_batch))
-#define set_sb_journal_max_batch(sbp,v) \
- ((sbp)->s_journal_max_batch = cpu_to_le32(v))
-#define sb_jourmal_max_commit_age(sbp) \
- (le32_to_cpu((sbp)->s_journal_max_commit_age))
-#define set_sb_journal_max_commit_age(sbp,v) \
- ((sbp)->s_journal_max_commit_age = cpu_to_le32(v))
-#define sb_jourmal_max_trans_age(sbp) \
- (le32_to_cpu((sbp)->s_journal_max_trans_age))
-#define set_sb_journal_max_trans_age(sbp,v) \
- ((sbp)->s_journal_max_trans_age = cpu_to_le32(v))
-#define sb_blocksize(sbp) (le16_to_cpu((sbp)->s_blocksize))
-#define set_sb_blocksize(sbp,v) ((sbp)->s_blocksize = cpu_to_le16(v))
-#define sb_oid_maxsize(sbp) (le16_to_cpu((sbp)->s_oid_maxsize))
-#define set_sb_oid_maxsize(sbp,v) ((sbp)->s_oid_maxsize = cpu_to_le16(v))
-#define sb_oid_cursize(sbp) (le16_to_cpu((sbp)->s_oid_cursize))
-#define set_sb_oid_cursize(sbp,v) ((sbp)->s_oid_cursize = cpu_to_le16(v))
-#define sb_state(sbp) (le16_to_cpu((sbp)->s_state))
-#define set_sb_state(sbp,v) ((sbp)->s_state = cpu_to_le16(v))
+#define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
+#define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
+#define sb_free_blocks(sbp) (le32_to_cpu((sbp)->s_v1.s_free_blocks))
+#define set_sb_free_blocks(sbp,v) ((sbp)->s_v1.s_free_blocks = cpu_to_le32(v))
+#define sb_root_block(sbp) (le32_to_cpu((sbp)->s_v1.s_root_block))
+#define set_sb_root_block(sbp,v) ((sbp)->s_v1.s_root_block = cpu_to_le32(v))
+
+#define sb_jp_journal_1st_block(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_1st_block))
+#define set_sb_jp_journal_1st_block(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_1st_block = cpu_to_le32(v))
+#define sb_jp_journal_dev(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_dev))
+#define set_sb_jp_journal_dev(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_dev = cpu_to_le32(v))
+#define sb_jp_journal_size(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_size))
+#define set_sb_jp_journal_size(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_size = cpu_to_le32(v))
+#define sb_jp_journal_trans_max(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_trans_max))
+#define set_sb_jp_journal_trans_max(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_trans_max = cpu_to_le32(v))
+#define sb_jp_journal_magic(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_magic))
+#define set_sb_jp_journal_magic(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_magic = cpu_to_le32(v))
+#define sb_jp_journal_max_batch(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_batch))
+#define set_sb_jp_journal_max_batch(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_max_batch = cpu_to_le32(v))
+#define sb_jp_journal_max_commit_age(sbp) \
+ (le32_to_cpu((sbp)->s_v1.s_journal.jp_journal_max_commit_age))
+#define set_sb_jp_journal_max_commit_age(sbp,v) \
+ ((sbp)->s_v1.s_journal.jp_journal_max_commit_age = cpu_to_le32(v))
+
+#define sb_blocksize(sbp) (le16_to_cpu((sbp)->s_v1.s_blocksize))
+#define set_sb_blocksize(sbp,v) ((sbp)->s_v1.s_blocksize = cpu_to_le16(v))
+#define sb_oid_maxsize(sbp) (le16_to_cpu((sbp)->s_v1.s_oid_maxsize))
+#define set_sb_oid_maxsize(sbp,v) ((sbp)->s_v1.s_oid_maxsize = cpu_to_le16(v))
+#define sb_oid_cursize(sbp) (le16_to_cpu((sbp)->s_v1.s_oid_cursize))
+#define set_sb_oid_cursize(sbp,v) ((sbp)->s_v1.s_oid_cursize = cpu_to_le16(v))
+#define sb_umount_state(sbp) (le16_to_cpu((sbp)->s_v1.s_umount_state))
+#define set_sb_umount_state(sbp,v) ((sbp)->s_v1.s_umount_state = cpu_to_le16(v))
+#define sb_fs_state(sbp) (le16_to_cpu((sbp)->s_v1.s_fs_state))
+#define set_sb_fs_state(sbp,v) ((sbp)->s_v1.s_fs_state = cpu_to_le16(v))
#define sb_hash_function_code(sbp) \
- (le32_to_cpu((sbp)->s_hash_function_code))
+ (le32_to_cpu((sbp)->s_v1.s_hash_function_code))
#define set_sb_hash_function_code(sbp,v) \
- ((sbp)->s_hash_function_code = cpu_to_le32(v))
-#define sb_tree_height(sbp) (le16_to_cpu((sbp)->s_tree_height))
-#define set_sb_tree_height(sbp,v) ((sbp)->s_tree_height = cpu_to_le16(v))
-#define sb_bmap_nr(sbp) (le16_to_cpu((sbp)->s_bmap_nr))
-#define set_sb_bmap_nr(sbp,v) ((sbp)->s_bmap_nr = cpu_to_le16(v))
-#define sb_version(sbp) (le16_to_cpu((sbp)->s_version))
-#define set_sb_version(sbp,v) ((sbp)->s_version = cpu_to_le16(v))
-
-/* this is the super from 3.5.X, where X >= 10 */
-struct reiserfs_super_block_v1
-{
- __u32 s_block_count; /* blocks count */
- __u32 s_free_blocks; /* free blocks count */
- __u32 s_root_block; /* root block number */
- __u32 s_journal_block; /* journal block number */
- __u32 s_journal_dev; /* journal device number */
- __u32 s_orig_journal_size; /* size of the journal on FS creation. used to make sure they don't overflow it */
- __u32 s_journal_trans_max ; /* max number of blocks in a transaction. */
- __u32 s_journal_block_count ; /* total size of the journal. can change over time */
- __u32 s_journal_max_batch ; /* max number of blocks to batch into a trans */
- __u32 s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
- __u32 s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
- __u16 s_blocksize; /* block size */
- __u16 s_oid_maxsize; /* max size of object id array, see get_objectid() commentary */
- __u16 s_oid_cursize; /* current size of object id array */
- __u16 s_state; /* valid or error */
- char s_magic[16]; /* reiserfs magic string indicates that file system is reiserfs */
- __u16 s_tree_height; /* height of disk tree */
- __u16 s_bmap_nr; /* amount of bitmap blocks needed to address each block of file system */
- __u32 s_reserved;
-} __attribute__ ((__packed__));
-
-#define SB_SIZE_V1 (sizeof(struct reiserfs_super_block_v1))
+ ((sbp)->s_v1.s_hash_function_code = cpu_to_le32(v))
+#define sb_tree_height(sbp) (le16_to_cpu((sbp)->s_v1.s_tree_height))
+#define set_sb_tree_height(sbp,v) ((sbp)->s_v1.s_tree_height = cpu_to_le16(v))
+#define sb_bmap_nr(sbp) (le16_to_cpu((sbp)->s_v1.s_bmap_nr))
+#define set_sb_bmap_nr(sbp,v) ((sbp)->s_v1.s_bmap_nr = cpu_to_le16(v))
+#define sb_version(sbp) (le16_to_cpu((sbp)->s_v1.s_version))
+#define set_sb_version(sbp,v) ((sbp)->s_v1.s_version = cpu_to_le16(v))
+
+#define sb_reserved_for_journal(sbp) \
+ (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal))
+#define set_sb_reserved_for_journal(sbp,v) \
+ ((sbp)->s_v1.s_reserved_for_journal = cpu_to_le16(v))
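With the journal parameters nested in s_v1.s_journal, callers read them
through these accessors and never touch the little-endian fields
directly; a hypothetical example:

static void report_journal_params(struct reiserfs_super_block *rs)
{
	printk(KERN_INFO "journal: 1st block %u, size %u, trans_max %u\n",
	       sb_jp_journal_1st_block(rs),
	       sb_jp_journal_size(rs),
	       sb_jp_journal_trans_max(rs));
}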
/* LOGGING -- */
/* we have a node size define somewhere in reiserfs_fs.h. -Hans */
#define JOURNAL_BLOCK_SIZE 4096 /* BUG gotta get rid of this */
#define JOURNAL_MAX_CNODE 1500 /* max cnodes to allocate. */
-#define JOURNAL_TRANS_MAX 1024 /* biggest possible single transaction, don't change for now (8/3/99) */
#define JOURNAL_HASH_SIZE 8192
#define JOURNAL_NUM_BITMAPS 5 /* number of copies of the bitmaps to have floating. Must be >= 2 */
#define JOURNAL_LIST_COUNT 64
struct buffer_head ** j_ap_blocks ; /* journal blocks on disk */
struct reiserfs_journal_cnode *j_last ; /* newest journal block */
struct reiserfs_journal_cnode *j_first ; /* oldest journal block. start here for traverse */
-
+
+ kdev_t j_dev;
+ struct file *j_dev_file;
+ struct block_device *j_dev_bd;
+  int j_1st_reserved_block;     /* first block on s_dev of the area
+                                   reserved for the journal */
+
int j_state ;
unsigned long j_trans_id ;
unsigned long j_mount_id ;
int j_cnode_used ; /* number of cnodes on the used list */
int j_cnode_free ; /* number of cnodes on the free list */
+ unsigned int s_journal_trans_max ; /* max number of blocks in a transaction. */
+ unsigned int s_journal_max_batch ; /* max number of blocks to batch into a trans */
+ unsigned int s_journal_max_commit_age ; /* in seconds, how old can an async commit be */
+ unsigned int s_journal_max_trans_age ; /* in seconds, how old can a transaction be */
+
struct reiserfs_journal_cnode *j_cnode_free_list ;
struct reiserfs_journal_cnode *j_cnode_free_orig ; /* orig pointer returned from vmalloc */
/* To be obsoleted soon by per buffer seals.. -Hans */
atomic_t s_generation_counter; // increased by one every time the
// tree gets re-balanced
+ unsigned long s_properties; /* File system properties. Currently holds
+ on-disk FS format */
/* session statistics */
int s_kmallocs;
int s_bmaps_without_search;
int s_direct2indirect;
int s_indirect2direct;
+    /* set when it is ok for reiserfs_read_inode2() to read an inode
+       with nlink==0 from disk. Currently this is only used during
+       finish_unfinished() processing at mount time */
+    int s_is_unlinked_ok;
reiserfs_proc_info_data_t s_proc_info_data;
struct proc_dir_entry *procdir;
};
+/* Definitions of reiserfs on-disk properties: */
+#define REISERFS_3_5 0
+#define REISERFS_3_6 1
+/* Mount options */
#define NOTAIL 0 /* -o notail: no tails will be created in a session */
#define REPLAYONLY 3 /* replay journal and return 0. Use by fsck */
#define REISERFS_NOLOG 4 /* -o nolog: turn journalling off */
#define dont_have_tails(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << NOTAIL))
#define replay_only(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REPLAYONLY))
#define reiserfs_dont_log(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_NOLOG))
-#define old_format_only(s) ((SB_VERSION(s) != REISERFS_VERSION_2) && !((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_CONVERT)))
+#define old_format_only(s) ((s)->u.reiserfs_sb.s_properties & (1 << REISERFS_3_5))
+#define convert_reiserfs(s) ((s)->u.reiserfs_sb.s_mount_opt & (1 << REISERFS_CONVERT))
void reiserfs_file_buffer (struct buffer_head * bh, int list);
#define SB_BUFFER_WITH_SB(s) ((s)->u.reiserfs_sb.s_sbh)
#define SB_JOURNAL(s) ((s)->u.reiserfs_sb.s_journal)
+#define SB_JOURNAL_1st_RESERVED_BLOCK(s) (SB_JOURNAL(s)->j_1st_reserved_block)
#define SB_JOURNAL_LIST(s) (SB_JOURNAL(s)->j_journal_list)
#define SB_JOURNAL_LIST_INDEX(s) (SB_JOURNAL(s)->j_journal_list_index)
#define SB_JOURNAL_LEN_FREE(s) (SB_JOURNAL(s)->j_journal_len_free)
#define SB_AP_BITMAP(s) ((s)->u.reiserfs_sb.s_ap_bitmap)
+#define SB_DISK_JOURNAL_HEAD(s) (SB_JOURNAL(s)->j_header_bh)
-// on-disk super block fields converted to cpu form
-#define SB_DISK_SUPER_BLOCK(s) ((s)->u.reiserfs_sb.s_rs)
-#define SB_BLOCK_COUNT(s) sb_block_count (SB_DISK_SUPER_BLOCK(s))
-#define SB_FREE_BLOCKS(s) sb_free_blocks (SB_DISK_SUPER_BLOCK(s))
-#define SB_REISERFS_MAGIC(s) (SB_DISK_SUPER_BLOCK(s)->s_magic)
-#define SB_ROOT_BLOCK(s) sb_root_block (SB_DISK_SUPER_BLOCK(s))
-#define SB_TREE_HEIGHT(s) sb_tree_height (SB_DISK_SUPER_BLOCK(s))
-#define SB_REISERFS_STATE(s) sb_state (SB_DISK_SUPER_BLOCK(s))
-#define SB_VERSION(s) sb_version (SB_DISK_SUPER_BLOCK(s))
-#define SB_BMAP_NR(s) sb_bmap_nr(SB_DISK_SUPER_BLOCK(s))
-
-#define PUT_SB_BLOCK_COUNT(s, val) do { set_sb_block_count( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_FREE_BLOCKS(s, val) do { set_sb_free_blocks( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_ROOT_BLOCK(s, val) do { set_sb_root_block( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_TREE_HEIGHT(s, val) do { set_sb_tree_height( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_REISERFS_STATE(s, val) do { set_sb_state( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_VERSION(s, val) do { set_sb_version( SB_DISK_SUPER_BLOCK(s), val); } while (0)
-#define PUT_SB_BMAP_NR(s, val) do { set_sb_bmap_nr( SB_DISK_SUPER_BLOCK(s), val); } while (0)
+#define SB_JOURNAL_TRANS_MAX(s) (SB_JOURNAL(s)->s_journal_trans_max)
+#define SB_JOURNAL_MAX_BATCH(s) (SB_JOURNAL(s)->s_journal_max_batch)
+#define SB_JOURNAL_MAX_COMMIT_AGE(s) (SB_JOURNAL(s)->s_journal_max_commit_age)
+#define SB_JOURNAL_MAX_TRANS_AGE(s) (SB_JOURNAL(s)->s_journal_max_trans_age)
+#define SB_JOURNAL_DEV(s) (SB_JOURNAL(s)->j_dev)
+
#endif /* _LINUX_REISER_FS_SB */
typedef struct prio_array prio_array_t;
+/* this struct must occupy one 32-bit chunk so that it can be read in one go */
+struct task_work {
+ __s8 need_resched;
+ __u8 syscall_trace; /* count of syscall interceptors */
+ __u8 sigpending;
+ __u8 notify_resume; /* request for notification on
+ userspace execution resumption */
+} __attribute__((packed));
+
struct task_struct {
/*
* offsets of these are hardcoded elsewhere - touch with care
*/
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
unsigned long flags; /* per process flags, defined below */
- int sigpending;
+ volatile struct task_work work;
+
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread
*/
struct exec_domain *exec_domain;
- volatile long need_resched;
+ long __pad;
unsigned long ptrace;
int lock_depth; /* Lock depth */
*/
#define PT_PTRACED 0x00000001
-#define PT_TRACESYS 0x00000002
+#define PT_SYSCALLTRACE 0x00000002 /* T if syscall_trace is +1 for ptrace() */
#define PT_DTRACE 0x00000004 /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD 0x00000008
#define PT_PTRACE_CAP 0x00000010 /* ptracer can follow suid-exec */
static inline int signal_pending(struct task_struct *p)
{
- return (p->sigpending != 0);
+ return (p->work.sigpending != 0);
}
static inline int need_resched(void)
{
- return unlikely(current->need_resched != 0);
+ return unlikely(current->work.need_resched != 0);
}
static inline void cond_resched(void)
static inline void recalc_sigpending(struct task_struct *t)
{
- t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
+ t->work.sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
}
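The point of packing task_work into one 32-bit chunk is that the
return-to-user path can test for any pending work with a single aligned
load instead of several compares; a hypothetical illustration:

static inline int task_work_pending(struct task_struct *p)
{
	union {
		struct task_work w;
		__u32 raw;
	} u;

	u.w = p->work;		/* one 32-bit read */
	return u.raw != 0;	/* any of the four fields nonzero? */
}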
/* True if we are on the alternate signal stack. */
* @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to
* collect the transfer status for each buffer.
*
- * This structure identifies USB transfer requests. URBs may be allocated
- * in any way, although usb_alloc_urb() is often convenient. Initialization
- * may be done using various usb_fill_*_urb() functions. URBs are submitted
- * using usb_submit_urb(), and pending requests may be canceled using
- * usb_unlink_urb().
+ * This structure identifies USB transfer requests. URBs must be allocated by
+ * calling usb_alloc_urb() and freed with a call to usb_free_urb().
+ * Initialization may be done using various usb_fill_*_urb() functions. URBs
+ * are submitted using usb_submit_urb(), and pending requests may be canceled
+ * using usb_unlink_urb().
*
* Initialization:
*
struct urb
{
spinlock_t lock; /* lock for the URB */
+ atomic_t count; /* reference count of the URB */
void *hcpriv; /* private data for host controller */
struct list_head urb_list; /* list pointer to all active urbs */
struct urb *next; /* (in) pointer to next URB */
extern struct urb *usb_alloc_urb(int iso_packets);
extern void usb_free_urb(struct urb *urb);
+#define usb_put_urb usb_free_urb
+extern struct urb *usb_get_urb(struct urb *urb);
extern int usb_submit_urb(struct urb *urb);
extern int usb_unlink_urb(struct urb *urb);
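A sketch of the new reference counting (helper names are illustrative):
a driver handing an URB to a second context takes an extra reference so
the original owner's usb_free_urb() cannot release it prematurely.

/* hand the URB to another user: take a reference first */
static struct urb *share_urb(struct urb *urb)
{
	return usb_get_urb(urb);	/* increments urb->count */
}

/* each user drops its reference; the last put frees the URB */
static void unshare_urb(struct urb *urb)
{
	usb_put_urb(urb);		/* alias for usb_free_urb() */
}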
--- /dev/null
+/*
+ File: linux/xattr.h
+
+ Extended attributes handling.
+
+ Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
+ Copyright (C) 2001 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com>
+*/
+#ifndef _LINUX_XATTR_H
+#define _LINUX_XATTR_H
+
+#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
+#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
+
+#endif /* _LINUX_XATTR_H */
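The flag semantics match those of the setxattr(2) interface; a sketch of
how a filesystem might honour them (the helper is hypothetical, only the
two XATTR_* constants come from this header):

#include <linux/errno.h>

static int xattr_check_flags(int flags, int attr_exists)
{
	if ((flags & XATTR_CREATE) && attr_exists)
		return -EEXIST;		/* "create", but already there */
	if ((flags & XATTR_REPLACE) && !attr_exists)
		return -ENODATA;	/* "replace", but nothing there */
	return 0;			/* flags == 0: create or replace */
}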
--- /dev/null
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+ version 1.1.3, July 9th, 1998
+
+ Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+
+ The data format used by the zlib library is described by RFCs (Request for
+ Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
+ (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
+*/
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+#include "zconf.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ZLIB_VERSION "1.1.3"
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms will be added later and will have the same
+ stream interface.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+
+ The library also supports reading and writing files in gzip (.gz) format
+ with an interface similar to that of stdio.
+
+ The library does not install any signal handler. The decoder checks
+ the consistency of the compressed data, so the library should never
+ crash even in case of corrupted input.
+*/
+
+typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
+typedef void (*free_func) OF((voidpf opaque, voidpf address));
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ Bytef *next_in; /* next input byte */
+ uInt avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Bytef *next_out; /* next output byte should be put there */
+ uInt avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ char *msg; /* last error message, NULL if no error */
+ struct internal_state FAR *state; /* not visible by applications */
+
+ void *workspace; /* memory allocated for this stream */
+
+ int data_type; /* best guess about the data type: ascii or binary */
+ uLong adler; /* adler32 value of the uncompressed data */
+ uLong reserved; /* reserved for future use */
+} z_stream;
+
+typedef z_stream FAR *z_streamp;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return Z_NULL if there is not enough memory for the object.
+ If zlib is used in a multi-threaded application, zalloc and zfree must be
+ thread safe.
+
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this
+ if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
+ pointers returned by zalloc for objects of exactly 65536 bytes *must*
+ have their offset normalized to zero. The default allocation function
+ provided by this library ensures this (see zutil.c). To reduce memory
+ requirements and avoid any allocation of 64K objects, at the expense of
+ compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or
+ progress reports. After compression, total_in holds the total size of
+ the uncompressed data and may be saved for use in the decompressor
+ (particularly if the decompressor wants to decompress everything in
+ a single step).
+*/
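+
+/*
+   A minimal sketch of the buffer contract above (not part of the
+   original header). Note that in this kernel copy the z_stream carries
+   a caller-allocated workspace field rather than allocator callbacks;
+   in_buf, out_buf, refill() and drain() are hypothetical helpers
+   supplied by the application:
+
+     strm.next_in   = in_buf;
+     strm.avail_in  = refill(in_buf);
+     strm.next_out  = out_buf;
+     strm.avail_out = sizeof(out_buf);
+     rc = zlib_deflate(&strm, Z_NO_FLUSH);
+     drain(out_buf, sizeof(out_buf) - strm.avail_out);
+
+   refill() provides more input and drain() consumes the output the call
+   produced; the application repeats this until all data is processed.
+*/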
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
+#define Z_PACKET_FLUSH 2
+#define Z_SYNC_FLUSH 3
+#define Z_FULL_FLUSH 4
+#define Z_FINISH 5
+/* Allowed flush values; see deflate() below for details */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_NEED_DICT 2
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+#define Z_VERSION_ERROR (-6)
+/* Return codes for the compression/decompression functions. Negative
+ * values are errors, positive values are used for special but normal events.
+ */
+
+#define Z_NO_COMPRESSION 0
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_DEFAULT_STRATEGY 0
+/* compression strategy; see deflateInit2() below for details */
+
+#define Z_BINARY 0
+#define Z_ASCII 1
+#define Z_UNKNOWN 2
+/* Possible values of the data_type field */
+
+#define Z_DEFLATED 8
+/* The deflate compression method (the only one supported in this version) */
+
+#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
+
+ /* basic functions */
+
+ZEXTERN const char * ZEXPORT zlib_zlibVersion OF((void));
+/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
+ If the first character differs, the library code actually used is
+ not compatible with the zlib.h header file used by the application.
+ This check is automatically made by deflateInit and inflateInit.
+ */
+
+ZEXTERN int ZEXPORT zlib_deflate_workspacesize OF((void));
+/*
+ Returns the number of bytes that need to be allocated for a
+ per-stream workspace. A pointer to a region of at least this many
+ bytes must be stored in strm->workspace before calling
+ zlib_deflateInit().
+*/
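+
+/*
+   Hedged usage sketch (assumes process context, where vmalloc() may be
+   used; error handling abbreviated):
+
+     #include <linux/vmalloc.h>
+
+     strm.workspace = vmalloc(zlib_deflate_workspacesize());
+     if (strm.workspace == NULL)
+             return -ENOMEM;
+     if (zlib_deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK) {
+             vfree(strm.workspace);
+             return -EINVAL;
+     }
+*/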
+
+/*
+ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level));
+
+ Initializes the internal stream state for compression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller.
+ If zalloc and zfree are set to Z_NULL, deflateInit updates them to
+ use default allocation functions.
+
+ The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
+ 1 gives best speed, 9 gives best compression, 0 gives no compression at
+ all (the input data is simply copied a block at a time).
+ Z_DEFAULT_COMPRESSION requests a default compromise between speed and
+ compression (currently equivalent to level 6).
+
+ deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if level is not a valid compression level,
+ Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
+ with the version assumed by the caller (ZLIB_VERSION).
+ msg is set to null if there is no error message. deflateInit does not
+ perform any compression: this will be done by deflate().
+*/
+
+
+ZEXTERN int ZEXPORT zlib_deflate OF((z_streamp strm, int flush));
+/*
+ deflate compresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce some
+ output latency (reading input without producing any output) except when
+ forced to flush.
+
+ The detailed semantics are as follows. deflate performs one or both of the
+ following actions:
+
+ - Compress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in and avail_in are updated and
+ processing will resume at this point for the next call of deflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. This action is forced if the parameter flush is non zero.
+ Forcing flush frequently degrades the compression ratio, so this parameter
+ should be set only when necessary (in interactive applications).
+ Some output may be provided even if flush is not set.
+
+ Before the call of deflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating avail_in or avail_out accordingly; avail_out
+ should never be zero before the call. The application can consume the
+ compressed output when it wants, for example when the output buffer is full
+ (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
+ with avail_out == 0, it must be called again after making room in the
+ output buffer because there might be more output pending.
+
+ If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
+ flushed to the output buffer and the output is aligned on a byte boundary, so
+ that the decompressor can get all input data available so far. (In particular
+ avail_in is zero after the call if enough output space has been provided
+ before the call.) Flushing may degrade compression for some compression
+ algorithms and so it should be used only when necessary.
+
+ If flush is set to Z_FULL_FLUSH, all output is flushed as with
+ Z_SYNC_FLUSH, and the compression state is reset so that decompression can
+ restart from this point if previous compressed data has been damaged or if
+ random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
+ the compression.
+
+ If deflate returns with avail_out == 0, this function must be called again
+ with the same value of the flush parameter and more output space (updated
+ avail_out), until the flush is complete (deflate returns with non-zero
+ avail_out).
+
+ If the parameter flush is set to Z_FINISH, pending input is processed,
+ pending output is flushed and deflate returns with Z_STREAM_END if there
+ was enough output space; if deflate returns with Z_OK, this function must be
+ called again with Z_FINISH and more output space (updated avail_out) but no
+ more input data, until it returns with Z_STREAM_END or an error. After
+ deflate has returned Z_STREAM_END, the only possible operations on the
+ stream are deflateReset or deflateEnd.
+
+ Z_FINISH can be used immediately after deflateInit if all the compression
+ is to be done in a single step. In this case, avail_out must be at least
+ 0.1% larger than avail_in plus 12 bytes. If deflate does not return
+ Z_STREAM_END, then it must be called again as described above.
+
+ deflate() sets strm->adler to the adler32 checksum of all input read
+ so far (that is, total_in bytes).
+
+ deflate() may update data_type if it can make a good guess about
+ the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
+ binary. This field is only for information purposes and does not affect
+ the compression algorithm in any manner.
+
+ deflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if all input has been
+ consumed and all output has been produced (only when flush is set to
+ Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
+ if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible
+ (for example avail_in or avail_out was zero).
+*/
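+
+/*
+   Sketch of the single-step use described above; src, src_len, dst and
+   dst_len are hypothetical, the stream is assumed to be initialized,
+   and dst must be at least 0.1% larger than src_len plus 12 bytes:
+
+     strm.next_in   = src;
+     strm.avail_in  = src_len;
+     strm.next_out  = dst;
+     strm.avail_out = dst_len;
+     rc = zlib_deflate(&strm, Z_FINISH);
+     if (rc != Z_STREAM_END)
+             return -EIO;
+     compressed_len = strm.total_out;
+     zlib_deflateEnd(&strm);
+
+   Anything other than Z_STREAM_END here means dst was too small or the
+   stream state was inconsistent.
+*/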
+
+
+ZEXTERN int ZEXPORT zlib_deflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
+ stream state was inconsistent, Z_DATA_ERROR if the stream was freed
+ prematurely (some input or output was discarded). In the error case,
+ msg may be set but then points to a static string (which must not be
+ deallocated).
+*/
+
+
+ZEXTERN int ZEXPORT zlib_inflate_workspacesize OF((void));
+/*
+ Returns the number of bytes that need to be allocated for a
+ per-stream workspace. A pointer to a region of at least this many
+ bytes must be stored in strm->workspace before calling
+ zlib_inflateInit().
+*/
+
+/*
+ZEXTERN int ZEXPORT zlib_inflateInit OF((z_streamp strm));
+
+ Initializes the internal stream state for decompression. The fields
+ next_in, avail_in, and workspace must be initialized before by
+ the caller. If next_in is not Z_NULL and avail_in is large enough (the exact
+ value depends on the compression method), inflateInit determines the
+ compression method from the zlib header and allocates all data structures
+ accordingly; otherwise the allocation will be deferred to the first call of
+ inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to
+ use default allocation functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
+ version assumed by the caller. msg is set to null if there is no error
+ message. inflateInit does not perform any decompression apart from reading
+ the zlib header if present: this will be done by inflate(). (So next_in and
+ avail_in may be modified, but next_out and avail_out are unchanged.)
+*/
+
+
+ZEXTERN int ZEXPORT zlib_inflate OF((z_streamp strm, int flush));
+/*
+ inflate decompresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce
+ some output latency (reading input without producing any output)
+ except when forced to flush.
+
+ The detailed semantics are as follows. inflate performs one or both of the
+ following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing
+ will resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() provides as much output as possible, until there
+ is no more input data or no more space in the output buffer (see below
+ about the flush parameter).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating the next_* and avail_* values accordingly.
+ The application can consume the uncompressed output when it wants, for
+ example when the output buffer is full (avail_out == 0), or after each
+ call of inflate(). If inflate returns Z_OK with avail_out == 0, it
+ must be called again after making room in the output buffer because there
+ might be more output pending.
+
+ If the parameter flush is set to Z_SYNC_FLUSH, inflate flushes as much
+ output as possible to the output buffer. The flushing behavior of inflate is
+ not specified for values of the flush parameter other than Z_SYNC_FLUSH
+ and Z_FINISH, but the current implementation actually flushes as much output
+ as possible anyway.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step
+ (a single call of inflate), the parameter flush should be set to
+ Z_FINISH. In this case all pending input is processed and all pending
+ output is flushed; avail_out must be large enough to hold all the
+ uncompressed data. (The size of the uncompressed data may have been saved
+ by the compressor for this purpose.) The next operation on this stream must
+ be inflateEnd to deallocate the decompression state. The use of Z_FINISH
+ is never required, but can be used to inform inflate that a faster routine
+ may be used for the single inflate() call.
+
+ If a preset dictionary is needed at this point (see inflateSetDictionary
+ below), inflate sets strm->adler to the adler32 checksum of the
+ dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise
+ it sets strm->adler to the adler32 checksum of all output produced
+ so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or
+ an error code as described below. At the end of the stream, inflate()
+ checks that its computed adler32 checksum is equal to that saved by the
+ compressor and returns Z_STREAM_END only if the checksum is correct.
+
+ inflate() returns Z_OK if some progress has been made (more input processed
+ or more output produced), Z_STREAM_END if the end of the compressed data has
+ been reached and all uncompressed output has been produced, Z_NEED_DICT if a
+ preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
+ corrupted (input stream not conforming to the zlib format or incorrect
+ adler32 checksum), Z_STREAM_ERROR if the stream structure was inconsistent
+ (for example if next_in or next_out was NULL), Z_MEM_ERROR if there was not
+ enough memory, Z_BUF_ERROR if no progress is possible or if there was not
+ enough room in the output buffer when Z_FINISH is used. In the Z_DATA_ERROR
+ case, the application may then call inflateSync to look for a good
+ compression block.
+*/
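+
+/*
+   Sketch of the normal calling sequence described above; fill_input()
+   and flush_output() are hypothetical, and the workspace is assumed to
+   have been set up as for zlib_inflateInit():
+
+     do {
+             if (strm.avail_in == 0) {
+                     strm.next_in  = in_buf;
+                     strm.avail_in = fill_input(in_buf, sizeof(in_buf));
+             }
+             strm.next_out  = out_buf;
+             strm.avail_out = sizeof(out_buf);
+             rc = zlib_inflate(&strm, Z_NO_FLUSH);
+             flush_output(out_buf, sizeof(out_buf) - strm.avail_out);
+     } while (rc == Z_OK);
+     if (rc != Z_STREAM_END)
+             return -EIO;
+     zlib_inflateEnd(&strm);
+
+   A final value other than Z_STREAM_END indicates corrupt or truncated
+   input (see the return codes above).
+*/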
+
+
+ZEXTERN int ZEXPORT zlib_inflateEnd OF((z_streamp strm));
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
+ /* Advanced functions */
+
+/*
+ The following functions are needed only in some special applications.
+*/
+
+/*
+ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm,
+ int level,
+ int method,
+ int windowBits,
+ int memLevel,
+ int strategy));
+
+ This is another version of deflateInit with more compression options. The
+ fields next_in, zalloc, zfree and opaque must be initialized before by
+ the caller.
+
+ The method parameter is the compression method. It must be Z_DEFLATED in
+ this version of the library.
+
+ The windowBits parameter is the base two logarithm of the window size
+ (the size of the history buffer). It should be in the range 8..15 for this
+ version of the library. Larger values of this parameter result in better
+ compression at the expense of memory usage. The default value is 15 if
+ deflateInit is used instead.
+
+ The memLevel parameter specifies how much memory should be allocated
+ for the internal compression state. memLevel=1 uses minimum memory but
+ is slow and reduces compression ratio; memLevel=9 uses maximum memory
+ for optimal speed. The default value is 8. See zconf.h for total memory
+ usage as a function of windowBits and memLevel.
+
+ The strategy parameter is used to tune the compression algorithm. Use the
+ value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+ filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
+ string match). Filtered data consists mostly of small values with a
+ somewhat random distribution. In this case, the compression algorithm is
+ tuned to compress them better. The effect of Z_FILTERED is to force more
+ Huffman coding and less string matching; it is somewhat intermediate
+ between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. The strategy parameter only affects
+ the compression ratio but not the correctness of the compressed output even
+ if it is not set appropriately.
+
+ deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
+ method). msg is set to null if there is no error message. deflateInit2 does
+ not perform any compression: this will be done by deflate().
+*/
+
+ZEXTERN int ZEXPORT zlib_deflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the compression dictionary from the given byte sequence
+ without producing any compressed output. This function must be called
+ immediately after deflateInit, deflateInit2 or deflateReset, before any
+ call of deflate. The compressor and decompressor must use exactly the same
+ dictionary (see inflateSetDictionary).
+
+ The dictionary should consist of strings (byte sequences) that are likely
+ to be encountered later in the data to be compressed, with the most commonly
+ used strings preferably put towards the end of the dictionary. Using a
+ dictionary is most useful when the data to be compressed is short and can be
+ predicted with good accuracy; the data can then be compressed better than
+ with the default empty dictionary.
+
+ Depending on the size of the compression data structures selected by
+ deflateInit or deflateInit2, a part of the dictionary may in effect be
+ discarded, for example if the dictionary is larger than the window size in
+ deflate or deflate2. Thus the strings most likely to be useful should be
+ put at the end of the dictionary, not at the front.
+
+ Upon return of this function, strm->adler is set to the Adler32 value
+ of the dictionary; the decompressor may later use this value to determine
+ which dictionary has been used by the compressor. (The Adler32 value
+ applies to the whole dictionary even if only a subset of the dictionary is
+ actually used by the compressor.)
+
+ deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state is
+ inconsistent (for example if deflate has already been called for this stream
+ or if the compression method is bsort). deflateSetDictionary does not
+ perform any compression: this will be done by deflate().
+*/
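+
+/*
+   Sketch (dict and dict_len are hypothetical; the decompressor must be
+   handed the same bytes via zlib_inflateSetDictionary()):
+
+     zlib_deflateInit(&strm, Z_DEFAULT_COMPRESSION);
+     zlib_deflateSetDictionary(&strm, dict, dict_len);
+     dict_id = strm.adler;
+
+   dict_id records the Adler32 of the dictionary so the receiving side
+   can select the matching one; compression then proceeds with
+   zlib_deflate() as usual.
+*/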
+
+ZEXTERN int ZEXPORT zlib_deflateCopy OF((z_streamp dest,
+ z_streamp source));
+/*
+ Sets the destination stream as a complete copy of the source stream.
+
+ This function can be useful when several compression strategies will be
+ tried, for example when there are several ways of pre-processing the input
+ data with a filter. The streams that will be discarded should then be freed
+ by calling deflateEnd. Note that deflateCopy duplicates the internal
+ compression state which can be quite large, so this strategy is slow and
+ can consume lots of memory.
+
+ deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ (such as zalloc being NULL). msg is left unchanged in both source and
+ destination.
+*/
+
+ZEXTERN int ZEXPORT zlib_deflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to deflateEnd followed by deflateInit,
+ but does not free and reallocate all the internal compression state.
+ The stream will keep the same compression level and any other attributes
+ that may have been set by deflateInit2.
+
+ deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+ZEXTERN int ZEXPORT zlib_deflateParams OF((z_streamp strm,
+ int level,
+ int strategy));
+/*
+ Dynamically update the compression level and compression strategy. The
+ interpretation of level and strategy is as in deflateInit2. This can be
+ used to switch between compression and straight copy of the input data, or
+ to switch to a different kind of input data requiring a different
+ strategy. If the compression level is changed, the input available so far
+ is compressed with the old level (and may be flushed); the new level will
+ take effect only at the next call of deflate().
+
+ Before the call of deflateParams, the stream state must be set as for
+ a call of deflate(), since the currently available input may have to
+ be compressed and flushed. In particular, strm->avail_out must be non-zero.
+
+ deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
+ stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
+ if strm->avail_out was zero.
+*/
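+
+/*
+   Sketch: falling back to straight copy once the input has proved
+   incompressible (the stream state is assumed to be set up as for a
+   call of zlib_deflate(), with avail_out non-zero):
+
+     rc = zlib_deflateParams(&strm, Z_NO_COMPRESSION, Z_DEFAULT_STRATEGY);
+*/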
+
+/*
+ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
+ int windowBits));
+
+ This is another version of inflateInit with an extra parameter. The
+ fields next_in, avail_in, zalloc, zfree and opaque must be initialized
+ before by the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library. The default value is 15 if inflateInit is used
+ instead. If a compressed stream with a larger window size is given as
+ input, inflate() will return with the error code Z_DATA_ERROR instead of
+ trying to allocate a larger window.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if a parameter is invalid (such as a negative
+ memLevel). msg is set to null if there is no error message. inflateInit2
+ does not perform any decompression apart from reading the zlib header if
+ present: this will be done by inflate(). (So next_in and avail_in may be
+ modified, but next_out and avail_out are unchanged.)
+*/
+
+ZEXTERN int ZEXPORT zlib_inflateSetDictionary OF((z_streamp strm,
+ const Bytef *dictionary,
+ uInt dictLength));
+/*
+ Initializes the decompression dictionary from the given uncompressed byte
+ sequence. This function must be called immediately after a call of inflate
+ if this call returned Z_NEED_DICT. The dictionary chosen by the compressor
+ can be determined from the Adler32 value returned by this call of
+ inflate. The compressor and decompressor must use exactly the same
+ dictionary (see deflateSetDictionary).
+
+ inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
+ parameter is invalid (such as NULL dictionary) or the stream state is
+ inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
+ expected one (incorrect Adler32 value). inflateSetDictionary does not
+ perform any decompression: this will be done by subsequent calls of
+ inflate().
+*/
+
+ZEXTERN int ZEXPORT zlib_inflateSync OF((z_streamp strm));
+/*
+ Skips invalid compressed data until a full flush point (see above the
+ description of deflate with Z_FULL_FLUSH) can be found, or until all
+ available input is skipped. No output is provided.
+
+ inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
+ if no more input was provided, Z_DATA_ERROR if no flush point has been found,
+ or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+ case, the application may save the current value of total_in, which
+ indicates where valid compressed data was found. In the error case, the
+ application may repeatedly call inflateSync, providing more input each time,
+ until success or end of the input data.
+*/
+
+ZEXTERN int ZEXPORT zlib_inflateReset OF((z_streamp strm));
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int ZEXPORT zlib_inflateIncomp OF((z_stream *strm));
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/
+
+ /* various hacks, don't look :) */
+
+/* deflateInit and inflateInit are macros to allow checking the zlib version
+ * and the compiler's view of z_stream:
+ */
+ZEXTERN int ZEXPORT zlib_deflateInit_ OF((z_streamp strm, int level,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT zlib_inflateInit_ OF((z_streamp strm,
+ const char *version, int stream_size));
+ZEXTERN int ZEXPORT zlib_deflateInit2_ OF((z_streamp strm, int level, int method,
+ int windowBits, int memLevel,
+ int strategy, const char *version,
+ int stream_size));
+ZEXTERN int ZEXPORT zlib_inflateInit2_ OF((z_streamp strm, int windowBits,
+ const char *version, int stream_size));
+#define zlib_deflateInit(strm, level) \
+ zlib_deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
+#define zlib_inflateInit(strm) \
+ zlib_inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
+#define zlib_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
+ zlib_deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, sizeof(z_stream))
+#define zlib_inflateInit2(strm, windowBits) \
+ zlib_inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
+
+
+#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
+ struct internal_state {int dummy;}; /* hack for buggy compilers */
+#endif
+
+ZEXTERN const char * ZEXPORT zlib_zError OF((int err));
+ZEXTERN int ZEXPORT zlib_inflateSyncPoint OF((z_streamp z));
+ZEXTERN const uLongf * ZEXPORT zlib_get_crc_table OF((void));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZLIB_H */
-/* zlib.h -- interface of the 'zlib' general purpose compression library
- version 1.1.3, July 9th, 1998
-
- Copyright (C) 1995-1998 Jean-loup Gailly and Mark Adler
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Jean-loup Gailly Mark Adler
- jloup@gzip.org madler@alumni.caltech.edu
-
-
- The data format used by the zlib library is described by RFCs (Request for
- Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
- (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
-*/
-
-#ifndef _ZLIB_H
-#define _ZLIB_H
-
-#include "zconf.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define ZLIB_VERSION "1.1.3"
-
-/*
- The 'zlib' compression library provides in-memory compression and
- decompression functions, including integrity checks of the uncompressed
- data. This version of the library supports only one compression method
- (deflation) but other algorithms will be added later and will have the same
- stream interface.
-
- Compression can be done in a single step if the buffers are large
- enough (for example if an input file is mmap'ed), or can be done by
- repeated calls of the compression function. In the latter case, the
- application must provide more input and/or consume the output
- (providing more output space) before each call.
-
- The library also supports reading and writing files in gzip (.gz) format
- with an interface similar to that of stdio.
-
- The library does not install any signal handler. The decoder checks
- the consistency of the compressed data, so the library should never
- crash even in case of corrupted input.
-*/
-
-typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
-typedef void (*free_func) OF((voidpf opaque, voidpf address));
-
-struct internal_state;
-
-typedef struct z_stream_s {
- Bytef *next_in; /* next input byte */
- uInt avail_in; /* number of bytes available at next_in */
- uLong total_in; /* total nb of input bytes read so far */
-
- Bytef *next_out; /* next output byte should be put there */
- uInt avail_out; /* remaining free space at next_out */
- uLong total_out; /* total nb of bytes output so far */
-
- char *msg; /* last error message, NULL if no error */
- struct internal_state FAR *state; /* not visible by applications */
-
- void *workspace; /* memory allocated for this stream */
-
- int data_type; /* best guess about the data type: ascii or binary */
- uLong adler; /* adler32 value of the uncompressed data */
- uLong reserved; /* reserved for future use */
-} z_stream;
-
-typedef z_stream FAR *z_streamp;
-
-/*
- The application must update next_in and avail_in when avail_in has
- dropped to zero. It must update next_out and avail_out when avail_out
- has dropped to zero. The application must initialize zalloc, zfree and
- opaque before calling the init function. All other fields are set by the
- compression library and must not be updated by the application.
-
- The opaque value provided by the application will be passed as the first
- parameter for calls of zalloc and zfree. This can be useful for custom
- memory management. The compression library attaches no meaning to the
- opaque value.
-
- zalloc must return Z_NULL if there is not enough memory for the object.
- If zlib is used in a multi-threaded application, zalloc and zfree must be
- thread safe.
-
- On 16-bit systems, the functions zalloc and zfree must be able to allocate
- exactly 65536 bytes, but will not be required to allocate more than this
- if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
- pointers returned by zalloc for objects of exactly 65536 bytes *must*
- have their offset normalized to zero. The default allocation function
- provided by this library ensures this (see zutil.c). To reduce memory
- requirements and avoid any allocation of 64K objects, at the expense of
- compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
-
- The fields total_in and total_out can be used for statistics or
- progress reports. After compression, total_in holds the total size of
- the uncompressed data and may be saved for use in the decompressor
- (particularly if the decompressor wants to decompress everything in
- a single step).
-*/
-
- /* constants */
-
-#define Z_NO_FLUSH 0
-#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
-#define Z_SYNC_FLUSH 2
-#define Z_FULL_FLUSH 3
-#define Z_FINISH 4
-/* Allowed flush values; see deflate() below for details */
-
-#define Z_OK 0
-#define Z_STREAM_END 1
-#define Z_NEED_DICT 2
-#define Z_ERRNO (-1)
-#define Z_STREAM_ERROR (-2)
-#define Z_DATA_ERROR (-3)
-#define Z_MEM_ERROR (-4)
-#define Z_BUF_ERROR (-5)
-#define Z_VERSION_ERROR (-6)
-/* Return codes for the compression/decompression functions. Negative
- * values are errors, positive values are used for special but normal events.
- */
-
-#define Z_NO_COMPRESSION 0
-#define Z_BEST_SPEED 1
-#define Z_BEST_COMPRESSION 9
-#define Z_DEFAULT_COMPRESSION (-1)
-/* compression levels */
-
-#define Z_FILTERED 1
-#define Z_HUFFMAN_ONLY 2
-#define Z_DEFAULT_STRATEGY 0
-/* compression strategy; see deflateInit2() below for details */
-
-#define Z_BINARY 0
-#define Z_ASCII 1
-#define Z_UNKNOWN 2
-/* Possible values of the data_type field */
-
-#define Z_DEFLATED 8
-/* The deflate compression method (the only one supported in this version) */
-
-#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
-
- /* basic functions */
-
-ZEXTERN const char * ZEXPORT zlib_fs_zlibVersion OF((void));
-/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
- If the first character differs, the library code actually used is
- not compatible with the zlib.h header file used by the application.
- This check is automatically made by deflateInit and inflateInit.
- */
-
-/*
-ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level));
-
- Initializes the internal stream state for compression. The fields
- zalloc, zfree and opaque must be initialized before by the caller.
- If zalloc and zfree are set to Z_NULL, deflateInit updates them to
- use default allocation functions.
-
- The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
- 1 gives best speed, 9 gives best compression, 0 gives no compression at
- all (the input data is simply copied a block at a time).
- Z_DEFAULT_COMPRESSION requests a default compromise between speed and
- compression (currently equivalent to level 6).
-
- deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if level is not a valid compression level,
- Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
- with the version assumed by the caller (ZLIB_VERSION).
- msg is set to null if there is no error message. deflateInit does not
- perform any compression: this will be done by deflate().
-*/
-
-
-ZEXTERN int ZEXPORT zlib_fs_deflate OF((z_streamp strm, int flush));
-/*
- deflate compresses as much data as possible, and stops when the input
- buffer becomes empty or the output buffer becomes full. It may introduce some
- output latency (reading input without producing any output) except when
- forced to flush.
-
- The detailed semantics are as follows. deflate performs one or both of the
- following actions:
-
- - Compress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in and avail_in are updated and
- processing will resume at this point for the next call of deflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. This action is forced if the parameter flush is non zero.
- Forcing flush frequently degrades the compression ratio, so this parameter
- should be set only when necessary (in interactive applications).
- Some output may be provided even if flush is not set.
-
- Before the call of deflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating avail_in or avail_out accordingly; avail_out
- should never be zero before the call. The application can consume the
- compressed output when it wants, for example when the output buffer is full
- (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
- and with zero avail_out, it must be called again after making room in the
- output buffer because there might be more output pending.
-
- If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
- flushed to the output buffer and the output is aligned on a byte boundary, so
- that the decompressor can get all input data available so far. (In particular
- avail_in is zero after the call if enough output space has been provided
- before the call.) Flushing may degrade compression for some compression
- algorithms and so it should be used only when necessary.
-
- If flush is set to Z_FULL_FLUSH, all output is flushed as with
- Z_SYNC_FLUSH, and the compression state is reset so that decompression can
- restart from this point if previous compressed data has been damaged or if
- random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
- the compression.
-
- If deflate returns with avail_out == 0, this function must be called again
- with the same value of the flush parameter and more output space (updated
- avail_out), until the flush is complete (deflate returns with non-zero
- avail_out).
-
- If the parameter flush is set to Z_FINISH, pending input is processed,
- pending output is flushed and deflate returns with Z_STREAM_END if there
- was enough output space; if deflate returns with Z_OK, this function must be
- called again with Z_FINISH and more output space (updated avail_out) but no
- more input data, until it returns with Z_STREAM_END or an error. After
- deflate has returned Z_STREAM_END, the only possible operations on the
- stream are deflateReset or deflateEnd.
-
- Z_FINISH can be used immediately after deflateInit if all the compression
- is to be done in a single step. In this case, avail_out must be at least
- 0.1% larger than avail_in plus 12 bytes. If deflate does not return
- Z_STREAM_END, then it must be called again as described above.
-
- deflate() sets strm->adler to the adler32 checksum of all input read
- so far (that is, total_in bytes).
-
- deflate() may update data_type if it can make a good guess about
- the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
- binary. This field is only for information purposes and does not affect
- the compression algorithm in any manner.
-
- deflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if all input has been
- consumed and all output has been produced (only when flush is set to
- Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
- if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible
- (for example avail_in or avail_out was zero).
-*/
-
-
-ZEXTERN int ZEXPORT zlib_fs_deflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
- stream state was inconsistent, Z_DATA_ERROR if the stream was freed
- prematurely (some input or output was discarded). In the error case,
- msg may be set but then points to a static string (which must not be
- deallocated).
-*/
-
-
-ZEXTERN int ZEXPORT zlib_fs_inflate_workspacesize OF((void));
-/*
- Returns the number of bytes that needs to be allocated for a per-
- stream workspace. A pointer to this number of bytes should be
- returned in stream->workspace before calling zlib_fs_inflateInit().
-*/
-
-/*
-ZEXTERN int ZEXPORT zlib_fs_inflateInit OF((z_streamp strm));
-
- Initializes the internal stream state for decompression. The fields
- next_in, avail_in, and workspace must be initialized before by
- the caller. If next_in is not Z_NULL and avail_in is large enough (the exact
- value depends on the compression method), inflateInit determines the
- compression method from the zlib header and allocates all data structures
- accordingly; otherwise the allocation will be deferred to the first call of
- inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to
- use default allocation functions.
-
- inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
- version assumed by the caller. msg is set to null if there is no error
- message. inflateInit does not perform any decompression apart from reading
- the zlib header if present: this will be done by inflate(). (So next_in and
- avail_in may be modified, but next_out and avail_out are unchanged.)
-*/
-
-
-ZEXTERN int ZEXPORT zlib_fs_inflate OF((z_streamp strm, int flush));
-/*
- inflate decompresses as much data as possible, and stops when the input
- buffer becomes empty or the output buffer becomes full. It may some
- introduce some output latency (reading input without producing any output)
- except when forced to flush.
-
- The detailed semantics are as follows. inflate performs one or both of the
- following actions:
-
- - Decompress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in is updated and processing
- will resume at this point for the next call of inflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. inflate() provides as much output as possible, until there
- is no more input data or no more space in the output buffer (see below
- about the flush parameter).
-
- Before the call of inflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating the next_* and avail_* values accordingly.
- The application can consume the uncompressed output when it wants, for
- example when the output buffer is full (avail_out == 0), or after each
- call of inflate(). If inflate returns Z_OK and with zero avail_out, it
- must be called again after making room in the output buffer because there
- might be more output pending.
-
- If the parameter flush is set to Z_SYNC_FLUSH, inflate flushes as much
- output as possible to the output buffer. The flushing behavior of inflate is
- not specified for values of the flush parameter other than Z_SYNC_FLUSH
- and Z_FINISH, but the current implementation actually flushes as much output
- as possible anyway.
-
- inflate() should normally be called until it returns Z_STREAM_END or an
- error. However if all decompression is to be performed in a single step
- (a single call of inflate), the parameter flush should be set to
- Z_FINISH. In this case all pending input is processed and all pending
- output is flushed; avail_out must be large enough to hold all the
- uncompressed data. (The size of the uncompressed data may have been saved
- by the compressor for this purpose.) The next operation on this stream must
- be inflateEnd to deallocate the decompression state. The use of Z_FINISH
- is never required, but can be used to inform inflate that a faster routine
- may be used for the single inflate() call.
-
- If a preset dictionary is needed at this point (see inflateSetDictionary
- below), inflate sets strm-adler to the adler32 checksum of the
- dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise
- it sets strm->adler to the adler32 checksum of all output produced
- so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or
- an error code as described below. At the end of the stream, inflate()
- checks that its computed adler32 checksum is equal to that saved by the
- compressor and returns Z_STREAM_END only if the checksum is correct.
-
- inflate() returns Z_OK if some progress has been made (more input processed
- or more output produced), Z_STREAM_END if the end of the compressed data has
- been reached and all uncompressed output has been produced, Z_NEED_DICT if a
- preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
- corrupted (input stream not conforming to the zlib format or incorrect
- adler32 checksum), Z_STREAM_ERROR if the stream structure was inconsistent
- (for example if next_in or next_out was NULL), Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if no progress is possible or if there was not
- enough room in the output buffer when Z_FINISH is used. In the Z_DATA_ERROR
- case, the application may then call inflateSync to look for a good
- compression block.
-*/
-
-
-ZEXTERN int ZEXPORT zlib_fs_inflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
- was inconsistent. In the error case, msg may be set but then points to a
- static string (which must not be deallocated).
-*/
-
- /* Advanced functions */
-
-/*
- The following functions are needed only in some special applications.
-*/
-
-/*
-ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm,
- int level,
- int method,
- int windowBits,
- int memLevel,
- int strategy));
-
- This is another version of deflateInit with more compression options. The
- fields next_in, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The method parameter is the compression method. It must be Z_DEFLATED in
- this version of the library.
-
- The windowBits parameter is the base two logarithm of the window size
- (the size of the history buffer). It should be in the range 8..15 for this
- version of the library. Larger values of this parameter result in better
- compression at the expense of memory usage. The default value is 15 if
- deflateInit is used instead.
-
- The memLevel parameter specifies how much memory should be allocated
- for the internal compression state. memLevel=1 uses minimum memory but
- is slow and reduces compression ratio; memLevel=9 uses maximum memory
- for optimal speed. The default value is 8. See zconf.h for total memory
- usage as a function of windowBits and memLevel.
-
- The strategy parameter is used to tune the compression algorithm. Use the
- value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
- filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
- string match). Filtered data consists mostly of small values with a
- somewhat random distribution. In this case, the compression algorithm is
- tuned to compress them better. The effect of Z_FILTERED is to force more
- Huffman coding and less string matching; it is somewhat intermediate
- between Z_DEFAULT and Z_HUFFMAN_ONLY. The strategy parameter only affects
- the compression ratio but not the correctness of the compressed output even
- if it is not set appropriately.
-
- deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
- method). msg is set to null if there is no error message. deflateInit2 does
- not perform any compression: this will be done by deflate().
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_deflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the compression dictionary from the given byte sequence
- without producing any compressed output. This function must be called
- immediately after deflateInit, deflateInit2 or deflateReset, before any
- call of deflate. The compressor and decompressor must use exactly the same
- dictionary (see inflateSetDictionary).
-
- The dictionary should consist of strings (byte sequences) that are likely
- to be encountered later in the data to be compressed, with the most commonly
- used strings preferably put towards the end of the dictionary. Using a
- dictionary is most useful when the data to be compressed is short and can be
- predicted with good accuracy; the data can then be compressed better than
- with the default empty dictionary.
-
- Depending on the size of the compression data structures selected by
- deflateInit or deflateInit2, a part of the dictionary may in effect be
- discarded, for example if the dictionary is larger than the window size in
- deflate or deflate2. Thus the strings most likely to be useful should be
- put at the end of the dictionary, not at the front.
-
- Upon return of this function, strm->adler is set to the Adler32 value
- of the dictionary; the decompressor may later use this value to determine
- which dictionary has been used by the compressor. (The Adler32 value
- applies to the whole dictionary even if only a subset of the dictionary is
- actually used by the compressor.)
-
- deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state is
- inconsistent (for example if deflate has already been called for this stream
- or if the compression method is bsort). deflateSetDictionary does not
- perform any compression: this will be done by deflate().
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_deflateCopy OF((z_streamp dest,
- z_streamp source));
-/*
- Sets the destination stream as a complete copy of the source stream.
-
- This function can be useful when several compression strategies will be
- tried, for example when there are several ways of pre-processing the input
- data with a filter. The streams that will be discarded should then be freed
- by calling deflateEnd. Note that deflateCopy duplicates the internal
- compression state which can be quite large, so this strategy is slow and
- can consume lots of memory.
-
- deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
- (such as zalloc being NULL). msg is left unchanged in both source and
- destination.
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_deflateReset OF((z_streamp strm));
-/*
- This function is equivalent to deflateEnd followed by deflateInit,
- but does not free and reallocate all the internal compression state.
- The stream will keep the same compression level and any other attributes
- that may have been set by deflateInit2.
-
- deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_deflateParams OF((z_streamp strm,
- int level,
- int strategy));
-/*
- Dynamically update the compression level and compression strategy. The
- interpretation of level and strategy is as in deflateInit2. This can be
- used to switch between compression and straight copy of the input data, or
- to switch to a different kind of input data requiring a different
- strategy. If the compression level is changed, the input available so far
- is compressed with the old level (and may be flushed); the new level will
- take effect only at the next call of deflate().
-
- Before the call of deflateParams, the stream state must be set as for
- a call of deflate(), since the currently available input may have to
- be compressed and flushed. In particular, strm->avail_out must be non-zero.
-
- deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
- stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
- if strm->avail_out was zero.
-*/
-
-/*
-ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
- int windowBits));
-
- This is another version of inflateInit with an extra parameter. The
- fields next_in, avail_in, zalloc, zfree and opaque must be initialized
- before by the caller.
-
- The windowBits parameter is the base two logarithm of the maximum window
- size (the size of the history buffer). It should be in the range 8..15 for
- this version of the library. The default value is 15 if inflateInit is used
- instead. If a compressed stream with a larger window size is given as
- input, inflate() will return with the error code Z_DATA_ERROR instead of
- trying to allocate a larger window.
-
- inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_STREAM_ERROR if a parameter is invalid (such as a negative
- memLevel). msg is set to null if there is no error message. inflateInit2
- does not perform any decompression apart from reading the zlib header if
- present: this will be done by inflate(). (So next_in and avail_in may be
- modified, but next_out and avail_out are unchanged.)
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_inflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the decompression dictionary from the given uncompressed byte
- sequence. This function must be called immediately after a call of inflate
- if this call returned Z_NEED_DICT. The dictionary chosen by the compressor
- can be determined from the Adler32 value returned by this call of
- inflate. The compressor and decompressor must use exactly the same
- dictionary (see deflateSetDictionary).
-
- inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state is
- inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
- expected one (incorrect Adler32 value). inflateSetDictionary does not
- perform any decompression: this will be done by subsequent calls of
- inflate().
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_inflateSync OF((z_streamp strm));
-/*
- Skips invalid compressed data until a full flush point (see above the
- description of deflate with Z_FULL_FLUSH) can be found, or until all
- available input is skipped. No output is provided.
-
- inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
- if no more input was provided, Z_DATA_ERROR if no flush point has been found,
- or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
- case, the application may save the current current value of total_in which
- indicates where valid compressed data was found. In the error case, the
- application may repeatedly call inflateSync, providing more input each time,
- until success or end of the input data.
-*/
-
-ZEXTERN int ZEXPORT zlib_fs_inflateReset OF((z_streamp strm));
-/*
- This function is equivalent to inflateEnd followed by inflateInit,
- but does not free and reallocate all the internal decompression state.
- The stream will keep attributes that may have been set by inflateInit2.
-
- inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
- /* checksum functions */
-
-/*
- These functions are not related to compression but are exported
- anyway because they might be useful in applications using the
- compression library.
-*/
-
-ZEXTERN uLong ZEXPORT zlib_fs_adler32 OF((uLong adler, const Bytef *buf, uInt len));
-
-/*
- Update a running Adler-32 checksum with the bytes buf[0..len-1] and
- return the updated checksum. If buf is NULL, this function returns
- the required initial value for the checksum.
- An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
- much faster. Usage example:
-
- uLong adler = adler32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- adler = adler32(adler, buffer, length);
- }
- if (adler != original_adler) error();
-*/
-
-ZEXTERN uLong ZEXPORT zlib_fs_crc32 OF((uLong crc, const Bytef *buf, uInt len));
-/*
- Update a running crc with the bytes buf[0..len-1] and return the updated
- crc. If buf is NULL, this function returns the required initial value
- for the crc. Pre- and post-conditioning (one's complement) is performed
- within this function so it shouldn't be done by the application.
- Usage example:
-
- uLong crc = crc32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- crc = crc32(crc, buffer, length);
- }
- if (crc != original_crc) error();
-*/
-
-
- /* various hacks, don't look :) */
-
-/* deflateInit and inflateInit are macros to allow checking the zlib version
- * and the compiler's view of z_stream:
- */
-ZEXTERN int ZEXPORT zlib_fs_deflateInit_ OF((z_streamp strm, int level,
- const char *version, int stream_size));
-ZEXTERN int ZEXPORT zlib_fs_inflateInit_ OF((z_streamp strm,
- const char *version, int stream_size));
-ZEXTERN int ZEXPORT zlib_fs_deflateInit2_ OF((z_streamp strm, int level, int method,
- int windowBits, int memLevel,
- int strategy, const char *version,
- int stream_size));
-ZEXTERN int ZEXPORT zlib_fs_inflateInit2_ OF((z_streamp strm, int windowBits,
- const char *version, int stream_size));
+/* zlib_fs.h -- A compatibility header mapping the old zlib_fs_* names
+   onto the zlib_* functions. This will go away. */
+#ifndef _ZLIB_FS_H
+#define _ZLIB_FS_H
+
+#include <linux/zlib.h>
+
+#define zlib_fs_inflate_workspacesize zlib_inflate_workspacesize
+#define zlib_fs_deflate_workspacesize zlib_deflate_workspacesize
+#define zlib_fs_zlibVersion zlib_zlibVersion
+#define zlib_fs_deflate zlib_deflate
+#define zlib_fs_deflateEnd zlib_deflateEnd
+#define zlib_fs_inflate zlib_inflate
+#define zlib_fs_inflateEnd zlib_inflateEnd
+#define zlib_fs_deflateSetDictionary zlib_deflateSetDictionary
+#define zlib_fs_deflateCopy zlib_deflateCopy
+#define zlib_fs_deflateReset zlib_deflateReset
+#define zlib_fs_deflateParams zlib_deflateParams
+#define zlib_fs_inflateIncomp zlib_inflateIncomp
+#define zlib_fs_inflateSetDictionary zlib_inflateSetDictionary
+#define zlib_fs_inflateSync zlib_inflateSync
+#define zlib_fs_inflateReset zlib_inflateReset
+#define zlib_fs_adler32 zlib_adler32
+#define zlib_fs_crc32 zlib_crc32
#define zlib_fs_deflateInit(strm, level) \
- zlib_fs_deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
+ zlib_deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
#define zlib_fs_inflateInit(strm) \
- zlib_fs_inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
-#define zlib_fs_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
- zlib_fs_deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
- (strategy), ZLIB_VERSION, sizeof(z_stream))
+ zlib_inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
+#define zlib_fs_deflateInit2(strm, level, method, windowBits, memLevel, strategy)\
+ zlib_deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
+ (strategy), ZLIB_VERSION, sizeof(z_stream))
#define zlib_fs_inflateInit2(strm, windowBits) \
- zlib_fs_inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
-
-
-#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
- struct internal_state {int dummy;}; /* hack for buggy compilers */
-#endif
-
-ZEXTERN const char * ZEXPORT zlib_fs_zError OF((int err));
-ZEXTERN int ZEXPORT zlib_fs_inflateSyncPoint OF((z_streamp z));
-ZEXTERN const uLongf * ZEXPORT zlib_fs_get_crc_table OF((void));
-
-#ifdef __cplusplus
-}
-#endif
+ zlib_inflateInit2_((strm), (windowBits), ZLIB_VERSION, \
+ sizeof(z_stream))
-#endif /* _ZLIB_H */
+#endif /* _ZLIB_FS_H */
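
A minimal sketch of the calling convention the mapping above preserves: this
zlib variant never allocates internally, so the caller provides its own
workspace before initializing the stream. Buffer names and error handling are
illustrative; the old zlib_fs_* spellings keep working because they now expand
to the zlib_* entry points added later in this patch (assumes
<linux/vmalloc.h> and <linux/zlib.h>):

    z_stream strm;
    int err;

    /* caller-supplied workspace; the library itself never allocates */
    strm.workspace = vmalloc(zlib_deflate_workspacesize());
    if (strm.workspace == NULL)
            return -ENOMEM;

    /* old spelling; expands to zlib_deflateInit_() from this patch */
    err = zlib_fs_deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (err != Z_OK) {
            vfree(strm.workspace);
            /* handle error */
    }
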
}
spin_lock_init(&p->alloc_lock);
- p->sigpending = 0;
+ p->work.sigpending = 0;
init_sigpending(&p->pending);
p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
* Let the child process run first, to avoid most of the
* COW overhead when the child exec()s afterwards.
*/
- current->need_resched = 1;
+ current->work.need_resched = 1;
fork_out:
return retval;
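
The sigpending/need_resched renames in the hunks above and below assume that
task_struct now gathers its return-to-userspace flags into a single `work'
member that the exit path can test as a unit. A rough sketch of the assumed
shape (the exact field set and types are illustrative, not taken from this
patch):

    struct task_work {
            int sigpending;         /* signal delivery pending */
            int need_resched;       /* reschedule before returning to user */
            /* further per-task work flags */
    };

    /* inside struct task_struct: */
    struct task_work work;
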
EXPORT_SYMBOL(block_read_full_page);
EXPORT_SYMBOL(block_prepare_write);
EXPORT_SYMBOL(block_sync_page);
+EXPORT_SYMBOL(generic_cont_expand);
EXPORT_SYMBOL(cont_prepare_write);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(block_truncate_page);
{
int need_resched;
- need_resched = p->need_resched;
+ need_resched = p->work.need_resched;
wmb();
- p->need_resched = 1;
+ p->work.need_resched = 1;
if (!need_resched && (p->cpu != smp_processor_id()))
smp_send_reschedule(p->cpu);
}
this_rq->nr_running++;
enqueue_task(next, this_rq->active);
if (next->prio < current->prio)
- current->need_resched = 1;
+ current->work.need_resched = 1;
if (!idle && --imbalance) {
if (array == busiest->expired) {
array = busiest->active;
#endif
/* Task might have expired already, but not scheduled off yet */
if (p->array != rq->active) {
- p->need_resched = 1;
+ p->work.need_resched = 1;
return;
}
spin_lock(&rq->lock);
*/
if ((p->policy == SCHED_RR) && !--p->time_slice) {
p->time_slice = NICE_TO_TIMESLICE(p->__nice);
- p->need_resched = 1;
+ p->work.need_resched = 1;
/* put it at the end of the queue: */
dequeue_task(p, rq->active);
p->sleep_avg--;
if (!--p->time_slice) {
dequeue_task(p, rq->active);
- p->need_resched = 1;
+ p->work.need_resched = 1;
p->prio = effective_prio(p);
p->time_slice = NICE_TO_TIMESLICE(p->__nice);
switch_tasks:
prefetch(next);
- prev->need_resched = 0;
+ prev->work.need_resched = 0;
if (likely(prev != next)) {
rq->nr_switches++;
idle->state = TASK_RUNNING;
idle->cpu = cpu;
double_rq_unlock(idle_rq, rq);
- idle->need_resched = 1;
+ idle->work.need_resched = 1;
__restore_flags(flags);
}
void
flush_signals(struct task_struct *t)
{
- t->sigpending = 0;
+ t->work.sigpending = 0;
flush_sigqueue(&t->pending);
}
if (atomic_dec_and_test(&sig->count))
kmem_cache_free(sigact_cachep, sig);
}
- tsk->sigpending = 0;
+ tsk->work.sigpending = 0;
flush_sigqueue(&tsk->pending);
spin_unlock_irq(&tsk->sigmask_lock);
}
if (current->notifier) {
if (sigismember(current->notifier_mask, sig)) {
if (!(current->notifier)(current->notifier_data)) {
- current->sigpending = 0;
+ current->work.sigpending = 0;
return 0;
}
}
*/
static inline void signal_wake_up(struct task_struct *t)
{
- t->sigpending = 1;
+ t->work.sigpending = 1;
#ifdef CONFIG_SMP
/*
comment 'Library routines'
tristate 'CRC32 functions' CONFIG_CRC32
+
+#
+# Do we need the compression support?
+#
+if [ "$CONFIG_CRAMFS" = "y" -o \
+ "$CONFIG_PPP_DEFLATE" = "y" -o \
+ "$CONFIG_JFFS2_FS" = "y" -o \
+ "$CONFIG_ZISOFS_FS" = "y" ]; then
+ define_tristate CONFIG_ZLIB_INFLATE y
+else
+ if [ "$CONFIG_CRAMFS" = "m" -o \
+ "$CONFIG_PPP_DEFLATE" = "m" -o \
+ "$CONFIG_JFFS2_FS" = "m" -o \
+ "$CONFIG_ZISOFS_FS" = "m" ]; then
+ define_tristate CONFIG_ZLIB_INFLATE m
+ else
+ define_tristate CONFIG_ZLIB_INFLATE n
+ fi
+fi
+
+if [ "$CONFIG_PPP_DEFLATE" = "y" -o \
+ "$CONFIG_JFFS2_FS" = "y" ]; then
+ define_tristate CONFIG_ZLIB_DEFLATE y
+else
+ if [ "$CONFIG_PPP_DEFLATE" = "m" -o \
+ "$CONFIG_JFFS2_FS" = "m" ]; then
+ define_tristate CONFIG_ZLIB_DEFLATE m
+ else
+ define_tristate CONFIG_ZLIB_DEFLATE n
+ fi
+fi
+
endmenu
endif
obj-$(CONFIG_CRC32) += crc32.o
+
+subdir-$(CONFIG_ZLIB_INFLATE) += zlib_inflate
+subdir-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate
+
+# Include the subdirs, if necessary.
+obj-y += $(join $(subdir-y),$(subdir-y:%=/%.o))
+
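
With both zlib directories configured in, the $(join ...) line above should
expand to roughly

    obj-y += zlib_inflate/zlib_inflate.o zlib_deflate/zlib_deflate.o

i.e. each enabled subdirectory contributes the composite object named by its
O_TARGET.
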
include $(TOPDIR)/drivers/net/Makefile.lib
include $(TOPDIR)/drivers/usb/Makefile.lib
include $(TOPDIR)/fs/Makefile.lib
--- /dev/null
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is the compression code; see zlib_inflate for the
+# decompression code.
+#
+
+O_TARGET := zlib_deflate.o
+
+export-objs := deflate_syms.o
+
+obj-y := deflate.o deftree.o deflate_syms.o
+obj-m := $(O_TARGET)
+
+EXTRA_CFLAGS += -I $(TOPDIR)/lib/zlib_deflate
+
+include $(TOPDIR)/Rules.make
--- /dev/null
+/* +++ deflate.c */
+/* deflate.c -- compress data using the deflation algorithm
+ * Copyright (C) 1995-1996 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process depends on being able to identify portions
+ * of the input text which are identical to earlier input (within a
+ * sliding window trailing behind the input currently being processed).
+ *
+ * The most straightforward technique turns out to be the fastest for
+ * most input files: try all possible matches and select the longest.
+ * The key feature of this algorithm is that insertions into the string
+ * dictionary are very simple and thus fast, and deletions are avoided
+ * completely. Insertions are performed at each input character, whereas
+ * string matches are performed only when the previous match ends. So it
+ * is preferable to spend more time in matches to allow very fast string
+ * insertions and avoid deletions. The matching algorithm for small
+ * strings is inspired by that of Rabin & Karp. A brute force approach
+ * is used to find longer strings when a small match has been found.
+ * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
+ * (by Leonid Broukhis).
+ * A previous version of this file used a more sophisticated algorithm
+ * (by Fiala and Greene) which is guaranteed to run in linear amortized
+ * time, but has a larger average cost, uses more memory and is patented.
+ * However the F&G algorithm may be faster for some highly redundant
+ * files if the parameter max_chain_length (described below) is too large.
+ *
+ * ACKNOWLEDGEMENTS
+ *
+ * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
+ * I found it in 'freeze' written by Leonid Broukhis.
+ * Thanks to many people for bug reports and testing.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
+ * Available in ftp://ds.internic.net/rfc/rfc1951.txt
+ *
+ * A description of the Rabin and Karp algorithm is given in the book
+ * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
+ *
+ * Fiala,E.R., and Greene,D.H.
+ * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/zutil.h>
+#include "defutil.h"
+
+
+/* ===========================================================================
+ * Function prototypes.
+ */
+typedef enum {
+ need_more, /* block not completed, need more input or more output */
+ block_done, /* block flush performed */
+ finish_started, /* finish started, need only more output at next deflate */
+ finish_done /* finish done, accept no more input or output */
+} block_state;
+
+typedef block_state (*compress_func) OF((deflate_state *s, int flush));
+/* Compression function. Returns the block state after the call. */
+
+local void fill_window OF((deflate_state *s));
+local block_state deflate_stored OF((deflate_state *s, int flush));
+local block_state deflate_fast OF((deflate_state *s, int flush));
+local block_state deflate_slow OF((deflate_state *s, int flush));
+local void lm_init OF((deflate_state *s));
+local void putShortMSB OF((deflate_state *s, uInt b));
+local void flush_pending OF((z_streamp strm));
+local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
+local uInt longest_match OF((deflate_state *s, IPos cur_match));
+
+#ifdef DEBUG_ZLIB
+local void check_match OF((deflate_state *s, IPos start, IPos match,
+ int length));
+#endif
+
+/* ===========================================================================
+ * Local data
+ */
+
+#define NIL 0
+/* Tail of hash chains */
+
+#ifndef TOO_FAR
+# define TOO_FAR 4096
+#endif
+/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+typedef struct config_s {
+ ush good_length; /* reduce lazy search above this match length */
+ ush max_lazy; /* do not perform lazy search above this match length */
+ ush nice_length; /* quit search above this match length */
+ ush max_chain;
+ compress_func func;
+} config;
+
+local const config configuration_table[10] = {
+/* good lazy nice chain */
+/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
+/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
+/* 2 */ {4, 5, 16, 8, deflate_fast},
+/* 3 */ {4, 6, 32, 32, deflate_fast},
+
+/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
+/* 5 */ {8, 16, 32, 32, deflate_slow},
+/* 6 */ {8, 16, 128, 128, deflate_slow},
+/* 7 */ {8, 32, 128, 256, deflate_slow},
+/* 8 */ {32, 128, 258, 1024, deflate_slow},
+/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
+
+/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
+ * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
+ * meaning.
+ */
+
+#define EQUAL 0
+/* result of memcmp for equal strings */
+
+/* ===========================================================================
+ * Update a hash value with the given input byte
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
+ * input characters, so that a running hash key can be computed from the
+ * previous key instead of complete recalculation each time.
+ */
+#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
+
+
+/* ===========================================================================
+ * Insert string str in the dictionary and set match_head to the previous head
+ * of the hash chain (the most recent string with same hash key). Return
+ * the previous length of the hash chain.
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
+ * input characters and the first MIN_MATCH bytes of str are valid
+ * (except for the last MIN_MATCH-1 bytes of the input file).
+ */
+#define INSERT_STRING(s, str, match_head) \
+ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
+ s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
+ s->head[s->ins_h] = (Pos)(str))
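
UPDATE_HASH and INSERT_STRING together maintain the head[]/prev[] chains that
longest_match() walks. An illustrative rendering of that traversal (it mirrors
the do/while loop in longest_match() further down, with the byte comparison
elided):

    /* head[h] holds the most recent position whose MIN_MATCH bytes hashed
     * to h; prev[] links back through older positions in the window. */
    IPos cur = s->head[s->ins_h];
    while (cur > limit && --chain_length != 0) {
            /* ... compare window[cur..] against window[strstart..] ... */
            cur = s->prev[cur & s->w_mask];
    }
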
+
+/* ===========================================================================
+ * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
+ * prev[] will be initialized on the fly.
+ */
+#define CLEAR_HASH(s) \
+ s->head[s->hash_size-1] = NIL; \
+ memset((charf *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
+
+/* ========================================================================= */
+int zlib_deflateInit_(strm, level, version, stream_size)
+ z_streamp strm;
+ int level;
+ const char *version;
+ int stream_size;
+{
+ return zlib_deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS,
+ DEF_MEM_LEVEL,
+ Z_DEFAULT_STRATEGY, version, stream_size);
+ /* To do: ignore strm->next_in if we use it as window */
+}
+
+/* ========================================================================= */
+int zlib_deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
+ version, stream_size)
+ z_streamp strm;
+ int level;
+ int method;
+ int windowBits;
+ int memLevel;
+ int strategy;
+ const char *version;
+ int stream_size;
+{
+ deflate_state *s;
+ int noheader = 0;
+ static char* my_version = ZLIB_VERSION;
+ deflate_workspace *mem;
+
+ ushf *overlay;
+ /* We overlay pending_buf and d_buf+l_buf. This works since the average
+ * output size for (length,distance) codes is <= 24 bits.
+ */
+
+ if (version == Z_NULL || version[0] != my_version[0] ||
+ stream_size != sizeof(z_stream)) {
+ return Z_VERSION_ERROR;
+ }
+ if (strm == Z_NULL) return Z_STREAM_ERROR;
+
+ strm->msg = Z_NULL;
+
+ if (level == Z_DEFAULT_COMPRESSION) level = 6;
+
+ mem = (deflate_workspace *) strm->workspace;
+
+ if (windowBits < 0) { /* undocumented feature: suppress zlib header */
+ noheader = 1;
+ windowBits = -windowBits;
+ }
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
+ windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
+ strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+ s = (deflate_state *) &(mem->deflate_memory);
+ strm->state = (struct internal_state FAR *)s;
+ s->strm = strm;
+
+ s->noheader = noheader;
+ s->w_bits = windowBits;
+ s->w_size = 1 << s->w_bits;
+ s->w_mask = s->w_size - 1;
+
+ s->hash_bits = memLevel + 7;
+ s->hash_size = 1 << s->hash_bits;
+ s->hash_mask = s->hash_size - 1;
+ s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
+
+ s->window = (Bytef *) mem->window_memory;
+ s->prev = (Posf *) mem->prev_memory;
+ s->head = (Posf *) mem->head_memory;
+
+ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+ overlay = (ushf *) mem->overlay_memory;
+ s->pending_buf = (uchf *) overlay;
+ s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
+
+ s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
+ s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
+
+ s->level = level;
+ s->strategy = strategy;
+ s->method = (Byte)method;
+
+ return zlib_deflateReset(strm);
+}
+
+/* ========================================================================= */
+int zlib_deflateSetDictionary (strm, dictionary, dictLength)
+ z_streamp strm;
+ const Bytef *dictionary;
+ uInt dictLength;
+{
+ deflate_state *s;
+ uInt length = dictLength;
+ uInt n;
+ IPos hash_head = 0;
+
+ if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
+ return Z_STREAM_ERROR;
+
+ s = (deflate_state *) strm->state;
+ if (s->status != INIT_STATE) return Z_STREAM_ERROR;
+
+ strm->adler = zlib_adler32(strm->adler, dictionary, dictLength);
+
+ if (length < MIN_MATCH) return Z_OK;
+ if (length > MAX_DIST(s)) {
+ length = MAX_DIST(s);
+#ifndef USE_DICT_HEAD
+ dictionary += dictLength - length; /* use the tail of the dictionary */
+#endif
+ }
+ memcpy((charf *)s->window, dictionary, length);
+ s->strstart = length;
+ s->block_start = (long)length;
+
+ /* Insert all strings in the hash table (except for the last two bytes).
+ * s->lookahead stays null, so s->ins_h will be recomputed at the next
+ * call of fill_window.
+ */
+ s->ins_h = s->window[0];
+ UPDATE_HASH(s, s->ins_h, s->window[1]);
+ for (n = 0; n <= length - MIN_MATCH; n++) {
+ INSERT_STRING(s, n, hash_head);
+ }
+ if (hash_head) hash_head = 0; /* to make compiler happy */
+ return Z_OK;
+}
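
A preset dictionary must be installed between init and the first call to
zlib_deflate(); the INIT_STATE check above enforces this, so the window and
hash chains are primed before any input arrives. A minimal sketch, with
dict/dict_len standing in for caller-supplied data:

    err = zlib_deflateInit_(&strm, 6, ZLIB_VERSION, sizeof(z_stream));
    if (err == Z_OK)
            err = zlib_deflateSetDictionary(&strm, dict, dict_len);
    /* strm.adler now holds the dictionary checksum; because strstart != 0,
     * zlib_deflate() will emit it after the PRESET_DICT header bits */
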
+
+/* ========================================================================= */
+int zlib_deflateReset (strm)
+ z_streamp strm;
+{
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL)
+ return Z_STREAM_ERROR;
+
+ strm->total_in = strm->total_out = 0;
+ strm->msg = Z_NULL;
+ strm->data_type = Z_UNKNOWN;
+
+ s = (deflate_state *)strm->state;
+ s->pending = 0;
+ s->pending_out = s->pending_buf;
+
+ if (s->noheader < 0) {
+ s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
+ }
+ s->status = s->noheader ? BUSY_STATE : INIT_STATE;
+ strm->adler = 1;
+ s->last_flush = Z_NO_FLUSH;
+
+ zlib_tr_init(s);
+ lm_init(s);
+
+ return Z_OK;
+}
+
+/* ========================================================================= */
+int zlib_deflateParams(strm, level, strategy)
+ z_streamp strm;
+ int level;
+ int strategy;
+{
+ deflate_state *s;
+ compress_func func;
+ int err = Z_OK;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = (deflate_state *) strm->state;
+
+ if (level == Z_DEFAULT_COMPRESSION) {
+ level = 6;
+ }
+ if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
+ return Z_STREAM_ERROR;
+ }
+ func = configuration_table[s->level].func;
+
+ if (func != configuration_table[level].func && strm->total_in != 0) {
+ /* Flush the last buffer: */
+ err = zlib_deflate(strm, Z_PARTIAL_FLUSH);
+ }
+ if (s->level != level) {
+ s->level = level;
+ s->max_lazy_match = configuration_table[level].max_lazy;
+ s->good_match = configuration_table[level].good_length;
+ s->nice_match = configuration_table[level].nice_length;
+ s->max_chain_length = configuration_table[level].max_chain;
+ }
+ s->strategy = strategy;
+ return err;
+}
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+local void putShortMSB (s, b)
+ deflate_state *s;
+ uInt b;
+{
+ put_byte(s, (Byte)(b >> 8));
+ put_byte(s, (Byte)(b & 0xff));
+}
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output goes
+ * through this function so some applications may wish to modify it
+ * to avoid allocating a large strm->next_out buffer and copying into it.
+ * (See also read_buf()).
+ */
+local void flush_pending(strm)
+ z_streamp strm;
+{
+ deflate_state *s = (deflate_state *) strm->state;
+ unsigned len = s->pending;
+
+ if (len > strm->avail_out) len = strm->avail_out;
+ if (len == 0) return;
+
+ if (strm->next_out != Z_NULL) {
+ memcpy(strm->next_out, s->pending_out, len);
+ strm->next_out += len;
+ }
+ s->pending_out += len;
+ strm->total_out += len;
+ strm->avail_out -= len;
+ s->pending -= len;
+ if (s->pending == 0) {
+ s->pending_out = s->pending_buf;
+ }
+}
+
+/* ========================================================================= */
+int zlib_deflate (strm, flush)
+ z_streamp strm;
+ int flush;
+{
+ int old_flush; /* value of flush param for previous deflate call */
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL ||
+ flush > Z_FINISH || flush < 0) {
+ return Z_STREAM_ERROR;
+ }
+ s = (deflate_state *) strm->state;
+
+ if ((strm->next_in == Z_NULL && strm->avail_in != 0) ||
+ (s->status == FINISH_STATE && flush != Z_FINISH)) {
+ return Z_STREAM_ERROR;
+ }
+ if (strm->avail_out == 0) return Z_BUF_ERROR;
+
+ s->strm = strm; /* just in case */
+ old_flush = s->last_flush;
+ s->last_flush = flush;
+
+ /* Write the zlib header */
+ if (s->status == INIT_STATE) {
+
+ uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
+ uInt level_flags = (s->level-1) >> 1;
+
+ if (level_flags > 3) level_flags = 3;
+ header |= (level_flags << 6);
+ if (s->strstart != 0) header |= PRESET_DICT;
+ header += 31 - (header % 31);
+
+ s->status = BUSY_STATE;
+ putShortMSB(s, header);
+
+ /* Save the adler32 of the preset dictionary: */
+ if (s->strstart != 0) {
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ }
+ strm->adler = 1L;
+ }
+
+ /* Flush as much pending output as possible */
+ if (s->pending != 0) {
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ /* Since avail_out is 0, deflate will be called again with
+ * more output space, but possibly with both pending and
+ * avail_in equal to zero. There won't be anything to do,
+ * but this is not an error situation so make sure we
+ * return OK instead of BUF_ERROR at next call of deflate:
+ */
+ s->last_flush = -1;
+ return Z_OK;
+ }
+
+ /* Make sure there is something to do and avoid duplicate consecutive
+ * flushes. For repeated and useless calls with Z_FINISH, we keep
+ * returning Z_STREAM_END instead of Z_BUF_ERROR.
+ */
+ } else if (strm->avail_in == 0 && flush <= old_flush &&
+ flush != Z_FINISH) {
+ return Z_BUF_ERROR;
+ }
+
+ /* User must not provide more input after the first FINISH: */
+ if (s->status == FINISH_STATE && strm->avail_in != 0) {
+ return Z_BUF_ERROR;
+ }
+
+ /* Start a new block or continue the current one.
+ */
+ if (strm->avail_in != 0 || s->lookahead != 0 ||
+ (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
+ block_state bstate;
+
+ bstate = (*(configuration_table[s->level].func))(s, flush);
+
+ if (bstate == finish_started || bstate == finish_done) {
+ s->status = FINISH_STATE;
+ }
+ if (bstate == need_more || bstate == finish_started) {
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
+ }
+ return Z_OK;
+ /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ * of deflate should use the same flush parameter to make sure
+ * that the flush is complete. So we don't have to output an
+ * empty block here, this will be done at next call. This also
+ * ensures that for a very small output buffer, we emit at most
+ * one empty block.
+ */
+ }
+ if (bstate == block_done) {
+ if (flush == Z_PARTIAL_FLUSH) {
+ zlib_tr_align(s);
+ } else if (flush == Z_PACKET_FLUSH) {
+ /* Output just the 3-bit `stored' block type value,
+ but not a zero length. */
+ zlib_tr_stored_type_only(s);
+ } else { /* FULL_FLUSH or SYNC_FLUSH */
+ zlib_tr_stored_block(s, (char*)0, 0L, 0);
+ /* For a full flush, this empty block will be recognized
+ * as a special marker by inflate_sync().
+ */
+ if (flush == Z_FULL_FLUSH) {
+ CLEAR_HASH(s); /* forget history */
+ }
+ }
+ flush_pending(strm);
+ if (strm->avail_out == 0) {
+ s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+ return Z_OK;
+ }
+ }
+ }
+ Assert(strm->avail_out > 0, "bug2");
+
+ if (flush != Z_FINISH) return Z_OK;
+ if (s->noheader) return Z_STREAM_END;
+
+ /* Write the zlib trailer (adler32) */
+ putShortMSB(s, (uInt)(strm->adler >> 16));
+ putShortMSB(s, (uInt)(strm->adler & 0xffff));
+ flush_pending(strm);
+ /* If avail_out is zero, the application will call deflate again
+ * to flush the rest.
+ */
+ s->noheader = -1; /* write the trailer only once! */
+ return s->pending != 0 ? Z_OK : Z_STREAM_END;
+}
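
The return contract above (Z_OK while output space keeps running out,
Z_STREAM_END only once the trailer has been flushed) implies the usual drive
loop on the caller's side. A hedged sketch, assuming src/dst buffers set up
by the caller:

    strm.next_in = src;   strm.avail_in = src_len;
    strm.next_out = dst;  strm.avail_out = dst_len;

    do {
            err = zlib_deflate(&strm, Z_FINISH);
            /* on Z_OK with avail_out == 0: drain or enlarge dst, reset
             * next_out/avail_out, then retry */
    } while (err == Z_OK);

    if (err != Z_STREAM_END)
            /* handle compression failure */;
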
+
+/* ========================================================================= */
+int zlib_deflateEnd (strm)
+ z_streamp strm;
+{
+ int status;
+ deflate_state *s;
+
+ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
+ s = (deflate_state *) strm->state;
+
+ status = s->status;
+ if (status != INIT_STATE && status != BUSY_STATE &&
+ status != FINISH_STATE) {
+ return Z_STREAM_ERROR;
+ }
+
+ strm->state = Z_NULL;
+
+ return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
+}
+
+/* =========================================================================
+ * Copy the source state to the destination state.
+ */
+int zlib_deflateCopy (dest, source)
+ z_streamp dest;
+ z_streamp source;
+{
+#ifdef MAXSEG_64K
+ return Z_STREAM_ERROR;
+#else
+ deflate_state *ds;
+ deflate_state *ss;
+ ushf *overlay;
+ deflate_workspace *mem;
+
+
+ if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
+ return Z_STREAM_ERROR;
+ }
+
+ ss = (deflate_state *) source->state;
+
+ *dest = *source;
+
+ mem = (deflate_workspace *) dest->workspace;
+
+ ds = &(mem->deflate_memory);
+
+ dest->state = (struct internal_state FAR *) ds;
+ *ds = *ss;
+ ds->strm = dest;
+
+ ds->window = (Bytef *) mem->window_memory;
+ ds->prev = (Posf *) mem->prev_memory;
+ ds->head = (Posf *) mem->head_memory;
+ overlay = (ushf *) mem->overlay_memory;
+ ds->pending_buf = (uchf *) overlay;
+
+ memcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
+ memcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
+ memcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
+ memcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
+
+ ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
+ ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
+ ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
+
+ ds->l_desc.dyn_tree = ds->dyn_ltree;
+ ds->d_desc.dyn_tree = ds->dyn_dtree;
+ ds->bl_desc.dyn_tree = ds->bl_tree;
+
+ return Z_OK;
+#endif
+}
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->next_in buffer and copying from it.
+ * (See also flush_pending()).
+ */
+local int read_buf(strm, buf, size)
+ z_streamp strm;
+ Bytef *buf;
+ unsigned size;
+{
+ unsigned len = strm->avail_in;
+
+ if (len > size) len = size;
+ if (len == 0) return 0;
+
+ strm->avail_in -= len;
+
+ if (!((deflate_state *)(strm->state))->noheader) {
+ strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
+ }
+ memcpy(buf, strm->next_in, len);
+ strm->next_in += len;
+ strm->total_in += len;
+
+ return (int)len;
+}
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+local void lm_init (s)
+ deflate_state *s;
+{
+ s->window_size = (ulg)2L*s->w_size;
+
+ CLEAR_HASH(s);
+
+ /* Set the default configuration parameters:
+ */
+ s->max_lazy_match = configuration_table[s->level].max_lazy;
+ s->good_match = configuration_table[s->level].good_length;
+ s->nice_match = configuration_table[s->level].nice_length;
+ s->max_chain_length = configuration_table[s->level].max_chain;
+
+ s->strstart = 0;
+ s->block_start = 0L;
+ s->lookahead = 0;
+ s->match_length = s->prev_length = MIN_MATCH-1;
+ s->match_available = 0;
+ s->ins_h = 0;
+}
+
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
+ * match.S. The code will be functionally equivalent.
+ */
+local uInt longest_match(s, cur_match)
+ deflate_state *s;
+ IPos cur_match; /* current match */
+{
+ unsigned chain_length = s->max_chain_length;/* max hash chain length */
+ register Bytef *scan = s->window + s->strstart; /* current string */
+ register Bytef *match; /* matched string */
+ register int len; /* length of current match */
+ int best_len = s->prev_length; /* best match length so far */
+ int nice_match = s->nice_match; /* stop if match long enough */
+ IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
+ s->strstart - (IPos)MAX_DIST(s) : NIL;
+ /* Stop when cur_match becomes <= limit. To simplify the code,
+ * we prevent matches with the string of window index 0.
+ */
+ Posf *prev = s->prev;
+ uInt wmask = s->w_mask;
+
+#ifdef UNALIGNED_OK
+ /* Compare two bytes at a time. Note: this is not always beneficial.
+ * Try with and without -DUNALIGNED_OK to check.
+ */
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
+ register ush scan_start = *(ushf*)scan;
+ register ush scan_end = *(ushf*)(scan+best_len-1);
+#else
+ register Bytef *strend = s->window + s->strstart + MAX_MATCH;
+ register Byte scan_end1 = scan[best_len-1];
+ register Byte scan_end = scan[best_len];
+#endif
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ /* Do not waste too much time if we already have a good match: */
+ if (s->prev_length >= s->good_match) {
+ chain_length >>= 2;
+ }
+ /* Do not look for matches beyond the end of the input. This is necessary
+ * to make deflate deterministic.
+ */
+ if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
+
+ Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ do {
+ Assert(cur_match < s->strstart, "no future");
+ match = s->window + cur_match;
+
+ /* Skip to next match if the match length cannot increase
+ * or if the match length is less than 2:
+ */
+#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
+ /* This code assumes sizeof(unsigned short) == 2. Do not use
+ * UNALIGNED_OK if your compiler uses a different size.
+ */
+ if (*(ushf*)(match+best_len-1) != scan_end ||
+ *(ushf*)match != scan_start) continue;
+
+ /* It is not necessary to compare scan[2] and match[2] since they are
+ * always equal when the other bytes match, given that the hash keys
+ * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
+ * strstart+3, +5, ... up to strstart+257. We check for insufficient
+ * lookahead only every 4th comparison; the 128th check will be made
+ * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
+ * necessary to put more guard bytes at the end of the window, or
+ * to check more often for insufficient lookahead.
+ */
+ Assert(scan[2] == match[2], "scan[2]?");
+ scan++, match++;
+ do {
+ } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
+ scan < strend);
+ /* The funny "do {}" generates better code on most compilers */
+
+ /* Here, scan <= window+strstart+257 */
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+ if (*scan == *match) scan++;
+
+ len = (MAX_MATCH - 1) - (int)(strend-scan);
+ scan = strend - (MAX_MATCH-1);
+
+#else /* UNALIGNED_OK */
+
+ if (match[best_len] != scan_end ||
+ match[best_len-1] != scan_end1 ||
+ *match != *scan ||
+ *++match != scan[1]) continue;
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2, match++;
+ Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ } while (*++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ *++scan == *++match && *++scan == *++match &&
+ scan < strend);
+
+ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (int)(strend - scan);
+ scan = strend - MAX_MATCH;
+
+#endif /* UNALIGNED_OK */
+
+ if (len > best_len) {
+ s->match_start = cur_match;
+ best_len = len;
+ if (len >= nice_match) break;
+#ifdef UNALIGNED_OK
+ scan_end = *(ushf*)(scan+best_len-1);
+#else
+ scan_end1 = scan[best_len-1];
+ scan_end = scan[best_len];
+#endif
+ }
+ } while ((cur_match = prev[cur_match & wmask]) > limit
+ && --chain_length != 0);
+
+ if ((uInt)best_len <= s->lookahead) return best_len;
+ return s->lookahead;
+}
+
+#ifdef DEBUG_ZLIB
+/* ===========================================================================
+ * Check that the match at match_start is indeed a match.
+ */
+local void check_match(s, start, match, length)
+ deflate_state *s;
+ IPos start, match;
+ int length;
+{
+ /* check that the match is indeed a match */
+ if (memcmp((charf *)s->window + match,
+ (charf *)s->window + start, length) != EQUAL) {
+ fprintf(stderr, " start %u, match %u, length %d\n",
+ start, match, length);
+ do {
+ fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
+ } while (--length != 0);
+ z_error("invalid match");
+ }
+ if (z_verbose > 1) {
+ fprintf(stderr,"\\[%d,%d]", start-match, length);
+ do { putc(s->window[start++], stderr); } while (--length != 0);
+ }
+}
+#else
+# define check_match(s, start, match, length)
+#endif
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+local void fill_window(s)
+ deflate_state *s;
+{
+ register unsigned n, m;
+ register Posf *p;
+ unsigned more; /* Amount of free space at the end of the window. */
+ uInt wsize = s->w_size;
+
+ do {
+ more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
+
+ /* Deal with !@#$% 64K limit: */
+ if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ more = wsize;
+
+ } else if (more == (unsigned)(-1)) {
+ /* Very unlikely, but possible on 16 bit machine if strstart == 0
+ * and lookahead == 1 (input done one byte at a time)
+ */
+ more--;
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ } else if (s->strstart >= wsize+MAX_DIST(s)) {
+
+ memcpy((charf *)s->window, (charf *)s->window+wsize,
+ (unsigned)wsize);
+ s->match_start -= wsize;
+ s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
+ s->block_start -= (long) wsize;
+
+ /* Slide the hash table (could be avoided with 32 bit values
+ at the expense of memory usage). We slide even when level == 0
+ to keep the hash table consistent if we switch back to level > 0
+ later. (Using level 0 permanently is not an optimal usage of
+ zlib, so we don't care about this pathological case.)
+ */
+ n = s->hash_size;
+ p = &s->head[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ } while (--n);
+
+ n = wsize;
+ p = &s->prev[n];
+ do {
+ m = *--p;
+ *p = (Pos)(m >= wsize ? m-wsize : NIL);
+ /* If n is not on any hash chain, prev[n] is garbage but
+ * its value will never be used.
+ */
+ } while (--n);
+ more += wsize;
+ }
+ if (s->strm->avail_in == 0) return;
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ Assert(more >= 2, "more < 2");
+
+ n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
+ s->lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s->lookahead >= MIN_MATCH) {
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
+}
+
+/* ===========================================================================
+ * Flush the current block, with given end-of-file flag.
+ * IN assertion: strstart is set to the end of the current match.
+ */
+#define FLUSH_BLOCK_ONLY(s, eof) { \
+ zlib_tr_flush_block(s, (s->block_start >= 0L ? \
+ (charf *)&s->window[(unsigned)s->block_start] : \
+ (charf *)Z_NULL), \
+ (ulg)((long)s->strstart - s->block_start), \
+ (eof)); \
+ s->block_start = s->strstart; \
+ flush_pending(s->strm); \
+ Tracev((stderr,"[FLUSH]")); \
+}
+
+/* Same but force premature exit if necessary. */
+#define FLUSH_BLOCK(s, eof) { \
+ FLUSH_BLOCK_ONLY(s, eof); \
+ if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
+}
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ * This function does not insert new strings in the dictionary since
+ * incompressible data is probably not useful. This function is used
+ * only for the level=0 compression option.
+ * NOTE: this function should be optimized to avoid extra copying from
+ * window to pending_buf.
+ */
+local block_state deflate_stored(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
+ * to pending_buf_size, and each stored block has a 5 byte header:
+ */
+ ulg max_block_size = 0xffff;
+ ulg max_start;
+
+ if (max_block_size > s->pending_buf_size - 5) {
+ max_block_size = s->pending_buf_size - 5;
+ }
+
+ /* Copy as much as possible from input to output: */
+ for (;;) {
+ /* Fill the window as much as possible: */
+ if (s->lookahead <= 1) {
+
+ Assert(s->strstart < s->w_size+MAX_DIST(s) ||
+ s->block_start >= (long)s->w_size, "slide too late");
+
+ fill_window(s);
+ if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
+
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+ Assert(s->block_start >= 0L, "block gone");
+
+ s->strstart += s->lookahead;
+ s->lookahead = 0;
+
+ /* Emit a stored block if pending_buf will be full: */
+ max_start = s->block_start + max_block_size;
+ if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
+ /* strstart == 0 is possible when wraparound on 16-bit machine */
+ s->lookahead = (uInt)(s->strstart - max_start);
+ s->strstart = (uInt)max_start;
+ FLUSH_BLOCK(s, 0);
+ }
+ /* Flush if we may have to slide, otherwise block_start may become
+ * negative and the data will be gone:
+ */
+ if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
+ FLUSH_BLOCK(s, 0);
+ }
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+local block_state deflate_fast(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head = NIL; /* head of the hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ * At this point we have always match_length < MIN_MATCH
+ */
+ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+ }
+ if (s->match_length >= MIN_MATCH) {
+ check_match(s, s->strstart, s->match_start, s->match_length);
+
+ bflush = zlib_tr_tally(s, s->strstart - s->match_start,
+ s->match_length - MIN_MATCH);
+
+ s->lookahead -= s->match_length;
+
+ /* Insert new strings in the hash table only if the match length
+ * is not too large. This saves time but degrades compression.
+ */
+ if (s->match_length <= s->max_insert_length &&
+ s->lookahead >= MIN_MATCH) {
+ s->match_length--; /* string at strstart already in hash table */
+ do {
+ s->strstart++;
+ INSERT_STRING(s, s->strstart, hash_head);
+ /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+ * always MIN_MATCH bytes ahead.
+ */
+ } while (--s->match_length != 0);
+ s->strstart++;
+ } else {
+ s->strstart += s->match_length;
+ s->match_length = 0;
+ s->ins_h = s->window[s->strstart];
+ UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
+#if MIN_MATCH != 3
+ Call UPDATE_HASH() MIN_MATCH-3 more times
+#endif
+ /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ * matter since it will be recomputed at next deflate call.
+ */
+ }
+ } else {
+ /* No match, output a literal byte */
+ Tracevv((stderr,"%c", s->window[s->strstart]));
+ bflush = zlib_tr_tally (s, 0, s->window[s->strstart]);
+ s->lookahead--;
+ s->strstart++;
+ }
+ if (bflush) FLUSH_BLOCK(s, 0);
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+/* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+local block_state deflate_slow(s, flush)
+ deflate_state *s;
+ int flush;
+{
+ IPos hash_head = NIL; /* head of hash chain */
+ int bflush; /* set if current block must be flushed */
+
+ /* Process the input block. */
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s->lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
+ return need_more;
+ }
+ if (s->lookahead == 0) break; /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ if (s->lookahead >= MIN_MATCH) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ */
+ s->prev_length = s->match_length, s->prev_match = s->match_start;
+ s->match_length = MIN_MATCH-1;
+
+ if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
+ s->strstart - hash_head <= MAX_DIST(s)) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ if (s->strategy != Z_HUFFMAN_ONLY) {
+ s->match_length = longest_match (s, hash_head);
+ }
+ /* longest_match() sets match_start */
+
+ if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
+ (s->match_length == MIN_MATCH &&
+ s->strstart - s->match_start > TOO_FAR))) {
+
+ /* If prev_match is also MIN_MATCH, match_start is garbage
+ * but we will ignore the current match anyway.
+ */
+ s->match_length = MIN_MATCH-1;
+ }
+ }
+ /* If there was a match at the previous step and the current
+ * match is not better, output the previous match:
+ */
+ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
+ uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
+ /* Do not insert strings in hash table beyond this. */
+
+ check_match(s, s->strstart-1, s->prev_match, s->prev_length);
+
+ bflush = zlib_tr_tally(s, s->strstart -1 - s->prev_match,
+ s->prev_length - MIN_MATCH);
+
+ /* Insert in hash table all strings up to the end of the match.
+ * strstart-1 and strstart are already inserted. If there is not
+ * enough lookahead, the last two strings are not inserted in
+ * the hash table.
+ */
+ s->lookahead -= s->prev_length-1;
+ s->prev_length -= 2;
+ do {
+ if (++s->strstart <= max_insert) {
+ INSERT_STRING(s, s->strstart, hash_head);
+ }
+ } while (--s->prev_length != 0);
+ s->match_available = 0;
+ s->match_length = MIN_MATCH-1;
+ s->strstart++;
+
+ if (bflush) FLUSH_BLOCK(s, 0);
+
+ } else if (s->match_available) {
+ /* If there was no match at the previous position, output a
+ * single literal. If there was a match but the current match
+ * is longer, truncate the previous match to a single literal.
+ */
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ if (zlib_tr_tally (s, 0, s->window[s->strstart-1])) {
+ FLUSH_BLOCK_ONLY(s, 0);
+ }
+ s->strstart++;
+ s->lookahead--;
+ if (s->strm->avail_out == 0) return need_more;
+ } else {
+ /* There is no previous match to compare with, wait for
+ * the next step to decide.
+ */
+ s->match_available = 1;
+ s->strstart++;
+ s->lookahead--;
+ }
+ }
+ Assert (flush != Z_NO_FLUSH, "no flush?");
+ if (s->match_available) {
+ Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ zlib_tr_tally (s, 0, s->window[s->strstart-1]);
+ s->match_available = 0;
+ }
+ FLUSH_BLOCK(s, flush == Z_FINISH);
+ return flush == Z_FINISH ? finish_done : block_done;
+}
+
+ZEXTERN int ZEXPORT zlib_deflate_workspacesize ()
+{
+ return sizeof(deflate_workspace);
+}
--- /dev/null
+/*
+ * linux/lib/zlib_deflate/deflate_syms.c
+ *
+ * Exported symbols for the deflate functionality.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/zlib.h>
+
+EXPORT_SYMBOL(zlib_deflate_workspacesize);
+EXPORT_SYMBOL(zlib_deflate);
+EXPORT_SYMBOL(zlib_deflateInit_);
+EXPORT_SYMBOL(zlib_deflateInit2_);
+EXPORT_SYMBOL(zlib_deflateEnd);
+EXPORT_SYMBOL(zlib_deflateReset);
+EXPORT_SYMBOL(zlib_deflateCopy);
+EXPORT_SYMBOL(zlib_deflateParams);
+MODULE_LICENSE("GPL");
--- /dev/null
+/* +++ trees.c */
+/* trees.c -- output deflated data using Huffman coding
+ * Copyright (C) 1995-1996 Jean-loup Gailly
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/*
+ * ALGORITHM
+ *
+ * The "deflation" process uses several Huffman trees. The more
+ * common source values are represented by shorter bit sequences.
+ *
+ * Each code tree is stored in a compressed form which is itself
+ * a Huffman encoding of the lengths of all the code strings (in
+ * ascending order by source values). The actual code strings are
+ * reconstructed from the lengths in the inflate process, as described
+ * in the deflate specification.
+ *
+ * REFERENCES
+ *
+ * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
+ * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
+ *
+ * Storer, James A.
+ * Data Compression: Methods and Theory, pp. 49-50.
+ * Computer Science Press, 1988. ISBN 0-7167-8156-5.
+ *
+ * Sedgewick, R.
+ * Algorithms, p290.
+ * Addison-Wesley, 1983. ISBN 0-201-06672-6.
+ */
+
+/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
+
+/* #include "deflate.h" */
+
+#include <linux/zutil.h>
+#include "defutil.h"
+
+#ifdef DEBUG_ZLIB
+# include <ctype.h>
+#endif
+
+/* ===========================================================================
+ * Constants
+ */
+
+#define MAX_BL_BITS 7
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+#define END_BLOCK 256
+/* end of block literal code */
+
+#define REP_3_6 16
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+#define REPZ_3_10 17
+/* repeat a zero length 3-10 times (3 bits of repeat count) */
+
+#define REPZ_11_138 18
+/* repeat a zero length 11-138 times (7 bits of repeat count) */
+
+local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
+ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
+
+local const int extra_dbits[D_CODES] /* extra bits for each distance code */
+ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
+ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
+
+local const uch bl_order[BL_CODES]
+ = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+#define Buf_size (8 * 2*sizeof(char))
+/* Number of bits used within bi_buf. (bi_buf might be implemented on
+ * more than 16 bits on some systems.)
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+local ct_data static_ltree[L_CODES+2];
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However,
+ * the codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init
+ * below).
+ */
+
+local ct_data static_dtree[D_CODES];
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+local uch dist_code[512];
+/* distance codes. The first 256 values correspond to the distances
+ * 3 .. 258, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+local uch length_code[MAX_MATCH-MIN_MATCH+1];
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+local int base_length[LENGTH_CODES];
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+local int base_dist[D_CODES];
+/* First normalized distance for each code (0 = distance of 1) */
+
+struct static_tree_desc_s {
+ const ct_data *static_tree; /* static tree or NULL */
+ const intf *extra_bits; /* extra bits for each code or NULL */
+ int extra_base; /* base index for extra_bits */
+ int elems; /* max number of elements in the tree */
+ int max_length; /* max bit length for the codes */
+};
+
+local static_tree_desc static_l_desc =
+{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
+
+local static_tree_desc static_d_desc =
+{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
+
+local static_tree_desc static_bl_desc =
+{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
+
+/* ===========================================================================
+ * Local (static) routines in this file.
+ */
+
+local void tr_static_init OF((void));
+local void init_block OF((deflate_state *s));
+local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
+local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
+local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
+local void build_tree OF((deflate_state *s, tree_desc *desc));
+local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
+local int build_bl_tree OF((deflate_state *s));
+local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
+ int blcodes));
+local void compress_block OF((deflate_state *s, ct_data *ltree,
+ ct_data *dtree));
+local void set_data_type OF((deflate_state *s));
+local unsigned bi_reverse OF((unsigned value, int length));
+local void bi_windup OF((deflate_state *s));
+local void bi_flush OF((deflate_state *s));
+local void copy_block OF((deflate_state *s, charf *buf, unsigned len,
+ int header));
+
+#ifndef DEBUG_ZLIB
+# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
+ /* Send a code of the given tree. c and tree must not have side effects */
+
+#else /* DEBUG_ZLIB */
+# define send_code(s, c, tree) \
+ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
+ send_bits(s, tree[c].Code, tree[c].Len); }
+#endif
+
+#define d_code(dist) \
+ ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
+/* Mapping from a distance to a distance code. dist is the distance - 1 and
+ * must not have side effects. dist_code[256] and dist_code[257] are never
+ * used.
+ */
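
Since dist is the actual distance minus one, the two halves of the macro can
be checked by hand (pure index arithmetic; no claim is made here about the
resulting code values):

    d_code(3);      /* distance 4    -> dist_code[3] */
    d_code(4096);   /* distance 4097 -> dist_code[256 + (4096>>7)],
                       i.e. dist_code[288] */
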
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+#ifdef DEBUG_ZLIB
+local void send_bits OF((deflate_state *s, int value, int length));
+
+local void send_bits(s, value, length)
+ deflate_state *s;
+ int value; /* value to send */
+ int length; /* number of bits */
+{
+ Tracevv((stderr," l %2d v %4x ", length, value));
+ Assert(length > 0 && length <= 15, "invalid length");
+ s->bits_sent += (ulg)length;
+
+ /* If not enough room in bi_buf, use (valid) bits from bi_buf and
+ * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
+ * unused bits in value.
+ */
+ if (s->bi_valid > (int)Buf_size - length) {
+ s->bi_buf |= (value << s->bi_valid);
+ put_short(s, s->bi_buf);
+ s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
+ s->bi_valid += length - Buf_size;
+ } else {
+ s->bi_buf |= value << s->bi_valid;
+ s->bi_valid += length;
+ }
+}
+#else /* !DEBUG_ZLIB */
+
+#define send_bits(s, value, length) \
+{ int len = length;\
+ if (s->bi_valid > (int)Buf_size - len) {\
+ int val = value;\
+ s->bi_buf |= (val << s->bi_valid);\
+ put_short(s, s->bi_buf);\
+ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
+ s->bi_valid += len - Buf_size;\
+ } else {\
+ s->bi_buf |= (value) << s->bi_valid;\
+ s->bi_valid += len;\
+ }\
+}
+#endif /* DEBUG_ZLIB */
+
+
+#define MAX(a,b) (a >= b ? a : b)
+/* the arguments must not have side effects */
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables. In a multi-threaded environment,
+ * this function may be called by two threads concurrently, but this is
+ * harmless since both invocations do exactly the same thing.
+ */
+local void tr_static_init()
+{
+ static int static_init_done = 0;
+ int n; /* iterates over tree elements */
+ int bits; /* bit counter */
+ int length; /* length value */
+ int code; /* code value */
+ int dist; /* distance index */
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ if (static_init_done) return;
+
+ /* Initialize the mapping length (0..255) -> length code (0..28) */
+ length = 0;
+ for (code = 0; code < LENGTH_CODES-1; code++) {
+ base_length[code] = length;
+ for (n = 0; n < (1<<extra_lbits[code]); n++) {
+ length_code[length++] = (uch)code;
+ }
+ }
+ Assert (length == 256, "tr_static_init: length != 256");
+ /* Note that the length 255 (match length 258) can be represented
+ * in two different ways: code 284 + 5 bits or code 285, so we
+ * overwrite length_code[255] to use the best encoding:
+ */
+ length_code[length-1] = (uch)code;
+
+ /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+ dist = 0;
+ for (code = 0 ; code < 16; code++) {
+ base_dist[code] = dist;
+ for (n = 0; n < (1<<extra_dbits[code]); n++) {
+ dist_code[dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: dist != 256");
+ dist >>= 7; /* from now on, all distances are divided by 128 */
+ for ( ; code < D_CODES; code++) {
+ base_dist[code] = dist << 7;
+ for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
+ dist_code[256 + dist++] = (uch)code;
+ }
+ }
+ Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+ /* Construct the codes of the static literal tree */
+ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
+ n = 0;
+ while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
+ while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
+ while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
+ while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
+ /* Codes 286 and 287 do not exist, but we must include them in the
+ * tree construction to get a canonical Huffman tree (longest code
+ * all ones)
+ */
+ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
+
+ /* The static distance tree is trivial: */
+ for (n = 0; n < D_CODES; n++) {
+ static_dtree[n].Len = 5;
+ static_dtree[n].Code = bi_reverse((unsigned)n, 5);
+ }
+ static_init_done = 1;
+}
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+void zlib_tr_init(s)
+ deflate_state *s;
+{
+ tr_static_init();
+
+ s->compressed_len = 0L;
+
+ s->l_desc.dyn_tree = s->dyn_ltree;
+ s->l_desc.stat_desc = &static_l_desc;
+
+ s->d_desc.dyn_tree = s->dyn_dtree;
+ s->d_desc.stat_desc = &static_d_desc;
+
+ s->bl_desc.dyn_tree = s->bl_tree;
+ s->bl_desc.stat_desc = &static_bl_desc;
+
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+#ifdef DEBUG_ZLIB
+ s->bits_sent = 0L;
+#endif
+
+ /* Initialize the first block of the first file: */
+ init_block(s);
+}
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+local void init_block(s)
+ deflate_state *s;
+{
+ int n; /* iterates over tree elements */
+
+ /* Initialize the trees. */
+ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
+ for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
+ for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
+
+ s->dyn_ltree[END_BLOCK].Freq = 1;
+ s->opt_len = s->static_len = 0L;
+ s->last_lit = s->matches = 0;
+}
+
+#define SMALLEST 1
+/* Index within the heap array of least frequent node in the Huffman tree */
+
+
+/* ===========================================================================
+ * Remove the smallest element from the heap and recreate the heap with
+ * one less element. Updates heap and heap_len.
+ */
+#define pqremove(s, tree, top) \
+{\
+ top = s->heap[SMALLEST]; \
+ s->heap[SMALLEST] = s->heap[s->heap_len--]; \
+ pqdownheap(s, tree, SMALLEST); \
+}
+
+/* ===========================================================================
+ * Compare two subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+#define smaller(tree, n, m, depth) \
+ (tree[n].Freq < tree[m].Freq || \
+ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+local void pqdownheap(s, tree, k)
+ deflate_state *s;
+ ct_data *tree; /* the tree to restore */
+ int k; /* node to move down */
+{
+ int v = s->heap[k];
+ int j = k << 1; /* left son of k */
+ while (j <= s->heap_len) {
+ /* Set j to the smallest of the two sons: */
+ if (j < s->heap_len &&
+ smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
+ j++;
+ }
+ /* Exit if v is smaller than both sons */
+ if (smaller(tree, v, s->heap[j], s->depth)) break;
+
+ /* Exchange v with the smallest son */
+ s->heap[k] = s->heap[j]; k = j;
+
+ /* And continue down the tree, setting j to the left son of k */
+ j <<= 1;
+ }
+ s->heap[k] = v;
+}
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ * above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ * array bl_count contains the frequencies for each bit length.
+ * The length opt_len is updated; static_len is also updated if stree is
+ * not null.
+ */
+local void gen_bitlen(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ int max_code = desc->max_code;
+ const ct_data *stree = desc->stat_desc->static_tree;
+ const intf *extra = desc->stat_desc->extra_bits;
+ int base = desc->stat_desc->extra_base;
+ int max_length = desc->stat_desc->max_length;
+ int h; /* heap index */
+ int n, m; /* iterate over the tree elements */
+ int bits; /* bit length */
+ int xbits; /* extra bits */
+ ush f; /* frequency */
+ int overflow = 0; /* number of elements with bit length too large */
+
+ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
+
+ /* In a first pass, compute the optimal bit lengths (which may
+ * overflow in the case of the bit length tree).
+ */
+ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
+
+ for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
+ n = s->heap[h];
+ bits = tree[tree[n].Dad].Len + 1;
+ if (bits > max_length) bits = max_length, overflow++;
+ tree[n].Len = (ush)bits;
+ /* We overwrite tree[n].Dad which is no longer needed */
+
+ if (n > max_code) continue; /* not a leaf node */
+
+ s->bl_count[bits]++;
+ xbits = 0;
+ if (n >= base) xbits = extra[n-base];
+ f = tree[n].Freq;
+ s->opt_len += (ulg)f * (bits + xbits);
+ if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
+ }
+ if (overflow == 0) return;
+
+ Trace((stderr,"\nbit length overflow\n"));
+ /* This happens for example on obj2 and pic of the Calgary corpus */
+
+ /* Find the first bit length which could increase: */
+ do {
+ bits = max_length-1;
+ while (s->bl_count[bits] == 0) bits--;
+ s->bl_count[bits]--; /* move one leaf down the tree */
+ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
+ s->bl_count[max_length]--;
+ /* The brother of the overflow item also moves one step up,
+ * but this does not affect bl_count[max_length]
+ */
+ overflow -= 2;
+ } while (overflow > 0);
+
+ /* Now recompute all bit lengths, scanning in increasing frequency.
+ * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+ * lengths instead of fixing only the wrong ones. This idea is taken
+ * from 'ar' written by Haruhiko Okumura.)
+ */
+ for (bits = max_length; bits != 0; bits--) {
+ n = s->bl_count[bits];
+ while (n != 0) {
+ m = s->heap[--h];
+ if (m > max_code) continue;
+ if (tree[m].Len != (unsigned) bits) {
+ Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+ s->opt_len += ((long)bits - (long)tree[m].Len)
+ *(long)tree[m].Freq;
+ tree[m].Len = (ush)bits;
+ }
+ n--;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ * zero code length.
+ */
+local void gen_codes (tree, max_code, bl_count)
+ ct_data *tree; /* the tree to decorate */
+ int max_code; /* largest code with non zero frequency */
+ ushf *bl_count; /* number of codes at each bit length */
+{
+ ush next_code[MAX_BITS+1]; /* next code value for each bit length */
+ ush code = 0; /* running code value */
+ int bits; /* bit index */
+ int n; /* code index */
+
+ /* The distribution counts are first used to generate the code values
+ * without bit reversal.
+ */
+ for (bits = 1; bits <= MAX_BITS; bits++) {
+ next_code[bits] = code = (code + bl_count[bits-1]) << 1;
+ }
+ /* Check that the bit counts in bl_count are consistent. The last code
+ * must be all ones.
+ */
+ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+ "inconsistent bit counts");
+ Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+ for (n = 0; n <= max_code; n++) {
+ int len = tree[n].Len;
+ if (len == 0) continue;
+ /* Now reverse the bits */
+ tree[n].Code = bi_reverse(next_code[len]++, len);
+
+ Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+ n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
+ }
+}
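+
+/* Worked example (illustrative): for a tree with code lengths
+ * {1, 2, 3, 3} we get bl_count[1] = 1, bl_count[2] = 1, bl_count[3] = 2,
+ * hence next_code[1] = 0, next_code[2] = 2, next_code[3] = 6. The
+ * codes before bit reversal are therefore 0, 10, 110 and 111 -- the
+ * last code is all ones, exactly what the Assert above checks.
+ */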
+
+/* ===========================================================================
+ * Construct one Huffman tree and assign the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ * and corresponding code. The length opt_len is updated; static_len is
+ * also updated if stree is not null. The field max_code is set.
+ */
+local void build_tree(s, desc)
+ deflate_state *s;
+ tree_desc *desc; /* the tree descriptor */
+{
+ ct_data *tree = desc->dyn_tree;
+ const ct_data *stree = desc->stat_desc->static_tree;
+ int elems = desc->stat_desc->elems;
+ int n, m; /* iterate over heap elements */
+ int max_code = -1; /* largest code with non zero frequency */
+ int node; /* new node being created */
+
+ /* Construct the initial heap, with least frequent element in
+ * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ * heap[0] is not used.
+ */
+ s->heap_len = 0, s->heap_max = HEAP_SIZE;
+
+ for (n = 0; n < elems; n++) {
+ if (tree[n].Freq != 0) {
+ s->heap[++(s->heap_len)] = max_code = n;
+ s->depth[n] = 0;
+ } else {
+ tree[n].Len = 0;
+ }
+ }
+
+ /* The pkzip format requires that at least one distance code exists,
+ * and that at least one bit should be sent even if there is only one
+ * possible code. So to avoid special checks later on we force at least
+ * two codes of non zero frequency.
+ */
+ while (s->heap_len < 2) {
+ node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
+ tree[node].Freq = 1;
+ s->depth[node] = 0;
+ s->opt_len--; if (stree) s->static_len -= stree[node].Len;
+ /* node is 0 or 1 so it does not have extra bits */
+ }
+ desc->max_code = max_code;
+
+ /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ * establish sub-heaps of increasing lengths:
+ */
+ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
+
+ /* Construct the Huffman tree by repeatedly combining the least two
+ * frequent nodes.
+ */
+ node = elems; /* next internal node of the tree */
+ do {
+ pqremove(s, tree, n); /* n = node of least frequency */
+ m = s->heap[SMALLEST]; /* m = node of next least frequency */
+
+ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
+ s->heap[--(s->heap_max)] = m;
+
+ /* Create a new node father of n and m */
+ tree[node].Freq = tree[n].Freq + tree[m].Freq;
+ s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1);
+ tree[n].Dad = tree[m].Dad = (ush)node;
+#ifdef DUMP_BL_TREE
+ if (tree == s->bl_tree) {
+ fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
+ node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
+ }
+#endif
+ /* and insert the new node in the heap */
+ s->heap[SMALLEST] = node++;
+ pqdownheap(s, tree, SMALLEST);
+
+ } while (s->heap_len >= 2);
+
+ s->heap[--(s->heap_max)] = s->heap[SMALLEST];
+
+ /* At this point, the fields freq and dad are set. We can now
+ * generate the bit lengths.
+ */
+ gen_bitlen(s, (tree_desc *)desc);
+
+ /* The field len is now set, we can generate the bit codes */
+ gen_codes ((ct_data *)tree, max_code, s->bl_count);
+}
+
+/* ===========================================================================
+ * Scan a literal or distance tree to determine the frequencies of the codes
+ * in the bit length tree.
+ */
+local void scan_tree (s, tree, max_code)
+ deflate_state *s;
+ ct_data *tree; /* the tree to be scanned */
+ int max_code; /* and its largest code of non zero frequency */
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ if (nextlen == 0) max_count = 138, min_count = 3;
+ tree[max_code+1].Len = (ush)0xffff; /* guard */
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ s->bl_tree[curlen].Freq += count;
+ } else if (curlen != 0) {
+ if (curlen != prevlen) s->bl_tree[curlen].Freq++;
+ s->bl_tree[REP_3_6].Freq++;
+ } else if (count <= 10) {
+ s->bl_tree[REPZ_3_10].Freq++;
+ } else {
+ s->bl_tree[REPZ_11_138].Freq++;
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
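+
+/* Example (illustrative): a run of 25 zero lengths increments
+ * bl_tree[REPZ_11_138].Freq once; send_tree() below will emit it as
+ * REPZ_11_138 plus 7 extra bits holding 25 - 11 = 14. A run of seven
+ * equal non-zero lengths costs one count for the length code itself
+ * plus one REP_3_6 covering the remaining six repeats.
+ */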
+
+/* ===========================================================================
+ * Send a literal or distance tree in compressed form, using the codes in
+ * bl_tree.
+ */
+local void send_tree (s, tree, max_code)
+ deflate_state *s;
+ ct_data *tree; /* the tree to be scanned */
+ int max_code; /* and its largest code of non zero frequency */
+{
+ int n; /* iterates over all tree elements */
+ int prevlen = -1; /* last emitted length */
+ int curlen; /* length of current code */
+ int nextlen = tree[0].Len; /* length of next code */
+ int count = 0; /* repeat count of the current code */
+ int max_count = 7; /* max repeat count */
+ int min_count = 4; /* min repeat count */
+
+ /* tree[max_code+1].Len = -1; */ /* guard already set */
+ if (nextlen == 0) max_count = 138, min_count = 3;
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen; nextlen = tree[n+1].Len;
+ if (++count < max_count && curlen == nextlen) {
+ continue;
+ } else if (count < min_count) {
+ do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
+
+ } else if (curlen != 0) {
+ if (curlen != prevlen) {
+ send_code(s, curlen, s->bl_tree); count--;
+ }
+ Assert(count >= 3 && count <= 6, " 3_6?");
+ send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
+
+ } else if (count <= 10) {
+ send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
+
+ } else {
+ send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
+ }
+ count = 0; prevlen = curlen;
+ if (nextlen == 0) {
+ max_count = 138, min_count = 3;
+ } else if (curlen == nextlen) {
+ max_count = 6, min_count = 3;
+ } else {
+ max_count = 7, min_count = 4;
+ }
+ }
+}
+
+/* ===========================================================================
+ * Construct the Huffman tree for the bit lengths and return the index in
+ * bl_order of the last bit length code to send.
+ */
+local int build_bl_tree(s)
+ deflate_state *s;
+{
+ int max_blindex; /* index of last bit length code of non zero freq */
+
+ /* Determine the bit length frequencies for literal and distance trees */
+ scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
+ scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
+
+ /* Build the bit length tree: */
+ build_tree(s, (tree_desc *)(&(s->bl_desc)));
+ /* opt_len now includes the length of the tree representations, except
+ * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+ */
+
+ /* Determine the number of bit length codes to send. The pkzip format
+ * requires that at least 4 bit length codes be sent. (appnote.txt says
+ * 3 but the actual value used is 4.)
+ */
+ for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
+ if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
+ }
+ /* Update opt_len to include the bit length tree and counts */
+ s->opt_len += 3*(max_blindex+1) + 5+5+4;
+ Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
+ s->opt_len, s->static_len));
+
+ return max_blindex;
+}
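+
+/* The "3*(max_blindex+1) + 5+5+4" above is the RFC 1951 header cost:
+ * 5 bits for HLIT, 5 for HDIST, 4 for HCLEN and 3 bits per transmitted
+ * bit length code length. For instance, max_blindex == 13 adds
+ * 3*14 + 14 = 56 bits to opt_len.
+ */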
+
+/* ===========================================================================
+ * Send the header for a block using dynamic Huffman trees: the counts, the
+ * lengths of the bit length codes, the literal tree and the distance tree.
+ * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ */
+local void send_all_trees(s, lcodes, dcodes, blcodes)
+ deflate_state *s;
+ int lcodes, dcodes, blcodes; /* number of codes for each tree */
+{
+ int rank; /* index in bl_order */
+
+ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+ Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
+ "too many codes");
+ Tracev((stderr, "\nbl counts: "));
+ send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
+ send_bits(s, dcodes-1, 5);
+ send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
+ for (rank = 0; rank < blcodes; rank++) {
+ Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
+ send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
+ }
+ Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
+ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
+
+ send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
+ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
+}
+
+/* ===========================================================================
+ * Send a stored block
+ */
+void zlib_tr_stored_block(s, buf, stored_len, eof)
+ deflate_state *s;
+ charf *buf; /* input block */
+ ulg stored_len; /* length of input block */
+ int eof; /* true if this is the last block for a file */
+{
+ send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
+ s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
+ s->compressed_len += (stored_len + 4) << 3;
+
+ copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
+}
+
+/* Send just the `stored block' type code without any length bytes or data.
+ */
+void zlib_tr_stored_type_only(s)
+ deflate_state *s;
+{
+ send_bits(s, (STORED_BLOCK << 1), 3);
+ bi_windup(s);
+ s->compressed_len = (s->compressed_len + 3) & ~7L;
+}
+
+
+/* ===========================================================================
+ * Send one empty static block to give enough lookahead for inflate.
+ * This takes 10 bits, of which 7 may remain in the bit buffer.
+ * The current inflate code requires 9 bits of lookahead. If the
+ * last two codes for the previous block (real code plus EOB) were coded
+ * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
+ * the last real code. In this case we send two empty static blocks instead
+ * of one. (There are no problems if the previous block is stored or fixed.)
+ * To simplify the code, we assume the worst case of last real code encoded
+ * on one bit only.
+ */
+void zlib_tr_align(s)
+ deflate_state *s;
+{
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
+ bi_flush(s);
+ /* Of the 10 bits for the empty block, we have already sent
+ * (10 - bi_valid) bits. The lookahead for the last real code (before
+ * the EOB of the previous block) was thus at least one plus the length
+ * of the EOB plus what we have just sent of the empty static block.
+ */
+ if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
+ send_bits(s, STATIC_TREES<<1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ s->compressed_len += 10L;
+ bi_flush(s);
+ }
+ s->last_eob_len = 7;
+}
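+
+/* Illustrative numbers: if the previous dynamic block ended with a
+ * 3-bit EOB code and bi_valid is 7 after the flush, the lookahead is
+ * 1 + 3 + 10 - 7 = 7 < 9 bits and a second empty block is sent. With
+ * the 7-bit static EOB the worst case is 1 + 7 + 10 - 7 = 11 bits, so
+ * one empty block always suffices after a static block.
+ */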
+
+/* ===========================================================================
+ * Determine the best encoding for the current block: dynamic trees, static
+ * trees or store, and output the encoded block to the zip file. This function
+ * returns the total compressed length for the file so far.
+ */
+ulg zlib_tr_flush_block(s, buf, stored_len, eof)
+ deflate_state *s;
+ charf *buf; /* input block, or NULL if too old */
+ ulg stored_len; /* length of input block */
+ int eof; /* true if this is the last block for a file */
+{
+ ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
+ int max_blindex = 0; /* index of last bit length code of non zero freq */
+
+ /* Build the Huffman trees unless a stored block is forced */
+ if (s->level > 0) {
+
+ /* Check if the file is ascii or binary */
+ if (s->data_type == Z_UNKNOWN) set_data_type(s);
+
+ /* Construct the literal and distance trees */
+ build_tree(s, (tree_desc *)(&(s->l_desc)));
+ Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+
+ build_tree(s, (tree_desc *)(&(s->d_desc)));
+ Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
+ s->static_len));
+ /* At this point, opt_len and static_len are the total bit lengths of
+ * the compressed block data, excluding the tree representations.
+ */
+
+ /* Build the bit length tree for the above two trees, and get the index
+ * in bl_order of the last bit length code to send.
+ */
+ max_blindex = build_bl_tree(s);
+
+ /* Determine the best encoding. Compute first the block length in bytes*/
+ opt_lenb = (s->opt_len+3+7)>>3;
+ static_lenb = (s->static_len+3+7)>>3;
+
+ Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+ opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+ s->last_lit));
+
+ if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
+
+ } else {
+ Assert(buf != (char*)0, "lost buf");
+ opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
+ }
+
+ /* If compression failed and this is the first and last block,
+ * and if the .zip file supports seeking (to rewrite the local header),
+ * the whole file is transformed into a stored file:
+ */
+#ifdef STORED_FILE_OK
+# ifdef FORCE_STORED_FILE
+ if (eof && s->compressed_len == 0L) { /* force stored file */
+# else
+ if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
+# endif
+ /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
+ if (buf == (charf*)0) error ("block vanished");
+
+ copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
+ s->compressed_len = stored_len << 3;
+ s->method = STORED;
+ } else
+#endif /* STORED_FILE_OK */
+
+#ifdef FORCE_STORED
+ if (buf != (char*)0) { /* force stored block */
+#else
+ if (stored_len+4 <= opt_lenb && buf != (char*)0) {
+ /* 4: two words for the lengths */
+#endif
+ /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ * Otherwise we can't have processed more than WSIZE input bytes since
+ * the last block flush, because compression would have been
+ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ * transform a block into a stored block.
+ */
+ zlib_tr_stored_block(s, buf, stored_len, eof);
+
+#ifdef FORCE_STATIC
+ } else if (static_lenb >= 0) { /* force static trees */
+#else
+ } else if (static_lenb == opt_lenb) {
+#endif
+ send_bits(s, (STATIC_TREES<<1)+eof, 3);
+ compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
+ s->compressed_len += 3 + s->static_len;
+ } else {
+ send_bits(s, (DYN_TREES<<1)+eof, 3);
+ send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
+ max_blindex+1);
+ compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
+ s->compressed_len += 3 + s->opt_len;
+ }
+ Assert (s->compressed_len == s->bits_sent, "bad compressed size");
+ init_block(s);
+
+ if (eof) {
+ bi_windup(s);
+ s->compressed_len += 7; /* align on byte boundary */
+ }
+ Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
+ s->compressed_len-7*eof));
+
+ return s->compressed_len >> 3;
+}
+
+/* ===========================================================================
+ * Save the match info and tally the frequency counts. Return true if
+ * the current block must be flushed.
+ */
+int zlib_tr_tally (s, dist, lc)
+ deflate_state *s;
+ unsigned dist; /* distance of matched string */
+ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
+{
+ s->d_buf[s->last_lit] = (ush)dist;
+ s->l_buf[s->last_lit++] = (uch)lc;
+ if (dist == 0) {
+ /* lc is the unmatched char */
+ s->dyn_ltree[lc].Freq++;
+ } else {
+ s->matches++;
+ /* Here, lc is the match length - MIN_MATCH */
+ dist--; /* dist = match distance - 1 */
+ Assert((ush)dist < (ush)MAX_DIST(s) &&
+ (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
+ (ush)d_code(dist) < (ush)D_CODES, "zlib_tr_tally: bad match");
+
+ s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
+ s->dyn_dtree[d_code(dist)].Freq++;
+ }
+
+ /* Try to guess if it is profitable to stop the current block here */
+ if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
+ /* Compute an upper bound for the compressed length */
+ ulg out_length = (ulg)s->last_lit*8L;
+ ulg in_length = (ulg)((long)s->strstart - s->block_start);
+ int dcode;
+ for (dcode = 0; dcode < D_CODES; dcode++) {
+ out_length += (ulg)s->dyn_dtree[dcode].Freq *
+ (5L+extra_dbits[dcode]);
+ }
+ out_length >>= 3;
+ Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
+ s->last_lit, in_length, out_length,
+ 100L - out_length*100L/in_length));
+ if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
+ }
+ return (s->last_lit == s->lit_bufsize-1);
+ /* We avoid equality with lit_bufsize because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */
+}
+
+/* ===========================================================================
+ * Send the block data compressed using the given Huffman trees
+ */
+local void compress_block(s, ltree, dtree)
+ deflate_state *s;
+ ct_data *ltree; /* literal tree */
+ ct_data *dtree; /* distance tree */
+{
+ unsigned dist; /* distance of matched string */
+ int lc; /* match length or unmatched char (if dist == 0) */
+ unsigned lx = 0; /* running index in l_buf */
+ unsigned code; /* the code to send */
+ int extra; /* number of extra bits to send */
+
+ if (s->last_lit != 0) do {
+ dist = s->d_buf[lx];
+ lc = s->l_buf[lx++];
+ if (dist == 0) {
+ send_code(s, lc, ltree); /* send a literal byte */
+ Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+ } else {
+ /* Here, lc is the match length - MIN_MATCH */
+ code = length_code[lc];
+ send_code(s, code+LITERALS+1, ltree); /* send the length code */
+ extra = extra_lbits[code];
+ if (extra != 0) {
+ lc -= base_length[code];
+ send_bits(s, lc, extra); /* send the extra length bits */
+ }
+ dist--; /* dist is now the match distance - 1 */
+ code = d_code(dist);
+ Assert (code < D_CODES, "bad d_code");
+
+ send_code(s, code, dtree); /* send the distance code */
+ extra = extra_dbits[code];
+ if (extra != 0) {
+ dist -= base_dist[code];
+ send_bits(s, dist, extra); /* send the extra distance bits */
+ }
+ } /* literal or match pair ? */
+
+ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
+ Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
+
+ } while (lx < s->last_lit);
+
+ send_code(s, END_BLOCK, ltree);
+ s->last_eob_len = ltree[END_BLOCK].Len;
+}
+
+/* ===========================================================================
+ * Set the data type to ASCII or BINARY, using a crude approximation:
+ * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
+ * IN assertion: the fields freq of dyn_ltree are set and the total of all
+ * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
+ */
+local void set_data_type(s)
+ deflate_state *s;
+{
+ int n = 0;
+ unsigned ascii_freq = 0;
+ unsigned bin_freq = 0;
+ while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
+ while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
+ while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
+ s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
+}
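+
+/* The test bin_freq > (ascii_freq >> 2) is the "more than 20%" rule
+ * above: bin > ascii/4 is the same as bin/(bin + ascii) > 1/5. E.g.
+ * bin_freq = 21 against ascii_freq = 80 (21 > 20) tags the data
+ * Z_BINARY.
+ */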
+
+/* ===========================================================================
+ * Copy a stored block, storing first the length and its
+ * one's complement if requested.
+ */
+local void copy_block(s, buf, len, header)
+ deflate_state *s;
+ charf *buf; /* the input data */
+ unsigned len; /* its length */
+ int header; /* true if block header must be written */
+{
+ bi_windup(s); /* align on byte boundary */
+ s->last_eob_len = 8; /* enough lookahead for inflate */
+
+ if (header) {
+ put_short(s, (ush)len);
+ put_short(s, (ush)~len);
+#ifdef DEBUG_ZLIB
+ s->bits_sent += 2*16;
+#endif
+ }
+#ifdef DEBUG_ZLIB
+ s->bits_sent += (ulg)len<<3;
+#endif
+ /* bundle up the put_byte(s, *buf++) calls */
+ memcpy(&s->pending_buf[s->pending], buf, len);
+ s->pending += len;
+}
+
--- /dev/null
+
+
+
+#define Assert(err, str)
+#define Trace(dummy)
+#define Tracev(dummy)
+#define Tracecv(err, dummy)
+#define Tracevv(dummy)
+
+
+
+#define LENGTH_CODES 29
+/* number of length codes, not counting the special END_BLOCK code */
+
+#define LITERALS 256
+/* number of literal bytes 0..255 */
+
+#define L_CODES (LITERALS+1+LENGTH_CODES)
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+#define D_CODES 30
+/* number of distance codes */
+
+#define BL_CODES 19
+/* number of codes used to transfer the bit lengths */
+
+#define HEAP_SIZE (2*L_CODES+1)
+/* maximum heap size */
+
+#define MAX_BITS 15
+/* All codes must not exceed MAX_BITS bits */
+
+#define INIT_STATE 42
+#define BUSY_STATE 113
+#define FINISH_STATE 666
+/* Stream status */
+
+
+/* Data structure describing a single value and its code string. */
+typedef struct ct_data_s {
+ union {
+ ush freq; /* frequency count */
+ ush code; /* bit string */
+ } fc;
+ union {
+ ush dad; /* father node in Huffman tree */
+ ush len; /* length of bit string */
+ } dl;
+} FAR ct_data;
+
+#define Freq fc.freq
+#define Code fc.code
+#define Dad dl.dad
+#define Len dl.len
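+
+/* The two unions overlay build-time and final fields: Freq is used
+ * while a tree is under construction and the same storage is reused
+ * for Code once gen_codes() has run; likewise Dad is consumed by
+ * gen_bitlen() before the storage is overwritten with Len. No field
+ * is needed in both phases at once.
+ */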
+
+typedef struct static_tree_desc_s static_tree_desc;
+
+typedef struct tree_desc_s {
+ ct_data *dyn_tree; /* the dynamic tree */
+ int max_code; /* largest code with non zero frequency */
+ static_tree_desc *stat_desc; /* the corresponding static tree */
+} FAR tree_desc;
+
+typedef ush Pos;
+typedef Pos FAR Posf;
+typedef unsigned IPos;
+
+/* A Pos is an index in the character window. We use short instead of int to
+ * save space in the various tables. IPos is used only for parameter passing.
+ */
+
+typedef struct deflate_state {
+ z_streamp strm; /* pointer back to this zlib stream */
+ int status; /* as the name implies */
+ Bytef *pending_buf; /* output still pending */
+ ulg pending_buf_size; /* size of pending_buf */
+ Bytef *pending_out; /* next pending byte to output to the stream */
+ int pending; /* nb of bytes in the pending buffer */
+ int noheader; /* suppress zlib header and adler32 */
+ Byte data_type; /* UNKNOWN, BINARY or ASCII */
+ Byte method; /* STORED (for zip only) or DEFLATED */
+ int last_flush; /* value of flush param for previous deflate call */
+
+ /* used by deflate.c: */
+
+ uInt w_size; /* LZ77 window size (32K by default) */
+ uInt w_bits; /* log2(w_size) (8..16) */
+ uInt w_mask; /* w_size - 1 */
+
+ Bytef *window;
+ /* Sliding window. Input bytes are read into the second half of the window,
+ * and move to the first half later to keep a dictionary of at least wSize
+ * bytes. With this organization, matches are limited to a distance of
+ * wSize-MAX_MATCH bytes, but this ensures that IO is always
+ * performed with a length multiple of the block size. Also, it limits
+ * the window size to 64K, which is quite useful on MSDOS.
+ * To do: use the user input buffer as sliding window.
+ */
+
+ ulg window_size;
+ /* Actual size of window: 2*wSize, except when the user input buffer
+ * is directly used as sliding window.
+ */
+
+ Posf *prev;
+ /* Link to older string with same hash index. To limit the size of this
+ * array to 64K, this link is maintained only for the last 32K strings.
+ * An index in this array is thus a window index modulo 32K.
+ */
+
+ Posf *head; /* Heads of the hash chains or NIL. */
+
+ uInt ins_h; /* hash index of string to be inserted */
+ uInt hash_size; /* number of elements in hash table */
+ uInt hash_bits; /* log2(hash_size) */
+ uInt hash_mask; /* hash_size-1 */
+
+ uInt hash_shift;
+ /* Number of bits by which ins_h must be shifted at each input
+ * step. It must be such that after MIN_MATCH steps, the oldest
+ * byte no longer takes part in the hash key, that is:
+ * hash_shift * MIN_MATCH >= hash_bits
+ */
+
+ long block_start;
+ /* Window position at the beginning of the current output block. Gets
+ * negative when the window is moved backwards.
+ */
+
+ uInt match_length; /* length of best match */
+ IPos prev_match; /* previous match */
+ int match_available; /* set if previous match exists */
+ uInt strstart; /* start of string to insert */
+ uInt match_start; /* start of matching string */
+ uInt lookahead; /* number of valid bytes ahead in window */
+
+ uInt prev_length;
+ /* Length of the best match at previous step. Matches not greater than this
+ * are discarded. This is used in the lazy match evaluation.
+ */
+
+ uInt max_chain_length;
+ /* To speed up deflation, hash chains are never searched beyond this
+ * length. A higher limit improves compression ratio but degrades the
+ * speed.
+ */
+
+ uInt max_lazy_match;
+ /* Attempt to find a better match only when the current match is strictly
+ * smaller than this value. This mechanism is used only for compression
+ * levels >= 4.
+ */
+# define max_insert_length max_lazy_match
+ /* Insert new strings in the hash table only if the match length is not
+ * greater than this length. This saves time but degrades compression.
+ * max_insert_length is used only for compression levels <= 3.
+ */
+
+ int level; /* compression level (1..9) */
+ int strategy; /* favor or force Huffman coding*/
+
+ uInt good_match;
+ /* Use a faster search when the previous match is longer than this */
+
+ int nice_match; /* Stop searching when current match exceeds this */
+
+ /* used by trees.c: */
+ /* Didn't use the ct_data typedef above, to suppress a compiler warning */
+ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
+ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
+ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
+
+ struct tree_desc_s l_desc; /* desc. for literal tree */
+ struct tree_desc_s d_desc; /* desc. for distance tree */
+ struct tree_desc_s bl_desc; /* desc. for bit length tree */
+
+ ush bl_count[MAX_BITS+1];
+ /* number of codes at each bit length for an optimal tree */
+
+ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
+ int heap_len; /* number of elements in the heap */
+ int heap_max; /* element of largest frequency */
+ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ * The same heap array is used to build all trees.
+ */
+
+ uch depth[2*L_CODES+1];
+ /* Depth of each subtree used as tie breaker for trees of equal frequency
+ */
+
+ uchf *l_buf; /* buffer for literals or lengths */
+
+ uInt lit_bufsize;
+ /* Size of match buffer for literals/lengths. There are 4 reasons for
+ * limiting lit_bufsize to 64K:
+ * - frequencies can be kept in 16 bit counters
+ * - if compression is not successful for the first block, all input
+ * data is still in the window so we can still emit a stored block even
+ * when input comes from standard input. (This can also be done for
+ * all blocks if lit_bufsize is not greater than 32K.)
+ * - if compression is not successful for a file smaller than 64K, we can
+ * even emit a stored file instead of a stored block (saving 5 bytes).
+ * This is applicable only for zip (not gzip or zlib).
+ * - creating new Huffman trees less frequently may not provide fast
+ * adaptation to changes in the input data statistics. (Take for
+ * example a binary file with poorly compressible code followed by
+ * a highly compressible string table.) Smaller buffer sizes give
+ * fast adaptation but have of course the overhead of transmitting
+ * trees more frequently.
+ * - I can't count above 4
+ */
+
+ uInt last_lit; /* running index in l_buf */
+
+ ushf *d_buf;
+ /* Buffer for distances. To simplify the code, d_buf and l_buf have
+ * the same number of elements. To use different lengths, an extra flag
+ * array would be necessary.
+ */
+
+ ulg opt_len; /* bit length of current block with optimal trees */
+ ulg static_len; /* bit length of current block with static trees */
+ ulg compressed_len; /* total bit length of compressed file */
+ uInt matches; /* number of string matches in current block */
+ int last_eob_len; /* bit length of EOB code for last block */
+
+#ifdef DEBUG_ZLIB
+ ulg bits_sent; /* bit length of the compressed data */
+#endif
+
+ ush bi_buf;
+ /* Output buffer. bits are inserted starting at the bottom (least
+ * significant bits).
+ */
+ int bi_valid;
+ /* Number of valid bits in bi_buf. All bits above the last valid bit
+ * are always zero.
+ */
+
+} FAR deflate_state;
+
+typedef struct deflate_workspace {
+ /* State memory for the deflator */
+ deflate_state deflate_memory;
+ Byte window_memory[2 * (1 << MAX_WBITS)];
+ Pos prev_memory[1 << MAX_WBITS];
+ Pos head_memory[1 << (MAX_MEM_LEVEL + 7)];
+ char overlay_memory[(1 << (MAX_MEM_LEVEL + 6)) * (sizeof(ush)+2)];
+} deflate_workspace;
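+
+/* Rough sizing, assuming the usual MAX_WBITS = 15, MAX_MEM_LEVEL = 9
+ * and a 2-byte Pos (assumptions of this note): window_memory is 64K,
+ * prev_memory 64K, head_memory 128K and overlay_memory 128K -- about
+ * 384K plus the state struct, all reserved up front so the deflator
+ * never allocates while running.
+ */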
+
+/* Output a byte on the stream.
+ * IN assertion: there is enough room in pending_buf.
+ */
+#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
+
+
+#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
+/* Minimum amount of lookahead, except at the end of the input file.
+ * See deflate.c for comments about the MIN_MATCH+1.
+ */
+
+#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
+/* In order to simplify the code, particularly on 16 bit machines, match
+ * distances are limited to MAX_DIST instead of WSIZE.
+ */
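+
+/* With the usual MAX_MATCH = 258 and MIN_MATCH = 3 (an assumption of
+ * this note), MIN_LOOKAHEAD is 262, so a 32K window yields
+ * MAX_DIST(s) = 32768 - 262 = 32506, slightly less than the full
+ * window as described above.
+ */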
+
+ /* in trees.c */
+void zlib_tr_init OF((deflate_state *s));
+int zlib_tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
+ulg zlib_tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len,
+ int eof));
+void zlib_tr_align OF((deflate_state *s));
+void zlib_tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len,
+ int eof));
+void zlib_tr_stored_type_only OF((deflate_state *));
+
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+#define put_short(s, w) { \
+ put_byte(s, (uch)((w) & 0xff)); \
+ put_byte(s, (uch)((ush)(w) >> 8)); \
+}
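+
+/* E.g. put_short(s, 0x1234) emits the byte 0x34 followed by 0x12,
+ * the little-endian order used throughout the deflate bit stream.
+ */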
+
+/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+static inline unsigned bi_reverse(unsigned code, /* the value to invert */
+ int len) /* its bit length */
+{
+ register unsigned res = 0;
+ do {
+ res |= code & 1;
+ code >>= 1, res <<= 1;
+ } while (--len > 0);
+ return res >> 1;
+}
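+
+/* Example: bi_reverse(0x05, 5) returns 0x14 (00101b -> 10100b).
+ * Codes are stored pre-reversed because the stream is written LSB
+ * first by send_bits()/bi_flush().
+ */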
+
+/* ===========================================================================
+ * Flush the bit buffer, keeping at most 7 bits in it.
+ */
+static inline void bi_flush(deflate_state *s)
+{
+ if (s->bi_valid == 16) {
+ put_short(s, s->bi_buf);
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+ } else if (s->bi_valid >= 8) {
+ put_byte(s, (Byte)s->bi_buf);
+ s->bi_buf >>= 8;
+ s->bi_valid -= 8;
+ }
+}
+
+/* ===========================================================================
+ * Flush the bit buffer and align the output on a byte boundary
+ */
+static inline void bi_windup(deflate_state *s)
+{
+ if (s->bi_valid > 8) {
+ put_short(s, s->bi_buf);
+ } else if (s->bi_valid > 0) {
+ put_byte(s, (Byte)s->bi_buf);
+ }
+ s->bi_buf = 0;
+ s->bi_valid = 0;
+#ifdef DEBUG_ZLIB
+ s->bits_sent = (s->bits_sent+7) & ~7;
+#endif
+}
+
--- /dev/null
+#
+# This is a modified version of zlib, which does all memory
+# allocation ahead of time.
+#
+# This is only the decompression; see zlib_deflate for
+# the compression.
+#
+# Decompression needs to be serialized for each memory
+# allocation.
+#
+# (The upsides of the simplification are that you can't get into
+# any nasty situations wrt memory management, and that the
+# decompression can be done without blocking on allocation.)
+#
+
+O_TARGET := zlib_inflate.o
+
+export-objs := inflate_syms.o
+
+obj-y := infblock.o infcodes.o inffast.o inflate.o \
+ inftrees.o infutil.o inflate_syms.o
+obj-m := $(O_TARGET)
+
+EXTRA_CFLAGS += -I $(TOPDIR)/lib/zlib_inflate
+
+include $(TOPDIR)/Rules.make
--- /dev/null
+/* infblock.c -- interpret and process block types to last block
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/zutil.h>
+#include "infblock.h"
+#include "inftrees.h"
+#include "infcodes.h"
+#include "infutil.h"
+
+struct inflate_codes_state;
+
+/* simplify the use of the inflate_huft type with some defines */
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* Table for deflate from PKZIP's appnote.txt. */
+local const uInt border[] = { /* Order of the bit length code lengths */
+ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+/*
+ Notes beyond the 1.93a appnote.txt:
+
+ 1. Distance pointers never point before the beginning of the output
+ stream.
+ 2. Distance pointers can point back across blocks, up to 32k away.
+ 3. There is an implied maximum of 7 bits for the bit length table and
+ 15 bits for the actual data.
+ 4. If only one code exists, then it is encoded using one bit. (Zero
+ would be more efficient, but perhaps a little confusing.) If two
+ codes exist, they are coded using one bit each (0 and 1).
+ 5. There is no way of sending zero distance codes--a dummy must be
+ sent if there are none. (History: a pre 2.0 version of PKZIP would
+ store blocks with no distance codes, but this was discovered to be
+ too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
+ zero distance codes, which is sent as one code of zero bits in
+ length.
+ 6. There are up to 286 literal/length codes. Code 256 represents the
+ end-of-block. Note however that the static length tree defines
+ 288 codes just to fill out the Huffman codes. Codes 286 and 287
+ cannot be used though, since there is no length base or extra bits
+ defined for them. Similarly, there are up to 30 distance codes.
+ However, static trees define 32 codes (all 5 bits) to fill out the
+ Huffman codes, but the last two had better not show up in the data.
+ 7. Unzip can check dynamic Huffman blocks for complete code sets.
+ The exception is that a single code would not be complete (see #4).
+ 8. The five bits following the block type are really the number of
+ literal codes sent minus 257.
+ 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
+ (1+6+6). Therefore, to output three times the length, you output
+ three codes (1+1+1), whereas to output four times the same length,
+ you only need two codes (1+3). Hmm.
+ 10. In the tree reconstruction algorithm, Code = Code + Increment
+ only if BitLength(i) is not zero. (Pretty obvious.)
+ 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
+ 12. Note: length code 284 can represent 227-258, but length code 285
+ really is 258. The last length deserves its own, short code
+ since it gets used a lot in very redundant files. The length
+ 258 is special since 258 - 3 (the min match length) is 255.
+ 13. The literal/length and distance code bit lengths are read as a
+ single stream of lengths. It is possible (and advantageous) for
+ a repeat code (16, 17, or 18) to go across the boundary between
+ the two sets of lengths.
+ */
+
+
+void zlib_inflate_blocks_reset(s, z, c)
+inflate_blocks_statef *s;
+z_streamp z;
+uLongf *c;
+{
+ if (c != Z_NULL)
+ *c = s->check;
+ if (s->mode == CODES)
+ zlib_inflate_codes_free(s->sub.decode.codes, z);
+ s->mode = TYPE;
+ s->bitk = 0;
+ s->bitb = 0;
+ s->read = s->write = s->window;
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(0L, (const Bytef *)Z_NULL, 0);
+}
+
+inflate_blocks_statef *zlib_inflate_blocks_new(z, c, w)
+z_streamp z;
+check_func c;
+uInt w;
+{
+ inflate_blocks_statef *s;
+
+ s = &WS(z)->working_blocks_state;
+ s->hufts = WS(z)->working_hufts;
+ s->window = WS(z)->working_window;
+ s->end = s->window + w;
+ s->checkfn = c;
+ s->mode = TYPE;
+ zlib_inflate_blocks_reset(s, z, Z_NULL);
+ return s;
+}
+
+
+int zlib_inflate_blocks(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt t; /* temporary storage */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input based on current state */
+ while (1) switch (s->mode)
+ {
+ case TYPE:
+ NEEDBITS(3)
+ t = (uInt)b & 7;
+ s->last = t & 1;
+ switch (t >> 1)
+ {
+ case 0: /* stored */
+ DUMPBITS(3)
+ t = k & 7; /* go to byte boundary */
+ DUMPBITS(t)
+ s->mode = LENS; /* get length of stored block */
+ break;
+ case 1: /* fixed */
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+
+ zlib_inflate_trees_fixed(&bl, &bd, &tl, &td, z);
+ s->sub.decode.codes = zlib_inflate_codes_new(bl, bd, tl, td, z);
+ if (s->sub.decode.codes == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ }
+ DUMPBITS(3)
+ s->mode = CODES;
+ break;
+ case 2: /* dynamic */
+ DUMPBITS(3)
+ s->mode = TABLE;
+ break;
+ case 3: /* illegal */
+ DUMPBITS(3)
+ s->mode = B_BAD;
+ z->msg = (char*)"invalid block type";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ break;
+ case LENS:
+ NEEDBITS(32)
+ if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
+ {
+ s->mode = B_BAD;
+ z->msg = (char*)"invalid stored block lengths";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ s->sub.left = (uInt)b & 0xffff;
+ b = k = 0; /* dump bits */
+ s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE);
+ break;
+ case STORED:
+ if (n == 0)
+ LEAVE
+ NEEDOUT
+ t = s->sub.left;
+ if (t > n) t = n;
+ if (t > m) t = m;
+ memcpy(q, p, t);
+ p += t; n -= t;
+ q += t; m -= t;
+ if ((s->sub.left -= t) != 0)
+ break;
+ s->mode = s->last ? DRY : TYPE;
+ break;
+ case TABLE:
+ NEEDBITS(14)
+ s->sub.trees.table = t = (uInt)b & 0x3fff;
+#ifndef PKZIP_BUG_WORKAROUND
+ if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
+ {
+ s->mode = B_BAD;
+ z->msg = (char*)"too many length or distance symbols";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+#endif
+ {
+ s->sub.trees.blens = WS(z)->working_blens;
+ }
+ DUMPBITS(14)
+ s->sub.trees.index = 0;
+ s->mode = BTREE;
+ case BTREE:
+ while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
+ {
+ NEEDBITS(3)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
+ DUMPBITS(3)
+ }
+ while (s->sub.trees.index < 19)
+ s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
+ s->sub.trees.bb = 7;
+ t = zlib_inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
+ &s->sub.trees.tb, s->hufts, z);
+ if (t != Z_OK)
+ {
+ r = t;
+ if (r == Z_DATA_ERROR)
+ s->mode = B_BAD;
+ LEAVE
+ }
+ s->sub.trees.index = 0;
+ s->mode = DTREE;
+ case DTREE:
+ while (t = s->sub.trees.table,
+ s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
+ {
+ inflate_huft *h;
+ uInt i, j, c;
+
+ t = s->sub.trees.bb;
+ NEEDBITS(t)
+ h = s->sub.trees.tb + ((uInt)b & zlib_inflate_mask[t]);
+ t = h->bits;
+ c = h->base;
+ if (c < 16)
+ {
+ DUMPBITS(t)
+ s->sub.trees.blens[s->sub.trees.index++] = c;
+ }
+ else /* c == 16..18 */
+ {
+ i = c == 18 ? 7 : c - 14;
+ j = c == 18 ? 11 : 3;
+ NEEDBITS(t + i)
+ DUMPBITS(t)
+ j += (uInt)b & zlib_inflate_mask[i];
+ DUMPBITS(i)
+ i = s->sub.trees.index;
+ t = s->sub.trees.table;
+ if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
+ (c == 16 && i < 1))
+ {
+ s->mode = B_BAD;
+ z->msg = (char*)"invalid bit length repeat";
+ r = Z_DATA_ERROR;
+ LEAVE
+ }
+ c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
+ do {
+ s->sub.trees.blens[i++] = c;
+ } while (--j);
+ s->sub.trees.index = i;
+ }
+ }
+ s->sub.trees.tb = Z_NULL;
+ {
+ uInt bl, bd;
+ inflate_huft *tl, *td;
+ inflate_codes_statef *c;
+
+ bl = 9; /* must be <= 9 for lookahead assumptions */
+ bd = 6; /* must be <= 9 for lookahead assumptions */
+ t = s->sub.trees.table;
+ t = zlib_inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
+ s->sub.trees.blens, &bl, &bd, &tl, &td,
+ s->hufts, z);
+ if (t != Z_OK)
+ {
+ if (t == (uInt)Z_DATA_ERROR)
+ s->mode = B_BAD;
+ r = t;
+ LEAVE
+ }
+ if ((c = zlib_inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
+ {
+ r = Z_MEM_ERROR;
+ LEAVE
+ }
+ s->sub.decode.codes = c;
+ }
+ s->mode = CODES;
+ case CODES:
+ UPDATE
+ if ((r = zlib_inflate_codes(s, z, r)) != Z_STREAM_END)
+ return zlib_inflate_flush(s, z, r);
+ r = Z_OK;
+ zlib_inflate_codes_free(s->sub.decode.codes, z);
+ LOAD
+ if (!s->last)
+ {
+ s->mode = TYPE;
+ break;
+ }
+ s->mode = DRY;
+ case DRY:
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ s->mode = B_DONE;
+ case B_DONE:
+ r = Z_STREAM_END;
+ LEAVE
+ case B_BAD:
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+}
+
+
+int zlib_inflate_blocks_free(s, z)
+inflate_blocks_statef *s;
+z_streamp z;
+{
+ zlib_inflate_blocks_reset(s, z, Z_NULL);
+ return Z_OK;
+}
+
+
+void zlib_inflate_set_dictionary(s, d, n)
+inflate_blocks_statef *s;
+const Bytef *d;
+uInt n;
+{
+ memcpy(s->window, d, n);
+ s->read = s->write = s->window + n;
+}
+
+
+/* Returns true if inflate is currently at the end of a block generated
+ * by Z_SYNC_FLUSH or Z_FULL_FLUSH.
+ * IN assertion: s != Z_NULL
+ */
+int zlib_inflate_blocks_sync_point(s)
+inflate_blocks_statef *s;
+{
+ return s->mode == LENS;
+}
--- /dev/null
+/* infblock.h -- header to use infblock.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFBLOCK_H
+#define _INFBLOCK_H
+
+struct inflate_blocks_state;
+typedef struct inflate_blocks_state FAR inflate_blocks_statef;
+
+extern inflate_blocks_statef * zlib_inflate_blocks_new OF((
+ z_streamp z,
+ check_func c, /* check function */
+ uInt w)); /* window size */
+
+extern int zlib_inflate_blocks OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int)); /* initial return code */
+
+extern void zlib_inflate_blocks_reset OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ uLongf *)); /* check value on output */
+
+extern int zlib_inflate_blocks_free OF((
+ inflate_blocks_statef *,
+ z_streamp));
+
+extern void zlib_inflate_set_dictionary OF((
+ inflate_blocks_statef *s,
+ const Bytef *d, /* dictionary */
+ uInt n)); /* dictionary length */
+
+extern int zlib_inflate_blocks_sync_point OF((
+ inflate_blocks_statef *s));
+
+#endif /* _INFBLOCK_H */
--- /dev/null
+/* infcodes.c -- process literals and length/distance pairs
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+#include "infblock.h"
+#include "infcodes.h"
+#include "infutil.h"
+#include "inffast.h"
+
+/* simplify the use of the inflate_huft type with some defines */
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+inflate_codes_statef *zlib_inflate_codes_new(bl, bd, tl, td, z)
+uInt bl, bd;
+inflate_huft *tl;
+inflate_huft *td; /* need separate declaration for Borland C++ */
+z_streamp z;
+{
+ inflate_codes_statef *c;
+
+ c = &WS(z)->working_state;
+ {
+ c->mode = START;
+ c->lbits = (Byte)bl;
+ c->dbits = (Byte)bd;
+ c->ltree = tl;
+ c->dtree = td;
+ }
+ return c;
+}
+
+
+int zlib_inflate_codes(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt j; /* temporary storage */
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ Bytef *f; /* pointer to copy strings from */
+ inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
+
+ /* copy input/output information to locals (UPDATE macro restores) */
+ LOAD
+
+ /* process input and output based on current state */
+ while (1) switch (c->mode)
+ { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ case START: /* x: set up for LEN */
+#ifndef SLOW
+ if (m >= 258 && n >= 10)
+ {
+ UPDATE
+ r = zlib_inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
+ LOAD
+ if (r != Z_OK)
+ {
+ c->mode = r == Z_STREAM_END ? WASH : BADCODE;
+ break;
+ }
+ }
+#endif /* !SLOW */
+ c->sub.code.need = c->lbits;
+ c->sub.code.tree = c->ltree;
+ c->mode = LEN;
+ case LEN: /* i: get length/literal/eob next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & zlib_inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e == 0) /* literal */
+ {
+ c->sub.lit = t->base;
+ c->mode = LIT;
+ break;
+ }
+ if (e & 16) /* length */
+ {
+ c->sub.copy.get = e & 15;
+ c->len = t->base;
+ c->mode = LENEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t + t->base;
+ break;
+ }
+ if (e & 32) /* end of block */
+ {
+ c->mode = WASH;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = (char*)"invalid literal/length code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case LENEXT: /* i: getting length extra (have base) */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->len += (uInt)b & zlib_inflate_mask[j];
+ DUMPBITS(j)
+ c->sub.code.need = c->dbits;
+ c->sub.code.tree = c->dtree;
+ c->mode = DIST;
+ case DIST: /* i: get distance next */
+ j = c->sub.code.need;
+ NEEDBITS(j)
+ t = c->sub.code.tree + ((uInt)b & zlib_inflate_mask[j]);
+ DUMPBITS(t->bits)
+ e = (uInt)(t->exop);
+ if (e & 16) /* distance */
+ {
+ c->sub.copy.get = e & 15;
+ c->sub.copy.dist = t->base;
+ c->mode = DISTEXT;
+ break;
+ }
+ if ((e & 64) == 0) /* next table */
+ {
+ c->sub.code.need = e;
+ c->sub.code.tree = t + t->base;
+ break;
+ }
+ c->mode = BADCODE; /* invalid code */
+ z->msg = (char*)"invalid distance code";
+ r = Z_DATA_ERROR;
+ LEAVE
+ case DISTEXT: /* i: getting distance extra */
+ j = c->sub.copy.get;
+ NEEDBITS(j)
+ c->sub.copy.dist += (uInt)b & zlib_inflate_mask[j];
+ DUMPBITS(j)
+ c->mode = COPY;
+ case COPY: /* o: copying bytes in window, waiting for space */
+#ifndef __TURBOC__ /* Turbo C bug for following expression */
+ f = (uInt)(q - s->window) < c->sub.copy.dist ?
+ s->end - (c->sub.copy.dist - (q - s->window)) :
+ q - c->sub.copy.dist;
+#else
+ f = q - c->sub.copy.dist;
+ if ((uInt)(q - s->window) < c->sub.copy.dist)
+ f = s->end - (c->sub.copy.dist - (uInt)(q - s->window));
+#endif
+ while (c->len)
+ {
+ NEEDOUT
+ OUTBYTE(*f++)
+ if (f == s->end)
+ f = s->window;
+ c->len--;
+ }
+ c->mode = START;
+ break;
+ case LIT: /* o: got literal, waiting for output space */
+ NEEDOUT
+ OUTBYTE(c->sub.lit)
+ c->mode = START;
+ break;
+ case WASH: /* o: got eob, possibly more output */
+ if (k > 7) /* return unused byte, if any */
+ {
+ k -= 8;
+ n++;
+ p--; /* can always return one */
+ }
+ FLUSH
+ if (s->read != s->write)
+ LEAVE
+ c->mode = END;
+ case END:
+ r = Z_STREAM_END;
+ LEAVE
+ case BADCODE: /* x: got error */
+ r = Z_DATA_ERROR;
+ LEAVE
+ default:
+ r = Z_STREAM_ERROR;
+ LEAVE
+ }
+#ifdef NEED_DUMMY_RETURN
+ return Z_STREAM_ERROR; /* Some dumb compilers complain without this */
+#endif
+}
+
+
+void zlib_inflate_codes_free(c, z)
+inflate_codes_statef *c;
+z_streamp z;
+{
+}
--- /dev/null
+/* infcodes.h -- header to use infcodes.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFCODES_H
+#define _INFCODES_H
+
+#include "infblock.h"
+
+struct inflate_codes_state;
+typedef struct inflate_codes_state FAR inflate_codes_statef;
+
+extern inflate_codes_statef *zlib_inflate_codes_new OF((
+ uInt, uInt,
+ inflate_huft *, inflate_huft *,
+ z_streamp ));
+
+extern int zlib_inflate_codes OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int));
+
+extern void zlib_inflate_codes_free OF((
+ inflate_codes_statef *,
+ z_streamp ));
+
+#endif /* _INFCODES_H */
--- /dev/null
+/* inffast.c -- process literals and length/distance pairs fast
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+#include "infblock.h"
+#include "infcodes.h"
+#include "infutil.h"
+#include "inffast.h"
+
+struct inflate_codes_state;
+
+/* simplify the use of the inflate_huft type with some defines */
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* macros for bit input with no checking and for returning unused bytes */
+#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define UNGRAB {c=z->avail_in-n;c=(k>>3)<c?k>>3:c;n+=c;p-=c;k-=c<<3;}
+
+/* Called with number of bytes left to write in window at least 258
+ (the maximum string length) and number of input bytes available
+ at least ten. The ten bytes are six bytes for the longest length/
+ distance pair plus four bytes for overloading the bit buffer. */
+
+int zlib_inflate_fast(bl, bd, tl, td, s, z)
+uInt bl, bd;
+inflate_huft *tl;
+inflate_huft *td; /* need separate declaration for Borland C++ */
+inflate_blocks_statef *s;
+z_streamp z;
+{
+ inflate_huft *t; /* temporary pointer */
+ uInt e; /* extra bits or operation */
+ uLong b; /* bit buffer */
+ uInt k; /* bits in bit buffer */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+ uInt ml; /* mask for literal/length tree */
+ uInt md; /* mask for distance tree */
+ uInt c; /* bytes to copy */
+ uInt d; /* distance back to copy from */
+ Bytef *r; /* copy source pointer */
+
+ /* load input, output, bit values */
+ LOAD
+
+ /* initialize masks */
+ ml = zlib_inflate_mask[bl];
+ md = zlib_inflate_mask[bd];
+
+ /* do until not enough input or output space for fast loop */
+ do { /* assume called with m >= 258 && n >= 10 */
+ /* get literal/length code */
+ GRABBITS(20) /* max bits for literal/length code */
+ if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ *q++ = (Byte)t->base;
+ m--;
+ continue;
+ }
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits for length */
+ e &= 15;
+ c = t->base + ((uInt)b & zlib_inflate_mask[e]);
+ DUMPBITS(e)
+
+ /* decode distance base of block to copy */
+ GRABBITS(15); /* max bits for distance code */
+ e = (t = td + ((uInt)b & md))->exop;
+ do {
+ DUMPBITS(t->bits)
+ if (e & 16)
+ {
+ /* get extra bits to add to distance base */
+ e &= 15;
+ GRABBITS(e) /* get extra bits (up to 13) */
+ d = t->base + ((uInt)b & zlib_inflate_mask[e]);
+ DUMPBITS(e)
+
+ /* do the copy */
+ m -= c;
+ if ((uInt)(q - s->window) >= d) /* offset before dest */
+ { /* just copy */
+ r = q - d;
+ *q++ = *r++; c--; /* minimum count is three, */
+ *q++ = *r++; c--; /* so unroll loop a little */
+ }
+ else /* else offset after destination */
+ {
+ e = d - (uInt)(q - s->window); /* bytes from offset to end */
+ r = s->end - e; /* pointer to offset */
+ if (c > e) /* if source crosses, */
+ {
+ c -= e; /* copy to end of window */
+ do {
+ *q++ = *r++;
+ } while (--e);
+ r = s->window; /* copy rest from start of window */
+ }
+ }
+ do { /* copy all or what's left */
+ *q++ = *r++;
+ } while (--c);
+ break;
+ }
+ else if ((e & 64) == 0)
+ {
+ t += t->base;
+ e = (t += ((uInt)b & zlib_inflate_mask[e]))->exop;
+ }
+ else
+ {
+ z->msg = (char*)"invalid distance code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ break;
+ }
+ if ((e & 64) == 0)
+ {
+ t += t->base;
+ if ((e = (t += ((uInt)b & zlib_inflate_mask[e]))->exop) == 0)
+ {
+ DUMPBITS(t->bits)
+ *q++ = (Byte)t->base;
+ m--;
+ break;
+ }
+ }
+ else if (e & 32)
+ {
+ UNGRAB
+ UPDATE
+ return Z_STREAM_END;
+ }
+ else
+ {
+ z->msg = (char*)"invalid literal/length code";
+ UNGRAB
+ UPDATE
+ return Z_DATA_ERROR;
+ }
+ } while (1);
+ } while (m >= 258 && n >= 10);
+
+ /* not enough input or output--restore pointers and return */
+ UNGRAB
+ UPDATE
+ return Z_OK;
+}
--- /dev/null
+/* inffast.h -- header to use inffast.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+extern int zlib_inflate_fast OF((
+ uInt,
+ uInt,
+ inflate_huft *,
+ inflate_huft *,
+ inflate_blocks_statef *,
+ z_streamp ));
--- /dev/null
+/* inffixed.h -- table for decoding fixed codes
+ * Generated automatically by the maketree.c program
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+local uInt fixed_bl = 9;
+local uInt fixed_bd = 5;
+local inflate_huft fixed_tl[] = {
+ {{{96,7}},256}, {{{0,8}},80}, {{{0,8}},16}, {{{84,8}},115},
+ {{{82,7}},31}, {{{0,8}},112}, {{{0,8}},48}, {{{0,9}},192},
+ {{{80,7}},10}, {{{0,8}},96}, {{{0,8}},32}, {{{0,9}},160},
+ {{{0,8}},0}, {{{0,8}},128}, {{{0,8}},64}, {{{0,9}},224},
+ {{{80,7}},6}, {{{0,8}},88}, {{{0,8}},24}, {{{0,9}},144},
+ {{{83,7}},59}, {{{0,8}},120}, {{{0,8}},56}, {{{0,9}},208},
+ {{{81,7}},17}, {{{0,8}},104}, {{{0,8}},40}, {{{0,9}},176},
+ {{{0,8}},8}, {{{0,8}},136}, {{{0,8}},72}, {{{0,9}},240},
+ {{{80,7}},4}, {{{0,8}},84}, {{{0,8}},20}, {{{85,8}},227},
+ {{{83,7}},43}, {{{0,8}},116}, {{{0,8}},52}, {{{0,9}},200},
+ {{{81,7}},13}, {{{0,8}},100}, {{{0,8}},36}, {{{0,9}},168},
+ {{{0,8}},4}, {{{0,8}},132}, {{{0,8}},68}, {{{0,9}},232},
+ {{{80,7}},8}, {{{0,8}},92}, {{{0,8}},28}, {{{0,9}},152},
+ {{{84,7}},83}, {{{0,8}},124}, {{{0,8}},60}, {{{0,9}},216},
+ {{{82,7}},23}, {{{0,8}},108}, {{{0,8}},44}, {{{0,9}},184},
+ {{{0,8}},12}, {{{0,8}},140}, {{{0,8}},76}, {{{0,9}},248},
+ {{{80,7}},3}, {{{0,8}},82}, {{{0,8}},18}, {{{85,8}},163},
+ {{{83,7}},35}, {{{0,8}},114}, {{{0,8}},50}, {{{0,9}},196},
+ {{{81,7}},11}, {{{0,8}},98}, {{{0,8}},34}, {{{0,9}},164},
+ {{{0,8}},2}, {{{0,8}},130}, {{{0,8}},66}, {{{0,9}},228},
+ {{{80,7}},7}, {{{0,8}},90}, {{{0,8}},26}, {{{0,9}},148},
+ {{{84,7}},67}, {{{0,8}},122}, {{{0,8}},58}, {{{0,9}},212},
+ {{{82,7}},19}, {{{0,8}},106}, {{{0,8}},42}, {{{0,9}},180},
+ {{{0,8}},10}, {{{0,8}},138}, {{{0,8}},74}, {{{0,9}},244},
+ {{{80,7}},5}, {{{0,8}},86}, {{{0,8}},22}, {{{192,8}},0},
+ {{{83,7}},51}, {{{0,8}},118}, {{{0,8}},54}, {{{0,9}},204},
+ {{{81,7}},15}, {{{0,8}},102}, {{{0,8}},38}, {{{0,9}},172},
+ {{{0,8}},6}, {{{0,8}},134}, {{{0,8}},70}, {{{0,9}},236},
+ {{{80,7}},9}, {{{0,8}},94}, {{{0,8}},30}, {{{0,9}},156},
+ {{{84,7}},99}, {{{0,8}},126}, {{{0,8}},62}, {{{0,9}},220},
+ {{{82,7}},27}, {{{0,8}},110}, {{{0,8}},46}, {{{0,9}},188},
+ {{{0,8}},14}, {{{0,8}},142}, {{{0,8}},78}, {{{0,9}},252},
+ {{{96,7}},256}, {{{0,8}},81}, {{{0,8}},17}, {{{85,8}},131},
+ {{{82,7}},31}, {{{0,8}},113}, {{{0,8}},49}, {{{0,9}},194},
+ {{{80,7}},10}, {{{0,8}},97}, {{{0,8}},33}, {{{0,9}},162},
+ {{{0,8}},1}, {{{0,8}},129}, {{{0,8}},65}, {{{0,9}},226},
+ {{{80,7}},6}, {{{0,8}},89}, {{{0,8}},25}, {{{0,9}},146},
+ {{{83,7}},59}, {{{0,8}},121}, {{{0,8}},57}, {{{0,9}},210},
+ {{{81,7}},17}, {{{0,8}},105}, {{{0,8}},41}, {{{0,9}},178},
+ {{{0,8}},9}, {{{0,8}},137}, {{{0,8}},73}, {{{0,9}},242},
+ {{{80,7}},4}, {{{0,8}},85}, {{{0,8}},21}, {{{80,8}},258},
+ {{{83,7}},43}, {{{0,8}},117}, {{{0,8}},53}, {{{0,9}},202},
+ {{{81,7}},13}, {{{0,8}},101}, {{{0,8}},37}, {{{0,9}},170},
+ {{{0,8}},5}, {{{0,8}},133}, {{{0,8}},69}, {{{0,9}},234},
+ {{{80,7}},8}, {{{0,8}},93}, {{{0,8}},29}, {{{0,9}},154},
+ {{{84,7}},83}, {{{0,8}},125}, {{{0,8}},61}, {{{0,9}},218},
+ {{{82,7}},23}, {{{0,8}},109}, {{{0,8}},45}, {{{0,9}},186},
+ {{{0,8}},13}, {{{0,8}},141}, {{{0,8}},77}, {{{0,9}},250},
+ {{{80,7}},3}, {{{0,8}},83}, {{{0,8}},19}, {{{85,8}},195},
+ {{{83,7}},35}, {{{0,8}},115}, {{{0,8}},51}, {{{0,9}},198},
+ {{{81,7}},11}, {{{0,8}},99}, {{{0,8}},35}, {{{0,9}},166},
+ {{{0,8}},3}, {{{0,8}},131}, {{{0,8}},67}, {{{0,9}},230},
+ {{{80,7}},7}, {{{0,8}},91}, {{{0,8}},27}, {{{0,9}},150},
+ {{{84,7}},67}, {{{0,8}},123}, {{{0,8}},59}, {{{0,9}},214},
+ {{{82,7}},19}, {{{0,8}},107}, {{{0,8}},43}, {{{0,9}},182},
+ {{{0,8}},11}, {{{0,8}},139}, {{{0,8}},75}, {{{0,9}},246},
+ {{{80,7}},5}, {{{0,8}},87}, {{{0,8}},23}, {{{192,8}},0},
+ {{{83,7}},51}, {{{0,8}},119}, {{{0,8}},55}, {{{0,9}},206},
+ {{{81,7}},15}, {{{0,8}},103}, {{{0,8}},39}, {{{0,9}},174},
+ {{{0,8}},7}, {{{0,8}},135}, {{{0,8}},71}, {{{0,9}},238},
+ {{{80,7}},9}, {{{0,8}},95}, {{{0,8}},31}, {{{0,9}},158},
+ {{{84,7}},99}, {{{0,8}},127}, {{{0,8}},63}, {{{0,9}},222},
+ {{{82,7}},27}, {{{0,8}},111}, {{{0,8}},47}, {{{0,9}},190},
+ {{{0,8}},15}, {{{0,8}},143}, {{{0,8}},79}, {{{0,9}},254},
+ {{{96,7}},256}, {{{0,8}},80}, {{{0,8}},16}, {{{84,8}},115},
+ {{{82,7}},31}, {{{0,8}},112}, {{{0,8}},48}, {{{0,9}},193},
+ {{{80,7}},10}, {{{0,8}},96}, {{{0,8}},32}, {{{0,9}},161},
+ {{{0,8}},0}, {{{0,8}},128}, {{{0,8}},64}, {{{0,9}},225},
+ {{{80,7}},6}, {{{0,8}},88}, {{{0,8}},24}, {{{0,9}},145},
+ {{{83,7}},59}, {{{0,8}},120}, {{{0,8}},56}, {{{0,9}},209},
+ {{{81,7}},17}, {{{0,8}},104}, {{{0,8}},40}, {{{0,9}},177},
+ {{{0,8}},8}, {{{0,8}},136}, {{{0,8}},72}, {{{0,9}},241},
+ {{{80,7}},4}, {{{0,8}},84}, {{{0,8}},20}, {{{85,8}},227},
+ {{{83,7}},43}, {{{0,8}},116}, {{{0,8}},52}, {{{0,9}},201},
+ {{{81,7}},13}, {{{0,8}},100}, {{{0,8}},36}, {{{0,9}},169},
+ {{{0,8}},4}, {{{0,8}},132}, {{{0,8}},68}, {{{0,9}},233},
+ {{{80,7}},8}, {{{0,8}},92}, {{{0,8}},28}, {{{0,9}},153},
+ {{{84,7}},83}, {{{0,8}},124}, {{{0,8}},60}, {{{0,9}},217},
+ {{{82,7}},23}, {{{0,8}},108}, {{{0,8}},44}, {{{0,9}},185},
+ {{{0,8}},12}, {{{0,8}},140}, {{{0,8}},76}, {{{0,9}},249},
+ {{{80,7}},3}, {{{0,8}},82}, {{{0,8}},18}, {{{85,8}},163},
+ {{{83,7}},35}, {{{0,8}},114}, {{{0,8}},50}, {{{0,9}},197},
+ {{{81,7}},11}, {{{0,8}},98}, {{{0,8}},34}, {{{0,9}},165},
+ {{{0,8}},2}, {{{0,8}},130}, {{{0,8}},66}, {{{0,9}},229},
+ {{{80,7}},7}, {{{0,8}},90}, {{{0,8}},26}, {{{0,9}},149},
+ {{{84,7}},67}, {{{0,8}},122}, {{{0,8}},58}, {{{0,9}},213},
+ {{{82,7}},19}, {{{0,8}},106}, {{{0,8}},42}, {{{0,9}},181},
+ {{{0,8}},10}, {{{0,8}},138}, {{{0,8}},74}, {{{0,9}},245},
+ {{{80,7}},5}, {{{0,8}},86}, {{{0,8}},22}, {{{192,8}},0},
+ {{{83,7}},51}, {{{0,8}},118}, {{{0,8}},54}, {{{0,9}},205},
+ {{{81,7}},15}, {{{0,8}},102}, {{{0,8}},38}, {{{0,9}},173},
+ {{{0,8}},6}, {{{0,8}},134}, {{{0,8}},70}, {{{0,9}},237},
+ {{{80,7}},9}, {{{0,8}},94}, {{{0,8}},30}, {{{0,9}},157},
+ {{{84,7}},99}, {{{0,8}},126}, {{{0,8}},62}, {{{0,9}},221},
+ {{{82,7}},27}, {{{0,8}},110}, {{{0,8}},46}, {{{0,9}},189},
+ {{{0,8}},14}, {{{0,8}},142}, {{{0,8}},78}, {{{0,9}},253},
+ {{{96,7}},256}, {{{0,8}},81}, {{{0,8}},17}, {{{85,8}},131},
+ {{{82,7}},31}, {{{0,8}},113}, {{{0,8}},49}, {{{0,9}},195},
+ {{{80,7}},10}, {{{0,8}},97}, {{{0,8}},33}, {{{0,9}},163},
+ {{{0,8}},1}, {{{0,8}},129}, {{{0,8}},65}, {{{0,9}},227},
+ {{{80,7}},6}, {{{0,8}},89}, {{{0,8}},25}, {{{0,9}},147},
+ {{{83,7}},59}, {{{0,8}},121}, {{{0,8}},57}, {{{0,9}},211},
+ {{{81,7}},17}, {{{0,8}},105}, {{{0,8}},41}, {{{0,9}},179},
+ {{{0,8}},9}, {{{0,8}},137}, {{{0,8}},73}, {{{0,9}},243},
+ {{{80,7}},4}, {{{0,8}},85}, {{{0,8}},21}, {{{80,8}},258},
+ {{{83,7}},43}, {{{0,8}},117}, {{{0,8}},53}, {{{0,9}},203},
+ {{{81,7}},13}, {{{0,8}},101}, {{{0,8}},37}, {{{0,9}},171},
+ {{{0,8}},5}, {{{0,8}},133}, {{{0,8}},69}, {{{0,9}},235},
+ {{{80,7}},8}, {{{0,8}},93}, {{{0,8}},29}, {{{0,9}},155},
+ {{{84,7}},83}, {{{0,8}},125}, {{{0,8}},61}, {{{0,9}},219},
+ {{{82,7}},23}, {{{0,8}},109}, {{{0,8}},45}, {{{0,9}},187},
+ {{{0,8}},13}, {{{0,8}},141}, {{{0,8}},77}, {{{0,9}},251},
+ {{{80,7}},3}, {{{0,8}},83}, {{{0,8}},19}, {{{85,8}},195},
+ {{{83,7}},35}, {{{0,8}},115}, {{{0,8}},51}, {{{0,9}},199},
+ {{{81,7}},11}, {{{0,8}},99}, {{{0,8}},35}, {{{0,9}},167},
+ {{{0,8}},3}, {{{0,8}},131}, {{{0,8}},67}, {{{0,9}},231},
+ {{{80,7}},7}, {{{0,8}},91}, {{{0,8}},27}, {{{0,9}},151},
+ {{{84,7}},67}, {{{0,8}},123}, {{{0,8}},59}, {{{0,9}},215},
+ {{{82,7}},19}, {{{0,8}},107}, {{{0,8}},43}, {{{0,9}},183},
+ {{{0,8}},11}, {{{0,8}},139}, {{{0,8}},75}, {{{0,9}},247},
+ {{{80,7}},5}, {{{0,8}},87}, {{{0,8}},23}, {{{192,8}},0},
+ {{{83,7}},51}, {{{0,8}},119}, {{{0,8}},55}, {{{0,9}},207},
+ {{{81,7}},15}, {{{0,8}},103}, {{{0,8}},39}, {{{0,9}},175},
+ {{{0,8}},7}, {{{0,8}},135}, {{{0,8}},71}, {{{0,9}},239},
+ {{{80,7}},9}, {{{0,8}},95}, {{{0,8}},31}, {{{0,9}},159},
+ {{{84,7}},99}, {{{0,8}},127}, {{{0,8}},63}, {{{0,9}},223},
+ {{{82,7}},27}, {{{0,8}},111}, {{{0,8}},47}, {{{0,9}},191},
+ {{{0,8}},15}, {{{0,8}},143}, {{{0,8}},79}, {{{0,9}},255}
+ };
+local inflate_huft fixed_td[] = {
+ {{{80,5}},1}, {{{87,5}},257}, {{{83,5}},17}, {{{91,5}},4097},
+ {{{81,5}},5}, {{{89,5}},1025}, {{{85,5}},65}, {{{93,5}},16385},
+ {{{80,5}},3}, {{{88,5}},513}, {{{84,5}},33}, {{{92,5}},8193},
+ {{{82,5}},9}, {{{90,5}},2049}, {{{86,5}},129}, {{{192,5}},24577},
+ {{{80,5}},2}, {{{87,5}},385}, {{{83,5}},25}, {{{91,5}},6145},
+ {{{81,5}},7}, {{{89,5}},1537}, {{{85,5}},97}, {{{93,5}},24577},
+ {{{80,5}},4}, {{{88,5}},769}, {{{84,5}},49}, {{{92,5}},12289},
+ {{{82,5}},13}, {{{90,5}},3073}, {{{86,5}},193}, {{{192,5}},24577}
+ };
--- /dev/null
+/* inflate.c -- zlib interface to inflate modules
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/module.h>
+#include <linux/zutil.h>
+#include "infblock.h"
+#include "infutil.h"
+
+int ZEXPORT zlib_inflate_workspacesize(void)
+{
+ return sizeof(struct inflate_workspace);
+}
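Unlike userland zlib, this port allocates nothing internally: the caller must attach a buffer of at least this size to z->workspace before any init call. A minimal setup sketch under that convention — the helper name is illustrative, and it assumes vmalloc() and the zlib_inflateInit() convenience macro from <linux/zlib.h>:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>

/* Illustrative helper, not part of this patch. */
static int example_setup_inflate(z_stream *strm)
{
	memset(strm, 0, sizeof(*strm));
	strm->workspace = vmalloc(zlib_inflate_workspacesize());
	if (strm->workspace == NULL)
		return -ENOMEM;
	if (zlib_inflateInit(strm) != Z_OK) {	/* DEF_WBITS window */
		vfree(strm->workspace);
		return -EINVAL;
	}
	return 0;
}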
+
+
+int ZEXPORT zlib_inflateReset(z)
+z_streamp z;
+{
+ if (z == Z_NULL || z->state == Z_NULL || z->workspace == Z_NULL)
+ return Z_STREAM_ERROR;
+ z->total_in = z->total_out = 0;
+ z->msg = Z_NULL;
+ z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
+ zlib_inflate_blocks_reset(z->state->blocks, z, Z_NULL);
+ return Z_OK;
+}
+
+
+int ZEXPORT zlib_inflateEnd(z)
+z_streamp z;
+{
+ if (z == Z_NULL || z->state == Z_NULL || z->workspace == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->blocks != Z_NULL)
+ zlib_inflate_blocks_free(z->state->blocks, z);
+ z->state = Z_NULL;
+ return Z_OK;
+}
+
+
+int ZEXPORT zlib_inflateInit2_(z, w, version, stream_size)
+z_streamp z;
+int w;
+const char *version;
+int stream_size;
+{
+  if (z == Z_NULL)              /* check z before touching z->workspace */
+    return Z_STREAM_ERROR;
+  if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
+      stream_size != sizeof(z_stream) || z->workspace == Z_NULL)
+    return Z_VERSION_ERROR;
+
+  /* initialize state */
+ z->msg = Z_NULL;
+ z->state = &WS(z)->internal_state;
+ z->state->blocks = Z_NULL;
+
+ /* handle undocumented nowrap option (no zlib header or check) */
+ z->state->nowrap = 0;
+ if (w < 0)
+ {
+ w = - w;
+ z->state->nowrap = 1;
+ }
+
+ /* set window size */
+ if (w < 8 || w > 15)
+ {
+ zlib_inflateEnd(z);
+ return Z_STREAM_ERROR;
+ }
+ z->state->wbits = (uInt)w;
+
+ /* create inflate_blocks state */
+ if ((z->state->blocks =
+ zlib_inflate_blocks_new(z, z->state->nowrap ? Z_NULL : zlib_adler32, (uInt)1 << w))
+ == Z_NULL)
+ {
+ zlib_inflateEnd(z);
+ return Z_MEM_ERROR;
+ }
+
+ /* reset state */
+ zlib_inflateReset(z);
+ return Z_OK;
+}
+
+
+/*
+ * At the end of a Deflate-compressed PPP packet, we expect to have seen
+ * a `stored' block type value but not the (zero) length bytes.
+ */
+static int zlib_inflate_packet_flush(inflate_blocks_statef *s)
+{
+ if (s->mode != LENS)
+ return Z_DATA_ERROR;
+ s->mode = TYPE;
+ return Z_OK;
+}
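This is what lets a PPP receiver pass Z_PACKET_FLUSH to zlib_inflate(): a packet whose trailing empty stored block was stripped by the sender still terminates cleanly instead of stalling for more input. A hedged sketch of such a call (buffer handling and names are illustrative):

/* Illustrative only: inflate one packet into a caller-supplied buffer. */
static int example_inflate_packet(z_stream *strm,
				  unsigned char *in, unsigned int inlen,
				  unsigned char *out, unsigned int outlen)
{
	int r;

	strm->next_in = in;
	strm->avail_in = inlen;
	strm->next_out = out;
	strm->avail_out = outlen;
	r = zlib_inflate(strm, Z_PACKET_FLUSH);
	if (r != Z_OK && r != Z_STREAM_END)
		return -1;				/* data error etc. */
	return (int)(outlen - strm->avail_out);		/* bytes produced */
}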
+
+
+int ZEXPORT zlib_inflateInit_(z, version, stream_size)
+z_streamp z;
+const char *version;
+int stream_size;
+{
+ return zlib_inflateInit2_(z, DEF_WBITS, version, stream_size);
+}
+
+#undef NEEDBYTE
+#undef NEXTBYTE
+#define NEEDBYTE {if(z->avail_in==0)goto empty;r=trv;}
+#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
+
+int ZEXPORT zlib_inflate(z, f)
+z_streamp z;
+int f;
+{
+  int r, trv;
+ uInt b;
+
+ if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL)
+ return Z_STREAM_ERROR;
+  trv = f == Z_FINISH ? Z_BUF_ERROR : Z_OK; /* keep f: tested against Z_PACKET_FLUSH below */
+ r = Z_BUF_ERROR;
+ while (1) switch (z->state->mode)
+ {
+ case METHOD:
+ NEEDBYTE
+ if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED)
+ {
+ z->state->mode = I_BAD;
+ z->msg = (char*)"unknown compression method";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
+ {
+ z->state->mode = I_BAD;
+ z->msg = (char*)"invalid window size";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ z->state->mode = FLAG;
+ case FLAG:
+ NEEDBYTE
+ b = NEXTBYTE;
+ if (((z->state->sub.method << 8) + b) % 31)
+ {
+ z->state->mode = I_BAD;
+ z->msg = (char*)"incorrect header check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ if (!(b & PRESET_DICT))
+ {
+ z->state->mode = BLOCKS;
+ break;
+ }
+ z->state->mode = DICT4;
+ case DICT4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = DICT3;
+ case DICT3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = DICT2;
+ case DICT2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = DICT1;
+ case DICT1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+ z->adler = z->state->sub.check.need;
+ z->state->mode = DICT0;
+ return Z_NEED_DICT;
+ case DICT0:
+ z->state->mode = I_BAD;
+ z->msg = (char*)"need dictionary";
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_STREAM_ERROR;
+ case BLOCKS:
+ r = zlib_inflate_blocks(z->state->blocks, z, r);
+ if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
+ r = zlib_inflate_packet_flush(z->state->blocks);
+ if (r == Z_DATA_ERROR)
+ {
+ z->state->mode = I_BAD;
+ z->state->sub.marker = 0; /* can try inflateSync */
+ break;
+ }
+      if (r == Z_OK)
+        r = trv;
+      if (r != Z_STREAM_END)
+        return r;
+      r = trv;
+ zlib_inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
+ if (z->state->nowrap)
+ {
+ z->state->mode = I_DONE;
+ break;
+ }
+ z->state->mode = CHECK4;
+ case CHECK4:
+ NEEDBYTE
+ z->state->sub.check.need = (uLong)NEXTBYTE << 24;
+ z->state->mode = CHECK3;
+ case CHECK3:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 16;
+ z->state->mode = CHECK2;
+ case CHECK2:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE << 8;
+ z->state->mode = CHECK1;
+ case CHECK1:
+ NEEDBYTE
+ z->state->sub.check.need += (uLong)NEXTBYTE;
+
+ if (z->state->sub.check.was != z->state->sub.check.need)
+ {
+ z->state->mode = I_BAD;
+ z->msg = (char*)"incorrect data check";
+ z->state->sub.marker = 5; /* can't try inflateSync */
+ break;
+ }
+ z->state->mode = I_DONE;
+ case I_DONE:
+ return Z_STREAM_END;
+ case I_BAD:
+ return Z_DATA_ERROR;
+ default:
+ return Z_STREAM_ERROR;
+ }
+ empty:
+ if (f != Z_PACKET_FLUSH)
+ return r;
+ z->state->mode = I_BAD;
+ z->msg = (char *)"need more for packet flush";
+ z->state->sub.marker = 0; /* can try inflateSync */
+ return Z_DATA_ERROR;
+}
+
+
+int ZEXPORT zlib_inflateSync(z)
+z_streamp z;
+{
+ uInt n; /* number of bytes to look at */
+ Bytef *p; /* pointer to bytes */
+ uInt m; /* number of marker bytes found in a row */
+ uLong r, w; /* temporaries to save total_in and total_out */
+
+ /* set up */
+ if (z == Z_NULL || z->state == Z_NULL)
+ return Z_STREAM_ERROR;
+ if (z->state->mode != I_BAD)
+ {
+ z->state->mode = I_BAD;
+ z->state->sub.marker = 0;
+ }
+ if ((n = z->avail_in) == 0)
+ return Z_BUF_ERROR;
+ p = z->next_in;
+ m = z->state->sub.marker;
+
+ /* search */
+ while (n && m < 4)
+ {
+ static const Byte mark[4] = {0, 0, 0xff, 0xff};
+ if (*p == mark[m])
+ m++;
+ else if (*p)
+ m = 0;
+ else
+ m = 4 - m;
+ p++, n--;
+ }
+
+ /* restore */
+ z->total_in += p - z->next_in;
+ z->next_in = p;
+ z->avail_in = n;
+ z->state->sub.marker = m;
+
+ /* return no joy or set up to restart on a new block */
+ if (m != 4)
+ return Z_DATA_ERROR;
+ r = z->total_in; w = z->total_out;
+ zlib_inflateReset(z);
+ z->total_in = r; z->total_out = w;
+ z->state->mode = BLOCKS;
+ return Z_OK;
+}
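The marker scanned for here is the 00 00 FF FF sequence that an empty stored block (the output of Z_SYNC_FLUSH or Z_FULL_FLUSH) leaves in the stream, so a caller may try to resynchronise after a data error rather than give up. A sketch of that recovery pattern (the retry policy is purely illustrative):

/* Illustrative: skip to the next flush point after a data error. */
static int example_inflate_with_resync(z_stream *strm)
{
	int r = zlib_inflate(strm, Z_NO_FLUSH);

	while (r == Z_DATA_ERROR) {
		if (zlib_inflateSync(strm) != Z_OK)
			break;		/* no marker found in remaining input */
		r = zlib_inflate(strm, Z_NO_FLUSH);
	}
	return r;
}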
+
+
+/* Returns true if inflate is currently at the end of a block generated
+ * by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP
+ * implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH
+ * but removes the length bytes of the resulting empty stored block. When
+ * decompressing, PPP checks that at the end of input packet, inflate is
+ * waiting for these length bytes.
+ */
+int ZEXPORT zlib_inflateSyncPoint(z)
+z_streamp z;
+{
+ if (z == Z_NULL || z->state == Z_NULL || z->state->blocks == Z_NULL)
+ return Z_STREAM_ERROR;
+ return zlib_inflate_blocks_sync_point(z->state->blocks);
+}
+
+/*
+ * This subroutine adds the data at next_in/avail_in to the output history
+ * without performing any output. The output buffer must be "caught up";
+ * i.e. no pending output (hence s->read equals s->write), and the state must
+ * be BLOCKS (i.e. we should be willing to see the start of a series of
+ * BLOCKS). On exit, the output will also be caught up, and the checksum
+ * will have been updated if need be.
+ */
+static int zlib_inflate_addhistory(inflate_blocks_statef *s,
+ z_stream *z)
+{
+ uLong b; /* bit buffer */ /* NOT USED HERE */
+ uInt k; /* bits in bit buffer */ /* NOT USED HERE */
+ uInt t; /* temporary storage */
+ Bytef *p; /* input data pointer */
+ uInt n; /* bytes available there */
+ Bytef *q; /* output window write pointer */
+ uInt m; /* bytes to end of window or read pointer */
+
+ if (s->read != s->write)
+ return Z_STREAM_ERROR;
+ if (s->mode != TYPE)
+ return Z_DATA_ERROR;
+
+ /* we're ready to rock */
+ LOAD
+ /* while there is input ready, copy to output buffer, moving
+ * pointers as needed.
+ */
+ while (n) {
+ t = n; /* how many to do */
+ /* is there room until end of buffer? */
+ if (t > m) t = m;
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ s->check = (*s->checkfn)(s->check, q, t);
+ memcpy(q, p, t);
+ q += t;
+ p += t;
+ n -= t;
+ z->total_out += t;
+ s->read = q; /* drag read pointer forward */
+/* WWRAP */ /* expand WWRAP macro by hand to handle s->read */
+ if (q == s->end) {
+ s->read = q = s->window;
+ m = WAVAIL;
+ }
+ }
+ UPDATE
+ return Z_OK;
+}
+
+
+/*
+ * zlib_inflateIncomp() feeds a packet that was sent uncompressed into
+ * the decompression history (via zlib_inflate_addhistory above) without
+ * producing any output, so that both ends' windows stay in step.
+ */
+
+int ZEXPORT zlib_inflateIncomp(z)
+z_stream *z;
+{
+ if (z->state->mode != BLOCKS)
+ return Z_DATA_ERROR;
+ return zlib_inflate_addhistory(z->state->blocks, z);
+}
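A compressor that finds a packet incompressible sends it verbatim, but the receiver must still push those bytes through the window so both histories stay identical; that is what this entry point is for. A sketch (names illustrative):

/* Illustrative: record an uncompressed packet in the history window. */
static int example_record_incompressible(z_stream *strm,
					 unsigned char *buf, unsigned int len)
{
	strm->next_in = buf;
	strm->avail_in = len;
	return zlib_inflateIncomp(strm);	/* consumes input, emits nothing */
}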
--- /dev/null
+/*
+ * linux/lib/zlib_inflate/inflate_syms.c
+ *
+ * Exported symbols for the inflate functionality.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/zlib.h>
+
+EXPORT_SYMBOL(zlib_inflate_workspacesize);
+EXPORT_SYMBOL(zlib_inflate);
+EXPORT_SYMBOL(zlib_inflateInit_);
+EXPORT_SYMBOL(zlib_inflateInit2_);
+EXPORT_SYMBOL(zlib_inflateEnd);
+EXPORT_SYMBOL(zlib_inflateSync);
+EXPORT_SYMBOL(zlib_inflateReset);
+EXPORT_SYMBOL(zlib_inflateSyncPoint);
+MODULE_LICENSE("GPL");
--- /dev/null
+/* inftrees.c -- generate Huffman trees for efficient decoding
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/zutil.h>
+#include "inftrees.h"
+#include "infutil.h"
+
+static const char inflate_copyright[] =
+ " inflate 1.1.3 Copyright 1995-1998 Mark Adler ";
+/*
+ If you use the zlib library in a product, an acknowledgment is welcome
+ in the documentation of your product. If for some reason you cannot
+ include such an acknowledgment, I would appreciate that you keep this
+ copyright string in the executable of your product.
+ */
+struct internal_state;
+
+/* simplify the use of the inflate_huft type with some defines */
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+
+local int huft_build OF((
+ uIntf *, /* code lengths in bits */
+ uInt, /* number of codes */
+ uInt, /* number of "simple" codes */
+ const uIntf *, /* list of base values for non-simple codes */
+ const uIntf *, /* list of extra bits for non-simple codes */
+ inflate_huft * FAR*,/* result: starting table */
+ uIntf *, /* maximum lookup bits (returns actual) */
+ inflate_huft *, /* space for trees */
+ uInt *, /* hufts used in space */
+ uIntf * )); /* space for values */
+
+/* Tables for deflate from PKZIP's appnote.txt. */
+local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
+ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
+ 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
+ /* see note #13 above about 258 */
+local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
+local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
+ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
+ 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
+ 8193, 12289, 16385, 24577};
+local const uInt cpdext[30] = { /* Extra bits for distance codes */
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
+ 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 13, 13};
+
+/*
+ Huffman code decoding is performed using a multi-level table lookup.
+ The fastest way to decode is to simply build a lookup table whose
+ size is determined by the longest code. However, the time it takes
+ to build this table can also be a factor if the data being decoded
+ is not very long. The most common codes are necessarily the
+ shortest codes, so those codes dominate the decoding time, and hence
+ the speed. The idea is you can have a shorter table that decodes the
+ shorter, more probable codes, and then point to subsidiary tables for
+ the longer codes. The time it costs to decode the longer codes is
+ then traded against the time it takes to make longer tables.
+
+   The results of this trade-off are in the variables lbits and dbits
+ below. lbits is the number of bits the first level table for literal/
+ length codes can decode in one step, and dbits is the same thing for
+ the distance codes. Subsequent tables are also less than or equal to
+ those sizes. These values may be adjusted either when all of the
+ codes are shorter than that, in which case the longest code length in
+ bits is used, or when the shortest code is *longer* than the requested
+ table size, in which case the length of the shortest code in bits is
+ used.
+
+ There are two different values for the two tables, since they code a
+ different number of possibilities each. The literal/length table
+ codes 286 possible values, or in a flat code, a little over eight
+ bits. The distance table codes 30 possible values, or a little less
+ than five bits, flat. The optimum values for speed end up being
+ about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+ The optimum values may differ though from machine to machine, and
+ possibly even between compilers. Your mileage may vary.
+ */
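Concretely, a decode step masks the low lbits bits of the bit buffer to index the root table; a terminal entry yields its symbol at once, while a link entry (an exop carrying none of the 16/32/64 flags) redirects into a subsidiary table indexed by the bits that follow. The sketch below mirrors the (e & 64) == 0 descent in inffast.c, with the bit-buffer bookkeeping simplified and extra bits left unapplied:

/* Illustrative walk of the multi-level tables; not part of the patch. */
static uInt example_walk_huft(inflate_huft *tl, uInt lbits, uLong b)
{
	inflate_huft *t = tl + ((uInt)b & zlib_inflate_mask[lbits]);
	uInt e = t->exop;

	while (e != 0 && (e & (16 + 32 + 64)) == 0) {
		b >>= t->bits;			/* bits consumed at this level */
		t += t->base;			/* start of the subsidiary table */
		t += (uInt)b & zlib_inflate_mask[e];
		e = t->exop;
	}
	return t->base;		/* literal, or length/distance base value */
}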
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
+#define BMAX 15 /* maximum bit length of any code */
+
+local int huft_build(b, n, s, d, e, t, m, hp, hn, v)
+uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
+uInt n; /* number of codes (assumed <= 288) */
+uInt s; /* number of simple-valued codes (0..s-1) */
+const uIntf *d; /* list of base values for non-simple codes */
+const uIntf *e; /* list of extra bits for non-simple codes */
+inflate_huft * FAR *t; /* result: starting table */
+uIntf *m; /* maximum lookup bits, returns actual */
+inflate_huft *hp; /* space for trees */
+uInt *hn; /* hufts used in space */
+uIntf *v; /* working area: values in order of bit length */
+/* Given a list of code lengths and a maximum table size, make a set of
+ tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
+ if the given code set is incomplete (the tables are still built in this
+ case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
+ lengths), or Z_MEM_ERROR if not enough memory. */
+{
+
+ uInt a; /* counter for codes of length k */
+ uInt c[BMAX+1]; /* bit length count table */
+ uInt f; /* i repeats in table every f entries */
+ int g; /* maximum code length */
+ int h; /* table level */
+ register uInt i; /* counter, current code */
+ register uInt j; /* counter */
+ register int k; /* number of bits in current code */
+ int l; /* bits per table (returned in m) */
+ uInt mask; /* (1 << w) - 1, to avoid cc -O bug on HP */
+ register uIntf *p; /* pointer into c[], b[], or v[] */
+ inflate_huft *q; /* points to current table */
+ struct inflate_huft_s r; /* table entry for structure assignment */
+ inflate_huft *u[BMAX]; /* table stack */
+ register int w; /* bits before this table == (l * h) */
+ uInt x[BMAX+1]; /* bit offsets, then code stack */
+ uIntf *xp; /* pointer into x */
+ int y; /* number of dummy codes added */
+ uInt z; /* number of entries in current table */
+
+
+ /* Generate counts for each bit length */
+ p = c;
+#define C0 *p++ = 0;
+#define C2 C0 C0 C0 C0
+#define C4 C2 C2 C2 C2
+ C4 /* clear c[]--assume BMAX+1 is 16 */
+ p = b; i = n;
+ do {
+ c[*p++]++; /* assume all entries <= BMAX */
+ } while (--i);
+ if (c[0] == n) /* null input--all zero length codes */
+ {
+ *t = (inflate_huft *)Z_NULL;
+ *m = 0;
+ return Z_OK;
+ }
+
+
+ /* Find minimum and maximum length, bound *m by those */
+ l = *m;
+ for (j = 1; j <= BMAX; j++)
+ if (c[j])
+ break;
+ k = j; /* minimum code length */
+ if ((uInt)l < j)
+ l = j;
+ for (i = BMAX; i; i--)
+ if (c[i])
+ break;
+ g = i; /* maximum code length */
+ if ((uInt)l > i)
+ l = i;
+ *m = l;
+
+
+ /* Adjust last length count to fill out codes, if needed */
+ for (y = 1 << j; j < i; j++, y <<= 1)
+ if ((y -= c[j]) < 0)
+ return Z_DATA_ERROR;
+ if ((y -= c[i]) < 0)
+ return Z_DATA_ERROR;
+ c[i] += y;
+
+
+ /* Generate starting offsets into the value table for each length */
+ x[1] = j = 0;
+ p = c + 1; xp = x + 2;
+ while (--i) { /* note that i == g from above */
+ *xp++ = (j += *p++);
+ }
+
+
+ /* Make a table of values in order of bit lengths */
+ p = b; i = 0;
+ do {
+ if ((j = *p++) != 0)
+ v[x[j]++] = i;
+ } while (++i < n);
+ n = x[g]; /* set n to length of v */
+
+
+ /* Generate the Huffman codes and for each, make the table entries */
+ x[0] = i = 0; /* first Huffman code is zero */
+ p = v; /* grab values in bit order */
+ h = -1; /* no tables yet--level -1 */
+ w = -l; /* bits decoded == (l * h) */
+ u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
+ q = (inflate_huft *)Z_NULL; /* ditto */
+ z = 0; /* ditto */
+
+ /* go through the bit lengths (k already is bits in shortest code) */
+ for (; k <= g; k++)
+ {
+ a = c[k];
+ while (a--)
+ {
+ /* here i is the Huffman code of length k bits for value *p */
+ /* make tables up to required level */
+ while (k > w + l)
+ {
+ h++;
+ w += l; /* previous table always l bits */
+
+ /* compute minimum size table less than or equal to l bits */
+ z = g - w;
+ z = z > (uInt)l ? l : z; /* table size upper limit */
+ if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
+ { /* too few codes for k-w bit table */
+ f -= a + 1; /* deduct codes from patterns left */
+ xp = c + k;
+ if (j < z)
+ while (++j < z) /* try smaller tables up to z bits */
+ {
+ if ((f <<= 1) <= *++xp)
+ break; /* enough codes to use up j bits */
+ f -= *xp; /* else deduct codes from patterns */
+ }
+ }
+ z = 1 << j; /* table entries for j-bit table */
+
+ /* allocate new table */
+ if (*hn + z > MANY) /* (note: doesn't matter for fixed) */
+ return Z_MEM_ERROR; /* not enough memory */
+ u[h] = q = hp + *hn;
+ *hn += z;
+
+ /* connect to last table, if there is one */
+ if (h)
+ {
+ x[h] = i; /* save pattern for backing up */
+ r.bits = (Byte)l; /* bits to dump before this table */
+ r.exop = (Byte)j; /* bits in this table */
+ j = i >> (w - l);
+ r.base = (uInt)(q - u[h-1] - j); /* offset to this table */
+ u[h-1][j] = r; /* connect to last table */
+ }
+ else
+ *t = q; /* first table is returned result */
+ }
+
+ /* set up table entry in r */
+ r.bits = (Byte)(k - w);
+ if (p >= v + n)
+ r.exop = 128 + 64; /* out of values--invalid code */
+ else if (*p < s)
+ {
+ r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
+ r.base = *p++; /* simple code is just the value */
+ }
+ else
+ {
+ r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */
+ r.base = d[*p++ - s];
+ }
+
+ /* fill code-like entries with r */
+ f = 1 << (k - w);
+ for (j = i >> w; j < z; j += f)
+ q[j] = r;
+
+ /* backwards increment the k-bit code i */
+ for (j = 1 << (k - 1); i & j; j >>= 1)
+ i ^= j;
+ i ^= j;
+
+ /* backup over finished tables */
+ mask = (1 << w) - 1; /* needed on HP, cc -O bug */
+ while ((i & mask) != x[h])
+ {
+ h--; /* don't need to update q */
+ w -= l;
+ mask = (1 << w) - 1;
+ }
+ }
+ }
+
+
+ /* Return Z_BUF_ERROR if we were given an incomplete table */
+ return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
+}
+
+
+int zlib_inflate_trees_bits(c, bb, tb, hp, z)
+uIntf *c; /* 19 code lengths */
+uIntf *bb; /* bits tree desired/actual depth */
+inflate_huft * FAR *tb; /* bits tree result */
+inflate_huft *hp; /* space for trees */
+z_streamp z; /* for messages */
+{
+ int r;
+ uInt hn = 0; /* hufts used in space */
+ uIntf *v; /* work area for huft_build */
+
+ v = WS(z)->tree_work_area_1;
+ r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL,
+ tb, bb, hp, &hn, v);
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed dynamic bit lengths tree";
+ else if (r == Z_BUF_ERROR || *bb == 0)
+ {
+ z->msg = (char*)"incomplete dynamic bit lengths tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+}
+
+int zlib_inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, hp, z)
+uInt nl; /* number of literal/length codes */
+uInt nd; /* number of distance codes */
+uIntf *c; /* that many (total) code lengths */
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+inflate_huft *hp; /* space for trees */
+z_streamp z; /* for messages */
+{
+ int r;
+ uInt hn = 0; /* hufts used in space */
+ uIntf *v; /* work area for huft_build */
+
+ /* allocate work area */
+ v = WS(z)->tree_work_area_2;
+
+ /* build literal/length tree */
+ r = huft_build(c, nl, 257, cplens, cplext, tl, bl, hp, &hn, v);
+ if (r != Z_OK || *bl == 0)
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed literal/length tree";
+ else if (r != Z_MEM_ERROR)
+ {
+ z->msg = (char*)"incomplete literal/length tree";
+ r = Z_DATA_ERROR;
+ }
+ return r;
+ }
+
+ /* build distance tree */
+ r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, hp, &hn, v);
+ if (r != Z_OK || (*bd == 0 && nl > 257))
+ {
+ if (r == Z_DATA_ERROR)
+ z->msg = (char*)"oversubscribed distance tree";
+    else if (r == Z_BUF_ERROR)
+    {
+#ifdef PKZIP_BUG_WORKAROUND
+      r = Z_OK;                 /* tolerate incomplete distance trees */
+#else
+      z->msg = (char*)"incomplete distance tree";
+      r = Z_DATA_ERROR;
+#endif
+    }
+#ifndef PKZIP_BUG_WORKAROUND
+    else if (r != Z_MEM_ERROR)
+    {
+      z->msg = (char*)"empty distance tree with lengths";
+      r = Z_DATA_ERROR;
+    }
+    return r;
+#endif
+  }
+
+ /* done */
+ return Z_OK;
+}
+
+
+/* build fixed tables only once--keep them here */
+#include "inffixed.h"
+
+
+int zlib_inflate_trees_fixed(bl, bd, tl, td, z)
+uIntf *bl; /* literal desired/actual bit depth */
+uIntf *bd; /* distance desired/actual bit depth */
+inflate_huft * FAR *tl; /* literal/length tree result */
+inflate_huft * FAR *td; /* distance tree result */
+z_streamp z; /* for memory allocation */
+{
+ *bl = fixed_bl;
+ *bd = fixed_bd;
+ *tl = fixed_tl;
+ *td = fixed_td;
+ return Z_OK;
+}
--- /dev/null
+/* inftrees.h -- header to use inftrees.c
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* Huffman code lookup table entry--this entry is four bytes for machines
+ that have 16-bit pointers (e.g. PC's in the small or medium model). */
+
+#ifndef _INFTREES_H
+#define _INFTREES_H
+
+typedef struct inflate_huft_s FAR inflate_huft;
+
+struct inflate_huft_s {
+ union {
+ struct {
+ Byte Exop; /* number of extra bits or operation */
+ Byte Bits; /* number of bits in this code or subcode */
+ } what;
+ uInt pad; /* pad structure to a power of 2 (4 bytes for */
+ } word; /* 16-bit, 8 bytes for 32-bit int's) */
+ uInt base; /* literal, length base, distance base,
+ or table offset */
+};
+
+/* Maximum size of dynamic tree. The maximum found in a long but non-
+ exhaustive search was 1004 huft structures (850 for length/literals
+ and 154 for distances, the latter actually the result of an
+ exhaustive search). The actual maximum is not known, but the
+ value below is more than safe. */
+#define MANY 1440
+
+extern int zlib_inflate_trees_bits OF((
+ uIntf *, /* 19 code lengths */
+ uIntf *, /* bits tree desired/actual depth */
+ inflate_huft * FAR *, /* bits tree result */
+ inflate_huft *, /* space for trees */
+ z_streamp)); /* for messages */
+
+extern int zlib_inflate_trees_dynamic OF((
+ uInt, /* number of literal/length codes */
+ uInt, /* number of distance codes */
+ uIntf *, /* that many (total) code lengths */
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *, /* distance tree result */
+ inflate_huft *, /* space for trees */
+ z_streamp)); /* for messages */
+
+extern int zlib_inflate_trees_fixed OF((
+ uIntf *, /* literal desired/actual bit depth */
+ uIntf *, /* distance desired/actual bit depth */
+ inflate_huft * FAR *, /* literal/length tree result */
+ inflate_huft * FAR *, /* distance tree result */
+ z_streamp)); /* for memory allocation */
+
+#endif /* _INFTREES_H */
--- /dev/null
+/* inflate_util.c -- data and routines common to blocks and codes
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+#include <linux/zutil.h>
+#include "infblock.h"
+#include "inftrees.h"
+#include "infcodes.h"
+#include "infutil.h"
+
+struct inflate_codes_state;
+
+/* And'ing with mask[n] masks the lower n bits */
+uInt zlib_inflate_mask[17] = {
+ 0x0000,
+ 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
+ 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
+};
+
+
+/* copy as much as possible from the sliding window to the output area */
+int zlib_inflate_flush(s, z, r)
+inflate_blocks_statef *s;
+z_streamp z;
+int r;
+{
+ uInt n;
+ Bytef *p;
+ Bytef *q;
+
+ /* local copies of source and destination pointers */
+ p = z->next_out;
+ q = s->read;
+
+ /* compute number of bytes to copy as far as end of window */
+ n = (uInt)((q <= s->write ? s->write : s->end) - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy as far as end of window */
+ memcpy(p, q, n);
+ p += n;
+ q += n;
+
+ /* see if more to copy at beginning of window */
+ if (q == s->end)
+ {
+ /* wrap pointers */
+ q = s->window;
+ if (s->write == s->end)
+ s->write = s->window;
+
+ /* compute bytes to copy */
+ n = (uInt)(s->write - q);
+ if (n > z->avail_out) n = z->avail_out;
+ if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+ /* update counters */
+ z->avail_out -= n;
+ z->total_out += n;
+
+ /* update check information */
+ if (s->checkfn != Z_NULL)
+ z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+ /* copy */
+ memcpy(p, q, n);
+ p += n;
+ q += n;
+ }
+
+ /* update pointers */
+ z->next_out = p;
+ s->read = q;
+
+ /* done */
+ return r;
+}
--- /dev/null
+/* infutil.h -- types and macros common to blocks and codes
+ * Copyright (C) 1995-1998 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+#ifndef _INFUTIL_H
+#define _INFUTIL_H
+
+#include "zconf.h"
+#include "inftrees.h"
+#include "infcodes.h"
+
+typedef enum {
+ TYPE, /* get type bits (3, including end bit) */
+ LENS, /* get lengths for stored */
+ STORED, /* processing stored block */
+ TABLE, /* get table lengths */
+ BTREE, /* get bit lengths tree for a dynamic block */
+ DTREE, /* get length, distance trees for a dynamic block */
+ CODES, /* processing fixed or dynamic block */
+ DRY, /* output remaining window bytes */
+ B_DONE, /* finished last block, done */
+ B_BAD} /* got a data error--stuck here */
+inflate_block_mode;
+
+/* inflate blocks semi-private state */
+struct inflate_blocks_state {
+
+ /* mode */
+ inflate_block_mode mode; /* current inflate_block mode */
+
+ /* mode dependent information */
+ union {
+ uInt left; /* if STORED, bytes left to copy */
+ struct {
+ uInt table; /* table lengths (14 bits) */
+ uInt index; /* index into blens (or border) */
+ uIntf *blens; /* bit lengths of codes */
+ uInt bb; /* bit length tree depth */
+ inflate_huft *tb; /* bit length decoding tree */
+ } trees; /* if DTREE, decoding info for trees */
+ struct {
+ inflate_codes_statef
+ *codes;
+ } decode; /* if CODES, current state */
+ } sub; /* submode */
+ uInt last; /* true if this block is the last block */
+
+ /* mode independent information */
+ uInt bitk; /* bits in bit buffer */
+ uLong bitb; /* bit buffer */
+ inflate_huft *hufts; /* single malloc for tree space */
+ Bytef *window; /* sliding window */
+ Bytef *end; /* one byte after sliding window */
+ Bytef *read; /* window read pointer */
+ Bytef *write; /* window write pointer */
+ check_func checkfn; /* check function */
+ uLong check; /* check on output */
+
+};
+
+
+/* defines for inflate input/output */
+/* update pointers and return */
+#define UPDBITS {s->bitb=b;s->bitk=k;}
+#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
+#define UPDOUT {s->write=q;}
+#define UPDATE {UPDBITS UPDIN UPDOUT}
+#define LEAVE {UPDATE return zlib_inflate_flush(s,z,r);}
+/* get bytes and bits */
+#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
+#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
+#define NEXTBYTE (n--,*p++)
+#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define DUMPBITS(j) {b>>=(j);k-=(j);}
+/* output bytes */
+#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
+#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
+#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
+#define FLUSH {UPDOUT r=zlib_inflate_flush(s,z,r); LOADOUT}
+#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
+#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
+/* load local pointers */
+#define LOAD {LOADIN LOADOUT}
+
+/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
+extern uInt zlib_inflate_mask[17];
+
+/* copy as much as possible from the sliding window to the output area */
+extern int zlib_inflate_flush OF((
+ inflate_blocks_statef *,
+ z_streamp ,
+ int));
+
+/* inflate private state */
+typedef enum {
+ METHOD, /* waiting for method byte */
+ FLAG, /* waiting for flag byte */
+ DICT4, /* four dictionary check bytes to go */
+ DICT3, /* three dictionary check bytes to go */
+ DICT2, /* two dictionary check bytes to go */
+ DICT1, /* one dictionary check byte to go */
+ DICT0, /* waiting for inflateSetDictionary */
+ BLOCKS, /* decompressing blocks */
+ CHECK4, /* four check bytes to go */
+ CHECK3, /* three check bytes to go */
+ CHECK2, /* two check bytes to go */
+ CHECK1, /* one check byte to go */
+ I_DONE, /* finished check, done */
+ I_BAD} /* got an error--stay here */
+inflate_mode;
+
+struct internal_state {
+
+ /* mode */
+ inflate_mode mode; /* current inflate mode */
+
+ /* mode dependent information */
+ union {
+ uInt method; /* if FLAGS, method byte */
+ struct {
+ uLong was; /* computed check value */
+ uLong need; /* stream check value */
+ } check; /* if CHECK, check values to compare */
+ uInt marker; /* if BAD, inflateSync's marker bytes count */
+ } sub; /* submode */
+
+ /* mode independent information */
+ int nowrap; /* flag for no wrapper */
+ uInt wbits; /* log2(window size) (8..15, defaults to 15) */
+ inflate_blocks_statef
+ *blocks; /* current inflate_blocks state */
+
+};
+
+/* inflate codes private state */
+typedef enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
+ START, /* x: set up for LEN */
+ LEN, /* i: get length/literal/eob next */
+ LENEXT, /* i: getting length extra (have base) */
+ DIST, /* i: get distance next */
+ DISTEXT, /* i: getting distance extra */
+ COPY, /* o: copying bytes in window, waiting for space */
+ LIT, /* o: got literal, waiting for output space */
+ WASH, /* o: got eob, possibly still output waiting */
+ END, /* x: got eob and all data flushed */
+ BADCODE} /* x: got error */
+inflate_codes_mode;
+
+struct inflate_codes_state {
+
+ /* mode */
+ inflate_codes_mode mode; /* current inflate_codes mode */
+
+ /* mode dependent information */
+ uInt len;
+ union {
+ struct {
+ inflate_huft *tree; /* pointer into tree */
+ uInt need; /* bits needed */
+ } code; /* if LEN or DIST, where in tree */
+ uInt lit; /* if LIT, literal */
+ struct {
+ uInt get; /* bits to get for extra */
+ uInt dist; /* distance back to copy from */
+ } copy; /* if EXT or COPY, where and how much */
+ } sub; /* submode */
+
+ /* mode independent information */
+ Byte lbits; /* ltree bits decoded per branch */
+  Byte dbits;           /* dtree bits decoded per branch */
+ inflate_huft *ltree; /* literal/length/eob tree */
+ inflate_huft *dtree; /* distance tree */
+
+};
+
+/* memory allocation for inflation */
+
+struct inflate_workspace {
+ inflate_codes_statef working_state;
+ struct inflate_blocks_state working_blocks_state;
+ struct internal_state internal_state;
+ unsigned int tree_work_area_1[19];
+ unsigned int tree_work_area_2[288];
+ unsigned working_blens[258 + 0x1f + 0x1f];
+ inflate_huft working_hufts[MANY];
+ unsigned char working_window[1 << MAX_WBITS];
+};
+
+#define WS(z) ((struct inflate_workspace *)(z->workspace))
+
+#endif
--- /dev/null
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995-1998 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#ifndef _ZCONF_H
+#define _ZCONF_H
+
+#if defined(__GNUC__) || defined(__386__) || defined(i386)
+# ifndef __32BIT__
+# define __32BIT__
+# endif
+#endif
+
+#if defined(__STDC__) || defined(__cplusplus)
+# ifndef STDC
+# define STDC
+# endif
+#endif
+
+/* The memory requirements for deflate are (in bytes):
+ (1 << (windowBits+2)) + (1 << (memLevel+9))
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus a few kilobytes
+ for small objects.
+*/
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# define MAX_MEM_LEVEL 9
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2.
+ * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
+ * created by gzip. (Files created by minigzip can still be extracted by
+ * gzip.)
+ */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+ /* Type declarations */
+
+#ifndef OF /* function prototypes */
+# ifdef STDC
+# define OF(args) args
+# else
+# define OF(args) ()
+# endif
+#endif
+
+#ifndef ZEXPORT
+# define ZEXPORT
+#endif
+#ifndef ZEXPORTVA
+# define ZEXPORTVA
+#endif
+#ifndef ZEXTERN
+# define ZEXTERN extern
+#endif
+#ifndef FAR
+# define FAR
+#endif
+
+typedef unsigned char Byte; /* 8 bits */
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+
+typedef Byte FAR Bytef;
+typedef char FAR charf;
+typedef int FAR intf;
+typedef uInt FAR uIntf;
+typedef uLong FAR uLongf;
+
+typedef void FAR *voidpf;
+typedef void *voidp;
+
+#include <linux/types.h> /* for off_t */
+#include <linux/unistd.h> /* for SEEK_* and off_t */
+#define z_off_t off_t
+
+#endif /* _ZCONF_H */
--- /dev/null
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995-1998 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* @(#) $Id: zutil.h,v 1.1 2000/01/01 03:32:23 davem Exp $ */
+
+#ifndef _Z_UTIL_H
+#define _Z_UTIL_H
+
+#include <linux/zlib.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#ifndef local
+# define local static
+#endif
+/* compile with -Dlocal if your debugger can't find static symbols */
+
+typedef unsigned char uch;
+typedef uch FAR uchf;
+typedef unsigned short ush;
+typedef ush FAR ushf;
+typedef unsigned long ulg;
+
+ /* common constants */
+
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+/* default memLevel */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
+
+ /* target dependencies */
+
+ /* Common defaults */
+
+#ifndef OS_CODE
+# define OS_CODE 0x03 /* assume Unix */
+#endif
+
+ /* functions */
+
+typedef uLong (ZEXPORT *check_func) OF((uLong check, const Bytef *buf,
+ uInt len));
+
+
+ /* checksum functions */
+
+#define BASE 65521L /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
+
+#define DO1(buf,i) {s1 += buf[i]; s2 += s1;}
+#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
+#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
+#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
+#define DO16(buf) DO8(buf,0); DO8(buf,8);
+
+/* ========================================================================= */
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is NULL, this function returns
+ the required initial value for the checksum.
+ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
+ much faster. Usage example:
+
+ uLong adler = adler32(0L, Z_NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ adler = adler32(adler, buffer, length);
+ }
+ if (adler != original_adler) error();
+*/
+static inline uLong zlib_adler32(uLong adler,
+ const Bytef *buf,
+ uInt len)
+{
+ unsigned long s1 = adler & 0xffff;
+ unsigned long s2 = (adler >> 16) & 0xffff;
+ int k;
+
+ if (buf == Z_NULL) return 1L;
+
+ while (len > 0) {
+ k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16) {
+ DO16(buf);
+ buf += 16;
+ k -= 16;
+ }
+ if (k != 0) do {
+ s1 += *buf++;
+ s2 += s1;
+ } while (--k);
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (s2 << 16) | s1;
+}
+
+#endif /* _Z_UTIL_H */
unsigned long flags;
while (all_tasks) {
- current->sigpending = 0;
+ current->work.sigpending = 0;
rpc_killall_tasks(NULL);
__rpc_schedule();
if (all_tasks) {
* Usually rpciod will exit very quickly, so we
* wait briefly before checking the process id.
*/
- current->sigpending = 0;
+ current->work.sigpending = 0;
yield();
/*
* Display a message if we're going to wait longer.
progp->pg_name, proto == IPPROTO_UDP? "udp" : "tcp", port);
if (!port)
- current->sigpending = 0;
+ current->work.sigpending = 0;
for (i = 0; i < progp->pg_nvers; i++) {
if (progp->pg_vers[i] == NULL)
#
-# Extract available help for an option from Configure.help
+# Extract available help for an option from Config.help
# and send it to standard output.
#
# Most of this function was borrowed from the original kernel
# Configure script.
#
function extract_help () {
- if [ -f Documentation/Configure.help ]
- then
- #first escape regexp special characters in the argument:
- var=$(echo "$1"|sed 's/[][\/.^$*]/\\&/g')
- #now pick out the right help text:
- text=$(sed -n "/^$var[ ]*\$/,\${
+ #first escape regexp special characters in the argument:
+ var=$(echo "$1"|sed 's/[][\/.^$*]/\\&/g')
+ #now pick out the right help text:
+ text=$(cat /dev/null $(find . -name Config.help) |
+ sed -n "/^$var[ ]*\$/,\${
/^$var[ ]*\$/c\\
${var}:\\
s/^ //
/<file:\\([^>]*\\)>/s//\\1/g
p
- }" Documentation/Configure.help)
-
- if [ -z "$text" ]
- then
- echo "There is no help available for this kernel option."
- return 1
- else
- echo "$text"
- fi
- else
- echo "There is no help available for this kernel option."
- return 1
- fi
+ }")
+
+ if [ -z "$text" ]
+ then
+ echo "There is no help available for this kernel option."
+ return 1
+ else
+ echo "$text"
+ fi
}
#