SMP-FAQ on the WWW at http://www.irisa.fr/prive/mentre/smp-faq/ .
If you don't know what to do here, say N.
+
+Multiquad support for NUMA systems
+CONFIG_MULTIQUAD
+ This option is used for getting Linux to run on a (IBM/Sequent) NUMA
+ multiquad box. This changes the way that processors are bootstrapped,
+ and uses Clustered Logical APIC addressing mode instead of Flat Logical.
+ You will need a new lynxer.elf file to flash your firmware with - send
+ email to Martin.Bligh@us.ibm.com
IO-APIC Support on Uniprocessors
CONFIG_X86_UP_IOAPIC
here and read Documentation/modules.txt. The module will be called
smc-ircc.o.
+VLSI 82C147 PCI-IrDA Controller Driver
+CONFIG_VLSI_FIR
+ Say Y here if you want to build support for the VLSI 82C147
+ PCI-IrDA Controller. This controller is used by the HP OmniBook 800
+ and 5500 notebooks. The driver provides support for SIR, MIR and
+ FIR (4Mbps) speeds.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. The module will be called vlsi_ir.o.
+
Serial dongle support
CONFIG_DONGLE
Say Y here if you have an infrared device that connects to your
Using JFFS2
-----------
-Using JFFS2 (the Second Journaling Flash File System) is probably the most
+Using JFFS2 (the Second Journalling Flash File System) is probably the most
convenient way to store a writable filesystem into flash. JFFS2 is used in
conjunction with the MTD layer which is responsible for low-level flash
management. More information on the Linux MTD can be found on-line at:
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 11
-EXTRAVERSION =-pre3
+EXTRAVERSION =-pre4
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
echo "#define KERNEL_SIZE `ls -l vmlinux.nh | awk '{print $$5}'`" > $@T
ifdef INITRD
[ -f $(INITRD) ] || exit 1
- echo "#define INITRD_SIZE `ls -l $(INITRD) | awk '{print $$5}'`" >> $@T
+ echo "#define INITRD_IMAGE_SIZE `ls -l $(INITRD) | awk '{print $$5}'`" >> $@T
endif
cmp -s $@T $@ || mv -f $@T $@
rm -f $@T
*/
static long nbytes;
static char envval[256] __attribute__((aligned(8)));
-#ifdef INITRD_SIZE
+#ifdef INITRD_IMAGE_SIZE
static unsigned long initrd_start;
#endif
}
pal_init();
-#ifdef INITRD_SIZE
+#ifdef INITRD_IMAGE_SIZE
/* The initrd must be page-aligned. See below for the
cause of the magic number 5. */
initrd_start = ((START_ADDR + 5*KERNEL_SIZE) | (PAGE_SIZE-1)) + 1;
*
* Sigh... */
-#ifdef INITRD_SIZE
- load(initrd_start, KERNEL_ORIGIN+KERNEL_SIZE, INITRD_SIZE);
+#ifdef INITRD_IMAGE_SIZE
+ load(initrd_start, KERNEL_ORIGIN+KERNEL_SIZE, INITRD_IMAGE_SIZE);
#endif
load(START_ADDR+(4*KERNEL_SIZE), KERNEL_ORIGIN, KERNEL_SIZE);
load(START_ADDR, START_ADDR+(4*KERNEL_SIZE), KERNEL_SIZE);
memset((char*)ZERO_PGE, 0, PAGE_SIZE);
strcpy((char*)ZERO_PGE, envval);
-#ifdef INITRD_SIZE
+#ifdef INITRD_IMAGE_SIZE
((long *)(ZERO_PGE+256))[0] = initrd_start;
- ((long *)(ZERO_PGE+256))[1] = INITRD_SIZE;
+ ((long *)(ZERO_PGE+256))[1] = INITRD_IMAGE_SIZE;
#endif
runkernel();
then
define_bool CONFIG_ALPHA_EV6 y
define_bool CONFIG_ALPHA_TSUNAMI y
+ bool 'EV67 (or later) CPU (speed > 600MHz)?' CONFIG_ALPHA_EV67
fi
if [ "$CONFIG_ALPHA_SHARK" = "y" ]
then
+ define_bool CONFIG_ALPHA_EV6 y
define_bool CONFIG_ALPHA_EV67 y
define_bool CONFIG_ALPHA_TSUNAMI y
fi
if [ "$CONFIG_ALPHA_WILDFIRE" = "y" -o "$CONFIG_ALPHA_TITAN" = "y" ]
then
- define_bool CONFIG_ALPHA_EV6 y
+ define_bool CONFIG_ALPHA_EV6 y
+ define_bool CONFIG_ALPHA_EV67 y
fi
if [ "$CONFIG_ALPHA_RAWHIDE" = "y" ]
then
then
define_bool CONFIG_ALPHA_IRONGATE y
define_bool CONFIG_ALPHA_EV6 y
+ define_bool CONFIG_ALPHA_EV67 y
fi
if [ "$CONFIG_ALPHA_JENSEN" = "y" -o "$CONFIG_ALPHA_MIKASA" = "y" \
wrent(entInt, 0);
alpha_mv.init_irq();
-
- /* If we had wanted SRM console printk echoing early, undo it now. */
- if (alpha_using_srm && srmcons_output) {
- unregister_srm_console();
- }
}
/*
/* Which processor we booted from. */
int boot_cpuid;
-/* Using SRM callbacks for initial console output. This works from
- setup_arch() time through the end of init_IRQ(), as those places
- are under our control.
-
- By default, OFF; set it with a bootcommand arg of "srmcons".
-*/
+/*
+ * Using SRM callbacks for initial console output. This works from
+ * setup_arch() time through the end of time_init(), as those places
+ * are under our (Alpha) control.
+
+ * "srmcons" specified in the boot command arguments allows us to
+ * see kernel messages during the period of time before the true
+ * console device is "registered" during console_init(). As of this
+ * version (2.4.10), time_init() is the last Alpha-specific code
+ * called before console_init(), so we put "unregister" code
+ * there to prevent schizophrenic console behavior later... ;-}
+ *
+ * By default, OFF; set it with a bootcommand arg of "srmcons".
+ */
int srmcons_output = 0;
/* Enforce a memory size limit; useful for testing. By default, none. */
static struct alpha_machine_vector *get_sysvec(long, long, long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
-static void get_sysnames(long, long, char **, char **);
+static void get_sysnames(long, long, long, char **, char **);
static char command_line[COMMAND_LINE_SIZE];
char saved_command_line[COMMAND_LINE_SIZE];
/*
* Indentify and reconfigure for the current system.
*/
+ cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
+
get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
- &type_name, &var_name);
+ cpu->type, &type_name, &var_name);
if (*var_name == '0')
var_name = "";
if (!vec) {
- cpu = (struct percpu_struct*)
- ((char*)hwrpb + hwrpb->processor_offset);
vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
cpu->type);
}
/* Member ID is a bit-field. */
long member = (variation >> 10) & 0x3f;
+ cpu &= 0xffffffff; /* make it usable */
+
switch (type) {
case ST_DEC_ALCOR:
if (member < N(alcor_indices))
case ST_DEC_EB164:
if (member < N(eb164_indices))
vec = eb164_vecs[eb164_indices[member]];
+ /* PC164 may show as EB164 variation with EV56 CPU,
+ but, since no true EB164 had anything but EV5... */
+ if (vec == &eb164_mv && cpu == EV56_CPU)
+ vec = &pc164_mv;
break;
case ST_DEC_EB64P:
if (member < N(eb64p_indices))
vec = tsunami_vecs[tsunami_indices[member]];
break;
case ST_DEC_1000:
- cpu &= 0xffffffff;
if (cpu == EV5_CPU || cpu == EV56_CPU)
vec = &mikasa_primo_mv;
else
vec = &mikasa_mv;
break;
case ST_DEC_NORITAKE:
- cpu &= 0xffffffff;
if (cpu == EV5_CPU || cpu == EV56_CPU)
vec = &noritake_primo_mv;
else
vec = &noritake_mv;
break;
case ST_DEC_2100_A500:
- cpu &= 0xffffffff;
if (cpu == EV5_CPU || cpu == EV56_CPU)
vec = &sable_gamma_mv;
else
}
static void
-get_sysnames(long type, long variation,
+get_sysnames(long type, long variation, long cpu,
char **type_name, char **variation_name)
{
long member;
member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
+ cpu &= 0xffffffff; /* make it usable */
+
switch (type) { /* select by family */
default: /* default to variation "0" for now */
break;
case ST_DEC_EB164:
if (member < N(eb164_indices))
*variation_name = eb164_names[eb164_indices[member]];
+ /* PC164 may show as EB164 variation, but with EV56 CPU,
+ so, since no true EB164 had anything but EV5... */
+ if (eb164_indices[member] == 0 && cpu == EV56_CPU)
+ *variation_name = eb164_names[1]; /* make it PC164 */
break;
case ST_DEC_ALCOR:
if (member < N(alcor_indices))
cpu_name = cpu_names[cpu_index];
get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
- &systype_name, &sysvariation_name);
+ cpu->type, &systype_name, &sysvariation_name);
nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
}
static void __init
-cabriolet_init_irq(void)
+common_init_irq(void (*srm_dev_int)(unsigned long v, struct pt_regs *r))
{
init_i8259a_irqs();
if (alpha_using_srm) {
- alpha_mv.device_interrupt = srm_device_interrupt;
+ alpha_mv.device_interrupt = srm_dev_int;
init_srm_irqs(35, 0);
}
else {
setup_irq(16+4, &isa_cascade_irqaction);
}
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
-static void
-pc164_device_interrupt(unsigned long v, struct pt_regs *r)
+static void __init
+cabriolet_init_irq(void)
{
- /* In theory, the PC164 has the same interrupt hardware as
- the other Cabriolet based systems. However, something
- got screwed up late in the development cycle which broke
- the interrupt masking hardware. Repeat, it is not
- possible to mask and ack interrupts. At all.
+ common_init_irq(srm_device_interrupt);
+}
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
+/* In theory, the PC164 has the same interrupt hardware as the other
+ Cabriolet based systems. However, something got screwed up late
+ in the development cycle which broke the interrupt masking hardware.
+ Repeat, it is not possible to mask and ack interrupts. At all.
- In an attempt to work around this, while processing
- interrupts, we do not allow the IPL to drop below what
- it is currently. This prevents the possibility of
- recursion.
+ In an attempt to work around this, while processing interrupts,
+ we do not allow the IPL to drop below what it is currently. This
+ prevents the possibility of recursion.
- ??? Another option might be to force all PCI devices
- to use edge triggered rather than level triggered
- interrupts. That might be too invasive though. */
+ ??? Another option might be to force all PCI devices to use edge
+ triggered rather than level triggered interrupts. That might be
+ too invasive though. */
+static void
+pc164_srm_device_interrupt(unsigned long v, struct pt_regs *r)
+{
+ __min_ipl = getipl();
+ srm_device_interrupt(v, r);
+ __min_ipl = 0;
+}
+
+static void
+pc164_device_interrupt(unsigned long v, struct pt_regs *r)
+{
__min_ipl = getipl();
cabriolet_device_interrupt(v, r);
__min_ipl = 0;
}
+
+static void __init
+pc164_init_irq(void)
+{
+ common_init_irq(pc164_srm_device_interrupt);
+}
#endif
/*
device_interrupt: pc164_device_interrupt,
init_arch: cia_init_arch,
- init_irq: cabriolet_init_irq,
+ init_irq: pc164_init_irq,
init_rtc: common_init_rtc,
init_pci: alphapc164_init_pci,
pci_map_irq: alphapc164_map_irq,
alpha_mv.init_rtc();
do_get_fast_time = do_gettimeofday;
+
+ /*
+ * If we had wanted SRM console printk echoing early, undo it now.
+ *
+ * "srmcons" specified in the boot command arguments allows us to
+ * see kernel messages during the period of time before the true
+ * console device is "registered" during console_init(). As of this
+ * version (2.4.10), time_init() is the last Alpha-specific code
+ * called before console_init(), so we put this "unregister" code
+ * here to prevent schizophrenic console behavior later... ;-}
+ */
+ if (alpha_using_srm && srmcons_output) {
+ unregister_srm_console();
+ srmcons_output = 0;
+ }
}
/*
static int vidport;
static int lines, cols;
+#ifdef CONFIG_MULTIQUAD
+static void *xquad_portio = NULL;
+#endif
+
#include "../../../../lib/inflate.c"
static void *malloc(int size)
if [ "$CONFIG_X86_UP_IOAPIC" = "y" ]; then
define_bool CONFIG_X86_IO_APIC y
fi
+else
+ bool 'Multiquad NUMA system' CONFIG_MULTIQUAD
fi
if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
# CONFIG_MATH_EMULATION is not set
# CONFIG_MTRR is not set
CONFIG_SMP=y
+# CONFIG_MULTIQUAD is not set
CONFIG_HAVE_DEC_LOCK=y
#
# CONFIG_VFAT_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
# CONFIG_CRAMFS is not set
CONFIG_TMPFS=y
# CONFIG_RAMFS is not set
return maxlvt;
}
-static void clear_local_APIC(void)
+void clear_local_APIC(void)
{
int maxlvt;
unsigned long v;
{
unsigned long value, ver, maxlvt;
+ /* Pound the ESR really hard over the head with a big hammer - mbligh */
+ if (esr_disable) {
+ apic_write(APIC_ESR, 0);
+ apic_write(APIC_ESR, 0);
+ apic_write(APIC_ESR, 0);
+ apic_write(APIC_ESR, 0);
+ }
+
value = apic_read(APIC_LVR);
ver = GET_APIC_VERSION(value);
/*
* Double-check wether this APIC is really registered.
+ * This is meaningless in clustered apic mode, so we skip it.
*/
- if (!test_bit(GET_APIC_ID(apic_read(APIC_ID)), &phys_cpu_present_map))
+ if (!clustered_apic_mode &&
+ !test_bit(GET_APIC_ID(apic_read(APIC_ID)), &phys_cpu_present_map))
BUG();
/*
* document number 292116). So here it goes...
*/
- /*
- * Put the APIC into flat delivery mode.
- * Must be "all ones" explicitly for 82489DX.
- */
- apic_write_around(APIC_DFR, 0xffffffff);
+ if (!clustered_apic_mode) {
+ /*
+ * In clustered apic mode, the firmware does this for us
+ * Put the APIC into flat delivery mode.
+ * Must be "all ones" explicitly for 82489DX.
+ */
+ apic_write_around(APIC_DFR, 0xffffffff);
- /*
- * Set up the logical destination ID.
- */
- value = apic_read(APIC_LDR);
- value &= ~APIC_LDR_MASK;
- value |= (1<<(smp_processor_id()+24));
- apic_write_around(APIC_LDR, value);
+ /*
+ * Set up the logical destination ID.
+ */
+ value = apic_read(APIC_LDR);
+ value &= ~APIC_LDR_MASK;
+ value |= (1<<(smp_processor_id()+24));
+ apic_write_around(APIC_LDR, value);
+ }
/*
* Set Task Priority to 'accept all'. We never change this
value |= APIC_LVT_LEVEL_TRIGGER;
apic_write_around(APIC_LVT1, value);
- if (APIC_INTEGRATED(ver)) { /* !82489DX */
+ if (APIC_INTEGRATED(ver) && !esr_disable) { /* !82489DX */
maxlvt = get_maxlvt();
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
apic_write(APIC_ESR, 0);
value = apic_read(APIC_ESR);
printk("ESR value after enabling vector: %08lx\n", value);
- } else
- printk("No ESR for 82489DX.\n");
+ } else {
+ if (esr_disable)
+ /*
+			 * Something untraceable is creating bad interrupts on
+ * secondary quads ... for the moment, just leave the
+ * ESR disabled - we can't do anything useful with the
+ * errors anyway - mbligh
+ */
+ printk("Leaving ESR disabled.\n");
+ else
+ printk("No ESR for 82489DX.\n");
+ }
if (nmi_watchdog == NMI_LOCAL_APIC)
setup_apic_nmi_watchdog();
}
set_bit(X86_FEATURE_APIC, &boot_cpu_data.x86_capability);
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
- boot_cpu_id = 0;
+ boot_cpu_physical_apicid = 0;
if (nmi_watchdog != NMI_NONE)
nmi_watchdog = NMI_LOCAL_APIC;
* Fetch the APIC ID of the BSP in case we have a
* default configuration (or the MP table is broken).
*/
- if (boot_cpu_id == -1U)
- boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
+ if (boot_cpu_physical_apicid == -1U)
+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
#ifdef CONFIG_X86_IO_APIC
{
/*
* Complain if the BIOS pretends there is one.
*/
- if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_id])) {
+ if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
- boot_cpu_id);
+ boot_cpu_physical_apicid);
return -1;
}
connect_bsp_APIC();
phys_cpu_present_map = 1;
- apic_write_around(APIC_ID, boot_cpu_id);
+ apic_write_around(APIC_ID, boot_cpu_physical_apicid);
apic_pm_init2();
*/
int nr_ioapic_registers[MAX_IO_APICS];
-#if CONFIG_SMP
-# define TARGET_CPUS cpu_online_map
-#else
-# define TARGET_CPUS 0x01
-#endif
/*
* Rough estimation of how many shared IRQs there are, can
* be changed anytime.
memset(&entry,0,sizeof(entry));
entry.delivery_mode = dest_LowestPrio;
- entry.dest_mode = 1; /* logical delivery */
+ entry.dest_mode = INT_DELIVERY_MODE;
entry.mask = 0; /* enable IRQ */
entry.dest.logical.logical_dest = TARGET_CPUS;
* We use logical delivery to get the timer IRQ
* to the first CPU.
*/
- entry.dest_mode = 1; /* logical delivery */
+ entry.dest_mode = INT_DELIVERY_MODE;
entry.mask = 0; /* unmask IRQ now */
entry.dest.logical.logical_dest = TARGET_CPUS;
entry.delivery_mode = dest_LowestPrio;
unsigned char old_id;
unsigned long flags;
+ if (clustered_apic_mode)
+ /* We don't have a good way to do this yet - hack */
+ phys_id_present_map = (u_long) 0xf;
/*
* Set the IOAPIC ID to the value stored in the MPC table.
*/
i);
phys_id_present_map |= 1 << i;
mp_ioapics[apic].mpc_apicid = i;
+ } else {
+ printk("Setting %d in the phys_id_present_map\n", mp_ioapics[apic].mpc_apicid);
+ phys_id_present_map |= 1 << mp_ioapics[apic].mpc_apicid;
}
+
/*
* We need to adjust the IRQ routing table
* if the ID changed.
unsigned long mp_lapic_addr;
/* Processor that is doing the boot up */
-unsigned int boot_cpu_id = -1U;
+unsigned int boot_cpu_physical_apicid = -1U;
/* Internal processor count */
static unsigned int num_processors;
if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
Dprintk(" Bootup CPU\n");
- boot_cpu_id = m->mpc_apicid;
+ boot_cpu_physical_apicid = m->mpc_apicid;
}
+
num_processors++;
if (m->mpc_apicid > MAX_APICS) {
}
ver = m->mpc_apicver;
- phys_cpu_present_map |= 1 << m->mpc_apicid;
+ if (clustered_apic_mode)
+ /* Crude temporary hack. Assumes processors are sequential */
+ phys_cpu_present_map |= 1 << (num_processors-1);
+ else
+ phys_cpu_present_map |= 1 << m->mpc_apicid;
+
/*
* Validate version
*/
}
}
}
+ if (clustered_apic_mode && nr_ioapics > 2) {
+ /* don't initialise IO apics on secondary quads */
+ nr_ioapics = 2;
+ }
if (!num_processors)
printk(KERN_ERR "SMP mptable: no processors registered!\n");
return num_processors;
if ((reboot_cpu == -1) ||
(reboot_cpu > (NR_CPUS -1)) ||
!(phys_cpu_present_map & (1<<cpuid)))
- reboot_cpu = boot_cpu_id;
+ reboot_cpu = boot_cpu_physical_apicid;
reboot_smp = 0; /* use this as a flag to only go through this once*/
/* re-run this function on the other CPUs
struct cpuinfo_x86 *c = cpu_data;
int i, n;
- for (n = 0; n < NR_CPUS; n++, c++) {
+ /*
+ * WARNING - nasty evil hack ... if we print > 8, it overflows the
+ * page buffer and corrupts memory - this needs fixing properly
+ */
+ for (n = 0; n < 8; n++, c++) {
+ /* for (n = 0; n < NR_CPUS; n++, c++) { */
int fpu_exception;
#ifdef CONFIG_SMP
if (!(cpu_online_map & (1<<n)))
return p - buffer;
}
-static unsigned long cpu_initialized __initdata = 0;
+unsigned long cpu_initialized __initdata = 0;
/*
* cpu_init() initializes state that is per-CPU. Some data is already
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
+#include <asm/smpboot.h>
/*
* Some notes on x86 processor bugs affecting SMP operation:
apic_write_around(APIC_ICR, cfg);
}
-static inline void send_IPI_allbutself(int vector)
-{
- /*
- * if there are no other CPUs in the system then
- * we get an APIC send error if we try to broadcast.
- * thus we have to avoid sending IPIs in this case.
- */
- if (smp_num_cpus > 1)
- __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
-}
-
void send_IPI_self(int vector)
{
__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
-static inline void send_IPI_mask(int mask, int vector)
+static inline void send_IPI_mask_bitmask(int mask, int vector)
{
unsigned long cfg;
unsigned long flags;
__save_flags(flags);
__cli();
+
/*
* Wait for idle.
*/
apic_wait_icr_idle();
-
+
/*
* prepare target chip field
*/
cfg = __prepare_ICR2(mask);
apic_write_around(APIC_ICR2, cfg);
-
+
/*
* program the ICR
*/
cfg = __prepare_ICR(0, vector);
-
+
/*
* Send the IPI. The write to APIC_ICR fires this off.
*/
apic_write_around(APIC_ICR, cfg);
+
__restore_flags(flags);
}
+static inline void send_IPI_mask_sequence(int mask, int vector)
+{
+ unsigned long cfg, flags;
+ unsigned int query_cpu, query_mask;
+
+ /*
+ * Hack. The clustered APIC addressing mode doesn't allow us to send
+ * to an arbitrary mask, so I do a unicasts to each CPU instead. This
+	 * to an arbitrary mask, so we do a unicast to each CPU instead. This
+ */
+
+ __save_flags(flags);
+ __cli();
+
+ for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
+ query_mask = 1 << query_cpu;
+ if (query_mask & mask) {
+
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
+ /*
+ * prepare target chip field
+ */
+ cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
+ apic_write_around(APIC_ICR2, cfg);
+
+ /*
+ * program the ICR
+ */
+ cfg = __prepare_ICR(0, vector);
+
+ /*
+ * Send the IPI. The write to APIC_ICR fires this off.
+ */
+ apic_write_around(APIC_ICR, cfg);
+ }
+ }
+ __restore_flags(flags);
+}
+
+static inline void send_IPI_mask(int mask, int vector)
+{
+ if (clustered_apic_mode)
+ send_IPI_mask_sequence(mask, vector);
+ else
+ send_IPI_mask_bitmask(mask, vector);
+}
+
+static inline void send_IPI_allbutself(int vector)
+{
+ /*
+ * if there are no other CPUs in the system then
+ * we get an APIC send error if we try to broadcast.
+ * thus we have to avoid sending IPIs in this case.
+ */
+ if (!(smp_num_cpus > 1))
+ return;
+
+ if (clustered_apic_mode) {
+ // Pointless. Use send_IPI_mask to do this instead
+ int cpu;
+
+ if (smp_num_cpus > 1) {
+ for (cpu = 0; cpu < smp_num_cpus; ++cpu) {
+ if (cpu != smp_processor_id())
+ send_IPI_mask(1 << cpu, vector);
+ }
+ }
+ } else {
+ __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
+ return;
+ }
+}
+
+static inline void send_IPI_all(int vector)
+{
+ if (clustered_apic_mode) {
+ // Pointless. Use send_IPI_mask to do this instead
+ int cpu;
+
+ for (cpu = 0; cpu < smp_num_cpus; ++cpu) {
+ send_IPI_mask(1 << cpu, vector);
+ }
+ } else {
+ __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
+ }
+}
+
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
* Ingo Molnar : various cleanups and rewrites
* Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
* Maciej W. Rozycki : Bits for genuine 82489DX APICs
+ * Martin J. Bligh : Added support for multi-quad systems
*/
#include <linux/config.h>
#include <linux/mc146818rtc.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
+#include <asm/smpboot.h>
/* Set if we find a B stepping CPU */
static int smp_b_stepping;
/* Bitmask of currently online CPUs */
unsigned long cpu_online_map;
-/* which CPU (physical APIC ID) maps to which logical CPU number */
-volatile int x86_apicid_to_cpu[NR_CPUS];
-/* which logical CPU number maps to which CPU (physical APIC ID) */
-volatile int x86_cpu_to_apicid[NR_CPUS];
-
static volatile unsigned long cpu_callin_map;
static volatile unsigned long cpu_callout_map;
* our local APIC. We have to wait for the IPI or we'll
* lock up on an APIC access.
*/
- while (!atomic_read(&init_deasserted));
+ if (!clustered_apic_mode)
+ while (!atomic_read(&init_deasserted));
/*
* (This works even if the APIC is not enabled.)
*/
Dprintk("CALLIN, before setup_local_APIC().\n");
+ /*
+ * Because we use NMIs rather than the INIT-STARTUP sequence to
+	 * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
+ */
+ if (clustered_apic_mode)
+ clear_local_APIC();
setup_local_APIC();
- sti();
+ __sti();
#ifdef CONFIG_MTRR
/*
return do_fork(CLONE_VM|CLONE_PID, 0, ®s, 0);
}
+/* which physical APIC ID maps to which logical CPU number */
+volatile int physical_apicid_2_cpu[MAX_APICID];
+/* which logical CPU number maps to which physical APIC ID */
+volatile int cpu_2_physical_apicid[NR_CPUS];
+
+/* which logical APIC ID maps to which logical CPU number */
+volatile int logical_apicid_2_cpu[MAX_APICID];
+/* which logical CPU number maps to which logical APIC ID */
+volatile int cpu_2_logical_apicid[NR_CPUS];
+
+static inline void init_cpu_to_apicid(void)
+/* Initialize all maps between cpu number and apicids */
+{
+ int apicid, cpu;
+
+ for (apicid = 0; apicid < MAX_APICID; apicid++) {
+ physical_apicid_2_cpu[apicid] = -1;
+ logical_apicid_2_cpu[apicid] = -1;
+ }
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ cpu_2_physical_apicid[cpu] = -1;
+ cpu_2_logical_apicid[cpu] = -1;
+ }
+}
+
+static inline void map_cpu_to_boot_apicid(int cpu, int apicid)
+/*
+ * set up a mapping between cpu and apicid. Uses logical apicids for multiquad,
+ * else physical apic ids
+ */
+{
+ if (clustered_apic_mode) {
+ logical_apicid_2_cpu[apicid] = cpu;
+ cpu_2_logical_apicid[cpu] = apicid;
+ } else {
+ physical_apicid_2_cpu[apicid] = cpu;
+ cpu_2_physical_apicid[cpu] = apicid;
+ }
+}
+
+static inline void unmap_cpu_to_boot_apicid(int cpu, int apicid)
+/*
+ * undo a mapping between cpu and apicid. Uses logical apicids for multiquad,
+ * else physical apic ids
+ */
+{
+ if (clustered_apic_mode) {
+ logical_apicid_2_cpu[apicid] = -1;
+ cpu_2_logical_apicid[cpu] = -1;
+ } else {
+ physical_apicid_2_cpu[apicid] = -1;
+ cpu_2_physical_apicid[cpu] = -1;
+ }
+}
+
#if APIC_DEBUG
static inline void inquire_remote_apic(int apicid)
{
}
#endif
-static void __init do_boot_cpu (int apicid)
+static int wakeup_secondary_via_NMI(int logical_apicid)
+/*
+ * Poke the other CPU in the eye to wake it up. Remember that the normal
+ * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
+ * won't ... remember to clear down the APIC, etc later.
+ */
{
- struct task_struct *idle;
- unsigned long send_status, accept_status, boot_status, maxlvt;
- int timeout, num_starts, j, cpu;
- unsigned long start_eip;
-
- cpu = ++cpucount;
- /*
- * We can't use kernel_thread since we must avoid to
- * reschedule the child.
- */
- if (fork_by_hand() < 0)
- panic("failed fork for CPU %d", cpu);
-
- /*
- * We remove it from the pidhash and the runqueue
- * once we got the process:
- */
- idle = init_task.prev_task;
- if (!idle)
- panic("No idle process for CPU %d", cpu);
-
- idle->processor = cpu;
- x86_cpu_to_apicid[cpu] = apicid;
- x86_apicid_to_cpu[apicid] = cpu;
- idle->has_cpu = 1; /* we schedule the first task manually */
- idle->thread.eip = (unsigned long) start_secondary;
+ unsigned long send_status = 0, accept_status = 0;
+ int timeout, maxlvt;
- del_from_runqueue(idle);
- unhash_process(idle);
- init_tasks[cpu] = idle;
+ /* Target chip */
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));
- /* start_eip had better be page-aligned! */
- start_eip = setup_trampoline();
+ /* Boot on the stack */
+ /* Kick the second */
+ apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);
- /* So we see what's up */
- printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
- stack_start.esp = (void *) (1024 + PAGE_SIZE + (char *)idle);
+ Dprintk("Waiting for send to finish...\n");
+ timeout = 0;
+ do {
+ Dprintk("+");
+ udelay(100);
+ send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+ } while (send_status && (timeout++ < 1000));
/*
- * This grunge runs the startup process for
- * the targeted processor.
+ * Give the other CPU some time to accept the IPI.
*/
-
- atomic_set(&init_deasserted, 0);
-
- Dprintk("Setting warm reset code and vector.\n");
-
- CMOS_WRITE(0xa, 0xf);
- local_flush_tlb();
- Dprintk("1.\n");
- *((volatile unsigned short *) phys_to_virt(0x469)) = start_eip >> 4;
- Dprintk("2.\n");
- *((volatile unsigned short *) phys_to_virt(0x467)) = start_eip & 0xf;
- Dprintk("3.\n");
-
+ udelay(200);
/*
- * Be paranoid about clearing APIC errors.
+ * Due to the Pentium erratum 3AP.
*/
- if (APIC_INTEGRATED(apic_version[apicid])) {
+ maxlvt = get_maxlvt();
+ if (maxlvt > 3) {
apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
- apic_read(APIC_ESR);
}
+ accept_status = (apic_read(APIC_ESR) & 0xEF);
+ Dprintk("NMI sent.\n");
- /*
- * Status is now clean
- */
- send_status = 0;
- accept_status = 0;
- boot_status = 0;
+ if (send_status)
+ printk("APIC never delivered???\n");
+ if (accept_status)
+ printk("APIC delivery error (%lx).\n", accept_status);
- /*
- * Starting actual IPI sequence...
- */
+ return (send_status | accept_status);
+}
+
+static int wakeup_secondary_via_INIT(int phys_apicid, unsigned long start_eip)
+{
+ unsigned long send_status = 0, accept_status = 0;
+ int maxlvt, timeout, num_starts, j;
Dprintk("Asserting INIT.\n");
/*
* Turn INIT on target chip
*/
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/*
* Send IPI
Dprintk("Deasserting INIT.\n");
/* Target chip */
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/* Send IPI */
apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
* Should we send STARTUP IPIs ?
*
* Determine this based on the APIC version.
- * If we don't have an integrated APIC, don't
- * send the STARTUP IPIs.
+ * If we don't have an integrated APIC, don't send the STARTUP IPIs.
*/
- if (APIC_INTEGRATED(apic_version[apicid]))
+ if (APIC_INTEGRATED(apic_version[phys_apicid]))
num_starts = 2;
else
num_starts = 0;
*/
/* Target chip */
- apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
+ apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
/* Boot on the stack */
/* Kick the second */
if (accept_status)
printk("APIC delivery error (%lx).\n", accept_status);
- if (!send_status && !accept_status) {
+ return (send_status | accept_status);
+}
+
+extern unsigned long cpu_initialized;
+
+static void __init do_boot_cpu (int apicid)
+/*
+ * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
+ * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
+ */
+{
+ struct task_struct *idle;
+ unsigned long boot_error = 0;
+ int timeout, cpu;
+ unsigned long start_eip;
+ unsigned short nmi_high, nmi_low;
+
+ cpu = ++cpucount;
+ /*
+ * We can't use kernel_thread since we must avoid to
+ * reschedule the child.
+ */
+ if (fork_by_hand() < 0)
+ panic("failed fork for CPU %d", cpu);
+
+ /*
+ * We remove it from the pidhash and the runqueue
+ * once we got the process:
+ */
+ idle = init_task.prev_task;
+ if (!idle)
+ panic("No idle process for CPU %d", cpu);
+
+ idle->processor = cpu;
+
+ map_cpu_to_boot_apicid(cpu, apicid);
+
+ idle->has_cpu = 1; /* we schedule the first task manually */
+ idle->thread.eip = (unsigned long) start_secondary;
+
+ del_from_runqueue(idle);
+ unhash_process(idle);
+ init_tasks[cpu] = idle;
+
+ /* start_eip had better be page-aligned! */
+ start_eip = setup_trampoline();
+
+ /* So we see what's up */
+ printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
+ stack_start.esp = (void *) (1024 + PAGE_SIZE + (char *)idle);
+
+ /*
+ * This grunge runs the startup process for
+ * the targeted processor.
+ */
+
+ atomic_set(&init_deasserted, 0);
+
+ Dprintk("Setting warm reset code and vector.\n");
+
+ if (clustered_apic_mode) {
+ /* stash the current NMI vector, so we can put things back */
+ nmi_high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
+ nmi_low = *((volatile unsigned short *) TRAMPOLINE_LOW);
+ }
+
+ CMOS_WRITE(0xa, 0xf);
+ local_flush_tlb();
+ Dprintk("1.\n");
+ *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4;
+ Dprintk("2.\n");
+ *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
+ Dprintk("3.\n");
+
+ /*
+ * Be paranoid about clearing APIC errors.
+ */
+ if (!clustered_apic_mode && APIC_INTEGRATED(apic_version[apicid])) {
+ apic_read_around(APIC_SPIV);
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
+ }
+
+ /*
+ * Status is now clean
+ */
+ boot_error = 0;
+
+ /*
+ * Starting actual IPI sequence...
+ */
+
+ if (clustered_apic_mode)
+ boot_error = wakeup_secondary_via_NMI(apicid);
+ else
+ boot_error = wakeup_secondary_via_INIT(apicid, start_eip);
+
+ if (!boot_error) {
/*
* allow APs to start initializing.
*/
print_cpu_info(&cpu_data[cpu]);
Dprintk("CPU has booted.\n");
} else {
- boot_status = 1;
+ boot_error= 1;
if (*((volatile unsigned char *)phys_to_virt(8192))
== 0xA5)
/* trampoline started but...? */
/* trampoline code not run */
printk("Not responding.\n");
#if APIC_DEBUG
- inquire_remote_apic(apicid);
+ if (!clustered_apic_mode)
+ inquire_remote_apic(apicid);
#endif
}
}
- if (send_status || accept_status || boot_status) {
- x86_cpu_to_apicid[cpu] = -1;
- x86_apicid_to_cpu[apicid] = -1;
+ if (boot_error) {
+ /* Try to put things back the way they were before ... */
+ unmap_cpu_to_boot_apicid(cpu, apicid);
+ clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
+ clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
+ clear_bit(cpu, &cpu_online_map); /* was set in smp_callin() */
cpucount--;
}
/* mark "stuck" area as not stuck */
*((volatile unsigned long *)phys_to_virt(8192)) = 0;
+
+ if(clustered_apic_mode) {
+ printk("Restoring NMI vector\n");
+ *((volatile unsigned short *) TRAMPOLINE_HIGH) = nmi_high;
+ *((volatile unsigned short *) TRAMPOLINE_LOW) = nmi_low;
+ }
}
cycles_t cacheflush_time;
extern int prof_old_multiplier[NR_CPUS];
extern int prof_counter[NR_CPUS];
+static int boot_cpu_logical_apicid;
+/* Where the IO area was mapped on multiquad, always 0 otherwise */
+void *xquad_portio = NULL;
+
void __init smp_boot_cpus(void)
{
- int apicid, cpu;
+ int apicid, cpu, bit;
+
+ if (clustered_apic_mode) {
+ /* remap the 1st quad's 256k range for cross-quad I/O */
+ xquad_portio = ioremap (XQUAD_PORTIO_BASE, XQUAD_PORTIO_LEN);
+ printk("Cross quad port I/O vaddr 0x%08lx, len %08lx\n",
+ (u_long) xquad_portio, (u_long) XQUAD_PORTIO_LEN);
+ }
#ifdef CONFIG_MTRR
/* Must be done before other processors booted */
* and the per-CPU profiling counter/multiplier
*/
- for (apicid = 0; apicid < NR_CPUS; apicid++) {
- x86_apicid_to_cpu[apicid] = -1;
- prof_counter[apicid] = 1;
- prof_old_multiplier[apicid] = 1;
- prof_multiplier[apicid] = 1;
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ prof_counter[cpu] = 1;
+ prof_old_multiplier[cpu] = 1;
+ prof_multiplier[cpu] = 1;
}
+ init_cpu_to_apicid();
+
/*
* Setup boot CPU information
*/
* We have the boot CPU online for sure.
*/
set_bit(0, &cpu_online_map);
- x86_apicid_to_cpu[boot_cpu_id] = 0;
- x86_cpu_to_apicid[0] = boot_cpu_id;
+ boot_cpu_logical_apicid = logical_smp_processor_id();
+ map_cpu_to_boot_apicid(0, boot_cpu_apicid);
+
global_irq_holder = 0;
current->processor = 0;
init_idle();
/*
* Should not be necessary because the MP table should list the boot
* CPU too, but we do it for the sake of robustness anyway.
+ * Makes no sense to do this check in clustered apic mode, so skip it
*/
- if (!test_bit(boot_cpu_id, &phys_cpu_present_map)) {
+ if (!clustered_apic_mode &&
+ !test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map)) {
printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
- boot_cpu_id);
+ boot_cpu_physical_apicid);
phys_cpu_present_map |= (1 << hard_smp_processor_id());
}
/*
* If we couldn't find a local APIC, then get out of here now!
*/
- if (APIC_INTEGRATED(apic_version[boot_cpu_id]) &&
+ if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
!test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability)) {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
- boot_cpu_id);
+ boot_cpu_physical_apicid);
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
#ifndef CONFIG_VISWS
io_apic_irqs = 0;
connect_bsp_APIC();
setup_local_APIC();
- if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id)
+ if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid)
BUG();
/*
- * Now scan the CPU present map and fire up the other CPUs.
+ * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
+ *
+	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
+ * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
+ * clustered apic ID.
*/
Dprintk("CPU present map: %lx\n", phys_cpu_present_map);
- for (apicid = 0; apicid < NR_CPUS; apicid++) {
+ for (bit = 0; bit < NR_CPUS; bit++) {
+ apicid = cpu_present_to_apicid(bit);
/*
* Don't even attempt to start the boot CPU!
*/
- if (apicid == boot_cpu_id)
+ if (apicid == boot_cpu_apicid)
continue;
- if (!(phys_cpu_present_map & (1 << apicid)))
+ if (!(phys_cpu_present_map & (1 << bit)))
continue;
if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
continue;
/*
* Make sure we unmap all failed CPUs
*/
- if ((x86_apicid_to_cpu[apicid] == -1) &&
- (phys_cpu_present_map & (1 << apicid)))
- printk("phys CPU #%d not responding - cannot use it.\n",apicid);
+ if ((boot_apicid_to_cpu(apicid) == -1) &&
+ (phys_cpu_present_map & (1 << bit)))
+ printk("CPU #%d not responding - cannot use it.\n",
+ apicid);
}
/*
ENTRY(trampoline_data)
r_base = .
-
+#ifdef CONFIG_MULTIQUAD
+ wbinvd
+#endif /* CONFIG_MULTIQUAD */
mov %cs, %ax # Code and data in the same place
mov %ax, %ds
-# $Id: Config.in,v 1.66 2001/05/07 21:00:43 dwmw2 Exp $
+# $Id: Config.in,v 1.70 2001/08/11 16:13:38 dwmw2 Exp $
mainmenu_option next_comment
comment 'Memory Technology Devices (MTD)'
dep_tristate ' MTD partitioning support' CONFIG_MTD_PARTITIONS $CONFIG_MTD
dep_tristate ' RedBoot partition table parsing' CONFIG_MTD_REDBOOT_PARTS $CONFIG_MTD_PARTITIONS
dep_tristate ' Compaq bootldr partition table parsing' CONFIG_MTD_BOOTLDR_PARTS $CONFIG_MTD_PARTITIONS
+ dep_tristate ' ARM Firmware Suite partition parsing' CONFIG_MTD_AFS_PARTS $CONFIG_MTD_PARTITIONS
comment 'User Modules And Translation Layers'
dep_tristate ' Direct char device access to MTD devices' CONFIG_MTD_CHAR $CONFIG_MTD
# Note 2! The CFLAGS definitions are now inherited from the
# parent makes..
#
-# $Id: Makefile,v 1.60 2001/05/31 20:43:18 dwmw2 Exp $
+# $Id: Makefile,v 1.63 2001/06/13 09:43:07 dwmw2 Exp $
obj-y += chips/chipslink.o maps/mapslink.o \
O_TARGET := mtdlink.o
-export-objs := mtdcore.o mtdpart.o redboot.o bootldr.o
+export-objs := mtdcore.o mtdpart.o redboot.o bootldr.o afs.o
list-multi := nftl.o
mod-subdirs :=
obj-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_BOOTLDR_PARTS) += bootldr.o
+obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
--- /dev/null
+/*======================================================================
+
+ drivers/mtd/afs.c: ARM Flash Layout/Partitioning
+
+ Copyright (C) 2000 ARM Limited
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ This is access code for flashes using ARM's flash partitioning
+ standards.
+
+ $Id: afs.c,v 1.6 2001/10/02 10:04:51 rmk Exp $
+
+======================================================================*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+/*
+ * AFS footer written at the end of an image's last erase block; it
+ * locates the image data and its image_info_struct in flash.
+ */
+struct footer_struct {
+	u32 image_info_base;	/* Address of first word of ImageFooter  */
+	u32 image_start;	/* Start of area reserved by this footer */
+	u32 signature;		/* 'Magic' number proves it's a footer   */
+	u32 type;		/* Area type: ARM Image, SIB, customer   */
+	u32 checksum;		/* Just this structure                   */
+};
+
+/*
+ * Per-image descriptor pointed to by footer_struct.image_info_base;
+ * carries load/exec addresses, length and the (NUL-terminated) name.
+ */
+struct image_info_struct {
+	u32 bootFlags;		/* Boot flags, compression etc.          */
+	u32 imageNumber;	/* Unique number, selects for boot etc.  */
+	u32 loadAddress;	/* Address program should be loaded to   */
+	u32 length;		/* Actual size of image                  */
+	u32 address;		/* Image is executed from here           */
+	char name[16];		/* Null terminated                       */
+	u32 headerBase;		/* Flash Address of any stripped header  */
+	u32 header_length;	/* Length of header in memory            */
+	u32 headerType;		/* AIF, RLF, s-record etc.               */
+	u32 checksum;		/* Image checksum (inc. this struct)     */
+};
+
+/*
+ * Read and sanity-check the AFS footer at the end of the erase block
+ * starting at 'off'.  On return, *img_start holds the (masked) flash
+ * offset of the image data and *iis_start that of its
+ * image_info_struct.  Returns 0 when a plausible footer is present,
+ * 1 when the block carries no footer, or a negative errno if the MTD
+ * read fails.
+ */
+static int
+afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
+		u_int off, u_int mask)
+{
+	struct footer_struct fs;
+	u_int ptr = off + mtd->erasesize - sizeof(fs);
+	size_t sz;
+	int ret;
+
+	ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
+	/* Treat a short read as an error even if the driver said OK. */
+	if (ret >= 0 && sz != sizeof(fs))
+		ret = -EINVAL;
+
+	if (ret < 0) {
+		printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+			ptr, ret);
+		return ret;
+	}
+
+	/*
+	 * Does it contain the magic number?
+	 */
+	if (fs.signature != 0xa0ffff9f)
+		ret = 1;
+
+	*iis_start = fs.image_info_base & mask;
+	*img_start = fs.image_start & mask;
+
+	/*
+	 * Check the image info base.  This can not
+	 * be located after the footer structure.
+	 */
+	if (*iis_start >= ptr)
+		ret = 1;
+
+	/*
+	 * Check the start of this image.  The image
+	 * data can not be located after this block.
+	 */
+	if (*img_start > off)
+		ret = 1;
+
+	return ret;
+}
+
+/*
+ * Read the image_info_struct at flash offset 'ptr' into *iis.  The
+ * buffer is zeroed first so a failed or short read leaves defined
+ * contents.  Returns the MTD read status (negative errno on failure,
+ * -EINVAL on a short read).
+ */
+static int
+afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
+{
+	size_t sz;
+	int ret;
+
+	memset(iis, 0, sizeof(*iis));
+	ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
+	if (ret >= 0 && sz != sizeof(*iis))
+		ret = -EINVAL;
+	if (ret < 0)
+		printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+			ptr, ret);
+
+	return ret;
+}
+
+/*
+ * Scan an MTD device block-by-block for AFS footers and build an
+ * mtd_partition array describing the images found.  On success *pparts
+ * points at a single kmalloc'd allocation holding both the partition
+ * array and the name strings (caller frees), and the partition count
+ * is returned.  Returns 0 when no partitions are present, or a
+ * negative errno on read/allocation failure.
+ *
+ * Fix vs. previous version: afs_read_footer()'s "no footer here"
+ * sentinel (1) could leak out as the return value when the final
+ * block had no footer, making callers believe one partition was
+ * found while *pparts was NULL/unset.
+ */
+int parse_afs_partitions(struct mtd_info *mtd, struct mtd_partition **pparts)
+{
+	struct mtd_partition *parts;
+	u_int mask, off, idx, sz;
+	int ret = 0;
+	char *str;
+
+	/*
+	 * This is the address mask; we use this to mask off out of
+	 * range address bits.  (Assumes a power-of-two device size.)
+	 */
+	mask = mtd->size - 1;
+
+	/*
+	 * First, calculate the size of the array we need for the
+	 * partition information.  We include in this the size of
+	 * the strings.
+	 */
+	for (idx = off = sz = 0; off < mtd->size; off += mtd->erasesize) {
+		struct image_info_struct iis;
+		u_int iis_ptr, img_ptr;
+
+		ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask);
+		if (ret < 0)
+			break;
+		if (ret == 1)
+			continue;
+
+		ret = afs_read_iis(mtd, &iis, iis_ptr);
+		if (ret < 0)
+			break;
+
+		sz += sizeof(struct mtd_partition);
+		sz += strlen(iis.name) + 1;
+		idx += 1;
+	}
+
+	if (!sz) {
+		/* Never return the '1' sentinel as a partition count. */
+		return ret < 0 ? ret : 0;
+	}
+
+	parts = kmalloc(sz, GFP_KERNEL);
+	if (!parts)
+		return -ENOMEM;
+
+	/* Name strings live directly after the partition array. */
+	str = (char *)(parts + idx);
+
+	/*
+	 * Identify the partitions
+	 */
+	for (idx = off = 0; off < mtd->size; off += mtd->erasesize) {
+		struct image_info_struct iis;
+		u_int iis_ptr, img_ptr, size;
+
+		/* Read the footer. */
+		ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask);
+		if (ret < 0)
+			break;
+		if (ret == 1)
+			continue;
+
+		/* Read the image info block */
+		ret = afs_read_iis(mtd, &iis, iis_ptr);
+		if (ret < 0)
+			break;
+
+		strcpy(str, iis.name);
+		size = mtd->erasesize + off - img_ptr;
+
+		/*
+		 * In order to support JFFS2 partitions on this layout,
+		 * we must lie to MTD about the real size of JFFS2
+		 * partitions; this ensures that the AFS flash footer
+		 * won't be erased by JFFS2.  Please ensure that your
+		 * JFFS2 partitions are given image numbers between
+		 * 1000 and 2000 inclusive.
+		 */
+		if (iis.imageNumber >= 1000 && iis.imageNumber < 2000)
+			size -= mtd->erasesize;
+
+		parts[idx].name = str;
+		parts[idx].size = size;
+		parts[idx].offset = img_ptr;
+		parts[idx].mask_flags = 0;
+
+		printk(" mtd%d: at 0x%08x, %5dKB, %8u, %s\n",
+		       idx, img_ptr, parts[idx].size / 1024,
+		       iis.imageNumber, str);
+
+		idx += 1;
+		str = str + strlen(iis.name) + 1;
+	}
+
+	if (!idx) {
+		kfree(parts);
+		parts = NULL;
+	}
+
+	*pparts = parts;
+	/* As above: map the '1' sentinel to "no partitions found". */
+	return idx ? idx : (ret < 0 ? ret : 0);
+}
+
+EXPORT_SYMBOL(parse_afs_partitions);
+
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
+MODULE_LICENSE("GPL");
*
* Copyright 2001 Compaq Computer Corporation.
*
- * $Id: bootldr.c,v 1.4 2001/06/02 18:24:27 nico Exp $
+ * $Id: bootldr.c,v 1.6 2001/10/02 15:05:11 dwmw2 Exp $
*
* Use consistent with the GNU GPL is permitted,
* provided that this copyright notice is
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <asm/setup.h>
+#include <linux/bootmem.h>
#define FLASH_PARTITION_NAMELEN 32
enum LFR_FLAGS {
LFR_EXPAND = 8 /* expand partition size to fit rest of flash */
};
+// the tags are parsed too early in boot for kmalloc/alloc_bootmem to be
+// available, so use a fixed-size static table for now
+#define MAX_NUM_PARTITIONS 8
typedef struct FlashRegion {
char name[FLASH_PARTITION_NAMELEN];
unsigned long base;
typedef struct BootldrFlashPartitionTable {
int magic; /* should be filled with 0x646c7470 (btlp) BOOTLDR_PARTITION_MAGIC */
int npartitions;
- struct FlashRegion partition[0];
+ struct FlashRegion partition[8];
} BootldrFlashPartitionTable;
#define BOOTLDR_MAGIC 0x646c7462 /* btld: marks a valid bootldr image */
#define BOOTCAP_PARTITIONS (1<<1) /* partition table stored in params sector */
#define BOOTCAP_PARAMS_AFTER_BOOTLDR (1<<2) /* params sector right after bootldr sector(s), else in last sector */
+static struct BootldrFlashPartitionTable Table;
+static struct BootldrFlashPartitionTable *partition_table = NULL;
+
+
int parse_bootldr_partitions(struct mtd_info *master, struct mtd_partition **pparts)
{
struct mtd_partition *parts;
long bootmagic = 0;
long bootcap = 0;
int namelen = 0;
- struct BootldrFlashPartitionTable *partition_table = NULL;
+
char *names;
+#if 0
/* verify bootldr magic */
ret = master->read(master, BOOTLDR_MAGIC_OFFSET, sizeof(long), &retlen, (void *)&bootmagic);
if (ret)
partition_table_offset = master->size - master->erasesize;
printk(__FUNCTION__ ": partition_table_offset=%#lx\n", partition_table_offset);
+ printk(__FUNCTION__ ": ptable_addr=%#lx\n", ptable_addr);
+
/* Read the partition table */
partition_table = (struct BootldrFlashPartitionTable *)kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!partition_table)
return -ENOMEM;
+
ret = master->read(master, partition_table_offset,
PAGE_SIZE, &retlen, (void *)partition_table);
if (ret)
- goto out;
+ goto out;
+
+#endif
+ if (!partition_table)
+ return -ENOMEM;
+
printk(__FUNCTION__ ": magic=%#x\n", partition_table->magic);
+ printk(__FUNCTION__ ": numPartitions=%#x\n", partition_table->npartitions);
+
/* check for partition table magic number */
if (partition_table->magic != BOOTLDR_PARTITION_MAGIC)
goto out;
- npartitions = partition_table->npartitions;
+ npartitions = (partition_table->npartitions > MAX_NUM_PARTITIONS)?
+ MAX_NUM_PARTITIONS:partition_table->npartitions;
printk(__FUNCTION__ ": npartitions=%#x\n", npartitions);
names = (char *)&parts[npartitions];
memset(parts, 0, sizeof(*parts)*npartitions + namelen);
+
+
+ // from here we use the partition table
for (i = 0; i < npartitions; i++) {
struct FlashRegion *partition = &partition_table->partition[i];
const char *name = partition->name;
*pparts = parts;
out:
+#if 0
if (partition_table)
kfree(partition_table);
+#endif
+
return ret;
}
+
+/*
+ * Early boot-tag parser: the bootldr hands us the flash partition
+ * table in an ATAG.  This runs before kmalloc/alloc_bootmem work, so
+ * the table is copied into the static 'Table', clamped to
+ * MAX_NUM_PARTITIONS entries so a bogus tag cannot overflow it.
+ *
+ * Fixes vs. previous version: the memcpy length used
+ * sizeof(partition_table) -- the size of a *pointer*, not of the
+ * table header -- so the copy was 4 bytes short; and the partition
+ * count was not bounded against the static table's capacity.
+ */
+static int __init parse_tag_ptable(const struct tag *tag)
+{
+	char buf[128];
+	int i;
+	int n;
+
+	partition_table = &Table;
+
+#ifdef CONFIG_DEBUG_LL
+	sprintf(buf,"ptable: magic = 0x%lx npartitions= %d \n",
+		tag->u.ptable.magic,tag->u.ptable.npartitions);
+	printascii(buf);
+
+	for (i=0; i<tag->u.ptable.npartitions; i++){
+		sprintf(buf,"ptable: partition name = %s base= 0x%lx size= 0x%lx flags= 0x%lx\n",
+			(char *) (&tag->u.ptable.partition[i].name[0]),
+			tag->u.ptable.partition[i].base,
+			tag->u.ptable.partition[i].size,
+			tag->u.ptable.partition[i].flags);
+		printascii(buf);
+	}
+#endif
+
+	/* Clamp to the static table's capacity before copying. */
+	n = tag->u.ptable.npartitions;
+	if (n > MAX_NUM_PARTITIONS)
+		n = MAX_NUM_PARTITIONS;
+
+	/* Copy the header fields and partition entries explicitly. */
+	partition_table->magic = tag->u.ptable.magic;
+	partition_table->npartitions = n;
+	memcpy(partition_table->partition, &tag->u.ptable.partition[0],
+	       sizeof(struct FlashRegion) * n);
+
+	return 0;
+}
+
+__tagtable(ATAG_PTABLE, parse_tag_ptable);
+
EXPORT_SYMBOL(parse_bootldr_partitions);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Compaq Computer Corporation");
+MODULE_DESCRIPTION("Parsing code for Compaq bootldr partitions");
# drivers/mtd/chips/Config.in
-# $Id: Config.in,v 1.4 2001/05/14 09:48:12 dwmw2 Exp $
+# $Id: Config.in,v 1.12 2001/09/23 15:35:21 dwmw2 Exp $
mainmenu_option next_comment
comment 'RAM/ROM/Flash chip drivers'
-dep_tristate ' Common Flash Interface (CFI) support' CONFIG_MTD_CFI $CONFIG_MTD
-if [ "$CONFIG_MTD_CFI" = "y" -o "$CONFIG_MTD_CFI" = "m" ]; then
- bool ' CFI Virtual erase regions (EXPERIMENTAL)' CONFIG_MTD_CFI_VIRTUAL_ER
- bool ' CFI Advanced configuration options' CONFIG_MTD_CFI_ADV_OPTIONS
+dep_tristate ' Detect flash chips by Common Flash Interface (CFI) probe' CONFIG_MTD_CFI $CONFIG_MTD
+#dep_tristate ' Detect non-CFI Intel-compatible flash chips' CONFIG_MTD_INTELPROBE $CONFIG_MTD
+dep_tristate ' Detect non-CFI AMD/JEDEC-compatible flash chips' CONFIG_MTD_JEDECPROBE $CONFIG_MTD
+
+if [ "$CONFIG_MTD_CFI" = "y" -o "$CONFIG_MTD_INTELPROBE" = "y" -o "$CONFIG_MTD_JEDECPROBE" = "y" ]; then
+ define_bool CONFIG_MTD_GEN_PROBE y
+else
+ if [ "$CONFIG_MTD_CFI" = "m" -o "$CONFIG_MTD_INTELPROBE" = "m" -o "$CONFIG_MTD_JEDECPROBE" = "m" ]; then
+ define_bool CONFIG_MTD_GEN_PROBE m
+ else
+ define_bool CONFIG_MTD_GEN_PROBE n
+ fi
+fi
+if [ "$CONFIG_MTD_GEN_PROBE" = "y" -o "$CONFIG_MTD_GEN_PROBE" = "m" ]; then
+ bool ' Flash chip driver advanced configuration options' CONFIG_MTD_CFI_ADV_OPTIONS
if [ "$CONFIG_MTD_CFI_ADV_OPTIONS" = "y" ]; then
choice 'Flash cmd/query data swapping' \
"NO CONFIG_MTD_CFI_NOSWAP \
BIG_ENDIAN_BYTE CONFIG_MTD_CFI_BE_BYTE_SWAP \
- LITTLE_ENDIAN_BYTE CONFIG_MTD_CFI_LE_BYTE_SWAP \
- LART_ENDIAN_BYTE CONFIG_MTD_CFI_LART_BIT_SWAP" NO
+ LITTLE_ENDIAN_BYTE CONFIG_MTD_CFI_LE_BYTE_SWAP" NO
bool ' Specific CFI Flash geometry selection' CONFIG_MTD_CFI_GEOMETRY
if [ "$CONFIG_MTD_CFI_GEOMETRY" = "y" ]; then
bool ' Support 8-bit buswidth' CONFIG_MTD_CFI_B1
fi
fi
fi
-dep_tristate ' CFI support for Intel/Sharp Basic/Extended Commands' CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_CFI
-dep_tristate ' CFI support for AMD/Fujitsu Standard Commands' CONFIG_MTD_CFI_AMDSTD $CONFIG_MTD_CFI
-dep_tristate ' AMD compatible flash chip support (non-CFI)' CONFIG_MTD_AMDSTD $CONFIG_MTD
-dep_tristate ' pre-CFI Sharp chip support' CONFIG_MTD_SHARP $CONFIG_MTD
+dep_tristate ' Support for Intel/Sharp flash chips' CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_GEN_PROBE
+dep_tristate ' Support for AMD/Fujitsu flash chips' CONFIG_MTD_CFI_AMDSTD $CONFIG_MTD_GEN_PROBE
+
dep_tristate ' Support for RAM chips in bus mapping' CONFIG_MTD_RAM $CONFIG_MTD
dep_tristate ' Support for ROM chips in bus mapping' CONFIG_MTD_ROM $CONFIG_MTD
-dep_tristate ' JEDEC device support' CONFIG_MTD_JEDEC $CONFIG_MTD
+dep_tristate ' Support for absent chips in bus mapping' CONFIG_MTD_ABSENT $CONFIG_MTD
+
+bool ' Older (theoretically obsoleted now) drivers for non-CFI chips' CONFIG_MTD_OBSOLETE_CHIPS
+dep_tristate ' AMD compatible flash chip support (non-CFI)' CONFIG_MTD_AMDSTD $CONFIG_MTD $CONFIG_MTD_OBSOLETE_CHIPS
+dep_tristate ' pre-CFI Sharp chip support' CONFIG_MTD_SHARP $CONFIG_MTD $CONFIG_MTD_OBSOLETE_CHIPS
+dep_tristate ' JEDEC device support' CONFIG_MTD_JEDEC $CONFIG_MTD $CONFIG_MTD_OBSOLETE_CHIPS
+
endmenu
#
# linux/drivers/chips/Makefile
#
-# $Id: Makefile,v 1.4 2001/06/09 19:57:57 dwmw2 Exp $
+# $Id: Makefile,v 1.6 2001/09/02 18:57:01 dwmw2 Exp $
O_TARGET := chipslink.o
obj-$(CONFIG_MTD) += chipreg.o
obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
-obj-$(CONFIG_MTD_CFI) += cfi_probe.o cfi_jedec.o
+obj-$(CONFIG_MTD_CFI) += cfi_probe.o
obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
+obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
+obj-$(CONFIG_MTD_INTELPROBE) += intel_probe.o
obj-$(CONFIG_MTD_JEDEC) += jedec.o
+obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
obj-$(CONFIG_MTD_RAM) += map_ram.o
obj-$(CONFIG_MTD_ROM) += map_rom.o
obj-$(CONFIG_MTD_SHARP) += sharp.o
+obj-$(CONFIG_MTD_ABSENT) += map_absent.o
include $(TOPDIR)/Rules.make
*
* Author: Jonas Holmberg <jonas.holmberg@axis.com>
*
- * $Id: amd_flash.c,v 1.8 2001/06/02 14:47:16 dwmw2 Exp $
+ * $Id: amd_flash.c,v 1.15 2001/10/02 15:05:11 dwmw2 Exp $
*
* Copyright (c) 2001 Axis Communications AB
*
/* Addresses */
#define ADDR_MANUFACTURER 0x0000
#define ADDR_DEVICE_ID 0x0001
+#define ADDR_SECTOR_LOCK 0x0002
+#define ADDR_HANDSHAKE 0x0003
#define ADDR_UNLOCK_1 0x0555
#define ADDR_UNLOCK_2 0x02AA
#define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030
+#define CMD_UNLOCK_SECTOR 0x0060
+
/* Manufacturers */
#define MANUFACTURER_AMD 0x0001
#define MANUFACTURER_FUJITSU 0x0004
#define AM29LV800BT 0x22DA
#define AM29LV160DT 0x22C4
#define AM29LV160DB 0x2249
+#define AM29BDS323D 0x22D1
+#define AM29BDS643D 0x227E
+
/* Fujitsu */
#define MBM29LV160TE 0x22C4
(wide_read(map, addr) & D6_MASK));
}
+/*
+ * Issue the AMD 3-cycle sector (un)lock command sequence for the
+ * sector at 'sect_addr'.  Address line A6 encodes the direction:
+ * set for unlock, clear for lock.  The chip is reset first.
+ */
+static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
+				 int unlock)
+{
+	/* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
+	int SLA = unlock ?
+		(sect_addr |  (0x40 * map->buswidth)) :
+		(sect_addr & ~(0x40 * map->buswidth)) ;
+
+	__u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
+
+	wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
+	wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
+	wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
+	wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
+}
+
+/*
+ * Query the lock status of the sector at 'sect_addr' via the chip's
+ * autoselect mode.  Returns the raw status word: 0 for unlocked,
+ * non-zero (1) for locked.  The chip is reset back to read mode
+ * before returning.
+ */
+static inline int is_sector_locked(struct map_info *map,
+				   unsigned long sect_addr)
+{
+	int status;
+
+	wide_write(map, CMD_RESET_DATA, 0);
+	send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
+
+	/* status is 0x0000 for unlocked and 0x0001 for locked */
+	status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
+	wide_write(map, CMD_RESET_DATA, 0);
+	return status;
+}
+
+/*
+ * (Un)lock every sector overlapping the byte range [ofs, ofs + len).
+ * Each affected sector's lock status is verified afterwards; returns
+ * 0 on success or -1 if any sector failed to reach the requested
+ * state (remaining sectors are still processed).
+ *
+ * Fixes vs. previous version: the "Cannot unlock" printk used the
+ * bogus conversion "%xx" (printing a stray 'x'), and the region-skip
+ * test compared 'ofs' against the end of the region's *first block*
+ * only, wrongly skipping multi-block regions whose later blocks
+ * contain 'ofs'.
+ */
+static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
+			       int is_unlock)
+{
+	struct map_info *map;
+	struct mtd_erase_region_info *merip;
+	int eraseoffset, erasesize, eraseblocks;
+	int i;
+	int retval = 0;
+	int lock_status;
+
+	map = mtd->priv;
+
+	/* Pass the whole chip through sector by sector and check for each
+	   sector if the sector and the given interval overlap */
+	for(i = 0; i < mtd->numeraseregions; i++) {
+		merip = &mtd->eraseregions[i];
+
+		eraseoffset = merip->offset;
+		erasesize = merip->erasesize;
+		eraseblocks = merip->numblocks;
+
+		/* Skip regions that lie entirely below the range. */
+		if (ofs >= eraseoffset + erasesize * eraseblocks)
+			continue;
+
+		while (eraseblocks > 0) {
+			if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
+				unlock_sector(map, eraseoffset, is_unlock);
+
+				lock_status = is_sector_locked(map, eraseoffset);
+
+				if (is_unlock && lock_status) {
+					printk("Cannot unlock sector at address %x length %x\n",
+					       eraseoffset, merip->erasesize);
+					retval = -1;
+				} else if (!is_unlock && !lock_status) {
+					printk("Cannot lock sector at address %x length %x\n",
+					       eraseoffset, merip->erasesize);
+					retval = -1;
+				}
+			}
+			eraseoffset += erasesize;
+			eraseblocks --;
+		}
+	}
+	return retval;
+}
+
+/* MTD ->unlock hook: clear lock bits on sectors in [ofs, ofs + len). */
+static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+	return amd_flash_do_unlock(mtd, ofs, len, 1);
+}
+
+/* MTD ->lock hook: set lock bits on sectors in [ofs, ofs + len). */
+static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+	return amd_flash_do_unlock(mtd, ofs, len, 0);
+}
/*
struct amd_flash_private *private,
const struct amd_flash_info *table, int table_size)
{
- __u32 mfr_id, dev_id;
+ __u32 mfr_id;
+ __u32 dev_id;
struct map_info *map = mtd->priv;
struct amd_flash_private temp;
int i;
if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
((dev_id >> 16) == (dev_id & 0xffff))) {
- mfr_id = mfr_id & 0xffff;
- dev_id = dev_id & 0xffff;
+ mfr_id &= 0xffff;
+ dev_id &= 0xffff;
} else {
temp.interleave = 1;
}
* autoselect mode now.
*/
for (j = 0; j < private->numchips; j++) {
- if ((wide_read(map, chips[j].start +
- (map->buswidth *
- ADDR_MANUFACTURER))
- == mfr_id)
- &&
- (wide_read(map, chips[j].start +
- (map->buswidth *
- ADDR_DEVICE_ID))
- == dev_id)) {
+ __u32 mfr_id_other;
+ __u32 dev_id_other;
+
+ mfr_id_other =
+ wide_read(map, chips[j].start +
+ (map->buswidth *
+ ADDR_MANUFACTURER
+ ));
+ dev_id_other =
+ wide_read(map, chips[j].start +
+ (map->buswidth *
+ ADDR_DEVICE_ID));
+ if (temp.interleave == 2) {
+ mfr_id_other &= 0xffff;
+ dev_id_other &= 0xffff;
+ }
+ if ((mfr_id_other == mfr_id) &&
+ (dev_id_other == dev_id)) {
/* Exit autoselect mode. */
send_cmd(map, base,
{ offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
{ offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
}
+ }, {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29BDS323D,
+ name: "AMD AM29BDS323D",
+ size: 0x00400000,
+ numeraseregions: 3,
+ regions: {
+ { offset: 0x000000, erasesize: 0x10000, numblocks: 48 },
+ { offset: 0x300000, erasesize: 0x10000, numblocks: 15 },
+ { offset: 0x3f0000, erasesize: 0x02000, numblocks: 8 },
+ }
+ }, {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29BDS643D,
+ name: "AMD AM29BDS643D",
+ size: 0x00800000,
+ numeraseregions: 3,
+ regions: {
+ { offset: 0x000000, erasesize: 0x10000, numblocks: 96 },
+ { offset: 0x600000, erasesize: 0x10000, numblocks: 31 },
+ { offset: 0x7f0000, erasesize: 0x02000, numblocks: 8 },
+ }
}
};
mtd->sync = amd_flash_sync;
mtd->suspend = amd_flash_suspend;
mtd->resume = amd_flash_resume;
+ mtd->lock = amd_flash_lock;
+ mtd->unlock = amd_flash_unlock;
private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
temp.numchips), GFP_KERNEL);
wide_write(map, datum, adr);
times_left = 500000;
- while (times_left-- && flash_is_busy(map, chip->start,
- private->interleave)) {
+ while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
if (current->need_resched) {
spin_unlock_bh(chip->mutex);
schedule();
schedule_timeout(HZ);
spin_lock_bh(chip->mutex);
- while (flash_is_busy(map, chip->start, private->interleave)) {
+ while (flash_is_busy(map, adr, private->interleave)) {
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
module_init(amd_flash_init);
module_exit(amd_flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
+MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0001.c,v 1.80 2001/06/03 01:32:57 nico Exp $
+ * $Id: cfi_cmdset_0001.c,v 1.87 2001/10/02 15:05:11 dwmw2 Exp $
*
*
* 10/10/2000 Nicolas Pitre <nico@cam.org>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/compatmac.h>
static void cfi_intelext_destroy(struct mtd_info *);
-void cfi_cmdset_0001(struct map_info *, int, unsigned long);
+struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
static struct mtd_info *cfi_intelext_setup (struct map_info *);
static struct mtd_chip_driver cfi_intelext_chipdrv = {
- probe: cfi_intelext_setup,
+ probe: NULL, /* Not usable directly */
destroy: cfi_intelext_destroy,
- name: "cfi_intel",
+ name: "cfi_cmdset_0001",
module: THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
+/* #define DEBUG_CFI_FEATURES */
-/* This routine is made available to other mtd code via
- * inter_module_register. It must only be accessed through
- * inter_module_get which will bump the use count of this module. The
- * addresses passed back in cfi are valid as long as the use count of
- * this module is non-zero, i.e. between inter_module_get and
- * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
- */
-void cfi_cmdset_0001(struct map_info *map, int primary, unsigned long base)
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
- struct cfi_private *cfi = map->fldrv_priv;
- int i;
- struct cfi_pri_intelext *extp;
- int ofs_factor = cfi->interleave * cfi->device_type;
-
- __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
-
- //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
-
- if (!adr)
- return;
-
- /* Switch it into Query Mode */
- switch(CFIDEV_BUSWIDTH) {
- case 1:
- map->write8(map, 0x98, 0x55);
- break;
- case 2:
- map->write16(map, 0x9898, 0xaa);
- break;
- case 4:
- map->write32(map, 0x98989898, 0x154);
- break;
- }
-
- extp = kmalloc(sizeof(*extp), GFP_KERNEL);
- if (!extp) {
- printk("Failed to allocate memory\n");
- return;
- }
-
- /* Read in the Extended Query Table */
- for (i=0; i<sizeof(*extp); i++) {
- ((unsigned char *)extp)[i] =
- cfi_read_query(map, (base+((adr+i)*cfi->interleave*cfi->device_type)));
- }
-
- if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
- printk(" Unknown IntelExt Extended Query version %c.%c.\n",
- extp->MajorVersion, extp->MinorVersion);
- kfree(extp);
- return;
- }
-
- /* Do some byteswapping if necessary */
- extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
- extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
-
-
- /* Tell the user about it in lots of lovely detail */
-#if 0
printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
if (extp->VppOptimal)
printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
-#endif
- /* OK. We like it. Take over the control of it. */
+}
+#endif
- /* Switch it into Read Mode */
- switch(CFIDEV_BUSWIDTH) {
- case 1:
- map->write8(map, 0xff, 0x55);
- break;
- case 2:
- map->write16(map, 0xffff, 0xaa);
- break;
- case 4:
- map->write32(map, 0xffffffff, 0x154);
- break;
- }
+/* This routine is made available to other mtd code via
+ * inter_module_register. It must only be accessed through
+ * inter_module_get which will bump the use count of this module. The
+ * addresses passed back in cfi are valid as long as the use count of
+ * this module is non-zero, i.e. between inter_module_get and
+ * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
+ */
+struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ __u32 base = cfi->chips[0].start;
+
+ if (cfi->cfi_mode) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure. So we read the feature
+ * table from it.
+ */
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_intelext *extp;
+ int ofs_factor = cfi->interleave * cfi->device_type;
+
+ //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
+ if (!adr)
+ return NULL;
+ /* Switch it into Query Mode */
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
- /* If there was an old setup function, decrease its use count */
- if (map->fldrv)
- if(map->fldrv->module)
- __MOD_DEC_USE_COUNT(map->fldrv->module);
-
- if (cfi->cmdset_priv)
- kfree(cfi->cmdset_priv);
+ extp = kmalloc(sizeof(*extp), GFP_KERNEL);
+ if (!extp) {
+ printk(KERN_ERR "Failed to allocate memory\n");
+ return NULL;
+ }
+
+ /* Read in the Extended Query Table */
+ for (i=0; i<sizeof(*extp); i++) {
+ ((unsigned char *)extp)[i] =
+ cfi_read_query(map, (base+((adr+i)*ofs_factor)));
+ }
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
+ printk(KERN_WARNING " Unknown IntelExt Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
+
+ /* Do some byteswapping if necessary */
+ extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
+#endif
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+ }
for (i=0; i< cfi->numchips; i++) {
cfi->chips[i].word_write_time = 128;
cfi->chips[i].buffer_write_time = 128;
cfi->chips[i].erase_time = 1024;
}
-
map->fldrv = &cfi_intelext_chipdrv;
MOD_INC_USE_COUNT;
-
- cfi->cmdset_priv = extp;
-
-#if 1 /* Does this work? */
- cfi_send_gen_cmd(0x90, 0x55, base, map, cfi, cfi->device_type, NULL);
-
- cfi->mfr = cfi_read_query(map, base);
- cfi->id = cfi_read_query(map, base + ofs_factor);
-
- printk("JEDEC ID: %2.2X %2.2X\n", cfi->mfr, cfi->id);
-#endif
-
- cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
- return;
+
+ /* Make sure it's in read mode */
+ cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
+ return cfi_intelext_setup(map);
}
static struct mtd_info *cfi_intelext_setup(struct map_info *map)
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
- //printk("number of CFI chips: %d\n", cfi->numchips);
+ //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
if (!mtd) {
- printk("Failed to allocate memory for MTD device\n");
- kfree(cfi->cmdset_priv);
- return NULL;
+ printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+ kfree(cfi->cmdset_priv);
+ return NULL;
}
memset(mtd, 0, sizeof(*mtd));
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
if (!mtd->eraseregions) {
- printk("Failed to allocate memory for MTD erase region info\n");
- kfree(cfi->cmdset_priv);
- return NULL;
+ printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
+ kfree(cfi->cmdset_priv);
+ return NULL;
}
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
if (offset != devsize) {
/* Argh */
- printk("Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
kfree(mtd->eraseregions);
kfree(cfi->cmdset_priv);
return NULL;
}
for (i=0; i<mtd->numeraseregions;i++){
- printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
+ printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
i,mtd->eraseregions[i].offset,
mtd->eraseregions[i].erasesize,
mtd->eraseregions[i].numblocks);
mtd->erase = cfi_intelext_erase_varsize;
mtd->read = cfi_intelext_read;
if ( cfi->cfiq->BufWriteTimeoutTyp ) {
- //printk( KERN_INFO"Using buffer write method\n" );
+ //printk(KERN_INFO "Using buffer write method\n" );
mtd->write = cfi_intelext_write_buffers;
} else {
- //printk( KERN_INFO"Using word write method\n" );
+ //printk(KERN_INFO "Using word write method\n" );
mtd->write = cfi_intelext_write_words;
}
mtd->sync = cfi_intelext_sync;
*/
switch (chip->state) {
case FL_ERASING:
+ if (!((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2)
+ goto sleep; /* We don't support erase suspend */
+
cfi_write (map, CMD(0xb0), cmd_addr);
+ /* If the flash has finished erasing, then 'erase suspend'
+ * appears to make some (28F320) flash devices switch to
+ * 'read' mode. Make sure that we switch to 'read status'
+ * mode so we get the right data. --rmk
+ */
+ cfi_write(map, CMD(0x70), cmd_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
-// printk("Erase suspending at 0x%lx\n", cmd_addr);
+ // printk("Erase suspending at 0x%lx\n", cmd_addr);
for (;;) {
status = cfi_read(map, cmd_addr);
if ((status & status_OK) == status_OK)
if (time_after(jiffies, timeo)) {
/* Urgh */
cfi_write(map, CMD(0xd0), cmd_addr);
+ /* make sure we're in 'read status' mode */
+ cfi_write(map, CMD(0x70), cmd_addr);
chip->state = FL_ERASING;
spin_unlock_bh(chip->mutex);
- printk("Chip not ready after erase suspended\n");
+ printk(KERN_ERR "Chip not ready after erase "
+ "suspended: status = 0x%x\n", status);
return -EIO;
}
-
+
spin_unlock_bh(chip->mutex);
cfi_udelay(1);
spin_lock_bh(chip->mutex);
}
-
+
suspended = 1;
cfi_write(map, CMD(0xff), cmd_addr);
chip->state = FL_READY;
break;
-
+
#if 0
case FL_WRITING:
/* Not quite yet */
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
- printk("waiting for chip to be ready timed out in read. WSM status = %x\n", status);
+ printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %x\n", status);
return -EIO;
}
goto retry;
default:
+ sleep:
/* Stick ourselves on a wait queue to be woken when
someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
- printk("waiting for chip to be ready timed out in read\n");
+ printk(KERN_ERR "waiting for chip to be ready timed out in read\n");
return -EIO;
}
chip->state = FL_STATUS;
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
- printk("waiting for chip to be ready timed out in word write\n");
+ printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
return -EIO;
}
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
- printk("waiting for chip to be ready timed out in buffer write\n");
+ printk(KERN_ERR "waiting for chip to be ready timed out in buffer write\n");
return -EIO;
}
chip->state = FL_STATUS;
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
- printk("Chip not ready for buffer write. Xstatus = %x, status = %x\n", status, cfi_read(map, cmd_adr));
+ printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %x, status = %x\n", status, cfi_read(map, cmd_adr));
return -EIO;
}
}
chip->state = FL_STATUS;
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
- printk("waiting for chip to be ready timed out in bufwrite\n");
+ printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
return -EIO;
}
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
- printk("waiting for chip to be ready timed out in erase\n");
+ printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
return -EIO;
}
spin_unlock_bh(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + (HZ*2); /* FIXME */
+ timeo = jiffies + (HZ*20); /* FIXME */
spin_lock_bh(chip->mutex);
continue;
}
if (time_after(jiffies, timeo)) {
cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- printk("waiting for erase to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
+ printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
return -EIO;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
- printk("waiting for chip to be ready timed out in lock\n");
+ printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
return -EIO;
}
if (time_after(jiffies, timeo)) {
cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- printk("waiting for lock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
+ printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
return -EIO;
/* Urgh. Chip not yet ready to talk to us. */
if (time_after(jiffies, timeo)) {
spin_unlock_bh(chip->mutex);
- printk("waiting for chip to be ready timed out in unlock\n");
+ printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
return -EIO;
}
if (time_after(jiffies, timeo)) {
cfi_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- printk("waiting for unlock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
+ printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
DISABLE_VPP(map);
spin_unlock_bh(chip->mutex);
return -EIO;
spin_lock_bh(chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
+ /* No need to force it into a known state here,
+ because we're returning failure, and it didn't
+ get power cycled */
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
spin_lock_bh(chip->mutex);
+ /* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
- /* We need to force it back to a known state. */
- cfi_write(map, CMD(0xff), 0);
+ cfi_write(map, CMD(0xFF), 0);
chip->state = FL_READY;
wake_up(&chip->wq);
}
kfree(cfi);
}
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define cfi_intelext_init init_module
-#define cfi_intelext_exit cleanup_module
-#endif
-
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";
-
-mod_init_t cfi_intelext_init(void)
+int __init cfi_intelext_init(void)
{
inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
return 0;
}
-mod_exit_t cfi_intelext_exit(void)
+static void __exit cfi_intelext_exit(void)
{
inter_module_unregister(im_name_1);
inter_module_unregister(im_name_3);
module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
+MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
*
* This code is GPL
*
- * $Id: cfi_cmdset_0002.c,v 1.48 2001/06/03 01:32:57 nico Exp $
+ * $Id: cfi_cmdset_0002.c,v 1.51 2001/10/02 15:05:12 dwmw2 Exp $
*
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
static void cfi_amdstd_destroy(struct mtd_info *);
-void cfi_cmdset_0002(struct map_info *, int, unsigned long);
+struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct map_info *);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
- probe: cfi_amdstd_setup,
+ probe: NULL, /* Not usable directly */
destroy: cfi_amdstd_destroy,
name: "cfi_cmdset_0002",
module: THIS_MODULE
};
-void cfi_cmdset_0002(struct map_info *map, int primary, unsigned long base)
+struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned char bootloc;
int ofs_factor = cfi->interleave * cfi->device_type;
int i;
__u8 major, minor;
-// struct cfi_pri_intelext *extp;
+ __u32 base = cfi->chips[0].start;
- if (cfi->cfi_mode==0){
- __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ if (cfi->cfi_mode==1){
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
- cfi_send_gen_cmd(0x98, 0x55, 0, map, cfi, cfi->device_type, NULL);
-
- major = cfi_read_query(map, (adr+3)*ofs_factor);
- minor = cfi_read_query(map, (adr+4)*ofs_factor);
-
- printk(" Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
- major, minor, adr);
-
- cfi_send_gen_cmd(0xf0, 0x55, 0, map, cfi, cfi->device_type, NULL);
-
- cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi->mfr = cfi_read_query(map, base);
- cfi->id = cfi_read_query(map, base + ofs_factor);
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+
+ major = cfi_read_query(map, base + (adr+3)*ofs_factor);
+ minor = cfi_read_query(map, base + (adr+4)*ofs_factor);
+
+ printk(" Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
+ major, minor, adr);
+ cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
+
+ cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query(map, base);
+ cfi->id = cfi_read_query(map, base + ofs_factor);
- /* Wheee. Bring me the head of someone at AMD. */
+ /* Wheee. Bring me the head of someone at AMD. */
#ifdef AMD_BOOTLOC_BUG
- if (((major << 8) | minor) < 0x3131) {
- /* CFI version 1.0 => don't trust bootloc */
- if (cfi->id & 0x80) {
- printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
- bootloc = 3; /* top boot */
- } else {
- bootloc = 2; /* bottom boot */
- }
- } else
+ if (((major << 8) | minor) < 0x3131) {
+ /* CFI version 1.0 => don't trust bootloc */
+ if (cfi->id & 0x80) {
+ printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
+ bootloc = 3; /* top boot */
+ } else {
+ bootloc = 2; /* bottom boot */
+ }
+ } else
#endif
- {
- cfi_send_gen_cmd(0x98, 0x55, 0, map, cfi, cfi->device_type, NULL);
- bootloc = cfi_read_query(map, (adr+15)*ofs_factor);
- }
- if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
- printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
-
- for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
- int j = (cfi->cfiq->NumEraseRegions-1)-i;
- __u32 swap;
-
- swap = cfi->cfiq->EraseRegionInfo[i];
- cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
- cfi->cfiq->EraseRegionInfo[j] = swap;
+ {
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+ bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor);
+ }
+ if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
+ printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
+ int j = (cfi->cfiq->NumEraseRegions-1)-i;
+ __u32 swap;
+
+ swap = cfi->cfiq->EraseRegionInfo[i];
+ cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
+ cfi->cfiq->EraseRegionInfo[j] = swap;
+ }
}
- }
- }
-
- /* If there was an old setup function, decrease its use count */
- if (map->fldrv)
- if(map->fldrv->module)
- __MOD_DEC_USE_COUNT(map->fldrv->module);
-
- if (cfi->cmdset_priv)
- kfree(cfi->cmdset_priv);
+ switch (cfi->device_type) {
+ case CFI_DEVICETYPE_X8:
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ break;
+ case CFI_DEVICETYPE_X16:
+ cfi->addr_unlock1 = 0xaaa;
+ if (map->buswidth == cfi->interleave) {
+ /* X16 chip(s) in X8 mode */
+ cfi->addr_unlock2 = 0x555;
+ } else {
+ cfi->addr_unlock2 = 0x554;
+ }
+ break;
+ case CFI_DEVICETYPE_X32:
+ cfi->addr_unlock1 = 0x1555;
+ cfi->addr_unlock2 = 0xaaa;
+ break;
+ default:
+ printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type);
+ return NULL;
+ }
+ } /* CFI mode */
- for (i=0; i< cfi->numchips; i++) {
+ for (i=0; i< cfi->numchips; i++) {
cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
- }
+ }
+
+ map->fldrv = &cfi_amdstd_chipdrv;
+ MOD_INC_USE_COUNT;
- map->fldrv = &cfi_amdstd_chipdrv;
- MOD_INC_USE_COUNT;
- cfi_send_gen_cmd(0xf0, 0x55, 0, map, cfi, cfi->device_type, NULL);
- return;
+ cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
+ return cfi_amdstd_setup(map);
}
static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
kfree(cfi);
}
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define cfi_amdstd_init init_module
-#define cfi_amdstd_exit cleanup_module
-#endif
-
static char im_name[]="cfi_cmdset_0002";
-mod_init_t cfi_amdstd_init(void)
+int __init cfi_amdstd_init(void)
{
inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
return 0;
}
-mod_exit_t cfi_amdstd_exit(void)
+static void __exit cfi_amdstd_exit(void)
{
inter_module_unregister(im_name);
}
module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
+MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
+++ /dev/null
-/* $Id: cfi_jedec.c,v 1.5 2001/06/02 14:52:23 dwmw2 Exp $ */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/map.h>
-#include <linux/mtd/cfi.h>
-
-/* Manufacturers */
-#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_FUJITSU 0x0004
-#define MANUFACTURER_ATMEL 0x001f
-#define MANUFACTURER_ST 0x0020
-#define MANUFACTURER_SST 0x00BF
-#define MANUFACTURER_TOSHIBA 0x0098
-
-/* AMD */
-#define AM29F800BB 0x2258
-#define AM29F800BT 0x22D6
-#define AM29LV800BB 0x225B
-#define AM29LV800BT 0x22DA
-#define AM29LV160DT 0x22C4
-#define AM29LV160DB 0x2249
-
-/* Atmel */
-#define AT49BV16X4 0x00c0
-#define AT49BV16X4T 0x00c2
-
-/* Fujitsu */
-#define MBM29LV160TE 0x22C4
-#define MBM29LV160BE 0x2249
-
-/* ST - www.st.com */
-#define M29W800T 0x00D7
-#define M29W160DT 0x22C4
-#define M29W160DB 0x2249
-
-/* SST */
-#define SST39LF800 0x2781
-#define SST39LF160 0x2782
-
-/* Toshiba */
-#define TC58FVT160 0x00C2
-#define TC58FVB160 0x0043
-
-
-struct amd_flash_info {
- const __u16 mfr_id;
- const __u16 dev_id;
- const char *name;
- const int DevSize;
- const int InterfaceDesc;
- const int NumEraseRegions;
- const ulong regions[4];
-};
-
-#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
-
-#define SIZE_1MiB 20
-#define SIZE_2MiB 21
-#define SIZE_4MiB 22
-
-static const struct amd_flash_info jedec_table[] = {
- {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV160DT,
- name: "AMD AM29LV160DT",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,31),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV160DB,
- name: "AMD AM29LV160DB",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,31)
- }
- }, {
- mfr_id: MANUFACTURER_TOSHIBA,
- dev_id: TC58FVT160,
- name: "Toshiba TC58FVT160",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,31),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_FUJITSU,
- dev_id: MBM29LV160TE,
- name: "Fujitsu MBM29LV160TE",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,31),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_TOSHIBA,
- dev_id: TC58FVB160,
- name: "Toshiba TC58FVB160",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,31)
- }
- }, {
- mfr_id: MANUFACTURER_FUJITSU,
- dev_id: MBM29LV160BE,
- name: "Fujitsu MBM29LV160BE",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,31)
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BB,
- name: "AMD AM29LV800BB",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,15),
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29F800BB,
- name: "AMD AM29F800BB",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,15),
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BT,
- name: "AMD AM29LV800BT",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,15),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29F800BT,
- name: "AMD AM29F800BT",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,15),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BB,
- name: "AMD AM29LV800BB",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,15),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W800T,
- name: "ST M29W800T",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,15),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W160DT,
- name: "ST M29W160DT",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,31),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W160DB,
- name: "ST M29W160DB",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,31)
- }
- }, {
- mfr_id: MANUFACTURER_ATMEL,
- dev_id: AT49BV16X4,
- name: "Atmel AT49BV16X4",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 3,
- regions: {ERASEINFO(0x02000,8),
- ERASEINFO(0x08000,2),
- ERASEINFO(0x10000,30)
- }
- }, {
- mfr_id: MANUFACTURER_ATMEL,
- dev_id: AT49BV16X4T,
- name: "Atmel AT49BV16X4T",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 3,
- regions: {ERASEINFO(0x10000,30),
- ERASEINFO(0x08000,2),
- ERASEINFO(0x02000,8)
- }
- }, {
- 0
- }
-};
-
-int cfi_jedec_lookup(int index, int mfr_id, int dev_id)
-{
- if (index>=0){
- if (jedec_table[index].mfr_id == mfr_id &&
- jedec_table[index].dev_id == dev_id) return index;
- }
- else{
- for (index=0; jedec_table[index].mfr_id; index++){
- if (jedec_table[index].mfr_id == mfr_id &&
- jedec_table[index].dev_id == dev_id) return index;
- }
- }
- return -1;
-}
-
-int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
-{
-int i,num_erase_regions;
-
- printk("Found: %s\n",jedec_table[index].name);
-
- num_erase_regions = jedec_table[index].NumEraseRegions;
-
- p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
- if (!p_cfi->cfiq) {
- //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
- return -1;
- }
-
- memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
-
- p_cfi->cfiq->P_ID = P_ID_AMD_STD;
- p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
- p_cfi->cfiq->DevSize = jedec_table[index].DevSize;
-
- for (i=0; i<num_erase_regions; i++){
- p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
- }
- return 0; /* ok */
-}
-
/*
Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd.
- $Id: cfi_probe.c,v 1.60 2001/06/03 01:32:57 nico Exp $
+ $Id: cfi_probe.c,v 1.66 2001/10/02 15:05:12 dwmw2 Exp $
*/
#include <linux/config.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
-/* #define DEBUG_CFI */
+//#define DEBUG_CFI
#ifdef DEBUG_CFI
static void print_cfi_ident(struct cfi_ident *);
int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
int cfi_jedec_lookup(int index, int mfr_id, int dev_id);
-static void check_cmd_set(struct map_info *, int, unsigned long);
-static struct cfi_private *cfi_cfi_probe(struct map_info *);
-struct mtd_info *cfi_probe(struct map_info *map);
-
-
-static struct mtd_chip_driver cfi_chipdrv = {
- probe: cfi_probe,
- name: "cfi",
- module: THIS_MODULE
-};
-
-
-struct mtd_info *cfi_probe(struct map_info *map)
-{
- struct mtd_info *mtd = NULL;
- struct cfi_private *cfi;
-
- /* First probe the map to see if we have CFI stuff there. */
- cfi = cfi_cfi_probe(map);
-
- if (!cfi)
- return NULL;
-
- map->fldrv_priv = cfi;
- /* OK we liked it. Now find a driver for the command set it talks */
-
- check_cmd_set(map, 1, cfi->chips[0].start); /* First the primary cmdset */
- if (!map->fldrv)
- check_cmd_set(map, 0, cfi->chips[0].start); /* Then the secondary */
-
- /* check_cmd_set() will have used inter_module_get to increase
- the use count of the module which provides the command set
- driver. If we're quitting, we have to decrease it again.
- */
-
- if(map->fldrv) {
- mtd = map->fldrv->probe(map);
- /* Undo the use count we held onto from inter_module_get */
-#ifdef MODULE
- if(map->fldrv->module)
- __MOD_DEC_USE_COUNT(map->fldrv->module);
-#endif
- if (mtd)
- return mtd;
- }
- printk(KERN_WARNING"cfi_probe: No supported Vendor Command Set found\n");
-
- kfree(cfi->cfiq);
- kfree(cfi);
- map->fldrv_priv = NULL;
- return NULL;
-}
+static int cfi_probe_chip(struct map_info *map, __u32 base,
+ struct flchip *chips, struct cfi_private *cfi);
+static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
-static __u32 cfi_send_cmd(u_char cmd, __u32 base, struct map_info *map, struct cfi_private *cfi)
-{
- return cfi_send_gen_cmd(cmd, 0x55, base, map, cfi, cfi->device_type, NULL);
-}
+struct mtd_info *cfi_probe(struct map_info *map);
/* check for QRY, or search for jedec id.
in: interleave,type,mode
ret: table index, <0 for error
*/
-static int cfi_check_qry_or_id(struct map_info *map, __u32 base, int index,
+static inline int qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi)
{
- __u32 manufacturer_id, device_id;
int osf = cfi->interleave * cfi->device_type; // scale factor
- //printk("cfi_check_qry_or_id: base=0x%08lx interl=%d type=%d index=%d\n",base,cfi->interleave,cfi->device_type,index);
+ if (cfi_read(map,base+osf*0x10)==cfi_build_cmd('Q',map,cfi) &&
+ cfi_read(map,base+osf*0x11)==cfi_build_cmd('R',map,cfi) &&
+ cfi_read(map,base+osf*0x12)==cfi_build_cmd('Y',map,cfi))
+ return 1; // ok !
- switch(cfi->cfi_mode){
- case 0:
- if (cfi_read(map,base+osf*0x10)==cfi_build_cmd('Q',map,cfi) &&
- cfi_read(map,base+osf*0x11)==cfi_build_cmd('R',map,cfi) &&
- cfi_read(map,base+osf*0x12)==cfi_build_cmd('Y',map,cfi))
- return 0; // ok !
- break;
-
- case 1:
- manufacturer_id = cfi_read(map,base+0*osf);
- device_id = cfi_read(map,base+1*osf);
- //printk("cfi_check_qry_or_id: man=0x%lx,id=0x%lx\n",manufacturer_id, device_id);
-
- return cfi_jedec_lookup(index,manufacturer_id,device_id);
- }
-
- return -1; // nothing found
-}
-
-static void cfi_qry_mode(struct map_info *map, __u32 base, struct cfi_private *cfi)
-{
- switch(cfi->cfi_mode){
- case 0:
- /* Query */
- cfi_send_cmd(0x98, base, map, cfi);
- break;
-
- case 1:
-
- /* Autoselect */
- cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
- break;
- }
+ return 0; // nothing found
}
-static int cfi_probe_chip_1(struct map_info *map, __u32 base,
+static int cfi_probe_chip(struct map_info *map, __u32 base,
struct flchip *chips, struct cfi_private *cfi)
{
- int index;
- __u32 tmp,ofs;
-
- ofs = cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, &tmp);
-
- cfi_qry_mode(map,base,cfi);
+ int i;
- index=cfi_check_qry_or_id(map,base,-1,cfi);
- if (index<0) return -1;
-
- if (chips){
- int i;
-
- for (i=0; i<cfi->numchips; i++){
- /* This chip should be in read mode if it's one
- we've already touched. */
- if (cfi_check_qry_or_id(map,chips[i].start,index,cfi) >= 0){
- cfi_send_gen_cmd(0xF0, 0, chips[i].start, map, cfi, cfi->device_type, NULL);
- if (cfi_check_qry_or_id(map,chips[i].start,index,cfi) >= 0){
- /* Yes it's got QRY for data. Most unfortunate.
- Stick the old one in read mode too. */
- cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
- if (cfi_check_qry_or_id(map,base,index,cfi) >= 0){
- /* OK, so has the new one. Assume it's an alias */
- printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, chips[i].start);
- return -1;
- }
- } else {
- /*
- * FIXME: Is this supposed to work?
- * The third argument is already
- * multiplied as this within the
- * function definition. (Nicolas Pitre)
- */
- cfi_send_gen_cmd(0xF0, 0, base+0xaa*cfi->interleave * cfi->device_type, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0xF0, 0, chips[i].start+0xaa*cfi->interleave * cfi->device_type, map, cfi, cfi->device_type, NULL);
- return -1;
- }
- }
- } /* for i */
-
- /* OK, if we got to here, then none of the previous chips appear to
- be aliases for the current one. */
- if (cfi->numchips == MAX_CFI_CHIPS) {
- printk(KERN_WARNING"%s: Too many flash chips detected. Increase MAX_CFI_CHIPS from %d.\n", map->name, MAX_CFI_CHIPS);
- /* Doesn't matter about resetting it to Read Mode - we're not going to talk to it anyway */
- return -1;
- }
- chips[cfi->numchips].start = base;
- chips[cfi->numchips].state = FL_READY;
- chips[cfi->numchips].mutex = &chips[cfi->numchips]._spinlock;
- cfi->numchips++;
-
- /* Put it back into Read Mode */
- cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+
+ if (!qry_present(map,base,cfi))
+ return 0;
+
+ if (!cfi->numchips) {
+ /* This is the first time we're called. Set up the CFI
+ stuff accordingly and return */
+ return cfi_chip_setup(map, cfi);
}
- printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit mode\n", map->name,
- cfi->interleave, cfi->device_type*8, base, map->buswidth*8);
-
- return index;
-}
-/* put dev into qry mode, and try cfi and jedec modes for the given type/interleave
- */
-static int cfi_probe_chip(struct map_info *map, __u32 base,
- struct flchip *chips, struct cfi_private *cfi)
-{
- int index;
- cfi->cfi_mode=0; /* cfi mode */
-
- switch (cfi->device_type) {
- case CFI_DEVICETYPE_X8:
- cfi->addr_unlock1 = 0x555;
- cfi->addr_unlock2 = 0x2aa;
- break;
- case CFI_DEVICETYPE_X16:
- cfi->addr_unlock1 = 0xaaa;
- if (map->buswidth == cfi->interleave) {
- /* X16 chip(s) in X8 mode */
- cfi->addr_unlock2 = 0x555;
- } else {
- cfi->addr_unlock2 = 0x554;
+ /* Check each previous chip to see if it's an alias */
+ for (i=0; i<cfi->numchips; i++) {
+ /* This chip should be in read mode if it's one
+ we've already touched. */
+ if (qry_present(map,chips[i].start,cfi)) {
+ /* Eep. This chip also had the QRY marker.
+ * Is it an alias for the new one? */
+ cfi_send_gen_cmd(0xF0, 0, chips[i].start, map, cfi, cfi->device_type, NULL);
+
+ /* If the QRY marker goes away, it's an alias */
+ if (!qry_present(map, chips[i].start, cfi)) {
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, chips[i].start);
+ return 0;
+ }
+ /* Yes, it's actually got QRY for data. Most
+ * unfortunate. Stick the new chip in read mode
+ * too and if it's the same, assume it's an alias. */
+ /* FIXME: Use other modes to do a proper check */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+
+ if (qry_present(map, base, cfi)) {
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, chips[i].start);
+ return 0;
+ }
}
- break;
- case CFI_DEVICETYPE_X32:
- cfi->addr_unlock1 = 0x1555;
- cfi->addr_unlock2 = 0xaaa;
- break;
- default:
- return 0;
}
- index = cfi_probe_chip_1(map,base,chips,cfi);
- if (index>=0) return index;
-
- cfi->cfi_mode=1; /* jedec mode */
- index = cfi_probe_chip_1(map,base,chips,cfi);
- if (index>=0) return index;
- cfi->addr_unlock1 = 0x5555;
- cfi->addr_unlock2 = 0x2aaa;
- index = cfi_probe_chip_1(map,base,chips,cfi);
+ /* OK, if we got to here, then none of the previous chips appear to
+ be aliases for the current one. */
+ if (cfi->numchips == MAX_CFI_CHIPS) {
+ printk(KERN_WARNING"%s: Too many flash chips detected. Increase MAX_CFI_CHIPS from %d.\n", map->name, MAX_CFI_CHIPS);
+ /* Doesn't matter about resetting it to Read Mode - we're not going to talk to it anyway */
+ return -1;
+ }
+ chips[cfi->numchips].start = base;
+ chips[cfi->numchips].state = FL_READY;
+ cfi->numchips++;
- return index;
-}
-
-/*
- * Since probeing for CFI chips requires writing to the device problems may
- * occur if the flash is not present and RAM is accessed instead. For now we
- * assume that the flash is present so we don't check for RAM or replace
- * possibly overwritten data.
- */
-static int cfi_probe_new_chip(struct map_info *map, unsigned long base,
- struct flchip *chips, struct cfi_private *cfi)
-{
-int index;
- switch (map->buswidth) {
-#ifdef CFIDEV_BUSWIDTH_1
- case CFIDEV_BUSWIDTH_1:
- cfi->interleave = CFIDEV_INTERLEAVE_1;
- cfi->device_type = CFI_DEVICETYPE_X8;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-
- cfi->device_type = CFI_DEVICETYPE_X16;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
- break;
-#endif
+ /* Put it back into Read Mode */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
-#ifdef CFIDEV_BUSWIDTH_2
- case CFIDEV_BUSWIDTH_2:
-#ifdef CFIDEV_INTERLEAVE_1
- cfi->interleave = CFIDEV_INTERLEAVE_1;
- cfi->device_type = CFI_DEVICETYPE_X16;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-#endif
-#ifdef CFIDEV_INTERLEAVE_2
- cfi->interleave = CFIDEV_INTERLEAVE_2;
- cfi->device_type = CFI_DEVICETYPE_X8;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-
- cfi->device_type = CFI_DEVICETYPE_X16;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-
-#endif
- break;
-#endif
-
-#ifdef CFIDEV_BUSWIDTH_4
- case CFIDEV_BUSWIDTH_4:
-#ifdef CFIDEV_INTERLEAVE_4
- cfi->interleave = CFIDEV_INTERLEAVE_4;
- cfi->device_type = CFI_DEVICETYPE_X16;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-
- cfi->device_type = CFI_DEVICETYPE_X32;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-
- cfi->device_type = CFI_DEVICETYPE_X8;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-#endif
-#ifdef CFIDEV_INTERLEAVE_2
- cfi->interleave = CFIDEV_INTERLEAVE_2;
- cfi->device_type = CFI_DEVICETYPE_X16;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-#endif
-#ifdef CFIDEV_INTERLEAVE_1
- cfi->interleave = CFIDEV_INTERLEAVE_1;
- cfi->device_type = CFI_DEVICETYPE_X32;
- index = cfi_probe_chip(map,base,chips,cfi);
- if (index>=0) return index;
-#endif
- break;
-#endif
- default:
- printk(KERN_WARNING "cfi_probe called with unsupported buswidth %d\n", map->buswidth);
- return -1;
- } // switch
- return -1;
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit mode\n",
+ map->name, cfi->interleave, cfi->device_type*8, base,
+ map->buswidth*8);
+
+ return 1;
}
-static struct cfi_private *cfi_cfi_probe(struct map_info *map)
+static int cfi_chip_setup(struct map_info *map,
+ struct cfi_private *cfi)
{
- unsigned long base=0;
- struct cfi_private cfi;
- struct cfi_private *retcfi;
- struct flchip chip[MAX_CFI_CHIPS];
- int i,index;
- char num_erase_regions;
- int ofs_factor;
-
- memset(&cfi, 0, sizeof(cfi));
-
- /* The first invocation (with chips == NULL) leaves the device in Query Mode */
- index = cfi_probe_new_chip(map, 0, NULL, &cfi);
-
- if (index<0) {
- printk(KERN_WARNING"%s: Found no CFI device at location zero\n", map->name);
- /* Doesn't appear to be CFI-compliant at all */
- return NULL;
- }
-
- /* Read the Basic Query Structure from the device */
-
- ofs_factor = cfi.interleave*cfi.device_type;
-
- /* First, work out the amount of space to allocate */
- if (cfi.cfi_mode==0){
- num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
+ int ofs_factor = cfi->interleave*cfi->device_type;
+ __u32 base = 0;
+ int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
+ int i;
#ifdef DEBUG_CFI
- printk("Number of erase regions: %d\n", num_erase_regions);
+ printk("Number of erase regions: %d\n", num_erase_regions);
#endif
+ if (!num_erase_regions)
+ return 0;
- cfi.cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
- if (!cfi.cfiq) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
- return NULL;
- }
-
- memset(cfi.cfiq,0,sizeof(struct cfi_ident));
-
- cfi.fast_prog=1; /* CFI supports fast programming */
-
- /* CFI flash */
- for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) {
- ((unsigned char *)cfi.cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
- }
-
- /* Do any necessary byteswapping */
- cfi.cfiq->P_ID = le16_to_cpu(cfi.cfiq->P_ID);
-
- cfi.cfiq->P_ADR = le16_to_cpu(cfi.cfiq->P_ADR);
- cfi.cfiq->A_ID = le16_to_cpu(cfi.cfiq->A_ID);
- cfi.cfiq->A_ADR = le16_to_cpu(cfi.cfiq->A_ADR);
- cfi.cfiq->InterfaceDesc = le16_to_cpu(cfi.cfiq->InterfaceDesc);
- cfi.cfiq->MaxBufWriteSize = le16_to_cpu(cfi.cfiq->MaxBufWriteSize);
-
- for (i=0; i<cfi.cfiq->NumEraseRegions; i++) {
- cfi.cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi.cfiq->EraseRegionInfo[i]);
-
-#ifdef DEBUG_CFI
- printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
- i, (cfi.cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
- (cfi.cfiq->EraseRegionInfo[i] & 0xffff) + 1);
-#endif
- }
- }
- else{
- /* JEDEC flash */
- if (cfi_jedec_setup(&cfi,index)<0){
- printk(KERN_WARNING "cfi_jedec_setup failed\n");
- return NULL;
- }
+ cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ if (!cfi->cfiq) {
+ printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
+ return 0;
}
-
- if (cfi.cfiq->NumEraseRegions == 0) {
- printk(KERN_WARNING "Number of erase regions is zero\n");
- kfree(cfi.cfiq);
- return NULL;
+
+ memset(cfi->cfiq,0,sizeof(struct cfi_ident));
+
+ cfi->cfi_mode = 1;
+ cfi->fast_prog=1; /* CFI supports fast programming */
+
+ /* Read the CFI info structure */
+ for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) {
+ ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
}
+
+ /* Do any necessary byteswapping */
+ cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
+
+ cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR);
+ cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID);
+ cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR);
+ cfi->cfiq->InterfaceDesc = le16_to_cpu(cfi->cfiq->InterfaceDesc);
+ cfi->cfiq->MaxBufWriteSize = le16_to_cpu(cfi->cfiq->MaxBufWriteSize);
#ifdef DEBUG_CFI
/* Dump the information therein */
- print_cfi_ident(cfi.cfiq);
+ print_cfi_ident(cfi->cfiq);
#endif
- cfi_send_cmd(0xFF, base, map, &cfi);
-
- /* OK. We've worked out what it is and we're happy with it. Now see if there are others */
-
- chip[0].start = 0;
- chip[0].state = FL_READY;
- chip[0].mutex = &chip[0]._spinlock;
-
- cfi.chipshift = cfi.cfiq->DevSize;
- cfi.numchips = 1;
-
- if (!cfi.chipshift) {
- printk(KERN_ERR"cfi.chipsize is zero. This is bad. cfi.cfiq->DevSize is %d\n", cfi.cfiq->DevSize);
- kfree(cfi.cfiq);
- return NULL;
- }
- switch (cfi.interleave) {
- case 2: cfi.chipshift += 1; break;
- case 4: cfi.chipshift += 2; break;
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
+
+#ifdef DEBUG_CFI
+ printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
+ i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
+ (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
+#endif
}
+ /* Put it back into Read Mode */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
- for (base = (1<<cfi.chipshift); base < map->size; base += (1<<cfi.chipshift))
- cfi_probe_chip_1(map, base, &chip[0], &cfi);
-
- retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
-
- if (!retcfi) {
- printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);
- kfree(cfi.cfiq);
- return NULL;
- }
- memcpy(retcfi, &cfi, sizeof(cfi));
- memcpy(&retcfi->chips[0], chip, sizeof(struct flchip) * cfi.numchips);
- for (i=0; i< retcfi->numchips; i++) {
- init_waitqueue_head(&retcfi->chips[i].wq);
- spin_lock_init(&retcfi->chips[i]._spinlock);
- retcfi->chips[i].mutex = &retcfi->chips[i]._spinlock;
- }
- return retcfi;
+ return 1;
}
#ifdef DEBUG_CFI
}
#endif /* DEBUG_CFI */
-typedef void cfi_cmdset_fn_t(struct map_info *, int, unsigned long);
-
-extern cfi_cmdset_fn_t cfi_cmdset_0001;
-extern cfi_cmdset_fn_t cfi_cmdset_0002;
-
-static void cfi_cmdset_unknown(struct map_info *map, int primary, unsigned long base)
-{
- __u16 adr;
- struct cfi_private *cfi = map->fldrv_priv;
- __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
-#ifdef HAVE_INTER_MODULE
- char probename[32];
- cfi_cmdset_fn_t *probe_function;
-
- sprintf(probename, "cfi_cmdset_%4.4X", type);
-
- probe_function = inter_module_get_request(probename, probename);
-
- if (probe_function) {
- (*probe_function)(map, primary, base);
- return;
- }
-#endif
- printk(KERN_NOTICE "Support for command set %04X not present\n", type);
- /* This was a command set we don't know about. Print only the basic info */
- adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
-
- if (!adr) {
- printk(" No Extended Query Table\n");
- }
- else {
- int ofs_factor = cfi->interleave * cfi->device_type;
-
- if (cfi_read_query(map,base + adr*ofs_factor) != (primary?'P':'A') ||
- cfi_read_query(map,base + (adr+1)*ofs_factor) != (primary?'R':'L') ||
- cfi_read_query(map,base + (adr+2)*ofs_factor) != (primary?'I':'T')) {
- printk ("Invalid Extended Query Table at %4.4X: %2.2X %2.2X %2.2X\n",
- adr,
- cfi_read_query(map,base + adr*ofs_factor),
- cfi_read_query(map,base + (adr+1)*ofs_factor),
- cfi_read_query(map,base + (adr+2)*ofs_factor));
- }
- else {
- printk(" Extended Query Table version %c.%c\n",
- cfi_read_query(map,base + (adr+3)*ofs_factor),
- cfi_read_query(map,base + (adr+4)*ofs_factor));
- }
- }
- cfi_send_cmd(0xff, base, map, cfi);
-}
+static struct chip_probe cfi_chip_probe = {
+ name: "CFI",
+ probe_chip: cfi_probe_chip
+};
-static void check_cmd_set(struct map_info *map, int primary, unsigned long base)
+struct mtd_info *cfi_probe(struct map_info *map)
{
- struct cfi_private *cfi = map->fldrv_priv;
- __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
-
- if (type == P_ID_NONE || type == P_ID_RESERVED)
- return;
- /* Put it in query mode */
- cfi_qry_mode(map,base,cfi);
-
- switch(type){
- /* Urgh. Ifdefs. The version with weak symbols was
- * _much_ nicer. Shame it didn't seem to work on
- * anything but x86, really.
- * But we can't rely in inter_module_get() because
- * that'd mean we depend on link order.
- */
-#ifdef CONFIG_MTD_CFI_INTELEXT
- case 0x0001:
- case 0x0003:
- return cfi_cmdset_0001(map, primary, base);
-#endif
-#ifdef CONFIG_MTD_CFI_AMDSTD
- case 0x0002:
- return cfi_cmdset_0002(map, primary, base);
-#endif
- }
-
- return cfi_cmdset_unknown(map, primary, base);
+ /*
+ * Just use the generic probe stuff to call our CFI-specific
+ * chip_probe routine in all the possible permutations, etc.
+ */
+ return mtd_do_chip_probe(map, &cfi_chip_probe);
}
+static struct mtd_chip_driver cfi_chipdrv = {
+ probe: cfi_probe,
+ name: "cfi_probe",
+ module: THIS_MODULE
+};
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define cfi_probe_init init_module
-#define cfi_probe_exit cleanup_module
-#endif
-
-mod_init_t cfi_probe_init(void)
+int __init cfi_probe_init(void)
{
register_mtd_chip_driver(&cfi_chipdrv);
return 0;
}
-mod_exit_t cfi_probe_exit(void)
+static void __exit cfi_probe_exit(void)
{
unregister_mtd_chip_driver(&cfi_chipdrv);
}
module_init(cfi_probe_init);
module_exit(cfi_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
+MODULE_DESCRIPTION("Probe code for CFI-compliant flash chips");
/*
- * $Id: chipreg.c,v 1.8 2001/06/09 19:58:19 dwmw2 Exp $
+ * $Id: chipreg.c,v 1.12 2001/10/02 15:29:53 dwmw2 Exp $
*
* Registration for chip drivers
*
EXPORT_SYMBOL(register_mtd_chip_driver);
EXPORT_SYMBOL(unregister_mtd_chip_driver);
EXPORT_SYMBOL(do_map_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Core routines for registering and invoking MTD chip drivers");
--- /dev/null
+/*
+ * Routines common to all CFI-type probes.
+ * (C) 2001 Red Hat, Inc.
+ * GPL'd
+ * $Id: gen_probe.c,v 1.5 2001/10/02 15:05:12 dwmw2 Exp $
+ */
+
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
+
+static struct mtd_info *check_cmd_set(struct map_info *, int);
+static struct cfi_private *genprobe_ident_chips(struct map_info *map,
+ struct chip_probe *cp);
+static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
+ struct cfi_private *cfi);
+
+struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
+{
+ struct mtd_info *mtd = NULL;
+ struct cfi_private *cfi;
+
+ /* First probe the map to see if we have CFI stuff there. */
+ cfi = genprobe_ident_chips(map, cp);
+
+ if (!cfi)
+ return NULL;
+
+ map->fldrv_priv = cfi;
+ /* OK we liked it. Now find a driver for the command set it talks */
+
+ mtd = check_cmd_set(map, 1); /* First the primary cmdset */
+ if (!mtd)
+ mtd = check_cmd_set(map, 0); /* Then the secondary */
+
+ if (mtd)
+ return mtd;
+
+ printk(KERN_WARNING"cfi_probe: No supported Vendor Command Set found\n");
+
+ kfree(cfi->cfiq);
+ kfree(cfi);
+ map->fldrv_priv = NULL;
+ return NULL;
+}
+EXPORT_SYMBOL(mtd_do_chip_probe);
+
+
+struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
+{
+ unsigned long base=0;
+ struct cfi_private cfi;
+ struct cfi_private *retcfi;
+ struct flchip chip[MAX_CFI_CHIPS];
+ int i;
+
+ memset(&cfi, 0, sizeof(cfi));
+
+ /* Call the probetype-specific code with all permutations of
+ interleave and device type, etc. */
+ if (!genprobe_new_chip(map, cp, &cfi)) {
+ /* The probe didn't like it */
+ printk(KERN_WARNING "%s: Found no %s device at location zero\n",
+ cp->name, map->name);
+ return NULL;
+ }
+
+#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
+ probe routines won't ever return a broken CFI structure anyway,
+ because they make them up themselves.
+ */
+ if (cfi.cfiq->NumEraseRegions == 0) {
+ printk(KERN_WARNING "Number of erase regions is zero\n");
+ kfree(cfi.cfiq);
+ return NULL;
+ }
+#endif
+ chip[0].start = 0;
+ chip[0].state = FL_READY;
+ cfi.chipshift = cfi.cfiq->DevSize;
+
+ switch(cfi.interleave) {
+#ifdef CFIDEV_INTERLEAVE_1
+ case 1:
+ break;
+#endif
+#ifdef CFIDEV_INTERLEAVE_2
+ case 2:
+ cfi.chipshift++;
+ break;
+#endif
+#ifdef CFIDEV_INTERLEAVE_4
+ case 4:
+ cfi.chipshift+=2;
+ break;
+#endif
+ default:
+ BUG();
+ }
+
+ cfi.numchips = 1;
+
+ /*
+ * Now probe for other chips, checking sensibly for aliases while
+ * we're at it. The new_chip probe above should have left the first
+ * chip in read mode.
+ */
+
+ for (base = (1<<cfi.chipshift); base + (1<<cfi.chipshift) <= map->size;
+ base += (1<<cfi.chipshift))
+ cp->probe_chip(map, base, &chip[0], &cfi);
+
+ /*
+ * Now allocate the space for the structures we need to return to
+ * our caller, and copy the appropriate data into them.
+ */
+
+ retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL);
+
+ if (!retcfi) {
+ printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);
+ kfree(cfi.cfiq);
+ return NULL;
+ }
+
+ memcpy(retcfi, &cfi, sizeof(cfi));
+ memcpy(&retcfi->chips[0], chip, sizeof(struct flchip) * cfi.numchips);
+
+ /* Fix up the stuff that breaks when you move it */
+ for (i=0; i< retcfi->numchips; i++) {
+ init_waitqueue_head(&retcfi->chips[i].wq);
+ spin_lock_init(&retcfi->chips[i]._spinlock);
+ retcfi->chips[i].mutex = &retcfi->chips[i]._spinlock;
+ }
+
+ return retcfi;
+}
+
+
+static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
+ struct cfi_private *cfi)
+{
+ switch (map->buswidth) {
+#ifdef CFIDEV_BUSWIDTH_1
+ case CFIDEV_BUSWIDTH_1:
+ cfi->interleave = CFIDEV_INTERLEAVE_1;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+ break;
+#endif /* CFIDEV_BUSWIDTH_1 */
+
+#ifdef CFIDEV_BUSWIDTH_2
+ case CFIDEV_BUSWIDTH_2:
+#ifdef CFIDEV_INTERLEAVE_1
+ cfi->interleave = CFIDEV_INTERLEAVE_1;
+
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_1 */
+#ifdef CFIDEV_INTERLEAVE_2
+ cfi->interleave = CFIDEV_INTERLEAVE_2;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_2 */
+ break;
+#endif /* CFIDEV_BUSWIDTH_2 */
+
+#ifdef CFIDEV_BUSWIDTH_4
+ case CFIDEV_BUSWIDTH_4:
+#if defined(CFIDEV_INTERLEAVE_1) && defined(SOMEONE_ACTUALLY_MAKES_THESE)
+ cfi->interleave = CFIDEV_INTERLEAVE_1;
+
+ cfi->device_type = CFI_DEVICETYPE_X32;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_1 */
+#ifdef CFIDEV_INTERLEAVE_2
+ cfi->interleave = CFIDEV_INTERLEAVE_2;
+
+#ifdef SOMEONE_ACTUALLY_MAKES_THESE
+ cfi->device_type = CFI_DEVICETYPE_X32;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_2 */
+#ifdef CFIDEV_INTERLEAVE_4
+ cfi->interleave = CFIDEV_INTERLEAVE_4;
+
+#ifdef SOMEONE_ACTUALLY_MAKES_THESE
+ cfi->device_type = CFI_DEVICETYPE_X32;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif
+ cfi->device_type = CFI_DEVICETYPE_X16;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+
+ cfi->device_type = CFI_DEVICETYPE_X8;
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+#endif /* CFIDEV_INTERLEAVE_4 */
+ break;
+#endif /* CFIDEV_BUSWIDTH_4 */
+
+ default:
+ printk(KERN_WARNING "genprobe_new_chip called with unsupported buswidth %d\n", map->buswidth);
+ return 0;
+ }
+ return 0;
+}
+
+
+typedef struct mtd_info *cfi_cmdset_fn_t(struct map_info *, int);
+
+extern cfi_cmdset_fn_t cfi_cmdset_0001;
+extern cfi_cmdset_fn_t cfi_cmdset_0002;
+
+static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
+ int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
+#if defined(CONFIG_MODULES) && defined(HAVE_INTER_MODULE)
+ char probename[32];
+ cfi_cmdset_fn_t *probe_function;
+
+ sprintf(probename, "cfi_cmdset_%4.4X", type);
+
+ probe_function = inter_module_get_request(probename, probename);
+
+ if (probe_function) {
+ struct mtd_info *mtd;
+
+ mtd = (*probe_function)(map, primary);
+ /* If it was happy, it'll have increased its own use count */
+ inter_module_put(probename);
+ return mtd;
+ }
+#endif
+ printk(KERN_NOTICE "Support for command set %04X not present\n",
+ type);
+
+ return NULL;
+}
+
+static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
+
+ if (type == P_ID_NONE || type == P_ID_RESERVED)
+ return NULL;
+
+ switch(type){
+ /* Urgh. Ifdefs. The version with weak symbols was
+ * _much_ nicer. Shame it didn't seem to work on
+ * anything but x86, really.
+ * But we can't rely in inter_module_get() because
+ * that'd mean we depend on link order.
+ */
+#ifdef CONFIG_MTD_CFI_INTELEXT
+ case 0x0001:
+ case 0x0003:
+ return cfi_cmdset_0001(map, primary);
+#endif
+#ifdef CONFIG_MTD_CFI_AMDSTD
+ case 0x0002:
+ return cfi_cmdset_0002(map, primary);
+#endif
+ }
+
+ return cfi_cmdset_unknown(map, primary);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Helper routines for flash chip probe code");
* not going to guess how to send commands to them, plus I expect they will
* all speak CFI..
*
- * $Id: jedec.c,v 1.8 2001/06/09 23:56:57 dwmw2 Exp $
+ * $Id: jedec.c,v 1.11 2001/10/02 15:05:12 dwmw2 Exp $
*/
#include <linux/mtd/jedec.h>
-struct mtd_info *jedec_probe(struct map_info *);
-int jedec_probe8(struct map_info *map,unsigned long base,
+static struct mtd_info *jedec_probe(struct map_info *);
+static int jedec_probe8(struct map_info *map,unsigned long base,
struct jedec_private *priv);
-int jedec_probe16(struct map_info *map,unsigned long base,
+static int jedec_probe16(struct map_info *map,unsigned long base,
struct jedec_private *priv);
-int jedec_probe32(struct map_info *map,unsigned long base,
+static int jedec_probe32(struct map_info *map,unsigned long base,
struct jedec_private *priv);
static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
unsigned long len);
static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
-struct mtd_info *jedec_probe(struct map_info *map);
+static struct mtd_info *jedec_probe(struct map_info *map);
};
/* Probe entry point */
-struct jedec_private priv;
-struct mtd_info __MTD;
-struct mtd_info *jedec_probe(struct map_info *map)
+
+static struct mtd_info *jedec_probe(struct map_info *map)
{
- struct mtd_info *MTD = &__MTD;
+ struct mtd_info *MTD;
+ struct jedec_private *priv;
unsigned long Base;
unsigned long SectorSize;
unsigned count;
unsigned I,Uniq;
char Part[200];
memset(&priv,0,sizeof(priv));
+
+ MTD = kmalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL);
+ if (!MTD)
+ return NULL;
+
+ memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private));
+ priv = (struct jedec_private *)&MTD[1];
my_bank_size = map->size;
if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
{
printk("mtd: Increase MAX_JEDEC_CHIPS, too many banks.\n");
+ kfree(MTD);
return 0;
}
map->buswidth = 1;
if (map->buswidth == 1){
- if (jedec_probe8(map,Base,&priv) == 0) {
+ if (jedec_probe8(map,Base,priv) == 0) {
printk("did recognize jedec chip\n");
+ kfree(MTD);
return 0;
}
}
if (map->buswidth == 2)
- jedec_probe16(map,Base,&priv);
+ jedec_probe16(map,Base,priv);
if (map->buswidth == 4)
- jedec_probe32(map,Base,&priv);
+ jedec_probe32(map,Base,priv);
}
// Get the biggest sector size
SectorSize = 0;
- for (I = 0; priv.chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
+ for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
{
- // printk("priv.chips[%d].jedec is %x\n",I,priv.chips[I].jedec);
- // printk("priv.chips[%d].sectorsize is %lx\n",I,priv.chips[I].sectorsize);
- if (priv.chips[I].sectorsize > SectorSize)
- SectorSize = priv.chips[I].sectorsize;
+ // printk("priv->chips[%d].jedec is %x\n",I,priv->chips[I].jedec);
+ // printk("priv->chips[%d].sectorsize is %lx\n",I,priv->chips[I].sectorsize);
+ if (priv->chips[I].sectorsize > SectorSize)
+ SectorSize = priv->chips[I].sectorsize;
}
// Quickly ensure that the other sector sizes are factors of the largest
- for (I = 0; priv.chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
+ for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
{
- if ((SectorSize/priv.chips[I].sectorsize)*priv.chips[I].sectorsize != SectorSize)
+ if ((SectorSize/priv->chips[I].sectorsize)*priv->chips[I].sectorsize != SectorSize)
{
printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
+ kfree(MTD);
return 0;
}
}
Part[sizeof(Part)-11] = 0;
strcat(Part," ");
Uniq = 0;
- for (I = 0; priv.chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
+ for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
{
const struct JEDECTable *JEDEC;
- if (priv.chips[I+1].jedec == priv.chips[I].jedec)
+ if (priv->chips[I+1].jedec == priv->chips[I].jedec)
{
count++;
continue;
}
// Locate the chip in the jedec table
- JEDEC = jedec_idtoinf(priv.chips[I].jedec >> 8,priv.chips[I].jedec);
+ JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
if (JEDEC == 0)
{
printk("mtd: Internal Error, JEDEC not set\n");
+ kfree(MTD);
return 0;
}
are empty banks. Note, the last bank does not count here, only the
first banks are important. Holes on non-bank boundaries can not exist
due to the way the detection algorithm works. */
- if (priv.size < my_bank_size)
- my_bank_size = priv.size;
- priv.is_banked = 0;
- //printk("priv.size is %x, my_bank_size is %x\n",priv.size,my_bank_size);
- //printk("priv.bank_fill[0] is %x\n",priv.bank_fill[0]);
- if (!priv.size) {
- printk("priv.size is zero\n");
+ if (priv->size < my_bank_size)
+ my_bank_size = priv->size;
+ priv->is_banked = 0;
+ //printk("priv->size is %x, my_bank_size is %x\n",priv->size,my_bank_size);
+ //printk("priv->bank_fill[0] is %x\n",priv->bank_fill[0]);
+ if (!priv->size) {
+ printk("priv->size is zero\n");
+ kfree(MTD);
return 0;
}
- if (priv.size/my_bank_size) {
- if (priv.size/my_bank_size == 1) {
- priv.size = my_bank_size;
+ if (priv->size/my_bank_size) {
+ if (priv->size/my_bank_size == 1) {
+ priv->size = my_bank_size;
}
else {
- for (I = 0; I != priv.size/my_bank_size - 1; I++)
+ for (I = 0; I != priv->size/my_bank_size - 1; I++)
{
- if (priv.bank_fill[I] != my_bank_size)
- priv.is_banked = 1;
+ if (priv->bank_fill[I] != my_bank_size)
+ priv->is_banked = 1;
/* This even could be eliminated, but new de-optimized read/write
functions have to be written */
- printk("priv.bank_fill[%d] is %lx, priv.bank_fill[0] is %lx\n",I,priv.bank_fill[I],priv.bank_fill[0]);
- if (priv.bank_fill[I] != priv.bank_fill[0])
+ printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
+ if (priv->bank_fill[I] != priv->bank_fill[0])
{
- printk("mtd: Failed. Cannot handle unsymetric banking\n");
+ printk("mtd: Failed. Cannot handle unsymmetric banking\n");
+ kfree(MTD);
return 0;
}
}
}
}
- if (priv.is_banked == 1)
+ if (priv->is_banked == 1)
strcat(Part,", banked");
// printk("Part: '%s'\n",Part);
MTD->flags = MTD_CAP_NORFLASH;
MTD->erasesize = SectorSize*(map->buswidth);
// printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
- MTD->size = priv.size;
+ MTD->size = priv->size;
// printk("MTD->size is %x\n",(unsigned int)MTD->size);
//MTD->module = THIS_MODULE; // ? Maybe this should be the low level module?
MTD->erase = flash_erase;
- if (priv.is_banked == 1)
+ if (priv->is_banked == 1)
MTD->read = jedec_read_banked;
else
MTD->read = jedec_read;
MTD->write = flash_write;
MTD->sync = jedec_sync;
MTD->priv = map;
- map->fldrv_priv = &priv;
+ map->fldrv_priv = priv;
map->fldrv = &jedec_chipdrv;
MOD_INC_USE_COUNT;
return MTD;
}
// Look for flash using an 8 bit bus interface
-int jedec_probe8(struct map_info *map,unsigned long base,
+static int jedec_probe8(struct map_info *map,unsigned long base,
struct jedec_private *priv)
{
#define flread(x) map->read8(map,base+x)
}
// Look for flash using a 16 bit bus interface (ie 2 8-bit chips)
-int jedec_probe16(struct map_info *map,unsigned long base,
+static int jedec_probe16(struct map_info *map,unsigned long base,
struct jedec_private *priv)
{
return 0;
}
// Look for flash using a 32 bit bus interface (ie 4 8-bit chips)
-int jedec_probe32(struct map_info *map,unsigned long base,
+static int jedec_probe32(struct map_info *map,unsigned long base,
struct jedec_private *priv)
{
#define flread(x) map->read32(map,base+((x)<<2))
chip->length = (start + len - ByteStart + (1 << chip->addrshift)-1) >> chip->addrshift;
}
}
- /*}}}*/
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define jedec_probe_init init_module
-#define jedec_probe_exit cleanup_module
-#endif
int __init jedec_probe_init(void)
{
module_init(jedec_probe_init);
module_exit(jedec_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com> et al.");
+MODULE_DESCRIPTION("Old MTD chip driver for JEDEC-compliant flash chips");
--- /dev/null
+/*
+ Common Flash Interface probe code.
+ (C) 2000 Red Hat. GPL'd.
+ $Id: jedec_probe.c,v 1.3 2001/10/02 15:05:12 dwmw2 Exp $
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/gen_probe.h>
+
+
+/* Manufacturers */
+#define MANUFACTURER_AMD 0x0001
+#define MANUFACTURER_FUJITSU 0x0004
+#define MANUFACTURER_ATMEL 0x001f
+#define MANUFACTURER_ST 0x0020
+#define MANUFACTURER_SST 0x00BF
+#define MANUFACTURER_TOSHIBA 0x0098
+
+/* AMD */
+#define AM29F800BB 0x2258
+#define AM29F800BT 0x22D6
+#define AM29LV800BB 0x225B
+#define AM29LV800BT 0x22DA
+#define AM29LV160DT 0x22C4
+#define AM29LV160DB 0x2249
+
+/* Atmel */
+#define AT49BV16X4 0x00c0
+#define AT49BV16X4T 0x00c2
+
+/* Fujitsu */
+#define MBM29LV160TE 0x22C4
+#define MBM29LV160BE 0x2249
+
+/* ST - www.st.com */
+#define M29W800T 0x00D7
+#define M29W160DT 0x22C4
+#define M29W160DB 0x2249
+
+/* SST */
+#define SST39LF800 0x2781
+#define SST39LF160 0x2782
+
+/* Toshiba */
+#define TC58FVT160 0x00C2
+#define TC58FVB160 0x0043
+
+
+struct amd_flash_info {
+ const __u16 mfr_id;
+ const __u16 dev_id;
+ const char *name;
+ const int DevSize;
+ const int InterfaceDesc;
+ const int NumEraseRegions;
+ const ulong regions[4];
+};
+
+#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
+
+#define SIZE_1MiB 20
+#define SIZE_2MiB 21
+#define SIZE_4MiB 22
+
+static const struct amd_flash_info jedec_table[] = {
+ {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29LV160DT,
+ name: "AMD AM29LV160DT",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29LV160DB,
+ name: "AMD AM29LV160DB",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ mfr_id: MANUFACTURER_TOSHIBA,
+ dev_id: TC58FVT160,
+ name: "Toshiba TC58FVT160",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ mfr_id: MANUFACTURER_FUJITSU,
+ dev_id: MBM29LV160TE,
+ name: "Fujitsu MBM29LV160TE",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ mfr_id: MANUFACTURER_TOSHIBA,
+ dev_id: TC58FVB160,
+ name: "Toshiba TC58FVB160",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ mfr_id: MANUFACTURER_FUJITSU,
+ dev_id: MBM29LV160BE,
+ name: "Fujitsu MBM29LV160BE",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29LV800BB,
+ name: "AMD AM29LV800BB",
+ DevSize: SIZE_1MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29F800BB,
+ name: "AMD AM29F800BB",
+ DevSize: SIZE_1MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29LV800BT,
+ name: "AMD AM29LV800BT",
+ DevSize: SIZE_1MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29F800BT,
+ name: "AMD AM29F800BT",
+ DevSize: SIZE_1MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ mfr_id: MANUFACTURER_AMD,
+ dev_id: AM29LV800BB,
+ name: "AMD AM29LV800BB",
+ DevSize: SIZE_1MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ mfr_id: MANUFACTURER_ST,
+ dev_id: M29W800T,
+ name: "ST M29W800T",
+ DevSize: SIZE_1MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ mfr_id: MANUFACTURER_ST,
+ dev_id: M29W160DT,
+ name: "ST M29W160DT",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ mfr_id: MANUFACTURER_ST,
+ dev_id: M29W160DB,
+ name: "ST M29W160DB",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 4,
+ regions: {ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ mfr_id: MANUFACTURER_ATMEL,
+ dev_id: AT49BV16X4,
+ name: "Atmel AT49BV16X4",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 3,
+ regions: {ERASEINFO(0x02000,8),
+ ERASEINFO(0x08000,2),
+ ERASEINFO(0x10000,30)
+ }
+ }, {
+ mfr_id: MANUFACTURER_ATMEL,
+ dev_id: AT49BV16X4T,
+ name: "Atmel AT49BV16X4T",
+ DevSize: SIZE_2MiB,
+ NumEraseRegions: 3,
+ regions: {ERASEINFO(0x10000,30),
+ ERASEINFO(0x08000,2),
+ ERASEINFO(0x02000,8)
+ }
+ }
+};
+
+
+static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
+
+static int jedec_probe_chip(struct map_info *map, __u32 base,
+ struct flchip *chips, struct cfi_private *cfi);
+
+struct mtd_info *jedec_probe(struct map_info *map);
+#define jedec_read_mfr(map, base, osf) cfi_read(map, base)
+#define jedec_read_id(map, base, osf) cfi_read(map, (base)+(osf))
+
+static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
+{
+ int i,num_erase_regions;
+
+ printk("Found: %s\n",jedec_table[index].name);
+
+ num_erase_regions = jedec_table[index].NumEraseRegions;
+
+ p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ if (!p_cfi->cfiq) {
+ //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
+ return 0;
+ }
+
+ memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
+
+ p_cfi->cfiq->P_ID = P_ID_AMD_STD;
+ p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
+ p_cfi->cfiq->DevSize = jedec_table[index].DevSize;
+
+ for (i=0; i<num_erase_regions; i++){
+ p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
+ }
+ return 1; /* ok */
+}
+
+/*
+ * Probe for a JEDEC flash chip at 'base'.
+ *
+ * First call (cfi->numchips == 0): choose the unlock addresses for the
+ * configured device width, put the chip into autoselect mode, read the
+ * manufacturer/device IDs and look them up in jedec_table[], retrying
+ * once with widened unlock addresses for strange chips.
+ * Later calls: detect whether 'base' is merely an alias of an already
+ * registered chip before adding it to chips[].
+ *
+ * Returns 1 if a new chip was added, 0 if not recognised or an alias,
+ * -1 if MAX_CFI_CHIPS would be exceeded.
+ */
+static int jedec_probe_chip(struct map_info *map, __u32 base,
+ struct flchip *chips, struct cfi_private *cfi)
+{
+ int i;
+ int osf = cfi->interleave * cfi->device_type;
+ int retried = 0;
+
+ if (!cfi->numchips) {
+ /* Unlock cycle addresses depend on the device width */
+ switch (cfi->device_type) {
+ case CFI_DEVICETYPE_X8:
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ break;
+ case CFI_DEVICETYPE_X16:
+ cfi->addr_unlock1 = 0xaaa;
+ if (map->buswidth == cfi->interleave) {
+ /* X16 chip(s) in X8 mode */
+ cfi->addr_unlock2 = 0x555;
+ } else {
+ cfi->addr_unlock2 = 0x554;
+ }
+ break;
+ case CFI_DEVICETYPE_X32:
+ cfi->addr_unlock1 = 0x1555;
+ cfi->addr_unlock2 = 0xaaa;
+ break;
+ default:
+ printk(KERN_NOTICE "Eep. Unknown jedec_probe device type %d\n", cfi->device_type);
+ return 0;
+ }
+ }
+
+ retry:
+ /* Reset */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+
+ /* Autoselect Mode */
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
+
+ if (!cfi->numchips) {
+ /* This is the first time we're called. Set up the CFI
+ stuff accordingly and return */
+
+ cfi->mfr = jedec_read_mfr(map, base, osf);
+ cfi->id = jedec_read_id(map, base, osf);
+
+ for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
+ if (cfi->mfr == jedec_table[i].mfr_id &&
+ cfi->id == jedec_table[i].dev_id)
+ return cfi_jedec_setup(cfi, i);
+ }
+ if (!retried++) {
+ /* Deal with whichever strange chips these were */
+ cfi->addr_unlock1 |= cfi->addr_unlock1 << 8;
+ cfi->addr_unlock2 |= cfi->addr_unlock2 << 8;
+ goto retry;
+ }
+ return 0;
+ }
+
+ /* Check each previous chip to see if it's an alias */
+ for (i=0; i<cfi->numchips; i++) {
+ /* This chip should be in read mode if it's one
+ we've already touched. */
+ if (jedec_read_mfr(map, base, osf) == cfi->mfr &&
+ jedec_read_id(map, base, osf) == cfi->id) {
+ /* Eep. This chip also looks like it's in autoselect mode.
+ Is it an alias for the new one? */
+
+ cfi_send_gen_cmd(0xF0, 0, chips[i].start, map, cfi, cfi->device_type, NULL);
+ /* If the device IDs go away, it's an alias */
+ if (jedec_read_mfr(map, base, osf) != cfi->mfr ||
+ jedec_read_id(map, base, osf) != cfi->id) {
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, chips[i].start);
+ return 0;
+ }
+
+ /* Yes, it's actually got the device IDs as data. Most
+ * unfortunate. Stick the new chip in read mode
+ * too and if it's the same, assume it's an alias. */
+ /* FIXME: Use other modes to do a proper check */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ if (jedec_read_mfr(map, base, osf) == cfi->mfr &&
+ jedec_read_id(map, base, osf) == cfi->id) {
+ printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
+ map->name, base, chips[i].start);
+ return 0;
+ }
+ }
+ }
+
+ /* OK, if we got to here, then none of the previous chips appear to
+ be aliases for the current one. */
+ if (cfi->numchips == MAX_CFI_CHIPS) {
+ printk(KERN_WARNING"%s: Too many flash chips detected. Increase MAX_CFI_CHIPS from %d.\n", map->name, MAX_CFI_CHIPS);
+ /* Doesn't matter about resetting it to Read Mode - we're not going to talk to it anyway */
+ return -1;
+ }
+ chips[cfi->numchips].start = base;
+ chips[cfi->numchips].state = FL_READY;
+ cfi->numchips++;
+
+ /* Put it back into Read Mode */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit mode\n",
+ map->name, cfi->interleave, cfi->device_type*8, base,
+ map->buswidth*8);
+
+ return 1;
+}
+
+/* Glue for the generic chip-probe framework: a name plus our per-chip
+ probe routine (GNU "label:" initialisers, as used throughout 2.4). */
+static struct chip_probe jedec_chip_probe = {
+ name: "JEDEC",
+ probe_chip: jedec_probe_chip
+};
+
+/* Entry point used by map drivers: delegate to the generic probe
+ engine, which invokes jedec_probe_chip() for each permutation. */
+struct mtd_info *jedec_probe(struct map_info *map)
+{
+ /*
+ * Just use the generic probe stuff to call our CFI-specific
+ * chip_probe routine in all the possible permutations, etc.
+ */
+ return mtd_do_chip_probe(map, &jedec_chip_probe);
+}
+
+/* Registration record so do_map_probe("jedec_probe", ...) finds us */
+static struct mtd_chip_driver jedec_chipdrv = {
+ probe: jedec_probe,
+ name: "jedec_probe",
+ module: THIS_MODULE
+};
+
+/* Module init: make the "jedec_probe" chip driver available */
+int __init jedec_probe_init(void)
+{
+ register_mtd_chip_driver(&jedec_chipdrv);
+ return 0;
+}
+
+/* Module exit: withdraw the chip driver */
+static void __exit jedec_probe_exit(void)
+{
+ unregister_mtd_chip_driver(&jedec_chipdrv);
+}
+
+module_init(jedec_probe_init);
+module_exit(jedec_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
+MODULE_DESCRIPTION("Probe code for JEDEC-compliant flash chips");
--- /dev/null
+/*
+ * Common code to handle absent "placeholder" devices
+ * Copyright 2001 Resilience Corporation <ebrower@resilience.com>
+ * $Id: map_absent.c,v 1.2 2001/10/02 15:05:12 dwmw2 Exp $
+ *
+ * This map driver is used to allocate "placeholder" MTD
+ * devices on systems that have socketed/removable media.
+ * Use of this driver as a fallback preserves the expected
+ * registration of MTD device nodes regardless of probe outcome.
+ * A usage example is as follows:
+ *
+ * my_dev[i] = do_map_probe("cfi", &my_map[i]);
+ * if(NULL == my_dev[i]) {
+ * my_dev[i] = do_map_probe("map_absent", &my_map[i]);
+ * }
+ *
+ * Any device 'probed' with this driver will return -ENODEV
+ * upon open.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/map.h>
+
+
+static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int map_absent_erase (struct mtd_info *, struct erase_info *);
+static void map_absent_sync (struct mtd_info *);
+static struct mtd_info *map_absent_probe(struct map_info *map);
+static void map_absent_destroy (struct mtd_info *);
+
+
+/* Registration record so do_map_probe("map_absent", ...) finds us;
+ destroy is a nop since probe() keeps no driver-private state. */
+static struct mtd_chip_driver map_absent_chipdrv = {
+ probe: map_absent_probe,
+ destroy: map_absent_destroy,
+ name: "map_absent",
+ module: THIS_MODULE
+};
+
+/* Allocate a placeholder mtd_info for 'map'. The device is marked
+ MTD_ABSENT and all of its I/O operations fail with -ENODEV.
+ Returns the new mtd_info, or NULL on allocation failure. */
+static struct mtd_info *map_absent_probe(struct map_info *map)
+{
+ struct mtd_info *mtd;
+
+ mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd) {
+ return NULL;
+ }
+
+ memset(mtd, 0, sizeof(*mtd));
+
+ map->fldrv = &map_absent_chipdrv;
+ mtd->priv = map;
+ mtd->name = map->name;
+ mtd->type = MTD_ABSENT;
+ mtd->size = map->size;
+ mtd->erase = map_absent_erase;
+ mtd->read = map_absent_read;
+ mtd->write = map_absent_write;
+ mtd->sync = map_absent_sync;
+ mtd->flags = 0;
+ mtd->erasesize = PAGE_SIZE;
+
+ MOD_INC_USE_COUNT;
+ return mtd;
+}
+
+
+/* All placeholder data operations fail with -ENODEV (device absent) */
+static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ return -ENODEV;
+}
+
+static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
+{
+ *retlen = 0;
+ return -ENODEV;
+}
+
+static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return -ENODEV;
+}
+
+/* nothing to flush on an absent device */
+static void map_absent_sync(struct mtd_info *mtd)
+{
+ /* nop */
+}
+
+/* no driver-private resources to release */
+static void map_absent_destroy(struct mtd_info *mtd)
+{
+ /* nop */
+}
+
+/* Register/unregister the "map_absent" chip driver with the MTD core */
+int __init map_absent_init(void)
+{
+ register_mtd_chip_driver(&map_absent_chipdrv);
+ return 0;
+}
+
+static void __exit map_absent_exit(void)
+{
+ unregister_mtd_chip_driver(&map_absent_chipdrv);
+}
+
+module_init(map_absent_init);
+module_exit(map_absent_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Resilience Corporation - Eric Brower <ebrower@resilience.com>");
+MODULE_DESCRIPTION("Placeholder MTD chip driver for 'absent' chips");
/*
* Common code to handle map devices which are simple RAM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_ram.c,v 1.11 2001/06/08 15:34:04 dwmw2 Exp $
+ * $Id: map_ram.c,v 1.14 2001/10/02 15:05:12 dwmw2 Exp $
*/
#include <linux/module.h>
static struct mtd_chip_driver mapram_chipdrv = {
probe: map_ram_probe,
- name: "ram",
+ name: "map_ram",
module: THIS_MODULE
};
mtd->priv = map;
mtd->name = map->name;
mtd->type = MTD_RAM;
- mtd->erasesize = 0x10000;
mtd->size = map->size;
mtd->erase = mapram_erase;
mtd->read = mapram_read;
mtd->write = mapram_write;
mtd->sync = mapram_nop;
mtd->flags = MTD_CAP_RAM | MTD_VOLATILE;
+
mtd->erasesize = PAGE_SIZE;
+ while(mtd->size & (mtd->erasesize - 1))
+ mtd->erasesize >>= 1;
MOD_INC_USE_COUNT;
return mtd;
/* Nothing to see here */
}
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define map_ram_init init_module
-#define map_ram_exit cleanup_module
-#endif
-
-static int __init map_ram_init(void)
+int __init map_ram_init(void)
{
register_mtd_chip_driver(&mapram_chipdrv);
return 0;
module_init(map_ram_init);
module_exit(map_ram_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD chip driver for RAM chips");
/*
* Common code to handle map devices which are simple ROM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_rom.c,v 1.14 2001/06/02 14:30:43 dwmw2 Exp $
+ * $Id: map_rom.c,v 1.17 2001/10/02 15:05:12 dwmw2 Exp $
*/
+#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
static struct mtd_chip_driver maprom_chipdrv = {
probe: map_rom_probe,
- name: "rom",
+ name: "map_rom",
module: THIS_MODULE
};
mtd->sync = maprom_nop;
mtd->flags = MTD_CAP_ROM;
mtd->erasesize = 131072;
+ while(mtd->size & (mtd->erasesize - 1))
+ mtd->erasesize >>= 1;
MOD_INC_USE_COUNT;
return mtd;
return -EIO;
}
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define map_rom_init init_module
-#define map_rom_exit cleanup_module
-#endif
-
-mod_init_t map_rom_init(void)
+int __init map_rom_init(void)
{
register_mtd_chip_driver(&maprom_chipdrv);
return 0;
}
-mod_exit_t map_rom_exit(void)
+static void __exit map_rom_exit(void)
{
unregister_mtd_chip_driver(&maprom_chipdrv);
}
module_init(map_rom_init);
module_exit(map_rom_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD chip driver for ROM chips");
* Copyright 2000,2001 David A. Schleef <ds@schleef.org>
* 2000,2001 Lineo, Inc.
*
- * $Id: sharp.c,v 1.4 2001/04/29 16:21:17 dwmw2 Exp $
+ * $Id: sharp.c,v 1.6 2001/10/02 15:05:12 dwmw2 Exp $
*
* Devices supported:
* LH28F016SCT Symmetrical block flash memory, 2Mx8
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
+#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/delay.h>
}
-#if LINUX_VERSION_CODE < 0x020212 && defined(MODULE)
-#define sharp_probe_init init_module
-#define sharp_probe_exit cleanup_module
-#endif
-
int __init sharp_probe_init(void)
{
printk("MTD Sharp chip driver <ds@lineo.com>\n");
module_init(sharp_probe_init);
module_exit(sharp_probe_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Schleef <ds@schleef.org>");
+MODULE_DESCRIPTION("Old MTD chip driver for pre-CFI Sharp flash chips");
# drivers/mtd/maps/Config.in
-# $Id: Config.in,v 1.2 2001/04/29 16:24:34 dwmw2 Exp $
+# $Id: Config.in,v 1.5 2001/09/23 15:33:10 dwmw2 Exp $
mainmenu_option next_comment
bool ' PMC551 Debugging' CONFIG_MTD_PMC551_DEBUG
fi
dep_tristate ' Uncached system RAM' CONFIG_MTD_SLRAM $CONFIG_MTD
+if [ "$CONFIG_SA1100_LART" = "y" ]; then
+ dep_tristate ' 28F160xx flash driver for LART' CONFIG_MTD_LART $CONFIG_MTD
+fi
dep_tristate ' Test driver using RAM' CONFIG_MTD_MTDRAM $CONFIG_MTD
if [ "$CONFIG_MTD_MTDRAM" = "y" -o "$CONFIG_MTD_MTDRAM" = "m" ]; then
int 'MTDRAM device size in KiB' CONFIG_MTDRAM_TOTAL_SIZE 4096
hex 'SRAM Hexadecimal Absolute position or 0' CONFIG_MTDRAM_ABS_POS 0
fi
fi
+dep_tristate ' MTD emulation using block device' CONFIG_MTD_BLKMTD $CONFIG_MTD
comment 'Disk-On-Chip Device Drivers'
dep_tristate ' M-Systems Disk-On-Chip 1000' CONFIG_MTD_DOC1000 $CONFIG_MTD
#
# linux/drivers/devices/Makefile
#
-# $Id: Makefile,v 1.2 2001/04/19 22:12:36 dwmw2 Exp $
+# $Id: Makefile,v 1.4 2001/06/26 21:10:05 spse Exp $
O_TARGET := devlink.o
obj-$(CONFIG_MTD_SLRAM) += slram.o
obj-$(CONFIG_MTD_PMC551) += pmc551.o
obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
+obj-$(CONFIG_MTD_LART) += lart.o
+obj-$(CONFIG_MTD_BLKMTD) += blkmtd.o
include $(TOPDIR)/Rules.make
--- /dev/null
+/*
+ * $Id: blkmtd.c,v 1.3 2001/10/02 15:33:20 dwmw2 Exp $
+ * blkmtd.c - use a block device as a fake MTD
+ *
+ * Author: Simon Evans <spse@secret.org.uk>
+ *
+ * Copyright (C) 2001 Simon Evans <spse@secret.org.uk>
+ *
+ * Licence: GPL
+ *
+ * How it works:
+ * The driver uses raw/io to read/write the device and the page
+ * cache to cache access. Writes update the page cache with the
+ * new data but make a copy of the new page(s) and then a kernel
+ * thread writes pages out to the device in the background. This
+ * ensures that writes are ordered even if a page is updated twice.
+ * Also, since pages in the page cache are never marked as dirty,
+ * we dont have to worry about writepage() being called on some
+ * random page which may not be in the write order.
+ *
+ * Erases are handled like writes, so the callback is called after
+ * the page cache has been updated. Sync()ing will wait until it is
+ * all done.
+ *
+ * It can be loaded Read-Only to prevent erases and writes to the
+ * medium.
+ *
+ * Todo:
+ * Make the write queue size dynamic so this it is not too big on
+ * small memory systems and too small on large memory systems.
+ *
+ * Page cache usage may still be a bit wrong. Check we are doing
+ * everything properly.
+ *
+ * Somehow allow writes to dirty the page cache so we dont use too
+ * much memory making copies of outgoing pages. Need to handle case
+ * where page x is written to, then page y, then page x again before
+ * any of them have been committed to disk.
+ *
+ * Reading should read multiple pages at once rather than using
+ * readpage() for each one. This is easy and will be fixed asap.
+ *
+ * Dont run the write_thread if readonly. This is also easy and will
+ * be fixed asap.
+ *
+ * Even though the multiple erase regions are used if the default erase
+ * block size doesnt match the device properly, erases currently wont
+ * work on the last page if it is not a full page.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/iobuf.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/compatmac.h>
+#include <linux/mtd/mtd.h>
+
+/* The kernel config symbol is CONFIG_MODVERSIONS; the original test of
+ * the non-existent CONFIG_MODVERSION could never be true, so versioned
+ * symbols were silently never used. */
+#ifdef CONFIG_MODVERSIONS
+#define MODVERSIONS
+#include <linux/modversions.h>
+#endif
+
+/* Default erase size in K, always make it a multiple of PAGE_SIZE */
+#define CONFIG_MTD_BLKDEV_ERASESIZE 128
+#define VERSION "1.1"
+extern int *blk_size[];
+extern int *blksize_size[];
+
+/* Info for the block device backing the fake MTD */
+typedef struct mtd_raw_dev_data_s {
+ struct block_device *binding; /* the opened block device */
+ int sector_size, sector_bits, total_sectors;
+ size_t totalsize; /* device size in bytes */
+ int readonly; /* non-zero: reject writes and erases */
+ struct address_space as; /* private page-cache mapping */
+ struct file *file; /* passed to blkmtd_readpage() as filler arg */
+} mtd_raw_dev_data_t;
+
+/* Info for each queue item in the write queue: a run of copied pages
+ [pagenr, pagenr+pagecnt) waiting to be written out in order */
+typedef struct mtdblkdev_write_queue_s {
+ mtd_raw_dev_data_t *rawdevice; /* target device */
+ struct page **pages; /* private copies of the pages to write */
+ int pagenr; /* index of the first page */
+ int pagecnt; /* number of pages */
+ int iserase; /* non-zero: an erase (pages are 0xff-filled) */
+} mtdblkdev_write_queue_t;
+
+
+/* Static info about the MTD, used in cleanup_module */
+static struct mtd_info *mtd_info;
+
+/* Write queue fixed size */
+#define WRITE_QUEUE_SZ 512
+
+/* Storage for the write queue */
+static mtdblkdev_write_queue_t write_queue[WRITE_QUEUE_SZ];
+static int volatile write_queue_head;
+static int volatile write_queue_tail;
+static int volatile write_queue_cnt;
+static spinlock_t mbd_writeq_lock = SPIN_LOCK_UNLOCKED;
+
+/* Tell the write thread to finish */
+static volatile int write_task_finish = 0;
+
+/* ipc with the write thread */
+#if LINUX_VERSION_CODE > 0x020300
+static DECLARE_MUTEX_LOCKED(thread_sem);
+static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
+static DECLARE_WAIT_QUEUE_HEAD(mtbd_sync_wq);
+#else
+static struct semaphore thread_sem = MUTEX_LOCKED;
+DECLARE_WAIT_QUEUE_HEAD(thr_wq);
+DECLARE_WAIT_QUEUE_HEAD(mtbd_sync_wq);
+#endif
+
+
+
+/* Module parameters passed by insmod/modprobe */
+char *device; /* the block device to use */
+int erasesz; /* optional default erase size */
+int ro; /* optional read only flag */
+int bs; /* optionally force the block size (avoid using) */
+int count; /* optionally force the block count (avoid using) */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
+MODULE_DESCRIPTION("Emulate an MTD using a block device");
+MODULE_PARM(device, "s");
+MODULE_PARM_DESC(device, "block device to use");
+MODULE_PARM(erasesz, "i");
+MODULE_PARM_DESC(erasesz, "optional erase size to use in KB. eg 4=4K.");
+MODULE_PARM(ro, "i");
+MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
+MODULE_PARM(bs, "i");
+MODULE_PARM_DESC(bs, "force the block size in bytes");
+MODULE_PARM(count, "i");
+MODULE_PARM_DESC(count, "force the block count");
+#endif
+
+
+
+/* Page cache stuff */
+
+/* writepage() - should never be called, since pages in our mapping are
+ never marked dirty (see file header) - catch and complain anyway */
+static int blkmtd_writepage(struct page *page)
+{
+ printk("blkmtd: writepage called!!!\n");
+ return -EIO;
+}
+
+
+/* readpage() - reads one page from the block device into the page
+ * cache. A page that is still sitting in the outgoing write queue is
+ * satisfied from there so reads always see queued-but-unwritten data.
+ * Returns 0 (the page is marked with PageError on I/O failure) or a
+ * negative errno if the kiovec could not be allocated. */
+static int blkmtd_readpage(struct file *file, struct page *page)
+{
+ int err;
+ int sectornr, sectors, i;
+ struct kiobuf *iobuf;
+ mtd_raw_dev_data_t *rawdevice = (mtd_raw_dev_data_t *)file->private_data;
+ kdev_t dev;
+
+ if(!rawdevice) {
+ printk("blkmtd: readpage: PANIC file->private_data == NULL\n");
+ return -EIO;
+ }
+ dev = to_kdev_t(rawdevice->binding->bd_dev);
+
+ DEBUG(2, "blkmtd: readpage called, dev = `%s' page = %p index = %ld\n",
+ bdevname(dev), page, page->index);
+
+ if(Page_Uptodate(page)) {
+ DEBUG(1, "blkmtd: readpage page %ld is already upto date\n", page->index);
+ UnlockPage(page);
+ return 0;
+ }
+
+ ClearPageUptodate(page);
+ ClearPageError(page);
+
+ /* see if page is in the outgoing write queue */
+ spin_lock(&mbd_writeq_lock);
+ if(write_queue_cnt) {
+ int i = write_queue_tail;
+ while(i != write_queue_head) {
+ mtdblkdev_write_queue_t *item = &write_queue[i];
+ if(page->index >= item->pagenr && page->index < item->pagenr+item->pagecnt) {
+ /* yes it is */
+ /* bugfix: the offset into the item's page list is
+ page->index - item->pagenr, not the reverse (the
+ original computed a non-positive index) */
+ int index = page->index - item->pagenr;
+ DEBUG(1, "blkmtd: readpage: found page %ld in outgoing write queue\n",
+ page->index);
+ if(item->iserase) {
+ memset(page_address(page), 0xff, PAGE_SIZE);
+ } else {
+ memcpy(page_address(page), page_address(item->pages[index]), PAGE_SIZE);
+ }
+ SetPageUptodate(page);
+ flush_dcache_page(page);
+ UnlockPage(page);
+ spin_unlock(&mbd_writeq_lock);
+ return 0;
+ }
+ i++;
+ i %= WRITE_QUEUE_SZ;
+ }
+ }
+ spin_unlock(&mbd_writeq_lock);
+
+
+ DEBUG(3, "blkmtd: readpage: getting kiovec\n");
+ err = alloc_kiovec(1, &iobuf);
+ if (err) {
+ return err;
+ }
+ iobuf->offset = 0;
+ iobuf->nr_pages = 1;
+ iobuf->length = PAGE_SIZE;
+ iobuf->locked = 1;
+ iobuf->maplist[0] = page;
+ sectornr = page->index << (PAGE_SHIFT - rawdevice->sector_bits);
+ sectors = 1 << (PAGE_SHIFT - rawdevice->sector_bits);
+ DEBUG(3, "blkmtd: readpage: sectornr = %d sectors = %d\n", sectornr, sectors);
+ for(i = 0; i < sectors; i++) {
+ iobuf->blocks[i] = sectornr++;
+ }
+
+ DEBUG(3, "blkmtd: readpage: starting brw_kiovec\n");
+ err = brw_kiovec(READ, 1, &iobuf, dev, iobuf->blocks, rawdevice->sector_size);
+ DEBUG(3, "blkmtd: readpage: finished, err = %d\n", err);
+ iobuf->locked = 0;
+ free_kiovec(1, &iobuf);
+ if(err != PAGE_SIZE) {
+ printk("blkmtd: readpage: error reading page %ld\n", page->index);
+ memset(page_address(page), 0, PAGE_SIZE);
+ SetPageError(page);
+ err = -EIO;
+ } else {
+ DEBUG(3, "blkmtd: readpage: setting page upto date\n");
+ SetPageUptodate(page);
+ err = 0;
+ }
+ flush_dcache_page(page);
+ UnlockPage(page);
+ DEBUG(2, "blkmtd: readpage: finished, err = %d\n", err);
+ return 0;
+}
+
+
+/* address_space ops for our private mapping: reads go through
+ blkmtd_readpage(); writepage should never actually trigger */
+static struct address_space_operations blkmtd_aops = {
+ writepage: blkmtd_writepage,
+ readpage: blkmtd_readpage,
+};
+
+
+/* This is the kernel thread that empties the write queue to disk.
+ While the queue is empty it sleeps for up to 2*HZ on thr_wq; when
+ items appear it writes them with brw_kiovec() in bursts of at most
+ KIO_MAX_SECTORS, then frees the copied pages and wakes mtbd_sync_wq
+ so queuers/sync can make progress. Exits when write_task_finish is
+ set, signalling thread_sem on the way out. */
+static int write_queue_task(void *data)
+{
+ int err;
+ struct task_struct *tsk = current;
+ struct kiobuf *iobuf;
+
+ DECLARE_WAITQUEUE(wait, tsk);
+ DEBUG(1, "blkmtd: writetask: starting (pid = %d)\n", tsk->pid);
+ daemonize();
+ strcpy(tsk->comm, "blkmtdd");
+ tsk->tty = NULL;
+ /* block all signals for this daemon */
+ spin_lock_irq(&tsk->sigmask_lock);
+ sigfillset(&tsk->blocked);
+ recalc_sigpending(tsk);
+ spin_unlock_irq(&tsk->sigmask_lock);
+ exit_sighand(tsk);
+
+ if(alloc_kiovec(1, &iobuf))
+ return 0;
+ DEBUG(2, "blkmtd: writetask: entering main loop\n");
+ add_wait_queue(&thr_wq, &wait);
+
+ while(1) {
+ spin_lock(&mbd_writeq_lock);
+
+ if(!write_queue_cnt) {
+ /* If nothing in the queue, wake up anyone wanting to know when there
+ is space in the queue then sleep for 2*HZ */
+ spin_unlock(&mbd_writeq_lock);
+ DEBUG(3, "blkmtd: writetask: queue empty\n");
+ if(waitqueue_active(&mtbd_sync_wq))
+ wake_up(&mtbd_sync_wq);
+ interruptible_sleep_on_timeout(&thr_wq, 2*HZ);
+ DEBUG(3, "blkmtd: writetask: woken up\n");
+ if(write_task_finish)
+ break;
+ } else {
+ /* we have stuff to write */
+ mtdblkdev_write_queue_t *item = &write_queue[write_queue_tail];
+ struct page **pages = item->pages;
+ int pagecnt = item->pagecnt;
+ int pagenr = item->pagenr;
+ int i;
+ int max_sectors = KIO_MAX_SECTORS >> (item->rawdevice->sector_bits - 9);
+ kdev_t dev = to_kdev_t(item->rawdevice->binding->bd_dev);
+
+
+ DEBUG(3, "blkmtd: writetask: got %d queue items\n", write_queue_cnt);
+ set_current_state(TASK_RUNNING);
+ spin_unlock(&mbd_writeq_lock);
+
+ DEBUG(2, "blkmtd: write_task: writing pagenr = %d pagecnt = %d",
+ item->pagenr, item->pagecnt);
+
+ iobuf->offset = 0;
+ iobuf->locked = 1;
+
+ /* Loop through all the pages to be written in the queue item, remembering
+ we can only write KIO_MAX_SECTORS at a time */
+
+ while(pagecnt) {
+ int sectornr = pagenr << (PAGE_SHIFT - item->rawdevice->sector_bits);
+ int sectorcnt = pagecnt << (PAGE_SHIFT - item->rawdevice->sector_bits);
+ int cursectors = (sectorcnt < max_sectors) ? sectorcnt : max_sectors;
+ int cpagecnt = (cursectors << item->rawdevice->sector_bits) + PAGE_SIZE-1;
+ cpagecnt >>= PAGE_SHIFT;
+
+ for(i = 0; i < cpagecnt; i++)
+ iobuf->maplist[i] = *(pages++);
+
+ for(i = 0; i < cursectors; i++) {
+ iobuf->blocks[i] = sectornr++;
+ }
+
+ iobuf->nr_pages = cpagecnt;
+ iobuf->length = cursectors << item->rawdevice->sector_bits;
+ DEBUG(3, "blkmtd: write_task: about to kiovec\n");
+ err = brw_kiovec(WRITE, 1, &iobuf, dev, iobuf->blocks, item->rawdevice->sector_size);
+ DEBUG(3, "bklmtd: write_task: done, err = %d\n", err);
+ if(err != (cursectors << item->rawdevice->sector_bits)) {
+ /* if an error occurred - set this to exit the loop */
+ pagecnt = 0;
+ } else {
+ pagenr += cpagecnt;
+ pagecnt -= cpagecnt;
+ }
+ }
+
+ /* free up the pages used in the write and list of pages used in the write
+ queue item */
+ iobuf->locked = 0;
+ spin_lock(&mbd_writeq_lock);
+ write_queue_cnt--;
+ write_queue_tail++;
+ write_queue_tail %= WRITE_QUEUE_SZ;
+ for(i = 0 ; i < item->pagecnt; i++) {
+ UnlockPage(item->pages[i]);
+ __free_pages(item->pages[i], 0);
+ }
+ kfree(item->pages);
+ item->pages = NULL;
+ spin_unlock(&mbd_writeq_lock);
+ /* Tell others there is some space in the write queue */
+ if(waitqueue_active(&mtbd_sync_wq))
+ wake_up(&mtbd_sync_wq);
+ }
+ }
+ remove_wait_queue(&thr_wq, &wait);
+ DEBUG(1, "blkmtd: writetask: exiting\n");
+ free_kiovec(1, &iobuf);
+ /* Tell people we have exited */
+ up(&thread_sem);
+ return 0;
+}
+
+
+/* Add a range of pages into the outgoing write queue, making private
+ copies of them first (the copies are later freed by the write
+ thread). Blocks uninterruptibly while the queue is full. The caller
+ keeps ownership of the pages passed in. Returns 0 or -ENOMEM/-EINVAL. */
+static int queue_page_write(mtd_raw_dev_data_t *rawdevice, struct page **pages,
+ int pagenr, int pagecnt, int iserase)
+{
+ struct page *outpage;
+ struct page **new_pages;
+ mtdblkdev_write_queue_t *item;
+ int i;
+ DECLARE_WAITQUEUE(wait, current);
+ DEBUG(2, "mtdblkdev: queue_page_write: adding pagenr = %d pagecnt = %d\n", pagenr, pagecnt);
+
+ if(!pagecnt)
+ return 0;
+
+ if(pages == NULL)
+ return -EINVAL;
+
+ /* create a array for the list of pages */
+ new_pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
+ if(new_pages == NULL)
+ return -ENOMEM;
+
+ /* make copies of the pages in the page cache */
+ for(i = 0; i < pagecnt; i++) {
+ outpage = alloc_pages(GFP_KERNEL, 0);
+ if(!outpage) {
+ /* unwind: free the copies made so far */
+ while(i--) {
+ UnlockPage(new_pages[i]);
+ __free_pages(new_pages[i], 0);
+ }
+ kfree(new_pages);
+ return -ENOMEM;
+ }
+ lock_page(outpage);
+ memcpy(page_address(outpage), page_address(pages[i]), PAGE_SIZE);
+ new_pages[i] = outpage;
+ }
+
+ /* wait until there is some space in the write queue */
+ test_lock:
+ spin_lock(&mbd_writeq_lock);
+ if(write_queue_cnt == WRITE_QUEUE_SZ) {
+ spin_unlock(&mbd_writeq_lock);
+ DEBUG(3, "blkmtd: queue_page: Queue full\n");
+ current->state = TASK_UNINTERRUPTIBLE;
+ add_wait_queue(&mtbd_sync_wq, &wait);
+ /* kick the write thread, then sleep until it makes room */
+ wake_up_interruptible(&thr_wq);
+ schedule();
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&mtbd_sync_wq, &wait);
+ DEBUG(3, "blkmtd: queue_page: Queue has %d items in it\n", write_queue_cnt);
+ goto test_lock;
+ }
+
+ DEBUG(3, "blkmtd: queue_write_page: qhead: %d qtail: %d qcnt: %d\n",
+ write_queue_head, write_queue_tail, write_queue_cnt);
+
+ /* fix up the queue item */
+ item = &write_queue[write_queue_head];
+ item->pages = new_pages;
+ item->pagenr = pagenr;
+ item->pagecnt = pagecnt;
+ item->rawdevice = rawdevice;
+ item->iserase = iserase;
+
+ write_queue_head++;
+ write_queue_head %= WRITE_QUEUE_SZ;
+ write_queue_cnt++;
+ DEBUG(3, "blkmtd: queue_write_page: qhead: %d qtail: %d qcnt: %d\n",
+ write_queue_head, write_queue_tail, write_queue_cnt);
+ spin_unlock(&mbd_writeq_lock);
+ DEBUG(2, "blkmtd: queue_page_write: finished\n");
+ return 0;
+}
+
+
+/* Erase a specified part of the device: fill the affected page-cache
+ * pages with 0xff and queue them for the background write thread.
+ * instr->callback (if set) is invoked once the pages are queued, not
+ * when they reach the medium. Returns 0 or a negative errno, with
+ * instr->state updated to match. */
+static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ mtd_raw_dev_data_t *rawdevice = mtd->priv;
+ size_t from;
+ u_long len;
+ int err = 0;
+
+ /* check readonly */
+ if(rawdevice->readonly) {
+ printk("blkmtd: error: trying to erase readonly device %s\n", device);
+ instr->state = MTD_ERASE_FAILED;
+ /* bugfix: report the failure instead of returning 0 */
+ err = -EROFS;
+ goto erase_callback;
+ }
+
+ instr->state = MTD_ERASING;
+ from = instr->addr;
+ len = instr->len;
+
+ /* check page alignment of start and length */
+ DEBUG(2, "blkmtd: erase: dev = `%s' from = %d len = %ld\n",
+ bdevname(rawdevice->binding->bd_dev), from, len);
+ if(from % PAGE_SIZE) {
+ printk("blkmtd: erase: addr not page aligned (addr = %d)\n", from);
+ instr->state = MTD_ERASE_FAILED;
+ err = -EIO;
+ }
+
+ if(len % PAGE_SIZE) {
+ printk("blkmtd: erase: len not a whole number of pages (len = %ld)\n", len);
+ instr->state = MTD_ERASE_FAILED;
+ err = -EIO;
+ }
+
+ if(instr->state != MTD_ERASE_FAILED) {
+ /* start the erase */
+ int pagenr, pagecnt;
+ struct page *page, **pages;
+ int i = 0;
+
+ pagenr = from >> PAGE_SHIFT;
+ pagecnt = len >> PAGE_SHIFT;
+ DEBUG(3, "blkmtd: erase: pagenr = %d pagecnt = %d\n", pagenr, pagecnt);
+ pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
+ if(pages == NULL) {
+ err = -ENOMEM;
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_out;
+ }
+
+ while(pagecnt) {
+ /* get the page via the page cache */
+ DEBUG(3, "blkmtd: erase: doing grab_cache_page() for page %d\n", pagenr);
+ page = grab_cache_page(&rawdevice->as, pagenr);
+ if(!page) {
+ DEBUG(3, "blkmtd: erase: grab_cache_page() failed for page %d\n", pagenr);
+ /* bugfix: release the pages already grabbed, which the
+ original leaked (locked and referenced) */
+ while(i--) {
+ UnlockPage(pages[i]);
+ page_cache_release(pages[i]);
+ }
+ kfree(pages);
+ err = -EIO;
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_out;
+ }
+ memset(page_address(page), 0xff, PAGE_SIZE);
+ pages[i] = page;
+ pagecnt--;
+ pagenr++;
+ i++;
+ }
+ DEBUG(3, "blkmtd: erase: queuing page write\n");
+ err = queue_page_write(rawdevice, pages, from >> PAGE_SHIFT, len >> PAGE_SHIFT, 1);
+ pagecnt = len >> PAGE_SHIFT;
+ if(!err) {
+ while(pagecnt--) {
+ SetPageUptodate(pages[pagecnt]);
+ /* bugfix: flush while we still hold a reference, i.e.
+ before page_cache_release(), not after it */
+ flush_dcache_page(pages[pagecnt]);
+ UnlockPage(pages[pagecnt]);
+ page_cache_release(pages[pagecnt]);
+ }
+ kfree(pages);
+ instr->state = MTD_ERASE_DONE;
+ } else {
+ while(pagecnt--) {
+ SetPageError(pages[pagecnt]);
+ page_cache_release(pages[pagecnt]);
+ }
+ kfree(pages);
+ instr->state = MTD_ERASE_FAILED;
+ }
+ }
+ erase_out:
+ DEBUG(3, "blkmtd: erase: checking callback\n");
+ erase_callback:
+ if (instr->callback) {
+ (*(instr->callback))(instr);
+ }
+ DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
+ return err;
+}
+
+
+/* read a range of the data via the page cache: fetch each page with
+ read_cache_page() (which is satisfied from the outgoing write queue
+ when the data is pending) and copy the wanted part out. Slow but
+ simple. Returns 0 or a negative errno; *retlen = bytes copied. */
+static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ mtd_raw_dev_data_t *rawdevice = mtd->priv;
+ int err = 0;
+ int offset;
+ int pagenr, pages;
+
+ *retlen = 0;
+
+ DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
+ bdevname(rawdevice->binding->bd_dev), (long int)from, len, buf);
+
+ pagenr = from >> PAGE_SHIFT;
+ offset = from - (pagenr << PAGE_SHIFT);
+
+ pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
+ DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n", pagenr, offset, pages);
+
+ /* just loop through each page, getting it via readpage() - slow but easy */
+ while(pages) {
+ struct page *page;
+ int cpylen;
+ DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
+ page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice->file);
+ if(IS_ERR(page)) {
+ return PTR_ERR(page);
+ }
+ wait_on_page(page);
+ if(!Page_Uptodate(page)) {
+ /* error reading page */
+ printk("blkmtd: read: page not uptodate\n");
+ page_cache_release(page);
+ return -EIO;
+ }
+
+ /* copy at most one page's worth, minus the intra-page offset */
+ cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
+ if(offset+cpylen > PAGE_SIZE)
+ cpylen = PAGE_SIZE-offset;
+
+ memcpy(buf + *retlen, page_address(page) + offset, cpylen);
+ offset = 0;
+ len -= cpylen;
+ *retlen += cpylen;
+ pagenr++;
+ pages--;
+ page_cache_release(page);
+ }
+
+ DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", *retlen, err);
+ return err;
+}
+
+
+/* write a range of the data via the page cache.
+ *
+ * Basic operation. break the write into three parts.
+ *
+ * 1. From a page unaligned start up until the next page boundary
+ * 2. Page sized, page aligned blocks
+ * 3. From end of last aligned block to end of range
+ *
+ * 1,3 are read via the page cache and readpage() since these are partial
+ * pages, 2 we just grab pages from the page cache, not caring if they are
+ * already in memory or not since they will be completely overwritten.
+ *
+ */
+
+/* Write a range of data via the page cache; see the comment above for
+ * the three-part (partial head / whole pages / partial tail) split.
+ * Returns 0 or a negative errno; *retlen is the number of bytes
+ * accepted into the page cache (actual write-out is asynchronous). */
+static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ mtd_raw_dev_data_t *rawdevice = mtd->priv;
+ int err = 0;
+ int offset;
+ int pagenr;
+ size_t len1 = 0, len2 = 0, len3 = 0;
+ struct page **pages;
+ int pagecnt = 0;
+
+ *retlen = 0;
+ DEBUG(2, "blkmtd: write: dev = `%s' to = %ld len = %d buf = %p\n",
+ bdevname(rawdevice->binding->bd_dev), (long int)to, len, buf);
+
+ /* handle readonly and out of range numbers */
+
+ if(rawdevice->readonly) {
+ printk("blkmtd: error: trying to write to a readonly device %s\n", device);
+ return -EROFS;
+ }
+
+ if(to >= rawdevice->totalsize) {
+ return -ENOSPC;
+ }
+
+ if(to + len > rawdevice->totalsize) {
+ len = (rawdevice->totalsize - to);
+ }
+
+
+ pagenr = to >> PAGE_SHIFT;
+ offset = to - (pagenr << PAGE_SHIFT);
+
+ /* see if we have to do a partial write at the start */
+ if(offset) {
+ if((offset + len) > PAGE_SIZE) {
+ len1 = PAGE_SIZE - offset;
+ len -= len1;
+ } else {
+ len1 = len;
+ len = 0;
+ }
+ }
+
+ /* calculate the length of the other two regions */
+ len3 = len & ~PAGE_MASK;
+ len -= len3;
+ len2 = len;
+
+
+ if(len1)
+ pagecnt++;
+ if(len2)
+ pagecnt += len2 >> PAGE_SHIFT;
+ if(len3)
+ pagecnt++;
+
+ DEBUG(3, "blkmtd: write: len1 = %d len2 = %d len3 = %d pagecnt = %d\n", len1, len2, len3, pagecnt);
+
+ /* get space for list of pages */
+ pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
+ if(pages == NULL) {
+ return -ENOMEM;
+ }
+ pagecnt = 0;
+
+ if(len1) {
+ /* do partial start region */
+ struct page *page;
+
+ DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n", pagenr, len1, offset);
+ page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice->file);
+
+ if(IS_ERR(page)) {
+ kfree(pages);
+ return PTR_ERR(page);
+ }
+ memcpy(page_address(page)+offset, buf, len1);
+ pages[pagecnt++] = page;
+ buf += len1;
+ *retlen = len1;
+ err = 0;
+ pagenr++;
+ }
+
+ /* Now do the main loop to a page aligned, n page sized output */
+ if(len2) {
+ int pagesc = len2 >> PAGE_SHIFT;
+ DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n", pagenr, pagesc);
+ while(pagesc) {
+ struct page *page;
+
+ /* see if page is in the page cache */
+ DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
+ page = grab_cache_page(&rawdevice->as, pagenr);
+ DEBUG(3, "blkmtd: write: got page %d from page cache\n", pagenr);
+ if(!page) {
+ printk("blkmtd: write: cant grab cache page %d\n", pagenr);
+ err = -EIO;
+ goto write_err;
+ }
+ memcpy(page_address(page), buf, PAGE_SIZE);
+ pages[pagecnt++] = page;
+ UnlockPage(page);
+ pagenr++;
+ pagesc--;
+ buf += PAGE_SIZE;
+ *retlen += PAGE_SIZE;
+ }
+ }
+
+
+ if(len3) {
+ /* do the third region */
+ struct page *page;
+ DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n", pagenr, len3);
+ page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice->file);
+ if(IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto write_err;
+ }
+ memcpy(page_address(page), buf, len3);
+ DEBUG(3, "blkmtd: write: writing out partial end\n");
+ pages[pagecnt++] = page;
+ *retlen += len3;
+ err = 0;
+ }
+ DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);
+ /* submit it to the write task */
+ err = queue_page_write(rawdevice, pages, to >> PAGE_SHIFT, pagecnt, 0);
+ if(!err) {
+ while(pagecnt--) {
+ SetPageUptodate(pages[pagecnt]);
+ flush_dcache_page(pages[pagecnt]);
+ page_cache_release(pages[pagecnt]);
+ }
+ kfree(pages);
+ return 0;
+ }
+
+ write_err:
+ /* bugfix: post-decrement so that pages[0] is also released - the
+ original `while(--pagecnt)` skipped the first page, leaking its
+ reference */
+ while(pagecnt--) {
+ SetPageError(pages[pagecnt]);
+ page_cache_release(pages[pagecnt]);
+ }
+ kfree(pages);
+ return err;
+}
+
+
+/* sync the device - block until the write queue has been drained.
+ * While writes are still queued, wake the writer task and sleep on
+ * mtbd_sync_wq until it signals progress, then re-check the queue. */
+static void blkmtd_sync(struct mtd_info *mtd)
+{
+  DECLARE_WAITQUEUE(wait, current);
+  DEBUG(2, "blkmtd: sync: called\n");
+
+ stuff_inq:
+  spin_lock(&mbd_writeq_lock);
+  if(write_queue_cnt) {
+    /* queue non-empty: drop the lock, kick the writer and wait */
+    spin_unlock(&mbd_writeq_lock);
+    current->state = TASK_UNINTERRUPTIBLE;
+    add_wait_queue(&mtbd_sync_wq, &wait);
+    DEBUG(3, "blkmtd: sync: waking up task\n");
+    wake_up_interruptible(&thr_wq);
+    schedule();
+    current->state = TASK_RUNNING;
+    remove_wait_queue(&mtbd_sync_wq, &wait);
+    DEBUG(3, "blkmtd: sync: waking up after write task\n");
+    goto stuff_inq;
+  }
+  spin_unlock(&mbd_writeq_lock);
+
+  /* FIX: debug message said "blkmtdL sync" (typo for "blkmtd: sync") */
+  DEBUG(2, "blkmtd: sync: finished\n");
+}
+
+/* Cleanup and exit - sync the device and kill off the kernel thread */
+static void __exit cleanup_blkmtd(void)
+{
+  if (mtd_info) {
+    mtd_raw_dev_data_t *rawdevice = mtd_info->priv;
+    // sync the device
+    if (rawdevice) {
+      /* drain any queued writes before tearing anything down */
+      blkmtd_sync(mtd_info);
+      /* tell the writer thread to exit, wake it, and wait on the
+         semaphore it ups as it terminates */
+      write_task_finish = 1;
+      wake_up_interruptible(&thr_wq);
+      down(&thread_sem);
+      /* release our claim on the underlying block device and file */
+      if(rawdevice->binding != NULL)
+        blkdev_put(rawdevice->binding, BDEV_RAW);
+      filp_close(rawdevice->file, NULL);
+      kfree(mtd_info->priv);
+    }
+    if(mtd_info->eraseregions)
+      kfree(mtd_info->eraseregions);
+    /* unregister from the MTD layer, then free our mtd_info */
+    del_mtd_device(mtd_info);
+    kfree(mtd_info);
+    mtd_info = NULL;
+  }
+  printk("blkmtd: unloaded for %s\n", device);
+}
+
+extern struct module __this_module;
+
+/* For a given total size and initial erase size, work out how many
+   erase regions are needed and (optionally) fill in their geometry.
+   Each pass covers as many whole erase blocks as fit, then the erase
+   size is halved until it fits in the remainder, so the device ends
+   up described by successively smaller power-of-two regions.
+
+   info: array to fill in, or NULL to just count the regions.
+   Returns the number of erase regions required. */
+static int __init calc_erase_regions(struct mtd_erase_region_info *info, size_t erase_size, size_t total_size)
+{
+  int count = 0;
+  int offset = 0;
+  int regions = 0;
+
+  /* FIX: a zero erase size would divide by zero below */
+  if(erase_size == 0)
+    return 0;
+
+  while(total_size) {
+    count = total_size / erase_size;
+    if(count) {
+      total_size = total_size % erase_size;
+      if(info) {
+        info->offset = offset;
+        info->erasesize = erase_size;
+        info->numblocks = count;
+        info++;
+      }
+      offset += (count * erase_size);
+      regions++;
+    }
+    /* shrink the erase size until it fits in what is left */
+    while(erase_size > total_size)
+      erase_size >>= 1;
+  }
+  return regions;
+}
+
+
+/* Startup - parse the module arguments, open and size the target block
+ * device, build the MTD description (including erase regions) and start
+ * the background writer thread.
+ *
+ * Returns 0 on success, 1 for usage/device errors, or a negative errno
+ * for allocation/registration failures. */
+static int __init init_blkmtd(void)
+{
+  struct file *file = NULL;
+  struct inode *inode;
+  mtd_raw_dev_data_t *rawdevice = NULL;
+  int maj, min;
+  int i, blocksize, blocksize_bits;
+  loff_t size = 0;
+  int readonly = 0;
+  int erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
+  kdev_t rdev;
+  int err;
+  int mode;
+  int totalsize = 0, total_sectors = 0;
+  int regions;
+
+  mtd_info = NULL;
+
+  // Check args
+  if(device == 0) {
+    printk("blkmtd: error, missing `device' name\n");
+    return 1;
+  }
+
+  if(ro)
+    readonly = 1;
+
+  if(erasesz)
+    erase_size = erasesz;
+
+  DEBUG(1, "blkmtd: got device = `%s' erase size = %dK readonly = %s\n", device, erase_size, readonly ? "yes" : "no");
+  // Get a handle on the device
+  mode = (readonly) ? O_RDONLY : O_RDWR;
+  file = filp_open(device, mode, 0);
+  if(IS_ERR(file)) {
+    DEBUG(2, "blkmtd: open_namei returned %ld\n", PTR_ERR(file));
+    return 1;
+  }
+
+  /* determine if this is a block device and if so get its major and minor
+     numbers */
+  inode = file->f_dentry->d_inode;
+  if(!S_ISBLK(inode->i_mode)) {
+    printk("blkmtd: %s not a block device\n", device);
+    filp_close(file, NULL);
+    return 1;
+  }
+  rdev = inode->i_rdev;
+  DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n",
+        MAJOR(rdev), MINOR(rdev));
+  maj = MAJOR(rdev);
+  min = MINOR(rdev);
+
+  if(maj == MTD_BLOCK_MAJOR) {
+    printk("blkmtd: attempting to use an MTD device as a block device\n");
+    filp_close(file, NULL);   /* FIX: original leaked the open file here */
+    return 1;
+  }
+
+  DEBUG(1, "blkmtd: devname = %s\n", bdevname(rdev));
+  blocksize = BLOCK_SIZE;
+
+  /* use the user-supplied block size, else the device's, else 1K */
+  if(bs) {
+    blocksize = bs;
+  } else {
+    if (blksize_size[maj] && blksize_size[maj][min]) {
+      DEBUG(2, "blkmtd: blksize_size = %d\n", blksize_size[maj][min]);
+      blocksize = blksize_size[maj][min];
+    }
+  }
+  i = blocksize;
+  blocksize_bits = 0;
+  while(i != 1) {
+    blocksize_bits++;
+    i >>= 1;
+  }
+
+  /* size in blocks: user-supplied count, else ask the block layer */
+  if(count) {
+    size = count;
+  } else {
+    if (blk_size[maj]) {
+      size = ((loff_t) blk_size[maj][min] << BLOCK_SIZE_BITS) >> blocksize_bits;
+    }
+  }
+  total_sectors = size;
+  size *= blocksize;
+  totalsize = size;
+  DEBUG(1, "blkmtd: size = %ld\n", (long int)size);
+
+  if(size == 0) {
+    printk("blkmtd: cant determine size\n");
+    filp_close(file, NULL);   /* FIX: original leaked the open file here */
+    return 1;
+  }
+  rawdevice = (mtd_raw_dev_data_t *)kmalloc(sizeof(mtd_raw_dev_data_t), GFP_KERNEL);
+  if(rawdevice == NULL) {
+    err = -ENOMEM;
+    goto init_err;
+  }
+  memset(rawdevice, 0, sizeof(mtd_raw_dev_data_t));
+  // get the block device
+  rawdevice->binding = bdget(kdev_t_to_nr(MKDEV(maj, min)));
+  err = blkdev_get(rawdevice->binding, mode, 0, BDEV_RAW);
+  if (err) {
+    goto init_err;
+  }
+  rawdevice->totalsize = totalsize;
+  rawdevice->total_sectors = total_sectors;
+  rawdevice->sector_size = blocksize;
+  rawdevice->sector_bits = blocksize_bits;
+  rawdevice->readonly = readonly;
+
+  DEBUG(2, "sector_size = %d, sector_bits = %d\n", rawdevice->sector_size, rawdevice->sector_bits);
+
+  mtd_info = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
+  if (mtd_info == NULL) {
+    err = -ENOMEM;
+    goto init_err;
+  }
+  memset(mtd_info, 0, sizeof(*mtd_info));
+
+  // Setup the MTD structure
+  mtd_info->name = "blkmtd block device";
+  if(readonly) {
+    mtd_info->type = MTD_ROM;
+    mtd_info->flags = MTD_CAP_ROM;
+  } else {
+    mtd_info->type = MTD_RAM;
+    mtd_info->flags = MTD_CAP_RAM;
+  }
+  /* erasesize was assigned identically in both branches above */
+  mtd_info->erasesize = erase_size << 10;
+  mtd_info->size = size;
+  mtd_info->erase = blkmtd_erase;
+  mtd_info->read = blkmtd_read;
+  mtd_info->write = blkmtd_write;
+  mtd_info->sync = blkmtd_sync;
+  mtd_info->point = 0;
+  mtd_info->unpoint = 0;
+
+  mtd_info->priv = rawdevice;
+  regions = calc_erase_regions(NULL, erase_size << 10, size);
+  DEBUG(1, "blkmtd: init: found %d erase regions\n", regions);
+  mtd_info->eraseregions = kmalloc(regions * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
+  if(mtd_info->eraseregions == NULL) {
+    /* FIX: this error check was empty in the original, so a failed
+       allocation carried on with a NULL eraseregions pointer */
+    err = -ENOMEM;
+    goto init_err;
+  }
+  mtd_info->numeraseregions = regions;
+  calc_erase_regions(mtd_info->eraseregions, erase_size << 10, size);
+
+  /* setup the page cache info */
+  INIT_LIST_HEAD(&rawdevice->as.clean_pages);
+  INIT_LIST_HEAD(&rawdevice->as.dirty_pages);
+  INIT_LIST_HEAD(&rawdevice->as.locked_pages);
+  rawdevice->as.nrpages = 0;
+  rawdevice->as.a_ops = &blkmtd_aops;
+  rawdevice->as.host = inode;
+  rawdevice->as.i_mmap = NULL;
+  rawdevice->as.i_mmap_shared = NULL;
+  spin_lock_init(&rawdevice->as.i_shared_lock);
+  rawdevice->as.gfp_mask = GFP_KERNEL;
+  rawdevice->file = file;
+
+  file->private_data = rawdevice;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+  mtd_info->module = THIS_MODULE;
+#endif
+  if (add_mtd_device(mtd_info)) {
+    err = -EIO;
+    goto init_err;
+  }
+  init_waitqueue_head(&thr_wq);
+  init_waitqueue_head(&mtbd_sync_wq);
+  DEBUG(3, "blkmtd: init: kernel task @ %p\n", write_queue_task);
+  DEBUG(2, "blkmtd: init: starting kernel task\n");
+  kernel_thread(write_queue_task, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+  DEBUG(2, "blkmtd: init: started\n");
+  printk("blkmtd loaded: version = %s using %s erase_size = %dK %s\n", VERSION, device, erase_size, (readonly) ? "(read-only)" : "");
+  return 0;
+
+ init_err:
+  /* FIX: the original tested `if(!rawdevice)' here, which dereferenced
+     a NULL pointer when the allocation had failed and leaked rawdevice
+     (and its block-device reference) on every other error path */
+  if(rawdevice) {
+    if(rawdevice->binding)
+      blkdev_put(rawdevice->binding, BDEV_RAW);
+    kfree(rawdevice);
+    rawdevice = NULL;
+  }
+  if(mtd_info) {
+    if(mtd_info->eraseregions)
+      kfree(mtd_info->eraseregions);
+    kfree(mtd_info);
+    mtd_info = NULL;
+  }
+  filp_close(file, NULL);   /* FIX: original leaked the open file here */
+  return err;
+}
+
+module_init(init_blkmtd);
+module_exit(cleanup_blkmtd);
/*======================================================================
- $Id: doc1000.c,v 1.11 2000/11/24 13:43:16 dwmw2 Exp $
+ $Id: doc1000.c,v 1.15 2001/10/02 15:05:13 dwmw2 Exp $
======================================================================*/
static inline int byte_write (volatile u_char *addr, u_char byte)
{
register u_char status;
- register u_short i = 0;
-
- do {
+ register u_short i = 0;
+
+ do {
status = readb(addr);
if (status & CSR_WR_READY)
{
}
-#if defined (MODULE) && LINUX_VERSION_CODE < 0x20211
-#define init_doc1000 init_module
-#define cleanup_doc1000 cleanup_module
-#endif
-
int __init init_doc1000(void)
{
struct mypriv *priv;
kfree(mymtd);
}
-#if LINUX_VERSION_CODE >= 0x20211
module_init(init_doc1000);
module_exit(cleanup_doc1000);
-#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD driver for DiskOnChip 1000");
+
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2000.c,v 1.43 2001/06/02 14:30:43 dwmw2 Exp $
+ * $Id: doc2000.c,v 1.46 2001/10/02 15:05:13 dwmw2 Exp $
*/
#include <linux/kernel.h>
size_t *retlen, u_char *buf);
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, const u_char *buf);
+static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, const u_char *buf);
static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
static struct mtd_info *doc2klist = NULL;
static int _DoC_WaitReady(struct DiskOnChip *doc)
{
unsigned long docptr = doc->virtadr;
- unsigned short c = 0xffff;
+ unsigned long timeo = jiffies + (HZ * 10);
DEBUG(MTD_DEBUG_LEVEL3,
"_DoC_WaitReady called for out-of-line wait\n");
/* Out-of-line routine to wait for chip response */
- while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
- ;
-
- if (c == 0)
- DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+ while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (time_after(jiffies, timeo)) {
+ DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ if (current->need_resched) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+ else
+ udelay(1);
+ }
- return (c == 0);
+ return 0;
}
static inline int DoC_WaitReady(struct DiskOnChip *doc)
/* Read a buffer from DoC, taking care of Millennium odditys */
static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
{
- int dummy;
+ volatile int dummy;
int modulus = 0xffff;
unsigned long docptr;
int i;
/* If there are none at all that we recognise, bail */
if (!this->numchips) {
- printk("No flash chips recognised.\n");
+ printk(KERN_NOTICE "No flash chips recognised.\n");
return;
}
/* Allocate an array to hold the information for each chip */
this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
if (!this->chips) {
- printk("No memory for allocating chip info structures\n");
+ printk(KERN_NOTICE "No memory for allocating chip info structures\n");
return;
}
this->curfloor = -1;
this->curchip = -1;
+ init_MUTEX(&this->lock);
/* Ident all the chips present. */
DoC_ScanChips(this);
if (from >= this->totlen)
return -EINVAL;
+ down(&this->lock);
+
/* Don't allow a single read to cross a 512-byte block boundary */
if (from + len > ((from | 0x1ff) + 1))
len = ((from | 0x1ff) + 1) - from;
int nb_errors;
/* There was an ECC error */
#ifdef ECC_DEBUG
- printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
+ printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
#endif
/* Read the ECC syndrom through the DiskOnChip ECC logic.
These syndrome will be all ZERO when there is no error */
nb_errors = doc_decode_ecc(buf, syndrome);
#ifdef ECC_DEBUG
- printk("Errors corrected: %x\n", nb_errors);
+ printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
#endif
if (nb_errors < 0) {
/* We return error, but have actually done the read. Not that
}
#ifdef PSYCHO_DEBUG
- printk("ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
+ printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
(long)from, eccbuf[0], eccbuf[1], eccbuf[2],
eccbuf[3], eccbuf[4], eccbuf[5]);
#endif
DoC_WaitReady(this);
}
+ up(&this->lock);
+
return ret;
}
if (to >= this->totlen)
return -EINVAL;
+ down(&this->lock);
+
/* Don't allow a single write to cross a 512-byte block boundary */
if (to + len > ((to | 0x1ff) + 1))
len = ((to | 0x1ff) + 1) - to;
DoC_Delay(this, 2);
if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk("Error programming flash\n");
+ printk(KERN_ERR "Error programming flash\n");
/* Error in programming */
*retlen = 0;
+ up(&this->lock);
return -EIO;
}
DoC_Delay(this, 2);
if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk("Error programming flash\n");
+ printk(KERN_ERR "Error programming flash\n");
/* Error in programming */
*retlen = 0;
+ up(&this->lock);
return -EIO;
}
if (eccbuf) {
unsigned char x[8];
size_t dummy;
+ int ret;
/* Write the ECC data to flash */
for (di=0; di<6; di++)
x[6]=0x55;
x[7]=0x55;
- return doc_write_oob(mtd, to, 8, &dummy, x);
+ ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
+ up(&this->lock);
+ return ret;
}
-
+ up(&this->lock);
return 0;
}
size_t * retlen, u_char * buf)
{
struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
- int len256 = 0;
+ int len256 = 0, ret;
unsigned long docptr;
struct Nand *mychip;
+ down(&this->lock);
+
docptr = this->virtadr;
mychip = &this->chips[ofs >> this->chipshift];
/* Reading the full OOB data drops us off of the end of the page,
* causing the flash device to go into busy mode, so we need
* to wait until ready 11.4.1 and Toshiba TC58256FT docs */
- return DoC_WaitReady(this);
+
+ ret = DoC_WaitReady(this);
+
+ up(&this->lock);
+ return ret;
}
-static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
- size_t * retlen, const u_char * buf)
+static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t * retlen, const u_char * buf)
{
struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
int len256 = 0;
unsigned long docptr = this->virtadr;
struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- int dummy;
+ volatile int dummy;
// printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len,
// buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]);
DoC_Delay(this, 2);
if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk("Error programming oob data\n");
+ printk(KERN_ERR "Error programming oob data\n");
/* There was an error */
*retlen = 0;
return -EIO;
DoC_Delay(this, 2);
if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk("Error programming oob data\n");
+ printk(KERN_ERR "Error programming oob data\n");
/* There was an error */
*retlen = 0;
return -EIO;
return 0;
}
+
+/* Write out-of-band (spare area) data.  Thin locking wrapper: takes the
+ * per-device semaphore and defers to doc_write_oob_nolock(), which is
+ * shared with the ECC write path (that caller already holds the lock). */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
+			 size_t * retlen, const u_char * buf)
+{
+	struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
+	int ret;
+
+	down(&this->lock);
+	ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf);
+
+	up(&this->lock);
+	return ret;
+}
-int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
+static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
__u32 ofs = instr->addr;
__u32 len = instr->len;
+ volatile int dummy;
unsigned long docptr;
struct Nand *mychip;
- if (len != mtd->erasesize)
- printk(KERN_WARNING "Erase not right size (%x != %x)n",
- len, mtd->erasesize);
-
- docptr = this->virtadr;
+ down(&this->lock);
- mychip = &this->chips[ofs >> this->chipshift];
-
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
+ if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
+ up(&this->lock);
+ return -EINVAL;
}
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
- instr->state = MTD_ERASE_PENDING;
+ instr->state = MTD_ERASING;
+
+ docptr = this->virtadr;
- DoC_Command(this, NAND_CMD_ERASE1, 0);
- DoC_Address(this, ADDR_PAGE, ofs, 0, 0);
- DoC_Command(this, NAND_CMD_ERASE2, 0);
+ /* FIXME: Do this in the background. Use timers or schedule_task() */
+ while(len) {
+ mychip = &this->chips[ofs >> this->chipshift];
- instr->state = MTD_ERASING;
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(this, mychip->floor);
+ DoC_SelectChip(this, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(this, mychip->chip);
+ }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
+ DoC_Command(this, NAND_CMD_ERASE1, 0);
+ DoC_Address(this, ADDR_PAGE, ofs, 0, 0);
+ DoC_Command(this, NAND_CMD_ERASE2, 0);
- if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk("Error writing\n");
- /* There was an error */
- instr->state = MTD_ERASE_FAILED;
- } else
- instr->state = MTD_ERASE_DONE;
+ DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
+
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
+
+ if (ReadDOC_(docptr, this->ioreg) & 1) {
+ printk(KERN_ERR "Error erasing at 0x%x\n", ofs);
+ /* There was an error */
+ instr->state = MTD_ERASE_FAILED;
+ goto callback;
+ }
+ ofs += mtd->erasesize;
+ len -= mtd->erasesize;
+ }
+ instr->state = MTD_ERASE_DONE;
+ callback:
if (instr->callback)
instr->callback(instr);
+ up(&this->lock);
return 0;
}
*
****************************************************************************/
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define cleanup_doc2000 cleanup_module
-#define init_doc2000 init_module
-#endif
-
int __init init_doc2000(void)
{
inter_module_register(im_name, THIS_MODULE, &DoC2k_init);
module_exit(cleanup_doc2000);
module_init(init_doc2000);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
+MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium");
+
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2001.c,v 1.34 2001/06/02 14:30:43 dwmw2 Exp $
+ * $Id: doc2001.c,v 1.35 2001/10/02 15:05:13 dwmw2 Exp $
*/
#include <linux/kernel.h>
*
****************************************************************************/
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define cleanup_doc2001 cleanup_module
-#define init_doc2001 init_module
-#endif
-
int __init init_doc2001(void)
{
inter_module_register(im_name, THIS_MODULE, &DoCMil_init);
module_exit(cleanup_doc2001);
module_init(init_doc2001);
-
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
+MODULE_DESCRIPTION("Alternative driver for DiskOnChip Millennium");
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: docecc.c,v 1.1 2000/11/03 12:43:43 dwmw2 Exp $
+ * $Id: docecc.c,v 1.4 2001/10/02 15:05:13 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/mtd/compatmac.h> /* for min() in older kernels */
#include <linux/mtd/mtd.h>
#include <linux/mtd/doc2000.h>
den = 0;
/* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
- for (i = min_t(int, deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
+ for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
if(lambda[i+1] != A0)
den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
}
return nb_errors;
}
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Fabrice Bellard <fabrice.bellard@netgem.com>");
+MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware");
/* Probe routines common to all DoC devices */
/* (c) 1999 Machine Vision Holdings, Inc. */
/* Author: David Woodhouse <dwmw2@infradead.org> */
-/* $Id: docprobe.c,v 1.27 2001/06/03 19:06:09 dwmw2 Exp $ */
+/* $Id: docprobe.c,v 1.30 2001/10/02 15:05:13 dwmw2 Exp $ */
return 0;
}
+static int docfound;
static void __init DoC_Probe(unsigned long physadr)
{
return;
if ((ChipID = doccheck(docptr, physadr))) {
-
+ docfound = 1;
mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd) {
- printk("Cannot allocate memory for data structures. Dropping.\n");
+ printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
iounmap((void *)docptr);
return;
}
inter_module_put(im_funcname);
return;
}
- printk("Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
+ printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
}
iounmap((void *)docptr);
}
*
****************************************************************************/
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_doc init_module
-#endif
-
int __init init_doc(void)
{
int i;
- printk(KERN_NOTICE "M-Systems DiskOnChip driver. (C) 1999 Machine Vision Holdings, Inc.\n");
-#ifdef PRERELEASE
- printk(KERN_INFO "$Id: docprobe.c,v 1.27 2001/06/03 19:06:09 dwmw2 Exp $\n");
-#endif
if (doc_config_location) {
- printk("Using configured probe address 0x%lx\n", doc_config_location);
+ printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
DoC_Probe(doc_config_location);
} else {
for (i=0; doc_locations[i]; i++) {
DoC_Probe(doc_locations[i]);
}
}
+ /* No banner message any more. Print a message if no DiskOnChip
+ found, so the user knows we at least tried. */
+ if (!docfound)
+ printk(KERN_INFO "No recognised DiskOnChip devices found\n");
/* So it looks like we've been used and we get unloaded */
MOD_INC_USE_COUNT;
MOD_DEC_USE_COUNT;
module_init(init_doc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Probe code for DiskOnChip 2000 and Millennium devices");
+
--- /dev/null
+
+/*
+ * MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART.
+ *
+ * $Id: lart.c,v 1.2 2001/10/02 15:05:13 dwmw2 Exp $
+ *
+ * Author: Abraham vd Merwe <abraham@2d3d.co.za>
+ *
+ * Copyright (c) 2001, 2d3D, Inc.
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * References:
+ *
+ * [1] "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ * - Order Number: 290644-005
+ * - January 2000
+ *
+ * [2] MTD internal API documentation
+ * - http://www.linux-mtd.infradead.org/tech/
+ *
+ * Limitations:
+ *
+ * Even though this driver is written for 3 Volt Fast Boot
+ * Block Flash Memory, it is rather specific to LART. With
+ * Minor modifications, notably the without data/address line
+ * mangling and different bus settings, etc. it should be
+ * trivial to adapt to other platforms.
+ *
+ * If somebody would sponsor me a different board, I'll
+ * adapt the driver (:
+ */
+
+/* debugging */
+//#define LART_DEBUG
+
+/* partition support */
+#define HAVE_PARTITIONS
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/mtd/mtd.h>
+#ifdef HAVE_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+#ifndef CONFIG_SA1100_LART
+#error This is for LART architecture only
+#endif
+
+static char module_name[] = "lart";
+
+/*
+ * These values are specific to 28Fxxxx3 flash memory.
+ * See section 2.3.1 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ */
+#define FLASH_BLOCKSIZE_PARAM (4096 * BUSWIDTH)
+#define FLASH_NUMBLOCKS_16m_PARAM 8
+#define FLASH_NUMBLOCKS_8m_PARAM 8
+
+/*
+ * These values are specific to 28Fxxxx3 flash memory.
+ * See section 2.3.2 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ */
+#define FLASH_BLOCKSIZE_MAIN (32768 * BUSWIDTH)
+#define FLASH_NUMBLOCKS_16m_MAIN 31
+#define FLASH_NUMBLOCKS_8m_MAIN 15
+
+/*
+ * These values are specific to LART
+ */
+
+/* general */
+#define BUSWIDTH 4 /* don't change this - a lot of the code _will_ break if you change this */
+#define FLASH_OFFSET 0xe8000000 /* see linux/arch/arm/mach-sa1100/lart.c */
+
+/* blob */
+#define NUM_BLOB_BLOCKS FLASH_NUMBLOCKS_16m_PARAM
+#define BLOB_START 0x00000000
+#define BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM)
+
+/* kernel */
+#define NUM_KERNEL_BLOCKS 7
+#define KERNEL_START (BLOB_START + BLOB_LEN)
+#define KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN)
+
+/* initial ramdisk */
+#define NUM_INITRD_BLOCKS 24
+#define INITRD_START (KERNEL_START + KERNEL_LEN)
+#define INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN)
+
+/*
+ * See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ */
+#define READ_ARRAY 0x00FF00FF /* Read Array/Reset */
+#define READ_ID_CODES 0x00900090 /* Read Identifier Codes */
+#define ERASE_SETUP 0x00200020 /* Block Erase */
+#define ERASE_CONFIRM 0x00D000D0 /* Block Erase and Program Resume */
+#define PGM_SETUP 0x00400040 /* Program */
+#define STATUS_READ 0x00700070 /* Read Status Register */
+#define STATUS_CLEAR 0x00500050 /* Clear Status Register */
+#define STATUS_BUSY 0x00800080 /* Write State Machine Status (WSMS) */
+#define STATUS_ERASE_ERR 0x00200020 /* Erase Status (ES) */
+#define STATUS_PGM_ERR 0x00100010 /* Program Status (PS) */
+
+/*
+ * See section 4.2 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
+ */
+#define FLASH_MANUFACTURER 0x00890089
+#define FLASH_DEVICE_8mbit_TOP 0x88f188f1
+#define FLASH_DEVICE_8mbit_BOTTOM 0x88f288f2
+#define FLASH_DEVICE_16mbit_TOP 0x88f388f3
+#define FLASH_DEVICE_16mbit_BOTTOM 0x88f488f4
+
+/***************************************************************************************************/
+
+/*
+ * The data line mapping on LART is as follows:
+ *
+ * U2 CPU | U3 CPU
+ * -------------------
+ * 0 20 | 0 12
+ * 1 22 | 1 14
+ * 2 19 | 2 11
+ * 3 17 | 3 9
+ * 4 24 | 4 0
+ * 5 26 | 5 2
+ * 6 31 | 6 7
+ * 7 29 | 7 5
+ * 8 21 | 8 13
+ * 9 23 | 9 15
+ * 10 18 | 10 10
+ * 11 16 | 11 8
+ * 12 25 | 12 1
+ * 13 27 | 13 3
+ * 14 30 | 14 6
+ * 15 28 | 15 4
+ */
+
+/* Mangle data (x) */
+#define DATA_TO_FLASH(x) \
+ ( \
+ (((x) & 0x08009000) >> 11) + \
+ (((x) & 0x00002000) >> 10) + \
+ (((x) & 0x04004000) >> 8) + \
+ (((x) & 0x00000010) >> 4) + \
+ (((x) & 0x91000820) >> 3) + \
+ (((x) & 0x22080080) >> 2) + \
+ ((x) & 0x40000400) + \
+ (((x) & 0x00040040) << 1) + \
+ (((x) & 0x00110000) << 4) + \
+ (((x) & 0x00220100) << 5) + \
+ (((x) & 0x00800208) << 6) + \
+ (((x) & 0x00400004) << 9) + \
+ (((x) & 0x00000001) << 12) + \
+ (((x) & 0x00000002) << 13) \
+ )
+
+/* Unmangle data (x) */
+#define FLASH_TO_DATA(x) \
+ ( \
+ (((x) & 0x00010012) << 11) + \
+ (((x) & 0x00000008) << 10) + \
+ (((x) & 0x00040040) << 8) + \
+ (((x) & 0x00000001) << 4) + \
+ (((x) & 0x12200104) << 3) + \
+ (((x) & 0x08820020) << 2) + \
+ ((x) & 0x40000400) + \
+ (((x) & 0x00080080) >> 1) + \
+ (((x) & 0x01100000) >> 4) + \
+ (((x) & 0x04402000) >> 5) + \
+ (((x) & 0x20008200) >> 6) + \
+ (((x) & 0x80000800) >> 9) + \
+ (((x) & 0x00001000) >> 12) + \
+ (((x) & 0x00004000) >> 13) \
+ )
+
+/*
+ * The address line mapping on LART is as follows:
+ *
+ * U3 CPU | U2 CPU
+ * -------------------
+ * 0 2 | 0 2
+ * 1 3 | 1 3
+ * 2 9 | 2 9
+ * 3 13 | 3 8
+ * 4 8 | 4 7
+ * 5 12 | 5 6
+ * 6 11 | 6 5
+ * 7 10 | 7 4
+ * 8 4 | 8 10
+ * 9 5 | 9 11
+ * 10 6 | 10 12
+ * 11 7 | 11 13
+ *
+ * BOOT BLOCK BOUNDARY
+ *
+ * 12 15 | 12 15
+ * 13 14 | 13 14
+ * 14 16 | 14 16
+ *
+ * MAIN BLOCK BOUNDARY
+ *
+ * 15 17 | 15 18
+ * 16 18 | 16 17
+ * 17 20 | 17 20
+ * 18 19 | 18 19
+ * 19 21 | 19 21
+ *
+ * As we can see from above, the addresses aren't mangled across
+ * block boundaries, so we don't need to worry about address
+ * translations except for sending/reading commands during
+ * initialization
+ */
+
+/* Mangle address (x) on chip U2 */
+#define ADDR_TO_FLASH_U2(x) \
+ ( \
+ (((x) & 0x00000f00) >> 4) + \
+ (((x) & 0x00042000) << 1) + \
+ (((x) & 0x0009c003) << 2) + \
+ (((x) & 0x00021080) << 3) + \
+ (((x) & 0x00000010) << 4) + \
+ (((x) & 0x00000040) << 5) + \
+ (((x) & 0x00000024) << 7) + \
+ (((x) & 0x00000008) << 10) \
+ )
+
+/* Unmangle address (x) on chip U2 */
+#define FLASH_U2_TO_ADDR(x) \
+ ( \
+ (((x) << 4) & 0x00000f00) + \
+ (((x) >> 1) & 0x00042000) + \
+ (((x) >> 2) & 0x0009c003) + \
+ (((x) >> 3) & 0x00021080) + \
+ (((x) >> 4) & 0x00000010) + \
+ (((x) >> 5) & 0x00000040) + \
+ (((x) >> 7) & 0x00000024) + \
+ (((x) >> 10) & 0x00000008) \
+ )
+
+/* Mangle address (x) on chip U3 */
+#define ADDR_TO_FLASH_U3(x) \
+ ( \
+ (((x) & 0x00000080) >> 3) + \
+ (((x) & 0x00000040) >> 1) + \
+ (((x) & 0x00052020) << 1) + \
+ (((x) & 0x00084f03) << 2) + \
+ (((x) & 0x00029010) << 3) + \
+ (((x) & 0x00000008) << 5) + \
+ (((x) & 0x00000004) << 7) \
+ )
+
+/* Unmangle address (x) on chip U3 */
+#define FLASH_U3_TO_ADDR(x) \
+ ( \
+ (((x) << 3) & 0x00000080) + \
+ (((x) << 1) & 0x00000040) + \
+ (((x) >> 1) & 0x00052020) + \
+ (((x) >> 2) & 0x00084f03) + \
+ (((x) >> 3) & 0x00029010) + \
+ (((x) >> 5) & 0x00000008) + \
+ (((x) >> 7) & 0x00000004) \
+ )
+
+/***************************************************************************************************/
+
+/* Read one byte from the flash window at `offset'.
+ * The pointer is volatile so the hardware is genuinely accessed on each
+ * dereference; note the debug build reads the location twice (once for
+ * the printk, once for the return). */
+static __u8 read8 (__u32 offset)
+{
+   volatile __u8 *data = (__u8 *) (FLASH_OFFSET + offset);
+#ifdef LART_DEBUG
+   printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.2x\n",__FUNCTION__,offset,*data);
+#endif
+   return (*data);
+}
+
+/* Read one 32-bit word from the flash window at `offset'.
+ * volatile pointer: each dereference is a real bus access; the debug
+ * build performs two reads (printk and return). */
+static __u32 read32 (__u32 offset)
+{
+   volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
+#ifdef LART_DEBUG
+   printk (KERN_DEBUG "%s(): 0x%.8x -> 0x%.8x\n",__FUNCTION__,offset,*data);
+#endif
+   return (*data);
+}
+
+/* Write the 32-bit value `x' to the flash window at `offset'.
+ * volatile forces the store onto the bus; note the debug printk reads
+ * the location back after the write, an extra bus access that only
+ * happens in LART_DEBUG builds. */
+static void write32 (__u32 x,__u32 offset)
+{
+   volatile __u32 *data = (__u32 *) (FLASH_OFFSET + offset);
+   *data = x;
+#ifdef LART_DEBUG
+   printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n",__FUNCTION__,offset,*data);
+#endif
+}
+
+/***************************************************************************************************/
+
+/*
+ * Probe for 16mbit flash memory on a LART board without doing
+ * too much damage. Since we need to write 1 dword to memory,
+ * we're f**cked if this happens to be DRAM since we can't
+ * restore the memory (otherwise we might exit Read Array mode).
+ *
+ * Returns 1 if we found 16mbit flash memory on LART, 0 otherwise.
+ */
+static int flash_probe (void)
+{
+   __u32 manufacturer,devtype;
+
+   /* setup "Read Identifier Codes" mode */
+   write32 (DATA_TO_FLASH (READ_ID_CODES),0x00000000);
+
+   /* probe U2. U2/U3 returns the same data since the first 3
+    * address lines are mangled in the same way */
+   manufacturer = FLASH_TO_DATA (read32 (ADDR_TO_FLASH_U2 (0x00000000)));
+   devtype = FLASH_TO_DATA (read32 (ADDR_TO_FLASH_U2 (0x00000001)));
+
+   /* put the flash back into command mode */
+   write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
+
+   /* FIX: the original wrote `devtype == ..._TOP || ..._BOTTOM' - the
+    * second operand was a bare non-zero constant, so the device-type
+    * check always passed for any recognised manufacturer */
+   return (manufacturer == FLASH_MANUFACTURER &&
+           (devtype == FLASH_DEVICE_16mbit_TOP ||
+            devtype == FLASH_DEVICE_16mbit_BOTTOM));
+}
+
+/*
+ * Erase one block of flash memory at offset ``offset'' which is any
+ * address within the block which should be erased.
+ *
+ * Returns 1 if successful, 0 otherwise.
+ */
+static inline int erase_block (__u32 offset)
+{
+   __u32 status;
+
+#ifdef LART_DEBUG
+   printk (KERN_DEBUG "%s(): 0x%.8x\n",__FUNCTION__,offset);
+#endif
+
+   /* erase and confirm */
+   write32 (DATA_TO_FLASH (ERASE_SETUP),offset);
+   write32 (DATA_TO_FLASH (ERASE_CONFIRM),offset);
+
+   /* wait for block erase to finish: poll the status register until
+    * the Write State Machine reports ready (STATUS_BUSY bit set).
+    * NOTE(review): this busy-waits with no timeout - a wedged chip
+    * would hang here; confirm that is acceptable for this platform */
+   do
+   {
+      write32 (DATA_TO_FLASH (STATUS_READ),offset);
+      status = FLASH_TO_DATA (read32 (offset));
+   }
+   while ((~status & STATUS_BUSY) != 0);
+
+   /* put the flash back into command mode */
+   write32 (DATA_TO_FLASH (READ_ARRAY),offset);
+
+   /* was the erase successful? */
+   if ((status & STATUS_ERASE_ERR))
+   {
+      printk (KERN_WARNING "%s: erase error at address 0x%.8x.\n",module_name,offset);
+      return (0);
+   }
+
+   return (1);
+}
+
+static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
+{
+ __u32 addr,len;
+ int i,first;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n",__FUNCTION__,instr->addr,instr->len);
+#endif
+
+ /* sanity checks */
+ if (instr->addr + instr->len > mtd->size) return (-EINVAL);
+
+ /*
+ * check that both start and end of the requested erase are
+ * aligned with the erasesize at the appropriate addresses.
+ *
+ * skip all erase regions which are ended before the start of
+ * the requested erase. Actually, to save on the calculations,
+ * we skip to the first erase region which starts after the
+ * start of the requested erase, and then go back one.
+ */
+ for (i = 0; i < mtd->numeraseregions && instr->addr >= mtd->eraseregions[i].offset; i++) ;
+ i--;
+
+ /*
+ * ok, now i is pointing at the erase region in which this
+ * erase request starts. Check the start of the requested
+ * erase range is aligned with the erase size which is in
+ * effect here.
+ */
+ if (instr->addr & (mtd->eraseregions[i].erasesize - 1)) return (-EINVAL);
+
+ /* Remember the erase region we start on */
+ first = i;
+
+ /*
+ * next, check that the end of the requested erase is aligned
+ * with the erase region at that address.
+ *
+ * as before, drop back one to point at the region in which
+ * the address actually falls
+ */
+ for (; i < mtd->numeraseregions && instr->addr + instr->len >= mtd->eraseregions[i].offset; i++) ;
+ i--;
+
+ /* is the end aligned on a block boundary? */
+ if ((instr->addr + instr->len) & (mtd->eraseregions[i].erasesize - 1)) return (-EINVAL);
+
+ addr = instr->addr;
+ len = instr->len;
+
+ i = first;
+
+ /* now erase those blocks */
+ while (len)
+ {
+ if (!erase_block (addr))
+ {
+ instr->state = MTD_ERASE_FAILED;
+ return (-EIO);
+ }
+
+ addr += mtd->eraseregions[i].erasesize;
+ len -= mtd->eraseregions[i].erasesize;
+
+ if (addr == mtd->eraseregions[i].offset + (mtd->eraseregions[i].erasesize * mtd->eraseregions[i].numblocks)) i++;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ if (instr->callback) instr->callback (instr);
+
+ return (0);
+}
+
+static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retlen,u_char *buf)
+{
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n",__FUNCTION__,(__u32) from,len);
+#endif
+
+ /* sanity checks */
+ if (!len) return (0);
+ if (from + len > mtd->size) return (-EINVAL);
+
+ /* we always read len bytes */
+ *retlen = len;
+
+ /* first, we read bytes until we reach a dword boundary */
+ if (from & (BUSWIDTH - 1))
+ {
+ int gap = BUSWIDTH - (from & (BUSWIDTH - 1));
+
+ while (len && gap--) *buf++ = read8 (from++), len--;
+ }
+
+ /* now we read dwords until we reach a non-dword boundary */
+ while (len >= BUSWIDTH)
+ {
+ *((__u32 *) buf) = read32 (from);
+
+ buf += BUSWIDTH;
+ from += BUSWIDTH;
+ len -= BUSWIDTH;
+ }
+
+ /* top up the last unaligned bytes */
+ if (len & (BUSWIDTH - 1))
+ while (len--) *buf++ = read8 (from++);
+
+ return (0);
+}
+
+/*
+ * Write one dword ``x'' to flash memory at offset ``offset''. ``offset''
+ * must be 32 bits, i.e. it must be on a dword boundary.
+ *
+ * Returns 1 if successful, 0 otherwise.
+ */
+static inline int write_dword (__u32 offset,__u32 x)
+{
+ __u32 status;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(): 0x%.8x <- 0x%.8x\n",__FUNCTION__,offset,x);
+#endif
+
+ /* setup writing */
+ write32 (DATA_TO_FLASH (PGM_SETUP),offset);
+
+ /* write the data */
+ write32 (x,offset);
+
+ /* wait for the write to finish */
+ do
+ {
+ write32 (DATA_TO_FLASH (STATUS_READ),offset);
+ status = FLASH_TO_DATA (read32 (offset));
+ }
+ while ((~status & STATUS_BUSY) != 0);
+
+ /* put the flash back into command mode */
+ write32 (DATA_TO_FLASH (READ_ARRAY),offset);
+
+   /* was the write successful? */
+ if ((status & STATUS_PGM_ERR) || read32 (offset) != x)
+ {
+ printk (KERN_WARNING "%s: write error at address 0x%.8x.\n",module_name,offset);
+ return (0);
+ }
+
+ return (1);
+}
+
+static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen,const u_char *buf)
+{
+ __u8 tmp[4];
+ int i,n;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n",__FUNCTION__,(__u32) to,len);
+#endif
+
+ *retlen = 0;
+
+ /* sanity checks */
+ if (!len) return (0);
+ if (to + len > mtd->size) return (-EINVAL);
+
+ /* first, we write a 0xFF.... padded byte until we reach a dword boundary */
+ if (to & (BUSWIDTH - 1))
+ {
+ __u32 aligned = to & ~(BUSWIDTH - 1);
+ int gap = to - aligned;
+
+ i = n = 0;
+
+ while (gap--) tmp[i++] = 0xFF;
+ while (len && i < BUSWIDTH) tmp[i++] = buf[n++], len--;
+ while (i < BUSWIDTH) tmp[i++] = 0xFF;
+
+ if (!write_dword (aligned,*((__u32 *) tmp))) return (-EIO);
+
+ to += n;
+ buf += n;
+ *retlen += n;
+ }
+
+ /* now we write dwords until we reach a non-dword boundary */
+ while (len >= BUSWIDTH)
+ {
+ if (!write_dword (to,*((__u32 *) buf))) return (-EIO);
+
+ to += BUSWIDTH;
+ buf += BUSWIDTH;
+ *retlen += BUSWIDTH;
+ len -= BUSWIDTH;
+ }
+
+ /* top up the last unaligned bytes, padded with 0xFF.... */
+ if (len & (BUSWIDTH - 1))
+ {
+ i = n = 0;
+
+ while (len--) tmp[i++] = buf[n++];
+ while (i < BUSWIDTH) tmp[i++] = 0xFF;
+
+ if (!write_dword (to,*((__u32 *) tmp))) return (-EIO);
+
+ *retlen += n;
+ }
+
+ return (0);
+}
+
+/***************************************************************************************************/
+
+#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
+
+static struct mtd_info mtd;
+
+static struct mtd_erase_region_info erase_regions[] =
+{
+ /* parameter blocks */
+ {
+ offset: 0x00000000,
+ erasesize: FLASH_BLOCKSIZE_PARAM,
+ numblocks: FLASH_NUMBLOCKS_16m_PARAM
+ },
+ /* main blocks */
+ {
+ offset: FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM,
+ erasesize: FLASH_BLOCKSIZE_MAIN,
+ numblocks: FLASH_NUMBLOCKS_16m_MAIN
+ }
+};
+
+#ifdef HAVE_PARTITIONS
+static struct mtd_partition lart_partitions[] =
+{
+ /* blob */
+ {
+ name: "blob",
+ offset: BLOB_START,
+ size: BLOB_LEN,
+ mask_flags: 0
+ },
+ /* kernel */
+ {
+ name: "kernel",
+ offset: KERNEL_START, /* MTDPART_OFS_APPEND */
+ size: KERNEL_LEN,
+ mask_flags: 0
+ },
+ /* initial ramdisk / file system */
+ {
+ name: "file system",
+ offset: INITRD_START, /* MTDPART_OFS_APPEND */
+ size: INITRD_LEN, /* MTDPART_SIZ_FULL */
+ mask_flags: 0
+ }
+};
+#endif
+
+int __init lart_flash_init (void)
+{
+ int result;
+ memset (&mtd,0,sizeof (mtd));
+ printk ("MTD driver for LART. Written by Abraham vd Merwe <abraham@2d3d.co.za>\n");
+ printk ("%s: Probing for 28F160x3 flash on LART...\n",module_name);
+ if (!flash_probe ())
+ {
+ printk (KERN_WARNING "%s: Found no LART compatible flash device\n",module_name);
+ return (-ENXIO);
+ }
+ printk ("%s: This looks like a LART board to me.\n",module_name);
+ mtd.name = module_name;
+ mtd.type = MTD_NORFLASH;
+ mtd.flags = MTD_CAP_NORFLASH;
+ mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
+ mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
+ mtd.numeraseregions = NB_OF (erase_regions);
+ mtd.eraseregions = erase_regions;
+ mtd.module = THIS_MODULE;
+ mtd.erase = flash_erase;
+ mtd.read = flash_read;
+ mtd.write = flash_write;
+
+#ifdef LART_DEBUG
+ printk (KERN_DEBUG
+ "mtd.name = %s\n"
+ "mtd.size = 0x%.8x (%uM)\n"
+ "mtd.erasesize = 0x%.8x (%uK)\n"
+ "mtd.numeraseregions = %d\n",
+ mtd.name,
+ mtd.size,mtd.size / (1024*1024),
+ mtd.erasesize,mtd.erasesize / 1024,
+ mtd.numeraseregions);
+
+ if (mtd.numeraseregions)
+ for (result = 0; result < mtd.numeraseregions; result++)
+ printk (KERN_DEBUG
+ "\n\n"
+ "mtd.eraseregions[%d].offset = 0x%.8x\n"
+ "mtd.eraseregions[%d].erasesize = 0x%.8x (%uK)\n"
+ "mtd.eraseregions[%d].numblocks = %d\n",
+ result,mtd.eraseregions[result].offset,
+ result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024,
+ result,mtd.eraseregions[result].numblocks);
+
+#ifdef HAVE_PARTITIONS
+ printk ("\npartitions = %d\n",NB_OF (lart_partitions));
+
+ for (result = 0; result < NB_OF (lart_partitions); result++)
+ printk (KERN_DEBUG
+ "\n\n"
+ "lart_partitions[%d].name = %s\n"
+ "lart_partitions[%d].offset = 0x%.8x\n"
+ "lart_partitions[%d].size = 0x%.8x (%uK)\n",
+ result,lart_partitions[result].name,
+ result,lart_partitions[result].offset,
+ result,lart_partitions[result].size,lart_partitions[result].size / 1024);
+#endif
+#endif
+
+#ifndef HAVE_PARTITIONS
+ result = add_mtd_device (&mtd);
+#else
+ result = add_mtd_partitions (&mtd,lart_partitions,NB_OF (lart_partitions));
+#endif
+
+ return (result);
+}
+
+void __exit lart_flash_exit (void)
+{
+#ifndef HAVE_PARTITIONS
+ del_mtd_device (&mtd);
+#else
+ del_mtd_partitions (&mtd);
+#endif
+}
+
+module_init (lart_flash_init);
+module_exit (lart_flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Abraham vd Merwe <abraham@2d3d.co.za>");
+MODULE_DESCRIPTION("MTD driver for Intel 28F160F3 on LART board");
+
+
/*
* mtdram - a test mtd device
- * $Id: mtdram.c,v 1.24 2001/06/09 23:09:23 dwmw2 Exp $
+ * $Id: mtdram.c,v 1.25 2001/10/02 15:05:13 dwmw2 Exp $
* Author: Alexander Larsson <alex@cendio.se>
*
* Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
return 0;
}
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_mtdram init_module
-#define cleanup_mtdram cleanup_module
-#endif
-
-//static void __exit cleanup_mtdram(void)
-mod_exit_t cleanup_mtdram(void)
+static void __exit cleanup_mtdram(void)
{
if (mtd_info) {
del_mtd_device(mtd_info);
}
}
-mod_init_t init_mtdram(void)
+int __init init_mtdram(void)
{
// Allocate some memory
mtd_info = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
module_init(init_mtdram);
module_exit(cleanup_mtdram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Larsson <alexl@redhat.com>");
+MODULE_DESCRIPTION("Simulated MTD driver for testing");
+
/*
- * $Id: pmc551.c,v 1.17 2001/05/22 13:56:46 dwmw2 Exp $
+ * $Id: pmc551.c,v 1.19 2001/10/02 15:05:13 dwmw2 Exp $
*
* PMC551 PCI Mezzanine Ram Device
*
* waiting for it to set .. this does not safely handle busted
* devices that never reset the register correctly which will
* cause the system to hang w/ a reboot being the only chance at
- * recover.
+ * recover. [sort of fixed, could be better]
+ * * Add I2C handling of the SROM so we can read the SROM's information
+ * about the aperture size. This should always accurately reflect the
+ * onboard memory size.
+ * * Comb the init routine. It's still a bit cludgy on a few things.
*/
#include <linux/config.h>
static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr)
{
- struct mypriv *priv = mtd->priv;
- u32 start_addr_highbits;
- u32 end_addr_highbits;
- u32 start_addr_lowbits;
- u32 end_addr_lowbits;
+ struct mypriv *priv = (struct mypriv *)mtd->priv;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
+ u_char *ptr;
+ size_t retlen;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr, (long)instr->len);
+#endif
- end = instr->addr + instr->len;
+ end = instr->addr + instr->len - 1;
- /* Is it too much memory? The second check find if we wrap around
- past the end of a u32. */
- if ((end > mtd->size) || (end < instr->addr)) {
+ /* Is it past the end? */
+ if ( end > mtd->size ) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n", (long)end, (long)mtd->size);
+#endif
return -EINVAL;
}
- start_addr_highbits = instr->addr & PMC551_ADDR_HIGH_MASK;
- end_addr_highbits = end & PMC551_ADDR_HIGH_MASK;
- start_addr_lowbits = instr->addr & PMC551_ADDR_LOW_MASK;
- end_addr_lowbits = end & PMC551_ADDR_LOW_MASK;
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_hi = instr->addr & ~(priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+ soff_lo = instr->addr & (priv->asize - 1);
+
+ pmc551_point (mtd, instr->addr, instr->len, &retlen, &ptr);
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- (priv->mem_map0_base_val
- | start_addr_highbits));
- if (start_addr_highbits == end_addr_highbits) {
+ if ( soff_hi == eoff_hi || mtd->size == priv->asize) {
/* The whole thing fits within one access, so just one shot
will do it. */
- memset(priv->start + start_addr_lowbits,
- 0xff,
- instr->len);
+ memset(ptr, 0xff, instr->len);
} else {
/* We have to do multiple writes to get all the data
written. */
- memset(priv->start + start_addr_lowbits,
- 0xff,
- priv->aperture_size - start_addr_lowbits);
- start_addr_highbits += priv->aperture_size;
- while (start_addr_highbits != end_addr_highbits) {
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- (priv->mem_map0_base_val
- | start_addr_highbits));
- memset(priv->start,
- 0xff,
- priv->aperture_size);
- start_addr_highbits += priv->aperture_size;
+ while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk( KERN_DEBUG "pmc551_erase() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+ memset(ptr, 0xff, priv->asize);
+ if (soff_hi + priv->asize >= mtd->size) {
+ goto out;
+ }
+ soff_hi += priv->asize;
+ pmc551_point (mtd,(priv->base_map0|soff_hi),
+ priv->asize, &retlen, &ptr);
}
- priv->curr_mem_map0_val = (priv->mem_map0_base_val
- | start_addr_highbits);
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- priv->curr_mem_map0_val);
- memset(priv->start,
- 0xff,
- end_addr_lowbits);
+ memset (ptr, 0xff, eoff_lo);
}
+out:
instr->state = MTD_ERASE_DONE;
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_erase() done\n");
+#endif
if (instr->callback) {
(*(instr->callback))(instr);
}
-
return 0;
}
+static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
+{
+	struct mypriv *priv = (struct mypriv *)mtd->priv;
+	u32 soff_hi;
+	u32 soff_lo;
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+	printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
+#endif
+
+	if (from + len > mtd->size) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+		printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n", (long)from+len, (long)mtd->size);
+#endif
+		return -EINVAL;
+	}
+
+	soff_hi = from & ~(priv->asize - 1);
+	soff_lo = from & (priv->asize - 1);
+
+	/* Only rewrite MEM_MAP0 when the aperture window actually moves */
+	if( priv->curr_map0 != soff_hi ) {
+		pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0,
+					 (priv->base_map0 | soff_hi) );
+		priv->curr_map0 = soff_hi;
+	}
+
+	*mtdbuf = priv->start + soff_lo;
+	*retlen = len;
+	return 0;
+}
+
+
static void pmc551_unpoint (struct mtd_info *mtd, u_char *addr)
-{}
+{
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_unpoint()\n");
+#endif
+}
-static int pmc551_read (struct mtd_info *mtd,
- loff_t from,
- size_t len,
- size_t *retlen,
- u_char *buf)
+static int pmc551_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct mypriv *priv = (struct mypriv *)mtd->priv;
- u32 start_addr_highbits;
- u32 end_addr_highbits;
- u32 start_addr_lowbits;
- u32 end_addr_lowbits;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
+ u_char *ptr;
u_char *copyto = buf;
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n", (long)from, (long)len, (long)priv->asize);
+#endif
+
+ end = from + len - 1;
/* Is it past the end? */
- if (from > mtd->size) {
+ if (end > mtd->size) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n", (long) end, (long)mtd->size);
+#endif
return -EINVAL;
}
- end = from + len;
- start_addr_highbits = from & PMC551_ADDR_HIGH_MASK;
- end_addr_highbits = end & PMC551_ADDR_HIGH_MASK;
- start_addr_lowbits = from & PMC551_ADDR_LOW_MASK;
- end_addr_lowbits = end & PMC551_ADDR_LOW_MASK;
-
-
- /* Only rewrite the first value if it doesn't match our current
- values. Most operations are on the same page as the previous
- value, so this is a pretty good optimization. */
- if (priv->curr_mem_map0_val !=
- (priv->mem_map0_base_val | start_addr_highbits)) {
- priv->curr_mem_map0_val = (priv->mem_map0_base_val
- | start_addr_highbits);
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- priv->curr_mem_map0_val);
- }
+ soff_hi = from & ~(priv->asize - 1);
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_lo = from & (priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point (mtd, from, len, retlen, &ptr);
- if (start_addr_highbits == end_addr_highbits) {
+ if (soff_hi == eoff_hi) {
/* The whole thing fits within one access, so just one shot
will do it. */
- memcpy(copyto,
- priv->start + start_addr_lowbits,
- len);
+ memcpy(copyto, ptr, len);
copyto += len;
} else {
/* We have to do multiple writes to get all the data
written. */
- memcpy(copyto,
- priv->start + start_addr_lowbits,
- priv->aperture_size - start_addr_lowbits);
- copyto += priv->aperture_size - start_addr_lowbits;
- start_addr_highbits += priv->aperture_size;
- while (start_addr_highbits != end_addr_highbits) {
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- (priv->mem_map0_base_val
- | start_addr_highbits));
- memcpy(copyto,
- priv->start,
- priv->aperture_size);
- copyto += priv->aperture_size;
- start_addr_highbits += priv->aperture_size;
- if (start_addr_highbits >= mtd->size) {
- /* Make sure we have the right value here. */
- priv->curr_mem_map0_val
- = (priv->mem_map0_base_val
- | start_addr_highbits);
+ while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk( KERN_DEBUG "pmc551_read() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+ memcpy(copyto, ptr, priv->asize);
+ copyto += priv->asize;
+ if (soff_hi + priv->asize >= mtd->size) {
goto out;
}
+ soff_hi += priv->asize;
+ pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr);
}
- priv->curr_mem_map0_val = (priv->mem_map0_base_val
- | start_addr_highbits);
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- priv->curr_mem_map0_val);
- memcpy(copyto,
- priv->start,
- end_addr_lowbits);
- copyto += end_addr_lowbits;
+ memcpy(copyto, ptr, eoff_lo);
+ copyto += eoff_lo;
}
out:
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_read() done\n");
+#endif
*retlen = copyto - buf;
return 0;
}
static int pmc551_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
struct mypriv *priv = (struct mypriv *)mtd->priv;
- u32 start_addr_highbits;
- u32 end_addr_highbits;
- u32 start_addr_lowbits;
- u32 end_addr_lowbits;
+ u32 soff_hi, soff_lo; /* start address offset hi/lo */
+ u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
+ u_char *ptr;
const u_char *copyfrom = buf;
- /* Is it past the end? */
- if (to > mtd->size) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n", (long)to, (long)len, (long)priv->asize);
+#endif
+
+ end = to + len - 1;
+ /* Is it past the end? or did the u32 wrap? */
+ if (end > mtd->size ) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, size: %ld, to: %ld)\n", (long) end, (long)mtd->size, (long)to);
+#endif
return -EINVAL;
}
- end = to + len;
- start_addr_highbits = to & PMC551_ADDR_HIGH_MASK;
- end_addr_highbits = end & PMC551_ADDR_HIGH_MASK;
- start_addr_lowbits = to & PMC551_ADDR_LOW_MASK;
- end_addr_lowbits = end & PMC551_ADDR_LOW_MASK;
-
-
- /* Only rewrite the first value if it doesn't match our current
- values. Most operations are on the same page as the previous
- value, so this is a pretty good optimization. */
- if (priv->curr_mem_map0_val !=
- (priv->mem_map0_base_val | start_addr_highbits)) {
- priv->curr_mem_map0_val = (priv->mem_map0_base_val
- | start_addr_highbits);
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- priv->curr_mem_map0_val);
- }
+ soff_hi = to & ~(priv->asize - 1);
+ eoff_hi = end & ~(priv->asize - 1);
+ soff_lo = to & (priv->asize - 1);
+ eoff_lo = end & (priv->asize - 1);
+
+ pmc551_point (mtd, to, len, retlen, &ptr);
- if (start_addr_highbits == end_addr_highbits) {
+ if (soff_hi == eoff_hi) {
/* The whole thing fits within one access, so just one shot
will do it. */
- memcpy(priv->start + start_addr_lowbits,
- copyfrom,
- len);
+ memcpy(ptr, copyfrom, len);
copyfrom += len;
} else {
/* We have to do multiple writes to get all the data
written. */
- memcpy(priv->start + start_addr_lowbits,
- copyfrom,
- priv->aperture_size - start_addr_lowbits);
- copyfrom += priv->aperture_size - start_addr_lowbits;
- start_addr_highbits += priv->aperture_size;
- while (start_addr_highbits != end_addr_highbits) {
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- (priv->mem_map0_base_val
- | start_addr_highbits));
- memcpy(priv->start,
- copyfrom,
- priv->aperture_size);
- copyfrom += priv->aperture_size;
- start_addr_highbits += priv->aperture_size;
- if (start_addr_highbits >= mtd->size) {
- /* Make sure we have the right value here. */
- priv->curr_mem_map0_val
- = (priv->mem_map0_base_val
- | start_addr_highbits);
+		while (soff_hi != eoff_hi) {
+#ifdef CONFIG_MTD_PMC551_DEBUG
+			printk( KERN_DEBUG "pmc551_write() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi);
+#endif
+			memcpy(ptr, copyfrom, priv->asize);
+			copyfrom += priv->asize;
+			if (soff_hi + priv->asize >= mtd->size) {
				goto out;
			}
+			soff_hi += priv->asize;
+			pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr);
		}
- priv->curr_mem_map0_val = (priv->mem_map0_base_val
- | start_addr_highbits);
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- priv->curr_mem_map0_val);
- memcpy(priv->start,
- copyfrom,
- end_addr_lowbits);
- copyfrom += end_addr_lowbits;
+ memcpy(ptr, copyfrom, eoff_lo);
+ copyfrom += eoff_lo;
}
out:
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk(KERN_DEBUG "pmc551_write() done\n");
+#endif
*retlen = copyfrom - buf;
return 0;
}
#ifndef CONFIG_MTD_PMC551_BUGFIX
pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, ~0 );
pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &size );
+ size = (size&PCI_BASE_ADDRESS_MEM_MASK);
+ size &= ~(size-1);
pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
- size=~(size&PCI_BASE_ADDRESS_MEM_MASK)+1;
#else
/*
* Get the size of the memory by reading all the DRAM size values
* The loop is taken directly from Ramix's example code. I assume that
* this must be held high for some duration of time, but I can find no
* documentation refrencing the reasons why.
- *
*/
for ( i = 1; i<=8 ; i++) {
pci_write_config_word (dev, PMC551_SDRAM_CMD, 0x0df);
* it's possible that the reset of the V370PDC nuked the original
* setup
*/
+ /*
cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH;
pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg );
+ */
/*
* Turn PCI memory and I/O bus access back on
* Some screen fun
*/
printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at 0x%lx\n",
- (size<1024)?size:(size<1048576)?size/1024:size/1024/1024,
+ (size<1024)?size:(size<1048576)?size>>10:size>>20,
(size<1024)?'B':(size<1048576)?'K':'M',
size, ((dcmd&(0x1<<3)) == 0)?"non-":"",
PCI_BASE_ADDRESS(dev)&PCI_BASE_ADDRESS_MEM_MASK );
* Kernel version specific module stuffages
*/
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_pmc551 init_module
-#define cleanup_pmc551 cleanup_module
-#endif
-#if defined(MODULE)
+MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>");
MODULE_DESCRIPTION(PMC551_VERSION);
MODULE_PARM(msize, "i");
-MODULE_PARM_DESC(msize, "memory size, 6=32M, 7=64M, 8=128M, etc.. [32M-1024M]");
+MODULE_PARM_DESC(msize, "memory size in Megabytes [1 - 1024]");
MODULE_PARM(asize, "i");
-MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1M-1024M]");
-#endif
+MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
+
/*
* Stuff these outside the ifdef so as to not bust compiled in driver support
*/
u32 length = 0;
if(msize) {
- if (msize < 6 || msize > 11 ) {
- printk(KERN_NOTICE "pmc551: Invalid memory size\n");
- return -ENODEV;
+ msize = (1 << (ffs(msize) - 1))<<20;
+ if (msize > (1<<30)) {
+ printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n", msize);
+ return -EINVAL;
}
- msize = (512*1024)<<msize;
}
if(asize) {
- if (asize < 1 || asize > 11 ) {
- printk(KERN_NOTICE "pmc551: Invalid aperture size\n");
- return -ENODEV;
+ asize = (1 << (ffs(asize) - 1))<<20;
+ if (asize > (1<<30) ) {
+ printk(KERN_NOTICE "pmc551: Invalid aperture size [%d]\n", asize);
+ return -EINVAL;
}
- asize = (512*1024)<<asize;
}
printk(KERN_INFO PMC551_VERSION);
*/
for( count = 0; count < MAX_MTD_DEVICES; count++ ) {
- if ( (PCI_Device = pci_find_device( PCI_VENDOR_ID_V3_SEMI,
- PCI_DEVICE_ID_V3_SEMI_V370PDC, PCI_Device ) ) == NULL) {
+ if ((PCI_Device = pci_find_device(PCI_VENDOR_ID_V3_SEMI,
+ PCI_DEVICE_ID_V3_SEMI_V370PDC,
+ PCI_Device ) ) == NULL) {
break;
}
- printk(KERN_NOTICE "pmc551: Found PCI V370PDC IRQ:%d\n",
- PCI_Device->irq);
+ printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%lX\n",
+ PCI_BASE_ADDRESS(PCI_Device));
/*
* The PMC551 device acts VERY weird if you don't init it
printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n");
break;
}
+
+ /*
+	 * This is needed until the driver is capable of reading the
+ * onboard I2C SROM to discover the "real" memory size.
+ */
if(msize) {
length = msize;
printk(KERN_NOTICE "pmc551: Using specified memory size 0x%x\n", length);
+ } else {
+ msize = length;
}
mtd = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
}
memset(priv, 0, sizeof(*priv));
mtd->priv = priv;
-
priv->dev = PCI_Device;
- if(asize) {
- if(asize > length) {
- asize=length;
- printk(KERN_NOTICE "pmc551: reducing aperture size to fit memory [0x%x]\n",asize);
- } else {
- printk(KERN_NOTICE "pmc551: Using specified aperture size 0x%x\n", asize);
- }
- priv->aperture_size = asize;
+
+ if(asize > length) {
+ printk(KERN_NOTICE "pmc551: reducing aperture size to fit %dM\n",length>>20);
+ priv->asize = asize = length;
+ } else if (asize == 0 || asize == length) {
+ printk(KERN_NOTICE "pmc551: Using existing aperture size %dM\n", length>>20);
+ priv->asize = asize = length;
} else {
- priv->aperture_size = length;
+ printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20);
+ priv->asize = asize;
}
priv->start = ioremap((PCI_BASE_ADDRESS(PCI_Device)
& PCI_BASE_ADDRESS_MEM_MASK),
- priv->aperture_size);
+ priv->asize);
if (!priv->start) {
+ printk(KERN_NOTICE "pmc551: Unable to map IO space\n");
kfree(mtd->priv);
kfree(mtd);
break;
}
- /*
- * Due to the dynamic nature of the code, we need to figure
- * this out in order to stuff the register to set the proper
- * aperture size. If you know of an easier way to do this then
- * PLEASE help yourself.
- *
- * Not with bloody floating point, you don't. Consider yourself
- * duly LARTed. dwmw2.
- */
- {
- u32 size;
- u16 bits;
- size = priv->aperture_size>>20;
- for(bits=0;!(size&0x01)&&size>0;bits++,size=size>>1);
- //size=((u32)((log10(priv->aperture_size)/.30103)-19)<<4);
- priv->mem_map0_base_val = (PMC551_PCI_MEM_MAP_REG_EN
- | PMC551_PCI_MEM_MAP_ENABLE
- | size);
+
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk(KERN_NOTICE "pmc551: aperture set to %d[%d]\n",
- size, size>>4);
+ printk( KERN_DEBUG "pmc551: setting aperture to %d\n",
+ ffs(priv->asize>>20)-1);
#endif
- }
- priv->curr_mem_map0_val = priv->mem_map0_base_val;
-
- pci_write_config_dword ( priv->dev,
- PMC551_PCI_MEM_MAP0,
- priv->curr_mem_map0_val);
-
- mtd->size = length;
- mtd->flags = (MTD_CLEAR_BITS
- | MTD_SET_BITS
- | MTD_WRITEB_WRITEABLE
- | MTD_VOLATILE);
- mtd->erase = pmc551_erase;
- mtd->point = NULL;
- mtd->unpoint = pmc551_unpoint;
- mtd->read = pmc551_read;
- mtd->write = pmc551_write;
- mtd->module = THIS_MODULE;
- mtd->type = MTD_RAM;
- mtd->name = "PMC551 RAM board";
- mtd->erasesize = 0x10000;
+
+ priv->base_map0 = ( PMC551_PCI_MEM_MAP_REG_EN
+ | PMC551_PCI_MEM_MAP_ENABLE
+ | (ffs(priv->asize>>20)-1)<<4 );
+ priv->curr_map0 = priv->base_map0;
+ pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0,
+ priv->curr_map0 );
+
+#ifdef CONFIG_MTD_PMC551_DEBUG
+ printk( KERN_DEBUG "pmc551: aperture set to %d\n",
+ (priv->base_map0 & 0xF0)>>4 );
+#endif
+
+ mtd->size = msize;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->erase = pmc551_erase;
+ mtd->read = pmc551_read;
+ mtd->write = pmc551_write;
+ mtd->point = pmc551_point;
+ mtd->unpoint = pmc551_unpoint;
+ mtd->module = THIS_MODULE;
+ mtd->type = MTD_RAM;
+ mtd->name = "PMC551 RAM board";
+ mtd->erasesize = 0x10000;
if (add_mtd_device(mtd)) {
printk(KERN_NOTICE "pmc551: Failed to register new device\n");
}
printk(KERN_NOTICE "Registered pmc551 memory device.\n");
printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n",
- priv->aperture_size/1024/1024,
+ priv->asize>>20,
priv->start,
- priv->start + priv->aperture_size);
+ priv->start + priv->asize);
printk(KERN_NOTICE "Total memory is %d%c\n",
(length<1024)?length:
- (length<1048576)?length/1024:length/1024/1024,
+ (length<1048576)?length>>10:length>>20,
(length<1024)?'B':(length<1048576)?'K':'M');
priv->nextpmc551 = pmc551list;
pmc551list = mtd;
}
if( !pmc551list ) {
- printk(KERN_NOTICE "pmc551: not detected,\n");
+ printk(KERN_NOTICE "pmc551: not detected\n");
return -ENODEV;
} else {
printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found);
priv = (struct mypriv *)mtd->priv;
pmc551list = priv->nextpmc551;
- if(priv->start)
- iounmap(priv->start);
+ if(priv->start) {
+ printk (KERN_DEBUG "pmc551: unmapping %dM starting at 0x%p\n",
+ priv->asize>>20, priv->start);
+ iounmap (priv->start);
+ }
kfree (mtd->priv);
- del_mtd_device(mtd);
- kfree(mtd);
+ del_mtd_device (mtd);
+ kfree (mtd);
found++;
}
/*======================================================================
- $Id: slram.c,v 1.19 2001/06/02 20:33:20 dwmw2 Exp $
+ $Id: slram.c,v 1.25 2001/10/02 15:05:13 dwmw2 Exp $
======================================================================*/
static char *map;
#endif
-#ifdef MODULE
-#if LINUX_VERSION_CODE < 0x20212
-#define init_slram init_module
-#define cleanup_slram cleanup_module
-#endif
-
MODULE_PARM(map, "3-" __MODULE_STRING(SLRAM_MAX_DEVICES_PARAMS) "s");
MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\"");
-#endif
static slram_mtd_list_t *slram_mtdlist = NULL;
/*====================================================================*/
-int register_device(char *name, long start, long length)
+int register_device(char *name, unsigned long start, unsigned long length)
{
slram_mtd_list_t **curmtd;
}
*curmtd = kmalloc(sizeof(slram_mtd_list_t), GFP_KERNEL);
- if (!curmtd) {
+ if (!(*curmtd)) {
E("slram: Cannot allocate new MTD device.\n");
return(-ENOMEM);
}
kfree((*curmtd)->mtdinfo);
return(-EAGAIN);
}
- T("slram: Registered device %s from %dKiB to %dKiB\n", name,
- (int)(start / 1024), (int)((start + length) / 1024));
+ T("slram: Registered device %s from %luKiB to %luKiB\n", name,
+ (start / 1024), ((start + length) / 1024));
T("slram: Mapped from 0x%p to 0x%p\n",
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start,
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end);
}
}
-int handle_unit(long value, char *unit)
+unsigned long handle_unit(unsigned long value, char *unit)
{
if ((*unit == 'M') || (*unit == 'm')) {
return(value * 1024 * 1024);
int parse_cmdline(char *devname, char *szstart, char *szlength)
{
char *buffer;
- long devstart;
- long devlength;
+ unsigned long devstart;
+ unsigned long devlength;
if ((!devname) || (!szstart) || (!szlength)) {
unregister_devices();
devlength = simple_strtoul(szlength + 1, &buffer, 0);
devlength = handle_unit(devlength, buffer);
}
- T("slram: devname=%s, devstart=%li, devlength=%li\n",
+ T("slram: devname=%s, devstart=0x%lx, devlength=0x%lx\n",
devname, devstart, devlength);
-	if ((devstart < 0) || (devlength < 0)) {
+	/* devstart/devlength are unsigned long now; a "< 0" test can never
+	   be true. Reject address wrap-around instead. */
+	if (devstart + devlength < devstart) {
E("slram: Illegal start / length parameter.\n");
module_init(init_slram);
module_exit(cleanup_slram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jochen Schaeuble <psionic@psionic.de>");
+MODULE_DESCRIPTION("MTD driver for uncached system RAM");
/* This version ported to the Linux-MTD system by dwmw2@infradead.org
- * $Id: ftl.c,v 1.35 2001/06/09 00:40:17 dwmw2 Exp $
+ * $Id: ftl.c,v 1.39 2001/10/02 15:05:11 dwmw2 Exp $
*
* Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
max_offset = (0x100000<part->mtd->size)?0x100000:part->mtd->size;
/* Search first megabyte for a valid FTL header */
for (offset = 0;
- offset < max_offset;
+ (offset + sizeof(header)) < max_offset;
offset += part->mtd->erasesize ? : 0x2000) {
ret = part->mtd->read(part->mtd, offset, sizeof(header), &ret,
}
}
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_ftl init_module
-#define cleanup_ftl cleanup_module
-#endif
-
-mod_init_t init_ftl(void)
+int init_ftl(void)
{
int i;
memset(myparts, 0, sizeof(myparts));
- DEBUG(0, "$Id: ftl.c,v 1.35 2001/06/09 00:40:17 dwmw2 Exp $\n");
+ DEBUG(0, "$Id: ftl.c,v 1.39 2001/10/02 15:05:11 dwmw2 Exp $\n");
if (register_blkdev(FTL_MAJOR, "ftl", &ftl_blk_fops)) {
printk(KERN_NOTICE "ftl_cs: unable to grab major "
return 0;
}
-mod_exit_t cleanup_ftl(void)
+static void __exit cleanup_ftl(void)
{
unregister_mtd_user(&ftl_notifier);
module_init(init_ftl);
module_exit(cleanup_ftl);
+
+
+MODULE_LICENSE("Dual MPL/GPL");
+MODULE_AUTHOR("David Hinds <dhinds@sonic.net>");
+MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices and M-Systems DiskOnChip 1000");
# drivers/mtd/maps/Config.in
-# $Id: Config.in,v 1.9.2.1 2001/06/09 19:43:49 dwmw2 Exp $
+# $Id: Config.in,v 1.16 2001/09/19 18:28:37 dwmw2 Exp $
mainmenu_option next_comment
fi
if [ "$CONFIG_SPARC" = "y" -o "$CONFIG_SPARC64" = "y" ]; then
- dep_tristate ' Sun Microsystems userflash support' CONFIG_MTD_SUN_UFLASH $CONFIG_SPARC64
+ dep_tristate ' Sun Microsystems userflash support' CONFIG_MTD_SUN_UFLASH $CONFIG_MTD_CFI
fi
-if [ "$CONFIG_ARM" = "y" ]; then
- dep_tristate ' CFI Flash device mapped on Nora' CONFIG_MTD_NORA $CONFIG_MTD_CFI
-fi
-dep_tristate ' CFI Flash device mapped on Photron PNC-2000' CONFIG_MTD_PNC2000 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS
-dep_tristate ' CFI Flash device mapped on RPX Lite or CLLF' CONFIG_MTD_RPXLITE $CONFIG_MTD_CFI
-if [ "$CONFIG_X86" = "y" ]; then
+
+if [ "$CONFIG_X86" = "y" ]; then
+ dep_tristate ' CFI Flash device mapped on Photron PNC-2000' CONFIG_MTD_PNC2000 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS
dep_tristate ' CFI Flash device mapped on AMD SC520 CDP' CONFIG_MTD_SC520CDP $CONFIG_MTD_CFI
dep_tristate ' CFI Flash device mapped on AMD NetSc520' CONFIG_MTD_NETSC520 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS
+ dep_tristate ' CFI Flash device mapped on Arcom SBC-GXx boards' CONFIG_MTD_SBC_GXX $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS
+ dep_tristate ' CFI Flash device mapped on Arcom ELAN-104NC' CONFIG_MTD_ELAN_104NC $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS
+ dep_tristate ' JEDEC Flash device mapped on Mixcom piggyback card' CONFIG_MTD_MIXMEM $CONFIG_MTD_JEDEC
+ dep_tristate ' JEDEC Flash device mapped on Octagon 5066 SBC' CONFIG_MTD_OCTAGON $CONFIG_MTD_JEDEC
+ dep_tristate ' JEDEC Flash device mapped on Tempustech VMAX SBC301' CONFIG_MTD_VMAX $CONFIG_MTD_JEDEC
+ dep_tristate ' BIOS flash chip on Intel L440GX boards' CONFIG_MTD_L440GX $CONFIG_I386 $CONFIG_MTD_JEDEC
+fi
+
+if [ "$CONFIG_PPC" = "y" ]; then
+ dep_tristate ' CFI Flash device mapped on TQM8XXL' CONFIG_MTD_TQM8XXL $CONFIG_MTD_CFI $CONFIG_TQM8xxL $CONFIG_PPC
+ dep_tristate ' CFI Flash device mapped on RPX Lite or CLLF' CONFIG_MTD_RPXLITE $CONFIG_MTD_CFI $CONFIG_PPC
+ dep_tristate ' CFI Flash device mapped on D-Box2' CONFIG_MTD_DBOX2 $CONFIG_MTD_CFI_INTELSTD $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_CFI_AMDSTD
+ dep_tristate ' CFI Flash device mapping on FlagaDM' CONFIG_MTD_CFI_FLAGADM $CONFIG_MTD_CFI
+fi
+
+if [ "$CONFIG_MIPS" = "y" ]; then
+ dep_tristate ' Flash chip mapping on ITE QED-4N-S01B, Globespan IVR or custom board' CONFIG_MTD_CSTM_MIPS_IXX $CONFIG_MTD_CFI $CONFIG_MTD_JEDEC $CONFIG_MTD_PARTITIONS
+ if [ "$CONFIG_MTD_CSTM_MIPS_IXX" = "y" -o "$CONFIG_MTD_CSTM_MIPS_IXX" = "m" ]; then
+ hex ' Physical start address of flash mapping' CONFIG_MTD_CSTM_MIPS_IXX_START 0x8000000
+ hex ' Physical length of flash mapping' CONFIG_MTD_CSTM_MIPS_IXX_LEN 0x4000000
+ int ' Bus width in octets' CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH 2
+ fi
+ dep_tristate ' Momenco Ocelot boot flash device' CONFIG_MTD_OCELOT $CONFIG_MOMENCO_OCELOT
+fi
+
+if [ "$CONFIG_SH" = "y" ]; then
+ dep_tristate ' CFI Flash device mapped on Hitachi SolutionEngine' CONFIG_MTD_SOLUTIONENGINE $CONFIG_MTD_CFI $CONFIG_SH $CONFIG_MTD_REDBOOT_PARTS
fi
-dep_tristate ' CFI Flash device mapped on Arcom SBC-GXx boards' CONFIG_MTD_SBC_GXX $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS
-dep_tristate ' CFI Flash device mapped on Arcom ELAN-104NC' CONFIG_MTD_ELAN_104NC $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS
+
if [ "$CONFIG_ARM" = "y" ]; then
-dep_tristate ' CFI Flash device mapped on StrongARM SA11x0' CONFIG_MTD_SA1100 $CONFIG_MTD_CFI $CONFIG_ARCH_SA1100 $CONFIG_MTD_PARTITIONS
- dep_bool ' Support for RedBoot partition tables on SA11x0' CONFIG_MTD_SA1100_REDBOOT_PARTITIONS $CONFIG_MTD_SA1100 $CONFIG_MTD_REDBOOT_PARTS
- dep_bool ' Support for Compaq bootldr partition tables on SA11x0' CONFIG_MTD_SA1100_BOOTLDR_PARTITIONS $CONFIG_MTD_SA1100 $CONFIG_MTD_BOOTLDR_PARTS
+ dep_tristate ' CFI Flash device mapped on Nora' CONFIG_MTD_NORA $CONFIG_MTD_CFI
+ dep_tristate ' CFI Flash device mapped on ARM Integrator/P720T' CONFIG_MTD_ARM_INTEGRATOR $CONFIG_MTD_CFI
+ dep_tristate ' Cirrus CDB89712 evaluation board mappings' CONFIG_MTD_CDB89712 $CONFIG_MTD_CFI $CONFIG_ARCH_CDB89712
+ dep_tristate ' CFI Flash device mapped on StrongARM SA11x0' CONFIG_MTD_SA1100 $CONFIG_MTD_CFI $CONFIG_ARCH_SA1100 $CONFIG_MTD_PARTITIONS
dep_tristate ' CFI Flash device mapped on DC21285 Footbridge' CONFIG_MTD_DC21285 $CONFIG_MTD_CFI $CONFIG_ARCH_FOOTBRIDGE $CONFIG_MTD_PARTITIONS
- dep_tristate ' CFI Flash device mapped on the XScale IQ80310 board' CONFIG_MTD_IQ80310 $CONFIG_MTD_CFI $CONFIG_XSCALE_IQ80310
-fi
-dep_tristate ' CFI Flash device mapped on D-Box2' CONFIG_MTD_DBOX2 $CONFIG_MTD_CFI_INTELSTD $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_CFI_AMDSTD
-dep_tristate ' Flash chip mapping on ITE QED-4N-S01B, Globespan IVR or custom board' CONFIG_MTD_CSTM_MIPS_IXX $CONFIG_MTD_CFI $CONFIG_MTD_JEDEC $CONFIG_MTD_PARTITIONS
-if [ "$CONFIG_MTD_CSTM_MIPS_IXX" = "y" -o "$CONFIG_MTD_CSTM_MIPS_IXX" = "m" ]; then
- hex ' Physical start address of flash mapping' CONFIG_MTD_CSTM_MIPS_IXX_START 0x8000000
- hex ' Physical length of flash mapping' CONFIG_MTD_CSTM_MIPS_IXX_LEN 0x4000000
- int ' Bus width in octets' CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH 2
+ dep_tristate ' CFI Flash device mapped on the XScale IQ80310 board' CONFIG_MTD_IQ80310 $CONFIG_MTD_CFI $CONFIG_ARCH_IQ80310
fi
-dep_tristate ' CFI Flash device mapping on FlagaDM' CONFIG_MTD_CFI_FLAGADM $CONFIG_MTD_CFI
-dep_tristate ' JEDEC Flash device mapped on Mixcom piggyback card' CONFIG_MTD_MIXMEM $CONFIG_MTD_JEDEC
-dep_tristate ' JEDEC Flash device mapped on Octagon 5066 SBC' CONFIG_MTD_OCTAGON $CONFIG_MTD_JEDEC
-dep_tristate ' JEDEC Flash device mapped on Tempustech VMAX SBC301' CONFIG_MTD_VMAX $CONFIG_MTD_JEDEC
-dep_tristate ' Momenco Ocelot boot flash device' CONFIG_MTD_OCELOT $CONFIG_MOMENCO_OCELOT
+
endmenu
#
# linux/drivers/maps/Makefile
#
-# $Id: Makefile,v 1.9.2.1 2001/06/09 19:43:49 dwmw2 Exp $
+# $Id: Makefile,v 1.13 2001/08/16 15:16:58 rmk Exp $
O_TARGET := mapslink.o
# Chip mappings
-
+obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
+obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
obj-$(CONFIG_MTD_DC21285) += dc21285.o
obj-$(CONFIG_MTD_ELAN_104NC) += elan-104nc.o
obj-$(CONFIG_MTD_IQ80310) += iq80310.o
+obj-$(CONFIG_MTD_L440GX) += l440gx.o
obj-$(CONFIG_MTD_NORA) += nora.o
obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
+obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
obj-$(CONFIG_MTD_VMAX) += vmax301.o
obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
obj-$(CONFIG_MTD_OCELOT) += ocelot.o
+obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
include $(TOPDIR)/Rules.make
--- /dev/null
+/*
+ * Flash on Cirrus CDB89712
+ *
+ * $Id: cdb89712.c,v 1.3 2001/10/02 15:14:43 rmk Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <asm/io.h>
+#include <asm/arch/hardware.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+
+
+__u8 cdb89712_read8(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readb(map->map_priv_1 + ofs);
+}
+
+__u16 cdb89712_read16(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readw(map->map_priv_1 + ofs);
+}
+
+__u32 cdb89712_read32(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readl(map->map_priv_1 + ofs);
+}
+
+void cdb89712_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ __raw_writeb(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void cdb89712_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ __raw_writew(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void cdb89712_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ __raw_writel(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void cdb89712_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ // printk ("cdb89712_copy_from: 0x%x@0x%x -> 0x%x\n", len, from, to);
+ memcpy_fromio(to, map->map_priv_1 + from, len);
+}
+
+void cdb89712_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while(len) {
+ __raw_writeb(*(unsigned char *) from, map->map_priv_1 + to);
+ from++;
+ to++;
+ len--;
+ }
+}
+
+
+static struct mtd_info *flash_mtd;
+
+struct map_info cdb89712_flash_map = {
+ name: "flash",
+ size: FLASH_SIZE,
+ buswidth: FLASH_WIDTH,
+ read8: cdb89712_read8,
+ read16: cdb89712_read16,
+ read32: cdb89712_read32,
+ copy_from: cdb89712_copy_from,
+ write8: cdb89712_write8,
+ write16: cdb89712_write16,
+ write32: cdb89712_write32,
+ copy_to: cdb89712_copy_to
+};
+
+struct resource cdb89712_flash_resource = {
+ name: "Flash",
+ start: FLASH_START,
+ end: FLASH_START + FLASH_SIZE - 1,
+ flags: IORESOURCE_IO | IORESOURCE_BUSY,
+};
+
+static int __init init_cdb89712_flash (void)
+{
+ int err;
+
+ if (request_resource (&ioport_resource, &cdb89712_flash_resource)) {
+ printk(KERN_NOTICE "Failed to reserve Cdb89712 FLASH space\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ cdb89712_flash_map.map_priv_1 = (unsigned long)ioremap(FLASH_START, FLASH_SIZE);
+ if (!cdb89712_flash_map.map_priv_1) {
+ printk(KERN_NOTICE "Failed to ioremap Cdb89712 FLASH space\n");
+ err = -EIO;
+ goto out_resource;
+ }
+
+ flash_mtd = do_map_probe("cfi_probe", &cdb89712_flash_map);
+ if (!flash_mtd) {
+ flash_mtd = do_map_probe("map_rom", &cdb89712_flash_map);
+ if (flash_mtd)
+ flash_mtd->erasesize = 0x10000;
+ }
+ if (!flash_mtd) {
+ printk("FLASH probe failed\n");
+ err = -ENXIO;
+ goto out_ioremap;
+ }
+
+ flash_mtd->module = THIS_MODULE;
+
+ if (add_mtd_device(flash_mtd)) {
+ printk("FLASH device addition failed\n");
+ err = -ENOMEM;
+ goto out_probe;
+ }
+
+ return 0;
+
+out_probe:
+ map_destroy(flash_mtd);
+ flash_mtd = 0;
+out_ioremap:
+ iounmap((void *)cdb89712_flash_map.map_priv_1);
+out_resource:
+ release_resource (&cdb89712_flash_resource);
+out:
+ return err;
+}
+
+
+
+
+
+static struct mtd_info *sram_mtd;
+
+struct map_info cdb89712_sram_map = {
+ name: "SRAM",
+ size: SRAM_SIZE,
+ buswidth: SRAM_WIDTH,
+ read8: cdb89712_read8,
+ read16: cdb89712_read16,
+ read32: cdb89712_read32,
+ copy_from: cdb89712_copy_from,
+ write8: cdb89712_write8,
+ write16: cdb89712_write16,
+ write32: cdb89712_write32,
+ copy_to: cdb89712_copy_to
+};
+
+struct resource cdb89712_sram_resource = {
+ name: "SRAM",
+ start: SRAM_START,
+ end: SRAM_START + SRAM_SIZE - 1,
+ flags: IORESOURCE_IO | IORESOURCE_BUSY,
+};
+
+static int __init init_cdb89712_sram (void)
+{
+ int err;
+
+ if (request_resource (&ioport_resource, &cdb89712_sram_resource)) {
+ printk(KERN_NOTICE "Failed to reserve Cdb89712 SRAM space\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ cdb89712_sram_map.map_priv_1 = (unsigned long)ioremap(SRAM_START, SRAM_SIZE);
+ if (!cdb89712_sram_map.map_priv_1) {
+ printk(KERN_NOTICE "Failed to ioremap Cdb89712 SRAM space\n");
+ err = -EIO;
+ goto out_resource;
+ }
+
+ sram_mtd = do_map_probe("map_ram", &cdb89712_sram_map);
+ if (!sram_mtd) {
+ printk("SRAM probe failed\n");
+ err = -ENXIO;
+ goto out_ioremap;
+ }
+
+ sram_mtd->module = THIS_MODULE;
+ sram_mtd->erasesize = 16;
+
+ if (add_mtd_device(sram_mtd)) {
+ printk("SRAM device addition failed\n");
+ err = -ENOMEM;
+ goto out_probe;
+ }
+
+ return 0;
+
+out_probe:
+ map_destroy(sram_mtd);
+ sram_mtd = 0;
+out_ioremap:
+ iounmap((void *)cdb89712_sram_map.map_priv_1);
+out_resource:
+ release_resource (&cdb89712_sram_resource);
+out:
+ return err;
+}
+
+
+
+
+
+
+
+static struct mtd_info *bootrom_mtd;
+
+struct map_info cdb89712_bootrom_map = {
+ name: "BootROM",
+ size: BOOTROM_SIZE,
+ buswidth: BOOTROM_WIDTH,
+ read8: cdb89712_read8,
+ read16: cdb89712_read16,
+ read32: cdb89712_read32,
+ copy_from: cdb89712_copy_from,
+};
+
+struct resource cdb89712_bootrom_resource = {
+ name: "BootROM",
+ start: BOOTROM_START,
+ end: BOOTROM_START + BOOTROM_SIZE - 1,
+ flags: IORESOURCE_IO | IORESOURCE_BUSY,
+};
+
+static int __init init_cdb89712_bootrom (void)
+{
+ int err;
+
+ if (request_resource (&ioport_resource, &cdb89712_bootrom_resource)) {
+ printk(KERN_NOTICE "Failed to reserve Cdb89712 BOOTROM space\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ cdb89712_bootrom_map.map_priv_1 = (unsigned long)ioremap(BOOTROM_START, BOOTROM_SIZE);
+ if (!cdb89712_bootrom_map.map_priv_1) {
+ printk(KERN_NOTICE "Failed to ioremap Cdb89712 BootROM space\n");
+ err = -EIO;
+ goto out_resource;
+ }
+
+ bootrom_mtd = do_map_probe("map_rom", &cdb89712_bootrom_map);
+ if (!bootrom_mtd) {
+ printk("BootROM probe failed\n");
+ err = -ENXIO;
+ goto out_ioremap;
+ }
+
+ bootrom_mtd->module = THIS_MODULE;
+ bootrom_mtd->erasesize = 0x10000;
+
+ if (add_mtd_device(bootrom_mtd)) {
+ printk("BootROM device addition failed\n");
+ err = -ENOMEM;
+ goto out_probe;
+ }
+
+ return 0;
+
+out_probe:
+ map_destroy(bootrom_mtd);
+ bootrom_mtd = 0;
+out_ioremap:
+ iounmap((void *)cdb89712_bootrom_map.map_priv_1);
+out_resource:
+ release_resource (&cdb89712_bootrom_resource);
+out:
+ return err;
+}
+
+
+
+
+
+static int __init init_cdb89712_maps(void)
+{
+
+ printk(KERN_INFO "Cirrus CDB89712 MTD mappings:\n Flash 0x%x at 0x%x\n SRAM 0x%x at 0x%x\n BootROM 0x%x at 0x%x\n",
+ FLASH_SIZE, FLASH_START, SRAM_SIZE, SRAM_START, BOOTROM_SIZE, BOOTROM_START);
+
+ init_cdb89712_flash();
+ init_cdb89712_sram();
+ init_cdb89712_bootrom();
+
+ return 0;
+}
+
+
+static void __exit cleanup_cdb89712_maps(void)
+{
+ if (sram_mtd) {
+ del_mtd_device(sram_mtd);
+ map_destroy(sram_mtd);
+ iounmap((void *)cdb89712_sram_map.map_priv_1);
+ release_resource (&cdb89712_sram_resource);
+ }
+
+ if (flash_mtd) {
+ del_mtd_device(flash_mtd);
+ map_destroy(flash_mtd);
+ iounmap((void *)cdb89712_flash_map.map_priv_1);
+ release_resource (&cdb89712_flash_resource);
+ }
+
+ if (bootrom_mtd) {
+ del_mtd_device(bootrom_mtd);
+ map_destroy(bootrom_mtd);
+ iounmap((void *)cdb89712_bootrom_map.map_priv_1);
+ release_resource (&cdb89712_bootrom_resource);
+ }
+}
+
+module_init(init_cdb89712_maps);
+module_exit(cleanup_cdb89712_maps);
+
+MODULE_AUTHOR("Ray L");
+MODULE_DESCRIPTION("ARM CDB89712 map driver");
+MODULE_LICENSE("GPL");
/*
 * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
*
- * $Id: cfi_flagadm.c,v 1.5 2001/05/29 15:47:49 kd Exp $
+ * $Id: cfi_flagadm.c,v 1.7 2001/10/02 15:05:13 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
#define PARTITION_COUNT (sizeof(flagadm_parts)/sizeof(struct mtd_partition))
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_flagadm init_module
-#define cleanup_flagadm cleanup_module
-#endif
-
static struct mtd_info *mymtd;
int __init init_flagadm(void)
printk("Failed to ioremap\n");
return -EIO;
}
- mymtd = do_map_probe("cfi", &flagadm_map);
+ mymtd = do_map_probe("cfi_probe", &flagadm_map);
if (mymtd) {
mymtd->module = THIS_MODULE;
add_mtd_partitions(mymtd, flagadm_parts, PARTITION_COUNT);
module_init(init_flagadm);
module_exit(cleanup_flagadm);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>");
+MODULE_DESCRIPTION("MTD map driver for Flaga digital module");
/*
- * $Id: cstm_mips_ixx.c,v 1.3 2001/06/02 14:52:23 dwmw2 Exp $
+ * $Id: cstm_mips_ixx.c,v 1.5 2001/10/02 15:05:14 dwmw2 Exp $
*
* Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
* Config with both CFI and JEDEC device support.
struct map_info cstm_mips_ixx_map[PHYSMAP_NUMBER];
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_cstm_mips_ixx init_module
-#define cleanup_cstm_mips_ixx cleanup_module
-#endif
-
int __init init_cstm_mips_ixx(void)
{
int i;
for (i=0;i<PHYSMAP_NUMBER;i++) {
parts = &cstm_mips_ixx_partitions[i][0];
jedec = 0;
- mymtd = (struct mtd_info *)do_map_probe("cfi", &cstm_mips_ixx_map[i]);
+ mymtd = (struct mtd_info *)do_map_probe("cfi_probe", &cstm_mips_ixx_map[i]);
//printk(KERN_NOTICE "phymap %d cfi_probe: mymtd is %x\n",i,(unsigned int)mymtd);
if (!mymtd) {
jedec = 1;
module_init(init_cstm_mips_ixx);
module_exit(cleanup_cstm_mips_ixx);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
+MODULE_DESCRIPTION("MTD map driver for ITE 8172G and Globespan IVR boards");
/*
- * $Id: dbox2-flash.c,v 1.2 2001/04/26 15:42:43 dwmw2 Exp $
+ * $Id: dbox2-flash.c,v 1.4 2001/10/02 15:05:14 dwmw2 Exp $
*
* Nokia / Sagem D-Box 2 flash driver
*/
copy_to: dbox2_flash_copy_to
};
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_dbox2_flash init_module
-#define cleanup_dbox2_flash cleanup_module
-#endif
-
-mod_init_t init_dbox2_flash(void)
+int __init init_dbox2_flash(void)
{
printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
dbox2_flash_map.map_priv_1 = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
}
// Probe for dual Intel 28F320 or dual AMD
- mymtd = do_map_probe("cfi", &dbox2_flash_map);
+ mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
if (!mymtd) {
// Probe for single Intel 28F640
dbox2_flash_map.buswidth = 2;
- mymtd = do_map_probe("cfi", &dbox2_flash_map);
+ mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
}
if (mymtd) {
return -ENXIO;
}
-mod_exit_t cleanup_dbox2_flash(void)
+static void __exit cleanup_dbox2_flash(void)
{
if (mymtd) {
del_mtd_partitions(mymtd);
module_init(init_dbox2_flash);
module_exit(cleanup_dbox2_flash);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>");
+MODULE_DESCRIPTION("MTD map driver for Nokia/Sagem D-Box 2 board");
*
* This code is GPL
*
- * $Id: dc21285.c,v 1.4 2001/04/26 15:40:23 dwmw2 Exp $
+ * $Id: dc21285.c,v 1.6 2001/10/02 15:05:14 dwmw2 Exp $
*/
#include <linux/module.h>
return -EIO;
}
- mymtd = do_map_probe("cfi", &dc21285_map);
+ mymtd = do_map_probe("cfi_probe", &dc21285_map);
if (mymtd) {
int nrparts;
module_init(init_dc21285);
module_exit(cleanup_dc21285);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
+MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- $Id: elan-104nc.c,v 1.10 2001/06/02 14:30:44 dwmw2 Exp $
+ $Id: elan-104nc.c,v 1.12 2001/10/02 15:05:14 dwmw2 Exp $
The ELAN-104NC has up to 8 Mibyte of Intel StrataFlash (28F320/28F640) in x16
mode. This drivers uses the CFI probe and Intel Extended Command Set drivers.
/* MTD device for all of the flash. */
static struct mtd_info *all_mtd;
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_elan_104nc init_module
-#define cleanup_elan_104nc cleanup_module
-#endif
-
-mod_exit_t cleanup_elan_104nc(void)
+static void __exit cleanup_elan_104nc(void)
{
if( all_mtd ) {
del_mtd_partitions( all_mtd );
release_region(PAGE_IO,PAGE_IO_SIZE);
}
-mod_init_t init_elan_104nc(void)
+int __init init_elan_104nc(void)
{
/* Urg! We use I/O port 0x22 without request_region()ing it */
/*
elan_104nc_setup();
/* Probe for chip. */
- all_mtd = do_map_probe("cfi", &elan_104nc_map );
+ all_mtd = do_map_probe("cfi_probe", &elan_104nc_map );
if( !all_mtd ) {
cleanup_elan_104nc();
return -ENXIO;
module_init(init_elan_104nc);
module_exit(cleanup_elan_104nc);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arcom Control Systems Ltd.");
+MODULE_DESCRIPTION("MTD map driver for Arcom Control Systems ELAN-104NC");
--- /dev/null
+/*======================================================================
+
+ drivers/mtd/maps/armflash.c: ARM Flash Layout/Partitioning
+
+ Copyright (C) 2000 ARM Limited
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ This is access code for flashes using ARM's flash partitioning
+ standards.
+
+ $Id: integrator-flash.c,v 1.6 2001/10/02 16:00:01 dwmw2 Exp $
+
+======================================================================*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+extern int parse_afs_partitions(struct mtd_info *, struct mtd_partition **);
+
+// board specific stuff - sorry, it should be in arch/arm/mach-*.
+#ifdef CONFIG_ARCH_INTEGRATOR
+
+#define FLASH_BASE INTEGRATOR_FLASH_BASE
+#define FLASH_SIZE INTEGRATOR_FLASH_SIZE
+
+#define FLASH_PART_SIZE 0x400000
+
+#define SC_CTRLC (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLC_OFFSET)
+#define SC_CTRLS (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLS_OFFSET)
+#define EBI_CSR1 (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_CSR1_OFFSET)
+#define EBI_LOCK (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_LOCK_OFFSET)
+
+/*
+ * Initialise the flash access systems:
+ * - Disable VPP
+ * - Assert WP
+ * - Set write enable bit in EBI reg
+ */
+static void armflash_flash_init(void)
+{
+ unsigned int tmp;
+
+ __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC);
+
+ tmp = __raw_readl(EBI_CSR1) | INTEGRATOR_EBI_WRITE_ENABLE;
+ __raw_writel(tmp, EBI_CSR1);
+
+ if (!(__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE)) {
+ __raw_writel(0xa05f, EBI_LOCK);
+ __raw_writel(tmp, EBI_CSR1);
+ __raw_writel(0, EBI_LOCK);
+ }
+}
+
+/*
+ * Shutdown the flash access systems:
+ * - Disable VPP
+ * - Assert WP
+ * - Clear write enable bit in EBI reg
+ */
+static void armflash_flash_exit(void)
+{
+ unsigned int tmp;
+
+ __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC);
+
+ /*
+ * Clear the write enable bit in system controller EBI register.
+ */
+ tmp = __raw_readl(EBI_CSR1) & ~INTEGRATOR_EBI_WRITE_ENABLE;
+ __raw_writel(tmp, EBI_CSR1);
+
+ if (__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE) {
+ __raw_writel(0xa05f, EBI_LOCK);
+ __raw_writel(tmp, EBI_CSR1);
+ __raw_writel(0, EBI_LOCK);
+ }
+}
+
+static void armflash_flash_wp(int on)
+{
+ unsigned int reg;
+
+ if (on)
+ reg = SC_CTRLC;
+ else
+ reg = SC_CTRLS;
+
+ __raw_writel(INTEGRATOR_SC_CTRL_nFLWP, reg);
+}
+
+static void armflash_set_vpp(struct map_info *map, int on)
+{
+ unsigned int reg;
+
+ if (on)
+ reg = SC_CTRLS;
+ else
+ reg = SC_CTRLC;
+
+ __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN, reg);
+}
+#endif
+
+#ifdef CONFIG_ARCH_P720T
+
+#define FLASH_BASE (0x04000000)
+#define FLASH_SIZE (64*1024*1024)
+
+#define FLASH_PART_SIZE (4*1024*1024)
+#define FLASH_BLOCK_SIZE (128*1024)
+
+static void armflash_flash_init(void)
+{
+}
+
+static void armflash_flash_exit(void)
+{
+}
+
+static void armflash_flash_wp(int on)
+{
+}
+
+static void armflash_set_vpp(struct map_info *map, int on)
+{
+}
+#endif
+
+static __u8 armflash_read8(struct map_info *map, unsigned long ofs)
+{
+ return readb(ofs + map->map_priv_2);
+}
+
+static __u16 armflash_read16(struct map_info *map, unsigned long ofs)
+{
+ return readw(ofs + map->map_priv_2);
+}
+
+static __u32 armflash_read32(struct map_info *map, unsigned long ofs)
+{
+ return readl(ofs + map->map_priv_2);
+}
+
+static void armflash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy(to, (void *) (from + map->map_priv_2), len);
+}
+
+static void armflash_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ writeb(d, adr + map->map_priv_2);
+}
+
+static void armflash_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ writew(d, adr + map->map_priv_2);
+}
+
+static void armflash_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ writel(d, adr + map->map_priv_2);
+}
+
+static void armflash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy((void *) (to + map->map_priv_2), from, len);
+}
+
+static struct map_info armflash_map =
+{
+ name: "AFS",
+ read8: armflash_read8,
+ read16: armflash_read16,
+ read32: armflash_read32,
+ copy_from: armflash_copy_from,
+ write8: armflash_write8,
+ write16: armflash_write16,
+ write32: armflash_write32,
+ copy_to: armflash_copy_to,
+ set_vpp: armflash_set_vpp,
+};
+
+static struct mtd_info *mtd;
+
+static int __init armflash_cfi_init(void *base, u_int size)
+{
+ struct mtd_partition *parts;
+ int ret;
+
+ armflash_flash_init();
+ armflash_flash_wp(1);
+
+ /*
+ * look for CFI based flash parts fitted to this board
+ */
+ armflash_map.size = size;
+ armflash_map.buswidth = 4;
+ armflash_map.map_priv_2 = (unsigned long) base;
+
+ /*
+ * Also, the CFI layer automatically works out what size
+ * of chips we have, and does the necessary identification
+ * for us automatically.
+ */
+ mtd = do_map_probe("cfi_probe", &armflash_map);
+ if (!mtd)
+ return -ENXIO;
+
+ mtd->module = THIS_MODULE;
+
+ ret = parse_afs_partitions(mtd, &parts);
+ if (ret > 0) {
+ ret = add_mtd_partitions(mtd, parts, ret);
+ /* we don't need the partition info any longer */
+ kfree(parts);
+ if (ret)
+ printk(KERN_ERR "mtd partition registration "
+ "failed: %d\n", ret);
+ }
+
+ /*
+ * If we got an error, free all resources.
+ */
+ if (ret < 0) {
+ del_mtd_partitions(mtd);
+ map_destroy(mtd);
+ }
+
+ return ret;
+}
+
+static void armflash_cfi_exit(void)
+{
+ if (mtd) {
+ del_mtd_partitions(mtd);
+ map_destroy(mtd);
+ }
+}
+
+static int __init armflash_init(void)
+{
+ int err = -EBUSY;
+ void *base;
+
+ if (request_mem_region(FLASH_BASE, FLASH_SIZE, "flash") == NULL)
+ goto out;
+
+ base = ioremap(FLASH_BASE, FLASH_SIZE);
+ err = -ENOMEM;
+ if (base == NULL)
+ goto release;
+
+ err = armflash_cfi_init(base, FLASH_SIZE);
+ if (err) {
+ iounmap(base);
+release:
+ release_mem_region(FLASH_BASE, FLASH_SIZE);
+ }
+out:
+ return err;
+}
+
+static void __exit armflash_exit(void)
+{
+ armflash_cfi_exit();
+ iounmap((void *)armflash_map.map_priv_2);
+ release_mem_region(FLASH_BASE, FLASH_SIZE);
+ armflash_flash_exit();
+}
+
+module_init(armflash_init);
+module_exit(armflash_exit);
+
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("ARM Integrator CFI map driver");
+MODULE_LICENSE("GPL");
/*
- * $Id: iq80310.c,v 1.3 2001/04/26 15:40:23 dwmw2 Exp $
+ * $Id: iq80310.c,v 1.8 2001/10/02 15:05:14 dwmw2 Exp $
*
* Mapping for the Intel XScale IQ80310 evaluation board
*
copy_to: iq80310_copy_to
};
-static struct mtd_partition iq80310_partitions[3] = {
+static struct mtd_partition iq80310_partitions[4] = {
{
- name: "firmware",
+ name: "Firmware",
size: 0x00080000,
offset: 0,
mask_flags: MTD_WRITEABLE /* force read-only */
},{
- name: "kernel",
- size: 0x00080000,
+ name: "Kernel",
+ size: 0x000a0000,
offset: 0x00080000,
},{
- name: "filesystem",
- size: 0x00700000,
- offset: 0x00100000
+ name: "Filesystem",
+ size: 0x00600000,
+ offset: 0x00120000
+ },{
+ name: "RedBoot",
+ size: 0x000e0000,
+ offset: 0x00720000,
+ mask_flags: MTD_WRITEABLE
}
};
+#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
+
+static struct mtd_info *mymtd;
+static struct mtd_partition *parsed_parts;
+
+extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
+
static int __init init_iq80310(void)
{
- iq80310_map.map_priv_1 = (unsigned long)__ioremap(WINDOW_ADDR, WINDOW_SIZE, 0);
+ struct mtd_partition *parts;
+ int nb_parts = 0;
+ int parsed_nr_parts = 0;
+ char *part_type = "static";
+ iq80310_map.map_priv_1 = (unsigned long)__ioremap(WINDOW_ADDR, WINDOW_SIZE, 0);
if (!iq80310_map.map_priv_1) {
printk("Failed to ioremap\n");
return -EIO;
}
- mymtd = do_map_probe("cfi", &iq80310_map);
- if (mymtd) {
- mymtd->module = THIS_MODULE;
- add_mtd_partitions(mymtd, iq80310_partitions, 3);
- return 0;
+ mymtd = do_map_probe("cfi_probe", &iq80310_map);
+ if (!mymtd) {
+ iounmap((void *)iq80310_map.map_priv_1);
+ return -ENXIO;
}
+ mymtd->module = THIS_MODULE;
- iounmap((void *)iq80310_map.map_priv_1);
- return -ENXIO;
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ if (parsed_nr_parts == 0) {
+ int ret = parse_redboot_partitions(mymtd, &parsed_parts);
+
+ if (ret > 0) {
+ part_type = "RedBoot";
+ parsed_nr_parts = ret;
+ }
+ }
+#endif
+
+ if (parsed_nr_parts > 0) {
+ parts = parsed_parts;
+ nb_parts = parsed_nr_parts;
+ } else {
+ parts = iq80310_partitions;
+ nb_parts = NB_OF(iq80310_partitions);
+ }
+ printk(KERN_NOTICE "Using %s partition definition\n", part_type);
+ add_mtd_partitions(mymtd, parts, nb_parts);
+ return 0;
}
static void __exit cleanup_iq80310(void)
if (mymtd) {
del_mtd_partitions(mymtd);
map_destroy(mymtd);
+ if (parsed_parts)
+ kfree(parsed_parts);
}
- if (iq80310_map.map_priv_1) {
+ if (iq80310_map.map_priv_1)
iounmap((void *)iq80310_map.map_priv_1);
- iq80310_map.map_priv_1 = 0;
- }
+ return 0;
}
module_init(init_iq80310);
module_exit(cleanup_iq80310);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
+MODULE_DESCRIPTION("MTD map driver for Intel XScale IQ80310 evaluation board");
--- /dev/null
+/*
+ * $Id: l440gx.c,v 1.7 2001/10/02 15:05:14 dwmw2 Exp $
+ *
+ * BIOS Flash chip on Intel 440GX board.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/config.h>
+
+
+#define WINDOW_ADDR 0xfff00000
+#define WINDOW_SIZE 0x00100000
+#define BUSWIDTH 1
+
+#define IOBASE 0xc00
+#define TRIBUF_PORT (IOBASE+0x37)
+#define VPP_PORT (IOBASE+0x28)
+
+static struct mtd_info *mymtd;
+
+__u8 l440gx_read8(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readb(map->map_priv_1 + ofs);
+}
+
+__u16 l440gx_read16(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readw(map->map_priv_1 + ofs);
+}
+
+__u32 l440gx_read32(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readl(map->map_priv_1 + ofs);
+}
+
+void l440gx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy_fromio(to, map->map_priv_1 + from, len);
+}
+
+void l440gx_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ __raw_writeb(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void l440gx_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ __raw_writew(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void l440gx_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ __raw_writel(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void l440gx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy_toio(map->map_priv_1 + to, from, len);
+}
+
+void l440gx_set_vpp(struct map_info *map, int vpp)
+{
+ unsigned long l;
+
+ l = inl(VPP_PORT);
+ l = vpp?(l | 1):(l & ~1);
+ outl(l, VPP_PORT);
+}
+
+struct map_info l440gx_map = {
+ name: "L440GX BIOS",
+ size: WINDOW_SIZE,
+ buswidth: BUSWIDTH,
+ read8: l440gx_read8,
+ read16: l440gx_read16,
+ read32: l440gx_read32,
+ copy_from: l440gx_copy_from,
+ write8: l440gx_write8,
+ write16: l440gx_write16,
+ write32: l440gx_write32,
+ copy_to: l440gx_copy_to,
+ set_vpp: l440gx_set_vpp
+};
+
+static int __init init_l440gx(void)
+{
+ struct pci_dev *dev;
+ unsigned char b;
+ __u16 w;
+
+ dev = pci_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
+ NULL);
+
+ if (!dev) {
+ printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
+ return -ENODEV;
+ }
+
+
+ l440gx_map.map_priv_1 = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
+
+ if (!l440gx_map.map_priv_1) {
+ printk("Failed to ioremap L440GX flash region\n");
+ return -ENOMEM;
+ }
+
+ /* Set XBCS# */
+ pci_read_config_word(dev, 0x4e, &w);
+ w |= 0x4;
+ pci_write_config_word(dev, 0x4e, w);
+
+ /* Enable the gate on the WE line */
+ b = inb(TRIBUF_PORT);
+ b |= 1;
+ outb(b, TRIBUF_PORT);
+
+ printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n");
+
+ mymtd = do_map_probe("jedec", &l440gx_map);
+ if (!mymtd) {
+ printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n");
+ mymtd = do_map_probe("map_rom", &l440gx_map);
+ }
+ if (mymtd) {
+ mymtd->module = THIS_MODULE;
+
+ add_mtd_device(mymtd);
+ return 0;
+ }
+
+ iounmap((void *)l440gx_map.map_priv_1);
+ return -ENXIO;
+}
+
+static void __exit cleanup_l440gx(void)
+{
+ del_mtd_device(mymtd);
+ map_destroy(mymtd);
+
+ iounmap((void *)l440gx_map.map_priv_1);
+}
+
+module_init(init_l440gx);
+module_exit(cleanup_l440gx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
* Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com)
* based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
*
- * $Id: netsc520.c,v 1.3 2001/06/02 14:52:23 dwmw2 Exp $
+ * $Id: netsc520.c,v 1.5 2001/10/02 15:05:14 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
static struct mtd_info *mymtd;
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_netsc520 init_module
-#define cleanup_netsc520 cleanup_module
-#endif
-
-
static int __init init_netsc520(void)
{
printk(KERN_NOTICE "NetSc520 flash device: %lx at %lx\n", netsc520_map.size, netsc520_map.map_priv_2);
printk("Failed to ioremap_nocache\n");
return -EIO;
}
- mymtd = do_map_probe("cfi", &netsc520_map);
+ mymtd = do_map_probe("cfi_probe", &netsc520_map);
if(!mymtd)
- mymtd = do_map_probe("ram", &netsc520_map);
+ mymtd = do_map_probe("map_ram", &netsc520_map);
if(!mymtd)
- mymtd = do_map_probe("rom", &netsc520_map);
+ mymtd = do_map_probe("map_rom", &netsc520_map);
if (!mymtd) {
iounmap((void *)netsc520_map.map_priv_1);
module_init(init_netsc520);
module_exit(cleanup_netsc520);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@amd.com>");
+MODULE_DESCRIPTION("MTD map driver for AMD NetSc520 Demonstration Board");
/*
- * $Id: nora.c,v 1.19 2001/04/26 15:40:23 dwmw2 Exp $
+ * $Id: nora.c,v 1.21 2001/10/02 15:05:14 dwmw2 Exp $
*
* This is so simple I love it.
*/
}
};
-
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_nora init_module
-#define cleanup_nora cleanup_module
-#endif
-
int __init init_nora(void)
{
printk(KERN_NOTICE "nora flash device: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
- mymtd = do_map_probe("cfi", &nora_map);
+ mymtd = do_map_probe("cfi_probe", &nora_map);
if (mymtd) {
mymtd->module = THIS_MODULE;
module_init(init_nora);
module_exit(cleanup_nora);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>");
+MODULE_DESCRIPTION("MTD map driver for Nora board");
/*
- * $Id: ocelot.c,v 1.4 2001/06/08 15:36:27 dwmw2 Exp $
+ * $Id: ocelot.c,v 1.6 2001/10/02 15:05:14 dwmw2 Exp $
*
* Flash on Momenco Ocelot
*/
// ocelot_nvram_map.map_priv_2 = ocelot_nvram_map.map_priv_1;
/* And do the RAM probe on it to get an MTD device */
- nvram_mtd = do_map_probe("ram", &ocelot_nvram_map);
+ nvram_mtd = do_map_probe("map_ram", &ocelot_nvram_map);
if (!nvram_mtd) {
printk("NVRAM probe failed\n");
goto fail_1;
}
/* If that failed or the jumper's absent, pretend it's ROM */
if (!flash_mtd) {
- flash_mtd = do_map_probe("rom", &ocelot_flash_map);
+ flash_mtd = do_map_probe("map_rom", &ocelot_flash_map);
/* If we're treating it as ROM, set the erase size */
if (flash_mtd)
flash_mtd->erasesize = 0x10000;
module_init(init_ocelot_maps);
module_exit(cleanup_ocelot_maps);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>");
+MODULE_DESCRIPTION("MTD map driver for Momenco Ocelot board");
-// $Id: octagon-5066.c,v 1.17 2001/06/02 14:30:44 dwmw2 Exp $
+// $Id: octagon-5066.c,v 1.19 2001/10/02 15:05:14 dwmw2 Exp $
/* ######################################################################
Octagon 5066 MTD Driver.
return 0;
}
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_oct5066 init_module
-#define cleanup_oct5066 cleanup_module
-#endif
-
void cleanup_oct5066(void)
{
int i;
WINDOW_START+WINDOW_LENGTH);
for (i=0; i<2; i++) {
- oct5066_mtd[i] = do_map_probe("cfi", &oct5066_map[i]);
+ oct5066_mtd[i] = do_map_probe("cfi_probe", &oct5066_map[i]);
if (!oct5066_mtd[i])
oct5066_mtd[i] = do_map_probe("jedec", &oct5066_map[i]);
if (!oct5066_mtd[i])
- oct5066_mtd[i] = do_map_probe("ram", &oct5066_map[i]);
+ oct5066_mtd[i] = do_map_probe("map_ram", &oct5066_map[i]);
if (!oct5066_mtd[i])
- oct5066_mtd[i] = do_map_probe("rom", &oct5066_map[i]);
+ oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
if (oct5066_mtd[i]) {
oct5066_mtd[i]->module = THIS_MODULE;
add_mtd_device(oct5066_mtd[i]);
module_init(init_oct5066);
module_exit(cleanup_oct5066);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com>, David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD map driver for Octagon 5066 Single Board Computer");
/*
- * $Id: physmap.c,v 1.13 2001/06/10 00:14:55 dwmw2 Exp $
+ * $Id: physmap.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp $
*
* Normal mappings of chips in physical memory
*/
copy_to: physmap_copy_to
};
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_physmap init_module
-#define cleanup_physmap cleanup_module
-#endif
-
int __init init_physmap(void)
{
printk(KERN_NOTICE "physmap flash device: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
printk("Failed to ioremap\n");
return -EIO;
}
- mymtd = do_map_probe("cfi", &physmap_map);
+ mymtd = do_map_probe("cfi_probe", &physmap_map);
if (mymtd) {
mymtd->module = THIS_MODULE;
module_init(init_physmap);
module_exit(cleanup_physmap);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Generic configurable MTD map driver");
*
* This code is GPL
*
- * $Id: pnc2000.c,v 1.8 2001/06/10 00:09:45 dwmw2 Exp $
+ * $Id: pnc2000.c,v 1.10 2001/10/02 15:05:14 dwmw2 Exp $
*/
#include <linux/module.h>
*/
static struct mtd_info *mymtd;
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_pnc2000 init_module
-#define cleanup_pnc2000 cleanup_module
-#endif
-
int __init init_pnc2000(void)
{
printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
- mymtd = do_map_probe("cfi", &pnc_map);
+ mymtd = do_map_probe("cfi_probe", &pnc_map);
if (mymtd) {
mymtd->module = THIS_MODULE;
return add_mtd_partitions(mymtd, pnc_partitions, 3);
module_init(init_pnc2000);
module_exit(cleanup_pnc2000);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>");
+MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
/*
- * $Id: rpxlite.c,v 1.12 2001/04/26 15:40:23 dwmw2 Exp $
+ * $Id: rpxlite.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp $
*
* Handle mapping of the flash on the RPX Lite and CLLF boards
*/
__u8 rpxlite_read8(struct map_info *map, unsigned long ofs)
{
- return readb(map->map_priv_1 + ofs);
+ return __raw_readb(map->map_priv_1 + ofs);
}
__u16 rpxlite_read16(struct map_info *map, unsigned long ofs)
{
- return readw(map->map_priv_1 + ofs);
+ return __raw_readw(map->map_priv_1 + ofs);
}
__u32 rpxlite_read32(struct map_info *map, unsigned long ofs)
{
- return readl(map->map_priv_1 + ofs);
+ return __raw_readl(map->map_priv_1 + ofs);
}
void rpxlite_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
void rpxlite_write8(struct map_info *map, __u8 d, unsigned long adr)
{
- writeb(d, map->map_priv_1 + adr);
+ __raw_writeb(d, map->map_priv_1 + adr);
+ mb();
}
void rpxlite_write16(struct map_info *map, __u16 d, unsigned long adr)
{
- writew(d, map->map_priv_1 + adr);
+ __raw_writew(d, map->map_priv_1 + adr);
+ mb();
}
void rpxlite_write32(struct map_info *map, __u32 d, unsigned long adr)
{
- writel(d, map->map_priv_1 + adr);
+ __raw_writel(d, map->map_priv_1 + adr);
+ mb();
}
void rpxlite_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
copy_to: rpxlite_copy_to
};
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_rpxlite init_module
-#define cleanup_rpxlite cleanup_module
-#endif
-
int __init init_rpxlite(void)
{
printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
printk("Failed to ioremap\n");
return -EIO;
}
- mymtd = do_map_probe("cfi", &rpxlite_map);
+ mymtd = do_map_probe("cfi_probe", &rpxlite_map);
if (mymtd) {
mymtd->module = THIS_MODULE;
add_mtd_device(mymtd);
module_init(init_rpxlite);
module_exit(cleanup_rpxlite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>");
+MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
*
* (C) 2000 Nicolas Pitre <nico@cam.org>
*
- * $Id: sa1100-flash.c,v 1.15 2001/06/02 18:29:22 nico Exp $
+ * $Id: sa1100-flash.c,v 1.22 2001/10/02 10:04:52 rmk Exp $
*/
#include <linux/config.h>
#include <linux/mtd/partitions.h>
#include <asm/hardware.h>
+#include <asm/io.h>
#ifndef CONFIG_ARCH_SA1100
static __u8 sa1100_read8(struct map_info *map, unsigned long ofs)
{
- return *(__u8 *)(WINDOW_ADDR + ofs);
+ return readb(map->map_priv_1 + ofs);
}
static __u16 sa1100_read16(struct map_info *map, unsigned long ofs)
{
- return *(__u16 *)(WINDOW_ADDR + ofs);
+ return readw(map->map_priv_1 + ofs);
}
static __u32 sa1100_read32(struct map_info *map, unsigned long ofs)
{
- return *(__u32 *)(WINDOW_ADDR + ofs);
+ return readl(map->map_priv_1 + ofs);
}
static void sa1100_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
- memcpy(to, (void *)(WINDOW_ADDR + from), len);
+ memcpy(to, (void *)(map->map_priv_1 + from), len);
}
static void sa1100_write8(struct map_info *map, __u8 d, unsigned long adr)
{
- *(__u8 *)(WINDOW_ADDR + adr) = d;
+ writeb(d, map->map_priv_1 + adr);
}
static void sa1100_write16(struct map_info *map, __u16 d, unsigned long adr)
{
- *(__u16 *)(WINDOW_ADDR + adr) = d;
+ writew(d, map->map_priv_1 + adr);
}
static void sa1100_write32(struct map_info *map, __u32 d, unsigned long adr)
{
- *(__u32 *)(WINDOW_ADDR + adr) = d;
+ writel(d, map->map_priv_1 + adr);
}
static void sa1100_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
- memcpy((void *)(WINDOW_ADDR + to), from, len);
+ memcpy((void *)(map->map_priv_1 + to), from, len);
}
-#ifdef CONFIG_SA1100_BITSY
+#ifdef CONFIG_SA1100_H3600
-static void bitsy_set_vpp(struct map_info *map, int vpp)
+static void h3600_set_vpp(struct map_info *map, int vpp)
{
if (vpp)
- set_bitsy_egpio(EGPIO_BITSY_VPP_ON);
+ set_h3600_egpio(EGPIO_H3600_VPP_ON);
else
- clr_bitsy_egpio(EGPIO_BITSY_VPP_ON);
+ clr_h3600_egpio(EGPIO_H3600_VPP_ON);
}
#endif
write8: sa1100_write8,
write16: sa1100_write16,
write32: sa1100_write32,
- copy_to: sa1100_copy_to
+ copy_to: sa1100_copy_to,
+
+ map_priv_1: WINDOW_ADDR,
};
#endif /* CONFIG_SA1100_HUW_WEBPANEL */
-#ifdef CONFIG_SA1100_BITSY
+#ifdef CONFIG_SA1100_H3600
-static unsigned long bitsy_max_flash_size = 0x02000000;
-static struct mtd_partition bitsy_partitions[] = {
+static unsigned long h3600_max_flash_size = 0x02000000;
+static struct mtd_partition h3600_partitions[] = {
{
- name: "BITSY boot firmware",
+ name: "H3600 boot firmware",
size: 0x00040000,
offset: 0,
mask_flags: MTD_WRITEABLE /* force read-only */
},{
- name: "BITSY kernel",
+ name: "H3600 kernel",
size: 0x00080000,
offset: 0x40000
},{
- name: "BITSY params",
+ name: "H3600 params",
size: 0x00040000,
offset: 0xC0000
},{
#ifdef CONFIG_JFFS2_FS
- name: "BITSY root jffs2",
+ name: "H3600 root jffs2",
offset: 0x00100000,
size: MTDPART_SIZ_FULL
#else
- name: "BITSY initrd",
+ name: "H3600 initrd",
size: 0x00100000,
offset: 0x00100000
},{
- name: "BITSY root cramfs",
+ name: "H3600 root cramfs",
size: 0x00300000,
offset: 0x00200000
},{
- name: "BITSY usr cramfs",
+ name: "H3600 usr cramfs",
size: 0x00800000,
offset: 0x00500000
},{
- name: "BITSY usr local",
+ name: "H3600 usr local",
offset: 0x00d00000,
size: MTDPART_SIZ_FULL
#endif
static unsigned long graphicsclient_max_flash_size = 0x01000000;
static struct mtd_partition graphicsclient_partitions[] = {
{
- name: "Bootloader + zImage",
+ name: "zImage",
offset: 0,
size: 0x100000
},
#endif
-#ifdef CONFIG_SA1100_LART
+#ifdef CONFIG_SA1100_GRAPHICSMASTER
-static unsigned long lart_max_flash_size = 0x00400000;
-static struct mtd_partition lart_partitions[] = {
- { offset: 0, size: 0x020000 },
- { offset: MTDPART_OFS_APPEND, size: 0x0e0000 },
- { offset: MTDPART_OFS_APPEND, size: MTDPART_SIZ_FULL }
+static unsigned long graphicsmaster_max_flash_size = 0x01000000;
+static struct mtd_partition graphicsmaster_partitions[] = {
+ {
+ name: "zImage",
+ offset: 0,
+ size: 0x100000
+ },
+ {
+ name: "ramdisk.gz",
+ offset: MTDPART_OFS_APPEND,
+ size: 0x300000
+ },
+ {
+ name: "User FS",
+ offset: MTDPART_OFS_APPEND,
+ size: MTDPART_SIZ_FULL
+ }
};
#endif
{
name: "initrd",
offset: 0x00180000,
- size: 0x00200000,
+ size: 0x00280000,
},
{
name: "initrd-test",
#endif
+#ifdef CONFIG_SA1100_STORK
+
+static unsigned long stork_max_flash_size = 0x02000000;
+static struct mtd_partition stork_partitions[] = {
+ {
+ name: "STORK boot firmware",
+ size: 0x00040000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE /* force read-only */
+ },{
+ name: "STORK params",
+ size: 0x00040000,
+ offset: 0x40000
+ },{
+ name: "STORK kernel",
+ size: 0x00100000,
+ offset: 0x80000
+ },{
+#ifdef CONFIG_JFFS2_FS
+ name: "STORK root jffs2",
+ offset: 0x00180000,
+ size: MTDPART_SIZ_FULL
+#else
+ name: "STORK initrd",
+ size: 0x00100000,
+ offset: 0x00180000
+ },{
+ name: "STORK root cramfs",
+ size: 0x00300000,
+ offset: 0x00280000
+ },{
+ name: "STORK usr cramfs",
+ size: 0x00800000,
+ offset: 0x00580000
+ },{
+ name: "STORK usr local",
+ offset: 0x00d80000,
+ size: MTDPART_SIZ_FULL
+#endif
+ }
+};
+
+#endif
+
#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
int parsed_nr_parts = 0;
char *part_type;
+ /* Default flash buswidth */
sa1100_map.buswidth = (MSC0 & MSC_RBW) ? 2 : 4;
- printk(KERN_NOTICE "SA1100 flash: probing %d-bit flash bus\n", sa1100_map.buswidth*8);
- mymtd = do_map_probe("cfi", &sa1100_map);
- if (!mymtd)
- return -ENXIO;
- mymtd->module = THIS_MODULE;
/*
* Static partition definition selection
}
#endif
-#ifdef CONFIG_SA1100_BITSY
- if (machine_is_bitsy()) {
- parts = bitsy_partitions;
- nb_parts = NB_OF(bitsy_partitions);
- sa1100_map.size = bitsy_max_flash_size;
- sa1100_map.set_vpp = bitsy_set_vpp;
+#ifdef CONFIG_SA1100_H3600
+ if (machine_is_h3600()) {
+ parts = h3600_partitions;
+ nb_parts = NB_OF(h3600_partitions);
+ sa1100_map.size = h3600_max_flash_size;
+ sa1100_map.set_vpp = h3600_set_vpp;
}
#endif
#ifdef CONFIG_SA1100_FREEBIRD
parts = graphicsclient_partitions;
nb_parts = NB_OF(graphicsclient_partitions);
sa1100_map.size = graphicsclient_max_flash_size;
+ sa1100_map.buswidth = (MSC1 & MSC_RBW) ? 2:4;
}
#endif
-#ifdef CONFIG_SA1100_LART
- if (machine_is_lart()) {
- parts = lart_partitions;
- nb_parts = NB_OF(lart_partitions);
- sa1100_map.size = lart_max_flash_size;
+#ifdef CONFIG_SA1100_GRAPHICSMASTER
+ if (machine_is_graphicsmaster()) {
+ parts = graphicsmaster_partitions;
+ nb_parts = NB_OF(graphicsmaster_partitions);
+ sa1100_map.size = graphicsmaster_max_flash_size;
+ sa1100_map.buswidth = (MSC1 & MSC_RBW) ? 2:4;
}
#endif
#ifdef CONFIG_SA1100_PANGOLIN
sa1100_map.size = flexanet_max_flash_size;
}
#endif
-
-
- if (!nb_parts) {
- printk(KERN_WARNING "MTD: no known flash definition for this SA1100 machine\n");
- return -ENXIO;
+#ifdef CONFIG_SA1100_STORK
+ if (machine_is_stork()) {
+ parts = stork_partitions;
+ nb_parts = NB_OF(stork_partitions);
+ sa1100_map.size = stork_max_flash_size;
}
+#endif
+ /*
+ * Now let's probe for the actual flash. Do it here since
+ * specific machine settings might have been set above.
+ */
+ printk(KERN_NOTICE "SA1100 flash: probing %d-bit flash bus\n", sa1100_map.buswidth*8);
+ mymtd = do_map_probe("cfi_probe", &sa1100_map);
+ if (!mymtd)
+ return -ENXIO;
+ mymtd->module = THIS_MODULE;
/*
* Dynamic partition selection stuff (might override the static ones)
*/
-#ifdef CONFIG_MTD_SA1100_REDBOOT_PARTITIONS
+#ifdef CONFIG_MTD_REDBOOT_PARTS
if (parsed_nr_parts == 0) {
int ret = parse_redboot_partitions(mymtd, &parsed_parts);
}
}
#endif
-#ifdef CONFIG_MTD_SA1100_BOOTLDR_PARTITIONS
+#ifdef CONFIG_MTD_BOOTLDR_PARTS
if (parsed_nr_parts == 0) {
int ret = parse_bootldr_partitions(mymtd, &parsed_parts);
if (ret > 0) {
module_init(sa1100_mtd_init);
module_exit(sa1100_mtd_cleanup);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_DESCRIPTION("SA1100 CFI map driver");
+MODULE_LICENSE("GPL");
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- $Id: sbc_gxx.c,v 1.17 2001/06/02 14:52:23 dwmw2 Exp $
+ $Id: sbc_gxx.c,v 1.19 2001/10/02 15:05:14 dwmw2 Exp $
The SBC-MediaGX / SBC-GXx has up to 16 MiB of
Intel StrataFlash (28F320/28F640) in x8 mode.
/* MTD device for all of the flash. */
static struct mtd_info *all_mtd;
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_sbc_gxx init_module
-#define cleanup_sbc_gxx cleanup_module
-#endif
-
-mod_exit_t cleanup_sbc_gxx(void)
+static void __exit cleanup_sbc_gxx(void)
{
if( all_mtd ) {
del_mtd_partitions( all_mtd );
release_region(PAGE_IO,PAGE_IO_SIZE);
}
-mod_init_t init_sbc_gxx(void)
+int __init init_sbc_gxx(void)
{
if (check_region(PAGE_IO,PAGE_IO_SIZE) != 0) {
printk( KERN_ERR"%s: IO ports 0x%x-0x%x in use\n",
WINDOW_START, WINDOW_START+WINDOW_LENGTH-1 );
/* Probe for chip. */
- all_mtd = do_map_probe( "cfi", &sbc_gxx_map );
+ all_mtd = do_map_probe( "cfi_probe", &sbc_gxx_map );
if( !all_mtd ) {
cleanup_sbc_gxx();
return -ENXIO;
module_init(init_sbc_gxx);
module_exit(cleanup_sbc_gxx);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arcom Control Systems Ltd.");
+MODULE_DESCRIPTION("MTD map driver for SBC-GXm and SBC-GX1 series boards");
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: sc520cdp.c,v 1.7 2001/06/02 14:52:23 dwmw2 Exp $
+ * $Id: sc520cdp.c,v 1.9 2001/10/02 15:05:14 dwmw2 Exp $
*
*
* The SC520CDP is an evaluation board for the Elan SC520 processor available
static struct mtd_info *mymtd[NUM_FLASH_BANKS];
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_sc520cdp init_module
-#define cleanup_sc520cdp cleanup_module
-#endif
-
#ifdef REPROGRAM_PAR
/*
printk("Failed to ioremap_nocache\n");
return -EIO;
}
- mymtd[i] = do_map_probe("cfi", &sc520cdp_map[i]);
+ mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]);
if(!mymtd[i])
mymtd[i] = do_map_probe("jedec", &sc520cdp_map[i]);
if(!mymtd[i])
- mymtd[i] = do_map_probe("rom", &sc520cdp_map[i]);
+ mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]);
if (mymtd[i]) {
mymtd[i]->module = THIS_MODULE;
module_init(init_sc520cdp);
module_exit(cleanup_sc520cdp);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH");
+MODULE_DESCRIPTION("MTD map driver for AMD SC520 Customer Development Platform");
--- /dev/null
+/*
+ * $Id: solutionengine.c,v 1.3 2001/10/02 15:05:14 dwmw2 Exp $
+ *
+ * Flash and EPROM on Hitachi Solution Engine and similar boards.
+ *
+ * (C) 2001 Red Hat, Inc.
+ *
+ * GPL'd
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+
+extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
+
+__u32 soleng_read32(struct map_info *map, unsigned long ofs)
+{
+ return __raw_readl(map->map_priv_1 + ofs);
+}
+
+void soleng_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ __raw_writel(d, map->map_priv_1 + adr);
+ mb();
+}
+
+void soleng_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy_fromio(to, map->map_priv_1 + from, len);
+}
+
+
+static struct mtd_info *flash_mtd;
+static struct mtd_info *eprom_mtd;
+
+static struct mtd_partition *parsed_parts;
+
+struct map_info soleng_eprom_map = {
+ name: "Solution Engine EPROM",
+ size: 0x400000,
+ buswidth: 4,
+ copy_from: soleng_copy_from,
+};
+
+struct map_info soleng_flash_map = {
+ name: "Solution Engine FLASH",
+ size: 0x400000,
+ buswidth: 4,
+ read32: soleng_read32,
+ copy_from: soleng_copy_from,
+ write32: soleng_write32,
+};
+
+static int __init init_soleng_maps(void)
+{
+ int nr_parts;
+
+ /* First probe at offset 0 */
+ soleng_flash_map.map_priv_1 = P2SEGADDR(0);
+ soleng_eprom_map.map_priv_1 = P1SEGADDR(0x400000);
+
+ printk(KERN_NOTICE "Probing for flash chips at 0x000000:\n");
+ flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
+ if (!flash_mtd) {
+ /* Not there. Try swapping */
+ printk(KERN_NOTICE "Probing for flash chips at 0x400000:\n");
+ soleng_flash_map.map_priv_1 = P2SEGADDR(0x400000);
+ soleng_eprom_map.map_priv_1 = P1SEGADDR(0);
+ flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
+ if (!flash_mtd) {
+ /* Eep. */
+ printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
+ return -ENXIO;
+ }
+ }
+ printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
+ soleng_flash_map.map_priv_1 & 0x1fffffff,
+ soleng_eprom_map.map_priv_1 & 0x1fffffff);
+ flash_mtd->module = THIS_MODULE;
+
+ eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
+ if (eprom_mtd) {
+ eprom_mtd->module = THIS_MODULE;
+ add_mtd_device(eprom_mtd);
+ }
+
+ nr_parts = parse_redboot_partitions(flash_mtd, &parsed_parts);
+
+ if (nr_parts)
+ add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
+ else
+ add_mtd_device(flash_mtd);
+
+ return 0;
+}
+
+static void __exit cleanup_soleng_maps(void)
+{
+ if (eprom_mtd) {
+ del_mtd_device(eprom_mtd);
+ map_destroy(eprom_mtd);
+ }
+
+ if (parsed_parts)
+ del_mtd_partitions(flash_mtd);
+ else
+ del_mtd_device(flash_mtd);
+ map_destroy(flash_mtd);
+}
+
+module_init(init_soleng_maps);
+module_exit(cleanup_soleng_maps);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD map driver for Hitachi SolutionEngine (and similar) boards");
+
-/* $Id: sun_uflash.c,v 1.2 2001/04/26 15:40:23 dwmw2 Exp $
+/* $Id: sun_uflash.c,v 1.4 2001/10/02 15:05:14 dwmw2 Exp $
*
* sun_uflash - Driver implementation for user-programmable flash
* present on many Sun Microsystems SME boardsets.
("User-programmable flash device on Sun Microsystems boardsets");
MODULE_SUPPORTED_DEVICE
("userflash");
+MODULE_LICENSE
+ ("GPL");
static LIST_HEAD(device_list);
struct uflash_dev {
}
/* MTD registration */
- pdev->mtd = do_map_probe("cfi", &pdev->map);
+ pdev->mtd = do_map_probe("cfi_probe", &pdev->map);
if(0 == pdev->mtd) {
iounmap((void *)pdev->map.map_priv_1);
kfree(pdev->name);
--- /dev/null
+/*
+ * Handle mapping of the flash memory access routines
+ * on TQM8xxL based devices.
+ *
+ * $Id: tqm8xxl.c,v 1.3 2001/10/02 15:05:14 dwmw2 Exp $
+ *
+ * based on rpxlite.c
+ *
+ * Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
+ *
+ * This code is GPLed
+ *
+ */
+
+/*
+ * According to the TQM8xxL hardware manual, TQM8xxL series have the
+ * following flash memory organisations:
+ * | capacity | | chip type | | bank0 | | bank1 |
+ * 2MiB 512Kx16 2MiB 0
+ * 4MiB 1Mx16 4MiB 0
+ * 8MiB 1Mx16 4MiB 4MiB
+ * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at
+ * kernel configuration.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#define FLASH_ADDR 0x40000000
+#define FLASH_SIZE 0x00800000
+#define FLASH_BANK_MAX 4
+
+// trivial struct to describe partition information
+struct mtd_part_def
+{
+ int nums;
+ unsigned char *type;
+ struct mtd_partition* mtd_part;
+};
+
+//static struct mtd_info *mymtd;
+static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
+static struct map_info* map_banks[FLASH_BANK_MAX];
+static struct mtd_part_def part_banks[FLASH_BANK_MAX];
+static unsigned long num_banks;
+static unsigned long start_scan_addr;
+
+__u8 tqm8xxl_read8(struct map_info *map, unsigned long ofs)
+{
+ return *((__u8 *)(map->map_priv_1 + ofs));
+}
+
+__u16 tqm8xxl_read16(struct map_info *map, unsigned long ofs)
+{
+ return *((__u16 *)(map->map_priv_1 + ofs));
+}
+
+__u32 tqm8xxl_read32(struct map_info *map, unsigned long ofs)
+{
+ return *((__u32 *)(map->map_priv_1 + ofs));
+}
+
+void tqm8xxl_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
+}
+
+void tqm8xxl_write8(struct map_info *map, __u8 d, unsigned long adr)
+{
+ *((__u8 *)(map->map_priv_1 + adr)) = d;
+}
+
+void tqm8xxl_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+ *((__u16 *)( map->map_priv_1 + adr)) = d;
+}
+
+void tqm8xxl_write32(struct map_info *map, __u32 d, unsigned long adr)
+{
+ *((__u32 *)(map->map_priv_1 + adr)) = d;
+}
+
+void tqm8xxl_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy_toio((void *)(map->map_priv_1 + to), from, len);
+}
+
+struct map_info tqm8xxl_map = {
+ name: "TQM8xxL",
+ //size: WINDOW_SIZE,
+ buswidth: 4,
+ read8: tqm8xxl_read8,
+ read16: tqm8xxl_read16,
+ read32: tqm8xxl_read32,
+ copy_from: tqm8xxl_copy_from,
+ write8: tqm8xxl_write8,
+ write16: tqm8xxl_write16,
+ write32: tqm8xxl_write32,
+ copy_to: tqm8xxl_copy_to
+};
+
+/*
+ * Here are partition information for all known TQM8xxL series devices.
+ * See include/linux/mtd/partitions.h for definition of the mtd_partition
+ * structure.
+ *
+ * The *_max_flash_size is the maximum possible mapped flash size which
+ * is not necessarily the actual flash size. It must correspond to the
+ * value specified in the mapping definition defined by the
+ * "struct map_desc *_io_desc" for the corresponding machine.
+ */
+
+#ifdef CONFIG_MTD_PARTITIONS
+/* Currently, TQM8xxL has up to 8MiB flash */
+static unsigned long tqm8xxl_max_flash_size = 0x00800000;
+
+/* partition definition for first flash bank
+ * also ref. to "drivers/char/flash_config.c"
+ */
+static struct mtd_partition tqm8xxl_partitions[] = {
+ {
+ name: "ppcboot",
+ offset: 0x00000000,
+ size: 0x00020000, /* 128KB */
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ name: "kernel", /* default kernel image */
+ offset: 0x00020000,
+ size: 0x000e0000,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ name: "user",
+ offset: 0x00100000,
+ size: 0x00100000,
+ },
+ {
+ name: "initrd",
+ offset: 0x00200000,
+ size: 0x00200000,
+ }
+};
+/* partition definition for second flash bank */
+static struct mtd_partition tqm8xxl_fs_partitions[] = {
+ {
+ name: "cramfs",
+ offset: 0x00000000,
+ size: 0x00200000,
+ },
+ {
+ name: "jffs",
+ offset: 0x00200000,
+ size: 0x00200000,
+ //size: MTDPART_SIZ_FULL,
+ }
+};
+#endif
+
+#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
+
+int __init init_tqm_mtd(void)
+{
+ int idx = 0, ret = 0;
+ unsigned long flash_addr, flash_size, mtd_size = 0;
+ /* pointer to TQM8xxL board info data */
+ bd_t *bd = (bd_t *)__res;
+
+ flash_addr = bd->bi_flashstart;
+ flash_size = bd->bi_flashsize;
+ //request maximum flash size address space
+ start_scan_addr = (unsigned long)ioremap(flash_addr, flash_size);
+ if (!start_scan_addr) {
+ //printk("%s:Failed to ioremap address:0x%x\n", __FUNCTION__, FLASH_ADDR);
+ printk("%s:Failed to ioremap address:0x%x\n", __FUNCTION__, flash_addr);
+ return -EIO;
+ }
+ for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++)
+ {
+ if(mtd_size >= flash_size)
+ break;
+
+ printk("%s: chip probing count %d\n", __FUNCTION__, idx);
+
+ map_banks[idx] = (struct map_info *)kmalloc(sizeof(struct map_info), GFP_KERNEL);
+ if(map_banks[idx] == NULL)
+ {
+ //return -ENOMEM;
+ ret = -ENOMEM;
+ goto error_mem;
+ }
+ memset((void *)map_banks[idx], 0, sizeof(struct map_info));
+ map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
+ if(map_banks[idx]->name == NULL)
+ {
+ //return -ENOMEM;
+ ret = -ENOMEM;
+ goto error_mem;
+ }
+ memset((void *)map_banks[idx]->name, 0, 16);
+
+ sprintf(map_banks[idx]->name, "TQM8xxL%d", idx);
+ map_banks[idx]->buswidth = 4;
+ map_banks[idx]->read8 = tqm8xxl_read8;
+ map_banks[idx]->read16 = tqm8xxl_read16;
+ map_banks[idx]->read32 = tqm8xxl_read32;
+ map_banks[idx]->copy_from = tqm8xxl_copy_from;
+ map_banks[idx]->write8 = tqm8xxl_write8;
+ map_banks[idx]->write16 = tqm8xxl_write16;
+ map_banks[idx]->write32 = tqm8xxl_write32;
+ map_banks[idx]->copy_to = tqm8xxl_copy_to;
+ map_banks[idx]->map_priv_1 =
+ start_scan_addr + ((idx > 0) ?
+ (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
+ //start to probe flash chips
+ mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
+ if(mtd_banks[idx])
+ {
+ mtd_banks[idx]->module = THIS_MODULE;
+ mtd_size += mtd_banks[idx]->size;
+ num_banks++;
+ printk("%s: bank%d, name:%s, size:%dbytes \n", __FUNCTION__, num_banks,
+ mtd_banks[idx]->name, mtd_banks[idx]->size);
+ }
+ }
+
+ /* no supported flash chips found */
+ if(!num_banks)
+ {
+ printk("TQM8xxL: No support flash chips found!\n");
+ ret = -ENXIO;
+ goto error_mem;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ /*
+ * Select Static partition definitions
+ */
+ part_banks[0].mtd_part = tqm8xxl_partitions;
+ part_banks[0].type = "Static image";
+ part_banks[0].nums = NB_OF(tqm8xxl_partitions);
+ part_banks[1].mtd_part = tqm8xxl_fs_partitions;
+ part_banks[1].type = "Static file system";
+ part_banks[1].nums = NB_OF(tqm8xxl_fs_partitions);
+ for(idx = 0; idx < num_banks ; idx++)
+ {
+ if (part_banks[idx].nums == 0) {
+ printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
+ add_mtd_device(mtd_banks[idx]);
+ } else {
+ printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
+ idx, part_banks[idx].type);
+ add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part,
+ part_banks[idx].nums);
+ }
+ }
+#else
+ printk(KERN_NOTICE "TQM flash: registering %d whole flash banks at once\n", num_banks);
+ for(idx = 0 ; idx < num_banks ; idx++)
+ add_mtd_device(mtd_banks[idx]);
+#endif
+ return 0;
+error_mem:
+ for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++)
+ {
+ if(map_banks[idx] != NULL)
+ {
+ if(map_banks[idx]->name != NULL)
+ {
+ kfree(map_banks[idx]->name);
+ map_banks[idx]->name = NULL;
+ }
+ kfree(map_banks[idx]);
+ map_banks[idx] = NULL;
+ }
+ }
+ //return -ENOMEM;
+error:
+ iounmap((void *)start_scan_addr);
+ //return -ENXIO;
+ return ret;
+}
+
+static void __exit cleanup_tqm_mtd(void)
+{
+ unsigned int idx = 0;
+ for(idx = 0 ; idx < num_banks ; idx++)
+ {
+ /* destroy mtd_info previously allocated */
+ if (mtd_banks[idx]) {
+ del_mtd_partitions(mtd_banks[idx]);
+ map_destroy(mtd_banks[idx]);
+ }
+ /* release map_info not used anymore */
+ kfree(map_banks[idx]->name);
+ kfree(map_banks[idx]);
+ }
+ if (start_scan_addr) {
+ iounmap((void *)start_scan_addr);
+ start_scan_addr = 0;
+ }
+}
+
+module_init(init_tqm_mtd);
+module_exit(cleanup_tqm_mtd);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kirk Lee <kirk@hpc.ee.ntu.edu.tw>");
+MODULE_DESCRIPTION("MTD map driver for TQM8xxL boards");
-// $Id: vmax301.c,v 1.22 2001/06/02 14:30:44 dwmw2 Exp $
+// $Id: vmax301.c,v 1.24 2001/10/02 15:05:14 dwmw2 Exp $
/* ######################################################################
Tempustech VMAX SBC301 MTD Driver.
static struct mtd_info *vmax_mtd[2] = {NULL, NULL};
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_vmax301 init_module
-#define cleanup_vmax301 cleanup_module
-#endif
-
static void __exit cleanup_vmax301(void)
{
int i;
vmax_map[1].map_priv_1 = iomapadr + (3*WINDOW_START);
for (i=0; i<2; i++) {
- vmax_mtd[i] = do_map_probe("cfi", &vmax_map[i]);
+ vmax_mtd[i] = do_map_probe("cfi_probe", &vmax_map[i]);
if (!vmax_mtd[i])
vmax_mtd[i] = do_map_probe("jedec", &vmax_map[i]);
if (!vmax_mtd[i])
- vmax_mtd[i] = do_map_probe("ram", &vmax_map[i]);
+ vmax_mtd[i] = do_map_probe("map_ram", &vmax_map[i]);
if (!vmax_mtd[i])
- vmax_mtd[i] = do_map_probe("rom", &vmax_map[i]);
+ vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
if (vmax_mtd[i]) {
vmax_mtd[i]->module = THIS_MODULE;
add_mtd_device(vmax_mtd[i]);
module_init(init_vmax301);
module_exit(cleanup_vmax301);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("MTD map driver for Tempustech VMAX SBC301 board");
/*
* Direct MTD block device access
*
- * $Id: mtdblock.c,v 1.38 2000/11/27 08:50:22 dwmw2 Exp $
+ * $Id: mtdblock.c,v 1.47 2001/10/02 15:05:11 dwmw2 Exp $
*
* 02-nov-2000 Nicolas Pitre Added read-modify-write with cache
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/compatmac.h>
#define MAJOR_NR MTD_BLOCK_MAJOR
#define DEVICE_NAME "mtdblock"
int len, const char *buf)
{
struct mtd_info *mtd = mtdblk->mtd;
- unsigned int sect_size = mtd->erasesize;
+ unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
+ if (!sect_size)
+ return MTD_WRITE (mtd, pos, len, &retlen, buf);
+
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
unsigned int offset = pos - sect_start;
int len, char *buf)
{
struct mtd_info *mtd = mtdblk->mtd;
- unsigned int sect_size = mtd->erasesize;
+ unsigned int sect_size = mtdblk->cache_size;
size_t retlen;
int ret;
DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
+ if (!sect_size)
+ return MTD_READ (mtd, pos, len, &retlen, buf);
+
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
unsigned int offset = pos - sect_start;
static int mtdblock_open(struct inode *inode, struct file *file)
{
struct mtdblk_dev *mtdblk;
+ struct mtd_info *mtd;
int dev;
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
dev = MINOR(inode->i_rdev);
if (dev >= MAX_MTD_DEVICES)
return -EINVAL;
+
+ mtd = get_mtd_device(NULL, dev);
+ if (!mtd)
+ return -ENODEV;
+ if (MTD_ABSENT == mtd->type) {
+ put_mtd_device(mtd);
+ return -ENODEV;
+ }
MOD_INC_USE_COUNT;
mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
if (!mtdblk) {
+ put_mtd_device(mtd);
MOD_DEC_USE_COUNT;
return -ENOMEM;
}
memset(mtdblk, 0, sizeof(*mtdblk));
mtdblk->count = 1;
- mtdblk->mtd = get_mtd_device(NULL, dev);
-
- if (!mtdblk->mtd) {
- kfree(mtdblk);
- MOD_DEC_USE_COUNT;
- return -ENODEV;
- }
+ mtdblk->mtd = mtd;
init_MUTEX (&mtdblk->cache_sem);
mtdblk->cache_state = STATE_EMPTY;
- mtdblk->cache_size = mtdblk->mtd->erasesize;
- mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
- if (!mtdblk->cache_data) {
- put_mtd_device(mtdblk->mtd);
- kfree(mtdblk);
- MOD_DEC_USE_COUNT;
- return -ENOMEM;
+ if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
+ mtdblk->mtd->erasesize) {
+ mtdblk->cache_size = mtdblk->mtd->erasesize;
+ mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
+ if (!mtdblk->cache_data) {
+ put_mtd_device(mtdblk->mtd);
+ kfree(mtdblk);
+ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
}
/* OK, we've created a new one. Add it to the list. */
mtdblks[dev] = mtdblk;
mtd_sizes[dev] = mtdblk->mtd->size/1024;
- mtd_blksizes[dev] = mtdblk->mtd->erasesize;
+ if (mtdblk->mtd->erasesize)
+ mtd_blksizes[dev] = mtdblk->mtd->erasesize;
if (mtd_blksizes[dev] > PAGE_SIZE)
mtd_blksizes[dev] = PAGE_SIZE;
set_device_ro (inode->i_rdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));
if (inode == NULL)
release_return(-ENODEV);
-
+
invalidate_device(inode->i_rdev, 1);
dev = MINOR(inode->i_rdev);
}
static volatile int leaving = 0;
-#if LINUX_VERSION_CODE > 0x020300
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
-#else
-static struct semaphore thread_sem = MUTEX_LOCKED;
-DECLARE_WAIT_QUEUE_HEAD(thr_wq);
-#endif
int mtdblock_thread(void *dummy)
{
{
char name[8];
- if (!mtd)
+ if (!mtd || mtd->type == MTD_ABSENT)
return;
sprintf(name, "%d", mtd->index);
static void mtd_notify_remove(struct mtd_info* mtd)
{
- if (!mtd)
+ if (!mtd || mtd->type == MTD_ABSENT)
return;
devfs_unregister(devfs_rw_handle[mtd->index]);
}
#endif
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_mtdblock init_module
-#define cleanup_mtdblock cleanup_module
-#endif
-
int __init init_mtdblock(void)
{
int i;
blksize_size[MAJOR_NR] = mtd_blksizes;
blk_size[MAJOR_NR] = mtd_sizes;
-#if LINUX_VERSION_CODE < 0x20320
- blk_dev[MAJOR_NR].request_fn = mtdblock_request;
-#else
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
-#endif
kernel_thread (mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
return 0;
}
#else
unregister_blkdev(MAJOR_NR,DEVICE_NAME);
#endif
-#if LINUX_VERSION_CODE < 0x20320
- blk_dev[MAJOR_NR].request_fn = NULL;
-#else
blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
-#endif
blksize_size[MAJOR_NR] = NULL;
blk_size[MAJOR_NR] = NULL;
}
module_init(init_mtdblock);
module_exit(cleanup_mtdblock);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> et al.");
+MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");
/*
- * $Id: mtdblock_ro.c,v 1.5 2001/06/10 01:41:53 dwmw2 Exp $
+ * $Id: mtdblock_ro.c,v 1.9 2001/10/02 15:05:11 dwmw2 Exp $
*
* Read-only version of the mtdblock device, without the
* read/erase/modify/writeback stuff
#include <linux/types.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/compatmac.h>
#define MAJOR_NR MTD_BLOCK_MAJOR
#define DEVICE_NAME "mtdblock"
dev = MINOR(inode->i_rdev);
- MOD_INC_USE_COUNT;
-
mtd = get_mtd_device(NULL, dev);
-
- if (!mtd) {
- MOD_DEC_USE_COUNT;
- return -ENODEV;
+ if (!mtd)
+ return -EINVAL;
+ if (MTD_ABSENT == mtd->type) {
+ put_mtd_device(mtd);
+ return -EINVAL;
}
+ MOD_INC_USE_COUNT;
+
mtd_sizes[dev] = mtd->size>>9;
DEBUG(1, "ok\n");
};
#endif
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_mtdblock init_module
-#define cleanup_mtdblock cleanup_module
-#endif
-
int __init init_mtdblock(void)
{
int i;
blksize_size[MAJOR_NR] = NULL;
blk_size[MAJOR_NR] = mtd_sizes;
-#if LINUX_VERSION_CODE < 0x20320
- blk_dev[MAJOR_NR].request_fn = mtdblock_request;
-#else
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
-#endif
return 0;
}
static void __exit cleanup_mtdblock(void)
{
unregister_blkdev(MAJOR_NR,DEVICE_NAME);
+ blksize_size[MAJOR_NR] = NULL;
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
}
module_init(init_mtdblock);
module_exit(cleanup_mtdblock);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
+MODULE_DESCRIPTION("Simple read-only block device emulation access to MTD devices");
/*
- * $Id: mtdchar.c,v 1.38.2.1 2001/06/09 17:31:16 dwmw2 Exp $
+ * $Id: mtdchar.c,v 1.44 2001/10/02 15:05:11 dwmw2 Exp $
*
* Character-device access to raw MTD devices.
+ * Pure 2.4 version - compatibility cruft removed to mtdchar-compat.c
*
*/
-
-#include <linux/mtd/compatmac.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
return -EACCES;
mtd = get_mtd_device(NULL, devnum);
-
+
if (!mtd)
return -ENODEV;
+ if (MTD_ABSENT == mtd->type) {
+ put_mtd_device(mtd);
+ return -ENODEV;
+ }
+
file->private_data = mtd;
/* You can't open it RW if it's not a writeable device */
/*====================================================================*/
-static release_t mtd_close(struct inode *inode,
- struct file *file)
+static int mtd_close(struct inode *inode, struct file *file)
{
struct mtd_info *mtd;
put_mtd_device(mtd);
- release_return(0);
+ return 0;
} /* mtd_close */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
-#define FILE_POS *ppos
-#else
-#define FILE_POS file->f_pos
-#endif
-
/* FIXME: This _really_ needs to die. In 2.5, we should lock the
userspace buffer down and use it directly with readv/writev.
*/
DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
- if (FILE_POS + count > mtd->size)
- count = mtd->size - FILE_POS;
+ if (*ppos + count > mtd->size)
+ count = mtd->size - *ppos;
if (!count)
return 0;
if (!kbuf)
return -ENOMEM;
- ret = MTD_READ(mtd, FILE_POS, len, &retlen, kbuf);
+ ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
if (!ret) {
- FILE_POS += retlen;
+ *ppos += retlen;
if (copy_to_user(buf, kbuf, retlen)) {
kfree(kbuf);
return -EFAULT;
DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
- if (FILE_POS == mtd->size)
+ if (*ppos == mtd->size)
return -ENOSPC;
- if (FILE_POS + count > mtd->size)
- count = mtd->size - FILE_POS;
+ if (*ppos + count > mtd->size)
+ count = mtd->size - *ppos;
if (!count)
return 0;
return -EFAULT;
}
- ret = (*(mtd->write))(mtd, FILE_POS, len, &retlen, kbuf);
+ ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
if (!ret) {
- FILE_POS += retlen;
+ *ppos += retlen;
total_retlen += retlen;
count -= retlen;
buf += retlen;
wq_head is no longer there when the
callback routine tries to wake us up.
*/
- current->state = TASK_UNINTERRUPTIBLE;
- add_wait_queue(&waitq, &wait);
ret = mtd->erase(mtd, erase);
- if (!ret)
- schedule();
- remove_wait_queue(&waitq, &wait);
- current->state = TASK_RUNNING;
- if (!ret)
- ret = (erase->state == MTD_ERASE_FAILED);
+ if (!ret) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&waitq, &wait);
+ if (erase->state != MTD_ERASE_DONE &&
+ erase->state != MTD_ERASE_FAILED)
+ schedule();
+ remove_wait_queue(&waitq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
+ }
kfree(erase);
}
break;
}
#endif
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_mtdchar init_module
-#define cleanup_mtdchar cleanup_module
-#endif
-
-mod_init_t init_mtdchar(void)
+static int __init init_mtdchar(void)
{
#ifdef CONFIG_DEVFS_FS
if (devfs_register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops))
return 0;
}
-mod_exit_t cleanup_mtdchar(void)
+static void __exit cleanup_mtdchar(void)
{
#ifdef CONFIG_DEVFS_FS
unregister_mtd_user(¬ifier);
module_init(init_mtdchar);
module_exit(cleanup_mtdchar);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Direct character-device access to MTD devices");
/*
- * $Id: mtdcore.c,v 1.30 2001/06/02 14:30:42 dwmw2 Exp $
+ * $Id: mtdcore.c,v 1.31 2001/10/02 15:05:11 dwmw2 Exp $
*
* Core registration and callback routines for MTD
* drivers and users.
/*====================================================================*/
/* Init code */
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define init_mtd init_module
-#define cleanup_mtd cleanup_module
-#endif
-
-mod_init_t init_mtd(void)
+int __init init_mtd(void)
{
#ifdef CONFIG_PROC_FS
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
return 0;
}
-mod_exit_t cleanup_mtd(void)
+static void __exit cleanup_mtd(void)
{
#ifdef CONFIG_PM
if (mtd_pm_dev) {
module_exit(cleanup_mtd);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Core MTD registration and access routines");
*
* This code is GPL
*
- * $Id: mtdpart.c,v 1.21 2001/06/09 16:33:32 dwmw2 Exp $
+ * $Id: mtdpart.c,v 1.23 2001/10/02 15:05:11 dwmw2 Exp $
*/
#include <linux/module.h>
slave->mtd.read = part_read;
slave->mtd.write = part_write;
- slave->mtd.sync = part_sync;
+ if (master->sync)
+ slave->mtd.sync = part_sync;
if (!i && master->suspend && master->resume) {
slave->mtd.suspend = part_suspend;
slave->mtd.resume = part_resume;
EXPORT_SYMBOL(add_mtd_partitions);
EXPORT_SYMBOL(del_mtd_partitions);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
+MODULE_DESCRIPTION("Generic support for partitioning of MTD devices");
+
# drivers/mtd/nand/Config.in
-# $Id: Config.in,v 1.3 2001/07/03 17:50:56 sjhill Exp $
+# $Id: Config.in,v 1.4 2001/09/19 09:35:23 dwmw2 Exp $
mainmenu_option next_comment
#
# linux/drivers/nand/Makefile
#
-# $Id: Makefile,v 1.4 2001/06/28 10:49:45 dwmw2 Exp $
+# $Id: Makefile,v 1.5 2001/09/19 22:39:59 dwmw2 Exp $
O_TARGET := nandlink.o
export-objs := nand.o nand_ecc.o
-obj-$(CONFIG_MTD_NAND) += nand.o
-obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
+nandobjs-y := nand.o
+nandobjs-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
+
+obj-$(CONFIG_MTD_NAND) += $(nandobjs-y)
obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
include $(TOPDIR)/Rules.make
*
* Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
*
- * $Id: nand.c,v 1.10 2001/03/20 07:26:01 dwmw2 Exp $
+ * $Id: nand.c,v 1.12 2001/10/02 15:05:14 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ids.h>
+#include <linux/interrupt.h>
#include <asm/io.h>
#ifdef CONFIG_MTD_NAND_ECC
}
EXPORT_SYMBOL(nand_scan);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@cotw.com>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
/*
- * drivers/mtd/spia.c
+ * drivers/mtd/nand/spia.c
*
* Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
*
- * $Id: spia.c,v 1.11 2001/07/03 17:50:56 sjhill Exp $
+ * $Id: spia.c,v 1.12 2001/10/02 15:05:14 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
/*
* Module stuff
*/
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
- #define spia_init init_module
- #define spia_cleanup cleanup_module
-#endif
static int spia_io_base = SPIA_IO_BASE;
static int spia_fio_base = SPIA_FIO_BASE;
}
module_exit(spia_cleanup);
#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@cotw.com>");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on SPIA board");
/* Linux driver for NAND Flash Translation Layer */
/* (c) 1999 Machine Vision Holdings, Inc. */
/* Author: David Woodhouse <dwmw2@infradead.org> */
-/* $Id: nftlcore.c,v 1.73 2001/06/09 01:09:43 dwmw2 Exp $ */
+/* $Id: nftlcore.c,v 1.82 2001/10/02 15:05:11 dwmw2 Exp $ */
/*
The contents of this file are distributed under the GNU General
/* We're passed the number of the last EUN in the chain, to save us from
having to look it up again */
u16 pot = nftl->LastFreeEUN;
- int silly = -1;
+ int silly = nftl->nb_blocks;
/* Normally, we force a fold to happen before we run out of free blocks completely */
if (!desperate && nftl->numfreeEUNs < 2) {
"in Virtual Unit Chain %d for block %d\n",
thisVUC, block);
break;
- case SECTOR_IGNORE:
case SECTOR_DELETED:
+ if (!BlockFreeFound[block])
+ BlockMap[block] = BLOCK_NIL;
+ else
+ printk(KERN_WARNING
+ "SECTOR_DELETED found after SECTOR_FREE "
+ "in Virtual Unit Chain %d for block %d\n",
+ thisVUC, block);
+ break;
+
+ case SECTOR_IGNORE:
break;
default:
printk("Unknown status for block %d in EUN %d: %x\n",
if (inplace) {
/* We're being asked to be a fold-in-place. Check
- that all blocks are either present or SECTOR_FREE
- in the target block. If not, we're going to have
- to fold out-of-place anyway.
+ that all blocks which actually have data associated
+ with them (i.e. BlockMap[block] != BLOCK_NIL) are
+ either already present or SECTOR_FREE in the target
+ block. If not, we're going to have to fold out-of-place
+ anyway.
*/
for (block = 0; block < nftl->EraseSize / 512 ; block++) {
if (BlockLastState[block] != SECTOR_FREE &&
+ BlockMap[block] != BLOCK_NIL &&
BlockMap[block] != targetEUN) {
DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, "
"block %d was %x lastEUN, "
u16 ChainLength = 0, thislen;
u16 chain, EUN;
- for (chain = 0; chain < nftl->MediaHdr.FormattedSize / nftl->EraseSize; chain++) {
+ for (chain = 0; chain < le32_to_cpu(nftl->MediaHdr.FormattedSize) / nftl->EraseSize; chain++) {
EUN = nftl->EUNtable[chain];
thislen = 0;
case BLKGETSIZE64:
return put_user((u64)part_table[MINOR(inode->i_rdev)].nr_sects << 9,
(u64 *)arg);
-
+
case BLKFLSBUF:
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
fsync_dev(inode->i_rdev);
remove: NFTL_notify_remove
};
-static int __init init_nftl(void)
+extern char nftlmountrev[];
+
+int __init init_nftl(void)
{
int i;
- printk(KERN_NOTICE
- "M-Systems NAND Flash Translation Layer driver. (C) 1999 MVHI\n");
#ifdef PRERELEASE
- printk(KERN_INFO"$Id: nftlcore.c,v 1.73 2001/06/09 01:09:43 dwmw2 Exp $\n");
+ printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.82 $, nftlmount.c %s\n", nftlmountrev);
#endif
if (register_blkdev(MAJOR_NR, "nftl", &nftl_fops)){
- printk("unable to register NFTL block device on major %d\n",
- MAJOR_NR);
+ printk("unable to register NFTL block device on major %d\n", MAJOR_NR);
return -EBUSY;
} else {
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &nftl_request);
module_init(init_nftl);
module_exit(cleanup_nftl);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al.");
+MODULE_DESCRIPTION("Support code for NAND Flash Translation Layer, used on M-Systems DiskOnChip 2000 and Millennium");
* Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: nftlmount.c,v 1.17 2001/06/02 20:33:20 dwmw2 Exp $
+ * $Id: nftlmount.c,v 1.23 2001/09/19 21:42:32 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define SECTORSIZE 512
+char nftlmountrev[]="$Revision: 1.23 $";
+
/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
* various device information of the NFTL partition and Bad Unit Table. Update
* the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[]
{
struct nftl_uci1 h1;
struct nftl_oob oob;
- unsigned int block, boot_record_count;
+ unsigned int block, boot_record_count = 0;
int retlen;
u8 buf[SECTORSIZE];
struct NFTLMediaHeader *mh = &nftl->MediaHdr;
+ unsigned int i;
nftl->MediaUnit = BLOCK_NIL;
nftl->SpareMediaUnit = BLOCK_NIL;
- boot_record_count = 0;
/* search for a valid boot record */
for (block = 0; block < nftl->nb_blocks; block++) {
- unsigned int erase_mark;
+ int ret;
+
+ /* Check for ANAND header first. Then can whinge if it's found but later
+ checks fail */
+ if ((ret = MTD_READ(nftl->mtd, block * nftl->EraseSize, SECTORSIZE, &retlen, buf))) {
+ static int warncount = 5;
+
+ if (warncount) {
+ printk(KERN_WARNING "Block read at 0x%x of mtd%d failed: %d\n",
+ block * nftl->EraseSize, nftl->mtd->index, ret);
+ if (!--warncount)
+ printk(KERN_WARNING "Further failures for this block will not be printed\n");
+ }
+ continue;
+ }
- /* read ANAND header. To be safer with BIOS, also use erase mark as discriminant */
- if (MTD_READOOB(nftl->mtd, block * nftl->EraseSize + SECTORSIZE + 8,
- 8, &retlen, (char *)&h1) < 0)
+ if (retlen < 6 || memcmp(buf, "ANAND", 6)) {
+ /* ANAND\0 not found. Continue */
+#if 0
+ printk(KERN_DEBUG "ANAND header not found at 0x%x in mtd%d\n",
+ block * nftl->EraseSize, nftl->mtd->index);
+#endif
continue;
+ }
- erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
- if (erase_mark != ERASE_MARK)
+ /* To be safer with BIOS, also use erase mark as discriminant */
+ if ((ret = MTD_READOOB(nftl->mtd, block * nftl->EraseSize + SECTORSIZE + 8,
+ 8, &retlen, (char *)&h1) < 0)) {
+ printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
+ block * nftl->EraseSize, nftl->mtd->index, ret);
continue;
+ }
- if (MTD_READECC(nftl->mtd, block * nftl->EraseSize, SECTORSIZE,
- &retlen, buf, (char *)&oob) < 0)
+#if 1 /* Some people seem to have devices without ECC or erase marks
+ on the Media Header blocks. There are enough other sanity
+ checks in here that we can probably do without it.
+ */
+ if (le16_to_cpu ((h1.EraseMark | h1.EraseMark1) != ERASE_MARK)) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but erase mark not present (0x%04x,0x%04x instead)\n",
+ block * nftl->EraseSize, nftl->mtd->index,
+ le16_to_cpu(h1.EraseMark), le16_to_cpu(h1.EraseMark1));
continue;
+ }
+
+ /* Finally reread to check ECC */
+ if ((ret = MTD_READECC(nftl->mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf, (char *)&oob) < 0)) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
+ block * nftl->EraseSize, nftl->mtd->index, ret);
+ continue;
+ }
+
+ /* Paranoia. Check the ANAND header is still there after the ECC read */
+ if (memcmp(buf, "ANAND", 6)) {
+ printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but went away on reread!\n",
+ block * nftl->EraseSize, nftl->mtd->index);
+ printk(KERN_NOTICE "New data are: %02x %02x %02x %02x %02x %02x\n",
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
+ continue;
+ }
+#endif
+
+ /* OK, we like it. */
+
+ if (boot_record_count) {
+ /* We've already processed one. So we just check if
+ this one is the same as the first one we found */
+ if (memcmp(mh, buf, sizeof(struct NFTLMediaHeader))) {
+ printk(KERN_NOTICE "NFTL Media Headers at 0x%x and 0x%x disagree.\n",
+ nftl->MediaUnit * nftl->EraseSize, block * nftl->EraseSize);
+ /* if (debug) Print both side by side */
+ return -1;
+ }
+ if (boot_record_count == 1)
+ nftl->SpareMediaUnit = block;
+
+ boot_record_count++;
+ continue;
+ }
+ /* This is the first we've seen. Copy the media header structure into place */
memcpy(mh, buf, sizeof(struct NFTLMediaHeader));
- if (memcmp(mh->DataOrgID, "ANAND", 6) == 0) {
- /* first boot record */
- if (boot_record_count == 0) {
- unsigned int i;
- /* header found : read the bad block table data */
- if (mh->UnitSizeFactor != 0xff) {
- printk("Sorry, we don't support UnitSizeFactor "
- "of != 1 yet\n");
- goto ReplUnitTable;
- }
- nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
- if ((nftl->nb_boot_blocks + 2) >= nftl->nb_blocks) {
- printk(KERN_NOTICE "Potential NFTL Media Header found, but sanity check failed:\n");
- printk(KERN_NOTICE "nb_boot_blocks (%d) + 2 > nb_blocks (%d)\n",
- nftl->nb_boot_blocks, nftl->nb_blocks);
- goto ReplUnitTable; /* small consistency check */
- }
+ /* Do some sanity checks on it */
+ if (mh->UnitSizeFactor != 0xff) {
+ printk(KERN_NOTICE "Sorry, we don't support UnitSizeFactor "
+ "of != 1 yet.\n");
+ return -1;
+ }
- nftl->numvunits = le32_to_cpu(mh->FormattedSize) / nftl->EraseSize;
- if (nftl->numvunits > (nftl->nb_blocks - nftl->nb_boot_blocks - 2)) {
- printk(KERN_NOTICE "Potential NFTL Media Header found, but sanity check failed:\n");
- printk(KERN_NOTICE "numvunits (%d) > nb_blocks (%d) - nb_boot_blocks(%d) - 2\n",
- nftl->numvunits, nftl->nb_blocks, nftl->nb_boot_blocks);
- goto ReplUnitTable; /* small consistency check */
- }
- /* FixMe: with bad blocks, the total size available is not FormattedSize any
- more !!! */
- nftl->nr_sects = nftl->numvunits * (nftl->EraseSize / SECTORSIZE);
- nftl->MediaUnit = block;
-
- /* read the Bad Erase Unit Table and modify ReplUnitTable[] accordingly */
- for (i = 0; i < nftl->nb_blocks; i++) {
- if ((i & (SECTORSIZE - 1)) == 0) {
- /* read one sector for every SECTORSIZE of blocks */
- if (MTD_READECC(nftl->mtd, block * nftl->EraseSize +
- i + SECTORSIZE, SECTORSIZE,
- &retlen, buf, (char *)&oob) < 0)
- goto ReplUnitTable;
- }
- /* mark the Bad Erase Unit as RESERVED in ReplUnitTable */
- if (buf[i & (SECTORSIZE - 1)] != 0xff)
- nftl->ReplUnitTable[i] = BLOCK_RESERVED;
- }
+ nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
+ if ((nftl->nb_boot_blocks + 2) >= nftl->nb_blocks) {
+ printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
+ printk(KERN_NOTICE "nb_boot_blocks (%d) + 2 > nb_blocks (%d)\n",
+ nftl->nb_boot_blocks, nftl->nb_blocks);
+ return -1;
+ }
- boot_record_count++;
- } else if (boot_record_count == 1) {
- nftl->SpareMediaUnit = block;
- boot_record_count++;
- break;
+ nftl->numvunits = le32_to_cpu(mh->FormattedSize) / nftl->EraseSize;
+ if (nftl->numvunits > (nftl->nb_blocks - nftl->nb_boot_blocks - 2)) {
+ printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
+ printk(KERN_NOTICE "numvunits (%d) > nb_blocks (%d) - nb_boot_blocks(%d) - 2\n",
+ nftl->numvunits, nftl->nb_blocks, nftl->nb_boot_blocks);
+ return -1;
+ }
+
+ nftl->nr_sects = nftl->numvunits * (nftl->EraseSize / SECTORSIZE);
+
+ /* If we're not using the last sectors in the device for some reason,
+ reduce nb_blocks accordingly so we forget they're there */
+ nftl->nb_blocks = le16_to_cpu(mh->NumEraseUnits) + le16_to_cpu(mh->FirstPhysicalEUN);
+
+ /* read the Bad Erase Unit Table and modify ReplUnitTable[] accordingly */
+ for (i = 0; i < nftl->nb_blocks; i++) {
+ if ((i & (SECTORSIZE - 1)) == 0) {
+ /* read one sector for every SECTORSIZE of blocks */
+ if ((ret = MTD_READECC(nftl->mtd, block * nftl->EraseSize +
+ i + SECTORSIZE, SECTORSIZE,
+ &retlen, buf, (char *)&oob)) < 0) {
+ printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
+ ret);
+ return -1;
+ }
}
+ /* mark the Bad Erase Unit as RESERVED in ReplUnitTable */
+ if (buf[i & (SECTORSIZE - 1)] != 0xff)
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
}
- ReplUnitTable:;
- }
-
- if (boot_record_count == 0) {
- /* no boot record found */
- return -1;
- } else {
- return 0;
- }
+
+ nftl->MediaUnit = block;
+ boot_record_count++;
+
+ } /* foreach (block) */
+
+ return boot_record_count?0:-1;
}
static int memcmpb(void *a, int c, int n)
/*
- * $Id: redboot.c,v 1.4 2001/05/31 20:43:18 dwmw2 Exp $
+ * $Id: redboot.c,v 1.5 2001/10/02 15:05:11 dwmw2 Exp $
*
* Parse RedBoot-style Flash Image System (FIS) tables and
* produce a Linux partition array to match.
}
EXPORT_SYMBOL(parse_redboot_partitions);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>");
+MODULE_DESCRIPTION("Parsing code for RedBoot Flash Image System (FIS) tables");
/*****************************************************************************
*
* Filename: irda-usb.c
- * Version: 0.9a
+ * Version: 0.9b
* Description: IrDA-USB Driver
* Status: Experimental
* Author: Dag Brattli <dag@brattli.net>
*
*****************************************************************************/
+/*
+ * IMPORTANT NOTE
+ * --------------
+ *
+ * As of kernel 2.4.10, this is the state of compliance and testing of
+ * this driver (irda-usb) with regards to the USB low level drivers...
+ *
+ * This driver has been tested SUCCESSFULLY with the following drivers :
+ * o usb-uhci (For Intel/Via USB controllers)
+ * o usb-ohci (For other USB controllers)
+ *
+ * This driver has NOT been tested with the following drivers :
+ * o usb-ehci (USB 2.0 controllers)
+ *
+ * This driver WON'T WORK with the following drivers :
+ * o uhci (Alternate/JE driver for Intel/Via USB controllers)
+ * Amongst the reasons :
+ * o uhci doesn't implement USB_ZERO_PACKET
+ * o uhci non-compliant use of urb->timeout
+ *
+ * Jean II
+ */
+
+/*------------------------------------------------------------------*/
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/irda/irda-usb.h>
+/*------------------------------------------------------------------*/
+
static int qos_mtt_bits = 0;
/* Master instance for each hardware found */
{ }, /* The end */
};
+/*
+ * Important note :
+ * Devices based on the SigmaTel chipset (0x66f, 0x4200) are not compliant
+ * with the USB-IrDA specification (and actually very very different), and
+ * there is no way this driver can support those devices, apart from
+ * a complete rewrite...
+ * Jean II
+ */
+
MODULE_DEVICE_TABLE(usb, dongles);
/*------------------------------------------------------------------*/
frame, IRDA_USB_SPEED_MTU,
speed_bulk_callback, self);
purb->transfer_buffer_length = USB_IRDA_HEADER;
- purb->transfer_flags = USB_QUEUE_BULK;
+ purb->transfer_flags = USB_QUEUE_BULK | USB_ASYNC_UNLINK;
purb->timeout = MSECS_TO_JIFFIES(100);
if ((ret = usb_submit_urb(purb))) {
spin_unlock_irqrestore(&self->lock, flags);
}
-#ifdef IU_BUG_KICK_TX
-/*------------------------------------------------------------------*/
-/*
- * Send an empty URB to the dongle
- * The goal there is to try to resynchronise with the dongle. An empty
- * frame signify the end of a Tx frame. Jean II
- */
-static inline void irda_usb_send_empty(struct irda_usb_cb *self)
-{
- purb_t purb;
- int ret;
-
- IRDA_DEBUG(0, __FUNCTION__ "()\n");
-
- /* Grab the empty URB */
- purb = &self->empty_urb;
- if (purb->status != USB_ST_NOERROR) {
- WARNING(__FUNCTION__ "(), Empty URB still in use!\n");
- return;
- }
-
- /* Submit the Empty URB */
- FILL_BULK_URB(purb, self->usbdev,
- usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
- self->speed_buff, IRDA_USB_SPEED_MTU,
- speed_bulk_callback, self);
- purb->transfer_buffer_length = 0;
- purb->transfer_flags = USB_QUEUE_BULK;
- purb->timeout = MSECS_TO_JIFFIES(100);
-
- if ((ret = usb_submit_urb(purb))) {
- IRDA_DEBUG(0, __FUNCTION__ "(), failed Empty URB\n");
- }
-}
-#endif /* IU_BUG_KICK_TX */
-
/*------------------------------------------------------------------*/
/*
* Note : this function will be called with both speed_urb and empty_urb...
skb->data, IRDA_USB_MAX_MTU,
write_bulk_callback, skb);
purb->transfer_buffer_length = skb->len;
- purb->transfer_flags = USB_QUEUE_BULK;
-#ifdef IU_USE_USB_ZERO_FLAG
- /* This flag indicates that what we send is not a continuous stream
- * of data but separate frames. In this case, the USB layer will
- * insert empty packet to separate our frames.
- * This flag was previously called USB_DISABLE_SPD - Jean II */
+ /* Note : unlink *must* be Asynchronous because of the code in
+ * irda_usb_net_timeout() -> call in irq - Jean II */
+ purb->transfer_flags = USB_QUEUE_BULK | USB_ASYNC_UNLINK;
+ /* This flag (USB_ZERO_PACKET) indicates that what we send is not
+ * a continuous stream of data but separate packets.
+ * In this case, the USB layer will insert an empty USB frame (TD)
+ * after each of our packets that is exact multiple of the frame size.
+ * This is how the dongle will detect the end of packet - Jean II */
purb->transfer_flags |= USB_ZERO_PACKET;
-#endif /* IU_USE_USB_ZERO_FLAG */
- purb->timeout = MSECS_TO_JIFFIES(100);
-
+ /* Timeout need to be shorter than NET watchdog timer */
+ purb->timeout = MSECS_TO_JIFFIES(200);
+
/* Generate min turn time. FIXME: can we do better than this? */
/* Trying to a turnaround time at this level is trying to measure
- * processor clock cycle with a watch, approximate at best...
+ * processor clock cycle with a wrist-watch, approximate at best...
*
* What we know is the last time we received a frame over USB.
* Due to latency over USB that depend on the USB load, we don't
int diff;
get_fast_time(&self->now);
diff = self->now.tv_usec - self->stamp.tv_usec;
+#ifdef IU_USB_MIN_RTT
+ /* Factor in USB delays -> Get rid of udelay() that
+ * would be lost in the noise - Jean II */
+ diff -= IU_USB_MIN_RTT;
+#endif /* IU_USB_MIN_RTT */
if (diff < 0)
diff += 1000000;
}
}
+ /* Ask USB to send the packet */
if ((res = usb_submit_urb(purb))) {
IRDA_DEBUG(0, __FUNCTION__ "(), failed Tx URB\n");
self->stats.tx_errors++;
self->stats.tx_bytes += skb->len;
netdev->trans_start = jiffies;
-
-#ifdef IU_BUG_KICK_TX
- /* Kick Tx?
- * If the packet is a multiple of 64, the USB layer
- * should send an empty frame (a short packet) to signal
- * the end of frame (that's part of the USB spec).
- * If we enable USB_ZERO_PACKET, the USB layer will just do
- * that (more efficiently) and this code is useless.
- * Better keep this code until USB code clear up this mess...
- *
- * Note : we can't use the speed URB, because the frame
- * might contain a speed change that may be deferred
- * (so we have hard_xmit => tx_urb+empty_urb+speed_urb).
- * Jean II */
- if ((skb->len % self->bulk_out_mtu) == 0) {
- IRDA_DEBUG(2, __FUNCTION__ "(), Kick Tx...\n");
- irda_usb_send_empty(self);
- }
-#endif /* IU_BUG_KICK_TX */
}
spin_unlock_irqrestore(&self->lock, flags);
return;
}
-#ifdef IU_BUG_KICK_TX
- /* Check empty URB */
- purb = &(self->empty_urb);
+ /* Check speed URB */
+ purb = &(self->speed_urb);
if (purb->status != USB_ST_NOERROR) {
- WARNING("%s: Empty change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, purb->status, purb->transfer_flags);
+ WARNING("%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, purb->status, purb->transfer_flags);
switch (purb->status) {
- case -ECONNABORTED: /* -103 */
- case -ECONNRESET: /* -104 */
- case -ENOENT: /* -2 */
- purb->status = USB_ST_NOERROR;
- done = 1;
- break;
case USB_ST_URB_PENDING: /* -EINPROGRESS == -115 */
usb_unlink_urb(purb);
- /* Note : above will *NOT* call netif_wake_queue()
- * in completion handler - Jean II */
+ /* Note : above will *NOT* call netif_wake_queue()
+ * in completion handler, we will come back here.
+ * Jean II */
done = 1;
break;
- default:
- /* ??? */
- break;
- }
- }
-#endif /* IU_BUG_KICK_TX */
-
- /* Check speed URB */
- purb = &(self->speed_urb);
- if (purb->status != USB_ST_NOERROR) {
- WARNING("%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, purb->status, purb->transfer_flags);
-
- switch (purb->status) {
case -ECONNABORTED: /* -103 */
case -ECONNRESET: /* -104 */
- case -ENOENT: /* -2 */
+ case -ETIMEDOUT: /* -110 */
+ case -ENOENT: /* -2 (urb unlinked by us) */
+ default: /* ??? - Play safe */
purb->status = USB_ST_NOERROR;
netif_wake_queue(self->netdev);
done = 1;
break;
- case USB_ST_URB_PENDING: /* -EINPROGRESS == -115 */
- usb_unlink_urb(purb);
- /* Note : above will call netif_wake_queue()
- * in completion handler - Jean II */
- done = 1;
- break;
- default:
- /* ??? */
- break;
}
}
#endif /* IU_BUG_KICK_TIMEOUT */
switch (purb->status) {
+ case USB_ST_URB_PENDING: /* -EINPROGRESS == -115 */
+ usb_unlink_urb(purb);
+ /* Note : above will *NOT* call netif_wake_queue()
+ * in completion handler, because purb->status will
+ * be -ENOENT. We will fix that at the next watchdog,
+ * leaving more time to USB to recover...
+ * Also, we are in interrupt, so we need to have
+ * USB_ASYNC_UNLINK to work properly...
+ * Jean II */
+ done = 1;
+ break;
case -ECONNABORTED: /* -103 */
case -ECONNRESET: /* -104 */
- case -ENOENT: /* -2 */
+ case -ETIMEDOUT: /* -110 */
+ case -ENOENT: /* -2 (urb unlinked by us) */
+ default: /* ??? - Play safe */
if(skb != NULL) {
dev_kfree_skb_any(skb);
purb->context = NULL;
netif_wake_queue(self->netdev);
done = 1;
break;
- case USB_ST_URB_PENDING: /* -EINPROGRESS == -115 */
- usb_unlink_urb(purb);
- /* Note : above will call netif_wake_queue()
- * in completion handler - Jean II */
- done = 1;
- break;
- default:
- /* ??? */
- break;
}
}
* Try to work around USB failures...
*/
+/*
+ * Note :
+ * Some of you may have noticed that most dongles have an interrupt in pipe
+ * that we don't use. Here is the little secret...
+ * When we hang a Rx URB on the bulk in pipe, it generates some USB traffic
+ * in every USB frame. This is unnecessary overhead.
+ * The interrupt in pipe will generate an event every time a packet is
+ * received. Reading an interrupt pipe adds minimal overhead, but has some
+ * latency (~1ms).
+ * If we are connected (speed != 9600), we want to minimise latency, so
+ * we just always hang the Rx URB and ignore the interrupt.
+ * If we are not connected (speed == 9600), there is usually no Rx traffic,
+ * and we want to minimise the USB overhead. In this case we should wait
+ * on the interrupt pipe and hang the Rx URB only when an interrupt is
+ * received.
+ * Jean II
+ */
+
/*------------------------------------------------------------------*/
/*
* Submit a Rx URB to the USB layer to handle reception of a frame
skb->data, skb->truesize,
irda_usb_receive, skb);
purb->transfer_flags = USB_QUEUE_BULK;
+ /* Note : unlink *must* be synchronous because of the code in
+ * irda_usb_net_close() -> free the skb - Jean II */
purb->status = USB_ST_NOERROR;
purb->next = NULL; /* Don't auto resubmit URBs */
netdev->init = irda_usb_net_init;
netdev->hard_start_xmit = irda_usb_hard_xmit;
netdev->tx_timeout = irda_usb_net_timeout;
- netdev->watchdog_timeo = 110*HZ/1000; /* 110 ms > USB timeout */
+ netdev->watchdog_timeo = 250*HZ/1000; /* 250 ms > USB timeout */
netdev->open = irda_usb_net_open;
netdev->stop = irda_usb_net_close;
netdev->get_stats = irda_usb_net_get_stats;
"Reserved",
"Reserved",
"HP HSDL-1100/HSDL-2100",
- "HP HSDL-1100/HSDL-2100"
+ "HP HSDL-1100/HSDL-2100",
"Supports SIR Mode only",
"No dongle connected",
};
{ "37M707", KEY55_1|SIR|SERx4, 0x42, 0x00 },
{ "37M81X", KEY55_1|SIR|SERx4, 0x4d, 0x00 },
{ "37N958FR", KEY55_1|FIR|SERx4, 0x09, 0x04 },
- { "37N972", KEY55_1|FIR|SERx4, 0x0a, 0x00 },
+ { "37N971", KEY55_1|FIR|SERx4, 0x0a, 0x00 },
{ "37N972", KEY55_1|FIR|SERx4, 0x0b, 0x00 },
{ NULL }
};
static int ircc_dma=255;
static int ircc_fir=0;
static int ircc_sir=0;
+static int ircc_cfg=0;
static unsigned short dev_count=0;
return -ENODEV;
}
+ /* try user provided configuration register base address */
+ if (ircc_cfg>0) {
+ MESSAGE(" Overriding configuration address 0x%04x\n", ircc_cfg);
+ if (!smc_superio_fdc(ircc_cfg))
+ ret=0;
+ }
+
/* Trys to open for all the SMC chipsets we know about */
IRDA_DEBUG(0, __FUNCTION__
ret=0;
if (!smc_superio_fdc(0x370))
ret=0;
+ if (!smc_superio_fdc(0xe0))
+ ret=0;
if (!smc_superio_lpc(0x2e))
ret=0;
if (!smc_superio_lpc(0x4e))
MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
MODULE_PARM(ircc_sir, "1-4i");
MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
+MODULE_PARM(ircc_cfg, "1-4i");
+MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
#endif /* MODULE */
*
* vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux
*
- * Version: 0.1, Aug 6, 2001
+ * Version: 0.3, Sep 30, 2001
*
* Copyright (c) 2001 Martin Diehl
*
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
+#include <linux/time.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
MODULE_LICENSE("GPL");
-
static /* const */ char drivername[] = "vlsi_ir";
-#define PCI_CLASS_IRDA_GENERIC 0x0d00
+#define PCI_CLASS_WIRELESS_IRDA 0x0d00
static struct pci_device_id vlsi_irda_table [] __devinitdata = { {
- class: PCI_CLASS_IRDA_GENERIC << 8,
+ class: PCI_CLASS_WIRELESS_IRDA << 8,
vendor: PCI_VENDOR_ID_VLSI,
device: PCI_DEVICE_ID_VLSI_82C147,
}, { /* all zeroes */ }
MODULE_PARM(ringsize, "1-2i");
-MODULE_PARM_DESC(ringsize, "tx, rx ring descriptor size");
+MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
/* ringsize: size of the tx and rx descriptor rings
* independent for tx and rx
* specify as ringsize=tx[,rx]
* allowed values: 4, 8, 16, 32, 64
+ * Due to the IrDA 1.x max. allowed window size=7,
+ * there should be no gain when using rings larger than 8
*/
-static int ringsize[] = {16,16}; /* default is tx=rx=16 */
+static int ringsize[] = {8,8}; /* default is tx=rx=8 */
MODULE_PARM(sirpulse, "i");
-MODULE_PARM_DESC(sirpulse, "sir pulse width tuning");
+MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
-/* sirpulse: tuning of the sir pulse width within IrPHY 1.3 limits
- * 0: real short, 1.5us (exception 6us at 2.4kb/s)
+/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
+ * 0: very short, 1.5us (exception: 6us at 2.4 kbaud)
* 1: nominal 3/16 bittime width
+ * note: IrDA compliant peer devices should be happy regardless
+ * which one is used. Primary goal is to save some power
+ * on the sender's side - at 9.6kbaud for example the short
+ * pulse width saves more than 90% of the transmitted IR power.
*/
static int sirpulse = 1; /* default is 3/16 bittime */
-MODULE_PARM(mtt_bits, "i");
-MODULE_PARM_DESC(mtt_bits, "IrLAP bitfield representing min-turn-time");
+MODULE_PARM(qos_mtt_bits, "i");
+MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
-/* mtt_bit: encoded min-turn-time values we accept for connections
- * according to IrLAP definition (section 6.6.8)
- * the widespreadly used HP HDLS-1100 requires 1 msec
+/* qos_mtt_bits: encoded min-turn-time value we require the peer device
+ * to use before transmitting to us. "Type 1" (per-station)
+ * bitfield according to IrLAP definition (section 6.6.8)
+ * The HP HSDL-1100 requires 1 msec - don't even know
+ * if this is the one which is used by my OB800
*/
-static int mtt_bits = 0x07; /* default is 1 ms or more */
+static int qos_mtt_bits = 0x04; /* default is 1 ms */
/********************************************************/
/* some helpers for operations on ring descriptors */
-static inline int rd_is_active(struct ring_descr *rd)
+static inline int rd_is_active(struct vlsi_ring *r, unsigned i)
+{
+ return ((r->hw[i].rd_status & RD_STAT_ACTIVE) != 0);
+}
+
+static inline void rd_activate(struct vlsi_ring *r, unsigned i)
{
- return ((rd->rd_status & RD_STAT_ACTIVE) != 0);
+ r->hw[i].rd_status |= RD_STAT_ACTIVE;
}
-static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
+static inline void rd_set_addr_status(struct vlsi_ring *r, unsigned i, dma_addr_t a, u8 s)
{
- /* overlayed - order is important! */
+ struct ring_descr *rd = r->hw +i;
+
+ /* ordering is important for two reasons:
+ * - overlayed: writing addr overwrites status
+ * - we want to write status last so we have valid address in
+ * case status has RD_STAT_ACTIVE set
+ */
+ if ((a & ~DMA_MASK_MSTRPAGE) != MSTRPAGE_VALUE)
+ BUG();
+
+ a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
+ * to status - just in case MSTRPAGE_VALUE!=0
+ */
rd->rd_addr = a;
- rd->rd_status = s;
+ wmb();
+ rd->rd_status = s; /* potentially passes ownership to the hardware */
}
-static inline void rd_set_status(struct ring_descr *rd, u8 s)
+static inline void rd_set_status(struct vlsi_ring *r, unsigned i, u8 s)
{
- rd->rd_status = s;
+ r->hw[i].rd_status = s;
}
-static inline void rd_set_count(struct ring_descr *rd, u16 c)
+static inline void rd_set_count(struct vlsi_ring *r, unsigned i, u16 c)
{
- rd->rd_count = c;
+ r->hw[i].rd_count = c;
}
-static inline u8 rd_get_status(struct ring_descr *rd)
+static inline u8 rd_get_status(struct vlsi_ring *r, unsigned i)
{
- return rd->rd_status;
+ return r->hw[i].rd_status;
}
-static inline dma_addr_t rd_get_addr(struct ring_descr *rd)
+static inline dma_addr_t rd_get_addr(struct vlsi_ring *r, unsigned i)
{
dma_addr_t a;
- a = (rd->rd_addr & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
+ a = (r->hw[i].rd_addr & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
return a;
}
-static inline u16 rd_get_count(struct ring_descr *rd)
+static inline u16 rd_get_count(struct vlsi_ring *r, unsigned i)
{
- return rd->rd_count;
+ return r->hw[i].rd_count;
}
+/* producer advances r->head when descriptor was added for processing by hw */
-/* advancing indices pointing into descriptor rings */
-
-static inline void ring_ptr_inc(unsigned *ptr, unsigned mask)
+static inline void ring_put(struct vlsi_ring *r)
{
- *ptr = (*ptr + 1) & mask;
+ r->head = (r->head + 1) & r->mask;
}
+/* consumer advances r->tail when descriptor was removed after getting processed by hw */
-/********************************************************/
-
-
-#define MAX_PACKET_LEN 2048 /* IrDA MTU */
+static inline void ring_get(struct vlsi_ring *r)
+{
+ r->tail = (r->tail + 1) & r->mask;
+}
-/* increase transfer buffer size somewhat so we have enough space left
- * when packet size increases during wrapping due to XBOFs and escapes.
- * well, this wastes some memory - anyway, later we will
- * either map skb's directly or use pci_pool allocator...
- */
-#define XFER_BUF_SIZE (MAX_PACKET_LEN+512)
+/********************************************************/
/* the memory required to hold the 2 descriptor rings */
#define RING_ENTRY_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_entry))
-
/********************************************************/
/* just dump all registers */
-static void vlsi_reg_debug(int iobase, const char *s)
+static void vlsi_reg_debug(unsigned iobase, const char *s)
{
int i;
u8 clkctl, lock;
int i, count;
- if (clksrc < 0 || clksrc > 3) {
- printk(KERN_ERR "%s: invalid clksrc=%d\n", __FUNCTION__, clksrc);
- return -1;
- }
if (clksrc < 2) { /* auto or PLL: try PLL */
clkctl = CLKCTL_NO_PD | CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
- /* protocol to detect PLL lock synchronisation */
+ /* procedure to detect PLL lock synchronisation:
+ * after 0.5 msec initial delay we expect to find 3 PLL lock
+ * indications within 10 msec for successful PLL detection.
+ */
udelay(500);
count = 0;
for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
return -1;
}
- else /* was: clksrc=0(auto) */
- clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
+ else /* was: clksrc=0(auto) */
+ clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
printk(KERN_INFO "%s: PLL not locked, fallback to clksrc=%d\n",
__FUNCTION__, clksrc);
}
- else { /* got succesful PLL lock */
+ else { /* got successful PLL lock */
clksrc = 1;
return 0;
}
}
/* we get here if either no PLL detected in auto-mode or
- the external clock source explicitly specified */
+ the external clock source was explicitly specified */
clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
if (clksrc == 3)
/* ### FIXME: don't use old virt_to_bus() anymore! */
-static int vlsi_alloc_buffers_init(vlsi_irda_dev_t *idev)
+
+static void vlsi_arm_rx(struct vlsi_ring *r)
{
- void *buf;
- int i, j;
+ unsigned i;
+ dma_addr_t ba;
+
+ for (i = 0; i < r->size; i++) {
+ if (r->buf[i].data == NULL)
+ BUG();
+ ba = virt_to_bus(r->buf[i].data);
+ rd_set_addr_status(r, i, ba, RD_STAT_ACTIVE);
+ }
+}
- idev->ring_buf = kmalloc(RING_ENTRY_SIZE,GFP_KERNEL);
- if (!idev->ring_buf)
- return -ENOMEM;
- memset(idev->ring_buf, 0, RING_ENTRY_SIZE);
-
- for (i = MAX_RING_DESCR; i < MAX_RING_DESCR+ringsize[0]; i++) {
- buf = kmalloc(XFER_BUF_SIZE, GFP_KERNEL|GFP_DMA);
- if (!buf) {
- for (j = MAX_RING_DESCR; j < i; j++)
- kfree(idev->ring_buf[j].head);
- kfree(idev->ring_buf);
- idev->ring_buf = NULL;
+static int vlsi_alloc_ringbuf(struct vlsi_ring *r)
+{
+ unsigned i, j;
+
+ r->head = r->tail = 0;
+ r->mask = r->size - 1;
+ for (i = 0; i < r->size; i++) {
+ r->buf[i].skb = NULL;
+ r->buf[i].data = kmalloc(XFER_BUF_SIZE, GFP_KERNEL|GFP_DMA);
+ if (r->buf[i].data == NULL) {
+ for (j = 0; j < i; j++) {
+ kfree(r->buf[j].data);
+ r->buf[j].data = NULL;
+ }
return -ENOMEM;
}
- idev->ring_buf[i].head = buf;
- idev->ring_buf[i].skb = NULL;
- rd_set_addr_status(idev->ring_hw+i,virt_to_bus(buf), 0);
}
+ return 0;
+}
- for (i = 0; i < ringsize[1]; i++) {
- buf = kmalloc(XFER_BUF_SIZE, GFP_KERNEL|GFP_DMA);
- if (!buf) {
- for (j = 0; j < i; j++)
- kfree(idev->ring_buf[j].head);
- for (j = MAX_RING_DESCR; j < MAX_RING_DESCR+ringsize[0]; j++)
- kfree(idev->ring_buf[j].head);
- kfree(idev->ring_buf);
- idev->ring_buf = NULL;
- return -ENOMEM;
+static void vlsi_free_ringbuf(struct vlsi_ring *r)
+{
+ unsigned i;
+
+ for (i = 0; i < r->size; i++) {
+ if (r->buf[i].data == NULL)
+ continue;
+ if (r->buf[i].skb) {
+ dev_kfree_skb(r->buf[i].skb);
+ r->buf[i].skb = NULL;
}
- idev->ring_buf[i].head = buf;
- idev->ring_buf[i].skb = NULL;
- rd_set_addr_status(idev->ring_hw+i,virt_to_bus(buf), RD_STAT_ACTIVE);
+ else
+ kfree(r->buf[i].data);
+ r->buf[i].data = NULL;
}
-
- return 0;
}
static int vlsi_init_ring(vlsi_irda_dev_t *idev)
{
+ char *ringarea;
- idev->tx_mask = MAX_RING_DESCR | (ringsize[0] - 1);
- idev->rx_mask = ringsize[1] - 1;
-
- idev->ring_hw = pci_alloc_consistent(idev->pdev,
- RING_AREA_SIZE, &idev->busaddr);
- if (!idev->ring_hw) {
+ ringarea = pci_alloc_consistent(idev->pdev, RING_AREA_SIZE, &idev->busaddr);
+ if (!ringarea) {
printk(KERN_ERR "%s: insufficient memory for descriptor rings\n",
__FUNCTION__);
return -ENOMEM;
}
+ memset(ringarea, 0, RING_AREA_SIZE);
+
#if 0
printk(KERN_DEBUG "%s: (%d,%d)-ring %p / %p\n", __FUNCTION__,
- ringsize[0], ringsize[1], idev->ring_hw,
+ ringsize[0], ringsize[1], ringarea,
(void *)(unsigned)idev->busaddr);
#endif
- memset(idev->ring_hw, 0, RING_AREA_SIZE);
- if (vlsi_alloc_buffers_init(idev)) {
-
- pci_free_consistent(idev->pdev, RING_AREA_SIZE,
- idev->ring_hw, idev->busaddr);
- printk(KERN_ERR "%s: insufficient memory for ring buffers\n",
- __FUNCTION__);
- return -1;
+ idev->rx_ring.size = ringsize[1];
+ idev->rx_ring.hw = (struct ring_descr *)ringarea;
+ if (!vlsi_alloc_ringbuf(&idev->rx_ring)) {
+ idev->tx_ring.size = ringsize[0];
+ idev->tx_ring.hw = idev->rx_ring.hw + MAX_RING_DESCR;
+ if (!vlsi_alloc_ringbuf(&idev->tx_ring)) {
+ idev->virtaddr = ringarea;
+ return 0;
+ }
+ vlsi_free_ringbuf(&idev->rx_ring);
}
- return 0;
+ pci_free_consistent(idev->pdev, RING_AREA_SIZE,
+ ringarea, idev->busaddr);
+ printk(KERN_ERR "%s: insufficient memory for ring buffers\n",
+ __FUNCTION__);
+ return -1;
}
vlsi_irda_dev_t *idev = ndev->priv;
unsigned long flags;
u16 nphyctl;
- int iobase;
+ unsigned iobase;
u16 config;
unsigned mode;
int ret;
else if (baudrate == 1152000) {
mode = IFF_MIR;
config = IRCFG_MIR | IRCFG_CRC16;
- nphyctl = PHYCTL_MIR(baudrate);
+ nphyctl = PHYCTL_MIR(clksrc==3);
}
else {
mode = IFF_SIR;
outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
- /* chip fetches IRCFG on next rising edge of its 8MHz clock */
+
+ /* chip fetches IRCFG on next rising edge of its 8MHz clock */
mb();
config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
static int vlsi_init_chip(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
+ unsigned iobase;
u16 ptr;
- unsigned iobase;
-
iobase = ndev->base_addr;
- outw(0, iobase+VLSI_PIO_IRENABLE);
-
outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
+ outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
+
/* disable everything, particularly IRCFG_MSTR - which resets the RING_PTR */
outw(0, iobase+VLSI_PIO_IRCFG);
outw(0, iobase+VLSI_PIO_IRENABLE);
- outw(MAX_PACKET_LEN, iobase+VLSI_PIO_MAXPKT);
+ outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
- outw(TX_RX_TO_RINGSIZE(ringsize[0], ringsize[1]), iobase+VLSI_PIO_RINGSIZE);
-
+ outw(TX_RX_TO_RINGSIZE(idev->tx_ring.size, idev->rx_ring.size),
+ iobase+VLSI_PIO_RINGSIZE);
ptr = inw(iobase+VLSI_PIO_RINGPTR);
- idev->rx_put = idev->rx_get = RINGPTR_GET_RX(ptr);
- idev->tx_put = idev->tx_get = RINGPTR_GET_TX(ptr);
+ idev->rx_ring.head = idev->rx_ring.tail = RINGPTR_GET_RX(ptr);
+ idev->tx_ring.head = idev->tx_ring.tail = RINGPTR_GET_TX(ptr);
outw(IRCFG_MSTR, iobase+VLSI_PIO_IRCFG); /* ready for memory access */
wmb();
idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
vlsi_set_baud(ndev);
- outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
+ outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
wmb();
/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
* basically every received pulse fires an ACTIVITY-INT
- * leading to >1000 INT's per second instead of few 10
+ * leading to >>1000 INT's per second instead of few 10
*/
outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
/**************************************************************/
+static void vlsi_refill_rx(struct vlsi_ring *r)
+{
+ do {
+ if (rd_is_active(r, r->head))
+ BUG();
+ rd_activate(r, r->head);
+ ring_put(r);
+ } while (r->head != r->tail);
+}
+
+
static int vlsi_rx_interrupt(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
- int iobase;
- int entry;
+ struct vlsi_ring *r;
int len;
u8 status;
- u16 word;
struct sk_buff *skb;
int crclen;
- iobase = ndev->base_addr;
-
- entry = idev->rx_get;
-
- while ( !rd_is_active(idev->ring_hw+idev->rx_get) ) {
-
- ring_ptr_inc(&idev->rx_get, idev->rx_mask);
-
- while (entry != idev->rx_get) {
-
- status = rd_get_status(idev->ring_hw+entry);
-
- if (status & RD_STAT_ACTIVE) {
- printk(KERN_CRIT "%s: rx still active!!!\n",
- __FUNCTION__);
- break;
- }
- if (status & RX_STAT_ERROR) {
- idev->stats.rx_errors++;
- if (status & RX_STAT_OVER)
- idev->stats.rx_over_errors++;
- if (status & RX_STAT_LENGTH)
- idev->stats.rx_length_errors++;
- if (status & RX_STAT_PHYERR)
- idev->stats.rx_frame_errors++;
- if (status & RX_STAT_CRCERR)
- idev->stats.rx_crc_errors++;
+ r = &idev->rx_ring;
+ while (!rd_is_active(r, r->tail)) {
+
+ status = rd_get_status(r, r->tail);
+ if (status & RX_STAT_ERROR) {
+ idev->stats.rx_errors++;
+ if (status & RX_STAT_OVER)
+ idev->stats.rx_over_errors++;
+ if (status & RX_STAT_LENGTH)
+ idev->stats.rx_length_errors++;
+ if (status & RX_STAT_PHYERR)
+ idev->stats.rx_frame_errors++;
+ if (status & RX_STAT_CRCERR)
+ idev->stats.rx_crc_errors++;
+ }
+ else {
+ len = rd_get_count(r, r->tail);
+ crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
+ if (len < crclen)
+ printk(KERN_ERR "%s: strange frame (len=%d)\n",
+ __FUNCTION__, len);
+ else
+ len -= crclen; /* remove trailing CRC */
+
+ skb = dev_alloc_skb(len+1);
+ if (skb) {
+ skb->dev = ndev;
+ skb_reserve(skb,1);
+ memcpy(skb_put(skb,len), r->buf[r->tail].data, len);
+ idev->stats.rx_packets++;
+ idev->stats.rx_bytes += len;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
}
else {
- len = rd_get_count(idev->ring_hw+entry);
- crclen = (idev->mode==IFF_FIR) ? 4 : 2;
- if (len < crclen)
- printk(KERN_ERR "%s: strange frame (len=%d)\n",
- __FUNCTION__, len);
- else
- len -= crclen; /* remove trailing CRC */
-
- skb = dev_alloc_skb(len+1);
- if (skb) {
- skb->dev = ndev;
- skb_reserve(skb,1);
- memcpy(skb_put(skb,len), idev->ring_buf[entry].head, len);
- idev->stats.rx_packets++;
- idev->stats.rx_bytes += len;
- skb->mac.raw = skb->data;
- skb->protocol = htons(ETH_P_IRDA);
- netif_rx(skb);
- }
- else {
- idev->stats.rx_dropped++;
- printk(KERN_ERR "%s: rx packet dropped\n", __FUNCTION__);
- }
+ idev->stats.rx_dropped++;
+ printk(KERN_ERR "%s: rx packet dropped\n", __FUNCTION__);
}
- rd_set_count(idev->ring_hw+entry, 0);
- rd_set_status(idev->ring_hw+entry, RD_STAT_ACTIVE);
- ring_ptr_inc(&entry, idev->rx_mask);
+ }
+ rd_set_count(r, r->tail, 0);
+ rd_set_status(r, r->tail, 0);
+ ring_get(r);
+ if (r->tail == r->head) {
+ printk(KERN_WARNING "%s: rx ring exhausted\n", __FUNCTION__);
+ break;
}
}
- idev->rx_put = idev->rx_get;
- idev->rx_get = entry;
- word = inw(iobase+VLSI_PIO_IRENABLE);
- if (!(word & IRENABLE_ENTXST)) {
+ do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
- /* only rewrite ENRX, if tx not running!
- * rewriting ENRX during tx in progress wouldn't hurt
- * but would be racy since we would also have to rewrite
- * ENTX then (same register) - which might get disabled meanwhile.
- */
+ vlsi_refill_rx(r);
- outw(0, iobase+VLSI_PIO_IRENABLE);
-
- word = inw(iobase+VLSI_PIO_IRCFG);
- mb();
- outw(word | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
- wmb();
- outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
- }
mb();
- outw(0, iobase+VLSI_PIO_PROMPT);
+ outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
+
return 0;
}
static int vlsi_tx_interrupt(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
- int iobase;
- int entry;
+ struct vlsi_ring *r;
+ unsigned iobase;
int ret;
u16 config;
u16 status;
- ret = 0;
- iobase = ndev->base_addr;
-
- entry = idev->tx_get;
-
- while ( !rd_is_active(idev->ring_hw+idev->tx_get) ) {
+ r = &idev->tx_ring;
+ while (!rd_is_active(r, r->tail)) {
+ if (r->tail == r->head)
+ break; /* tx ring empty - nothing to send anymore */
- if (idev->tx_get == idev->tx_put) { /* tx ring empty */
- /* sth more to do here? */
- break;
+ status = rd_get_status(r, r->tail);
+ if (status & TX_STAT_UNDRN) {
+ idev->stats.tx_errors++;
+ idev->stats.tx_fifo_errors++;
}
- ring_ptr_inc(&idev->tx_get, idev->tx_mask);
- while (entry != idev->tx_get) {
- status = rd_get_status(idev->ring_hw+entry);
- if (status & RD_STAT_ACTIVE) {
- printk(KERN_CRIT "%s: tx still active!!!\n",
- __FUNCTION__);
- break;
- }
- if (status & TX_STAT_UNDRN) {
- idev->stats.tx_errors++;
- idev->stats.tx_fifo_errors++;
- }
- else {
- idev->stats.tx_packets++;
- idev->stats.tx_bytes += rd_get_count(idev->ring_hw+entry);
- }
- rd_set_count(idev->ring_hw+entry, 0);
- rd_set_status(idev->ring_hw+entry, 0);
- ring_ptr_inc(&entry, idev->tx_mask);
+ else {
+ idev->stats.tx_packets++;
+ idev->stats.tx_bytes += rd_get_count(r, r->tail); /* not correct for SIR */
+ }
+ rd_set_count(r, r->tail, 0);
+ rd_set_status(r, r->tail, 0);
+ if (r->buf[r->tail].skb) {
+ rd_set_addr_status(r, r->tail, 0, 0);
+ dev_kfree_skb(r->buf[r->tail].skb);
+ r->buf[r->tail].skb = NULL;
+ r->buf[r->tail].data = NULL;
}
+ ring_get(r);
}
- outw(0, iobase+VLSI_PIO_IRENABLE);
- config = inw(iobase+VLSI_PIO_IRCFG);
- mb();
+ ret = 0;
+ iobase = ndev->base_addr;
- if (idev->tx_get != idev->tx_put) { /* tx ring not empty */
- outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
- ret = 1; /* no speed-change-check */
+ if (r->head == r->tail) { /* tx ring empty: re-enable rx */
+
+ outw(0, iobase+VLSI_PIO_IRENABLE);
+ config = inw(iobase+VLSI_PIO_IRCFG);
+ mb();
+ outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
+ wmb();
+ outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
}
else
- outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
- wmb();
- outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
+ ret = 1; /* no speed-change-check */
mb();
-
outw(0, iobase+VLSI_PIO_PROMPT);
- wmb();
- idev->tx_get = entry;
if (netif_queue_stopped(ndev)) {
netif_wake_queue(ndev);
printk(KERN_DEBUG "%s: queue awoken\n", __FUNCTION__);
}
+#if 0 /* disable ACTIVITY handling for now */
+
static int vlsi_act_interrupt(struct net_device *ndev)
{
printk(KERN_DEBUG "%s\n", __FUNCTION__);
return 0;
}
-
+#endif
static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct net_device *ndev = dev_instance;
vlsi_irda_dev_t *idev = ndev->priv;
- int iobase;
+ unsigned iobase;
u8 irintr;
- int boguscount = 20;
+ int boguscount = 32;
int no_speed_check = 0;
+ unsigned got_act;
unsigned long flags;
-
+ got_act = 0;
iobase = ndev->base_addr;
spin_lock_irqsave(&idev->lock,flags);
do {
if (irintr&IRINTR_TPKTINT)
no_speed_check |= vlsi_tx_interrupt(ndev);
- if ((irintr&IRINTR_ACTIVITY) && !(irintr^IRINTR_ACTIVITY) )
- no_speed_check |= vlsi_act_interrupt(ndev);
+#if 0 /* disable ACTIVITY handling for now */
+
+ if (got_act && irintr==IRINTR_ACTIVITY) /* nothing new */
+ break;
+ if ((irintr&IRINTR_ACTIVITY) && !(irintr^IRINTR_ACTIVITY) ) {
+ no_speed_check |= vlsi_act_interrupt(ndev);
+ got_act = 1;
+ }
+#endif
if (irintr & ~(IRINTR_RPKTINT|IRINTR_TPKTINT|IRINTR_ACTIVITY))
printk(KERN_DEBUG "%s: IRINTR = %02x\n",
__FUNCTION__, (unsigned)irintr);
/**************************************************************/
+
+/* writing all-zero to the VLSI PCI IO register area seems to prevent
+ * some occasional situations where the hardware fails (symptoms are
+ * what appears as stalled tx/rx state machines, i.e. everything ok for
+ * receive or transmit but hw makes no progress or is unable to access
+ * the bus memory locations).
+ * Best place to call this is immediately after/before the internal clock
+ * gets started/stopped.
+ */
+
+static inline void vlsi_clear_regs(unsigned iobase)
+{
+ unsigned i;
+ const unsigned chip_io_extent = 32;
+
+ for (i = 0; i < chip_io_extent; i += sizeof(u16))
+ outw(0, iobase + i);
+}
+
+
static int vlsi_open(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
struct pci_dev *pdev = idev->pdev;
- char hwname[32];
int err;
-
- MOD_INC_USE_COUNT; /* still needed? - we have SET_MODULE_OWNER! */
+ char hwname[32];
if (pci_request_regions(pdev,drivername)) {
printk(KERN_ERR "%s: io resource busy\n", __FUNCTION__);
- MOD_DEC_USE_COUNT;
return -EAGAIN;
}
- if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ|SA_INTERRUPT,
+ /* on some rare occasions the chip apparently comes up
+ * with IRQ's pending. So we get interrupts invoked much too early
+ * which will immediately kill us again :-(
+ * so we better w/c pending IRQ and disable them all
+ */
+
+ outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
+
+ if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
drivername, ndev)) {
printk(KERN_ERR "%s: couldn't get IRQ: %d\n",
__FUNCTION__, ndev->irq);
pci_release_regions(pdev);
- MOD_DEC_USE_COUNT;
return -EAGAIN;
}
printk(KERN_INFO "%s: got resources for %s - irq=%d / io=%04lx\n",
__FUNCTION__);
free_irq(ndev->irq,ndev);
pci_release_regions(pdev);
- MOD_DEC_USE_COUNT;
return -EIO;
}
vlsi_start_clock(pdev);
+ vlsi_clear_regs(ndev->base_addr);
+
err = vlsi_init_ring(idev);
if (err) {
vlsi_unset_clock(pdev);
free_irq(ndev->irq,ndev);
pci_release_regions(pdev);
- MOD_DEC_USE_COUNT;
return err;
}
(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"),
(sirpulse)?"3/16 bittime":"short");
- sprintf(hwname, "VLSI-FIR");
+ vlsi_arm_rx(&idev->rx_ring);
+
+ do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
+
+ sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
idev->irlap = irlap_open(ndev,&idev->qos,hwname);
netif_start_queue(ndev);
{
vlsi_irda_dev_t *idev = ndev->priv;
struct pci_dev *pdev = idev->pdev;
- int i;
u8 cmd;
unsigned iobase;
outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
- mb(); /* from now on */
+ mb(); /* ... from now on */
outw(0, iobase+VLSI_PIO_IRENABLE);
wmb();
+ vlsi_clear_regs(ndev->base_addr);
+
vlsi_stop_clock(pdev);
vlsi_unset_clock(pdev);
free_irq(ndev->irq,ndev);
- if (idev->ring_buf) {
- for (i = 0; i < 2*MAX_RING_DESCR; i++) {
- if (idev->ring_buf[i].head)
- kfree(idev->ring_buf[i].head);
- }
- kfree(idev->ring_buf);
- }
+ vlsi_free_ringbuf(&idev->rx_ring);
+ vlsi_free_ringbuf(&idev->tx_ring);
if (idev->busaddr)
- pci_free_consistent(idev->pdev,RING_AREA_SIZE,idev->ring_hw,idev->busaddr);
+ pci_free_consistent(idev->pdev,RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
- idev->ring_buf = NULL;
- idev->ring_hw = NULL;
+ idev->virtaddr = NULL;
idev->busaddr = 0;
pci_read_config_byte(pdev, PCI_COMMAND, &cmd);
printk(KERN_INFO "%s: device %s stopped\n", __FUNCTION__, ndev->name);
- MOD_DEC_USE_COUNT;
return 0;
}
static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
+ struct vlsi_ring *r;
unsigned long flags;
- int iobase;
+ unsigned iobase;
u8 status;
u16 config;
int mtt;
- int entry;
int len, speed;
+ struct timeval now, ready;
- iobase = ndev->base_addr;
+ status = 0;
speed = irda_get_next_speed(skb);
}
status = TX_STAT_CLRENTX; /* stop tx-ring after this frame */
}
- else
- status = 0;
+ if (skb->len == 0) {
+ printk(KERN_ERR "%s: blocking 0-size packet???\n",
+ __FUNCTION__);
+ dev_kfree_skb(skb);
+ return 0;
+ }
- spin_lock_irqsave(&idev->lock,flags);
+ r = &idev->tx_ring;
- entry = idev->tx_put;
+ if (rd_is_active(r, r->head))
+ BUG();
if (idev->mode == IFF_SIR) {
status |= TX_STAT_DISCRC;
- len = async_wrap_skb(skb, idev->ring_buf[entry].head,
- XFER_BUF_SIZE);
+ len = async_wrap_skb(skb, r->buf[r->head].data, XFER_BUF_SIZE);
}
else { /* hw deals with MIR/FIR mode */
len = skb->len;
- memcpy(idev->ring_buf[entry].head, skb->data, len);
+ memcpy(r->buf[r->head].data, skb->data, len);
}
- if (len == 0)
- printk(KERN_ERR "%s: sending 0-size packet???\n",
- __FUNCTION__);
-
- status |= RD_STAT_ACTIVE;
-
- rd_set_count(idev->ring_hw+entry, len);
- rd_set_status(idev->ring_hw+entry, status);
- ring_ptr_inc(&idev->tx_put, idev->tx_mask);
+ rd_set_count(r, r->head, len);
+ rd_set_addr_status(r, r->head, virt_to_bus(r->buf[r->head].data), status);
- dev_kfree_skb(skb);
+ /* new entry not yet activated! */
#if 0
printk(KERN_DEBUG "%s: dump entry %d: %u %02x %08x\n",
- __FUNCTION__, entry,
- idev->ring_hw[entry].rd_count,
- (unsigned)idev->ring_hw[entry].rd_status,
- idev->ring_hw[entry].rd_addr & 0xffffffff);
+ __FUNCTION__, r->head,
+ idev->ring_hw[r->head].rd_count,
+ (unsigned)idev->ring_hw[r->head].rd_status,
+ idev->ring_hw[r->head].rd_addr & 0xffffffff);
vlsi_reg_debug(iobase,__FUNCTION__);
#endif
+
+ /* let mtt delay pass before we need to acquire the spinlock! */
+
+ if ((mtt = irda_get_mtt(skb)) > 0) {
+
+ ready.tv_usec = idev->last_rx.tv_usec + mtt;
+ ready.tv_sec = idev->last_rx.tv_sec;
+ if (ready.tv_usec >= 1000000) {
+ ready.tv_usec -= 1000000;
+ ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
+ }
+ for(;;) {
+ do_gettimeofday(&now);
+ if (now.tv_sec > ready.tv_sec
+ || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
+ break;
+ udelay(100);
+ }
+ }
+
/*
- * race window due to concurrent controller processing!
+ * race window ahead, due to concurrent controller processing!
*
- * we may loose ENTX at any time when the controller
- * fetches an inactive descr or one with CLR_ENTX set.
- * therefore we only rely on the controller telling us
- * tx is already stopped because (cannot restart without PROMPT).
- * instead we depend on the tx-complete-isr to detect the
- * false negatives and retrigger the tx ring.
- * that's why we need interrupts disabled till tx has been
- * kicked, so the tx-complete-isr was either already finished
- * before we've put the new active descriptor on the ring - or
- * the isr will be called after the new active descr is on the
- * ring _and_ the ring was prompted. Making these two steps
- * atomic allows to resolve the race.
+ * We need to disable IR output in order to switch to TX mode.
+ * Better not do this blindly anytime we want to transmit something
+ * because TX may already run. However the controller may stop TX
+ * at any time when fetching an inactive descriptor or one with
+ * CLR_ENTX set. So we switch on TX only, if TX was not running
+ * _after_ the new descriptor was activated on the ring. This ensures
+ * we will either find TX already stopped or we can be sure, there
+ * will be a TX-complete interrupt even if the chip stopped doing
+ * TX just after we found it still running. The ISR will then find
+ * the non-empty ring and restart TX processing. The enclosing
+ * spinlock is required to get serialization with the ISR right.
*/
+
iobase = ndev->base_addr;
- if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+ spin_lock_irqsave(&idev->lock,flags);
- mtt = irda_get_mtt(skb);
- if (mtt) {
- udelay(mtt); /* ### FIXME ... */
- }
+ rd_activate(r, r->head);
+ ring_put(r);
+ if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
+
outw(0, iobase+VLSI_PIO_IRENABLE);
config = inw(iobase+VLSI_PIO_IRCFG);
outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
-
mb();
-
outw(0, iobase+VLSI_PIO_PROMPT);
wmb();
}
- spin_unlock_irqrestore(&idev->lock, flags);
-
- if (idev->tx_put == idev->tx_get) {
+ if (r->head == r->tail) {
netif_stop_queue(ndev);
printk(KERN_DEBUG "%s: tx ring full - queue stopped: %d/%d\n",
- __FUNCTION__, idev->tx_put, idev->tx_get);
- entry = idev->tx_get;
+ __FUNCTION__, r->head, r->tail);
+#if 0
printk(KERN_INFO "%s: dump stalled entry %d: %u %02x %08x\n",
- __FUNCTION__, entry,
- idev->ring_hw[entry].rd_count,
- (unsigned)idev->ring_hw[entry].rd_status,
- idev->ring_hw[entry].rd_addr & 0xffffffff);
+ __FUNCTION__, r->tail,
+ r->hw[r->tail].rd_count,
+ (unsigned)r->hw[r->tail].rd_status,
+ r->hw[r->tail].rd_addr & 0xffffffff);
+#endif
vlsi_reg_debug(iobase,__FUNCTION__);
}
-// vlsi_reg_debug(iobase, __FUNCTION__);
+ spin_unlock_irqrestore(&idev->lock, flags);
+
+ dev_kfree_skb(skb);
return 0;
}
irda_device_set_media_busy(ndev, TRUE);
break;
case SIOCGRECEIVING:
+ /* The best we can do: check whether there are any bytes in the rx fifo.
+ * The window during which this answer can be trusted (in case some data
+ * arrives just afterwards) may be as short as 1usec or so at 4Mbps.
+ */
fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
break;
return -1;
}
pci_set_master(pdev);
-
pdev->dma_mask = DMA_MASK_MSTRPAGE;
pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
| IR_19200 | IR_38400 | IR_57600 | IR_115200
| IR_1152000 | (IR_4000000 << 8);
- idev->qos.min_turn_time.bits = mtt_bits;
+ idev->qos.min_turn_time.bits = qos_mtt_bits;
irda_qos_bits_to_value(&idev->qos);
irda_device_setup(ndev);
- /* currently no media definitions for SIR/MIR/FIR */
+ /* currently no public media definitions for IrDA */
ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
ndev->if_port = IF_PORT_UNKNOWN;
vlsi_irda_dev_t *idev;
int alloc_size;
- printk(KERN_INFO "%s: found IrDA PCI controler %s\n", drivername, pdev->name);
+ vlsi_reg_debug(0x3000, "vlsi initial state");
if (pci_enable_device(pdev))
goto out;
+ printk(KERN_INFO "%s: IrDA PCI controller %s detected\n",
+ drivername, pdev->name);
+
if ( !pci_resource_start(pdev,0)
|| !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
printk(KERN_ERR "%s: bar 0 invalid", __FUNCTION__);
- goto out;
+ goto out_disable;
}
alloc_size = sizeof(*ndev) + sizeof(*idev);
if (ndev==NULL) {
printk(KERN_ERR "%s: Unable to allocate device memory.\n",
__FUNCTION__);
- goto out;
+ goto out_disable;
}
memset(ndev, 0, alloc_size);
out_freedev:
kfree(ndev);
+out_disable:
+ pci_disable_device(pdev);
out:
pdev->driver_data = NULL;
return -ENODEV;
static int __init vlsi_mod_init(void)
{
+ int i;
+
if (clksrc < 0 || clksrc > 3) {
- printk(KERN_ERR "%s: invalid clksrc=%d\n", __FUNCTION__, clksrc);
+ printk(KERN_ERR "%s: invalid clksrc=%d\n", drivername, clksrc);
return -1;
}
- if ( ringsize[0]==0 || (ringsize[0] & ~(64|32|16|8|4))
- || ((ringsize[0]-1)&ringsize[0])) {
- printk(KERN_INFO "%s: invalid tx ringsize %d - using default=16\n",
- __FUNCTION__, ringsize[0]);
- ringsize[0] = 16;
+
+ for (i = 0; i < 2; i++) {
+ switch(ringsize[i]) {
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ break;
+ default:
+ printk(KERN_WARNING "%s: invalid %s ringsize %d",
+ drivername, (i)?"rx":"tx", ringsize[i]);
+ printk(", using default=8\n");
+ ringsize[i] = 8;
+ break;
+ }
}
- if ( ringsize[1]==0 || (ringsize[1] & ~(64|32|16|8|4))
- || ((ringsize[1]-1)&ringsize[1])) {
- printk(KERN_INFO "%s: invalid rx ringsize %d - using default=16\n",
- __FUNCTION__, ringsize[1]);
- ringsize[1] = 16;
- }
+
sirpulse = !!sirpulse;
+
return pci_module_init(&vlsi_irda_driver);
}
#include <linux/slab.h>
-#define DEBUG_CONFIG 1
+#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(args) printk args
#else
#include <linux/slab.h>
-#define DEBUG_CONFIG 1
+#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(args) printk args
#else
dep_tristate ' UMSDOS: Unix-like file system on top of standard MSDOS fs' CONFIG_UMSDOS_FS $CONFIG_MSDOS_FS
dep_tristate ' VFAT (Windows-95) fs support' CONFIG_VFAT_FS $CONFIG_FAT_FS
dep_tristate 'EFS file system support (read only) (EXPERIMENTAL)' CONFIG_EFS_FS $CONFIG_EXPERIMENTAL
-dep_tristate 'Journalling Flash File System (JFFS) support (EXPERIMENTAL)' CONFIG_JFFS_FS $CONFIG_EXPERIMENTAL $CONFIG_MTD
+dep_tristate 'Journalling Flash File System (JFFS) support' CONFIG_JFFS_FS $CONFIG_MTD
if [ "$CONFIG_JFFS_FS" = "y" -o "$CONFIG_JFFS_FS" = "m" ] ; then
int 'JFFS debugging verbosity (0 = quiet, 3 = noisy)' CONFIG_JFFS_FS_VERBOSE 0
+ bool 'JFFS stats available in /proc filesystem' CONFIG_JFFS_PROC_FS
+fi
+dep_tristate 'Journalling Flash File System v2 (JFFS2) support (EXPERIMENTAL)' CONFIG_JFFS2_FS $CONFIG_EXPERIMENTAL $CONFIG_MTD
+if [ "$CONFIG_JFFS2_FS" != "n" ] ; then
+ int 'JFFS2 debugging verbosity (0 = quiet, 3 = noisy)' CONFIG_JFFS2_FS_DEBUG 0
fi
tristate 'Compressed ROM file system support' CONFIG_CRAMFS
bool 'Virtual memory file system support (former shm fs)' CONFIG_TMPFS
super.o block_dev.o char_dev.o stat.o exec.o pipe.o namei.o \
fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \
dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \
- filesystems.o
+ filesystems.o namespace.o
ifeq ($(CONFIG_QUOTA),y)
obj-y += dquot.o
subdir-$(CONFIG_UFS_FS) += ufs
subdir-$(CONFIG_EFS_FS) += efs
subdir-$(CONFIG_JFFS_FS) += jffs
+subdir-$(CONFIG_JFFS2_FS) += jffs2
subdir-$(CONFIG_AFFS_FS) += affs
subdir-$(CONFIG_ROMFS_FS) += romfs
subdir-$(CONFIG_QNX4FS_FS) += qnx4
#
# Makefile for the linux Journalling Flash FileSystem (JFFS) routines.
#
-# $Id: Makefile,v 1.7 2000/08/04 12:46:34 dwmw2 Exp $
+# $Id: Makefile,v 1.11 2001/09/25 20:59:41 dwmw2 Exp $
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
#
# Note 2! The CFLAGS definitions are now in the main makefile...
-ifndef CONFIG_JFFS_FS
+list-multi := jffs.o
-# We're being invoked outside a normal kernel build. Fake it
-EXTRA_CFLAGS= -I$(shell pwd)/../../include
-
-# You need to change this to build for 2.2, dunno how to check for it.
-
-#INODE_O := inode-v22.o
-INODE_O := inode-v23.o
-
-else
+jffs-objs := jffs_fm.o intrep.o
ifeq ($(PATCHLEVEL),2)
- INODE_O := inode-v22.o
+ jffs-objs += inode-v22.o
else
- INODE_O := inode-v23.o
+ jffs-objs += inode-v23.o
endif
+ifeq ($(CONFIG_JFFS_PROC_FS),y)
+ jffs-objs += jffs_proc.o
endif
O_TARGET := jffs.o
-obj-m := $(O_TARGET)
-obj-y := jffs_fm.o intrep.o $(INODE_O)
+
+obj-y := $(jffs-objs)
+obj-m := $(O_TARGET)
include $(TOPDIR)/Rules.make
+
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * $Id: inode-v23.c,v 1.43.2.6 2001/01/09 00:32:48 dwmw2 Exp $
- * + sb_maxbytes / generic_file_open() fixes for 2.4.0-ac4
+ * $Id: inode-v23.c,v 1.70 2001/10/02 09:16:02 dwmw2 Exp $
*
* Ported to Linux 2.3.x and MTD:
* Copyright (C) 2000 Alexander Larsson (alex@cendio.se), Cendio Systems AB
#include <asm/semaphore.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
+
#include "jffs_fm.h"
#include "intrep.h"
+#if CONFIG_JFFS_PROC_FS
+#include "jffs_proc.h"
+#endif
static int jffs_remove(struct inode *dir, struct dentry *dentry, int type);
static struct inode_operations jffs_dir_inode_operations;
static struct address_space_operations jffs_address_operations;
+kmem_cache_t *node_cache = NULL;
+kmem_cache_t *fm_cache = NULL;
/* Called by the VFS at mount time to initialize the whole file system. */
static struct super_block *
c = (struct jffs_control *) sb->u.generic_sbp;
+#ifdef CONFIG_JFFS_PROC_FS
+ /* Set up the jffs proc file system. */
+ if (jffs_register_jffs_proc_dir(dev, c) < 0) {
+ printk(KERN_WARNING "JFFS: Failed to initialize the JFFS "
+ "proc file system for device %s.\n",
+ kdevname(dev));
+ }
+#endif
+
/* Set the Garbage Collection thresholds */
/* GC if free space goes below 5% of the total size */
D2(printk("jffs_put_super()\n"));
+#ifdef CONFIG_JFFS_PROC_FS
+ jffs_unregister_jffs_proc_dir(c);
+#endif
+
if (c->gc_task) {
D1(printk (KERN_NOTICE "jffs_put_super(): Telling gc thread to die.\n"));
send_sig(SIGKILL, c->gc_task, 1);
recoverable = 1;
}
- if (!(new_node = (struct jffs_node *)
- kmalloc(sizeof(struct jffs_node), GFP_KERNEL))) {
+ if (!(new_node = jffs_alloc_node())) {
D(printk("jffs_setattr(): Allocation failed!\n"));
D3(printk (KERN_NOTICE "notify_change(): up biglock\n"));
up(&fmc->biglock);
return -ENOMEM;
}
- DJM(no_jffs_node++);
new_node->data_offset = 0;
new_node->removed_size = 0;
raw_inode.magic = JFFS_MAGIC_BITMASK;
/* Write this node to the flash. */
if ((res = jffs_write_node(c, new_node, &raw_inode, f->name, 0, recoverable, f)) < 0) {
D(printk("jffs_notify_change(): The write failed!\n"));
- kfree(new_node);
- DJM(no_jffs_node--);
+ jffs_free_node(new_node);
D3(printk (KERN_NOTICE "n_c(): up biglock\n"));
up(&c->fmc->biglock);
return res;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_blocks = (fmc->flash_size / PAGE_CACHE_SIZE)
- (fmc->min_free_size / PAGE_CACHE_SIZE);
- buf->f_bfree = (jffs_free_size1(fmc) / PAGE_CACHE_SIZE
- + jffs_free_size2(fmc) / PAGE_CACHE_SIZE)
- - (fmc->min_free_size / PAGE_CACHE_SIZE);
+ buf->f_bfree = (jffs_free_size1(fmc) + jffs_free_size2(fmc) +
+ fmc->dirty_size - fmc->min_free_size)
+ >> PAGE_CACHE_SHIFT;
buf->f_bavail = buf->f_bfree;
/* Find out how many files there are in the filesystem. */
__u32 rename_data = 0;
D2(printk("***jffs_rename()\n"));
-
+
D(printk("jffs_rename(): old_dir: 0x%p, old name: 0x%p, "
"new_dir: 0x%p, new name: 0x%p\n",
old_dir, old_dentry->d_name.name,
down(&c->fmc->biglock);
/* Create a node and initialize as much as needed. */
result = -ENOMEM;
- if (!(node = (struct jffs_node *) kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(node = jffs_alloc_node())) {
D(printk("jffs_rename(): Allocation failed: node == 0\n"));
goto jffs_rename_end;
}
- DJM(no_jffs_node++);
node->data_offset = 0;
node->removed_size = 0;
new_dentry->d_name.name,
(unsigned char*)&rename_data, 0, f)) < 0) {
D(printk("jffs_rename(): Failed to write node to flash.\n"));
- kfree(node);
- DJM(no_jffs_node--);
+ jffs_free_node(node);
goto jffs_rename_end;
}
raw_inode.dsize = 0;
if (filp->f_pos == 0) {
D3(printk("jffs_readdir(): \".\" %lu\n", inode->i_ino));
if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) {
- D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
+ D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
up(&c->fmc->biglock);
return 0;
}
}
D3(printk("jffs_readdir(): \"..\" %u\n", ddino));
if (filldir(dirent, "..", 2, filp->f_pos, ddino, DT_DIR) < 0) {
- D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
+ D3(printk (KERN_NOTICE "readdir(): up biglock\n"));
up(&c->fmc->biglock);
return 0;
}
}
f = ((struct jffs_file *)inode->u.generic_ip)->children;
- j=2;
+ j = 2;
while(f && (f->deleted || j++ < filp->f_pos )) {
f = f->sibling_next;
}
/* iget calls jffs_read_inode, so we need to drop the biglock
before calling iget. Unfortunately, the GC has a tendency
- to sneak in here, because iget sometimes calls schedule ().
- */
+ to sneak in here, because iget sometimes calls schedule ().
+ */
if ((len == 1) && (name[0] == '.')) {
- D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
+ D3(printk (KERN_NOTICE "lookup(): up biglock\n"));
up(&c->fmc->biglock);
if (!(inode = iget(dir->i_sb, d->ino))) {
D(printk("jffs_lookup(): . iget() ==> NULL\n"));
/* Try to read a page of data from a file. */
static int
-jffs_readpage(struct file *file, struct page *page)
+jffs_do_readpage_nolock(struct file *file, struct page *page)
{
void *buf;
unsigned long read_len;
- int result = -EIO;
- struct inode *inode = page->mapping->host;
+ int result;
+ struct inode *inode = (struct inode*)page->mapping->host;
struct jffs_file *f = (struct jffs_file *)inode->u.generic_ip;
struct jffs_control *c = (struct jffs_control *)inode->i_sb->u.generic_sbp;
int r;
D3(printk (KERN_NOTICE "readpage(): down biglock\n"));
down(&c->fmc->biglock);
+ read_len = 0;
+ result = 0;
+
offset = page->index << PAGE_CACHE_SHIFT;
if (offset < inode->i_size) {
read_len = min_t(long, inode->i_size - offset, PAGE_SIZE);
r = jffs_read_data(f, buf, offset, read_len);
- if (r == read_len) {
- if (read_len < PAGE_SIZE) {
- memset(buf + read_len, 0,
- PAGE_SIZE - read_len);
- }
- SetPageUptodate(page);
- result = 0;
+ if (r != read_len) {
+ result = -EIO;
+ D(
+ printk("***jffs_readpage(): Read error! "
+ "Wanted to read %lu bytes but only "
+ "read %d bytes.\n", read_len, r);
+ );
}
- D(else {
- printk("***jffs_readpage(): Read error! "
- "Wanted to read %lu bytes but only "
- "read %d bytes.\n", read_len, r);
- });
+
}
+ /* Zero-fill the remainder of the page after a partial or failed read above */
+ if(read_len < PAGE_SIZE)
+ memset(buf + read_len, 0, PAGE_SIZE - read_len);
+
D3(printk (KERN_NOTICE "readpage(): up biglock\n"));
up(&c->fmc->biglock);
-
+
if (result) {
- memset(buf, 0, PAGE_SIZE);
SetPageError(page);
+ }else {
+ SetPageUptodate(page);
}
flush_dcache_page(page);
- UnlockPage(page);
-
put_page(page);
D3(printk("jffs_readpage(): Leaving...\n"));
return result;
-} /* jffs_readpage() */
+} /* jffs_do_readpage_nolock() */
+static int jffs_readpage(struct file *file, struct page *page)
+{
+ int ret = jffs_do_readpage_nolock(file, page);
+ UnlockPage(page);
+ return ret;
+}
/* Create a new directory. */
static int
}
/* Create a node and initialize it as much as needed. */
- if (!(node = (struct jffs_node *) kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(node = jffs_alloc_node())) {
D(printk("jffs_mkdir(): Allocation failed: node == 0\n"));
result = -ENOMEM;
goto jffs_mkdir_end;
}
- DJM(no_jffs_node++);
node->data_offset = 0;
node->removed_size = 0;
if ((result = jffs_write_node(c, node, &raw_inode,
dentry->d_name.name, 0, 0, NULL)) < 0) {
D(printk("jffs_mkdir(): jffs_write_node() failed.\n"));
- kfree(node);
- DJM(no_jffs_node--);
+ jffs_free_node(node);
goto jffs_mkdir_end;
}
int result = 0;
D1({
- int len = dentry->d_name.len;
- const char *name = dentry->d_name.name;
+ int len = dentry->d_name.len;
+ const char *name = dentry->d_name.name;
char *_name = (char *) kmalloc(len + 1, GFP_KERNEL);
memcpy(_name, name, len);
_name[len] = '\0';
/* Create a node for the deletion. */
result = -ENOMEM;
- if (!(del_node = (struct jffs_node *)
- kmalloc(sizeof(struct jffs_node), GFP_KERNEL))) {
+ if (!(del_node = jffs_alloc_node())) {
D(printk("jffs_remove(): Allocation failed!\n"));
goto jffs_remove_end;
}
- DJM(no_jffs_node++);
del_node->data_offset = 0;
del_node->removed_size = 0;
/* Write the new node to the flash memory. */
if (jffs_write_node(c, del_node, &raw_inode, 0, 0, 1, del_f) < 0) {
- kfree(del_node);
- DJM(no_jffs_node--);
+ jffs_free_node(del_node);
result = -EIO;
goto jffs_remove_end;
}
down(&c->fmc->biglock);
/* Create and initialize a new node. */
- if (!(node = (struct jffs_node *) kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(node = jffs_alloc_node())) {
D(printk("jffs_mknod(): Allocation failed!\n"));
result = -ENOMEM;
goto jffs_mknod_err;
}
- DJM(no_jffs_node++);
node->data_offset = 0;
node->removed_size = 0;
/* Write the new node to the flash. */
if ((err = jffs_write_node(c, node, &raw_inode, dentry->d_name.name,
- (unsigned char *)&dev, 0, NULL)) < 0) {
+ (unsigned char *)&dev, 0, NULL)) < 0) {
D(printk("jffs_mknod(): jffs_write_node() failed.\n"));
result = err;
goto jffs_mknod_err;
jffs_mknod_err:
if (node) {
- kfree(node);
- DJM(no_jffs_node--);
+ jffs_free_node(node);
}
jffs_mknod_end:
int err;
D1({
- int len = dentry->d_name.len;
+ int len = dentry->d_name.len;
char *_name = (char *)kmalloc(len + 1, GFP_KERNEL);
char *_symname = (char *)kmalloc(symname_len + 1, GFP_KERNEL);
memcpy(_name, dentry->d_name.name, len);
c = dir_f->c;
/* Create a node and initialize it as much as needed. */
- if (!(node = (struct jffs_node *) kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(node = jffs_alloc_node())) {
D(printk("jffs_symlink(): Allocation failed: node = NULL\n"));
return -ENOMEM;
}
D3(printk (KERN_NOTICE "symlink(): down biglock\n"));
down(&c->fmc->biglock);
- DJM(no_jffs_node++);
node->data_offset = 0;
node->removed_size = 0;
if ((err = jffs_write_node(c, node, &raw_inode, dentry->d_name.name,
(const unsigned char *)symname, 0, NULL)) < 0) {
D(printk("jffs_symlink(): jffs_write_node() failed.\n"));
- kfree(node);
- DJM(no_jffs_node--);
+ jffs_free_node(node);
goto jffs_symlink_end;
}
int err;
D1({
- int len = dentry->d_name.len;
+ int len = dentry->d_name.len;
char *s = (char *)kmalloc(len + 1, GFP_KERNEL);
memcpy(s, dentry->d_name.name, len);
s[len] = '\0';
c = dir_f->c;
/* Create a node and initialize as much as needed. */
- if (!(node = (struct jffs_node *) kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(node = jffs_alloc_node())) {
D(printk("jffs_create(): Allocation failed: node == 0\n"));
return -ENOMEM;
}
D3(printk (KERN_NOTICE "create(): down biglock\n"));
down(&c->fmc->biglock);
- DJM(no_jffs_node++);
node->data_offset = 0;
node->removed_size = 0;
if ((err = jffs_write_node(c, node, &raw_inode,
dentry->d_name.name, 0, 0, NULL)) < 0) {
D(printk("jffs_create(): jffs_write_node() failed.\n"));
- kfree(node);
- DJM(no_jffs_node--);
+ jffs_free_node(node);
goto jffs_create_end;
}
err = -EROFS;
goto out_isem;
}
-#endif
+#endif
err = -EINVAL;
if (!S_ISREG(inode->i_mode)) {
D(printk("jffs_file_write(): inode->i_mode == 0x%08x\n",
- inode->i_mode));
+ inode->i_mode));
goto out_isem;
}
if (!(f = (struct jffs_file *)inode->u.generic_ip)) {
D(printk("jffs_file_write(): inode->u.generic_ip = 0x%p\n",
- inode->u.generic_ip));
+ inode->u.generic_ip));
goto out_isem;
}
c = f->c;
/*
- * This will never trigger with sane page sizes. leave it in anyway,
- * since I'm thinking about how to merge larger writes (the current idea
- * is to poke a thread that does the actual I/O and starts by doing a
- * down(&inode->i_sem). then we would need to get the page cache pages
- * and have a list of I/O requests and do write-merging here.
+ * This will never trigger with sane page sizes. leave it in
+ * anyway, since I'm thinking about how to merge larger writes
+ * (the current idea is to poke a thread that does the actual
+ * I/O and starts by doing a down(&inode->i_sem). then we
+ * would need to get the page cache pages and have a list of
+ * I/O requests and do write-merging here.
* -- prumpf
*/
-
thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count);
D3(printk (KERN_NOTICE "file_write(): down biglock\n"));
while (count) {
/* Things are going to be written so we could allocate and
initialize the necessary data structures now. */
- if (!(node = (struct jffs_node *) kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(node = jffs_alloc_node())) {
D(printk("jffs_file_write(): node == 0\n"));
err = -ENOMEM;
goto out;
}
- DJM(no_jffs_node++);
-
+
node->data_offset = pos;
node->removed_size = 0;
-
+
/* Initialize the raw inode. */
raw_inode.magic = JFFS_MAGIC_BITMASK;
raw_inode.ino = f->ino;
raw_inode.pino = f->pino;
raw_inode.mode = f->mode;
-
+
raw_inode.uid = f->uid;
raw_inode.gid = f->gid;
raw_inode.atime = CURRENT_TIME;
raw_inode.spare = 0;
raw_inode.rename = 0;
raw_inode.deleted = 0;
-
+
if (pos < f->size) {
- node->removed_size = raw_inode.rsize =
- min_t(unsigned int, thiscount, f->size - pos);
-
- /* If this node is going entirely over the top of old data,
- we can allow it to go into the reserved space, because
- we can that GC can reclaim the space later.
+ node->removed_size = raw_inode.rsize = min(thiscount, (__u32)(f->size - pos));
+
+ /* If this node is going entirely over the top of old data,
+ we can allow it to go into the reserved space, because
+ we know that GC can reclaim the space later.
*/
if (pos + thiscount < f->size) {
/* If all the data we're overwriting are _real_,
*/
}
}
-
+
/* Write the new node to the flash. */
- /* NOTE: We would be quite happy if jffs_write_node() wrote a
- smaller node than we were expecting. There's no need for it
- to waste the space at the end of the flash just because it's
+ /* NOTE: We would be quite happy if jffs_write_node() wrote a
+ smaller node than we were expecting. There's no need for it
+ to waste the space at the end of the flash just because it's
a little smaller than what we asked for. But that's a whole
- new can of worms which I'm not going to open this week.
+ new can of worms which I'm not going to open this week.
-- dwmw2.
*/
if ((err = jffs_write_node(c, node, &raw_inode, f->name,
(const unsigned char *)buf,
recoverable, f)) < 0) {
D(printk("jffs_file_write(): jffs_write_node() failed.\n"));
- kfree(node);
- DJM(no_jffs_node--);
+ jffs_free_node(node);
goto out;
}
D3(printk("jffs_file_write(): new f_pos %ld.\n", (long)pos));
- thiscount = min_t(unsigned int, c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count);
+ thiscount = min(c->fmc->max_chunk_size - sizeof(struct jffs_raw_inode), count);
}
out:
D3(printk (KERN_NOTICE "file_write(): up biglock\n"));
static ssize_t
jffs_prepare_write(struct file *filp, struct page *page,
- unsigned from, unsigned to)
+ unsigned from, unsigned to)
{
/* FIXME: we should detect some error conditions here */
-
+
+ /* Bugger that. We should make sure the page is uptodate */
+ if (!Page_Uptodate(page) && (from || to < PAGE_CACHE_SIZE))
+ return jffs_do_readpage_nolock(filp, page);
+
return 0;
} /* jffs_prepare_write() */
static ssize_t
jffs_commit_write(struct file *filp, struct page *page,
- unsigned from, unsigned to)
+ unsigned from, unsigned to)
{
- void *addr = page_address(page) + from;
- /* XXX: PAGE_CACHE_SHIFT or PAGE_SHIFT */
- loff_t pos = (page->index<<PAGE_CACHE_SHIFT) + from;
-
- return jffs_file_write(filp, addr, to-from, &pos);
+ void *addr = page_address(page) + from;
+ /* XXX: PAGE_CACHE_SHIFT or PAGE_SHIFT */
+ loff_t pos = (page->index<<PAGE_CACHE_SHIFT) + from;
+
+ return jffs_file_write(filp, addr, to-from, &pos);
} /* jffs_commit_write() */
/* This is our ioctl() routine. */
if (copy_to_user((struct jffs_flash_status *)arg,
&fst,
sizeof(struct jffs_flash_status))) {
- ret = -EFAULT;
+ ret = -EFAULT;
}
}
break;
static int jffs_fsync(struct file *f, struct dentry *d, int datasync)
{
- /* We currently have O_SYNC operations at all times.
- Do nothing
+ /* We currently have O_SYNC operations at all times.
+ Do nothing.
*/
return 0;
}
+extern int generic_file_open(struct inode *, struct file *) __attribute__((weak));
+extern loff_t generic_file_llseek(struct file *, loff_t, int) __attribute__((weak));
+
static struct file_operations jffs_file_operations =
{
- llseek: generic_file_llseek, /* llseek */
- read: generic_file_read, /* read */
- write: generic_file_write, /* write */
- ioctl: jffs_ioctl, /* ioctl */
- mmap: generic_file_mmap, /* mmap */
- open: generic_file_open,
- fsync: jffs_fsync,
+ open: generic_file_open,
+ llseek: generic_file_llseek,
+ read: generic_file_read,
+ write: generic_file_write,
+ ioctl: jffs_ioctl,
+ mmap: generic_file_mmap,
+ fsync: jffs_fsync,
};
{
struct jffs_file *f;
struct jffs_control *c;
- D1(printk("jffs_delete_inode(): inode->i_ino == %lu\n",
+ D3(printk("jffs_delete_inode(): inode->i_ino == %lu\n",
inode->i_ino));
lock_kernel();
-
inode->i_size = 0;
inode->i_blocks = 0;
inode->u.generic_ip = 0;
jffs_garbage_collect_trigger(c);
}
-
static struct super_operations jffs_ops =
{
read_inode: jffs_read_inode,
static int __init
init_jffs_fs(void)
{
- printk("JFFS version "
- JFFS_VERSION_STRING
- ", (C) 1999, 2000 Axis Communications AB\n");
+ printk(KERN_INFO "JFFS version " JFFS_VERSION_STRING
+ ", (C) 1999, 2000 Axis Communications AB\n");
+
+#ifdef CONFIG_JFFS_PROC_FS
+ jffs_proc_root = proc_mkdir("jffs", proc_root_fs);
+#endif
+ fm_cache = kmem_cache_create("jffs_fm", sizeof(struct jffs_fm),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ node_cache = kmem_cache_create("jffs_node",sizeof(struct jffs_node),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
return register_filesystem(&jffs_fs_type);
}
exit_jffs_fs(void)
{
unregister_filesystem(&jffs_fs_type);
+ kmem_cache_destroy(fm_cache);
+ kmem_cache_destroy(node_cache);
}
EXPORT_NO_SYMBOLS;
module_init(init_jffs_fs)
module_exit(exit_jffs_fs)
+
+MODULE_DESCRIPTION("The Journalling Flash File System");
+MODULE_AUTHOR("Axis Communications AB.");
+MODULE_LICENSE("GPL");
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * $Id: intrep.c,v 1.71.2.4 2001/01/08 23:27:02 dwmw2 Exp $
+ * $Id: intrep.c,v 1.102 2001/09/23 23:28:36 dwmw2 Exp $
*
* Ported to Linux 2.3.x and MTD:
* Copyright (C) 2000 Alexander Larsson (alex@cendio.se), Cendio Systems AB
*/
#define __NO_VERSION__
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/jffs.h>
#include "intrep.h"
#include "jffs_fm.h"
-#if defined(JFFS_MEMORY_DEBUG) && JFFS_MEMORY_DEBUG
-long no_jffs_file = 0;
long no_jffs_node = 0;
+long no_jffs_file = 0;
+#if defined(JFFS_MEMORY_DEBUG) && JFFS_MEMORY_DEBUG
long no_jffs_control = 0;
long no_jffs_raw_inode = 0;
long no_jffs_node_ref = 0;
static int jffs_scan_flash(struct jffs_control *c);
static int jffs_update_file(struct jffs_file *f, struct jffs_node *node);
-static __u8 flash_read_u8(struct mtd_info *mtd, loff_t from);
-#if 1
+#if CONFIG_JFFS_FS_VERBOSE > 0
+static __u8
+flash_read_u8(struct mtd_info *mtd, loff_t from)
+{
+ size_t retlen;
+ __u8 ret;
+ int res;
+
+ res = MTD_READ(mtd, from, 1, &retlen, &ret);
+ if (retlen != 1) {
+ printk("Didn't read a byte in flash_read_u8(). Returned %d\n", res);
+ return 0;
+ }
+
+ return ret;
+}
+
static void
jffs_hexdump(struct mtd_info *mtd, loff_t pos, int size)
{
size -= 16;
}
}
+
#endif
#define flash_safe_acquire(arg)
int res;
D3(printk(KERN_NOTICE "flash_safe_read(%p, %08x, %p, %08x)\n",
- mtd, from, buf, count));
+ mtd, (unsigned int) from, buf, count));
res = MTD_READ(mtd, from, count, &retlen, buf);
if (retlen != count) {
}
-static __u8
-flash_read_u8(struct mtd_info *mtd, loff_t from)
+static int
+flash_safe_write(struct mtd_info *mtd, loff_t to,
+ const u_char *buf, size_t count)
{
size_t retlen;
- __u8 ret;
int res;
- res = MTD_READ(mtd, from, 1, &retlen, &ret);
- if (retlen != 1) {
- printk("Didn't read a byte in flash_read_u8(). Returned %d\n", res);
- return 0;
- }
+ D3(printk(KERN_NOTICE "flash_safe_write(%p, %08x, %p, %08x)\n",
+ mtd, (unsigned int) to, buf, count));
- return ret;
+ res = MTD_WRITE(mtd, to, count, &retlen, buf);
+ if (retlen != count) {
+ printk("Didn't write all bytes in flash_safe_write(). Returned %d\n", res);
+ }
+ return res?res:retlen;
}
static int
-flash_safe_write(struct mtd_info *mtd, loff_t to,
- const u_char *buf, size_t count)
+flash_safe_writev(struct mtd_info *mtd, const struct iovec *vecs,
+ unsigned long iovec_cnt, loff_t to)
{
- size_t retlen;
+ size_t retlen, retlen_a;
+ int i;
int res;
- res = MTD_WRITE(mtd, to, count, &retlen, buf);
- if (retlen != count) {
- printk("Didn't write all bytes in flash_safe_write(). Returned %d\n", res);
+ D3(printk(KERN_NOTICE "flash_safe_writev(%p, %08x, %p)\n",
+ mtd, (unsigned int) to, vecs));
+
+ if (mtd->writev) {
+ res = MTD_WRITEV(mtd, vecs, iovec_cnt, to, &retlen);
+ return res ? res : retlen;
+ }
+ /* Not implemented writev. Repeatedly use write - on the not so
+ unreasonable assumption that the mtd driver doesn't care how
+ many write cycles we use. */
+ res=0;
+ retlen=0;
+
+ for (i=0; !res && i<iovec_cnt; i++) {
+ res = MTD_WRITE(mtd, to, vecs[i].iov_len, &retlen_a, vecs[i].iov_base);
+ if (retlen_a != vecs[i].iov_len) {
+ printk("Didn't write all bytes in flash_safe_writev(). Returned %d\n", res);
+ if (i != iovec_cnt-1)
+ return -EIO;
+ }
+ /* If res is non-zero, retlen_a is undefined, but we don't
+ care because in that case it's not going to be
+ returned anyway.
+ */
+ to += retlen_a;
+ retlen += retlen_a;
}
return res?res:retlen;
}
flash_memset(struct mtd_info *mtd, loff_t to,
const u_char c, size_t size)
{
- static unsigned char pattern[16];
+ static unsigned char pattern[64];
int i;
/* fill up pattern */
- for(i = 0; i < 16; i++)
+ for(i = 0; i < 64; i++)
pattern[i] = c;
- /* write as many 16-byte chunks as we can */
+ /* write as many 64-byte chunks as we can */
- while (size >= 16) {
- flash_safe_write(mtd, to, pattern, 16);
- size -= 16;
- to += 16;
+ while (size >= 64) {
+ flash_safe_write(mtd, to, pattern, 64);
+ size -= 64;
+ to += 64;
}
/* and the rest */
}
-__u32
-jffs_checksum_flash(struct mtd_info *mtd, loff_t start, int size)
+int
+jffs_checksum_flash(struct mtd_info *mtd, loff_t start, int size, __u32 *result)
{
__u32 sum = 0;
loff_t ptr = start;
/* Allocate read buffer */
read_buf = (__u8 *) kmalloc (sizeof(__u8) * 4096, GFP_KERNEL);
-
+ if (!read_buf) {
+ printk(KERN_NOTICE "kmalloc failed in jffs_checksum_flash()\n");
+ return -ENOMEM;
+ }
/* Loop until checksum done */
while (size) {
/* Get amount of data to read */
/* Return result */
D3(printk("checksum result: 0x%08x\n", sum));
- return sum;
+ *result = sum;
+ return 0;
}
+
static __inline__ void jffs_fm_write_lock(struct jffs_fmcontrol *fmc)
{
// down(&fmc->wlock);
D(printk("jffs_create_file(): Failed!\n"));
return 0;
}
- DJM(no_jffs_file++);
+ no_jffs_file++;
memset(f, 0, sizeof(struct jffs_file));
f->ino = raw_inode->ino;
f->pino = raw_inode->pino;
GFP_KERNEL))) {
return -ENOMEM;
}
- DJM(no_jffs_file++);
- if (!(node = (struct jffs_node *)kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ no_jffs_file++;
+ if (!(node = jffs_alloc_node())) {
kfree(root);
- DJM(no_jffs_file--);
+ no_jffs_file--;
return -ENOMEM;
}
DJM(no_jffs_node++);
c->building_fs = 1;
c->sb = sb;
if ((err = jffs_scan_flash(c)) < 0) {
- goto jffs_build_fs_fail;
+ if(err == -EAGAIN){
+ /* scan_flash() wants us to try once more. A flipping
+			bits sector was detected in the middle of the flash scan.
+ Clean up old allocated memory before going in.
+ */
+ D1(printk("jffs_build_fs: Cleaning up all control structures,"
+ " reallocating them and trying mount again.\n"));
+ jffs_cleanup_control(c);
+ if (!(c = jffs_create_control(sb->s_dev))) {
+ return -ENOMEM;
+ }
+ c->building_fs = 1;
+ c->sb = sb;
+
+ if ((err = jffs_scan_flash(c)) < 0) {
+ goto jffs_build_fs_fail;
+ }
+ }else{
+ goto jffs_build_fs_fail;
+ }
}
/* Add a virtual root node if no one exists. */
} /* jffs_build_fs() */
+/*
+ This checks for sectors that were being erased in their previous
+ lifetimes and for some reason or the other (power fail etc.),
+ the erase cycles never completed.
+ As the flash array would have reverted back to read status,
+ these sectors are detected by the symptom of the "flipping bits",
+ i.e. bits being read back differently from the same location in
+ flash if read multiple times.
+ The only solution to this is to re-erase the entire
+ sector.
+ Unfortunately detecting "flipping bits" is not a simple exercise
+ as a bit may be read back at 1 or 0 depending on the alignment
+ of the stars in the universe.
+ The level of confidence is in direct proportion to the number of
+ scans done. By power fail testing I (Vipin) have been able to
+   prove that reading twice is not enough.
+ Maybe 4 times? Change NUM_REREADS to a higher number if you want
+ a (even) higher degree of confidence in your mount process.
+ A higher number would of course slow down your mount.
+*/
+int check_partly_erased_sectors(struct jffs_fmcontrol *fmc){
+
+#define NUM_REREADS 4 /* see note above */
+#define READ_AHEAD_BYTES 4096 /* must be a multiple of 4,
+ usually set to kernel page size */
+
+ __u8 *read_buf1;
+ __u8 *read_buf2;
+
+ int err = 0;
+ int retlen;
+ int i;
+ int cnt;
+ __u32 offset;
+ loff_t pos = 0;
+ loff_t end = fmc->flash_size;
+
+
+ /* Allocate read buffers */
+ read_buf1 = (__u8 *) kmalloc (sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL);
+ if (!read_buf1)
+ return -ENOMEM;
+
+ read_buf2 = (__u8 *) kmalloc (sizeof(__u8) * READ_AHEAD_BYTES, GFP_KERNEL);
+ if (!read_buf2) {
+ kfree(read_buf1);
+ return -ENOMEM;
+ }
+
+ CHECK_NEXT:
+ while(pos < end){
+
+ D1(printk("check_partly_erased_sector():checking sector which contains"
+ " offset 0x%x for flipping bits..\n", (__u32)pos));
+
+ retlen = flash_safe_read(fmc->mtd, pos,
+ &read_buf1[0], READ_AHEAD_BYTES);
+ retlen &= ~3;
+
+ for(cnt = 0; cnt < NUM_REREADS; cnt++){
+ (void)flash_safe_read(fmc->mtd, pos,
+ &read_buf2[0], READ_AHEAD_BYTES);
+
+ for (i=0 ; i < retlen ; i+=4) {
+ /* buffers MUST match, double word for word! */
+ if(*((__u32 *) &read_buf1[i]) !=
+ *((__u32 *) &read_buf2[i])
+ ){
+ /* flipping bits detected, time to erase sector */
+ /* This will help us log some statistics etc. */
+ D1(printk("Flipping bits detected in re-read round:%i of %i\n",
+ cnt, NUM_REREADS));
+ D1(printk("check_partly_erased_sectors:flipping bits detected"
+ " @offset:0x%x(0x%x!=0x%x)\n",
+ (__u32)pos+i, *((__u32 *) &read_buf1[i]),
+ *((__u32 *) &read_buf2[i])));
+
+ /* calculate start of present sector */
+ offset = (((__u32)pos+i)/(__u32)fmc->sector_size) * (__u32)fmc->sector_size;
+
+ D1(printk("check_partly_erased_sector():erasing sector starting 0x%x.\n",
+ offset));
+
+ if (flash_erase_region(fmc->mtd,
+ offset, fmc->sector_size) < 0) {
+ printk(KERN_ERR "JFFS: Erase of flash failed. "
+ "offset = %u, erase_size = %d\n",
+ offset , fmc->sector_size);
+
+ err = -EIO;
+ goto returnBack;
+
+ }else{
+ D1(printk("JFFS: Erase of flash sector @0x%x successful.\n",
+ offset));
+ /* skip ahead to the next sector */
+ pos = (((__u32)pos+i)/(__u32)fmc->sector_size) * (__u32)fmc->sector_size;
+ pos += fmc->sector_size;
+ goto CHECK_NEXT;
+ }
+ }
+ }
+ }
+ pos += READ_AHEAD_BYTES;
+ }
+
+ returnBack:
+ kfree(read_buf1);
+ kfree(read_buf2);
+
+ D2(printk("check_partly_erased_sector():Done checking all sectors till offset 0x%x for flipping bits.\n",
+ (__u32)pos));
+
+ return err;
+
+}/* end check_partly_erased_sectors() */
+
+
+
/* Scan the whole flash memory in order to find all nodes in the
file systems. */
static int
__u8 tmp_accurate;
__u16 tmp_chksum;
__u32 deleted_file;
- loff_t pos = fmc->flash_start;
+ loff_t pos = 0;
loff_t start;
- loff_t end = fmc->flash_start + fmc->flash_size;
+ loff_t test_start;
+ loff_t end = fmc->flash_size;
__u8 *read_buf;
int i, len, retlen;
+ __u32 offset;
+
+ __u32 free_chunk_size1;
+ __u32 free_chunk_size2;
+
+
+#define NUMFREEALLOWED 2 /* 2 chunks of at least erase size space allowed */
+ int num_free_space = 0; /* Flag err if more than TWO
+ free blocks found. This is NOT allowed
+ by the current jffs design.
+ */
+	int num_free_spc_not_accp = 0; /* For debugging purposes, keep count
+ of how much free space was rejected and
+ marked dirty
+ */
D1(printk("jffs_scan_flash(): start pos = 0x%lx, end = 0x%lx\n",
(long)pos, (long)end));
flash_safe_acquire(fmc->mtd);
+ /*
+ check and make sure that any sector does not suffer
+ from the "partly erased, bit flipping syndrome" (TM Vipin :)
+ If so, offending sectors will be erased.
+ */
+ if(check_partly_erased_sectors(fmc) < 0){
+
+ flash_safe_release(fmc->mtd);
+ return -EIO; /* bad, bad, bad error. Cannot continue.*/
+ }
+
/* Allocate read buffer */
read_buf = (__u8 *) kmalloc (sizeof(__u8) * 4096, GFP_KERNEL);
-
+ if (!read_buf) {
+ flash_safe_release(fmc->mtd);
+ return -ENOMEM;
+ }
+
/* Start the scan. */
while (pos < end) {
deleted_file = 0;
switch (flash_read_u32(fmc->mtd, pos)) {
case JFFS_EMPTY_BITMASK:
- /* We have found 0xff at this position. We have to
+ /* We have found 0xffffffff at this position. We have to
scan the rest of the flash till the end or till
- something else than 0xff is found. */
- D1(printk("jffs_scan_flash(): 0xff at pos 0x%lx.\n",
+ something else than 0xffffffff is found.
+ Keep going till we do not find JFFS_EMPTY_BITMASK
+ anymore */
+
+ D1(printk("jffs_scan_flash(): 0xffffffff at pos 0x%lx.\n",
(long)pos));
- len = end - pos < 4096 ? end - pos : 4096;
+ while(pos < end){
- retlen = flash_safe_read(fmc->mtd, pos,
+ len = end - pos < 4096 ? end - pos : 4096;
+
+ retlen = flash_safe_read(fmc->mtd, pos,
&read_buf[0], len);
- retlen &= ~3;
-
- for (i=0 ; i < retlen ; i+=4, pos += 4) {
- if(*((__u32 *) &read_buf[i]) !=
- JFFS_EMPTY_BITMASK)
+ retlen &= ~3;
+
+ for (i=0 ; i < retlen ; i+=4, pos += 4) {
+ if(*((__u32 *) &read_buf[i]) !=
+ JFFS_EMPTY_BITMASK)
break;
+ }
+ if (i == retlen)
+ continue;
+ else
+ break;
}
- if (i == retlen)
- continue;
-
- D1(printk("jffs_scan_flash(): 0xff ended at "
- "pos 0x%lx.\n", (long)pos));
+ D1(printk("jffs_scan_flash():0xffffffff ended at pos 0x%lx.\n",
+ (long)pos));
+
/* If some free space ends in the middle of a sector,
- treat it as dirty rather than clean.
+ treat it as dirty rather than clean.
This is to handle the case where one thread
allocated space for a node, but didn't get to
actually _write_ it before power was lost, leaving
only from the beginning of this sector
(or from start)
*/
- if (start < (pos & ~(fmc->sector_size-1))) {
- D1(printk("Reducing start to 0x%lx from 0x%lx\n", (unsigned long)pos & ~(fmc->sector_size-1), (unsigned long)start));
- start = pos & ~(fmc->sector_size-1);
+
+ test_start = pos & ~(fmc->sector_size-1); /* end of last sector */
+
+ if (start < test_start) {
+
+ /* free space started in the previous sector! */
+
+ if((num_free_space < NUMFREEALLOWED) &&
+ ((unsigned int)(test_start - start) >= fmc->sector_size)){
+
+ /*
+ Count it in if we are still under NUMFREEALLOWED *and* it is
+ at least 1 erase sector in length. This will keep us from
+ picking any little ole' space as "free".
+ */
+
+ D1(printk("Reducing end of free space to 0x%x from 0x%x\n",
+ (unsigned int)test_start, (unsigned int)pos));
+
+ D1(printk("Free space accepted: Starting 0x%x for 0x%x bytes\n",
+ (unsigned int) start,
+ (unsigned int)(test_start - start)));
+
+ /* below, space from "start" to "pos" will be marked dirty. */
+ start = test_start;
+
+ /* Being in here means that we have found at least an entire
+ erase sector size of free space ending on a sector boundary.
+ Keep track of free spaces accepted.
+ */
+ num_free_space++;
+ }else{
+ num_free_spc_not_accp++;
+ D1(printk("Free space (#%i) found but *Not* accepted: Starting"
+ " 0x%x for 0x%x bytes\n",
+ num_free_spc_not_accp, (unsigned int)start,
+ (unsigned int)((unsigned int)(pos & ~(fmc->sector_size-1)) - (unsigned int)start)));
+
+ }
+
}
- D1(printk("Dirty space: 0x%lx for 0x%lx bytes\n", (unsigned long)start, (unsigned long)(pos - start)));
- jffs_fmalloced(fmc, (__u32) start,
- (__u32) (pos - start), 0);
+ if((((__u32)(pos - start)) != 0)){
+
+ D1(printk("Dirty space: Starting 0x%x for 0x%x bytes\n",
+ (unsigned int) start, (unsigned int) (pos - start)));
+ jffs_fmalloced(fmc, (__u32) start,
+ (__u32) (pos - start), 0);
+ }else{
+ /* "Flipping bits" detected. This means that our scan for them
+ did not catch this offset. See check_partly_erased_sectors() for
+ more info.
+ */
+
+ D1(printk("jffs_scan_flash():wants to allocate dirty flash "
+ "space for 0 bytes.\n"));
+ D1(printk("jffs_scan_flash(): Flipping bits! We will free "
+ "all allocated memory, erase this sector and remount\n"));
+
+ /* calculate start of present sector */
+ offset = (((__u32)pos)/(__u32)fmc->sector_size) * (__u32)fmc->sector_size;
+
+ D1(printk("jffs_scan_flash():erasing sector starting 0x%x.\n",
+ offset));
+
+ if (flash_erase_region(fmc->mtd,
+ offset, fmc->sector_size) < 0) {
+ printk(KERN_ERR "JFFS: Erase of flash failed. "
+ "offset = %u, erase_size = %d\n",
+ offset , fmc->sector_size);
+
+ flash_safe_release(fmc->mtd);
+ kfree (read_buf);
+ return -1; /* bad, bad, bad! */
+
+ }
+ flash_safe_release(fmc->mtd);
+ kfree (read_buf);
+
+ return -EAGAIN; /* erased offending sector. Try mount one more time please. */
+ }
+ }else{
+ /* Being in here means that we have found free space that ends on an erase sector
+ boundary.
+ Count it in if we are still under NUMFREEALLOWED *and* it is at least 1 erase
+ sector in length. This will keep us from picking any little ole' space as "free".
+ */
+ if((num_free_space < NUMFREEALLOWED) &&
+ ((unsigned int)(pos - start) >= fmc->sector_size)){
+ /* We really don't do anything to mark space as free, except *not*
+ mark it dirty and just advance the "pos" location pointer.
+ It will automatically be picked up as free space.
+ */
+ num_free_space++;
+ D1(printk("Free space accepted: Starting 0x%x for 0x%x bytes\n",
+ (unsigned int) start, (unsigned int) (pos - start)));
+ }else{
+ num_free_spc_not_accp++;
+ D1(printk("Free space (#%i) found but *Not* accepted: Starting "
+ "0x%x for 0x%x bytes\n", num_free_spc_not_accp,
+ (unsigned int) start,
+ (unsigned int) (pos - start)));
+
+ /* Mark this space as dirty. We already have our free space. */
+ D1(printk("Dirty space: Starting 0x%x for 0x%x bytes\n",
+ (unsigned int) start, (unsigned int) (pos - start)));
+ jffs_fmalloced(fmc, (__u32) start,
+ (__u32) (pos - start), 0);
+ }
+
+ }
+ if(num_free_space > NUMFREEALLOWED){
+ printk(KERN_WARNING "jffs_scan_flash(): Found free space "
+ "number %i. Only %i free space is allowed.\n",
+ num_free_space, NUMFREEALLOWED);
}
continue;
case JFFS_DIRTY_BITMASK:
- /* We have found 0x00 at this position. Scan as far
+ /* We have found 0x00000000 at this position. Scan as far
as possible to find out how much is dirty. */
- D1(printk("jffs_scan_flash(): 0x00 at pos 0x%lx.\n",
+ D1(printk("jffs_scan_flash(): 0x00000000 at pos 0x%lx.\n",
(long)pos));
for (; pos < end
&& JFFS_DIRTY_BITMASK == flash_read_u32(fmc->mtd, pos);
"hexdump(pos = 0x%lx, len = 128):\n",
(long)pos));
D1(jffs_hexdump(fmc->mtd, pos, 128));
- cont_dirty:
+
for (pos += 4; pos < end; pos += 4) {
switch (flash_read_u32(fmc->mtd, pos)) {
case JFFS_MAGIC_BITMASK:
- jffs_fmalloced(fmc, (__u32) start,
- (__u32) (pos - start),
- 0);
- goto cont_scan;
case JFFS_EMPTY_BITMASK:
- /* First, mark as dirty the region
- which really does contain crap. */
- jffs_fmalloced(fmc, (__u32) start,
- (__u32) (pos - start),
- 0);
+ /* handle these in the main switch() loop */
+ goto cont_scan;
- /* Then, scan the region which looks free.
- Depending on how large it is, we may
- mark it dirty too.
- */
- start = pos;
- for (; pos < end ; pos += 4) {
- switch (flash_read_u32(fmc->mtd, pos)) {
- case JFFS_MAGIC_BITMASK:
- if (pos - start < fmc->max_chunk_size) {
- /* Not much free space. Mark it dirty. */
- jffs_fmalloced(fmc, (__u32)start,
- (__u32)pos-start, 0);
- }
- goto cont_scan;
-
- case JFFS_EMPTY_BITMASK:
- /* More empty space */
- continue;
-
- default:
- /* i.e. more dirt */
- if (pos - start < fmc->max_chunk_size) {
- /* There wasn't much before the dirt
- started again. Just mark it all dirty
- */
- goto cont_dirty;
- }
- /* There was quite a lot of free space. Leave it
- free.
- */
- goto cont_scan;
- }
- }
default:
break;
}
}
+
cont_scan:
+ /* First, mark as dirty the region
+ which really does contain crap. */
+ jffs_fmalloced(fmc, (__u32) start,
+ (__u32) (pos - start),
+ 0);
+
continue;
- }
+ }/* switch */
/* We have found the beginning of an inode. Create a
node for it unless there already is one available. */
if (!node) {
- if (!(node = (struct jffs_node *)
- kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(node = jffs_alloc_node())) {
/* Free read buffer */
kfree (read_buf);
if (raw_inode.rename) {
deleted_file = flash_read_u32(fmc->mtd, pos);
}
- checksum = jffs_checksum_flash(fmc->mtd, pos, raw_inode.dsize);
+ if (jffs_checksum_flash(fmc->mtd, pos, raw_inode.dsize, &checksum)) {
+ printk("jffs_checksum_flash() failed to calculate a checksum\n");
+ jffs_fmalloced(fmc, (__u32) start,
+ (__u32) (pos - start), 0);
+ /* Reuse this unused struct jffs_node. */
+ continue;
+ }
pos += raw_inode.dsize
+ JFFS_GET_PAD_BYTES(raw_inode.dsize);
node);
if (!node->fm) {
D(printk("jffs_scan_flash(): !node->fm\n"));
- kfree(node);
+ jffs_free_node(node);
DJM(no_jffs_node--);
/* Free read buffer */
GFP_KERNEL);
if (!dl) {
D(printk("jffs_scan_flash: !dl\n"));
- kfree(node);
+ jffs_free_node(node);
DJM(no_jffs_node--);
/* Release the flash device */
}
if (node) {
- kfree(node);
+ jffs_free_node(node);
DJM(no_jffs_node--);
}
jffs_build_end(fmc);
/* Free read buffer */
kfree (read_buf);
+ if(!num_free_space){
+ printk(KERN_WARNING "jffs_scan_flash(): Did not find even a single "
+ "chunk of free space. This is BAD!\n");
+ }
+
/* Return happy */
D3(printk("jffs_scan_flash(): Leaving...\n"));
flash_safe_release(fmc->mtd);
- return 0;
+
+ /* This is to trap the "free size accounting screwed error. */
+ free_chunk_size1 = jffs_free_size1(fmc);
+ free_chunk_size2 = jffs_free_size2(fmc);
+
+ if (free_chunk_size1 + free_chunk_size2 != fmc->free_size) {
+
+ printk(KERN_WARNING "jffs_scan_falsh():Free size accounting screwed\n");
+ printk(KERN_WARNING "jfffs_scan_flash():free_chunk_size1 == 0x%x, "
+ "free_chunk_size2 == 0x%x, fmc->free_size == 0x%x\n",
+ free_chunk_size1, free_chunk_size2, fmc->free_size);
+
+ return -1; /* Do NOT mount f/s so that we can inspect what happened.
+ Mounting this screwed up f/s will screw us up anyway.
+ */
+ }
+
+ return 0; /* as far as we are concerned, we are happy! */
} /* jffs_scan_flash() */
mod_type));
jffs_unlink_node_from_version_list(f, cur);
jffs_fmfree(f->c->fmc, cur->fm, cur);
- kfree(cur);
+ jffs_free_node(cur);
DJM(no_jffs_node--);
}
else {
struct jffs_file *f)
{
struct jffs_fmcontrol *fmc = c->fmc;
- struct jffs_fm *fm = NULL;
+ struct jffs_fm *fm;
+ struct iovec node_iovec[4];
+ unsigned long iovec_cnt;
+
__u32 pos;
int err;
__u32 slack = 0;
jffs_fm_write_lock(fmc);
+retry:
+ fm = NULL;
+ err = 0;
while (!fm) {
/* Deadlocks suck. */
if (f) {
raw_inode->version = f->highest_version + 1;
D1(printk (KERN_NOTICE "jffs_write_node(): setting version of %s to %d\n", f->name, raw_inode->version));
+
+ /* if the file was deleted, set the deleted bit in the raw inode */
+ if (f->deleted)
+ raw_inode->deleted = 1;
}
/* Compute the checksum for the data and name chunks. */
"flash at pos 0x%lx:\n", (long)pos));
D3(jffs_print_raw_inode(raw_inode));
- /* Step 1: Write the raw jffs inode to the flash. */
- if ((err = flash_safe_write(fmc->mtd, pos,
- (u_char *)raw_inode,
- sizeof(struct jffs_raw_inode))) < 0) {
- jffs_fmfree_partly(fmc, fm,
- total_name_size + total_data_size);
- jffs_fm_write_unlock(fmc);
- printk(KERN_ERR "JFFS: jffs_write_node: Failed to write "
- "raw_inode.\n");
- return err;
- }
- pos += sizeof(struct jffs_raw_inode);
+ /* The actual raw JFFS node */
+ node_iovec[0].iov_base = (void *) raw_inode;
+ node_iovec[0].iov_len = (size_t) sizeof(struct jffs_raw_inode);
+ iovec_cnt = 1;
- /* Step 2: Write the name, if there is any. */
+ /* Get name and size if there is one */
if (raw_inode->nsize) {
- if ((err = flash_safe_write(fmc->mtd, pos,
- (u_char *)name,
- raw_inode->nsize)) < 0) {
- jffs_fmfree_partly(fmc, fm, total_data_size);
- jffs_fm_write_unlock(fmc);
- printk(KERN_ERR "JFFS: jffs_write_node: Failed to "
- "write the name.\n");
- return err;
+ node_iovec[iovec_cnt].iov_base = (void *) name;
+ node_iovec[iovec_cnt].iov_len = (size_t) raw_inode->nsize;
+ iovec_cnt++;
+
+ if (JFFS_GET_PAD_BYTES(raw_inode->nsize)) {
+ static char allff[3]={255,255,255};
+ /* Add some extra padding if necessary */
+ node_iovec[iovec_cnt].iov_base = allff;
+ node_iovec[iovec_cnt].iov_len =
+ JFFS_GET_PAD_BYTES(raw_inode->nsize);
+ iovec_cnt++;
}
- pos += total_name_size;
}
- if (raw_inode->deleted)
- f->deleted = 1;
- /* Step 3: Append the actual data, if any. */
+ /* Get data and size if there is any */
if (raw_inode->dsize) {
- if ((err = flash_safe_write(fmc->mtd, pos, data,
- raw_inode->dsize)) < 0) {
- jffs_fmfree_partly(fmc, fm, 0);
- jffs_fm_write_unlock(fmc);
- printk(KERN_ERR "JFFS: jffs_write_node: Failed to "
- "write the data.\n");
- return err;
- }
+ node_iovec[iovec_cnt].iov_base = (void *) data;
+ node_iovec[iovec_cnt].iov_len = (size_t) raw_inode->dsize;
+ iovec_cnt++;
+ /* No need to pad this because we're not actually putting
+ anything after it.
+ */
}
+
+ if ((err = flash_safe_writev(fmc->mtd, node_iovec, iovec_cnt,
+ pos) < 0)) {
+ jffs_fmfree_partly(fmc, fm, 0);
+ jffs_fm_write_unlock(fmc);
+ printk(KERN_ERR "JFFS: jffs_write_node: Failed to write, "
+ "requested %i, wrote %i\n", total_size, err);
+ goto retry;
+ }
+ if (raw_inode->deleted)
+ f->deleted = 1;
+
jffs_fm_write_unlock(fmc);
D3(printk("jffs_write_node(): Leaving...\n"));
return raw_inode->dsize;
"version: %u, node_offset: %u\n",
f->name, node->ino, node->version, node_offset));
- r = min_t(u32, avail, max_size);
+ r = min(avail, max_size);
D3(printk(KERN_NOTICE "jffs_get_node_data\n"));
flash_safe_read(fmc->mtd, pos, buf, r);
int r;
if (!node->fm) {
/* This node does not refer to real data. */
- r = min_t(u32, size - read_data,
- node->data_size - node_offset);
+ r = min(size - read_data,
+ node->data_size - node_offset);
memset(&buf[read_data], 0, r);
}
else if ((r = jffs_get_node_data(f, node, &buf[read_data],
while (node) {
p = node;
node = node->version_next;
- kfree(p);
+ jffs_free_node(p);
DJM(no_jffs_node--);
}
return 0;
DJM(no_name--);
}
kfree(f);
- DJM(no_jffs_file--);
+ no_jffs_file--;
return 0;
}
+long
+jffs_get_file_count(void)
+{
+ return no_jffs_file;
+}
/* See if a file is deleted. If so, mark that file's nodes as obsolete. */
int
D3(printk("jffs_delete_data(): Split node with "
"version number %u.\n", n->version));
- if (!(new_node = (struct jffs_node *)
- kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(new_node = jffs_alloc_node())) {
D(printk("jffs_delete_data(): -ENOMEM\n"));
return -ENOMEM;
}
else {
/* No. No need to split the node. Just remove
the end of the node. */
- int r = min_t(u32, n->data_offset + n->data_size
- - offset, remove_size);
+ int r = min(n->data_offset + n->data_size
+ - offset, remove_size);
n->data_size -= r;
remove_size -= r;
n = n->range_next;
}
jffs_unlink_node_from_range_list(f, p);
jffs_unlink_node_from_version_list(f, p);
- kfree(p);
+ jffs_free_node(p);
DJM(no_jffs_node--);
}
else {
front of this insertion. This "virtual node" will not
be associated with any space on the flash device. */
struct jffs_node *virtual_node;
- if (!(virtual_node = (struct jffs_node *)
- kmalloc(sizeof(struct jffs_node),
- GFP_KERNEL))) {
+ if (!(virtual_node = jffs_alloc_node())) {
return -ENOMEM;
}
/* Rewrite `size' bytes, and begin at `node'. */
int
-jffs_rewrite_data(struct jffs_file *f, struct jffs_node *node, int size)
+jffs_rewrite_data(struct jffs_file *f, struct jffs_node *node, __u32 size)
{
struct jffs_control *c = f->c;
struct jffs_fmcontrol *fmc = c->fmc;
f->ino, (f->name ? f->name : "(null)"), size));
/* Create and initialize the new node. */
- if (!(new_node = (struct jffs_node *)
- kmalloc(sizeof(struct jffs_node), GFP_KERNEL))) {
+ if (!(new_node = jffs_alloc_node())) {
D(printk("jffs_rewrite_data(): "
"Failed to allocate node.\n"));
return -ENOMEM;
new_node->fm_offset = sizeof(struct jffs_raw_inode)
+ total_name_size;
+retry:
jffs_fm_write_lock(fmc);
+ err = 0;
if ((err = jffs_fmalloc(fmc, total_size, new_node, &fm)) < 0) {
DJM(no_jffs_node--);
jffs_fm_write_unlock(fmc);
D(printk("jffs_rewrite_data(): Failed to allocate fm.\n"));
- kfree(new_node);
+ jffs_free_node(new_node);
return err;
}
else if (!fm->nodes) {
raw_inode.nlink = f->nlink;
raw_inode.spare = 0;
raw_inode.rename = 0;
- raw_inode.deleted = 0;
+ raw_inode.deleted = f->deleted;
raw_inode.accurate = 0xff;
raw_inode.dchksum = 0;
raw_inode.nchksum = 0;
total_name_size + total_data_size);
jffs_fm_write_unlock(fmc);
printk(KERN_ERR "JFFS: jffs_rewrite_data: Write error during "
- "rewrite. (raw inode)\n");
- return err;
+ "rewrite. (raw inode)\n");
+ printk(KERN_ERR "JFFS: jffs_rewrite_data: Now retrying "
+ "rewrite. (raw inode)\n");
+ goto retry;
}
pos += sizeof(struct jffs_raw_inode);
/* Write the name to the flash memory. */
if (f->nsize) {
D3(printk("jffs_rewrite_data(): Writing name \"%s\" to "
- "pos 0x%ul.\n", f->name, (long)pos));
+ "pos 0x%ul.\n", f->name, (unsigned int) pos));
if ((err = flash_safe_write(fmc->mtd, pos,
(u_char *)f->name,
f->nsize)) < 0) {
jffs_fmfree_partly(fmc, fm, total_data_size);
jffs_fm_write_unlock(fmc);
printk(KERN_ERR "JFFS: jffs_rewrite_data: Write "
- "error during rewrite. (name)\n");
- return err;
+ "error during rewrite. (name)\n");
+ printk(KERN_ERR "JFFS: jffs_rewrite_data: Now retrying "
+ "rewrite. (name)\n");
+ goto retry;
}
pos += total_name_size;
raw_inode.nchksum = jffs_checksum(f->name, f->nsize);
}
while (size) {
- __u32 s = min_t(int, size, PAGE_SIZE);
+ __u32 s = min(size, (__u32)PAGE_SIZE);
if ((r = jffs_read_data(f, (char *)page,
offset, s)) < s) {
free_page((unsigned long)page);
printk(KERN_ERR "JFFS: jffs_rewrite_data: "
"Write error during rewrite. "
"(data)\n");
- return err;
+ goto retry;
}
pos += r;
size -= r;
jffs_fm_write_unlock(fmc);
printk(KERN_ERR "JFFS: jffs_rewrite_data: Write error during "
"rewrite. (checksum)\n");
- return err;
+ goto retry;
}
/* Now make the file system aware of the newly written node. */
struct jffs_fmcontrol *fmc = c->fmc;
struct jffs_node *node;
struct jffs_file *f;
- int size, err = 0;
- int data_size;
- int total_name_size;
+ int err = 0;
+ __u32 size;
+ __u32 data_size;
+ __u32 total_name_size;
__u32 extra_available;
__u32 space_needed;
__u32 free_chunk_size1 = jffs_free_size1(fmc);
what's available */
if (size > JFFS_PAD(node->data_size) + total_name_size +
sizeof(struct jffs_raw_inode) + extra_available) {
- D1(printk("Reducing size of new node from %d to %d to avoid "
- "catching our tail\n", size,
- JFFS_PAD(node->data_size) + JFFS_PAD(node->name_size) +
- sizeof(struct jffs_raw_inode) + extra_available));
+ D1(printk("Reducing size of new node from %d to %ld to avoid "
+ "catching our tail\n", size,
+ (long) (JFFS_PAD(node->data_size) + JFFS_PAD(node->name_size) +
+ sizeof(struct jffs_raw_inode) + extra_available)));
D1(printk("space_needed = %d, extra_available = %d\n",
space_needed, extra_available));
return err;
}
- offset = fmc->head->offset - fmc->flash_start;
+ offset = fmc->head->offset;
/* Now, let's try to do the erase. */
if ((err = flash_erase_region(fmc->mtd,
printk("JFFS: Erase failed! pos = 0x%lx\n",
(long)pos);
jffs_hexdump(fmc->mtd, pos,
- min_t(u32, 256, end - pos));
+ jffs_min(256, end - pos));
err = -1;
break;
}
c->fmc->free_size, c->fmc->dirty_size, c->fmc->sector_size));
/* If there's not enough dirty space to free a block, there's no point. */
- if (c->fmc->dirty_size < c->fmc->sector_size)
+ if (c->fmc->dirty_size < c->fmc->sector_size) {
+ D2(printk(KERN_NOTICE "thread_should_wake(): Not waking. Insufficient dirty space\n"));
return 0;
-
+ }
+#if 1
+ /* If there is too much RAM used by the various structures, GC */
+ if (jffs_get_node_inuse() > (c->fmc->used_size/c->fmc->max_chunk_size * 5 + jffs_get_file_count() * 2 + 50)) {
+ /* FIXME: Provide proof that this test can be satisfied. We
+ don't want a filesystem doing endless GC just because this
+ condition cannot ever be false.
+ */
+ D2(printk(KERN_NOTICE "thread_should_wake(): Waking due to number of nodes\n"));
+ return 1;
+ }
+#endif
/* If there are fewer free bytes than the threshold, GC */
- if (c->fmc->dirty_size < c->gc_minfree_threshold)
+ if (c->fmc->free_size < c->gc_minfree_threshold) {
+ D2(printk(KERN_NOTICE "thread_should_wake(): Waking due to insufficent free space\n"));
return 1;
-
+ }
/* If there are more dirty bytes than the threshold, GC */
- if (c->fmc->dirty_size > c->gc_maxdirty_threshold)
+ if (c->fmc->dirty_size > c->gc_maxdirty_threshold) {
+ D2(printk(KERN_NOTICE "thread_should_wake(): Waking due to excessive dirty space\n"));
return 1;
-
+ }
/* FIXME: What about the "There are many versions of a node" condition? */
return 0;
case SIGKILL:
D1(printk("jffs_garbage_collect_thread(): SIGKILL received.\n"));
c->gc_task = NULL;
- unlock_kernel();
complete_and_exit(&c->gc_thread_comp, 0);
}
}
D1(printk (KERN_NOTICE "jffs_garbage_collect_thread(): collecting.\n"));
- if (fmc->dirty_size < fmc->sector_size) {
- D1(printk(KERN_WARNING "jffs_garbage_collect_thread with insufficient dirty space (0x%x)\n", fmc->dirty_size));
- continue;
- }
-
D3(printk (KERN_NOTICE "g_c_thread(): down biglock\n"));
down(&fmc->biglock);
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * $Id: intrep.h,v 1.11 2000/08/17 22:46:46 bmatthews Exp $
+ * $Id: intrep.h,v 1.14 2001/09/23 23:28:37 dwmw2 Exp $
*
*/
#ifndef __LINUX_JFFS_INTREP_H__
#define __LINUX_JFFS_INTREP_H__
#include "jffs_fm.h"
+struct jffs_node *jffs_alloc_node(void);
+void jffs_free_node(struct jffs_node *n);
+int jffs_get_node_inuse(void);
+long jffs_get_file_count(void);
+
__u32 jffs_checksum(const void *data, int size);
void jffs_cleanup_control(struct jffs_control *c);
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * $Id: jffs_fm.c,v 1.18 2000/08/21 10:41:45 dwmw2 Exp $
+ * $Id: jffs_fm.c,v 1.27 2001/09/20 12:29:47 dwmw2 Exp $
*
* Ported to Linux 2.3.x and MTD:
* Copyright (C) 2000 Alexander Larsson (alex@cendio.se), Cendio Systems AB
static int jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset);
#endif
+extern kmem_cache_t *fm_cache;
+extern kmem_cache_t *node_cache;
/* This function creates a new shiny flash memory control structure. */
struct jffs_fmcontrol *
mtd = get_mtd_device(NULL, MINOR(dev));
- if (!mtd)
- {
+ if (!mtd) {
kfree(fmc);
+ DJM(no_jffs_fmcontrol--);
return NULL;
}
/* Retrieve the size of the flash memory. */
- fmc->flash_start = 0;
fmc->flash_size = mtd->size;
- D3(printk(" fmc->flash_start = 0x%08x\n", fmc->flash_start));
D3(printk(" fmc->flash_size = %d bytes\n", fmc->flash_size));
fmc->used_size = 0;
to write out larger nodes than the ones it's obsoleting.
We should fix it so it doesn't have to write the name
_every_ time. Later.
+ + another 2 sectors because people keep getting GC stuck and
+ we don't know why. This scares me - I want formal proof
+ of correctness of whatever number we put here. dwmw2.
*/
- fmc->min_free_size = fmc->sector_size << 1;
+ fmc->min_free_size = fmc->sector_size << 2;
fmc->mtd = mtd;
fmc->c = c;
fmc->head = 0;
while ((cur = next)) {
next = next->next;
- kfree(cur);
- DJM(no_jffs_fm--);
+ jffs_free_fm(cur);
}
put_mtd_device(fmc->mtd);
kfree(fmc);
{
__u32 head;
__u32 tail;
- __u32 end = fmc->flash_start + fmc->flash_size;
+ __u32 end = fmc->flash_size;
if (!fmc->head) {
/* There is nothing on the flash. */
head = fmc->head->offset;
tail = fmc->tail->offset + fmc->tail->size;
if (tail == end) {
- tail = fmc->flash_start;
+ tail = 0;
}
ASSERT(else if (tail > end) {
printk(KERN_WARNING "jffs_free_size1(): tail > end\n");
- tail = fmc->flash_start;
+ tail = 0;
});
if (head <= tail) {
if (fmc->head) {
__u32 head = fmc->head->offset;
__u32 tail = fmc->tail->offset + fmc->tail->size;
- if (tail == fmc->flash_start + fmc->flash_size) {
- tail = fmc->flash_start;
+ if (tail == fmc->flash_size) {
+ tail = 0;
}
if (tail >= head) {
- return head - fmc->flash_start;
+ return head;
}
}
return 0;
*result = 0;
- if (!(fm = (struct jffs_fm*)kmalloc(sizeof(struct jffs_fm),
- GFP_KERNEL))) {
+ if (!(fm = jffs_alloc_fm())) {
D(printk("jffs_fmalloc(): kmalloc() failed! (fm)\n"));
return -ENOMEM;
}
- DJM(no_jffs_fm++);
free_chunk_size1 = jffs_free_size1(fmc);
free_chunk_size2 = jffs_free_size2(fmc);
GFP_KERNEL))) {
D(printk("jffs_fmalloc(): kmalloc() failed! "
"(node_ref)\n"));
- kfree(fm);
- DJM(no_jffs_fm--);
+ jffs_free_fm(fm);
return -ENOMEM;
}
DJM(no_jffs_node_ref++);
fm->nodes->next = 0;
if (fmc->tail) {
fm->offset = fmc->tail->offset + fmc->tail->size;
- if (fm->offset
- == fmc->flash_start + fmc->flash_size) {
- fm->offset = fmc->flash_start;
+ if (fm->offset == fmc->flash_size) {
+ fm->offset = 0;
}
- ASSERT(else if (fm->offset
- > fmc->flash_start
- + fmc->flash_size) {
+ ASSERT(else if (fm->offset > fmc->flash_size) {
printk(KERN_WARNING "jffs_fmalloc(): "
"offset > flash_end\n");
- fm->offset = fmc->flash_start;
+ fm->offset = 0;
});
}
else {
/* There don't have to be files in the file
system yet. */
- fm->offset = fmc->flash_start;
+ fm->offset = 0;
}
fm->size = size;
fmc->free_size -= size;
else if (size > free_chunk_size2) {
printk(KERN_WARNING "JFFS: Tried to allocate a too "
"large flash memory chunk. (size = %u)\n", size);
- kfree(fm);
- DJM(no_jffs_fm--);
+ jffs_free_fm(fm);
return -ENOSPC;
}
else {
return -1;
}
#endif
- fmc->c->sb->s_dirt = 1;
}
ASSERT(if (!del) {
D3(printk("jffs_fmalloced()\n"));
- if (!(fm = (struct jffs_fm *)kmalloc(sizeof(struct jffs_fm),
- GFP_KERNEL))) {
+ if (!(fm = jffs_alloc_fm())) {
D(printk("jffs_fmalloced(0x%p, %u, %u, 0x%p): failed!\n",
fmc, offset, size, node));
return 0;
}
- DJM(no_jffs_fm++);
fm->offset = offset;
fm->size = size;
fm->prev = 0;
kmalloc(sizeof(struct jffs_node_ref),
GFP_KERNEL))) {
D(printk("jffs_fmalloced(): !fm->nodes\n"));
- kfree(fm);
- DJM(no_jffs_fm--);
+ jffs_free_fm(fm);
return 0;
}
DJM(no_jffs_node_ref++);
fmc->used_size -= fm->size;
if (fm == fmc->tail) {
fm->size -= size;
+ fmc->free_size += size;
}
fmc->dirty_size += fm->size;
}
fm = fm->next;
fm->prev = 0;
fmc->head = fm;
- kfree(del);
- DJM(no_jffs_fm--);
+ jffs_free_fm(del);
}
else {
fm->size -= erased_size;
/* Calculate how much space that is dirty. */
for (fm = fmc->head; fm && !fm->nodes; fm = fm->next) {
- if (size && fm->offset == fmc->flash_start) {
+ if (size && fm->offset == 0) {
/* We have reached the beginning of the flash. */
break;
}
/* Someone's signature contained this:
There's a fine line between fishing and just standing on
the shore like an idiot... */
- ret = jffs_flash_erasable_size(fmc->mtd,
- fmc->head->offset - fmc->flash_start, size);
+ ret = jffs_flash_erasable_size(fmc->mtd, fmc->head->offset, size);
ASSERT(if (ret < 0) {
printk("jffs_erasable_size: flash_erasable_size() "
"returned something less than zero (%ld).\n", ret);
printk("jffs_erasable_size: offset = 0x%08x\n",
- fmc->head->offset - fmc->flash_start);
+ fmc->head->offset);
});
/* If there is dirt on the flash (which is the reason to why
if (del->next) {
del->next->prev = head;
}
- kfree(del);
- DJM(no_jffs_fm--);
+ jffs_free_fm(del);
}
}
return (ret >= 0 ? ret : 0);
}
+/* Allocate a struct jffs_fm from the fm_cache slab cache.
+   Returns NULL if the allocation fails.  The no_jffs_fm debug
+   counter is only maintained when DJM() debugging is compiled in. */
+struct jffs_fm *jffs_alloc_fm(void)
+{
+ struct jffs_fm *fm;
+
+ fm = kmem_cache_alloc(fm_cache,GFP_KERNEL);
+ DJM(if (fm) no_jffs_fm++;);
+
+ return fm;
+}
+
+/* Return a struct jffs_fm to the fm_cache slab cache, decrementing
+   the DJM debug counter to mirror jffs_alloc_fm(). */
+void jffs_free_fm(struct jffs_fm *n)
+{
+ kmem_cache_free(fm_cache,n);
+ DJM(no_jffs_fm--);
+}
+
+
+
+/* Allocate a struct jffs_node from the node_cache slab cache.
+   Returns NULL on failure.  Unlike the fm counter, no_jffs_node is
+   updated unconditionally because jffs_get_node_inuse() reports it. */
+struct jffs_node *jffs_alloc_node(void)
+{
+ struct jffs_node *n;
+
+ n = (struct jffs_node *)kmem_cache_alloc(node_cache,GFP_KERNEL);
+ if(n != NULL)
+ no_jffs_node++;
+ return n;
+}
+
+/* Return a struct jffs_node to the node_cache slab cache and keep
+   the in-use count in step with jffs_alloc_node(). */
+void jffs_free_node(struct jffs_node *n)
+{
+ kmem_cache_free(node_cache,n);
+ no_jffs_node--;
+}
+
+
+/* Report how many jffs_node objects are currently allocated
+   (allocations minus frees via the two helpers above's counter). */
+int jffs_get_node_inuse(void)
+{
+ return no_jffs_node;
+}
void
jffs_print_fmcontrol(struct jffs_fmcontrol *fmc)
{
D(printk("struct jffs_fmcontrol: 0x%p\n", fmc));
D(printk("{\n"));
- D(printk(" 0x%08x, /* flash_start */\n", fmc->flash_start));
D(printk(" %u, /* flash_size */\n", fmc->flash_size));
D(printk(" %u, /* used_size */\n", fmc->used_size));
D(printk(" %u, /* dirty_size */\n", fmc->dirty_size));
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * $Id: jffs_fm.h,v 1.10 2000/08/17 15:42:44 dwmw2 Exp $
+ * $Id: jffs_fm.h,v 1.13 2001/01/11 12:03:25 dwmw2 Exp $
*
* Ported to Linux 2.3.x and MTD:
* Copyright (C) 2000 Alexander Larsson (alex@cendio.se), Cendio Systems AB
/* How many padding bytes should be inserted between two chunks of data
on the flash? */
-#define JFFS_GET_PAD_BYTES(size) ((JFFS_ALIGN_SIZE \
- - ((__u32)(size) % JFFS_ALIGN_SIZE)) \
- % JFFS_ALIGN_SIZE)
+#define JFFS_GET_PAD_BYTES(size) ( (JFFS_ALIGN_SIZE-1) & -(__u32)(size) )
#define JFFS_PAD(size) ( (size + (JFFS_ALIGN_SIZE-1)) & ~(JFFS_ALIGN_SIZE-1) )
+
+
+
+void jffs_free_fm(struct jffs_fm *n);
+struct jffs_fm *jffs_alloc_fm(void);
+
+
struct jffs_node_ref
{
struct jffs_node *node;
struct jffs_fmcontrol
{
- __u32 flash_start;
__u32 flash_size;
__u32 used_size;
__u32 dirty_size;
flash memory so it will be referenced by the head member. */
+
struct jffs_fmcontrol *jffs_build_begin(struct jffs_control *c, kdev_t dev);
void jffs_build_end(struct jffs_fmcontrol *fmc);
void jffs_cleanup_fmcontrol(struct jffs_fmcontrol *fmc);
--- /dev/null
+/*
+ * JFFS -- Journaling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 2000 Axis Communications AB.
+ *
+ * Created by Simon Kagstrom <simonk@axis.com>.
+ *
+ * $Id: jffs_proc.c,v 1.5 2001/06/02 14:34:55 dwmw2 Exp $
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Overview:
+ * This file defines JFFS partition entries in the proc file system.
+ *
+ * TODO:
+ * Create some more proc files for different kinds of info, i.e. statistics
+ * about written and read bytes, number of calls to different routines,
+ * reports about failures.
+ */
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/jffs.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include "jffs_fm.h"
+#include "jffs_proc.h"
+
+/*
+ * Structure for a JFFS partition in the system.  One is allocated per
+ * mounted JFFS partition and kept on the jffs_part_dirs singly-linked
+ * list so the proc entries can be found and removed again at umount.
+ */
+struct jffs_partition_dir {
+ struct jffs_control *c; /* control block of the mounted partition */
+ struct proc_dir_entry *part_root; /* per-device directory entry */
+ struct proc_dir_entry *part_info; /* 'info' file under part_root */
+ struct proc_dir_entry *part_layout; /* 'layout' file under part_root */
+ struct jffs_partition_dir *next; /* next mounted partition, or NULL */
+};
+
+/*
+ * Structure for top-level entry in '/proc/fs' directory
+ */
+struct proc_dir_entry *jffs_proc_root;
+
+/*
+ * Linked list of 'jffs_partition_dirs' to help us track
+ * the mounted JFFS partitions in the system
+ */
+static struct jffs_partition_dir *jffs_part_dirs = 0;
+
+/*
+ * Read functions for entries
+ */
+static int jffs_proc_info_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+static int jffs_proc_layout_read (char *page, char **start, off_t off,
+ int count, int *eof, void *data);
+
+
+/*
+ * Register a JFFS partition directory (called upon mount).
+ * Creates a per-device proc directory containing 'info' and 'layout'
+ * files, then links a new jffs_partition_dir onto the global list.
+ * Returns 0 on success or -ENOMEM on any allocation failure; on
+ * failure, proc entries already created are removed again before
+ * returning, so no partial state is left behind.
+ */
+int jffs_register_jffs_proc_dir(kdev_t dev, struct jffs_control *c)
+{
+ struct jffs_partition_dir *part_dir;
+ struct proc_dir_entry *part_info = 0;
+ struct proc_dir_entry *part_layout = 0;
+ struct proc_dir_entry *part_root = 0;
+
+ /* Allocate structure for local JFFS partition table */
+ if (!(part_dir = (struct jffs_partition_dir *)
+ kmalloc (sizeof (struct jffs_partition_dir), GFP_KERNEL))) {
+ return -ENOMEM;
+ }
+
+ /* Create entry for this partition */
+ if ((part_root = create_proc_entry (kdevname(dev),
+ S_IFDIR | S_IRUGO | S_IXUGO, jffs_proc_root))) {
+ part_root->read_proc = jffs_proc_info_read;
+ part_root->data = (void *) c;
+ }
+ else {
+ kfree (part_dir);
+ return -ENOMEM;
+ }
+
+ /* Create entry for 'info' file */
+ if ((part_info = create_proc_entry ("info", 0, part_root))) {
+ part_info->read_proc = jffs_proc_info_read;
+ part_info->data = (void *) c;
+ }
+ else {
+ remove_proc_entry (part_root->name, jffs_proc_root);
+ kfree (part_dir);
+ return -ENOMEM;
+ }
+
+ /* Create entry for 'layout' file */
+ if ((part_layout = create_proc_entry ("layout", 0, part_root))) {
+ part_layout->read_proc = jffs_proc_layout_read;
+ part_layout->data = (void *) c;
+ }
+ else {
+ remove_proc_entry (part_info->name, part_root);
+ remove_proc_entry (part_root->name, jffs_proc_root);
+ kfree (part_dir);
+ return -ENOMEM;
+ }
+
+ /* Fill in structure for table and insert in the list
+    (new entries go at the head of jffs_part_dirs) */
+ part_dir->c = c;
+ part_dir->part_root = part_root;
+ part_dir->part_info = part_info;
+ part_dir->part_layout = part_layout;
+ part_dir->next = jffs_part_dirs;
+ jffs_part_dirs = part_dir;
+
+ /* Return happy */
+ return 0;
+}
+
+
+/*
+ * Unregister a JFFS partition directory (called at umount).
+ * Walks the jffs_part_dirs list for the entry whose control block
+ * matches 'c', removes its proc entries, unlinks it from the list and
+ * frees it.  When the last partition goes away, the top-level "jffs"
+ * directory is removed from /proc/fs as well.
+ * Returns 0 on success, or -1 if no matching entry was found.
+ */
+int jffs_unregister_jffs_proc_dir(struct jffs_control *c)
+{
+ struct jffs_partition_dir *part_dir = jffs_part_dirs;
+ struct jffs_partition_dir *prev_part_dir = 0;
+
+ while (part_dir) {
+ if (part_dir->c == c) {
+ /* Remove entries for partition */
+ remove_proc_entry (part_dir->part_info->name,
+ part_dir->part_root);
+ remove_proc_entry (part_dir->part_layout->name,
+ part_dir->part_root);
+ remove_proc_entry (part_dir->part_root->name,
+ jffs_proc_root);
+
+ /* Remove entry from list */
+ if (prev_part_dir)
+ prev_part_dir->next = part_dir->next;
+ else
+ jffs_part_dirs = part_dir->next;
+
+ /*
+ * Check to see if this is the last one
+ * and remove the entry from '/proc/fs'
+ * if it is.
+ */
+ if (jffs_part_dirs == part_dir->next)
+#if LINUX_VERSION_CODE < 0x020300
+ remove_proc_entry ("jffs", &proc_root_fs);
+#else
+ remove_proc_entry ("jffs", proc_root_fs);
+#endif
+
+ /* Free memory for entry */
+ kfree(part_dir);
+
+ /* Return happy */
+ return 0;
+ }
+
+ /* Move to next entry */
+ prev_part_dir = part_dir;
+ part_dir = part_dir->next;
+ }
+
+ /* Return unhappy: 'c' was not registered */
+ return -1;
+}
+
+
+/*
+ * Read a JFFS partition's `info' file.
+ * Formats size/usage statistics from the partition's fmcontrol into
+ * 'page' and returns the byte count.  Note that 'off' and 'count' are
+ * ignored: the whole report is produced in one shot and *eof is set,
+ * which is fine as long as it fits in a single page.
+ */
+static int jffs_proc_info_read (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct jffs_control *c = (struct jffs_control *) data;
+ int len = 0;
+
+ /* Get information on the partition */
+ len += sprintf (page,
+ "partition size: %08lX (%u)\n"
+ "sector size: %08lX (%u)\n"
+ "used size: %08lX (%u)\n"
+ "dirty size: %08lX (%u)\n"
+ "free size: %08lX (%u)\n\n",
+ (unsigned long) c->fmc->flash_size, c->fmc->flash_size,
+ (unsigned long) c->fmc->sector_size, c->fmc->sector_size,
+ (unsigned long) c->fmc->used_size, c->fmc->used_size,
+ (unsigned long) c->fmc->dirty_size, c->fmc->dirty_size,
+ (unsigned long) (c->fmc->flash_size -
+ (c->fmc->used_size + c->fmc->dirty_size)),
+ c->fmc->flash_size - (c->fmc->used_size + c->fmc->dirty_size));
+
+ /* We're done */
+ *eof = 1;
+
+ /* Return length */
+ return len;
+}
+
+
+/*
+ * Read a JFFS partition's `layout' file.
+ * Walks the fmcontrol list head-to-tail and prints one line per flash
+ * chunk: "offset size ino=... ver=..." for chunks with nodes, "dirty"
+ * for chunks without, plus leading/trailing "free" lines for the gaps
+ * before the first and after the last chunk.  Output stops once
+ * off+count bytes have been produced; *eof is always set.
+ */
+static int jffs_proc_layout_read (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct jffs_control *c = (struct jffs_control *) data;
+ struct jffs_fm *fm = 0;
+ struct jffs_fm *last_fm = 0;
+ int len = 0;
+
+ /* Get the first item in the list */
+ fm = c->fmc->head;
+
+ /* Print free space before the first chunk, if any */
+ if (fm && fm->offset) {
+ len += sprintf (page, "00000000 %08lX free\n",
+ (unsigned long) fm->offset);
+ }
+
+ /* Loop through all of the flash control structures */
+ while (fm && (len < (off + count))) {
+ if (fm->nodes) {
+ len += sprintf (page + len,
+ "%08lX %08lX ino=%08lX, ver=%08lX\n",
+ (unsigned long) fm->offset,
+ (unsigned long) fm->size,
+ (unsigned long) fm->nodes->node->ino,
+ (unsigned long) fm->nodes->node->version);
+ }
+ else {
+ len += sprintf (page + len,
+ "%08lX %08lX dirty\n",
+ (unsigned long) fm->offset,
+ (unsigned long) fm->size);
+ }
+ last_fm = fm;
+ fm = fm->next;
+ }
+
+ /* Print free space after the last chunk, if any */
+ if ((len < (off + count)) && last_fm
+ && (last_fm->offset < c->fmc->flash_size)) {
+ len += sprintf (page + len,
+ "%08lX %08lX free\n",
+ (unsigned long) last_fm->offset +
+ last_fm->size,
+ (unsigned long) (c->fmc->flash_size -
+ (last_fm->offset + last_fm->size)));
+ }
+
+ /* We're done */
+ *eof = 1;
+
+ /* Return length */
+ return len;
+}
--- /dev/null
+/*
+ * JFFS -- Journaling Flash File System, Linux implementation.
+ *
+ * Copyright (C) 2000 Axis Communications AB.
+ *
+ * Created by Simon Kagstrom <simonk@axis.com>.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * $Id: jffs_proc.h,v 1.2 2000/11/15 22:04:12 sjhill Exp $
+ */
+
+/* jffs_proc.h defines a structure for inclusion in the proc-file system. */
+#ifndef __LINUX_JFFS_PROC_H__
+#define __LINUX_JFFS_PROC_H__
+
+#include <linux/proc_fs.h>
+
+/* The proc_dir_entry for jffs (defined in jffs_proc.c). */
+extern struct proc_dir_entry *jffs_proc_root;
+
+int jffs_register_jffs_proc_dir(kdev_t dev, struct jffs_control *c);
+int jffs_unregister_jffs_proc_dir(struct jffs_control *c);
+
+#endif /* __LINUX_JFFS_PROC_H__ */
#
# Makefile for the linux Journalling Flash FileSystem (JFFS) routines.
#
-# $Id: Makefile,v 1.21 2001/03/25 22:36:12 dwmw2 Exp $
+# $Id: Makefile,v 1.25 2001/09/25 20:59:41 dwmw2 Exp $
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
#
# Note 2! The CFLAGS definitions are now in the main makefile...
-ifndef CONFIG_JFFS2_FS
-
-CC += -I$(shell pwd)/../../include
-
-obj-m := jffs2.o comprmod.o
-# We're being invoked outside a normal kernel build. Fake it
-EXTRA_CFLAGS= -DCONFIG_JFFS2_FS_DEBUG=1 -g
-endif
-
-obj-$(CONFIG_JFFS2_FS) += jffs2.o
COMPR_OBJS := compr.o compr_rubin.o compr_rtime.o pushpull.o \
compr_zlib.o zlib.o
read.o nodemgmt.o readinode.o super.o write.o scan.o gc.o \
symlink.o build.o erase.o background.o
-jffs2-objs := $(COMPR_OBJS) $(JFFS2_OBJS)
-comprmod-objs := $(COMPR_OBJS) comprtest.o
+O_TARGET := jffs2.o
-jffs2.o: $(jffs2-objs)
- $(LD) -r -o $@ $(jffs2-objs)
-
-comprmod.o: $(comprmod-objs)
- $(LD) -r -o $@ $(comprmod-objs)
+obj-y := $(COMPR_OBJS) $(JFFS2_OBJS)
+obj-m := $(O_TARGET)
include $(TOPDIR)/Rules.make
+
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: background.c,v 1.10 2001/03/15 15:38:23 dwmw2 Exp $
+ * $Id: background.c,v 1.15 2001/09/20 08:05:04 dwmw2 Exp $
*
*/
#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
#include <linux/interrupt.h>
-#include <linux/smp_lock.h>
+#include <linux/completion.h>
#include "nodelist.h"
spin_unlock_bh(&c->erase_completion_lock);
}
+/* This must only ever be called when no GC thread is currently running */
int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
{
pid_t pid;
int ret = 0;
- init_MUTEX_LOCKED(&c->gc_thread_sem);
+
+ if (c->gc_task)
+ BUG();
+
+ init_MUTEX_LOCKED(&c->gc_thread_start);
init_completion(&c->gc_thread_exit);
-
+
pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
if (pid < 0) {
printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid);
+ complete(&c->gc_thread_exit);
ret = pid;
} else {
/* Wait for it... */
D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
- down(&c->gc_thread_sem);
+ down(&c->gc_thread_start);
}
- up(&c->gc_thread_sem);
return ret;
}
send_sig(SIGKILL, c->gc_task, 1);
}
spin_unlock_bh(&c->erase_completion_lock);
- down(&c->gc_thread_sem);
wait_for_completion(&c->gc_thread_exit);
}
daemonize();
current->tty = NULL;
c->gc_task = current;
- up(&c->gc_thread_sem);
+ up(&c->gc_thread_start);
sprintf(current->comm, "jffs2_gcd_mtd%d", c->mtd->index);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
}
- schedule(); /* Yes, we do this even if we want to go
- on immediately - we're a low priority
- background task. */
+ if (current->need_resched)
+ schedule();
/* Put_super will send a SIGKILL and then wait on the sem.
*/
spin_lock_bh(&c->erase_completion_lock);
c->gc_task = NULL;
spin_unlock_bh(&c->erase_completion_lock);
- up(&c->gc_thread_sem);
- complete_and_exit(&c->gc_thread_exit,0 );
+ complete_and_exit(&c->gc_thread_exit, 0);
case SIGHUP:
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: compr.c,v 1.16 2001/03/15 15:38:23 dwmw2 Exp $
+ * $Id: compr.c,v 1.17 2001/09/23 09:56:46 dwmw2 Exp $
*
*/
if (!ret) {
return JFFS2_COMPR_ZLIB;
}
-
+#if 0 /* Disabled 23/9/1. With zlib it hardly ever gets a look in */
ret = dynrubin_compress(data_in, cpage_out, datalen, cdatalen);
if (!ret) {
return JFFS2_COMPR_DYNRUBIN;
}
-
-#if 0 /* Phase this one out */
+#endif
+#if 0 /* Disabled 26/2/1. Obsoleted by dynrubin */
ret = rubinmips_compress(data_in, cpage_out, datalen, cdatalen);
if (!ret) {
return JFFS2_COMPR_RUBINMIPS;
}
#endif
+ /* rtime does manage to recompress already-compressed data */
ret = rtime_compress(data_in, cpage_out, datalen, cdatalen);
if (!ret) {
return JFFS2_COMPR_RTIME;
case JFFS2_COMPR_RTIME:
rtime_decompress(cdata_in, data_out, cdatalen, datalen);
break;
-#if 1 /* Phase this one out */
+
case JFFS2_COMPR_RUBINMIPS:
+#if 0 /* Disabled 23/9/1 */
rubinmips_decompress(cdata_in, data_out, cdatalen, datalen);
- break;
+#else
+ printk(KERN_WARNING "JFFS2: Rubinmips compression encountered but support not compiled in!\n");
#endif
+ break;
case JFFS2_COMPR_DYNRUBIN:
+#if 1 /* Phase this one out */
dynrubin_decompress(cdata_in, data_out, cdatalen, datalen);
+#else
+ printk(KERN_WARNING "JFFS2: Dynrubin compression encountered but support not compiled in!\n");
+#endif
break;
default:
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: compr_rubin.c,v 1.11 2001/03/21 16:20:48 dwmw2 Exp $
+ * $Id: compr_rubin.c,v 1.13 2001/09/23 10:06:05 rmk Exp $
*
*/
#include <linux/string.h>
+#include <linux/types.h>
#include "compr_rubin.h"
#include "histo_mips.h"
;
}
-
+/*
+ * Renormalisation slow path for decode() below: widen the (p,q)
+ * interval until it spans UPPER_BIT_RUBIN again, counting how many
+ * bits that takes, then pull exactly that many bits from the input
+ * stream into rec_q.  The caller has already verified that at least
+ * one renormalisation step is needed.
+ */
+static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q)
+{
+ register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN;
+ unsigned long rec_q;
+ int c, bits = 0;
+
+ /*
+ * First, work out how many bits we need from the input stream.
+ * Note that we have already done the initial check on this
+ * loop prior to calling this function.
+ */
+ do {
+ bits++;
+ q &= lower_bits_rubin;
+ q <<= 1;
+ p <<= 1;
+ } while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN));
+
+ rs->p = p;
+ rs->q = q;
+
+ rs->bit_number += bits;
+
+ /*
+ * Now get the bits. We really want this to be "get n bits".
+ */
+ rec_q = rs->rec_q;
+ do {
+ c = pullbit(&rs->pp);
+ rec_q &= lower_bits_rubin;
+ rec_q <<= 1;
+ rec_q += c;
+ } while (--bits);
+ rs->rec_q = rec_q;
+}
int decode(struct rubin_state *rs, long A, long B)
{
-
- char c;
- long i0, i1, threshold;
+ unsigned long p = rs->p, q = rs->q;
+ long i0, threshold;
int symbol;
-
- while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
- c = pullbit(&rs->pp);
- rs->bit_number++;
- rs->q &= LOWER_BITS_RUBIN;
- rs->q <<= 1;
- rs->p <<= 1;
- rs->rec_q &= LOWER_BITS_RUBIN;
- rs->rec_q <<= 1;
- rs->rec_q += c;
- };
+ if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN))
+ __do_decode(rs, p, q);
+
i0 = A * rs->p / (A + B);
if (i0 <= 0) {
i0 = 1;
if (i0 >= rs->p) {
i0 = rs->p - 1;
}
- i1 = rs->p - i0;
-
threshold = rs->q + i0;
- if (rs->rec_q < threshold) {
- symbol = 0;
- rs->p = i0;
- } else {
- symbol = 1;
- rs->p = i1;
+ symbol = rs->rec_q >= threshold;
+ if (rs->rec_q >= threshold) {
rs->q += i0;
+ i0 = rs->p - i0;
}
-
+
+ rs->p = i0;
+
return symbol;
}
static int in_byte(struct rubin_state *rs)
{
- int i;
- int result=0;
- for (i=0;i<8;i++) {
- result |= decode(rs, rs->bit_divider-rs->bits[i],rs->bits[i])<<i;
- }
+ int i, result = 0, bit_divider = rs->bit_divider;
+
+ for (i = 0; i < 8; i++)
+ result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i;
+
return result;
}
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: compr_zlib.c,v 1.6 2001/04/18 15:04:00 dwmw2 Exp $
+ * $Id: compr_zlib.c,v 1.8 2001/09/20 15:28:31 dwmw2 Exp $
*
*/
#ifdef __KERNEL__
#include <linux/kernel.h>
+#include <linux/mtd/compatmac.h> /* for min() */
#include <linux/slab.h>
#include <linux/jffs2.h>
#include "nodelist.h"
while (strm.total_out < *dstlen - STREAM_END_SPACE && strm.total_in < *sourcelen) {
strm.avail_out = *dstlen - (strm.total_out + STREAM_END_SPACE);
- strm.avail_in = min(*sourcelen-strm.total_in, strm.avail_out);
+ strm.avail_in = min((unsigned)(*sourcelen-strm.total_in), strm.avail_out);
D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
strm.avail_in, strm.avail_out));
ret = deflate(&strm, Z_PARTIAL_FLUSH);
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: erase.c,v 1.19 2001/03/25 22:36:12 dwmw2 Exp $
+ * $Id: erase.c,v 1.23 2001/09/19 21:51:11 dwmw2 Exp $
*
*/
#include <linux/kernel.h>
return;
}
- printk(KERN_WARNING "Erase at 0x%08x failed immediately: %d\n", jeb->offset, ret);
+ if (ret == -EROFS)
+ printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
+ else
+ printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);
spin_lock_bh(&c->erase_completion_lock);
list_del(&jeb->list);
list_add(&jeb->list, &c->bad_list);
spin_unlock(&priv->c->erase_completion_lock);
wake_up(&priv->c->erase_wait);
} else {
- D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08lx\n", instr->addr));
+ D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", instr->addr));
spin_lock(&priv->c->erase_completion_lock);
list_del(&priv->jeb->list);
list_add_tail(&priv->jeb->list, &priv->c->erase_complete_list);
/* Hmmm. Maybe we should accept the extra space it takes and make
this a standard doubly-linked list? */
-static inline void jffs2_remove_node_ref_from_ino_list(struct jffs2_sb_info *sbinfo, struct jffs2_raw_node_ref *ref)
+static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
+ struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
- struct jffs2_inode_cache *ic;
- struct jffs2_raw_node_ref **prev, *this;
- D2(int c=0);
+ struct jffs2_inode_cache *ic = NULL;
+ struct jffs2_raw_node_ref **prev;
- this = ref;
- while(this->next_in_ino)
- this = this->next_in_ino;
+ prev = &ref->next_in_ino;
- ic = (struct jffs2_inode_cache *)this;
-
- D1(printk(KERN_DEBUG "Removing node at phys 0x%08x from ino #%u\n", ref->flash_offset &~3, ic->ino));
-
- prev = &ic->nodes;
- if (!*prev) {
- printk(KERN_WARNING "Eep. ic->nodes == NULL.\n");
- return;
- }
- while (*prev != ref) {
+ /* Walk the inode's list once, removing any nodes from this eraseblock */
+ while (1) {
if (!(*prev)->next_in_ino) {
- printk(KERN_WARNING "Eep. node at phys 0x%08x, mem %p. next_in_ino is NULL.\n", (*prev)->flash_offset &~3,
- *prev);
- return;
+ /* We're looking at the jffs2_inode_cache, which is
+ at the end of the linked list. Stash it and continue
+ from the beginning of the list */
+ ic = (struct jffs2_inode_cache *)(*prev);
+ prev = &ic->nodes;
+ continue;
+ }
+
+ if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
+ /* It's in the block we're erasing */
+ struct jffs2_raw_node_ref *this;
+
+ this = *prev;
+ *prev = this->next_in_ino;
+ this->next_in_ino = NULL;
+
+ if (this == ref)
+ break;
+
+ continue;
}
- prev = &(*prev)->next_in_ino;
+ /* Not to be deleted. Skip */
+ prev = &((*prev)->next_in_ino);
}
- *prev = ref->next_in_ino;
- this = ic->nodes;
- D2(printk(KERN_DEBUG "After remove_node_ref_from_ino_list: \n" KERN_DEBUG);
- while(this) {
- printk( "0x%08x(%d)->", this->flash_offset & ~3, this->flash_offset &3);
- if (++c == 5) {
- printk("\n" KERN_DEBUG);
- c=0;
- }
- this = this->next_in_ino;
+
+ /* PARANOIA */
+ if (!ic) {
+ printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
+ return;
}
- printk("\n"););
+
+ D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
+ jeb->offset, jeb->offset + c->sector_size, ic->ino));
+
+ D2({
+ int i=0;
+ struct jffs2_raw_node_ref *this;
+ printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);
+
+ this = ic->nodes;
+
+ while(this) {
+ printk( "0x%08x(%d)->", this->flash_offset & ~3, this->flash_offset &3);
+ if (++i == 5) {
+ printk("\n" KERN_DEBUG);
+ i=0;
+ }
+ this = this->next_in_ino;
+ }
+ printk("\n");
+ });
+
if (ic->nodes == (void *)ic) {
D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
- jffs2_del_ino_cache(sbinfo, ic);
+ jffs2_del_ino_cache(c, ic);
jffs2_free_inode_cache(ic);
}
}
/* Remove from the inode-list */
if (ref->next_in_ino)
- jffs2_remove_node_ref_from_ino_list(c, ref);
- /* else it was a non-inode node so don't bother */
+ jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
+ /* else it was a non-inode node or already removed, so don't bother */
jffs2_free_raw_node_ref(ref);
}
D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
while(ofs < jeb->offset + c->sector_size) {
- __u32 readlen = min(PAGE_SIZE, jeb->offset + c->sector_size - ofs);
+ __u32 readlen = min((__u32)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
int i;
ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: file.c,v 1.55 2001/05/29 09:19:24 dwmw2 Exp $
+ * $Id: file.c,v 1.58 2001/09/20 15:28:31 dwmw2 Exp $
*
*/
#include <linux/kernel.h>
+#include <linux/mtd/compatmac.h> /* for min() */
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include "crc32.h"
extern int generic_file_open(struct inode *, struct file *) __attribute__((weak));
+extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) __attribute__((weak));
int jffs2_null_fsync(struct file *filp, struct dentry *dentry, int datasync)
ri.mode = inode->i_mode;
ri.uid = inode->i_uid;
ri.gid = inode->i_gid;
- ri.isize = max(inode->i_size, pageofs);
+ ri.isize = max((__u32)inode->i_size, pageofs);
ri.atime = ri.ctime = ri.mtime = CURRENT_TIME;
ri.offset = inode->i_size;
ri.dsize = pageofs - inode->i_size;
struct inode *inode = filp->f_dentry->d_inode;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- ssize_t newsize = max(filp->f_dentry->d_inode->i_size, (pg->index << PAGE_CACHE_SHIFT) + end);
+ __u32 newsize = max_t(__u32, filp->f_dentry->d_inode->i_size, (pg->index << PAGE_CACHE_SHIFT) + end);
__u32 file_ofs = (pg->index << PAGE_CACHE_SHIFT);
- unsigned writelen = min(PAGE_CACHE_SIZE, newsize - file_ofs);
+ __u32 writelen = min((__u32)PAGE_CACHE_SIZE, newsize - file_ofs);
struct jffs2_raw_inode *ri;
int ret = 0;
ssize_t writtenlen = 0;
ri->mode = inode->i_mode;
ri->uid = inode->i_uid;
ri->gid = inode->i_gid;
- ri->isize = max(inode->i_size, file_ofs + datalen);
+ ri->isize = max((__u32)inode->i_size, file_ofs + datalen);
ri->atime = ri->ctime = ri->mtime = CURRENT_TIME;
ri->offset = file_ofs;
ri->csize = cdatalen;
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: gc.c,v 1.51 2001/05/24 22:24:39 dwmw2 Exp $
+ * $Id: gc.c,v 1.52 2001/09/19 21:53:47 dwmw2 Exp $
*
*/
/* Shitloads of space */
/* FIXME: Integrate this properly with GC calculations */
start &= ~(PAGE_CACHE_SIZE-1);
- end = min(start + PAGE_CACHE_SIZE, inode->i_size);
+ end = min_t(__u32, start + PAGE_CACHE_SIZE, inode->i_size);
D1(printk(KERN_DEBUG "Plenty of free space, so expanding to write from offset 0x%x to 0x%x\n",
start, end));
if (end < orig_end) {
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: nodelist.c,v 1.28 2001/03/27 06:20:04 dwmw2 Exp $
+ * $Id: nodelist.c,v 1.29 2001/09/19 00:06:35 dwmw2 Exp $
*
*/
dirent we've already read from the flash
*/
if (retlen > sizeof(struct jffs2_raw_dirent))
- memcpy(&fd->name[0], &node.d.name[0], min(node.d.nsize, retlen-sizeof(struct jffs2_raw_dirent)));
+ memcpy(&fd->name[0], &node.d.name[0], min((__u32)node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent))));
/* Do we need to copy any more of the name directly
from the flash?
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: nodelist.h,v 1.45 2001/03/20 17:43:57 dwmw2 Exp $
+ * $Id: nodelist.h,v 1.46 2001/09/18 23:43:05 dwmw2 Exp $
*
*/
#define PAD(x) (((x)+3)&~3)
-/* These probably ought to be somewhere central. */
-//#define min(x,y) ({ typeof((x)) _x = (x); typeof((y)) _y = (y); (_x>_y)?_y:_x; })
-//#define max(x,y) ({ typeof((x)) _x = (x); typeof((y)) _y = (y); (_x>_y)?_x:_y; })
-
static inline int jffs2_raw_ref_to_inum(struct jffs2_raw_node_ref *raw)
{
while(raw->next_in_ino) {
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: nodemgmt.c,v 1.39 2001/04/11 22:20:26 dwmw2 Exp $
+ * $Id: nodemgmt.c,v 1.45 2001/09/20 08:05:05 dwmw2 Exp $
*
*/
ret = jffs2_garbage_collect_pass(c);
if (ret)
return ret;
- if (signal_pending(current)) {
- return -EINTR;
- }
- if(current->need_resched) {
+
+ if (current->need_resched)
schedule();
- }
+
+ if (signal_pending(current))
+ return -EINTR;
+
down(&c->alloc_sem);
spin_lock_bh(&c->erase_completion_lock);
}
if (c->mtd->type != MTD_NORFLASH && c->mtd->type != MTD_RAM)
return;
+ if (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+ return;
D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref->flash_offset &~3));
ret = c->mtd->read(c->mtd, ref->flash_offset &~3, sizeof(n), &retlen, (char *)&n);
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: pushpull.c,v 1.6 2001/03/15 15:38:24 dwmw2 Exp $
+ * $Id: pushpull.c,v 1.7 2001/09/23 10:04:15 rmk Exp $
*
*/
{
return pp->ofs;
}
-
-
-int pullbit(struct pushpull *pp)
-{
- int bit;
-
- bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1;
-
- pp->ofs++;
- return bit;
-}
-
-int pulledbits (struct pushpull *pp)
-{
- return pp->ofs;
-}
-
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: pushpull.h,v 1.4 2001/03/15 15:38:24 dwmw2 Exp $
+ * $Id: pushpull.h,v 1.5 2001/09/23 10:04:15 rmk Exp $
*
*/
void init_pushpull(struct pushpull *, char *, unsigned, unsigned, unsigned);
int pushbit(struct pushpull *pp, int bit, int use_reserved);
int pushedbits(struct pushpull *pp);
-int pullbit(struct pushpull *pp);
-int pulledbits(struct pushpull *);
+
+/* Pull the next bit from the stream (MSB-first within each byte) and
+   advance the bit offset. */
+static inline int pullbit(struct pushpull *pp)
+{
+ int bit;
+
+ bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1;
+
+ pp->ofs++;
+ return bit;
+}
+
+/* Number of bits consumed from the stream so far. */
+static inline int pulledbits(struct pushpull *pp)
+{
+ return pp->ofs;
+}
#endif /* __PUSHPULL_H__ */
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: scan.c,v 1.44 2001/04/13 00:46:41 nico Exp $
+ * $Id: scan.c,v 1.51 2001/09/19 00:06:35 dwmw2 Exp $
*
*/
#include <linux/kernel.h>
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
int i, ret;
+ __u32 empty_blocks = 0;
+
if (!c->blocks) {
printk(KERN_WARNING "EEEK! c->blocks is NULL!\n");
return -EINVAL;
struct jffs2_eraseblock *jeb = &c->blocks[i];
ret = jffs2_scan_eraseblock(c, jeb);
- if (ret)
+ if (ret < 0)
return ret;
ACCT_PARANOIA_CHECK(jeb);
- /* Now decide which list to put it on */
- if (jeb->used_size == PAD(sizeof(struct jffs2_unknown_node)) && !jeb->first_node->next_in_ino) {
+ /* Now decide which list to put it on */
+ if (ret == 1) {
+ /*
+ * Empty block. Since we can't be sure it
+ * was entirely erased, we just queue it for erase
+ * again. It will be marked as such when the erase
+ * is complete. Meanwhile we still count it as empty
+ * for later checks.
+ */
+ list_add(&jeb->list, &c->erase_pending_list);
+ empty_blocks++;
+ c->nr_erasing_blocks++;
+ } else if (jeb->used_size == PAD(sizeof(struct jffs2_unknown_node)) && !jeb->first_node->next_in_ino) {
/* Only a CLEANMARKER node is valid */
if (!jeb->dirty_size) {
/* It's actually free */
printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset);
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- }
- }
- if (c->nr_erasing_blocks)
+ }
+ }
+ if (c->nr_erasing_blocks) {
+ if (!c->used_size && empty_blocks != c->nr_blocks) {
+ printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
+ return -EIO;
+ }
jffs2_erase_pending_trigger(c);
-
+ }
return 0;
}
__u32 ofs, prevofs;
__u32 hdr_crc, nodetype;
int err;
- int noise = 10;
+ int noise = 0;
ofs = jeb->offset;
prevofs = jeb->offset - 1;
D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));
+ err = jffs2_scan_empty(c, jeb, &ofs, &noise);
+ if (err) return err;
+ if (ofs == jeb->offset + c->sector_size) {
+ D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
+ return 1; /* special return code */
+ }
+
+ noise = 10;
+
while(ofs < jeb->offset + c->sector_size) {
ssize_t retlen;
ACCT_PARANOIA_CHECK(jeb);
case JFFS2_FEATURE_ROCOMPAT:
printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", node.nodetype, ofs);
c->flags |= JFFS2_SB_FLAG_RO;
+ if (!(OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY))
+ return -EROFS;
DIRTY_SPACE(PAD(node.totlen));
ofs += PAD(node.totlen);
continue;
__u32 scanlen = (jeb->offset + c->sector_size) - *startofs;
__u32 curofs = *startofs;
- buf = kmalloc(min(PAGE_SIZE, scanlen), GFP_KERNEL);
+ buf = kmalloc(min((__u32)PAGE_SIZE, scanlen), GFP_KERNEL);
if (!buf) {
printk(KERN_WARNING "Scan buffer allocation failed\n");
return -ENOMEM;
ssize_t retlen;
int ret, i;
- ret = c->mtd->read(c->mtd, curofs, min(PAGE_SIZE, scanlen), &retlen, (char *)buf);
+ ret = c->mtd->read(c->mtd, curofs, min((__u32)PAGE_SIZE, scanlen), &retlen, (char *)buf);
if(ret) {
- D1(printk(KERN_WARNING "jffs2_scan_empty(): Read 0x%lx bytes at 0x%08x returned %d\n", min(PAGE_SIZE, scanlen), curofs, ret));
+ D1(printk(KERN_WARNING "jffs2_scan_empty(): Read 0x%x bytes at 0x%08x returned %d\n", min((__u32)PAGE_SIZE, scanlen), curofs, ret));
kfree(buf);
return ret;
}
if (crc != rd.name_crc) {
printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
*ofs, rd.name_crc, crc);
+ fd->name[rd.nsize]=0;
+ D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, rd.ino));
jffs2_free_full_dirent(fd);
/* FIXME: Why do we believe totlen? */
DIRTY_SPACE(PAD(rd.totlen));
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: super.c,v 1.43 2001/05/29 08:59:47 dwmw2 Exp $
+ * $Id: super.c,v 1.48 2001/10/02 09:16:23 dwmw2 Exp $
*
*/
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/interrupt.h>
-
#include "nodelist.h"
#ifndef MTD_BLOCK_MAJOR
put_super: jffs2_put_super,
write_super: jffs2_write_super,
statfs: jffs2_statfs,
-// remount_fs: jffs2_remount_fs,
+ remount_fs: jffs2_remount_fs,
clear_inode: jffs2_clear_inode
};
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = JFFS2_SUPER_MAGIC;
- jffs2_start_garbage_collect_thread(c);
+ if (!(sb->s_flags & MS_RDONLY))
+ jffs2_start_garbage_collect_thread(c);
return sb;
out_root_i:
D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
- jffs2_stop_garbage_collect_thread(c);
+ if (!(sb->s_flags & MS_RDONLY))
+ jffs2_stop_garbage_collect_thread(c);
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
kfree(c->blocks);
+ if (c->mtd->sync)
+ c->mtd->sync(c->mtd);
put_mtd_device(c->mtd);
D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
}
+/* Handle mount -o remount: start/stop the GC thread to match the new
+ * ro/rw state.  Refuses rw if the medium was flagged read-only-compatible
+ * (JFFS2_SB_FLAG_RO) during the scan. */
+int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
+		return -EROFS;
+
+	/* We stop if it was running, then restart if it needs to.
+	   This also catches the case where it was stopped and this
+	   is just a remount to restart it */
+	if (!(sb->s_flags & MS_RDONLY))
+		jffs2_stop_garbage_collect_thread(c);
+
+	if (!(*flags & MS_RDONLY))
+		jffs2_start_garbage_collect_thread(c);
+
+	/* Only the RDONLY bit is allowed to change here. */
+	sb->s_flags = (sb->s_flags & ~MS_RDONLY)|(*flags & MS_RDONLY);
+
+	return 0;
+}
+
void jffs2_write_super (struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
sb->s_dirt = 0;
+
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
jffs2_garbage_collect_trigger(c);
jffs2_erase_pending_blocks(c);
jffs2_mark_erased_blocks(c);
module_init(init_jffs2_fs);
module_exit(exit_jffs2_fs);
+
+MODULE_DESCRIPTION("The Journalling Flash File System, v2");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
+ // the sake of this tag. It's Free Software.
* provisions above, a recipient may use your version of this file
* under either the RHEPL or the GPL.
*
- * $Id: write.c,v 1.27 2001/04/11 15:29:34 dwmw2 Exp $
+ * $Id: write.c,v 1.28 2001/05/01 16:25:25 dwmw2 Exp $
*
*/
struct iovec vecs[2];
int ret;
- D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name \"%s\"->ino #%u\n", rd->pino, name, rd->ino));
+ D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", rd->pino, name, name, rd->ino, rd->name_crc));
writecheck(c->mtd, flash_ofs);
D1(if(rd->hdr_crc != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
--- /dev/null
+/*
+ * linux/fs/namespace.c
+ *
+ * (C) Copyright Al Viro 2000, 2001
+ * Released under GPL v2.
+ *
+ * Based on code from fs/super.c, copyright Linus Torvalds and others.
+ * Heavily rewritten.
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/quotaops.h>
+#include <linux/acct.h>
+#include <linux/module.h>
+#include <linux/devfs_fs_kernel.h>
+
+#include <asm/uaccess.h>
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/nfs_mount.h>
+
+struct vfsmount *do_kern_mount(char *type, int flags, char *name, void *data);
+int do_remount_sb(struct super_block *sb, int flags, char * data);
+
+static struct list_head *mount_hashtable;
+static int hash_mask, hash_bits;
+static kmem_cache_t *mnt_cache;
+
+static LIST_HEAD(vfsmntlist);
+static DECLARE_MUTEX(mount_sem);
+
+/* Will be static */
+struct vfsmount *root_vfsmnt;
+
+/* Hash a (parent mount, mountpoint dentry) pair into mount_hashtable.
+ * Pointers are divided by L1_CACHE_BYTES to drop the always-zero
+ * alignment bits before mixing. */
+static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
+{
+	unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
+	tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
+	tmp = tmp + (tmp >> hash_bits);
+	return tmp & hash_mask;
+}
+
+/* Allocate and zero-initialize a vfsmount with a refcount of 1,
+ * owned by the current user.  Returns NULL on allocation failure. */
+struct vfsmount *alloc_vfsmnt(void)
+{
+	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
+	if (mnt) {
+		memset(mnt, 0, sizeof(struct vfsmount));
+		atomic_set(&mnt->mnt_count,1);
+		INIT_LIST_HEAD(&mnt->mnt_hash);
+		INIT_LIST_HEAD(&mnt->mnt_child);
+		INIT_LIST_HEAD(&mnt->mnt_mounts);
+		INIT_LIST_HEAD(&mnt->mnt_list);
+		mnt->mnt_owner = current->uid;
+	}
+	return mnt;
+}
+
+/* Release a vfsmount and its (optionally kmalloc'd) device name. */
+void free_vfsmnt(struct vfsmount *mnt)
+{
+	if (mnt->mnt_devname)
+		kfree(mnt->mnt_devname);
+	kmem_cache_free(mnt_cache, mnt);
+}
+
+/* Find the child mount whose mountpoint is (mnt, dentry) in the mount
+ * hash table, or NULL if nothing is mounted there. */
+struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+{
+	struct list_head * head = mount_hashtable + hash(mnt, dentry);
+	struct list_head * tmp = head;
+	struct vfsmount *p;
+
+	for (;;) {
+		tmp = tmp->next;
+		p = NULL;		/* fell off the chain: not found */
+		if (tmp == head)
+			break;
+		p = list_entry(tmp, struct vfsmount, mnt_hash);
+		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry)
+			break;
+	}
+	return p;
+}
+
+/* Walk up the mount tree to its root and check that the mount is
+ * attached to the real namespace (rooted at root_vfsmnt), i.e. not an
+ * orphan that has already been unmounted. */
+static int check_mnt(struct vfsmount *mnt)
+{
+	spin_lock(&dcache_lock);
+	while (mnt->mnt_parent != mnt)
+		mnt = mnt->mnt_parent;
+	spin_unlock(&dcache_lock);
+	return mnt == root_vfsmnt;
+}
+
+/* Unhook mnt from its parent, returning the old attachment point in
+ * *old_nd (references are transferred to old_nd; the caller must
+ * path_release() it).  mnt becomes self-parented, i.e. a tree root.
+ * Must be called with dcache_lock held (callers in this file do so). */
+static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
+{
+	old_nd->dentry = mnt->mnt_mountpoint;
+	old_nd->mnt = mnt->mnt_parent;
+	mnt->mnt_parent = mnt;
+	mnt->mnt_mountpoint = mnt->mnt_root;
+	list_del_init(&mnt->mnt_child);
+	list_del_init(&mnt->mnt_hash);
+	old_nd->dentry->d_mounted--;
+}
+
+/* Hook mnt onto the mountpoint described by nd: take references on the
+ * parent mount and dentry, enter the hash table and the parent's child
+ * list, and bump d_mounted so lookups follow the mount. */
+static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
+{
+	mnt->mnt_parent = mntget(nd->mnt);
+	mnt->mnt_mountpoint = dget(nd->dentry);
+	list_add(&mnt->mnt_hash, mount_hashtable+hash(nd->mnt, nd->dentry));
+	list_add(&mnt->mnt_child, &nd->mnt->mnt_mounts);
+	nd->dentry->d_mounted++;
+}
+
+/* Depth-first successor of p in the mount tree rooted at root, or NULL
+ * when the traversal is complete.  Descends into children first, then
+ * moves to siblings, climbing back toward root as subtrees finish. */
+static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
+{
+	struct list_head *next = p->mnt_mounts.next;
+	if (next == &p->mnt_mounts) {
+		/* no children: find the next sibling, possibly of an ancestor */
+		while (1) {
+			if (p == root)
+				return NULL;
+			next = p->mnt_child.next;
+			if (next != &p->mnt_parent->mnt_mounts)
+				break;
+			p = p->mnt_parent;
+		}
+	}
+	return list_entry(next, struct vfsmount, mnt_child);
+}
+
+/* Use octal escapes, like mount does, for embedded spaces etc. */
+static unsigned char need_escaping[] = { ' ', '\t', '\n', '\\' };
+
+/* Copy string s into buf (at most len-3 escaped bytes of output),
+ * replacing characters from need_escaping[] with \ooo octal escapes,
+ * as mount(8) does for /proc/mounts.  Returns the number of bytes
+ * written; the output is NOT NUL-terminated. */
+static int
+mangle(const unsigned char *s, char *buf, int len) {
+	char *sp;
+	int n;
+
+	sp = buf;
+	while(*s && sp-buf < len-3) {
+		for (n = 0; n < sizeof(need_escaping); n++) {
+			if (*s == need_escaping[n]) {
+				*sp++ = '\\';
+				*sp++ = '0' + ((*s & 0300) >> 6);
+				*sp++ = '0' + ((*s & 070) >> 3);
+				*sp++ = '0' + (*s & 07);
+				goto next;
+			}
+		}
+		*sp++ = *s;
+	next:
+		s++;
+	}
+	return sp - buf;	/* no trailing NUL */
+}
+
+/* Flag-to-option-string tables used when generating /proc/mounts:
+ * fs_info for per-superblock MS_* flags, mnt_info for per-mountpoint
+ * MNT_* flags, nfs_info for NFS mount options (with the string to show
+ * when the flag is clear). */
+static struct proc_fs_info {
+	int flag;
+	char *str;
+} fs_info[] = {
+	{ MS_SYNCHRONOUS, ",sync" },
+	{ MS_MANDLOCK, ",mand" },
+	{ MS_NOATIME, ",noatime" },
+	{ MS_NODIRATIME, ",nodiratime" },
+	{ 0, NULL }
+};
+
+static struct proc_fs_info mnt_info[] = {
+	{ MNT_NOSUID, ",nosuid" },
+	{ MNT_NODEV, ",nodev" },
+	{ MNT_NOEXEC, ",noexec" },
+	{ 0, NULL }
+};
+
+static struct proc_nfs_info {
+	int flag;
+	char *str;
+	char *nostr;
+} nfs_info[] = {
+	{ NFS_MOUNT_SOFT, ",soft", ",hard" },
+	{ NFS_MOUNT_INTR, ",intr", "" },
+	{ NFS_MOUNT_POSIX, ",posix", "" },
+	{ NFS_MOUNT_TCP, ",tcp", ",udp" },
+	{ NFS_MOUNT_NOCTO, ",nocto", "" },
+	{ NFS_MOUNT_NOAC, ",noac", "" },
+	{ NFS_MOUNT_NONLM, ",nolock", ",lock" },
+	{ NFS_MOUNT_BROKEN_SUID, ",broken_suid", "" },
+	{ 0, NULL, NULL }
+};
+
+/* Create a new vfsmount over the same superblock as 'old', rooted at
+ * 'root' (used for bind mounts).  The clone starts detached
+ * (self-parented) and takes an extra reference on the superblock.
+ * Returns NULL on allocation failure; a failed devname copy is
+ * tolerated (the clone simply has no devname). */
+static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root)
+{
+	char *name = old->mnt_devname;
+	struct vfsmount *mnt = alloc_vfsmnt();
+	struct super_block *sb = old->mnt_sb;
+
+	if (!mnt)
+		goto out;
+
+	if (name) {
+		mnt->mnt_devname = kmalloc(strlen(name)+1, GFP_KERNEL);
+		if (mnt->mnt_devname)
+			strcpy(mnt->mnt_devname, name);
+	}
+	mnt->mnt_sb = sb;
+	mnt->mnt_root = dget(root);
+	mnt->mnt_mountpoint = mnt->mnt_root;
+	mnt->mnt_parent = mnt;
+	mnt->mnt_flags = old->mnt_flags;
+
+	atomic_inc(&sb->s_active);
+out:
+	return mnt;
+}
+
+/* Render /proc/mounts into buf (one page): one line per vfsmount in
+ * vfsmntlist, in mount(8)'s escaped format, plus NFS-specific options.
+ * Returns the number of bytes written.  A scratch page is used for
+ * d_path(); if the output would overflow, the last complete line is
+ * kept and "# truncated" is appended. */
+int get_filesystem_info( char *buf )
+{
+	struct list_head *p;
+	struct proc_fs_info *fs_infop;
+	struct proc_nfs_info *nfs_infop;
+	struct nfs_server *nfss;
+	int len, prevlen;
+	char *path, *buffer = (char *) __get_free_page(GFP_KERNEL);
+
+	if (!buffer) return 0;
+	len = prevlen = 0;
+
+/* 200 bytes of headroom keeps the unchecked sprintf()s below safe. */
+#define FREEROOM	((int)PAGE_SIZE-200-len)
+#define MANGLE(s)	len += mangle((s), buf+len, FREEROOM);
+
+	for (p = vfsmntlist.next; p != &vfsmntlist; p = p->next) {
+		struct vfsmount *tmp = list_entry(p, struct vfsmount, mnt_list);
+		path = d_path(tmp->mnt_root, tmp, buffer, PAGE_SIZE);
+		if (!path)
+			continue;
+		MANGLE(tmp->mnt_devname ? tmp->mnt_devname : "none");
+		buf[len++] = ' ';
+		MANGLE(path);
+		buf[len++] = ' ';
+		MANGLE(tmp->mnt_sb->s_type->name);
+		len += sprintf(buf+len, " %s",
+			       tmp->mnt_sb->s_flags & MS_RDONLY ? "ro" : "rw");
+		for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+			if (tmp->mnt_sb->s_flags & fs_infop->flag)
+				MANGLE(fs_infop->str);
+		}
+		for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+			if (tmp->mnt_flags & fs_infop->flag)
+				MANGLE(fs_infop->str);
+		}
+		if (!strcmp("nfs", tmp->mnt_sb->s_type->name)) {
+			nfss = &tmp->mnt_sb->u.nfs_sb.s_server;
+			len += sprintf(buf+len, ",v%d", nfss->rpc_ops->version);
+
+			len += sprintf(buf+len, ",rsize=%d", nfss->rsize);
+
+			len += sprintf(buf+len, ",wsize=%d", nfss->wsize);
+#if 0
+			if (nfss->timeo != 7*HZ/10) {
+				len += sprintf(buf+len, ",timeo=%d",
+					       nfss->timeo*10/HZ);
+			}
+			if (nfss->retrans != 3) {
+				len += sprintf(buf+len, ",retrans=%d",
+					       nfss->retrans);
+			}
+#endif
+			/* attribute-cache timeouts are shown only when
+			 * they differ from the defaults */
+			if (nfss->acregmin != 3*HZ) {
+				len += sprintf(buf+len, ",acregmin=%d",
+					       nfss->acregmin/HZ);
+			}
+			if (nfss->acregmax != 60*HZ) {
+				len += sprintf(buf+len, ",acregmax=%d",
+					       nfss->acregmax/HZ);
+			}
+			if (nfss->acdirmin != 30*HZ) {
+				len += sprintf(buf+len, ",acdirmin=%d",
+					       nfss->acdirmin/HZ);
+			}
+			if (nfss->acdirmax != 60*HZ) {
+				len += sprintf(buf+len, ",acdirmax=%d",
+					       nfss->acdirmax/HZ);
+			}
+			for (nfs_infop = nfs_info; nfs_infop->flag; nfs_infop++) {
+				char *str;
+				if (nfss->flags & nfs_infop->flag)
+					str = nfs_infop->str;
+				else
+					str = nfs_infop->nostr;
+				MANGLE(str);
+			}
+			len += sprintf(buf+len, ",addr=");
+			MANGLE(nfss->hostname);
+		}
+		len += sprintf(buf + len, " 0 0\n");
+		if (FREEROOM <= 3) {
+			/* out of space: roll back the partial line */
+			len = prevlen;
+			len += sprintf(buf+len, "# truncated\n");
+			break;
+		}
+		prevlen = len;
+	}
+
+	free_page((unsigned long) buffer);
+	return len;
+#undef MANGLE
+#undef FREEROOM
+}
+
+/*
+ * Doesn't take quota and stuff into account. IOW, in some cases it will
+ * give false negatives. The main reason why it's here is that we need
+ * a non-destructive way to look for easily umountable filesystems.
+ */
+/* Heuristic busy check: >2 references (the mount-table reference plus
+ * the caller's) means someone else is using the mount. */
+int may_umount(struct vfsmount *mnt)
+{
+	if (atomic_read(&mnt->mnt_count) > 2)
+		return -EBUSY;
+	return 0;
+}
+
+/* Dismantle the whole mount subtree rooted at mnt: move every mount
+ * onto a private kill list, then detach and drop each one.
+ * Called (and returns) with dcache_lock held; the lock is dropped
+ * around the blocking path_release()/mntput() calls. */
+void umount_tree(struct vfsmount *mnt)
+{
+	struct vfsmount *p;
+	LIST_HEAD(kill);
+
+	if (list_empty(&mnt->mnt_list))
+		return;		/* already gone */
+
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		list_del(&p->mnt_list);
+		list_add(&p->mnt_list, &kill);
+	}
+
+	while (!list_empty(&kill)) {
+		mnt = list_entry(kill.next, struct vfsmount, mnt_list);
+		list_del_init(&mnt->mnt_list);
+		if (mnt->mnt_parent == mnt) {
+			/* tree root: nothing to detach from */
+			spin_unlock(&dcache_lock);
+		} else {
+			struct nameidata old_nd;
+			detach_mnt(mnt, &old_nd);
+			spin_unlock(&dcache_lock);
+			path_release(&old_nd);
+		}
+		mntput(mnt);
+		spin_lock(&dcache_lock);
+	}
+}
+
+/* Core of umount(2): optionally abort in-flight operations
+ * (MNT_FORCE), special-case the root filesystem (remount ro), and
+ * tear down the mount tree if it is unused or MNT_DETACH was given. */
+static int do_umount(struct vfsmount *mnt, int flags)
+{
+	struct super_block * sb = mnt->mnt_sb;
+	int retval = 0;
+
+	/*
+	 * If we may have to abort operations to get out of this
+	 * mount, and they will themselves hold resources we must
+	 * allow the fs to do things. In the Unix tradition of
+	 * 'Gee that's tricky, let's do it in userspace' the umount_begin
+	 * might fail to complete on the first run through as other tasks
+	 * must return, and the like. That's for the mount program to worry
+	 * about for the moment.
+	 */
+
+	lock_kernel();
+	if( (flags&MNT_FORCE) && sb->s_op->umount_begin)
+		sb->s_op->umount_begin(sb);
+	unlock_kernel();
+
+	/*
+	 * No sense to grab the lock for this test, but test itself looks
+	 * somewhat bogus. Suggestions for better replacement?
+	 * Ho-hum... In principle, we might treat that as umount + switch
+	 * to rootfs. GC would eventually take care of the old vfsmount.
+	 * The problem being: we have to implement rootfs and GC for that ;-)
+	 * Actually it makes sense, especially if rootfs would contain a
+	 * /reboot - static binary that would close all descriptors and
+	 * call reboot(9). Then init(8) could umount root and exec /reboot.
+	 */
+	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
+		int retval = 0;
+		/*
+		 * Special case for "unmounting" root ...
+		 * we just try to remount it readonly.
+		 */
+		if (!(sb->s_flags & MS_RDONLY)) {
+			down_write(&sb->s_umount);
+			lock_kernel();
+			retval = do_remount_sb(sb, MS_RDONLY, 0);
+			unlock_kernel();
+			up_write(&sb->s_umount);
+		}
+		return retval;
+	}
+
+	down(&mount_sem);
+	spin_lock(&dcache_lock);
+
+	if (atomic_read(&sb->s_active) == 1) {
+		/* last instance - try to be smart */
+		spin_unlock(&dcache_lock);
+		lock_kernel();
+		DQUOT_OFF(sb);
+		acct_auto_close(sb->s_dev);
+		unlock_kernel();
+		spin_lock(&dcache_lock);
+	}
+	retval = -EBUSY;
+	/* count==2: only the mount table and our caller hold references */
+	if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
+		umount_tree(mnt);
+		retval = 0;
+	}
+	spin_unlock(&dcache_lock);
+	up(&mount_sem);
+	return retval;
+}
+
+/*
+ * Now umount can handle mount points as well as block devices.
+ * This is important for filesystems which use unnamed block devices.
+ *
+ * We now support a flag for forced unmount like the other 'big iron'
+ * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
+ */
+
+/* umount(2): look up the path, require it to be the root of an
+ * attached mount, check permission (CAP_SYS_ADMIN or mount owner),
+ * then hand off to do_umount(). */
+asmlinkage long sys_umount(char * name, int flags)
+{
+	struct nameidata nd;
+	char *kname;
+	int retval;
+
+	kname = getname(name);
+	retval = PTR_ERR(kname);
+	if (IS_ERR(kname))
+		goto out;
+	retval = 0;
+	if (path_init(kname, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd))
+		retval = path_walk(kname, &nd);
+	putname(kname);
+	if (retval)
+		goto out;
+	retval = -EINVAL;
+	if (nd.dentry != nd.mnt->mnt_root)
+		goto dput_and_out;	/* not the root of the mount */
+	if (!check_mnt(nd.mnt))
+		goto dput_and_out;	/* not in our namespace */
+
+	retval = -EPERM;
+	if (!capable(CAP_SYS_ADMIN) && current->uid!=nd.mnt->mnt_owner)
+		goto dput_and_out;
+
+	retval = do_umount(nd.mnt, flags);
+	path_release(&nd);
+	goto out;
+dput_and_out:
+	path_release(&nd);
+out:
+	return retval;
+}
+
+/*
+ * The 2.0 compatible umount. No flags.
+ */
+
+/* Legacy (Linux 2.0) umount entry point: same as umount(2) with no flags. */
+asmlinkage long sys_oldumount(char * name)
+{
+	return sys_umount(name,0);
+}
+
+/* Permission check for bind mounts.  Currently only CAP_SYS_ADMIN is
+ * allowed; the #ifdef'd-out code sketches future unprivileged-mount
+ * rules (no symlinks, sticky-dir ownership, write permission). */
+static int mount_is_safe(struct nameidata *nd)
+{
+	if (capable(CAP_SYS_ADMIN))
+		return 0;
+	return -EPERM;
+#ifdef notyet
+	if (S_ISLNK(nd->dentry->d_inode->i_mode))
+		return -EPERM;
+	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
+		if (current->uid != nd->dentry->d_inode->i_uid)
+			return -EPERM;
+	}
+	if (permission(nd->dentry->d_inode, MAY_WRITE))
+		return -EPERM;
+	return 0;
+#endif
+}
+
+/* Will become static */
+/* Attach mnt at the mountpoint described by nd and enter it into the
+ * global vfsmntlist.  Fails with -EINVAL for MS_NOUSER filesystems,
+ * -ENOTDIR on a dir/non-dir mismatch, and -ENOENT if the mountpoint
+ * has meanwhile been deleted or unhashed. */
+int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
+{
+	if (mnt->mnt_sb->s_flags & MS_NOUSER)
+		return -EINVAL;
+
+	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
+	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
+		return -ENOTDIR;
+
+	down(&nd->dentry->d_inode->i_zombie);
+	if (IS_DEADDIR(nd->dentry->d_inode))
+		goto fail1;
+
+	spin_lock(&dcache_lock);
+	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
+		goto fail;
+
+	attach_mnt(mnt, nd);
+	/* append at the tail so /proc/mounts keeps mount order */
+	list_add(&mnt->mnt_list, vfsmntlist.prev);
+	spin_unlock(&dcache_lock);
+	up(&nd->dentry->d_inode->i_zombie);
+	mntget(mnt);
+	return 0;
+fail:
+	spin_unlock(&dcache_lock);
+fail1:
+	up(&nd->dentry->d_inode->i_zombie);
+	return -ENOENT;
+}
+
+/*
+ * do loopback mount.
+ */
+/* Bind mount (MS_BIND): clone the mount containing old_name and graft
+ * the clone at nd.  The clone shares the superblock with the original. */
+static int do_loopback(struct nameidata *nd, char *old_name)
+{
+	struct nameidata old_nd;
+	struct vfsmount *mnt = NULL;
+	int err;
+
+	err = mount_is_safe(nd);
+	if (err)
+		return err;
+
+	if (!old_name || !*old_name)
+		return -EINVAL;
+
+	if (path_init(old_name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &old_nd))
+		err = path_walk(old_name, &old_nd);
+	if (err)
+		return err;
+
+	down(&mount_sem);
+	err = -EINVAL;
+	if (check_mnt(nd->mnt)) {
+		err = -ENOMEM;
+		mnt = clone_mnt(old_nd.mnt, old_nd.dentry);
+	}
+	if (mnt) {
+		err = graft_tree(mnt, nd);
+		/* graft_tree took its own reference on success */
+		mntput(mnt);
+	}
+
+	up(&mount_sem);
+	path_release(&old_nd);
+	return err;
+}
+
+/*
+ * change filesystem flags. dir should be a physical root of filesystem.
+ * If you've mounted a non-root directory somewhere and want to do remount
+ * on it - tough luck.
+ */
+
+/* MS_REMOUNT: change superblock flags via do_remount_sb() and, on
+ * success, the per-mountpoint MNT_* flags.  nd must be the physical
+ * root of an attached filesystem. */
+static int do_remount(struct nameidata *nd,int flags,int mnt_flags,char *data)
+{
+	int err;
+	struct super_block * sb = nd->mnt->mnt_sb;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!check_mnt(nd->mnt))
+		return -EINVAL;
+
+	if (nd->dentry != nd->mnt->mnt_root)
+		return -EINVAL;
+
+	down_write(&sb->s_umount);
+	err = do_remount_sb(sb, flags, data);
+	if (!err)
+		nd->mnt->mnt_flags=mnt_flags;
+	up_write(&sb->s_umount);
+	return err;
+}
+
+/* Plain mount: get a new vfsmount from do_kern_mount() and graft it at
+ * nd, following any mounts that appeared there while we slept. */
+static int do_add_mount(struct nameidata *nd, char *type, int flags,
+			int mnt_flags, char *name, void *data)
+{
+	struct vfsmount *mnt = do_kern_mount(type, flags, name, data);
+	int err = PTR_ERR(mnt);
+
+	if (IS_ERR(mnt))
+		goto out;
+
+	down(&mount_sem);
+	/* Something was mounted here while we slept */
+	while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+		;
+	err = -EINVAL;
+	if (!check_mnt(nd->mnt))
+		goto unlock;
+
+	/* Refuse the same filesystem on the same mount point */
+	err = -EBUSY;
+	if (nd->mnt->mnt_sb == mnt->mnt_sb && nd->mnt->mnt_root == nd->dentry)
+		goto unlock;
+
+	mnt->mnt_flags = mnt_flags;
+	err = graft_tree(mnt, nd);
+unlock:
+	up(&mount_sem);
+	mntput(mnt);	/* drop our ref; graft_tree holds its own on success */
+out:
+	return err;
+}
+
+/* Copy up to one page of mount options from user space into a freshly
+ * allocated page, zero-padding the tail.  On success *where holds the
+ * page (0 when data was NULL); the caller owns and must free it.
+ * Returns 0 or a -errno. */
+static int copy_mount_options (const void *data, unsigned long *where)
+{
+	int i;
+	unsigned long page;
+	unsigned long size;
+
+	*where = 0;
+	if (!data)
+		return 0;
+
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	/* We only care that *some* data at the address the user
+	 * gave us is valid. Just in case, we'll zero
+	 * the remainder of the page.
+	 */
+	/* copy_from_user cannot cross TASK_SIZE ! */
+	size = TASK_SIZE - (unsigned long)data;
+	if (size > PAGE_SIZE)
+		size = PAGE_SIZE;
+
+	/* i = number of bytes actually copied; 0 means nothing was readable */
+	i = size - copy_from_user((void *)page, data, size);
+	if (!i) {
+		free_page(page);
+		return -EFAULT;
+	}
+	if (i != PAGE_SIZE)
+		memset((char *)page + i, 0, PAGE_SIZE - i);
+	*where = page;
+	return 0;
+}
+
+/*
+ * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
+ * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
+ *
+ * data is a (void *) that can point to any structure up to
+ * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
+ * information (or be NULL).
+ *
+ * Pre-0.97 versions of mount() didn't have a flags word.
+ * When the flags word was introduced its top half was required
+ * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
+ * Therefore, if this magic number is present, it carries no information
+ * and must be discarded.
+ */
+/* Common mount(2) worker: validates arguments, splits per-mountpoint
+ * MNT_* flags out of the MS_* word, resolves the mountpoint, and
+ * dispatches to remount / bind / new-mount handlers.  All string
+ * arguments are already in kernel space. */
+long do_mount(char * dev_name, char * dir_name, char *type_page,
+		  unsigned long flags, void *data_page)
+{
+	struct nameidata nd;
+	int retval = 0;
+	int mnt_flags = 0;
+
+	/* Discard magic */
+	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+		flags &= ~MS_MGC_MSK;
+
+	/* Basic sanity checks */
+
+	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
+		return -EINVAL;
+	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
+		return -EINVAL;
+
+	/* Separate the per-mountpoint flags */
+	if (flags & MS_NOSUID)
+		mnt_flags |= MNT_NOSUID;
+	if (flags & MS_NODEV)
+		mnt_flags |= MNT_NODEV;
+	if (flags & MS_NOEXEC)
+		mnt_flags |= MNT_NOEXEC;
+	flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV);
+
+	/* ... and get the mountpoint */
+	if (path_init(dir_name, LOOKUP_FOLLOW|LOOKUP_POSITIVE, &nd))
+		retval = path_walk(dir_name, &nd);
+	if (retval)
+		return retval;
+
+	if (flags & MS_REMOUNT)
+		retval = do_remount(&nd, flags&~MS_REMOUNT, mnt_flags,
+				    (char *)data_page);
+	else if (flags & MS_BIND)
+		retval = do_loopback(&nd, dev_name);
+	else
+		retval = do_add_mount(&nd, type_page, flags, mnt_flags,
+				      dev_name, data_page);
+	path_release(&nd);
+	return retval;
+}
+
+/* mount(2) entry point: copy user-space strings/options into kernel
+ * pages, call do_mount() under the BKL, then free everything via the
+ * goto-unwind chain (free_page(0) is a no-op for absent options). */
+asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
+			  unsigned long flags, void * data)
+{
+	int retval;
+	unsigned long data_page;
+	unsigned long type_page;
+	unsigned long dev_page;
+	char *dir_page;
+
+	retval = copy_mount_options (type, &type_page);
+	if (retval < 0)
+		return retval;
+
+	dir_page = getname(dir_name);
+	retval = PTR_ERR(dir_page);
+	if (IS_ERR(dir_page))
+		goto out1;
+
+	retval = copy_mount_options (dev_name, &dev_page);
+	if (retval < 0)
+		goto out2;
+
+	retval = copy_mount_options (data, &data_page);
+	if (retval < 0)
+		goto out3;
+
+	lock_kernel();
+	retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
+			  flags, (void*)data_page);
+	unlock_kernel();
+	free_page(data_page);
+
+out3:
+	free_page(dev_page);
+out2:
+	putname(dir_page);
+out1:
+	free_page(type_page);
+	return retval;
+}
+
+/* For every task whose root or cwd is exactly old_nd, repoint it at
+ * new_nd (used by pivot_root).  Each fs_struct is pinned with an extra
+ * reference so the task_lock can be dropped before the (blocking)
+ * set_fs_root/set_fs_pwd calls. */
+static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
+{
+	struct task_struct *p;
+	struct fs_struct *fs;
+
+	read_lock(&tasklist_lock);
+	for_each_task(p) {
+		task_lock(p);
+		fs = p->fs;
+		if (fs) {
+			atomic_inc(&fs->count);
+			task_unlock(p);
+			if (fs->root==old_nd->dentry&&fs->rootmnt==old_nd->mnt)
+				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
+			if (fs->pwd==old_nd->dentry&&fs->pwdmnt==old_nd->mnt)
+				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
+			put_fs_struct(fs);
+		} else
+			task_unlock(p);
+	}
+	read_unlock(&tasklist_lock);
+}
+
+/*
+ * Moves the current root to put_old, and sets root/cwd of all processes
+ * which had them on the old root to new_root.
+ *
+ * Note:
+ * - we don't move root/cwd if they are not at the root (reason: if something
+ * cared enough to change them, it's probably wrong to force them elsewhere)
+ * - it's okay to pick a root that isn't the root of a file system, e.g.
+ * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
+ * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
+ * first.
+ */
+
+/* pivot_root(2): make new_root the root of the namespace and re-attach
+ * the old root under put_old.  put_old must be reachable from new_root,
+ * and new_root must itself be a mountpoint.  (This body also repairs
+ * two `&current` expressions that had been mangled into HTML entities.)
+ */
+asmlinkage long sys_pivot_root(const char *new_root, const char *put_old)
+{
+	struct vfsmount *tmp;
+	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
+	char *name;
+	int error;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	lock_kernel();
+
+	name = getname(new_root);
+	error = PTR_ERR(name);
+	if (IS_ERR(name))
+		goto out0;
+	error = 0;
+	if (path_init(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd))
+		error = path_walk(name, &new_nd);
+	putname(name);
+	if (error)
+		goto out0;
+	error = -EINVAL;
+	if (!check_mnt(new_nd.mnt))
+		goto out1;
+
+	name = getname(put_old);
+	error = PTR_ERR(name);
+	if (IS_ERR(name))
+		goto out1;
+	error = 0;
+	if (path_init(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd))
+		error = path_walk(name, &old_nd);
+	putname(name);
+	if (error)
+		goto out1;
+
+	/* Pin this task's current root; it is the mount we will move. */
+	read_lock(&current->fs->lock);
+	user_nd.mnt = mntget(current->fs->rootmnt);
+	user_nd.dentry = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+	down(&mount_sem);
+	down(&old_nd.dentry->d_inode->i_zombie);
+	error = -EINVAL;
+	if (!check_mnt(user_nd.mnt))
+		goto out2;
+	error = -ENOENT;
+	if (IS_DEADDIR(new_nd.dentry->d_inode))
+		goto out2;
+	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
+		goto out2;
+	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
+		goto out2;
+	error = -EBUSY;
+	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
+		goto out2; /* loop */
+	error = -EINVAL;
+	if (user_nd.mnt->mnt_root != user_nd.dentry)
+		goto out2;
+	if (new_nd.mnt->mnt_root != new_nd.dentry)
+		goto out2; /* not a mountpoint */
+	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
+	spin_lock(&dcache_lock);
+	if (tmp != new_nd.mnt) {
+		for (;;) {
+			if (tmp->mnt_parent == tmp)
+				goto out3;
+			if (tmp->mnt_parent == new_nd.mnt)
+				break;
+			tmp = tmp->mnt_parent;
+		}
+		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
+			goto out3;
+	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
+		goto out3;
+	/* Swap the two mounts: old root goes under put_old, new root
+	 * takes the old root's attachment point. */
+	detach_mnt(new_nd.mnt, &parent_nd);
+	detach_mnt(user_nd.mnt, &root_parent);
+	attach_mnt(user_nd.mnt, &old_nd);
+	attach_mnt(new_nd.mnt, &root_parent);
+	spin_unlock(&dcache_lock);
+	chroot_fs_refs(&user_nd, &new_nd);
+	error = 0;
+	path_release(&root_parent);
+	path_release(&parent_nd);
+out2:
+	up(&old_nd.dentry->d_inode->i_zombie);
+	up(&mount_sem);
+	path_release(&user_nd);
+	path_release(&old_nd);
+out1:
+	path_release(&new_nd);
+out0:
+	unlock_kernel();
+	return error;
+out3:
+	spin_unlock(&dcache_lock);
+	goto out2;
+}
+
+/*
+ * Absolutely minimal fake fs - only empty root directory and nothing else.
+ * In 2.5 we'll use ramfs or tmpfs, but for now it's all we need - just
+ * something to go with root vfsmount.
+ */
+/* Every lookup in rootfs yields a negative dentry: the fs is always empty. */
+static struct dentry *rootfs_lookup(struct inode *dir, struct dentry *dentry)
+{
+	d_add(dentry, NULL);
+	return NULL;
+}
+static struct file_operations rootfs_dir_operations = {
+	read:		generic_read_dir,
+	readdir:	dcache_readdir,
+};
+static struct inode_operations rootfs_dir_inode_operations = {
+	lookup:		rootfs_lookup,
+};
+/* Build the one-inode superblock: a read-only (0555) empty root directory. */
+static struct super_block *rootfs_read_super(struct super_block * sb, void * data, int silent)
+{
+	struct inode * inode;
+	struct dentry * root;
+	static struct super_operations s_ops = {};
+	sb->s_op = &s_ops;
+	inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+	inode->i_mode = S_IFDIR|0555;
+	inode->i_uid = inode->i_gid = 0;
+	inode->i_op = &rootfs_dir_inode_operations;
+	inode->i_fop = &rootfs_dir_operations;
+	root = d_alloc_root(inode);
+	if (!root) {
+		iput(inode);
+		return NULL;
+	}
+	sb->s_root = root;
+	return sb;
+}
+/* FS_NOMOUNT: rootfs cannot be mounted from user space. */
+static DECLARE_FSTYPE(root_fs_type, "rootfs", rootfs_read_super, FS_NOMOUNT);
+
+/* Boot-time setup: register rootfs and mount it as the anchor of the
+ * mount tree (root_vfsmnt).  Failure here is fatal. */
+static void __init init_mount_tree(void)
+{
+	register_filesystem(&root_fs_type);
+	root_vfsmnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
+	if (IS_ERR(root_vfsmnt))
+		panic("can't allocate root vfsmount");
+}
+
+/* Boot-time initialization of the mount machinery: create the vfsmount
+ * slab cache, size and allocate the power-of-two mount hash table from
+ * total memory, then build the initial rootfs mount tree. */
+void __init mnt_init(unsigned long mempages)
+{
+	struct list_head *d;
+	/* Must be signed: the fallback loop below relies on "--order >= 0"
+	 * terminating.  With an unsigned type that test is always true and
+	 * a failing allocation would retry (with a huge order) forever. */
+	long order;
+	unsigned int nr_hash;
+	int i;
+
+	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
+			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!mnt_cache)
+		panic("Cannot create vfsmount cache");
+
+	/* Budget roughly one list_head per 64KB of memory. */
+	mempages >>= (16 - PAGE_SHIFT);
+	mempages *= sizeof(struct list_head);
+	for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
+		;
+
+	/* Try progressively smaller allocations until one succeeds. */
+	do {
+		mount_hashtable = (struct list_head *)
+			__get_free_pages(GFP_ATOMIC, order);
+	} while (mount_hashtable == NULL && --order >= 0);
+
+	if (!mount_hashtable)
+		panic("Failed to allocate mount hash table\n");
+
+	/*
+	 * Find the power-of-two list-heads that can fit into the allocation..
+	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
+	 * a power-of-two.
+	 */
+	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct list_head);
+	hash_bits = 0;
+	do {
+		hash_bits++;
+	} while ((nr_hash >> hash_bits) != 0);
+	hash_bits--;
+
+	/*
+	 * Re-calculate the actual number of entries and the mask
+	 * from the number of bits we can fit.
+	 */
+	nr_hash = 1UL << hash_bits;
+	hash_mask = nr_hash-1;
+
+	printk("Mount-cache hash table entries: %d (order: %ld, %ld bytes)\n",
+			nr_hash, order, (PAGE_SIZE << order));
+
+	/* And initialize the newly allocated array */
+	d = mount_hashtable;
+	i = nr_hash;
+	do {
+		INIT_LIST_HEAD(d);
+		d++;
+		i--;
+	} while (i);
+	init_mount_tree();
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+/* Initrd hand-over: unmount devfs if present, detach the initrd root,
+ * mount the real root (new_root_dev), then re-attach the old initrd
+ * root under put_old (renamed /dev/root.old) or, failing that, try to
+ * free the ramdisk outright.  (This body also repairs two `&current`
+ * expressions that had been mangled into HTML entities.) */
+int __init change_root(kdev_t new_root_dev,const char *put_old)
+{
+	struct vfsmount *old_rootmnt;
+	struct nameidata devfs_nd, nd;
+	struct nameidata parent_nd;
+	char *new_devname = kmalloc(strlen("/dev/root.old")+1, GFP_KERNEL);
+	int error = 0;
+
+	if (new_devname)
+		strcpy(new_devname, "/dev/root.old");
+
+	read_lock(&current->fs->lock);
+	old_rootmnt = mntget(current->fs->rootmnt);
+	read_unlock(&current->fs->lock);
+	/* First unmount devfs if mounted */
+	if (path_init("/dev", LOOKUP_FOLLOW|LOOKUP_POSITIVE, &devfs_nd))
+		error = path_walk("/dev", &devfs_nd);
+	if (!error) {
+		if (devfs_nd.mnt->mnt_sb->s_magic == DEVFS_SUPER_MAGIC &&
+		    devfs_nd.dentry == devfs_nd.mnt->mnt_root) {
+			do_umount(devfs_nd.mnt, 0);
+		}
+		path_release(&devfs_nd);
+	}
+	spin_lock(&dcache_lock);
+	detach_mnt(old_rootmnt, &parent_nd);
+	spin_unlock(&dcache_lock);
+	ROOT_DEV = new_root_dev;
+	mount_root();
+#if 1
+	shrink_dcache();
+	printk("change_root: old root has d_count=%d\n",
+	       atomic_read(&old_rootmnt->mnt_root->d_count));
+#endif
+	mount_devfs_fs ();
+	/*
+	 * Get the new mount directory
+	 */
+	error = 0;
+	if (path_init(put_old, LOOKUP_FOLLOW|LOOKUP_POSITIVE|LOOKUP_DIRECTORY, &nd))
+		error = path_walk(put_old, &nd);
+	if (error) {
+		/* put_old is unusable: try to drop the initrd entirely */
+		int blivet;
+		struct block_device *ramdisk = old_rootmnt->mnt_sb->s_bdev;
+
+		atomic_inc(&ramdisk->bd_count);
+		blivet = blkdev_get(ramdisk, FMODE_READ, 0, BDEV_FS);
+		printk(KERN_NOTICE "Trying to unmount old root ... ");
+		if (!blivet) {
+			spin_lock(&dcache_lock);
+			list_del(&old_rootmnt->mnt_list);
+			spin_unlock(&dcache_lock);
+			/* two refs: ours (mntget above) + the mount table's */
+			mntput(old_rootmnt);
+			mntput(old_rootmnt);
+			blivet = ioctl_by_bdev(ramdisk, BLKFLSBUF, 0);
+			path_release(&parent_nd);
+			blkdev_put(ramdisk, BDEV_FS);
+		}
+		if (blivet) {
+			printk(KERN_ERR "error %d\n", blivet);
+		} else {
+			printk("okay\n");
+			error = 0;
+		}
+		kfree(new_devname);
+		return error;
+	}
+
+	spin_lock(&dcache_lock);
+	attach_mnt(old_rootmnt, &nd);
+	if (new_devname) {
+		if (old_rootmnt->mnt_devname)
+			kfree(old_rootmnt->mnt_devname);
+		old_rootmnt->mnt_devname = new_devname;
+	}
+	spin_unlock(&dcache_lock);
+
+	/* put the old stuff */
+	path_release(&parent_nd);
+	mntput(old_rootmnt);
+	path_release(&nd);
+	return 0;
+}
+
+#endif
#define __NO_VERSION__
#include <linux/module.h>
-/*
- * We use a semaphore to synchronize all mount/umount
- * activity - imagine the mess if we have a race between
- * unmounting a filesystem and re-mounting it (or something
- * else).
- */
-static DECLARE_MUTEX(mount_sem);
-
extern void wait_for_keypress(void);
extern int root_mountflags;
-static int do_remount_sb(struct super_block *sb, int flags, char * data);
+int do_remount_sb(struct super_block *sb, int flags, char * data);
/* this is initialized in init/main.c */
kdev_t ROOT_DEV;
return fs;
}
-static LIST_HEAD(vfsmntlist);
-static struct vfsmount *root_vfsmnt;
-
-static struct list_head *mount_hashtable;
-static int hash_mask, hash_bits;
-static kmem_cache_t *mnt_cache;
-
-static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
-{
- unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
- tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
- tmp = tmp + (tmp >> hash_bits);
- return tmp & hash_mask;
-}
-
-struct vfsmount *alloc_vfsmnt(void)
-{
- struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
- if (mnt) {
- memset(mnt, 0, sizeof(struct vfsmount));
- atomic_set(&mnt->mnt_count,1);
- INIT_LIST_HEAD(&mnt->mnt_hash);
- INIT_LIST_HEAD(&mnt->mnt_child);
- INIT_LIST_HEAD(&mnt->mnt_mounts);
- INIT_LIST_HEAD(&mnt->mnt_list);
- mnt->mnt_owner = current->uid;
- }
- return mnt;
-}
-
-struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
-{
- struct list_head * head = mount_hashtable + hash(mnt, dentry);
- struct list_head * tmp = head;
- struct vfsmount *p;
-
- for (;;) {
- tmp = tmp->next;
- p = NULL;
- if (tmp == head)
- break;
- p = list_entry(tmp, struct vfsmount, mnt_hash);
- if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry)
- break;
- }
- return p;
-}
+struct vfsmount *alloc_vfsmnt(void);
+void free_vfsmnt(struct vfsmount *mnt);
+void umount_tree(struct vfsmount *mnt);
-static int check_mnt(struct vfsmount *mnt)
-{
- spin_lock(&dcache_lock);
- while (mnt->mnt_parent != mnt)
- mnt = mnt->mnt_parent;
- spin_unlock(&dcache_lock);
- return mnt == root_vfsmnt;
-}
-
-static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
-{
- old_nd->dentry = mnt->mnt_mountpoint;
- old_nd->mnt = mnt->mnt_parent;
- mnt->mnt_parent = mnt;
- mnt->mnt_mountpoint = mnt->mnt_root;
- list_del_init(&mnt->mnt_child);
- list_del_init(&mnt->mnt_hash);
- old_nd->dentry->d_mounted--;
-}
-
-static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
-{
- mnt->mnt_parent = mntget(nd->mnt);
- mnt->mnt_mountpoint = dget(nd->dentry);
- list_add(&mnt->mnt_hash, mount_hashtable+hash(nd->mnt, nd->dentry));
- list_add(&mnt->mnt_child, &nd->mnt->mnt_mounts);
- nd->dentry->d_mounted++;
-}
-
-static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
-{
- struct list_head *next = p->mnt_mounts.next;
- if (next == &p->mnt_mounts) {
- while (1) {
- if (p == root)
- return NULL;
- next = p->mnt_child.next;
- if (next != &p->mnt_parent->mnt_mounts)
- break;
- p = p->mnt_parent;
- }
- }
- return list_entry(next, struct vfsmount, mnt_child);
-}
-
-static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root)
-{
- char *name = old->mnt_devname;
- struct vfsmount *mnt = alloc_vfsmnt();
- struct super_block *sb = old->mnt_sb;
-
- if (!mnt)
- goto out;
-
- if (name) {
- mnt->mnt_devname = kmalloc(strlen(name)+1, GFP_KERNEL);
- if (mnt->mnt_devname)
- strcpy(mnt->mnt_devname, name);
- }
- mnt->mnt_sb = sb;
- mnt->mnt_root = dget(root);
- mnt->mnt_mountpoint = mnt->mnt_root;
- mnt->mnt_parent = mnt;
- mnt->mnt_flags = old->mnt_flags;
-
- atomic_inc(&sb->s_active);
-out:
- return mnt;
-}
-
-static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
-{
- if (mnt->mnt_sb->s_flags & MS_NOUSER)
- return -EINVAL;
-
- if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
- S_ISDIR(mnt->mnt_root->d_inode->i_mode))
- return -ENOTDIR;
-
- down(&nd->dentry->d_inode->i_zombie);
- if (IS_DEADDIR(nd->dentry->d_inode))
- goto fail1;
-
- spin_lock(&dcache_lock);
- if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
- goto fail;
-
- attach_mnt(mnt, nd);
- list_add(&mnt->mnt_list, vfsmntlist.prev);
- spin_unlock(&dcache_lock);
- up(&nd->dentry->d_inode->i_zombie);
- mntget(mnt);
- return 0;
-fail:
- spin_unlock(&dcache_lock);
-fail1:
- up(&nd->dentry->d_inode->i_zombie);
- return -ENOENT;
-}
+/* Will go away */
+extern struct vfsmount *root_vfsmnt;
+extern int graft_tree(struct vfsmount *mnt, struct nameidata *nd);
static void kill_super(struct super_block *);
struct super_block *sb = mnt->mnt_sb;
dput(mnt->mnt_root);
- if (mnt->mnt_devname)
- kfree(mnt->mnt_devname);
- kmem_cache_free(mnt_cache, mnt);
+ free_vfsmnt(mnt);
kill_super(sb);
}
-/* Use octal escapes, like mount does, for embedded spaces etc. */
-static unsigned char need_escaping[] = { ' ', '\t', '\n', '\\' };
-
-static int
-mangle(const unsigned char *s, char *buf, int len) {
- char *sp;
- int n;
-
- sp = buf;
- while(*s && sp-buf < len-3) {
- for (n = 0; n < sizeof(need_escaping); n++) {
- if (*s == need_escaping[n]) {
- *sp++ = '\\';
- *sp++ = '0' + ((*s & 0300) >> 6);
- *sp++ = '0' + ((*s & 070) >> 3);
- *sp++ = '0' + (*s & 07);
- goto next;
- }
- }
- *sp++ = *s;
- next:
- s++;
- }
- return sp - buf; /* no trailing NUL */
-}
-
-static struct proc_fs_info {
- int flag;
- char *str;
-} fs_info[] = {
- { MS_SYNCHRONOUS, ",sync" },
- { MS_MANDLOCK, ",mand" },
- { MS_NOATIME, ",noatime" },
- { MS_NODIRATIME, ",nodiratime" },
- { 0, NULL }
-};
-
-static struct proc_fs_info mnt_info[] = {
- { MNT_NOSUID, ",nosuid" },
- { MNT_NODEV, ",nodev" },
- { MNT_NOEXEC, ",noexec" },
- { 0, NULL }
-};
-
-static struct proc_nfs_info {
- int flag;
- char *str;
- char *nostr;
-} nfs_info[] = {
- { NFS_MOUNT_SOFT, ",soft", ",hard" },
- { NFS_MOUNT_INTR, ",intr", "" },
- { NFS_MOUNT_POSIX, ",posix", "" },
- { NFS_MOUNT_TCP, ",tcp", ",udp" },
- { NFS_MOUNT_NOCTO, ",nocto", "" },
- { NFS_MOUNT_NOAC, ",noac", "" },
- { NFS_MOUNT_NONLM, ",nolock", ",lock" },
- { NFS_MOUNT_BROKEN_SUID, ",broken_suid", "" },
- { 0, NULL, NULL }
-};
-
-int get_filesystem_info( char *buf )
-{
- struct list_head *p;
- struct proc_fs_info *fs_infop;
- struct proc_nfs_info *nfs_infop;
- struct nfs_server *nfss;
- int len, prevlen;
- char *path, *buffer = (char *) __get_free_page(GFP_KERNEL);
-
- if (!buffer) return 0;
- len = prevlen = 0;
-
-#define FREEROOM ((int)PAGE_SIZE-200-len)
-#define MANGLE(s) len += mangle((s), buf+len, FREEROOM);
-
- for (p = vfsmntlist.next; p != &vfsmntlist; p = p->next) {
- struct vfsmount *tmp = list_entry(p, struct vfsmount, mnt_list);
- path = d_path(tmp->mnt_root, tmp, buffer, PAGE_SIZE);
- if (!path)
- continue;
- MANGLE(tmp->mnt_devname ? tmp->mnt_devname : "none");
- buf[len++] = ' ';
- MANGLE(path);
- buf[len++] = ' ';
- MANGLE(tmp->mnt_sb->s_type->name);
- len += sprintf(buf+len, " %s",
- tmp->mnt_sb->s_flags & MS_RDONLY ? "ro" : "rw");
- for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
- if (tmp->mnt_sb->s_flags & fs_infop->flag)
- MANGLE(fs_infop->str);
- }
- for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
- if (tmp->mnt_flags & fs_infop->flag)
- MANGLE(fs_infop->str);
- }
- if (!strcmp("nfs", tmp->mnt_sb->s_type->name)) {
- nfss = &tmp->mnt_sb->u.nfs_sb.s_server;
- len += sprintf(buf+len, ",v%d", nfss->rpc_ops->version);
-
- len += sprintf(buf+len, ",rsize=%d", nfss->rsize);
-
- len += sprintf(buf+len, ",wsize=%d", nfss->wsize);
-#if 0
- if (nfss->timeo != 7*HZ/10) {
- len += sprintf(buf+len, ",timeo=%d",
- nfss->timeo*10/HZ);
- }
- if (nfss->retrans != 3) {
- len += sprintf(buf+len, ",retrans=%d",
- nfss->retrans);
- }
-#endif
- if (nfss->acregmin != 3*HZ) {
- len += sprintf(buf+len, ",acregmin=%d",
- nfss->acregmin/HZ);
- }
- if (nfss->acregmax != 60*HZ) {
- len += sprintf(buf+len, ",acregmax=%d",
- nfss->acregmax/HZ);
- }
- if (nfss->acdirmin != 30*HZ) {
- len += sprintf(buf+len, ",acdirmin=%d",
- nfss->acdirmin/HZ);
- }
- if (nfss->acdirmax != 60*HZ) {
- len += sprintf(buf+len, ",acdirmax=%d",
- nfss->acdirmax/HZ);
- }
- for (nfs_infop = nfs_info; nfs_infop->flag; nfs_infop++) {
- char *str;
- if (nfss->flags & nfs_infop->flag)
- str = nfs_infop->str;
- else
- str = nfs_infop->nostr;
- MANGLE(str);
- }
- len += sprintf(buf+len, ",addr=");
- MANGLE(nfss->hostname);
- }
- len += sprintf(buf + len, " 0 0\n");
- if (FREEROOM <= 3) {
- len = prevlen;
- len += sprintf(buf+len, "# truncated\n");
- break;
- }
- prevlen = len;
- }
-
- free_page((unsigned long) buffer);
- return len;
-#undef MANGLE
-#undef FREEROOM
-}
-
static inline void __put_super(struct super_block *sb)
{
spin_lock(&sb_lock);
* is used as a reference - file system type and the device are ignored.
*/
-static int do_remount_sb(struct super_block *sb, int flags, char *data)
+int do_remount_sb(struct super_block *sb, int flags, char *data)
{
int retval;
return 0;
}
-/*
- * Doesn't take quota and stuff into account. IOW, in some cases it will
- * give false negatives. The main reason why it's here is that we need
- * a non-destructive way to look for easily umountable filesystems.
- */
-int may_umount(struct vfsmount *mnt)
-{
- if (atomic_read(&mnt->mnt_count) > 2)
- return -EBUSY;
- return 0;
-}
-
-void umount_tree(struct vfsmount *mnt)
-{
- struct vfsmount *p;
- LIST_HEAD(kill);
-
- if (list_empty(&mnt->mnt_list))
- return;
-
- for (p = mnt; p; p = next_mnt(p, mnt)) {
- list_del(&p->mnt_list);
- list_add(&p->mnt_list, &kill);
- }
-
- while (!list_empty(&kill)) {
- mnt = list_entry(kill.next, struct vfsmount, mnt_list);
- list_del_init(&mnt->mnt_list);
- if (mnt->mnt_parent == mnt) {
- spin_unlock(&dcache_lock);
- } else {
- struct nameidata old_nd;
- detach_mnt(mnt, &old_nd);
- spin_unlock(&dcache_lock);
- path_release(&old_nd);
- }
- mntput(mnt);
- spin_lock(&dcache_lock);
- }
-}
-
-static int do_umount(struct vfsmount *mnt, int flags)
-{
- struct super_block * sb = mnt->mnt_sb;
- int retval = 0;
-
- /*
- * If we may have to abort operations to get out of this
- * mount, and they will themselves hold resources we must
- * allow the fs to do things. In the Unix tradition of
- * 'Gee thats tricky lets do it in userspace' the umount_begin
- * might fail to complete on the first run through as other tasks
- * must return, and the like. Thats for the mount program to worry
- * about for the moment.
- */
-
- if( (flags&MNT_FORCE) && sb->s_op->umount_begin)
- sb->s_op->umount_begin(sb);
-
- /*
- * No sense to grab the lock for this test, but test itself looks
- * somewhat bogus. Suggestions for better replacement?
- * Ho-hum... In principle, we might treat that as umount + switch
- * to rootfs. GC would eventually take care of the old vfsmount.
- * The problem being: we have to implement rootfs and GC for that ;-)
- * Actually it makes sense, especially if rootfs would contain a
- * /reboot - static binary that would close all descriptors and
- * call reboot(9). Then init(8) could umount root and exec /reboot.
- */
- if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
- int retval = 0;
- /*
- * Special case for "unmounting" root ...
- * we just try to remount it readonly.
- */
- if (!(sb->s_flags & MS_RDONLY)) {
- down_write(&sb->s_umount);
- retval = do_remount_sb(sb, MS_RDONLY, 0);
- up_write(&sb->s_umount);
- }
- return retval;
- }
-
- spin_lock(&dcache_lock);
-
- if (atomic_read(&sb->s_active) == 1) {
- /* last instance - try to be smart */
- spin_unlock(&dcache_lock);
- DQUOT_OFF(sb);
- acct_auto_close(sb->s_dev);
- spin_lock(&dcache_lock);
- }
- retval = -EBUSY;
- if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
- umount_tree(mnt);
- retval = 0;
- }
- spin_unlock(&dcache_lock);
- return retval;
-}
-
-/*
- * Now umount can handle mount points as well as block devices.
- * This is important for filesystems which use unnamed block devices.
- *
- * We now support a flag for forced unmount like the other 'big iron'
- * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
- */
-
-asmlinkage long sys_umount(char * name, int flags)
-{
- struct nameidata nd;
- char *kname;
- int retval;
-
- kname = getname(name);
- retval = PTR_ERR(kname);
- if (IS_ERR(kname))
- goto out;
- retval = 0;
- if (path_init(kname, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd))
- retval = path_walk(kname, &nd);
- putname(kname);
- if (retval)
- goto out;
- retval = -EINVAL;
- if (nd.dentry != nd.mnt->mnt_root)
- goto dput_and_out;
- if (!check_mnt(nd.mnt))
- goto dput_and_out;
-
- retval = -EPERM;
- if (!capable(CAP_SYS_ADMIN) && current->uid!=nd.mnt->mnt_owner)
- goto dput_and_out;
-
- down(&mount_sem);
- lock_kernel();
- retval = do_umount(nd.mnt, flags);
- unlock_kernel();
- path_release(&nd);
- up(&mount_sem);
- goto out;
-dput_and_out:
- path_release(&nd);
-out:
- return retval;
-}
-
-/*
- * The 2.0 compatible umount. No flags.
- */
-
-asmlinkage long sys_oldumount(char * name)
-{
- return sys_umount(name,0);
-}
-
-static int mount_is_safe(struct nameidata *nd)
-{
- if (capable(CAP_SYS_ADMIN))
- return 0;
- return -EPERM;
-#ifdef notyet
- if (S_ISLNK(nd->dentry->d_inode->i_mode))
- return -EPERM;
- if (nd->dentry->d_inode->i_mode & S_ISVTX) {
- if (current->uid != nd->dentry->d_inode->i_uid)
- return -EPERM;
- }
- if (permission(nd->dentry->d_inode, MAY_WRITE))
- return -EPERM;
- return 0;
-#endif
-}
-
-/*
- * do loopback mount.
- */
-static int do_loopback(struct nameidata *nd, char *old_name)
-{
- struct nameidata old_nd;
- struct vfsmount *mnt = NULL;
- int err;
-
- err = mount_is_safe(nd);
- if (err)
- return err;
-
- if (!old_name || !*old_name)
- return -EINVAL;
-
- if (path_init(old_name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &old_nd))
- err = path_walk(old_name, &old_nd);
- if (err)
- return err;
-
- down(&mount_sem);
- err = -EINVAL;
- if (check_mnt(nd->mnt)) {
- err = -ENOMEM;
- mnt = clone_mnt(old_nd.mnt, old_nd.dentry);
- }
- if (mnt) {
- err = graft_tree(mnt, nd);
- mntput(mnt);
- }
-
- up(&mount_sem);
- path_release(&old_nd);
- return err;
-}
-
-/*
- * change filesystem flags. dir should be a physical root of filesystem.
- * If you've mounted a non-root directory somewhere and want to do remount
- * on it - tough luck.
- */
-
-static int do_remount(struct nameidata *nd,int flags,int mnt_flags,char *data)
-{
- int err;
- struct super_block * sb = nd->mnt->mnt_sb;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if (!check_mnt(nd->mnt))
- return -EINVAL;
-
- if (nd->dentry != nd->mnt->mnt_root)
- return -EINVAL;
-
- down_write(&sb->s_umount);
- err = do_remount_sb(sb, flags, data);
- if (!err)
- nd->mnt->mnt_flags=mnt_flags;
- up_write(&sb->s_umount);
- return err;
-}
-
struct vfsmount *do_kern_mount(char *type, int flags, char *name, void *data)
{
struct file_system_type * fstype;
sb = get_sb_nodev(fstype, flags, data);
if (IS_ERR(sb)) {
- if (mnt->mnt_devname)
- kfree(mnt->mnt_devname);
- kmem_cache_free(mnt_cache, mnt);
+ free_vfsmnt(mnt);
mnt = (struct vfsmount *)sb;
goto fs_out;
}
return do_kern_mount((char *)type->name, 0, (char *)type->name, NULL);
}
-static int do_add_mount(struct nameidata *nd, char *type, int flags,
- int mnt_flags, char *name, void *data)
-{
- struct vfsmount *mnt = do_kern_mount(type, flags, name, data);
- int err = PTR_ERR(mnt);
-
- if (IS_ERR(mnt))
- goto out;
-
- down(&mount_sem);
- /* Something was mounted here while we slept */
- while(d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
- ;
- err = -EINVAL;
- if (!check_mnt(nd->mnt))
- goto unlock;
-
- /* Refuse the same filesystem on the same mount point */
- err = -EBUSY;
- if (nd->mnt->mnt_sb == mnt->mnt_sb && nd->mnt->mnt_root == nd->dentry)
- goto unlock;
-
- mnt->mnt_flags = mnt_flags;
- err = graft_tree(mnt, nd);
-unlock:
- up(&mount_sem);
- mntput(mnt);
-out:
- return err;
-}
-
-static int copy_mount_options (const void *data, unsigned long *where)
-{
- int i;
- unsigned long page;
- unsigned long size;
-
- *where = 0;
- if (!data)
- return 0;
-
- if (!(page = __get_free_page(GFP_KERNEL)))
- return -ENOMEM;
-
- /* We only care that *some* data at the address the user
- * gave us is valid. Just in case, we'll zero
- * the remainder of the page.
- */
- /* copy_from_user cannot cross TASK_SIZE ! */
- size = TASK_SIZE - (unsigned long)data;
- if (size > PAGE_SIZE)
- size = PAGE_SIZE;
-
- i = size - copy_from_user((void *)page, data, size);
- if (!i) {
- free_page(page);
- return -EFAULT;
- }
- if (i != PAGE_SIZE)
- memset((char *)page + i, 0, PAGE_SIZE - i);
- *where = page;
- return 0;
-}
-
-/*
- * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
- * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
- *
- * data is a (void *) that can point to any structure up to
- * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
- * information (or be NULL).
- *
- * Pre-0.97 versions of mount() didn't have a flags word.
- * When the flags word was introduced its top half was required
- * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
- * Therefore, if this magic number is present, it carries no information
- * and must be discarded.
- */
-long do_mount(char * dev_name, char * dir_name, char *type_page,
- unsigned long flags, void *data_page)
-{
- struct nameidata nd;
- int retval = 0;
- int mnt_flags = 0;
-
- /* Discard magic */
- if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
- flags &= ~MS_MGC_MSK;
-
- /* Basic sanity checks */
-
- if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
- return -EINVAL;
- if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
- return -EINVAL;
-
- /* Separate the per-mountpoint flags */
- if (flags & MS_NOSUID)
- mnt_flags |= MNT_NOSUID;
- if (flags & MS_NODEV)
- mnt_flags |= MNT_NODEV;
- if (flags & MS_NOEXEC)
- mnt_flags |= MNT_NOEXEC;
- flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV);
-
- /* ... and get the mountpoint */
- if (path_init(dir_name, LOOKUP_FOLLOW|LOOKUP_POSITIVE, &nd))
- retval = path_walk(dir_name, &nd);
- if (retval)
- return retval;
-
- if (flags & MS_REMOUNT)
- retval = do_remount(&nd, flags&~MS_REMOUNT, mnt_flags,
- (char *)data_page);
- else if (flags & MS_BIND)
- retval = do_loopback(&nd, dev_name);
- else
- retval = do_add_mount(&nd, type_page, flags, mnt_flags,
- dev_name, data_page);
- path_release(&nd);
- return retval;
-}
-
-asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
- unsigned long flags, void * data)
-{
- int retval;
- unsigned long data_page;
- unsigned long type_page;
- unsigned long dev_page;
- char *dir_page;
-
- retval = copy_mount_options (type, &type_page);
- if (retval < 0)
- return retval;
-
- dir_page = getname(dir_name);
- retval = PTR_ERR(dir_page);
- if (IS_ERR(dir_page))
- goto out1;
-
- retval = copy_mount_options (dev_name, &dev_page);
- if (retval < 0)
- goto out2;
-
- retval = copy_mount_options (data, &data_page);
- if (retval < 0)
- goto out3;
-
- lock_kernel();
- retval = do_mount((char*)dev_page, dir_page, (char*)type_page,
- flags, (void*)data_page);
- unlock_kernel();
- free_page(data_page);
-
-out3:
- free_page(dev_page);
-out2:
- putname(dir_page);
-out1:
- free_page(type_page);
- return retval;
-}
-
void __init mount_root(void)
{
struct nameidata root_nd;
return;
}
}
-
-static void chroot_fs_refs(struct dentry *old_root,
- struct vfsmount *old_rootmnt,
- struct dentry *new_root,
- struct vfsmount *new_rootmnt)
-{
- struct task_struct *p;
- struct fs_struct *fs;
-
- read_lock(&tasklist_lock);
- for_each_task(p) {
- task_lock(p);
- fs = p->fs;
- if (fs) {
- atomic_inc(&fs->count);
- task_unlock(p);
- if (fs->root==old_root && fs->rootmnt==old_rootmnt)
- set_fs_root(fs, new_rootmnt, new_root);
- if (fs->pwd==old_root && fs->pwdmnt==old_rootmnt)
- set_fs_pwd(fs, new_rootmnt, new_root);
- put_fs_struct(fs);
- } else
- task_unlock(p);
- }
- read_unlock(&tasklist_lock);
-}
-
-/*
- * Moves the current root to put_root, and sets root/cwd of all processes
- * which had them on the old root to new_root.
- *
- * Note:
- * - we don't move root/cwd if they are not at the root (reason: if something
- * cared enough to change them, it's probably wrong to force them elsewhere)
- * - it's okay to pick a root that isn't the root of a file system, e.g.
- * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
- * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
- * first.
- */
-
-asmlinkage long sys_pivot_root(const char *new_root, const char *put_old)
-{
- struct vfsmount *tmp;
- struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
- char *name;
- int error;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- lock_kernel();
-
- name = getname(new_root);
- error = PTR_ERR(name);
- if (IS_ERR(name))
- goto out0;
- error = 0;
- if (path_init(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd))
- error = path_walk(name, &new_nd);
- putname(name);
- if (error)
- goto out0;
- error = -EINVAL;
- if (!check_mnt(new_nd.mnt))
- goto out1;
-
- name = getname(put_old);
- error = PTR_ERR(name);
- if (IS_ERR(name))
- goto out1;
- error = 0;
- if (path_init(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd))
- error = path_walk(name, &old_nd);
- putname(name);
- if (error)
- goto out1;
-
- read_lock(&current->fs->lock);
- user_nd.mnt = mntget(current->fs->rootmnt);
- user_nd.dentry = dget(current->fs->root);
- read_unlock(&current->fs->lock);
- down(&mount_sem);
- down(&old_nd.dentry->d_inode->i_zombie);
- error = -EINVAL;
- if (!check_mnt(user_nd.mnt))
- goto out2;
- error = -ENOENT;
- if (IS_DEADDIR(new_nd.dentry->d_inode))
- goto out2;
- if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
- goto out2;
- if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
- goto out2;
- error = -EBUSY;
- if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
- goto out2; /* loop */
- error = -EINVAL;
- if (user_nd.mnt->mnt_root != user_nd.dentry)
- goto out2;
- if (new_nd.mnt->mnt_root != new_nd.dentry)
- goto out2; /* not a mountpoint */
- tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
- spin_lock(&dcache_lock);
- if (tmp != new_nd.mnt) {
- for (;;) {
- if (tmp->mnt_parent == tmp)
- goto out3;
- if (tmp->mnt_parent == new_nd.mnt)
- break;
- tmp = tmp->mnt_parent;
- }
- if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
- goto out3;
- } else if (!is_subdir(old_nd.dentry, new_nd.dentry))
- goto out3;
- detach_mnt(new_nd.mnt, &parent_nd);
- detach_mnt(user_nd.mnt, &root_parent);
- attach_mnt(user_nd.mnt, &old_nd);
- attach_mnt(new_nd.mnt, &root_parent);
- spin_unlock(&dcache_lock);
- chroot_fs_refs(user_nd.dentry,user_nd.mnt,new_nd.dentry,new_nd.mnt);
- error = 0;
- path_release(&root_parent);
- path_release(&parent_nd);
-out2:
- up(&old_nd.dentry->d_inode->i_zombie);
- up(&mount_sem);
- path_release(&user_nd);
- path_release(&old_nd);
-out1:
- path_release(&new_nd);
-out0:
- unlock_kernel();
- return error;
-out3:
- spin_unlock(&dcache_lock);
- goto out2;
-}
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-
-int __init change_root(kdev_t new_root_dev,const char *put_old)
-{
- struct vfsmount *old_rootmnt;
- struct nameidata devfs_nd, nd;
- struct nameidata parent_nd;
- char *new_devname = kmalloc(strlen("/dev/root.old")+1, GFP_KERNEL);
- int error = 0;
-
- if (new_devname)
- strcpy(new_devname, "/dev/root.old");
-
- read_lock(&current->fs->lock);
- old_rootmnt = mntget(current->fs->rootmnt);
- read_unlock(&current->fs->lock);
- /* First unmount devfs if mounted */
- if (path_init("/dev", LOOKUP_FOLLOW|LOOKUP_POSITIVE, &devfs_nd))
- error = path_walk("/dev", &devfs_nd);
- if (!error) {
- if (devfs_nd.mnt->mnt_sb->s_magic == DEVFS_SUPER_MAGIC &&
- devfs_nd.dentry == devfs_nd.mnt->mnt_root) {
- down(&mount_sem);
- do_umount(devfs_nd.mnt, 0);
- path_release(&devfs_nd);
- up(&mount_sem);
- } else
- path_release(&devfs_nd);
- }
- spin_lock(&dcache_lock);
- detach_mnt(old_rootmnt, &parent_nd);
- spin_unlock(&dcache_lock);
- ROOT_DEV = new_root_dev;
- mount_root();
-#if 1
- shrink_dcache();
- printk("change_root: old root has d_count=%d\n",
- atomic_read(&old_rootmnt->mnt_root->d_count));
-#endif
- mount_devfs_fs ();
- /*
- * Get the new mount directory
- */
- error = 0;
- if (path_init(put_old, LOOKUP_FOLLOW|LOOKUP_POSITIVE|LOOKUP_DIRECTORY, &nd))
- error = path_walk(put_old, &nd);
- if (error) {
- int blivet;
- struct block_device *ramdisk = old_rootmnt->mnt_sb->s_bdev;
-
- atomic_inc(&ramdisk->bd_count);
- blivet = blkdev_get(ramdisk, FMODE_READ, 0, BDEV_FS);
- printk(KERN_NOTICE "Trying to unmount old root ... ");
- if (!blivet) {
- spin_lock(&dcache_lock);
- list_del(&old_rootmnt->mnt_list);
- spin_unlock(&dcache_lock);
- mntput(old_rootmnt);
- mntput(old_rootmnt);
- blivet = ioctl_by_bdev(ramdisk, BLKFLSBUF, 0);
- path_release(&parent_nd);
- blkdev_put(ramdisk, BDEV_FS);
- }
- if (blivet) {
- printk(KERN_ERR "error %d\n", blivet);
- } else {
- printk("okay\n");
- error = 0;
- }
- kfree(new_devname);
- return error;
- }
-
- spin_lock(&dcache_lock);
- attach_mnt(old_rootmnt, &nd);
- if (new_devname) {
- if (old_rootmnt->mnt_devname)
- kfree(old_rootmnt->mnt_devname);
- old_rootmnt->mnt_devname = new_devname;
- }
- spin_unlock(&dcache_lock);
-
- /* put the old stuff */
- path_release(&parent_nd);
- mntput(old_rootmnt);
- path_release(&nd);
- return 0;
-}
-
-#endif
-
-/*
- * Absolutely minimal fake fs - only empty root directory and nothing else.
- * In 2.5 we'll use ramfs or tmpfs, but for now it's all we need - just
- * something to go with root vfsmount.
- */
-static struct dentry *rootfs_lookup(struct inode *dir, struct dentry *dentry)
-{
- d_add(dentry, NULL);
- return NULL;
-}
-static struct file_operations rootfs_dir_operations = {
- read: generic_read_dir,
- readdir: dcache_readdir,
-};
-static struct inode_operations rootfs_dir_inode_operations = {
- lookup: rootfs_lookup,
-};
-static struct super_block *rootfs_read_super(struct super_block * sb, void * data, int silent)
-{
- struct inode * inode;
- struct dentry * root;
- static struct super_operations s_ops = {};
- sb->s_op = &s_ops;
- inode = new_inode(sb);
- if (!inode)
- return NULL;
- inode->i_mode = S_IFDIR|0555;
- inode->i_uid = inode->i_gid = 0;
- inode->i_op = &rootfs_dir_inode_operations;
- inode->i_fop = &rootfs_dir_operations;
- root = d_alloc_root(inode);
- if (!root) {
- iput(inode);
- return NULL;
- }
- sb->s_root = root;
- return sb;
-}
-static DECLARE_FSTYPE(root_fs_type, "rootfs", rootfs_read_super, FS_NOMOUNT);
-
-static void __init init_mount_tree(void)
-{
- register_filesystem(&root_fs_type);
- root_vfsmnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
- if (IS_ERR(root_vfsmnt))
- panic("can't allocate root vfsmount");
-}
-
-void __init mnt_init(unsigned long mempages)
-{
- struct list_head *d;
- unsigned long order;
- unsigned int nr_hash;
- int i;
-
- mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
- 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
- if (!mnt_cache)
- panic("Cannot create vfsmount cache");
-
- mempages >>= (16 - PAGE_SHIFT);
- mempages *= sizeof(struct list_head);
- for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
- ;
-
- do {
- mount_hashtable = (struct list_head *)
- __get_free_pages(GFP_ATOMIC, order);
- } while (mount_hashtable == NULL && --order >= 0);
-
- if (!mount_hashtable)
- panic("Failed to allocate mount hash table\n");
-
- /*
- * Find the power-of-two list-heads that can fit into the allocation..
- * We don't guarantee that "sizeof(struct list_head)" is necessarily
- * a power-of-two.
- */
- nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct list_head);
- hash_bits = 0;
- do {
- hash_bits++;
- } while ((nr_hash >> hash_bits) != 0);
- hash_bits--;
-
- /*
- * Re-calculate the actual number of entries and the mask
- * from the number of bits we can fit.
- */
- nr_hash = 1UL << hash_bits;
- hash_mask = nr_hash-1;
-
- printk("Mount-cache hash table entries: %d (order: %ld, %ld bytes)\n",
- nr_hash, order, (PAGE_SIZE << order));
-
- /* And initialize the newly allocated array */
- d = mount_hashtable;
- i = nr_hash;
- do {
- INIT_LIST_HEAD(d);
- d++;
- i--;
- } while (i);
- init_mount_tree();
-}
#include <linux/config.h>
/* Bytes per L1 (data) cache line. */
-#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
# define L1_CACHE_BYTES 64
# define L1_CACHE_SHIFT 6
#else
return addr + APECS_DENSE_MEM;
}
-__EXTERN_INLINE void apecs_iounmap(unsigned addr)
+__EXTERN_INLINE void apecs_iounmap(unsigned long addr)
{
return;
}
outb_p((val),RTC_PORT(1)); \
})
-#define RTC_IRQ 0 /* Don't support interrupt features. */
-
#endif /* __ASM_ALPHA_MC146818RTC_H */
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
- ((defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)) && \
- !defined(USE_48_BIT_KSEG))
+ (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define PHYS_TWIDDLE(phys) \
((((phys) & 0xc0000000000UL) == 0x40000000000UL) \
? ((phys) ^= 0xc0000000000UL) : (phys))
#ifdef CONFIG_ALPHA_EV5
#define implver() IMPLVER_EV5
#endif
-#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
+#if defined(CONFIG_ALPHA_EV6)
#define implver() IMPLVER_EV6
#endif
#endif
}
extern int get_maxlvt(void);
+extern void clear_local_APIC(void);
extern void connect_bsp_APIC (void);
extern void disconnect_bsp_APIC (void);
extern void disable_local_APIC (void);
* - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
-#else
-#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-#endif
-
-#ifdef REALLY_SLOW_IO
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
-#else
-#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-
-/*
- * Talk about misusing macros..
- */
-#define __OUT1(s,x) \
-static inline void out##s(unsigned x value, unsigned short port) {
-
-#define __OUT2(s,s1,s2) \
-__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
-
-#define __OUT(s,s1,x) \
-__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
-__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
-
-#define __IN1(s) \
-static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
-
-#define __IN2(s,s1,s2) \
-__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
-
-#define __IN(s,s1,i...) \
-__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
-
-#define __INS(s) \
-static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; ins" #s \
-: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define __OUTS(s) \
-static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
-{ __asm__ __volatile__ ("rep ; outs" #s \
-: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
-
-#define RETURN_TYPE unsigned char
-__IN(b,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned short
-__IN(w,"")
-#undef RETURN_TYPE
-#define RETURN_TYPE unsigned int
-__IN(l,"")
-#undef RETURN_TYPE
-
-__OUT(b,"b",char)
-__OUT(w,"w",short)
-__OUT(l,,int)
-
-__INS(b)
-__INS(w)
-__INS(l)
-
-__OUTS(b)
-__OUTS(w)
-__OUTS(l)
-
#define IO_SPACE_LIMIT 0xffff
+#define XQUAD_PORTIO_BASE 0xfe400000
+#define XQUAD_PORTIO_LEN 0x40000 /* 256k per quad. Only remapping 1st */
+
#ifdef __KERNEL__
#include <linux/vmalloc.h>
#endif /* __KERNEL__ */
+#ifdef SLOW_IO_BY_JUMPING
+#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
+#else
+#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
+#endif
+
+#ifdef REALLY_SLOW_IO
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
+#else
+#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
+#endif
+
+#ifdef CONFIG_MULTIQUAD
+extern void *xquad_portio; /* Where the IO area was mapped */
+#endif /* CONFIG_MULTIQUAD */
+
+/*
+ * Talk about misusing macros..
+ */
+#define __OUT1(s,x) \
+static inline void out##s(unsigned x value, unsigned short port) {
+
+#define __OUT2(s,s1,s2) \
+__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
+
+#ifdef CONFIG_MULTIQUAD
+/* Make the default portio routines operate on quad 0 for now */
+#define __OUT(s,s1,x) \
+__OUT1(s##_local,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p_local,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
+__OUTQ0(s,s,x) \
+__OUTQ0(s,s##_p,x)
+#else
+#define __OUT(s,s1,x) \
+__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
+__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));}
+#endif /* CONFIG_MULTIQUAD */
+
+#ifdef CONFIG_MULTIQUAD
+#define __OUTQ0(s,ss,x) /* Do the equivalent of the portio op on quad 0 */ \
+static inline void out##ss(unsigned x value, unsigned short port) { \
+ if (xquad_portio) \
+ write##s(value, (unsigned long) xquad_portio + port); \
+ else /* We're still in early boot, running on quad 0 */ \
+ out##ss##_local(value, port); \
+}
+
+#define __INQ0(s,ss) /* Do the equivalent of the portio op on quad 0 */ \
+static inline RETURN_TYPE in##ss(unsigned short port) { \
+ if (xquad_portio) \
+ return read##s((unsigned long) xquad_portio + port); \
+ else /* We're still in early boot, running on quad 0 */ \
+ return in##ss##_local(port); \
+}
+#endif /* CONFIG_MULTIQUAD */
+
+#define __IN1(s) \
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+
+#define __IN2(s,s1,s2) \
+__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
+
+#ifdef CONFIG_MULTIQUAD
+#define __IN(s,s1,i...) \
+__IN1(s##_local) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p_local) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__INQ0(s,s) \
+__INQ0(s,s##_p)
+#else
+#define __IN(s,s1,i...) \
+__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
+__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; }
+#endif /* CONFIG_MULTIQUAD */
+
+#define __INS(s) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; ins" #s \
+: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define __OUTS(s) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+{ __asm__ __volatile__ ("rep ; outs" #s \
+: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
+
+#define RETURN_TYPE unsigned char
+__IN(b,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned short
+__IN(w,"")
+#undef RETURN_TYPE
+#define RETURN_TYPE unsigned int
+__IN(l,"")
+#undef RETURN_TYPE
+
+__OUT(b,"b",char)
+__OUT(w,"w",short)
+__OUT(l,,int)
+
+__INS(b)
+__INS(w)
+__INS(l)
+
+__OUTS(b)
+__OUTS(w)
+__OUTS(l)
+
#endif
* 7 2 CPU MCA+PCI
*/
-#define MAX_IRQ_SOURCES 128
+#define MAX_IRQ_SOURCES 256
#define MAX_MP_BUSSES 32
enum mp_bustype {
MP_BUS_ISA = 1,
extern int mp_bus_id_to_type [MAX_MP_BUSSES];
extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
-extern unsigned int boot_cpu_id;
+extern unsigned int boot_cpu_physical_apicid;
extern unsigned long phys_cpu_present_map;
extern int smp_found_config;
extern void find_smp_config (void);
#endif
#endif
+#if CONFIG_SMP
+# ifdef CONFIG_MULTIQUAD
+# define TARGET_CPUS 0xf /* all CPUs in *THIS* quad */
+# define INT_DELIVERY_MODE 0 /* physical delivery on LOCAL quad */
+# else
+# define TARGET_CPUS cpu_online_map
+# define INT_DELIVERY_MODE 1 /* logical delivery broadcast to all procs */
+# endif
+#else
+# define TARGET_CPUS 0x01
+#endif
+
#ifdef CONFIG_SMP
#ifndef ASSEMBLY
* Some lowlevel functions might want to know about
* the real APIC ID <-> CPU # mapping.
*/
-extern volatile int x86_apicid_to_cpu[NR_CPUS];
-extern volatile int x86_cpu_to_apicid[NR_CPUS];
+#define MAX_APICID 256
+extern volatile int cpu_to_physical_apicid[NR_CPUS];
+extern volatile int physical_apicid_to_cpu[MAX_APICID];
+extern volatile int cpu_to_logical_apicid[NR_CPUS];
+extern volatile int logical_apicid_to_cpu[MAX_APICID];
+
+#ifndef clustered_apic_mode
+ #ifdef CONFIG_MULTIQUAD
+ #define clustered_apic_mode (1)
+ #define esr_disable (1)
+ #else /* !CONFIG_MULTIQUAD */
+ #define clustered_apic_mode (0)
+ #define esr_disable (0)
+ #endif /* CONFIG_MULTIQUAD */
+#endif
/*
* General functions that each host system must provide.
return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
}
+extern __inline int logical_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+
#endif /* !ASSEMBLY */
#define NO_PROC_ID 0xFF /* No processor magic marker */
--- /dev/null
+#ifndef __ASM_SMPBOOT_H
+#define __ASM_SMPBOOT_H
+
+#ifndef clustered_apic_mode
+ #ifdef CONFIG_MULTIQUAD
+ #define clustered_apic_mode (1)
+ #else /* !CONFIG_MULTIQUAD */
+ #define clustered_apic_mode (0)
+ #endif /* CONFIG_MULTIQUAD */
+#endif
+
+#ifdef CONFIG_MULTIQUAD
+ #define TRAMPOLINE_LOW phys_to_virt(0x8)
+ #define TRAMPOLINE_HIGH phys_to_virt(0xa)
+#else /* !CONFIG_MULTIQUAD */
+ #define TRAMPOLINE_LOW phys_to_virt(0x467)
+ #define TRAMPOLINE_HIGH phys_to_virt(0x469)
+#endif /* CONFIG_MULTIQUAD */
+
+#ifdef CONFIG_MULTIQUAD
+ #define boot_cpu_apicid boot_cpu_logical_apicid
+#else /* !CONFIG_MULTIQUAD */
+ #define boot_cpu_apicid boot_cpu_physical_apicid
+#endif /* CONFIG_MULTIQUAD */
+
+/*
+ * How to map from the cpu_present_map
+ */
+#ifdef CONFIG_MULTIQUAD
+ #define cpu_present_to_apicid(mps_cpu) ( ((mps_cpu/4)*16) + (1<<(mps_cpu%4)) )
+#else /* !CONFIG_MULTIQUAD */
+ #define cpu_present_to_apicid(apicid) (apicid)
+#endif /* CONFIG_MULTIQUAD */
+
+/*
+ * Mappings between logical cpu number and logical / physical apicid
+ * The first four macros are trivial, but it keeps the abstraction consistent
+ */
+extern volatile int logical_apicid_2_cpu[];
+extern volatile int cpu_2_logical_apicid[];
+extern volatile int physical_apicid_2_cpu[];
+extern volatile int cpu_2_physical_apicid[];
+
+#define logical_apicid_to_cpu(apicid) logical_apicid_2_cpu[apicid]
+#define cpu_to_logical_apicid(cpu) cpu_2_logical_apicid[cpu]
+#define physical_apicid_to_cpu(apicid) physical_apicid_2_cpu[apicid]
+#define cpu_to_physical_apicid(cpu) cpu_2_physical_apicid[cpu]
+#ifdef CONFIG_MULTIQUAD /* use logical IDs to bootstrap */
+#define boot_apicid_to_cpu(apicid) logical_apicid_2_cpu[apicid]
+#define cpu_to_boot_apicid(cpu) cpu_2_logical_apicid[cpu]
+#else /* !CONFIG_MULTIQUAD */ /* use physical IDs to bootstrap */
+#define boot_apicid_to_cpu(apicid) physical_apicid_2_cpu[apicid]
+#define cpu_to_boot_apicid(cpu) cpu_2_physical_apicid[cpu]
+#endif /* CONFIG_MULTIQUAD */
+
+
+#ifdef CONFIG_MULTIQUAD
+#else /* !CONFIG_MULTIQUAD */
+#endif /* CONFIG_MULTIQUAD */
+
+
+#endif
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * $Id: jffs.h,v 1.11 2000/08/04 12:46:34 dwmw2 Exp $
+ * $Id: jffs.h,v 1.20 2001/09/18 21:33:37 dwmw2 Exp $
*
* Ported to Linux 2.3.x and MTD:
* Copyright (C) 2000 Alexander Larsson (alex@cendio.se), Cendio Systems AB
#ifndef __LINUX_JFFS_H__
#define __LINUX_JFFS_H__
-#define JFFS_VERSION_STRING "1.0"
-
+#include <linux/types.h>
#include <linux/completion.h>
+#define JFFS_VERSION_STRING "1.0"
+
/* This is a magic number that is used as an identification number for
this file system. It is written to the super_block structure. */
#define JFFS_MAGIC_SB_BITMASK 0x07c0 /* 1984 */
/* This stuff could be used for finding memory leaks. */
#define JFFS_MEMORY_DEBUG 0
-#if defined(JFFS_MEMORY_DEBUG) && JFFS_MEMORY_DEBUG
-extern long no_jffs_file;
extern long no_jffs_node;
+extern long no_jffs_file;
+#if defined(JFFS_MEMORY_DEBUG) && JFFS_MEMORY_DEBUG
extern long no_jffs_control;
extern long no_jffs_raw_inode;
extern long no_jffs_node_ref;
-/* $Id: jffs2_fs_sb.h,v 1.15 2001/04/18 13:05:28 dwmw2 Exp $ */
+/* $Id: jffs2_fs_sb.h,v 1.16 2001/09/18 20:15:18 dwmw2 Exp $ */
#ifndef _JFFS2_FS_SB
#define _JFFS2_FS_SB
// pid_t thread_pid; /* GC thread's PID */
struct task_struct *gc_task; /* GC task struct */
- struct semaphore gc_thread_sem; /* GC thread startup mutex */
- struct completion gc_thread_exit; /* GC thread exit completion */
-
+ struct semaphore gc_thread_start; /* GC thread start mutex */
+ struct completion gc_thread_exit; /* GC thread exit completion port */
// __u32 gc_minfree_threshold; /* GC trigger thresholds */
// __u32 gc_maxdirty_threshold;
/* Common Flash Interface structures
* See http://support.intel.com/design/flash/technote/index.htm
- * $Id: cfi.h,v 1.22 2001/07/06 09:29:07 dwmw2 Exp $
+ * $Id: cfi.h,v 1.25 2001/09/04 07:06:21 dwmw2 Exp $
*/
#ifndef __MTD_CFI_H__
struct mtd_info *(*cmdset_setup)(struct map_info *);
struct cfi_ident *cfiq; /* For now only one. We insist that all devs
must be of the same type. */
- __u8 mfr, id;
+ int mfr, id;
int numchips;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
static inline void cfi_udelay(int us)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- if (current->need_resched)
- schedule();
+ if (current->need_resched) {
+ unsigned long t = us * HZ / 1000000;
+ if (t < 1)
+ t = 1;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(t);
+ }
else
#endif
udelay(us);
/*
- * $Id: cfi_endian.h,v 1.9 2001/04/23 21:19:11 nico Exp $
- *
- * It seems that some helpful people decided to make life easier
- * for software engineers who aren't capable of dealing with the
- * concept of byteswapping, and advise engineers to swap the bytes
- * by wiring the data lines up to flash chips from BE hosts backwards.
- *
- * So we have ugly stuff here to disable the byteswapping where necessary.
- * I'm not going to try to do this dynamically.
- *
- * At first I thought these guys were on crack, but then I discovered the
- * LART.
+ * $Id: cfi_endian.h,v 1.10 2001/06/18 11:00:46 abz Exp $
*
*/
#define CFI_BIG_ENDIAN
#endif
-#ifdef CONFIG_MTD_CFI_LART_BIT_SWAP
-#define CFI_LART_ENDIAN
-#endif
-
#endif
#if defined(CFI_LITTLE_ENDIAN)
#define cpu_to_cfi32(x) (x)
#define cfi16_to_cpu(x) (x)
#define cfi32_to_cpu(x) (x)
-#elif defined (CFI_LART_ENDIAN)
-/*
- Fuck me backwards. The data line mapping on LART is as follows:
-
- U2 CPU | U3 CPU
- 0 20 | 0 12
- 1 22 | 1 14
- 2 19 | 2 11
- 3 17 | 3 9
- 4 24 | 4 0
- 5 26 | 5 2
- 6 31 | 6 7
- 7 29 | 7 5
- 8 21 | 8 13
- 9 23 | 9 15
- 10 18 | 10 10
- 11 16 | 11 8
- 12 25 | 12 1
- 13 27 | 13 3
- 14 30 | 14 6
- 15 28 | 15 4
-
- For historical reference: the reason why the LART has this strange
- mapping is that the designer of the board wanted address lines to
- be as short as possible. Why? Because in that way you don't need
- drivers in the address lines so the memory access time can be held
- short. -- Erik Mouw <J.A.K.Mouw@its.tudelft.nl>
-*/
-/* cpu_to_cfi16() and cfi16_to_cpu() are not needed because the LART
- * only has 32 bit wide Flash memory. -- Erik
- */
-#define cpu_to_cfi16(x) (x)
-#define cfi16_to_cpu(x) (x)
-static inline __u32 cfi32_to_cpu(__u32 x)
-{
- __u32 ret;
-
- ret = (x & 0x08009000) >> 11;
- ret |= (x & 0x00002000) >> 10;
- ret |= (x & 0x04004000) >> 8;
- ret |= (x & 0x00000010) >> 4;
- ret |= (x & 0x91000820) >> 3;
- ret |= (x & 0x22080080) >> 2;
- ret |= (x & 0x40000400);
- ret |= (x & 0x00040040) << 1;
- ret |= (x & 0x00110000) << 4;
- ret |= (x & 0x00220100) << 5;
- ret |= (x & 0x00800208) << 6;
- ret |= (x & 0x00400004) << 9;
- ret |= (x & 0x00000001) << 12;
- ret |= (x & 0x00000002) << 13;
-
- return ret;
-}
-static inline __u32 cpu_to_cfi32(__u32 x)
-{
- __u32 ret;
-
- ret = (x & 0x00010012) << 11;
- ret |= (x & 0x00000008) << 10;
- ret |= (x & 0x00040040) << 8;
- ret |= (x & 0x00000001) << 4;
- ret |= (x & 0x12200104) << 3;
- ret |= (x & 0x08820020) << 2;
- ret |= (x & 0x40000400);
- ret |= (x & 0x00080080) >> 1;
- ret |= (x & 0x01100000) >> 4;
- ret |= (x & 0x04402000) >> 5;
- ret |= (x & 0x20008200) >> 6;
- ret |= (x & 0x80000800) >> 9;
- ret |= (x & 0x00001000) >> 12;
- ret |= (x & 0x00004000) >> 13;
-
- return ret;
-}
#else
#error No CFI endianness defined
#endif
/* Linux driver for Disk-On-Chip 2000 */
/* (c) 1999 Machine Vision Holdings, Inc. */
/* Author: David Woodhouse <dwmw2@mvhi.com> */
-/* $Id: doc2000.h,v 1.13 2001/05/29 12:03:45 dwmw2 Exp $ */
+/* $Id: doc2000.h,v 1.15 2001/09/19 00:22:15 dwmw2 Exp $ */
#ifndef __MTD_DOC2000_H__
#define __MTD_DOC2000_H__
int numchips;
struct Nand *chips;
struct mtd_info *nextdoc;
+ struct semaphore lock;
};
int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]);
/*
+ * $Id: ftl.h,v 1.5 2001/06/02 20:35:51 dwmw2 Exp $
+ *
+ * Derived from (and probably identical to):
* ftl.h 1.7 1999/10/25 20:23:17
*
* The contents of this file are subject to the Mozilla Public License
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
* Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in which
- * case the provisions of the GPL are applicable instead of the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
* above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use
* your version of this file under the MPL, indicate your decision by
--- /dev/null
+/*
+ * (C) 2001, 2001 Red Hat, Inc.
+ * GPL'd
+ * $Id: gen_probe.h,v 1.1 2001/09/02 18:50:13 dwmw2 Exp $
+ */
+
+#ifndef __LINUX_MTD_GEN_PROBE_H__
+#define __LINUX_MTD_GEN_PROBE_H__
+
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+
+struct chip_probe {
+ char *name;
+ int (*probe_chip)(struct map_info *map, __u32 base,
+ struct flchip *chips, struct cfi_private *cfi);
+
+};
+
+struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp);
+
+#endif /* __LINUX_MTD_GEN_PROBE_H__ */
-/* iflash.h $revision$ $date$ (David Hinds) */
+/* $Id: iflash.h,v 1.2 2000/11/13 18:01:54 dwmw2 Exp $ */
#ifndef __MTD_IFLASH_H__
#define __MTD_IFLASH_H__
/* Overhauled routines for dealing with different mmap regions of flash */
-/* $Id: map.h,v 1.24 2001/06/09 19:53:16 dwmw2 Exp $ */
+/* $Id: map.h,v 1.25 2001/09/09 15:04:17 dwmw2 Exp $ */
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
{
struct map_info *map = mtd->priv;
- map->fldrv->destroy(mtd);
+ if (map->fldrv->destroy)
+ map->fldrv->destroy(mtd);
#ifdef CONFIG_MODULES
if (map->fldrv->module)
__MOD_DEC_USE_COUNT(map->fldrv->module);
/*
- * $Id: pmc551.h,v 1.3 2000/10/30 20:03:23 major Exp $
+ * $Id: pmc551.h,v 1.4 2001/06/12 16:19:38 major Exp $
*
* PMC551 PCI Mezzanine Ram Device
*
#include <linux/mtd/mtd.h>
-#define PMC551_VERSION "$Id: pmc551.h,v 1.3 2000/10/30 20:03:23 major Exp $\n"\
+#define PMC551_VERSION "$Id: pmc551.h,v 1.4 2001/06/12 16:19:38 major Exp $\n"\
"Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
/*
struct mypriv {
struct pci_dev *dev;
u_char *start;
- u32 mem_map0_base_val;
- u32 curr_mem_map0_val;
- u32 aperture_size;
+ u32 base_map0;
+ u32 curr_map0;
+ u32 asize;
struct mtd_info *nextpmc551;
};
*/
static int pmc551_erase(struct mtd_info *, struct erase_info *);
static void pmc551_unpoint(struct mtd_info *, u_char *);
+static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf);
static int pmc551_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char
-*);
+static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+
/*
* Define the PCI ID's if the kernel doesn't define them for us
#define PMC551_DRAM_BLK_SET_ROW_MUX(x,v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
-#define PMC551_ADDR_HIGH_MASK 0x3ff00000
-#define PMC551_ADDR_LOW_MASK 0x000fffff
-
#endif /* __MTD_PMC551_H__ */
extern void add_to_page_cache(struct page * page, struct address_space *mapping, unsigned long index);
extern void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index);
+extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long index, struct page **hash);
extern void ___wait_on_page(struct page *);
/* linux/mm/page_alloc.c */
/* linux/mm/swap_state.c */
+#define SWAP_CACHE_INFO
+#ifdef SWAP_CACHE_INFO
extern void show_swap_cache_info(void);
-extern void add_to_swap_cache(struct page *, swp_entry_t);
+#endif
+extern int add_to_swap_cache(struct page *, swp_entry_t);
extern void __delete_from_swap_cache(struct page *page);
extern void delete_from_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *page);
extern struct page * read_swap_cache_async(swp_entry_t);
/* linux/mm/oom_kill.c */
+extern int out_of_memory(void);
extern void oom_kill(void);
/* linux/mm/swapfile.c */
asmlinkage long sys_swapoff(const char *);
asmlinkage long sys_swapon(const char *, int);
-#define SWAP_CACHE_INFO
-
-#ifdef SWAP_CACHE_INFO
-extern unsigned long swap_cache_add_total;
-extern unsigned long swap_cache_del_total;
-extern unsigned long swap_cache_find_total;
-extern unsigned long swap_cache_find_success;
-#endif
-
extern spinlock_t pagemap_lru_lock;
extern void FASTCALL(mark_page_accessed(struct page *));
/*****************************************************************************
*
* Filename: irda-usb.h
- * Version: 0.9a
+ * Version: 0.9b
* Description: IrDA-USB Driver
* Status: Experimental
* Author: Dag Brattli <dag@brattli.net>
#define IU_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + 1)
/* Various ugly stuff to try to workaround generic problems */
-/* The USB layer should send empty frames at the end of packets multiple
- * of the frame size. As it doesn't do it by default, we need to do it
- * ourselves... See also following option. */
-#undef IU_BUG_KICK_TX
-/* Use the USB_ZERO_PACKET flag instead of sending empty frame (above)
- * Work only with usb-uhci.o so far. Please fix uhic.c and usb-ohci.c */
-#define IU_USE_USB_ZERO_FLAG
/* Send speed command in case of timeout, just for trying to get things sane */
#define IU_BUG_KICK_TIMEOUT
/* Show the USB class descriptor */
#undef IU_DUMP_CLASS_DESC
+/* Assume a minimum round trip latency for USB transfer (in us)...
+ * USB transfer are done in the next USB slot if there is no traffic
+ * (1/19 msec) and is done at 12 Mb/s :
+ * Waiting for slot + tx = (53us + 16us) * 2 = 138us minimum.
+ * Rx notification will only be done at the end of the USB frame period :
+ * OHCI : frame period = 1ms
+ * UHCI : frame period = 1ms, but notification can take 2 or 3 ms :-(
+ * EHCI : frame period = 125us */
+#define IU_USB_MIN_RTT 500 /* This should be safe in most cases */
/* Inbound header */
#define MEDIA_BUSY 0x80
struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
struct urb tx_urb; /* URB used to send data frames */
struct urb speed_urb; /* URB used to send speed commands */
-#ifdef IU_BUG_KICK_TX
- struct urb empty_urb; /* URB used to send empty commands */
-#endif IU_BUG_KICK_TX
struct net_device *netdev; /* Yes! we are some kind of netdev. */
struct net_device_stats stats;
void irlmp_discovery_confirm(hashbin_t *discovery_log);
void irlmp_discovery_request(int nslots);
-struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask);
+struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask, int nslots);
void irlmp_do_expiry(void);
void irlmp_do_discovery(int nslots);
discovery_t *irlmp_get_discovery_response(void);
*
* vlsi_ir.h: VLSI82C147 PCI IrDA controller driver for Linux
*
- * Version: 0.1, Aug 6, 2001
+ * Version: 0.3, Sep 30, 2001
*
* Copyright (c) 2001 Martin Diehl
*
*
* On my HP OB-800 the BIOS sets external 40MHz clock as source
* when IrDA enabled and I've never detected any PLL lock success.
- * Apparently the 14.31818MHz OSC input required for the PLL to work
+ * Apparently the 14.3...MHz OSC input required for the PLL to work
* is not connected and the 40MHz EXTCLK is provided externally.
* At least this is what makes the driver working for me.
*/
/* PLL control */
CLKCTL_NO_PD = 0x04, /* PD# (inverted power down) signal,
- * i.e. PLL is powered, if PD_INV is set */
+ * i.e. PLL is powered, if NO_PD set */
CLKCTL_LOCK = 0x40, /* (ro) set, if PLL is locked */
/* clock source selection */
CLKCTL_CLKSTP = 0x80, /* set to disconnect from selected clock source */
CLKCTL_WAKE = 0x08 /* set to enable wakeup feature: whenever IR activity
- * is detected, PD_INV gets set and CLKSTP cleared */
+ * is detected, NO_PD gets set and CLKSTP cleared */
};
/* ------------------------------------------ */
* restriction to the first 16MB of physical address range.
* Hence the approach here is to enable PCI busmaster support using
* the correct 32bit dma-mask used by the chip. Afterwards the device's
- * dma-mask gets restricted to 24bit, which must be honoured by all
- * allocations for memory areas to be exposed to the chip.
+ * dma-mask gets restricted to 24bit, which must be honoured somehow by
+ * all allocations for memory areas to be exposed to the chip ...
*
* Note:
* Don't be surprised to get "Setting latency timer..." messages every
/* VLSI_PCIIRMISC: IR Miscellaneous Register (u8, rw) */
-/* leagcy UART emulation - not used by this driver - would require:
+/* legacy UART emulation - not used by this driver - would require:
* (see below for some register-value definitions)
*
* - IRMISC_UARTEN must be set to enable UART address decoding
IRMISC_IRRAIL = 0x40, /* (ro?) IR rail power indication (and control?)
* 0=3.3V / 1=5V. Probably set during power-on?
- * Not touched by driver */
+ * unclear - not touched by driver */
IRMISC_IRPD = 0x08, /* transceiver power down, if set */
/* legacy UART control */
VLSI_PIO_RINGBASE = 0x04, /* [23:10] of ring address (u16, rw) */
VLSI_PIO_RINGSIZE = 0x06, /* rx/tx ring size (u16, rw) */
VLSI_PIO_PROMPT = 0x08, /* triggers ring processing (u16, wo) */
- /* 0x0a-0x0f: reserved, duplicated UART regs */
+ /* 0x0a-0x0f: reserved / duplicated UART regs */
VLSI_PIO_IRCFG = 0x10, /* configuration select (u16, rw) */
VLSI_PIO_SIRFLAG = 0x12, /* BOF/EOF for filtered SIR (u16, ro) */
VLSI_PIO_IRENABLE = 0x14, /* enable and status register (u16, rw/ro) */
VLSI_PIO_NPHYCTL = 0x18, /* next physical layer select (u16, rw) */
VLSI_PIO_MAXPKT = 0x1a, /* [11:0] max len for packet receive (u16, rw) */
VLSI_PIO_RCVBCNT = 0x1c /* current receive-FIFO byte count (u16, ro) */
- /* 0x1e-0x1f: reserved, duplicated UART regs */
+ /* 0x1e-0x1f: reserved / duplicated UART regs */
};
/* ------------------------------------------ */
* interrupt condition bits:
* set according to corresponding interrupt source
* (regardless of the state of the enable bits)
- * enable bit status indicated whether interrupt gets raised
+ * enable bit status indicates whether interrupt gets raised
* write-to-clear
* note: RPKTINT and TPKTINT behave different in legacy UART mode (which we don't use :-)
*/
/* VLSI_PIO_RINGPTR: Ring Pointer Read-Back Register (u16, ro) */
-#define MAX_RING_DESCR 64 /* tx, rx rings may contain up to 64 descr each */
-
/* _both_ ring pointers are indices relative to the _entire_ rx,tx-ring!
* i.e. the referenced descriptor is located
* at RINGBASE + PTR * sizeof(descr) for rx and tx
- * therefore, the tx-pointer has offset by MAX_RING_DESCR
+ * therefore, the tx-pointer has offset MAX_RING_DESCR
*/
+#define MAX_RING_DESCR 64 /* tx, rx rings may contain up to 64 descr each */
+
#define RINGPTR_RX_MASK (MAX_RING_DESCR-1)
-#define RINGPTR_TX_MASK ((MAX_RING_DESCR|(MAX_RING_DESCR-1))<<8)
+#define RINGPTR_TX_MASK ((MAX_RING_DESCR-1)<<8)
#define RINGPTR_GET_RX(p) ((p)&RINGPTR_RX_MASK)
#define RINGPTR_GET_TX(p) (((p)&RINGPTR_TX_MASK)>>8)
/* Contains [23:10] part of the ring base (bus-) address
* which must be 1k-alinged. [31:24] is taken from
* VLSI_PCI_MSTRPAGE above.
- * The controler initiates non-burst PCI BM cycles to
+ * The controller initiates non-burst PCI BM cycles to
* fetch and update the descriptors in the ring.
* Once fetched, the descriptor remains cached onchip
* until it gets closed and updated due to the ring
* processing state machine.
* The entire ring area is split in rx and tx areas with each
* area consisting of 64 descriptors of 8 bytes each.
- * The rx(tx) ring is located at ringbase+0 (ringbase+8*64).
+ * The rx(tx) ring is located at ringbase+0 (ringbase+64*8).
*/
#define BUS_TO_RINGBASE(p) (((p)>>10)&0x3fff)
/* VLSI_PIO_PROMPT: Ring Prompting Register (u16, write-to-start) */
/* writing any value kicks the ring processing state machines
- * for both tx, rx rings.
- * currently enabled rings (according to IRENABLE_ENTXST, IRENABLE_ENRXST
- * status reporting - see below) are considered as follows:
+ * for both tx, rx rings as follows:
* - active rings (currently owning an active descriptor)
* ignore the prompt and continue
* - idle rings fetch the next descr from the ring and start
/* notes:
* - not more than one SIR/MIR/FIR bit must be set at any time
* - SIR, MIR, FIR and CRC16 select the configuration which will
- * be applied now/next time if/when IRENABLE_IREN is _cleared_ (see below)
+ * be applied on next 0->1 transition of IRENABLE_IREN (see below).
* - besides allowing the PCI interface to execute busmaster cycles
* and therefore the ring SM to operate, the MSTR bit has side-effects:
* when MSTR is cleared, the RINGPTR's get reset and the legacy UART mode
*/
enum vlsi_pio_irenable {
- IRENABLE_IREN = 0x8000, /* enable IR phy and gate mode config (rw) */
+ IRENABLE_IREN = 0x8000, /* enable IR phy and gate the mode config (rw) */
IRENABLE_CFGER = 0x4000, /* mode configuration error (ro) */
IRENABLE_FIR_ON = 0x2000, /* FIR on status (ro) */
IRENABLE_MIR_ON = 0x1000, /* MIR on status (ro) */
* specification, which provides 1.5 usec pulse width for all speeds (except
* for 2.4kbaud getting 6usec). This is well inside IrPHY v1.3 specs and
* reduces the transceiver power which drains the battery. At 9.6kbaud for
- * example this makes more than 90% battery power saving!
+ * example this amounts to more than 90% battery power saving!
*
* MIR-mode: BAUD = 0
* PLSWID = 9(10) for 40(48) MHz input clock
*/
#define BWP_TO_PHYCTL(B,W,P) ((((B)&0x3f)<<10) | (((W)&0x1f)<<5) | (((P)&0x1f)<<0))
-#define BAUD_BITS(br) ((115200/br)-1)
+#define BAUD_BITS(br) ((115200/(br))-1)
static inline unsigned
calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
/* VLSI_PIO_MAXPKT: Maximum Packet Length register (u16, rw) */
-/* specifies the maximum legth (up to 4096 bytes), which a
+/* specifies the maximum length (up to 4k - or (4k-1)? - bytes), which a
* received frame may have - i.e. the size of the corresponding
* receive buffers. For simplicity we use the same length for
- * receive and submit buffers. Therefore we use 3k to have
- * enough space for a lot of XBOF's and escapes we may need at
- * some point when wrapping MTU=2048 sized packets for transmission.
+ * receive and submit buffers and increase transfer buffer size
+ * beyond IrDA-MTU = 2048 so we have sufficient space left when
+ * packet size increases during wrapping due to XBOFs and CE's.
+ * Even for receiving unwrapped frames we need >MAX_PACKET_LEN
+ * space since the controller appends FCS/CRC (2 or 4 bytes)
+ * so we use 2*IrDA-MTU for both directions and cover even the
+ * worst case, where all data bytes have to be escaped when wrapping.
+ * well, this wastes some memory - anyway, later we will
+ * either map skb's directly or use pci_pool allocator...
*/
+
+#define IRDA_MTU 2048 /* seems to be undefined elsewhere */
+
+#define XFER_BUF_SIZE (2*IRDA_MTU)
-#define MAX_PACKET_LENGTH 3172
+#define MAX_PACKET_LENGTH (XFER_BUF_SIZE-1) /* register uses only [11:0] */
/* ------------------------------------------ */
/* recive packet counter gets incremented on every non-filtered
* byte which was put in the receive fifo and reset for each
* new packet. Used to decide whether we are just in the middle
- * of receiving receiving
+ * of receiving
*/
#define RCVBCNT_MASK 0x0fff
struct ring_entry {
struct sk_buff *skb;
- void *head;
+ void *data;
+};
+
+
+struct vlsi_ring {
+ unsigned size;
+ unsigned mask;
+ unsigned head, tail;
+ struct ring_descr *hw;
+ struct ring_entry buf[MAX_RING_DESCR];
};
/* ------------------------------------------ */
-/* our compound VLSI-PCI-IRDA device information */
+/* our private compound VLSI-PCI-IRDA device information */
typedef struct vlsi_irda_dev {
struct pci_dev *pdev;
int baud, new_baud;
dma_addr_t busaddr;
+ void *virtaddr;
+ struct vlsi_ring tx_ring, rx_ring;
- struct ring_descr *ring_hw;
-
- struct ring_entry *ring_buf;
-
- unsigned tx_mask, rx_mask;
-
- unsigned tx_put, tx_get, rx_put, rx_get;
+ struct timeval last_rx;
spinlock_t lock;
{ "cciss/c0d14p",0x68E0 },
{ "cciss/c0d15p",0x68F0 },
#endif
-#ifdef CONFIG_NFTL
{ "nftla", 0x5d00 },
-#endif
+ { "nftlb", 0x5d10 },
+ { "nftlc", 0x5d20 },
+ { "nftld", 0x5d30 },
+ { "ftla", 0x2c00 },
+ { "ftlb", 0x2c08 },
+ { "ftlc", 0x2c10 },
+ { "ftld", 0x2c18 },
+ { "mtdblock", 0x1f00 },
{ NULL, 0 }
};
extern void setup_arch(char **);
extern void cpu_idle(void);
+volatile unsigned long wait_init_idle = 0UL;
+
#ifndef CONFIG_SMP
#ifdef CONFIG_X86_LOCAL_APIC
#else
+
/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
/* Get other processors into their bootup holding patterns. */
smp_boot_cpus();
+ wait_init_idle = cpu_online_map;
+ clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! */
+ printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle);
smp_threads_ready=1;
smp_commence();
+
+ /* Wait for the other cpus to set up their idle processes */
+ while (1) {
+ if (!wait_init_idle)
+ break;
+ rep_nop();
+ }
+ printk("All processors have done init_idle\n");
}
#endif
/*
* Activate the first processor.
*/
-
+
asmlinkage void __init start_kernel(void)
{
char * command_line;
asmlinkage long
sys_personality(u_long personality)
{
- if (personality == 0xffffffff)
- goto ret;
- set_personality(personality);
- if (current->personality != personality)
- return -EINVAL;
-ret:
- return (current->personality);
+	u_long old = current->personality;
+
+ if (personality != 0xffffffff) {
+ set_personality(personality);
+ if (current->personality != personality)
+ return -EINVAL;
+ }
+
+ return (long)old;
}
atomic_inc(¤t->files->count);
}
+extern volatile unsigned long wait_init_idle;
+
void __init init_idle(void)
{
struct schedule_data * sched_data;
}
sched_data->curr = current;
sched_data->last_schedule = get_cycles();
+ clear_bit(current->processor, &wait_init_idle);
}
extern void init_timervecs (void);
{
unsigned long flags;
- if (PageLocked(page))
- BUG();
-
flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_dirty | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked);
page->flags = flags | (1 << PG_locked);
page_cache_get(page);
spin_unlock(&pagecache_lock);
}
-static int add_to_page_cache_unique(struct page * page,
+int add_to_page_cache_unique(struct page * page,
struct address_space *mapping, unsigned long offset,
struct page **hash)
{
schedule();
return;
}
+
+/* Return 1 if every zone on this node is at or below its pages_low
+ * watermark, 0 as soon as any zone still has free headroom. */
+static inline int node_zones_low(pg_data_t *pgdat)
+{
+ zone_t * zone;
+ int i;
+
+ /* Walk this node's zones from highest index to lowest. */
+ for (i = pgdat->nr_zones-1; i >= 0; i--) {
+ zone = pgdat->node_zones + i;
+
+ /* One zone above its pages_low watermark is enough. */
+ if (zone->free_pages > (zone->pages_low))
+ return 0;
+
+ }
+ return 1;
+}
+
+/* Return 1 only if every node in the system is low on free pages in
+ * all of its zones (see node_zones_low); 0 if any node has headroom. */
+static int all_zones_low(void)
+{
+ pg_data_t * pgdat = pgdat_list;
+
+ do {
+ if (node_zones_low(pgdat))
+ continue;
+ return 0;
+ } while ((pgdat = pgdat->node_next));
+
+ return 1;
+}
+
+/**
+ * out_of_memory - is the system out of memory?
+ *
+ * Returns 0 if there is still enough memory left,
+ * 1 when we are out of memory (otherwise).
+ */
+int out_of_memory(void)
+{
+ long cache_mem, limit;
+
+ /* Enough free memory? Not OOM. */
+ if (!all_zones_low())
+ return 0;
+
+ /* Enough swap space left? Not OOM. */
+ if (nr_swap_pages > 0)
+ return 0;
+
+ /*
+ * If the buffer and page cache (excluding swap cache) are over
+ * their (/proc tunable) minimum, we're still not OOM. We test
+ * this to make sure we don't return OOM when the system simply
+ * has a hard time with the cache.
+ */
+ cache_mem = atomic_read(&page_cache_size);
+ cache_mem -= swapper_space.nrpages;
+ /* The threshold is 2% of physical pages. */
+ limit = 2;
+ limit *= num_physpages / 100;
+
+ if (cache_mem > limit)
+ return 0;
+
+ /* Else... */
+ return 1;
+}
if (!z)
break;
- if (zone_free_pages(z, order) > z->pages_high) {
+ if (zone_free_pages(z, order) > z->pages_min) {
page = rmqueue(z, order);
if (page)
return page;
}
}
+
+ goto rebalance;
}
printk(KERN_NOTICE "__alloc_pages: %u-order allocation failed (gfp=0x%x/%i) from %p\n",
{
unsigned int order;
unsigned type;
+ pg_data_t *tmpdat = pgdat;
+
+ printk("Free pages: %6dkB (%6dkB HighMem)\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ nr_free_highpages() << (PAGE_SHIFT-10));
+
+ while (tmpdat) {
+ zone_t *zone;
+ for (zone = tmpdat->node_zones;
+ zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
+ printk("Zone:%s freepages:%6lukB min:%6luKB low:%6lukB "
+ "high:%6lukB\n",
+ zone->name,
+ (zone->free_pages)
+ << ((PAGE_SHIFT-10)),
+ zone->pages_min
+ << ((PAGE_SHIFT-10)),
+ zone->pages_low
+ << ((PAGE_SHIFT-10)),
+ zone->pages_high
+ << ((PAGE_SHIFT-10)));
+
+ tmpdat = tmpdat->node_next;
+ }
printk("Free pages: %6dkB (%6dkB HighMem)\n",
nr_free_pages() << (PAGE_SHIFT-10),
index = page->index;
inode = mapping->host;
info = &inode->u.shmem_i;
+getswap:
+ swap = get_swap_page();
+ if (!swap.val) {
+ activate_page(page);
+ SetPageDirty(page);
+ error = -ENOMEM;
+ goto out;
+ }
spin_lock(&info->lock);
entry = shmem_swp_entry(info, index);
/* Remove it from the page cache */
lru_cache_del(page);
remove_inode_page(page);
+ page_cache_release(page);
- swap_list_lock();
- swap = get_swap_page();
-
- if (!swap.val) {
- swap_list_unlock();
- /* Add it back to the page cache */
+ /* Add it to the swap cache */
+ if (add_to_swap_cache(page, swap) != 0) {
+ /*
+ * Raced with "speculative" read_swap_cache_async.
+ * Add page back to page cache, unref swap, try again.
+ */
add_to_page_cache_locked(page, mapping, index);
- activate_page(page);
- SetPageDirty(page);
- error = -ENOMEM;
- goto out;
+ spin_unlock(&info->lock);
+ swap_free(swap);
+ goto getswap;
}
- /* Add it to the swap cache */
- add_to_swap_cache(page, swap);
- swap_list_unlock();
-
- set_page_dirty(page);
- info->swapped++;
*entry = swap;
+ info->swapped++;
+ spin_unlock(&info->lock);
+ set_page_dirty(page);
error = 0;
out:
- spin_unlock(&info->lock);
UnlockPage(page);
- page_cache_release(page);
return error;
}
};
#ifdef SWAP_CACHE_INFO
-unsigned long swap_cache_add_total;
-unsigned long swap_cache_del_total;
-unsigned long swap_cache_find_total;
-unsigned long swap_cache_find_success;
+#define INC_CACHE_INFO(x) (swap_cache_info.x++)
+
+static struct {
+ unsigned long add_total;
+ unsigned long del_total;
+ unsigned long find_success;
+ unsigned long find_total;
+ unsigned long noent_race;
+ unsigned long exist_race;
+} swap_cache_info;
void show_swap_cache_info(void)
{
- printk("Swap cache: add %ld, delete %ld, find %ld/%ld\n",
- swap_cache_add_total,
- swap_cache_del_total,
- swap_cache_find_success, swap_cache_find_total);
+ printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
+ swap_cache_info.add_total, swap_cache_info.del_total,
+ swap_cache_info.find_success, swap_cache_info.find_total,
+ swap_cache_info.noent_race, swap_cache_info.exist_race);
}
+#else
+#define INC_CACHE_INFO(x) do { } while (0)
#endif
-void add_to_swap_cache(struct page *page, swp_entry_t entry)
+/*
+ * Add page to the swap cache for entry.
+ * Returns 0 on success, -ENOENT if the swap entry was freed meanwhile,
+ * -EEXIST if another page already occupies this entry in the cache.
+ */
+int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
- unsigned long flags;
-
-#ifdef SWAP_CACHE_INFO
- swap_cache_add_total++;
-#endif
+ if (page->mapping)
+ BUG();
+ /* Take a swap-map reference for the cache; fails if entry is gone. */
+ if (!swap_duplicate(entry)) {
+ INC_CACHE_INFO(noent_race);
+ return -ENOENT;
+ }
+ /* Insert unless some other page already owns this swap offset. */
+ if (add_to_page_cache_unique(page, &swapper_space, entry.val,
+ page_hash(&swapper_space, entry.val)) != 0) {
+ swap_free(entry);
+ INC_CACHE_INFO(exist_race);
+ return -EEXIST;
+ }
+ page->flags |= (1 << PG_uptodate);
if (!PageLocked(page))
BUG();
- if (page->mapping)
+ if (!PageSwapCache(page))
BUG();
-
- /* clear PG_dirty so a subsequent set_page_dirty takes effect */
- flags = page->flags & ~(1 << PG_error | 1 << PG_dirty | 1 << PG_arch_1 | 1 << PG_referenced);
- page->flags = flags | (1 << PG_uptodate);
- add_to_page_cache_locked(page, &swapper_space, entry.val);
+ INC_CACHE_INFO(add_total);
+ return 0;
}
/*
*/
+/* Remove page from the swap cache. NOTE(review): the double-underscore
+ * name suggests the caller already holds the required locks (page lock
+ * is BUG-checked below) -- confirm lock expectations at the callers. */
void __delete_from_swap_cache(struct page *page)
{
-#ifdef SWAP_CACHE_INFO
- swap_cache_del_total++;
-#endif
if (!PageLocked(page))
BUG();
if (!PageSwapCache(page))
BUG();
-
ClearPageDirty(page);
__remove_inode_page(page);
+ INC_CACHE_INFO(del_total);
}
/*
{
struct page *found;
-#ifdef SWAP_CACHE_INFO
- swap_cache_find_total++;
-#endif
found = find_get_page(&swapper_space, entry.val);
/*
* Unsafe to assert PageSwapCache and mapping on page found:
* the swap cache at this moment. find_lock_page would prevent
* that, but no need to change: we _have_ got the right page.
*/
-#ifdef SWAP_CACHE_INFO
+ INC_CACHE_INFO(find_total);
if (found)
- swap_cache_find_success++;
-#endif
+ INC_CACHE_INFO(find_success);
return found;
}
*/
struct page * read_swap_cache_async(swp_entry_t entry)
{
- struct page *found_page, *new_page;
- struct page **hash;
-
- /*
- * Look for the page in the swap cache. Since we normally call
- * this only after lookup_swap_cache() failed, re-calling that
- * would confuse the statistics: use __find_get_page() directly.
- */
- hash = page_hash(&swapper_space, entry.val);
- found_page = __find_get_page(&swapper_space, entry.val, hash);
- if (found_page)
- goto out;
-
- new_page = alloc_page(GFP_HIGHUSER);
- if (!new_page)
- goto out; /* Out of memory */
- if (TryLockPage(new_page))
- BUG();
-
- /*
- * Check the swap cache again, in case we stalled above.
- * swap_list_lock is guarding against races between this check
- * and where the new page is added to the swap cache below.
- * It is also guarding against race where try_to_swap_out
- * allocates entry with get_swap_page then adds to cache.
- */
- swap_list_lock();
- found_page = __find_get_page(&swapper_space, entry.val, hash);
- if (found_page)
- goto out_free_page;
-
- /*
- * Make sure the swap entry is still in use. It could have gone
- * since caller dropped page_table_lock, while allocating page above,
- * or while allocating page in prior call via swapin_readahead.
- */
- if (!swap_duplicate(entry)) /* Account for the swap cache */
- goto out_free_page;
-
- /*
- * Add it to the swap cache and read its contents.
- */
- add_to_swap_cache(new_page, entry);
- swap_list_unlock();
-
- rw_swap_page(READ, new_page);
- return new_page;
-
-out_free_page:
- swap_list_unlock();
- UnlockPage(new_page);
- page_cache_release(new_page);
-out:
+ struct page *found_page, *new_page = NULL;
+ int err;
+
+ do {
+ /*
+ * First check the swap cache. Since this is normally
+ * called after lookup_swap_cache() failed, re-calling
+ * that would confuse statistics: use find_get_page()
+ * directly.
+ */
+ found_page = find_get_page(&swapper_space, entry.val);
+ if (found_page)
+ break;
+
+ /*
+ * Get a new page to read into from swap.
+ */
+ if (!new_page) {
+ new_page = alloc_page(GFP_HIGHUSER);
+ if (!new_page)
+ break; /* Out of memory */
+ }
+
+ /*
+ * Associate the page with swap entry in the swap cache.
+ * May fail (-ENOENT) if swap entry has been freed since
+ * our caller observed it. May fail (-EEXIST) if there
+ * is already a page associated with this entry in the
+ * swap cache: added by a racing read_swap_cache_async,
+ * or by try_to_swap_out (or shmem_writepage) re-using
+ * the just freed swap entry for an existing page.
+ */
+ err = add_to_swap_cache(new_page, entry);
+ if (!err) {
+ /*
+ * Initiate read into locked page and return.
+ */
+ rw_swap_page(READ, new_page);
+ return new_page;
+ }
+ /* err is always set before this test: both break paths
+ * above skip it. -EEXIST retries the lookup; -ENOENT
+ * means the entry vanished, so give up. */
+ } while (err != -ENOENT);
+
+ if (new_page)
+ /* Drop the unused page before returning the lookup result. */
+ page_cache_release(new_page);
return found_page;
}
si->lowest_bit = si->max;
si->highest_bit = 0;
}
- /* Initial count 1 for user reference + 1 for swap cache */
- si->swap_map[offset] = 2;
+ si->swap_map[offset] = 1;
nr_swap_pages--;
si->cluster_next = offset+1;
return offset;
return 0;
}
-/*
- * Callers of get_swap_page must hold swap_list_lock across the call,
- * and across the following add_to_swap_cache, to guard against races
- * with read_swap_cache_async.
- */
swp_entry_t get_swap_page(void)
{
struct swap_info_struct * p;
int type, wrapped = 0;
entry.val = 0; /* Out of memory */
+ swap_list_lock();
type = swap_list.next;
if (type < 0)
goto out;
goto out; /* out of swap space */
}
out:
+ swap_list_unlock();
return entry;
}
* we have the swap cache set up to associate the
* page with that swap entry.
*/
- swap_list_lock();
- entry = get_swap_page();
- if (entry.val) {
+ for (;;) {
+ entry = get_swap_page();
+ if (!entry.val)
+ break;
/* Add it to the swap cache and mark it dirty */
- add_to_swap_cache(page, entry);
- swap_list_unlock();
- set_page_dirty(page);
- goto set_swap_pte;
+ if (add_to_swap_cache(page, entry) == 0) {
+ set_page_dirty(page);
+ goto set_swap_pte;
+ }
+ /* Raced with "speculative" read_swap_cache_async */
+ swap_free(entry);
}
/* No swap space left */
spin_lock(&pagemap_lru_lock);
while (max_scan && (entry = inactive_list.prev) != &inactive_list) {
struct page * page;
- swp_entry_t swap;
if (unlikely(current->need_resched)) {
spin_unlock(&pagemap_lru_lock);
/* point of no return */
if (likely(!PageSwapCache(page))) {
- swap.val = 0;
__remove_inode_page(page);
+ spin_unlock(&pagecache_lock);
} else {
+ swp_entry_t swap;
swap.val = page->index;
__delete_from_swap_cache(page);
- }
- spin_unlock(&pagecache_lock);
-
- __lru_cache_del(page);
-
- if (unlikely(swap.val != 0)) {
- /* must drop lru lock if getting swap_list lock */
- spin_unlock(&pagemap_lru_lock);
+ spin_unlock(&pagecache_lock);
swap_free(swap);
- spin_lock(&pagemap_lru_lock);
}
+ __lru_cache_del(page);
UnlockPage(page);
/* effectively free the page here */
int try_to_free_pages(zone_t * classzone, unsigned int gfp_mask, unsigned int order)
{
int ret = 0;
+ int nr_pages = SWAP_CLUSTER_MAX;
- for (;;) {
- int priority = DEF_PRIORITY;
- int nr_pages = SWAP_CLUSTER_MAX;
-
- do {
- nr_pages = shrink_caches(priority, classzone, gfp_mask, nr_pages);
- if (nr_pages <= 0)
- return 1;
+ /* Single reclaim pass at default priority (replaces the old
+ * descending-priority retry loop). */
+ nr_pages = shrink_caches(DEF_PRIORITY, classzone, gfp_mask, nr_pages);
- ret |= swap_out(priority, classzone, gfp_mask, SWAP_CLUSTER_MAX << 2);
- } while (--priority);
+ /* Progress was made if shrink_caches freed any pages at all. */
+ if (nr_pages < SWAP_CLUSTER_MAX)
+ ret |= 1;
- if (likely(ret))
- break;
- if (likely(current->pid != 1))
- break;
- current->policy |= SCHED_YIELD;
- __set_current_state(TASK_RUNNING);
- schedule();
- }
+ /* Also try to push pages out to swap; success from either counts. */
+ ret |= swap_out(DEF_PRIORITY, classzone, gfp_mask, SWAP_CLUSTER_MAX << 2);
return ret;
}
if (!try_to_free_pages(zone, GFP_KSWAPD, 0)) {
zone->need_balance = 0;
__set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(HZ*5);
+ schedule_timeout(HZ);
continue;
}
if (check_classzone_need_balance(zone))
do
need_more_balance |= kswapd_balance_pgdat(pgdat);
while ((pgdat = pgdat->node_next));
+ if (need_more_balance && out_of_memory()) {
+ oom_kill();
+ }
} while (need_more_balance);
}
* Note : we have to use irlmp_get_discoveries(), as opposed
* to play with the cachelog directly, because while we are
* making our ias query, le log might change... */
- discoveries = irlmp_get_discoveries(&number, self->mask);
+ discoveries = irlmp_get_discoveries(&number, self->mask, self->nslots);
/* Check if the we got some results */
if (discoveries == NULL)
return -ENETUNREACH; /* No nodes discovered */
switch (optname) {
case IRLMP_ENUMDEVICES:
/* Ask lmp for the current discovery log */
- discoveries = irlmp_get_discoveries(&list.len, self->mask);
+ discoveries = irlmp_get_discoveries(&list.len, self->mask,
+ self->nslots);
/* Check if the we got some results */
if (discoveries == NULL)
return -EAGAIN; /* Didn't find any devices */
struct ias_value *irias_new_string_value(char *string)
{
struct ias_value *value;
- int len;
- char *new_str;
value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC);
if (value == NULL) {
* We are allowed to send two frames, but this may increase
* the connect latency, so lets not do it for now.
*/
- /* What the hell is this ? - Jean II */
+ /* This is full of good intentions, but doesn't work in
+ * practice.
+ * After sending the first UA response, we switch the
+ * dongle to the negotiated speed, which is usually
+ * different from 9600 kb/s.
+ * From there, there are two solutions :
+ * 1) The other end has received the first UA response :
+ * it will set up the connection, move to state LAP_NRM_P,
+ * and will ignore and drop the second UA response.
+ * Actually, it's even worse : the other side will almost
+ * immediately send a RR that will likely collide with the
+ * UA response (depending on negotiated turnaround).
+ * 2) The other end has not received the first UA response,
+ * will stay at 9600 and will never see the second UA response.
+ * Jean II */
irlap_send_ua_response_frame(self, &self->qos_rx);
#endif
*/
void irlmp_discovery_request(int nslots)
{
- /* Check if user wants to override the default */
- if (nslots == DISCOVERY_DEFAULT_SLOTS)
- nslots = sysctl_discovery_slots;
-
/* Return current cached discovery log */
irlmp_discovery_confirm(irlmp->cachelog);
* Start a single discovery operation if discovery is not already
* running
*/
+ /* Only kick a one-shot discovery when periodic discovery is off. */
- if (!sysctl_discovery)
+ if (!sysctl_discovery) {
+ /* Check if user wants to override the default */
+ if (nslots == DISCOVERY_DEFAULT_SLOTS)
+ nslots = sysctl_discovery_slots;
+
irlmp_do_discovery(nslots);
- /* Note : we never do expiry here. Expiry will run on the
- * discovery timer regardless of the state of sysctl_discovery
- * Jean II */
+ /* Note : we never do expiry here. Expiry will run on the
+ * discovery timer regardless of the state of sysctl_discovery
+ * Jean II */
+ }
}
/*
- * Function irlmp_get_discoveries (pn, mask)
+ * Function irlmp_get_discoveries (pn, mask, slots)
*
* Return the current discovery log
*
*/
-struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask)
+struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask, int nslots)
{
+ /* If discovery is not enabled, it's likely that the discovery log
+ * will be empty. So, we trigger a single discovery, so that next
+ * time the user calls us there might be some results in the log.
+ * Jean II
+ */
+ if (!sysctl_discovery) {
+ /* Check if user wants to override the default */
+ if (nslots == DISCOVERY_DEFAULT_SLOTS)
+ nslots = sysctl_discovery_slots;
+
+ /* Start discovery - will complete sometime later */
+ irlmp_do_discovery(nslots);
+ /* Note : we never do expiry here. Expiry will run on the
+ * discovery timer regardless of the state of sysctl_discovery
+ * Jean II */
+ }
+
/* Return current cached discovery log */
return(irlmp_copy_discoveries(irlmp->cachelog, pn, mask));
}
DENTER(IRDA_SR_TRACE, "(self=0x%X)\n", (unsigned int) self);
/* Ask lmp for the current discovery log */
- self->discoveries = irlmp_get_discoveries(&self->disco_number, self->mask);
+ self->discoveries = irlmp_get_discoveries(&self->disco_number, self->mask,
+ DISCOVERY_DEFAULT_SLOTS);
/* Check if the we got some results */
if(self->discoveries == NULL)
DENTER(IRDA_SR_TRACE, "(self=0x%X)\n", (unsigned int) self);
/* Ask lmp for the current discovery log */
- discoveries = irlmp_get_discoveries(&number, 0xffff);
+ discoveries = irlmp_get_discoveries(&number, 0xffff,
+ DISCOVERY_DEFAULT_SLOTS);
/* Check if the we got some results */
if(discoveries == NULL)
DRETURN(-ENETUNREACH, IRDA_SR_INFO, "Cachelog empty...\n");
DENTER(IRDA_SERV_TRACE, "(self=0x%X)\n", (unsigned int) self);
/* Ask lmp for the current discovery log */
- discoveries = irlmp_get_discoveries(&number, 0xffff);
+ discoveries = irlmp_get_discoveries(&number, 0xffff,
+ DISCOVERY_DEFAULT_SLOTS);
/* Check if the we got some results */
if (discoveries == NULL)
DRETURN(-ENETUNREACH, IRDA_SERV_INFO, "Cachelog empty...\n");
*/
#include "irnet_ppp.h" /* Private header */
-#include <linux/module.h>
+/* Please put other headers in irnet.h - Thanks */
/************************* CONTROL CHANNEL *************************/
/*
__u16 mask = irlmp_service_to_hint(S_LAN);
/* Ask IrLMP for the current discovery log */
- ap->discoveries = irlmp_get_discoveries(&ap->disco_number, mask);
+ ap->discoveries = irlmp_get_discoveries(&ap->disco_number, mask,
+ DISCOVERY_DEFAULT_SLOTS);
/* Check if the we got some results */
if(ap->discoveries == NULL)
ap->disco_number = -1;