USB Handspring Visor Driver
CONFIG_USB_SERIAL_VISOR
- Say Y here if you want to connect to your HandSpring Visor through
- its USB docking station. See http://usbvisor.sourceforge.net for
- more information on using this driver.
+ Say Y here if you want to connect to your HandSpring Visor, Palm m500
+ or m505 through its USB docking station.
+ See http://usbvisor.sourceforge.net for more information on using this
+ driver.
This code is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
Microtek USB scanner support
CONFIG_USB_MICROTEK
- Say Y here if you want support for the Microtek X6USB and possibly
- some other scanners by that vendor. The scanner will appear as a
- scsi generic device to the rest of the system.
- A patched version of SANE is necessary to use the
- scanner. It's available at
- http://fachschaft.cup.uni-muenchen.de/~neukum/scanner.html
+ Say Y here if you want support for the Microtek X6USB and
+ possibly the Phantom 336CX, Phantom C6 and ScanMaker V6U(S)L.
+ Support for anything but the X6 is experimental.
+ Please report failures and successes.
+ The scanner will appear as a SCSI generic device to the rest
+ of the system. SCSI support is required for this driver to compile
+ and work. SANE 1.0.4 or newer is needed to make use of your scanner.
This driver can be compiled as a module.
USB Bluetooth support
VERSION = 2
PATCHLEVEL = 4
-SUBLEVEL = 5
-EXTRAVERSION =
+SUBLEVEL = 6
+EXTRAVERSION =-pre1
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/core_tsunami.h>
#undef __EXTERN_INLINE
+#include <linux/bootmem.h>
+
#include "proto.h"
#include "pci_impl.h"
#include <linux/pci.h>
#include <linux/init.h>
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/core_tsunami.h>
-#undef __EXTERN_INLINE
-
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/mmu_context.h>
+#include <asm/io.h>
#include <asm/pgtable.h>
+#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include "proto.h"
irq -= 16;
hose = irq / 24;
irq -= hose * 24;
+ mask = 1 << irq;
spin_lock(&rawhide_irq_lock);
- mask = cached_irq_masks[hose] |= 1 << irq;
- mask |= hose_irq_masks[hose];
+ mask |= cached_irq_masks[hose];
+ cached_irq_masks[hose] = mask;
rawhide_update_irq_hw(hose, mask);
spin_unlock(&rawhide_irq_lock);
}
irq -= 16;
hose = irq / 24;
irq -= hose * 24;
+ mask = ~(1 << irq) | hose_irq_masks[hose];
spin_lock(&rawhide_irq_lock);
- mask = cached_irq_masks[hose] &= ~(1 << irq);
- mask |= hose_irq_masks[hose];
+ mask &= cached_irq_masks[hose];
+ cached_irq_masks[hose] = mask;
rawhide_update_irq_hw(hose, mask);
spin_unlock(&rawhide_irq_lock);
}
+static void
+rawhide_mask_and_ack_irq(unsigned int irq)
+{
+ unsigned int mask, mask1, hose;
+
+ irq -= 16;
+ hose = irq / 24;
+ irq -= hose * 24;
+ mask1 = 1 << irq;
+ mask = ~mask1 | hose_irq_masks[hose];
+
+ spin_lock(&rawhide_irq_lock);
+
+ mask &= cached_irq_masks[hose];
+ cached_irq_masks[hose] = mask;
+ rawhide_update_irq_hw(hose, mask);
+
+ /* Clear the interrupt. */
+ *(vuip)MCPCIA_INT_REQ(MCPCIA_HOSE2MID(hose)) = mask1;
+
+ spin_unlock(&rawhide_irq_lock);
+}
static unsigned int
rawhide_startup_irq(unsigned int irq)
shutdown: rawhide_disable_irq,
enable: rawhide_enable_irq,
disable: rawhide_disable_irq,
- ack: rawhide_disable_irq,
+ ack: rawhide_mask_and_ack_irq,
end: rawhide_end_irq,
};
mcpcia_init_hoses();
for (hose = hose_head; hose; hose = hose->next) {
- int h = hose->index;
- rawhide_update_irq_hw(h, hose_irq_masks[h]);
+ unsigned int h = hose->index;
+ unsigned int mask = hose_irq_masks[h];
+
+ cached_irq_masks[h] = mask;
+ *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(h)) = mask;
+ *(vuip)MCPCIA_INT_MASK1(MCPCIA_HOSE2MID(h)) = 0;
}
for (i = 16; i < 128; ++i) {
-/* $Id: ide.c,v 1.9 2001/03/01 13:11:18 bjornw Exp $
+/* $Id: ide.c,v 1.16 2001/04/05 08:30:07 matsfg Exp $
*
* Etrax specific IDE functions, like init and PIO-mode setting etc.
* Almost the entire ide.c is used for the rest of the Etrax ATA driver.
* Mikael Starvik (pio setup stuff)
*
* $Log: ide.c,v $
+ * Revision 1.16 2001/04/05 08:30:07 matsfg
+ * Corrected cse1 and csp0 reset.
+ *
+ * Revision 1.15 2001/04/04 14:34:06 bjornw
+ * Re-instated code that mysteriously disappeared during review updates.
+ *
+ * Revision 1.14 2001/04/04 13:45:12 matsfg
+ * Calls REG_SHADOW_SET for cse1 reset so only the resetbit is affected
+ *
+ * Revision 1.13 2001/04/04 13:26:40 matsfg
+ * memmapping is done in init.c
+ *
+ * Revision 1.12 2001/04/04 11:37:56 markusl
+ * Updated according to review remarks
+ *
+ * Revision 1.11 2001/03/29 12:49:14 matsfg
+ * Changed check for ata_tot_size from >= to >.
+ * Sets sw_len to 0 if size is exactly 65536.
+ *
+ * Revision 1.10 2001/03/16 09:39:30 matsfg
+ * Support for reset on port CSP0
+ *
* Revision 1.9 2001/03/01 13:11:18 bjornw
* 100 -> HZ
*
#define ATA_PIO0_STROBE 19
#define ATA_PIO0_HOLD 4
+static int e100_dmaproc (ide_dma_action_t func, ide_drive_t *drive);
+static void e100_ideproc (ide_ide_action_t func, ide_drive_t *drive,
+ void *buffer, unsigned int length);
+
/*
* good_dma_drives() lists the model names (from "hdparm -i")
* of drives which do not support mword2 DMA but which are
unsigned long flags;
pio = 4;
- //pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
+ /* pio = ide_get_best_pio_mode(drive, pio, 4, NULL); */
save_flags(flags);
cli();
restore_flags(flags);
}
-static int e100_dmaproc (ide_dma_action_t func, ide_drive_t *drive); /* defined below */
-static void e100_ideproc (ide_ide_action_t func, ide_drive_t *drive,
- void *buffer, unsigned int length); /* defined below */
-
void __init
init_e100_ide (void)
{
*R_GEN_CONFIG = genconfig_shadow;
#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
-#ifndef CONFIG_CRIS_LOW_MAP
- /* remap the I/O-mapped reset-bit from CSE1 to something inside our kernel space */
- reset_addr = (unsigned long *)ioremap((unsigned long)(MEM_CSE1_START |
- MEM_NON_CACHEABLE), 16);
- *reset_addr = 0;
-#else
- /* LOW_MAP, can't do the ioremap, but it's already mapped straight over */
- reset_addr = (unsigned long *)(MEM_CSE1_START | MEM_NON_CACHEABLE);
- *reset_addr = 0;
+ init_ioremap();
+ REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 0);
#endif
+
+#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET
+ init_ioremap();
+ REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 0);
#endif
/* wait some */
-
- dummy = 1;
- dummy = 2;
- dummy = 3;
+ udelay(25);
#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
- *reset_addr = 1 << 16;
+ REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 1);
+#endif
+#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET
+ REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 1);
#endif
#ifdef CONFIG_ETRAX_IDE_G27_RESET
*R_PORT_G_DATA = 0; /* de-assert bus-reset */
e100_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
{
ide_ioreg_t data_reg = IDE_DATA_REG;
- unsigned long status;
D(printk("atapi_input_bytes, dreg 0x%x, buffer 0x%x, count %d\n",
data_reg, buffer, bytecount));
/* initiate a multi word dma read using PIO handshaking */
- *R_ATA_TRANSFER_CNT = bytecount >> 1;
+ *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1);
*R_ATA_CTRL_DATA = data_reg |
IO_STATE(R_ATA_CTRL_DATA, rw, read) |
LED_DISK_READ(1);
WAIT_DMA(3);
LED_DISK_READ(0);
-
+
#if 0
- /* old polled transfer code */
-
- /* initiate a multi word read */
-
- *R_ATA_TRANSFER_CNT = wcount << 1;
-
- *R_ATA_CTRL_DATA = data_reg |
- IO_STATE(R_ATA_CTRL_DATA, rw, read) |
- IO_STATE(R_ATA_CTRL_DATA, src_dst, register) |
- IO_STATE(R_ATA_CTRL_DATA, handsh, pio) |
- IO_STATE(R_ATA_CTRL_DATA, multi, on) |
- IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
-
- /* svinto has a latency until the busy bit actually is set */
-
- nop(); nop();
- nop(); nop();
- nop(); nop();
- nop(); nop();
- nop(); nop();
-
- /* unit should be busy during multi transfer */
- while((status = *R_ATA_STATUS_DATA) & IO_MASK(R_ATA_STATUS_DATA, busy)) {
- while(!(status & IO_MASK(R_ATA_STATUS_DATA, dav)))
- status = *R_ATA_STATUS_DATA;
- *ptr++ = (unsigned short)(status & 0xffff);
- }
+ /* old polled transfer code
+ * this should be moved into a new function that can do polled
+ * transfers if DMA is not available
+ */
+
+ /* initiate a multi word read */
+
+ *R_ATA_TRANSFER_CNT = wcount << 1;
+
+ *R_ATA_CTRL_DATA = data_reg |
+ IO_STATE(R_ATA_CTRL_DATA, rw, read) |
+ IO_STATE(R_ATA_CTRL_DATA, src_dst, register) |
+ IO_STATE(R_ATA_CTRL_DATA, handsh, pio) |
+ IO_STATE(R_ATA_CTRL_DATA, multi, on) |
+ IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
+
+ /* svinto has a latency until the busy bit actually is set */
+
+ nop(); nop();
+ nop(); nop();
+ nop(); nop();
+ nop(); nop();
+ nop(); nop();
+
+ /* unit should be busy during multi transfer */
+ while((status = *R_ATA_STATUS_DATA) & IO_MASK(R_ATA_STATUS_DATA, busy)) {
+ while(!(status & IO_MASK(R_ATA_STATUS_DATA, dav)))
+ status = *R_ATA_STATUS_DATA;
+ *ptr++ = (unsigned short)(status & 0xffff);
+ }
#endif
}
e100_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
{
ide_ioreg_t data_reg = IDE_DATA_REG;
- unsigned short *ptr = (unsigned short *)buffer;
- unsigned long ctrl;
D(printk("atapi_output_bytes, dreg 0x%x, buffer 0x%x, count %d\n",
data_reg, buffer, bytecount));
/* initiate a multi word dma write using PIO handshaking */
- *R_ATA_TRANSFER_CNT = bytecount >> 1;
+ *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1);
*R_ATA_CTRL_DATA = data_reg |
IO_STATE(R_ATA_CTRL_DATA, rw, write) |
LED_DISK_WRITE(0);
#if 0
- /* old polled write code */
-
- while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait for busy flag */
-
- /* initiate a multi word write */
-
- *R_ATA_TRANSFER_CNT = bytecount >> 1;
-
- ctrl = data_reg |
- IO_STATE(R_ATA_CTRL_DATA, rw, write) |
- IO_STATE(R_ATA_CTRL_DATA, src_dst, register) |
- IO_STATE(R_ATA_CTRL_DATA, handsh, pio) |
- IO_STATE(R_ATA_CTRL_DATA, multi, on) |
- IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
-
- LED_DISK_WRITE(1);
-
- /* Etrax will set busy = 1 until the multi pio transfer has finished
- * and tr_rdy = 1 after each succesful word transfer.
- * When the last byte has been transferred Etrax will first set tr_tdy = 1
- * and then busy = 0 (not in the same cycle). If we read busy before it
- * has been set to 0 we will think that we should transfer more bytes
- * and then tr_rdy would be 0 forever. This is solved by checking busy
- * in the inner loop.
- */
-
- do {
- *R_ATA_CTRL_DATA = ctrl | *ptr++;
- while(!(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy)) &&
- (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)));
- } while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy));
+ /* old polled write code - see comment in input_bytes */
+
+ /* wait for busy flag */
+ while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy));
+
+ /* initiate a multi word write */
+
+ *R_ATA_TRANSFER_CNT = bytecount >> 1;
+
+ ctrl = data_reg |
+ IO_STATE(R_ATA_CTRL_DATA, rw, write) |
+ IO_STATE(R_ATA_CTRL_DATA, src_dst, register) |
+ IO_STATE(R_ATA_CTRL_DATA, handsh, pio) |
+ IO_STATE(R_ATA_CTRL_DATA, multi, on) |
+ IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
+
+ LED_DISK_WRITE(1);
+
+ /* Etrax will set busy = 1 until the multi pio transfer has finished
+ * and tr_rdy = 1 after each successful word transfer.
+ * When the last byte has been transferred Etrax will first set tr_rdy = 1
+ * and then busy = 0 (not in the same cycle). If we read busy before it
+ * has been set to 0 we will think that we should transfer more bytes
+ * and then tr_rdy would be 0 forever. This is solved by checking busy
+ * in the inner loop.
+ */
+
+ do {
+ *R_ATA_CTRL_DATA = ctrl | *ptr++;
+ while(!(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy)) &&
+ (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)));
+ } while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy));
+
+ LED_DISK_WRITE(0);
+#endif
- LED_DISK_WRITE(0);
-#endif
}
/*
those blocks that were actually set-up for transfer.
*/
- if(ata_tot_size + size >= 131072) {
+ if(ata_tot_size + size > 131072) {
printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, size);
return 1;
}
addr += 65536;
}
/* ok we want to do IO at addr, size bytes. set up a new descriptor entry */
- ata_descrs[count].sw_len = size;
+ if(size == 65536) {
+ ata_descrs[count].sw_len = 0; /* 0 means 65536, this is a 16-bit field */
+ }
+ else {
+ ata_descrs[count].sw_len = size;
+ }
ata_descrs[count].ctrl = 0;
ata_descrs[count].buf = addr;
ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]);
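
The sw_len quirk above is worth spelling out: the field is 16 bits wide, so a
full 65536-byte block cannot be stored directly and wraps to 0 by convention.
A minimal standalone sketch of that encoding (not part of the patch):

/* encode/decode a 16-bit DMA length field where 0 denotes 65536 bytes */
#include <assert.h>

static unsigned short sw_len_encode(unsigned long size)
{
	assert(size >= 1 && size <= 65536);
	return (unsigned short)(size & 0xffff);	/* 65536 wraps to 0 */
}

static unsigned long sw_len_decode(unsigned short sw_len)
{
	return sw_len ? sw_len : 65536;
}
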
/* initiate a multi word dma read using DMA handshaking */
- *R_ATA_TRANSFER_CNT = ata_tot_size >> 1;
+ *R_ATA_TRANSFER_CNT =
+ IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1);
- *R_ATA_CTRL_DATA = IDE_DATA_REG |
+ *R_ATA_CTRL_DATA =
+ IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) |
IO_STATE(R_ATA_CTRL_DATA, rw, read) |
IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
IO_STATE(R_ATA_CTRL_DATA, handsh, dma) |
/* initiate a multi word dma write using DMA handshaking */
- *R_ATA_TRANSFER_CNT = ata_tot_size >> 1;
+ *R_ATA_TRANSFER_CNT =
+ IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1);
- *R_ATA_CTRL_DATA = IDE_DATA_REG |
+ *R_ATA_CTRL_DATA =
+ IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) |
IO_STATE(R_ATA_CTRL_DATA, rw, write) |
IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) |
IO_STATE(R_ATA_CTRL_DATA, handsh, dma) |
irq_enter(cpu, 0);
smp_local_timer_interrupt(regs);
irq_exit(cpu, 0);
+
+ if (softirq_pending(cpu))
+ do_softirq();
}
/*
movl $-8192, reg; \
andl %esp, reg
+#ifdef CONFIG_SMP
+#define CHECK_SOFTIRQ \
+ movl processor(%ebx),%eax; \
+ shll $CONFIG_X86_L1_CACHE_SHIFT,%eax; \
+ movl SYMBOL_NAME(irq_stat)(,%eax),%ecx; \
+ testl SYMBOL_NAME(irq_stat)+4(,%eax),%ecx
+#else
+#define CHECK_SOFTIRQ \
+ movl SYMBOL_NAME(irq_stat),%ecx; \
+ testl SYMBOL_NAME(irq_stat)+4,%ecx
+#endif
+
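
For reference, a C sketch of the condition CHECK_SOFTIRQ evaluates; the field
names are assumptions inferred from the offsets the macro uses (active word at
irq_stat + 0, mask word at irq_stat + 4), not copied from the kernel headers:

/* sketch only: what the assembly macro tests per CPU */
struct irq_cpustat_sketch {
	unsigned int __softirq_active;	/* irq_stat + 0 */
	unsigned int __softirq_mask;	/* irq_stat + 4 */
	/* ...padded out to one L1 cache line per CPU slot... */
} irq_stat_sketch[32];

static inline int check_softirq_sketch(int cpu)
{
	/* nonzero (jne taken) when an active softirq is unmasked */
	return irq_stat_sketch[cpu].__softirq_active &
	       irq_stat_sketch[cpu].__softirq_mask;
}
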
ENTRY(lcall7)
pushfl # We get a different stack layout with call gates,
pushl %eax # which has to be cleaned up later..
call *SYMBOL_NAME(sys_call_table)(,%eax,4)
movl %eax,EAX(%esp) # save the return value
ENTRY(ret_from_sys_call)
-#ifdef CONFIG_SMP
- movl processor(%ebx),%eax
- shll $CONFIG_X86_L1_CACHE_SHIFT,%eax
- movl SYMBOL_NAME(irq_stat)(,%eax),%ecx # softirq_active
- testl SYMBOL_NAME(irq_stat)+4(,%eax),%ecx # softirq_mask
-#else
- movl SYMBOL_NAME(irq_stat),%ecx # softirq_active
- testl SYMBOL_NAME(irq_stat)+4,%ecx # softirq_mask
-#endif
- jne handle_softirq
-
-ret_with_reschedule:
+ cli
+ CHECK_SOFTIRQ
+ jne handle_softirq
cmpl $0,need_resched(%ebx)
jne reschedule
cmpl $0,sigpending(%ebx)
jne v86_signal_return
xorl %edx,%edx
call SYMBOL_NAME(do_signal)
+#ifdef CONFIG_SMP
+ GET_CURRENT(%ebx)
+#endif
+ cli
+ CHECK_SOFTIRQ
+ je restore_all
+ call SYMBOL_NAME(do_softirq)
jmp restore_all
ALIGN
movl %eax,%esp
xorl %edx,%edx
call SYMBOL_NAME(do_signal)
+#ifdef CONFIG_SMP
+ GET_CURRENT(%ebx)
+#endif
+ cli
+ CHECK_SOFTIRQ
+ je restore_all
+ call SYMBOL_NAME(do_softirq)
jmp restore_all
ALIGN
ALIGN
ret_from_exception:
-#ifdef CONFIG_SMP
- GET_CURRENT(%ebx)
- movl processor(%ebx),%eax
- shll $CONFIG_X86_L1_CACHE_SHIFT,%eax
- movl SYMBOL_NAME(irq_stat)(,%eax),%ecx # softirq_active
- testl SYMBOL_NAME(irq_stat)+4(,%eax),%ecx # softirq_mask
-#else
- movl SYMBOL_NAME(irq_stat),%ecx # softirq_active
- testl SYMBOL_NAME(irq_stat)+4,%ecx # softirq_mask
-#endif
- jne handle_softirq
+ cli
+ CHECK_SOFTIRQ
+ jne handle_softirq
+ cmpl $0,need_resched(%ebx)
+ jne reschedule
+ cmpl $0,sigpending(%ebx)
+ jne signal_return
+ jmp restore_all
ENTRY(ret_from_intr)
GET_CURRENT(%ebx)
movl EFLAGS(%esp),%eax # mix EFLAGS and CS
movb CS(%esp),%al
testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
- jne ret_with_reschedule
+ jne ret_from_sys_call
jmp restore_all
ALIGN
*/
static int pin_2_irq(int idx, int apic, int pin);
-int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pci_pin)
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
int apic, i, best_guess = -1;
+ Dprintk("querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
+ bus, slot, pin);
+ if (mp_bus_id_to_pci_bus[bus] == -1) {
+ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
+ return -1;
+ }
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mpc_srcbus;
if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
!mp_irqs[i].mpc_irqtype &&
- (bus == mp_bus_id_to_pci_bus[mp_irqs[i].mpc_srcbus]) &&
+ (bus == lbus) &&
(slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
if (!(apic || IO_APIC_IRQ(irq)))
continue;
- if (pci_pin == (mp_irqs[i].mpc_srcbusirq & 3))
+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
return irq;
/*
* Use the first all-but-pin matching entry as a
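
The slot and pin arithmetic above depends on how the MP table packs a PCI
interrupt source into mpc_srcbusirq. A small sketch of the decoding implied
by the masks used in this function:

/* derived from the masks above: bits 0..1 = pin (INTA#..INTD# as 0..3),
 * bits 2..6 = device/slot number */
static inline int srcbusirq_slot(int srcbusirq)
{
	return (srcbusirq >> 2) & 0x1f;
}

static inline int srcbusirq_pin(int srcbusirq)
{
	return srcbusirq & 3;
}
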
printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01);
printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.entries);
if ( (reg_01.entries != 0x0f) && /* older (Neptune) boards */
+ (reg_01.entries != 0x11) &&
(reg_01.entries != 0x17) && /* typical ISA+PCI boards */
(reg_01.entries != 0x1b) && /* Compaq Proliant boards */
(reg_01.entries != 0x1f) && /* dual Xeon boards */
+ (reg_01.entries != 0x20) &&
(reg_01.entries != 0x22) && /* bigger Xeon boards */
(reg_01.entries != 0x2E) &&
(reg_01.entries != 0x3F)
desc->handler->end(irq);
spin_unlock(&desc->lock);
- if (softirq_active(cpu) & softirq_mask(cpu))
+ if (softirq_pending(cpu))
do_softirq();
return 1;
}
*/
int apic_version [MAX_APICS];
int mp_bus_id_to_type [MAX_MP_BUSSES];
-int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { -1, };
+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
int mp_current_pci_id;
int pic_mode;
unsigned long mp_lapic_addr;
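
The initializer change matters more than it looks: the form { -1, } sets only
element 0 and zero-fills the rest, so the == -1 check added above would never
fire for bus IDs other than 0. The GNU range-designator form fills the whole
array, as this standalone sketch shows:

/* GNU C range designators vs. a plain partial initializer */
int partial[4] = { -1, };		/* { -1,  0,  0,  0 } */
int full[4]    = { [0 ... 3] = -1 };	/* { -1, -1, -1, -1 } */
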
if (pin) {
pin--; /* interrupt pins are numbered starting from 1 */
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
-/*
- * Will be removed completely if things work out well with fuzzy parsing
- */
-#if 0
+ /*
+ * Busses behind bridges are typically not listed in the MP-table.
+ * In this case we have to look up the IRQ based on the parent bus,
+ * parent slot, and pin number. The SMP code detects such bridged
+ * busses itself so we should get into this branch reliably.
+ */
if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
struct pci_dev * bridge = dev->bus->self;
printk(KERN_WARNING "PCI: using PPB(B%d,I%d,P%d) to get irq %d\n",
bridge->bus->number, PCI_SLOT(bridge->devfn), pin, irq);
}
-#endif
if (irq >= 0) {
printk(KERN_INFO "PCI->APIC IRQ transform: (B%d,I%d,P%d) -> %d\n",
dev->bus->number, PCI_SLOT(dev->devfn), pin, irq);
*/
if (PortP->TxStart == 0) {
rio_dprintk (RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n");
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
break;
}
__raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
__raw_writel(1 << 30 | 1 << 3, msg+3); // TransmitControlWord
// bit 30: reply as soon as transmission attempt is complete
- // bit 3: Supress CRC generation
+ // bit 3: Suppress CRC generation
__raw_writel(0xD5000000 | skb->len, msg+4); // MAC hdr included
__raw_writel((u32)skb, msg+5); // TransactionContext
__raw_writel(virt_to_bus(skb->data), msg+6);
/*
* If there was an error condition, pass the info back to the user.
*/
+ result = SRpnt->sr_result;
if (SRpnt->sr_result) {
int sb_len = sizeof(SRpnt->sr_sense_buffer);
sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len;
if (copy_to_user(cmd_in, SRpnt->sr_sense_buffer, sb_len))
- return -EFAULT;
- } else
+ result = -EFAULT;
+ } else {
if (copy_to_user(cmd_in, buf, outlen))
- return -EFAULT;
-
- result = SRpnt->sr_result;
+ result = -EFAULT;
+ }
SDpnt = SRpnt->sr_device;
scsi_release_request(SRpnt);
dep_tristate ' USB Kodak DC-2xx Camera support' CONFIG_USB_DC2XX $CONFIG_USB
dep_tristate ' USB Mustek MDC800 Digital Camera support (EXPERIMENTAL)' CONFIG_USB_MDC800 $CONFIG_USB $CONFIG_EXPERIMENTAL
dep_tristate ' USB Scanner support' CONFIG_USB_SCANNER $CONFIG_USB
- dep_tristate ' Microtek X6USB scanner support (EXPERIMENTAL)' CONFIG_USB_MICROTEK $CONFIG_USB $CONFIG_SCSI $CONFIG_EXPERIMENTAL
+ dep_tristate ' Microtek X6USB scanner support' CONFIG_USB_MICROTEK $CONFIG_USB $CONFIG_SCSI
comment 'USB Multimedia devices'
dep_tristate ' USB IBM (Xirlink) C-it Camera support' CONFIG_USB_IBMCAM $CONFIG_USB $CONFIG_VIDEO_DEV
/*
- * bluetooth.c Version 0.8
+ * bluetooth.c Version 0.10
*
* Copyright (c) 2000 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2000 Mark Douglas Corner <mcorner@umich.edu>
*
* USB Bluetooth driver, based on the Bluetooth Spec version 1.0B
*
+ * (2001/05/28) Version 0.10 gkh
+ * - Fixed problem with using data from userspace in the bluetooth_write
+ * function as found by the CHECKER project.
+ * - Added a buffer to the write_urb_pool which reduces the number of
+ * buffers being created and destroyed for every write. Also cleans
+ * up the logic a bit.
+ * - Added a buffer to the control_urb_pool which fixes a memory leak
+ * when the device is removed from the system.
+ *
* (08/04/2001) gb
* Identify version on module load.
*
/*
* Version Information
*/
-#define DRIVER_VERSION "v0.8"
+#define DRIVER_VERSION "v0.10"
#define DRIVER_AUTHOR "Greg Kroah-Hartman, Mark Douglas Corner"
#define DRIVER_DESC "USB Bluetooth driver"
}
-static int bluetooth_ctrl_msg (struct usb_bluetooth *bluetooth, int request, int value, void *buf, int len)
+static int bluetooth_ctrl_msg (struct usb_bluetooth *bluetooth, int request, int value, const unsigned char *buf, int len)
{
struct urb *urb = NULL;
devrequest *dr = NULL;
return -ENOMEM;
}
- /* free up the last buffer that this urb used */
- if (urb->transfer_buffer != NULL) {
- kfree(urb->transfer_buffer);
- urb->transfer_buffer = NULL;
+ /* keep increasing the urb transfer buffer to fit the size of the message */
+ if (urb->transfer_buffer == NULL) {
+ urb->transfer_buffer = kmalloc (len, GFP_KERNEL);
+ if (urb->transfer_buffer == NULL) {
+ err (__FUNCTION__" - out of memory");
+ return -ENOMEM;
+ }
+ }
+ if (urb->transfer_buffer_length < len) {
+ kfree (urb->transfer_buffer);
+ urb->transfer_buffer = kmalloc (len, GFP_KERNEL);
+ if (urb->transfer_buffer == NULL) {
+ err (__FUNCTION__" - out of memory");
+ return -ENOMEM;
+ }
}
+ memcpy (urb->transfer_buffer, buf, len);
dr->requesttype = BLUETOOTH_CONTROL_REQUEST_TYPE;
dr->request = request;
dr->length = cpu_to_le16p(&len);
FILL_CONTROL_URB (urb, bluetooth->dev, usb_sndctrlpipe(bluetooth->dev, 0),
- (unsigned char*)dr, buf, len, bluetooth_ctrl_callback, bluetooth);
+ (unsigned char*)dr, urb->transfer_buffer, len, bluetooth_ctrl_callback, bluetooth);
/* send it down the pipe */
status = usb_submit_urb(urb);
if (status)
dbg(__FUNCTION__ " - usb_submit_urb(control) failed with status = %d", status);
- return 0;
+ return status;
}
{
struct usb_bluetooth *bluetooth = get_usb_bluetooth ((struct usb_bluetooth *)tty->driver_data, __FUNCTION__);
struct urb *urb = NULL;
- unsigned char *new_buffer;
+ unsigned char *temp_buffer = NULL;
+ const unsigned char *current_buffer;
const unsigned char *current_position;
- int status;
int bytes_sent;
int buffer_size;
int i;
+ int retval = 0;
if (!bluetooth) {
return -ENODEV;
printk ("\n");
#endif
- switch (*buf) {
+ if (from_user) {
+ temp_buffer = kmalloc (count, GFP_KERNEL);
+ if (temp_buffer == NULL) {
+ err (__FUNCTION__ "- out of memory.");
+ retval = -ENOMEM;
+ goto exit;
+ }
+ copy_from_user (temp_buffer, buf, count);
+ current_buffer = temp_buffer;
+ } else {
+ current_buffer = buf;
+ }
+
+ switch (*current_buffer) {
/* First byte indicates the type of packet */
case CMD_PKT:
/* dbg(__FUNCTION__ "- Send cmd_pkt len:%d", count);*/
if (in_interrupt()){
printk("cmd_pkt from interrupt!\n");
- return count;
- }
-
- new_buffer = kmalloc (count-1, GFP_KERNEL);
-
- if (!new_buffer) {
- err (__FUNCTION__ "- out of memory.");
- return -ENOMEM;
+ retval = count;
+ goto exit;
}
- if (from_user)
- copy_from_user (new_buffer, buf+1, count-1);
- else
- memcpy (new_buffer, buf+1, count-1);
-
- if (bluetooth_ctrl_msg (bluetooth, 0x00, 0x00, new_buffer, count-1) != 0) {
- kfree (new_buffer);
- return 0;
+ retval = bluetooth_ctrl_msg (bluetooth, 0x00, 0x00, &current_buffer[1], count-1);
+ if (retval) {
+ goto exit;
}
-
- /* need to free new_buffer somehow... FIXME */
- return count;
+ retval = count;
+ break;
case ACL_PKT:
- current_position = buf;
+ current_position = current_buffer;
++current_position;
--count;
bytes_sent = 0;
}
if (urb == NULL) {
dbg (__FUNCTION__ " - no free urbs");
- return bytes_sent;
+ retval = bytes_sent;
+ goto exit;
}
- /* free up the last buffer that this urb used */
- if (urb->transfer_buffer != NULL) {
- kfree(urb->transfer_buffer);
- urb->transfer_buffer = NULL;
- }
buffer_size = MIN (count, bluetooth->bulk_out_buffer_size);
-
- new_buffer = kmalloc (buffer_size, GFP_KERNEL);
- if (new_buffer == NULL) {
- err(__FUNCTION__" no more kernel memory...");
- return bytes_sent;
- }
-
- if (from_user)
- copy_from_user(new_buffer, current_position, buffer_size);
- else
- memcpy (new_buffer, current_position, buffer_size);
+ memcpy (urb->transfer_buffer, current_position, buffer_size);
/* build up our urb */
FILL_BULK_URB (urb, bluetooth->dev, usb_sndbulkpipe(bluetooth->dev, bluetooth->bulk_out_endpointAddress),
- new_buffer, buffer_size, bluetooth_write_bulk_callback, bluetooth);
+ urb->transfer_buffer, buffer_size, bluetooth_write_bulk_callback, bluetooth);
urb->transfer_flags |= USB_QUEUE_BULK;
/* send it down the pipe */
- status = usb_submit_urb(urb);
- if (status)
- dbg(__FUNCTION__ " - usb_submit_urb(write bulk) failed with status = %d", status);
+ retval = usb_submit_urb(urb);
+ if (retval) {
+ dbg(__FUNCTION__ " - usb_submit_urb(write bulk) failed with error = %d", retval);
+ goto exit;
+ }
#ifdef BTBUGGYHARDWARE
/* A workaround for the stalled data bug */
/* May or may not be needed...*/
count -= buffer_size;
}
- return bytes_sent + 1;
+ retval = bytes_sent + 1;
+ break;
default :
dbg(__FUNCTION__" - unsupported (at this time) write type");
+ retval = -EINVAL;
+ break;
}
- return 0;
+exit:
+ if (temp_buffer != NULL)
+ kfree (temp_buffer);
+
+ return retval;
}
err("No free urbs available");
goto probe_error;
}
- urb->transfer_buffer = NULL;
+ urb->transfer_buffer = kmalloc (bluetooth->bulk_out_buffer_size, GFP_KERNEL);
+ if (urb->transfer_buffer == NULL) {
+ err("out of memory");
+ goto probe_error;
+ }
bluetooth->write_urb_pool[i] = urb;
}
if (bluetooth->interrupt_in_buffer)
kfree (bluetooth->interrupt_in_buffer);
for (i = 0; i < NUM_BULK_URBS; ++i)
- if (bluetooth->write_urb_pool[i])
+ if (bluetooth->write_urb_pool[i]) {
+ if (bluetooth->write_urb_pool[i]->transfer_buffer)
+ kfree (bluetooth->write_urb_pool[i]->transfer_buffer);
usb_free_urb (bluetooth->write_urb_pool[i]);
+ }
for (i = 0; i < NUM_CONTROL_URBS; ++i)
- if (bluetooth->control_urb_pool[i])
+ if (bluetooth->control_urb_pool[i]) {
+ if (bluetooth->control_urb_pool[i]->transfer_buffer)
+ kfree (bluetooth->control_urb_pool[i]->transfer_buffer);
usb_free_urb (bluetooth->control_urb_pool[i]);
+ }
bluetooth_table[minor] = NULL;
/* IMPORTANT: This output MUST be kept under PAGE_SIZE
* or we need to get more sophisticated. */
- out += sprintf (out, "driver_version : %s\n", version);
+ out += sprintf (out, "driver_version : %s\n", DRIVER_VERSION);
out += sprintf (out, "custom_id : %d\n", ov511->customid);
out += sprintf (out, "model : %s\n", ov511->desc ?
clist[ov511->desc].description : "unknown");
static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
{
int ret;
+ unsigned char *buffer;
DECLARE_WAITQUEUE(wait, current);
+ buffer = kmalloc(size,GFP_KERNEL);
+ if (!buffer) {
+ err("unable to allocate memory for configuration descriptors");
+ return 0;
+ }
+ memcpy(buffer,data,size);
+
while ( pegasus->flags & ETH_REGS_CHANGED ) {
pegasus->flags |= CTRL_URB_SLEEP;
interruptible_sleep_on( &pegasus->ctrl_wait );
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
usb_rcvctrlpipe(pegasus->usb,0),
(char *)&pegasus->dr,
- data, size, ctrl_callback, pegasus );
+ buffer, size, ctrl_callback, pegasus );
add_wait_queue( &pegasus->ctrl_wait, &wait );
set_current_state( TASK_INTERRUPTIBLE );
schedule();
remove_wait_queue( &pegasus->ctrl_wait, &wait );
out:
+ memcpy(data,buffer,size);
+ kfree(buffer);
return ret;
}
static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
{
int ret;
+ unsigned char *buffer;
DECLARE_WAITQUEUE(wait, current);
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer) {
+ err("unable to allocate memory for configuration descriptors");
+ return 0;
+ }
+ memcpy(buffer, data, size);
+
while ( pegasus->flags & ETH_REGS_CHANGED ) {
pegasus->flags |= CTRL_URB_SLEEP ;
interruptible_sleep_on( &pegasus->ctrl_wait );
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
usb_sndctrlpipe(pegasus->usb,0),
(char *)&pegasus->dr,
- data, size, ctrl_callback, pegasus );
+ buffer, size, ctrl_callback, pegasus );
add_wait_queue( &pegasus->ctrl_wait, &wait );
set_current_state( TASK_INTERRUPTIBLE );
if ( (ret = usb_submit_urb( &pegasus->ctrl_urb )) ) {
err( __FUNCTION__ " BAD CTRL %d", ret);
+ kfree(buffer);
return ret;
}
schedule();
remove_wait_queue( &pegasus->ctrl_wait, &wait );
+ kfree(buffer);
return ret;
}
static int set_register( pegasus_t *pegasus, __u16 indx, __u8 data )
{
int ret;
+ unsigned char *buffer;
__u16 dat = data;
DECLARE_WAITQUEUE(wait, current);
+ buffer = kmalloc(1, GFP_KERNEL);
+ if (!buffer) {
+ err("unable to allocate memory for configuration descriptors");
+ return 0;
+ }
+ memcpy(buffer, &data, 1);
+
while ( pegasus->flags & ETH_REGS_CHANGED ) {
pegasus->flags |= CTRL_URB_SLEEP;
interruptible_sleep_on( &pegasus->ctrl_wait );
FILL_CONTROL_URB( &pegasus->ctrl_urb, pegasus->usb,
usb_sndctrlpipe(pegasus->usb,0),
(char *)&pegasus->dr,
- &data, 1, ctrl_callback, pegasus );
+ buffer, 1, ctrl_callback, pegasus );
add_wait_queue( &pegasus->ctrl_wait, &wait );
set_current_state( TASK_INTERRUPTIBLE );
if ( (ret = usb_submit_urb( &pegasus->ctrl_urb )) ) {
err( __FUNCTION__ " BAD CTRL %d", ret);
+ kfree(buffer);
return ret;
}
schedule();
remove_wait_queue( &pegasus->ctrl_wait, &wait );
+ kfree(buffer);
return ret;
}
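
All three pegasus helpers apply the same fix: the control URB has to point at
kernel heap memory that stays valid for the life of the transfer, never at a
caller's stack variable or at the caller's buffer directly. A condensed sketch
of the idiom; do_submit_and_wait() is a hypothetical stand-in for the
FILL_CONTROL_URB/submit/schedule sequence, not a real kernel function:

/* bounce-buffer idiom used in the three hunks above (sketch) */
static int ctrl_xfer_bounced(void *data, size_t size, int to_device)
{
	unsigned char *bounce = kmalloc(size, GFP_KERNEL);
	int ret;

	if (!bounce)
		return -ENOMEM;
	if (to_device)
		memcpy(bounce, data, size);	/* stage outgoing bytes */
	ret = do_submit_and_wait(bounce, size);
	if (!to_device && ret >= 0)
		memcpy(data, bounce, size);	/* copy the result back */
	kfree(bounce);
	return ret;
}
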
}
if (status) {
- dbg(__FUNCTION__" - nonzero write bulk status received: %d", urb->status);
+ dbg(__FUNCTION__" - nonzero write bulk status received: %d", status);
return;
}
/*
* USB HandSpring Visor driver
*
- * Copyright (C) 1999, 2000
+ * Copyright (C) 1999 - 2001
* Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or modify
*
* See Documentation/usb/usb-serial.txt for more information on using this driver
*
+ * (05/28/2001) gkh
+ * Added initial support for the Palm m500 and Palm m505 devices.
+ *
* (04/08/2001) gb
* Identify version on module load.
*
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.0.0"
+#define DRIVER_VERSION "v1.1"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
#define DRIVER_DESC "USB HandSpring Visor driver"
static void visor_read_bulk_callback (struct urb *urb);
+static __devinitdata struct usb_device_id visor_id_table [] = {
+ { USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) },
+ { } /* Terminating entry */
+};
+
+static __devinitdata struct usb_device_id palm_m500_id_table [] = {
+ { USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID) },
+ { } /* Terminating entry */
+};
+
+static __devinitdata struct usb_device_id palm_m505_id_table [] = {
+ { USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID) },
+ { } /* Terminating entry */
+};
+
+
static __devinitdata struct usb_device_id id_table [] = {
{ USB_DEVICE(HANDSPRING_VENDOR_ID, HANDSPRING_VISOR_ID) },
+ { USB_DEVICE(PALM_VENDOR_ID, PALM_M500_ID) },
+ { USB_DEVICE(PALM_VENDOR_ID, PALM_M505_ID) },
{ } /* Terminating entry */
};
/* All of the device info needed for the Handspring Visor */
struct usb_serial_device_type handspring_device = {
name: "Handspring Visor",
- id_table: id_table,
+ id_table: visor_id_table,
+ needs_interrupt_in: MUST_HAVE_NOT, /* this device must not have an interrupt in endpoint */
+ needs_bulk_in: MUST_HAVE, /* this device must have a bulk in endpoint */
+ needs_bulk_out: MUST_HAVE, /* this device must have a bulk out endpoint */
+ num_interrupt_in: 0,
+ num_bulk_in: 2,
+ num_bulk_out: 2,
+ num_ports: 2,
+ open: visor_open,
+ close: visor_close,
+ throttle: visor_throttle,
+ unthrottle: visor_unthrottle,
+ startup: visor_startup,
+ shutdown: visor_shutdown,
+ ioctl: visor_ioctl,
+ set_termios: visor_set_termios,
+ write: visor_write,
+ write_room: visor_write_room,
+ chars_in_buffer: visor_chars_in_buffer,
+ write_bulk_callback: visor_write_bulk_callback,
+ read_bulk_callback: visor_read_bulk_callback,
+};
+
+/* device info for the Palm M500 */
+struct usb_serial_device_type palm_m500_device = {
+ name: "Palm M500",
+ id_table: palm_m500_id_table,
needs_interrupt_in: MUST_HAVE_NOT, /* this device must not have an interrupt in endpoint */
needs_bulk_in: MUST_HAVE, /* this device must have a bulk in endpoint */
needs_bulk_out: MUST_HAVE, /* this device must have a bulk out endpoint */
read_bulk_callback: visor_read_bulk_callback,
};
+/* device info for the Palm M505 */
+struct usb_serial_device_type palm_m505_device = {
+ name: "Palm M505",
+ id_table: palm_m505_id_table,
+ needs_interrupt_in: MUST_HAVE_NOT, /* this device must not have an interrupt in endpoint */
+ needs_bulk_in: MUST_HAVE, /* this device must have a bulk in endpoint */
+ needs_bulk_out: MUST_HAVE, /* this device must have a bulk out endpoint */
+ num_interrupt_in: 0,
+ num_bulk_in: 2,
+ num_bulk_out: 2,
+ num_ports: 2,
+ open: visor_open,
+ close: visor_close,
+ throttle: visor_throttle,
+ unthrottle: visor_unthrottle,
+ startup: visor_startup,
+ shutdown: visor_shutdown,
+ ioctl: visor_ioctl,
+ set_termios: visor_set_termios,
+ write: visor_write,
+ write_room: visor_write_room,
+ chars_in_buffer: visor_chars_in_buffer,
+ write_bulk_callback: visor_write_bulk_callback,
+ read_bulk_callback: visor_read_bulk_callback,
+};
#define NUM_URBS 24
#define URB_TRANSFER_BUFFER_SIZE 768
return -ENOMEM;
}
+ /* force debugging on for the palm devices for now */
+ if (serial->dev->descriptor.idVendor == PALM_VENDOR_ID)
+ debug = 1;
+
dbg(__FUNCTION__);
dbg(__FUNCTION__ " - Set config to 1");
}
}
+ if (serial->dev->descriptor.idVendor == PALM_VENDOR_ID) {
+ /* Palm USB Hack */
+ response = usb_control_msg (serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ PALM_GET_SOME_UNKNOWN_INFORMATION,
+ 0xc2, 0x0000, 0x0000, transfer_buffer,
+ 0x14, 300);
+ if (response < 0) {
+ err(__FUNCTION__ " - error getting first unknown palm command");
+ } else {
+ usb_serial_debug_data (__FILE__, __FUNCTION__, 0x14, transfer_buffer);
+ }
+ response = usb_control_msg (serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ PALM_GET_SOME_UNKNOWN_INFORMATION,
+ 0xc2, 0x0000, 0x0000, transfer_buffer,
+ 0x14, 300);
+ if (response < 0) {
+ err(__FUNCTION__ " - error getting second unknown palm command");
+ } else {
+ usb_serial_debug_data (__FILE__, __FUNCTION__, 0x14, transfer_buffer);
+ }
+ }
+
/* ask for the number of bytes available, but ignore the response as it is broken */
response = usb_control_msg (serial->dev, usb_rcvctrlpipe(serial->dev, 0), VISOR_REQUEST_BYTES_AVAILABLE,
0xc2, 0x0000, 0x0005, transfer_buffer, 0x02, 300);
int i;
usb_serial_register (&handspring_device);
+ usb_serial_register (&palm_m500_device);
+ usb_serial_register (&palm_m505_device);
/* create our write urb pool and transfer buffers */
spin_lock_init (&write_urb_pool_lock);
unsigned long flags;
usb_serial_deregister (&handspring_device);
+ usb_serial_deregister (&palm_m500_device);
+ usb_serial_deregister (&palm_m505_device);
spin_lock_irqsave (&write_urb_pool_lock, flags);
/*
* USB HandSpring Visor driver
*
- * Copyright (C) 1999, 2000
+ * Copyright (C) 1999 - 2001
* Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or modify
#define HANDSPRING_VENDOR_ID 0x082d
#define HANDSPRING_VISOR_ID 0x0100
+#define PALM_VENDOR_ID 0x0830
+#define PALM_M500_ID 0x0001
+#define PALM_M505_ID 0x0002
+
/****************************************************************************
* Handspring Visor Vendor specific request codes (bRequest values)
* A big thank you to Handspring for providing the following information.
#define VISOR_FUNCTION_CONSOLE 0x03
#define VISOR_FUNCTION_REMOTE_FILE_SYS 0x04
+
+/****************************************************************************
+ * PALM_GET_SOME_UNKNOWN_INFORMATION is sent by the host during enumeration
+ * to get some currently unknown information from the M series devices.
+ ****************************************************************************/
+#define PALM_GET_SOME_UNKNOWN_INFORMATION 0x04
+
#endif
*/
list_del(&bus->bus_list);
- usbdevfs_remove_bus(bus);
+ usbdevfs_remove_bus(bus);
clear_bit(bus->busnum, busmap.busmap);
{
int result;
__u16 status;
+ unsigned char *buffer;
int endp=usb_pipeendpoint(pipe)|(usb_pipein(pipe)<<7);
/*
if (result < 0)
return result;
+ buffer = kmalloc(sizeof(status), GFP_KERNEL);
+ if (!buffer) {
+ err("unable to allocate memory for configuration descriptors");
+ return -ENOMEM;
+ }
+
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_STATUS, USB_DIR_IN | USB_RECIP_ENDPOINT, 0, endp,
- &status, sizeof(status), HZ * SET_TIMEOUT);
+ buffer, sizeof(status), HZ * SET_TIMEOUT);
+
+ memcpy(&status, buffer, sizeof(status));
+ kfree(buffer);
+
if (result < 0)
return result;
{
int result;
unsigned int cfgno, length;
- unsigned char buffer[8];
+ unsigned char *buffer;
unsigned char *bigbuffer;
- struct usb_config_descriptor *desc =
- (struct usb_config_descriptor *)buffer;
+ struct usb_config_descriptor *desc;
if (dev->descriptor.bNumConfigurations > USB_MAXCONFIG) {
warn("too many configurations");
return -ENOMEM;
}
+ buffer = kmalloc(8, GFP_KERNEL);
+ if (!buffer) {
+ err("unable to allocate memory for configuration descriptors");
+ return -ENOMEM;
+ }
+ desc = (struct usb_config_descriptor *)buffer;
+
for (cfgno = 0; cfgno < dev->descriptor.bNumConfigurations; cfgno++) {
/* We grab the first 8 bytes so we know how long the whole */
/* configuration is */
}
}
+ kfree(buffer);
return 0;
err:
+ kfree(buffer);
dev->descriptor.bNumConfigurations = cfgno;
return result;
}
return 0;
}
-static int __init initialized = 0;
+static int initialized __initdata = 0;
int __init matroxfb_init(void)
{
#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
-#define NR_RESERVED (2*MAX_BUF_PER_PAGE)
+#define NR_RESERVED (10*MAX_BUF_PER_PAGE)
#define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this
number of unused buffer heads */
*/
run_task_queue(&tq_disk);
- /*
- * Set our state for sleeping, then check again for buffer heads.
- * This ensures we won't miss a wake_up from an interrupt.
- */
- wait_event(buffer_wait, nr_unused_buffer_heads >= MAX_BUF_PER_PAGE);
+ current->policy |= SCHED_YIELD;
+ __set_current_state(TASK_RUNNING);
+ schedule();
goto try_again;
}
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
+ *
+ * All code that works with directory layout had been switched to pagecache
+ * and moved here. AV
*/
#include <linux/fs.h>
#include <linux/ext2_fs.h>
+#include <linux/pagemap.h>
-static unsigned char ext2_filetype_table[] = {
- DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-};
+typedef struct ext2_dir_entry_2 ext2_dirent;
-static int ext2_readdir(struct file *, void *, filldir_t);
+/*
+ * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
+ * more robust, but we have what we have
+ */
+static inline unsigned ext2_chunk_size(struct inode *inode)
+{
+ return inode->i_sb->s_blocksize;
+}
-struct file_operations ext2_dir_operations = {
- read: generic_read_dir,
- readdir: ext2_readdir,
- ioctl: ext2_ioctl,
- fsync: ext2_sync_file,
+static inline void ext2_put_page(struct page *page)
+{
+ kunmap(page);
+ page_cache_release(page);
+}
+
+static inline unsigned long dir_pages(struct inode *inode)
+{
+ return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
+}
+
+static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
+{
+ struct inode *dir = (struct inode *)page->mapping->host;
+ int err = 0;
+ dir->i_version = ++event;
+ page->mapping->a_ops->commit_write(NULL, page, from, to);
+ if (IS_SYNC(dir))
+ err = waitfor_one_page(page);
+ return err;
+}
+
+static void ext2_check_page(struct page *page)
+{
+ struct inode *dir = (struct inode *)page->mapping->host;
+ struct super_block *sb = dir->i_sb;
+ unsigned chunk_size = ext2_chunk_size(dir);
+ char *kaddr = (char*)page_address(page);
+ u32 max_inumber = le32_to_cpu(sb->u.ext2_sb.s_es->s_inodes_count);
+ unsigned offs, rec_len;
+ unsigned limit = PAGE_CACHE_SIZE;
+ ext2_dirent *p;
+ char *error;
+
+ if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
+ limit = dir->i_size & ~PAGE_CACHE_MASK;
+ if (limit & (chunk_size - 1))
+ goto Ebadsize;
+ for (offs = limit; offs<PAGE_CACHE_SIZE; offs += chunk_size) {
+ ext2_dirent *p = (ext2_dirent*)(kaddr + offs);
+ p->rec_len = cpu_to_le16(chunk_size);
+ }
+ if (!limit)
+ goto out;
+ }
+ for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
+ p = (ext2_dirent *)(kaddr + offs);
+ rec_len = le16_to_cpu(p->rec_len);
+
+ if (rec_len < EXT2_DIR_REC_LEN(1))
+ goto Eshort;
+ if (rec_len & 3)
+ goto Ealign;
+ if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
+ goto Enamelen;
+ if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+ goto Espan;
+ if (le32_to_cpu(p->inode) > max_inumber)
+ goto Einumber;
+ }
+ if (offs != limit)
+ goto Eend;
+out:
+ SetPageChecked(page);
+ return;
+
+ /* Too bad, we had an error */
+
+Ebadsize:
+ ext2_error(sb, "ext2_check_page",
+ "size of directory #%lu is not a multiple of chunk size",
+ dir->i_ino
+ );
+ goto fail;
+Eshort:
+ error = "rec_len is smaller than minimal";
+ goto bad_entry;
+Ealign:
+ error = "unaligned directory entry";
+ goto bad_entry;
+Enamelen:
+ error = "rec_len is too small for name_len";
+ goto bad_entry;
+Espan:
+ error = "directory entry across blocks";
+ goto bad_entry;
+Einumber:
+ error = "inode out of bounds";
+bad_entry:
+ ext2_error (sb, "ext2_check_page", "bad entry in directory #%lu: %s - "
+ "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+ dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ (unsigned long) le32_to_cpu(p->inode),
+ rec_len, p->name_len);
+ goto fail;
+Eend:
+ p = (ext2_dirent *)(kaddr + offs);
+ ext2_error (sb, "ext2_check_page",
+ "entry in directory #%lu spans the page boundary"
+ "offset=%lu, inode=%lu",
+ dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
+ (unsigned long) le32_to_cpu(p->inode));
+fail:
+ SetPageChecked(page);
+ SetPageError(page);
+}
+
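/* For reference: the EXT2_DIR_REC_LEN() bound used by the checks above
 * is the standard ext2 formula from ext2_fs.h -- an 8-byte fixed header
 * (inode, rec_len, name_len, file_type) plus the name, rounded up to a
 * 4-byte boundary:
 *
 *	#define EXT2_DIR_PAD	4
 *	#define EXT2_DIR_ROUND	(EXT2_DIR_PAD - 1)
 *	#define EXT2_DIR_REC_LEN(name_len)	(((name_len) + 8 + \
 *					EXT2_DIR_ROUND) & ~EXT2_DIR_ROUND)
 *
 * so EXT2_DIR_REC_LEN(1) == 12 is the smallest legal entry.
 */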
+static struct page * ext2_get_page(struct inode *dir, unsigned long n)
+{
+ struct address_space *mapping = dir->i_mapping;
+ struct page *page = read_cache_page(mapping, n,
+ (filler_t*)mapping->a_ops->readpage, NULL);
+ if (!IS_ERR(page)) {
+ wait_on_page(page);
+ kmap(page);
+ if (!Page_Uptodate(page))
+ goto fail;
+ if (!PageChecked(page))
+ ext2_check_page(page);
+ if (PageError(page))
+ goto fail;
+ }
+ return page;
+
+fail:
+ ext2_put_page(page);
+ return ERR_PTR(-EIO);
+}
+
+/*
+ * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
+ *
+ * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
+ */
+static inline int ext2_match (int len, const char * const name,
+ struct ext2_dir_entry_2 * de)
+{
+ if (len != de->name_len)
+ return 0;
+ if (!de->inode)
+ return 0;
+ return !memcmp(name, de->name, len);
+}
+
+/*
+ * p is at least 6 bytes before the end of page
+ */
+static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
+{
+ return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
+}
+
+static inline unsigned
+ext2_validate_entry(char *base, unsigned offset, unsigned mask)
+{
+ ext2_dirent *de = (ext2_dirent*)(base + offset);
+ ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
+ while ((char*)p < (char*)de)
+ p = ext2_next_entry(p);
+ return (char *)p - base;
+}
+
+static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
+ [EXT2_FT_UNKNOWN] DT_UNKNOWN,
+ [EXT2_FT_REG_FILE] DT_REG,
+ [EXT2_FT_DIR] DT_DIR,
+ [EXT2_FT_CHRDEV] DT_CHR,
+ [EXT2_FT_BLKDEV] DT_BLK,
+ [EXT2_FT_FIFO] DT_FIFO,
+ [EXT2_FT_SOCK] DT_SOCK,
+ [EXT2_FT_SYMLINK] DT_LNK,
};
-int ext2_check_dir_entry (const char * function, struct inode * dir,
- struct ext2_dir_entry_2 * de,
- struct buffer_head * bh,
- unsigned long offset)
-{
- const char * error_msg = NULL;
-
- if (le16_to_cpu(de->rec_len) < EXT2_DIR_REC_LEN(1))
- error_msg = "rec_len is smaller than minimal";
- else if (le16_to_cpu(de->rec_len) % 4 != 0)
- error_msg = "rec_len % 4 != 0";
- else if (le16_to_cpu(de->rec_len) < EXT2_DIR_REC_LEN(de->name_len))
- error_msg = "rec_len is too small for name_len";
- else if (dir && ((char *) de - bh->b_data) + le16_to_cpu(de->rec_len) >
- dir->i_sb->s_blocksize)
- error_msg = "directory entry across blocks";
- else if (dir && le32_to_cpu(de->inode) > le32_to_cpu(dir->i_sb->u.ext2_sb.s_es->s_inodes_count))
- error_msg = "inode out of bounds";
-
- if (error_msg != NULL)
- ext2_error (dir->i_sb, function, "bad entry in directory #%lu: %s - "
- "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error_msg, offset,
- (unsigned long) le32_to_cpu(de->inode),
- le16_to_cpu(de->rec_len), de->name_len);
- return error_msg == NULL ? 1 : 0;
-}
-
-static int ext2_readdir(struct file * filp,
- void * dirent, filldir_t filldir)
-{
- int error = 0;
- unsigned long offset, blk;
- int i, num, stored;
- struct buffer_head * bh, * tmp, * bha[16];
- struct ext2_dir_entry_2 * de;
- struct super_block * sb;
- int err;
+#define S_SHIFT 12
+static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
+ [S_IFREG >> S_SHIFT] EXT2_FT_REG_FILE,
+ [S_IFDIR >> S_SHIFT] EXT2_FT_DIR,
+ [S_IFCHR >> S_SHIFT] EXT2_FT_CHRDEV,
+ [S_IFBLK >> S_SHIFT] EXT2_FT_BLKDEV,
+ [S_IFIFO >> S_SHIFT] EXT2_FT_FIFO,
+ [S_IFSOCK >> S_SHIFT] EXT2_FT_SOCK,
+ [S_IFLNK >> S_SHIFT] EXT2_FT_SYMLINK,
+};
+
+static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
+{
+ mode_t mode = inode->i_mode;
+ if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
+ de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+ else
+ de->file_type = 0;
+}
+
+static int
+ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
+{
+ loff_t pos = filp->f_pos;
struct inode *inode = filp->f_dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
+ unsigned offset = pos & ~PAGE_CACHE_MASK;
+ unsigned long n = pos >> PAGE_CACHE_SHIFT;
+ unsigned long npages = dir_pages(inode);
+ unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
+ unsigned char *types = NULL;
+ int need_revalidate = (filp->f_version != inode->i_version);
+
+ if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
+ goto done;
- sb = inode->i_sb;
+ if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
+ types = ext2_filetype_table;
- stored = 0;
- bh = NULL;
- offset = filp->f_pos & (sb->s_blocksize - 1);
+ for ( ; n < npages; n++, offset = 0) {
+ char *kaddr, *limit;
+ ext2_dirent *de;
+ struct page *page = ext2_get_page(inode, n);
- while (!error && !stored && filp->f_pos < inode->i_size) {
- blk = (filp->f_pos) >> EXT2_BLOCK_SIZE_BITS(sb);
- bh = ext2_bread (inode, blk, 0, &err);
- if (!bh) {
- ext2_error (sb, "ext2_readdir",
- "directory #%lu contains a hole at offset %lu",
- inode->i_ino, (unsigned long)filp->f_pos);
- filp->f_pos += sb->s_blocksize - offset;
+ if (IS_ERR(page))
continue;
+ kaddr = (char *)page_address(page);
+ if (need_revalidate) {
+ offset = ext2_validate_entry(kaddr, offset, chunk_mask);
+ need_revalidate = 0;
}
+ de = (ext2_dirent *)(kaddr+offset);
+ limit = kaddr + PAGE_CACHE_SIZE - EXT2_DIR_REC_LEN(1);
+ for ( ;(char*)de <= limit; de = ext2_next_entry(de))
+ if (de->inode) {
+ int over;
+ unsigned char d_type = DT_UNKNOWN;
- /*
- * Do the readahead
- */
- if (!offset) {
- for (i = 16 >> (EXT2_BLOCK_SIZE_BITS(sb) - 9), num = 0;
- i > 0; i--) {
- tmp = ext2_getblk (inode, ++blk, 0, &err);
- if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
- bha[num++] = tmp;
- else
- brelse (tmp);
- }
- if (num) {
- ll_rw_block (READA, num, bha);
- for (i = 0; i < num; i++)
- brelse (bha[i]);
- }
- }
-
-revalidate:
- /* If the dir block has changed since the last call to
- * readdir(2), then we might be pointing to an invalid
- * dirent right now. Scan from the start of the block
- * to make sure. */
- if (filp->f_version != inode->i_version) {
- for (i = 0; i < sb->s_blocksize && i < offset; ) {
- de = (struct ext2_dir_entry_2 *)
- (bh->b_data + i);
- /* It's too expensive to do a full
- * dirent test each time round this
- * loop, but we do have to test at
- * least that it is non-zero. A
- * failure will be detected in the
- * dirent test below. */
- if (le16_to_cpu(de->rec_len) < EXT2_DIR_REC_LEN(1))
- break;
- i += le16_to_cpu(de->rec_len);
+ if (types && de->file_type < EXT2_FT_MAX)
+ d_type = types[de->file_type];
+
+ offset = (char *)de - kaddr;
+ over = filldir(dirent, de->name, de->name_len,
+ (n<<PAGE_CACHE_SHIFT) | offset,
+ le32_to_cpu(de->inode), d_type);
+ if (over) {
+ ext2_put_page(page);
+ goto done;
+ }
}
- offset = i;
- filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
- | offset;
- filp->f_version = inode->i_version;
+ ext2_put_page(page);
+ }
+
+done:
+ filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
+ filp->f_version = inode->i_version;
+ UPDATE_ATIME(inode);
+ return 0;
+}
+
+/*
+ * ext2_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name. It
+ * returns the page in which the entry was found, and the entry itself
+ * (as a parameter - res_dir). Page is returned mapped and unlocked.
+ * Entry is guaranteed to be valid.
+ */
+struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
+ struct dentry *dentry, struct page ** res_page)
+{
+ const char *name = dentry->d_name.name;
+ int namelen = dentry->d_name.len;
+ unsigned reclen = EXT2_DIR_REC_LEN(namelen);
+ unsigned long n;
+ unsigned long npages = dir_pages(dir);
+ struct page *page = NULL;
+ ext2_dirent * de;
+
+ /* OFFSET_CACHE */
+ *res_page = NULL;
+
+ for (n = 0; n < npages; n++) {
+ char *kaddr;
+ page = ext2_get_page(dir, n);
+ if (IS_ERR(page))
+ continue;
+
+ kaddr = (char*)page_address(page);
+ de = (ext2_dirent *) kaddr;
+ kaddr += PAGE_CACHE_SIZE - reclen;
+ for ( ; (char *) de <= kaddr ; de = ext2_next_entry(de))
+ if (ext2_match (namelen, name, de))
+ goto found;
+ ext2_put_page(page);
+ }
+ return NULL;
+
+found:
+ *res_page = page;
+ return de;
+}
+
+struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
+{
+ struct page *page = ext2_get_page(dir, 0);
+ ext2_dirent *de = NULL;
+
+ if (!IS_ERR(page)) {
+ de = ext2_next_entry((ext2_dirent *) page_address(page));
+ *p = page;
+ }
+ return de;
+}
+
+ino_t ext2_inode_by_name(struct inode * dir, struct dentry *dentry)
+{
+ ino_t res = 0;
+ struct ext2_dir_entry_2 * de;
+ struct page *page;
+
+ de = ext2_find_entry (dir, dentry, &page);
+ if (de) {
+ res = le32_to_cpu(de->inode);
+ kunmap(page);
+ page_cache_release(page);
+ }
+ return res;
+}
+
+/* Releases the page */
+void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
+ struct page *page, struct inode *inode)
+{
+ unsigned from = (char *)de-(char*)page_address(page);
+ unsigned to = from + le16_to_cpu(de->rec_len);
+ int err;
+
+ lock_page(page);
+ err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
+ if (err)
+ BUG();
+ de->inode = cpu_to_le32(inode->i_ino);
+ ext2_set_de_type (de, inode);
+ err = ext2_commit_chunk(page, from, to);
+ UnlockPage(page);
+ ext2_put_page(page);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(dir);
+}
+
+/*
+ * Parent is locked.
+ */
+int ext2_add_link (struct dentry *dentry, struct inode *inode)
+{
+ struct inode *dir = dentry->d_parent->d_inode;
+ const char *name = dentry->d_name.name;
+ int namelen = dentry->d_name.len;
+ unsigned reclen = EXT2_DIR_REC_LEN(namelen);
+ unsigned short rec_len, name_len;
+ struct page *page = NULL;
+ ext2_dirent * de;
+ unsigned long npages = dir_pages(dir);
+ unsigned long n;
+ char *kaddr;
+ unsigned from, to;
+ int err;
+
+ /* We take care of directory expansion in the same loop */
+ for (n = 0; n <= npages; n++) {
+ page = ext2_get_page(dir, n);
+ err = PTR_ERR(page);
+ if (IS_ERR(page))
+ goto out;
+ kaddr = (char*)page_address(page);
+ de = (ext2_dirent *)kaddr;
+ kaddr += PAGE_CACHE_SIZE - reclen;
+ while ((char *)de <= kaddr) {
+ err = -EEXIST;
+ if (ext2_match (namelen, name, de))
+ goto out_page;
+ name_len = EXT2_DIR_REC_LEN(de->name_len);
+ rec_len = le16_to_cpu(de->rec_len);
+ if (!de->inode && rec_len >= reclen)
+ goto got_it;
+ if (rec_len >= name_len + reclen)
+ goto got_it;
+ de = (ext2_dirent *) ((char *) de + rec_len);
}
-
- while (!error && filp->f_pos < inode->i_size
- && offset < sb->s_blocksize) {
- de = (struct ext2_dir_entry_2 *) (bh->b_data + offset);
- if (!ext2_check_dir_entry ("ext2_readdir", inode, de,
- bh, offset)) {
- /* On error, skip the f_pos to the
- next block. */
- filp->f_pos = (filp->f_pos | (sb->s_blocksize - 1))
- + 1;
- brelse (bh);
- return stored;
- }
- offset += le16_to_cpu(de->rec_len);
- if (le32_to_cpu(de->inode)) {
- /* We might block in the next section
- * if the data destination is
- * currently swapped out. So, use a
- * version stamp to detect whether or
- * not the directory has been modified
- * during the copy operation.
- */
- unsigned long version = filp->f_version;
- unsigned char d_type = DT_UNKNOWN;
+ ext2_put_page(page);
+ }
+ BUG();
+ return -EINVAL;
+
+got_it:
+ from = (char*)de - (char*)page_address(page);
+ to = from + rec_len;
+ lock_page(page);
+ err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
+ if (err)
+ goto out_unlock;
+ if (de->inode) {
+ ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
+ de1->rec_len = cpu_to_le16(rec_len - name_len);
+ de->rec_len = cpu_to_le16(name_len);
+ de = de1;
+ }
+ de->name_len = namelen;
+ memcpy (de->name, name, namelen);
+ de->inode = cpu_to_le32(inode->i_ino);
+ ext2_set_de_type (de, inode);
+ err = ext2_commit_chunk(page, from, to);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(dir);
+ /* OFFSET_CACHE */
+out_unlock:
+ UnlockPage(page);
+out_page:
+ ext2_put_page(page);
+out:
+ return err;
+}
- if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE)
- && de->file_type < EXT2_FT_MAX)
- d_type = ext2_filetype_table[de->file_type];
- error = filldir(dirent, de->name,
- de->name_len,
- filp->f_pos, le32_to_cpu(de->inode),
- d_type);
- if (error)
- break;
- if (version != filp->f_version)
- goto revalidate;
- stored ++;
+/*
+ * ext2_delete_entry deletes a directory entry by merging it with the
+ * previous entry. Page is up-to-date. Releases the page.
+ */
+int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode = (struct inode*)mapping->host;
+ char *kaddr = (char*)page_address(page);
+ unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
+ unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len);
+ ext2_dirent * pde = NULL;
+ ext2_dirent * de = (ext2_dirent *) (kaddr + from);
+ int err;
+
+ while ((char*)de < (char*)dir) {
+ pde = de;
+ de = ext2_next_entry(de);
+ }
+ if (pde)
+ from = (char*)pde - (char*)page_address(page);
+ lock_page(page);
+ err = mapping->a_ops->prepare_write(NULL, page, from, to);
+ if (err)
+ BUG();
+ if (pde)
+ pde->rec_len = cpu_to_le16(to-from);
+ dir->inode = 0;
+ err = ext2_commit_chunk(page, from, to);
+ UnlockPage(page);
+ ext2_put_page(page);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+ return err;
+}
+
+/*
+ * Set the first fragment of directory.
+ */
+int ext2_make_empty(struct inode *inode, struct inode *parent)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page = grab_cache_page(mapping, 0);
+ unsigned chunk_size = ext2_chunk_size(inode);
+ struct ext2_dir_entry_2 * de;
+ char *base;
+ int err;
+
+ if (!page)
+ return -ENOMEM;
+ err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
+ if (err)
+ goto fail;
+
+ base = (char*)page_address(page);
+
+ de = (struct ext2_dir_entry_2 *) base;
+ de->name_len = 1;
+ de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
+ memcpy (de->name, ".\0\0", 4);
+ de->inode = cpu_to_le32(inode->i_ino);
+ ext2_set_de_type (de, inode);
+
+ de = (struct ext2_dir_entry_2 *) (base + EXT2_DIR_REC_LEN(1));
+ de->name_len = 2;
+ de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
+ de->inode = cpu_to_le32(parent->i_ino);
+ memcpy (de->name, "..\0", 4);
+ ext2_set_de_type (de, inode);
+
+ err = ext2_commit_chunk(page, 0, chunk_size);
+fail:
+ UnlockPage(page);
+ page_cache_release(page);
+ return err;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+int ext2_empty_dir (struct inode * inode)
+{
+ struct page *page = NULL;
+ unsigned long i, npages = dir_pages(inode);
+
+ for (i = 0; i < npages; i++) {
+ char *kaddr;
+ ext2_dirent * de;
+ page = ext2_get_page(inode, i);
+
+ if (IS_ERR(page))
+ continue;
+
+ kaddr = (char *)page_address(page);
+ de = (ext2_dirent *)kaddr;
+ kaddr += PAGE_CACHE_SIZE-EXT2_DIR_REC_LEN(1);
+
+ while ((char *)de <= kaddr) {
+ if (de->inode != 0) {
+ /* check for . and .. */
+ if (de->name[0] != '.')
+ goto not_empty;
+ if (de->name_len > 2)
+ goto not_empty;
+ if (de->name_len < 2) {
+ if (de->inode !=
+ cpu_to_le32(inode->i_ino))
+ goto not_empty;
+ } else if (de->name[1] != '.')
+ goto not_empty;
}
- filp->f_pos += le16_to_cpu(de->rec_len);
+ de = ext2_next_entry(de);
}
- offset = 0;
- brelse (bh);
+ ext2_put_page(page);
}
- UPDATE_ATIME(inode);
+ return 1;
+
+not_empty:
+ ext2_put_page(page);
return 0;
}
+
+struct file_operations ext2_dir_operations = {
+ read: generic_read_dir,
+ readdir: ext2_readdir,
+ fsync: ext2_sync_file,
+};
lock_super (sb);
es = sb->u.ext2_sb.s_es;
- if (ino < EXT2_FIRST_INO(sb) ||
+ is_directory = S_ISDIR(inode->i_mode);
+
+ /* Do this BEFORE marking the inode not in use or returning an error */
+ clear_inode (inode);
+
+ if (ino < EXT2_FIRST_INO(sb) ||
ino > le32_to_cpu(es->s_inodes_count)) {
- ext2_error (sb, "free_inode",
- "reserved inode or nonexistent inode");
+ ext2_error (sb, "ext2_free_inode",
+ "reserved or nonexistent inode %lu", ino);
goto error_return;
}
block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
bitmap_nr = load_inode_bitmap (sb, block_group);
if (bitmap_nr < 0)
goto error_return;
-
- bh = sb->u.ext2_sb.s_inode_bitmap[bitmap_nr];
-
- is_directory = S_ISDIR(inode->i_mode);
- /* Do this BEFORE marking the inode not in use */
- clear_inode (inode);
+ bh = sb->u.ext2_sb.s_inode_bitmap[bitmap_nr];
/* Ok, now we can actually update the inode bitmaps.. */
if (!ext2_clear_bit (bit, bh->b_data))
* or when it reads all @depth-1 indirect blocks successfully and finds
* the whole chain, all way to the data (returns %NULL, *err == 0).
*/
-static inline Indirect *ext2_get_branch(struct inode *inode,
- int depth,
- int *offsets,
- Indirect chain[4],
- int *err)
+static Indirect *ext2_get_branch(struct inode *inode,
+ int depth,
+ int *offsets,
+ Indirect chain[4],
+ int *err)
{
kdev_t dev = inode->i_dev;
int size = inode->i_sb->s_blocksize;
goto reread;
}
-struct buffer_head * ext2_getblk(struct inode * inode, long block, int create, int * err)
-{
- struct buffer_head dummy;
- int error;
-
- dummy.b_state = 0;
- dummy.b_blocknr = -1000;
- error = ext2_get_block(inode, block, &dummy, create);
- *err = error;
- if (!error && buffer_mapped(&dummy)) {
- struct buffer_head *bh;
- bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
- if (buffer_new(&dummy)) {
- lock_buffer(bh);
- memset(bh->b_data, 0, inode->i_sb->s_blocksize);
- mark_buffer_uptodate(bh, 1);
- unlock_buffer(bh);
- mark_buffer_dirty_inode(bh, inode);
- }
- return bh;
- }
- return NULL;
-}
-
-struct buffer_head * ext2_bread (struct inode * inode, int block,
- int create, int *err)
-{
- struct buffer_head * bh;
- int prev_blocks;
-
- prev_blocks = inode->i_blocks;
-
- bh = ext2_getblk (inode, block, create, err);
- if (!bh)
- return bh;
-
- /*
- * If the inode has grown, and this is a directory, then perform
- * preallocation of a few more blocks to try to keep directory
- * fragmentation down.
- */
- if (create &&
- S_ISDIR(inode->i_mode) &&
- inode->i_blocks > prev_blocks &&
- EXT2_HAS_COMPAT_FEATURE(inode->i_sb,
- EXT2_FEATURE_COMPAT_DIR_PREALLOC)) {
- int i;
- struct buffer_head *tmp_bh;
-
- for (i = 1;
- i < EXT2_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
- i++) {
- /*
- * ext2_getblk will zero out the contents of the
- * directory for us
- */
- tmp_bh = ext2_getblk(inode, block+i, create, err);
- if (!tmp_bh) {
- brelse (bh);
- return 0;
- }
- brelse (tmp_bh);
- }
- }
-
- if (buffer_uptodate(bh))
- return bh;
- ll_rw_block (READ, 1, &bh);
- wait_on_buffer (bh);
- if (buffer_uptodate(bh))
- return bh;
- brelse (bh);
- *err = -EIO;
- return NULL;
-}
-
static int ext2_writepage(struct page *page)
{
return block_write_full_page(page,ext2_get_block);
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ext2_dir_inode_operations;
inode->i_fop = &ext2_dir_operations;
+ inode->i_mapping->a_ops = &ext2_aops;
} else if (S_ISLNK(inode->i_mode)) {
if (!inode->i_blocks)
inode->i_op = &ext2_fast_symlink_inode_operations;
/*
- * linux/fs/ext2/namei.c
+ * linux/fs/ext2/namei.c
+ *
+ * Rewrite to pagecache. Almost all of the code has been changed, so blame
+ * me if things go wrong. Please send bug reports to viro@math.psu.edu
+ *
+ * Stuff here is basically glue between the VFS and a generic UNIXish
+ * filesystem that keeps everything in pagecache. All knowledge of the
+ * directory layout is in fs/ext2/dir.c - it turned out to be easily
+ * separable, and it's easier to debug that way. In principle we might
+ * want to generalize that a bit and turn it into a library. Or not.
+ *
+ * The only non-static object here is ext2_dir_inode_operations.
+ *
+ * TODO: get rid of kmap() use, add readahead.
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
- * Directory entry file type support and forward compatibility hooks
- * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
*/
#include <linux/fs.h>
#include <linux/ext2_fs.h>
-#include <linux/locks.h>
-#include <linux/quotaops.h>
-
-
+#include <linux/pagemap.h>
/*
- * define how far ahead to read directories while searching them.
+ * A couple of helper functions - they make the code slightly cleaner.
*/
-#define NAMEI_RA_CHUNKS 2
-#define NAMEI_RA_BLOCKS 4
-#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
-/*
- * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
- *
- * `len <= EXT2_NAME_LEN' is guaranteed by caller.
- * `de != NULL' is guaranteed by caller.
- */
-static inline int ext2_match (int len, const char * const name,
- struct ext2_dir_entry_2 * de)
+static inline void ext2_inc_count(struct inode *inode)
{
- if (len != de->name_len)
- return 0;
- if (!de->inode)
- return 0;
- return !memcmp(name, de->name, len);
+ inode->i_nlink++;
+ mark_inode_dirty(inode);
}
-/*
- * ext2_find_entry()
- *
- * finds an entry in the specified directory with the wanted name. It
- * returns the cache buffer in which the entry was found, and the entry
- * itself (as a parameter - res_dir). It does NOT read the inode of the
- * entry - you'll have to do that yourself if you want to.
- */
-static struct buffer_head * ext2_find_entry (struct inode * dir,
- const char * const name, int namelen,
- struct ext2_dir_entry_2 ** res_dir)
+static inline void ext2_dec_count(struct inode *inode)
{
- struct super_block * sb;
- struct buffer_head * bh_use[NAMEI_RA_SIZE];
- struct buffer_head * bh_read[NAMEI_RA_SIZE];
- unsigned long offset;
- int block, toread, i, err;
-
- *res_dir = NULL;
- sb = dir->i_sb;
-
- if (namelen > EXT2_NAME_LEN)
- return NULL;
-
- memset (bh_use, 0, sizeof (bh_use));
- toread = 0;
- for (block = 0; block < NAMEI_RA_SIZE; ++block) {
- struct buffer_head * bh;
-
- if ((block << EXT2_BLOCK_SIZE_BITS (sb)) >= dir->i_size)
- break;
- bh = ext2_getblk (dir, block, 0, &err);
- bh_use[block] = bh;
- if (bh && !buffer_uptodate(bh))
- bh_read[toread++] = bh;
- }
-
- for (block = 0, offset = 0; offset < dir->i_size; block++) {
- struct buffer_head * bh;
- struct ext2_dir_entry_2 * de;
- char * dlimit;
-
- if ((block % NAMEI_RA_BLOCKS) == 0 && toread) {
- ll_rw_block (READ, toread, bh_read);
- toread = 0;
- }
- bh = bh_use[block % NAMEI_RA_SIZE];
- if (!bh) {
-#if 0
- ext2_error (sb, "ext2_find_entry",
- "directory #%lu contains a hole at offset %lu",
- dir->i_ino, offset);
-#endif
- offset += sb->s_blocksize;
- continue;
- }
- wait_on_buffer (bh);
- if (!buffer_uptodate(bh)) {
- /*
- * read error: all bets are off
- */
- break;
- }
-
- de = (struct ext2_dir_entry_2 *) bh->b_data;
- dlimit = bh->b_data + sb->s_blocksize;
- while ((char *) de < dlimit) {
- /* this code is executed quadratically often */
- /* do minimal checking `by hand' */
- int de_len;
-
- if ((char *) de + namelen <= dlimit &&
- ext2_match (namelen, name, de)) {
- /* found a match -
- just to be sure, do a full check */
- if (!ext2_check_dir_entry("ext2_find_entry",
- dir, de, bh, offset))
- goto failure;
- for (i = 0; i < NAMEI_RA_SIZE; ++i) {
- if (bh_use[i] != bh)
- brelse (bh_use[i]);
- }
- *res_dir = de;
- return bh;
- }
- /* prevent looping on a bad block */
- de_len = le16_to_cpu(de->rec_len);
- if (de_len <= 0)
- goto failure;
- offset += de_len;
- de = (struct ext2_dir_entry_2 *)
- ((char *) de + de_len);
- }
+ inode->i_nlink--;
+ mark_inode_dirty(inode);
+}
- brelse (bh);
- if (((block + NAMEI_RA_SIZE) << EXT2_BLOCK_SIZE_BITS (sb)) >=
- dir->i_size)
- bh = NULL;
- else
- bh = ext2_getblk (dir, block + NAMEI_RA_SIZE, 0, &err);
- bh_use[block % NAMEI_RA_SIZE] = bh;
- if (bh && !buffer_uptodate(bh))
- bh_read[toread++] = bh;
+static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
+{
+ int err = ext2_add_link(dentry, inode);
+ if (!err) {
+ d_instantiate(dentry, inode);
+ return 0;
}
-
-failure:
- for (i = 0; i < NAMEI_RA_SIZE; ++i)
- brelse (bh_use[i]);
- return NULL;
+ ext2_dec_count(inode);
+ iput(inode);
+ return err;
}
+/*
+ * Methods themselves.
+ */
+
static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry)
{
struct inode * inode;
- struct ext2_dir_entry_2 * de;
- struct buffer_head * bh;
-
+ ino_t ino;
+
if (dentry->d_name.len > EXT2_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
- bh = ext2_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &de);
+ ino = ext2_inode_by_name(dir, dentry);
inode = NULL;
- if (bh) {
- unsigned long ino = le32_to_cpu(de->inode);
- brelse (bh);
+ if (ino) {
inode = iget(dir->i_sb, ino);
-
- if (!inode)
+ if (!inode)
return ERR_PTR(-EACCES);
}
d_add(dentry, inode);
return NULL;
}
-#define S_SHIFT 12
-static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
- [S_IFREG >> S_SHIFT] EXT2_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] EXT2_FT_DIR,
- [S_IFCHR >> S_SHIFT] EXT2_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] EXT2_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] EXT2_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] EXT2_FT_SOCK,
- [S_IFLNK >> S_SHIFT] EXT2_FT_SYMLINK,
-};
-
-static inline void ext2_set_de_type(struct super_block *sb,
- struct ext2_dir_entry_2 *de,
- umode_t mode) {
- if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
- de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
-}
-
-/*
- * ext2_add_entry()
- *
- * adds a file entry to the specified directory.
- */
-int ext2_add_entry (struct inode * dir, const char * name, int namelen,
- struct inode *inode)
-{
- unsigned long offset;
- unsigned short rec_len;
- struct buffer_head * bh;
- struct ext2_dir_entry_2 * de, * de1;
- struct super_block * sb;
- int retval;
-
- sb = dir->i_sb;
-
- if (!namelen)
- return -EINVAL;
- bh = ext2_bread (dir, 0, 0, &retval);
- if (!bh)
- return retval;
- rec_len = EXT2_DIR_REC_LEN(namelen);
- offset = 0;
- de = (struct ext2_dir_entry_2 *) bh->b_data;
- while (1) {
- if ((char *)de >= sb->s_blocksize + bh->b_data) {
- brelse (bh);
- bh = NULL;
- bh = ext2_bread (dir, offset >> EXT2_BLOCK_SIZE_BITS(sb), 1, &retval);
- if (!bh)
- return retval;
- if (dir->i_size <= offset) {
- if (dir->i_size == 0) {
- brelse(bh);
- return -ENOENT;
- }
-
- ext2_debug ("creating next block\n");
-
- de = (struct ext2_dir_entry_2 *) bh->b_data;
- de->inode = 0;
- de->rec_len = le16_to_cpu(sb->s_blocksize);
- dir->i_size = offset + sb->s_blocksize;
- dir->u.ext2_i.i_flags &= ~EXT2_BTREE_FL;
- mark_inode_dirty(dir);
- } else {
-
- ext2_debug ("skipping to next block\n");
-
- de = (struct ext2_dir_entry_2 *) bh->b_data;
- }
- }
- if (!ext2_check_dir_entry ("ext2_add_entry", dir, de, bh,
- offset)) {
- brelse (bh);
- return -ENOENT;
- }
- if (ext2_match (namelen, name, de)) {
- brelse (bh);
- return -EEXIST;
- }
- if ((le32_to_cpu(de->inode) == 0 && le16_to_cpu(de->rec_len) >= rec_len) ||
- (le16_to_cpu(de->rec_len) >= EXT2_DIR_REC_LEN(de->name_len) + rec_len)) {
- offset += le16_to_cpu(de->rec_len);
- if (le32_to_cpu(de->inode)) {
- de1 = (struct ext2_dir_entry_2 *) ((char *) de +
- EXT2_DIR_REC_LEN(de->name_len));
- de1->rec_len = cpu_to_le16(le16_to_cpu(de->rec_len) -
- EXT2_DIR_REC_LEN(de->name_len));
- de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(de->name_len));
- de = de1;
- }
- de->file_type = EXT2_FT_UNKNOWN;
- if (inode) {
- de->inode = cpu_to_le32(inode->i_ino);
- ext2_set_de_type(dir->i_sb, de, inode->i_mode);
- } else
- de->inode = 0;
- de->name_len = namelen;
- memcpy (de->name, name, namelen);
- /*
- * XXX shouldn't update any times until successful
- * completion of syscall, but too many callers depend
- * on this.
- *
- * XXX similarly, too many callers depend on
- * ext2_new_inode() setting the times, but error
- * recovery deletes the inode, so the worst that can
- * happen is that the times are slightly out of date
- * and/or different from the directory change time.
- */
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- dir->u.ext2_i.i_flags &= ~EXT2_BTREE_FL;
- mark_inode_dirty(dir);
- dir->i_version = ++event;
- mark_buffer_dirty_inode(bh, dir);
- if (IS_SYNC(dir)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
- brelse(bh);
- return 0;
- }
- offset += le16_to_cpu(de->rec_len);
- de = (struct ext2_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
- }
- brelse (bh);
- return -ENOSPC;
-}
-
-/*
- * ext2_delete_entry deletes a directory entry by merging it with the
- * previous entry
- */
-static int ext2_delete_entry (struct inode * dir,
- struct ext2_dir_entry_2 * de_del,
- struct buffer_head * bh)
-{
- struct ext2_dir_entry_2 * de, * pde;
- int i;
-
- i = 0;
- pde = NULL;
- de = (struct ext2_dir_entry_2 *) bh->b_data;
- while (i < bh->b_size) {
- if (!ext2_check_dir_entry ("ext2_delete_entry", NULL,
- de, bh, i))
- return -EIO;
- if (de == de_del) {
- if (pde)
- pde->rec_len =
- cpu_to_le16(le16_to_cpu(pde->rec_len) +
- le16_to_cpu(de->rec_len));
- else
- de->inode = 0;
- dir->i_version = ++event;
- mark_buffer_dirty_inode(bh, dir);
- if (IS_SYNC(dir)) {
- ll_rw_block (WRITE, 1, &bh);
- wait_on_buffer (bh);
- }
- return 0;
- }
- i += le16_to_cpu(de->rec_len);
- pde = de;
- de = (struct ext2_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
- }
- return -ENOENT;
-}
-
/*
* By the time this is called, we already have created
* the directory cache entry for the new file, but it
{
struct inode * inode = ext2_new_inode (dir, mode);
int err = PTR_ERR(inode);
- if (IS_ERR(inode))
- return err;
-
- inode->i_op = &ext2_file_inode_operations;
- inode->i_fop = &ext2_file_operations;
- inode->i_mapping->a_ops = &ext2_aops;
- inode->i_mode = mode;
- mark_inode_dirty(inode);
- err = ext2_add_entry (dir, dentry->d_name.name, dentry->d_name.len,
- inode);
- if (err) {
- inode->i_nlink--;
+ if (!IS_ERR(inode)) {
+ inode->i_op = &ext2_file_inode_operations;
+ inode->i_fop = &ext2_file_operations;
+ inode->i_mapping->a_ops = &ext2_aops;
mark_inode_dirty(inode);
- iput (inode);
- return err;
+ err = ext2_add_nondir(dentry, inode);
}
- d_instantiate(dentry, inode);
- return 0;
+ return err;
}
static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, int rdev)
{
struct inode * inode = ext2_new_inode (dir, mode);
int err = PTR_ERR(inode);
-
- if (IS_ERR(inode))
- return err;
-
- inode->i_uid = current->fsuid;
- init_special_inode(inode, mode, rdev);
- err = ext2_add_entry (dir, dentry->d_name.name, dentry->d_name.len,
- inode);
- if (err)
- goto out_no_entry;
- mark_inode_dirty(inode);
- d_instantiate(dentry, inode);
- return 0;
-
-out_no_entry:
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput(inode);
+ if (!IS_ERR(inode)) {
+ init_special_inode(inode, mode, rdev);
+ mark_inode_dirty(inode);
+ err = ext2_add_nondir(dentry, inode);
+ }
return err;
}
-static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+static int ext2_symlink (struct inode * dir, struct dentry * dentry,
+ const char * symname)
{
+ struct super_block * sb = dir->i_sb;
+ int err = -ENAMETOOLONG;
+ unsigned l = strlen(symname)+1;
struct inode * inode;
- struct buffer_head * dir_block;
- struct ext2_dir_entry_2 * de;
- int err;
- if (dir->i_nlink >= EXT2_LINK_MAX)
- return -EMLINK;
+ if (l > sb->s_blocksize)
+ goto out;
- inode = ext2_new_inode (dir, S_IFDIR);
+ inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO);
err = PTR_ERR(inode);
if (IS_ERR(inode))
- return err;
+ goto out;
- inode->i_op = &ext2_dir_inode_operations;
- inode->i_fop = &ext2_dir_operations;
- inode->i_size = inode->i_sb->s_blocksize;
- inode->i_blocks = 0;
- dir_block = ext2_bread (inode, 0, 1, &err);
- if (!dir_block) {
- inode->i_nlink--; /* is this nlink == 0? */
- mark_inode_dirty(inode);
- iput (inode);
- return err;
+ if (l > sizeof (inode->u.ext2_i.i_data)) {
+ /* slow symlink */
+ inode->i_op = &page_symlink_inode_operations;
+ inode->i_mapping->a_ops = &ext2_aops;
+ err = block_symlink(inode, symname, l);
+ if (err)
+ goto out_fail;
+ } else {
+ /* fast symlink */
+ inode->i_op = &ext2_fast_symlink_inode_operations;
+ memcpy((char*)&inode->u.ext2_i.i_data,symname,l);
+ inode->i_size = l-1;
}
- de = (struct ext2_dir_entry_2 *) dir_block->b_data;
- de->inode = cpu_to_le32(inode->i_ino);
- de->name_len = 1;
- de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(de->name_len));
- strcpy (de->name, ".");
- ext2_set_de_type(dir->i_sb, de, S_IFDIR);
- de = (struct ext2_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
- de->inode = cpu_to_le32(dir->i_ino);
- de->rec_len = cpu_to_le16(inode->i_sb->s_blocksize - EXT2_DIR_REC_LEN(1));
- de->name_len = 2;
- strcpy (de->name, "..");
- ext2_set_de_type(dir->i_sb, de, S_IFDIR);
- inode->i_nlink = 2;
- mark_buffer_dirty_inode(dir_block, dir);
- brelse (dir_block);
- inode->i_mode = S_IFDIR | mode;
- if (dir->i_mode & S_ISGID)
- inode->i_mode |= S_ISGID;
mark_inode_dirty(inode);
- err = ext2_add_entry (dir, dentry->d_name.name, dentry->d_name.len,
- inode);
- if (err)
- goto out_no_entry;
- dir->i_nlink++;
- dir->u.ext2_i.i_flags &= ~EXT2_BTREE_FL;
- mark_inode_dirty(dir);
- d_instantiate(dentry, inode);
- return 0;
-out_no_entry:
- inode->i_nlink = 0;
- mark_inode_dirty(inode);
- iput (inode);
+ err = ext2_add_nondir(dentry, inode);
+out:
return err;
-}
-
-/*
- * routine to check that the specified directory is empty (for rmdir)
- */
-static int empty_dir (struct inode * inode)
-{
- unsigned long offset;
- struct buffer_head * bh;
- struct ext2_dir_entry_2 * de, * de1;
- struct super_block * sb;
- int err;
-
- sb = inode->i_sb;
- if (inode->i_size < EXT2_DIR_REC_LEN(1) + EXT2_DIR_REC_LEN(2) ||
- !(bh = ext2_bread (inode, 0, 0, &err))) {
- ext2_warning (inode->i_sb, "empty_dir",
- "bad directory (dir #%lu) - no data block",
- inode->i_ino);
- return 1;
- }
- de = (struct ext2_dir_entry_2 *) bh->b_data;
- de1 = (struct ext2_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
- if (le32_to_cpu(de->inode) != inode->i_ino || !le32_to_cpu(de1->inode) ||
- strcmp (".", de->name) || strcmp ("..", de1->name)) {
- ext2_warning (inode->i_sb, "empty_dir",
- "bad directory (dir #%lu) - no `.' or `..'",
- inode->i_ino);
- brelse (bh);
- return 1;
- }
- offset = le16_to_cpu(de->rec_len) + le16_to_cpu(de1->rec_len);
- de = (struct ext2_dir_entry_2 *) ((char *) de1 + le16_to_cpu(de1->rec_len));
- while (offset < inode->i_size ) {
- if (!bh || (void *) de >= (void *) (bh->b_data + sb->s_blocksize)) {
- brelse (bh);
- bh = ext2_bread (inode, offset >> EXT2_BLOCK_SIZE_BITS(sb), 0, &err);
- if (!bh) {
-#if 0
- ext2_error (sb, "empty_dir",
- "directory #%lu contains a hole at offset %lu",
- inode->i_ino, offset);
-#endif
- offset += sb->s_blocksize;
- continue;
- }
- de = (struct ext2_dir_entry_2 *) bh->b_data;
- }
- if (!ext2_check_dir_entry ("empty_dir", inode, de, bh,
- offset)) {
- brelse (bh);
- return 1;
- }
- if (le32_to_cpu(de->inode)) {
- brelse (bh);
- return 0;
- }
- offset += le16_to_cpu(de->rec_len);
- de = (struct ext2_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
- }
- brelse (bh);
- return 1;
-}
-static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
-{
- int retval;
- struct inode * inode;
- struct buffer_head * bh;
- struct ext2_dir_entry_2 * de;
-
- retval = -ENOENT;
- bh = ext2_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &de);
- if (!bh)
- goto end_rmdir;
-
- inode = dentry->d_inode;
- DQUOT_INIT(inode);
-
- retval = -EIO;
- if (le32_to_cpu(de->inode) != inode->i_ino)
- goto end_rmdir;
-
- retval = -ENOTEMPTY;
- if (!empty_dir (inode))
- goto end_rmdir;
-
- retval = ext2_delete_entry(dir, de, bh);
- if (retval)
- goto end_rmdir;
- if (inode->i_nlink != 2)
- ext2_warning (inode->i_sb, "ext2_rmdir",
- "empty directory has nlink!=2 (%d)",
- inode->i_nlink);
- inode->i_version = ++event;
- inode->i_nlink = 0;
- inode->i_size = 0;
- mark_inode_dirty(inode);
- dir->i_nlink--;
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- dir->u.ext2_i.i_flags &= ~EXT2_BTREE_FL;
- mark_inode_dirty(dir);
-
-end_rmdir:
- brelse (bh);
- return retval;
+out_fail:
+ ext2_dec_count(inode);
+ iput (inode);
+ goto out;
}
-static int ext2_unlink(struct inode * dir, struct dentry *dentry)
+static int ext2_link (struct dentry * old_dentry, struct inode * dir,
+ struct dentry *dentry)
{
- int retval;
- struct inode * inode;
- struct buffer_head * bh;
- struct ext2_dir_entry_2 * de;
+ struct inode *inode = old_dentry->d_inode;
- retval = -ENOENT;
- bh = ext2_find_entry (dir, dentry->d_name.name, dentry->d_name.len, &de);
- if (!bh)
- goto end_unlink;
+ if (S_ISDIR(inode->i_mode))
+ return -EPERM;
- inode = dentry->d_inode;
- DQUOT_INIT(inode);
+ if (inode->i_nlink >= EXT2_LINK_MAX)
+ return -EMLINK;
- retval = -EIO;
- if (le32_to_cpu(de->inode) != inode->i_ino)
- goto end_unlink;
-
- if (!inode->i_nlink) {
- ext2_warning (inode->i_sb, "ext2_unlink",
- "Deleting nonexistent file (%lu), %d",
- inode->i_ino, inode->i_nlink);
- inode->i_nlink = 1;
- }
- retval = ext2_delete_entry(dir, de, bh);
- if (retval)
- goto end_unlink;
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- dir->u.ext2_i.i_flags &= ~EXT2_BTREE_FL;
- mark_inode_dirty(dir);
- inode->i_nlink--;
- mark_inode_dirty(inode);
- inode->i_ctime = dir->i_ctime;
- retval = 0;
+ inode->i_ctime = CURRENT_TIME;
+ ext2_inc_count(inode);
+ atomic_inc(&inode->i_count);
-end_unlink:
- brelse (bh);
- return retval;
+ return ext2_add_nondir(dentry, inode);
}
-static int ext2_symlink (struct inode * dir, struct dentry *dentry, const char * symname)
+static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode)
{
struct inode * inode;
- int l, err;
+ int err = -EMLINK;
- l = strlen(symname)+1;
- if (l > dir->i_sb->s_blocksize)
- return -ENAMETOOLONG;
+ if (dir->i_nlink >= EXT2_LINK_MAX)
+ goto out;
+
+ ext2_inc_count(dir);
- inode = ext2_new_inode (dir, S_IFLNK);
+ inode = ext2_new_inode (dir, S_IFDIR | mode);
err = PTR_ERR(inode);
if (IS_ERR(inode))
- return err;
+ goto out_dir;
- inode->i_mode = S_IFLNK | S_IRWXUGO;
+ inode->i_op = &ext2_dir_inode_operations;
+ inode->i_fop = &ext2_dir_operations;
+ inode->i_mapping->a_ops = &ext2_aops;
- if (l > sizeof (inode->u.ext2_i.i_data)) {
- inode->i_op = &page_symlink_inode_operations;
- inode->i_mapping->a_ops = &ext2_aops;
- err = block_symlink(inode, symname, l);
- if (err)
- goto out_no_entry;
- } else {
- inode->i_op = &ext2_fast_symlink_inode_operations;
- memcpy((char*)&inode->u.ext2_i.i_data,symname,l);
- inode->i_size = l-1;
- }
- mark_inode_dirty(inode);
+ ext2_inc_count(inode);
- err = ext2_add_entry (dir, dentry->d_name.name, dentry->d_name.len,
- inode);
+ err = ext2_make_empty(inode, dir);
if (err)
- goto out_no_entry;
- d_instantiate(dentry, inode);
- return 0;
+ goto out_fail;
-out_no_entry:
- inode->i_nlink--;
- mark_inode_dirty(inode);
- iput (inode);
+ err = ext2_add_link(dentry, inode);
+ if (err)
+ goto out_fail;
+
+ d_instantiate(dentry, inode);
+out:
return err;
+
+out_fail:
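+ /* drop both the base link count from ext2_new_inode() and the
+  * extra one taken above for "." */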
+ ext2_dec_count(inode);
+ ext2_dec_count(inode);
+ iput(inode);
+out_dir:
+ ext2_dec_count(dir);
+ goto out;
}
-static int ext2_link (struct dentry * old_dentry,
- struct inode * dir, struct dentry *dentry)
+static int ext2_unlink(struct inode * dir, struct dentry *dentry)
{
- struct inode *inode = old_dentry->d_inode;
- int err;
+ struct inode * inode = dentry->d_inode;
+ struct ext2_dir_entry_2 * de;
+ struct page * page;
+ int err = -ENOENT;
- if (S_ISDIR(inode->i_mode))
- return -EPERM;
+ de = ext2_find_entry (dir, dentry, &page);
+ if (!de)
+ goto out;
- if (inode->i_nlink >= EXT2_LINK_MAX)
- return -EMLINK;
-
- err = ext2_add_entry (dir, dentry->d_name.name, dentry->d_name.len,
- inode);
+ err = ext2_delete_entry (de, page);
if (err)
- return err;
+ goto out;
- inode->i_nlink++;
- inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
- atomic_inc(&inode->i_count);
- d_instantiate(dentry, inode);
- return 0;
+ inode->i_ctime = dir->i_ctime;
+ ext2_dec_count(inode);
+ err = 0;
+out:
+ return err;
}
-#define PARENT_INO(buffer) \
- ((struct ext2_dir_entry_2 *) ((char *) buffer + \
- le16_to_cpu(((struct ext2_dir_entry_2 *) buffer)->rec_len)))->inode
-
-/*
- * Anybody can rename anything with this: the permission checks are left to the
- * higher-level routines.
- */
-static int ext2_rename (struct inode * old_dir, struct dentry *old_dentry,
- struct inode * new_dir,struct dentry *new_dentry)
+static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
{
- struct inode * old_inode, * new_inode;
- struct buffer_head * old_bh, * new_bh, * dir_bh;
- struct ext2_dir_entry_2 * old_de, * new_de;
- int retval;
-
- old_bh = new_bh = dir_bh = NULL;
-
- old_bh = ext2_find_entry (old_dir, old_dentry->d_name.name, old_dentry->d_name.len, &old_de);
- /*
- * Check for inode number is _not_ due to possible IO errors.
- * We might rmdir the source, keep it as pwd of some process
- * and merrily kill the link to whatever was created under the
- * same name. Goodbye sticky bit ;-<
- */
- old_inode = old_dentry->d_inode;
- retval = -ENOENT;
- if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
- goto end_rename;
-
- new_inode = new_dentry->d_inode;
- new_bh = ext2_find_entry (new_dir, new_dentry->d_name.name,
- new_dentry->d_name.len, &new_de);
- if (new_bh) {
- if (!new_inode) {
- brelse (new_bh);
- new_bh = NULL;
- } else {
- DQUOT_INIT(new_inode);
+ struct inode * inode = dentry->d_inode;
+ int err = -ENOTEMPTY;
+
+ if (ext2_empty_dir(inode)) {
+ err = ext2_unlink(dir, dentry);
+ if (!err) {
+ inode->i_size = 0;
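+   /* ext2_unlink() dropped the nlink for the name; these drop the
+    * "." self-link and the parent's ".." back-link */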
+ ext2_dec_count(inode);
+ ext2_dec_count(dir);
}
}
+ return err;
+}
+
+static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
+   struct inode * new_dir, struct dentry * new_dentry)
+{
+ struct inode * old_inode = old_dentry->d_inode;
+ struct inode * new_inode = new_dentry->d_inode;
+ struct page * dir_page = NULL;
+ struct ext2_dir_entry_2 * dir_de = NULL;
+ struct page * old_page;
+ struct ext2_dir_entry_2 * old_de;
+ int err = -ENOENT;
+
+ old_de = ext2_find_entry (old_dir, old_dentry, &old_page);
+ if (!old_de)
+ goto out;
+
if (S_ISDIR(old_inode->i_mode)) {
- if (new_inode) {
- retval = -ENOTEMPTY;
- if (!empty_dir (new_inode))
- goto end_rename;
- }
- retval = -EIO;
- dir_bh = ext2_bread (old_inode, 0, 0, &retval);
- if (!dir_bh)
- goto end_rename;
- if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
- goto end_rename;
- retval = -EMLINK;
- if (!new_inode && new_dir!=old_dir &&
- new_dir->i_nlink >= EXT2_LINK_MAX)
- goto end_rename;
- }
- if (!new_bh) {
- retval = ext2_add_entry (new_dir, new_dentry->d_name.name,
- new_dentry->d_name.len,
- old_inode);
- if (retval)
- goto end_rename;
- } else {
- new_de->inode = le32_to_cpu(old_inode->i_ino);
- if (EXT2_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
- EXT2_FEATURE_INCOMPAT_FILETYPE))
- new_de->file_type = old_de->file_type;
- new_dir->i_version = ++event;
- mark_buffer_dirty_inode(new_bh, new_dir);
- if (IS_SYNC(new_dir)) {
- ll_rw_block (WRITE, 1, &new_bh);
- wait_on_buffer (new_bh);
- }
- brelse(new_bh);
- new_bh = NULL;
+ err = -EIO;
+ dir_de = ext2_dotdot(old_inode, &dir_page);
+ if (!dir_de)
+ goto out_old;
}
-
- /*
- * Like most other Unix systems, set the ctime for inodes on a
- * rename.
- */
- old_inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(old_inode);
-
- /*
- * ok, that's it
- */
- ext2_delete_entry(old_dir, old_de, old_bh);
if (new_inode) {
- new_inode->i_nlink--;
+ struct page *new_page;
+ struct ext2_dir_entry_2 *new_de;
+
+ err = -ENOTEMPTY;
+ if (dir_de && !ext2_empty_dir (new_inode))
+ goto out_dir;
+
+ err = -ENOENT;
+ new_de = ext2_find_entry (new_dir, new_dentry, &new_page);
+ if (!new_de)
+ goto out_dir;
+ ext2_inc_count(old_inode);
+ ext2_set_link(new_dir, new_de, new_page, old_inode);
new_inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(new_inode);
- }
- old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
- old_dir->u.ext2_i.i_flags &= ~EXT2_BTREE_FL;
- mark_inode_dirty(old_dir);
- if (dir_bh) {
- PARENT_INO(dir_bh->b_data) = le32_to_cpu(new_dir->i_ino);
- mark_buffer_dirty_inode(dir_bh, old_inode);
- old_dir->i_nlink--;
- mark_inode_dirty(old_dir);
- if (new_inode) {
+ if (dir_de)
new_inode->i_nlink--;
- mark_inode_dirty(new_inode);
- } else {
- new_dir->i_nlink++;
- new_dir->u.ext2_i.i_flags &= ~EXT2_BTREE_FL;
- mark_inode_dirty(new_dir);
+ ext2_dec_count(new_inode);
+ } else {
+ if (dir_de) {
+ err = -EMLINK;
+ if (new_dir->i_nlink >= EXT2_LINK_MAX)
+ goto out_dir;
}
+ ext2_inc_count(old_inode);
+ err = ext2_add_link(new_dentry, old_inode);
+ if (err) {
+ ext2_dec_count(old_inode);
+ goto out_dir;
+ }
+ if (dir_de)
+ ext2_inc_count(new_dir);
+ }
+
+ ext2_delete_entry (old_de, old_page);
+ ext2_dec_count(old_inode);
+
+ if (dir_de) {
+ ext2_set_link(old_inode, dir_de, dir_page, new_dir);
+ ext2_dec_count(old_dir);
}
+ return 0;
- retval = 0;
-end_rename:
- brelse (dir_bh);
- brelse (old_bh);
- brelse (new_bh);
- return retval;
+out_dir:
+ if (dir_de) {
+ kunmap(dir_page);
+ page_cache_release(dir_page);
+ }
+out_old:
+ kunmap(old_page);
+ page_cache_release(old_page);
+out:
+ return err;
}
-/*
- * directories can handle most operations...
- */
struct inode_operations ext2_dir_inode_operations = {
create: ext2_create,
lookup: ext2_lookup,
u32 addr = ntohl(argp->addr);
dprintk("nsm: xdr_encode_mon(%08x, %d, %d, %d)\n",
- htonl(argp->addr), htonl(argp->proc),
+ htonl(argp->addr), htonl(argp->prog),
htonl(argp->vers), htonl(argp->proc));
/*
}
spin_unlock(&dcache_lock);
down_write(&sb->s_umount);
+ lock_kernel();
sb->s_root = NULL;
/* Need to clean after the sucker */
if (fs->fs_flags & FS_LITTER)
put_filesystem(fs);
sb->s_type = NULL;
unlock_super(sb);
+ unlock_kernel();
up_write(&sb->s_umount);
if (bdev) {
blkdev_put(bdev, BDEV_FS);
#include <asm/atomic.h>
#include <asm/hardirq.h>
-#define cpu_bh_disable(cpu) do { local_bh_count(cpu)++; barrier(); } while (0)
-#define cpu_bh_enable(cpu) do { barrier(); local_bh_count(cpu)--; } while (0)
+#define __cpu_bh_enable(cpu) \
+ do { barrier(); local_bh_count(cpu)--; } while (0)
+#define cpu_bh_disable(cpu) \
+ do { local_bh_count(cpu)++; barrier(); } while (0)
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
-#define local_bh_enable() cpu_bh_enable(smp_processor_id())
+#define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
+#define local_bh_enable() \
+do { \
+ if (!--local_bh_count(smp_processor_id()) && \
+     softirq_pending(smp_processor_id())) { \
+  do_softirq(); \
+  __sti(); \
+ } \
+} while (0)
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
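
The rewritten local_bh_enable() runs softirqs that became pending while
they were blocked instead of leaving them for the next interrupt exit.
A hedged usage sketch (kernel context assumed; the function itself is
hypothetical):

	static void example_touch_shared_state(void)
	{
		local_bh_disable();	/* keep softirqs off this CPU */
		/* ... modify data also used by a softirq handler ... */
		local_bh_enable();	/* may run do_softirq() right
					 * here if work became pending */
	}
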
* Ext2 directory file types. Only the low 3 bits are used. The
* other bits are reserved for now.
*/
-#define EXT2_FT_UNKNOWN 0
-#define EXT2_FT_REG_FILE 1
-#define EXT2_FT_DIR 2
-#define EXT2_FT_CHRDEV 3
-#define EXT2_FT_BLKDEV 4
-#define EXT2_FT_FIFO 5
-#define EXT2_FT_SOCK 6
-#define EXT2_FT_SYMLINK 7
-
-#define EXT2_FT_MAX 8
+enum {
+ EXT2_FT_UNKNOWN,
+ EXT2_FT_REG_FILE,
+ EXT2_FT_DIR,
+ EXT2_FT_CHRDEV,
+ EXT2_FT_BLKDEV,
+ EXT2_FT_FIFO,
+ EXT2_FT_SOCK,
+ EXT2_FT_SYMLINK,
+ EXT2_FT_MAX
+};
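
The mode-to-file-type table that namei.c used to carry (the S_SHIFT
array removed above) now feeds these values from dir.c. A standalone
sketch of that mapping, with the table copied from the removed copy:

	#include <stdio.h>
	#include <sys/stat.h>

	enum { EXT2_FT_UNKNOWN, EXT2_FT_REG_FILE, EXT2_FT_DIR,
	       EXT2_FT_CHRDEV, EXT2_FT_BLKDEV, EXT2_FT_FIFO,
	       EXT2_FT_SOCK, EXT2_FT_SYMLINK, EXT2_FT_MAX };

	#define S_SHIFT 12
	static const unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
		[S_IFREG  >> S_SHIFT] = EXT2_FT_REG_FILE,
		[S_IFDIR  >> S_SHIFT] = EXT2_FT_DIR,
		[S_IFCHR  >> S_SHIFT] = EXT2_FT_CHRDEV,
		[S_IFBLK  >> S_SHIFT] = EXT2_FT_BLKDEV,
		[S_IFIFO  >> S_SHIFT] = EXT2_FT_FIFO,
		[S_IFSOCK >> S_SHIFT] = EXT2_FT_SOCK,
		[S_IFLNK  >> S_SHIFT] = EXT2_FT_SYMLINK,
	};

	int main(void)
	{
		/* S_IFDIR resolves to EXT2_FT_DIR (2) */
		printf("S_IFDIR -> file type %d\n",
		       ext2_type_by_mode[(S_IFDIR & S_IFMT) >> S_SHIFT]);
		return 0;
	}
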
/*
* EXT2_DIR_PAD defines the directory entries boundaries
extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
/* dir.c */
-extern int ext2_check_dir_entry (const char *, struct inode *,
- struct ext2_dir_entry_2 *, struct buffer_head *,
- unsigned long);
/* file.c */
extern int ext2_read (struct inode *, struct file *, char *, int);
/* dir.c */
extern struct file_operations ext2_dir_operations;
+extern int ext2_add_link (struct dentry *, struct inode *);
+extern ino_t ext2_inode_by_name(struct inode *, struct dentry *);
+extern int ext2_make_empty(struct inode *, struct inode *);
+extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,struct dentry *, struct page **);
+extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
+extern int ext2_empty_dir (struct inode *);
+extern struct ext2_dir_entry_2 * ext2_dotdot (struct inode *, struct page **);
+extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page *, struct inode *);
/* file.c */
extern struct inode_operations ext2_file_inode_operations;
asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
+/* Locally cached atomic variables are cheaper than cli/sti */
static inline void __cpu_raise_softirq(int cpu, int nr)
{
- softirq_active(cpu) |= (1<<nr);
+ set_bit(nr, &softirq_active(cpu));
}
-
-/* I do not want to use atomic variables now, so that cli/sti */
static inline void raise_softirq(int nr)
{
- unsigned long flags;
-
- local_irq_save(flags);
__cpu_raise_softirq(smp_processor_id(), nr);
- local_irq_restore(flags);
}
extern void softirq_init(void);
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+ TASKLET_STATE_RUN /* Tasklet is running */
};
struct tasklet_head
extern struct tasklet_head tasklet_vec[NR_CPUS];
extern struct tasklet_head tasklet_hi_vec[NR_CPUS];
-#ifdef CONFIG_SMP
#define tasklet_trylock(t) (!test_and_set_bit(TASKLET_STATE_RUN, &(t)->state))
-#define tasklet_unlock_wait(t) while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
#define tasklet_unlock(t) clear_bit(TASKLET_STATE_RUN, &(t)->state)
-#else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
-#endif
-
-static inline void tasklet_schedule(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- int cpu = smp_processor_id();
- unsigned long flags;
-
- local_irq_save(flags);
- t->next = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
- local_irq_restore(flags);
- }
-}
-
-static inline void tasklet_hi_schedule(struct tasklet_struct *t)
-{
- if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- int cpu = smp_processor_id();
- unsigned long flags;
-
- local_irq_save(flags);
- t->next = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, HI_SOFTIRQ);
- local_irq_restore(flags);
- }
-}
+#define tasklet_unlock_wait(t) while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+extern void tasklet_schedule(struct tasklet_struct *t);
+extern void tasklet_hi_schedule(struct tasklet_struct *t);
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
static inline void tasklet_enable(struct tasklet_struct *t)
{
- atomic_dec(&t->count);
+ if (atomic_dec_and_test(&t->count))
+ tasklet_schedule(t);
+}
+
+static inline void tasklet_hi_enable(struct tasklet_struct *t)
+{
+ if (atomic_dec_and_test(&t->count))
+ tasklet_hi_schedule(t);
}
extern void tasklet_kill(struct tasklet_struct *t);
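
A minimal usage sketch of the tasklet API as declared above (2.4-style
module; all names here are hypothetical):

	#include <linux/module.h>
	#include <linux/interrupt.h>

	static void example_action(unsigned long data)
	{
		printk(KERN_DEBUG "tasklet ran, data=%lu\n", data);
	}

	static struct tasklet_struct example_tasklet;

	static int __init example_init(void)
	{
		tasklet_init(&example_tasklet, example_action, 42);
		tasklet_schedule(&example_tasklet);	/* now out of line */
		return 0;
	}

	static void __exit example_exit(void)
	{
		tasklet_kill(&example_tasklet);	/* waits out a running one */
	}

	module_init(example_init);
	module_exit(example_exit);
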
/* arch dependent irq_stat fields */
#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386, ia64 */
+#define softirq_pending(cpu) \
+ ((softirq_active(cpu) & softirq_mask(cpu)))
+
#endif /* __irq_cpustat_h */
#define PG_skip 10
#define PG_inactive_clean 11
#define PG_highmem 12
+#define PG_checked 13 /* kill me in 2.5.<early>. */
/* bits 21-29 unused */
#define PG_arch_1 30
#define PG_reserved 31
#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
#define LockPage(page) set_bit(PG_locked, &(page)->flags)
#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
extern void __set_page_dirty(struct page *);
* can allocate highmem pages, the *get*page*() variants return
* virtual kernel addresses to the allocated page(s).
*/
-extern struct page * FASTCALL(__alloc_pages(zonelist_t *zonelist, unsigned long order));
+extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned long order));
+extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned long order, zonelist_t *zonelist));
extern struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order);
-#ifndef CONFIG_DISCONTIGMEM
static inline struct page * alloc_pages(int gfp_mask, unsigned long order)
{
/*
*/
if (order >= MAX_ORDER)
return NULL;
- return __alloc_pages(contig_page_data.node_zonelists+(gfp_mask), order);
+ return _alloc_pages(gfp_mask, order);
}
-#else /* !CONFIG_DISCONTIGMEM */
-extern struct page * alloc_pages(int gfp_mask, unsigned long order);
-#endif /* !CONFIG_DISCONTIGMEM */
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
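
Callers see no interface change from the reshuffle; a hedged sketch of
the common path (kernel context assumed, GFP_KERNEL as defined elsewhere
in this header, the function itself hypothetical):

	static void *example_get_zeroed(void)
	{
		/* alloc_pages() -> _alloc_pages() -> __alloc_pages();
		 * the zonelist is now chosen from the low gfp bits
		 * inside the call rather than by the caller's indexing */
		struct page *page = alloc_pages(GFP_KERNEL, 0);

		if (!page)
			return NULL;
		memset(page_address(page), 0, PAGE_SIZE);
		return page_address(page);
	}
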
/*
* GFP bitmasks..
*/
-#define __GFP_WAIT 0x01
-#define __GFP_HIGH 0x02
-#define __GFP_IO 0x04
-#define __GFP_DMA 0x08
-#ifdef CONFIG_HIGHMEM
-#define __GFP_HIGHMEM 0x10
-#else
-#define __GFP_HIGHMEM 0x0 /* noop */
-#endif
+/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low four bits) */
+#define __GFP_DMA 0x01
+#define __GFP_HIGHMEM 0x02
+/* Action modifiers - doesn't change the zoning */
+#define __GFP_WAIT 0x10
+#define __GFP_HIGH 0x20
+#define __GFP_IO 0x40
+#define __GFP_BUFFER 0x80
-#define GFP_BUFFER (__GFP_HIGH | __GFP_WAIT)
+#define GFP_BUFFER (__GFP_HIGH | __GFP_WAIT | __GFP_BUFFER)
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_USER ( __GFP_WAIT | __GFP_IO)
#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
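
Only the two zone bits now select the zonelist; the action bits fall
away. A standalone check of the indexing, with constants copied from
above:

	#include <stdio.h>

	#define __GFP_DMA	0x01
	#define __GFP_HIGHMEM	0x02
	#define __GFP_WAIT	0x10
	#define __GFP_HIGH	0x20
	#define __GFP_IO	0x40
	#define GFP_ZONEMASK	0x0f

	int main(void)
	{
		unsigned int highuser = __GFP_WAIT | __GFP_IO | __GFP_HIGHMEM;

		/* 16 zonelists per node (GFP_ZONEMASK + 1) replace the
		 * old NR_GFPINDEX == 32 array */
		printf("GFP_HIGHUSER -> zonelist %u\n",
		       highuser & GFP_ZONEMASK);
		return 0;
	}
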
*/
typedef struct zonelist_struct {
zone_t * zones [MAX_NR_ZONES+1]; // NULL delimited
- int gfp_mask;
} zonelist_t;
-#define NR_GFPINDEX 0x20
+#define GFP_ZONEMASK 0x0f
/*
* The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
struct bootmem_data;
typedef struct pglist_data {
zone_t node_zones[MAX_NR_ZONES];
- zonelist_t node_zonelists[NR_GFPINDEX];
+ zonelist_t node_zonelists[GFP_ZONEMASK+1];
struct page *node_mem_map;
unsigned long *valid_addr_bitmap;
struct bootmem_data *bdata;
#define SLAB_NFS GFP_NFS
#define SLAB_DMA GFP_DMA
-#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO)
+#define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_BUFFER)
#define SLAB_NO_GROW 0x00001000UL /* don't grow a cache */
/* flags to pass to kmem_cache_create().
EXPORT_SYMBOL(exit_sighand);
/* internal kernel memory management */
+EXPORT_SYMBOL(_alloc_pages);
EXPORT_SYMBOL(__alloc_pages);
EXPORT_SYMBOL(alloc_pages_node);
EXPORT_SYMBOL(__get_free_pages);
EXPORT_SYMBOL(get_zeroed_page);
EXPORT_SYMBOL(__free_pages);
EXPORT_SYMBOL(free_pages);
-#ifndef CONFIG_DISCONTIGMEM
-EXPORT_SYMBOL(contig_page_data);
-#else
-EXPORT_SYMBOL(alloc_pages);
-#endif
EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(kmem_find_general_cachep);
EXPORT_SYMBOL(kmem_cache_create);
release_kernel_lock(prev, this_cpu);
/* Do "administrative" work here while we don't hold any locks */
- if (softirq_active(this_cpu) & softirq_mask(this_cpu))
+ if (softirq_pending(this_cpu))
goto handle_softirq;
handle_softirq_back:
int cpu = smp_processor_id();
__u32 active, mask;
+ local_irq_disable();
if (in_interrupt())
- return;
+ goto out;
local_bh_disable();
- local_irq_disable();
mask = softirq_mask(cpu);
active = softirq_active(cpu) & mask;
local_irq_enable();
h = softirq_vec;
- mask &= ~active;
do {
if (active & 1)
local_irq_disable();
- active = softirq_active(cpu);
- if ((active &= mask) != 0)
+ active = softirq_active(cpu) & mask;
+ if (active)
goto retry;
}
- local_bh_enable();
+ __local_bh_enable();
+out:
/* Leave with locally disabled hard irqs. It is critical to close
* window for infinite recursion, while we help local bh count,
struct tasklet_head tasklet_vec[NR_CPUS] __cacheline_aligned;
+void tasklet_schedule(struct tasklet_struct *t)
+{
+ unsigned long flags;
+ int cpu;
+
+ cpu = smp_processor_id();
+ local_irq_save(flags);
+ /*
+ * If nobody is running it then add it to this CPU's
+ * tasklet queue.
+ */
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
+ tasklet_trylock(t)) {
+ t->next = tasklet_vec[cpu].list;
+ tasklet_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+ tasklet_unlock(t);
+ }
+ local_irq_restore(flags);
+}
+
+void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+ unsigned long flags;
+ int cpu;
+
+ cpu = smp_processor_id();
+ local_irq_save(flags);
+
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state) &&
+ tasklet_trylock(t)) {
+ t->next = tasklet_hi_vec[cpu].list;
+ tasklet_hi_vec[cpu].list = t;
+ __cpu_raise_softirq(cpu, HI_SOFTIRQ);
+ tasklet_unlock(t);
+ }
+ local_irq_restore(flags);
+}
+
static void tasklet_action(struct softirq_action *a)
{
int cpu = smp_processor_id();
local_irq_disable();
list = tasklet_vec[cpu].list;
tasklet_vec[cpu].list = NULL;
- local_irq_enable();
- while (list != NULL) {
+ while (list) {
struct tasklet_struct *t = list;
list = list->next;
- if (tasklet_trylock(t)) {
- if (atomic_read(&t->count) == 0) {
- clear_bit(TASKLET_STATE_SCHED, &t->state);
-
- t->func(t->data);
- /*
- * talklet_trylock() uses test_and_set_bit that imply
- * an mb when it returns zero, thus we need the explicit
- * mb only here: while closing the critical section.
- */
-#ifdef CONFIG_SMP
- smp_mb__before_clear_bit();
-#endif
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
+ /*
+ * A tasklet is only added to the queue while it's
+ * locked, so no other CPU can have this tasklet
+ * pending:
+ */
+ if (!tasklet_trylock(t))
+ BUG();
+repeat:
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ if (!atomic_read(&t->count)) {
+ local_irq_enable();
+ t->func(t->data);
+ local_irq_disable();
+ /*
+ * One more run if the tasklet got reactivated:
+ */
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ goto repeat;
}
- local_irq_disable();
- t->next = tasklet_vec[cpu].list;
- tasklet_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
- local_irq_enable();
+ tasklet_unlock(t);
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ tasklet_schedule(t);
}
+ local_irq_enable();
}
local_irq_disable();
list = tasklet_hi_vec[cpu].list;
tasklet_hi_vec[cpu].list = NULL;
- local_irq_enable();
- while (list != NULL) {
+ while (list) {
struct tasklet_struct *t = list;
list = list->next;
- if (tasklet_trylock(t)) {
- if (atomic_read(&t->count) == 0) {
- clear_bit(TASKLET_STATE_SCHED, &t->state);
-
- t->func(t->data);
- tasklet_unlock(t);
- continue;
- }
- tasklet_unlock(t);
+ if (!tasklet_trylock(t))
+ BUG();
+repeat:
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ if (!atomic_read(&t->count)) {
+ local_irq_enable();
+ t->func(t->data);
+ local_irq_disable();
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ goto repeat;
}
- local_irq_disable();
- t->next = tasklet_hi_vec[cpu].list;
- tasklet_hi_vec[cpu].list = t;
- __cpu_raise_softirq(cpu, HI_SOFTIRQ);
- local_irq_enable();
+ tasklet_unlock(t);
+ if (test_bit(TASKLET_STATE_SCHED, &t->state))
+ tasklet_hi_schedule(t);
}
+ local_irq_enable();
}
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
- t->func = func;
- t->data = data;
+ t->next = NULL;
t->state = 0;
atomic_set(&t->count, 0);
+ t->func = func;
+ t->data = data;
}
void tasklet_kill(struct tasklet_struct *t)
if (PageLocked(page))
BUG();
- flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty) | (1 << PG_referenced) | (1 << PG_arch_1));
+ flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty) | (1 << PG_referenced) | (1 << PG_arch_1) | (1 << PG_checked));
page->flags = flags | (1 << PG_locked);
page_cache_get(page);
page->index = offset;
struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
{
#ifdef CONFIG_NUMA
- return __alloc_pages(NODE_DATA(nid)->node_zonelists + gfp_mask, order);
+ return __alloc_pages(gfp_mask, order, NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
#else
return alloc_pages(gfp_mask, order);
#endif
static struct page * alloc_pages_pgdat(pg_data_t *pgdat, int gfp_mask,
unsigned long order)
{
- return __alloc_pages(pgdat->node_zonelists + gfp_mask, order);
+ return __alloc_pages(gfp_mask, order, pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}
/*
* This can be refined. Currently, tries to do round robin, instead
* should do concentratic circle search, starting from current node.
*/
-struct page * alloc_pages(int gfp_mask, unsigned long order)
+struct page * _alloc_pages(int gfp_mask, unsigned long order)
{
struct page *ret = 0;
pg_data_t *start, *temp;
return NULL;
}
+#ifndef CONFIG_DISCONTIGMEM
+struct page *_alloc_pages(unsigned int gfp_mask, unsigned long order)
+{
+ return __alloc_pages(gfp_mask, order,
+ contig_page_data.node_zonelists+(gfp_mask & GFP_ZONEMASK));
+}
+#endif
/*
* This is the 'heart' of the zoned buddy allocator:
*/
-struct page * __alloc_pages(zonelist_t *zonelist, unsigned long order)
+struct page * __alloc_pages(unsigned int gfp_mask, unsigned long order, zonelist_t *zonelist)
{
zone_t **zone;
int direct_reclaim = 0;
- unsigned int gfp_mask = zonelist->gfp_mask;
struct page * page;
/*
}
/*
* When we arrive here, we are really tight on memory.
+ * Since kswapd didn't succeed in freeing pages for us,
+ * we try to help it.
*
- * We try to free pages ourselves by:
- * - shrinking the i/d caches.
- * - reclaiming unused memory from the slab caches.
- * - swapping/syncing pages to disk (done by page_launder)
- * - moving clean pages from the inactive dirty list to
- * the inactive clean list. (done by page_launder)
+ * Single page allocs loop until the allocation succeeds.
+ * Multi-page allocs can fail due to memory fragmentation;
+ * in that case we bail out to prevent infinite loops and
+ * hanging device drivers ...
+ *
+  * Another issue is GFP_BUFFER allocations; because they
+  * do not have __GFP_IO set, it's possible we cannot make
+  * any progress freeing pages; in that case it's better
+  * to give up than to deadlock the kernel looping here.
*/
if (gfp_mask & __GFP_WAIT) {
memory_pressure++;
- try_to_free_pages(gfp_mask);
- goto try_again;
+ if (!order || free_shortage()) {
+ int progress = try_to_free_pages(gfp_mask);
+ if (progress || gfp_mask & __GFP_IO)
+ goto try_again;
+ }
}
}
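
Given the bail-out above, order > 0 callers should be ready to degrade
rather than rely on the allocator looping. A hedged sketch (the order
and the fallback policy are hypothetical):

	static struct page *example_alloc_ring(void)
	{
		/* try a contiguous 8-page buffer first; higher-order
		 * allocations may now fail under fragmentation */
		struct page *page = alloc_pages(GFP_KERNEL, 3);

		if (!page)
			page = alloc_pages(GFP_KERNEL, 0);	/* degrade */
		return page;
	}
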
{
int i, j, k;
- for (i = 0; i < NR_GFPINDEX; i++) {
+ for (i = 0; i <= GFP_ZONEMASK; i++) {
zonelist_t *zonelist;
zone_t *zone;
zonelist = pgdat->node_zonelists + i;
memset(zonelist, 0, sizeof(*zonelist));
- zonelist->gfp_mask = i;
j = 0;
k = ZONE_NORMAL;
if (i & __GFP_HIGHMEM)
* go out to Matthew Dillon.
*/
#define MAX_LAUNDER (4 * (1 << page_cluster))
+#define CAN_DO_IO (gfp_mask & __GFP_IO)
+#define CAN_DO_BUFFERS (gfp_mask & __GFP_BUFFER)
int page_launder(int gfp_mask, int sync)
{
int launder_loop, maxscan, cleaned_pages, maxlaunder;
- int can_get_io_locks;
struct list_head * page_lru;
struct page * page;
- /*
- * We can only grab the IO locks (eg. for flushing dirty
- * buffers to disk) if __GFP_IO is set.
- */
- can_get_io_locks = gfp_mask & __GFP_IO;
-
launder_loop = 0;
maxlaunder = 0;
cleaned_pages = 0;
/* Page is or was in use? Move it to the active list. */
if (PageReferenced(page) || page->age > 0 ||
+ page->zone->free_pages > page->zone->pages_high ||
(!page->buffers && page_count(page) > 1) ||
page_ramdisk(page)) {
del_page_from_inactive_dirty_list(page);
goto page_active;
/* First time through? Move it to the back of the list */
- if (!launder_loop) {
+ if (!launder_loop || !CAN_DO_IO) {
list_del(page_lru);
list_add(page_lru, &inactive_dirty_list);
UnlockPage(page);
* loads, flush out the dirty pages before we have to wait on
* IO.
*/
- if (can_get_io_locks && !launder_loop && free_shortage()) {
+ if ((CAN_DO_IO || CAN_DO_BUFFERS) && !launder_loop && free_shortage()) {
launder_loop = 1;
/* If we cleaned pages, never do synchronous IO. */
if (cleaned_pages)
/*
* When we are background aging, we try to increase the page aging
- * information in the system. When we have too many inactive pages
- * we don't do background aging since having all pages on the
- * inactive list decreases aging information.
- *
- * Since not all active pages have to be on the active list, we round
- * nr_active_pages up to num_physpages/2, if needed.
+ * information in the system.
*/
- if (!target) {
- int inactive = nr_free_pages() + nr_inactive_clean_pages() +
- nr_inactive_dirty_pages;
- int active = MAX(nr_active_pages, num_physpages / 2);
- if (active > 10 * inactive)
- maxscan = nr_active_pages >> 4;
- else if (active > 3 * inactive)
- maxscan = nr_active_pages >> 8;
- else
- return 0;
- }
+ if (!target)
+ maxscan = nr_active_pages >> 4;
/* Take the lock while messing with the list... */
spin_lock(&pagemap_lru_lock);
ret = pt->func(skb, skb->dev, pt);
- tasklet_enable(bh_task_vec+TIMER_BH);
+ tasklet_hi_enable(bh_task_vec+TIMER_BH);
spin_unlock(&net_bh_lock);
return ret;
}